| prompt | completion | api |
|---|---|---|
| stringlengths 19–1.03M | stringlengths 4–2.12k | stringlengths 8–90 |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
from plotly.offline import plot, iplot
from scipy.stats import norm, kurtosis
import os
from scipy.signal import butter, lfilter, freqz
from scipy import signal
from sklearn.model_selection import train_test_split
from collections import Counter
import warnings
warnings.filterwarnings(action='once')
plt.rcParams["figure.figsize"] = 16,12
def create_labels():
    labels = pd.read_csv('../data/RawData/labels.txt', sep=" ", header=None)  # api: pandas.read_csv
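# The function is cut off here; a plausible continuation, assuming the UCI
# HAPT 'RawData/labels.txt' layout (experiment id, user id, activity id,
# start sample, end sample), would name the columns and return the frame:
#     labels.columns = ['experiment', 'user', 'activity', 'start', 'end']
#     return labels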
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import os
import argparse
from pathlib import Path
import joblib
import scipy.sparse
import string
import nltk
from nltk import word_tokenize
nltk.download('punkt')
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelBinarizer
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
'''
Preprocessing and preparation of data:
The purpose of this script is to prepare and preprocess the raw textual data and the admission data needed for training and testing the classification model. This process includes the following steps:
1. Clean and prepare admission data
2. Extract discharge summaries from note data
3. Remove newborn cases and in-hospital deaths
4. Bind note data to 30-day readmission information
5. Split into train, validation and test sets and balance training data by oversampling positive cases
6. Remove special characters, numbers and de-identified brackets
7. Vectorise all discharge notes:
    7a. Remove stop-words, most common words and very rare words (benchmarks need to be defined)
    7b. Create set of TF-IDF weighted tokenised discharge notes
8. Output datasets and labels as CSV files
(A minimal sketch of steps 5 and 7 follows below.)
'''
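# The balancing and vectorisation steps (5 and 7) are not part of this excerpt.
# Below is a minimal sketch of how they could look with the modules imported
# above; the column names 'TEXT' and 'READM_30' and the split ratios are
# assumptions for illustration, not the script's actual choices.
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from imblearn.over_sampling import RandomOverSampler

def vectorise_and_balance(notes: pd.DataFrame):
    # Step 5: split into train (70%), validation (15%) and test (15%) sets
    train, rest = train_test_split(notes, test_size=0.3, random_state=42)
    val, test = train_test_split(rest, test_size=0.5, random_state=42)
    # Step 5: balance the training set by oversampling positive (readmitted)
    # cases; resampling row positions keeps the text column untouched
    ros = RandomOverSampler(random_state=42)
    pos, _ = ros.fit_resample(np.arange(len(train)).reshape(-1, 1), train['READM_30'])
    train = train.iloc[pos.ravel()]
    # Step 7: TF-IDF vectorise, dropping English stop-words plus overly common
    # (>90% of documents) and very rare (<5 documents) tokens
    vec = TfidfVectorizer(stop_words='english', max_df=0.9, min_df=5)
    return (vec.fit_transform(train['TEXT']), vec.transform(val['TEXT']),
            vec.transform(test['TEXT']),
            train['READM_30'], val['READM_30'], test['READM_30'])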
# Defining main function
def main(args):
    notes_file = args.nf
    admissions_file = args.af
    NotePreprocessing(notes_file=notes_file, admissions_file=admissions_file)

# Defining class 'NotePreprocessing'
class NotePreprocessing:
    def __init__(self, notes_file, admissions_file):
        # Setting directory of input data
        data_dir = self.setting_data_directory()
        # Setting directory of output plots
        out_dir = self.setting_output_directory()
        # Loading notes
        if notes_file is None:
            notes = pd.read_csv(data_dir / "NOTEEVENTS.csv")
        else:
            notes = pd.read_csv(data_dir / notes_file)
        # Loading general admission data
        if admissions_file is None:
            admissions = pd.read_csv(data_dir / "ADMISSIONS.csv")
        else:
            admissions = pd.read_csv(data_dir / admissions_file)
        #-#-# PREPROCESSING ADMISSIONS DATA #-#-#
        # Convert admission and discharge times to datetime
        admissions.ADMITTIME = pd.to_datetime(admissions.ADMITTIME, format='%Y-%m-%d %H:%M:%S', errors='coerce')
        admissions.DISCHTIME = pd.to_datetime(admissions.DISCHTIME, format='%Y-%m-%d %H:%M:%S', errors='coerce')  # api: pandas.to_datetime
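# The excerpt ends before step 4 (binding 30-day readmission information).
# A minimal sketch of that step, assuming the standard MIMIC-III column names
# SUBJECT_ID, ADMITTIME and DISCHTIME:
import pandas as pd

def add_readmission_label(admissions: pd.DataFrame) -> pd.DataFrame:
    # Sort each patient's admissions chronologically
    admissions = admissions.sort_values(['SUBJECT_ID', 'ADMITTIME'])
    # For every stay, look up the same patient's next admission time
    admissions['NEXT_ADMITTIME'] = admissions.groupby('SUBJECT_ID')['ADMITTIME'].shift(-1)
    # Days between discharge and the next admission (NaT where there is none)
    days_to_next = (admissions['NEXT_ADMITTIME'] - admissions['DISCHTIME']).dt.days
    # Positive label if the patient returned within 30 days
    admissions['READM_30'] = (days_to_next <= 30).astype(int)
    return admissions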
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt import settings
from vectorbt.utils.random import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
settings.returns['year_freq'] = '252 days' # same as empyrical
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]))
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
# ############# nb ############# #
def assert_same_tuple(tup1, tup2):
    for i in range(len(tup1)):
        assert tup1[i] == tup2[i] or (np.isnan(tup1[i]) and np.isnan(tup2[i]))
def test_process_order_nb():
# Errors, ignored and rejected orders
log_record = np.empty(1, dtype=log_dt)[0]
log_record[0] = 0
log_record[1] = 0
log_record[2] = 0
log_record[3] = 0
log_record[-1] = 0
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=0))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=1))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
-100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.nan, 100., 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., np.inf, 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., np.nan, 10., 1100.,
nb.create_order_nb(size=10, price=10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, size_type=-2), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, size_type=20), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=-2), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=20), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., -100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.LongOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.ShortOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=-10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fees=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fees=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, slippage=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, slippage=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=np.inf), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, max_size=0), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, max_size=-10), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=np.nan), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=-1), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=2), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., np.nan,
nb.create_order_nb(size=1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., -10.,
nb.create_order_nb(size=1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., np.inf, 1100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetValue), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., -10., 1100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetValue), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., np.nan, 1100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetValue), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
cash_now, shares_now, order_result = nb.process_order_nb(
100., -10., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.ShortOnly), log_record)
assert cash_now == 100.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
cash_now, shares_now, order_result = nb.process_order_nb(
100., -10., 10., 1100.,
nb.create_order_nb(size=-np.inf, price=10, direction=Direction.All), log_record)
assert cash_now == 100.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 10., 10., 1100.,
nb.create_order_nb(size=0, price=10), log_record)
assert cash_now == 100.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=15, price=10, max_size=10, allow_partial=False, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=15, price=10, max_size=10, allow_partial=False), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=1., raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, reject_prob=1.), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.LongOnly), log_record)
assert cash_now == 0.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, direction=Direction.All), log_record)
assert cash_now == 0.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.LongOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.All), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 1100.,
nb.create_order_nb(size=-10, price=10, direction=Direction.ShortOnly), log_record)
assert cash_now == 100.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=np.inf, price=10, direction=Direction.ShortOnly), log_record)
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
np.inf, 100., 10., 1100.,
nb.create_order_nb(size=-np.inf, price=10, direction=Direction.All), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 1100.,
nb.create_order_nb(size=-10, price=10, direction=Direction.LongOnly), log_record)
assert cash_now == 100.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=100, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, fixed_fees=100), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=100, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=10, price=10, min_size=100), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=100, price=10, allow_partial=False, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=100, price=10, allow_partial=False), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, min_size=100, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, min_size=100), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-200, price=10, direction=Direction.LongOnly, allow_partial=False,
raise_reject=True),
log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-200, price=10, direction=Direction.LongOnly, allow_partial=False), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
with pytest.raises(Exception) as e_info:
_ = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, fixed_fees=1000, raise_reject=True), log_record)
cash_now, shares_now, order_result = nb.process_order_nb(
100., 100., 10., 1100.,
nb.create_order_nb(size=-10, price=10, fixed_fees=1000), log_record)
assert cash_now == 100.
assert shares_now == 100.
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
# Calculations
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=10, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 0.
assert shares_now == 8.18181818181818
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
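# Reading aid for the numbers above (not part of the test): with 10% slippage
# the fill price is 10 * 1.1 = 11; solving size * 11 * (1 + 0.1) + 1 = 100
# gives size = 99 / 12.1 = 8.1818..., and fees = 8.1818... * 11 * 0.1 + 1 = 10.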
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=100, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 0.
assert shares_now == 8.18181818181818
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-10, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 180.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
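# Likewise for the short sale above: fill price 10 * (1 - 0.1) = 9, proceeds
# 10 * 9 = 90, fees 90 * 0.1 + 1 = 10, so cash becomes 100 + 90 - 10 = 180.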
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-100, price=10, fees=0.1, fixed_fees=1, slippage=0.1), log_record)
assert cash_now == 909.
assert shares_now == -100.
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=10, price=10, size_type=SizeType.TargetShares), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-10, price=10, size_type=SizeType.TargetShares), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=100, price=10, size_type=SizeType.TargetValue), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-100, price=10, size_type=SizeType.TargetValue), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-1, price=10, size_type=SizeType.TargetPercent), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=1, price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=0.5, price=10, size_type=SizeType.Percent, fixed_fees=1.), log_record)
assert cash_now == 50.
assert shares_now == 4.9
assert_same_tuple(order_result, OrderResult(
size=4.9, price=10.0, fees=1., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 10., 10., 100.,
nb.create_order_nb(size=-1, price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 100.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
0., 10., 10., 100.,
nb.create_order_nb(size=-0.5, price=10, size_type=SizeType.Percent, fixed_fees=1.), log_record)
assert cash_now == 49.
assert shares_now == 5.
assert_same_tuple(order_result, OrderResult(
size=5., price=10.0, fees=1., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., -10., 10., 100.,
nb.create_order_nb(size=1., price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 0.
assert shares_now == 0.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
0., -10., 10., 100.,
nb.create_order_nb(size=-1., price=10, size_type=SizeType.Percent), log_record)
assert cash_now == 100.
assert shares_now == -20.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=np.inf, price=10), log_record)
assert cash_now == 0.
assert shares_now == 10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-np.inf, price=10), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
cash_now, shares_now, order_result = nb.process_order_nb(
150., -5., 10., 100.,
nb.create_order_nb(size=-np.inf, price=10), log_record)
assert cash_now == 200.
assert shares_now == -10.
assert_same_tuple(order_result, OrderResult(
size=5., price=10.0, fees=0., side=1, status=0, status_info=-1))
# Logging
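# The tuples below follow the log_dt record layout: identifiers, the account
# state before the order (cash, shares, valuation price, value), the order
# request fields, the state after the order, and the order result (a reading
# aid for the asserts, not an exhaustive field list).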
_ = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(log=True), log_record)
assert_same_tuple(log_record, (
0, 0, 0, 0, 100., 0., 10., 100., np.nan, 0, 2, np.nan, 0., 0., 0., 0., np.inf, 0.,
True, False, True, 100., 0., np.nan, np.nan, np.nan, -1, 1, 0, 0
))
_ = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=np.inf, price=10, log=True), log_record)
assert_same_tuple(log_record, (
0, 0, 0, 0, 100., 0., 10., 100., np.inf, 0, 2, 10., 0., 0., 0., 0., np.inf, 0.,
True, False, True, 0., 10., 10., 10., 0., 0, 0, -1, 0
))
_ = nb.process_order_nb(
100., 0., 10., 100.,
nb.create_order_nb(size=-np.inf, price=10, log=True), log_record)
assert_same_tuple(log_record, (
0, 0, 0, 0, 100., 0., 10., 100., -np.inf, 0, 2, 10., 0., 0., 0., 0., np.inf, 0.,
True, False, True, 200., -10., 10., 10., 0., 1, 0, -1, 0
))
def test_build_call_seq_nb():
group_lens = np.array([1, 2, 3, 4])
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Default),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Default)
)
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Reversed),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Reversed)
)
set_seed(seed)
out1 = nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Random)
set_seed(seed)
out2 = nb.build_call_seq((10, 10), group_lens, CallSeqType.Random)
np.testing.assert_array_equal(out1, out2)
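# For group_lens [1, 2, 3, 4] each row of the default call sequence enumerates
# columns within their group, i.e. [0, 0, 1, 0, 1, 2, 0, 1, 2, 3]; 'reversed'
# flips the order within each group, and 'random' shuffles it per row.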
# ############# from_signals ############# #
entries = pd.Series([True, True, True, False, False], index=price.index)
entries_wide = entries.vbt.tile(3, keys=['a', 'b', 'c'])
exits = pd.Series([False, False, True, True, True], index=price.index)
exits_wide = exits.vbt.tile(3, keys=['a', 'b', 'c'])
def from_signals_all(price=price, entries=entries, exits=exits, **kwargs):
    return vbt.Portfolio.from_signals(price, entries, exits, direction='all', **kwargs)

def from_signals_longonly(price=price, entries=entries, exits=exits, **kwargs):
    return vbt.Portfolio.from_signals(price, entries, exits, direction='longonly', **kwargs)

def from_signals_shortonly(price=price, entries=entries, exits=exits, **kwargs):
    return vbt.Portfolio.from_signals(price, entries, exits, direction='shortonly', **kwargs)
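# With the entries/exits above, direction='all' buys with all cash on the first
# entry bar (100 shares at price 1) and reverses on the first exit-only bar
# (sells 200 at price 4: 100 to close the long, 100 to open a short);
# 'longonly' only closes the position, while 'shortonly' shorts 100 at price 1
# and can buy back only 50 at price 4 with the proceeds.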
class TestFromSignals:
def test_one_column(self):
record_arrays_close(
from_signals_all().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 3, 0, 50., 4., 0., 0)
], dtype=order_dt)
)
portfolio = from_signals_all()
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_signals_all(price=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 200., 4., 0., 1),
(2, 0, 1, 100., 1., 0., 0), (3, 3, 1, 200., 4., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 3, 2, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 3, 0, 100., 4., 0., 1),
(2, 0, 1, 100., 1., 0., 0), (3, 3, 1, 100., 4., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 3, 2, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 3, 0, 50., 4., 0., 0),
(2, 0, 1, 100., 1., 0., 1), (3, 3, 1, 50., 4., 0., 0),
(4, 0, 2, 100., 1., 0., 1), (5, 3, 2, 50., 4., 0., 0)
], dtype=order_dt)
)
portfolio = from_signals_all(price=price_wide)
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert portfolio.wrapper.ndim == 2
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_size(self):
record_arrays_close(
from_signals_all(size=[[-1, 0, 1, np.inf]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 2, 1.0, 1.0, 0.0, 0),
(3, 3, 2, 2.0, 4.0, 0.0, 1), (4, 0, 3, 100.0, 1.0, 0.0, 0), (5, 3, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[-1, 0, 1, np.inf]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 2, 1.0, 1.0, 0.0, 0),
(3, 3, 2, 1.0, 4.0, 0.0, 1), (4, 0, 3, 100.0, 1.0, 0.0, 0), (5, 3, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[-1, 0, 1, np.inf]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 2, 1.0, 1.0, 0.0, 1),
(3, 3, 2, 1.0, 4.0, 0.0, 0), (4, 0, 3, 100.0, 1.0, 0.0, 1), (5, 3, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_percent(self):
with pytest.raises(Exception) as e_info:
_ = from_signals_all(size=0.5, size_type='percent')
record_arrays_close(
from_signals_all(size=0.5, size_type='percent', close_first=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 3, 0, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_all(size=0.5, size_type='percent', close_first=True, accumulate=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 12.5, 2., 0., 0),
(2, 3, 0, 31.25, 4., 0., 1), (3, 4, 0, 15.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 3, 0, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=0.5, size_type='percent').order_records,
np.array([], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
price=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 25., 1., 0., 0),
(2, 0, 2, 12.5, 1., 0., 0), (3, 3, 0, 50., 4., 0., 1),
(4, 3, 1, 25., 4., 0., 1), (5, 3, 2, 12.5, 4., 0., 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_signals_all(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 3, 0, 198.01980198019803, 4.04, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099, 1.01, 0., 0), (1, 3, 0, 99.00990099, 4.04, 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 3, 0, 49.504950495049506, 4.04, 0.0, 0)
], dtype=order_dt)
)
def test_fees(self):
record_arrays_close(
from_signals_all(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 2.0, 4.0, 0.8, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 2.0, 4.0, 8.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 1.0, 4.0, 0.4, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 1.0, 4.0, 4.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.1, 1),
(3, 3, 1, 1.0, 4.0, 0.4, 0), (4, 0, 2, 1.0, 1.0, 1.0, 1), (5, 3, 2, 1.0, 4.0, 4.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_signals_all(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 2.0, 4.0, 0.1, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 2.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.1, 0),
(3, 3, 1, 1.0, 4.0, 0.1, 1), (4, 0, 2, 1.0, 1.0, 1.0, 0), (5, 3, 2, 1.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.1, 1),
(3, 3, 1, 1.0, 4.0, 0.1, 0), (4, 0, 2, 1.0, 1.0, 1.0, 1), (5, 3, 2, 1.0, 4.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_signals_all(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.1, 0.0, 0),
(3, 3, 1, 2.0, 3.6, 0.0, 1), (4, 0, 2, 1.0, 2.0, 0.0, 0), (5, 3, 2, 2.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.1, 0.0, 0),
(3, 3, 1, 1.0, 3.6, 0.0, 1), (4, 0, 2, 1.0, 2.0, 0.0, 0), (5, 3, 2, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 0.9, 0.0, 1),
(3, 3, 1, 1.0, 4.4, 0.0, 0), (4, 0, 2, 1.0, 0.0, 0.0, 1), (5, 3, 2, 1.0, 8.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_signals_all(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.0, 0),
(3, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.0, 0),
(3, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.0, 1),
(3, 3, 1, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_signals_all(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 3, 0, 0.5, 4.0, 0.0, 1), (2, 4, 0, 0.5, 5.0, 0.0, 1),
(3, 0, 1, 1.0, 1.0, 0.0, 0), (4, 3, 1, 1.0, 4.0, 0.0, 1), (5, 4, 1, 1.0, 5.0, 0.0, 1),
(6, 0, 2, 1.0, 1.0, 0.0, 0), (7, 3, 2, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 3, 0, 0.5, 4.0, 0.0, 1), (2, 0, 1, 1.0, 1.0, 0.0, 0),
(3, 3, 1, 1.0, 4.0, 0.0, 1), (4, 0, 2, 1.0, 1.0, 0.0, 0), (5, 3, 2, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 3, 0, 0.5, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.0, 1),
(3, 3, 1, 1.0, 4.0, 0.0, 0), (4, 0, 2, 1.0, 1.0, 0.0, 1), (5, 3, 2, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_signals_all(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 2.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 3, 0, 1.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 1.0, 4.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 1),
(3, 3, 1, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_close_first(self):
record_arrays_close(
from_signals_all(close_first=[[False, True]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 100.0, 4.0, 0.0, 1), (4, 4, 1, 80.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_all(
price=pd.Series(price.values[::-1], index=price.index),
entries=pd.Series(entries.values[::-1], index=price.index),
exits=pd.Series(exits.values[::-1], index=price.index),
close_first=[[False, True]]
).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1), (1, 3, 0, 100.0, 2.0, 0.0, 0), (2, 0, 1, 20.0, 5.0, 0.0, 1),
(3, 3, 1, 20.0, 2.0, 0.0, 0), (4, 4, 1, 160.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_allow_partial(self):
record_arrays_close(
from_signals_all(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 1100.0, 4.0, 0.0, 1), (2, 3, 1, 1000.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 3, 0, 275.0, 4.0, 0.0, 0), (2, 0, 1, 1000.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_all(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 100.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 3, 0, 50.0, 4.0, 0.0, 0), (2, 0, 1, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_signals_all(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 1100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
with pytest.raises(Exception) as e_info:
_ = from_signals_shortonly(size=1000, allow_partial=True, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_all(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_longonly(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_shortonly(size=1000, allow_partial=False, raise_reject=True).order_records
def test_accumulate(self):
record_arrays_close(
from_signals_all(size=1, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_log(self):
record_arrays_close(
from_signals_all(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 1.0, 100.0, np.inf, 0, 2, 1.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 0.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 3, 0, 0, 0.0, 100.0, 4.0, 400.0, -np.inf, 0, 2, 4.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 800.0, -100.0, 200.0, 4.0, 0.0, 1, 0, -1, 1)
], dtype=log_dt)
)
def test_conflict_mode(self):
kwargs = dict(
price=price.iloc[:3],
entries=pd.DataFrame([
[True, True, True, True, True],
[True, True, True, True, False],
[True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True],
[False, False, False, False, True],
[True, True, True, True, True]
]),
size=1.,
conflict_mode=[[
'ignore',
'entry',
'exit',
'opposite',
'opposite'
]]
)
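# conflict_mode resolves bars where entry and exit signals coincide: 'ignore'
# drops both signals, 'entry' and 'exit' keep only the respective signal, and
# 'opposite' keeps the signal opposite to the current position.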
record_arrays_close(
from_signals_all(**kwargs).order_records,
np.array([
(0, 1, 0, 1.0, 2.0, 0.0, 0), (1, 0, 1, 1.0, 1.0, 0.0, 0), (2, 0, 2, 1.0, 1.0, 0.0, 1),
(3, 1, 2, 2.0, 2.0, 0.0, 0), (4, 2, 2, 2.0, 3.0, 0.0, 1), (5, 1, 3, 1.0, 2.0, 0.0, 0),
(6, 2, 3, 2.0, 3.0, 0.0, 1), (7, 1, 4, 1.0, 2.0, 0.0, 1), (8, 2, 4, 2.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(**kwargs).order_records,
np.array([
(0, 1, 0, 1.0, 2.0, 0.0, 0), (1, 0, 1, 1.0, 1.0, 0.0, 0), (2, 1, 2, 1.0, 2.0, 0.0, 0),
(3, 2, 2, 1.0, 3.0, 0.0, 1), (4, 1, 3, 1.0, 2.0, 0.0, 0), (5, 2, 3, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(**kwargs).order_records,
np.array([
(0, 1, 0, 1.0, 2.0, 0.0, 1), (1, 0, 1, 1.0, 1.0, 0.0, 1), (2, 1, 2, 1.0, 2.0, 0.0, 1),
(3, 2, 2, 1.0, 3.0, 0.0, 0), (4, 1, 3, 1.0, 2.0, 0.0, 1), (5, 2, 3, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_init_cash(self):
record_arrays_close(
from_signals_all(price=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 3, 0, 1.0, 4.0, 0.0, 1), (1, 0, 1, 1.0, 1.0, 0.0, 0), (2, 3, 1, 2.0, 4.0, 0.0, 1),
(3, 0, 2, 1.0, 1.0, 0.0, 0), (4, 3, 2, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 1, 1.0, 1.0, 0.0, 0), (1, 3, 1, 1.0, 4.0, 0.0, 1), (2, 0, 2, 1.0, 1.0, 0.0, 0),
(3, 3, 2, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 3, 0, 0.25, 4.0, 0.0, 0), (2, 0, 1, 1.0, 1.0, 0.0, 1),
(3, 3, 1, 0.5, 4.0, 0.0, 0), (4, 0, 2, 1.0, 1.0, 0.0, 1), (5, 3, 2, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception) as e_info:
_ = from_signals_all(init_cash=np.inf).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_longonly(init_cash=np.inf).order_records
with pytest.raises(Exception) as e_info:
_ = from_signals_shortonly(init_cash=np.inf).order_records
def test_group_by(self):
portfolio = from_signals_all(price=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 0, 1, 100.0, 1.0, 0.0, 0),
(3, 3, 1, 200.0, 4.0, 0.0, 1), (4, 0, 2, 100.0, 1.0, 0.0, 0), (5, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not portfolio.cash_sharing
def test_cash_sharing(self):
portfolio = from_signals_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 3, 1, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert portfolio.cash_sharing
with pytest.raises(Exception) as e_info:
_ = portfolio.regroup(group_by=False)
def test_call_seq(self):
portfolio = from_signals_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 3, 0, 200.0, 4.0, 0.0, 1), (2, 3, 1, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
portfolio = from_signals_all(
price=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 3, 1, 200.0, 4.0, 0.0, 1), (2, 3, 0, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
portfolio = from_signals_all(
price=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 3, 1, 200.0, 4.0, 0.0, 1), (2, 3, 0, 200.0, 4.0, 0.0, 1),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 3, 2, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
price=1.,
entries=pd.DataFrame([
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
[False, True, False],
]),
exits=pd.DataFrame([
[False, False, False],
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
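# call_seq='auto' reorders execution within each group on every bar so that
# columns that sell (and free up cash) are processed before columns that buy,
# which is what the rotating call sequences asserted below reflect.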
portfolio = from_signals_all(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 200.0, 1.0, 0.0, 1), (2, 1, 1, 200.0, 1.0, 0.0, 0),
(3, 2, 1, 400.0, 1.0, 0.0, 1), (4, 2, 0, 400.0, 1.0, 0.0, 0), (5, 3, 0, 800.0, 1.0, 0.0, 1),
(6, 3, 2, 800.0, 1.0, 0.0, 0), (7, 4, 2, 1400.0, 1.0, 0.0, 1), (8, 4, 1, 1400.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
portfolio = from_signals_longonly(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 2, 1, 100.0, 1.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 3, 0, 100.0, 1.0, 0.0, 1),
(6, 3, 2, 100.0, 1.0, 0.0, 0), (7, 4, 2, 100.0, 1.0, 0.0, 1), (8, 4, 1, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
portfolio = from_signals_shortonly(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 1), (1, 1, 1, 200.0, 1.0, 0.0, 1), (2, 1, 2, 100.0, 1.0, 0.0, 0),
(3, 2, 0, 300.0, 1.0, 0.0, 1), (4, 2, 1, 200.0, 1.0, 0.0, 0), (5, 3, 2, 400.0, 1.0, 0.0, 1),
(6, 3, 0, 300.0, 1.0, 0.0, 0), (7, 4, 1, 500.0, 1.0, 0.0, 1), (8, 4, 2, 400.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
portfolio = from_signals_longonly(**kwargs, size=1., size_type='percent')
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 2, 1, 100.0, 1.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 3, 0, 100.0, 1.0, 0.0, 1),
(6, 3, 2, 100.0, 1.0, 0.0, 0), (7, 4, 2, 100.0, 1.0, 0.0, 1), (8, 4, 1, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 0, 1]
])
)
def test_max_orders(self):
_ = from_signals_all(price=price_wide)
_ = from_signals_all(price=price_wide, max_orders=6)
with pytest.raises(Exception) as e_info:
_ = from_signals_all(price=price_wide, max_orders=5)
def test_max_logs(self):
_ = from_signals_all(price=price_wide, log=True)
_ = from_signals_all(price=price_wide, log=True, max_logs=6)
with pytest.raises(Exception) as e_info:
_ = from_signals_all(price=price_wide, log=True, max_logs=5)
# ############# from_holding ############# #
class TestFromHolding:
def test_from_holding(self):
record_arrays_close(
vbt.Portfolio.from_holding(price).order_records,
vbt.Portfolio.from_signals(price, True, False, accumulate=False).order_records
)
# ############# from_random_signals ############# #
class TestFromRandom:
def test_from_random_n(self):
result = vbt.Portfolio.from_random_signals(price, n=2, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, True, False, False],
[False, True, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, n=[1, 2], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [True, False], [False, True], [False, False], [False, False]],
[[False, False], [False, True], [False, False], [False, True], [True, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.Int64Index([1, 2], dtype='int64', name='rand_n')
)
def test_from_random_prob(self):
result = vbt.Portfolio.from_random_signals(price, prob=0.5, seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[True, False, False, False, False],
[False, False, False, False, True]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
price.vbt.wrapper.index
)
pd.testing.assert_index_equal(
result.wrapper.columns,
price.vbt.wrapper.columns
)
result = vbt.Portfolio.from_random_signals(price, prob=[0.25, 0.5], seed=seed)
record_arrays_close(
result.order_records,
vbt.Portfolio.from_signals(
price,
[[False, True], [False, False], [False, False], [False, False], [True, False]],
[[False, False], [False, True], [False, False], [False, False], [False, False]]
).order_records
)
pd.testing.assert_index_equal(
result.wrapper.index,
pd.DatetimeIndex([
'2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'
], dtype='datetime64[ns]', freq=None)
)
pd.testing.assert_index_equal(
result.wrapper.columns,
pd.MultiIndex.from_tuples([(0.25, 0.25), (0.5, 0.5)], names=['rprob_entry_prob', 'rprob_exit_prob'])
)
# ############# from_orders ############# #
order_size = pd.Series([np.inf, -np.inf, np.nan, np.inf, -np.inf], index=price.index)
order_size_wide = order_size.vbt.tile(3, keys=['a', 'b', 'c'])
order_size_one = pd.Series([1, -1, np.nan, 1, -1], index=price.index)
def from_orders_all(price=price, size=order_size, **kwargs):
    return vbt.Portfolio.from_orders(price, size, direction='all', **kwargs)

def from_orders_longonly(price=price, size=order_size, **kwargs):
    return vbt.Portfolio.from_orders(price, size, direction='longonly', **kwargs)

def from_orders_shortonly(price=price, size=order_size, **kwargs):
    return vbt.Portfolio.from_orders(price, size, direction='shortonly', **kwargs)
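# In from_orders, size=np.inf means "invest all available cash", -np.inf means
# "sell/short everything available", and np.nan means "no order on this bar";
# order_size above therefore alternates full entries and full exits.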
class TestFromOrders:
def test_one_column(self):
record_arrays_close(
from_orders_all().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 1, 0, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
portfolio = from_orders_all()
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_orders_all(price=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0),
(3, 0, 1, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 3, 1, 100.0, 4.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1), (8, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1), (4, 0, 1, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 3, 1, 50.0, 4.0, 0.0, 0), (7, 4, 1, 50.0, 5.0, 0.0, 1), (8, 0, 2, 100.0, 1.0, 0.0, 0),
(9, 1, 2, 100.0, 2.0, 0.0, 1), (10, 3, 2, 50.0, 4.0, 0.0, 0), (11, 4, 2, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 1, 0, 100.0, 2.0, 0.0, 0), (2, 0, 1, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0), (4, 0, 2, 100.0, 1.0, 0.0, 1), (5, 1, 2, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
portfolio = from_orders_all(price=price_wide)
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert portfolio.wrapper.ndim == 2
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
def test_size_inf(self):
record_arrays_close(
from_orders_all(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_orders_all(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 1, 0, 198.01980198019803, 2.02, 0.0, 1),
(2, 3, 0, 99.00990099009901, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 1, 0, 99.00990099009901, 2.02, 0.0, 1),
(2, 3, 0, 49.504950495049506, 4.04, 0.0, 0), (3, 4, 0, 49.504950495049506, 5.05, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 1, 0, 99.00990099009901, 2.02, 0.0, 0)
], dtype=order_dt)
)
def test_fees(self):
record_arrays_close(
from_orders_all(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 3, 1, 1.0, 4.0, 0.4, 0), (7, 4, 1, 1.0, 5.0, 0.5, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
(9, 1, 2, 1.0, 2.0, 2.0, 1), (10, 3, 2, 1.0, 4.0, 4.0, 0), (11, 4, 2, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 3, 1, 1.0, 4.0, 0.4, 0), (7, 4, 1, 1.0, 5.0, 0.5, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
(9, 1, 2, 1.0, 2.0, 2.0, 1), (10, 3, 2, 1.0, 4.0, 4.0, 0), (11, 4, 2, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.2, 0),
(6, 3, 1, 1.0, 4.0, 0.4, 1), (7, 4, 1, 1.0, 5.0, 0.5, 0), (8, 0, 2, 1.0, 1.0, 1.0, 1),
(9, 1, 2, 1.0, 2.0, 2.0, 0), (10, 3, 2, 1.0, 4.0, 4.0, 1), (11, 4, 2, 1.0, 5.0, 5.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_orders_all(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 3, 1, 1.0, 4.0, 0.1, 0), (7, 4, 1, 1.0, 5.0, 0.1, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
(9, 1, 2, 1.0, 2.0, 1.0, 1), (10, 3, 2, 1.0, 4.0, 1.0, 0), (11, 4, 2, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 3, 1, 1.0, 4.0, 0.1, 0), (7, 4, 1, 1.0, 5.0, 0.1, 1), (8, 0, 2, 1.0, 1.0, 1.0, 0),
(9, 1, 2, 1.0, 2.0, 1.0, 1), (10, 3, 2, 1.0, 4.0, 1.0, 0), (11, 4, 2, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.1, 0),
(6, 3, 1, 1.0, 4.0, 0.1, 1), (7, 4, 1, 1.0, 5.0, 0.1, 0), (8, 0, 2, 1.0, 1.0, 1.0, 1),
(9, 1, 2, 1.0, 2.0, 1.0, 0), (10, 3, 2, 1.0, 4.0, 1.0, 1), (11, 4, 2, 1.0, 5.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_orders_all(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 3, 1, 1.0, 4.4, 0.0, 0), (7, 4, 1, 1.0, 4.5, 0.0, 1), (8, 0, 2, 1.0, 2.0, 0.0, 0),
(9, 1, 2, 1.0, 0.0, 0.0, 1), (10, 3, 2, 1.0, 8.0, 0.0, 0), (11, 4, 2, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 3, 1, 1.0, 4.4, 0.0, 0), (7, 4, 1, 1.0, 4.5, 0.0, 1), (8, 0, 2, 1.0, 2.0, 0.0, 0),
(9, 1, 2, 1.0, 0.0, 0.0, 1), (10, 3, 2, 1.0, 8.0, 0.0, 0), (11, 4, 2, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 0.9, 0.0, 1), (5, 1, 1, 1.0, 2.2, 0.0, 0),
(6, 3, 1, 1.0, 3.6, 0.0, 1), (7, 4, 1, 1.0, 5.5, 0.0, 0), (8, 0, 2, 1.0, 0.0, 0.0, 1),
(9, 1, 2, 1.0, 4.0, 0.0, 0), (10, 3, 2, 1.0, 0.0, 0.0, 1), (11, 4, 2, 1.0, 10.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_orders_all(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 3, 1, 1.0, 4.0, 0.0, 1), (7, 4, 1, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_orders_all(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 1, 0, 0.5, 2.0, 0.0, 1), (2, 3, 0, 0.5, 4.0, 0.0, 0),
(3, 4, 0, 0.5, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1), (8, 0, 2, 1.0, 1.0, 0.0, 0),
(9, 1, 2, 1.0, 2.0, 0.0, 1), (10, 3, 2, 1.0, 4.0, 0.0, 0), (11, 4, 2, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 1, 0, 0.5, 2.0, 0.0, 1), (2, 3, 0, 0.5, 4.0, 0.0, 0),
(3, 4, 0, 0.5, 5.0, 0.0, 1), (4, 0, 1, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 3, 1, 1.0, 4.0, 0.0, 0), (7, 4, 1, 1.0, 5.0, 0.0, 1), (8, 0, 2, 1.0, 1.0, 0.0, 0),
(9, 1, 2, 1.0, 2.0, 0.0, 1), (10, 3, 2, 1.0, 4.0, 0.0, 0), (11, 4, 2, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 1, 0, 0.5, 2.0, 0.0, 0), (2, 3, 0, 0.5, 4.0, 0.0, 1),
(3, 4, 0, 0.5, 5.0, 0.0, 0), (4, 0, 1, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 3, 1, 1.0, 4.0, 0.0, 1), (7, 4, 1, 1.0, 5.0, 0.0, 0), (8, 0, 2, 1.0, 1.0, 0.0, 1),
(9, 1, 2, 1.0, 2.0, 0.0, 0), (10, 3, 2, 1.0, 4.0, 0.0, 1), (11, 4, 2, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_orders_all(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 1, 1, 1.0, 2.0, 0.0, 1), (5, 3, 1, 1.0, 4.0, 0.0, 0),
(6, 4, 1, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 1.0, 2.0, 0.0, 1), (2, 3, 0, 1.0, 4.0, 0.0, 0),
(3, 4, 0, 1.0, 5.0, 0.0, 1), (4, 3, 1, 1.0, 4.0, 0.0, 0), (5, 4, 1, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 1, 0, 1.0, 2.0, 0.0, 0), (2, 3, 0, 1.0, 4.0, 0.0, 1),
(3, 4, 0, 1.0, 5.0, 0.0, 0), (4, 3, 1, 1.0, 4.0, 0.0, 1), (5, 4, 1, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_allow_partial(self):
record_arrays_close(
from_orders_all(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 1000.0, 2.0, 0.0, 1), (2, 3, 0, 500.0, 4.0, 0.0, 0),
(3, 4, 0, 1000.0, 5.0, 0.0, 1), (4, 1, 1, 1000.0, 2.0, 0.0, 1), (5, 4, 1, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 1, 0, 550.0, 2.0, 0.0, 0), (2, 3, 0, 1000.0, 4.0, 0.0, 1),
(3, 4, 0, 800.0, 5.0, 0.0, 0), (4, 0, 1, 1000.0, 1.0, 0.0, 1), (5, 3, 1, 1000.0, 4.0, 0.0, 1),
(6, 4, 1, 1000.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_all(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0),
(3, 0, 1, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 3, 1, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1), (4, 0, 1, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 3, 1, 50.0, 4.0, 0.0, 0), (7, 4, 1, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 1, 0, 100.0, 2.0, 0.0, 0), (2, 0, 1, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_orders_all(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 1000.0, 2.0, 0.0, 1), (2, 3, 0, 500.0, 4.0, 0.0, 0),
(3, 4, 0, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 2.0, 0.0, 1), (2, 3, 0, 50.0, 4.0, 0.0, 0),
(3, 4, 0, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 1, 0, 550.0, 2.0, 0.0, 0), (2, 3, 0, 1000.0, 4.0, 0.0, 1),
(3, 4, 0, 800.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception) as e_info:
_ = from_orders_all(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_orders_longonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception) as e_info:
_ = from_orders_shortonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
record_arrays_close(
from_orders_all(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 1.0, 100.0, np.inf, 0, 2, 1.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 0.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 1, 0, 0, 0.0, 100.0, 2.0, 200.0, -np.inf, 0, 2, 2.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 400.0, -100.0, 200.0, 2.0, 0.0, 1, 0, -1, 1),
(2, 2, 0, 0, 400.0, -100.0, 3.0, 100.0, np.nan, 0, 2, 3.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 400.0, -100.0, np.nan, np.nan, np.nan, -1, 1, 0, -1),
(3, 3, 0, 0, 400.0, -100.0, 4.0, 0.0, np.inf, 0, 2, 4.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 0.0, 0.0, 100.0, 4.0, 0.0, 0, 0, -1, 2),
(4, 4, 0, 0, 0.0, 0.0, 5.0, 0.0, -np.inf, 0, 2, 5.0, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0,
True, False, True, 0.0, 0.0, np.nan, np.nan, np.nan, -1, 2, 6, -1)
], dtype=log_dt)
)
def test_group_by(self):
portfolio = from_orders_all(price=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 3, 0, 100.0, 4.0, 0.0, 0),
(3, 0, 1, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 3, 1, 100.0, 4.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1), (8, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not portfolio.cash_sharing
def test_cash_sharing(self):
portfolio = from_orders_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 1, 1, 200.0, 2.0, 0.0, 1),
(3, 3, 0, 200.0, 4.0, 0.0, 0), (4, 4, 0, 200.0, 5.0, 0.0, 1), (5, 0, 2, 100.0, 1.0, 0.0, 0),
(6, 1, 2, 200.0, 2.0, 0.0, 1), (7, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert portfolio.cash_sharing
with pytest.raises(Exception) as e_info:
_ = portfolio.regroup(group_by=False)
def test_call_seq(self):
portfolio = from_orders_all(price=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1), (2, 1, 1, 200.0, 2.0, 0.0, 1),
(3, 3, 0, 200.0, 4.0, 0.0, 0), (4, 4, 0, 200.0, 5.0, 0.0, 1), (5, 0, 2, 100.0, 1.0, 0.0, 0),
(6, 1, 2, 200.0, 2.0, 0.0, 1), (7, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
portfolio = from_orders_all(
price=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 1, 1, 200.0, 2.0, 0.0, 1), (2, 1, 0, 200.0, 2.0, 0.0, 1),
(3, 3, 1, 200.0, 4.0, 0.0, 0), (4, 4, 1, 200.0, 5.0, 0.0, 1), (5, 0, 2, 100.0, 1.0, 0.0, 0),
(6, 1, 2, 200.0, 2.0, 0.0, 1), (7, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
portfolio = from_orders_all(
price=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 1, 1, 200.0, 2.0, 0.0, 1), (2, 3, 1, 100.0, 4.0, 0.0, 0),
(3, 0, 2, 100.0, 1.0, 0.0, 0), (4, 1, 2, 200.0, 2.0, 0.0, 1), (5, 3, 2, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
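        # With call_seq='auto', calls within each segment are sorted by order value,
        # so sells (which free up shared cash) execute before buys.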
kwargs = dict(
price=1.,
size=pd.DataFrame([
[0., 0., np.inf],
[0., np.inf, -np.inf],
[np.inf, -np.inf, 0.],
[-np.inf, 0., np.inf],
[0., np.inf, -np.inf],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
portfolio = from_orders_all(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 200.0, 1.0, 0.0, 1), (2, 1, 1, 200.0, 1.0, 0.0, 0),
(3, 2, 1, 400.0, 1.0, 0.0, 1), (4, 2, 0, 400.0, 1.0, 0.0, 0), (5, 3, 0, 800.0, 1.0, 0.0, 1),
(6, 3, 2, 800.0, 1.0, 0.0, 0), (7, 4, 2, 1400.0, 1.0, 0.0, 1), (8, 4, 1, 1400.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
portfolio = from_orders_longonly(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 0), (1, 1, 2, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 2, 1, 100.0, 1.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 3, 0, 100.0, 1.0, 0.0, 1),
(6, 3, 2, 100.0, 1.0, 0.0, 0), (7, 4, 2, 100.0, 1.0, 0.0, 1), (8, 4, 1, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
portfolio = from_orders_shortonly(**kwargs)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 2, 100.0, 1.0, 0.0, 1), (1, 1, 1, 200.0, 1.0, 0.0, 1), (2, 1, 2, 100.0, 1.0, 0.0, 0),
(3, 2, 0, 300.0, 1.0, 0.0, 1), (4, 2, 1, 200.0, 1.0, 0.0, 0), (5, 3, 2, 400.0, 1.0, 0.0, 1),
(6, 3, 0, 300.0, 1.0, 0.0, 0), (7, 4, 1, 500.0, 1.0, 0.0, 1), (8, 4, 2, 400.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
def test_target_shares(self):
record_arrays_close(
from_orders_all(size=[[75., -75.]], size_type='targetshares').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 0, 1, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[75., -75.]], size_type='targetshares').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[75., -75.]], size_type='targetshares').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_all(
price=price_wide, size=75., size_type='targetshares',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_value(self):
record_arrays_close(
from_orders_all(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 2.0, 0.0, 1),
(2, 2, 0, 8.333333333333332, 3.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 4.0, 0.0, 1),
(4, 4, 0, 2.5, 5.0, 0.0, 1), (5, 0, 1, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 25.0, 2.0, 0.0, 0), (7, 2, 1, 8.333333333333332, 3.0, 0.0, 0),
(8, 3, 1, 4.166666666666668, 4.0, 0.0, 0), (9, 4, 1, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 2.0, 0.0, 1),
(2, 2, 0, 8.333333333333332, 3.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 4.0, 0.0, 1),
(4, 4, 0, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 1, 0, 25.0, 2.0, 0.0, 0),
(2, 2, 0, 8.333333333333332, 3.0, 0.0, 0), (3, 3, 0, 4.166666666666668, 4.0, 0.0, 0),
(4, 4, 0, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_all(
price=price_wide, size=50., size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 50.0, 1.0, 0.0, 0),
(2, 1, 0, 25.0, 2.0, 0.0, 1), (3, 1, 1, 25.0, 2.0, 0.0, 1),
(4, 1, 2, 25.0, 2.0, 0.0, 0), (5, 2, 0, 8.333333333333332, 3.0, 0.0, 1),
(6, 2, 1, 8.333333333333332, 3.0, 0.0, 1), (7, 2, 2, 8.333333333333332, 3.0, 0.0, 1),
(8, 3, 0, 4.166666666666668, 4.0, 0.0, 1), (9, 3, 1, 4.166666666666668, 4.0, 0.0, 1),
(10, 3, 2, 4.166666666666668, 4.0, 0.0, 1), (11, 4, 0, 2.5, 5.0, 0.0, 1),
(12, 4, 1, 2.5, 5.0, 0.0, 1), (13, 4, 2, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
def test_target_percent(self):
record_arrays_close(
from_orders_all(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 12.5, 2.0, 0.0, 1), (2, 2, 0, 6.25, 3.0, 0.0, 1),
(3, 3, 0, 3.90625, 4.0, 0.0, 1), (4, 4, 0, 2.734375, 5.0, 0.0, 1), (5, 0, 1, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 37.5, 2.0, 0.0, 0), (7, 2, 1, 6.25, 3.0, 0.0, 0), (8, 3, 1, 2.34375, 4.0, 0.0, 0),
(9, 4, 1, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 12.5, 2.0, 0.0, 1), (2, 2, 0, 6.25, 3.0, 0.0, 1),
(3, 3, 0, 3.90625, 4.0, 0.0, 1), (4, 4, 0, 2.734375, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 1, 0, 37.5, 2.0, 0.0, 0), (2, 2, 0, 6.25, 3.0, 0.0, 0),
(3, 3, 0, 2.34375, 4.0, 0.0, 0), (4, 4, 0, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_all(
price=price_wide, size=0.5, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 50.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_percent(self):
record_arrays_close(
from_orders_all(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 12.5, 2., 0., 0),
(2, 2, 0, 4.16666667, 3., 0., 0), (3, 3, 0, 1.5625, 4., 0., 0),
(4, 4, 0, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 12.5, 2., 0., 0),
(2, 2, 0, 4.16666667, 3., 0., 0), (3, 3, 0, 1.5625, 4., 0., 0),
(4, 4, 0, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([], dtype=order_dt)
)
record_arrays_close(
from_orders_all(
price=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 5.00000000e+01, 1., 0., 0), (1, 0, 1, 2.50000000e+01, 1., 0., 0),
(2, 0, 2, 1.25000000e+01, 1., 0., 0), (3, 1, 0, 3.12500000e+00, 2., 0., 0),
(4, 1, 1, 1.56250000e+00, 2., 0., 0), (5, 1, 2, 7.81250000e-01, 2., 0., 0),
(6, 2, 0, 2.60416667e-01, 3., 0., 0), (7, 2, 1, 1.30208333e-01, 3., 0., 0),
(8, 2, 2, 6.51041667e-02, 3., 0., 0), (9, 3, 0, 2.44140625e-02, 4., 0., 0),
(10, 3, 1, 1.22070312e-02, 4., 0., 0), (11, 3, 2, 6.10351562e-03, 4., 0., 0),
(12, 4, 0, 2.44140625e-03, 5., 0., 0), (13, 4, 1, 1.22070312e-03, 5., 0., 0),
(14, 4, 2, 6.10351562e-04, 5., 0., 0)
], dtype=order_dt)
)
def test_auto_seq(self):
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
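        # Holdings should match the target values exactly, since 'auto' executes sells before buys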
pd.testing.assert_frame_equal(
from_orders_all(
price=1., size=target_hold_value, size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').holding_value(group_by=False),
target_hold_value
)
pd.testing.assert_frame_equal(
from_orders_all(
price=1., size=target_hold_value / 100, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').holding_value(group_by=False),
target_hold_value
)
def test_max_orders(self):
_ = from_orders_all(price=price_wide)
_ = from_orders_all(price=price_wide, max_orders=9)
with pytest.raises(Exception) as e_info:
_ = from_orders_all(price=price_wide, max_orders=8)
def test_max_logs(self):
_ = from_orders_all(price=price_wide, log=True)
_ = from_orders_all(price=price_wide, log=True, max_logs=15)
with pytest.raises(Exception) as e_info:
_ = from_orders_all(price=price_wide, log=True, max_logs=14)
# ############# from_order_func ############# #
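# Order function used by the tests below: buys `size` on even bars and sells `size` on odd bars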
@njit
def order_func_nb(oc, size):
return nb.create_order_nb(size=size if oc.i % 2 == 0 else -size, price=oc.close[oc.i, oc.col])
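# Same as order_func_nb, but also emits a log record for each order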
@njit
def log_order_func_nb(oc, size):
return nb.create_order_nb(size=size if oc.i % 2 == 0 else -size, price=oc.close[oc.i, oc.col], log=True)
class TestFromOrderFunc:
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_one_column(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(price.tolist(), order_func_nb, np.inf, row_wise=test_row_wise)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (3, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(4, 4, 0, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
portfolio = vbt.Portfolio.from_order_func(price, order_func_nb, np.inf, row_wise=test_row_wise)
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (3, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(4, 4, 0, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_multiple_columns(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(price_wide, order_func_nb, np.inf, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 0),
(2, 0, 2, 100.0, 1.0, 0.0, 0), (3, 1, 0, 200.0, 2.0, 0.0, 1),
(4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 2, 200.0, 2.0, 0.0, 1),
(6, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (7, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(10, 3, 1, 66.66666666666669, 4.0, 0.0, 1), (11, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(12, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (13, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
(14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (3, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(4, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (5, 0, 1, 100.0, 1.0, 0.0, 0),
(6, 1, 1, 200.0, 2.0, 0.0, 1), (7, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
(8, 3, 1, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
(10, 0, 2, 100.0, 1.0, 0.0, 0), (11, 1, 2, 200.0, 2.0, 0.0, 1),
(12, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (13, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert portfolio.wrapper.ndim == 2
assert portfolio.wrapper.freq == day_dt
assert portfolio.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_target_shape(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5,), row_wise=test_row_wise)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert portfolio.wrapper.ndim == 1
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 1), row_wise=test_row_wise)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0], dtype='int64', name='iteration_idx')
)
assert portfolio.wrapper.ndim == 2
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 1), row_wise=test_row_wise,
keys=pd.Index(['first'], name='custom'))
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['first'], dtype='object', name='custom')
)
assert portfolio.wrapper.ndim == 2
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 3), row_wise=test_row_wise)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Int64Index([0, 1, 2], dtype='int64', name='iteration_idx')
)
assert portfolio.wrapper.ndim == 2
portfolio = vbt.Portfolio.from_order_func(
price, order_func_nb, np.inf,
target_shape=(5, 3), row_wise=test_row_wise,
keys=pd.Index(['first', 'second', 'third'], name='custom'))
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
pd.Index(['first', 'second', 'third'], dtype='object', name='custom')
)
assert portfolio.wrapper.ndim == 2
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_group_by(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf,
group_by=np.array([0, 0, 1]), row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 0),
(2, 0, 2, 100.0, 1.0, 0.0, 0), (3, 1, 0, 200.0, 2.0, 0.0, 1),
(4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 2, 200.0, 2.0, 0.0, 1),
(6, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (7, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(10, 3, 1, 66.66666666666669, 4.0, 0.0, 1), (11, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(12, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (13, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
(14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 1.0, 0.0, 0),
(2, 1, 0, 200.0, 2.0, 0.0, 1), (3, 1, 1, 200.0, 2.0, 0.0, 1),
(4, 2, 0, 133.33333333333334, 3.0, 0.0, 0), (5, 2, 1, 133.33333333333334, 3.0, 0.0, 0),
(6, 3, 0, 66.66666666666669, 4.0, 0.0, 1), (7, 3, 1, 66.66666666666669, 4.0, 0.0, 1),
(8, 4, 0, 53.33333333333335, 5.0, 0.0, 0), (9, 4, 1, 53.33333333333335, 5.0, 0.0, 0),
(10, 0, 2, 100.0, 1.0, 0.0, 0), (11, 1, 2, 200.0, 2.0, 0.0, 1),
(12, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (13, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(14, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not portfolio.cash_sharing
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_cash_sharing(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf,
group_by=np.array([0, 0, 1]), cash_sharing=True, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 1.0, 0.0, 0),
(2, 1, 0, 200.0, 2.0, 0.0, 1), (3, 1, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 2, 200.0, 2.0, 0.0, 1), (5, 2, 0, 266.6666666666667, 3.0, 0.0, 0),
(6, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (7, 3, 0, 333.33333333333337, 4.0, 0.0, 1),
(8, 3, 2, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 0, 266.6666666666667, 5.0, 0.0, 0),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 1, 1, 200.0, 2.0, 0.0, 1), (3, 2, 0, 266.6666666666667, 3.0, 0.0, 0),
(4, 3, 0, 333.33333333333337, 4.0, 0.0, 1), (5, 4, 0, 266.6666666666667, 5.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
portfolio.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert portfolio.cash_sharing
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_call_seq(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
cash_sharing=True, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 1.0, 0.0, 0),
(2, 1, 0, 200.0, 2.0, 0.0, 1), (3, 1, 1, 200.0, 2.0, 0.0, 1),
(4, 1, 2, 200.0, 2.0, 0.0, 1), (5, 2, 0, 266.6666666666667, 3.0, 0.0, 0),
(6, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (7, 3, 0, 333.33333333333337, 4.0, 0.0, 1),
(8, 3, 2, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 0, 266.6666666666667, 5.0, 0.0, 0),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 200.0, 2.0, 0.0, 1),
(2, 1, 1, 200.0, 2.0, 0.0, 1), (3, 2, 0, 266.6666666666667, 3.0, 0.0, 0),
(4, 3, 0, 333.33333333333337, 4.0, 0.0, 1), (5, 4, 0, 266.6666666666667, 5.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed', row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 1.0, 0.0, 0),
(2, 1, 1, 200.0, 2.0, 0.0, 1), (3, 1, 0, 200.0, 2.0, 0.0, 1),
(4, 1, 2, 200.0, 2.0, 0.0, 1), (5, 2, 1, 266.6666666666667, 3.0, 0.0, 0),
(6, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (7, 3, 1, 333.33333333333337, 4.0, 0.0, 1),
(8, 3, 2, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 1, 266.6666666666667, 5.0, 0.0, 0),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 1, 1, 200.0, 2.0, 0.0, 1),
(2, 1, 0, 200.0, 2.0, 0.0, 1), (3, 2, 1, 266.6666666666667, 3.0, 0.0, 0),
(4, 3, 1, 333.33333333333337, 4.0, 0.0, 1), (5, 4, 1, 266.6666666666667, 5.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 2, 100.0, 1.0, 0.0, 0),
(2, 1, 1, 200.0, 2.0, 0.0, 1), (3, 1, 2, 200.0, 2.0, 0.0, 1),
(4, 2, 1, 133.33333333333334, 3.0, 0.0, 0), (5, 2, 2, 133.33333333333334, 3.0, 0.0, 0),
(6, 3, 1, 66.66666666666669, 4.0, 0.0, 1), (7, 3, 0, 66.66666666666669, 4.0, 0.0, 1),
(8, 3, 2, 66.66666666666669, 4.0, 0.0, 1), (9, 4, 1, 106.6666666666667, 5.0, 0.0, 0),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 1, 1, 200.0, 2.0, 0.0, 1),
(2, 2, 1, 133.33333333333334, 3.0, 0.0, 0), (3, 3, 1, 66.66666666666669, 4.0, 0.0, 1),
(4, 3, 0, 66.66666666666669, 4.0, 0.0, 1), (5, 4, 1, 106.6666666666667, 5.0, 0.0, 0),
(6, 0, 2, 100.0, 1.0, 0.0, 0), (7, 1, 2, 200.0, 2.0, 0.0, 1),
(8, 2, 2, 133.33333333333334, 3.0, 0.0, 0), (9, 3, 2, 66.66666666666669, 4.0, 0.0, 1),
(10, 4, 2, 53.33333333333335, 5.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
with pytest.raises(Exception) as e_info:
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='auto', row_wise=test_row_wise
)
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
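        # Segment prep: refresh valuation prices, then sort the call sequence by order value
        # (replicates the 'auto' logic, which is not allowed directly with from_order_func)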
@njit
def segment_prep_func_nb(sc, target_hold_value):
order_size = np.copy(target_hold_value[sc.i, sc.from_col:sc.to_col])
order_size_type = np.full(sc.group_len, SizeType.TargetValue)
direction = np.full(sc.group_len, Direction.All)
order_value_out = np.empty(sc.group_len, dtype=np.float_)
sc.last_val_price[sc.from_col:sc.to_col] = sc.close[sc.i, sc.from_col:sc.to_col]
nb.sort_call_seq_nb(sc, order_size, order_size_type, direction, order_value_out)
return order_size, order_size_type, direction
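        # Order function that resolves its column via the current call sequence
        # to pick the right size, size type, and direction prepared above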
@njit
def pct_order_func_nb(oc, order_size, order_size_type, direction):
col_i = oc.call_seq_now[oc.call_idx]
return nb.create_order_nb(
size=order_size[col_i],
size_type=order_size_type[col_i],
price=oc.close[oc.i, col_i],
direction=direction[col_i]
)
portfolio = vbt.Portfolio.from_order_func(
price_wide * 0 + 1, pct_order_func_nb, group_by=np.array([0, 0, 0]),
cash_sharing=True, segment_prep_func_nb=segment_prep_func_nb,
segment_prep_args=(target_hold_value.values,), row_wise=test_row_wise)
np.testing.assert_array_equal(
portfolio.call_seq.values,
np.array([
[0, 1, 2],
[2, 1, 0],
[0, 2, 1],
[1, 0, 2],
[2, 1, 0]
])
)
pd.testing.assert_frame_equal(
portfolio.holding_value(group_by=False),
target_hold_value
)
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_target_value(self, test_row_wise):
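        # Segment prep that overrides the valuation price used to translate target value into an order size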
@njit
def target_val_segment_prep_func_nb(sc, val_price):
sc.last_val_price[sc.from_col:sc.to_col] = val_price[sc.i]
return ()
@njit
def target_val_order_func_nb(oc):
return nb.create_order_nb(size=50., size_type=SizeType.TargetValue, price=oc.close[oc.i, oc.col])
portfolio = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
(2, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
(2, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
portfolio = vbt.Portfolio.from_order_func(
price.iloc[1:], target_val_order_func_nb,
segment_prep_func_nb=target_val_segment_prep_func_nb,
segment_prep_args=(price.iloc[:-1].values,), row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
(2, 2, 0, 8.333333333333332, 4.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
(2, 2, 0, 8.333333333333332, 4.0, 0.0, 1), (3, 3, 0, 4.166666666666668, 5.0, 0.0, 1)
], dtype=order_dt)
)
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_target_percent(self, test_row_wise):
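        # Same idea for target percent: the valuation price determines how the percentage converts to shares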
@njit
def target_pct_segment_prep_func_nb(sc, val_price):
sc.last_val_price[sc.from_col:sc.to_col] = val_price[sc.i]
return ()
@njit
def target_pct_order_func_nb(oc):
return nb.create_order_nb(size=0.5, size_type=SizeType.TargetPercent, price=oc.close[oc.i, oc.col])
portfolio = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb, row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
(2, 3, 0, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 1, 0, 25.0, 3.0, 0.0, 0), (1, 2, 0, 8.333333333333332, 4.0, 0.0, 1),
(2, 3, 0, 1.0416666666666679, 5.0, 0.0, 1)
], dtype=order_dt)
)
portfolio = vbt.Portfolio.from_order_func(
price.iloc[1:], target_pct_order_func_nb,
segment_prep_func_nb=target_pct_segment_prep_func_nb,
segment_prep_args=(price.iloc[:-1].values,), row_wise=test_row_wise)
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
(2, 3, 0, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0), (1, 1, 0, 25.0, 3.0, 0.0, 1),
(2, 3, 0, 3.125, 5.0, 0.0, 1)
], dtype=order_dt)
)
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_init_cash(self, test_row_wise):
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=[1., 10., np.inf])
if test_row_wise:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 10.0, 1.0, 0.0, 0),
(2, 0, 2, 10.0, 1.0, 0.0, 0), (3, 1, 0, 10.0, 2.0, 0.0, 1),
(4, 1, 1, 10.0, 2.0, 0.0, 1), (5, 1, 2, 10.0, 2.0, 0.0, 1),
(6, 2, 0, 6.666666666666667, 3.0, 0.0, 0), (7, 2, 1, 6.666666666666667, 3.0, 0.0, 0),
(8, 2, 2, 10.0, 3.0, 0.0, 0), (9, 3, 0, 10.0, 4.0, 0.0, 1),
(10, 3, 1, 10.0, 4.0, 0.0, 1), (11, 3, 2, 10.0, 4.0, 0.0, 1),
(12, 4, 0, 8.0, 5.0, 0.0, 0), (13, 4, 1, 8.0, 5.0, 0.0, 0),
(14, 4, 2, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
else:
record_arrays_close(
portfolio.order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 1, 0, 10.0, 2.0, 0.0, 1),
(2, 2, 0, 6.666666666666667, 3.0, 0.0, 0), (3, 3, 0, 10.0, 4.0, 0.0, 1),
(4, 4, 0, 8.0, 5.0, 0.0, 0), (5, 0, 1, 10.0, 1.0, 0.0, 0),
(6, 1, 1, 10.0, 2.0, 0.0, 1), (7, 2, 1, 6.666666666666667, 3.0, 0.0, 0),
(8, 3, 1, 10.0, 4.0, 0.0, 1), (9, 4, 1, 8.0, 5.0, 0.0, 0),
(10, 0, 2, 10.0, 1.0, 0.0, 0), (11, 1, 2, 10.0, 2.0, 0.0, 1),
(12, 2, 2, 10.0, 3.0, 0.0, 0), (13, 3, 2, 10.0, 4.0, 0.0, 1),
(14, 4, 2, 10.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
assert type(portfolio._init_cash) == np.ndarray
base_portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=np.inf)
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=InitCashMode.Auto)
record_arrays_close(
portfolio.order_records,
base_portfolio.orders.values
)
assert portfolio._init_cash == InitCashMode.Auto
portfolio = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, 10., row_wise=test_row_wise, init_cash=InitCashMode.AutoAlign)
record_arrays_close(
portfolio.order_records,
base_portfolio.orders.values
)
assert portfolio._init_cash == InitCashMode.AutoAlign
def test_func_calls(self):
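        # Each callback increments a shared counter, recording the exact order in which callbacks run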
@njit
def prep_func_nb(simc, call_i, sim_lst):
call_i[0] += 1
sim_lst.append(call_i[0])
return (call_i,)
@njit
def group_prep_func_nb(gc, call_i, group_lst):
call_i[0] += 1
group_lst.append(call_i[0])
return (call_i,)
@njit
def segment_prep_func_nb(sc, call_i, segment_lst):
call_i[0] += 1
segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(oc, call_i, order_lst):
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
call_i = np.array([0])
sim_lst = List.empty_list(typeof(0))
group_lst = List.empty_list(typeof(0))
segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
group_prep_func_nb=group_prep_func_nb, group_prep_args=(group_lst,),
segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,)
)
assert call_i[0] == 28
assert list(sim_lst) == [1]
assert list(group_lst) == [2, 18]
assert list(segment_lst) == [3, 6, 9, 12, 15, 19, 21, 23, 25, 27]
assert list(order_lst) == [4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 20, 22, 24, 26, 28]
call_i = np.array([0])
sim_lst = List.empty_list(typeof(0))
group_lst = List.empty_list(typeof(0))
segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
active_mask = np.array([
[False, True],
[False, False],
[False, True],
[False, False],
[False, True],
])
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
group_prep_func_nb=group_prep_func_nb, group_prep_args=(group_lst,),
segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,),
active_mask=active_mask
)
assert call_i[0] == 8
assert list(sim_lst) == [1]
assert list(group_lst) == [2]
assert list(segment_lst) == [3, 5, 7]
assert list(order_lst) == [4, 6, 8]
def test_func_calls_row_wise(self):
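        # Same call-order tracking as above, but with row_wise=True rows are processed before columns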
@njit
def prep_func_nb(simc, call_i, sim_lst):
call_i[0] += 1
sim_lst.append(call_i[0])
return (call_i,)
@njit
        def row_prep_func_nb(rc, call_i, row_lst):
call_i[0] += 1
row_lst.append(call_i[0])
return (call_i,)
@njit
def segment_prep_func_nb(sc, call_i, segment_lst):
call_i[0] += 1
segment_lst.append(call_i[0])
return (call_i,)
@njit
def order_func_nb(oc, call_i, order_lst):
call_i[0] += 1
order_lst.append(call_i[0])
return NoOrder
call_i = np.array([0])
sim_lst = List.empty_list(typeof(0))
row_lst = List.empty_list(typeof(0))
segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
row_prep_func_nb=row_prep_func_nb, row_prep_args=(row_lst,),
segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,),
row_wise=True
)
assert call_i[0] == 31
assert list(sim_lst) == [1]
assert list(row_lst) == [2, 8, 14, 20, 26]
assert list(segment_lst) == [3, 6, 9, 12, 15, 18, 21, 24, 27, 30]
assert list(order_lst) == [4, 5, 7, 10, 11, 13, 16, 17, 19, 22, 23, 25, 28, 29, 31]
call_i = np.array([0])
sim_lst = List.empty_list(typeof(0))
row_lst = List.empty_list(typeof(0))
segment_lst = List.empty_list(typeof(0))
order_lst = List.empty_list(typeof(0))
active_mask = np.array([
[False, False],
[False, True],
[True, False],
[True, True],
[False, False],
])
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, order_lst,
group_by=np.array([0, 0, 1]),
prep_func_nb=prep_func_nb, prep_args=(call_i, sim_lst),
row_prep_func_nb=row_prep_func_nb, row_prep_args=(row_lst,),
segment_prep_func_nb=segment_prep_func_nb, segment_prep_args=(segment_lst,),
active_mask=active_mask,
row_wise=True
)
assert call_i[0] == 14
assert list(sim_lst) == [1]
assert list(row_lst) == [2, 5, 9]
assert list(segment_lst) == [3, 6, 10, 13]
assert list(order_lst) == [4, 7, 8, 11, 12, 14]
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_max_orders(self, test_row_wise):
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, row_wise=test_row_wise)
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, row_wise=test_row_wise, max_orders=15)
with pytest.raises(Exception) as e_info:
_ = vbt.Portfolio.from_order_func(
price_wide, order_func_nb, np.inf, row_wise=test_row_wise, max_orders=14)
@pytest.mark.parametrize(
"test_row_wise",
[False, True],
)
def test_max_logs(self, test_row_wise):
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func_nb, np.inf, row_wise=test_row_wise)
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func_nb, np.inf, row_wise=test_row_wise, max_logs=15)
with pytest.raises(Exception) as e_info:
_ = vbt.Portfolio.from_order_func(
price_wide, log_order_func_nb, np.inf, row_wise=test_row_wise, max_logs=14)
# ############# Portfolio ############# #
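# Price data with NaNs to exercise edge cases; the fixtures below cover ungrouped,
# grouped, and cash-shared setups used by the TestPortfolio suite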
price_na = pd.DataFrame({
'a': [np.nan, 2., 3., 4., 5.],
'b': [1., 2., np.nan, 4., 5.],
'c': [1., 2., 3., 4., np.nan]
}, index=price.index)
order_size_new = pd.Series([1., 0.1, -1., -0.1, 1.])
directions = ['longonly', 'shortonly', 'all']
group_by = pd.Index(['first', 'first', 'second'], name='group')
portfolio = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='shares', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=None,
init_cash=[100., 100., 100.], freq='1D'
) # ungrouped: each column is independent with its own cash
portfolio_grouped = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='shares', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=False,
init_cash=[100., 100., 100.], freq='1D'
) # grouped, but cash is not shared within groups
portfolio_shared = vbt.Portfolio.from_orders(
price_na, order_size_new, size_type='shares', direction=directions,
fees=0.01, fixed_fees=0.1, slippage=0.01, log=True,
call_seq='reversed', group_by=group_by, cash_sharing=True,
init_cash=[200., 100.], freq='1D'
) # grouped with cash shared within each group
class TestPortfolio:
def test_config(self, tmp_path):
assert vbt.Portfolio.loads(portfolio['a'].dumps()) == portfolio['a']
assert vbt.Portfolio.loads(portfolio.dumps()) == portfolio
portfolio.save(tmp_path / 'portfolio')
assert vbt.Portfolio.load(tmp_path / 'portfolio') == portfolio
def test_wrapper(self):
pd.testing.assert_index_equal(
portfolio.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
portfolio.wrapper.columns,
price_na.columns
)
assert portfolio.wrapper.ndim == 2
assert portfolio.wrapper.grouper.group_by is None
assert portfolio.wrapper.grouper.allow_enable
assert portfolio.wrapper.grouper.allow_disable
assert portfolio.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
portfolio_grouped.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
portfolio_grouped.wrapper.columns,
price_na.columns
)
assert portfolio_grouped.wrapper.ndim == 2
pd.testing.assert_index_equal(
portfolio_grouped.wrapper.grouper.group_by,
group_by
)
assert portfolio_grouped.wrapper.grouper.allow_enable
assert portfolio_grouped.wrapper.grouper.allow_disable
assert portfolio_grouped.wrapper.grouper.allow_modify
pd.testing.assert_index_equal(
portfolio_shared.wrapper.index,
price_na.index
)
pd.testing.assert_index_equal(
portfolio_shared.wrapper.columns,
price_na.columns
)
assert portfolio_shared.wrapper.ndim == 2
pd.testing.assert_index_equal(
portfolio_shared.wrapper.grouper.group_by,
group_by
)
assert not portfolio_shared.wrapper.grouper.allow_enable
assert portfolio_shared.wrapper.grouper.allow_disable
assert not portfolio_shared.wrapper.grouper.allow_modify
def test_indexing(self):
assert portfolio['a'].wrapper == portfolio.wrapper['a']
assert portfolio['a'].orders == portfolio.orders['a']
assert portfolio['a'].logs == portfolio.logs['a']
assert portfolio['a'].init_cash == portfolio.init_cash['a']
pd.testing.assert_series_equal(portfolio['a'].call_seq, portfolio.call_seq['a'])
assert portfolio['c'].wrapper == portfolio.wrapper['c']
assert portfolio['c'].orders == portfolio.orders['c']
assert portfolio['c'].logs == portfolio.logs['c']
assert portfolio['c'].init_cash == portfolio.init_cash['c']
pd.testing.assert_series_equal(portfolio['c'].call_seq, portfolio.call_seq['c'])
assert portfolio[['c']].wrapper == portfolio.wrapper[['c']]
assert portfolio[['c']].orders == portfolio.orders[['c']]
assert portfolio[['c']].logs == portfolio.logs[['c']]
pd.testing.assert_series_equal(portfolio[['c']].init_cash, portfolio.init_cash[['c']])
pd.testing.assert_frame_equal(portfolio[['c']].call_seq, portfolio.call_seq[['c']])
assert portfolio_grouped['first'].wrapper == portfolio_grouped.wrapper['first']
assert portfolio_grouped['first'].orders == portfolio_grouped.orders['first']
assert portfolio_grouped['first'].logs == portfolio_grouped.logs['first']
assert portfolio_grouped['first'].init_cash == portfolio_grouped.init_cash['first']
pd.testing.assert_frame_equal(portfolio_grouped['first'].call_seq, portfolio_grouped.call_seq[['a', 'b']])
assert portfolio_grouped[['first']].wrapper == portfolio_grouped.wrapper[['first']]
assert portfolio_grouped[['first']].orders == portfolio_grouped.orders[['first']]
assert portfolio_grouped[['first']].logs == portfolio_grouped.logs[['first']]
pd.testing.assert_series_equal(
portfolio_grouped[['first']].init_cash,
portfolio_grouped.init_cash[['first']])
pd.testing.assert_frame_equal(portfolio_grouped[['first']].call_seq, portfolio_grouped.call_seq[['a', 'b']])
assert portfolio_grouped['second'].wrapper == portfolio_grouped.wrapper['second']
assert portfolio_grouped['second'].orders == portfolio_grouped.orders['second']
assert portfolio_grouped['second'].logs == portfolio_grouped.logs['second']
assert portfolio_grouped['second'].init_cash == portfolio_grouped.init_cash['second']
pd.testing.assert_series_equal(portfolio_grouped['second'].call_seq, portfolio_grouped.call_seq['c'])
assert portfolio_grouped[['second']].wrapper == portfolio_grouped.wrapper[['second']]
assert portfolio_grouped[['second']].orders == portfolio_grouped.orders[['second']]
assert portfolio_grouped[['second']].logs == portfolio_grouped.logs[['second']]
pd.testing.assert_series_equal(
portfolio_grouped[['second']].init_cash,
portfolio_grouped.init_cash[['second']])
pd.testing.assert_frame_equal(portfolio_grouped[['second']].call_seq, portfolio_grouped.call_seq[['c']])
assert portfolio_shared['first'].wrapper == portfolio_shared.wrapper['first']
assert portfolio_shared['first'].orders == portfolio_shared.orders['first']
assert portfolio_shared['first'].logs == portfolio_shared.logs['first']
assert portfolio_shared['first'].init_cash == portfolio_shared.init_cash['first']
pd.testing.assert_frame_equal(portfolio_shared['first'].call_seq, portfolio_shared.call_seq[['a', 'b']])
assert portfolio_shared[['first']].wrapper == portfolio_shared.wrapper[['first']]
assert portfolio_shared[['first']].orders == portfolio_shared.orders[['first']]
assert portfolio_shared[['first']].logs == portfolio_shared.logs[['first']]
pd.testing.assert_series_equal(
portfolio_shared[['first']].init_cash,
portfolio_shared.init_cash[['first']])
pd.testing.assert_frame_equal(portfolio_shared[['first']].call_seq, portfolio_shared.call_seq[['a', 'b']])
assert portfolio_shared['second'].wrapper == portfolio_shared.wrapper['second']
assert portfolio_shared['second'].orders == portfolio_shared.orders['second']
assert portfolio_shared['second'].logs == portfolio_shared.logs['second']
assert portfolio_shared['second'].init_cash == portfolio_shared.init_cash['second']
pd.testing.assert_series_equal(portfolio_shared['second'].call_seq, portfolio_shared.call_seq['c'])
assert portfolio_shared[['second']].wrapper == portfolio_shared.wrapper[['second']]
assert portfolio_shared[['second']].orders == portfolio_shared.orders[['second']]
assert portfolio_shared[['second']].logs == portfolio_shared.logs[['second']]
pd.testing.assert_series_equal(
portfolio_shared[['second']].init_cash,
portfolio_shared.init_cash[['second']])
pd.testing.assert_frame_equal(portfolio_shared[['second']].call_seq, portfolio_shared.call_seq[['c']])
def test_regroup(self):
assert portfolio.regroup(None) == portfolio
assert portfolio.regroup(False) == portfolio
assert portfolio.regroup(group_by) != portfolio
pd.testing.assert_index_equal(portfolio.regroup(group_by).wrapper.grouper.group_by, group_by)
assert portfolio_grouped.regroup(None) == portfolio_grouped
assert portfolio_grouped.regroup(False) != portfolio_grouped
assert portfolio_grouped.regroup(False).wrapper.grouper.group_by is None
assert portfolio_grouped.regroup(group_by) == portfolio_grouped
assert portfolio_shared.regroup(None) == portfolio_shared
with pytest.raises(Exception) as e_info:
_ = portfolio_shared.regroup(False)
assert portfolio_shared.regroup(group_by) == portfolio_shared
def test_cash_sharing(self):
assert not portfolio.cash_sharing
assert not portfolio_grouped.cash_sharing
assert portfolio_shared.cash_sharing
def test_call_seq(self):
pd.testing.assert_frame_equal(
portfolio.call_seq,
pd.DataFrame(
np.array([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_grouped.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_shared.call_seq,
pd.DataFrame(
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
]),
index=price_na.index,
columns=price_na.columns
)
)
def test_incl_unrealized(self):
assert not vbt.Portfolio.from_orders(price_na, 1000., incl_unrealized=False).incl_unrealized
assert vbt.Portfolio.from_orders(price_na, 1000., incl_unrealized=True).incl_unrealized
def test_orders(self):
record_arrays_close(
portfolio.orders.values,
np.array([
(0, 1, 0, 0.1, 2.02, 0.10202, 0), (1, 2, 0, 0.1, 2.9699999999999998, 0.10297, 1),
(2, 4, 0, 1.0, 5.05, 0.1505, 0), (3, 0, 1, 1.0, 0.99, 0.10990000000000001, 1),
(4, 1, 1, 0.1, 1.98, 0.10198, 1), (5, 3, 1, 0.1, 4.04, 0.10404000000000001, 0),
(6, 4, 1, 1.0, 4.95, 0.14950000000000002, 1), (7, 0, 2, 1.0, 1.01, 0.1101, 0),
(8, 1, 2, 0.1, 2.02, 0.10202, 0), (9, 2, 2, 1.0, 2.9699999999999998, 0.1297, 1),
(10, 3, 2, 0.1, 3.96, 0.10396000000000001, 1)
], dtype=order_dt)
)
result = pd.Series(
np.array([3, 4, 4]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.orders.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_orders(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_orders(group_by=False).count(),
result
)
result = pd.Series(
np.array([7, 4]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_orders(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.orders.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.orders.count(),
result
)
def test_logs(self):
record_arrays_close(
portfolio.logs.values,
np.array([
(0, 0, 0, 0, 100.0, 0.0, np.nan, 100.0, 1.0, 0, 0, np.nan, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.0, 0.0, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(1, 1, 0, 0, 100.0, 0.0, 2.0, 100.0, 0.1, 0, 0, 2.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 99.69598, 0.1, 0.1, 2.02, 0.10202, 0, 0, -1, 0),
(2, 2, 0, 0, 99.69598, 0.1, 3.0, 99.99598, -1.0, 0, 0, 3.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 99.89001, 0.0, 0.1, 2.9699999999999998, 0.10297, 1, 0, -1, 1),
(3, 3, 0, 0, 99.89001, 0.0, 4.0, 99.89001, -0.1, 0, 0, 4.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 99.89001, 0.0, np.nan, np.nan, np.nan, -1, 2, 8, -1),
(4, 4, 0, 0, 99.89001, 0.0, 5.0, 99.89001, 1.0, 0, 0, 5.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 94.68951, 1.0, 1.0, 5.05, 0.1505, 0, 0, -1, 2),
(5, 0, 1, 1, 100.0, 0.0, 1.0, 100.0, 1.0, 0, 1, 1.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.8801, -1.0, 1.0, 0.99, 0.10990000000000001, 1, 0, -1, 3),
(6, 1, 1, 1, 100.8801, -1.0, 2.0, 98.8801, 0.1, 0, 1, 2.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.97612, -1.1, 0.1, 1.98, 0.10198, 1, 0, -1, 4),
(7, 2, 1, 1, 100.97612, -1.1, np.nan, np.nan, -1.0, 0, 1, np.nan, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.97612, -1.1, np.nan, np.nan, np.nan, -1, 1, 1, -1),
(8, 3, 1, 1, 100.97612, -1.1, 4.0, 96.57611999999999, -0.1, 0, 1, 4.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 100.46808, -1.0, 0.1, 4.04, 0.10404000000000001, 0, 0, -1, 5),
(9, 4, 1, 1, 100.46808, -1.0, 5.0, 95.46808, 1.0, 0, 1, 5.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 105.26858, -2.0, 1.0, 4.95, 0.14950000000000002, 1, 0, -1, 6),
(10, 0, 2, 2, 100.0, 0.0, 1.0, 100.0, 1.0, 0, 2, 1.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 98.8799, 1.0, 1.0, 1.01, 0.1101, 0, 0, -1, 7),
(11, 1, 2, 2, 98.8799, 1.0, 2.0, 100.8799, 0.1, 0, 2, 2.0, 0.01, 0.1, 0.01, 1e-08, np.inf,
0.0, True, False, True, 98.57588000000001, 1.1, 0.1, 2.02, 0.10202, 0, 0, -1, 8),
(12, 2, 2, 2, 98.57588000000001, 1.1, 3.0, 101.87588000000001, -1.0, 0, 2, 3.0,
0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, True, False, True, 101.41618000000001,
0.10000000000000009, 1.0, 2.9699999999999998, 0.1297, 1, 0, -1, 9),
(13, 3, 2, 2, 101.41618000000001, 0.10000000000000009, 4.0, 101.81618000000002,
-0.1, 0, 2, 4.0, 0.01, 0.1, 0.01, 1e-08, np.inf, 0.0, True, False, True,
101.70822000000001, 0.0, 0.1, 3.96, 0.10396000000000001, 1, 0, -1, 10),
(14, 4, 2, 2, 101.70822000000001, 0.0, np.nan, 101.70822000000001, 1.0, 0, 2, np.nan, 0.01, 0.1, 0.01,
1e-08, np.inf, 0.0, True, False, True, 101.70822000000001, 0.0, np.nan, np.nan, np.nan, -1, 1, 1, -1)
], dtype=log_dt)
)
result = pd.Series(
np.array([5, 5, 5]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.logs.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_logs(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_logs(group_by=False).count(),
result
)
result = pd.Series(
np.array([10, 5]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_logs(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.logs.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.logs.count(),
result
)
def test_trades(self):
record_arrays_close(
portfolio.trades.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998, 0.10297,
-0.10999000000000003, -0.5445049504950497, 0, 1, 0),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0, 1),
(2, 1, 0.1, 0, 1.0799999999999998, 0.019261818181818182,
3, 4.04, 0.10404000000000001, -0.4193018181818182, -3.882424242424243, 1, 1, 2),
(3, 1, 2.0, 0, 3.015, 0.3421181818181819, 4, 5.0, 0.0,
-4.312118181818182, -0.7151108095884214, 1, 0, 2),
(4, 2, 1.0, 0, 1.1018181818181818, 0.19283636363636364, 2,
2.9699999999999998, 0.1297, 1.5456454545454543, 1.4028135313531351, 0, 1, 3),
(5, 2, 0.10000000000000009, 0, 1.1018181818181818, 0.019283636363636378,
3, 3.96, 0.10396000000000001, 0.1625745454545457, 1.4755115511551162, 0, 1, 3)
], dtype=trade_dt)
)
result = pd.Series(
np.array([2, 2, 2]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.trades.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_trades(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_trades(group_by=False).count(),
result
)
result = pd.Series(
np.array([4, 2]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_trades(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.trades.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.trades.count(),
result
)
def test_positions(self):
record_arrays_close(
portfolio.positions.values,
np.array([
(0, 0, 0.1, 1, 2.02, 0.10202, 2, 2.9699999999999998,
0.10297, -0.10999000000000003, -0.5445049504950497, 0, 1),
(1, 0, 1.0, 4, 5.05, 0.1505, 4, 5.0, 0.0,
-0.20049999999999982, -0.03970297029702967, 0, 0),
(2, 1, 2.1, 0, 2.9228571428571426, 0.36138000000000003, 4, 4.954285714285714,
0.10404000000000001, -4.731420000000001, -0.7708406647116326, 1, 0),
(3, 2, 1.1, 0, 1.1018181818181818, 0.21212000000000003, 3,
3.06, 0.23366000000000003, 1.7082200000000003, 1.4094224422442245, 0, 1)
], dtype=position_dt)
)
result = pd.Series(
np.array([2, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.positions.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_positions(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_positions(group_by=False).count(),
result
)
result = pd.Series(
np.array([3, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_positions(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.positions.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.positions.count(),
result
)
def test_drawdowns(self):
record_arrays_close(
portfolio.drawdowns.values,
np.array([
(0, 0, 0, 4, 4, 0), (1, 1, 0, 4, 4, 0), (2, 2, 2, 3, 4, 0)
], dtype=drawdown_dt)
)
result = pd.Series(
np.array([1, 1, 1]),
index=price_na.columns
).rename('count')
pd.testing.assert_series_equal(
portfolio.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.get_drawdowns(group_by=False).count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.get_drawdowns(group_by=False).count(),
result
)
result = pd.Series(
np.array([1, 1]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('count')
pd.testing.assert_series_equal(
portfolio.get_drawdowns(group_by=group_by).count(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.drawdowns.count(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.drawdowns.count(),
result
)
def test_close(self):
pd.testing.assert_frame_equal(portfolio.close, price_na)
pd.testing.assert_frame_equal(portfolio_grouped.close, price_na)
pd.testing.assert_frame_equal(portfolio_shared.close, price_na)
def test_fill_close(self):
pd.testing.assert_frame_equal(
portfolio.fill_close(ffill=False, bfill=False),
price_na
)
pd.testing.assert_frame_equal(
portfolio.fill_close(ffill=True, bfill=False),
price_na.ffill()
)
pd.testing.assert_frame_equal(
portfolio.fill_close(ffill=False, bfill=True),
price_na.bfill()
)
pd.testing.assert_frame_equal(
portfolio.fill_close(ffill=True, bfill=True),
price_na.ffill().bfill()
)
def test_share_flow(self):
pd.testing.assert_frame_equal(
portfolio.share_flow(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 0.1],
[-0.1, 0., -1.],
[0., 0., -0.1],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.share_flow(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 0.1, 0.],
[0., 0., 0.],
[0., -0.1, 0.],
[0., 1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -0.1, 0.1],
[-0.1, 0., -1.],
[0., 0.1, -0.1],
[1., -1., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.share_flow(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.share_flow(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.share_flow(),
result
)
def test_shares(self):
pd.testing.assert_frame_equal(
portfolio.shares(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.1, 0., 1.1],
[0., 0., 0.1],
[0., 0., 0.],
[1., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.shares(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 1.1, 0.],
[0., 1.1, 0.],
[0., 1., 0.],
[0., 2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.1, -1.1, 1.1],
[0., -1.1, 0.1],
[0., -1., 0.],
[1., -2., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.shares(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.shares(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.shares(),
result
)
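    def test_shares_decomposition(self):
        # Added sketch: the net share count should equal long-only minus
        # short-only holdings, which holds for the fixture arrays above.
        np.testing.assert_allclose(
            portfolio.shares().values,
            portfolio.shares(direction='longonly').values
            - portfolio.shares(direction='shortonly').values
        )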
def test_pos_mask(self):
pd.testing.assert_frame_equal(
portfolio.pos_mask(direction='longonly'),
pd.DataFrame(
np.array([
[False, False, True],
[True, False, True],
[False, False, True],
[False, False, False],
[True, False, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.pos_mask(direction='shortonly'),
pd.DataFrame(
np.array([
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False],
[False, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[False, True, True],
[True, True, True],
[False, True, True],
[False, True, False],
[True, True, False]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.pos_mask(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.pos_mask(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.pos_mask(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[True, True],
[True, True],
[True, True],
[True, False],
[True, False]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.pos_mask(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.pos_mask(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.pos_mask(),
result
)
def test_pos_coverage(self):
pd.testing.assert_series_equal(
portfolio.pos_coverage(direction='longonly'),
pd.Series(np.array([0.4, 0., 0.6]), index=price_na.columns).rename('pos_coverage')
)
pd.testing.assert_series_equal(
portfolio.pos_coverage(direction='shortonly'),
pd.Series(np.array([0., 1., 0.]), index=price_na.columns).rename('pos_coverage')
)
result = pd.Series(np.array([0.4, 1., 0.6]), index=price_na.columns).rename('pos_coverage')
pd.testing.assert_series_equal(
portfolio.pos_coverage(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.pos_coverage(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.pos_coverage(group_by=False),
result
)
result = pd.Series(
np.array([0.7, 0.6]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('pos_coverage')
pd.testing.assert_series_equal(
portfolio.pos_coverage(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.pos_coverage(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.pos_coverage(),
result
)
def test_cash_flow(self):
pd.testing.assert_frame_equal(
portfolio.cash_flow(short_cash=False),
pd.DataFrame(
np.array([
[0., -1.0999, -1.1201],
[-0.30402, -0.29998, -0.30402],
[0.19403, 0., 2.8403],
[0., 0.29996, 0.29204],
[-5.2005, -5.0995, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., 0.8801, -1.1201],
[-0.30402, 0.09602, -0.30402],
[0.19403, 0., 2.8403],
[0., -0.50804, 0.29204],
[-5.2005, 4.8005, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.cash_flow(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.cash_flow(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.cash_flow(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[0.8801, -1.1201],
[-0.208, -0.30402],
[0.19403, 2.8403],
[-0.50804, 0.29204],
[-0.4, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.cash_flow(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.cash_flow(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.cash_flow(),
result
)
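    def test_cash_identity(self):
        # Added sketch: cash should equal initial capital plus the running
        # sum of cash flows; verified against the ungrouped fixtures above.
        pd.testing.assert_frame_equal(
            portfolio.cash(),
            portfolio.cash_flow().cumsum().add(portfolio.init_cash),
            check_names=False
        )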
def test_init_cash(self):
pd.testing.assert_series_equal(
portfolio.init_cash,
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
portfolio_grouped.get_init_cash(group_by=False),
pd.Series(np.array([100., 100., 100.]), index=price_na.columns).rename('init_cash')
)
pd.testing.assert_series_equal(
portfolio_shared.get_init_cash(group_by=False),
pd.Series(np.array([200., 200., 100.]), index=price_na.columns).rename('init_cash')
)
result = pd.Series(
np.array([200., 100.]),
pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
pd.testing.assert_series_equal(
portfolio.get_init_cash(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.init_cash,
result
)
pd.testing.assert_series_equal(
portfolio_shared.init_cash,
result
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=None).init_cash,
pd.Series(
np.array([14000., 12000., 10000.]),
index=price_na.columns
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.Auto, group_by=group_by, cash_sharing=True).init_cash,
pd.Series(
np.array([26000.0, 10000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=None).init_cash,
pd.Series(
np.array([14000., 14000., 14000.]),
index=price_na.columns
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by).init_cash,
pd.Series(
np.array([26000.0, 26000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
pd.testing.assert_series_equal(
vbt.Portfolio.from_orders(
price_na, 1000., init_cash=InitCashMode.AutoAlign, group_by=group_by, cash_sharing=True).init_cash,
pd.Series(
np.array([26000.0, 26000.0]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('init_cash')
)
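    # Added note: InitCashMode.Auto sizes initial capital per column/group to
    # the minimum that satisfies all requested orders, while AutoAlign then
    # aligns every column/group to the common maximum (hence the identical
    # 14000/26000 values in the AutoAlign expectations above).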
def test_cash(self):
pd.testing.assert_frame_equal(
portfolio.cash(short_cash=False),
pd.DataFrame(
np.array([
[100., 98.9001, 98.8799],
[99.69598, 98.60012, 98.57588],
[99.89001, 98.60012, 101.41618],
[99.89001, 98.90008, 101.70822],
[94.68951, 93.80058, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[100., 100.8801, 98.8799],
[99.69598, 100.97612, 98.57588],
[99.89001, 100.97612, 101.41618],
[99.89001, 100.46808, 101.70822],
[94.68951, 105.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.cash(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.cash(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.cash(group_by=False),
pd.DataFrame(
np.array([
[200., 200.8801, 98.8799],
[199.69598, 200.97612, 98.57588],
[199.89001, 200.97612, 101.41618],
[199.89001, 200.46808, 101.70822],
[194.68951, 205.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_shared.cash(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[200.8801, 200.8801, 98.8799],
[200.6721, 200.97612, 98.57588000000001],
[200.86613, 200.6721, 101.41618000000001],
[200.35809, 200.35809, 101.70822000000001],
[199.95809, 205.15859, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[200.8801, 98.8799],
[200.6721, 98.57588],
[200.86613, 101.41618],
[200.35809, 101.70822],
[199.95809, 101.70822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.cash(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.cash(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.cash(),
result
)
def test_holding_value(self):
pd.testing.assert_frame_equal(
portfolio.holding_value(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 1.],
[0.2, 0., 2.2],
[0., 0., 0.3],
[0., 0., 0.],
[5., 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.holding_value(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 1., 0.],
[0., 2.2, 0.],
[0., np.nan, 0.],
[0., 4., 0.],
[0., 10., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -1., 1.],
[0.2, -2.2, 2.2],
[0., np.nan, 0.3],
[0., -4., 0.],
[5., -10., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.holding_value(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.holding_value(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.holding_value(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[-1., 1.],
[-2., 2.2],
[np.nan, 0.3],
[-4., 0.],
[-5., 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.holding_value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.holding_value(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.holding_value(),
result
)
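    def test_value_identity(self):
        # Added sketch: portfolio value decomposes into cash plus holding
        # value (NaN holding values propagate), matching the frames above.
        pd.testing.assert_frame_equal(
            portfolio.value(),
            portfolio.cash() + portfolio.holding_value(),
            check_names=False
        )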
def test_gross_exposure(self):
pd.testing.assert_frame_equal(
portfolio.gross_exposure(direction='longonly'),
pd.DataFrame(
np.array([
[0., 0., 0.01001202],
[0.00200208, 0., 0.02183062],
[0., 0., 0.00294938],
[0., 0., 0.],
[0.05015573, 0., 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio.gross_exposure(direction='shortonly'),
pd.DataFrame(
np.array([
[0., 0.01001, 0.],
[0., 0.02182537, 0.],
[0., np.nan, 0.],
[0., 0.03887266, 0.],
[0., 0.09633858, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[0., -0.01021449, 0.01001202],
[0.00200208, -0.02282155, 0.02183062],
[0., np.nan, 0.00294938],
[0., -0.0421496, 0.],
[0.05015573, -0.11933092, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.gross_exposure(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.gross_exposure(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.gross_exposure(group_by=False),
pd.DataFrame(
np.array([
[0., -0.00505305, 0.01001202],
[0.00100052, -0.01120162, 0.02183062],
[0., np.nan, 0.00294938],
[0., -0.02052334, 0.],
[0.02503887, -0.05440679, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-0.005003, 0.01001202],
[-0.01006684, 0.02183062],
[np.nan, 0.00294938],
[-0.02037095, 0.],
[-0.02564654, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.gross_exposure(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.gross_exposure(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.gross_exposure(),
result
)
def test_net_exposure(self):
result = pd.DataFrame(
np.array([
[0., -0.01001, 0.01001202],
[0.00200208, -0.02182537, 0.02183062],
[0., np.nan, 0.00294938],
[0., -0.03887266, 0.],
[0.05015573, -0.09633858, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.net_exposure(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.net_exposure(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.net_exposure(group_by=False),
pd.DataFrame(
np.array([
[0., -0.0050025, 0.01001202],
[0.00100052, -0.01095617, 0.02183062],
[0., np.nan, 0.00294938],
[0., -0.01971414, 0.],
[0.02503887, -0.04906757, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-0.00495344, 0.01001202],
[-0.00984861, 0.02183062],
[np.nan, 0.00294938],
[-0.01957348, 0.],
[-0.02323332, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.net_exposure(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.net_exposure(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.net_exposure(),
result
)
def test_value(self):
result = pd.DataFrame(
np.array([
[100., 99.8801, 99.8799],
[99.89598, 98.77612, 100.77588],
[99.89001, np.nan, 101.71618],
[99.89001, 96.46808, 101.70822],
[99.68951, 95.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.value(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.value(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.value(group_by=False),
pd.DataFrame(
np.array([
[200., 199.8801, 99.8799],
[199.89598, 198.77612, 100.77588],
[199.89001, np.nan, 101.71618],
[199.89001, 196.46808, 101.70822],
[199.68951, 195.26858, 101.70822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_shared.value(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[199.8801, 199.8801, 99.8799],
[198.6721, 198.77612000000002, 100.77588000000002],
[np.nan, np.nan, 101.71618000000001],
[196.35809, 196.35809, 101.70822000000001],
[194.95809, 195.15859, 101.70822000000001]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[199.8801, 99.8799],
[198.6721, 100.77588],
[np.nan, 101.71618],
[196.35809, 101.70822],
[194.95809, 101.70822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.value(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.value(),
result
)
def test_total_profit(self):
result = pd.Series(
np.array([-0.31049, -4.73142, 1.70822]),
index=price_na.columns
).rename('total_profit')
pd.testing.assert_series_equal(
portfolio.total_profit(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_profit(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_profit(group_by=False),
result
)
result = pd.Series(
np.array([-5.04191, 1.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_profit')
pd.testing.assert_series_equal(
portfolio.total_profit(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_profit(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_profit(),
result
)
def test_final_value(self):
result = pd.Series(
np.array([99.68951, 95.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
pd.testing.assert_series_equal(
portfolio.final_value(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.final_value(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.final_value(group_by=False),
pd.Series(
np.array([199.68951, 195.26858, 101.70822]),
index=price_na.columns
).rename('final_value')
)
result = pd.Series(
np.array([194.95809, 101.70822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('final_value')
pd.testing.assert_series_equal(
portfolio.final_value(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.final_value(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.final_value(),
result
)
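    def test_total_profit_identity(self):
        # Added sketch: total profit is final value minus initial capital,
        # consistent with the expected series above.
        pd.testing.assert_series_equal(
            portfolio.total_profit(),
            (portfolio.final_value() - portfolio.init_cash).rename('total_profit')
        )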
def test_total_return(self):
result = pd.Series(
np.array([-0.0031049, -0.0473142, 0.0170822]),
index=price_na.columns
).rename('total_return')
pd.testing.assert_series_equal(
portfolio.total_return(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_return(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_return(group_by=False),
pd.Series(
np.array([-0.00155245, -0.0236571, 0.0170822]),
index=price_na.columns
).rename('total_return')
)
result = pd.Series(
np.array([-0.02520955, 0.0170822]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_return')
pd.testing.assert_series_equal(
portfolio.total_return(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_return(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_return(),
result
)
def test_returns(self):
result = pd.DataFrame(
np.array([
[0.00000000e+00, -1.19900000e-03, -1.20100000e-03],
[-1.04020000e-03, -1.10530526e-02, 8.97057366e-03],
[-5.97621646e-05, np.nan, 9.33060570e-03],
[0.00000000e+00, np.nan, -7.82569695e-05],
[-2.00720773e-03, -1.24341648e-02, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.returns(group_by=False),
pd.DataFrame(
np.array([
[0.00000000e+00, -5.99500000e-04, -1.20100000e-03],
[-5.20100000e-04, -5.52321117e-03, 8.97057366e-03],
[-2.98655331e-05, np.nan, 9.33060570e-03],
[0.00000000e+00, np.nan, -7.82569695e-05],
[-1.00305163e-03, -6.10531746e-03, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_frame_equal(
portfolio_shared.returns(group_by=False, in_sim_order=True),
pd.DataFrame(
np.array([
[0.0, -0.0005995000000000062, -1.20100000e-03],
[-0.0005233022960706736, -0.005523211165093367, 8.97057366e-03],
[np.nan, np.nan, 9.33060570e-03],
[0.0, np.nan, -7.82569695e-05],
[-0.0010273695869600474, -0.0061087373583639994, 0.00000000e+00]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[-5.99500000e-04, -1.20100000e-03],
[-6.04362315e-03, 8.97057366e-03],
[np.nan, 9.33060570e-03],
[np.nan, -7.82569695e-05],
[-7.12983101e-03, 0.00000000e+00]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.returns(),
result
)
def test_active_returns(self):
result = pd.DataFrame(
np.array([
[0., -np.inf, -np.inf],
[-np.inf, -1.10398, 0.89598],
[-0.02985, np.nan, 0.42740909],
[0., np.nan, -0.02653333],
[-np.inf, -0.299875, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.active_returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.active_returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.active_returns(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[-np.inf, -np.inf],
[-1.208, 0.89598],
[np.nan, 0.42740909],
[np.nan, -0.02653333],
[-0.35, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.active_returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.active_returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.active_returns(),
result
)
def test_market_value(self):
result = pd.DataFrame(
np.array([
[100., 100., 100.],
[100., 200., 200.],
[150., 200., 300.],
[200., 400., 400.],
[250., 500., 400.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.market_value(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.market_value(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.market_value(group_by=False),
pd.DataFrame(
np.array([
[200., 200., 100.],
[200., 400., 200.],
[300., 400., 300.],
[400., 800., 400.],
[500., 1000., 400.]
]),
index=price_na.index,
columns=price_na.columns
)
)
result = pd.DataFrame(
np.array([
[200., 100.],
[300., 200.],
[350., 300.],
[600., 400.],
[750., 400.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.market_value(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.market_value(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.market_value(),
result
)
def test_market_returns(self):
result = pd.DataFrame(
np.array([
[0., 0., 0.],
[0., 1., 1.],
[0.5, 0., 0.5],
[0.33333333, 1., 0.33333333],
[0.25, 0.25, 0.]
]),
index=price_na.index,
columns=price_na.columns
)
pd.testing.assert_frame_equal(
portfolio.market_returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.market_returns(group_by=False),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.market_returns(group_by=False),
result
)
result = pd.DataFrame(
np.array([
[0., 0.],
[0.5, 1.],
[0.16666667, 0.5],
[0.71428571, 0.33333333],
[0.25, 0.]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
pd.testing.assert_frame_equal(
portfolio.market_returns(group_by=group_by),
result
)
pd.testing.assert_frame_equal(
portfolio_grouped.market_returns(),
result
)
pd.testing.assert_frame_equal(
portfolio_shared.market_returns(),
result
)
def test_total_market_return(self):
result = pd.Series(
np.array([1.5, 4., 3.]),
index=price_na.columns
).rename('total_market_return')
pd.testing.assert_series_equal(
portfolio.total_market_return(),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_market_return(group_by=False),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_market_return(group_by=False),
result
)
result = pd.Series(
np.array([2.75, 3.]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('total_market_return')
pd.testing.assert_series_equal(
portfolio.total_market_return(group_by=group_by),
result
)
pd.testing.assert_series_equal(
portfolio_grouped.total_market_return(),
result
)
pd.testing.assert_series_equal(
portfolio_shared.total_market_return(),
result
)
def test_return_method(self):
pd.testing.assert_frame_equal(
portfolio_shared.cumulative_returns(),
pd.DataFrame(
np.array([
[-0.0005995, -0.001201],
[-0.0066395, 0.0077588],
[-0.0066395, 0.0171618],
[-0.0066395, 0.0170822],
[-0.01372199, 0.0170822]
]),
index=price_na.index,
columns=pd.Index(['first', 'second'], dtype='object', name='group')
)
)
pd.testing.assert_frame_equal(
portfolio_shared.cumulative_returns(group_by=False),
pd.DataFrame(
np.array([
[0., -0.0005995, -0.001201],
[-0.0005201, -0.0061194, 0.0077588],
[-0.00054995, -0.0061194, 0.0171618],
[-0.00054995, -0.0061194, 0.0170822],
[-0.00155245, -0.01218736, 0.0170822]
]),
index=price_na.index,
columns=price_na.columns
)
)
pd.testing.assert_series_equal(
portfolio_shared.sharpe_ratio(),
pd.Series(
np.array([-20.82791491, 10.2576347]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
portfolio_shared.sharpe_ratio(risk_free=0.01),
pd.Series(
np.array([-66.19490297745766, -19.873024060759022]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
portfolio_shared.sharpe_ratio(year_freq='365D'),
pd.Series(
np.array([-25.06639947, 12.34506527]),
index=pd.Index(['first', 'second'], dtype='object', name='group')
).rename('sharpe_ratio')
)
pd.testing.assert_series_equal(
portfolio_shared.sharpe_ratio(group_by=False),
pd.Series(
np.array([-11.058998255347488, -21.39151322377427, 10.257634695847853]),
index=price_na.columns
).rename('sharpe_ratio')
)
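    # Added note: sharpe_ratio scales with the annualization factor, so
    # overriding year_freq (e.g. '365D' above) changes the magnitudes but
    # not the signs or ranking of the per-group ratios.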
def test_stats(self):
pd.testing.assert_series_equal(
portfolio.stats(),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, -1.1112299999999966,
-1.1112299999999966, 283.3333333333333, 66.66666666666667,
1.6451238489727062, 1.6451238489727062,
pd.Timedelta('3 days 08:00:00'), pd.Timedelta('3 days 08:00:00'),
1.3333333333333333, 33.333333333333336, -98.38058805880588,
-100.8038553855386, -99.59222172217225,
pd.Timedelta('2 days 08:00:00'), pd.Timedelta('2 days 04:00:00'),
0.10827272727272726, 1.2350921335789007, -0.01041305691622876,
-7.373390156195147, 25.695952942372134, 5717.085878360386
]),
index=pd.Index([
'Start', 'End', 'Duration', 'Init. Cash', 'Total Profit',
'Total Return [%]', 'Benchmark Return [%]', 'Position Coverage [%]',
'Max. Drawdown [%]', 'Avg. Drawdown [%]', 'Max. Drawdown Duration',
'Avg. Drawdown Duration', 'Num. Trades', 'Win Rate [%]',
'Best Trade [%]', 'Worst Trade [%]', 'Avg. Trade [%]',
'Max. Trade Duration', 'Avg. Trade Duration', 'Expectancy', 'SQN',
'Gross Exposure', 'Sharpe Ratio', 'Sortino Ratio', 'Calmar Ratio'
], dtype='object'),
name='stats_mean')
)
pd.testing.assert_series_equal(
portfolio['a'].stats(),
pd.Series(
np.array([
pd.Timestamp('2020-01-01 00:00:00'), pd.Timestamp('2020-01-05 00:00:00'),
pd.Timedelta('5 days 00:00:00'), 100.0, -0.3104900000000015,
-0.3104900000000015, 150.0, 40.0, 0.3104900000000015,
                0.3104900000000015, pd.Timedelta('4 days 00:00:00'),
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import re
from math import ceil
import pandas as pd
from sklearn.metrics import classification_report
from scipy.stats import shapiro, boxcox, yeojohnson
from scipy.stats import probplot
from sklearn.preprocessing import LabelEncoder, PowerTransformer
from category_encoders.target_encoder import TargetEncoder
from sklearn.impute import SimpleImputer, MissingIndicator
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.linear_model import LinearRegression, LogisticRegression
# from .charts.classification_visualization import classification_visualization
# from .charts.charts import Plot, ScatterChart
from sklearn.model_selection import train_test_split
from sklearn.utils.multiclass import unique_labels
from sklearn.manifold import TSNE
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
import json
from pyod.models.hbos import HBOS
from statsmodels.api import ProbPlot
# from .charts.charts_extras import (
# feature_importances_plot,
# regression_viz,
# classification_viz,
# )
from sklearn.ensemble import (
RandomForestClassifier,
GradientBoostingClassifier,
RandomForestRegressor,
GradientBoostingRegressor,
)
from sklearn.svm import LinearSVC
import warnings
warnings.filterwarnings("ignore")
sns.set_palette("colorblind")
class CrawtoDS:
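    """Exploratory preprocessing harness: splits the input data by problem
    type and derives imputers, power transforms, target encoding and HBOS
    outlier flags for the inferred feature sets. The attribute-style
    accesses below (e.g. ``self.nan_features``) suggest these methods were
    originally wrapped in a caching/property decorator that is missing
    from this excerpt."""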
def __init__(
self,
data,
target,
test_data=None,
time_dependent=False,
features="infer",
problem="infer",
):
self.input_data = data
self.target = target
self.features = features
self.problem = problem
self.test_data = test_data
self.timedependent = time_dependent
if self.problem == "binary classification":
self.train_data, self.valid_data = train_test_split(
self.input_data, shuffle=True, stratify=self.input_data[self.target],
)
elif self.problem == "regression":
self.train_data, self.valid_data = train_test_split(
self.input_data, shuffle=True,
)
    def nan_features(self):
        """Return features whose share of NaN values exceeds 0.25: ``map``
        yields each column name whose NaN fraction passes the threshold
        (``False`` otherwise) and ``filter`` drops the ``False`` entries."""
        f = self.input_data.columns.values
        len_df = len(self.input_data)
        nan_features = list(
            filter(
                lambda x: x is not False,
                map(
                    lambda x: x
                    if self.input_data[x].isna().sum() / len_df > 0.25
                    else False,
                    f,
                ),
            )
        )
        return nan_features
def problematic_features(self):
f = self.input_data.columns.values
problematic_features = []
for i in f:
if "Id" in i:
problematic_features.append(i)
elif "ID" in i:
problematic_features.append(i)
return problematic_features
    def undefined_features(self):
        if self.features == "infer":
            undefined_features = list(self.input_data.columns)
            undefined_features.remove(self.target)
            for i in self.nan_features:
                undefined_features.remove(i)
            for i in self.problematic_features:
                undefined_features.remove(i)
            return undefined_features
        return self.features
def numeric_features(self):
numeric_features = []
l = self.undefined_features
for i in l:
if self.input_data[i].dtype in ["float64", "float", "int", "int64"]:
if len(self.input_data[i].value_counts()) / len(self.input_data) < 0.1:
pass
else:
numeric_features.append(i)
return numeric_features
    def categorical_features(self, threshold=10):
        # threshold is the maximum unique-value percentage for a column to
        # count as categorical (10 -> 10%, matching the previous hard-coded 0.10).
        categorical_features = []
        for i in self.undefined_features:
            if len(self.input_data[i].value_counts()) / len(self.input_data[i]) < threshold / 100:
                categorical_features.append(i)
        return categorical_features
def indicator(self):
indicator = MissingIndicator(features="all")
indicator.fit(self.train_data[self.undefined_features])
return indicator
def train_missing_indicator_df(self):
x = self.indicator.transform(self.train_data[self.undefined_features])
x_labels = ["missing_" + i for i in self.undefined_features]
missing_indicator_df = pd.DataFrame(x, columns=x_labels)
columns = [
i
for i in list(missing_indicator_df.columns.values)
if missing_indicator_df[i].max() == True
]
return missing_indicator_df[columns].replace({True: 1, False: 0})
def valid_missing_indicator_df(self):
x = self.indicator.transform(self.valid_data[self.undefined_features])
x_labels = ["missing_" + i for i in self.undefined_features]
missing_indicator_df = pd.DataFrame(x, columns=x_labels)
columns = list(self.train_missing_indicator_df)
return missing_indicator_df[columns].replace({True: 1, False: 0})
def numeric_imputer(self):
numeric_imputer = SimpleImputer(strategy="median", copy=True)
numeric_imputer.fit(self.train_data[self.numeric_features])
return numeric_imputer
def categorical_imputer(self):
categorical_imputer = SimpleImputer(strategy="most_frequent", copy=True)
categorical_imputer.fit(self.train_data[self.categorical_features])
return categorical_imputer
def train_imputed_numeric_df(self):
x = self.numeric_imputer.transform(self.train_data[self.numeric_features])
x_labels = [i + "_imputed" for i in self.numeric_features]
imputed_numeric_df = pd.DataFrame(x, columns=x_labels)
return imputed_numeric_df
def valid_imputed_numeric_df(self):
x = self.numeric_imputer.transform(self.valid_data[self.numeric_features])
x_labels = [i + "_imputed" for i in self.numeric_features]
imputed_numeric_df = pd.DataFrame(x, columns=x_labels)
return imputed_numeric_df
def yeo_johnson_transformer(self):
yeo_johnson_transformer = PowerTransformer(method="yeo-johnson", copy=True)
yeo_johnson_transformer.fit(self.train_imputed_numeric_df)
return yeo_johnson_transformer
def yeo_johnson_target_transformer(self):
yeo_johnson_target_transformer = PowerTransformer(method="yeo-johnson", copy=True)
yeo_johnson_target_transformer.fit(
np.array(self.train_data[self.target]).reshape(-1, 1)
)
return yeo_johnson_target_transformer
def train_yeojohnson_df(self):
yj = self.yeo_johnson_transformer.transform(self.train_imputed_numeric_df)
columns = self.train_imputed_numeric_df.columns.values
columns = [i + "_yj" for i in columns]
yj = pd.DataFrame(yj, columns=columns)
return yj
def valid_yeojohnson_df(self):
yj = self.yeo_johnson_transformer.transform(self.valid_imputed_numeric_df)
columns = self.valid_imputed_numeric_df.columns.values
columns = [i + "_yj" for i in columns]
yj = pd.DataFrame(yj, columns=columns)
return yj
def train_transformed_target(self):
if self.problem == "binary classification":
return self.train_data[self.target]
elif self.problem == "regression":
s = self.yeo_johnson_target_transformer.transform(
np.array(self.train_data[self.target]).reshape(-1, 1)
)
s = pd.DataFrame(s, columns=[self.target])
return s
def valid_transformed_target(self):
if self.problem == "binary classification":
return self.valid_data[self.target]
elif self.problem == "regression":
s = self.yeo_johnson_target_transformer.transform(
np.array(self.valid_data[self.target]).reshape(-1, 1)
)
s = pd.DataFrame(s, columns=[self.target])
return s
def train_imputed_categorical_df(self):
x = self.categorical_imputer.transform(self.train_data[self.categorical_features])
x_labels = [i + "_imputed" for i in self.categorical_features]
imputed_categorical_df = pd.DataFrame(x, columns=x_labels)
return imputed_categorical_df
def valid_imputed_categorical_df(self):
x = self.categorical_imputer.transform(self.valid_data[self.categorical_features])
x_labels = [i + "_imputed" for i in self.categorical_features]
imputed_categorical_df = pd.DataFrame(x, columns=x_labels)
return imputed_categorical_df
def hbos_transformer(self):
hbos = HBOS()
hbos.fit(self.train_transformed_data)
return hbos
def train_hbos_column(self):
hbos_t = self.hbos_transformer.predict(self.train_transformed_data)
return hbos_t
def valid_hbos_column(self):
hbos_v = self.hbos_transformer.predict(self.valid_transformed_data)
return hbos_v
def test_hbos_column(self):
hbos_test = self.hbos_transformer.predict(self.test_transformed_data)
return hbos_test
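    # HBOS (Histogram-Based Outlier Score, from pyod) is an unsupervised
    # outlier detector; the 0/1 labels predicted above are presumably meant
    # to be appended to the transformed train/valid/test frames as a feature.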
def target_encoder(self):
te = TargetEncoder(cols=self.train_imputed_categorical_df.columns.values)
te.fit(X=self.train_imputed_categorical_df, y=self.train_transformed_target)
return te
def train_target_encoded_df(self):
te = self.target_encoder.transform(self.train_imputed_categorical_df)
columns = list(
map(
lambda x: re.sub(r"_imputed", "_target_encoded", x),
list(self.train_imputed_categorical_df.columns.values),
)
)
        te = pd.DataFrame(data=te, columns=columns)
        return te
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from web3 import Web3
import time
import json
import io
plt.rc('figure', titleweight='bold')
plt.rc('axes', grid='True', linewidth=1.2, titlepad=20)
plt.rc('font', weight='bold', size=16)
plt.rc('lines', linewidth=3.5)
eXRD_rewards = '0xDF191bFbdE2e3E178e3336E63C18DD20d537c421'
with open('./infura.json') as f:
INFURA_URL = json.load(f)['url']
w3 = Web3(Web3.HTTPProvider(INFURA_URL))
with open('./eXRD_rewards.json') as f:
ABI = json.load(f)['result']
rewardsContract = w3.eth.contract(address=eXRD_rewards, abi=ABI)
emissionTimestamps = np.array([1605629336, 1608221336, 1608221858, 1610813858])
class RewardTrender():
baseIndex = pd.date_range(start='17-11-2020 17:00', periods=180*4, freq='6H')
    emissionIndex = pd.DatetimeIndex(emissionTimestamps * 1e9)
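    # Note: baseIndex covers 180 days at 6-hour resolution (180*4 points);
    # multiplying the epoch-second emission timestamps by 1e9 expresses them
    # in the nanosecond units that pd.DatetimeIndex expects.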
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 7 19:06:46 2021
@author: saUzu
"""
#%% Required libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#%% Preparing the Dollar/Lira and training data
#hamVeriler = pd.read_csv('02-21_egitim_verileri.csv')
# variables and values for bitcoin
hamVeriler = pd.read_csv('Bitcoin_USD_ogrenme.csv')
hamVeriler['Price'] = hamVeriler['Price'].str.replace(',','')
hamVeriler['Open'] = hamVeriler['Open'].str.replace(',','')
hamVeriler['High'] = hamVeriler['High'].str.replace(',','')
hamVeriler['Low'] = hamVeriler['Low'].str.replace(',','')
# end of the bitcoin-specific block
hamVeriler['Date'] = pd.to_datetime(hamVeriler['Date'])
hamVeriler = hamVeriler.sort_values(by='Date')
egitimVerisi = hamVeriler.iloc[:, 1:2].values
#%% Scaling the training data
from sklearn.preprocessing import MinMaxScaler
olcek = MinMaxScaler(feature_range=(0, 1))
olcekli_egitimVerisi = olcek.fit_transform(egitimVerisi)
#%% Split the training data into 60-day windows; the model learns by predicting each day from the previous 60 days
X_egitim = []
y_egitim = []
for i in range(60, 4080):
X_egitim.append(olcekli_egitimVerisi[i-60:i, 0])
y_egitim.append(olcekli_egitimVerisi[i, 0])
X_egitim, y_egitim = np.array(X_egitim), np.array(y_egitim)
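# Shapes: X_egitim is (4020, 60) and y_egitim is (4020,), i.e. 4080 scaled
# observations minus the 60-day warm-up window.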
#%% Reshaping the training data
X_egitim = np.reshape(X_egitim, (X_egitim.shape[0], X_egitim.shape[1], 1))
#%% Keras libraries for the LSTM
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
#%% Building the RNN
gerileme = Sequential()
#%% First LSTM layer
gerileme.add(LSTM(units = 100, return_sequences = True, input_shape = (X_egitim.shape[1], 1)))
gerileme.add(Dropout(0.2))
#%% Second LSTM layer
gerileme.add(LSTM(units = 100, return_sequences = True))
gerileme.add(Dropout(0.2))
#%% Third LSTM layer
gerileme.add(LSTM(units = 100, return_sequences = True))
gerileme.add(Dropout(0.2))
#%% Fourth LSTM layer
gerileme.add(LSTM(units = 100))
gerileme.add(Dropout(0.2))
#%% Output layer of the LSTM
gerileme.add(Dense(units = 1))
#%% Compiling the RNN
gerileme.compile(optimizer = 'adam', loss = 'mean_squared_error')
#%% Fitting the RNN to the training data
gerileme.fit(X_egitim, y_egitim, epochs = 100, batch_size = 100)
#%% Actual (test) data
#denemeVerileri = pd.read_csv('deneme_verileri2.csv')
# values and variables for bitcoin
denemeVerileri = pd.read_csv('Bitcoin_USD_deneme.csv')
denemeVerileri['Price'] = denemeVerileri['Price'].str.replace(',','')
denemeVerileri['Open'] = denemeVerileri['Open'].str.replace(',','')
denemeVerileri['High'] = denemeVerileri['High'].str.replace(',','')
denemeVerileri['Low'] = denemeVerileri['Low'].str.replace(',','')
# end of the bitcoin-specific block
denemeVerileri['Date'] = pd.to_datetime(denemeVerileri['Date'])
denemeVerileri = denemeVerileri.sort_values(by='Date')
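#%% Hedged continuation sketch (not in the original excerpt): build the test
# windows from the last 60 training days plus the test horizon, predict with
# the fitted model, and undo the scaling. Column names follow the CSVs above.
gercekFiyatlar = denemeVerileri.iloc[:, 1:2].values
toplamVeri = pd.concat((hamVeriler['Price'], denemeVerileri['Price']), axis=0)
girdiler = toplamVeri[len(toplamVeri) - len(denemeVerileri) - 60:].values
girdiler = girdiler.reshape(-1, 1).astype(float)
girdiler = olcek.transform(girdiler)
X_deneme = []
for i in range(60, 60 + len(denemeVerileri)):
    X_deneme.append(girdiler[i-60:i, 0])
X_deneme = np.reshape(np.array(X_deneme), (len(denemeVerileri), 60, 1))
tahminler = olcek.inverse_transform(gerileme.predict(X_deneme))
plt.plot(gercekFiyatlar.astype(float), color='red', label='Actual price')
plt.plot(tahminler, color='blue', label='Predicted price')
plt.legend()
plt.show()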
from io import StringIO
import pandas as pd
import numpy as np
import pytest
import bioframe
import bioframe.core.checks as checks
# import pyranges as pr
# def bioframe_to_pyranges(df):
# pydf = df.copy()
# pydf.rename(
# {"chrom": "Chromosome", "start": "Start", "end": "End"},
# axis="columns",
# inplace=True,
# )
# return pr.PyRanges(pydf)
# def pyranges_to_bioframe(pydf):
# df = pydf.df
# df.rename(
# {"Chromosome": "chrom", "Start": "start", "End": "end", "Count": "n_intervals"},
# axis="columns",
# inplace=True,
# )
# return df
# def pyranges_overlap_to_bioframe(pydf):
# ## convert the df output by pyranges join into a bioframe-compatible format
# df = pydf.df.copy()
# df.rename(
# {
# "Chromosome": "chrom_1",
# "Start": "start_1",
# "End": "end_1",
# "Start_b": "start_2",
# "End_b": "end_2",
# },
# axis="columns",
# inplace=True,
# )
# df["chrom_1"] = df["chrom_1"].values.astype("object") # to remove categories
# df["chrom_2"] = df["chrom_1"].values
# return df
chroms = ["chr12", "chrX"]
def mock_bioframe(num_entries=100):
pos = np.random.randint(1, 1e7, size=(num_entries, 2))
df = pd.DataFrame()
df["chrom"] = np.random.choice(chroms, num_entries)
df["start"] = np.min(pos, axis=1)
df["end"] = np.max(pos, axis=1)
df.sort_values(["chrom", "start"], inplace=True)
return df
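# mock_bioframe returns a BED-like frame of random intervals on chr12/chrX,
# sorted by (chrom, start); e.g. mock_bioframe(5) yields five valid rows
# with start <= end.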
############# tests #####################
def test_select():
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 5], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
region1 = "chr1:4-10"
df_result = pd.DataFrame([["chr1", 4, 5]], columns=["chrom", "start", "end"])
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX:4-6"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
### select with non-standard column names
region1 = "chrX:4-6"
new_names = ["chr", "chrstart", "chrend"]
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 5], ["chrX", 1, 5]],
columns=new_names,
)
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]],
columns=new_names,
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1, cols=new_names).reset_index(drop=True)
)
region1 = "chrX"
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1, cols=new_names).reset_index(drop=True)
)
### select from a DataFrame with NaNs
colnames = ["chrom", "start", "end", "view_region"]
df = pd.DataFrame(
[
["chr1", -6, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=colnames,
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_result = pd.DataFrame(
[["chr1", -6, 12, "chr1p"]],
columns=colnames,
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
region1 = "chr1:0-1"
pd.testing.assert_frame_equal(
df_result, bioframe.select(df, region1).reset_index(drop=True)
)
def test_trim():
### trim with view_df
view_df = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 13, 26, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr1", -6, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
["chr1", 32, 36, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
["chr1", 26, 26, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
)
with pytest.raises(ValueError):
bioframe.trim(df, view_df=view_df)
# df_view_col already exists, so need to specify it:
pd.testing.assert_frame_equal(
df_trimmed, bioframe.trim(df, view_df=view_df, df_view_col="view_region")
)
### trim with view_df interpreted from dictionary for chromsizes
chromsizes = {"chr1": 20, "chrX_0": 5}
df = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 26],
["chrX_0", 1, 8],
],
columns=["chrom", "startFunky", "end"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 20],
["chrX_0", 1, 5],
],
columns=["chrom", "startFunky", "end"],
).astype({"startFunky": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(
df_trimmed,
bioframe.trim(
df,
view_df=chromsizes,
cols=["chrom", "startFunky", "end"],
return_view_columns=False,
),
)
### trim with default limits=None and negative values
df = pd.DataFrame(
[
["chr1", -4, 12],
["chr1", 13, 26],
["chrX", -5, -1],
],
columns=["chrom", "start", "end"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 26],
["chrX", 0, 0],
],
columns=["chrom", "start", "end"],
)
pd.testing.assert_frame_equal(df_trimmed, bioframe.trim(df))
### trim when there are NaN intervals
df = pd.DataFrame(
[
["chr1", -4, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", -5, -1, "chrX_0"],
],
columns=["chrom", "start", "end", "region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", 0, 0, "chrX_0"],
],
columns=["chrom", "start", "end", "region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(df_trimmed, bioframe.trim(df))
### trim with view_df and NA intervals
view_df = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 13, 26, "chr1q"],
["chrX", 1, 12, "chrX_0"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr1", -6, 12],
["chr1", 0, 12],
[pd.NA, pd.NA, pd.NA],
["chrX", 1, 20],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, pd.NA],
["chrX", 1, 12, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
# infer df_view_col with assign_view and ignore NAs
pd.testing.assert_frame_equal(
df_trimmed,
bioframe.trim(df, view_df=view_df, df_view_col=None, return_view_columns=True)[
["chrom", "start", "end", "view_region"]
],
)
def test_expand():
d = """chrom start end
0 chr1 1 5
1 chr1 50 55
2 chr2 100 200"""
fake_bioframe = pd.read_csv(StringIO(d), sep=r"\s+")
expand_bp = 10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp)
d = """chrom start end
0 chr1 -9 15
1 chr1 40 65
2 chr2 90 210"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with negative pad
expand_bp = -10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp)
d = """chrom start end
0 chr1 3 3
1 chr1 52 52
2 chr2 110 190"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
expand_bp = -10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp, side="left")
d = """chrom start end
0 chr1 3 5
1 chr1 52 55
2 chr2 110 200"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with multiplicative pad
mult = 0
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 3 3
1 chr1 52 52
2 chr2 150 150"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
mult = 2.0
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 -1 7
1 chr1 48 58
2 chr2 50 250"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with NA and non-integer multiplicative pad
d = """chrom start end
0 chr1 1 5
1 NA NA NA
2 chr2 100 200"""
fake_bioframe = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
mult = 1.10
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 1 5
1 NA NA NA
2 chr2 95 205"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(df, fake_expanded)
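    # Added sketch: scale=1.0 should act as an identity on a simple frame,
    # assuming expand pads symmetrically around each interval midpoint.
    df_id = pd.DataFrame({"chrom": ["chr1"], "start": [10], "end": [20]})
    pd.testing.assert_frame_equal(df_id, bioframe.expand(df_id, pad=None, scale=1.0))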
def test_overlap():
### test consistency of overlap(how='inner') with pyranges.join ###
### note does not test overlap_start or overlap_end columns of bioframe.overlap
df1 = mock_bioframe()
df2 = mock_bioframe()
assert df1.equals(df2) == False
# p1 = bioframe_to_pyranges(df1)
# p2 = bioframe_to_pyranges(df2)
# pp = pyranges_overlap_to_bioframe(p1.join(p2, how=None))[
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
# ]
# bb = bioframe.overlap(df1, df2, how="inner")[
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
# ]
# pp = pp.sort_values(
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
# ignore_index=True,
# )
# bb = bb.sort_values(
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
# ignore_index=True,
# )
# pd.testing.assert_frame_equal(bb, pp, check_dtype=False, check_exact=False)
# print("overlap elements agree")
### test overlap on= [] ###
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", "cat"],
["chr1", 8, 12, "-", "cat"],
["chrX", 1, 8, "+", "cat"],
],
columns=["chrom1", "start", "end", "strand", "animal"],
)
df2 = pd.DataFrame(
[["chr1", 6, 10, "+", "dog"], ["chrX", 7, 10, "-", "dog"]],
columns=["chrom2", "start2", "end2", "strand", "animal"],
)
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 3
b = bioframe.overlap(
df1,
df2,
on=["strand"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 2
b = bioframe.overlap(
df1,
df2,
on=None,
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 0
### test overlap 'left', 'outer', and 'right'
b = bioframe.overlap(
df1,
df2,
on=None,
how="outer",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 3
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="outer",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 5
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="inner",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 0
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="right",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 2
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 3
### test keep_order and NA handling
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+"],
[pd.NA, pd.NA, pd.NA, "-"],
["chrX", 1, 8, "+"],
],
columns=["chrom", "start", "end", "strand"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[["chr1", 6, 10, "+"], [pd.NA, pd.NA, pd.NA, "-"], ["chrX", 7, 10, "-"]],
columns=["chrom2", "start2", "end2", "strand"],
).astype({"start2": pd.Int64Dtype(), "end2": pd.Int64Dtype()})
assert df1.equals(
bioframe.overlap(
df1, df2, how="left", keep_order=True, cols2=["chrom2", "start2", "end2"]
)[["chrom", "start", "end", "strand"]]
)
    assert not df1.equals(
bioframe.overlap(
df1, df2, how="left", keep_order=False, cols2=["chrom2", "start2", "end2"]
)[["chrom", "start", "end", "strand"]]
)
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chrX", 1, 8, pd.NA, pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[["chr1", 6, 10, pd.NA, "tiger"]],
columns=["chrom2", "start2", "end2", "strand", "animal"],
).astype({"start2": pd.Int64Dtype(), "end2": pd.Int64Dtype()})
assert (
bioframe.overlap(
df1,
df2,
how="outer",
cols2=["chrom2", "start2", "end2"],
return_index=True,
keep_order=False,
).shape
== (3, 12)
)
### result of overlap should still have bedframe-like properties
overlap_df = bioframe.overlap(
df1,
df2,
how="outer",
cols2=["chrom2", "start2", "end2"],
return_index=True,
suffixes=("", ""),
)
assert checks.is_bedframe(
overlap_df[df1.columns],
)
assert checks.is_bedframe(
overlap_df[df2.columns], cols=["chrom2", "start2", "end2"]
)
overlap_df = bioframe.overlap(
df1,
df2,
how="innter",
cols2=["chrom2", "start2", "end2"],
return_index=True,
suffixes=("", ""),
)
assert checks.is_bedframe(
overlap_df[df1.columns],
)
assert checks.is_bedframe(
overlap_df[df2.columns], cols=["chrom2", "start2", "end2"]
)
    # test that keep_order is incompatible with how != 'left'
with pytest.raises(ValueError):
bioframe.overlap(
df1,
df2,
how="outer",
on=["animal"],
cols2=["chrom2", "start2", "end2"],
keep_order=True,
)
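# A small assumption-based companion to the how= cases above (a sketch, not
# part of the original suite): with fully disjoint single-row inputs,
# how="inner" returns no rows while how="outer" keeps both unmatched rows.
def test_overlap_disjoint_sketch():
    dfa = pd.DataFrame([["chr1", 1, 5]], columns=["chrom", "start", "end"])
    dfb = pd.DataFrame([["chr1", 10, 20]], columns=["chrom", "start", "end"])
    assert len(bioframe.overlap(dfa, dfb, how="inner")) == 0
    assert len(bioframe.overlap(dfa, dfb, how="outer")) == 2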
def test_cluster():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
["chr1", 3, 8],
["chr1", 8, 10],
["chr1", 12, 14],
],
columns=["chrom", "start", "end"],
)
df_annotated = bioframe.cluster(df1)
assert (
df_annotated["cluster"].values == np.array([0, 0, 0, 1])
).all() # the last interval does not overlap the first three
df_annotated = bioframe.cluster(df1, min_dist=2)
assert (
df_annotated["cluster"].values == np.array([0, 0, 0, 0])
).all() # all intervals part of the same cluster
df_annotated = bioframe.cluster(df1, min_dist=None)
assert (
df_annotated["cluster"].values == np.array([0, 0, 1, 2])
).all() # adjacent intervals not clustered
df1.iloc[0, 0] = "chrX"
df_annotated = bioframe.cluster(df1)
assert (
df_annotated["cluster"].values == np.array([2, 0, 0, 1])
).all() # do not cluster intervals across chromosomes
# test consistency with pyranges (which automatically sorts df upon creation and uses 1-based indexing for clusters)
# assert (
# (bioframe_to_pyranges(df1).cluster(count=True).df["Cluster"].values - 1)
# == bioframe.cluster(df1.sort_values(["chrom", "start"]))["cluster"].values
# ).all()
# test on=[] argument
df1 = pd.DataFrame(
[
["chr1", 3, 8, "+", "cat", 5.5],
["chr1", 3, 8, "-", "dog", 6.5],
["chr1", 6, 10, "-", "cat", 6.5],
["chrX", 6, 10, "-", "cat", 6.5],
],
columns=["chrom", "start", "end", "strand", "animal", "location"],
)
assert (
bioframe.cluster(df1, on=["animal"])["cluster"].values == np.array([0, 1, 0, 2])
).all()
assert (
bioframe.cluster(df1, on=["strand"])["cluster"].values == np.array([0, 1, 1, 2])
).all()
assert (
bioframe.cluster(df1, on=["location", "animal"])["cluster"].values
== np.array([0, 2, 1, 3])
).all()
### test cluster with NAs
df1 = pd.DataFrame(
[
["chrX", 1, 8, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chr1", 8, 12, "+", pd.NA],
["chr1", 1, 8, np.nan, pd.NA],
[pd.NA, np.nan, pd.NA, "-", pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert bioframe.cluster(df1)["cluster"].max() == 3
assert bioframe.cluster(df1, on=["strand"])["cluster"].max() == 4
pd.testing.assert_frame_equal(df1, bioframe.cluster(df1)[df1.columns])
assert checks.is_bedframe(
bioframe.cluster(df1, on=["strand"]),
cols=["chrom", "cluster_start", "cluster_end"],
)
assert checks.is_bedframe(
bioframe.cluster(df1), cols=["chrom", "cluster_start", "cluster_end"]
)
assert checks.is_bedframe(bioframe.cluster(df1))
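# Companion sketch for the min_dist comments above (an illustration added here,
# not from the original suite): bookended intervals fall into a single cluster
# with the default min_dist=0.
def test_cluster_bookended_sketch():
    df = pd.DataFrame(
        [["chr1", 1, 5], ["chr1", 5, 8]], columns=["chrom", "start", "end"]
    )
    assert bioframe.cluster(df)["cluster"].nunique() == 1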
def test_merge():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
["chr1", 3, 8],
["chr1", 8, 10],
["chr1", 12, 14],
],
columns=["chrom", "start", "end"],
)
# the last interval does not overlap the first three with default min_dist=0
assert (bioframe.merge(df1)["n_intervals"].values == np.array([3, 1])).all()
    # adjacent intervals are not clustered with min_dist=None
assert (
bioframe.merge(df1, min_dist=None)["n_intervals"].values == np.array([2, 1, 1])
).all()
# all intervals part of one cluster
assert (
bioframe.merge(df1, min_dist=2)["n_intervals"].values == np.array([4])
).all()
df1.iloc[0, 0] = "chrX"
assert (
bioframe.merge(df1, min_dist=None)["n_intervals"].values
== np.array([1, 1, 1, 1])
).all()
assert (
bioframe.merge(df1, min_dist=0)["n_intervals"].values == np.array([2, 1, 1])
).all()
# total number of intervals should equal length of original dataframe
mock_df = mock_bioframe()
assert np.sum(bioframe.merge(mock_df, min_dist=0)["n_intervals"].values) == len(
mock_df
)
# # test consistency with pyranges
# pd.testing.assert_frame_equal(
# pyranges_to_bioframe(bioframe_to_pyranges(df1).merge(count=True)),
# bioframe.merge(df1),
# check_dtype=False,
# check_exact=False,
# )
# test on=['chrom',...] argument
df1 = pd.DataFrame(
[
["chr1", 3, 8, "+", "cat", 5.5],
["chr1", 3, 8, "-", "dog", 6.5],
["chr1", 6, 10, "-", "cat", 6.5],
["chrX", 6, 10, "-", "cat", 6.5],
],
columns=["chrom", "start", "end", "strand", "animal", "location"],
)
assert len(bioframe.merge(df1, on=None)) == 2
assert len(bioframe.merge(df1, on=["strand"])) == 3
assert len(bioframe.merge(df1, on=["strand", "location"])) == 3
assert len(bioframe.merge(df1, on=["strand", "location", "animal"])) == 4
d = """ chrom start end animal n_intervals
0 chr1 3 10 cat 2
1 chr1 3 8 dog 1
2 chrX 6 10 cat 1"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(
df,
bioframe.merge(df1, on=["animal"]),
check_dtype=False,
)
# merge with repeated indices
df = pd.DataFrame(
{"chrom": ["chr1", "chr2"], "start": [100, 400], "end": [110, 410]}
)
df.index = [0, 0]
pd.testing.assert_frame_equal(
df.reset_index(drop=True), bioframe.merge(df)[["chrom", "start", "end"]]
)
# test merge with NAs
df1 = pd.DataFrame(
[
["chrX", 1, 8, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chr1", 8, 12, "+", pd.NA],
["chr1", 1, 8, np.nan, pd.NA],
[pd.NA, np.nan, pd.NA, "-", pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert bioframe.merge(df1).shape[0] == 4
assert bioframe.merge(df1)["start"].iloc[0] == 1
assert bioframe.merge(df1)["end"].iloc[0] == 12
assert bioframe.merge(df1, on=["strand"]).shape[0] == df1.shape[0]
assert bioframe.merge(df1, on=["animal"]).shape[0] == df1.shape[0]
assert bioframe.merge(df1, on=["animal"]).shape[1] == df1.shape[1] + 1
assert checks.is_bedframe(bioframe.merge(df1, on=["strand", "animal"]))
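# Assumption-based sketch mirroring the min_dist behavior tested above:
# adjacent intervals merge with min_dist=0 but remain separate with
# min_dist=None.
def test_merge_adjacent_sketch():
    df = pd.DataFrame(
        [["chr1", 1, 5], ["chr1", 5, 8]], columns=["chrom", "start", "end"]
    )
    assert len(bioframe.merge(df, min_dist=0)) == 1
    assert len(bioframe.merge(df, min_dist=None)) == 2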
def test_complement():
### complementing a df with no intervals in chrX by a view with chrX should return entire chrX region
df1 = pd.DataFrame(
[["chr1", 1, 5], ["chr1", 3, 8], ["chr1", 8, 10], ["chr1", 12, 14]],
columns=["chrom", "start", "end"],
)
df1_chromsizes = {"chr1": 100, "chrX": 100}
df1_complement = pd.DataFrame(
[
["chr1", 0, 1, "chr1:0-100"],
["chr1", 10, 12, "chr1:0-100"],
["chr1", 14, 100, "chr1:0-100"],
["chrX", 0, 100, "chrX:0-100"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=df1_chromsizes), df1_complement
)
### test complement with two chromosomes ###
df1.iloc[0, 0] = "chrX"
df1_complement = pd.DataFrame(
[
["chr1", 0, 3, "chr1:0-100"],
["chr1", 10, 12, "chr1:0-100"],
["chr1", 14, 100, "chr1:0-100"],
["chrX", 0, 1, "chrX:0-100"],
["chrX", 5, 100, "chrX:0-100"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=df1_chromsizes), df1_complement
)
### test complement with no view_df and a negative interval
df1 = pd.DataFrame(
[["chr1", -5, 5], ["chr1", 10, 20]], columns=["chrom", "start", "end"]
)
df1_complement = pd.DataFrame(
[
["chr1", 5, 10, "chr1:0-9223372036854775807"],
["chr1", 20, np.iinfo(np.int64).max, "chr1:0-9223372036854775807"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(bioframe.complement(df1), df1_complement)
### test complement with an overhanging interval
df1 = pd.DataFrame(
[["chr1", -5, 5], ["chr1", 10, 20]], columns=["chrom", "start", "end"]
)
chromsizes = {"chr1": 15}
df1_complement = pd.DataFrame(
[
["chr1", 5, 10, "chr1:0-15"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=chromsizes, view_name_col="VR"), df1_complement
)
### test complement where an interval from df overlaps two different regions from view
df1 = pd.DataFrame([["chr1", 5, 15]], columns=["chrom", "start", "end"])
chromsizes = [("chr1", 0, 9, "chr1p"), ("chr1", 11, 20, "chr1q")]
df1_complement = pd.DataFrame(
[["chr1", 0, 5, "chr1p"], ["chr1", 15, 20, "chr1q"]],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(bioframe.complement(df1, chromsizes), df1_complement)
### test complement with NAs
df1 = pd.DataFrame(
[[pd.NA, pd.NA, pd.NA], ["chr1", 5, 15], [pd.NA, pd.NA, pd.NA]],
columns=["chrom", "start", "end"],
).astype(
{
"start": pd.Int64Dtype(),
"end": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(bioframe.complement(df1, chromsizes), df1_complement)
with pytest.raises(ValueError): # no NAs allowed in chromsizes
bioframe.complement(
df1, [("chr1", pd.NA, 9, "chr1p"), ("chr1", 11, 20, "chr1q")]
)
assert checks.is_bedframe(bioframe.complement(df1, chromsizes))
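# Hypothetical sanity check added as an illustration (not from the original
# suite): an interval plus its complement should tile the whole view, so the
# lengths sum to the chromosome size.
def test_complement_roundtrip_sketch():
    df = pd.DataFrame([["chr1", 10, 20]], columns=["chrom", "start", "end"])
    comp = bioframe.complement(df, view_df={"chr1": 100})
    assert (comp["end"] - comp["start"]).sum() + (20 - 10) == 100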
def test_closest():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[["chr1", 4, 8], ["chr1", 10, 11]], columns=["chrom", "start", "end"]
)
### closest(df1,df2,k=1) ###
d = """chrom start end chrom_ start_ end_ distance
0 chr1 1 5 chr1 4 8 0"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_": pd.Int64Dtype(),
"end_": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df1, df2, k=1))
### closest(df1,df2, ignore_overlaps=True)) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), ignore_overlaps=True)
)
### closest(df1,df2,k=2) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 4 8 0
1 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), k=2)
)
### closest(df2,df1) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 4 8 chr1 1 5 0
1 chr1 10 11 chr1 1 5 5 """
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df2, df1, suffixes=("_1", "_2")))
### change first interval to new chrom ###
df2.iloc[0, 0] = "chrA"
d = """chrom start end chrom_ start_ end_ distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_": pd.Int64Dtype(),
"end_": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df1, df2, k=1))
### test other return arguments ###
df2.iloc[0, 0] = "chr1"
d = """
index index_ have_overlap overlap_start overlap_end distance
0 0 0 True 4 5 0
1 0 1 False <NA> <NA> 5
"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(
df,
bioframe.closest(
df1,
df2,
k=2,
return_overlap=True,
return_index=True,
return_input=False,
return_distance=True,
),
check_dtype=False,
)
# closest should ignore empty groups (e.g. from categorical chrom)
df = pd.DataFrame(
[
["chrX", 1, 8],
["chrX", 2, 10],
],
columns=["chrom", "start", "end"],
)
d = """ chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chrX 1 8 chrX 2 10 0
1 chrX 2 10 chrX 1 8 0"""
df_closest = pd.read_csv(StringIO(d), sep=r"\s+")
df_cat = pd.CategoricalDtype(categories=["chrX", "chr1"], ordered=True)
df = df.astype({"chrom": df_cat})
pd.testing.assert_frame_equal(
df_closest,
bioframe.closest(df, suffixes=("_1", "_2")),
check_dtype=False,
check_categorical=False,
)
# closest should ignore null rows: code will need to be modified
# as for overlap if an on=[] option is added
df1 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
["chr1", 1, 5],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
["chr1", 4, 8],
[pd.NA, pd.NA, pd.NA],
["chr1", 10, 11],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_1": pd.Int64Dtype(),
"end_1": pd.Int64Dtype(),
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), ignore_overlaps=True, k=5)
)
with pytest.raises(ValueError): # inputs must be valid bedFrames
df1.iloc[0, 0] = "chr10"
bioframe.closest(df1, df2)
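# Illustrative sketch of the distance semantics exercised above (an addition,
# not from the original suite): overlapping pairs report distance 0; otherwise
# the gap between the two intervals is reported.
def test_closest_gap_sketch():
    dfa = pd.DataFrame([["chr1", 1, 5]], columns=["chrom", "start", "end"])
    dfb = pd.DataFrame([["chr1", 7, 9]], columns=["chrom", "start", "end"])
    assert bioframe.closest(dfa, dfb)["distance"].iloc[0] == 2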
def test_coverage():
#### coverage does not exceed length of original interval
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame([["chr1", 2, 10]], columns=["chrom", "start", "end"])
d = """chrom start end coverage
0 chr1 3 8 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
    ### coverage of an interval on a different chrom returns zero coverage
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame([["chrX", 3, 8]], columns=["chrom", "start", "end"])
d = """chrom start end coverage
0 chr1 3 8 0 """
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
### when a second overlap starts within the first
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame(
[["chr1", 3, 6], ["chr1", 5, 8]], columns=["chrom", "start", "end"]
)
d = """chrom start end coverage
0 chr1 3 8 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
### coverage of NA interval returns zero for coverage
df1 = pd.DataFrame(
[
["chr1", 10, 20],
[pd.NA, pd.NA, pd.NA],
["chr1", 3, 8],
[pd.NA, pd.NA, pd.NA],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[["chr1", 3, 6], ["chr1", 5, 8], [pd.NA, pd.NA, pd.NA]],
columns=["chrom", "start", "end"],
)
df1 = bioframe.sanitize_bedframe(df1)
df2 = bioframe.sanitize_bedframe(df2)
df_coverage = pd.DataFrame(
[
["chr1", 10, 20, 0],
[pd.NA, pd.NA, pd.NA, 0],
["chr1", 3, 8, 5],
[pd.NA, pd.NA, pd.NA, 0],
],
columns=["chrom", "start", "end", "coverage"],
).astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype(), "coverage": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(df_coverage, bioframe.coverage(df1, df2))
    ### coverage without return_input returns a single-column DataFrame
assert (
bioframe.coverage(df1, df2, return_input=False)["coverage"].values
== np.array([0, 0, 5, 0])
).all()
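# Assumption-based sketch of the capping behavior noted above: since coverage
# never exceeds the interval length, the covered fraction is always <= 1.
def test_coverage_fraction_sketch():
    dfa = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
    dfb = pd.DataFrame([["chr1", 2, 10]], columns=["chrom", "start", "end"])
    cov = bioframe.coverage(dfa, dfb)
    frac = cov["coverage"] / (cov["end"] - cov["start"])
    assert (frac <= 1).all()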
def test_subtract():
### no intervals should be left after self-subtraction
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 7], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
assert len(bioframe.subtract(df1, df1)) == 0
### no intervals on chrX should remain after subtracting a longer interval
### interval on chr1 should be split.
### additional column should be propagated to children.
df2 = pd.DataFrame(
[
["chrX", 0, 18],
["chr1", 5, 6],
],
columns=["chrom", "start", "end"],
)
df1["animal"] = "sea-creature"
df_result = pd.DataFrame(
[["chr1", 4, 5, "sea-creature"], ["chr1", 6, 7, "sea-creature"]],
columns=["chrom", "start", "end", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(
df_result,
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
### no intervals on chrX should remain after subtracting a longer interval
df2 = pd.DataFrame(
[["chrX", 0, 4], ["chr1", 6, 6], ["chrX", 4, 9]],
columns=["chrom", "start", "end"],
)
df1["animal"] = "sea-creature"
df_result = pd.DataFrame(
[["chr1", 4, 6, "sea-creature"], ["chr1", 6, 7, "sea-creature"]],
columns=["chrom", "start", "end", "animal"],
)
pd.testing.assert_frame_equal(
df_result.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}),
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
    ### subtracting dataframes with funny column names
funny_cols = ["C", "chromStart", "chromStop"]
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 7], ["chrX", 1, 5]],
columns=funny_cols,
)
df1["strand"] = "+"
assert len(bioframe.subtract(df1, df1, cols1=funny_cols, cols2=funny_cols)) == 0
funny_cols2 = ["chr", "st", "e"]
df2 = pd.DataFrame(
[
["chrX", 0, 18],
["chr1", 5, 6],
],
columns=funny_cols2,
)
df_result = pd.DataFrame(
[["chr1", 4, 5, "+"], ["chr1", 6, 7, "+"]],
columns=funny_cols + ["strand"],
)
df_result = df_result.astype(
{funny_cols[1]: pd.Int64Dtype(), funny_cols[2]: pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(
df_result,
bioframe.subtract(df1, df2, cols1=funny_cols, cols2=funny_cols2)
.sort_values(funny_cols)
.reset_index(drop=True),
)
# subtract should ignore empty groups
df1 = pd.DataFrame(
[
["chrX", 1, 8],
["chrX", 2, 10],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[
["chrX", 1, 8],
],
columns=["chrom", "start", "end"],
)
df_cat = pd.CategoricalDtype(categories=["chrX", "chr1"], ordered=True)
df1 = df1.astype({"chrom": df_cat})
df_subtracted = pd.DataFrame(
[
["chrX", 8, 10],
],
columns=["chrom", "start", "end"],
)
assert bioframe.subtract(df1, df1).empty
pd.testing.assert_frame_equal(
df_subtracted.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}),
bioframe.subtract(df1, df2),
check_dtype=False,
check_categorical=False,
)
## test transferred from deprecated bioframe.split
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 7], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[
["chrX", 4],
["chr1", 5],
],
columns=["chrom", "pos"],
)
df2["start"] = df2["pos"]
df2["end"] = df2["pos"]
df_result = (
pd.DataFrame(
[
["chrX", 1, 4],
["chrX", 3, 4],
["chrX", 4, 5],
["chrX", 4, 8],
["chr1", 5, 7],
["chr1", 4, 5],
],
columns=["chrom", "start", "end"],
)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True)
.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
)
pd.testing.assert_frame_equal(
df_result,
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
# Test the case when a chromosome should not be split (now implemented with subtract)
df1 = pd.DataFrame(
[
["chrX", 3, 8],
["chr1", 4, 7],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame([["chrX", 4]], columns=["chrom", "pos"])
df2["start"] = df2["pos"].values
df2["end"] = df2["pos"].values
df_result = (
pd.DataFrame(
[
["chrX", 3, 4],
["chrX", 4, 8],
["chr1", 4, 7],
],
columns=["chrom", "start", "end"],
)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True)
)
pd.testing.assert_frame_equal(
df_result.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}),
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
# subtract should ignore null rows
df1 = pd.DataFrame(
[[pd.NA, pd.NA, pd.NA], ["chr1", 1, 5]],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[
["chrX", 1, 5],
[pd.NA, pd.NA, pd.NA],
["chr1", 4, 8],
[pd.NA, pd.NA, pd.NA],
["chr1", 10, 11],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_subtracted = pd.DataFrame(
[
["chr1", 1, 4],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(df_subtracted, bioframe.subtract(df1, df2))
df1 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert len(bioframe.subtract(df1, df2)) == 0 # empty df1 but valid chroms in df2
with pytest.raises(ValueError): # no non-null chromosomes
bioframe.subtract(df1, df1)
df2 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
with pytest.raises(ValueError): # no non-null chromosomes
bioframe.subtract(df1, df2)
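# Companion sketch to the split example above (an illustration, not from the
# original suite): subtracting an internal interval splits the parent into two
# flanking pieces.
def test_subtract_split_sketch():
    dfa = pd.DataFrame([["chr1", 0, 10]], columns=["chrom", "start", "end"])
    dfb = pd.DataFrame([["chr1", 4, 6]], columns=["chrom", "start", "end"])
    result = (
        bioframe.subtract(dfa, dfb)
        .sort_values(["chrom", "start", "end"])
        .reset_index(drop=True)
    )
    assert result["start"].tolist() == [0, 6]
    assert result["end"].tolist() == [4, 10]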
def test_setdiff():
cols1 = ["chrom1", "start", "end"]
cols2 = ["chrom2", "start", "end"]
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", "cat"],
["chr1", 8, 12, "-", "cat"],
["chrX", 1, 8, "+", "cat"],
],
columns=cols1 + ["strand", "animal"],
)
df2 = pd.DataFrame(
[
["chrX", 7, 10, "-", "dog"],
["chr1", 6, 10, "-", "cat"],
["chr1", 6, 10, "-", "cat"],
],
columns=cols2 + ["strand", "animal"],
)
assert (
len(
bioframe.setdiff(
df1,
df2,
cols1=cols1,
cols2=cols2,
on=None,
)
)
== 0
) # everything overlaps
assert (
len(
bioframe.setdiff(
df1,
df2,
cols1=cols1,
cols2=cols2,
on=["animal"],
)
)
== 1
) # two overlap, one remains
assert (
len(
bioframe.setdiff(
df1,
df2,
cols1=cols1,
cols2=cols2,
on=["strand"],
)
)
== 2
) # one overlaps, two remain
# setdiff should ignore nan rows
df1 = pd.concat([pd.DataFrame([pd.NA]), df1, pd.DataFrame([pd.NA])])[
["chrom1", "start", "end", "strand", "animal"]
]
df1 = df1.astype(
{
"start": pd.Int64Dtype(),
"end": pd.Int64Dtype(),
}
)
df2 = pd.concat([pd.DataFrame([pd.NA]), df2, pd.DataFrame([pd.NA])])[
["chrom2", "start", "end", "strand", "animal"]
]
df2 = df2.astype(
{
"start": pd.Int64Dtype(),
"end": pd.Int64Dtype(),
}
)
assert (2, 5) == np.shape(bioframe.setdiff(df1, df1, cols1=cols1, cols2=cols1))
assert (2, 5) == np.shape(bioframe.setdiff(df1, df2, cols1=cols1, cols2=cols2))
assert (4, 5) == np.shape(
bioframe.setdiff(df1, df2, on=["strand"], cols1=cols1, cols2=cols2)
)
def test_count_overlaps():
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", "cat"],
["chr1", 8, 12, "-", "cat"],
["chrX", 1, 8, "+", "cat"],
],
columns=["chrom1", "start", "end", "strand", "animal"],
)
df2 = pd.DataFrame(
[
["chr1", 6, 10, "+", "dog"],
["chr1", 6, 10, "+", "dog"],
["chrX", 7, 10, "+", "dog"],
["chrX", 7, 10, "+", "dog"],
],
columns=["chrom2", "start2", "end2", "strand", "animal"],
)
assert (
bioframe.count_overlaps(
df1,
df2,
on=None,
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)["count"].values
== np.array([2, 2, 2])
).all()
assert (
bioframe.count_overlaps(
df1,
df2,
on=["strand"],
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)["count"].values
== np.array([2, 0, 2])
).all()
assert (
bioframe.count_overlaps(
df1,
df2,
on=["strand", "animal"],
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)["count"].values
== np.array([0, 0, 0])
).all()
# overlaps with pd.NA
counts_no_nans = bioframe.count_overlaps(
df1,
df2,
on=None,
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
df1_na = (pd.concat([pd.DataFrame([pd.NA]), df1, pd.DataFrame([pd.NA])])).astype(
{
"start": pd.Int64Dtype(),
"end": pd.Int64Dtype(),
}
)[["chrom1", "start", "end", "strand", "animal"]]
df2_na = (pd.concat([pd.DataFrame([pd.NA]), df2, pd.DataFrame([pd.NA])])).astype(
{
"start2": pd.Int64Dtype(),
"end2": pd.Int64Dtype(),
}
)[["chrom2", "start2", "end2", "strand", "animal"]]
counts_nans_inserted_after = (
pd.concat([pd.DataFrame([pd.NA]), counts_no_nans, pd.DataFrame([pd.NA])])
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype(),})[
["chrom1", "start", "end", "strand", "animal", "count"]
]
counts_nans = bioframe.count_overlaps(
df1_na,
df2_na,
on=None,
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
pd.testing.assert_frame_equal(
counts_nans,
bioframe.count_overlaps(
df1_na,
df2,
on=None,
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
),
)
assert (
counts_nans["count"].values
== counts_nans_inserted_after["count"].fillna(0).values
).all()
    ### count_overlaps without return_input returns a single-column DataFrame
pd.testing.assert_frame_equal(
bioframe.count_overlaps(
df1_na,
df2_na,
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_input=False,
),
pd.DataFrame(counts_nans["count"]),
)
def test_assign_view():
## default assignment case
view_df = pd.DataFrame(
[
["chr11", 1, 8, "chr11p"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr11", 0, 10, "+"],
],
columns=["chrom", "start", "end", "strand"],
)
df_assigned = pd.DataFrame(
[
["chr11", 0, 10, "+", "chr11p"],
],
columns=["chrom", "start", "end", "strand", "view_region"],
)
df_assigned = df_assigned.astype(
{"chrom": str, "start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(df_assigned, bioframe.assign_view(df, view_df))
# assignment with funny view_name_col and an interval on chr2 not cataloged in the view_df
view_df = pd.DataFrame(
[
["chrX", 1, 8, "oranges"],
["chrX", 8, 20, "grapefruit"],
["chr1", 0, 10, "apples"],
],
columns=["chrom", "start", "end", "fruit"],
)
df = pd.DataFrame(
[
["chr1", 0, 10, "+"],
["chrX", 5, 10, "+"],
["chrX", 0, 5, "+"],
["chr2", 5, 10, "+"],
],
columns=["chrom", "start", "end", "strand"],
)
df_assigned = pd.DataFrame(
[
["chr1", 0, 10, "+", "apples"],
["chrX", 5, 10, "+", "oranges"],
["chrX", 0, 5, "+", "oranges"],
],
columns=["chrom", "start", "end", "strand", "funny_view_region"],
)
df_assigned = df_assigned.astype(
{"chrom": str, "start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(
df_assigned,
bioframe.assign_view(
df,
view_df,
view_name_col="fruit",
df_view_col="funny_view_region",
drop_unassigned=True,
),
)
### keep the interval with NA as its region if drop_unassigned is False
df_assigned = pd.DataFrame(
[
["chr1", 0, 10, "+", "apples"],
["chrX", 5, 10, "+", "oranges"],
["chrX", 0, 5, "+", "oranges"],
["chr2", 5, 10, "+", pd.NA],
],
columns=["chrom", "start", "end", "strand", "funny_view_region"],
)
df_assigned = df_assigned.astype(
{"chrom": str, "start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(
df_assigned,
bioframe.assign_view(
df,
view_df,
view_name_col="fruit",
df_view_col="funny_view_region",
drop_unassigned=False,
),
)
### assign_view with NA values assigns a view of none
df = pd.DataFrame(
[
["chr1", 0, 10, "+"],
["chrX", 5, 10, "+"],
[pd.NA, pd.NA, pd.NA, "+"],
["chrX", 0, 5, "+"],
["chr2", 5, 10, "+"],
],
columns=["chrom", "start", "end", "strand"],
).astype({"start": pd.Int64Dtype(), "end": | pd.Int64Dtype() | pandas.Int64Dtype |
"""Script to import trodes, micro-controller, and config data
"""
import glob
import pickle
import os
from collections import defaultdict
import numpy as np
import pandas as pd
from tqdm import tqdm
from software.preprocessing.video_data.DLC.Reconstruction import get_kinematic_data
from software.preprocessing.config_data.config_parser import import_config_data
from software.preprocessing.controller_data.controller_data_parser import import_controller_data, get_reach_indices, get_reach_times
from software.preprocessing.reaching_without_borders.rwb import match_times, get_successful_trials
from software.preprocessing.trodes_data.experiment_data_parser import import_trodes_data
from software.preprocessing.trodes_data.calibration_data_parser import get_traces_frame
def load_files(trodes_dir, exp_name, controller_path, config_dir, rat, session, video_path, analysis=False,
               cns_flag=False, pns=False, positional=False, force_rerun_of_data=True, sample_rate=150):
"""
Parameters
----------
save_path : str, path location to save data extraction at ex '/larry/lobsters/home/book/'
trodes_dir : directory containing trodes .rec file
exp_name : name of folder containing .rec file/ video file
controller_path : full path to micro-controller data
config_dir : directory containing .json file with configuration parameters
rat : name of rat eg RM16
session : name of experimental session eg S1
analysis : boolean, set as True to extract experimental analysis
video_path : path to video data
cns_flag : boolean, manual set of cns path
pns : boolean, manual set of pns path
Returns
-------
dataframe : pandas dataframe containing experimental values for a single experimental session
"""
# importing data
exp_names = exp_name[2:-1]
exp_names = exp_names.rsplit('.', 1)[0]
trodes_dir = trodes_dir.rsplit('/', 1)[0]
# search trodes_dir for files named
experimental_data_found = 0
for ff in glob.glob(trodes_dir +'/**experimental_df.h5'):
experimental_data_found = ff
if force_rerun_of_data:
experimental_data_found = 0
if experimental_data_found:
dataframe = pd.read_hdf(experimental_data_found)
else:
print('Generating sensor data manually!')
if positional:
positional_data = get_traces_frame(trodes_dir, exp_names)
r_x = positional_data['x_start_position']
r_y = positional_data['y_start_position']
r_z = positional_data['z_start_position']
t_x = positional_data['x_duration']
d_x = positional_data['x_displacement']
t_y = positional_data['y_duration']
d_y = positional_data['y_displacement']
t_z = positional_data['z_duration']
d_z = positional_data['z_displacement']
if cns_flag:
os.chdir(cns_flag)
        trodes_data = import_trodes_data(trodes_dir, exp_names, sampling_rate=sample_rate)
if pns:
os.chdir(pns)
try:
config_data = import_config_data(config_dir)
controller_data = import_controller_data(controller_path)
        except Exception:
            print("Can't get config or controller data")
if analysis:
x_pot=trodes_data['analog']['x_pot']
y_pot=trodes_data['analog']['y_pot']
z_pot=trodes_data['analog']['z_pot']
lick_data = trodes_data['DIO']['IR_beam']
true_time = match_times(controller_data, trodes_data)
reach_indices = get_reach_indices(controller_data)
successful_trials = get_successful_trials(controller_data, true_time, trodes_data)
reach_masks = get_reach_times(true_time, reach_indices)
reach_masks_start = np.asarray(reach_masks['start'])
reach_masks_stop = np.asarray(reach_masks['stop'])
reach_indices_start = reach_indices['start']
reach_indices_stop = reach_indices['stop']
trial_masks = trial_mask(true_time, reach_indices_start, reach_indices_stop, successful_trials)
dataframe = to_df(exp_names, config_data, true_time, successful_trials, trial_masks, rat, session, lick_data, controller_data, reach_indices,
x_pot,y_pot,z_pot,reach_masks_start,reach_masks_stop)
exp_save_dir = trodes_dir + '/experimental_df.h5'
dataframe.to_hdf(exp_save_dir,key='df')
return dataframe
def name_scrape(file):
"""
Parameters
----------
file - string of a file name
pns - string, address of pns folder
Returns
-------
controller_file - string containing address of controller file
trodes_files - string containing address of trodes files
config_file - string containing address of config file
exp_name - string containing experiment name eg 'RMxxYYYYMMDD_time', found through parsing the trodes file
"""
# controller_data
name = file.split('/')[6]
    path_d = file.replace('/cns', '/PNS_data')
path_d = path_d.rsplit('/R', 2)[0]
config_path = path_d + '/workspaces'
controller_path = path_d + '/sensor_data'
video_path = path_d + '/videos/**.csv'
# trodes_data
n = file.rsplit('/', 1)[1]
if '/S' in file:
sess = file.rsplit('/S')
sess = str(sess[1]) # get 'session' part of the namestring
ix = 'S' + sess[0]
exp_name = str(ix) + n
return controller_path, config_path, exp_name, name, ix, n, video_path
def host_off(cns,save_path = False):
"""
Parameters
----------
save_path : path to save experimental dataframe
cns : path to cns
Returns
-------
save_df : complete experimental data frame
"""
pns = '/clusterfs/NSDS_data/brnelson/PNS_data/'
cns_pattern = cns + '/**/*.rec'
print(cns_pattern)
# cns is laid out rat/day/session/file_name/localdir (we want to be in localdir)
# search for all directory paths containing .rec files
save_df = pd.DataFrame()
for file in tqdm(glob.glob(cns_pattern,recursive=True)):
controller_path, config_path, exp_name, name, ix, trodes_name,video_path = name_scrape(file)
print(exp_name + ' is being added..')
list_of_df = load_files(file, exp_name, controller_path, config_path, name, ix,video_path,
analysis=True, cns_flag=cns, pns=pns,force_rerun_of_data = True, sample_rate = 600)
save_df= | pd.concat([save_df,list_of_df]) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Tests that dialects are properly handled during parsing
for all of the parsers defined in parsers.py
"""
import csv
import pytest
from pandas.compat import StringIO
from pandas.errors import ParserWarning
from pandas import DataFrame
import pandas.util.testing as tm
def test_dialect(all_parsers):
parser = all_parsers
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
# Conflicting dialect quoting.
with tm.assert_produces_warning(ParserWarning):
df = parser.read_csv(StringIO(data), dialect=dia)
data = """\
label1,label2,label3
index1,a,c,e
index2,b,d,f
"""
exp = parser.read_csv(StringIO(data))
exp.replace("a", "\"a", inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(all_parsers):
dialect_name = "mydialect"
parser = all_parsers
data = """\
fruit:vegetable
apple:broccoli
pear:tomato
"""
exp = DataFrame({
"fruit": ["apple", "pear"],
"vegetable": ["broccoli", "tomato"]
})
csv.register_dialect(dialect_name, delimiter=":")
# Conflicting dialect delimiter.
with | tm.assert_produces_warning(ParserWarning) | pandas.util.testing.assert_produces_warning |
import numpy as np
import pandas as pd
from matplotlib import *
# .........................Series.......................#
x1 = np.array([1, 2, 3, 4])
s = pd.Series(x1, index=[1, 2, 3, 4])
print(s)
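# A quick sketch of access on the Series above (an illustrative addition): the
# explicit index is [1, 2, 3, 4], so s[1] is a label-based lookup here.
print(s[1])       # value stored under label 1 -> 1
print(s.values)   # underlying NumPy array -> [1 2 3 4]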
# .......................DataFrame......................#
x2 = np.array([1, 2, 3, 4, 5, 6])
s = pd.DataFrame(x2)
print(s)
x3 = np.array([['Alex', 10], ['Nishit', 21], ['Aman', 22]])
s = pd.DataFrame(x3, columns=['Name', 'Age'])
print(s)
data = {'Name': ['Tom', 'Jack', 'Steve', 'Ricky'], 'Age': [28, 34, 29, 42]}
df = | pd.DataFrame(data, index=['rank1', 'rank2', 'rank3', 'rank4']) | pandas.DataFrame |
from pm4py.objects.log.importer.xes import importer as xes_importer
from pm4py.statistics.variants.log import get as variants_module
from pm4py.objects.conversion.log.versions.to_dataframe import get_dataframe_from_event_stream
import pm4py
import pandas as pd
from scipy.stats import wasserstein_distance
import collections
import time
from amun.guessing_advantage import AggregateType
def earth_mover_dist_freq(log1, log2):
dfg1, start_activities, end_activities = pm4py.discover_dfg(log1)
del log1
dfg2, start_activities, end_activities = pm4py.discover_dfg(log2)
del log2
dic1=dict(dfg1)
dic2=dict(dfg2)
keys = set(list(dic1.keys()) + list(dic2.keys()))
for key in keys:
if key not in dic1.keys():
dic1[key] = 0
if key not in dic2.keys():
dic2[key] = 0
dic1 = collections.OrderedDict(sorted(dic1.items()))
dic2 = collections.OrderedDict(sorted(dic2.items()))
v1=list(dic1.values())
v2=list(dic2.values())
distance = wasserstein_distance(v1,v2)
return distance
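# A minimal standalone illustration of the pattern used above (a hypothetical
# helper, not part of the original module): align two frequency dictionaries
# on the union of their keys, fill missing transitions with 0, then compare
# the resulting value vectors with the Wasserstein distance.
def _emd_between_dicts(dic1, dic2):
    keys = sorted(set(dic1) | set(dic2))
    v1 = [dic1.get(k, 0) for k in keys]
    v2 = [dic2.get(k, 0) for k in keys]
    return wasserstein_distance(v1, v2)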
def earth_mover_dist_time(log1, log2):
start = time.time()
data = get_dataframe_from_event_stream(log1)
dfg1= get_dfg_time(data)
del log1
del data
data = get_dataframe_from_event_stream(log2)
dfg2= get_dfg_time(data)
del log2
del data
end = time.time()
diff = end - start
print("log to DFG : %s (minutes)" % (diff / 60.0))
start=time.time()
keys = set(list(dfg1.keys()) + list(dfg2.keys()))
for key in keys:
if key not in dfg1.keys():
dfg1[key] = 0
if key not in dfg2.keys():
dfg2[key] = 0
end = time.time()
diff = end - start
print("keys loop : %s (minutes)" % (diff / 60.0))
dic1 = collections.OrderedDict(sorted(dfg1.items()))
dic2 = collections.OrderedDict(sorted(dfg2.items()))
v1=list(dic1.values())
v2=list(dic2.values())
start = time.time()
distance = wasserstein_distance(v1,v2)
end = time.time()
diff = end - start
print("wasserstein_distance : %s (minutes)" % (diff / 60.0))
return distance
def get_dfg_time(data):
"""
    Returns the DFG matrix as a dictionary of lists. The key is the pair of activities
    and the value is a list of values.
"""
#moving first row to the last one
temp_row= data.iloc[0]
data2=data.copy()
data2.drop(data2.index[0], inplace=True)
data2=data2.append(temp_row)
#changing column names
columns= data2.columns
columns= [i+"_2" for i in columns]
data2.columns=columns
#combining the two dataframes into one
data = data.reset_index()
data2=data2.reset_index()
data=pd.concat([data, data2], axis=1)
#filter the rows with the same case
data=data[data['case:concept:name'] == data['case:concept:name_2']]
#calculating time difference
data['time:timestamp']=pd.to_datetime(data['time:timestamp'],utc=True)
data['time:timestamp_2'] = | pd.to_datetime(data['time:timestamp_2'],utc=True) | pandas.to_datetime |
import unittest, os, json, gc
from ovejero import hierarchical_inference, model_trainer
from baobab import configs
from lenstronomy.Util.param_util import ellipticity2phi_q
from baobab import distributions
import numpy as np
from scipy import stats, special
import tensorflow as tf
import pandas as pd
import matplotlib.pyplot as plt
class HierarchicalnferenceTest(unittest.TestCase):
def setUp(self):
# Open up the config file.
np.random.seed(2)
self.root_path = os.path.dirname(os.path.abspath(__file__))+'/test_data/'
self.cfg = configs.BaobabConfig.from_file(self.root_path +
'test_baobab_cfg.py')
self.cfg_pr = hierarchical_inference.load_prior_config(self.root_path +
'test_ovejero_cfg_prior.py')
self.cfg_cov = hierarchical_inference.load_prior_config(self.root_path +
'test_emp_cfg_prior.py')
self.lens_params = ['external_shear_gamma_ext','external_shear_psi_ext',
'lens_mass_center_x','lens_mass_center_y',
'lens_mass_e1','lens_mass_e2',
'lens_mass_gamma','lens_mass_theta_E']
self.lens_params_cov = ['external_shear_gamma_ext',
'external_shear_psi_ext',
'lens_mass_center_x','lens_mass_center_y',
'lens_mass_q','lens_mass_phi',
'lens_mass_gamma','lens_mass_theta_E']
self.eval_dict = hierarchical_inference.build_eval_dict(self.cfg,
self.lens_params)
self.eval_dict_prior = hierarchical_inference.build_eval_dict(
self.cfg_pr,self.lens_params,baobab_config=False)
self.eval_dict_cov = hierarchical_inference.build_eval_dict(
self.cfg_cov,self.lens_params_cov,baobab_config=False)
def tearDown(self):
# Clean up for memory
self.cfg = None
self.cfg_pr = None
self.cfg_cov = None
self.eval_dict = None
self.eval_dict_prior = None
self.eval_dict_cov = None
def test_build_eval_dict(self):
# Check that the eval dictionary is built correctly for a test config.
n_lens_param_p_params = [2,5,2,2,4,4,2,2]
# First we test the case without priors.
self.assertEqual(self.eval_dict['hyp_len'],23)
self.assertListEqual(list(self.eval_dict['hyp_values']),[-2.73,1.05,0.0,
0.5*np.pi,10.0,-0.5*np.pi,0.5*np.pi,0.0,0.102,0.0,0.102,4.0,4.0,
-0.55,0.55,4.0,4.0,-0.55,0.55,0.7,0.1,0.0,0.1])
self.assertListEqual(self.eval_dict['hyp_names'],[
'external_shear_gamma_ext:mu','external_shear_gamma_ext:sigma',
'external_shear_psi_ext:mu','external_shear_psi_ext:alpha',
'external_shear_psi_ext:p','external_shear_psi_ext:lower',
'external_shear_psi_ext:upper','lens_mass_center_x:mu',
'lens_mass_center_x:sigma','lens_mass_center_y:mu',
'lens_mass_center_y:sigma','lens_mass_e1:a',
'lens_mass_e1:b','lens_mass_e1:lower','lens_mass_e1:upper',
'lens_mass_e2:a','lens_mass_e2:b','lens_mass_e2:lower',
'lens_mass_e2:upper','lens_mass_gamma:mu',
'lens_mass_gamma:sigma','lens_mass_theta_E:mu',
'lens_mass_theta_E:sigma'])
total = 0
for li,lens_param in enumerate(self.lens_params):
n_p = n_lens_param_p_params[li]
self.assertListEqual(list(self.eval_dict[lens_param]['hyp_ind']),
list(range(total,total+n_p)))
self.assertFalse(self.eval_dict[lens_param]['eval_fn_kwargs'])
if n_p == 2:
self.assertTrue((self.eval_dict[lens_param]['eval_fn'] is
distributions.eval_normal_logpdf_approx) or (
self.eval_dict[lens_param]['eval_fn'] is
distributions.eval_lognormal_logpdf_approx))
if n_p == 4:
self.assertTrue(self.eval_dict[lens_param]['eval_fn'] is
distributions.eval_beta_logpdf_approx)
if n_p == 5:
self.assertTrue(self.eval_dict[lens_param]['eval_fn'] is
distributions.eval_generalized_normal_logpdf_approx)
total += n_p
# Now we test the case with priors.
self.assertEqual(self.eval_dict_prior['hyp_len'],14)
self.assertListEqual(list(self.eval_dict_prior['hyp_init']),[-2.73,1.05,
0.0,0.102,0.0,0.102,0.0,0.1,0.0,0.1,0.7,0.1,0.0,0.1])
self.assertListEqual(list(self.eval_dict_prior['hyp_sigma']),[0.5,0.05,
0.2,0.03,0.2,0.03,0.3,0.01,0.3,0.01,0.3,0.01,0.3,0.01])
self.assertListEqual(self.eval_dict_prior['hyp_names'],[
'external_shear_gamma_ext:mu','external_shear_gamma_ext:sigma',
'lens_mass_center_x:mu','lens_mass_center_x:sigma',
'lens_mass_center_y:mu','lens_mass_center_y:sigma','lens_mass_e1:mu',
'lens_mass_e1:sigma','lens_mass_e2:mu','lens_mass_e2:sigma',
'lens_mass_gamma:mu','lens_mass_gamma:sigma','lens_mass_theta_E:mu',
'lens_mass_theta_E:sigma'])
n_lens_param_p_params = [2,0,2,2,2,2,2,2]
total = 0
for li, lens_param in enumerate(self.lens_params):
n_p = n_lens_param_p_params[li]
if n_p==0:
self.assertFalse(list(self.eval_dict_prior[lens_param]['hyp_ind']
))
self.assertTrue((self.eval_dict_prior[lens_param]['eval_fn'] is
distributions.eval_uniform_logpdf_approx))
else:
self.assertListEqual(list(self.eval_dict_prior[lens_param]['hyp_ind']),
list(range(total,total+n_p)))
self.assertTrue((self.eval_dict_prior[lens_param]['eval_fn'] is
distributions.eval_normal_logpdf_approx) or (
self.eval_dict_prior[lens_param]['eval_fn'] is
distributions.eval_lognormal_logpdf_approx))
total += n_p
hyp_eval_values = np.log([1/10,1/10,1/10,1/10,1/10,1/10,1/2,1/10,1/2,
1/10,1/10,1/10,1/10,1/10])
self.assertEqual(len(hyp_eval_values),len(
self.eval_dict_prior['hyp_prior']))
for hpi, hyp_prior in enumerate(self.eval_dict_prior['hyp_prior']):
self.assertAlmostEqual(hyp_eval_values[hpi],hyp_prior(0.5))
hyp_eval_values = np.log([1/10,0,1/10,0,1/10,0,1/2,
0,1/2,0,1/10,0,1/10,0,1/10,0])
for hpi, hyp_prior in enumerate(self.eval_dict_prior['hyp_prior']):
self.assertAlmostEqual(hyp_eval_values[hpi],hyp_prior(-0.5))
# Now test a distribution with a covariance
self.assertEqual(self.eval_dict_cov['hyp_len'],15)
self.assertListEqual(list(self.eval_dict_cov['hyp_init']),[-2.73,1.05,
0.0,0.102,0.0,0.102,0.242, -0.408, 0.696,0.5,0.5,0.5,0.4,0.4,0.4])
self.assertListEqual(list(self.eval_dict_cov['hyp_sigma']),[0.5,0.05,
0.2,0.03,0.2,0.03,0.1,0.1,0.1,0.5,0.5,0.5,0.4,0.4,0.4])
self.assertListEqual(self.eval_dict_cov['hyp_names'],[
'external_shear_gamma_ext:mu','external_shear_gamma_ext:sigma',
'lens_mass_center_x:mu','lens_mass_center_x:sigma',
'lens_mass_center_y:mu','lens_mass_center_y:sigma','cov_mu_0',
'cov_mu_1','cov_mu_2','cov_tril_0','cov_tril_1','cov_tril_2',
'cov_tril_3','cov_tril_4','cov_tril_5'])
def test_log_p_omega(self):
# Check that the log_p_omega function returns the desired value for both
# dicts.
hyp=np.ones(14)*0.5
self.assertAlmostEqual(hierarchical_inference.log_p_omega(hyp,
self.eval_dict_prior),np.sum(np.log([1/10,1/10,1/10,1/10,1/10,1/10,
1/2,1/10,1/2,1/10,1/10,1/10,1/10,1/10])))
hyp=-np.ones(14)*0.5
self.assertAlmostEqual(hierarchical_inference.log_p_omega(hyp,
self.eval_dict_prior),-np.inf)
hyp=np.ones(15)*0.5
self.assertAlmostEqual(hierarchical_inference.log_p_omega(hyp,
self.eval_dict_cov),np.log(1/10)*15)
hyp[-1] = -1
self.assertAlmostEqual(hierarchical_inference.log_p_omega(hyp,
self.eval_dict_cov),-np.inf)
def test_log_p_xi_omega(self):
# Test that the log_p_xi_omega function returns the correct value
# for some sample data points.
hyp = np.array([-2.73,1.05,0.0,0.102,0.0,0.102,0.0,0.1,0.0,0.1,0.7,0.1,
0.0,0.1])
samples = np.ones((8,2,2))*0.3
def hand_calc_log_pdf(samples,hyp):
# Add each one of the probabilities
scipy_pdf = stats.lognorm.logpdf(samples[0],scale=np.exp(hyp[0]),
s=hyp[1])
scipy_pdf += stats.uniform.logpdf(samples[1],loc=-0.5*np.pi,
scale=np.pi)
scipy_pdf += stats.norm.logpdf(samples[2],loc=hyp[2],scale=hyp[3])
scipy_pdf += stats.norm.logpdf(samples[3],loc=hyp[4],scale=hyp[5])
scipy_pdf += stats.norm.logpdf(samples[4],loc=hyp[6],scale=hyp[7])
scipy_pdf += stats.norm.logpdf(samples[5],loc=hyp[8],scale=hyp[9])
scipy_pdf += stats.lognorm.logpdf(samples[6],scale=np.exp(
hyp[10]),s=hyp[11])
scipy_pdf += stats.lognorm.logpdf(samples[7],scale=np.exp(
hyp[12]),s=hyp[13])
return scipy_pdf
def hand_calc_log_pdf_cov(samples,hyp):
# Add each one of the probabilities
scipy_pdf = stats.lognorm.logpdf(samples[0],scale=np.exp(hyp[0]),
s=hyp[1])
scipy_pdf += stats.uniform.logpdf(samples[1],loc=-0.5*np.pi,
scale=np.pi)
scipy_pdf += stats.norm.logpdf(samples[2],loc=hyp[2],scale=hyp[3])
scipy_pdf += stats.norm.logpdf(samples[3],loc=hyp[4],scale=hyp[5])
scipy_pdf += stats.uniform.logpdf(samples[5],loc=-0.5*np.pi,
scale=np.pi)
# Now calculate the covariance matrix values.
cov_samples = samples[[7,4,6]]
mu = [0.242,-0.408,0.696]
cov = np.array([[0.25, 0.25, 0.2],
[0.25, 0.5, 0.4],[0.2, 0.4, 0.48]])
for i in range(len(scipy_pdf)):
for j in range(len(scipy_pdf[0])):
scipy_pdf[i,j] += stats.multivariate_normal.logpdf(
np.log(cov_samples[:,i,j]),mean=mu,cov=cov)
scipy_pdf[i,j] -= np.log(stats.norm(mu[1],
np.sqrt(cov[1,1])).cdf(1))
return scipy_pdf
np.testing.assert_array_almost_equal(
hierarchical_inference.log_p_xi_omega(samples,hyp,
self.eval_dict_prior,self.lens_params),
hand_calc_log_pdf(samples,hyp))
samples = np.random.uniform(size=(8,2,3))*0.3
np.testing.assert_array_almost_equal(
hierarchical_inference.log_p_xi_omega(samples,hyp,
self.eval_dict_prior,self.lens_params),
hand_calc_log_pdf(samples,hyp))
hyp = np.array([-2.73,1.10,0.0,0.2,0.1,0.2,0.0,0.1,0.0,0.1,0.8,0.1,
0.0,0.1])
np.testing.assert_array_almost_equal(
hierarchical_inference.log_p_xi_omega(samples,hyp,
self.eval_dict_prior,self.lens_params),
hand_calc_log_pdf(samples,hyp))
hyp = np.array([-2.73,1.05,0.0,0.102,0.0,0.102,0.242,-0.408,0.696,0.5,
0.5,0.5,0.4,0.4,0.4])
np.testing.assert_array_almost_equal(
hierarchical_inference.log_p_xi_omega(samples,hyp,
self.eval_dict_cov,self.lens_params_cov),
hand_calc_log_pdf_cov(samples,hyp))
class HierarchicalClassTest(unittest.TestCase):
def setUp(self):
# Open up the config file.
self.root_path = os.path.dirname(os.path.abspath(__file__))+'/test_data/'
with open(self.root_path+'test.json','r') as json_f:
self.cfg = json.load(json_f)
self.interim_baobab_omega_path = self.root_path+'test_baobab_cfg.py'
self.target_ovejero_omega_path = self.root_path+'test_ovejero_cfg_prior.py'
self.target_baobab_omega_path = self.root_path+'test_baobab_cfg_target.py'
self.lens_params = self.cfg['dataset_params']['lens_params']
self.num_params = len(self.lens_params)
self.batch_size = 20
self.normalized_param_path = self.root_path + 'new_metadata.csv'
self.normalization_constants_path = self.root_path + 'norm.csv'
self.final_params = self.cfg['training_params']['final_params']
self.cfg['dataset_params']['normalization_constants_path'] = 'norm.csv'
self.cfg['training_params']['bnn_type'] = 'diag'
self.tf_record_path = self.root_path+self.cfg['validation_params'][
'tf_record_path']
# We'll have to make the tf record and clean it up at the end
model_trainer.prepare_tf_record(self.cfg,self.root_path,
self.tf_record_path,self.final_params,
train_or_test='train')
self.hclass = hierarchical_inference.HierarchicalClass(self.cfg,
self.interim_baobab_omega_path,self.target_ovejero_omega_path,
self.root_path,self.tf_record_path,self.target_baobab_omega_path,
lite_class=True)
os.remove(self.tf_record_path)
def tearDown(self):
# Do some cleanup for memory management
self.hclass.infer_class = None
self.hclass = None
self.cfg = None
# Force collection
gc.collect()
def test_init(self):
# Check that the true hyperparameter values were correctly initialized.
true_hyp_values = [-2.73,1.05,0.0,0.102,0.0,0.102,0.0,0.1,0.0,0.1,0.7,
0.1,0.0,0.1]
self.assertListEqual(self.hclass.true_hyp_values,true_hyp_values)
def test_gen_samples(self):
# Test that generating samples gives reasonable outputs.
class ToyModel():
def __init__(self,mean,covariance,batch_size,al_std):
# We want to make sure our performance is consistent for a
# test
np.random.seed(4)
self.mean=mean
self.covariance = covariance
self.batch_size = batch_size
self.al_std = al_std
def predict(self,image):
# We won't actually be using the image. We just want it for
# testing.
return tf.constant(np.concatenate([np.random.multivariate_normal(
self.mean,self.covariance,self.batch_size),np.zeros((
self.batch_size,len(self.mean)))+self.al_std],axis=-1),
tf.float32)
# Start with a simple covariance matrix example.
mean = np.ones(self.num_params)*2
covariance = np.diag(np.ones(self.num_params))
al_std = -1000
diag_model = ToyModel(mean,covariance,self.batch_size,al_std)
# We don't want any flipping going on
self.hclass.infer_class.flip_mat_list = [
np.diag(np.ones(self.num_params))]
# Create tf record. This won't be used, but it has to be there for
# the function to be able to pull some images.
# Make fake norms data
fake_norms = {}
for lens_param in self.final_params + self.lens_params:
fake_norms[lens_param] = np.array([0.0,1.0])
fake_norms = | pd.DataFrame(data=fake_norms) | pandas.DataFrame |
import matplotlib.pylab as plt
import pandas as pd
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tsa.stattools import adfuller
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from pandas import Series
from pandas import DataFrame
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from math import sqrt
import numpy as np
import sys
def test_stationarity(timeseries):
    # Determining rolling statistics
    rolmean = timeseries.rolling(window=30).mean()
    rolstd = timeseries.rolling(window=30).std()
# Plot rolling statistics:
orig = plt.plot(timeseries, color='blue', label='Original')
mean = plt.plot(rolmean, color='red', label='Rolling Mean')
std = plt.plot(rolstd, color='black', label='Rolling Std')
plt.legend(loc='best')
plt.title('Rolling Mean & Standard Deviation')
plt.show(block=False)
# Perform Dickey-Fuller test:
    print('Results of Dickey-Fuller Test:')
dftest = adfuller(timeseries, autolag='AIC')
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic', 'p-value', '#Lags Used', 'Number of Observations Used'])
for key, value in dftest[4].items():
dfoutput['Critical Value (%s)' % key] = value
print(dfoutput)
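# A small helper sketching how the Dickey-Fuller output above is usually read
# (the 0.05 significance level is an assumption, not taken from the source):
# a p-value below alpha rejects the unit-root null hypothesis, i.e. the series
# is treated as stationary.
def is_stationary(timeseries, alpha=0.05):
    pvalue = adfuller(timeseries, autolag='AIC')[1]
    return pvalue < alpha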
def _proper_model(ts_log_diff, maxLag):
best_p = 0
best_q = 0
best_bic = sys.maxsize
best_model=None
for p in np.arange(maxLag):
for q in np.arange(maxLag):
model = ARMA(ts_log_diff, order=(p, q))
try:
results_ARMA = model.fit(disp=-1)
except:
continue
bic = results_ARMA.bic
            print(bic, best_bic)
if bic < best_bic:
best_p = p
best_q = q
best_bic = bic
best_model = results_ARMA
return best_p,best_q,best_model
df = | pd.read_csv('user_balance_table_all.csv', index_col='user_id', names=['user_id', 'report_date', 'tBalance', 'yBalance', 'total_purchase_amt', 'direct_purchase_amt', 'purchase_bal_amt', 'purchase_bank_amt', 'total_redeem_amt', 'consume_amt', 'transfer_amt', 'tftobal_amt', 'tftocard_amt', 'share_amt', 'category1', 'category2', 'category3', 'category4'
], parse_dates=[1]) | pandas.read_csv |
import pandas as pd
import S3Api
from sklearn.feature_extraction.text import CountVectorizer, ENGLISH_STOP_WORDS, TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC, SVC
from nltk.stem import WordNetLemmatizer
import nltk
from sklearn.metrics import ConfusionMatrixDisplay
import matplotlib.pyplot as plt
import glob
from wordcloud import WordCloud, STOPWORDS
import numpy as np
words = set(nltk.corpus.words.words())
lemmatizer = WordNetLemmatizer()
class CustomSearchNB_SVM:
def __init__(self, file_storage, s3_api):
""" Create a new instance of the CustomSearchNB_SVM class
Parameters
----------
:param file_storage: FileStorage, Required
The file storage class used to store raw/processed data
:param s3_api: S3_API, Required
The S3 api wrapper class used to store data in AWS S3
----------
"""
self._file_storage = file_storage
self._s3_api = s3_api
self.__processed_data_location = 'processed_data/search_results/cleaned_search_data.csv'
self.__processed_pdf_data_location = '/Users/sampastoriza/Documents/Programming/DataScienceDevelopment/DataSciencePortfolioCode/PandemicComparison/processed_data/corpus_data/cleaned_corpus_data.csv'
self.__naive_bayes_data_location = 'naive_bayes_data/search_results/'
self.__naive_bayes_data_visualizations_location = 'naive_bayes_data_visualizations/search_results/'
self.__svm_data_location = 'svm_data/search_results/'
self.__svm_data_visualizations_location = 'svm_data_visualizations/search_results/'
self._file_storage.create_directory_if_not_exists(self.__naive_bayes_data_location)
self._file_storage.create_directory_if_not_exists(self.__naive_bayes_data_visualizations_location)
self._file_storage.create_directory_if_not_exists(self.__svm_data_location)
self._file_storage.create_directory_if_not_exists(self.__svm_data_visualizations_location)
self._additional_stop_words = ['title', 'journal', 'volume', 'author', 'scholar', 'article', 'issue', 'food',
'hunger', 'people', 'million', 'world', 'security', 'insecurity', 'covid',
'locust', 'drought', 'ebola']
self._defined_stop_words = set(ENGLISH_STOP_WORDS.union(self._additional_stop_words))
def filter_non_english_words(self, corpus):
""" Filters, lowercases, and lemmatizes non english words using the nltk word list.
Partial credit goes to this Stackoverflow answer
https://stackoverflow.com/questions/41290028/removing-non-english-words-from-text-using-python
Parameters
----------
:param corpus: String, Required
The corpus of text
----------
Returns
-------
:return: String
A corpus of text without non-english words
-------
"""
filtered_vocabulary = [lemmatizer.lemmatize(w.lower()) for w in nltk.wordpunct_tokenize(corpus) if
w.lower() in words]
filtered_vocabulary = [w for w in filtered_vocabulary if len(w) > 2 and w not in self._defined_stop_words]
return " ".join(filtered_vocabulary)
def run_analysis(self):
processed_df = pd.read_csv(self.__processed_data_location, index_col=False)
processed_pdf_df = pd.read_csv(self.__processed_pdf_data_location, index_col=False)
df = pd.concat([processed_df, processed_pdf_df], ignore_index=True)
df['text'] = df['text'].apply(self.filter_non_english_words)
self.visualize_processed_search_data(df)
labels = list(set(df['topic']))
print('Labels', labels)
vectorizer = CountVectorizer()
v = vectorizer.fit_transform(df['text'])
vocab = vectorizer.get_feature_names_out()
values = v.toarray()
v_df = pd.DataFrame(values, columns=vocab)
v_df.insert(loc=0, column='LABEL', value=df['topic'])
print('Resulting dataframe', v_df)
file_path = f'{self.__naive_bayes_data_location}labeled_dataframe_count.csv'
v_df.to_csv(file_path, index=False)
print('Wrote labeled dataframe to csv')
train_df, test_df = train_test_split(v_df, test_size=0.3)
train_df.to_csv(f'{self.__naive_bayes_data_location}training_set_count.csv', index=False)
        test_df.to_csv(f'{self.__naive_bayes_data_location}testing_set_count.csv', index=False)
print('Split data into training and testing sets')
self.__run_naive_bayes_analysis(train_df, test_df, vectorizer)
vectorizer = TfidfVectorizer()
v = vectorizer.fit_transform(df['text'])
vocab = vectorizer.get_feature_names_out()
values = v.toarray()
v_df = pd.DataFrame(values, columns=vocab)
v_df.insert(loc=0, column='LABEL', value=df['topic'])
print('Resulting dataframe', v_df)
file_path = f'{self.__svm_data_location}labeled_dataframe_tfidf.csv'
v_df.to_csv(file_path, index=False)
print('Wrote labeled dataframe to csv')
train_df, test_df = train_test_split(v_df, test_size=0.3)
train_df.to_csv(f'{self.__svm_data_location}training_set_tfidf.csv', index=False)
        test_df.to_csv(f'{self.__svm_data_location}testing_set_tfidf.csv', index=False)
self.__run_svm_analysis(train_df, test_df)
def __run_naive_bayes_analysis(self, train_df, test_df, vectorizer):
print('Running Naive Bayes Analysis')
train_labels = train_df['LABEL']
train_df = train_df.drop(['LABEL'], axis=1)
test_labels = test_df['LABEL']
test_df = test_df.drop(['LABEL'], axis=1)
nb_model = MultinomialNB()
nb_model.fit(train_df, train_labels)
nb_prediction = nb_model.predict(test_df)
nb_confusion = pd.crosstab(test_labels, nb_prediction, rownames=['Actual'], colnames=['Predicted'], margins=True)
nb_confusion.to_csv(f'{self.__naive_bayes_data_location}confusion_matrix_nb.csv')
print('Made predictions based on the text. Below is the confusion matrix.')
print(nb_confusion)
ConfusionMatrixDisplay.from_predictions(test_labels, nb_prediction)
plt.savefig(f'{self.__naive_bayes_data_visualizations_location}confusion_matrix_visual_nb.png')
zipped = list(zip(vectorizer.get_feature_names_out(), np.exp(nb_model.feature_log_prob_[0])))
sorted_zip = sorted(zipped, key=lambda t: t[1], reverse=True)
x, y = zip(*sorted_zip[:10])
feature_importance_df = pd.DataFrame({'TopFeatures': x, 'Importance': y})
feature_importance_df.to_csv(f'{self.__naive_bayes_data_location}feature_importance_nb_{nb_model.classes_[0]}.csv', index=False)
self.__plot_variable_importance(x, y, nb_model.classes_[0])
zipped = list(zip(vectorizer.get_feature_names_out(), np.exp(nb_model.feature_log_prob_[1])))
sorted_zip = sorted(zipped, key=lambda t: t[1], reverse=True)
x, y = zip(*sorted_zip[:10])
feature_importance_df = pd.DataFrame({'TopFeatures': x, 'Importance': y})
feature_importance_df.to_csv(f'{self.__naive_bayes_data_location}feature_importance_nb_{nb_model.classes_[1]}.csv', index=False)
self.__plot_variable_importance(x, y, nb_model.classes_[1])
zipped = list(zip(vectorizer.get_feature_names_out(), np.exp(nb_model.feature_log_prob_[2])))
sorted_zip = sorted(zipped, key=lambda t: t[1], reverse=True)
x, y = zip(*sorted_zip[:10])
feature_importance_df = pd.DataFrame({'TopFeatures': x, 'Importance': y})
feature_importance_df.to_csv(f'{self.__naive_bayes_data_location}feature_importance_nb_{nb_model.classes_[2]}.csv', index=False)
self.__plot_variable_importance(x, y, nb_model.classes_[2])
print(nb_model.feature_log_prob_)
zipped = list(zip(vectorizer.get_feature_names_out(), np.exp(nb_model.feature_log_prob_[3])))
sorted_zip = sorted(zipped, key=lambda t: t[1], reverse=True)
x, y = zip(*sorted_zip[:10])
feature_importance_df = pd.DataFrame({'TopFeatures': x, 'Importance': y})
feature_importance_df.to_csv(f'{self.__naive_bayes_data_location}feature_importance_nb_{nb_model.classes_[3]}.csv', index=False)
self.__plot_variable_importance(x, y, nb_model.classes_[3])
import numpy as np
import pandas as pd
import json
# %matplotlib inline
# from plotly.graph_objs import *
import statsmodels.api as sm
import warnings
import yfinance as yf
warnings.filterwarnings('ignore')
import seaborn as sns
import itertools
def forecastwithoption(compName, day):
comp = yf.Ticker(compName)
# get historical market data
df = comp.history(period="max")
# Exploratory Data Analysis:
df.isnull().sum()
print(df.shape)
# transform to datetime object here..
df.index = pd.to_datetime(df.index)
df_groupby = df.groupby(['Date'])['Close'].mean()
df_groupby.sort_index(inplace=True)
y = df_groupby
y = y.tail(110)
print(y)
lastDayOfDf = y.index.max().strftime("%m/%d/%Y")
firstDayOfDf = y.index.min().strftime("%m/%d/%Y")
print("first day: " + firstDayOfDf + ", Last day: " + lastDayOfDf)
"""
onemonthlater = pd.date_range(y.index.max(), periods=30, freq='1D')
threemonthlater = pd.date_range(y.index.max(), periods=90, freq='1D')
sixmonthlater = pd.date_range(y.index.max(), periods=180, freq='1D')
"""
# ARIMA stands for Auto Regression Integrated Moving Average.
# It is specified by three ordered parameters (p,d,q). Where:
# p is the order of the autoregressive model(number of time lags)
# d is the degree of differencing (number of times the data have had past values subtracted)
# q is the order of moving average model. Before building an ARIMA model,
# we have to make sure our data is stationary.
p = d = q = range(0, 2)
pdq = list(itertools.product(p, d, q))
seasonal_pdq = [(x[0], x[1], x[2], 12) for x in list(itertools.product(p, d, q))]
print('Examples of parameter for SARIMA...')
print('SARIMAX: {} x {}'.format(pdq[1], seasonal_pdq[1]))
print('SARIMAX: {} x {}'.format(pdq[1], seasonal_pdq[2]))
print('SARIMAX: {} x {}'.format(pdq[2], seasonal_pdq[3]))
print('SARIMAX: {} x {}'.format(pdq[2], seasonal_pdq[4]))
for param in pdq:
for param_seasonal in seasonal_pdq:
try:
mod = sm.tsa.statespace.SARIMAX(y,
order=param,
seasonal_order=param_seasonal)
results = mod.fit(max_iter=50, method='powell')
print('SARIMA{},{} - AIC:{}'.format(param, param_seasonal, results.aic))
except Exception as ex:
print('Exception: ', ex)
"""mod = sm.tsa.statespace.SARIMAX(y, order=param, seasonal_order=param_seasonal,
enforce_stationarity=False, enforce_invertibility=False)"""
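# NOTE: `results` below is the model from the *last* (param, param_seasonal)
# combination fitted in the grid search above -- the loop prints every AIC
# but does not keep the best-scoring fit.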
print(results.summary().tables[1])
pred = results.get_prediction(start=pd.to_datetime(firstDayOfDf), end=pd.to_datetime(lastDayOfDf), dynamic=False)
pred_ci = pred.conf_int()
print(pred_ci)
pred_uc = results.get_forecast(steps=90)
pred_ci = pred_uc.conf_int()
forecast = pred_uc.predicted_mean
print(forecast.head(day))
forecast = forecast.head(day)
jsonfiles = json.loads(forecast.to_json(orient='records'))
return jsonfiles
def forecastwithuploadcsv(csv, day):
df = csv
df.index = pd.to_datetime(df.index)
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pytest
from src.create_initial_states.create_initial_conditions import (
_scale_up_empirical_new_infections,
)
from src.create_initial_states.create_initial_conditions import (
create_group_specific_share_known_cases,
)
from src.create_initial_states.create_initial_infections import (
_add_variant_info_to_infections,
)
from src.create_initial_states.create_initial_infections import (
_calculate_group_infection_probs,
)
@pytest.fixture
def empirical_infections():
start = pd.Timestamp("2020-09-30")
a_day = pd.Timedelta(days=1)
df = pd.DataFrame()
df["date"] = [start + i * a_day for i in range(5)] * 4
df = df.sort_values("date")
df.reset_index(drop=True, inplace=True)
df["county"] = list("AABB") * 5
df["age_group_rki"] = ["young", "old"] * 10
np.random.seed(3984)
df["newly_infected"] = np.random.choice([0, 1], 20)
sr = df.set_index(["date", "county", "age_group_rki"])
return sr
@pytest.fixture
def cases():
ind_tuples = [("A", "young"), ("A", "old"), ("B", "young"), ("B", "old")]
index = pd.MultiIndex.from_tuples(ind_tuples, names=["county", "age_group_rki"])
df = pd.DataFrame(index=index)
df["2020-10-01"] = [1, 0, 0, 1]
df["2020-10-02"] = [1, 1, 0, 0]
df["2020-10-03"] = [1, 0, 0, 1]
return df
@pytest.fixture
def synthetic_data():
df = pd.DataFrame()
df["county"] = list("AABBBBAAA")
df["age_group_rki"] = ["young"] * 4 + ["old"] * 5
return df
def test_calculate_group_infection_probs(synthetic_data, cases):
pop_size = 14
undetected_multiplier = 1.5
res = _calculate_group_infection_probs(
synthetic_data=synthetic_data,
cases=undetected_multiplier * cases,
population_size=pop_size,
)
expected_on_synthetic_data = pd.DataFrame(
index=synthetic_data.index, columns=cases.columns
)
group_shares = np.array([2, 2, 2, 2, 2, 2, 3, 3, 3]) / 9
scaled_up_group_sizes = pop_size * group_shares
p1 = (
undetected_multiplier
* np.array([1, 1, 0, 0, 1, 1, 0, 0, 0])
/ scaled_up_group_sizes
)
p2 = (
undetected_multiplier
* np.array([1, 1, 0, 0, 0, 0, 1, 1, 1])
/ scaled_up_group_sizes
)
p3 = (
undetected_multiplier
* np.array([1, 1, 0, 0, 1, 1, 0, 0, 0])
/ scaled_up_group_sizes
)
expected_on_synthetic_data["2020-10-01"] = p1
expected_on_synthetic_data["2020-10-02"] = p2
expected_on_synthetic_data["2020-10-03"] = p3
expected = expected_on_synthetic_data.loc[[0, 2, 4, 6]]
expected.index = pd.MultiIndex.from_tuples(
[("A", "young"), ("B", "young"), ("B", "old"), ("A", "old")]
)
expected.index.names = ["county", "age_group_rki"]
pdt.assert_frame_equal(res.sort_index(), expected.sort_index())
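# The expected values above follow the definition used by
# _calculate_group_infection_probs:
#   P(newly infected | group) = undetected_multiplier * reported_cases(group)
#                               / (population_size * group_share),
# i.e. reported cases are scaled up for undetected infections and divided by
# the group's size in the full population, not in the synthetic sample.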
def test_add_variant_info_to_infections():
df = pd.DataFrame()
dates = [pd.Timestamp("2021-03-14"), pd.Timestamp("2021-03-15")]
df[dates[0]] = [False, True] * 5
df[dates[1]] = [False] * 8 + [True, False]
virus_shares = {
"base_strain": pd.Series([1, 0.5], index=dates),
"other_strain": pd.Series([0, 0.5], index=dates),
}
np.random.seed(39223)
expected = pd.DataFrame()
from functools import wraps
import numpy as np
import datetime as dt
import pandas as pd
from pandas.api.types import is_numeric_dtype, is_categorical, infer_dtype, is_object_dtype, is_string_dtype
from sklearn.decomposition import NMF, TruncatedSVD
from sklearn.feature_extraction.text import HashingVectorizer, TfidfTransformer
from sklearn.pipeline import make_pipeline
#TODO - create a simple class to dummify date columns
def dummify_date_cols(df):
if 'giadmd' in df.columns:
df['giadmd'] = pd.to_datetime(df['giadmd'], errors='coerce')
df['giadmd_year'] = df['giadmd'].dt.year.astype('Int64').astype('object')
df['giadmd_month'] = df['giadmd'].dt.month.astype('Int64').astype('object')
df = df.drop('giadmd', axis=1)
if 'girefs' in df.columns:
df['girefs'] = pd.to_datetime(df['girefs'], errors='coerce')
df['girefs_year'] = df['girefs'].dt.year.astype('Int64').astype('object')
df['girefs_month'] = df['girefs'].dt.month.astype('Int64').astype('object')
df = df.drop('girefs', axis=1)
if 'gidscd' in df.columns:
df['gidscd'] = pd.to_datetime(df['gidscd'], errors='coerce')
df['gidscd_year'] = df['gidscd'].dt.year.astype('Int64').astype('object')
df['gidscd_month'] = df['gidscd'].dt.month.astype('Int64').astype('object')
df = df.drop('gidscd', axis=1)
print("Shape after dummify:", df.shape)
return df
def format_missings(df):
for column in df.columns:
if is_numeric_dtype(df[column]):
fill_value = df[column].mean()
df[column] = df[column].fillna(fill_value, downcast=False)
elif is_object_dtype(df[column]) or is_string_dtype(df[column]):
df[column] = df[column].fillna('MISSING', downcast=False)
print("Shape after format_missing:", df.shape)
return df
def remove_features_with_missing_values(df, na_thres):
return df.loc[:, df.isna().mean() < na_thres]
def clean_floats(x):
if pd.isnull(x):
return x
elif type(x) is float:
return str(int(x))
else:
return x
def clean_up_floats(df):
for col in df.columns:
if is_object_dtype(df[col]) or is_string_dtype(df[col]):
df[col] = df[col].apply(clean_floats)
print('Shape after clean_floats:', df.shape)
return df
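# Example of the conversion above (illustrative): object columns that picked
# up floats from a CSV round-trip are normalised back to integer strings,
# e.g. clean_floats(42.0) -> '42', while NaN and ordinary strings pass
# through unchanged.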
#Decorator to log information on functions
def log_pipe_step(func):
"""Decorator to log information about functions.
Use function.unwrapped to turn the decorator off.
"""
@wraps(func)
def wrapper(*args, **kwargs):
tic = dt.datetime.now()
result = func(*args, **kwargs)
time_taken = str(dt.datetime.now() - tic)
print(f"Ran {func.__name__} DF shape={result.shape} took {time_taken}s")
return result
wrapper.unwrapped = func
return wrapper
@log_pipe_step
def rev_codes_one_hot(df, n_codes=50):
"""Takes a df and the n_codes, returns a one_hot df.
Usage Example: df.pipe(rev_codes_one_hot, 10)
"""
df_copy = df.copy()
# single_code_map = df_copy.rev_codes.str.contains(';')
# top_codes = df_copy.loc[~single_code_map].rev_codes.value_counts(normalize=True).nlargest(n_codes).index
top_codes = ['300', '403', '320', '510', '402', '450', '420', '761', '981',
'MISSING', '972', '921', '480', '352', '511', '483', '333', '610',
'612', '943', '310', '740', '920', '430', '942', '401', '540', '351',
'324', '456', '521', '440', '350', '301', '730', '311', '300LA', '964',
'611', '987', '360', '361', '460', '731', '424', '510CL', '306', '413',
'940', '948', '482', '985', '320RA', '305', '983', '922', '450ER',
'434', '614', '780', '982', '410', '918', '636', '619', '469', '912',
'250', '444', '420PT']
for code in top_codes[:n_codes]:
df_copy[f'rev_code_{code}'] = df_copy.rev_codes.str.contains(code).astype('int')
df_copy = df_copy.drop('rev_codes', axis=1)
return df_copy
def rev_codes_nmf(df, n_components=10):
"""Takes a df and the n_codes, returns a nmf df.
Usage Example: df.pipe(rev_codes_nmf, 10)
"""
df_copy = df.copy()
# single_code_map = df_copy.rev_codes.str.contains(';')
# top_codes = df_copy.loc[~single_code_map].rev_codes.value_counts(normalize=True).nlargest(60).index
top_codes = ['300', '403', '320', '510', '402', '450', '420', '761', '981',
'MISSING', '972', '921', '480', '352', '511', '483', '333', '610',
'612', '943', '310', '740', '920', '430', '942', '401', '540', '351',
'324', '456', '521', '440', '350', '301', '730', '311', '300LA', '964',
'611', '987', '360', '361', '460', '731', '424', '510CL', '306', '413',
'940', '948', '482', '985', '320RA', '305', '983', '922', '450ER',
'434', '614', '780', '982', '410', '918', '636', '619', '469', '912',
'250', '444', '420PT']
codes_df = pd.DataFrame()
for code in top_codes:
codes_df[f'rev_codes_{code}'] = df_copy.rev_codes.str.contains(code).astype('int')
print('Starting NMF')
nmf = NMF(n_components=n_components)
W = nmf.fit_transform(codes_df)
col_names = [f"rev_component_{i}" for i in range(n_components)]
for i, name in enumerate(col_names):
df_copy[name] = W[:,i]
df_copy = df_copy.drop('rev_codes', axis=1)
return df_copy
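# Design note: NMF compresses the ~60 correlated revenue-code indicator
# columns into a few non-negative "usage pattern" components, which are
# usually friendlier to downstream tree models than the raw sparse one-hot
# matrix produced by rev_codes_one_hot above.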
def transform_diagnosis(df):
"""Transform the text diagnosis to features for classification.
The HashingVectorizer converts text to matrics, while the TfidfTransformer provides inverse
document frequencies, resulting in a sparse matrix. Last, the SVD reduces dimensions to improve
the work of Tree-based models.
Inspired by https://scikit-learn.org/stable/auto_examples/text/plot_document_clustering.html#sphx-glr-auto-examples-text-plot-document-clustering-py
Usage: df.pipe(transform_diagnosis)
"""
n_features = 10000
n_components=25
dataset = df.fddiagtx
hasher = HashingVectorizer(n_features=n_features,
stop_words='english',
alternate_sign=False,
norm=None)
vectorizer = make_pipeline(hasher, TfidfTransformer())
sparse_matrix = vectorizer.fit_transform(dataset)
svd = TruncatedSVD(n_components=n_components)
regr = svd.fit_transform(sparse_matrix)
col_names = [f"diag_component_{i}" for i in range(n_components)]
for i, name in enumerate(col_names):
df[name] = regr[:,i]
df = df.drop('fddiagtx', axis=1)
return df
def remove_rows_with_many_nas(df, portion_nonna):
thresh = portion_nonna*df.shape[1]
return df.dropna(thresh=thresh)
def fillna_with_missing(df, subset):
if not isinstance(subset, list):
subset = [subset]
print('Mean NAs Before filling with MISSING')
print(df.loc[:, subset].isna().mean())
df.loc[:, subset] = df.loc[:, subset].loc[:, subset].fillna('MISSING')
return df
def get_query(query):
sql_query =""
with open(query, 'r') as fh:
for line in fh:
line = line.replace("\n",' ')
sql_query= sql_query + line
#print(line)
return sql_query
def most_common_token(s, n):
if is_numeric_dtype(s):
s = s.astype('string')
long_string = s.str.cat(sep=' ')
c = Counter(long_string.split(' '))
del c['MISSING']
for k in c.keys():
c[k] = round(c[k]/len(s), 3)
return c.most_common(n)
def get_zero_variance(df):
features = ['giclnt_string', 'giatyp_string', 'gicfac_string',
'rev_string', 'APC_string', 'appaynam_string', 'applan__string',
'mue_string', 'rarc_string', 'ub4bx67_string', 'sum(trpadj)', 'dbantp',
'dbaqtr', 'dbaday', 'dbctyp', 'ud4ubseq', 'unit', 'modifier', 'lgmid',
'er_flag', 'ddstatus', 'ddrcause', 'rddesc', 'ddcode', 'pass_thru_flag',
'lsat_flag']
mapper = df.loc[:, features].nunique().loc[df.loc[:, features].nunique() == 1]
filters = [col for col in mapper.index.values if col in features]
if len(filters) > 0:
df_results = df.loc[:, filters].mode().T.to_dict()[0]
df_results = dict(sorted(df_results.items()))
result = str(df_results)
result = result.replace("{", "").replace("}", "")
return result
else:
return np.nan
def get_clusters_summary(df,cluster_columns, cols_to_summarize):
cluster_grouping = df.groupby(cluster_columns)
common_features_per_cluster = cluster_grouping.apply(get_zero_variance).reset_index(name='cluster_common_features')
for col in cols_to_summarize:
result_ = cluster_grouping[col].apply(most_common_token,3).reset_index(name=f'top_most_frequent_{col}')
common_features_per_cluster = common_features_per_cluster.merge(result_,
how='left',
on = cluster_columns
)
common_features_per_cluster.columns = [col.replace("_string", "") for col in common_features_per_cluster.columns]
return common_features_per_cluster
def pull_account_number_and_fac_id(data, chunksize=9999):
"""Takes the cluster/novelty report as input.
Returns a pandas DF with Account dbmid, dbref1 and dbcfac for the dbmid in the report"""
list_of_ids = data['dbmid'].astype('string').to_list()
n_chunks = len(list_of_ids)//chunksize
final_result = pd.DataFrame()
for i in range(n_chunks+1):
id_chunk = list_of_ids[i*chunksize: (1+i)*chunksize]
query = f"""SELECT DISTINCT dbmid, dbref1, dbcfac FROM acedta.dbinfo WHERE dbmid in ({','.join(id_chunk)})"""
interim_result = pd.read_sql(query, con=process())
final_result = pd.concat([final_result, interim_result])
return final_result.reset_index(drop=True)
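# The chunksize of 9999 appears chosen to stay under the database's limit on
# the number of literals in a SQL IN (...) list; each chunk issues one query
# and the partial results are concatenated into a single frame.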
def add_nthrive(data, chunksize=9999):
"""Takes the cluster/novelty report as input.
Returns a pandas DF with nThrive report for accounts with the same
concat(_FAC, '_', accountnumber) as combined_key in the report """
list_of_ids = data['combined_key'].astype('string').to_list()
n_chunks = len(list_of_ids)//chunksize
final_result = pd.DataFrame()
for i in range(n_chunks+1):
id_chunk = list_of_ids[i*chunksize: (1+i)*chunksize]
query = f"""SELECT *, concat(_FAC, '_', accountnumber) as combined_key FROM datascience.nTrive_dataset WHERE concat(_FAC, '_', accountnumber) in ({",".join(f"'{w}'" for w in id_chunk)})"""
interim_result = pd.read_sql(query, con=process())
final_result = pd.concat([final_result, interim_result])
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import base64
import glob
from scipy.signal import medfilt
from scipy.integrate import trapz
import xml.etree.ElementTree as et
from datetime import date
today = date.today()
import warnings
warnings.filterwarnings('ignore')
sns.set(style="darkgrid")
roots = []
root_names = []
for n in glob.glob('*.xml'):
roots.append(et.parse(n).getroot())
root_names.append(n)
def modified_z_score(intensity):
median_int = np.median(intensity)
mad_int = np.median([np.abs(intensity - median_int)])
if mad_int == 0:
mad_int = 1
modified_z_scores = 0.6745 * (intensity - median_int) / mad_int
return modified_z_scores
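# The 0.6745 factor rescales the median absolute deviation so that, for
# normally distributed data, the modified z-score is on the same scale as an
# ordinary z-score (0.6745 is the 0.75 quantile of the standard normal).
# df_fixer/half_df_fixer below threshold |modified z| of the first-differenced
# signal to locate spikes and blank them with None for later interpolation.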
def df_fixer(y,n):
threshold = 0
x = 0
while threshold == 0:
if np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), 1) > 150:
if abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() < np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .98)+55:
threshold = abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() + 1
elif abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() > np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .98)+55:
x += 5
elif np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), 1) <= 150:
if abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() < np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .992)+55:
threshold = abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() + 1
elif abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() > np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .992)+55:
x += 5
spikes = abs(np.array(modified_z_score(np.diff(y)))) > threshold
y_out = y.copy()
for i in np.arange(len(spikes)):
if spikes[i] != 0:
y_out[i+y_out.index[0]] = None
return y_out
def half_df_fixer(y,n):
threshold = 0
x = 0
while threshold == 0:
if np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), 1) > 150:
if abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() < np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .98)+60:
threshold = abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() + 1
elif abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() > np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .98)+60:
x += 2
elif np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), 1) <= 150:
if abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() < np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .992)+60:
threshold = abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() + 1
elif abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() > np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .992)+60:
x += 2
spikes = abs(np.array(modified_z_score(np.diff(y)))) > threshold
y_out = y.copy()
for i in np.arange(len(spikes)):
if spikes[i] != 0:
y_out[i+y_out.index[0]] = None
return y_out
def hanging_line(point1, point2):
a = (point2[1] - point1[1])/(np.cosh(point2[0] % 600) - np.cosh(point1[0] % 600))
b = point1[1] - a*np.cosh(point1[0] % 600)
x = np.linspace(point1[0], point2[0], (point2[0] - point1[0])+1)
y = a*np.cosh(x % 600) + b
return (x,y)
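# hanging_line interpolates between two samples with a catenary
# (y = a*cosh(x) + b) rather than a straight line; the `% 600` wrapping
# appears to assume each averaged beat occupies a 600-sample window. A tiny
# illustrative call (hypothetical points, not patient data):
x_demo, y_demo = hanging_line((0, 0.0), (10, 5.0))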
Tags = {'tags':[]}
tags = {'tags':[]}
for root in roots:
if len(list(root.find('{http://www3.medical.philips.com}waveforms'))) == 2:
if int(root.find('{http://www3.medical.philips.com}waveforms')[1].attrib['samplespersec']) == 1000:
for elem in root.find('{http://www3.medical.philips.com}waveforms')[1]:
tag = {}
tag['Lead'] = elem.attrib['leadname']
if (root[6][1][0][14].text == 'Invalid' or elem[0].text == 'Invalid') and root[6].tag == '{http://www3.medical.philips.com}internalmeasurements':
if root[6][1][0][14].text == None or root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][1][0][14].text == '\n ' or root[6][1][0][14].text == 'Failed':
tag['Ponset'] = 0
tag['Pdur'] = 0
tag['Print'] = 0
tag['Poffset'] = 0
else:
tag['Ponset'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)
tag['Pdur'] = 0
tag['Print'] = int(root[6][1][0][14].text)
tag['Poffset'] = (int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)) + 0
elif root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][1][0][14].text == None or root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Failed' or root[6][1][0][14].text == 'Failed' or (root[6][1][0][14].text == 'Invalid' or elem[0].text == 'Invalid'):
tag['Ponset'] = 0
tag['Pdur'] = 0
tag['Print'] = 0
tag['Poffset'] = 0
else:
tag['Ponset'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)
tag['Pdur'] = int(elem[0].text)
tag['Print'] = int(root[6][1][0][14].text)
tag['Poffset'] = (int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)) + int(elem[0].text)
if (root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][0][29].text == 'Invalid' or elem[4].text == 'Invalid' or root[6][1][0][18].text == 'Invalid'):
tag['Qonset'] = np.nan
tag['Qrsdur'] = np.nan
tag['Qoffset'] = np.nan
tag['Tonset'] = np.nan
tag['Qtint'] = np.nan
tag['Toffset'] = np.nan
tag['Tdur'] = np.nan
else:
tag['Qonset'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text)
tag['Qrsdur'] = int(root[6][0][29].text)
tag['Qoffset'] = tag['Qonset'] + tag['Qrsdur']
tag['Tonset'] = int(elem[4].text)
tag['Qtint'] = int(root[6][1][0][18].text)
tag['Toffset'] = tag['Qonset'] + tag['Qtint']
tag['Tdur'] = tag['Qoffset'] - tag['Qonset']
if root[7].tag == '{http://www3.medical.philips.com}interpretations' and root[6].tag == '{http://www3.medical.philips.com}internalmeasurements':
if root[7][0][1][0].text != None and (root[7][0][1][0].text).isdigit(): tag['HeartRate'] = int(root[7][0][1][0].text)
if root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[1].text != None: tag['RRint'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[1].text)
if root[6][1][0][9].text != None: tag['AtrialRate'] = int(root[6][1][0][9].text)
if root[6][0][15].text != None and root[6][0][15].text != 'Indeterminate': tag['QRSFrontAxis'] = int(root[6][0][15].text)
if root[6][0][31].text != None and root[6][0][31].text != 'Failed': tag['QTC'] = int(root[6][0][31].text)
tag['Target'] = []
for n in range(len(root[7][0][list(root[7][0]).index(root[7][0].find('{http://www3.medical.philips.com}statement')):])):
tag['Target'].append(root[7][0][list(root[7][0]).index(root[7][0].find('{http://www3.medical.philips.com}statement')):][n][0].text)
else:
tag['HeartRate'] = np.nan
tag['RRint'] = np.nan
tag['AtrialRate'] = np.nan
tag['QRSFrontAxis'] = np.nan
tag['QTC'] = np.nan
tag['Target'] = []
if root[3].tag == '{http://www3.medical.philips.com}reportinfo' and root[5].tag == '{http://www3.medical.philips.com}patient':
time = root[3].attrib
tag['Date'] = time['date']
tag['Time'] = time['time']
tag['Sex'] = root[5][0][6].text
tag['ID'] = root[5][0][0].text
tag['Name'] = root[5][0].find('{http://www3.medical.philips.com}name')[0].text + ', ' + root[5][0].find('{http://www3.medical.philips.com}name')[1].text
if root[5][0].find('{http://www3.medical.philips.com}age')[0].tag == '{http://www3.medical.philips.com}dateofbirth':
tag['Age'] = int(today.strftime("%Y")) - int(root[5][0].find('{http://www3.medical.philips.com}age')[0].text[0:4])
if root[5][0].find('{http://www3.medical.philips.com}age')[0].tag == '{http://www3.medical.philips.com}years':
tag['Age'] = int(root[5][0].find('{http://www3.medical.philips.com}age')[0].text)
tag['Waveform'] = elem[6].text
# tag['LongWaveform'] = root[8][0].text
tags['tags'].append(tag)
else:
for elem in root.find('{http://www3.medical.philips.com}waveforms')[1]:
Tag = {}
Tag['Lead'] = elem.attrib['leadname']
if (root[6][1][0][14].text == 'Invalid' or elem[0].text == 'Invalid') and root[6].tag == '{http://www3.medical.philips.com}internalmeasurements':
if root[6][1][0][14].text == None or root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][1][0][14].text == '\n ' or root[6][1][0][14].text == 'Failed':
Tag['Ponset'] = 0
Tag['Pdur'] = 0
Tag['Print'] = 0
Tag['Poffset'] = 0
else:
Tag['Ponset'] = float(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)
Tag['Pdur'] = 0
Tag['Print'] = int(root[6][1][0][14].text)
Tag['Poffset'] = (int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)) + 0
elif root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][1][0][14].text == None or root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == None or root[6][1][0][14].text == 'Invalid' or elem[0].text == 'Invalid' and root[6].tag == '{http://www3.medical.philips.com}internalmeasurements':
Tag['Ponset'] = 0
Tag['Pdur'] = 0
Tag['Print'] = 0
Tag['Poffset'] = 0
else:
Tag['Ponset'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)
Tag['Pdur'] = int(elem[0].text)
Tag['Print'] = int(root[6][1][0][14].text)
Tag['Poffset'] = (int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)) + int(elem[0].text)
if (root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][1][0][18].text == None or root[6][0][29].text == 'Invalid' or elem[4].text == 'Invalid' or root[6][1][0][18].text == 'Invalid'):
Tag['Qonset'] = np.nan
Tag['Qrsdur'] = np.nan
Tag['Qoffset'] = np.nan
Tag['Tonset'] = np.nan
Tag['Qtint'] = np.nan
Tag['Toffset'] = np.nan
Tag['Tdur'] = np.nan
else:
Tag['Qonset'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text)
Tag['Qrsdur'] = int(root[6][0][29].text)
Tag['Qoffset'] = Tag['Qonset'] + Tag['Qrsdur']
Tag['Tonset'] = int(elem[4].text)
Tag['Qtint'] = int(root[6][1][0][18].text)
Tag['Toffset'] = Tag['Qonset'] + Tag['Qtint']
Tag['Tdur'] = Tag['Qoffset'] - Tag['Qonset']
if root[7].tag == '{http://www3.medical.philips.com}interpretations' and root[6].tag == '{http://www3.medical.philips.com}internalmeasurements':
if root[7][0][1][0].text != None and (root[7][0][1][0].text).isdigit(): Tag['HeartRate'] = int(root[7][0][1][0].text)
if root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[1].text != None: Tag['RRint'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[1].text)
if root[6][1][0][9].text != None: Tag['AtrialRate'] = int(root[6][1][0][9].text)
if root[6][0][15].text != None and root[6][0][15].text != 'Indeterminate': Tag['QRSFrontAxis'] = int(root[6][0][15].text)
if root[6][0][31].text != None: Tag['QTC'] = int(root[6][0][31].text)
Tag['Target'] = []
for n in range(len(root[7][0][list(root[7][0]).index(root[7][0].find('{http://www3.medical.philips.com}statement')):])):
Tag['Target'].append(root[7][0][list(root[7][0]).index(root[7][0].find('{http://www3.medical.philips.com}statement')):][n][0].text)
else:
Tag['HeartRate'] = np.nan
Tag['RRint'] = np.nan
Tag['AtrialRate'] = np.nan
Tag['QRSFrontAxis'] = np.nan
Tag['QTC'] = np.nan
Tag['Target'] = []
if root[3].tag == '{http://www3.medical.philips.com}reportinfo' and root[5].tag == '{http://www3.medical.philips.com}patient':
time = root[3].attrib
Tag['Date'] = time['date']
Tag['Time'] = time['time']
Tag['Sex'] = root[5][0][6].text
Tag['ID'] = root[5][0][0].text
Tag['Name'] = root[5][0].find('{http://www3.medical.philips.com}name')[0].text + ', ' + root[5][0].find('{http://www3.medical.philips.com}name')[1].text
if len(root[5][0].find('{http://www3.medical.philips.com}age')) > 0:
if root[5][0].find('{http://www3.medical.philips.com}age')[0].tag == '{http://www3.medical.philips.com}dateofbirth':
Tag['Age'] = int(today.strftime("%Y")) - int(root[5][0].find('{http://www3.medical.philips.com}age')[0].text[0:4])
if root[5][0].find('{http://www3.medical.philips.com}age')[0].tag == '{http://www3.medical.philips.com}years':
Tag['Age'] = int(root[5][0].find('{http://www3.medical.philips.com}age')[0].text)
Tag['Waveform'] = elem[6].text
# Tag['LongWaveform'] = root[8][0].text
Tags['tags'].append(Tag)
half_data = pd.DataFrame(Tags['tags'])
data = pd.DataFrame(tags['tags'])
del roots
del root
del elem
count1000 = int(len(data)/12)
count500 = int(len(half_data)/12)
count = count1000 + count500
if len(data) > 0:
array = np.unique(data[data.isnull().any(axis=1)][['ID', 'Date', 'Time']])
missing_data = data.loc[data['ID'].isin(array) & data['Date'].isin(array) & data['Time'].isin(array)]
data.drop(missing_data.index, axis=0,inplace=True)
missing_data = missing_data.reset_index(drop=True)
del tag
del tags
data = data.reset_index(drop=True)
for n in range(count1000):
data.Tonset[n*12:(n+1)*12] = np.repeat(int(data.Tonset[n*12:(n+1)*12].sum()/12), 12)
data.Pdur[n*12:(n+1)*12] = np.repeat(int(data.Pdur[n*12:(n+1)*12].sum()/12), 12)
x = 0
p = []
for x in range(len(data.Waveform)):
t = base64.b64decode(data.Waveform[x])
p.append(np.asarray(t))
x+=1
p = np.asarray(p)
a = []
for i in p:
o = []
for x in i:
o.append(x)
a.append(o)
df = pd.DataFrame(a)
df.insert(0, 'Lead', data['Lead'])
blank = []
for n in range(count1000):
blank.append(pd.pivot_table(df[(n*12):(n+1)*12], columns=df.Lead))
test = pd.concat(blank)
import dask
import glob
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import os
import pandas as pd
from pymicro.view.vol_utils import compute_affine_transform
import scipy
import scipy.optimize
import secrets
matplotlib.use("Agg")
TRACKID = "track"
X = "x"
Y = "y"
Z = "z"
FRAME = "frame"
CELLID = "cell"
def drop_matched(matched: pd.DataFrame, df1: pd.DataFrame, df2: pd.DataFrame):
"""Remove the matched rows from df1 and df2. Matched results from merging df1 with df2.
Important order of df1 and df2 matters."""
# extract df1 and df2 from matched
matched_x = matched[[x for x in matched.columns if "_x" in x]].copy()
matched_y = matched[[y for y in matched.columns if "_y" in y]].copy()
matched_x.columns = [x.replace("_x", "") for x in matched_x.columns]
matched_y.columns = [y.replace("_y", "") for y in matched_y.columns]
# Add frame column and reorder
matched_x[FRAME] = matched[FRAME]
matched_y[FRAME] = matched[FRAME]
matched_x[CELLID] = matched[CELLID]
matched_y[CELLID] = matched[CELLID]
matched_x = matched_x[df1.columns]
matched_y = matched_y[df2.columns]
df1_new = pd.concat([df1, matched_x])
df2_new = pd.concat([df2, matched_y])
df1_new = df1_new.drop_duplicates(keep=False)
df2_new = df2_new.drop_duplicates(keep=False)
return df1_new, df2_new
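# Sketch of the intended flow (assumes `matched` came from a
# pd.merge(df1, df2, ...) on the frame/cell columns): the _x/_y suffixed
# columns are peeled apart, relabelled back to the original names, appended
# to df1/df2, and drop_duplicates(keep=False) then removes every row that
# the merge had matched from both frames.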
def filter_tracks(df: pd.DataFrame, min_length: int = 10) -> pd.DataFrame:
"""Filter tracks based on length.
Arg:
df: dataframe containing the tracked data
min_length: integer specifying the min track length
Return:
filtered data frame."""
df = df[[X, Y, Z, FRAME, TRACKID, CELLID]]
df = df[df[CELLID] != 0].copy()
distribution_length = df[TRACKID].value_counts()
selection = distribution_length.index.values[
distribution_length.values > min_length
]
df = df[df[TRACKID].isin(selection)]
return df
def filter_overlapping(df: pd.DataFrame, max_overlaps: float = 0.5):
"""Return data.frame where tracks with overlaps higher than max_overlaps are filtered out.
Args:
df: dataframe with tracks to stitch
max_overlaps: maximum fraction of track that can overlap.
Tracks with higher overlaps will be filtered out.
Return:
filtered dataframe.
"""
while True:
# count number of duplicated timepoints per track
duplicated = df[df[FRAME].isin(df[df[FRAME].duplicated()][FRAME])][
TRACKID
].value_counts()
if len(duplicated) < 1:
return df
# duplicated track id
duplicated_tracks = duplicated.index.values
# number of duplication
duplicated_values = duplicated.values
# count number of timepoints per track
count_tracks_length = df[TRACKID].value_counts()
# if number of track is 1, by definition there is no overlapping
if len(count_tracks_length) == 1:
return df
# count track length of overlapping tracks
count_tracks_overlapping = count_tracks_length[
count_tracks_length.index.isin(duplicated_tracks)
]
# extract track id of shortest overlapping tracks
shortest_track_overlapping_idx = count_tracks_overlapping.idxmin()
# too long overlaps?
toolong = False
for track, value in zip(duplicated_tracks, duplicated_values):
fraction = value / len(df[df[TRACKID] == track])
if fraction > max_overlaps:
toolong = True
# if we found too many overlaps, remove shortest track and restart
if toolong:
df = df[df[TRACKID] != shortest_track_overlapping_idx].copy()
# if no too long overlaps, remove duplicates and return dataframe
if not toolong:
df = df.drop_duplicates(FRAME)
return df
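# Strategy note: overlaps are resolved greedily -- while any track's overlap
# fraction exceeds `max_overlaps`, the shortest overlapping track is dropped
# and the check restarts; once all overlaps are short enough, the duplicated
# frames themselves are dropped (keeping the first row per frame).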
def stitch(df: pd.DataFrame, max_dist: float = 1.6, max_overlaps: float = 0.5):
"""Stitch tracks with the same cell id. If tracks overlap, filters out
tracks with overlap higher than max_overlaps. Overlapping frames are filtered out randomly.
Arg:
df: dataframe containing the tracked data.
max_dist: maximum distance to match tracks from the same cell.
max_overlaps: maximum overlap allowed for each track.
Return:
dataframe with stitched tracks."""
res = pd.DataFrame()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
import pytest
from cardea.fhir import Patient
@pytest.fixture()
def patient_df():
return pd.DataFrame({"identifier": [0, 1, 2, 3],
"gender": ['female', 'female', 'male', 'female'],
"birthDate": ['10/21/2000', '7/2/2000', '1/10/2000', '9/16/2000'],
"active": ['True', 'True', 'False', 'False']})
@pytest.fixture()
def patient_object(patient_df):
object_values = patient_df.to_dict('list')
return Patient(object_values)
@pytest.fixture()
def patient_object_df(patient_object):
return patient_object.get_dataframe()
def test_object_number_of_attributes(patient_object_df, patient_df):
assert len(patient_object_df.columns) == len(patient_df.columns)
def test_object_number_of_tuples(patient_object_df, patient_df):
assert len(patient_object_df) == len(patient_df)
def test_get_id(patient_object):
assert patient_object.get_id() == 'identifier'
def test_get_relationships(patient_object):
relationships = patient_object.get_relationships()
assert len(relationships) == 12
def test_get_eligible_relationships(patient_object):
elig_relationships = patient_object.get_eligible_relationships()
assert len(elig_relationships) == 1
def test_get_id_lookup_error(patient_df):
df = patient_df[['gender', 'birthDate']]
object_values = df.to_dict('list')
object = Patient(object_values)
with pytest.raises(LookupError):
object.get_id()
def test_assert_type_enum():
df = | pd.DataFrame({"identifier": [0, 1], "gender": ['female', 'F']}) | pandas.DataFrame |
import re
import numpy as np
import pytest
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
import pandas as pd
from pandas import IntervalIndex, MultiIndex, RangeIndex
import pandas.util.testing as tm
def test_labels_dtypes():
# GH 8456
i = MultiIndex.from_tuples([("A", 1), ("A", 2)])
assert i.codes[0].dtype == "int8"
assert i.codes[1].dtype == "int8"
i = MultiIndex.from_product([["a"], range(40)])
assert i.codes[1].dtype == "int8"
i = MultiIndex.from_product([["a"], range(400)])
assert i.codes[1].dtype == "int16"
i = MultiIndex.from_product([["a"], range(40000)])
assert i.codes[1].dtype == "int32"
i = pd.MultiIndex.from_product([["a"], range(1000)])
assert (i.codes[0] >= 0).all()
assert (i.codes[1] >= 0).all()
def test_values_boxed():
tuples = [
(1, pd.Timestamp("2000-01-01")),
(2, pd.NaT),
(3, pd.Timestamp("2000-01-03")),
(1, pd.Timestamp("2000-01-04")),
(2, pd.Timestamp("2000-01-02")),
(3, pd.Timestamp("2000-01-03")),
]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex():
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
# TODO(GH-24559): Remove the FutureWarning
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
aware = pd.DatetimeIndex(ints, tz="US/Central")
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex():
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq="D")
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
import time
import argparse
import networkx as nx
import numpy as np
import scipy.sparse as sp
from sklearn import preprocessing
import os
import pandas as pd
from math import ceil
from datetime import date, timedelta
import itertools
from sklearn.preprocessing import MinMaxScaler
from functools import reduce
from utils import generate_graphs_britain, generate_graphs_by_day
step = 5
start_exp = 15
window = 7
os.chdir("/Italy")
labels = pd.read_csv("italy_labels.csv")
labels.loc[labels["name"]=="reggio_di_calabria","name"] = "reggio_calabria"
labels.loc[labels["name"]=="reggio_nell'emilia","name"] = "reggio_emilia"
labels.loc[labels["name"]=="bolzano","name"] = "bolzano_bozen"
labels.loc[labels["name"]=="l'aquila","name"] = "la_aquila"
del labels["id"]
labels = labels.set_index("name")
sdate = date(2020, 2, 24)
edate = date(2020, 5, 12)
delta = edate - sdate
dates = [sdate + timedelta(days=i) for i in range(delta.days+1)]
dates = [str(date) for date in dates]
Gs = generate_graphs_by_day(dates,"IT")
#labels = labels[,:]
labels = labels.loc[Gs[0].nodes(),:]
labels = labels.loc[labels.sum(1).values>10,dates]
gs_adj = [nx.adjacency_matrix(kgs).toarray().T for kgs in Gs]
y = list()
for i,G in enumerate(Gs):
y.append(list())
for node in G.nodes():
y[i].append(labels.loc[node,dates[i]])
nodez = Gs[0].nodes()
main = pd.DataFrame(labels.loc[nodez,labels.columns[start_exp]:].mean(1))
main.columns = ["avg_cases"]
main["cases"] = pd.DataFrame(labels.loc[nodez,labels.columns[start_exp]:].sum(1))
main = main.reset_index()
#df = pd.DataFrame(labels.iloc[:,start_exp:].mean(1))
#df.columns = ["avg_cases"]
#df["cases"] = pd.DataFrame(labels.iloc[:,start_exp:].sum(1))
#df = df.reset_index()
os.chdir("/output")
x0 = []
x1 = []
x2 = []
x3 = []
x4 = []
for i in range(15,79):
try:
x0.append(pd.read_csv("out_IT_"+str(i)+"_0.csv"))
#df.drop(df.columns[0],1))
except:
print(i)
try:
x1.append(pd.read_csv("out_IT_"+str(i)+"_1.csv"))
#df.drop(df.columns[0],1))
except:
print(i)
try:
x2.append(pd.read_csv("out_IT_"+str(i)+"_2.csv"))
#df.drop(df.columns[0],1))
except:
print(i)
try:
x3.append(pd.read_csv("out_IT_"+str(i)+"_3.csv"))
#df.drop(df.columns[0],1))
except:
print(i)
try:
x4.append(pd.read_csv("out_IT_"+str(i)+"_4.csv"))
#df.drop(df.columns[0],1))
except:
print(i)
n = x0[0]["n"]
cnt = 0
pds = []
pds_r = []
for i in range(0,len(x4)):
tmpx = [x0[i],x1[i],x2[i],x3[i],x4[i]] # step = 5
d = reduce(lambda p, l: p.add(l, fill_value=0), tmpx)
del d["n"]
d = d/step
par = d["l"].copy()
par[par<1]=1
pds.append(abs(d["o"]-d["l"]))
pds_r.append(abs(d["o"]-d["l"])/par)
pds_r = reduce(lambda p, l: p.add(l, fill_value=0), pds_r)/len(x4)
pds = reduce(lambda p, l: p.add(l, fill_value=0), pds)/len(x4)
df = pd.DataFrame({"relative":pds_r.values,"real":pds.values,"name":n })
tmp = df.merge(main,on='name')
tmp.to_csv("it_map_plot_"+str(step)+".csv")
#-------------------------------------
os.chdir("/Spain")
labels = pd.read_csv("spain_labels.csv")
labels = labels.set_index("name")
sdate = date(2020, 3, 12)
edate = date(2020, 5, 12)
#--- series of graphs and their respective dates
delta = edate - sdate
dates = [sdate + timedelta(days=i) for i in range(delta.days+1)]
dates = [str(date) for date in dates]
Gs = generate_graphs_by_day(dates,"ES")
l = Gs[0].nodes()
#l.remove("zaragoza")
labels = labels.loc[l,:]
labels = labels.loc[labels.sum(1).values>10,dates]
#nodez = Gs[0].nodes()
main = pd.DataFrame(labels.loc[:,labels.columns[start_exp]:].mean(1))
main.columns = ["avg_cases"]
main["cases"] = pd.DataFrame(labels.loc[:,labels.columns[start_exp]:].sum(1))
main = main.reset_index()
#df = pd.DataFrame(labels.iloc[:,start_exp:].mean(1))
#df.columns = ["avg_cases"]
#df["cases"] = pd.DataFrame(labels.iloc[:,start_exp:].sum(1))
#df = df.reset_index()
os.chdir("/output")
x0 = []
x1 = []
x2 = []
x3 = []
x4 = []
for i in range(15,62-step):
try:
x0.append(pd.read_csv("out_ES_"+str(i)+"_0.csv"))
#df.drop(df.columns[0],1))
except:
print(i)
try:
x1.append(pd.read_csv("out_ES_"+str(i)+"_1.csv"))
#df.drop(df.columns[0],1))
except:
print(i)
try:
x2.append(pd.read_csv("out_ES_"+str(i)+"_2.csv"))
#df.drop(df.columns[0],1))
except:
print(i)
try:
x3.append(pd.read_csv("out_ES_"+str(i)+"_3.csv"))
#df.drop(df.columns[0],1))
except:
print(i)
try:
x4.append(pd.read_csv("out_ES_"+str(i)+"_4.csv"))
#df.drop(df.columns[0],1))
except:
print(i)
n = x0[0]["n"]
cnt = 0
pds = []
pds_r = []
for i in range(0,len(x4)):
tmpx = [x0[i],x1[i],x2[i],x3[i],x4[i]] # step = 5
d = reduce(lambda p, l: p.add(l, fill_value=0), tmpx)
del d["n"]
d = d/step
par = d["l"].copy()
par[par<1]=1
pds.append(abs(d["o"]-d["l"]))
pds_r.append(abs(d["o"]-d["l"])/par)
pds_r = reduce(lambda p, l: p.add(l, fill_value=0), pds_r)/i
pds = reduce(lambda p, l: p.add(l, fill_value=0), pds)/i
df = pd.DataFrame({"relative":pds_r.values,"real":pds.values,"name":n })
tmp = df.merge(main,on='name')
tmp.to_csv("es_map_plot_"+str(step)+".csv")
#---------------------------------
os.chdir("/France")
labels = pd.read_csv("france_labels.csv")
#del labels["id"]
labels = labels.set_index("name")
sdate = date(2020, 3, 10)
edate = date(2020, 5, 12)
#Gs = generate_graphs(dates)
delta = edate - sdate
dates = [sdate + timedelta(days=i) for i in range(delta.days+1)]
dates = [str(date) for date in dates]
labels = labels.loc[labels.sum(1).values>10,dates]
Gs = generate_graphs_by_day(dates,"FR")
labels = labels.loc[Gs[0].nodes(),:]
#nodez = Gs[0].nodes()
main = pd.DataFrame(labels.loc[:,labels.columns[start_exp]:].mean(1))
main.columns = ["avg_cases"]
main["cases"] = pd.DataFrame(labels.loc[:,labels.columns[start_exp]:].sum(1))
main = main.reset_index()
os.chdir("/output")
x0 = []
x1 = []
x2 = []
x3 = []
x4 = []
for i in range(15,64-step):
try:
x0.append(pd.read_csv("out_FR_"+str(i)+"_0.csv"))
#df.drop(df.columns[0],1))
except:
print(i)
try:
x1.append(pd.read_csv("out_FR_"+str(i)+"_1.csv"))
#df.drop(df.columns[0],1))
except:
print(i)
try:
x2.append(pd.read_csv("out_FR_"+str(i)+"_2.csv"))
#df.drop(df.columns[0],1))
except:
print(i)
try:
x3.append(pd.read_csv("out_FR_"+str(i)+"_3.csv"))
#df.drop(df.columns[0],1))
except:
print(i)
try:
x4.append(pd.read_csv("out_FR_"+str(i)+"_4.csv"))
#df.drop(df.columns[0],1))
except:
print(i)
n = x0[0]["n"]
cnt = 0
pds = []
pds_r = []
for i in range(0,len(x4)):
tmpx = [x0[i],x1[i],x2[i],x3[i],x4[i]] # step = 5
d = reduce(lambda p, l: p.add(l, fill_value=0), tmpx)
del d["n"]
d = d/step
par = d["l"].copy()
par[par<1]=1
pds.append(abs(d["o"]-d["l"]))
pds_r.append(abs(d["o"]-d["l"])/par)
pds_r = reduce(lambda p, l: p.add(l, fill_value=0), pds_r)/len(x4)
pds = reduce(lambda p, l: p.add(l, fill_value=0), pds)/len(x4)
df = pd.DataFrame({"relative":pds_r.values,"real":pds.values,"name":n })
tmp = df.merge(main,on='name')
tmp.to_csv("fr_map_plot_"+str(step)+".csv")
#---------------------------------
os.chdir("/Britain")
labels = pd.read_csv("england_labels.csv")
from sqlalchemy import create_engine
import pandas as pd
from datetime import datetime
todays_date = datetime.today()
import json
import logging
logging.basicConfig(format=f"""%(asctime)s [%(levelname)s]\t%(message)s""",datefmt='%Y-%m-%d %H:%M:%S',level=logging.DEBUG)
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email import encoders
import os.path
def get_data_from_sql(query):
engine = create_engine("""mssql+pyodbc://%s:%s@%s:1433/%s?driver=ODBC+Driver+17+for+SQL+Server""" % ('userbob','<PASSWORD>','ip-0-0-0-0.ec2.internal','maindb'),echo=False)
df = pd.DataFrame()
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os, datetime, requests
from bs4 import BeautifulSoup
import pandas as pd
url = 'https://docs.google.com/spreadsheets/d/e/2PACX-1vQuDj0R6K85sdtI8I-Tc7RCx8CnIxKUQue0TCUdrFOKDw9G3JRtGhl64laDd3apApEvIJTdPFJ9fEUL/pubhtml?gid=0&single=true'
work_path = '/path/to/working/dir'
def get_table():
req = requests.session()
response = req.get(url,headers={'Accept-Language': 'zh-TW'})
soup = BeautifulSoup(response.text, "lxml")
table = soup.find('table', {'class': 'waffle'})
trs = table.find_all('tr')[1:]
rows = list()
for tr in trs:
rows.append([td.text.replace('\n', '') for td in tr.find_all('td')])
columns = rows[0][:]
columns[0] = columns[0][4:]
columns[2:5] = [columns[0],columns[0],columns[0]]
rows = [r[1:] for r in rows]
df = pd.DataFrame(data=rows, columns=columns[1:])
return df
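# Minimal usage sketch (hypothetical, for local inspection only):
#   df = get_table()
#   print(df.head())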
def biuld_nation():
df = get_table()
df_nation = df.drop(columns=df.columns[1])
df_nation.to_csv('nation.csv',index=False)
def biuld_database():
database = pd.read_csv('nation.csv')
database.to_csv('database.csv', index=False)
def update_database():
database = pd.read_csv('database.csv')
df = get_table()
new = pd.merge(database, df, on='Nation')
# coding: utf-8
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
SSQ = { 'S1_pitch': [6, 18], 'S1_yaw': [10], 'S1_roll':[9],
'S1_surge': [9], 'S1_heave': [], 'S1_sway': [7, 14],
'S2_pitch': [6, 16], 'S2_yaw': [5,6,16], 'S2_roll':[5,6,7,9,16,17],
'S2_surge': [6,7], 'S2_heave': [], 'S2_sway': [6,7,13],
'S3_pitch': [10,11,14,19], 'S3_yaw': [6,10], 'S3_roll':[5],
'S3_surge': [4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20],
'S3_heave': [], 'S3_sway': [3,4,5,6,7,8,12],
'S4': [2,3,4,5,6,7,12,15,22,23,24,25,26,27,33,43,54],
'S5': [7,8,24,25, 42, 54],
'S6': [3,4,17,18,19,20,21,22,23,36,37,38,39,40,41,42] }
EXP_SET = {'S1_pitch': 20, 'S1_yaw': 20, 'S1_roll': 20,
'S1_surge': 20, 'S1_heave': 10, 'S1_sway': 20,
'S2_pitch': 20, 'S2_yaw': 20, 'S2_roll': 20,
'S2_surge': 20, 'S2_heave': 10, 'S2_sway': 20,
'S3_pitch': 20, 'S3_yaw': 20, 'S3_roll': 20,
'S3_surge': 20, 'S3_heave': 10, 'S3_sway': 20,
'S4': 60, 'S5': 60, 'S6': 60}
tmp = {}
base = {}
for key in EXP_SET.keys():
base[key] = np.zeros(EXP_SET[key])
for key in SSQ.keys():
for sickness_occured in SSQ[key]:
base[key][sickness_occured - 1] = 1
base['S2_yaw'][6] = 2
base['S2_roll'][6] = 2
base['S2_roll'][16] = 2
base['S2_surge'][8] = 4
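# The assignments above look like manual severity overrides on top of the
# binary occurrence encoding (0 = no sickness reported, 1 = reported); the
# values 2 and 4 presumably encode stronger SSQ responses for those trials
# (an assumption -- the source of these magnitudes is not stated here).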
save_path = './data/raw/ssq/'
for key in base.keys():
df = pd.DataFrame(base[key])
df.to_csv(save_path + key + '.csv')
for key in SSQ.keys():
df = pd.read_csv(save_path + key + '.csv')
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
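# `xbox` is the box expected for the comparison result: comparisons on an
# Index return a plain ndarray, while Series/DataFrame comparisons return
# an object of the same (boxed) type.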
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([True, True, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
per = Period("2011-02", freq=freq)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == per, exp)
tm.assert_equal(per == base, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != per, exp)
tm.assert_equal(per != base, exp)
exp = np.array([False, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > per, exp)
tm.assert_equal(per < base, exp)
exp = np.array([True, False, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < per, exp)
tm.assert_equal(per > base, exp)
exp = np.array([False, True, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= per, exp)
tm.assert_equal(per <= base, exp)
exp = np.array([True, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= per, exp)
tm.assert_equal(per >= base, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
# TODO: could also box idx?
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)
exp = np.array([False, False, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == idx, exp)
exp = np.array([True, True, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != idx, exp)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > idx, exp)
exp = np.array([True, False, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < idx, exp)
exp = np.array([False, True, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= idx, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
# GH#13200
# different base freq
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
msg = "Input has different freq=A-DEC from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="A")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="A") >= base
# TODO: Could parametrize over boxes for idx?
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
rev_msg = (
r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=A-DEC\)"
)
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
# Different frequency
msg = "Input has different freq=4M from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="4M")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="4M") >= base
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
rev_msg = r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=4M\)"
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
result = idx1 > Period("2011-02", freq=freq)
exp = np.array([False, False, False, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("2011-02", freq=freq) < idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 == Period("NaT", freq=freq)
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) == idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 != Period("NaT", freq=freq)
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) != idx1
tm.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
msg = "Input has different freq=4M from Period(Array|Index)"
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 > diff
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 == diff
# TODO: De-duplicate with test_pi_cmp_nat
@pytest.mark.parametrize("dtype", [object, None])
def test_comp_nat(self, dtype):
left = pd.PeriodIndex(
[pd.Period("2011-01-01"), pd.NaT, pd.Period("2011-01-03")]
)
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period("2011-01-03")])
if dtype is not None:
left = left.astype(dtype)
right = right.astype(dtype)
result = left == right
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = left != right
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == right, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(left != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != left, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > left, expected)
class TestPeriodSeriesComparisons:
def test_cmp_series_period_series_mixed_freq(self):
# GH#13200
base = Series(
[
Period("2011", freq="A"),
Period("2011-02", freq="M"),
Period("2013", freq="A"),
Period("2011-04", freq="M"),
]
)
ser = Series(
[
Period("2012", freq="A"),
Period("2011-01", freq="M"),
Period("2013", freq="A"),
Period("2011-05", freq="M"),
]
)
exp = Series([False, False, True, False])
tm.assert_series_equal(base == ser, exp)
exp = Series([True, True, False, True])
tm.assert_series_equal(base != ser, exp)
exp = Series([False, True, False, False])
tm.assert_series_equal(base > ser, exp)
exp = Series([True, False, False, True])
tm.assert_series_equal(base < ser, exp)
exp = Series([False, True, True, False])
tm.assert_series_equal(base >= ser, exp)
exp = Series([True, False, True, True])
tm.assert_series_equal(base <= ser, exp)
class TestPeriodIndexSeriesComparisonConsistency:
""" Test PeriodIndex and Period Series Ops consistency """
# TODO: needs parametrization+de-duplication
def _check(self, values, func, expected):
# Test PeriodIndex and Period Series Ops consistency
idx = pd.PeriodIndex(values)
result = func(idx)
# check that we don't pass an unwanted type to tm.assert_equal
assert isinstance(expected, (pd.Index, np.ndarray))
tm.assert_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_comp_period(self):
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.Period("2011-03", freq="M")
exp = np.array([False, False, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
def test_pi_comp_period_nat(self):
idx = PeriodIndex(
["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x == pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: x != pd.NaT
exp = np.array([True, True, True, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x < pd.Period("2011-03", freq="M")
exp = np.array([True, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT >= x
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
# ------------------------------------------------------------------
# Arithmetic
class TestPeriodFrameArithmetic:
def test_ops_frame_period(self):
# GH#13043
df = pd.DataFrame(
{
"A": [pd.Period("2015-01", freq="M"), pd.Period("2015-02", freq="M")],
"B": [pd.Period("2014-01", freq="M"), pd.Period("2014-02", freq="M")],
}
)
assert df["A"].dtype == "Period[M]"
assert df["B"].dtype == "Period[M]"
p = pd.Period("2015-03", freq="M")
off = p.freq
# dtype will be object because of original dtype
exp = pd.DataFrame(
{
"A": np.array([2 * off, 1 * off], dtype=object),
"B": np.array([14 * off, 13 * off], dtype=object),
}
)
tm.assert_frame_equal(p - df, exp)
tm.assert_frame_equal(df - p, -1 * exp)
df2 = pd.DataFrame(
{
"A": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
"B": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
}
)
assert df2["A"].dtype == "Period[M]"
assert df2["B"].dtype == "Period[M]"
exp = pd.DataFrame(
{
"A": np.array([4 * off, 4 * off], dtype=object),
"B": np.array([16 * off, 16 * off], dtype=object),
}
)
tm.assert_frame_equal(df2 - df, exp)
tm.assert_frame_equal(df - df2, -1 * exp)
class TestPeriodIndexArithmetic:
# ---------------------------------------------------------------
# __add__/__sub__ with PeriodIndex
# PeriodIndex + other is defined for integers and timedelta-like others
# PeriodIndex - other is defined for integers, timedelta-like others,
# and PeriodIndex (with matching freq)
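    # A rough sketch of the semantics exercised below (values chosen for
    # illustration only):
    #
    #   pi = pd.period_range("2000-01-01", freq="D", periods=3)
    #   pi + 2   # each element shifted forward by two periods
    #   pi - pi  # Index of zero Day offsets
    #   pi + pi  # raises TypeError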
def test_parr_add_iadd_parr_raises(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
# An earlier implementation of PeriodIndex addition performed
# a set operation (union). This has since been changed to
# raise a TypeError. See GH#14164 and GH#13077 for historical
# reference.
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
def test_pi_sub_isub_pi(self):
# GH#20049
# For historical reference see GH#14164, GH#13077.
# PeriodIndex subtraction originally performed set difference,
# then changed to raise TypeError before being implemented in GH#20049
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
off = rng.freq
expected = pd.Index([-5 * off] * 5)
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_pi_sub_pi_with_nat(self):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = rng[1:].insert(0, pd.NaT)
assert other[1:].equals(rng[1:])
result = rng - other
off = rng.freq
expected = pd.Index([pd.NaT, 0 * off, 0 * off, 0 * off, 0 * off])
tm.assert_index_equal(result, expected)
def test_parr_sub_pi_mismatched_freq(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="H", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(IncompatibleFrequency):
rng - other
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_sub_n_gt_1_ticks(self, tick_classes, n):
# GH 23878
p1_d = "19910905"
p2_d = "19920406"
p1 = pd.PeriodIndex([p1_d], freq=tick_classes(n))
p2 = pd.PeriodIndex([p2_d], freq=tick_classes(n))
expected = pd.PeriodIndex([p2_d], freq=p2.freq.base) - pd.PeriodIndex(
[p1_d], freq=p1.freq.base
)
tm.assert_index_equal((p2 - p1), expected)
@pytest.mark.parametrize("n", [1, 2, 3, 4])
@pytest.mark.parametrize(
"offset, kwd_name",
[
(pd.offsets.YearEnd, "month"),
(pd.offsets.QuarterEnd, "startingMonth"),
(pd.offsets.MonthEnd, None),
(pd.offsets.Week, "weekday"),
],
)
def test_sub_n_gt_1_offsets(self, offset, kwd_name, n):
# GH 23878
kwds = {kwd_name: 3} if kwd_name is not None else {}
p1_d = "19910905"
p2_d = "19920406"
freq = offset(n, normalize=False, **kwds)
p1 = pd.PeriodIndex([p1_d], freq=freq)
p2 = pd.PeriodIndex([p2_d], freq=freq)
result = p2 - p1
expected = pd.PeriodIndex([p2_d], freq=freq.base) - pd.PeriodIndex(
[p1_d], freq=freq.base
)
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub])
def test_parr_add_sub_float_raises(self, op, other, box_with_array):
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D")
pi = dti.to_period("D")
pi = tm.box_expected(pi, box_with_array)
with pytest.raises(TypeError):
op(pi, other)
@pytest.mark.parametrize(
"other",
[
# datetime scalars
pd.Timestamp.now(),
pd.Timestamp.now().to_pydatetime(),
pd.Timestamp.now().to_datetime64(),
# datetime-like arrays
pd.date_range("2016-01-01", periods=3, freq="H"),
pd.date_range("2016-01-01", periods=3, tz="Europe/Brussels"),
pd.date_range("2016-01-01", periods=3, freq="S")._data,
pd.date_range("2016-01-01", periods=3, tz="Asia/Tokyo")._data,
# Miscellaneous invalid types
],
)
def test_parr_add_sub_invalid(self, other, box_with_array):
# GH#23215
rng = pd.period_range("1/1/2000", freq="D", periods=3)
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
other + rng
with pytest.raises(TypeError):
rng - other
with pytest.raises(TypeError):
other - rng
# -----------------------------------------------------------------
# __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64]
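    # Only tick-like (fixed-duration) frequencies such as 'D' or '90D'
    # can absorb a timedelta64; calendar-based frequencies such as 'Q'
    # raise IncompatibleFrequency, as the tests below exercise.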
def test_pi_add_sub_td64_array_non_tick_raises(self):
rng = pd.period_range("1/1/2000", freq="Q", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
with pytest.raises(IncompatibleFrequency):
rng + tdarr
with pytest.raises(IncompatibleFrequency):
tdarr + rng
with pytest.raises(IncompatibleFrequency):
rng - tdarr
with pytest.raises(TypeError):
tdarr - rng
def test_pi_add_sub_td64_array_tick(self):
# PeriodIndex + Timedelta-like is allowed only with
# tick-like frequencies
rng = pd.period_range("1/1/2000", freq="90D", periods=3)
tdi = pd.TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = pd.period_range("12/31/1999", freq="90D", periods=3)
result = rng + tdi
tm.assert_index_equal(result, expected)
result = rng + tdarr
tm.assert_index_equal(result, expected)
result = tdi + rng
tm.assert_index_equal(result, expected)
result = tdarr + rng
tm.assert_index_equal(result, expected)
expected = pd.period_range("1/2/2000", freq="90D", periods=3)
result = rng - tdi
tm.assert_index_equal(result, expected)
result = rng - tdarr
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
tdarr - rng
with pytest.raises(TypeError):
tdi - rng
# -----------------------------------------------------------------
# operations with array/Index of DateOffset objects
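    # Compatible offsets are applied elementwise (with a
    # PerformanceWarning for the object-dtype fallback); incompatible
    # offsets warn and then raise, as the tests below exercise.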
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_add_offset_array(self, box):
# GH#18849
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
offs = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = pd.PeriodIndex([pd.Period("2015Q2"), pd.Period("2015Q4")])
with tm.assert_produces_warning(PerformanceWarning):
res = pi + offs
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = offs + pi
tm.assert_index_equal(res2, expected)
unanchored = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
        # addition/subtraction ops with incompatible offsets should issue
        # a PerformanceWarning and _then_ raise an IncompatibleFrequency.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi + unanchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
unanchored + pi
@pytest.mark.parametrize("box", [np.array, pd.Index])
def test_pi_sub_offset_array(self, box):
# GH#18824
pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
other = box(
[
pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12),
]
)
expected = PeriodIndex([pi[n] - other[n] for n in range(len(pi))])
with tm.assert_produces_warning(PerformanceWarning):
res = pi - other
tm.assert_index_equal(res, expected)
anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
        # addition/subtraction ops with anchored offsets should issue
        # a PerformanceWarning and _then_ raise an IncompatibleFrequency.
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
pi - anchored
with pytest.raises(IncompatibleFrequency):
with tm.assert_produces_warning(PerformanceWarning):
anchored - pi
def test_pi_add_iadd_int(self, one):
        # Variants of the integer `one` (int, np.int64, np.uint8, ...); see GH#19012
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng + one
expected = pd.period_range("2000-01-01 10:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng += one
tm.assert_index_equal(rng, expected)
def test_pi_sub_isub_int(self, one):
"""
PeriodIndex.__sub__ and __isub__ with several representations of
the integer 1, e.g. int, np.int64, np.uint8, ...
"""
rng = pd.period_range("2000-01-01 09:00", freq="H", periods=10)
result = rng - one
expected = pd.period_range("2000-01-01 08:00", freq="H", periods=10)
tm.assert_index_equal(result, expected)
rng -= one
tm.assert_index_equal(rng, expected)
@pytest.mark.parametrize("five", [5, np.array(5, dtype=np.int64)])
def test_pi_sub_intlike(self, five):
rng = period_range("2007-01", periods=50)
result = rng - five
exp = rng + (-five)
tm.assert_index_equal(result, exp)
def test_pi_sub_isub_offset(self):
# offset
# DateOffset
rng = pd.period_range("2014", "2024", freq="A")
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range("2009", "2019", freq="A")
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
rng = pd.period_range("2014-01", "2016-12", freq="M")
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range("2013-08", "2016-07", freq="M")
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
def test_pi_add_offset_n_gt1(self, box_transpose_fail):
# GH#23215
# add offset to PeriodIndex with freq.n > 1
box, transpose = box_transpose_fail
per = pd.Period("2016-01", freq="2M")
pi = pd.PeriodIndex([per])
expected = pd.PeriodIndex(["2016-03"], freq="2M")
pi = tm.box_expected(pi, box, transpose=transpose)
expected = tm.box_expected(expected, box, transpose=transpose)
result = pi + per.freq
tm.assert_equal(result, expected)
result = per.freq + pi
tm.assert_equal(result, expected)
def test_pi_add_offset_n_gt1_not_divisible(self, box_with_array):
# GH#23215
# PeriodIndex with freq.n > 1 add offset with offset.n % freq.n != 0
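        # e.g. a '2M' period advances in two-month steps, so adding a 3M
        # offset to '2016-01' lands on the 2M period holding '2016-04'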
pi = pd.PeriodIndex(["2016-01"], freq="2M")
expected = pd.PeriodIndex(["2016-04"], freq="2M")
# FIXME: with transposing these tests fail
pi = tm.box_expected(pi, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = pi + to_offset("3M")
tm.assert_equal(result, expected)
result = to_offset("3M") + pi
tm.assert_equal(result, expected)
# ---------------------------------------------------------------
# __add__/__sub__ with integer arrays
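    # Adding an integer ndarray/Index shifts each element by that many
    # periods, e.g. Period('2015Q1') + 4 -> Period('2016Q1').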
@pytest.mark.parametrize("int_holder", [np.array, pd.Index])
@pytest.mark.parametrize("op", [operator.add, ops.radd])
def test_pi_add_intarray(self, int_holder, op):
# GH#19959
        pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
        other = int_holder([4, -1])
        result = op(pi, other)
        expected = pd.PeriodIndex([pd.Period("2016Q1"), pd.Period("2016Q1")])
        tm.assert_index_equal(result, expected)
import click
import logging
import numpy as np
import os
import pandas as pd
import requests
import sys
logger = logging.getLogger(__name__)
CSV_FILE = 'data/cacem-dechets.csv'
ANALYSE_DIR = 'data/analyse'
def requests_retry_session(retries=10, backoff_factor=0.5, status_forcelist=(500, 502, 504), session=None):
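    """Return a `requests` Session that retries transient failures.

    Connect/read errors and the HTTP statuses in ``status_forcelist`` are
    retried up to ``retries`` times, sleeping roughly
    ``backoff_factor * 2 ** (attempt - 1)`` seconds between attempts; the
    retrying adapter is mounted for https:// URLs.
    """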
session = session or requests.Session()
retry = requests.packages.urllib3.util.retry.Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist,
)
adapter = requests.adapters.HTTPAdapter(max_retries=retry)
session.mount('https://', adapter)
return session
def collect_to_df(collecte_ids):
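    """Fetch the collection schedule for each adresse id and flatten it
    into one row per (adresse, type of collecte, day) combination."""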
data = []
s = requests.Session()
for collecte_id in collecte_ids:
r = requests_retry_session(session=s).get(f'https://collecte-dechets.cacem.fr/get/collectes/{collecte_id}')
data_json = r.json()
data += [
[
data_json['adresse']['id'],
collecte['title'],
day,
collecte['week_type']
]
for collecte in data_json['collectes'] for day in collecte['days']
]
df = pd.DataFrame(data, columns=['adresse_id', 'type_collecte', 'jour', 'type_semaine'])
return df
def get_data(output_file):
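    """Download the communes, quartiers and adresses reference tables
    from the collecte-dechets.cacem.fr API."""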
    communes = (pd.read_json('https://collecte-dechets.cacem.fr/get/communes', orient='records')
                .rename(columns={'id': 'commune_id', 'name': 'commune_name'}))
    quartiers = (pd.read_json('https://collecte-dechets.cacem.fr/get/quartiers', orient='records')
                 .rename(columns={'id': 'quartier_id', 'name': 'quartier_name'}))
    adresses = pd.read_json('https://collecte-dechets.cacem.fr/get/adresses', orient='records')
# -*- coding: utf-8 -*-
import re
import warnings
from datetime import timedelta
from itertools import product
import pytest
import numpy as np
import pandas as pd
from pandas import (CategoricalIndex, DataFrame, Index, MultiIndex,
compat, date_range, period_range)
from pandas.compat import PY3, long, lrange, lzip, range, u, PYPY
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.indexes.base import InvalidIndexError
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas._libs.tslib import Timestamp
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal, assert_copy
from .common import Base
class TestMultiIndex(Base):
_holder = MultiIndex
_compat_props = ['shape', 'ndim', 'size']
def setup_method(self, method):
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
self.index_names = ['first', 'second']
self.indices = dict(index=MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels
], names=self.index_names,
verify_integrity=False))
self.setup_indices()
def create_index(self):
return self.index
def test_can_hold_identifiers(self):
idx = self.create_index()
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_boolean_context_compat2(self):
# boolean context compat
# GH7897
i1 = MultiIndex.from_tuples([('A', 1), ('A', 2)])
i2 = MultiIndex.from_tuples([('A', 1), ('A', 3)])
common = i1.intersection(i2)
def f():
if common:
pass
tm.assert_raises_regex(ValueError, 'The truth value of a', f)
def test_labels_dtypes(self):
# GH 8456
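        # labels are stored in the smallest signed integer dtype that
        # can index the level, as the assertions below show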
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_where(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
def f():
i.where(True)
pytest.raises(NotImplementedError, f)
def test_where_array_like(self):
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
klasses = [list, tuple, np.array, pd.Series]
cond = [False, True]
for klass in klasses:
def f():
return i.where(klass(cond))
pytest.raises(NotImplementedError, f)
def test_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(m.repeat(reps), expected)
with tm.assert_produces_warning(FutureWarning):
result = m.repeat(n=reps)
tm.assert_index_equal(result, expected)
def test_numpy_repeat(self):
reps = 2
numbers = [1, 2, 3]
names = np.array(['foo', 'bar'])
m = MultiIndex.from_product([
numbers, names], names=names)
expected = MultiIndex.from_product([
numbers, names.repeat(reps)], names=names)
tm.assert_index_equal(np.repeat(m, reps), expected)
msg = "the 'axis' parameter is not supported"
tm.assert_raises_regex(
ValueError, msg, np.repeat, m, reps, axis=1)
def test_set_name_methods(self):
# so long as these are synonyms, we don't need to test set_names
assert self.index.rename == self.index.set_names
new_names = [name + "SUFFIX" for name in self.index_names]
ind = self.index.set_names(new_names)
assert self.index.names == self.index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = self.index.set_names(new_names[0], level=0)
assert self.index.names == self.index_names
assert ind.names == [new_names[0], self.index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], self.index_names[1]]
# set names for multiple levels
ind = self.index.set_names(new_names, level=[0, 1])
assert self.index.names == self.index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(self, inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
def test_set_levels_labels_directly(self):
# setting levels/labels directly raises AttributeError
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
self.index.levels = new_levels
with pytest.raises(AttributeError):
self.index.labels = new_labels
def test_set_levels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
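        # (the usual constructors are MultiIndex.from_arrays,
        # from_tuples and from_product)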
levels = self.index.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
# level changing [w/o mutation]
ind2 = self.index.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = self.index.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = self.index.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# level changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(self.index.levels, levels)
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(self.index.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(self.index.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = self.index.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_levels(['c'], level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
self.index.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
self.index.set_levels('c', level=0, inplace=inplace)
assert_matching(self.index.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
self.index.set_labels(1, level=0, inplace=inplace)
assert_matching(self.index.labels, original_index.labels,
check_dtype=True)
def test_set_labels(self):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = self.index.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
def assert_matching(actual, expected):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp, dtype=np.int8)
tm.assert_numpy_array_equal(act, exp)
# label changing [w/o mutation]
ind2 = self.index.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = self.index.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = self.index.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
# label changing specific level [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(self.index.labels, labels)
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(self.index.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = self.index.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(self.index.labels, labels)
        # label changing for a level with a larger number of categories
        # (130), where the labels no longer fit in int8
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(self):
levels, labels = self.index.levels, self.index.labels
names = self.index.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
self.index.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
self.index.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
self.index.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
self.index.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
self.index.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'string'):
self.index.set_names(names, level=0)
def test_set_levels_categorical(self):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
for ordered in [False, True]:
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_metadata_immutable(self):
levels, labels = self.index.levels, self.index.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = self.index.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_inplace_mutation_resets_values(self):
levels = [['a', 'b', 'c'], [4]]
levels2 = [[1, 2, 3], ['a']]
labels = [[0, 1, 0, 2, 2, 0], [0, 0, 0, 0, 0, 0]]
mi1 = MultiIndex(levels=levels, labels=labels)
mi2 = MultiIndex(levels=levels2, labels=labels)
vals = mi1.values.copy()
vals2 = mi2.values.copy()
assert mi1._tuples is not None
# Make sure level setting works
new_vals = mi1.set_levels(levels2).values
tm.assert_almost_equal(vals2, new_vals)
# Non-inplace doesn't kill _tuples [implementation detail]
tm.assert_almost_equal(mi1._tuples, vals)
# ...and values is still same too
tm.assert_almost_equal(mi1.values, vals)
# Inplace should kill _tuples
mi1.set_levels(levels2, inplace=True)
tm.assert_almost_equal(mi1.values, vals2)
# Make sure label setting works too
labels2 = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]]
exp_values = np.empty((6,), dtype=object)
exp_values[:] = [(long(1), 'a')] * 6
# Must be 1d array of tuples
assert exp_values.shape == (6,)
new_values = mi2.set_labels(labels2).values
# Not inplace shouldn't change
tm.assert_almost_equal(mi2._tuples, vals2)
# Should have correct values
tm.assert_almost_equal(exp_values, new_values)
# ...and again setting inplace should kill _tuples, etc
mi2.set_labels(labels2, inplace=True)
tm.assert_almost_equal(mi2.values, new_values)
def test_copy_in_constructor(self):
levels = np.array(["a", "b", "c"])
labels = np.array([1, 1, 2, 0, 0, 1, 1])
val = labels[0]
mi = MultiIndex(levels=[levels, levels], labels=[labels, labels],
copy=True)
assert mi.labels[0][0] == val
labels[0] = 15
assert mi.labels[0][0] == val
val = levels[0]
levels[0] = "PANDA"
assert mi.levels[0][0] == val
def test_set_value_keeps_names(self):
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
def test_copy_names(self):
# Check that adding a "names" parameter to the copy is honored
# GH14302
multi_idx = pd.Index([(1, 2), (3, 4)], names=['MyName1', 'MyName2'])
multi_idx1 = multi_idx.copy()
assert multi_idx.equals(multi_idx1)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx1.names == ['MyName1', 'MyName2']
multi_idx2 = multi_idx.copy(names=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx2)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx2.names == ['NewName1', 'NewName2']
multi_idx3 = multi_idx.copy(name=['NewName1', 'NewName2'])
assert multi_idx.equals(multi_idx3)
assert multi_idx.names == ['MyName1', 'MyName2']
assert multi_idx3.names == ['NewName1', 'NewName2']
def test_names(self):
# names are assigned in setup
names = self.index_names
level_names = [level.name for level in self.index.levels]
assert names == level_names
# setting bad names on existing
index = self.index
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names",
list(index.names) + ["third"])
tm.assert_raises_regex(ValueError, "^Length of names",
setattr, index, "names", [])
# initializing with bad names (should always be equivalent)
major_axis, minor_axis = self.index.levels
major_labels, minor_labels = self.index.labels
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first'])
tm.assert_raises_regex(ValueError, "^Length of names", MultiIndex,
levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels],
names=['first', 'second', 'third'])
# names are assigned
index.names = ["a", "b"]
ind_names = list(index.names)
level_names = [level.name for level in index.levels]
assert ind_names == level_names
def test_astype(self):
expected = self.index.copy()
actual = self.index.astype('O')
assert_copy(actual.levels, expected.levels)
assert_copy(actual.labels, expected.labels)
self.check_level_names(actual, expected.names)
with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"):
self.index.astype(np.dtype(int))
@pytest.mark.parametrize('ordered', [True, False])
def test_astype_category(self, ordered):
# GH 18630
msg = '> 1 ndim Categorical are not supported at this time'
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype(CategoricalDtype(ordered=ordered))
if ordered is False:
# dtype='category' defaults to ordered=False, so only test once
with tm.assert_raises_regex(NotImplementedError, msg):
self.index.astype('category')
def test_constructor_single_level(self):
result = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
assert isinstance(result, MultiIndex)
expected = Index(['foo', 'bar', 'baz', 'qux'], name='first')
tm.assert_index_equal(result.levels[0], expected)
assert result.names == ['first']
def test_constructor_no_levels(self):
tm.assert_raises_regex(ValueError, "non-zero number "
"of levels/labels",
MultiIndex, levels=[], labels=[])
both_re = re.compile('Must pass both levels and labels')
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(levels=[])
with tm.assert_raises_regex(TypeError, both_re):
MultiIndex(labels=[])
def test_constructor_mismatched_label_levels(self):
labels = [np.array([1]), np.array([2]), np.array([3])]
levels = ["a"]
tm.assert_raises_regex(ValueError, "Length of levels and labels "
"must be the same", MultiIndex,
levels=levels, labels=labels)
length_error = re.compile('>= length of level')
label_error = re.compile(r'Unequal label lengths: \[4, 2\]')
        # important to check that the raised error points at the right
        # component (levels vs. labels).
with tm.assert_raises_regex(ValueError, length_error):
MultiIndex(levels=[['a'], ['b']],
labels=[[0, 1, 2, 3], [0, 3, 4, 1]])
with tm.assert_raises_regex(ValueError, label_error):
MultiIndex(levels=[['a'], ['b']], labels=[[0, 0, 0, 0], [0, 0]])
# external API
with tm.assert_raises_regex(ValueError, length_error):
self.index.copy().set_levels([['a'], ['b']])
with tm.assert_raises_regex(ValueError, label_error):
self.index.copy().set_labels([[0, 0, 0, 0], [0, 0]])
def test_constructor_nonhashable_names(self):
# GH 20527
levels = [[1, 2], [u'one', u'two']]
labels = [[0, 0, 1, 1], [0, 1, 0, 1]]
        names = (['foo'], ['bar'])
message = "MultiIndex.name must be a hashable type"
tm.assert_raises_regex(TypeError, message,
MultiIndex, levels=levels,
labels=labels, names=names)
# With .rename()
mi = MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=('foo', 'bar'))
renamed = [['foor'], ['barr']]
tm.assert_raises_regex(TypeError, message, mi.rename, names=renamed)
# With .set_names()
tm.assert_raises_regex(TypeError, message, mi.set_names, names=renamed)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], ['1', '1', '2'],
['1', 'a', '1']])
def test_duplicate_level_names(self, names):
# GH18872
pytest.raises(ValueError, pd.MultiIndex.from_product,
[[0, 1]] * 3, names=names)
# With .rename()
mi = pd.MultiIndex.from_product([[0, 1]] * 3)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names)
# With .rename(., level=)
mi.rename(names[0], level=1, inplace=True)
tm.assert_raises_regex(ValueError, "Duplicated level name:",
mi.rename, names[:2], level=[0, 2])
def assert_multiindex_copied(self, copy, original):
        # Levels should be (at least) shallow-copied
        tm.assert_copy(copy.levels, original.levels)
        # Labels should match, but must not be the same object
        tm.assert_almost_equal(copy.labels, original.labels)
        assert copy.labels is not original.labels
        # Names should match, but must not be the same object
        assert copy.names == original.names
        assert copy.names is not original.names
# Sort order should be copied
assert copy.sortorder == original.sortorder
def test_copy(self):
i_copy = self.index.copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_shallow_copy(self):
i_copy = self.index._shallow_copy()
self.assert_multiindex_copied(i_copy, self.index)
def test_view(self):
i_view = self.index.view()
self.assert_multiindex_copied(i_view, self.index)
def check_level_names(self, index, names):
assert [level.name for level in index.levels] == list(names)
def test_changing_names(self):
# names should be applied to levels
level_names = [level.name for level in self.index.levels]
self.check_level_names(self.index, self.index.names)
view = self.index.view()
copy = self.index.copy()
shallow_copy = self.index._shallow_copy()
# changing names should change level names on object
new_names = [name + "a" for name in self.index.names]
self.index.names = new_names
self.check_level_names(self.index, new_names)
# but not on copies
self.check_level_names(view, level_names)
self.check_level_names(copy, level_names)
self.check_level_names(shallow_copy, level_names)
# and copies shouldn't change original
shallow_copy.names = [name + "c" for name in shallow_copy.names]
self.check_level_names(self.index, new_names)
def test_get_level_number_integer(self):
self.index.names = [1, 0]
assert self.index._get_level_number(1) == 0
assert self.index._get_level_number(0) == 1
pytest.raises(IndexError, self.index._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
self.index._get_level_number, 'fourth')
def test_from_arrays(self):
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# list of arrays as input
result = MultiIndex.from_arrays(arrays, names=self.index.names)
tm.assert_index_equal(result, self.index)
# infer correctly
result = MultiIndex.from_arrays([[pd.NaT, Timestamp('20130101')],
['a', 'b']])
assert result.levels[0].equals(Index([Timestamp('20130101')]))
assert result.levels[1].equals(Index(['a', 'b']))
def test_from_arrays_iterator(self):
# GH 18434
arrays = []
for lev, lab in zip(self.index.levels, self.index.labels):
arrays.append(np.asarray(lev).take(lab))
# iterator as input
result = MultiIndex.from_arrays(iter(arrays), names=self.index.names)
tm.assert_index_equal(result, self.index)
# invalid iterator input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of array-likes."):
MultiIndex.from_arrays(0)
def test_from_arrays_index_series_datetimetz(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3,
tz='Asia/Tokyo')
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_timedelta(self):
idx1 = pd.timedelta_range('1 days', freq='D', periods=3)
idx2 = pd.timedelta_range('2 hours', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_period(self):
idx1 = pd.period_range('2011-01-01', freq='D', periods=3)
idx2 = pd.period_range('2015-01-01', freq='H', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_datetimelike_mixed(self):
idx1 = pd.date_range('2015-01-01 10:00', freq='D', periods=3,
tz='US/Eastern')
idx2 = pd.date_range('2015-01-01 10:00', freq='H', periods=3)
idx3 = pd.timedelta_range('1 days', freq='D', periods=3)
idx4 = pd.period_range('2011-01-01', freq='D', periods=3)
result = pd.MultiIndex.from_arrays([idx1, idx2, idx3, idx4])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
tm.assert_index_equal(result.get_level_values(2), idx3)
tm.assert_index_equal(result.get_level_values(3), idx4)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1),
pd.Series(idx2),
pd.Series(idx3),
pd.Series(idx4)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
tm.assert_index_equal(result2.get_level_values(2), idx3)
tm.assert_index_equal(result2.get_level_values(3), idx4)
tm.assert_index_equal(result, result2)
def test_from_arrays_index_series_categorical(self):
# GH13743
idx1 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=False)
idx2 = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=True)
result = pd.MultiIndex.from_arrays([idx1, idx2])
tm.assert_index_equal(result.get_level_values(0), idx1)
tm.assert_index_equal(result.get_level_values(1), idx2)
result2 = pd.MultiIndex.from_arrays([pd.Series(idx1), pd.Series(idx2)])
tm.assert_index_equal(result2.get_level_values(0), idx1)
tm.assert_index_equal(result2.get_level_values(1), idx2)
result3 = pd.MultiIndex.from_arrays([idx1.values, idx2.values])
tm.assert_index_equal(result3.get_level_values(0), idx1)
tm.assert_index_equal(result3.get_level_values(1), idx2)
def test_from_arrays_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_arrays(arrays=[])
# 1 level
result = MultiIndex.from_arrays(arrays=[[]], names=['A'])
assert isinstance(result, MultiIndex)
expected = Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# N levels
for N in [2, 3]:
arrays = [[]] * N
names = list('ABC')[:N]
result = MultiIndex.from_arrays(arrays=arrays, names=names)
expected = MultiIndex(levels=[[]] * N, labels=[[]] * N,
names=names)
tm.assert_index_equal(result, expected)
def test_from_arrays_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i)
def test_from_arrays_different_lengths(self):
# see gh-13599
idx1 = [1, 2, 3]
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = []
idx2 = ['a', 'b']
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
idx1 = [1, 2, 3]
idx2 = []
tm.assert_raises_regex(ValueError, '^all arrays must '
'be same length$',
MultiIndex.from_arrays, [idx1, idx2])
def test_from_product(self):
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
result = MultiIndex.from_product([first, second], names=names)
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
tm.assert_index_equal(result, expected)
def test_from_product_iterator(self):
# GH 18434
first = ['foo', 'bar', 'buz']
second = ['a', 'b', 'c']
names = ['first', 'second']
tuples = [('foo', 'a'), ('foo', 'b'), ('foo', 'c'), ('bar', 'a'),
('bar', 'b'), ('bar', 'c'), ('buz', 'a'), ('buz', 'b'),
('buz', 'c')]
expected = MultiIndex.from_tuples(tuples, names=names)
# iterator as input
result = MultiIndex.from_product(iter([first, second]), names=names)
tm.assert_index_equal(result, expected)
# Invalid non-iterable input
with tm.assert_raises_regex(
TypeError, "Input must be a list / sequence of iterables."):
MultiIndex.from_product(0)
def test_from_product_empty(self):
# 0 levels
with tm.assert_raises_regex(
ValueError, "Must pass non-zero number of levels/labels"):
MultiIndex.from_product([])
# 1 level
result = MultiIndex.from_product([[]], names=['A'])
expected = pd.Index([], name='A')
tm.assert_index_equal(result.levels[0], expected)
# 2 levels
l1 = [[], ['foo', 'bar', 'baz'], []]
l2 = [[], [], ['a', 'b', 'c']]
names = ['A', 'B']
for first, second in zip(l1, l2):
result = MultiIndex.from_product([first, second], names=names)
expected = MultiIndex(levels=[first, second],
labels=[[], []], names=names)
tm.assert_index_equal(result, expected)
# GH12258
names = ['A', 'B', 'C']
for N in range(4):
lvl2 = lrange(N)
result = MultiIndex.from_product([[], lvl2, []], names=names)
expected = MultiIndex(levels=[[], lvl2, []],
labels=[[], [], []], names=names)
tm.assert_index_equal(result, expected)
def test_from_product_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
for i in invalid_inputs:
pytest.raises(TypeError, MultiIndex.from_product, iterables=i)
def test_from_product_datetimeindex(self):
dt_index = date_range('2000-01-01', periods=2)
mi = pd.MultiIndex.from_product([[1, 2], dt_index])
        etalon = construct_1d_object_array_from_listlike(
            [(1, pd.Timestamp('2000-01-01')), (1, pd.Timestamp('2000-01-02')),
             (2, pd.Timestamp('2000-01-01')), (2, pd.Timestamp('2000-01-02'))])
tm.assert_numpy_array_equal(mi.values, etalon)
def test_from_product_index_series_categorical(self):
# GH13743
first = ['foo', 'bar']
for ordered in [False, True]:
idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"),
ordered=ordered)
expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"),
categories=list("bac"),
ordered=ordered)
for arr in [idx, pd.Series(idx), idx.values]:
result = pd.MultiIndex.from_product([first, arr])
tm.assert_index_equal(result.get_level_values(1), expected)
def test_values_boxed(self):
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
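        # ("boxing" here means wrapping tz-aware datetime values in
        # pd.Timestamp objects; tz-naive values can stay as raw
        # datetime64 -- the "nobox" path)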
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex(self):
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_append(self):
result = self.index[:3].append(self.index[3:])
assert result.equals(self.index)
foos = [self.index[:1], self.index[1:3], self.index[3:]]
result = foos[0].append(foos[1:])
assert result.equals(self.index)
# empty
result = self.index.append([])
assert result.equals(self.index)
def test_append_mixed_dtypes(self):
# GH 13660
dti = date_range('2011-01-01', freq='M', periods=3, )
dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern')
pi = period_range('2011-01', freq='M', periods=3)
mi = MultiIndex.from_arrays([[1, 2, 3],
[1.1, np.nan, 3.3],
['a', 'b', 'c'],
dti, dti_tz, pi])
assert mi.nlevels == 6
res = mi.append(mi)
exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3],
[1.1, np.nan, 3.3, 1.1, np.nan, 3.3],
['a', 'b', 'c', 'a', 'b', 'c'],
dti.append(dti),
dti_tz.append(dti_tz),
pi.append(pi)])
tm.assert_index_equal(res, exp)
other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z'],
['x', 'y', 'z'], ['x', 'y', 'z']])
res = mi.append(other)
exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'],
[1.1, np.nan, 3.3, 'x', 'y', 'z'],
['a', 'b', 'c', 'x', 'y', 'z'],
dti.append(pd.Index(['x', 'y', 'z'])),
dti_tz.append(pd.Index(['x', 'y', 'z'])),
pi.append(pd.Index(['x', 'y', 'z']))])
tm.assert_index_equal(res, exp)
def test_get_level_values(self):
result = self.index.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = self.index.get_level_values('first')
expected = self.index.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_int_with_na(self):
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na(self):
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_all_na(self):
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_reorder_levels(self):
# this blows up
tm.assert_raises_regex(IndexError, '^Too many levels',
self.index.reorder_levels, [2, 1, 0])
def test_nlevels(self):
assert self.index.nlevels == 2
def test_iter(self):
result = list(self.index)
expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'),
('baz', 'two'), ('qux', 'one'), ('qux', 'two')]
assert result == expected
def test_legacy_pickle(self):
if PY3:
pytest.skip("testing for legacy pickles not "
"support on py3")
path = tm.get_data_path('multiindex_v1.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_legacy_v2_unpickle(self):
# 0.7.3 -> 0.8.0 format manage
path = tm.get_data_path('mindex_073.pickle')
obj = pd.read_pickle(path)
obj2 = MultiIndex.from_tuples(obj.values)
assert obj.equals(obj2)
res = obj.get_indexer(obj)
exp = np.arange(len(obj), dtype=np.intp)
assert_almost_equal(res, exp)
res = obj.get_indexer(obj2[::-1])
exp = obj.get_indexer(obj[::-1])
exp2 = obj2.get_indexer(obj2[::-1])
assert_almost_equal(res, exp)
assert_almost_equal(exp, exp2)
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = MultiIndex.from_product(
[[1, 2], ['a', 'b'], date_range('20130101', periods=3,
tz='US/Eastern')
], names=['one', 'two', 'three'])
unpickled = tm.round_trip_pickle(index)
assert index.equal_levels(unpickled)
def test_from_tuples_index_values(self):
result = MultiIndex.from_tuples(self.index)
assert (result.values == self.index.values).all()
def test_contains(self):
assert ('foo', 'two') in self.index
assert ('bar', 'two') not in self.index
assert None not in self.index
def test_contains_top_level(self):
midx = MultiIndex.from_product([['A', 'B'], [1, 2]])
assert 'A' in midx
assert 'A' not in midx._engine
def test_contains_with_nat(self):
# MI with a NaT
mi = MultiIndex(levels=[['C'],
pd.date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
assert ('C', pd.Timestamp('2012-01-01')) in mi
for val in mi.values:
assert val in mi
def test_is_all_dates(self):
assert not self.index.is_all_dates
def test_is_numeric(self):
# MultiIndex is never numeric
assert not self.index.is_numeric()
def test_getitem(self):
# scalar
assert self.index[2] == ('bar', 'one')
# slice
result = self.index[2:5]
expected = self.index[[2, 3, 4]]
assert result.equals(expected)
# boolean
result = self.index[[True, False, True, False, True, True]]
result2 = self.index[np.array([True, False, True, False, True, True])]
expected = self.index[[0, 2, 4, 5]]
assert result.equals(expected)
assert result2.equals(expected)
def test_getitem_group_select(self):
sorted_idx, _ = self.index.sortlevel(0)
assert sorted_idx.get_loc('baz') == slice(3, 4)
assert sorted_idx.get_loc('foo') == slice(0, 2)
def test_get_loc(self):
assert self.index.get_loc(('foo', 'two')) == 1
assert self.index.get_loc(('baz', 'two')) == 3
pytest.raises(KeyError, self.index.get_loc, ('bar', 'two'))
pytest.raises(KeyError, self.index.get_loc, 'quux')
pytest.raises(NotImplementedError, self.index.get_loc, 'foo',
method='nearest')
# 3 levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
pytest.raises(KeyError, index.get_loc, (1, 1))
assert index.get_loc((2, 0)) == slice(3, 5)
def test_get_loc_duplicates(self):
index = Index([2, 2, 2, 2])
result = index.get_loc(2)
expected = slice(0, 4)
assert result == expected
# pytest.raises(Exception, index.get_loc, 2)
index = Index(['c', 'a', 'a', 'b', 'b'])
rs = index.get_loc('c')
xp = 0
assert rs == xp
def test_get_value_duplicates(self):
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_loc_level(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
loc, new_index = index.get_loc_level((0, 1))
expected = slice(1, 2)
exp_index = index[expected].droplevel(0).droplevel(0)
assert loc == expected
assert new_index.equals(exp_index)
loc, new_index = index.get_loc_level((0, 1, 0))
expected = 1
assert loc == expected
assert new_index is None
pytest.raises(KeyError, index.get_loc_level, (2, 2))
index = MultiIndex(levels=[[2000], lrange(4)], labels=[np.array(
[0, 0, 0, 0]), np.array([0, 1, 2, 3])])
result, new_index = index.get_loc_level((2000, slice(None, None)))
expected = slice(None, None)
assert result == expected
assert new_index.equals(index.droplevel(0))
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None])
def test_get_loc_nan(self, level, null_val):
# GH 18485 : NaN in MultiIndex
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
levels[level] = np.array([0, null_val], dtype=type(null_val))
key[level] = null_val
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_missing_nan(self):
# GH 8569
idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
assert isinstance(idx.get_loc(1), slice)
pytest.raises(KeyError, idx.get_loc, 3)
pytest.raises(KeyError, idx.get_loc, np.nan)
pytest.raises(KeyError, idx.get_loc, [np.nan])
@pytest.mark.parametrize('dtype1', [int, float, bool, str])
@pytest.mark.parametrize('dtype2', [int, float, bool, str])
def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
# GH 18520
levels = [np.array([0, 1]).astype(dtype1),
np.array([0, 1]).astype(dtype2)]
idx = pd.MultiIndex.from_product(levels)
assert idx.get_loc(idx[2]) == 2
@pytest.mark.parametrize('level', [0, 1])
@pytest.mark.parametrize('dtypes', [[int, float], [float, int]])
def test_get_loc_implicit_cast(self, level, dtypes):
# GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
levels = [['a', 'b'], ['c', 'd']]
key = ['b', 'd']
lev_dtype, key_dtype = dtypes
levels[level] = np.array([0, 1], dtype=lev_dtype)
key[level] = key_dtype(1)
idx = MultiIndex.from_product(levels)
assert idx.get_loc(tuple(key)) == 3
def test_get_loc_cast_bool(self):
# GH 19086 : int is casted to bool, but not vice-versa
levels = [[False, True], np.arange(2, dtype='int64')]
idx = MultiIndex.from_product(levels)
assert idx.get_loc((0, 1)) == 1
assert idx.get_loc((1, 0)) == 2
pytest.raises(KeyError, idx.get_loc, (False, True))
pytest.raises(KeyError, idx.get_loc, (True, False))
def test_slice_locs(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
sliced = stacked[slob]
expected = df[5:16].stack()
tm.assert_almost_equal(sliced.values, expected.values)
slob = slice(*idx.slice_locs(df.index[5] + timedelta(seconds=30),
df.index[15] - timedelta(seconds=30)))
sliced = stacked[slob]
expected = df[6:15].stack()
tm.assert_almost_equal(sliced.values, expected.values)
def test_slice_locs_with_type_mismatch(self):
df = tm.makeTimeDataFrame()
stacked = df.stack()
idx = stacked.index
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs, (1, 3))
tm.assert_raises_regex(TypeError, '^Level type mismatch',
idx.slice_locs,
df.index[5] + timedelta(
seconds=30), (5, 2))
df = tm.makeCustomDataframe(5, 5)
stacked = df.stack()
idx = stacked.index
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(timedelta(seconds=30))
# TODO: Try creating a UnicodeDecodeError in exception message
with tm.assert_raises_regex(TypeError, '^Level type mismatch'):
idx.slice_locs(df.index[1], (16, "a"))
def test_slice_locs_not_sorted(self):
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
tm.assert_raises_regex(KeyError, "[Kk]ey length.*greater than "
"MultiIndex lexsort depth",
index.slice_locs, (1, 0, 1), (2, 1, 0))
# works
sorted_index, _ = index.sortlevel(0)
# should there be a test case here???
sorted_index.slice_locs((1, 0, 1), (2, 1, 0))
def test_slice_locs_partial(self):
sorted_idx, _ = self.index.sortlevel(0)
result = sorted_idx.slice_locs(('foo', 'two'), ('qux', 'one'))
assert result == (1, 5)
result = sorted_idx.slice_locs(None, ('qux', 'one'))
assert result == (0, 5)
result = sorted_idx.slice_locs(('foo', 'two'), None)
assert result == (1, len(sorted_idx))
result = sorted_idx.slice_locs('bar', 'baz')
assert result == (2, 4)
def test_slice_locs_not_contained(self):
# some searchsorted action
index = MultiIndex(levels=[[0, 2, 4, 6], [0, 2, 4]],
labels=[[0, 0, 0, 1, 1, 2, 3, 3, 3],
[0, 1, 2, 1, 2, 2, 0, 1, 2]], sortorder=0)
result = index.slice_locs((1, 0), (5, 2))
assert result == (3, 6)
result = index.slice_locs(1, 5)
assert result == (3, 6)
result = index.slice_locs((2, 2), (5, 2))
assert result == (3, 6)
result = index.slice_locs(2, 5)
assert result == (3, 6)
result = index.slice_locs((1, 0), (6, 3))
assert result == (3, 8)
result = index.slice_locs(-1, 10)
assert result == (0, len(index))
def test_consistency(self):
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
# the fact that it works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_truncate(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
result = index.truncate(before=1)
assert 'foo' not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(after=1)
assert 2 not in result.levels[0]
assert 1 in result.levels[0]
result = index.truncate(before=1, after=2)
assert len(result.levels[0]) == 2
# after < before
pytest.raises(ValueError, index.truncate, 3, 1)
def test_get_indexer(self):
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
minor_labels = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
idx1 = index[:5]
idx2 = index[[1, 3, 5]]
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
r1 = idx2.get_indexer(idx1, method='pad')
e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='pad')
assert_almost_equal(r2, e1[::-1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
assert_almost_equal(r1, e1)
r2 = idx2.get_indexer(idx1[::-1], method='backfill')
assert_almost_equal(r2, e1[::-1])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
# pass non-MultiIndex
r1 = idx1.get_indexer(idx2.values)
rexp1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, rexp1)
r1 = idx1.get_indexer([1, 2, 3])
assert (r1 == [-1, -1, -1]).all()
# create index with duplicates
idx1 = Index(lrange(10) + lrange(10))
idx2 = Index(lrange(20))
msg = "Reindexing only valid with uniquely valued Index objects"
with tm.assert_raises_regex(InvalidIndexError, msg):
idx1.get_indexer(idx2)
def test_get_indexer_nearest(self):
midx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='nearest')
with pytest.raises(NotImplementedError):
midx.get_indexer(['a'], method='pad', tolerance=2)
def test_hash_collisions(self):
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_format(self):
self.index.format()
self.index[:0].format()
def test_format_integer_names(self):
index = MultiIndex(levels=[[0, 1], [0, 1]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
index.format(names=True)
def test_format_sparse_display(self):
index = MultiIndex(levels=[[0, 1], [0, 1], [0, 1], [0]],
labels=[[0, 0, 0, 1, 1, 1], [0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0]])
result = index.format()
assert result[3] == '1 0 0 0'
def test_format_sparse_config(self):
warn_filters = warnings.filters
warnings.filterwarnings('ignore', category=FutureWarning,
module=".*format")
# GH1538
pd.set_option('display.multi_sparse', False)
result = self.index.format()
assert result[1] == 'foo two'
tm.reset_display_options()
warnings.filters = warn_filters
def test_to_frame(self):
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
tuples = [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]
index = MultiIndex.from_tuples(tuples, names=['first', 'second'])
result = index.to_frame(index=False)
expected = DataFrame(tuples)
expected.columns = ['first', 'second']
tm.assert_frame_equal(result, expected)
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame(index=False)
expected = DataFrame(
{0: np.repeat(np.arange(5, dtype='int64'), 3),
1: np.tile(pd.date_range('20130101', periods=3), 5)})
tm.assert_frame_equal(result, expected)
index = MultiIndex.from_product([range(5),
pd.date_range('20130101', periods=3)])
result = index.to_frame()
expected.index = index
tm.assert_frame_equal(result, expected)
def test_to_hierarchical(self):
index = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two')])
result = index.to_hierarchical(3)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# K > 1
result = index.to_hierarchical(3, 2)
expected = MultiIndex(levels=[[1, 2], ['one', 'two']],
labels=[[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]])
tm.assert_index_equal(result, expected)
assert result.names == index.names
# non-sorted
index = MultiIndex.from_tuples([(2, 'c'), (1, 'b'),
(2, 'a'), (2, 'b')],
names=['N1', 'N2'])
result = index.to_hierarchical(2)
expected = MultiIndex.from_tuples([(2, 'c'), (2, 'c'), (1, 'b'),
(1, 'b'),
(2, 'a'), (2, 'a'),
(2, 'b'), (2, 'b')],
names=['N1', 'N2'])
tm.assert_index_equal(result, expected)
assert result.names == index.names
def test_bounds(self):
self.index._bounds
def test_equals_multi(self):
assert self.index.equals(self.index)
assert not self.index.equals(self.index.values)
assert self.index.equals(Index(self.index.values))
assert self.index.equal_levels(self.index)
assert not self.index.equals(self.index[:-1])
assert not self.index.equals(self.index[-1])
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
assert not self.index.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not self.index.equals(index)
def test_equals_missing_values(self):
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_identical(self):
mi = self.index.copy()
mi2 = self.index.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_is_(self):
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
mi4.set_levels([lrange(10), lrange(10)], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
def test_union(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_union = piece1 | piece2
tups = sorted(self.index.values)
expected = MultiIndex.from_tuples(tups)
assert the_union.equals(expected)
# corner case, pass self or empty thing:
the_union = self.index.union(self.index)
assert the_union is self.index
the_union = self.index.union(self.index[:0])
assert the_union is self.index
# won't work in python 3
# tuples = self.index.values
# result = self.index[:4] | tuples[4:]
# assert result.equals(tuples)
# not valid for python 3
# def test_union_with_regular_index(self):
# other = Index(['A', 'B', 'C'])
# result = other.union(self.index)
# assert ('foo', 'one') in result
# assert 'B' in result
# result2 = self.index.union(other)
# assert result.equals(result2)
def test_intersection(self):
piece1 = self.index[:5][::-1]
piece2 = self.index[3:]
the_int = piece1 & piece2
tups = sorted(self.index[3:5].values)
expected = MultiIndex.from_tuples(tups)
assert the_int.equals(expected)
# corner case, pass self
the_int = self.index.intersection(self.index)
assert the_int is self.index
# empty intersection: disjoint
empty = self.index[:2] & self.index[2:]
expected = self.index[:0]
assert empty.equals(expected)
# can't do in python 3
# tuples = self.index.values
# result = self.index & tuples
# assert result.equals(tuples)
def test_sub(self):
first = self.index
# - now raises (previously was set op difference)
with pytest.raises(TypeError):
first - self.index[-3:]
with pytest.raises(TypeError):
self.index[-3:] - first
with pytest.raises(TypeError):
self.index[-3:] - first.tolist()
with pytest.raises(TypeError):
first.tolist() - self.index[-3:]
def test_difference(self):
first = self.index
result = first.difference(self.index[-3:])
expected = MultiIndex.from_tuples(sorted(self.index[:-3].values),
sortorder=0,
names=self.index.names)
assert isinstance(result, MultiIndex)
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: reflexive
result = self.index.difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: superset
result = self.index[-3:].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# empty difference: degenerate
result = self.index[:0].difference(self.index)
expected = self.index[:0]
assert result.equals(expected)
assert result.names == self.index.names
# names not the same
chunklet = self.index[-3:]
chunklet.names = ['foo', 'baz']
result = first.difference(chunklet)
assert result.names == (None, None)
# empty, but non-equal
result = self.index.difference(self.index.sortlevel(1)[0])
assert len(result) == 0
# raise Exception called with non-MultiIndex
result = first.difference(first.values)
assert result.equals(first[:0])
# name from empty array
result = first.difference([])
assert first.equals(result)
assert first.names == result.names
# name from non-empty array
result = first.difference([('foo', 'one')])
expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), (
'foo', 'two'), ('qux', 'one'), ('qux', 'two')])
expected.names = first.names
assert first.names == result.names
tm.assert_raises_regex(TypeError, "other must be a MultiIndex "
"or a list of tuples",
first.difference, [1, 2, 3, 4, 5])
def test_from_tuples(self):
tm.assert_raises_regex(TypeError, 'Cannot infer number of levels '
'from empty list',
MultiIndex.from_tuples, [])
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
# input tuples
result = MultiIndex.from_tuples(((1, 2), (3, 4)), names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_from_tuples_iterator(self):
# GH 18434
# input iterator for tuples
expected = MultiIndex(levels=[[1, 3], [2, 4]],
labels=[[0, 1], [0, 1]],
names=['a', 'b'])
result = MultiIndex.from_tuples(zip([1, 3], [2, 4]), names=['a', 'b'])
tm.assert_index_equal(result, expected)
# input non-iterables
with tm.assert_raises_regex(
TypeError, 'Input must be a list / sequence of tuple-likes.'):
MultiIndex.from_tuples(0)
def test_from_tuples_empty(self):
# GH 16777
result = MultiIndex.from_tuples([], names=['a', 'b'])
expected = MultiIndex.from_arrays(arrays=[[], []],
names=['a', 'b'])
tm.assert_index_equal(result, expected)
def test_argsort(self):
result = self.index.argsort()
expected = self.index.values.argsort()
tm.assert_numpy_array_equal(result, expected)
def test_sortlevel(self):
import random
tuples = list(self.index)
random.shuffle(tuples)
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_sortlevel_not_sort_remaining(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
sorted_idx, _ = mi.sortlevel('A', sort_remaining=False)
assert sorted_idx.equals(mi)
def test_sortlevel_deterministic(self):
tuples = [('bar', 'one'), ('foo', 'two'), ('qux', 'two'),
('foo', 'one'), ('baz', 'two'), ('qux', 'one')]
index = MultiIndex.from_tuples(tuples)
sorted_idx, _ = index.sortlevel(0)
expected = MultiIndex.from_tuples(sorted(tuples))
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(0, ascending=False)
assert sorted_idx.equals(expected[::-1])
sorted_idx, _ = index.sortlevel(1)
by1 = sorted(tuples, key=lambda x: (x[1], x[0]))
expected = MultiIndex.from_tuples(by1)
assert sorted_idx.equals(expected)
sorted_idx, _ = index.sortlevel(1, ascending=False)
assert sorted_idx.equals(expected[::-1])
def test_dims(self):
pass
def test_drop(self):
dropped = self.index.drop([('foo', 'two'), ('qux', 'one')])
index = MultiIndex.from_tuples([('foo', 'two'), ('qux', 'one')])
dropped2 = self.index.drop(index)
expected = self.index[[0, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
tm.assert_index_equal(dropped2, expected)
dropped = self.index.drop(['bar'])
expected = self.index[[0, 1, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop('foo')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
index = MultiIndex.from_tuples([('bar', 'two')])
pytest.raises(KeyError, self.index.drop, [('bar', 'two')])
pytest.raises(KeyError, self.index.drop, index)
pytest.raises(KeyError, self.index.drop, ['foo', 'two'])
# partially correct argument
mixed_index = MultiIndex.from_tuples([('qux', 'one'), ('bar', 'two')])
pytest.raises(KeyError, self.index.drop, mixed_index)
# error='ignore'
dropped = self.index.drop(index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[0, 1, 2, 3, 5]]
tm.assert_index_equal(dropped, expected)
dropped = self.index.drop(['foo', 'two'], errors='ignore')
expected = self.index[[2, 3, 4, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop
dropped = self.index.drop(['foo', ('qux', 'one')])
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
# mixed partial / full drop / error='ignore'
mixed_index = ['foo', ('qux', 'one'), 'two']
pytest.raises(KeyError, self.index.drop, mixed_index)
dropped = self.index.drop(mixed_index, errors='ignore')
expected = self.index[[2, 3, 5]]
tm.assert_index_equal(dropped, expected)
def test_droplevel_with_names(self):
index = self.index[self.index.get_loc('foo')]
dropped = index.droplevel(0)
assert dropped.name == 'second'
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index.droplevel(0)
assert dropped.names == ('two', 'three')
dropped = index.droplevel('two')
expected = index.droplevel(1)
assert dropped.equals(expected)
def test_droplevel_list(self):
index = MultiIndex(
levels=[Index(lrange(4)), Index(lrange(4)), Index(lrange(4))],
labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])],
names=['one', 'two', 'three'])
dropped = index[:2].droplevel(['three', 'one'])
expected = index[:2].droplevel(2).droplevel(0)
assert dropped.equals(expected)
dropped = index[:2].droplevel([])
expected = index[:2]
assert dropped.equals(expected)
with pytest.raises(ValueError):
index[:2].droplevel(['one', 'two', 'three'])
with pytest.raises(KeyError):
index[:2].droplevel(['one', 'four'])
def test_drop_not_lexsorted(self):
# GH 12078
# define the lexsorted version of the multi-index
tuples = [('a', ''), ('b1', 'c1'), ('b2', 'c2')]
lexsorted_mi = MultiIndex.from_tuples(tuples, names=['b', 'c'])
assert lexsorted_mi.is_lexsorted()
# and the not-lexsorted version
df = pd.DataFrame(columns=['a', 'b', 'c', 'd'],
data=[[1, 'b1', 'c1', 3], [1, 'b2', 'c2', 4]])
df = df.pivot_table(index='a', columns=['b', 'c'], values='d')
df = df.reset_index()
not_lexsorted_mi = df.columns
assert not not_lexsorted_mi.is_lexsorted()
# compare the results
tm.assert_index_equal(lexsorted_mi, not_lexsorted_mi)
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_index_equal(lexsorted_mi.drop('a'),
not_lexsorted_mi.drop('a'))
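# (The PerformanceWarning asserted above is pandas flagging that dropping a
# label from a non-lexsorted MultiIndex falls back to a slow path; sorting
# the index first avoids the warning.)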
def test_insert(self):
# key contained in all levels
new_index = self.index.insert(0, ('bar', 'two'))
assert new_index.equal_levels(self.index)
assert new_index[0] == ('bar', 'two')
# key not contained in all levels
new_index = self.index.insert(0, ('abc', 'three'))
exp0 = Index(list(self.index.levels[0]) + ['abc'], name='first')
tm.assert_index_equal(new_index.levels[0], exp0)
exp1 = Index(list(self.index.levels[1]) + ['three'], name='second')
tm.assert_index_equal(new_index.levels[1], exp1)
assert new_index[0] == ('abc', 'three')
# key wrong length
msg = "Item must have length equal to number of levels"
with tm.assert_raises_regex(ValueError, msg):
self.index.insert(0, ('foo2',))
left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]],
columns=['1st', '2nd', '3rd'])
left.set_index(['1st', '2nd'], inplace=True)
ts = left['3rd'].copy(deep=True)
left.loc[('b', 'x'), '3rd'] = 2
left.loc[('b', 'a'), '3rd'] = -1
left.loc[('b', 'b'), '3rd'] = 3
left.loc[('a', 'x'), '3rd'] = 4
left.loc[('a', 'w'), '3rd'] = 5
left.loc[('a', 'a'), '3rd'] = 6
ts.loc[('b', 'x')] = 2
ts.loc['b', 'a'] = -1
ts.loc[('b', 'b')] = 3
ts.loc['a', 'x'] = 4
ts.loc[('a', 'w')] = 5
ts.loc['a', 'a'] = 6
right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2],
['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4],
['a', 'w', 5], ['a', 'a', 6]],
columns=['1st', '2nd', '3rd'])
right.set_index(['1st', '2nd'], inplace=True)
# FIXME: data types change to float because
# of intermediate nan insertion;
tm.assert_frame_equal(left, right, check_dtype=False)
tm.assert_series_equal(ts, right['3rd'])
# GH9250
idx = [('test1', i) for i in range(5)] + \
[('test2', i) for i in range(6)] + \
[('test', 17), ('test', 18)]
left = pd.Series(np.linspace(0, 10, 11),
pd.MultiIndex.from_tuples(idx[:-2]))
left.loc[('test', 17)] = 11
left.loc[('test', 18)] = 12
right = pd.Series(np.linspace(0, 12, 13),
pd.MultiIndex.from_tuples(idx))
tm.assert_series_equal(left, right)
def test_take_preserve_name(self):
taken = self.index.take([3, 0, 1])
assert taken.names == self.index.names
def test_take_fill_value(self):
# GH 12631
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
result = idx.take(np.array([1, 0, -1]))
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
(np.nan, pd.NaT)]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
exp_vals = [('A', pd.Timestamp('2011-01-02')),
('A', pd.Timestamp('2011-01-01')),
('B', pd.Timestamp('2011-01-02'))]
expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt'])
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assert_raises_regex(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with pytest.raises(IndexError):
idx.take(np.array([1, -5]))
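# Note on the semantics exercised above: with allow_fill=True and a non-None
# fill_value, index -1 is replaced by the fill tuple ((np.nan, pd.NaT) here)
# and any index below -1 raises ValueError; with allow_fill=False, negative
# indices are plain positional indexing from the end.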
def test_take_invalid_kwargs(self):
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
@pytest.mark.parametrize('other',
[Index(['three', 'one', 'two']),
Index(['one']),
Index(['one', 'three'])])
def test_join_level(self, other, join_type):
join_index, lidx, ridx = other.join(self.index, how=join_type,
level='second',
return_indexers=True)
exp_level = other.join(self.index.levels[1], how=join_type)
assert join_index.levels[0].equals(self.index.levels[0])
assert join_index.levels[1].equals(exp_level)
# pare down levels
mask = np.array(
[x[1] in exp_level for x in self.index], dtype=bool)
exp_values = self.index.values[mask]
tm.assert_numpy_array_equal(join_index.values, exp_values)
if join_type in ('outer', 'inner'):
join_index2, ridx2, lidx2 = \
self.index.join(other, how=join_type, level='second',
return_indexers=True)
assert join_index.equals(join_index2)
tm.assert_numpy_array_equal(lidx, lidx2)
tm.assert_numpy_array_equal(ridx, ridx2)
tm.assert_numpy_array_equal(join_index2.values, exp_values)
def test_join_level_corner_case(self):
# some corner cases
idx = Index(['three', 'one', 'two'])
result = idx.join(self.index, level='second')
assert isinstance(result, MultiIndex)
tm.assert_raises_regex(TypeError, "Join.*MultiIndex.*ambiguous",
self.index.join, self.index, level=1)
def test_join_self(self, join_type):
res = self.index
joined = res.join(res, how=join_type)
assert res is joined
def test_join_multi(self):
# GH 10665
midx = pd.MultiIndex.from_product(
[np.arange(4), np.arange(4)], names=['a', 'b'])
idx = pd.Index([1, 2, 5], name='b')
# inner
jidx, lidx, ridx = midx.join(idx, how='inner', return_indexers=True)
exp_idx = pd.MultiIndex.from_product(
[np.arange(4), [1, 2]], names=['a', 'b'])
exp_lidx = np.array([1, 2, 5, 6, 9, 10, 13, 14], dtype=np.intp)
exp_ridx = np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=np.intp)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='inner', return_indexers=True)
tm.assert_index_equal(jidx, exp_idx)
tm.assert_numpy_array_equal(lidx, exp_lidx)
tm.assert_numpy_array_equal(ridx, exp_ridx)
# keep MultiIndex
jidx, lidx, ridx = midx.join(idx, how='left', return_indexers=True)
exp_ridx = np.array([-1, 0, 1, -1, -1, 0, 1, -1, -1, 0, 1, -1, -1, 0,
1, -1], dtype=np.intp)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
# flip
jidx, ridx, lidx = idx.join(midx, how='right', return_indexers=True)
tm.assert_index_equal(jidx, midx)
assert lidx is None
tm.assert_numpy_array_equal(ridx, exp_ridx)
def test_reindex(self):
result, indexer = self.index.reindex(list(self.index[:4]))
assert isinstance(result, MultiIndex)
self.check_level_names(result, self.index[:4].names)
result, indexer = self.index.reindex(list(self.index))
assert isinstance(result, MultiIndex)
assert indexer is None
self.check_level_names(result, self.index.names)
def test_reindex_level(self):
idx = Index(['one'])
target, indexer = self.index.reindex(idx, level='second')
target2, indexer2 = idx.reindex(self.index, level='second')
exp_index = self.index.join(idx, level='second', how='right')
exp_index2 = self.index.join(idx, level='second', how='left')
assert target.equals(exp_index)
exp_indexer = np.array([0, 2, 4])
tm.assert_numpy_array_equal(indexer, exp_indexer, check_dtype=False)
assert target2.equals(exp_index2)
exp_indexer2 = np.array([0, -1, 0, -1, 0, -1])
tm.assert_numpy_array_equal(indexer2, exp_indexer2, check_dtype=False)
tm.assert_raises_regex(TypeError, "Fill method not supported",
self.index.reindex, self.index,
method='pad', level='second')
tm.assert_raises_regex(TypeError, "Fill method not supported",
idx.reindex, idx, method='bfill',
level='first')
def test_duplicates(self):
assert not self.index.has_duplicates
assert self.index.append(self.index).has_duplicates
index = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[
[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]])
assert index.has_duplicates
# GH 9075
t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169),
(u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119),
(u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135),
(u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145),
(u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158),
(u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122),
(u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160),
(u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180),
(u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143),
(u('x'), u('out'), u('z'), 21, u('y'), u('in'), u('z'), 128),
(u('x'), u('out'), u('z'), 22, u('y'), u('in'), u('z'), 129),
(u('x'), u('out'), u('z'), 25, u('y'), u('in'), u('z'), 111),
(u('x'), u('out'), u('z'), 28, u('y'), u('in'), u('z'), 114),
(u('x'), u('out'), u('z'), 29, u('y'), u('in'), u('z'),
# Copyright 2021 Prayas Energy Group(https://www.prayaspune.org/peg/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""demand io layer. mainly data loading and validation functions.
Function and variable from this module are available in Demand.yml
for validation.
Few function are also used by processing layer.
Important note: Some functions are just imported and not used
are for yaml validation. All definations(including those which are
imported from other modules) from this module are available in
yaml validation.
"""
import csv
import functools
import itertools
import os
import logging
from rumi.io import functionstore as fs
from rumi.io import loaders
from rumi.io import filemanager
from rumi.io import config
from rumi.io import constant
from rumi.io import common
from rumi.io import utilities
import pandas as pd
from rumi.io.common import balancing_area, balancing_time
from rumi.io.utilities import check_consumer_validity
from rumi.io.utilities import check_geographic_validity
from rumi.io.utilities import check_time_validity
from rumi.io.multiprocessutils import execute_in_process_pool
logger = logging.getLogger(__name__)
def get_consumer_levels(ds):
"""get number of consumer levels defined
for given demand sector
Parameters
----------
ds: str
Demand sector name
Returns
-------
1 or 2
"""
DS_Cons1_Map = loaders.get_parameter("DS_Cons1_Map")
type1 = DS_Cons1_Map[ds][-1]
Cons1_Cons2_Map = loaders.get_parameter("Cons1_Cons2_Map")
if Cons1_Cons2_Map and Cons1_Cons2_Map.get(type1, None):
return 2
return 1
def get_cons_columns(ds):
"""get maximum consumer columns for given demand sector
Parameters
-----------
ds: str
Demand sector name
Returns
-------
a list of consumer columns for given demand sector
"""
return list(constant.CONSUMER_TYPES[:get_consumer_levels(ds)])
def get_consumer_granularity(ds, specified_gran):
"""Converts CONSUMERALL to actual granularity
Parameters
-----------
ds: str
Demand sector name
Returns
-------
one of CONSUMERTYPE1, CONSUMERTYPE2
"""
if specified_gran != "CONSUMERALL":
return specified_gran
if get_consumer_levels(ds) == 1:
return "CONSUMERTYPE1"
else:
return "CONSUMERTYPE2"
def get_geographic_granularity(demand_sector,
energy_service,
energy_carrier):
DS_ES_EC_DemandGranularity_Map = loaders.get_parameter(
"DS_ES_EC_DemandGranularity_Map")
granularity_map = DS_ES_EC_DemandGranularity_Map.set_index(['DemandSector',
'EnergyService',
'EnergyCarrier'])
return granularity_map.loc[(demand_sector,
energy_service,
energy_carrier)]['GeographicGranularity']
def get_type(demand_sector, energy_service):
"""find type of service BOTTOMUP,EXTRANEOUS,GDPELASTICITY or RESIDUAL
"""
DS_ES_Map = loaders.get_parameter('DS_ES_Map')
DS_ES_Map = DS_ES_Map.set_index(['DemandSector', 'EnergyService'])
return DS_ES_Map.loc[(demand_sector, energy_service)]['InputType']
def get_BaseYearDemand(demand_sector):
"""loader function for parameter BaseYearDemand
"""
return get_demand_sector_parameter('BaseYearDemand',
demand_sector)
def get_DemandElasticity(demand_sector):
"""loader function for parameter DemandElasticity
"""
return get_demand_sector_parameter('DemandElasticity',
demand_sector)
def get_ExtraneousDemand(demand_sector):
"""loader function for parameter ExtraneousDemand
"""
extraneous = get_demand_sector_parameter('ExtraneousDemand',
demand_sector)
return extraneous
def get_ST_Efficiency(demand_sector):
"""ST_Efficiency loader function
"""
return get_demand_sector_parameter("ST_Efficiency",
demand_sector)
def get_ST_EmissionDetails(demand_sector):
"""ST_EmissionDetails loader function
"""
return get_demand_sector_parameter("ST_EmissionDetails",
demand_sector)
def get_ResidualDemand(demand_sector):
"""loader function for parameter ResidualDemand
"""
return get_demand_sector_parameter("ResidualDemand",
demand_sector)
def get_NumConsumers(demand_sector):
"""loader function for parameter NumConsumers
"""
return get_demand_sector_parameter('NumConsumers',
demand_sector)
def get_NumInstances(demand_sector, energy_service):
"""loader function for parameter NumInstances
"""
return get_DS_ES_parameter('NumInstances',
demand_sector,
energy_service)
def get_EfficiencyLevelSplit(demand_sector, energy_service):
"""loader function for parameter EfficiencyLevelSplit
"""
return get_DS_ES_parameter('EfficiencyLevelSplit',
demand_sector,
energy_service)
def get_ES_Demand(demand_sector,
energy_service,
service_tech):
"""loader function for parameter ES_Demand
should not be used directly. use loaders.get_parameter instead.
"""
prefix = f"{service_tech}_"
filepath = find_custom_DS_ES_filepath(demand_sector,
energy_service,
'ES_Demand',
prefix)
logger.debug(f"Reading {prefix}ES_Demand from file {filepath}")
return pd.read_csv(filepath)
def get_Penetration(demand_sector,
energy_service,
ST_combination):
"""loader function for parameter Penetration
"""
for item in itertools.permutations(ST_combination):
prefix = constant.ST_SEPARATOR_CHAR.join(
item) + constant.ST_SEPARATOR_CHAR
filepath = find_custom_DS_ES_filepath(demand_sector,
energy_service,
'Penetration',
prefix)
logger.debug(f"Searching for file {filepath}")
if os.path.exists(filepath):
logger.debug(f"Reading {prefix} from file {filepath}")
return pd.read_csv(filepath)
def get_demand_sector_parameter(param_name,
demand_sector):
"""loads demand sector parameter which lies inside demand_sector folder
"""
filepath = find_custom_demand_path(demand_sector, param_name)
logger.debug(f"Reading {param_name} from file {filepath}")
cols = list(filemanager.demand_specs()[param_name]['columns'].keys())
d = pd.read_csv(filepath)
return d[[c for c in cols if c in d.columns]]
def get_DS_ES_parameter(param_name,
demand_sector,
energy_service):
"""loads parameter which is inside demand_sector/energy_service folder
"""
filepath = find_custom_DS_ES_filepath(demand_sector,
energy_service,
param_name,
"")
logger.debug(f"Reading {param_name} from file {filepath}")
cols = list(filemanager.demand_specs()[param_name]['columns'].keys())
d = pd.read_csv(filepath)
return d[[c for c in cols if c in d.columns]]
def find_custom_DS_ES_filepath(demand_sector,
energy_service,
name,
prefix):
"""find actual location of data in case some data lies in scenario
"""
return find_custom_demand_path(demand_sector,
name,
energy_service,
prefix)
def find_custom_demand_path(demand_sector,
name,
energy_service="",
prefix=""):
"""find actual location of data in case some data lies in scenario
"""
return filemanager.find_filepath(name,
demand_sector,
energy_service,
fileprefix=prefix)
def get_mapped_items(DS_ES_EC_Map):
"""returns list of ECS from DS_ES_EC_Map
"""
return fs.flatten(fs.drop_columns(DS_ES_EC_Map, 2))
def get_RESIDUAL_ECs(DS_ES_Map, DS_ES_EC_Map):
df = DS_ES_Map.query("InputType == 'RESIDUAL'")[
['DemandSector', 'EnergyService']]
DS_ES = zip(df['DemandSector'], df['EnergyService'])
ECs = {(DS, ES): row[2:]
for DS, ES in DS_ES
for row in DS_ES_EC_Map if DS == row[0] and ES == row[1]}
return ECs
def derive_ES_EC(demand_sector, input_type):
"""return set of ES,EC combinations for given demand_sector and input_type but not_BOTTOMUP
"""
DS_ES_Map = loaders.get_parameter('DS_ES_Map')
DS_ES_EC_Map = loaders.get_parameter('DS_ES_EC_Map')
es_ec = fs.concat(*[[(row[1], ec) for ec in row[2:]]
for row in DS_ES_EC_Map if row[0] == demand_sector])
return [(es, ec) for es, ec in es_ec if len(DS_ES_Map.query(f"DemandSector=='{demand_sector}' & EnergyService=='{es}' & InputType=='{input_type}'")) > 0]
def check_RESIDUAL_EC(DS_ES_Map, DS_ES_EC_Map):
"""Each EC specified for a <DS, ES> combination,
whose InputType in DS_ES_Map is RESIDUAL,
must occur at least once in another
<DS, ES> combination for the same DS
"""
def x_in_y(x, y):
return any([ix in y for ix in x])
ECS = get_RESIDUAL_ECs(DS_ES_Map, DS_ES_EC_Map)
items1 = [row
for row in DS_ES_EC_Map
for DS, ES in ECS if row[0] == DS and row[1] != ES and x_in_y(ECS[(DS, ES)], row[2:])]
if len(items1) == 0 and ECS:
DS_ES_ST = expand_DS_ES_ST()
ST_Info = loaders.get_parameter('ST_Info')
items2 = []
for ECs in ECS.values():
for EC in ECs:
STS = ST_Info.query(f"EnergyCarrier == '{EC}'")[
'ServiceTech']
items2.extend([row for row in DS_ES_ST for DS, ES in ECS if row[0]
== DS and row[1] != ES and x_in_y(STS, row[2:])])
return not ECS or len(items1) > 0 or len(items2) > 0
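# Illustrative sketch with hypothetical rows (not shipped rumi inputs): a
# RESIDUAL <DS, ES> passes when each of its ECs recurs under another ES of
# the same DS.
#
# DS_ES_Map = pd.DataFrame(
#     [("RES", "LIGHT", "RESIDUAL"), ("RES", "COOK", "EXTRANEOUS")],
#     columns=["DemandSector", "EnergyService", "InputType"])
# DS_ES_EC_Map = [["RES", "LIGHT", "ELEC"],
#                 ["RES", "COOK", "ELEC"]]  # ELEC recurs -> check passes
# assert check_RESIDUAL_EC(DS_ES_Map, DS_ES_EC_Map)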
def are_BOTTOMUP(DS_ES_X_Map, DS_ES_Map):
DS_ES = fs.transpose(fs.take_columns(DS_ES_X_Map, 2))
df = fs.combined_key_subset(DS_ES, DS_ES_Map).query(
"InputType != 'BOTTOMUP'")
return len(df) == 0
def not_BOTTOMUP(DS_ES_X_Map, DS_ES_Map):
DS_ES = fs.transpose(fs.take_columns(DS_ES_X_Map, 2))
df = fs.combined_key_subset(DS_ES, DS_ES_Map).query(
"InputType == 'BOTTOMUP'")
return len(df) == 0
def check_ALL_DS(DS_ES_X_Map):
"""
ES used with ALL as DS can not be used with any other DS.
This function checks if this is true.
"""
ES_with_ALL = [row[1] for row in DS_ES_X_Map if row[0] == "ALL"]
ES_without_ALL = [ES for ES in ES_with_ALL
for row in DS_ES_X_Map if row[0] != "ALL"]
return len(ES_without_ALL) == 0
def listcols(df):
return [df[c] for c in df.columns]
def check_ALL_ES(DS_ES_EC_DemandGranularity_Map):
"""function for validation
"""
DS_EC_ALL = DS_ES_EC_DemandGranularity_Map.query(
"EnergyService == 'ALL'")[['DemandSector', 'EnergyCarrier']]
DS_EC_NOALL = DS_ES_EC_DemandGranularity_Map.query(
"EnergyService != 'ALL'")[['DemandSector', 'EnergyCarrier']]
ALL = set(zip(*listcols(DS_EC_ALL)))
NOALL = set(zip(*listcols(DS_EC_NOALL)))
return not ALL & NOALL
def expand_DS_ALL(BOTTOMUP):
"""
Expands Map when DS is ALL
"""
if BOTTOMUP:
cond = "=="
data = loaders.load_param("DS_ES_ST_Map")
else:
data = loaders.load_param("DS_ES_EC_Map")
cond = "!="
DS_ES_Map = loaders.load_param("DS_ES_Map")
ESs = [row for row in data if row[0] == 'ALL']
for row in ESs:
ES = row[1]
data.remove(row)
nonbottomup = DS_ES_Map.query(
f"EnergyService == '{ES}' & InputType {cond} 'BOTTOMUP'")
if len(nonbottomup) > 0:
ds = nonbottomup['DemandSector']
for eachds in ds:
newrow = row.copy()
newrow[0] = eachds
data.append(newrow)
return data
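# Expansion sketch (hypothetical rows): an entry ["ALL", "LIGHT", ...] is
# removed and re-added once per DemandSector whose (DS, "LIGHT") pair has the
# matching InputType -- "== BOTTOMUP" for the ST map, "!= BOTTOMUP" for the
# EC map -- yielding e.g. ["RES", "LIGHT", ...] and ["COM", "LIGHT", ...].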
def expand_DS_ES_EC():
return expand_DS_ALL(BOTTOMUP=False)
def expand_DS_ES_ST():
return expand_DS_ALL(BOTTOMUP=True)
def is_valid(DS, EC):
DS_ES_EC_Map = loaders.load_param("DS_ES_EC_Map")
DS_ES_ST_Map = loaders.load_param("DS_ES_ST_Map")
ST_Info = loaders.get_parameter("ST_Info")
ECS = [row for row in DS_ES_EC_Map if row[0] == DS and row[1] == EC]
STS = ST_Info.query(f"EnergyCarrier == '{EC}'")['ServiceTech']
DSS = [row[0] for row in DS_ES_ST_Map for ST in STS if row[2] == ST]
return ECS or DS in DSS
@functools.lru_cache()
def expand_DS_ES_EC_DemandGranularity_Map():
DS_ES_EC_DemandGranularity_Map = loaders.load_param(
"DS_ES_EC_DemandGranularity_Map")
DS_ES_Map = loaders.get_parameter("DS_ES_Map")
data = DS_ES_EC_DemandGranularity_Map.to_dict(orient="records")
DSs = [d for d in data if d['EnergyService'] == 'ALL']
for DS in DSs:
data.remove(DS)
DemandSector = DS['DemandSector']
ALL_DS_ES = DS_ES_Map.query(f"DemandSector == '{DemandSector}'")[
['DemandSector', 'EnergyService']].to_dict(orient="records")
for item in ALL_DS_ES:
d = DS.copy()
d.update(item)
if is_valid(d['DemandSector'], d['EnergyCarrier']):
data.append(d)
return pd.DataFrame(data)
from challenge.agoda_cancellation_estimator import AgodaCancellationEstimator
from IMLearn.utils import split_train_test
from typing import Tuple
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
import numpy as np
import pandas as pd
import datetime
def load_data(filename: str, is_train: bool = False):
"""
Load Agoda booking cancellation dataset
Parameters
----------
filename: str
Path to the Agoda booking cancellation dataset
Returns
-------
Design matrix and response vector in either of the following formats:
1) Single dataframe with last column representing the response
2) Tuple of pandas.DataFrame and Series
3) Tuple of ndarray of shape (n_samples, n_features) and ndarray of shape (n_samples,)
"""
df = pd.read_csv(filename)
# Import Libraries, some are uncessary right now
import configparser
import pandas as pd
import numpy as np
import sys
import os
import random
import copy
import math
import scanpy as sc
from matplotlib import pyplot as plt
import matplotlib as mpl
import seaborn as sns
# null distribution fitting
from scipy.stats import norm
import scipy.stats  # needed for scipy.stats.t confidence intervals in plotCI
# bonferroni correction
from statsmodels.stats.multitest import multipletests
#CountsFile = sys.argv[1]
np.seterr(all = 'warn')
cfgFile = sys.argv[1] # '../switchy/SS2.ini'
# Helper functions
# Load the data a get filter into a usable form
def prepareData(CountsFile, datatype, highly_variable, n_highly_variable, onlyClones, remove_immune_receptors, normalize, filterCells):
""" Accepts: H5ad file where the adata.obs has a column "CLONE" denoting the clonal membership of the cell
dataype: "scaled" or anything else would make it return log
Returns: adata after filtering"""
adata = sc.read_h5ad(CountsFile)
adata, df = preprocessWScanpy(adata, datatype, highly_variable, n_highly_variable, remove_immune_receptors, normalize, filterCells)
# After filtering select only cells which are clones
if onlyClones == True:
# Logic for dropping non-clones from the klein dataset
#adata.obs.CLONE.fillna('None', inplace = True)
adata = adata[adata.obs.CLONE != 'NaN' ,:]
# Select only clones (applies to my dataset mostly)
selector = adata.obs.CLONE.value_counts() > 1
selector = selector[selector == True]
adata = adata[adata.obs.CLONE.isin(selector.index), :]
df = df[df.index.isin(adata.obs.index)]
return adata, df
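# Minimal usage sketch; the path and flag values are assumptions, not
# defaults shipped with this script:
# adata, df = prepareData("counts.h5ad", datatype="log",
#                         highly_variable=True, n_highly_variable=2000,
#                         onlyClones=True, remove_immune_receptors=False,
#                         normalize=True, filterCells=True)
# df is then a cells x genes DataFrame restricted to clonal cells.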
def readConfig(cfgFile):
config = configparser.ConfigParser()
config.read(cfgFile)
stat_parameters = config['stat_parameters']
io = config['IO']
CountsFile = io['CountsFile']
out_dir = io['out_dir']
return stat_parameters, io, config
# Filter Genes and Cells to get a manageable datafram
def preprocessWScanpy(adata, datatype, highly_variable, n_highly_variable, remove_immune_receptors, normalize, filterCells):
# TODO: make this into an argument
# What is the best way to control parameters, probably a yaml file?
#sc.pp.calculate_qc_metrics(adata, inplace=True)
if remove_immune_receptors == True:
immune_receptors = pd.read_csv('/home/mswift/B_cells/CSR/sc_RNAseq/data_tables/metadata/immune_receptor_genes_keepConstantRegion.csv', index_col=0)
immune_receptors.columns = ['genes']
print("removing variable immune receptor genes which may drive clustering")
adata = adata[:, ~adata.var.index.isin(immune_receptors.genes)]
if filterCells == True:
# Filter Cells and Genes
sc.pp.filter_cells(adata, min_genes=800, inplace = True)
sc.pp.filter_cells(adata, min_counts=100000, inplace = True)
# always filter out the lowest expressed genes for computation time
sc.pp.filter_genes(adata, min_cells=4, inplace = True)
sc.pp.filter_genes(adata, min_counts=200, inplace = True)
print(adata.obs.shape, adata.var.shape, "shape of adata after filtering ")
# Make parameter in cfg
if normalize == True:
sc.pp.normalize_total(adata, target_sum=1e6)
sc.pp.log1p(adata, base = 10)
adata.raw = adata
sc.pp.highly_variable_genes(adata, n_top_genes=n_highly_variable)
# datatype logic
if datatype == 'scaled':
sc.pp.scale(adata)
else:
pass
#Subset to highly variable gene
if highly_variable == True:
adata = adata[:,adata.var['highly_variable'] == True]
highly_variable_genes = adata.var.index[adata.var["highly_variable"] == True]
df = convertSparsetoDataFrame(adata)
return adata, df
def convertSparsetoDataFrame(adata):
""" Input: anndata object with sparse matrix as .X attribute
Returns: Pandas dataframe with rows as cells and columns as genes
My take: This is inefficient but convenient, I wrote the code based on this, which is in hindsight a bad idea, but it is more readable possibly?"""
# Get the gene expression values for each cell x gene
columns = adata.var.index.to_list()
index = adata.obs.index.to_list()
try:
denseArray = adata.X.toarray()
except:
denseArray = adata.X
df = pd.DataFrame(data = denseArray, index = index , columns = columns )
return df
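# Usage note: df = convertSparsetoDataFrame(adata) gives convenient
# df.loc[cell, gene] access, at the cost of densifying the matrix
# (memory on the order of n_cells * n_genes * 8 bytes).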
def plotWaterfall(df, adata_obs, gene, label):
LabelsTesting = adata_obs.copy()
# Implementing the Hodgkin Protocol
fig, ax1 = plt.subplots(1,1)
LabelsTesting.loc[:,gene] = df[gene]
order = LabelsTesting.groupby(label)[gene].mean().sort_values(ascending = False).index
g = sns.stripplot(ax=ax1, data = LabelsTesting, x = LabelsTesting[label], y = gene, order = order, color = None)
#save_figure(fig, '{}_{}'.format(label, str(gene)))
return g
def plotCI(df, adata_obs, num_shuffles, gene, label, alpha):
# This is expensive to do twice, like this because I really am only plotting a subset of hits
LabelsTesting = pd.merge(adata_obs[label], df[gene], left_index=True, right_index=True)
tested_gene = []
statistics = []
# get the ordered means of the true labeling
observedlabel_mean = LabelsTesting.groupby(label)[gene].mean().sort_values(ascending = False)
ci_df = pd.DataFrame(observedlabel_mean)
# set up shuffling loop
for i in range(num_shuffles):
# create copy out of superstition
LabelsTestingCopy = LabelsTesting.copy(deep = True)
# shuffle labels
LabelsTestingCopy[label] = np.random.permutation(LabelsTestingCopy[label].values)
#
shuffled_means = LabelsTestingCopy.groupby(label)[gene].mean()
ci_df = pd.merge(ci_df, shuffled_means, left_index=True, right_index=True)
#ci_df.iloc[:,1:].mean()
true_ordered_means = ci_df.iloc[:,0]
shuffled_means = ci_df.iloc[:,1:]
# Using T distribution
shuffled_means['lower'] = shuffled_means.apply(lambda row: scipy.stats.t.interval(alpha, row.shape[0]-1, loc = row.median(), scale=row.sem())[0], axis = 1)
shuffled_means['upper'] = shuffled_means.apply(lambda row: scipy.stats.t.interval(alpha, row.shape[0]-1, loc = row.median(), scale=row.sem())[1], axis = 1)
shuffled_means['lower_quant'] = shuffled_means.quantile(q = 0.025, axis = 1)
shuffled_means['upper_quant'] = shuffled_means.quantile(q = 0.975, axis = 1)
# merge data for plotting
data = pd.merge(true_ordered_means, shuffled_means, left_index = True, right_index= True)
data.reset_index(inplace = True)
data[label] = data[label].astype('str')
fig, ax = plt.subplots()
x = data[label]
# Frequentist confidence intervals
f_lowci = data['lower_quant']
f_upci = data['upper_quant']
true_data = true_ordered_means
g_lowci = data['lower']
g_upci = data['upper']
ax.plot(x, true_data, label = 'True Data Order')
#ax.plot(x, upci)
#ax.plot(x, lowci)
ax.fill_between(x, f_lowci, f_upci, alpha = 0.2, color = 'k', label = 'CI using real quantiles')
ax.fill_between(x, g_lowci, g_upci, alpha = 0.2, color = 'r', label = 'CI using T distribution')
plt.xlabel(label)
plt.ylabel(gene + ' \n mean expression (log CPM)')
plt.xticks()
ax.legend()
return data, shuffled_means
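# The two CI flavors above, on a toy null (illustrative numbers only):
# null = pd.Series(np.random.normal(0, 1, 1000))
# t_ci = scipy.stats.t.interval(0.95, len(null) - 1,
#                               loc=null.median(), scale=null.sem())
# q_ci = (null.quantile(0.025), null.quantile(0.975))
# t_ci is narrow (uncertainty of the mean, via sem); q_ci spans the null
# distribution itself, which is why the two shaded bands differ.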
def plotTestHist(df, adata_obs, num_shuffles, gene, label):
# This is expensive to do twice, like this because I really am only plotting a subset of hits
LabelsTesting = adata_obs.copy()
tested_gene = []
statistics = []
LabelsTesting[gene] = df.loc[:,gene]
# get the ordered means of the true labeling
mean_shuffled_variances = []
observedlabel_var = LabelsTesting.groupby(label)[gene].var()
mean_observedlabel_variance = observedlabel_var.mean()
for i in range(num_shuffles):
# create copy
LabelsTestingCopy = LabelsTesting.copy(deep = True)
# shuffle labels
LabelsTestingCopy.loc[:,label] = np.random.permutation(LabelsTestingCopy[label].values)
shuffled_variances = LabelsTestingCopy.groupby(label)[gene].var()
# No need to have it ordered at this point
mean_shuffled_variances.append(shuffled_variances.mean())
mean_shuffled_variances = pd.Series(mean_shuffled_variances)
# Plot
fig, ax = plt.subplots(1,1)
data = mean_shuffled_variances
xmax = data.max() + 0.2
bins = np.linspace(0, xmax, 100)
plt.hist(data, bins = bins, color = 'midnightblue', alpha = 0.5)
plt.hist(data, bins = bins, color = 'midnightblue', histtype='step')
plt.axvline(mean_observedlabel_variance, 0, 1, c = 'red', ls = '--')
plt.yscale('log')
plt.xscale('linear')
plt.xlim(0, xmax)
plt.title(gene+'_'+label)
#save_figure(fig, gene+'_'+label, 'figures/permutationTests')
def compareVariances(df, LabelsTesting, num_shuffles, label, gene):
"For each gene compare the mean variances of a shuffled labeling to the observed labeling"
LabelsTesting.loc[:,'gene_name'] = df[gene]
mean_shuffled_variances = []
observedlabel_var = LabelsTesting.groupby(label)['gene_name'].var()
mean_observedlabel_variance = observedlabel_var.mean()
# do the shuffling
for i in range(num_shuffles):
# create copy
LabelsTestingCopy = LabelsTesting.copy(deep = True)
#shuffle labels
LabelsTestingCopy.loc[:, label] = np.random.permutation(LabelsTesting[label].values)
# groupby by label and compute variance
shuffled_variances = LabelsTestingCopy.groupby(label)['gene_name'].var()
# Mean variance of every labeled group
mean_shuffled_variances.append(shuffled_variances.mean())
#make list into series TODO refactor to just add to a series?
#This is the distribution of shuffled variances
mean_shuffled_variances = pd.Series(mean_shuffled_variances)
# How often the shuffled mean variances come out <= the observed mean variance;
# a high count means the intragroup variance is high even under the true labels
test = mean_shuffled_variances <= mean_observedlabel_variance
# divide the count of True's (shuffled <= observed) by the number of tests:
# a score of 1 means the shuffled variances were always <= the observed one,
# a score of 0 means they were always greater
# this is a frequentist p value? kinda ... we'll call it a score
gene_score = test.sum() / test.shape[0]
return gene_score, gene, mean_shuffled_variances, mean_observedlabel_variance
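def _sketchCompareVariances(num_shuffles=200):
    """Toy-data sketch of the score above (not part of the analysis; all names
    here are made up): score = #(shuffled mean variance <= observed mean
    variance) / num_shuffles. A gene that cleanly splits by clone should score
    near 0, because shuffling mixes the groups and inflates the within-group
    variance."""
    # two clones, 50 cells each, with a strong expression shift between them
    vals = np.concatenate([np.random.normal(0, 1, 50), np.random.normal(5, 1, 50)])
    toy = pd.DataFrame({'g1': vals})
    labels = pd.DataFrame({'clone': ['A'] * 50 + ['B'] * 50}, index=toy.index)
    return compareVariances(toy, labels, num_shuffles, 'clone', 'g1')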
def calculatePvalue(mean_shuffled_variances, mean_observedlabel_variance):
# Fit a normal distribution to the null variances I calculated
mu, sigma = norm.fit(mean_shuffled_variances)
if sigma == 0:
#print('no p value possible for because null is not gaussian (sigma of zero)')
pvalue = np.nan
else:
pvalue = norm.cdf(mean_observedlabel_variance, loc = mu, scale = sigma)
return pvalue
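# Chained with the sketch above: mu and sigma are fit to the shuffled (null)
# mean variances, so the p value is P(null <= observed) under that Gaussian --
# near 0 when the labels capture real structure (observed within-group variance
# far below the null), near 1 when grouping by label *increases* the variance.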
def permuteCompareVariances2(df, adata_obs, num_shuffles, label):
""" df is the cell x gene dataframe, adata_obs is metadataframe that
contains cells as the index and a column called the label"""
# Get Annotation (clone data) and only what is in the scaled or transformed gene expression df
LabelsTesting = adata_obs[adata_obs.index.isin(df.index)]
tested_genes = []
gene_scores = []
pvals = []
genes = df.columns
print("Running Permutation Test with", label, "and", num_shuffles, 'Shuffles')
for gene in genes:
gene_score, gene, mean_shuffled_variances, mean_observedlabel_variance = compareVariances(df, LabelsTesting, num_shuffles, label, gene)
tested_genes.append(gene)
gene_scores.append(gene_score)
# Calculate p-value by fitting Gaussian to null distribution
#model = 'Gaussian' TODO could be other models
pval = calculatePvalue(mean_shuffled_variances, mean_observedlabel_variance)
pvals.append(pval)
print('Testing', gene)
scoresColumn = | pd.Series(gene_scores) | pandas.Series |
# coding: utf-8
# # 3 class discrimination of trialtype.
# ### Using sklearn and skflow. Comparison to each of the 4 mice
# In[163]:
import tensorflow as tf
import tensorflow.contrib.learn as skflow
import numpy as np
import matplotlib.pyplot as plt
# get_ipython().magic('matplotlib inline')
import pandas as pd
import seaborn as sns
import random
from scipy.signal import resample
from scipy.stats import zscore
from scipy import interp
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn import metrics
from sklearn import cross_validation
from IPython import display # For plotting intermediate results
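# NB: tensorflow.contrib.learn (formerly skflow) and sklearn.cross_validation
# date this script to the TF 0.x/1.x and scikit-learn <0.18 era; on newer
# stacks the equivalents live in tf.estimator and sklearn.model_selection.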
# In[365]:
# data loading function
def data_loader(mouse_name):
theta = pd.read_csv('~/work/whiskfree/data/theta_' + mouse_name + '.csv',header=None)
kappa = pd.read_csv('~/work/whiskfree/data/kappa_' + mouse_name + '.csv',header=None)
tt = pd.read_csv('~/work/whiskfree/data/trialtype_' + mouse_name + '.csv',header=None)
ch = pd.read_csv('~/work/whiskfree/data/choice_' + mouse_name + '.csv',header=None)
return theta, kappa, tt, ch
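# Minimal usage sketch (assumes the per-mouse CSVs exist under ~/work/whiskfree/data;
# the mouse name is whatever suffix the files carry -- '32' below is made up):
#   theta, kappa, tt, ch = data_loader('32')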
def data_parser(theta,kappa,tt,ch,tt_ch):
theta_r = np.array([[resample(theta.values.squeeze()[i,950:1440],50)] for i in range(0,theta.shape[0])])
theta_r = zscore(theta_r.squeeze(),axis=None)
kappa_r = np.array([[resample(kappa.values.squeeze()[i,950:1440],50)] for i in range(0,kappa.shape[0])])
kappa_r = zscore(kappa_r.squeeze(),axis=None)
kappa_df = pd.DataFrame(kappa_r)
theta_df = pd.DataFrame(theta_r)
both_df = | pd.concat([theta_df,kappa_df],axis=1) | pandas.concat |
"""Tests for the sdv.constraints.tabular module."""
import uuid
from datetime import datetime
from unittest.mock import Mock
import numpy as np
import pandas as pd
import pytest
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.constraints.tabular import (
Between, ColumnFormula, CustomConstraint, GreaterThan, Negative, OneHotEncoding, Positive,
Rounding, Unique, UniqueCombinations)
def dummy_transform_table(table_data):
return table_data
def dummy_reverse_transform_table(table_data):
return table_data
def dummy_is_valid_table(table_data):
return [True] * len(table_data)
def dummy_transform_table_column(table_data, column):
return table_data
def dummy_reverse_transform_table_column(table_data, column):
return table_data
def dummy_is_valid_table_column(table_data, column):
return [True] * len(table_data[column])
def dummy_transform_column(column_data):
return column_data
def dummy_reverse_transform_column(column_data):
return column_data
def dummy_is_valid_column(column_data):
return [True] * len(column_data)
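# The dummy trios above mirror the three call signatures CustomConstraint
# dispatches on: table-wide functions take (table_data), table+column functions
# take (table_data, column), and per-column functions take (column_data). The
# tests below exercise each path, including the fallback where a TypeError from
# the two-argument call triggers a retry with just the column data.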
class TestCustomConstraint():
def test___init__(self):
"""Test the ``CustomConstraint.__init__`` method.
The ``transform``, ``reverse_transform`` and ``is_valid`` methods
should be replaced by the given ones, importing them if necessary.
Setup:
- Create dummy functions (created above this class).
Input:
- dummy transform and revert_transform + is_valid FQN
Output:
- Instance with all the methods replaced by the dummy versions.
"""
is_valid_fqn = __name__ + '.dummy_is_valid_table'
# Run
instance = CustomConstraint(
transform=dummy_transform_table,
reverse_transform=dummy_reverse_transform_table,
is_valid=is_valid_fqn
)
# Assert
assert instance._transform == dummy_transform_table
assert instance._reverse_transform == dummy_reverse_transform_table
assert instance._is_valid == dummy_is_valid_table
def test__run_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy transform function with ``table_data`` argument.
Side Effects:
- Run transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` argument.
Side Effects:
- Run reverse transform function once with ``table_data`` as input.
Output:
- applied identity transformation "table_data = reverse_transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table,
return_value=table_data)
# Run
instance = CustomConstraint(reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` argument.
Side Effects:
- Run is valid function once with ``table_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table)
# Run
instance = CustomConstraint(is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args
assert called[0][1] == 'a'
dummy_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_table_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy reverse transform function with ``table_data`` and ``column`` arguments.
Side Effects:
- Run reverse transform function once with ``table_data`` and ``column`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_table_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args
assert called[0][1] == 'a'
dummy_reverse_transform_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_table_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "table" and "column" based functions.
Setup:
- Pass dummy is valid function with ``table_data`` and ``column`` argument.
Side Effects:
- Run is valid function once with ``table_data`` and ``column`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_table_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args
assert called[0][1] == 'a'
dummy_is_valid_mock.assert_called_once()
pd.testing.assert_frame_equal(called[0][0], table_data)
np.testing.assert_array_equal(is_valid, expected_out)
def test__run_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy transform function with ``column_data`` argument.
Side Effects:
- Run transform function twice: first attempting the (``table_data``,
``column``) signature, then falling back to ``column_data`` as input.
Output:
- applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_transform_mock = Mock(side_effect=dummy_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', transform=dummy_transform_mock)
transformed = instance.transform(table_data)
# Asserts
called = dummy_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(transformed, dummy_transform_mock.return_value)
def test__run_reverse_transform_column(self):
"""Test the ``CustomConstraint._run`` method.
The ``_run`` method executes ``transform`` and ``reverse_transform``
based on the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy reverse transform function with ``column_data`` argument.
Side Effects:
- Run reverse transform function twice: first attempting the (``table_data``,
``column``) signature, then falling back to ``column_data`` as input.
Output:
- Applied identity transformation "table_data = transformed".
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_reverse_transform_mock = Mock(side_effect=dummy_reverse_transform_column,
return_value=table_data)
# Run
instance = CustomConstraint(columns='a', reverse_transform=dummy_reverse_transform_mock)
reverse_transformed = instance.reverse_transform(table_data)
# Asserts
called = dummy_reverse_transform_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
pd.testing.assert_frame_equal(
reverse_transformed, dummy_reverse_transform_mock.return_value)
def test__run_is_valid_column(self):
"""Test the ``CustomConstraint._run_is_valid`` method.
The ``_run_is_valid`` method executes ``is_valid`` based on
the signature of the functions. In this test, we evaluate
the execution of "column" based functions.
Setup:
- Pass dummy is valid function with ``column_data`` argument.
Side Effects:
- Run is valid function twice: first attempting the (``table_data``,
``column``) signature, then falling back to ``column_data`` as input.
Output:
- Return a list of [True] of length ``table_data``.
"""
# Setup
table_data = pd.DataFrame({'a': [1, 2, 3]})
dummy_is_valid_mock = Mock(side_effect=dummy_is_valid_column)
# Run
instance = CustomConstraint(columns='a', is_valid=dummy_is_valid_mock)
is_valid = instance.is_valid(table_data)
# Asserts
expected_out = [True] * len(table_data)
called = dummy_is_valid_mock.call_args_list
assert len(called) == 2
# call 1 (try)
assert called[0][0][1] == 'a'
pd.testing.assert_frame_equal(called[0][0][0], table_data)
# call 2 (catch TypeError)
pd.testing.assert_series_equal(called[1][0][0], table_data['a'])
np.testing.assert_array_equal(is_valid, expected_out)
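def _sketch_unique_combinations_roundtrip():
    """Minimal sketch, not a test (the leading underscore keeps pytest from
    collecting it), using only the public API exercised below: UniqueCombinations
    joins the constrained columns into one UUID-backed column on transform and
    restores them on reverse_transform."""
    data = pd.DataFrame({'b': ['d', 'e', 'f'], 'c': ['g', 'h', 'i']})
    constraint = UniqueCombinations(columns=['b', 'c'])
    constraint.fit(data)
    # transform -> reverse_transform is an identity on valid data
    restored = constraint.reverse_transform(constraint.transform(data))
    pd.testing.assert_frame_equal(restored, data)
    return restored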
class TestUniqueCombinations():
def test___init__(self):
"""Test the ``UniqueCombinations.__init__`` method.
It is expected to create a new Constraint instance and receiving the names of
the columns that need to produce unique combinations.
Side effects:
- instance._columns == columns
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns)
# Assert
assert instance._columns == columns
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='transform')
# Assert
assert instance.rebuild_columns == tuple(columns)
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``UniqueCombinations.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Setup
columns = ['b', 'c']
# Run
instance = UniqueCombinations(columns=columns, handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init__with_one_column(self):
"""Test the ``UniqueCombinations.__init__`` method with only one constraint column.
Expect a ``ValueError`` because UniqueCombinations requires at least two
constraint columns.
Side effects:
- A ValueError is raised
"""
# Setup
columns = ['c']
# Run and assert
with pytest.raises(ValueError):
UniqueCombinations(columns=columns)
def test_fit(self):
"""Test the ``UniqueCombinations.fit`` method.
The ``UniqueCombinations.fit`` method is expected to:
- Call ``UniqueCombinations._valid_separator``.
- Find a valid separator for the data and generate the joint column name.
Input:
- Table data (pandas.DataFrame)
"""
# Setup
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
# Run
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
instance.fit(table_data)
# Asserts
expected_combinations = pd.DataFrame({
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
assert instance._separator == '#'
assert instance._joint_column == 'b#c'
pd.testing.assert_frame_equal(instance._combinations, expected_combinations)
def test_is_valid_true(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_false(self):
"""Test the ``UniqueCombinations.is_valid`` method.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['D', 'E', 'F'],
'c': ['g', 'h', 'i']
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_true(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data satisfies the constraint, result is a series of ``True`` values.
Input:
- Table data (pandas.DataFrame), satisfying the constraint.
Output:
- Series of ``True`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = pd.Series([True, True, True], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_is_valid_non_string_false(self):
"""Test the ``UniqueCombinations.is_valid`` method with non string columns.
If the input data doesn't satisfy the constraint, result is a series of ``False`` values.
Input:
- Table data (pandas.DataFrame), which does not satisfy the constraint.
Output:
- Series of ``False`` values (pandas.Series)
Side effects:
- Since the ``is_valid`` method needs ``self._combinations``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
incorrect_table = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [6, 7, 8],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
out = instance.is_valid(incorrect_table)
# Assert
expected_out = pd.Series([False, False, False], name='b#c#d')
pd.testing.assert_series_equal(expected_out, out)
def test_transform(self):
"""Test the ``UniqueCombinations.transform`` method.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns concatenated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c'].items()]
except ValueError:
assert False
def test_transform_non_string(self):
"""Test the ``UniqueCombinations.transform`` method with non strings.
It is expected to return a Table data with the columns concatenated by the separator.
Input:
- Table data (pandas.DataFrame)
Output:
- Table data transformed, with the columns as UUIDs.
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
out = instance.transform(table_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out_a = pd.Series(['a', 'b', 'c'], name='a')
pd.testing.assert_series_equal(expected_out_a, out['a'])
try:
[uuid.UUID(u) for c, u in out['b#c#d'].items()]
except ValueError:
assert False
def test_transform_not_all_columns_provided(self):
"""Test the ``UniqueCombinations.transform`` method.
If some of the columns needed for the transform are missing, and
``fit_columns_model`` is False, it will raise a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns, fit_columns_model=False)
instance.fit(table_data)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test_reverse_transform(self):
"""Test the ``UniqueCombinations.reverse_transform`` method.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
columns = ['b', 'c']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': ['d', 'e', 'f'],
'c': ['g', 'h', 'i']
})
pd.testing.assert_frame_equal(expected_out, out)
def test_reverse_transform_non_string(self):
"""Test the ``UniqueCombinations.reverse_transform`` method with a non string column.
It is expected to return the original data separating the concatenated columns.
Input:
- Table data transformed (pandas.DataFrame)
Output:
- Original table data, with the concatenated columns separated (pandas.DataFrame)
Side effects:
- Since the ``transform`` method needs ``self._joint_column``, method ``fit``
must be called as well.
"""
# Setup
table_data = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
columns = ['b', 'c', 'd']
instance = UniqueCombinations(columns=columns)
instance.fit(table_data)
# Run
transformed_data = instance.transform(table_data)
out = instance.reverse_transform(transformed_data)
# Assert
assert instance._combinations_to_uuids is not None
assert instance._uuids_to_combinations is not None
expected_out = pd.DataFrame({
'a': ['a', 'b', 'c'],
'b': [1, 2, 3],
'c': ['g', 'h', 'i'],
'd': [2.4, 1.23, 5.6]
})
pd.testing.assert_frame_equal(expected_out, out)
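def _sketch_greater_than_lifecycle():
    """Minimal sketch, not a test (hypothetical column names): the GreaterThan
    lifecycle the tests below exercise -- fit learns dtypes and the diff column
    name, transform encodes the distance as log(high - low + 1), and
    reverse_transform rebuilds the constrained side from the diff column."""
    data = pd.DataFrame({'low_col': [1, 2, 3], 'high_col': [4, 5, 6]})
    constraint = GreaterThan(low='low_col', high='high_col', strict=True)
    constraint.fit(data)
    assert all(constraint.is_valid(data))
    return constraint.reverse_transform(constraint.transform(data))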
class TestGreaterThan():
def test__validate_scalar(self):
"""Test the ``_validate_scalar`` method.
This method validates the inputs if and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = 'b'
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = 'b'
scalar = 'high'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
assert out == ['b']
def test__validate_scalar_list(self):
"""Test the ``_validate_scalar`` method.
This method validates the inputs and transforms them into
the correct format.
Input:
- scalar_column = 0
- column_names = ['b']
Output:
- column_names == ['b']
"""
# Setup
scalar_column = 0
column_names = ['b']
scalar = 'low'
# Run
out = GreaterThan._validate_scalar(scalar_column, column_names, scalar)
# Assert
assert out == ['b']
def test__validate_scalar_error(self):
"""Test the ``_validate_scalar`` method.
This method raises an error when the scalar column is a list.
Input:
- scalar_column = [0]
- column_names = 'b'
Side effect:
- Raise error since the scalar is a list
"""
# Setup
scalar_column = [0]
column_names = 'b'
scalar = 'high'
# Run / Assert
with pytest.raises(TypeError):
GreaterThan._validate_scalar(scalar_column, column_names, scalar)
def test__validate_inputs_high_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
Output:
- low == ['a']
- high == 3
- constraint_columns = ('a')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar='high', drop=None)
# Assert
assert low == ['a']
assert high == 3
assert constraint_columns == ('a',)
def test__validate_inputs_low_is_scalar(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 3
- high = 'b'
- scalar = 'low'
- drop = None
Output:
- low == 3
- high == ['b']
- constraint_columns = ('b')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=3, high='b', scalar='low', drop=None)
# Assert
assert low == 3
assert high == ['b']
assert constraint_columns == ('b',)
def test__validate_inputs_scalar_none(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 3 # where 3 is a column name
- scalar = None
- drop = None
Output:
- low == ['a']
- high == [3]
- constraint_columns = ('a', 3)
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=3, scalar=None, drop=None)
# Assert
assert low == ['a']
assert high == [3]
assert constraint_columns == ('a', 3)
def test__validate_inputs_scalar_none_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a']
- high = ['b', 'c']
- scalar = None
- drop = None
Output:
- low == ['a']
- high == ['b', 'c']
- constraint_columns = ('a', 'b', 'c')
"""
# Setup / Run
low, high, constraint_columns = GreaterThan._validate_inputs(
low=['a'], high=['b', 'c'], scalar=None, drop=None)
# Assert
assert low == ['a']
assert high == ['b', 'c']
assert constraint_columns == ('a', 'b', 'c')
def test__validate_inputs_scalar_none_two_lists(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = ['a', 0]
- high = ['b', 'c']
- scalar = None
- drop = None
Side effect:
- Raise error because both high and low are more than one column
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=['a', 0], high=['b', 'c'], scalar=None, drop=None)
def test__validate_inputs_scalar_unknown(self):
"""Test the ``_validate_inputs`` method.
This method checks ``scalar`` and formats the data based
on what is expected to be a list or not. In addition, it
returns the ``constraint_columns``.
Input:
- low = 'a'
- high = 'b'
- scalar = 'unknown'
- drop = None
Side effect:
- Raise error because scalar is unknown
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high='b', scalar='unknown', drop=None)
def test__validate_inputs_drop_error_low(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 2
- high = 'b'
- scalar = 'low'
- drop = 'low'
Side effect:
- Raise error because ``drop`` equals ``scalar``
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low=2, high='b', scalar='low', drop='low')
def test__validate_inputs_drop_error_high(self):
"""Test the ``_validate_inputs`` method.
Make sure the method raises an error if ``drop``==``scalar``
when ``scalar`` is not ``None``.
Input:
- low = 'a'
- high = 3
- scalar = 'high'
- drop = 'high'
Side effect:
- Raise error because ``drop`` equals ``scalar``
"""
# Run / Assert
with pytest.raises(ValueError):
GreaterThan._validate_inputs(low='a', high=3, scalar='high', drop='high')
def test__validate_inputs_drop_success(self):
"""Test the ``_validate_inputs`` method.
Make sure the method succeeds when ``drop`` and ``scalar``
refer to different ends.
Input:
- low = 'a'
- high = 0
- scalar = 'high'
- drop = 'low'
Output:
- low = ['a']
- high = 0
- constraint_columns == ('a')
"""
# Run / Assert
low, high, constraint_columns = GreaterThan._validate_inputs(
low='a', high=0, scalar='high', drop='low')
assert low == ['a']
assert high == 0
assert constraint_columns == ('a',)
def test___init___(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes.
Input:
- low = 'a'
- high = 'b'
Side effects:
- instance._low == 'a'
- instance._high == 'b'
- instance._strict == False
"""
# Run
instance = GreaterThan(low='a', high='b')
# Asserts
assert instance._low == ['a']
assert instance._high == ['b']
assert instance._strict is False
assert instance._scalar is None
assert instance._drop is None
assert instance.constraint_columns == ('a', 'b')
def test___init__sets_rebuild_columns_if_not_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should only be set if the ``handling_strategy``
is not ``reject_sampling``.
Side effects:
- instance.rebuild_columns are set
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='transform')
# Assert
assert instance.rebuild_columns == ['b']
def test___init__does_not_set_rebuild_columns_reject_sampling(self):
"""Test the ``GreaterThan.__init__`` method.
The rebuild columns should not be set if the ``handling_strategy``
is ``reject_sampling``.
Side effects:
- instance.rebuild_columns are empty
"""
# Run
instance = GreaterThan(low='a', high='b', handling_strategy='reject_sampling')
# Assert
assert instance.rebuild_columns == ()
def test___init___high_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
is set to ``'high'``.
Input:
- low = 'a'
- high = 0
- strict = True
- drop = 'low'
- scalar = 'high'
Side effects:
- instance._low == 'a'
- instance._high == 0
- instance._strict == True
- instance._drop = 'low'
- instance._scalar == 'high'
"""
# Run
instance = GreaterThan(low='a', high=0, strict=True, drop='low', scalar='high')
# Asserts
assert instance._low == ['a']
assert instance._high == 0
assert instance._strict is True
assert instance._scalar == 'high'
assert instance._drop == 'low'
assert instance.constraint_columns == ('a',)
def test___init___low_is_scalar(self):
"""Test the ``GreaterThan.__init__`` method.
The passed arguments should be stored as attributes. Make sure ``scalar``
is set to ``'high'``.
Input:
- low = 0
- high = 'a'
- strict = True
- drop = 'high'
- scalar = 'low'
Side effects:
- instance._low == 0
- instance._high == 'a'
- instance._strict == True
- instance._drop = 'high'
- instance._scalar == 'low'
"""
# Run
instance = GreaterThan(low=0, high='a', strict=True, drop='high', scalar='low')
# Asserts
assert instance._low == 0
assert instance._high == ['a']
assert instance._strict is True
assert instance._scalar == 'low'
assert instance._drop == 'high'
assert instance.constraint_columns == ('a',)
def test___init___strict_is_false(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater_equal``
when ``strict`` is set to ``False``.
Input:
- low = 'a'
- high = 'b'
- strict = False
"""
# Run
instance = GreaterThan(low='a', high='b', strict=False)
# Assert
assert instance.operator == np.greater_equal
def test___init___strict_is_true(self):
"""Test the ``GreaterThan.__init__`` method.
Ensure that ``operator`` is set to ``np.greater``
when ``strict`` is set to ``True``.
Input:
- low = 'a'
- high = 'b'
- strict = True
"""
# Run
instance = GreaterThan(low='a', high='b', strict=True)
# Assert
assert instance.operator == np.greater
def test__init__get_columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b')
assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'high'
Side effects:
- self._columns_to_reconstruct == ['b']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
assert instance._columns_to_reconstruct == ['b']
def test__init__get_columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 'b'
- drop = 'low'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
assert instance._columns_to_reconstruct == ['a']
def test__init__get_columns_to_reconstruct_scalar_high(self):
"""Test the ``GreaterThan._get_columns_to_reconstruct`` method.
This method returns:
- ``_high`` if drop is "high"
- ``_low`` if drop is "low"
- ``_low`` if scalar is "high"
- ``_high`` otherwise
Setup:
- low = 'a'
- high = 0
- scalar = 'high'
Side effects:
- self._columns_to_reconstruct == ['a']
"""
# Setup
instance = GreaterThan(low='a', high=0, scalar='high')
assert instance._columns_to_reconstruct == ['a']
def test__get_value_column_list(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
"""
# Setup
instance = GreaterThan(low='a', high='b')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = table_data[['a']].values
np.testing.assert_array_equal(out, expected)
def test__get_value_scalar(self):
"""Test the ``GreaterThan._get_value`` method.
This method returns a scalar or a ndarray of values
depending on the type of the ``field``.
Input:
- Table with given data.
- field = 'low'
- scalar = 'low'
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
out = instance._get_value(table_data, 'low')
# Assert
expected = 3
assert out == expected
def test__get_diff_columns_name_low_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should equal the given column names
with a '#' token appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b#'], scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b##']
assert out == expected
def test__get_diff_columns_name_high_is_scalar(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned names should equal the given column names
with a '#' token appended.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a#', 'b#']
assert out == expected
def test__get_diff_columns_name_scalar_is_none(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned name should be the two column names
joined with a token between them.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b#', scalar=None)
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b#': [4, 5, 6]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b##a']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_low(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned name should be the two column names
joined with a token between them.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=['a#', 'c'], high='b', scalar=None)
table_data = pd.DataFrame({
'a#': [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['a##b', 'c#b']
assert out == expected
def test__get_diff_columns_name_scalar_is_none_multi_column_high(self):
"""Test the ``GreaterThan._get_diff_columns_name`` method.
The returned name should be the two column names
joined with a token between them.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low=0, high=['b', 'c'], scalar=None)
table_data = pd.DataFrame({
0: [1, 2, 4],
'b': [4, 5, 6],
'c#': [7, 8, 9]
})
out = instance._get_diff_columns_name(table_data)
# Assert
expected = ['b#0', 'c#0']
assert out == expected
def test__check_columns_exist_success(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
instance._check_columns_exist(table_data, 'high')
def test__check_columns_exist_error(self):
"""Test the ``GreaterThan._check_columns_exist`` method.
This method raises an error if the specified columns in
``low`` or ``high`` do not exist.
Input:
- Table with given data.
"""
# Setup
instance = GreaterThan(low='a', high='c')
# Run / Assert
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6]
})
instance._check_columns_exist(table_data, 'low')
with pytest.raises(KeyError):
instance._check_columns_exist(table_data, 'high')
def test__fit_only_one_datetime_arg(self):
"""Test the ``Between._fit`` method by passing in only one arg as datetime.
If only one of the high / low args is a datetime type, expect a ValueError.
Input:
- low is a float column
- high is a datetime
Output:
- n/a
Side Effects:
- ValueError
"""
# Setup
instance = GreaterThan(low='a', high=pd.to_datetime('2021-01-01'), scalar='high')
# Run and assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(ValueError):
instance._fit(table_data)
def test__fit__low_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='b')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_and_scalar_is_none(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='a', high=3)
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__low_is_not_found_scalar_is_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``low`` is set to a value not seen in ``table_data``.
Input:
- Table without ``low`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low='c', high=3, scalar='high')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__high_is_not_found_scalar_is_low(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should raise an error if
the ``high`` is set to a value not seen in ``table_data``.
Input:
- Table without ``high`` in columns.
Side Effect:
- KeyError.
"""
# Setup
instance = GreaterThan(low=3, high='c', scalar='low')
# Run / Assert
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6]
})
with pytest.raises(KeyError):
instance._fit(table_data)
def test__fit__columns_to_reconstruct_drop_high(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._high`` if ``instance_drop`` is `high`.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_drop_low(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to ``instance._low`` if ``instance_drop`` is `low`.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', drop='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_default(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` by default.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__columns_to_reconstruct_high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `low` if ``instance._scalar`` is ``'high'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._low``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['a']
def test__fit__columns_to_reconstruct_low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_columns_to_reconstruct``
to `high` if ``instance._scalar`` is ``'low'``.
Input:
- Table with two columns.
Side Effect:
- ``_columns_to_reconstruct`` is ``instance._high``
"""
# Setup
instance = GreaterThan(low='a', high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._columns_to_reconstruct == ['b']
def test__fit__diff_columns_one_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the one column in ``instance.constraint_columns`` plus a
token if there is only one column in that set.
Input:
- Table with one column.
Side Effect:
- ``_diff_columns`` == ``['a#']``
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({'a': [1, 2, 3]})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['a#']
def test__fit__diff_columns_multiple_columns(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should set ``_diff_columns``
to the two columns in ``instance.constraint_columns`` separated
by a token if both columns are in that set.
Input:
- Table with two column.
Side Effect:
- ``_diff_columns`` == ``['b#a']``
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
})
instance._fit(table_data)
# Asserts
assert instance._diff_columns == ['b#a']
def test__fit_int(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns with the high one
being made of integers.
Side Effect:
- The _dtype attribute gets `int` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'i' for dtype in instance._dtype])
def test__fit_float(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns with the high one
being made of float values.
Side Effect:
- The _dtype attribute gets `float` as the value even if the low
column has a different dtype.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_datetime(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should only learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``None``.
Input:
- Table that contains two constrained columns of datetimes.
Side Effect:
- The _dtype attribute gets `datetime` as the value.
"""
# Setup
instance = GreaterThan(low='a', high='b')
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01']),
'b': pd.to_datetime(['2020-01-02'])
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'M' for dtype in instance._dtype])
def test__fit_type__high_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``low`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'high'``.
Input:
- Table that contains two constrained columns with the low one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low='a', high=3, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1., 2., 3.],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_type__low_is_scalar(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of the ``high`` column as the ``_dtype`` attribute
if ``_scalar`` is ``'low'``.
Input:
- Table that contains two constrained columns with the high one
being made of floats.
Side Effect:
- The _dtype attribute gets `float` as the value.
"""
# Setup
instance = GreaterThan(low=3, high='b', scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9]
})
instance._fit(table_data)
# Asserts
assert all([dtype.kind == 'f' for dtype in instance._dtype])
def test__fit_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of each constrained column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with different dtype.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=0, scalar='high')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
def test__fit_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._fit`` method.
The ``GreaterThan._fit`` method should learn and store the
``dtype`` of each constrained column as the ``_dtype`` attribute.
Input:
- Table that contains two constrained columns with different dtype.
"""
# Setup
instance = GreaterThan(low=0, high=['a', 'b'], scalar='low')
dtype_int = pd.Series([1]).dtype
dtype_float = np.dtype('float')
table_data = pd.DataFrame({
'a': [1, 2, 4],
'b': [4., 5., 6.]
})
instance._fit(table_data)
# Assert
expected_diff_columns = ['a#', 'b#']
expected_dtype = pd.Series([dtype_int, dtype_float], index=table_data.columns)
assert instance._diff_columns == expected_diff_columns
pd.testing.assert_series_equal(instance._dtype, expected_dtype)
def test_is_valid_strict_false(self):
"""Test the ``GreaterThan.is_valid`` method with strict False.
If strict is False, equal values should count as valid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- False should be returned for the strictly invalid row and True
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=False)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_strict_true(self):
"""Test the ``GreaterThan.is_valid`` method with strict True.
If strict is True, equal values should count as invalid.
Input:
- Table with a strictly valid row, a strictly invalid row and
a row that has the same value for both high and low.
Output:
- True should be returned for the strictly valid row and False
for the other two.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_scalar_high_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is a column name, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=False, scalar='low')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_low_is_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is a column name, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low='a', high=2, strict=False, scalar='high')
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a scalar, and low is multi column, then
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below high.
Output:
- True should be returned for the rows where the low
column is below high.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=2, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a scalar, and high is multi column, then
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below low.
Output:
- True should be returned for the rows where the high
column is above low.
"""
# Setup
instance = GreaterThan(low=2, high=['a', 'b'], strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan.is_valid`` method.
If scalar is none, and high is a list of columns, then
the values in those columns should all be higher than
the values in the low column.
Input:
- Table with values above and below the low column.
Output:
- True should be returned for the rows where all the high
columns are at or above the low column.
"""
# Setup
instance = GreaterThan(low='b', high=['a', 'c'], strict=False)
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 2, 2],
'c': [7, 8, 9]
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [False, True, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_high_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If high is a datetime and low is a column,
the values in that column should all be lower than
``instance._high``.
Input:
- Table with values above and below `high`.
Output:
- True should be returned for the rows where the low
column is below `high`.
"""
# Setup
high_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low='a', high=high_dt, strict=False, scalar='high')
table_data = pd.DataFrame({
'a': [datetime(2020, 5, 17), datetime(2020, 2, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_low_is_datetime(self):
"""Test the ``GreaterThan.is_valid`` method.
If low is a datetime and high is a column,
the values in that column should all be higher than
``instance._low``.
Input:
- Table with values above and below `low`.
Output:
- True should be returned for the rows where the high
column is above `low`.
"""
# Setup
low_dt = pd.to_datetime('8/31/2021')
instance = GreaterThan(low=low_dt, high='a', strict=False, scalar='low')
table_data = pd.DataFrame({
'a': [datetime(2021, 9, 17), datetime(2021, 7, 1), datetime(2021, 9, 1)],
'b': [4, 2, 2],
})
# Run
out = instance.is_valid(table_data)
# Assert
expected_out = [True, False, True]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_nans(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a NaN row, expect that `is_valid` returns True.
Input:
- Table with a NaN row
Output:
- True should be returned for the NaN row.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, None, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
def test_is_valid_two_cols_with_one_nan(self):
"""Test the ``GreaterThan.is_valid`` method with nan values.
If there is a row in which we compare one NaN value with one
non-NaN value, expect that `is_valid` returns True.
Input:
- Table with a row that contains only one NaN value.
Output:
- True should be returned for the row with the NaN value.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
# Run
table_data = pd.DataFrame({
'a': [1, None, 3],
'b': [4, 5, 2],
'c': [7, 8, 9]
})
out = instance.is_valid(table_data)
# Assert
expected_out = [True, True, False]
np.testing.assert_array_equal(expected_out, out)
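# Illustrative sketch (an assumption, not the library implementation) of the
# NaN behaviour asserted above: OR-ing the comparison with a mask of rows
# where either side is NaN makes incomplete rows count as valid. ``np`` is
# the module-level numpy import already used throughout this module.
def _demo_is_valid_with_nans(table_data, low, high, strict=True):
    compare = np.greater if strict else np.greater_equal
    either_nan = table_data[low].isna() | table_data[high].isna()
    return compare(table_data[high], table_data[low]) | either_nan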
def test__transform_int_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
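# Minimal sketch of the forward transform exercised above, assuming the
# distance is simply ``high - low`` (the real transformer additionally
# handles dtypes, scalars and the ``drop`` option):
def _demo_transform_diff(table_data, low, high, diff_column):
    out = table_data.copy()
    out[diff_column] = np.log(out[high] - out[low] + 1)
    return out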
def test__transform_int_drop_high(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the high column.
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the high column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_int_drop_low(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type int.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1. It should also drop the low column.
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4) and the low column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_float_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type float.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high and low columns and create a diff column with the
logarithm of the distance + 1.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with two constrained columns at a constant distance of
exactly 3 and one additional dummy column.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4., 5., 6.],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_datetime_drop_none(self):
"""Test the ``GreaterThan._transform`` method passing a high column of type datetime.
If the columns are of type datetime, ``_transform`` is expected
to convert the timedelta distance into numeric before applying
the +1 and logarithm.
Setup:
- ``_drop`` is set to ``None``, so all original columns will be in output.
Input:
- Table with values at a distance of exactly 1 second.
Output:
- Same table with a diff column of the logarithms
of the distance in nanoseconds + 1, which is np.log(1_000_000_001).
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._diff_columns = ['a#b']
instance._is_datetime = True
# Run
table_data = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
pd.testing.assert_frame_equal(out, expected_out)
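# Datetime variant of the same sketch: the timedelta is assumed to be
# converted to integer nanoseconds before the ``log(distance + 1)`` step,
# which is how np.log(1_000_000_001) arises for a one-second gap:
def _demo_transform_diff_datetime(table_data, low, high, diff_column):
    out = table_data.copy()
    nanoseconds = (out[high] - out[low]).astype('int64')
    out[diff_column] = np.log(nanoseconds + 1)
    return out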
def test_transform_not_all_columns_provided(self):
"""Test the ``GreaterThan.transform`` method.
If some of the columns needed for the transform are missing, it will raise
a ``MissingConstraintColumnError``.
Input:
- Table data (pandas.DataFrame)
Output:
- Raises ``MissingConstraintColumnError``.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, fit_columns_model=False)
# Run/Assert
with pytest.raises(MissingConstraintColumnError):
instance.transform(pd.DataFrame({'a': ['a', 'b', 'c']}))
def test__transform_high_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'high'``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which are np.log(5), np.log(4) and np.log(3).
"""
# Setup
instance = GreaterThan(low='a', high=5, strict=True, scalar='high')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['a']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(5), np.log(4), np.log(3)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_low_is_scalar(self):
"""Test the ``GreaterThan._transform`` method with high as scalar.
The ``GreaterThan._transform`` method is expected to compute the distance
between the high scalar value and the low column and create a diff column
with the logarithm of the distance + 1.
Setup:
- ``_high`` is set to 5 and ``_scalar`` is ``'low'``.
Input:
- Table with one low column and two dummy columns.
Output:
- Same table with a diff column of the logarithms of the distances + 1,
which is np.log(4).
"""
# Setup
instance = GreaterThan(low=2, high='b', strict=True, scalar='low')
instance._diff_columns = ['a#b']
instance.constraint_columns = ['b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(3), np.log(4), np.log(5)],
})
pd.testing.assert_frame_equal(out, expected_out)
def test__transform_high_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the logarithm
of the distance between the scalar high and each constrained column + 1.
Input:
- Table with given data.
Output:
- Same table with an additional diff column per constrained column,
holding the logarithms of the distances + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'b'], high=3, strict=True, scalar='high')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(3), np.log(2), np.log(1)],
'b#': [np.log(0), np.log(-1), np.log(-2)],
})
pd.testing.assert_frame_equal(out, expected)
def test__transform_low_is_scalar_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the logarithm
of the distance between each constrained column and the scalar low + 1.
Input:
- Table with given data.
Output:
- Same table with an additional diff column per constrained column,
holding the logarithms of the distances + 1.
"""
# Setup
instance = GreaterThan(low=3, high=['a', 'b'], strict=True, scalar='low')
instance._diff_columns = ['a#', 'b#']
instance.constraint_columns = ['a', 'b']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(-1), np.log(0), np.log(1)],
'b#': [np.log(2), np.log(3), np.log(4)],
})
pd.testing.assert_frame_equal(out, expected)
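# Scalar/multi-column sketch: one diff column is assumed per constrained
# column. Note that np.log of a non-positive distance yields -inf/nan, which
# is exactly what the expected frames above encode via np.log(0) and np.log
# of negative values:
def _demo_transform_diff_scalar(table_data, columns, scalar, diff_columns,
                                scalar_side='high'):
    out = table_data.copy()
    for column, diff_column in zip(columns, diff_columns):
        if scalar_side == 'high':
            distance = scalar - out[column]
        else:
            distance = out[column] - scalar
        out[diff_column] = np.log(distance + 1)
    return out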
def test__transform_scalar_is_none_multi_column(self):
"""Test the ``GreaterThan._transform`` method.
The ``GreaterThan._transform`` method is expected to compute the logarithm
of the distance between the high column and each low column + 1.
Input:
- Table with given data.
Output:
- Same table with an additional diff column per low column,
holding the logarithms of the distances + 1.
"""
# Setup
instance = GreaterThan(low=['a', 'c'], high='b', strict=True)
instance._diff_columns = ['a#', 'c#']
instance.constraint_columns = ['a', 'c']
# Run
table_data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
out = instance._transform(table_data)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#': [np.log(4)] * 3,
'c#': [np.log(-2)] * 3,
})
pd.testing.assert_frame_equal(out, expected)
def test_reverse_transform_int_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'c': [7, 8, 9],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(out, expected_out)
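# Reverse sketch matching the integer assertions above: ``exp(diff) - 1``
# recovers the distance, which is added onto the low column and cast back to
# the stored dtype. The helper name and signature are illustrative only;
# rounding suits the integer case, a float variant would skip it.
def _demo_reverse_transform_high(transformed, low, high, diff_column, dtype):
    out = transformed.copy()
    distance = np.exp(out[diff_column]) - 1
    out[high] = (out[low] + distance).round().astype(dtype)
    return out.drop(columns=[diff_column])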
def test_reverse_transform_float_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype float.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column
- convert the output to float values
- add back the dropped column
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the high column replaced by the low one + 3, as float values
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('float')]
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1.1, 2.2, 3.3],
'c': [7, 8, 9],
'b': [4.1, 5.2, 6.3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_high(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``high``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the high column replaced by the low one + one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='high')
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'c': [1, 2],
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01'])
})
pd.testing.assert_frame_equal(out, expected_out)
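# Datetime reverse sketch: the recovered distance is assumed to be rounded
# to integer nanoseconds and wrapped in a timedelta before being added back
# onto the low column (helper name and signature are illustrative only):
def _demo_reverse_transform_high_datetime(transformed, low, high, diff_column):
    out = transformed.copy()
    nanoseconds = (np.exp(out[diff_column]) - 1).round().astype('int64')
    out[high] = out[low] + pd.to_timedelta(nanoseconds, unit='ns')
    return out.drop(columns=[diff_column])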
def test_reverse_transform_int_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high column
- convert the output to integers
- add back the dropped column
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(4).
Output:
- Same table with the low column replaced by the high one - 3, as int
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': [4, 5, 6],
'c': [7, 8, 9],
'a': [1, 2, 3],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_datetime_drop_low(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- subtract from the high column
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``low``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
Output:
- Same table with the low column replaced by the high one - one second
and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True, drop='low')
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2],
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00'])
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_int_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype int.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low column when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
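# ``drop=None`` reverse sketch: with no column dropped, only the invalid rows
# are assumed to be patched, which reproduces the expected frame above
# (row 1 becomes low + 3 = 5):
def _demo_reverse_patch_invalid(transformed, low, high, diff_column, dtype):
    out = transformed.copy()
    candidate = (out[low] + np.exp(out[diff_column]) - 1).round().astype(dtype)
    invalid = ~(out[high] > out[low])
    out.loc[invalid, high] = candidate[invalid]
    return out.drop(columns=[diff_column])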
def test_reverse_transform_datetime_drop_none(self):
"""Test the ``GreaterThan.reverse_transform`` method for dtype datetime.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- convert the distance to a timedelta
- add the low column when the row is invalid
- convert the output to datetimes
Setup:
- ``_drop`` is set to ``None``.
Input:
- Table with a diff column that contains the constant np.log(1_000_000_001).
The table should have one invalid row where the low column is
higher than the high column.
Output:
- Same table with the high column replaced by the low one + one second
for all invalid rows, and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high='b', strict=True)
instance._dtype = [np.dtype('<M8[ns]')]
instance._diff_columns = ['a#b']
instance._is_datetime = True
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-01T00:00:01']),
'c': [1, 2],
'a#b': [np.log(1_000_000_001), np.log(1_000_000_001)],
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': pd.to_datetime(['2020-01-01T00:00:00', '2020-01-02T00:00:00']),
'b': pd.to_datetime(['2020-01-01T00:00:01', '2020-01-02T00:00:01']),
'c': [1, 2]
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_low_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with low as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- add the low value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_low`` is set to an int and ``_scalar`` is ``'low'``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low value is
higher than the high column.
Output:
- Same table with the high column replaced by the low value + 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low=3, high='b', strict=True, scalar='low')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['b']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 1, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 6, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
def test_reverse_transform_high_is_scalar(self):
"""Test the ``GreaterThan.reverse_transform`` method with high as a scalar.
The ``GreaterThan.reverse_transform`` method is expected to:
- apply an exponential to the input
- subtract 1
- subtract from the high value when the row is invalid
- convert the output to integers
Setup:
- ``_drop`` is set to ``None``.
- ``_high`` is set to an int and ``_scalar`` is ``'high'``.
Input:
- Table with a diff column that contains the constant np.log(4).
The table should have one invalid row where the low column is
higher than the high value.
Output:
- Same table with the low column replaced by the high one - 3 for all
invalid rows, as int and the diff column dropped.
"""
# Setup
instance = GreaterThan(low='a', high=3, strict=True, scalar='high')
instance._dtype = [pd.Series([1]).dtype] # exact dtype (32 or 64) depends on OS
instance._diff_columns = ['a#b']
instance._columns_to_reconstruct = ['a']
# Run
transformed = pd.DataFrame({
'a': [1, 2, 4],
'b': [4, 5, 6],
'c': [7, 8, 9],
'a#b': [np.log(4)] * 3,
})
out = instance.reverse_transform(transformed)
# Assert
expected_out = pd.DataFrame({
'a': [1, 2, 0],
'b': [4, 5, 6],
'c': [7, 8, 9],
})
pd.testing.assert_frame_equal(out, expected_out)
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
346: pd.Timestamp("2013-04-13 00:00:00"),
347: pd.Timestamp("2013-04-14 00:00:00"),
348: pd.Timestamp("2013-04-15 00:00:00"),
349: pd.Timestamp("2013-04-16 00:00:00"),
350: pd.Timestamp("2013-04-17 00:00:00"),
351: pd.Timestamp("2013-04-18 00:00:00"),
352: pd.Timestamp("2013-04-19 00:00:00"),
353: pd.Timestamp("2013-04-20 00:00:00"),
354: pd.Timestamp("2013-04-21 00:00:00"),
355: pd.Timestamp("2013-04-22 00:00:00"),
356: pd.Timestamp("2013-04-23 00:00:00"),
357: pd.Timestamp("2013-04-24 00:00:00"),
358: pd.Timestamp("2013-04-25 00:00:00"),
359: pd.Timestamp("2013-04-26 00:00:00"),
360: pd.Timestamp("2013-04-27 00:00:00"),
361: pd.Timestamp("2013-04-28 00:00:00"),
362: pd.Timestamp("2013-04-29 00:00:00"),
363: pd.Timestamp("2013-04-30 00:00:00"),
364: pd.Timestamp("2013-05-01 00:00:00"),
365: pd.Timestamp("2013-05-02 00:00:00"),
366: pd.Timestamp("2013-05-03 00:00:00"),
367: pd.Timestamp("2013-05-04 00:00:00"),
368: pd.Timestamp("2013-05-05 00:00:00"),
369: pd.Timestamp("2013-05-06 00:00:00"),
370: pd.Timestamp("2013-05-07 00:00:00"),
371: pd.Timestamp("2013-05-08 00:00:00"),
372: pd.Timestamp("2013-05-09 00:00:00"),
373: pd.Timestamp("2013-05-10 00:00:00"),
374: pd.Timestamp("2013-05-11 00:00:00"),
375: pd.Timestamp("2013-05-12 00:00:00"),
376: pd.Timestamp("2013-05-13 00:00:00"),
377: pd.Timestamp("2013-05-14 00:00:00"),
378: pd.Timestamp("2013-05-15 00:00:00"),
379: pd.Timestamp("2013-05-16 00:00:00"),
380: pd.Timestamp("2013-05-17 00:00:00"),
381: pd.Timestamp("2013-05-18 00:00:00"),
382: pd.Timestamp("2013-05-19 00:00:00"),
383: pd.Timestamp("2013-05-20 00:00:00"),
384: pd.Timestamp("2013-05-21 00:00:00"),
385: pd.Timestamp("2013-05-22 00:00:00"),
386: pd.Timestamp("2013-05-23 00:00:00"),
387: pd.Timestamp("2013-05-24 00:00:00"),
388: pd.Timestamp("2013-05-25 00:00:00"),
389: pd.Timestamp("2013-05-26 00:00:00"),
390: pd.Timestamp("2013-05-27 00:00:00"),
391: pd.Timestamp("2013-05-28 00:00:00"),
392: pd.Timestamp("2013-05-29 00:00:00"),
393: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.348604308646497,
1: 8.348964254851197,
2: 8.349324201055898,
3: 8.349684147260598,
4: 8.350044093465298,
5: 8.350404039669998,
6: 8.3507639858747,
7: 8.3511239320794,
8: 8.3514838782841,
9: 8.351843824488801,
10: 8.352203770693501,
11: 8.352563716898201,
12: 8.352923663102903,
13: 8.353283609307603,
14: 8.353643555512303,
15: 8.354003501717003,
16: 8.354363447921704,
17: 8.354723394126404,
18: 8.355083340331104,
19: 8.355443286535806,
20: 8.355803232740506,
21: 8.356163178945206,
22: 8.356523125149906,
23: 8.356883071354607,
24: 8.357243017559307,
25: 8.357602963764007,
26: 8.357962909968709,
27: 8.358322856173409,
28: 8.358682802378109,
29: 8.35904274858281,
30: 8.35940269478751,
31: 8.35976264099221,
32: 8.36012258719691,
33: 8.360482533401612,
34: 8.360842479606312,
35: 8.361202425811012,
36: 8.361562372015714,
37: 8.361922318220413,
38: 8.362282264425113,
39: 8.362642210629813,
40: 8.363002156834515,
41: 8.363362103039215,
42: 8.363722049243915,
43: 8.364081995448617,
44: 8.364441941653316,
45: 8.364801887858016,
46: 8.365161834062716,
47: 8.365521780267418,
48: 8.365881726472118,
49: 8.366241672676818,
50: 8.36660161888152,
51: 8.36696156508622,
52: 8.36732151129092,
53: 8.367681457495621,
54: 8.368041403700321,
55: 8.368401349905021,
56: 8.36876129610972,
57: 8.369121242314423,
58: 8.369481188519122,
59: 8.369841134723822,
60: 8.370201080928524,
61: 8.370561027133224,
62: 8.370920973337924,
63: 8.371280919542624,
64: 8.371640865747326,
65: 8.372000811952026,
66: 8.372360758156725,
67: 8.372720704361427,
68: 8.373080650566127,
69: 8.373440596770827,
70: 8.373800542975529,
71: 8.374160489180229,
72: 8.374520435384929,
73: 8.374880381589628,
74: 8.37524032779433,
75: 8.37560027399903,
76: 8.37596022020373,
77: 8.376320166408432,
78: 8.376680112613132,
79: 8.377040058817832,
80: 8.377400005022531,
81: 8.377759951227233,
82: 8.378119897431933,
83: 8.378479843636633,
84: 8.378839789841335,
85: 8.379199736046035,
86: 8.379559682250735,
87: 8.379919628455436,
88: 8.380279574660136,
89: 8.380639520864836,
90: 8.380999467069536,
91: 8.381359413274238,
92: 8.381719359478938,
93: 8.382079305683638,
94: 8.38243925188834,
95: 8.38279919809304,
96: 8.38315914429774,
97: 8.383519090502439,
98: 8.38387903670714,
99: 8.38423898291184,
100: 8.38459892911654,
101: 8.384958875321242,
102: 8.385318821525942,
103: 8.385678767730642,
104: 8.386038713935344,
105: 8.386398660140044,
106: 8.386758606344744,
107: 8.387118552549444,
108: 8.387478498754145,
109: 8.387838444958845,
110: 8.388198391163545,
111: 8.388558337368247,
112: 8.388918283572947,
113: 8.389278229777647,
114: 8.389638175982347,
115: 8.389998122187048,
116: 8.390358068391748,
117: 8.390718014596448,
118: 8.39107796080115,
119: 8.39143790700585,
120: 8.39179785321055,
121: 8.392157799415251,
122: 8.392517745619951,
123: 8.392877691824651,
124: 8.393237638029351,
125: 8.393597584234053,
126: 8.393957530438753,
127: 8.394317476643453,
128: 8.394677422848154,
129: 8.395037369052854,
130: 8.395397315257554,
131: 8.395757261462254,
132: 8.396117207666956,
133: 8.396477153871656,
134: 8.396837100076356,
135: 8.397197046281057,
136: 8.397556992485757,
137: 8.397916938690457,
138: 8.398276884895157,
139: 8.398636831099859,
140: 8.398996777304559,
141: 8.399356723509259,
142: 8.39971666971396,
143: 8.40007661591866,
144: 8.40043656212336,
145: 8.400796508328062,
146: 8.401156454532762,
147: 8.401516400737462,
148: 8.401876346942162,
149: 8.402236293146863,
150: 8.402596239351563,
151: 8.402956185556263,
152: 8.403316131760965,
153: 8.403676077965665,
154: 8.404036024170365,
155: 8.404395970375065,
156: 8.404755916579767,
157: 8.405115862784466,
158: 8.405475808989166,
159: 8.405835755193868,
160: 8.406195701398568,
161: 8.406555647603268,
162: 8.40691559380797,
163: 8.40727554001267,
164: 8.40763548621737,
165: 8.40799543242207,
166: 8.408355378626771,
167: 8.408715324831471,
168: 8.409075271036171,
169: 8.409435217240873,
170: 8.409795163445573,
171: 8.410155109650272,
172: 8.410515055854972,
173: 8.410875002059674,
174: 8.411234948264374,
175: 8.411594894469074,
176: 8.411954840673776,
177: 8.412314786878476,
178: 8.412674733083175,
179: 8.413034679287877,
180: 8.413394625492577,
181: 8.413754571697277,
182: 8.414114517901977,
183: 8.414474464106679,
184: 8.414834410311379,
185: 8.415194356516078,
186: 8.41555430272078,
187: 8.41591424892548,
188: 8.41627419513018,
189: 8.41663414133488,
190: 8.416994087539582,
191: 8.417354033744282,
192: 8.417713979948982,
193: 8.418073926153683,
194: 8.418433872358383,
195: 8.418793818563083,
196: 8.419153764767785,
197: 8.419513710972485,
198: 8.419873657177185,
199: 8.420233603381885,
200: 8.420593549586586,
201: 8.420953495791286,
202: 8.421313441995986,
203: 8.421673388200688,
204: 8.422033334405388,
205: 8.422393280610088,
206: 8.422753226814788,
207: 8.42311317301949,
208: 8.42347311922419,
209: 8.423833065428889,
210: 8.42419301163359,
211: 8.42455295783829,
212: 8.42491290404299,
213: 8.42527285024769,
214: 8.425632796452392,
215: 8.425992742657092,
216: 8.426352688861792,
217: 8.426712635066494,
218: 8.427072581271194,
219: 8.427432527475894,
220: 8.427792473680595,
221: 8.428152419885295,
222: 8.428512366089995,
223: 8.428872312294695,
224: 8.429232258499397,
225: 8.429592204704097,
226: 8.429952150908797,
227: 8.430312097113498,
228: 8.430672043318198,
229: 8.431031989522898,
230: 8.431391935727598,
231: 8.4317518819323,
232: 8.432111828137,
233: 8.4324717743417,
234: 8.432831720546401,
235: 8.433191666751101,
236: 8.433551612955801,
237: 8.433911559160503,
238: 8.434271505365203,
239: 8.434631451569903,
240: 8.434991397774603,
241: 8.435351343979304,
242: 8.435711290184004,
243: 8.436071236388704,
244: 8.436431182593406,
245: 8.436791128798106,
246: 8.437151075002806,
247: 8.437511021207506,
248: 8.437870967412207,
249: 8.438230913616907,
250: 8.438590859821607,
251: 8.438950806026309,
252: 8.439310752231009,
253: 8.439670698435709,
254: 8.44003064464041,
255: 8.44039059084511,
256: 8.44075053704981,
257: 8.44111048325451,
258: 8.441470429459212,
259: 8.441830375663912,
260: 8.442190321868612,
261: 8.442550268073314,
262: 8.442910214278013,
263: 8.443270160482713,
264: 8.443630106687413,
265: 8.443990052892115,
266: 8.444349999096815,
267: 8.444709945301515,
268: 8.445069891506217,
269: 8.445429837710916,
270: 8.445789783915616,
271: 8.446149730120318,
272: 8.446509676325018,
273: 8.446869622529718,
274: 8.447229568734418,
275: 8.44758951493912,
276: 8.44794946114382,
277: 8.44830940734852,
278: 8.448669353553221,
279: 8.449029299757921,
280: 8.449389245962621,
281: 8.449749192167321,
282: 8.450109138372023,
283: 8.450469084576723,
284: 8.450829030781422,
285: 8.451188976986124,
286: 8.451548923190824,
287: 8.451908869395524,
288: 8.452268815600226,
289: 8.452628761804926,
290: 8.452988708009626,
291: 8.453348654214325,
292: 8.453708600419027,
293: 8.454068546623727,
294: 8.454428492828427,
295: 8.454788439033129,
296: 8.455148385237829,
297: 8.455508331442529,
298: 8.455868277647228,
299: 8.45622822385193,
300: 8.45658817005663,
301: 8.45694811626133,
302: 8.457308062466032,
303: 8.457668008670732,
304: 8.458027954875432,
305: 8.458387901080131,
306: 8.458747847284833,
307: 8.459107793489533,
308: 8.459467739694233,
309: 8.459827685898935,
310: 8.460187632103635,
311: 8.460547578308335,
312: 8.460907524513036,
313: 8.461267470717736,
314: 8.461627416922436,
315: 8.461987363127136,
316: 8.462347309331838,
317: 8.462707255536538,
318: 8.463067201741238,
319: 8.46342714794594,
320: 8.46378709415064,
321: 8.46414704035534,
322: 8.464506986560039,
323: 8.46486693276474,
324: 8.46522687896944,
325: 8.46558682517414,
326: 8.465946771378842,
327: 8.466306717583542,
328: 8.466666663788242,
329: 8.467026609992944,
330: 8.467386556197644,
331: 8.467746502402344,
332: 8.468106448607044,
333: 8.468466394811745,
334: 8.468826341016445,
335: 8.469186287221145,
336: 8.469546233425847,
337: 8.469906179630547,
338: 8.470266125835247,
339: 8.470626072039947,
340: 8.470986018244648,
341: 8.471345964449348,
342: 8.471705910654048,
343: 8.47206585685875,
344: 8.47242580306345,
345: 8.47278574926815,
346: 8.473145695472851,
347: 8.473505641677551,
348: 8.473865587882251,
349: 8.474225534086951,
350: 8.474585480291653,
351: 8.474945426496353,
352: 8.475305372701053,
353: 8.475665318905754,
354: 8.476025265110454,
355: 8.476385211315154,
356: 8.476745157519854,
357: 8.477105103724556,
358: 8.477465049929256,
359: 8.477824996133956,
360: 8.478184942338657,
361: 8.478544888543357,
362: 8.478904834748057,
363: 8.479264780952759,
364: 8.479624727157459,
365: 8.479984673362159,
366: 8.480344619566859,
367: 8.48070456577156,
368: 8.48106451197626,
369: 8.48142445818096,
370: 8.481784404385662,
371: 8.482144350590362,
372: 8.482504296795062,
373: 8.482864242999762,
374: 8.483224189204464,
375: 8.483584135409163,
376: 8.483944081613863,
377: 8.484304027818565,
378: 8.484663974023265,
379: 8.485023920227965,
380: 8.485383866432667,
381: 8.485743812637367,
382: 8.486103758842066,
383: 8.486463705046766,
384: 8.486823651251468,
385: 8.487183597456168,
386: 8.487543543660868,
387: 8.48790348986557,
388: 8.48826343607027,
389: 8.48862338227497,
390: 8.48898332847967,
391: 8.489343274684371,
392: 8.489703220889071,
393: 8.490063167093771,
},
"fcst_lower": {
0: -np.inf,
1: -np.inf,
2: -np.inf,
3: -np.inf,
4: -np.inf,
5: -np.inf,
6: -np.inf,
7: -np.inf,
8: -np.inf,
9: -np.inf,
10: -np.inf,
11: -np.inf,
12: -np.inf,
13: -np.inf,
14: -np.inf,
15: -np.inf,
16: -np.inf,
17: -np.inf,
18: -np.inf,
19: -np.inf,
20: -np.inf,
21: -np.inf,
22: -np.inf,
23: -np.inf,
24: -np.inf,
25: -np.inf,
26: -np.inf,
27: -np.inf,
28: -np.inf,
29: -np.inf,
30: -np.inf,
31: -np.inf,
32: -np.inf,
33: -np.inf,
34: -np.inf,
35: -np.inf,
36: -np.inf,
37: -np.inf,
38: -np.inf,
39: -np.inf,
40: -np.inf,
41: -np.inf,
42: -np.inf,
43: -np.inf,
44: -np.inf,
45: -np.inf,
46: -np.inf,
47: -np.inf,
48: -np.inf,
49: -np.inf,
50: -np.inf,
51: -np.inf,
52: -np.inf,
53: -np.inf,
54: -np.inf,
55: -np.inf,
56: -np.inf,
57: -np.inf,
58: -np.inf,
59: -np.inf,
60: -np.inf,
61: -np.inf,
62: -np.inf,
63: -np.inf,
64: -np.inf,
65: -np.inf,
66: -np.inf,
67: -np.inf,
68: -np.inf,
69: -np.inf,
70: -np.inf,
71: -np.inf,
72: -np.inf,
73: -np.inf,
74: -np.inf,
75: -np.inf,
76: -np.inf,
77: -np.inf,
78: -np.inf,
79: -np.inf,
80: -np.inf,
81: -np.inf,
82: -np.inf,
83: -np.inf,
84: -np.inf,
85: -np.inf,
86: -np.inf,
87: -np.inf,
88: -np.inf,
89: -np.inf,
90: -np.inf,
91: -np.inf,
92: -np.inf,
93: -np.inf,
94: -np.inf,
95: -np.inf,
96: -np.inf,
97: -np.inf,
98: -np.inf,
99: -np.inf,
100: -np.inf,
101: -np.inf,
102: -np.inf,
103: -np.inf,
104: -np.inf,
105: -np.inf,
106: -np.inf,
107: -np.inf,
108: -np.inf,
109: -np.inf,
110: -np.inf,
111: -np.inf,
112: -np.inf,
113: -np.inf,
114: -np.inf,
115: -np.inf,
116: -np.inf,
117: -np.inf,
118: -np.inf,
119: -np.inf,
120: -np.inf,
121: -np.inf,
122: -np.inf,
123: -np.inf,
124: -np.inf,
125: -np.inf,
126: -np.inf,
127: -np.inf,
128: -np.inf,
129: -np.inf,
130: -np.inf,
131: -np.inf,
132: -np.inf,
133: -np.inf,
134: -np.inf,
135: -np.inf,
136: -np.inf,
137: -np.inf,
138: -np.inf,
139: -np.inf,
140: -np.inf,
141: -np.inf,
142: -np.inf,
143: -np.inf,
144: -np.inf,
145: -np.inf,
146: -np.inf,
147: -np.inf,
148: -np.inf,
149: -np.inf,
150: -np.inf,
151: -np.inf,
152: -np.inf,
153: -np.inf,
154: -np.inf,
155: -np.inf,
156: -np.inf,
157: -np.inf,
158: -np.inf,
159: -np.inf,
160: -np.inf,
161: -np.inf,
162: -np.inf,
163: -np.inf,
164: -np.inf,
165: -np.inf,
166: -np.inf,
167: -np.inf,
168: -np.inf,
169: -np.inf,
170: -np.inf,
171: -np.inf,
172: -np.inf,
173: -np.inf,
174: -np.inf,
175: -np.inf,
176: -np.inf,
177: -np.inf,
178: -np.inf,
179: -np.inf,
180: -np.inf,
181: -np.inf,
182: -np.inf,
183: -np.inf,
184: -np.inf,
185: -np.inf,
186: -np.inf,
187: -np.inf,
188: -np.inf,
189: -np.inf,
190: -np.inf,
191: -np.inf,
192: -np.inf,
193: -np.inf,
194: -np.inf,
195: -np.inf,
196: -np.inf,
197: -np.inf,
198: -np.inf,
199: -np.inf,
200: -np.inf,
201: -np.inf,
202: -np.inf,
203: -np.inf,
204: -np.inf,
205: -np.inf,
206: -np.inf,
207: -np.inf,
208: -np.inf,
209: -np.inf,
210: -np.inf,
211: -np.inf,
212: -np.inf,
213: -np.inf,
214: -np.inf,
215: -np.inf,
216: -np.inf,
217: -np.inf,
218: -np.inf,
219: -np.inf,
220: -np.inf,
221: -np.inf,
222: -np.inf,
223: -np.inf,
224: -np.inf,
225: -np.inf,
226: -np.inf,
227: -np.inf,
228: -np.inf,
229: -np.inf,
230: -np.inf,
231: -np.inf,
232: -np.inf,
233: -np.inf,
234: -np.inf,
235: -np.inf,
236: -np.inf,
237: -np.inf,
238: -np.inf,
239: -np.inf,
240: -np.inf,
241: -np.inf,
242: -np.inf,
243: -np.inf,
244: -np.inf,
245: -np.inf,
246: -np.inf,
247: -np.inf,
248: -np.inf,
249: -np.inf,
250: -np.inf,
251: -np.inf,
252: -np.inf,
253: -np.inf,
254: -np.inf,
255: -np.inf,
256: -np.inf,
257: -np.inf,
258: -np.inf,
259: -np.inf,
260: -np.inf,
261: -np.inf,
262: -np.inf,
263: -np.inf,
264: -np.inf,
265: -np.inf,
266: -np.inf,
267: -np.inf,
268: -np.inf,
269: -np.inf,
270: -np.inf,
271: -np.inf,
272: -np.inf,
273: -np.inf,
274: -np.inf,
275: -np.inf,
276: -np.inf,
277: -np.inf,
278: -np.inf,
279: -np.inf,
280: -np.inf,
281: -np.inf,
282: -np.inf,
283: -np.inf,
284: -np.inf,
285: -np.inf,
286: -np.inf,
287: -np.inf,
288: -np.inf,
289: -np.inf,
290: -np.inf,
291: -np.inf,
292: -np.inf,
293: -np.inf,
294: -np.inf,
295: -np.inf,
296: -np.inf,
297: -np.inf,
298: -np.inf,
299: -np.inf,
300: -np.inf,
301: -np.inf,
302: -np.inf,
303: -np.inf,
304: -np.inf,
305: -np.inf,
306: -np.inf,
307: -np.inf,
308: -np.inf,
309: -np.inf,
310: -np.inf,
311: -np.inf,
312: -np.inf,
313: -np.inf,
314: -np.inf,
315: -np.inf,
316: -np.inf,
317: -np.inf,
318: -np.inf,
319: -np.inf,
320: -np.inf,
321: -np.inf,
322: -np.inf,
323: -np.inf,
324: -np.inf,
325: -np.inf,
326: -np.inf,
327: -np.inf,
328: -np.inf,
329: -np.inf,
330: -np.inf,
331: -np.inf,
332: -np.inf,
333: -np.inf,
334: -np.inf,
335: -np.inf,
336: -np.inf,
337: -np.inf,
338: -np.inf,
339: -np.inf,
340: -np.inf,
341: -np.inf,
342: -np.inf,
343: -np.inf,
344: -np.inf,
345: -np.inf,
346: -np.inf,
347: -np.inf,
348: -np.inf,
349: -np.inf,
350: -np.inf,
351: -np.inf,
352: -np.inf,
353: -np.inf,
354: -np.inf,
355: -np.inf,
356: -np.inf,
357: -np.inf,
358: -np.inf,
359: -np.inf,
360: -np.inf,
361: -np.inf,
362: -np.inf,
363: -np.inf,
364: -np.inf,
365: -np.inf,
366: -np.inf,
367: -np.inf,
368: -np.inf,
369: -np.inf,
370: -np.inf,
371: -np.inf,
372: -np.inf,
373: -np.inf,
374: -np.inf,
375: -np.inf,
376: -np.inf,
377: -np.inf,
378: -np.inf,
379: -np.inf,
380: -np.inf,
381: -np.inf,
382: -np.inf,
383: -np.inf,
384: -np.inf,
385: -np.inf,
386: -np.inf,
387: -np.inf,
388: -np.inf,
389: -np.inf,
390: -np.inf,
391: -np.inf,
392: -np.inf,
393: -np.inf,
},
"fcst_upper": {
0: np.inf,
1: np.inf,
2: np.inf,
3: np.inf,
4: np.inf,
5: np.inf,
6: np.inf,
7: np.inf,
8: np.inf,
9: np.inf,
10: np.inf,
11: np.inf,
12: np.inf,
13: np.inf,
14: np.inf,
15: np.inf,
16: np.inf,
17: np.inf,
18: np.inf,
19: np.inf,
20: np.inf,
21: np.inf,
22: np.inf,
23: np.inf,
24: np.inf,
25: np.inf,
26: np.inf,
27: np.inf,
28: np.inf,
29: np.inf,
30: np.inf,
31: np.inf,
32: np.inf,
33: np.inf,
34: np.inf,
35: np.inf,
36: np.inf,
37: np.inf,
38: np.inf,
39: np.inf,
40: np.inf,
41: np.inf,
42: np.inf,
43: np.inf,
44: np.inf,
45: np.inf,
46: np.inf,
47: np.inf,
48: np.inf,
49: np.inf,
50: np.inf,
51: np.inf,
52: np.inf,
53: np.inf,
54: np.inf,
55: np.inf,
56: np.inf,
57: np.inf,
58: np.inf,
59: np.inf,
60: np.inf,
61: np.inf,
62: np.inf,
63: np.inf,
64: np.inf,
65: np.inf,
66: np.inf,
67: np.inf,
68: np.inf,
69: np.inf,
70: np.inf,
71: np.inf,
72: np.inf,
73: np.inf,
74: np.inf,
75: np.inf,
76: np.inf,
77: np.inf,
78: np.inf,
79: np.inf,
80: np.inf,
81: np.inf,
82: np.inf,
83: np.inf,
84: np.inf,
85: np.inf,
86: np.inf,
87: np.inf,
88: np.inf,
89: np.inf,
90: np.inf,
91: np.inf,
92: np.inf,
93: np.inf,
94: np.inf,
95: np.inf,
96: np.inf,
97: np.inf,
98: np.inf,
99: np.inf,
100: np.inf,
101: np.inf,
102: np.inf,
103: np.inf,
104: np.inf,
105: np.inf,
106: np.inf,
107: np.inf,
108: np.inf,
109: np.inf,
110: np.inf,
111: np.inf,
112: np.inf,
113: np.inf,
114: np.inf,
115: np.inf,
116: np.inf,
117: np.inf,
118: np.inf,
119: np.inf,
120: np.inf,
121: np.inf,
122: np.inf,
123: np.inf,
124: np.inf,
125: np.inf,
126: np.inf,
127: np.inf,
128: np.inf,
129: np.inf,
130: np.inf,
131: np.inf,
132: np.inf,
133: np.inf,
134: np.inf,
135: np.inf,
136: np.inf,
137: np.inf,
138: np.inf,
139: np.inf,
140: np.inf,
141: np.inf,
142: np.inf,
143: np.inf,
144: np.inf,
145: np.inf,
146: np.inf,
147: np.inf,
148: np.inf,
149: np.inf,
150: np.inf,
151: np.inf,
152: np.inf,
153: np.inf,
154: np.inf,
155: np.inf,
156: np.inf,
157: np.inf,
158: np.inf,
159: np.inf,
160: np.inf,
161: np.inf,
162: np.inf,
163: np.inf,
164: np.inf,
165: np.inf,
166: np.inf,
167: np.inf,
168: np.inf,
169: np.inf,
170: np.inf,
171: np.inf,
172: np.inf,
173: np.inf,
174: np.inf,
175: np.inf,
176: np.inf,
177: np.inf,
178: np.inf,
179: np.inf,
180: np.inf,
181: np.inf,
182: np.inf,
183: np.inf,
184: np.inf,
185: np.inf,
186: np.inf,
187: np.inf,
188: np.inf,
189: np.inf,
190: np.inf,
191: np.inf,
192: np.inf,
193: np.inf,
194: np.inf,
195: np.inf,
196: np.inf,
197: np.inf,
198: np.inf,
199: np.inf,
200: np.inf,
201: np.inf,
202: np.inf,
203: np.inf,
204: np.inf,
205: np.inf,
206: np.inf,
207: np.inf,
208: np.inf,
209: np.inf,
210: np.inf,
211: np.inf,
212: np.inf,
213: np.inf,
214: np.inf,
215: np.inf,
216: np.inf,
217: np.inf,
218: np.inf,
219: np.inf,
220: np.inf,
221: np.inf,
222: np.inf,
223: np.inf,
224: np.inf,
225: np.inf,
226: np.inf,
227: np.inf,
228: np.inf,
229: np.inf,
230: np.inf,
231: np.inf,
232: np.inf,
233: np.inf,
234: np.inf,
235: np.inf,
236: np.inf,
237: np.inf,
238: np.inf,
239: np.inf,
240: np.inf,
241: np.inf,
242: np.inf,
243: np.inf,
244: np.inf,
245: np.inf,
246: np.inf,
247: np.inf,
248: np.inf,
249: np.inf,
250: np.inf,
251: np.inf,
252: np.inf,
253: np.inf,
254: np.inf,
255: np.inf,
256: np.inf,
257: np.inf,
258: np.inf,
259: np.inf,
260: np.inf,
261: np.inf,
262: np.inf,
263: np.inf,
264: np.inf,
265: np.inf,
266: np.inf,
267: np.inf,
268: np.inf,
269: np.inf,
270: np.inf,
271: np.inf,
272: np.inf,
273: np.inf,
274: np.inf,
275: np.inf,
276: np.inf,
277: np.inf,
278: np.inf,
279: np.inf,
280: np.inf,
281: np.inf,
282: np.inf,
283: np.inf,
284: np.inf,
285: np.inf,
286: np.inf,
287: np.inf,
288: np.inf,
289: np.inf,
290: np.inf,
291: np.inf,
292: np.inf,
293: np.inf,
294: np.inf,
295: np.inf,
296: np.inf,
297: np.inf,
298: np.inf,
299: np.inf,
300: np.inf,
301: np.inf,
302: np.inf,
303: np.inf,
304: np.inf,
305: np.inf,
306: np.inf,
307: np.inf,
308: np.inf,
309: np.inf,
310: np.inf,
311: np.inf,
312: np.inf,
313: np.inf,
314: np.inf,
315: np.inf,
316: np.inf,
317: np.inf,
318: np.inf,
319: np.inf,
320: np.inf,
321: np.inf,
322: np.inf,
323: np.inf,
324: np.inf,
325: np.inf,
326: np.inf,
327: np.inf,
328: np.inf,
329: np.inf,
330: np.inf,
331: np.inf,
332: np.inf,
333: np.inf,
334: np.inf,
335: np.inf,
336: np.inf,
337: np.inf,
338: np.inf,
339: np.inf,
340: np.inf,
341: np.inf,
342: np.inf,
343: np.inf,
344: np.inf,
345: np.inf,
346: np.inf,
347: np.inf,
348: np.inf,
349: np.inf,
350: np.inf,
351: np.inf,
352: np.inf,
353: np.inf,
354: np.inf,
355: np.inf,
356: np.inf,
357: np.inf,
358: np.inf,
359: np.inf,
360: np.inf,
361: np.inf,
362: np.inf,
363: np.inf,
364: np.inf,
365: np.inf,
366: np.inf,
367: np.inf,
368: np.inf,
369: np.inf,
370: np.inf,
371: np.inf,
372: np.inf,
373: np.inf,
374: np.inf,
375: np.inf,
376: np.inf,
377: np.inf,
378: np.inf,
379: np.inf,
380: np.inf,
381: np.inf,
382: np.inf,
383: np.inf,
384: np.inf,
385: np.inf,
386: np.inf,
387: np.inf,
388: np.inf,
389: np.inf,
390: np.inf,
391: np.inf,
392: np.inf,
393: np.inf,
},
}
)
PEYTON_FCST_LINEAR_INVALID_NEG_ONE = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2017 <NAME> <<EMAIL>>
# Distributed under terms of the MIT license.
"""
"""
import sys
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import roc_curve
def rf_classify(metrics, trainable, testable, features, labeler, cutoffs, name,
clean_cutoffs=False):
"""Wrapper to run random forest and assign probabilities"""
rf = RandomForest(trainable, testable, features, cutoffs, labeler, name,
clean_cutoffs)
rf.run()
metrics.loc[rf.testable.index, name] = rf.probs
cutoffs = rf.cutoffs.copy()
# evidence = name.split('_')[0]
# rf.clean.to_csv('{0}_training.txt'.format(evidence), index=False, sep='\t')
del rf.clean
del rf.testable
del rf.rf
del rf
return cutoffs
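# Illustrative usage sketch (not from the original module; the metrics table,
# feature names, and labeler object are hypothetical stand-ins):
# metrics = pd.read_csv('metrics.txt', sep='\t')
# trainable = metrics.loc[metrics.is_training]
# testable = metrics.loc[~metrics.is_training]
# cutoffs = rf_classify(metrics, trainable, testable,
#                       features=['log_pval', 'bg_frac'],
#                       labeler=labeler, cutoffs=[], name='PE_prob')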
class RandomForest:
def __init__(self, trainable, testable, features, cutoffs, labeler, name,
clean_cutoffs=False, max_train_size=100000):
def has_null_features(df):
return df[features].isnull().any(axis=1)
self.clean = trainable.loc[~has_null_features(trainable)].copy()
if self.clean.shape[0] == 0:
raise Exception('No clean variants found')
self.testable = testable.loc[~has_null_features(testable)].copy()
self.features = features
self.labeler = labeler
self.encoder = LabelEncoder().fit(['Fail', 'Pass'])
self.name = name
self.clean_cutoffs = clean_cutoffs
self.cutoff_features = cutoffs
self.cutoffs = None
self.max_train_size = max_train_size
def run(self):
sys.stderr.write('Labeling training data...\n')
self.label_training_data()
sys.stderr.write('Selecting training data...\n')
self.select_training_data()
sys.stderr.write('Learning probabilities...\n')
self.learn_probs()
sys.stderr.write('Learning cutoffs...\n')
self.learn_cutoffs()
sys.stderr.write('Trimming probabilities...\n')
self.cutoff_probs()
def label_training_data(self):
self.clean['label'] = self.labeler.label(self.clean)
def select_training_data(self):
self.train = self.clean.loc[self.clean.label != 'Unlabeled']
if self.train.shape[0] >= self.max_train_size:
max_subset_size = int(self.max_train_size / 2)
passes = self.train.loc[self.train.label == 'Pass']
if passes.shape[0] >= max_subset_size:
passes = passes.sample(max_subset_size)
fails = self.train.loc[self.train.label == 'Fail']
if fails.shape[0] >= max_subset_size:
fails = fails.sample(max_subset_size)
self.train = pd.concat([passes, fails])
from collections import (
abc,
deque,
)
from decimal import Decimal
from warnings import catch_warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
PeriodIndex,
Series,
concat,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
class TestConcatenate:
def test_append_concat(self):
# GH#1815
d1 = date_range("12/31/1990", "12/31/1999", freq="A-DEC")
d2 = date_range("12/31/2000", "12/31/2009", freq="A-DEC")
s1 = Series(np.random.randn(10), d1)
s2 = Series(np.random.randn(10), d2)
s1 = s1.to_period()
s2 = s2.to_period()
# drops index
result = concat([s1, s2])
assert isinstance(result.index, PeriodIndex)
assert result.index[0] == s1.index[0]
def test_concat_copy(self, using_array_manager):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for arr in result._mgr.arrays:
assert arr.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for arr in result._mgr.arrays:
if arr.dtype.kind == "f":
assert arr.base is df._mgr.arrays[0].base
elif arr.dtype.kind in ["i", "u"]:
assert arr.base is df2._mgr.arrays[0].base
elif arr.dtype == object:
if using_array_manager:
# we get the same array object, which has no base
assert arr is df3._mgr.arrays[0]
else:
assert arr.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for arr in result._mgr.arrays:
if arr.dtype.kind == "f":
if using_array_manager:
# this is a view on some array in either df or df4
assert any(
np.shares_memory(arr, other)
for other in df._mgr.arrays + df4._mgr.arrays
)
else:
# the block was consolidated, so we got a copy anyway
assert arr.base is None
elif arr.dtype.kind in ["i", "u"]:
assert arr.base is df2._mgr.arrays[0].base
elif arr.dtype == object:
# this is a view on df3
assert any(np.shares_memory(arr, other) for other in df3._mgr.arrays)
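# Summary of the copy semantics exercised above: copy=True allocates fresh
# blocks (arr.base is None), while copy=False yields views onto the inputs
# for same-dtype blocks unless block consolidation forces a copy anyway.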
def test_concat_with_group_keys(self):
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument("-pythonpath", "--pythonpath", type=str)
parser.add_argument("-tomo_name", "--tomo_name", type=str)
parser.add_argument("-config_file", "--config_file", type=str)
parser.add_argument("-fold", "--fold", type=str, default="None")
args = parser.parse_args()
pythonpath = args.pythonpath
sys.path.append(pythonpath)
import os
import ast
from os import listdir
import shutil
import pandas as pd
import numpy as np
# import seaborn as sns
from file_actions.readers.tomograms import load_tomogram
from file_actions.writers.csv import build_tom_motive_list
from file_actions.writers.tomogram import write_tomogram
from tomogram_utils.coordinates_toolbox.clustering import get_cluster_centroids, \
get_cluster_centroids_in_contact, get_cluster_centroids_colocalization
from paths.pipeline_dirs import get_probability_map_path, get_post_processed_prediction_path
from constants.config import Config
from constants.config import get_model_name
from networks.utils import get_training_testing_lists
config_file = args.config_file
config = Config(user_config_file=config_file)
tomo_name = args.tomo_name
fold = ast.literal_eval(args.fold)
calculate_motl = config.calculate_motl
model_path, model_name = get_model_name(config, fold)
snakemake_pattern = config.output_dir + "/predictions/" + model_name + "/" + tomo_name + "/" + config.pred_class + \
"/.{fold}.post_processed_prediction.mrc".format(fold=str(fold))
if isinstance(fold, int):
tomo_training_list, tomo_testing_list = get_training_testing_lists(config=config, fold=fold)
if tomo_name in tomo_testing_list:
run_job = True
else:
run_job = False
else:
run_job = True
if run_job:
print("Processing tomo", tomo_name)
tomo_output_dir, output_path = get_probability_map_path(config.output_dir, model_name, tomo_name,
config.pred_class)
for file in listdir(tomo_output_dir):
if "motl" in file:
print("A motive list already exists:", file)
shutil.move(os.path.join(tomo_output_dir, file), os.path.join(tomo_output_dir, "prev_" + file))
assert os.path.isfile(output_path)
prediction_dataset = load_tomogram(path_to_dataset=output_path)
output_shape = prediction_dataset.shape
prediction_dataset_thr = 1 * (prediction_dataset > config.threshold)
# set to zero the edges of tomogram
if isinstance(config.ignore_border_thickness, int):
ix = config.ignore_border_thickness
iy, iz = ix, ix
else:
ix, iy, iz = config.ignore_border_thickness
if iz > 0:
prediction_dataset_thr[:iz, :, :] = np.zeros_like(prediction_dataset_thr[:iz, :, :])
prediction_dataset_thr[-iz:, :, :] = np.zeros_like(prediction_dataset_thr[-iz:, :, :])
if iy > 0:
prediction_dataset_thr[:, :iy, :] = np.zeros_like(prediction_dataset_thr[:, :iy, :])
prediction_dataset_thr[:, -iy:, :] = np.zeros_like(prediction_dataset_thr[:, -iy:, :])
if ix > 0:
prediction_dataset_thr[:, :, :ix] = np.zeros_like(prediction_dataset_thr[:, :, :ix])
prediction_dataset_thr[:, :, -ix:] = np.zeros_like(prediction_dataset_thr[:, :, -ix:])
print("Region mask:", config.region_mask)
df = pd.read_csv(config.dataset_table, dtype={"tomo_name": str})
df.set_index("tomo_name", inplace=True)
masking_file = df[config.region_mask][tomo_name]
clusters_output_path = get_post_processed_prediction_path(output_dir=config.output_dir,
model_name=model_name,
tomo_name=tomo_name,
semantic_class=config.pred_class)
os.makedirs(tomo_output_dir, exist_ok=True)
contact_mode = config.contact_mode
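# Assumed meaning of the three contact modes handled below (inferred from the
# branches): "intersection" keeps only predicted voxels inside the region
# mask, "contact" keeps clusters that touch the mask, and "colocalization"
# keeps clusters within config.contact_distance voxels of the mask.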
if np.max(prediction_dataset_thr) == 0:
clusters_labeled_by_size = prediction_dataset_thr
centroids_list = []
cluster_size_list = []
else:
print("masking_file:", masking_file)
if isinstance(masking_file, float):
print("No intersecting mask available of the type {} for tomo {}.".format(config.region_mask, tomo_name))
prediction_dataset_thr = prediction_dataset_thr.astype(np.int8)
clusters_labeled_by_size, centroids_list, cluster_size_list = \
get_cluster_centroids(dataset=prediction_dataset_thr,
min_cluster_size=config.min_cluster_size,
max_cluster_size=config.max_cluster_size,
connectivity=config.clustering_connectivity)
else:
mask_indicator = load_tomogram(path_to_dataset=masking_file)
shx, shy, shz = [np.min([shl, shp]) for shl, shp in
zip(mask_indicator.shape, prediction_dataset_thr.shape)]
mask_indicator = mask_indicator[:shx, :shy, :shz]
prediction_dataset_thr = prediction_dataset_thr[:shx, :shy, :shz]
if contact_mode == "intersection":
prediction_dataset_thr = mask_indicator.astype(np.int8) * prediction_dataset_thr.astype(np.int8)
if np.max(prediction_dataset_thr) > 0:
clusters_labeled_by_size, centroids_list, cluster_size_list = \
get_cluster_centroids(dataset=prediction_dataset_thr,
min_cluster_size=config.min_cluster_size,
max_cluster_size=config.max_cluster_size,
connectivity=config.clustering_connectivity)
elif contact_mode == "contact":
if np.max(prediction_dataset_thr) > 0:
clusters_labeled_by_size, centroids_list, cluster_size_list = \
get_cluster_centroids_in_contact(dataset=prediction_dataset_thr,
min_cluster_size=config.min_cluster_size,
max_cluster_size=config.max_cluster_size,
contact_mask=mask_indicator,
connectivity=config.clustering_connectivity)
else:
assert contact_mode == "colocalization"
if np.max(prediction_dataset_thr) > 0:
clusters_labeled_by_size, centroids_list, cluster_size_list = \
get_cluster_centroids_colocalization(dataset=prediction_dataset_thr,
min_cluster_size=config.min_cluster_size,
max_cluster_size=config.max_cluster_size,
contact_mask=mask_indicator,
tol_contact=config.contact_distance,
connectivity=config.clustering_connectivity)
clusters_output_path = get_post_processed_prediction_path(output_dir=config.output_dir, model_name=model_name,
tomo_name=tomo_name, semantic_class=config.pred_class)
print("clusters_output_path", clusters_output_path)
clusters_output = 1*(clusters_labeled_by_size > 0)
write_tomogram(output_path=clusters_output_path, tomo_data=clusters_output)
os.makedirs(tomo_output_dir, exist_ok=True)
if calculate_motl:
motl_name = "motl_" + str(len(centroids_list)) + ".csv"
print("motl_name:", motl_name)
motl_file_name = os.path.join(tomo_output_dir, motl_name)
if len(centroids_list) > 0:
motive_list_df = build_tom_motive_list(
list_of_peak_coordinates=centroids_list,
list_of_peak_scores=cluster_size_list, in_tom_format=False)
motive_list_df.to_csv(motl_file_name, index=False, header=False)
print("Motive list saved in", motl_file_name)
else:
print("Saving empty list!")
motive_list_df = pd.DataFrame({})
import requests as req
import pandas as pd
import datetime
import matplotlib.pyplot as plt
trades_df = pd.read_csv('trades.csv')
trades_df['DateOfTrade']= pd.to_datetime(trades_df['DateOfTrade'])
trades_df = trades_df.sort_values(by='DateOfTrade')
comp_columns = ['StartDate','EndDate','CCY','Amount']
composition = pd.DataFrame(columns=comp_columns)
for trade in trades_df.itertuples():
date_of_trade = getattr(trade,'DateOfTrade')
ccy = getattr(trade,'CCY')
amount = getattr(trade,'Amount')
if ccy not in composition['CCY'].to_list():
start_date = date_of_trade
end_date = datetime.datetime(2050, 12, 31)  # pd.datetime was removed in pandas 1.0; use the stdlib module
new_trade_df = pd.DataFrame({'StartDate':[start_date],'EndDate':[end_date],'CCY':[ccy],'Amount':[amount]})
composition = pd.concat([composition,new_trade_df])
else:
current_ccy = (composition['EndDate'] == '2050-12-31') & (composition['CCY'] == ccy)
if date_of_trade > composition.loc[current_ccy, 'StartDate'].iloc[0]:
new_amount = composition.loc[current_ccy, 'Amount'].iloc[0] + amount
if new_amount==0: #deletion
composition.loc[current_ccy, 'EndDate'] = date_of_trade
else:
composition.loc[current_ccy,'EndDate'] = date_of_trade
new_trade_df = pd.DataFrame({'StartDate': [date_of_trade], 'EndDate': [end_date], 'CCY': [ccy], 'Amount': [new_amount]})
composition = pd.concat([composition, new_trade_df])
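# Assumed layout of trades.csv (illustration only, inferred from the column
# accesses above):
# DateOfTrade,CCY,Amount
# 2021-01-04,EUR,1000000
# 2021-02-10,EUR,-250000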
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
import IMLearn.utils.utils as utils
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.io as pio
from IMLearn.learners.regressors.linear_regression import LinearRegression
pio.templates.default = "simple_white"
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 1000)
pd.set_option('display.colheader_justify', 'center')
pd.set_option('display.precision', 3)
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
houses_df = pd.read_csv(filename)
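# Minimal continuation sketch (assumption -- the concrete preprocessing is
# not shown in this excerpt): drop rows without a price, then split the
# design matrix from the response.
# houses_df = houses_df.dropna(subset=['price'])
# y = houses_df['price']
# X = houses_df.drop(columns=['price'])
# return X, y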
# -*- coding: utf-8 -*-
from datetime import timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import (Timedelta,
period_range, Period, PeriodIndex,
_np_version_under1p10)
import pandas.core.indexes.period as period
class TestPeriodIndexArithmetic(object):
def test_pi_add_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = pi + offs
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
tm.assert_index_equal(res, expected)
unanchored = np.array([pd.offsets.Hour(n=1),
pd.offsets.Minute(n=-2)])
with pytest.raises(period.IncompatibleFrequency):
pi + unanchored
with pytest.raises(TypeError):
unanchored + pi
@pytest.mark.xfail(reason='GH#18824 radd doesnt implement this case')
def test_pi_radd_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = offs + pi
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
tm.assert_index_equal(res, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + delta
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng += delta
def test_pi_add_int(self, one):
# Variants of `one` for #19012
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + one
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += one
tm.assert_index_equal(rng, expected)
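# Note: `one` is a pytest fixture parametrized over integer-1 variants
# (python int, np.int64, ...; see GH#19012 referenced above), so the
# addition is exercised once per integer-like type.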
@pytest.mark.parametrize('five', [5, np.array(5, dtype=np.int64)])
def test_sub(self, five):
rng = period_range('2007-01', periods=50)
result = rng - five
exp = rng + (-five)
tm.assert_index_equal(result, exp)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with pytest.raises(TypeError):
rng - other
with pytest.raises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00',
freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + delta
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
class TestPeriodIndexSeriesMethods(object):
""" Test PeriodIndex and Period Series Ops consistency """
def _check(self, values, func, expected):
idx = pd.PeriodIndex(values)
result = func(idx)
if isinstance(expected, pd.Index):
tm.assert_index_equal(result, expected)
else:
# comp op results in bool
tm.assert_numpy_array_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_ops(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
expected = PeriodIndex(['2011-03', '2011-04',
'2011-05', '2011-06'], freq='M', name='idx')
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx + 2, lambda x: x - 2, idx)
result = idx - Period('2011-01', freq='M')
exp = pd.Index([0, 1, 2, 3], name='idx')
tm.assert_index_equal(result, exp)
result = Period('2011-01', freq='M') - idx
exp = pd.Index([0, -1, -2, -3], name='idx')
tm.assert_index_equal(result, exp)
def test_pi_ops_errors(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
s = pd.Series(idx)
msg = r"unsupported operand type\(s\)"
for obj in [idx, s]:
for ng in ["str", 1.5]:
with tm.assert_raises_regex(TypeError, msg):
obj + ng
with pytest.raises(TypeError):
# error message differs between PY2 and 3
ng + obj
with tm.assert_raises_regex(TypeError, msg):
obj - ng
with pytest.raises(TypeError):
np.add(obj, ng)
if _np_version_under1p10:
assert np.add(ng, obj) is NotImplemented
else:
with pytest.raises(TypeError):
np.add(ng, obj)
with pytest.raises(TypeError):
np.subtract(obj, ng)
if _np_version_under1p10:
assert np.subtract(ng, obj) is NotImplemented
else:
with pytest.raises(TypeError):
np.subtract(ng, obj)
def test_pi_ops_nat(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
expected = PeriodIndex(['2011-03', '2011-04',
'NaT', '2011-06'], freq='M', name='idx')
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx, lambda x: np.add(x, 2), expected)
self._check(idx + 2, lambda x: x - 2, idx)
self._check(idx + 2, lambda x: np.subtract(x, 2), idx)
# freq with mult
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='2M', name='idx')
expected = PeriodIndex(['2011-07', '2011-08',
'NaT', '2011-10'], freq='2M', name='idx')
self._check(idx, lambda x: x + 3, expected)
self._check(idx, lambda x: 3 + x, expected)
self._check(idx, lambda x: np.add(x, 3), expected)
self._check(idx + 3, lambda x: x - 3, idx)
self._check(idx + 3, lambda x: np.subtract(x, 3), idx)
def test_pi_ops_array_int(self):
idx = PeriodIndex(['2011-01', '2011-02', 'NaT',
'2011-04'], freq='M', name='idx')
f = lambda x: x + np.array([1, 2, 3, 4])
exp = PeriodIndex(['2011-02', '2011-04', 'NaT',
'2011-08'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: np.add(x, np.array([4, -1, 1, 2]))
exp = PeriodIndex(['2011-05', '2011-01', 'NaT',
'2011-06'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: x - np.array([1, 2, 3, 4])
exp = PeriodIndex(['2010-12', '2010-12', 'NaT',
'2010-12'], freq='M', name='idx')
self._check(idx, f, exp)
f = lambda x: np.subtract(x, np.array([3, 2, 3, -2]))
exp = PeriodIndex(['2010-10', '2010-12', 'NaT',
'2011-06'], freq='M', name='idx')
self._check(idx, f, exp)
def test_pi_ops_offset(self):
idx = PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01',
'2011-04-01'], freq='D', name='idx')
f = lambda x: x + pd.offsets.Day()
exp = PeriodIndex(['2011-01-02', '2011-02-02', '2011-03-02',
'2011-04-02'], freq='D', name='idx')
self._check(idx, f, exp)
f = lambda x: x + pd.offsets.Day(2)
exp = PeriodIndex(['2011-01-03', '2011-02-03', '2011-03-03',
'2011-04-03'], freq='D', name='idx')
self._check(idx, f, exp)
f = lambda x: x - pd.offsets.Day(2)
exp = PeriodIndex(['2010-12-30', '2011-01-30', '2011-02-27',
'2011-03-30'], freq='D', name='idx')
self._check(idx, f, exp)
def test_pi_offset_errors(self):
idx = PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01',
'2011-04-01'], freq='D', name='idx')
s = pd.Series(idx)
# Series op is applied per Period instance, thus error is raised
# from Period
msg_idx = r"Input has different freq from PeriodIndex\(freq=D\)"
msg_s = r"Input cannot be converted to Period\(freq=D\)"
for obj, msg in [(idx, msg_idx), (s, msg_s)]:
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
obj + pd.offsets.Hour(2)
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
pd.offsets.Hour(2) + obj
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
obj - pd.offsets.Hour(2)
def test_pi_sub_period(self):
# GH 13071
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
result = idx - pd.Period('2012-01', freq='M')
exp = pd.Index([-12, -11, -10, -9], name='idx')
tm.assert_index_equal(result, exp)
result = np.subtract(idx, pd.Period('2012-01', freq='M'))
tm.assert_index_equal(result, exp)
result = pd.Period('2012-01', freq='M') - idx
# JADESOUND FP
# 20211207
import numpy as np
import pandas as pd
import scipy as sp
import pickle
import scikits.bootstrap as bootstrap
#if things start breaking with no warning comment out these two lines
import warnings
warnings.filterwarnings('ignore')
import statsmodels.api as sm
import statsmodels.stats as smstats
import os
import csv
from state_abr import abr
import matplotlib.pyplot as plt
def leastSquares(X, y):
olsFit = sm.regression.linear_model.OLS(y, sm.add_constant(X), missing='drop').fit()
return olsFit
def summStat(r):
print(r.summary())
return
def descrStats(data):
descr = smstats.descriptivestats.describe(data)
return descr
def pearson(X, y):
"""
Parameters
----------
X : Column of DataFrame
Variable 1
y : Column of DataFrame
Variable 2
Returns
-------
pearson_p : tuple
Value one = Pearson's correlation coefficient
Value two = two-tailed p-value
"""
pearson_p = sp.stats.pearsonr(X, y)
return pearson_p
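# Example (illustration): correlating two numeric columns of a DataFrame.
# r, p = pearson(df['X'], df['y'])  # r in [-1, 1]; p is the two-tailed p-value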
def calc_county_percents(data, pop_data, variable):
outdata = pd.DataFrame(columns=["State", "Percentage", "Confidence Interval", "Population"])
state_df = pop_data[["State","County","Value"]]
state_df['State'].str.strip()
var_df = data.loc[data["Variable_Code"] == variable][["State", "County", "Variable_Code", "Value"]]
for state in abr.values():
try:
var_counts = var_df.loc[var_df["State"] == state]
pop_counts = state_df.loc[state_df["State"] == state][["County", "Value"]]
merged = pd.merge(var_counts, pop_counts, on="County")
merged["Normalized"] = merged["Value_x"] * merged["Value_y"]
normalized_total = merged["Normalized"].sum()
total_pop = pop_counts["Value"].sum()
perc_pop = normalized_total/total_pop
ci = bootstrap.ci(var_counts["Value"])
except KeyError:
total_pop = 0
ci = [0, 0]
perc_pop = 0
new_line = {"State":state, "Percentage":perc_pop, "Confidence Interval":ci, "Population":total_pop}
outdata = outdata.append(new_line, ignore_index=True)
return outdata
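# Note on calc_county_percents: county values are weighted by county
# population (Value_x * Value_y), summed, and divided by the state
# population, i.e. a population-weighted state percentage; the bootstrap
# confidence interval is computed on the unweighted county values.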
def process_indicator_data(data):
outdata = pd.DataFrame(columns=["State", "Percentage", "Confidence Interval"])
for state in abr.keys():
state_abr = abr[state]
try:
state_data = data.loc[data["State"] == state]
percent = state_data["Value"].sum()/len(state_data["Value"])
ci = bootstrap.ci(state_data["Value"])
except:
percent = 0
ci = [0,0]
new_line = {"State":state_abr, "Percentage":percent, "Confidence Interval":ci}
outdata = outdata.append(new_line, ignore_index=True)
return outdata
def scatter_plot(x, y, x_axis, y_axis, title):
x = x.sort_values(by="State")
y = y.sort_values(by="State")
plt.scatter(x["Percentage"], y["Percentage"])
results = leastSquares(x["Percentage"], y["Percentage"])
print(title)
print(results.summary())
plt.plot(x['Percentage'], x['Percentage']*results.params[1] + results.params[0])
text = 'p-value=' + str(round(results.pvalues[1],5))
plt.text(x["Percentage"].max()-4, y['Percentage'].min(), text)
plt.xlabel(x_axis)
plt.ylabel(y_axis)
plt.title(title)
plt.show()
def test():
d = [1,2,3,4,5,6,7,8,9,10]
d1 = [10,9,8,7,6,5,4,3,2,1]
dataframe = {'X':d,"y":d1}
df = pd.DataFrame(dataframe)
fit = leastSquares(d, d1)
summStat(fit)
descriptions = descrStats(df)
print(descriptions)
if __name__ == '__main__':
indicator_seven_days = pd.read_csv("data/Indicators_of_Anxiety_or_Depression_Based_on_Reported_Frequency_of_Symptoms_During_Last_7_Days.csv")
indicator_four_weeks = pd.read_csv("data/Indicators_of_Reduced_Access_to_Care_Due_to_the_Coronavirus_Pandemic_During_Last_4_Weeks.csv")
state_and_county = | pd.read_csv("data/StateAndCountyData.csv") | pandas.read_csv |
from src.typeDefs.section_1_7.section_1_7_2 import ISection_1_7_2
import datetime as dt
from src.repos.metricsData.metricsDataRepo import MetricsDataRepo
import pandas as pd
def fetchSection1_7_2Context(appDbConnStr: str, startDt: dt.datetime, endDt: dt.datetime) -> ISection_1_7_2:
mRepo = MetricsDataRepo(appDbConnStr)
# get voltage data for this month
maxVoltData = mRepo.getDailyVoltDataByLevel(400, "Max", startDt, endDt)
maxVoltDf = pd.DataFrame(maxVoltData)
maxVoltDf["data_val"] = pd.to_numeric(
maxVoltDf["data_val"], errors='coerce')
maxVoltSeries = maxVoltDf.groupby("entity_name").apply(getMax)
maxVoltSeries = maxVoltSeries.round()
maxVoltSeries = maxVoltSeries.rename("max_vol")
minVoltData = mRepo.getDailyVoltDataByLevel(400, "Min", startDt, endDt)
minVoltDf = pd.DataFrame(minVoltData)
minVoltDf["data_val"] = pd.to_numeric(
minVoltDf["data_val"], errors='coerce')
minVoltSeries = minVoltDf.groupby("entity_name").apply(getMin)
minVoltSeries = minVoltSeries.round()
minVoltSeries = minVoltSeries.rename("min_vol")
lessVoltPercData = mRepo.getDailyVoltDataByLevel(
400, "%Time <380 or 728", startDt, endDt)
lessVoltPercDf = pd.DataFrame(lessVoltPercData)
lessVoltPercDf["data_val"] = pd.to_numeric(
lessVoltPercDf["data_val"], errors='coerce')
lessVoltPercSeries = lessVoltPercDf.groupby("entity_name").apply(getMean)
lessVoltPercSeries = lessVoltPercSeries.round(2)
lessVoltPercSeries = lessVoltPercSeries.rename("less_perc")
bandVoltPercData = mRepo.getDailyVoltDataByLevel(
400, "%Time within IEGC Band", startDt, endDt)
bandVoltPercDf = | pd.DataFrame(bandVoltPercData) | pandas.DataFrame |
import IndeedInfoGetter
import PopulationCPIHolder
import pandas
class IndexComputer:
@staticmethod
def ComputeIndex(Skill):
CompleteObjArray = []
print(type(PopulationCPIHolder.PopulationCPIHolder.getPopCpi()))
colOne = []
colTwo = []
for x in PopulationCPIHolder.PopulationCPIHolder.getPopCpi():
x.numOfJobs = IndeedInfoGetter.IndeedInfoGetter.getNumberOfJobs(Skill, x.City.split(', ')[0], x.City.split(', ')[1])
x.avgSalary = IndeedInfoGetter.IndeedInfoGetter.getAverageSalary(Skill, x.City.split(', ')[0], x.City.split(', ')[1])
colOne.append(float(x.numOfJobs.replace(',',''))/x.Population)
colTwo.append(x.avgSalary/x.CPI)
CompleteObjArray.append(x)
df = | pandas.DataFrame(data={'One':colOne,'Two':colTwo}) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # Breast Cancer Detection
# In[1]:
import warnings
warnings.filterwarnings('ignore')
# In[2]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# In[3]:
df = pd.read_csv("breast.csv")
# In[4]:
df
# In[5]:
df.head()
# In[6]:
df.columns
# In[7]:
df.info()
# In[8]:
df['Unnamed: 32']
# In[9]:
df = df.drop("Unnamed: 32", axis=1)
# In[10]:
df.head()
# In[11]:
df.drop('id', axis=1, inplace=True)
# In[12]:
l=list(df.columns)
l
# In[13]:
df.head(2)
# In[14]:
df['diagnosis'].unique()
# In[15]:
sns.countplot(df['diagnosis'], label="Count",);
# In[16]:
df['diagnosis'].value_counts()
# In[17]:
df.shape
# # Explore The Data
# In[18]:
df.describe()
# In[19]:
#correlation plot
corr = df.corr()
corr
# In[20]:
corr.shape
# In[21]:
plt.figure(figsize=(10,10))
sns.heatmap(corr);
# In[22]:
#sns.pairplot(df)
#plt.show()
# In[23]:
df.head()
# In[24]:
df['diagnosis'] = df['diagnosis'].map({'M':1, 'B':0})
df.to_csv('tits.csv')
df.head()
# In[25]:
df['diagnosis'].unique()
# In[26]:
X=df.drop('diagnosis',axis=1)
X.head()
# In[27]:
y=df['diagnosis']
y.head()
# # Train Test Split
# In[28]:
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3)
# In[29]:
print(X_train.shape ,X_test.shape)
print(y_train.shape, y_test.shape)
# In[30]:
X_train.head(1)
# In[31]:
from sklearn.preprocessing import StandardScaler
sc= StandardScaler()
X_train=sc.fit_transform(X_train)
X_test=sc.transform(X_test)
# In[32]:
X_train
# In[33]:
X_test
# # Machine learning Models
# ## Logistic Regression
# In[34]:
from sklearn.linear_model import LogisticRegression
lr= LogisticRegression(random_state = 5)
lr.fit(X_train,y_train)
# In[35]:
y_pred = lr.predict(X_test)
y_pred
# In[36]:
y_test
# In[37]:
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
cm = confusion_matrix(y_test, y_pred)
print('Confusion Matrix')
print(cm)
print("Accuracy Score : ",accuracy_score(y_test, y_pred))
print(classification_report(y_test,y_pred, digits=5))
# In[38]:
lr_acc = accuracy_score(y_test, y_pred)
# In[39]:
results = pd.DataFrame()
results
# In[40]:
tempResult = pd.DataFrame({'Algorithm':['Logistic Regression Method'], 'Accuracy':[lr_acc]})
results = pd.concat([results, tempResult])
results
# # Decision Tree Classifier
# In[41]:
from sklearn.tree import DecisionTreeClassifier
dtc = DecisionTreeClassifier()
dtc.fit(X_train, y_train)
# In[42]:
y_pred = dtc.predict(X_test)
y_pred
# In[43]:
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
cm = confusion_matrix(y_test, y_pred)
print('Confusion Matrix')
print(cm)
print("Accuracy Score : ",accuracy_score(y_test, y_pred))
print(classification_report(y_test,y_pred, digits=5))
# In[44]:
dtc_acc = accuracy_score(y_test, y_pred)
# In[45]:
tempResult = pd.DataFrame({'Algorithm':['Decision Tree Classifier Method'], 'Accuracy':[dtc_acc]})
results = pd.concat([results, tempResult])
results = results[['Algorithm','Accuracy']]
results
# # Random Forest Classifier
# In[46]:
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)
rfc.fit(X_train, y_train)
# In[47]:
y_pred = rfc.predict(X_test)
y_pred
# In[48]:
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
cm = confusion_matrix(y_test, y_pred)
print('Confusion Matrix')
print(cm)
print("Accuracy Score : ",accuracy_score(y_test, y_pred))
print(classification_report(y_test,y_pred, digits=5))
# In[49]:
rfc_acc = accuracy_score(y_test, y_pred)
print(rfc_acc)
# In[50]:
tempResults = pd.DataFrame({'Algorithm':['Random Forest Classifier Method'], 'Accuracy':[rfc_acc]})
results = pd.concat( [results, tempResults] )
results = results[['Algorithm','Accuracy']]
results
# # Support Vector Classifier
# In[51]:
from sklearn import svm
svc = svm.SVC()
svc.fit(X_train,y_train)
# In[52]:
y_pred = svc.predict(X_test)
y_pred
# In[53]:
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
cm = confusion_matrix(y_test, y_pred)
print('Confusion Matrix')
print(cm)
print("Accuracy Score : ",accuracy_score(y_test, y_pred))
print(classification_report(y_test,y_pred, digits=5))
# In[54]:
svc_acc = accuracy_score(y_test, y_pred)
print(svc_acc)
# In[55]:
tempResults = pd.DataFrame({'Algorithm':['Support Vector Classifier Method'], 'Accuracy':[svc_acc]})
results = pd.concat( [results, tempResults] )
results = results[['Algorithm','Accuracy']]
results
# # KNN Classifier
# In[56]:
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors = 3, metric = 'euclidean', p = 2)
knn.fit(X_train, y_train)
# In[57]:
y_pred = knn.predict(X_test)
y_pred
# In[58]:
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
cm = confusion_matrix(y_test, y_pred)
print('Confusion Matrix')
print(cm)
print("Accuracy Score : ",accuracy_score(y_test, y_pred))
print(classification_report(y_test,y_pred, digits=5))
# In[59]:
knn_acc = accuracy_score(y_test, y_pred)
print(knn_acc)
# In[60]:
tempResults = | pd.DataFrame({'Algorithm':['K-Nearest-Neighbor Classification Method'], 'Accuracy':[knn_acc]}) | pandas.DataFrame |
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
from pandas._libs.tslibs.period import IncompatibleFrequency
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import PeriodArray, period_array
@pytest.mark.parametrize(
"data, freq, expected",
[
([pd.Period("2017", "D")], None, [17167]),
([pd.Period("2017", "D")], "D", [17167]),
([2017], "D", [17167]),
(["2017"], "D", [17167]),
([pd.Period("2017", "D")], pd.tseries.offsets.Day(), [17167]),
([pd.Period("2017", "D"), None], None, [17167, iNaT]),
(pd.Series(pd.date_range("2017", periods=3)), None, [17167, 17168, 17169]),
(pd.date_range("2017", periods=3), None, [17167, 17168, 17169]),
(pd.period_range("2017", periods=4, freq="Q"), None, [188, 189, 190, 191]),
],
)
def test_period_array_ok(data, freq, expected):
result = period_array(data, freq=freq).asi8
expected = np.asarray(expected, dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
def test_period_array_readonly_object():
# https://github.com/pandas-dev/pandas/issues/25403
pa = period_array([pd.Period("2019-01-01")])
arr = np.asarray(pa, dtype="object")
arr.setflags(write=False)
result = period_array(arr)
tm.assert_period_array_equal(result, pa)
result = pd.Series(arr)
tm.assert_series_equal(result, pd.Series(pa))
result = pd.DataFrame({"A": arr})
tm.assert_frame_equal(result, pd.DataFrame({"A": pa}))
def test_from_datetime64_freq_changes():
# https://github.com/pandas-dev/pandas/issues/23438
arr = pd.date_range("2017", periods=3, freq="D")
result = PeriodArray._from_datetime64(arr, freq="M")
expected = period_array(["2017-01-01", "2017-01-01", "2017-01-01"], freq="M")
tm.assert_period_array_equal(result, expected)
@pytest.mark.parametrize(
"data, freq, msg",
[
(
[pd.Period("2017", "D"), pd.Period("2017", "A")],
None,
"Input has different freq",
),
([pd.Period("2017", "D")], "A", "Input has different freq"),
],
)
def test_period_array_raises(data, freq, msg):
with pytest.raises(IncompatibleFrequency, match=msg):
period_array(data, freq)
def test_period_array_non_period_series_raises():
ser = | pd.Series([1, 2, 3]) | pandas.Series |
from django.db.models import Q
from django.db import models
from django.db.utils import OperationalError
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.utils.functional import lazy
from django.core.cache import cache
from django.core.validators import MinValueValidator,MaxValueValidator
import datetime
import sys
import os
import json
from EventsAPP.consumers import PublishEvent
from django.dispatch import receiver
from django.db.models.signals import pre_save,post_save,post_delete,pre_delete
from django.contrib.contenttypes.fields import GenericRelation
from MainAPP.constants import REGISTERS_DB_PATH,SUBSYSTEMS_CHOICES
import MainAPP.signals
import utils.BBDD
import pandas as pd
import numpy as np
import logging
from abc import abstractstaticmethod
logger = logging.getLogger("project")
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
# settings from https://steelkiwi.com/blog/practical-application-singleton-design-pattern/
class SingletonModel(models.Model):
class Meta:
abstract = True
def save(self, *args, **kwargs):
self.pk = 1
super(SingletonModel, self).save(*args, **kwargs)
self.set_cache()
def set_cache(self):
cache.set(self.__class__.__name__, self)
@classmethod
def checkIfExists(cls):
try:
obj = cls.objects.get(pk=1)
return True
except cls.DoesNotExist:
return False
@classmethod
def load(cls):
if cache.get(cls.__name__) is None:
obj, created = cls.objects.get_or_create(pk=1)
if not created:
obj.set_cache()
return cache.get(cls.__name__)
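# Usage sketch for the singleton pattern above (mirrors how it is consumed elsewhere in
# this module, e.g. SETTINGS = cls.load() in SiteSettings.onBootTasks):
#   SETTINGS = SiteSettings.load()  # returns the cached pk=1 row, creating it if needed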
class SiteSettings(SingletonModel):
class Meta:
verbose_name = _('Settings')
FACILITY_NAME= models.CharField(verbose_name=_('Name of the installation'),max_length=100,
help_text=_('Descriptive name for the installation.'),default='My house')
SITE_DNS= models.CharField(verbose_name=_('Name of the domain to access the application'),
help_text=_('This is the DNS address that gives access to the application from the internet.'),
max_length=100,default='myDIY4dot0House.net')
VERSION_AUTO_DETECT=models.BooleanField(verbose_name=_('Autodetect new software releases'),
help_text=_('Automatically checks the repository for new software'),default=True)
VERSION_AUTO_UPDATE=models.BooleanField(verbose_name=_('Apply automatically new software releases'),
help_text=_('Automatically updates to (and applies) the latest software'),default=False)
VERSION_CODE= models.CharField(verbose_name=_('Code of the version of the application framework'),
max_length=100,default='',blank=True)
VERSION_DEVELOPER=models.BooleanField(verbose_name=_('Follow the beta development versions'),
help_text=_('Tracks the development versions (may result in unstable behaviour)'),default=False)
NTPSERVER_RESTART_TIMEDELTA=models.PositiveSmallIntegerField(verbose_name=_('NTP server restart time delta'),
help_text=_('Time difference in minutes that will trigger a restart of the NTP server'),default=5)
WIFI_SSID= models.CharField(verbose_name=_('WIFI network identificator'),
help_text=_('This is the name of the WiFi network generated to communicate with the slaves'),
max_length=50,default='DIY4dot0')
WIFI_PASSW= models.CharField(verbose_name=_('WIFI network passphrase'),
help_text=_('This is the encryption password for the WIFI network'),
max_length=50,default='<PASSWORD>')
WIFI_IP= models.GenericIPAddressField(verbose_name=_('IP address for the WIFI network'),
help_text=_('This is the IP address for the WiFi network generated to communicate with the slaves'),
protocol='IPv4', default='10.10.10.1')
WIFI_MASK= models.GenericIPAddressField(verbose_name=_('WIFI network mask'),
help_text=_('This is the mask of the WiFi network generated to communicate with the slaves'),
protocol='IPv4', default='255.255.255.0')
WIFI_GATE= models.GenericIPAddressField(verbose_name=_('WIFI network gateway'),
help_text=_('This is the gateway for the WiFi network generated to communicate with the slaves'),
protocol='IPv4', default='10.10.10.1')
ETH_DHCP=models.BooleanField(verbose_name=_('Enable DHCP on the LAN network'),
help_text=_('Includes the server in the DHCP pool'),default=True)
ETH_IP= models.GenericIPAddressField(verbose_name=_('IP address for the LAN network'),
help_text=_('This is the IP for the LAN network that is providing the internet access.'),
protocol='IPv4', default='172.16.31.10')
ETH_MASK= models.GenericIPAddressField(verbose_name=_('Mask for the LAN network'),
help_text=_('This is the mask for the LAN network that is providing the internet access.'),
protocol='IPv4', default='255.255.255.0')
ETH_GATE= models.GenericIPAddressField(verbose_name=_('Gateway of the LAN network'),
help_text=_('This is the gateway IP of the LAN network that is providing the internet access.'),
protocol='IPv4', default='1.1.1.1')
PROXY_AUTO_DENYIP=models.BooleanField(verbose_name=_('Enable automatic IP blocking'),
help_text=_('Feature that blocks automatically WAN IPs with more than certain denied attempts in 24 h.'),default=True)
AUTODENY_ATTEMPTS=models.PositiveSmallIntegerField(verbose_name=_('Number of denied attempts needed to block an IP'),
help_text=_('The number of denied accesses in 24h that will result in an IP being blocked.'),default=40)
PROXY_CREDENTIALS=models.BooleanField(verbose_name=_('Require credentials to access the server'),
help_text=_('Increased access security by including an additional barrier on the proxy.'),default=True)
PROXY_USER1=models.CharField(verbose_name=_('Username 1'),
max_length=10,help_text=_('First username enabled to get through the proxy barrier.'),default='user1')
PROXY_PASSW1=models.CharField(verbose_name=_('Password for username 1'),
max_length=10,help_text=_('First username password.'),default='<PASSWORD>')
PROXY_USER2=models.CharField(verbose_name=_('Username 2'),
max_length=10,help_text=_('First username enabled to get through the proxy barrier.'),default='user2')
PROXY_PASSW2=models.CharField(verbose_name=_('Password for username 2'),
max_length=10,help_text=_('First username password.'),default='<PASSWORD>')
TELEGRAM_TOKEN=models.CharField(verbose_name=_('Token for the telegram bot'),blank=True,
max_length=100,help_text=_('The token assigned by the BothFather'),default='')
TELEGRAM_CHATID=models.CharField(verbose_name=_('Chat ID'),blank=True,
max_length=100,help_text=_('The ID of the chat to use'),default='')
IBERDROLA_USER=models.CharField(verbose_name=_('Iberdrola username'),blank=True,
max_length=50,help_text=_('Username registered into the Iberdrola Distribucion webpage'),default='')
IBERDROLA_PASSW=models.CharField(verbose_name=_('Iberdrola password'),blank=True,
max_length=50,help_text=_('Password registered on the Iberdrola Distribucion webpage'),default='')
OWM_TOKEN=models.CharField(verbose_name=_('Token for the openweathermap page'),blank=True,
        max_length=100,help_text=_('The token assigned by the OpenWeatherMap service. You should request yours following https://openweathermap.org/appid'),default='')
ESIOS_TOKEN=models.CharField(verbose_name=_('Token for the ESIOS page'),blank=True,
max_length=100,help_text=_('The token assigned by the ESIOS service. You should ask for yours to: Consultas Sios <consult<EMAIL>>'),default='')
def store2DB(self,update_fields=None):
try:
self.save(update_fields=update_fields)
except OperationalError:
logger.error("Operational error on Django. System restarted")
import os
os.system("sudo reboot")
if update_fields!=None:
self.applyChanges(update_fields=update_fields)
@classmethod
def onBootTasks(cls):
cls.checkInternetConnection()
IP=cls.getMyLANIP()
SETTINGS=cls.load()
if IP != SETTINGS.ETH_IP:
SETTINGS.applyChanges(update_fields=['ETH_IP',])
def dailyTasks(self):
self.checkRepository()
self.checkDeniableIPs(attempts=self.AUTODENY_ATTEMPTS,hours=24)
def hourlyTasks(self):
self.checkDeniableIPs(attempts=self.AUTODENY_ATTEMPTS/10,hours=1)
def set_TELEGRAM_CHATID(self,value):
self.TELEGRAM_CHATID=str(value)
self.store2DB()
@staticmethod
def getMyLANIP():
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect(('172.16.31.10', 1027))
except socket.error:
return None
return s.getsockname()[0]
@staticmethod
def checkInternetConnection():
import requests
try:
r = requests.get('http://google.com',timeout=1)
if r.status_code==200:
return True
else:
return False
except:
return False
def checkRepository(self,force=False):
from django.core.cache import cache
cache.set(key='loading',value=True,timeout=60)
if self.VERSION_AUTO_DETECT or force:
from utils.GitHub import checkDeveloperUpdates,checkReleaseUpdates,updateDeveloper,updateRelease
from .constants import GIT_PATH
if self.VERSION_DEVELOPER:
release=checkDeveloperUpdates(root=GIT_PATH)
else:
release=checkReleaseUpdates(root=GIT_PATH,currentVersion=self.VERSION_CODE)
if release['tag']!=None:
self.VERSION_CODE=release['tag']
self.save(update_fields=['VERSION_CODE',])
if release['update'] and (self.VERSION_AUTO_UPDATE or force):
from utils.Watchdogs import WATCHDOG
from DevicesAPP.constants import POLLING_WATCHDOG_TIMER,POLLING_WATCHDOG_VAR
#process=WATCHDOG(name='PollingWatchdog',interval=POLLING_WATCHDOG_TIMER,cachevar=POLLING_WATCHDOG_VAR)
#process.pause()
try:
if self.VERSION_DEVELOPER:
revision=updateDeveloper(root=GIT_PATH)
else:
revision=updateRelease(root=GIT_PATH,tag=release['tag'])
if revision!=None:
self.VERSION_CODE=revision
self.save(update_fields=['VERSION_CODE',])
except Exception as exc:
logger.error('Error checking repository: ' + str(exc))
#process.resume()
cache.set(key='loading',value=False,timeout=None)
def addressInNetwork(self,ip2check):
        "Is an address from the ETH network"
        import ipaddress
CIDR=sum([bin(int(x)).count("1") for x in self.ETH_MASK.split(".")])
host = ipaddress.ip_interface(self.ETH_IP+'/'+str(CIDR))
return ipaddress.ip_address(ip2check) in host.network.hosts()
def checkDeniableIPs(self,attempts,hours):
if self.PROXY_AUTO_DENYIP:
from utils.combinedLog import CombinedLogParser
updated=False
instance=CombinedLogParser()
for element in instance.getNginxAccessIPs(hours=int(hours),codemin=400):
if element['trials']>=attempts and not self.addressInNetwork(ip2check=element['IP']):
from utils.Nginx import NginxManager
if NginxManager.blockIP(IP=element['IP'])!=-1:
updated=True
if updated:
NginxManager.reload()
@staticmethod
def update_hostapd(WIFI_SSID,WIFI_PASSW,updated):
from .constants import HOSTAPD_CONF_PATH,HOSTAPD_GENERIC_CONF_PATH
try:
f1 = open(HOSTAPD_GENERIC_CONF_PATH, 'r')
open(HOSTAPD_CONF_PATH, 'w').close() # deletes the contents
f2 = open(HOSTAPD_CONF_PATH, 'w')
except:
text=_('Error opening the file ') + HOSTAPD_CONF_PATH
PublishEvent(Severity=2,Text=text,Persistent=True,Code='FileIOError-0')
return
for line in f1:
f2.write(line.replace('WIFI_SSID', WIFI_SSID)
.replace('WIFI_PASSW', WIFI_PASSW))
f1.close()
f2.close()
if 'WIFI_SSID' in updated:
text='Modified Hostapd field WIFI_SSID to ' + str(WIFI_SSID)
PublishEvent(Severity=0,Text=text,Persistent=True,Code='Hostapd-WIFI_SSID')
if 'WIFI_PASSW' in updated:
text='Modified Hostapd field WIFI_PASSW to ' + str(WIFI_PASSW)
PublishEvent(Severity=0,Text=text,Persistent=True,Code='Hostapd-WIFI_PASSW')
@staticmethod
def execute_certbot():
from subprocess import Popen, PIPE
from .constants import CERTBOT_PATH
cmd='sudo ' + CERTBOT_PATH + ' --nginx --no-self-upgrade'
process = Popen(cmd, shell=True,
stdout=PIPE,stdin=PIPE, stderr=PIPE,universal_newlines=True)
stdout, err = process.communicate(input='1')
if 'Some challenges have failed.' in err:
text=_('Some challenge failed. Check that the domain is directed to the WAN IP and the port 80 is directed to the DIY4dot0 server')
PublishEvent(Severity=0,Text=text,Persistent=True,Code='Certbot-Fail')
@staticmethod
def update_interfaces(ETH_DHCP,ETH_IP,ETH_MASK,ETH_GATE,WIFI_IP,WIFI_MASK,WIFI_GATE,updated):
from .constants import INTERFACES_CONF_PATH,INTERFACES_GENERIC_CONF_PATH
try:
f1 = open(INTERFACES_GENERIC_CONF_PATH, 'r')
open(INTERFACES_CONF_PATH, 'w').close() # deletes the contents
f2 = open(INTERFACES_CONF_PATH, 'w')
except:
text=_('Error opening the file ') + INTERFACES_CONF_PATH
PublishEvent(Severity=2,Text=text,Persistent=True,Code='FileIOError-0')
return
if not ETH_DHCP:
for line in f1:
f2.write(line.replace('ETH_IP', ETH_IP)
.replace('ETH_MASK', ETH_MASK)
.replace('ETH_GATE', ETH_GATE)
.replace('WIFI_IP', WIFI_IP)
.replace('WIFI_MASK', WIFI_MASK)
.replace('WIFI_GATE', WIFI_GATE))
else:
for line in f1:
f2.write(line.replace('iface eth0 inet static', 'iface eth0 inet dhcp')
.replace('address ETH_IP', '')
.replace('netmask ETH_MASK', '')
.replace('gateway ETH_GATE', '')
.replace('WIFI_IP', WIFI_IP)
.replace('WIFI_MASK', WIFI_MASK)
.replace('WIFI_GATE', WIFI_GATE))
f1.close()
f2.close()
if ('ETH_DHCP' in updated) or ('ETH_IP' in updated) or ('ETH_MASK' in updated) or ('ETH_GATE' in updated):
text='Reconfiguring LAN interface eth0'
PublishEvent(Severity=0,Text=text,Persistent=False,Code='Interfaces-ETH_ETH0')
#os.system('sudo ip addr flush eth0')
#os.system('sudo systemctl restart networking')
if ('WIFI_IP' in updated) or ('WIFI_MASK' in updated) or ('WIFI_GATE' in updated):
text='Reconfiguring WIFI interface wlan0'
PublishEvent(Severity=0,Text=text,Persistent=False,Code='Interfaces-ETH_WLAN0')
#os.system('sudo ip addr flush wlan0')
#os.system('sudo systemctl restart networking')
if 'ETH_DHCP' in updated:
text='Modified Interfaces field ETH_DHCP to ' + str(ETH_DHCP)
PublishEvent(Severity=0,Text=text,Persistent=True,Code='Interfaces-ETH_DHCP')
if 'ETH_IP' in updated:
text='Modified Interfaces field ETH_IP to ' + str(ETH_IP)
PublishEvent(Severity=0,Text=text,Persistent=True,Code='Interfaces-ETH_IP')
if 'ETH_MASK' in updated:
text='Modified Interfaces field ETH_MASK to ' + str(ETH_MASK)
PublishEvent(Severity=0,Text=text,Persistent=True,Code='Interfaces-ETH_MASK')
if 'ETH_GATE' in updated:
text='Modified Interfaces field ETH_GATE to ' + str(ETH_GATE)
PublishEvent(Severity=0,Text=text,Persistent=True,Code='Interfaces-ETH_GATE')
if 'WIFI_IP' in updated:
text='Modified Interfaces field WIFI_IP to ' + str(WIFI_IP)
PublishEvent(Severity=0,Text=text,Persistent=True,Code='Interfaces-WIFI_IP')
if 'WIFI_MASK' in updated:
text='Modified Interfaces field WIFI_MASK to ' + str(WIFI_MASK)
PublishEvent(Severity=0,Text=text,Persistent=True,Code='Interfaces-WIFI_MASK')
if 'WIFI_GATE' in updated:
text='Modified Interfaces field WIFI_GATE to ' + str(WIFI_GATE)
PublishEvent(Severity=0,Text=text,Persistent=True,Code='Interfaces-WIFI_GATE')
def applyChanges(self,update_fields):
from django.core.cache import cache
cache.set(key='loading',value=True,timeout=60)
if ('SITE_DNS' in update_fields):
SiteSettings.execute_certbot()
if ('SITE_DNS' in update_fields) or ('ETH_IP' in update_fields):
# update /etc/nginx/sites-available/HomeAutomation.nginxconf
from utils.Nginx import NginxManager
NginxManager.editConfigFile(SITE_DNS=getattr(self,'SITE_DNS'),ETH_IP=getattr(self,'ETH_IP')) # yet does not work, it does not write the file
NginxManager.reload()
# update allowed_hosts in settings.local.env
from .constants import LOCALENV_PATH
self.editUniqueKeyedFile(path=LOCALENV_PATH,key='ALLOWED_HOSTS',delimiter='=',
newValue=getattr(self,'SITE_DNS')+','+getattr(self,'ETH_IP')+',127.0.0.1',
endChar='\n',addKey=True)
if (('ETH_DHCP' in update_fields) or ('ETH_IP' in update_fields) or ('ETH_MASK' in update_fields) or ('ETH_GATE' in update_fields) or
('WIFI_IP' in update_fields) or ('WIFI_MASK' in update_fields) or ('WIFI_GATE' in update_fields)):
SiteSettings.update_interfaces(ETH_DHCP=getattr(self,'ETH_DHCP'),
ETH_IP=getattr(self,'ETH_IP'),ETH_MASK=getattr(self,'ETH_MASK'),
ETH_GATE=getattr(self,'ETH_GATE'),WIFI_IP=getattr(self,'WIFI_IP'),
WIFI_MASK=getattr(self,'WIFI_MASK'),WIFI_GATE=getattr(self,'WIFI_GATE'),
updated=update_fields)
if ('WIFI_SSID' in update_fields) or ('WIFI_PASSW' in update_fields):
SiteSettings.update_hostapd(WIFI_SSID=getattr(self,'WIFI_SSID'),WIFI_PASSW=getattr(self,'WIFI_PASSW')
,updated=update_fields)
# update /etc/nginx/sites-available/HomeAutomation.nginxconf
if ('PROXY_CREDENTIALS' in update_fields):
from utils.Nginx import NginxManager
NginxManager.setProxyCredential(PROXY_CREDENTIALS=getattr(self,'PROXY_CREDENTIALS'))
NginxManager.reload()
if (('PROXY_USER1' in update_fields) or ('PROXY_PASSW1' in update_fields) or
('PROXY_USER2' in update_fields) or ('PROXY_PASSW2' in update_fields)):
if getattr(self,'PROXY_CREDENTIALS'):
from utils.Nginx import NginxManager
NginxManager.createUser(user=self.PROXY_USER1,passw=self.PROXY_PASSW1,firstUser=True)
NginxManager.createUser(user=self.PROXY_USER2,passw=self.PROXY_PASSW2,firstUser=False)
NginxManager.reload()
if ('VERSION_DEVELOPER' in update_fields):
self.checkRepository(force=True)
for field in update_fields:
if field in ['TELEGRAM_TOKEN','IBERDROLA_USER','IBERDROLA_PASSW','OWM_TOKEN','ESIOS_TOKEN']:
value=getattr(self,field).strip()
if value!='':
# update TELEGRAM_TOKEN in settings.local.env
from .constants import LOCALENV_PATH
self.editUniqueKeyedFile(path=LOCALENV_PATH,key=field,delimiter='=',
newValue=value,
endChar='\n',addKey=True)
cache.set(key='loading',value=False,timeout=None)
@staticmethod
def editKeyedFile(path,key,newValue,endChar=' ',nextLine=True):
'''
:param key: determines the text to look for the place to write
:param nextLine: determines if once the key is found, it is the next line where it should write
'''
try:
file = open(path, 'r')
except:
text=_('Error opening the file ') + path
PublishEvent(Severity=2,Text=text,Persistent=True,Code='FileIOError-0')
lines=file.readlines()
if len(lines)>0:
keyFound=False
for i,line in enumerate(lines):
if key in line or keyFound:
if not nextLine:
lines[i]=newValue+key+endChar
keyFound=False
elif keyFound:
lines[i]=newValue+endChar
keyFound=False
else:
keyFound=True
fileString=''.join(lines)
file.close()
from subprocess import Popen, PIPE
cmd="echo '"+fileString+"' | sudo tee "+ path
process = Popen(cmd, shell=True,
stdout=PIPE,stdin=PIPE, stderr=PIPE,universal_newlines=True)
stdout, err = process.communicate()
if err=='':
text='The key '+key+' on the file ' + path+ ' has been modified to ' +newValue
severity=0
else:
            text='Error updating key ' + key + ' on the file ' + path + ' Error: ' + err
severity=3
PublishEvent(Severity=severity,Text=text,Persistent=True,Code='EditFile-'+key)
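    # Hypothetical usage sketch for editKeyedFile (the path and key below are illustrative only):
    #   SiteSettings.editKeyedFile(path='/etc/example.conf', key='# managed value below', newValue='42')
    #   With nextLine=True (the default) the line *after* the matching key line is replaced with
    #   newValue+endChar; with nextLine=False the matching line itself becomes newValue+key+endChar.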
@staticmethod
def editUniqueKeyedFile(path,key,delimiter,newValue,endChar='',addKey=True):
try:
file = open(path, 'r')
except:
text=_('Error opening the file ') + path
PublishEvent(Severity=2,Text=text,Persistent=True,Code='FileIOError-0')
lines=file.readlines()
if len(lines)>0:
keyFound=False
for i,line in enumerate(lines):
contents=line.split(delimiter)
if len(contents)==2:
thisKey=contents[0]
if thisKey==key:
keyFound=True
lines[i]=key+delimiter+newValue+endChar
if not keyFound and addKey:
if not '\n' in lines[-1]:
lines[-1]=lines[-1]+'\n'
lines.append(key+delimiter+newValue+endChar)
fileString=''.join(lines)
file.close()
from subprocess import Popen, PIPE
cmd="echo '"+fileString+"' | sudo tee "+ path
#logger.info(cmd)
process = Popen(cmd, shell=True,
stdout=PIPE,stdin=PIPE, stderr=PIPE,universal_newlines=True)
stdout, err = process.communicate()
if err=='':
text='The key '+key+' on the file ' + path+ ' has been modified to ' +newValue
severity=0
else:
            text='Error updating key ' + key + ' on the file ' + path + ' Error: ' + err
severity=3
PublishEvent(Severity=severity,Text=text,Persistent=True,Code='EditFile-'+key)
@receiver(post_save, sender=SiteSettings, dispatch_uid="update_SiteSettings")
def update_SiteSettings(sender, instance, update_fields,**kwargs):
pass
class Permissions(models.Model):
class Meta:
verbose_name = _('Permission')
verbose_name_plural = _('Permissions')
permissions = (
("view_heating_subsystem", "Can view the Heating subsystem"),
("view_garden_subsystem", "Can view the Garden subsystem"),
("view_access_subsystem", "Can view the Access subsystem"),
("view_user_track", "Can view the position of the tracked users"),
("reset_system", "Can force a reset of the system"),
("check_updates", "Can check for updates of the system"),
("view_devicesapp", "Can view the devicesAPP"),
("view_reportingapp", "Can view the reportingAPP"),
("view_subsystemsapp", "Can view the subsystemsAPP"),
("view_configurationapp", "Can access to the configurationAPP"),
("change_automationvar", "Can change the value of an automation variable"),
)
class Subsystems(models.Model):
class Meta:
verbose_name = _('Subsystem')
verbose_name_plural = _('Subsystems')
content_type = models.ForeignKey(ContentType)
object_id = models.CharField(max_length=50)
content_object = GenericForeignKey('content_type', 'object_id')
Name = models.PositiveSmallIntegerField(choices=SUBSYSTEMS_CHOICES)
@staticmethod
def getName2Display(Name):
for name in SUBSYSTEMS_CHOICES:
if name[0]==Name:
return name[1]
return None
def __str__(self):
return self.get_Name_display()
class AdditionalCalculations(models.Model):
class Meta:
verbose_name = _('Additional calculation')
verbose_name_plural = _('Additional calculations')
TIMESPAN_CHOICES=(
(0,_('An hour')),
(1,_('A day')),
(2,_('A week')),
(3,_('A month')),
)
PERIODICITY_CHOICES=(
(0,_('With every new value')),
(1,_('Every hour')),
(2,_('Every day at 0h')),
(3,_('Every week')),
(4,_('Every month')),
)
CALCULATION_CHOICES=(
(0,_('Duty cycle OFF')),
(1,_('Duty cycle ON')),
(2,_('Mean value')),
(3,_('Max value')),
(4,_('Min value')),
(5,_('Cummulative sum')),
(6,_('Integral over time')),
(7,_('Operation with two variables')),
)
TWOVARS_OPERATION_CHOICES=(
(0,_('Sum')),
(1,_('Substraction')),
(2,_('Product')),
(3,_('Division')),
(4,_('Sum then sum')),
(5,_('Product then sum')),
)
SinkVar= models.ForeignKey('MainAPP.AutomationVariables',on_delete=models.CASCADE,related_name='sinkvar',blank=True,null=True) # variable that holds the calculation
SourceVar= models.ForeignKey('MainAPP.AutomationVariables',on_delete=models.DO_NOTHING,related_name='sourcevar') # variable whose change triggers the calculation
Scale=models.FloatField(help_text=_('Constant to multiply the result of the calculation'),default=1)
Timespan= models.PositiveSmallIntegerField(help_text=_('What is the time span for the calculation'),choices=TIMESPAN_CHOICES,default=1)
Periodicity= models.PositiveSmallIntegerField(help_text=_('How often the calculation will be updated'),choices=PERIODICITY_CHOICES)
Calculation= models.PositiveSmallIntegerField(choices=CALCULATION_CHOICES)
Delay= models.PositiveSmallIntegerField(help_text=_('What is the delay (in hours) for the calculation from 00:00 h'),default=0,validators=[MinValueValidator(0),MaxValueValidator(23)])
Miscelaneous = models.CharField(max_length=1000,blank=True,null=True) # field that gathers data in json for calculations on more variables
def __init__(self,*args,**kwargs):
try:
self.df=kwargs.pop('df')
self.key=kwargs.pop('key')
except:
self.df=pd.DataFrame()
self.key=''
super(AdditionalCalculations, self).__init__(*args, **kwargs)
def store2DB(self):
from DevicesAPP.constants import DTYPE_FLOAT
label= str(self)
if self.SinkVar:
sinkVAR=self.SinkVar
if self.Calculation==7: # it is a two var calculation
Misc=json.loads(self.Miscelaneous)
sinkVAR.updateLabel(label)
sinkVAR.updateUnits(Misc['Units'])
else:
sinkVAR.updateLabel(label)
else:
if not self.Calculation in [0,1,7]: # it is not a duty calculation nor a two var calc
data={'Label':label,'Value':0,'DataType':DTYPE_FLOAT,'Units':self.SourceVar.Units,'UserEditable':False}
elif self.Calculation==7: # it is a two var calculation
Misc=json.loads(self.Miscelaneous)
data={'Label':label,'Value':0,'DataType':DTYPE_FLOAT,'Units':Misc['Units'],'UserEditable':False}
else:
data={'Label':label,'Value':0,'DataType':DTYPE_FLOAT,'Units':'%','UserEditable':False}
MainAPP.signals.SignalCreateMainDeviceVars.send(sender=None,Data=data)
sinkVAR=AutomationVariables.objects.get(Label=label)
self.SinkVar=sinkVAR
try:
self.save()
except OperationalError:
logger.error("Operational error on Django. System restarted")
import os
os.system("sudo reboot")
def __str__(self):
try:
if self.Calculation!=7:
return str(self.get_Calculation_display())+'('+self.SourceVar.Label + ')'
else:
Misc=json.loads(self.Miscelaneous)
AVAR=AutomationVariables.objects.get(pk=int(Misc['SourceVar2']))
operation=str(self.TWOVARS_OPERATION_CHOICES[int(Misc['TwoVarsOperation'])][1])
return operation+'('+self.SourceVar.Label +' vs ' +AVAR.Label+')'
except:
return self.key
def checkTrigger(self):
# if self.Calculation==7:
# return True
if self.Periodicity==0:
return False
else:
import datetime
now=datetime.datetime.now()
if self.Periodicity==1 and now.minute==0: # hourly calculation launched at minute XX:00
return True
elif now.hour==self.Delay and now.minute==0:
if self.Periodicity==2: # daily calculation launched on next day at 00:00
return True
elif self.Periodicity==3 and now.weekday()==0: # weekly calculation launched on Monday at 00:00
return True
elif self.Periodicity==4 and now.day==1: # monthly calculation launched on 1st day at 00:00
return True
return False
def initializeDB(self):
import datetime
import calendar
import pytz
from tzlocal import get_localzone
local_tz=get_localzone()
localdate = local_tz.localize(datetime.datetime.now())
now=datetime.datetime.now()
start_date = '01-01-' + str(now.year)+' 00:00:00'
date_format = '%d-%m-%Y %H:%M:%S'
if self.Timespan==0: # Every hour
offset=datetime.timedelta(hours=1)
elif self.Timespan==1: # Every day
offset=datetime.timedelta(hours=24)
elif self.Timespan==2: # Every week
offset=datetime.timedelta(weeks=1)
elif self.Timespan==3: # Every month
days=calendar.monthrange(now.year, 1)[1]
offset=datetime.timedelta(hours=days*24)
else:
return
toDate=pytz.utc.localize(datetime.datetime.strptime(start_date, date_format))+offset-localdate.utcoffset()
while toDate<=pytz.utc.localize(datetime.datetime.now()):
now=toDate
if self.Timespan==0: # Every hour
offset=datetime.timedelta(hours=1)
elif self.Timespan==1: # Every day
offset=datetime.timedelta(hours=24)
elif self.Timespan==2: # Every week
offset=datetime.timedelta(weeks=1)
elif self.Timespan==3: # Every month
days=calendar.monthrange(now.year, now.month)[1]
offset=datetime.timedelta(hours=days*24)
try:
self.calculate(toDate=toDate)
except Exception as exc:
logger.error(str(exc))
return
toDate=toDate+offset
def calculate(self,DBDate=None,toDate=None):
import datetime
import calendar
if toDate==None:
toDate=timezone.now()-datetime.timedelta(hours=self.Delay)
now=datetime.datetime.now()
else:
now=toDate
#toDate=datetime.datetime(year=2019,month=4,day=7)
if self.Timespan==0: # Every hour
offset=datetime.timedelta(hours=1)
elif self.Timespan==1: # Every day
offset=datetime.timedelta(hours=24)
elif self.Timespan==2: # Every week
offset=datetime.timedelta(weeks=1)
elif self.Timespan==3: # Every month
days=calendar.monthrange(now.year, now.month)[1]
offset=datetime.timedelta(hours=days*24)
else:
return
fromDate=toDate-offset
if DBDate==None:
DBDate=toDate-offset/2
toDate=toDate-datetime.timedelta(minutes=1)
query=self.SourceVar.getQuery(fromDate=fromDate,toDate=toDate)
self.df=pd.read_sql_query(sql=query['sql'],con=query['conn'],index_col='timestamp')
if not self.df.empty:
self.key=self.SourceVar.Tag
# TO FORCE THAT THE INITIAL ROW CONTAINS THE INITIAL DATE
addedtime= | pd.to_datetime(arg=self.df.index.values[0]) | pandas.to_datetime |
import joblib, argparse
import numpy as np
import pandas as pd
from sklearn.metrics import f1_score
from sklearn.linear_model import LogisticRegression
def parse_arguments(parser):
parser.add_argument('--data_dir', type=str, default='C:/data/niosh_ifund/')
parser.add_argument('--mode', type=str, default='test')
parser.add_argument('--test_file', type=str, default='test.tsv')
parser.add_argument('--text_only', type=bool, default=True)
parser.add_argument('--train_blender', type=bool, default=True)
args = parser.parse_args()
return args
if __name__ == '__main__':
parser = argparse.ArgumentParser()
args = parse_arguments(parser)
# Importing the event code dictionary to convert the BERT indices
code_df = pd.read_csv(args.data_dir + 'code_dict.csv')
code_dict = dict(zip(code_df.value, code_df.event_code))
# Importing the scores from the 4 BERT runs
if args.mode == 'validate':
run_folder = 'val_runs'
elif args.mode == 'test':
run_folder = 'test_runs'
run1_probs = np.array(pd.read_csv(args.data_dir + run_folder +
'/run_1/test_results.tsv', sep='\t',
header=None))
run2_probs = np.array(pd.read_csv(args.data_dir + run_folder +
'/run_2/test_results.tsv', sep='\t',
header=None))
run3_probs = np.array(pd.read_csv(args.data_dir + run_folder +
'/run_3/test_results.tsv', sep='\t',
header=None))
run4_probs = np.array(pd.read_csv(args.data_dir + run_folder +
'/run_4/test_results.tsv', sep='\t',
header=None))
prob_list = [run1_probs, run2_probs, run3_probs, run4_probs]
# Grouping the probabilities for regular averaging
avg_probs = np.mean(prob_list, axis=0)
avg_guesses = np.array([code_dict[code]
for code in np.argmax(avg_probs, axis=1)])
# Grouping the probabilities for blending
wide_probs = np.concatenate(prob_list, axis=1)
# Producing guesses when only the input text is available
if args.text_only:
# Loading the blender model
# lgr = joblib.load(args.data_dir + 'blender.joblib')
# blend_guesses = lgr.predict(wide_probs)
# blend_probs = np.max(lgr.predict_proba(wide_probs), axis=1)
# print(blend_probs[0])
# Exporting the guesses to disk
ids = pd.read_csv(args.data_dir + args.test_file,
sep='\t')['id']
guess_df = pd.DataFrame(pd.concat([ids,
| pd.Series(avg_guesses) | pandas.Series |
from datetime import datetime
from typing import TypedDict
import pandas as pd
import pandas_datareader.data as web
from dateutil.relativedelta import relativedelta
class VolatilityLevels(TypedDict):
"""
VolatilityLevels defines a dict of volatility levels to categorize volatility
"""
minimum: float
moderate: float
average: float
elevated: float
extreme: float
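# Illustrative instance (the threshold values below are assumptions, not calibrated levels):
#   levels: VolatilityLevels = {"minimum": 10.0, "moderate": 15.0, "average": 20.0,
#                               "elevated": 25.0, "extreme": 35.0}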
def barometer() -> float:
"""
barometer retrieves various VIX data to calculate the percentile rank of crossovers
:return: float most recent barometer value
"""
# set helpful date values
three_years_ago = datetime.now() - relativedelta(years=3)
today = datetime.now()
# retrieve VIX9D
vix9d = pd.read_csv('https://cdn.cboe.com/api/global/us_indices/daily_prices/VIX9D_History.csv')
vix9d['DATE'] = pd.to_datetime(vix9d['DATE'])
vix9d = vix9d[['DATE', 'CLOSE']].rename(columns={'CLOSE': 'vix9d'}).set_index('DATE')
# retrieve VIX3M
vix3m = | pd.read_csv('https://cdn.cboe.com/api/global/us_indices/daily_prices/VIX3M_History.csv') | pandas.read_csv |
# !/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Test the projection module
"""
import unittest
import numpy as np
import pandas as pd
from numpy.polynomial import legendre
from pyrotor.projection import trajectory_to_coef
from pyrotor.projection import trajectories_to_coefs
from pyrotor.projection import compute_weighted_coef
from pyrotor.projection import coef_to_trajectory
def test_trajectory_to_coef():
# Test Legendre
y = pd.DataFrame({"A": [1, 2, 3, 4, 5],
"B": [-4, -1, 4, 11, 20]})
basis_dimension = {"A": 3, "B": 2}
basis_features = basis_dimension
basis = "legendre"
expected_coef = np.array([3., 2., 0., 6., 12.], dtype='float64')
result = trajectory_to_coef(y, basis, basis_features, basis_dimension)
np.testing.assert_almost_equal(expected_coef, result)
# Test B-spline
x = np.linspace(0, 1, 20)
y = pd.DataFrame({"A": x,
"B": x**2})
basis_features = {"knots": [.25, .5, .75], "A": 2, "B": 3}
basis_dimension = {"A": 6, "B": 7}
basis = "bspline"
expected_coef = np.array([0., .125, .375, .625, .875, 1.,
0., 0., 4.16666667e-02, 2.29166667e-01, 5.41666667e-01, 8.33333333e-01, 1.], dtype='float64')
result = trajectory_to_coef(y, basis, basis_features, basis_dimension)
np.testing.assert_almost_equal(expected_coef, result)
def test_trajectories_to_coefs():
# Test Legendre
y = [ | pd.DataFrame({"A": [1, 2, 3, 4, 5]}) | pandas.DataFrame |
#
# K2S-O: Real-time, multi-processed, multi-threaded time-series anomaly detection
#
# K2so pulls data from a time-series database on a pre-defined timing schedule, adding to an in-memory buffer each time.
# Median and wavelet filters are applied against the in-memory buffer to reduce noise. The resultant signal is then detrended
# using STLOESS and then run through a Season Hybrid Extreme Studentized Deviate to assess the waveform for statistical
# anomalies. Those anomalies are returned, paired with the original signal (as well as the filtered signal for cross-referencing) and
# then reported to OSNDS's alerting API. Logic has been added to group nomalies together into "events"; this is not an association approach,
# but rather a nearby clustering method prescribed by an end-user's set "reset time window". This limitation is intentional as we have aimed to
# make this code as applicable to multiple mission areas as possible. Performing association would require us to make certain assumptions
# of either the originating event of interest or the phenomenology of the data collection method. We highly encourage researchers to fork this
# code and embed their own association algorithims.
#
# This python script was collaboratively written by members of AFTAC/SI (<NAME>, <NAME>, and <NAME>), based upon the excellent work
# (written in R) by <NAME> (AFTAC/SI) which leveraged the foundational work of Twitter's Reasearch Team (as well as numerous open-source packages).
# Please see the end of this file for a full list of usage credits.
#
# Usage instructions:
#
# This code is ultimately called and executed from another script "k2so.py. You can call k2so.py via:
#
# python k2so.py -s [stations]
#
# Wherein the "-s" is an argument flag for "stations", as in which stations you would like k2so to monitor against.
# You must follow this flag by each station's ID, separated by spaces (single-word alphaumerics are accepted). For example:
#
# python k2so.py -s 1 2 3 4 X1 X12
#
# We will soon be adding a "-d" argument to the scripts execution that will force K2SO to operate in a DEBUG MODE. This mode will provide the
# end user with a log of K-2SO's output labed by Station ID. This feature is still in work.
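#
# Minimal sketch of the "reset time window" grouping described above (variable names are
# simplified; the authoritative logic lives in parse_anomalies() at the bottom of this file):
#
#   if anomaly_first_timestamp <= previous_anomaly_last_timestamp + trigger_cooldown:
#       event_id = last_event_id        # close enough in time: fold into the current event
#   else:
#       event_id = last_event_id + 1    # otherwise this anomaly group starts a new event
#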
# import pdb
import sys
import warnings
import numpy
from numpy.core.numeric import NaN
#from scipy.signal import waveforms, wavelets
from src import file_handler as logic
warnings.simplefilter(action='ignore', category=FutureWarning)
import tad
import scipy
import pandas as pd
import matplotlib.pyplot as plt
import os, math, time, requests, json
from influxdb import DataFrameClient
from twisted.internet import task, reactor
import skimage
from skimage.restoration import denoise_wavelet, estimate_sigma
import pprint
from pprint import pprint
# print(sys)
# print(warnings)
# print(numpy)
# print(tad)
# print(scipy)
# print(pd)
# print(plt)
# print(os)
# print(math)
# print(time)
# print(requests)
# print(json)
# print(DataFrameClient)
# print(task)
# print(reactor)
# print(skimage)
# print(pprint)
pd.set_option('display.max_rows', 4000)
class DataStore():
dict = []
last_event_id = 0
previous_valid_first_index = 0
previous_valid_first_value = 0
previous_valid_last_index = 0
previous_valid_last_value = 0
waveform = pd.DataFrame()
med_x = None
med_y = None
med_z = None
report_ID_buffer = []
class InfluxStore():
client = None
query_median = ""
query_data = ""
class Settings():
config = None
station = 0
trigger_cooldown = 0
debug = True
filter_coefficients = None
# create instances for each class, thereby passing initial values for each downstream variable
settings = Settings()
influx = InfluxStore()
data = DataStore()
def initialize_k2so():
kill = False
    # All of k2so's settings (with the exception of which stations you're running against) are assignable in the JSON file
# This function loads the JSON file and stores all of the user settings as a list, "config"
station_configuration = str('config/k2so_configuration_osnds_'+str(settings.station)+'.json')
print('\nOSNDS Station {0}: Attempting to load configuration file'.format(settings.station)) if settings.debug == True else None
try:
with open(station_configuration) as config:
try:
settings.config = json.load(config)
except Exception as e:
print('\nOSNDS Station {0}: There was an error parsing the configuration file'.format(settings.station)) if settings.debug == True else None
print(' Error: {0}'.format(e)) if settings.debug == True else None
kill = True
return kill
print('\nOSNDS Station {0}: The configuration file has been loaded'.format(settings.station)) if settings.debug == True else None
except FileNotFoundError as f:
print('\nOSNDS Station {0}: There appears to be no configuration file for\n this station. Please ensure that the following\n file exists:\n\n {1}'.format(settings.station, station_configuration))
print(' Error: {0}'.format(f)) if settings.debug == True else None
kill = True
return kill
kill = logic.file_handler(settings.station, settings.config)
if kill == True:
return kill
try:
influx.client = DataFrameClient(host = settings.config['influx']['host'], # (default) storage.osnds.net
port = settings.config['influx']['port'], # (default) 8086
database = settings.config['influx']['database'], # (default) livestream-test
username = settings.config['influx']['username'], # (default) <redacted>
password = settings.config['influx']['password'],) # (default) <redacted>
except Exception as e:
print('\nOSNDS Station {0}: There was an error initializing the client\n Please check your connection settings'.format(settings.station))
print(' Error: {0}'.format(e)) if settings.debug == True else None
kill = True
return kill
# InfluxDB (1.x) queries follow a similar style to SQL; "select * from <db>", etc.
#
# Influx will perform math for you as well. In this instance, InfluxDB is being asked to return (3) separate values:
# - Median of the X component over the past (2) minutes
# - Median of the Y component over the past (2) minutes
# - Median of the Z component over the past (2) minutes
#
# Since InfluxDB uses timestamps for its unique IDs, you have to include the time range "where time > now()-2m".
# now() = current time in ns since epoch
#
# The "topic" is specific to how OSNDS receives MQTT streams, in this case, its how we specify which OSNDS station we wish to pull from
data.last_event_id = settings.config['k2s0']['last_event_id']
settings.trigger_cooldown = settings.config['k2s0']['trigger_cooldown_s']*10**9
settings.debug = settings.config['k2s0']['debug']
influx.query_median = str("SELECT median(x), median(y), median(z) FROM {0}.{1}.{2} WHERE time > now()-{3}m AND data='{4}';".format(settings.config['influx']['database'], settings.config['influx']['retention'], settings.config['influx']['measurment'], str(settings.config['k2s0']['median_window_m']), settings.config['k2s0']['data_stream']))
# query, compute, and store the median values (based upon the query above)
kill = pull_medianValues()
# Influx will perform math for you. In this instance, InfluxDB is being asked to subtract the median values of X, Y, and Z from all future data pulls (respectively)
# All three components are then added together. This is to ensure that k2so triggers off of an anomaly in any component
influx.query_data = str("SELECT (x-({0})) + (y-({1})) + (z-({2})) FROM {3}.{4}.{5} WHERE time > now()-{6}s AND data='{7}' fill(previous);".format(str(data.med_x),str(data.med_y),str(data.med_z),settings.config['influx']['database'], settings.config['influx']['retention'], settings.config['influx']['measurment'], str(settings.config['k2s0']['time_window_s']), settings.config['k2s0']['data_stream']))
#k2s0_arguments = (settings.station, influx.query_data, influx.client, settings.config)
if kill == True:
return kill
else:
print('\nOSNDS Station {0}: K-2S0 has been successfully configured'.format(settings.station)) if settings.debug == True else None
return kill
def pull_medianValues():
if data.med_x == None:
# this try statement catches an error where the Influx DataFrameClient is unable to run the specified query
# this error will only occur if there is an issue with the client settings or query syntax
try:
#print(influx.query_median)
response = influx.client.query(influx.query_median) # send the initialization query to InfluxDB
# this try statement catches an error where the Influx DataFrameClient successfully connected to the database but there was no data to pull
            # this happens when the user points k2s0 to a station that either doesn't exist or is currently offline
try:
median_values = response[settings.config['influx']['measurment']] # get the "livestream" dataframe from the returned list of dataframes "response"
data.med_x = median_values.loc[:,'median'][0] # get the median of X from the dataframe
data.med_y = median_values.loc[:,'median_1'][0] # get the median of Y from the dataframe
data.med_z = median_values.loc[:,'median_2'][0] # get the median of Z from the dataframe
kill = False
except Exception as e:
print('\nOSNDS Station {0}: The station appears to be offline at the moment (pull: median)'.format(settings.station))
print(' Error: {0}'.format(e)) if settings.debug == True else None
kill = True
return kill
except Exception as e:
print('\nOSNDS Station {0}: The Influx client experienced an error retrieving the median values'.format(settings.station))
print(' Error: {0}'.format(e)) if settings.debug == True else None
kill = True
return kill
else:
# store the current median values in temporary variables
previous_med_x = data.med_x
previous_med_y = data.med_y
previous_med_z = data.med_z
# get new median values
response = influx.client.query(influx.query_median) # send the initialization query to InfluxDB
median_values = response[settings.config['influx']['measurment']] # get the "livestream" dataframe from the returned list of dataframes "response"
# store new median values in a temporary variables
current_med_x = median_values.loc[:,'median'][0] # get the median of X from the dataframe
current_med_y = median_values.loc[:,'median_1'][0] # get the median of Y from the dataframe
current_med_z = median_values.loc[:,'median_2'][0] # get the median of Z from the dataframe
# average the current and previous median values
data.med_x = (current_med_x + previous_med_x) / 2 # return the average of the current and previous median values for X
data.med_y = (current_med_y + previous_med_y) / 2 # return the average of the current and previous median values for Y
data.med_z = (current_med_z + previous_med_z) / 2 # return the average of the current and previous median values for Z
print('\nOSNDS Station {0}: Updated median offset values are...\n \n X = {1} m/s2\n Y = {2} m/s2\n Z = {3} m/s2'.format(settings.station, data.med_x, data.med_y, data.med_z)) if settings.debug == True else None
return
def pull_fromInflux():
#print(influx.query_data)
response = influx.client.query(influx.query_data) # send the initialization query to InfluxDB
try:
signal = response[settings.config['influx']['measurment']] # get the "livestream" dataframe from the returned list of dataframes "response"
if signal.isnull().values.any() == False: # validate that there are no "NA" values within the dataframe
signal.index = pd.to_datetime(signal.index, format='%Y-%m-%d %H:%M:%S.%f%z', unit='ns') # convert the <string> datetime to a datetime type
signal.index = signal.index.astype('datetime64[ns]') # force the datetime type to be "datetime64[ns]"
else:
print('\nOSNDS Station {0}: The latest pull from InfluxDB returned null values'.format(settings.station))
if len(data.waveform) < settings.config['k2s0']['buffer']:
data.waveform = data.waveform.combine_first(signal)
print('\nOSNDS Station {0}: Successfully pulled new data (Buffer: {1} %)'.format(settings.station,math.ceil((len(data.waveform['x_y_z'])/settings.config['k2s0']['buffer'])*100))) if settings.debug == True else None
else:
data.waveform = data.waveform.combine_first(signal)
data.waveform = data.waveform.iloc[len(signal):]
print('\nOSNDS Station {0}: Successfully pulled new data (Buffer: {1} %)'.format(settings.station,math.ceil((len(data.waveform['x_y_z'])/settings.config['k2s0']['buffer'])*100))) if settings.debug == True else None
return
except KeyError as k:
print('\nOSNDS Station {0}: X - The station appears to be offline at the moment (pull: live).'.format(settings.station))
print(' Error: {0}'.format(k)) if settings.debug == True else None
return
def filter_waveform():
    # Scipy's median filter applies a median filter to the input array using a local window-size given by "kernel_size". The array will automatically be zero-padded.
# Median filters are a great way to reduce higher-frequency noise, but you should be mindful that they essentially serve as a low-pass filter with a low, gaussian roll-off factor.
# print(f'entering filter_waveform')
if settings.debug == True:
start = time.time() # get start time
if settings.config['filtering']['enabled'] and settings.config['filtering']['bandpass_filter']['enabled'] == True:
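        # builds a 3rd-order Butterworth high-pass at 4 Hz (note: the config key is named "bandpass_filter")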
sos = scipy.signal.butter(3, 4, 'hp', fs=settings.config['fft_processing']['sample_rate'], output='sos')
filtered = scipy.signal.sosfilt(sos, data.waveform['filtered'])
data.waveform['filtered'] = filtered
if settings.config['filtering']['enabled'] and settings.config['filtering']['median_filter']['enabled'] == True:
data.waveform['filtered'] = scipy.signal.medfilt(volume = data.waveform['filtered'], # input 1D signal
                                                         kernel_size = settings.config['filtering']['median_filter']['kernel_size']) # (default) 3
if settings.config['filtering']['enabled'] and settings.config['filtering']['wavelet_filter']['enabled'] == True:
# Skimage's wavelet filter
sigma_est = estimate_sigma( image = data.waveform['filtered'], # in this case, we are treating our 1D signal array as an image with a depth of 1-pixel and a length of n-pixels
                                    multichannel=False) # color images are multi-channeled (R, G, B) whereas black/white images (or in our case a 1D signal array) are single-channeled
data.waveform['filtered'] = denoise_wavelet(
image = data.waveform['filtered'], # in this case, we are treating our 1D signal array as an image with a depth of 1-pixel and a length of n-pixels
sigma = sigma_est, # here we are incorporating the estimated sigma for the median-filtered signal
wavelet = settings.config['filtering']['wavelet_filter']['wavelet'], #
            multichannel = False, # color images are multi-channeled (R, G, B) whereas black/white images (or in our case a 1D signal array) are single-channeled
rescale_sigma = True, #
method = settings.config['filtering']['wavelet_filter']['method'], #
mode = settings.config['filtering']['wavelet_filter']['thresholding']) #
if settings.config['plot_signal']['enabled'] == True:
plt.plot(data.waveform['x_y_z'], label='Original Signal')
plt.plot(data.waveform['filtered'], label='Filtered Signal')
plt.xlabel('Time Window (UTC)')
plt.ylabel(str(settings.config['plot_signal']['y_label']+" $"+settings.config['plot_signal']['y_units']+"$"))
plt.title('Filtered Signal Output')
plt.legend()
plt.show(block=False)
plt.pause(2)
plt.close()
if settings.debug == True:
end = time.time()
print('\nOSNDS Station {0}: {1} records filtered in {2} seconds'.format(settings.station, len(data.waveform.filtered), math.ceil((end-start)*10000)/10000)) if settings.debug == True else None
return
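# --- Added illustration (not part of the K-2S0 pipeline): a minimal, self-contained sketch of the
# same filter-chain idea on a synthetic trace. The sample rate, the synthetic waveform, and the
# helper name `_demo_filter_chain` are assumptions for demonstration only.
def _demo_filter_chain():
    import numpy as np
    import scipy.signal
    fs = 100                                                         # assumed sample rate (Hz)
    t = np.arange(0, 10, 1 / fs)
    sig = np.sin(2 * np.pi * 8 * t) + 0.3 * np.random.randn(t.size)  # 8 Hz tone plus noise
    sos = scipy.signal.butter(3, 4, 'hp', fs=fs, output='sos')       # 4 Hz high-pass, as above
    out = scipy.signal.sosfilt(sos, sig)
    out = scipy.signal.medfilt(out, kernel_size=3)                   # median filter knocks down single-sample spikes
    return out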
def detect_anomalies():
sample_rate = settings.config['fft_processing']['sample_rate']
signal_length = len(data.waveform['filtered'])
if settings.config['anomaly_detector'] == "tad":
# print(f'data.waveform[\'filtered\']:\n{1}',data.waveform['filtered'][1:5])
anomalies = tad.anomaly_detect_vec( x=data.waveform['filtered'], # pass the combined X+Y+Z waveform to the to the anomaly detector
alpha=.0001, # only return points that are deemed be be anomalous with a 99.9% threshold of confidence
                                            period=math.ceil(signal_length/sample_rate), # the signal duration in seconds, rounded up to an integer
direction="both", # look at both the positive and negative aspects of the signal
e_value=True, # add an additional column to the anoms output containing the expected value
plot=False) # plot the seasonal and linear trends of the signal, as well as the residual (detrended) data
# print(f'2. Detect anomalies:\n{1}',anomalies[1:5])
    if settings.config['anomaly_detector'] == "global_shed_grubbs":
        pass  # placeholder: this detector is not yet implemented (see the sketch after detect_anomalies)
print('\nOSNDS Station {0}: K-2S0 detected {1} anomalies'.format(settings.station, len(anomalies))) if settings.debug == True else None
    if len(anomalies) > settings.config['k2s0']['anomaly_threshold']: # serves as a basic filter for random spurious "anomalies" that can arise from any of the detection algorithms
# print(f'3. Anomaly length test:\n{1}',len(anomalies))
data.waveform['anomalies'] = anomalies
# print(f'4. Waveform anomalies:\n{1}',data.waveform['anomalies'][1:5])
data.waveform['anomalies'] = data.waveform['anomalies'].notna() # replaces NA values with boolean False, True values stay True
# print(f'5. Waveform anomalies replace NA values:\n{1}',data.waveform['anomalies'])[1:5]
error_handling()
parse_anomalies()
return
else:
data.waveform['anomalies'] = False # ensures that (in the case of no anomalies) all 'anomalies' values are False
data.waveform['id'] = NaN
data.waveform['reported'] = NaN
data.waveform['grafanaID'] = NaN
return
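# --- Added illustration for the empty "global_shed_grubbs" branch above. This is an assumption
# about what such a detector could look like, not the project's actual algorithm: an iterative
# two-sided Grubbs test that flags the most extreme sample while the test statistic exceeds the
# critical value. The helper name `_grubbs_outliers` is hypothetical.
def _grubbs_outliers(values, alpha=0.0001):
    import numpy as np
    from scipy import stats
    x = np.asarray(values, dtype=float)
    mask = np.zeros(x.size, dtype=bool)                  # True where a sample is flagged anomalous
    idx = np.arange(x.size)
    while x.size > 2:
        n = x.size
        s = x.std(ddof=1)
        if s == 0:
            break                                        # constant signal: nothing to flag
        g = np.abs(x - x.mean()).max() / s               # Grubbs statistic
        t2 = stats.t.ppf(1 - alpha / (2 * n), n - 2) ** 2
        g_crit = ((n - 1) / np.sqrt(n)) * np.sqrt(t2 / (n - 2 + t2))
        if g <= g_crit:
            break
        worst = np.abs(x - x.mean()).argmax()
        mask[idx[worst]] = True                          # flag and remove the most extreme sample
        x = np.delete(x, worst)
        idx = np.delete(idx, worst)
    return mask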
def error_handling():
    # ensure the 'id', 'reported', and 'grafanaID' columns exist within the dataframe on the
    # first run (this prevents KeyErrors downstream)
    for column in ('id', 'reported', 'grafanaID'):
        if column not in data.waveform:
            data.waveform[column] = NaN
    return
def filter_dataframe_by_val(df, column, val):
    return df.loc[df[column] == val]
def parse_anomalies(): # assigns an event ID to each detected anomaly group
anomalies_found_table = filter_dataframe_by_val(data.waveform,'anomalies',True)
# print(f'6. Parse_anomalies, anomalies found table:\n{1}',anomalies_found_table[1:5])
for index, loop_value in data.waveform.groupby([(data.waveform.anomalies != data.waveform.anomalies.shift()).cumsum()]):
if loop_value.anomalies.all() == True:
# has this anomaly group already been reported to OSNDS?
if data.waveform.loc[loop_value.first_valid_index():loop_value.last_valid_index(),'reported'].sum() > 0:
pass
else:
# is this anomaly group part of the previous anomaly group?
# print('settings.trigger_cooldown',settings.trigger_cooldown)
if (float(loop_value.first_valid_index().value) <= (data.previous_valid_last_value + settings.trigger_cooldown)):
data.waveform.loc[data.previous_valid_first_index:loop_value.last_valid_index(),'id'] = int(data.last_event_id)
data.previous_valid_last_index = loop_value.last_valid_index()
data.previous_valid_last_value = loop_value.last_valid_index().value
else:
data.last_event_id = data.last_event_id + 1
                    # saves the current unique event ID to a new column within the dataframe called "id" - this event id is only applied to the indexes
                    # bounded by the groupby function (e.g. start index for group loop_value = loop_value.first_valid_index() | ending index for group loop_value = loop_value.last_valid_index())
data.waveform.loc[loop_value.first_valid_index():loop_value.last_valid_index(),'id'] = int(data.last_event_id)
# store the timestamp of the first anomalous amplitude within the anomaly group
data.previous_valid_first_index = loop_value.first_valid_index()
data.previous_valid_first_value = loop_value.first_valid_index().value
# store the timestamp of the last anomalous amplitude within the anomaly group
data.previous_valid_last_index = loop_value.last_valid_index()
data.previous_valid_last_value = loop_value.last_valid_index().value
    data.waveform['id'] = data.waveform['id'].fillna(0) # fillna returns a copy, so assign the result back
return
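# --- Added illustration (demonstration only): the `(s != s.shift()).cumsum()` idiom used in
# parse_anomalies() labels consecutive runs of equal values so groupby() can walk one anomaly
# burst at a time. The helper name `_demo_run_grouping` and the sample data are assumptions.
def _demo_run_grouping():
    import pandas as pd
    s = pd.Series([False, True, True, False, True])
    run_id = (s != s.shift()).cumsum()  # -> 1, 2, 2, 3, 4: each run of equal values gets its own label
    for label, run in s.groupby(run_id):
        print(label, run.tolist())      # e.g. run 2 is the two consecutive True samples
    return run_id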
# def pumpkin_score_transmit(start,stop):
# print("here")
# influx.query_median = str("SELECT median(x), median(y), median(z) FROM {0}.{1}.{2} WHERE time between {3} and {4} AND data='{4}';".format(settings.config['influx']['database'], settings.config['influx']['retention'], settings.config['influx']['measurment'], str(settings.config['k2s0']['median_window_m']), settings.config['k2s0']['data_stream']))
# response=influx.client.query(influx.query_median)
# median_values = response[settings.config['influx']['measurment']] # get the "livestream" dataframe from the returned list of dataframes "response"
# data.med_x = median_values.loc[:,'median'][0] # get the median of X from the dataframe
# data.med_y = median_values.loc[:,'median_1'][0] # get the median of Y from the dataframe
# data.med_z = median_values.loc[:,'median_2'][0] # get the median of Z from the dataframe
# print()
def send_alert(alert_message):
alert_payload = {}
alert_url = "https://config.osnds.net/api/alerts" # OSNDS API URL for alerts (see Node-Red or NiFi for message handling)
utc_local_offset = ('{}{:0>2}{:0>2}'.format('-' if time.altzone > 0 else '+', abs(time.altzone) // 3600, abs(time.altzone // 60) % 60))
if alert_message['status'] == 'new':
alert_payload = {
"station" : int(settings.station), # which station the event occurred on
"k2so_id" : alert_message['id'], # unique event ID
"start_ns" : alert_message['start_ns'], # start time in nanoseconds since epoch
"stop_ns" : alert_message['stop_ns'], # stop time in nanoseconds since epoch
"start_real": alert_message['start_real'].strftime("%d-%b-%Y (%H:%M:%S.%f)-UTC"), # new startreal
"rss_time" : alert_message['start_real'].strftime("%a, %d %b %Y %H:%M:%S {}").format(utc_local_offset), # new startreal
"message" : alert_message['status'] # general event message (this is mostly a placeholder)
}
if alert_message['status'] == 'update':
alert_payload = {
"grafana_id" : alert_message['grafanaID'],
"k2so_id" : alert_message['id'], # unique event ID
"start_ns" : alert_message['start_ns'],
"stop_ns" : alert_message['stop_ns'],
"start_real": alert_message['start_real'].strftime("%d-%b-%Y (%H:%M:%S.%f)-UTC"), #start stopreal
"rss_time" : alert_message['start_real'].strftime("%a, %d %b %Y %H:%M:%S {}").format(utc_local_offset), # new startreal
"message" : alert_message['status']
}
if alert_message['status'] == 'stop':
# pumpkin_score_transmit(alert_message['start_ns'],alert_message['stop_ns'])
alert_payload = {
"station" : int(settings.station),
"k2so_id" : alert_message['id'], # unique event ID
"start_ns" : alert_message['start_ns'],
"stop_ns" : alert_message['stop_ns'],
"message" : alert_message['status'],
"start_real": alert_message['start_real'].strftime("%d-%b-%Y (%H:%M:%S.%f)-UTC"), #start stopreal
"stop_real": alert_message['stop_real'].strftime("%d-%b-%Y (%H:%M:%S.%f)-UTC"), #stop stopreal
"rss_time" : alert_message['start_real'].strftime("%a, %d %b %Y %H:%M:%S {}").format(utc_local_offset), # new startreal
"grafana_id" : alert_message['grafanaID'],
"vpp" : alert_message['score']
}
try:
alert_post = requests.post(alert_url, json=alert_payload, timeout = 1) # post message payload to the API URL and store the response
        print('\nOSNDS Station {0}: API POST returned with code ({1}) and response ({2})'.format(settings.station, alert_post.status_code, alert_post.text)) if settings.debug == True else None
if alert_post.status_code == 200:
if alert_message['status'] == 'new':
returnJSON = alert_post.text
returnDict = json.loads(str(returnJSON))
annotID = returnDict['id']
return {alert_post.status_code, annotID}
if alert_message['status'] == 'update':
return alert_post.status_code
print('\nOSNDS Station {0}: A new anomaly has been reported:\n Event ID: {1}\n Start Time (ns): {2}\n End Time (ns): {3}'.format(settings.station, data.last_event_id, alert_message['start_ns'], alert_message['stop_ns'])) #if settings.debug == True else None
else:
print('\nOSNDS Station {0}: A new anomaly has been detected but failed to be reported to OSNDS (Status Code: {1})'.format(settings.station, alert_post.status_code))
except Exception as e:
print('\nOSNDS Station {0}: A new anomaly has been detected but failed to be reported to OSNDS - please check internet connection'.format(settings.station))
print(' Error: {0}'.format(e)) if settings.debug == True else None
return
def event_publisher():
# event publisher operates by using a list of dictionaries. each detected event is group into a single entry in the list.
# print(f'entering event publisher routine')
# print(f'data.waveform:\n{1}\n',data.waveform[1:5])
# print(f'data.waveform.empty?:\n{1}\n',data.waveform.empty)
if data.waveform.empty:
pass
else:
# print(f'DataStore.dict',DataStore.dict)
        if not DataStore.dict:
            print("data doesn't exist")
        else:
            print('data exists')
# print('time check', data.waveform.first_valid_index().value)
# print(DataStore.dict[0]['stop_ns'])
# print(data.waveform.first_valid_index().value-DataStore.dict[0]['stop_ns'])
if (data.waveform.first_valid_index().value)-DataStore.dict[0]['stop_ns'] > 2*settings.trigger_cooldown:
print('time exceeded!!')
# try:
##############################################################################
############ PUMPKIN CHUNKIN VPP RECORD AND SEND #############################
##############################################################################
# print(f"ID: {DataStore.dict[0]}")
# print(f"ID: {DataStore.dict[0]['id']}")
event_start_time_ns = data.waveform.loc[data.waveform.id==DataStore.dict[0]['id']].first_valid_index() #first timestamp for that event number
event_stop_time_ns = data.waveform.loc[data.waveform.id==DataStore.dict[0]['id']].last_valid_index() #last timestamp for that event number
df_slice = data.waveform[event_start_time_ns:event_stop_time_ns]
                print(f"event data frame slice:\n{df_slice}\n")
xyz = df_slice["x_y_z"]
max_xyz = xyz.max()
min_xyz = xyz.min()
print(max_xyz, min_xyz)
vpp = (max_xyz-min_xyz)*100
print(vpp)
vpp_payload = {
"score" : int(vpp),
"user_id" : "auto", # unique event ID
}
pumpkin_chunkin_vpp_url = "https://config.osnds.net/pumpkin-contest/postJSON"
vpp_post = requests.post(pumpkin_chunkin_vpp_url, json=vpp_payload, timeout = 1)
############################################################################
############### PUMPKIN CHUNKIN VPP RECORD AND SEND STOP ###################
############################################################################
DataStore.dict[0].update(
{
'status' : 'stop',
"score" : int(vpp),
}
)
send_alert(DataStore.dict[0])
# except:
# print("++++++++++++ error sending new event ++++++++++++")
pprint('!!!! POP{} !!!!'.format(0))
DataStore.dict.pop(0)
else:
print('time not exceeded')
try:
print(data.waveform[1:5])
# time.sleep(2)
unique_event_numbers = data.waveform.id.unique() # get unique values in events, i.e. null,1,2,3
except KeyError as k:
data.waveform['id'] = NaN
return
# debuging code here
# print('data.waveform',data.waveform)
# print('filter by anomalies',filter_dataframe_by_val(data.waveform,'anomalies',True))
        filtered_unique_event_numbers = unique_event_numbers[~pd.isna(unique_event_numbers)]
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
    --inFile: Path to the input CSV file containing the time-series data values
    --outFile: Path to the output INI configuration file for the time-series data values
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
            self.fileLocation = os.path.abspath(inputFileName)
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # 2020-06-09 19:14:00.000; datetime.datetime replaces the deprecated pandas.datetime
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
            return (None, None, None, None)  # match the four values unpacked by the caller
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
        analysisFrame = analysisFrame.apply(pandas.to_numeric, errors='coerce')  # Fill in bad data with Not-a-Number (NaN); apply returns a copy, so assign it back
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
            if pandas.api.types.is_string_dtype(analysisFrame[columnName]):  # dtype objects are never str instances, so test the dtype properly
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():  # iterating a DataFrame directly yields only column names
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
Calculates the standard deviation in a multiplication method since division produces a infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
            sd = numpy.float128(0.0)  # accumulate squared deviations from zero (seeding with the mean skews the result)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomalies due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):  # equality with NaN is always False; use numpy.isnan
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(float(sigmaValue)):  # 'is' identity checks against NaN never match; use numpy.isnan
                    sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(float(sigmaRangeValue)):
                    sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
            # Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            logicVector = (dataAnalysisFrame[columnName] >= 1)  # filter the input frame (dataAnalysisCleaned is still None at this point)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": | pandas.StringDtype() | pandas.StringDtype |
import os
import time
import fire
import random
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix, auc, roc_curve
from xgboost import XGBClassifier
## use a non-interactive matplotlib backend (no display attached)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from helpers import save_obj, load_obj
def get_submission(
X_train, X_valid, y_train, y_valid, X_test,
train_params={}, eval_metric='auc',
save=False, load=False, mdl_name='xgb_class'
):
start_time = time.time()
end_time = start_time
if load:
classifier = load_obj(mdl_name)
else:
classifier = XGBClassifier(**train_params)
classifier.fit(X_train.values, y_train.values.ravel(), eval_metric=eval_metric)
end_time = time.time()
if save:
save_obj(classifier, mdl_name)
print('model saved')
train_pred = classifier.predict(X_train.values)
valid_pred = classifier.predict(X_valid.values)
test_pred = classifier.predict(X_test.values)
fpr, tpr, _ = roc_curve(y_train.values, train_pred, pos_label=1)
train_loss = auc(fpr, tpr)
fpr, tpr, _ = roc_curve(y_valid.values, valid_pred, pos_label=1)
valid_loss = auc(fpr, tpr)
feature_importances = classifier.feature_importances_
feature_names = X_train.columns.values
sorted_idx = np.argsort(feature_importances*-1) # descending order
summary = '====== XGBClassifier Training Summary ======\n'
for idx in sorted_idx:
summary += '[{:<25s}] | {:<10.4f}\n'.format(feature_names[idx], feature_importances[idx])
summary += '>>> training_time={:10.2f}min\n'.format((end_time-start_time)/60)
summary += '>>> Final AUC: {:10.4f}(Training), {:10.4f}(Validation)\n'.format(train_loss,valid_loss)
# Generate submission
submission = pd.DataFrame(data=test_pred,index=X_test.index, columns=['Next_Premium'])
submission_train = pd.DataFrame(data=train_pred,index=X_train.index, columns=['Next_Premium'])
    submission_valid = pd.DataFrame(data=valid_pred, index=X_valid.index, columns=['Next_Premium'])
    # the source is truncated here; returning the generated artifacts is an assumption
    return submission, submission_train, submission_valid, summary
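# --- Added usage sketch (assumption, demonstration only): the synthetic frames, the 80/20 split,
# and the hyper-parameters below are placeholders, not values from the original project.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = pd.DataFrame(rng.randn(200, 4), columns=list('abcd'))
    y = pd.Series((X['a'] + rng.randn(200) > 0).astype(int))
    X_tr, X_va, y_tr, y_va = X[:160], X[160:], y[:160], y[160:]
    sub, sub_tr, sub_va, report = get_submission(
        X_tr, X_va, y_tr, y_va, X_va,
        train_params={'n_estimators': 50, 'max_depth': 3})
    print(report)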
# ----------------------------------------------------------------------------
# Copyright (c) 2017-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import os
import pandas as pd
import pandas.testing as pdt
import biom
import shutil
import json
import numpy as np
from sklearn.metrics import mean_squared_error, accuracy_score
from sklearn.ensemble import AdaBoostClassifier
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import Pipeline
import skbio
import qiime2
from q2_types.feature_table import (FeatureTable, PercentileNormalized)
from qiime2.plugins import sample_classifier
from q2_sample_classifier.tests.test_base_class import \
SampleClassifierTestPluginBase
from q2_sample_classifier.classify import (
regress_samples_ncv, classify_samples_ncv, fit_classifier, fit_regressor,
detect_outliers, split_table, predict_classification,
predict_regression)
from q2_sample_classifier.utilities import (
_set_parameters_and_estimator, _train_adaboost_base_estimator,
_match_series_or_die, _extract_features)
from q2_sample_classifier import (
SampleEstimatorDirFmt, PickleFormat)
class SampleEstimatorTestBase(SampleClassifierTestPluginBase):
package = 'q2_sample_classifier.tests'
def setUp(self):
super().setUp()
def _load_biom(table_fp):
table_fp = self.get_data_path(table_fp)
table = qiime2.Artifact.load(table_fp)
table = table.view(biom.Table)
return table
def _load_cmc(md_fp, column):
md_fp = self.get_data_path(md_fp)
md = pd.read_csv(md_fp, sep='\t', header=0, index_col=0)
md = qiime2.CategoricalMetadataColumn(md[column])
return md
table_chard_fp = _load_biom('chardonnay.table.qza')
mdc_chard_fp = _load_cmc('chardonnay.map.txt', 'Region')
pipeline, importances = fit_classifier(
table_chard_fp, mdc_chard_fp, random_state=123,
n_estimators=2, n_jobs=1, optimize_feature_selection=True,
parameter_tuning=True, missing_samples='ignore')
transformer = self.get_transformer(
Pipeline, SampleEstimatorDirFmt)
self._sklp = transformer(pipeline)
sklearn_pipeline = self._sklp.sklearn_pipeline.view(PickleFormat)
self.sklearn_pipeline = str(sklearn_pipeline)
self.pipeline = pipeline
def _custom_setup(self, version):
with open(os.path.join(self.temp_dir.name,
'sklearn_version.json'), 'w') as fh:
fh.write(json.dumps({'sklearn-version': version}))
shutil.copy(self.sklearn_pipeline, self.temp_dir.name)
return SampleEstimatorDirFmt(
self.temp_dir.name, mode='r')
class EstimatorsTests(SampleClassifierTestPluginBase):
def setUp(self):
super().setUp()
def _load_biom(table_fp):
table_fp = self.get_data_path(table_fp)
table = qiime2.Artifact.load(table_fp)
table = table.view(biom.Table)
return table
def _load_md(md_fp):
md_fp = self.get_data_path(md_fp)
md = pd.read_csv(md_fp, sep='\t', header=0, index_col=0)
md = qiime2.Metadata(md)
return md
def _load_nmc(md_fp, column):
md_fp = self.get_data_path(md_fp)
md = pd.read_csv(md_fp, sep='\t', header=0, index_col=0)
md = qiime2.NumericMetadataColumn(md[column])
return md
def _load_cmc(md_fp, column):
md_fp = self.get_data_path(md_fp)
md = pd.read_csv(md_fp, sep='\t', header=0, index_col=0)
md = qiime2.CategoricalMetadataColumn(md[column])
return md
self.table_chard_fp = _load_biom('chardonnay.table.qza')
self.md_chard_fp = _load_md('chardonnay.map.txt')
self.mdc_chard_fp = _load_cmc('chardonnay.map.txt', 'Region')
self.table_ecam_fp = _load_biom('ecam-table-maturity.qza')
self.md_ecam_fp = _load_md('ecam_map_maturity.txt')
self.mdc_ecam_fp = _load_nmc('ecam_map_maturity.txt', 'month')
self.exp_imp = pd.read_csv(
self.get_data_path('importance.tsv'), sep='\t', header=0,
index_col=0, names=['feature', 'importance'])
self.exp_pred = pd.read_csv(
self.get_data_path('predictions.tsv'), sep='\t', header=0,
index_col=0, squeeze=True)
index = pd.Index(['A', 'B', 'C', 'D'], name='id')
self.table_percnorm = qiime2.Artifact.import_data(
FeatureTable[PercentileNormalized], pd.DataFrame(
[[20.0, 20.0, 50.0, 10.0], [10.0, 10.0, 70.0, 10.0],
[90.0, 8.0, 1.0, 1.0], [30.0, 15.0, 20.0, 35.0]],
index=index,
columns=['feat1', 'feat2', 'feat3', 'feat4'])).view(biom.Table)
self.mdc_percnorm = qiime2.CategoricalMetadataColumn(
pd.Series(['X', 'X', 'Y', 'Y'], index=index, name='name'))
# test feature extraction
def test_extract_features(self):
table = self.table_ecam_fp
dicts = _extract_features(table)
dv = DictVectorizer()
dv.fit(dicts)
features = table.ids('observation')
self.assertEqual(set(dv.get_feature_names()), set(features))
self.assertEqual(len(dicts), len(table.ids()))
for dict_row, (table_row, _, _) in zip(dicts, table.iter()):
for feature, count in zip(features, table_row):
if count == 0:
self.assertTrue(feature not in dict_row)
else:
self.assertEqual(dict_row[feature], count)
def test_classify_samples_from_dist(self):
# -- setup -- #
# 1,2 are a group, 3,4 are a group
sample_ids = ('f1', 'f2', 's1', 's2')
distance_matrix = skbio.DistanceMatrix([
[0, 1, 4, 4],
[1, 0, 4, 4],
[4, 4, 0, 1],
[4, 4, 1, 0],
], ids=sample_ids)
dm = qiime2.Artifact.import_data('DistanceMatrix', distance_matrix)
categories = pd.Series(('skinny', 'skinny', 'fat', 'fat'),
index=sample_ids[::-1], name='body_mass')
categories.index.name = 'SampleID'
metadata = qiime2.CategoricalMetadataColumn(categories)
# -- test -- #
res = sample_classifier.actions.classify_samples_from_dist(
distance_matrix=dm, metadata=metadata, k=1)
pred = res[0].view(pd.Series).sort_values()
expected = pd.Series(('fat', 'skinny', 'fat', 'skinny'),
index=['f1', 's1', 'f2', 's2'])
not_expected = pd.Series(('fat', 'fat', 'fat', 'skinny'),
index=sample_ids)
# order matters for pd.Series.equals()
self.assertTrue(expected.sort_index().equals(pred.sort_index()))
self.assertFalse(not_expected.sort_index().equals(pred.sort_index()))
def test_classify_samples_from_dist_with_group_of_single_item(self):
# -- setup -- #
# 1 is a group, 2,3,4 are a group
sample_ids = ('f1', 's1', 's2', 's3')
distance_matrix = skbio.DistanceMatrix([
[0, 2, 3, 3],
[2, 0, 1, 1],
[3, 1, 0, 1],
[3, 1, 1, 0],
], ids=sample_ids)
dm = qiime2.Artifact.import_data('DistanceMatrix', distance_matrix)
categories = pd.Series(('fat', 'skinny', 'skinny', 'skinny'),
index=sample_ids, name='body_mass')
categories.index.name = 'SampleID'
metadata = qiime2.CategoricalMetadataColumn(categories)
# -- test -- #
res = sample_classifier.actions.classify_samples_from_dist(
distance_matrix=dm, metadata=metadata, k=1)
pred = res[0].view(pd.Series)
expected = pd.Series(('skinny', 'skinny', 'skinny', 'skinny'),
index=sample_ids)
self.assertTrue(expected.sort_index().equals(pred.sort_index()))
def test_2nn(self):
# -- setup -- #
# 2 nearest neighbors of each sample are
# f1: s1, s2 (classified as skinny)
# s1: f1, s2 (closer to f1 so fat)
# s2: f1, (s1 or s3) (closer to f1 so fat)
# s3: s1, s2 (skinny)
sample_ids = ('f1', 's1', 's2', 's3')
distance_matrix = skbio.DistanceMatrix([
[0, 2, 1, 5],
[2, 0, 3, 4],
[1, 3, 0, 3],
[5, 4, 3, 0],
], ids=sample_ids)
dm = qiime2.Artifact.import_data('DistanceMatrix', distance_matrix)
categories = pd.Series(('fat', 'skinny', 'skinny', 'skinny'),
index=sample_ids, name='body_mass')
categories.index.name = 'SampleID'
metadata = qiime2.CategoricalMetadataColumn(categories)
# -- test -- #
res = sample_classifier.actions.classify_samples_from_dist(
distance_matrix=dm, metadata=metadata, k=2)
pred = res[0].view(pd.Series)
expected = pd.Series(('skinny', 'fat', 'fat', 'skinny'),
index=sample_ids)
self.assertTrue(expected.sort_index().equals(pred.sort_index()))
# test that each classifier works and delivers an expected accuracy result
# when a random seed is set.
def test_classifiers(self):
for classifier in ['RandomForestClassifier', 'ExtraTreesClassifier',
'GradientBoostingClassifier', 'AdaBoostClassifier',
'LinearSVC', 'SVC', 'KNeighborsClassifier']:
table_fp = self.get_data_path('chardonnay.table.qza')
table = qiime2.Artifact.load(table_fp)
res = sample_classifier.actions.classify_samples(
table=table, metadata=self.mdc_chard_fp,
test_size=0.5, cv=1, n_estimators=10, n_jobs=1,
estimator=classifier, random_state=123,
parameter_tuning=False, optimize_feature_selection=False,
missing_samples='ignore')
pred = res[2].view(pd.Series)
pred, truth = _match_series_or_die(
pred, self.mdc_chard_fp.to_series(), 'ignore')
accuracy = accuracy_score(truth, pred)
self.assertAlmostEqual(
accuracy, seeded_results[classifier], places=4,
msg='Accuracy of %s classifier was %f, but expected %f' % (
classifier, accuracy, seeded_results[classifier]))
# test if training classifier with pipeline classify_samples raises
# warning when test_size = 0.0
def test_classify_samples_w_all_train_set(self):
with self.assertWarnsRegex(Warning, "not representative of "
"your model's performance"):
table_fp = self.get_data_path('chardonnay.table.qza')
table = qiime2.Artifact.load(table_fp)
sample_classifier.actions.classify_samples(
table=table, metadata=self.mdc_chard_fp,
test_size=0.0, cv=1, n_estimators=10, n_jobs=1,
estimator='RandomForestClassifier', random_state=123,
parameter_tuning=False, optimize_feature_selection=False,
missing_samples='ignore')
# test that the plugin methods/visualizers work
def test_regress_samples_ncv(self):
y_pred, importances = regress_samples_ncv(
self.table_ecam_fp, self.mdc_ecam_fp, random_state=123,
n_estimators=2, n_jobs=1, stratify=True, parameter_tuning=True,
missing_samples='ignore')
def test_classify_samples_ncv(self):
y_pred, importances, probabilities = classify_samples_ncv(
self.table_chard_fp, self.mdc_chard_fp, random_state=123,
n_estimators=2, n_jobs=1, missing_samples='ignore')
# test reproducibility of classifier results, probabilities
def test_classify_samples_ncv_accuracy(self):
dat = biom.Table(np.array(
[[4446, 9828, 3208, 776, 118, 4175, 657, 251, 7505, 617],
[1855, 8716, 3257, 1251, 3205, 2557, 4251, 7405, 1417, 1215],
[6616, 281, 8616, 291, 261, 253, 9075, 252, 7385, 4068]]),
observation_ids=['o1', 'o2', 'o3'],
sample_ids=['s1', 's2', 's3', 's4', 's5',
's6', 's7', 's8', 's9', 's10'])
md = qiime2.CategoricalMetadataColumn(pd.Series(
['red', 'red', 'red', 'red', 'red',
'blue', 'blue', 'blue', 'blue', 'blue'],
index=pd.Index(['s1', 's2', 's3', 's4', 's5',
's6', 's7', 's8', 's9', 's10'],
name='sample-id'), name='color'))
y_pred, importances, probabilities = classify_samples_ncv(
dat, md, random_state=123, n_estimators=2, n_jobs=1,
missing_samples='ignore')
exp_pred = pd.Series(
['blue', 'red', 'red', 'blue', 'blue',
'blue', 'blue', 'red', 'blue', 'blue'],
index=pd.Index(['s4', 's6', 's1', 's10', 's5', 's8', 's2', 's9',
's3', 's7'], dtype='object', name='SampleID'),
name='prediction')
exp_importances = pd.DataFrame(
[0.595111111111111, 0.23155555555555551, 0.17333333333333334],
index=pd.Index(['o3', 'o1', 'o2'], name='feature'),
columns=['importance'])
exp_probabilities = pd.DataFrame(
[[0.5, 0.5], [0., 1.], [0., 1.], [0.5, 0.5], [0.5, 0.5],
[0.5, 0.5], [0.5, 0.5], [0., 1.], [1., 0.], [1., 0.]],
index=pd.Index(['s4', 's6', 's1', 's10', 's5', 's8', 's2', 's9',
's3', 's7'], name='SampleID'),
columns=['blue', 'red'])
pdt.assert_series_equal(y_pred, exp_pred)
        pdt.assert_frame_equal(importances, exp_importances)
        pdt.assert_frame_equal(probabilities, exp_probabilities)
# -*- coding: utf-8 -*-
from warnings import catch_warnings
import numpy as np
from datetime import datetime
from pandas.util import testing as tm
import pandas as pd
from pandas.core import config as cf
from pandas.compat import u
from pandas._libs.tslib import iNaT
from pandas import (NaT, Float64Index, Series,
DatetimeIndex, TimedeltaIndex, date_range)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import (
array_equivalent, isnull, notnull,
na_value_for_dtype)
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(isnull(s), Series))
class TestIsNull(object):
def test_0d_array(self):
assert isnull(np.array(np.nan))
assert not isnull(np.array(0.0))
assert not isnull(np.array(0))
# test object dtype
assert isnull(np.array(np.nan, dtype=object))
assert not isnull(np.array(0.0, dtype=object))
assert not isnull(np.array(0, dtype=object))
def test_empty_object(self):
for shape in [(4, 0), (4,)]:
arr = np.empty(shape=shape, dtype=object)
result = isnull(arr)
expected = np.ones(shape=shape, dtype=bool)
tm.assert_numpy_array_equal(result, expected)
def test_isnull(self):
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert float('nan')
assert not isnull(np.inf)
assert not isnull(-np.inf)
# series
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert isinstance(isnull(s), Series)
# frame
for df in [tm.makeTimeDataFrame(), tm.makePeriodFrame(),
tm.makeMixedDataFrame()]:
result = isnull(df)
expected = df.apply(isnull)
tm.assert_frame_equal(result, expected)
# panel
with catch_warnings(record=True):
for p in [tm.makePanel(), tm.makePeriodPanel(),
tm.add_nans(tm.makePanel())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel_equal(result, expected)
# panel 4d
with catch_warnings(record=True):
            for p in [tm.makePanel4D(), tm.add_nans_panel4d(tm.makePanel4D())]:
                result = isnull(p)
                expected = p.apply(isnull)
                tm.assert_panel4d_equal(result, expected)
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 25 17:40:53 2021
@author: ali_d
"""
#Pandas
import pandas as pd
import numpy as np
#data
numbers = [20,30,40,50]
print("----")
leters = ["a","b","c","d",40]
pandas_pd = pd.Series(numbers)
pandas_pd1 = pd.Series(leters)
print(pandas_pd)
print(type(pandas_pd))
print(pandas_pd1)
scalers = 5
print(pd.Series(scalers))
print("---")
pandas_series1 = pd.Series(numbers,["a","b","c","d"])
print(pandas_series1)
print("---")
dict = {"a":15,"b":25,"c":35,"d":45}
pandas_series2 = pd.Series(dict)
print(pandas_series2)
print("---")
a = np.random.randint(10,100,5)
pandas_series3 = pd.Series(a)
print(pandas_series3)
print("---")
pandas_series3 = pd.Series(a,["a","b","c","d","e"])
print(pandas_series3)
print("---")
pandas_series4 = pd.Series([20,30,40,50],["a","b","c","d"])
print(pandas_series4[0])
print(pandas_series4["a"])
print(pandas_series4[:2])
print(pandas_series4[-2])
print(pandas_series4["a"])
print(pandas_series4["d"])
print(pandas_series4["a"])
#
print(pandas_series4.ndim) # ndim confirms this is a 1-dimensional series
print(pandas_series4.dtype)#type
print(pandas_series4.shape)
print(pandas_series4.sum())
print(pandas_series4.max())#max
print(pandas_series4.min())#min
print(pandas_series4+pandas_series4)
print(pandas_series4+1000)
print("---")
print(pandas_series4>35)
print("---")
result = pandas_series4 % 2 ==0
print(result)
print("---")
print(pandas_series4[pandas_series4 %2 ==0])
print(pandas_series4[pandas_series4 %2 ==1])
print("---")
opel2018 = pd.Series([20,30,40,10],["astra","corsa","mokka","insignia"])
opel2019 = pd.Series([20,80,40,20,None],["astra","corsa","mokka","insignia","Grandland"])
total = opel2018+opel2019
print(total)
#%% Pandas dataFrame
import pandas as pd
s1 = pd.Series([3,2,0,1])
s2 = pd.Series([0,3,7,2])
data = dict(apples=s1,oranges = s2)
print(data)
print("---")
df=pd.DataFrame(data)
print(df)
print("-------")
# "
# df1= pd.DateFrame()
# print(df1)
# "
df1 = pd.DataFrame([1,2,3,4,5])
print(df1)
print("---")
df2 = pd.DataFrame([["Ahmet",50],["Ali",60],["Yağmur",70],["Çınar",80]],columns = ["Name","Grade"],index=[1,2,3,4])
print(df2)
# columns = the column labels
print("---")
dict1 = {"Name":["Ahmet","Ali","Yağmur","Çınar"],
"Grade":[50,60,70,80]
}
# Grade = the score column
pd4= pd.DataFrame(dict1)
print(pd4)
liste = [["Ahmet",50],["Ali",60],["Yağmur",70],["Çınar",80]]
##
dict1 = {"Name":["Ahmet","Ali","Yağmur","Çınar"],
"Grade":[50,60,70,80]}
a1 = (pd.DataFrame(dict1,index=["212","232","236","456"]))
dict_list=[
{"Name":"Ahmet","Grade":50},
{"Name":"Alis","Grade":60},
{"Name":"Uğurcan","Grade":70},
{"Name":"Hasan","Grade":80},
{"Name":"Miray","Grade":90}
]
a2 = pd.DataFrame(dict_list)
#%% Working with DataFrames in pandas
import pandas as pd
import numpy as np
a=np.random.randn(3,3)
df = pd.DataFrame(a,index=["A","B","C"],columns=["Column1","Column2","Column3"])
print(df)
result = df
print("---")
print(result["Column1"])
print("---")
print(type(result["Column1"]))
print("---")
result = df[["Column1","Column2"]]
print(result)
print("---")
result1 = df.loc["A"]
print(result1)
print("---")
result2 = type(df.loc["A"])
print(result2)
print("---")
result3 = df.loc[:]
print(result3)
print("---")
result4 = df.loc[:,["Column1","Column2"]]
print(result4)
print("---")
result5 = df.loc[:,["Column1","Column3"]]
print(result5)
print("---")
result6 = df.loc["A":"B","Column2"]
print(result6)
print("---")
a=df.iloc[1]
print(a)
print("---1")
b =df.iloc[2]
print(b)
print("---2")
c=df.iloc[0]
print(c)
print("---3")
#%% Filtering
data = np.random.randint(10,100,75).reshape(15,5)
dfx = pd.DataFrame(data,columns=["Columns1","Columns2","Columns3","Columns4","Columns5"])
print(dfx)
print("---")
df = dfx.columns
print(df)
print("---")
df = dfx.head()
print(df)
print("---")
df =dfx.head(10)
print(df)
print("---")
df =dfx.tail()
print(df)
print("---")
df = dfx.tail(10)
print(df)
print("---")
df =dfx["Columns1"].head()
print(df)
print("---")
df=dfx.Columns1.head()
print(df)
print("---")
df = dfx[["Columns1","Columns2"]].head()
print(df)
print("----")
df = dfx[["Columns1","Columns2"]].tail()
print(df)
print("---")
# df = dfx[5:15] selects rows 5 through 14
df = dfx[5:15][["Columns1","Columns2"]].head()
print(df)
print("---")
df = dfx[5:15][["Columns1","Columns2"]].tail()
print(df)
print("---"*10)
df = dfx > 50
print(df)
print("----")
df = dfx[dfx > 50]
print(df)
print("---")
df = dfx[dfx % 2 == 0]
print(df)
print("---")
df = dfx[df["Columns1"] > 50]
print(df)
print("---")
df = dfx[df["Columns1"] > 50][["Columns1","Columns2"]]
print(df)
print("---")
#df = dfx.query("Columns1 >= 10 & Columns1 % 2 == 1")
#df = dfx.query("Columns1 >= 10 & Columns1 % 2 == 1")[["Columns1","Columns2"]]
#(df)
# query filters rows with a boolean expression string
#%% DataFrame GroupBy
import pandas as pd
import numpy as np
personel = {"Çalışan":["<NAME>","<NAME>","<NAME>","<NAME>","<NAME>","<NAME>","<NAME>"],
"Departman":["İnsan kaynakları","Bilgi İşlem","Muhasebe","İnsan Kaynakları","Bilgi İşlem","Muhasebe","Bilgi İşlem"],
"Yaş":[30,25,45,50,23,34,42],
"Semt":["KadıKöy","Tuzla","Maltepe","Tuzla","Maltepe","Tuzla","KadıKöy"],
"Maaş":[5000,3000,4000,3500,2750,6500,4500]}
df = pd.DataFrame(personel)
print(df)
print("---")
result = df["Maaş"].sum()
print(result)
print("---")
result1 = df.groupby("Departman")
print(result1)
print("---")
result2 = df.groupby("Departman").groups
print(result2)
print("---")
result3 = df.groupby(["Departman","Semt"]).groups
print(result3)
print()
print("---")
semtler = df.groupby("Semt")
for name,group in semtler:
print(name)
print(group)
print()
print("---")
print()
for name,group in df.groupby("Departman"):
print(name)
print(group)
print()
print("--------------")
print()
xv = df.groupby("Semt").get_group("KadıKöy")
print(xv)
print("--------------")
xv1 = df.groupby("Departman").get_group("Muhasebe")
print(xv1)
print("---------------")
xv2 = df.groupby("Departman").sum()
print(xv2)
print("---------------")
xv3 = df.groupby("Departman").mean()
print(xv3)
print("--------------")
xv4 = df.groupby("Departman")["Maaş"].mean()
print(xv4)
print("--------------")
xv5 = df.groupby("Semt")["Çalışan"].count()
print(xv5)
print("-------------")
xv6 = df.groupby("Departman")["Yaş"].max()
print(xv6)
print("-------------")
xv7 = df.groupby("Departman")["Maaş"].max()["Muhasebe"]
print(xv7)
print("-------------")
xv8 = df.groupby("Departman").agg([np.sum,np.mean,np.max,np.min]).loc["Muhasebe"]
print(xv8)
#%% Missing and corrupt data analysis with pandas
import pandas as pd
import numpy as np
data = np.random.randint(20,200,15).reshape(5,3)
print(data)
df = pd.DataFrame(data,index = ["a","c","e","f","h"], columns = ["Column1","Column2","Column3"])
print(df)
print("---")
df = df.reindex(["a","b","c","d","e","f","g","h"])
print(df)
print("---")
newColumn =[np.nan,30,np.nan,51,np.nan,30,np.nan,10]
df["Column4"]=newColumn
result =df
result=df.drop("Column1",axis =1)
print("---")
result=df.drop(["Column1","Column2"],axis =1)
print("---")
result = df.drop("a",axis=0)
print("---")
result = df.drop(["a","b","c"],axis=0)
print("---")
result = df.isnull()
print(result)
print("---")
result = df.notnull()
print(result)
print("---")
result = df.isnull().sum()
print(result)
print("---")
result = df["Column1"].isnull().sum()
print(result)
print()
result =df["Column2"].isnull().sum()
print(result)
print("---")
result = df[df["Column1"].isnull()]
print(result)
print("---")
result = df[df["Column1"].isnull()]["Column1"]
print(result)
print("---")
result = df[df["Column1"].notnull()]["Column1"]
print(result)
print("---")
print()
result = df.dropna()
print(result)
print("---")
print(df)
print("---")
result = df.dropna(axis = 1)
print(result)
print("---")
result = df.dropna(how="any")
print(result)
print("---")
result = df.dropna(how="all")
print(result)
print("---")
result = df.dropna(subset=["Column1","Column2"],how="all")
print(result)
print("----")
result = df.dropna(subset=["Column1","Column2"],how="all")
print(result)
print("---")
result = df.dropna(thresh=2)
print(result)
print("---")
result = df.dropna(thresh=4)
print(result)
print("----")
result = df.fillna(value = "no input")
print(result)
print("---")
result = df.fillna(value = 1)
print(result)
print("---")
result = df.sum().sum()
print(result)
print("---")
result = df.size
print(result)
print("---")
result = df.isnull().sum()
print(result)
print("---")
result = df.isnull().sum().sum()
print(result)
print("----")
##############
def ortalama(df):
toplam = df.sum().sum()
adet = df.size - df.isnull().sum().sum()
return toplam / adet
result = df.fillna(value = ortalama(df))
print(result)
##############
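# Added illustration: fillna can also take per-column means directly, which is usually
# preferable to the single global mean computed by ortalama() above.
result = df.fillna(df.mean())
print(result)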
#%% String functions with pandas
import pandas as pd
customers = {
"CostomerId":[1,2,3,4],
"firstName":["Ahmet","Ali","Hasan","Can"],
"lastName":["Yılmaz","Korkmaz","Çelik","Toprak"],
}
orders = {
"OrderId":[10,11,12,13],
"CustomerId":[1,2,5,7],
"OrderDate":["2010-07-04","2010-08-04","2010-07-07","2012-07-04"],
}
df_customers = pd.DataFrame(customers,columns=["CostomerId","firstName","lastName"])
df_orders = pd.DataFrame(orders,columns=["OrderId","CustomerId","OrderDate"])
result = pd.merge(df_customers, df_orders, left_on="CostomerId", right_on="CustomerId", how="inner")
# Merge = combine two DataFrames on their key columns
#%%
customersA = {
"CostomerId":[1,2,3,4],
"firstName":["Ahmet","Ali","Hasan","Can"],
"lastName":["Yılmaz","Korkmaz","Çelik","Toprak"]
}
ordersB = {
"OrderId":[10,11,12,13],
"FirstName":["Yağmur","Çınar","Cengiz","Can"],
"LastName":["Bilge","Turan","Yılmaz","Turan"]
}
df_customersA = pd.DataFrame(customersA,columns=["CostomerId","firstName","lastName"])
df_ordersB = pd.DataFrame(ordersB,columns=["OrderId","FirstName","LastName"])
result = pd.concat([df_customersA,df_ordersB])
print(result)
#%%
np.random.seed(0)
left = pd.DataFrame({'key': ['A', 'B', 'C', 'D'], 'value': np.random.randn(4)})
right = pd.DataFrame({'key': ['B', 'D', 'E', 'F'], 'value': np.random.randn(4)})
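# A short sketch of join types on these frames (assumes left/right above):
inner_join = pd.merge(left, right, on="key", how="inner", suffixes=("_l", "_r"))
outer_join = pd.merge(left, right, on="key", how="outer", suffixes=("_l", "_r"))
print(inner_join)
print(outer_join)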
#%% DataFrame Methods with Pandas
import pandas as pd
import numpy as np
data = {
"Column1":[1,2,3,4,5],
"Column2":[10,20,13,45,25],
"Column3":["abc","bca","ade","cba","dea"]
}
df = | pd.DataFrame(data) | pandas.DataFrame |
"""
Wrappers to help with Vowpal Wabbit (VW).
"""
import sys
from collections import Counter
import pandas as pd
import numpy as np
from scipy.special import gammaln, digamma, psi # gamma function utils
from . import text_processors
from ..common import smart_open, TokenError
from ..common_math import series_to_frame
###############################################################################
# Globals
###############################################################################
EPS = 1e-100
def parse_varinfo(varinfo_file):
"""
Uses the output of the vw-varinfo utility to get a DataFrame with variable
info.
Parameters
----------
varinfo_file : Path or buffer
The output of vw-varinfo
"""
with smart_open(varinfo_file) as open_file:
# For some reason, pandas is confused...so just split the lines
# Create a dict {item1: [...], item2: [...],...} for each item in the
# header
        header = next(open_file).split()
rows = {col_name: [] for col_name in header}
for line in open_file:
for i, item in enumerate(line.split()):
rows[header[i]].append(item)
# Create a data frame
varinfo = | pd.DataFrame(rows) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import os
import pandas as pd
from collections import deque
import matplotlib.pyplot as plt
import rrcf
# Read data
def data_explore(is_show=True):
abs_path = os.path.abspath(__file__)
data_paths = abs_path.strip().split("/")[:-1]
data_paths.extend(["..", "resources", "website_flow.csv"])
data_path = os.path.join(*data_paths)
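    # os.path.join(*parts) drops the leading "/" of an absolute path,
    # so it is restored below before reading the CSV.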
if not data_path.startswith("/"):
data_path = "/" + data_path
data_frame = pd.read_csv(data_path)
data_frame.sort_values(["time"], inplace=True)
data_frame["time"] = | pd.to_datetime(data_frame["time"], unit="s") | pandas.to_datetime |
import hashlib
import json
import re
import os
from pathlib import Path
from typing import Callable, Optional
import numpy as np
import pandas as pd
from phc.easy.query.fhir_aggregation import FhirAggregation
from phc.util.csv_writer import CSVWriter
TABLE_REGEX = r"^[^F]+FROM (\w+)"
DIR = "~/Downloads/phc/api-cache"
DATE_FORMAT_REGEX = (
r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{3})?([-+]\d{4}|Z)"
)
DATA_LAKE = "data_lake"
FHIR_DSL = "fhir_dsl"
class APICache:
@staticmethod
def filename_for_sql(sql: str, extension: str = "parquet"):
results = re.findall(TABLE_REGEX, sql)
table_name = results[0] if len(results) > 0 else "table"
hexdigest = hashlib.sha256(sql.encode("utf-8")).hexdigest()[0:8]
return "_".join([DATA_LAKE, table_name, hexdigest]) + "." + extension
@staticmethod
def does_cache_for_sql_exist(sql: str, extension: str = "parquet") -> bool:
return (
Path(DIR)
.expanduser()
.joinpath(APICache.filename_for_sql(sql, extension))
.exists()
)
@staticmethod
def filename_for_query(query: dict, namespace: Optional[str] = None):
"Descriptive filename with hash of query for easy retrieval"
is_aggregation = FhirAggregation.is_aggregation_query(query)
agg_description = "agg" if is_aggregation else ""
column_description = (
f"{len(query.get('columns', []))}col"
if not is_aggregation and isinstance(query.get("columns"), list)
else ""
)
where_description = "where" if query.get("where") else ""
unique_hash = hashlib.sha256(
json.dumps(query).encode("utf-8")
).hexdigest()[0:8]
path_name = [
# Exclude UUIDs but not paths with dashes
c.replace("-", "_")
for c in query.get("path", "").split("/")
if "-" not in c or len(c) != 36
]
components = [
namespace or "",
*path_name,
*[d.get("table", "") for d in query.get("from", [])],
agg_description,
column_description,
where_description,
unique_hash,
]
extension = "json" if is_aggregation else "csv"
return "_".join([c for c in components if len(c) > 0]) + "." + extension
@staticmethod
def does_cache_for_query_exist(
query: dict, namespace: Optional[str] = None
) -> bool:
return (
Path(DIR)
.expanduser()
.joinpath(APICache.filename_for_query(query, namespace))
.exists()
)
@staticmethod
def load_cache_for_query(
query: dict, namespace: Optional[str] = None
) -> pd.DataFrame:
filename = str(
Path(DIR)
.expanduser()
.joinpath(APICache.filename_for_query(query, namespace))
)
print(f'[CACHE] Loading from "{filename}"')
if FhirAggregation.is_aggregation_query(query):
with open(filename, "r") as f:
return FhirAggregation(json.load(f))
return APICache.read_csv(filename)
@staticmethod
def build_cache_callback(
query: dict,
transform: Callable[[pd.DataFrame], pd.DataFrame],
nested_key: Optional[str] = "_source",
namespace: Optional[str] = None,
):
"Build a CSV callback (not used for aggregations)"
folder = Path(DIR).expanduser()
folder.mkdir(parents=True, exist_ok=True)
filename = str(
folder.joinpath(APICache.filename_for_query(query, namespace))
)
writer = CSVWriter(filename)
def handle_batch(batch, is_finished):
batch = (
batch
if nested_key is None
else map(lambda r: r[nested_key], batch)
)
df = pd.DataFrame(batch)
if len(df) != 0:
writer.write(transform(df))
if is_finished and not os.path.exists(filename):
return | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
import sys
import glob
import torch
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem import AllChem
from rdkit import RDLogger
#from moses.script_utils import read_smiles_csv
def detokenize(inp,vocab):
output = ""
for i in inp:
token = list(vocab.keys())[list(vocab.values()).index(int(i))]
if(token=='<bos>'):
continue
elif(token=='<eos>'):
break
else:
output = output+token
return output
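# Example (hypothetical vocab): with vocab = {'<bos>': 0, '<eos>': 1, 'C': 2, 'O': 3},
# detokenize([0, 2, 3, 2, 1], vocab) returns "COC".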
def get_smiles_from_lbann_tensors(fdir, vocab_path):
###################
# First input files
###################
input_files = glob.glob(fdir+"*_input_seq.csv")
ins = np.loadtxt(input_files[0], delimiter=",")
for i, f in enumerate(input_files):
        if i > 0:
ins = np.concatenate((ins, np.loadtxt(f,delimiter=",")))
num_cols = ins.shape[1]
print("Num cols ", num_cols)
num_samples = ins.shape[0]
print("Num samples ", num_samples)
    vocab = pd.read_csv(vocab_path, delimiter=" ", header=None).to_dict()[0]
vocab = dict([(v,k) for k,v in vocab.items()])
samples = [detokenize(i_x,vocab) for i_x in ins[:,0:]]
samples = pd.DataFrame(samples, columns=['SMILES'])
print("Save gt files to " , "gt_"+"smiles.txt")
samples.to_csv("gt_"+"smiles.txt", index=False)
####################
# Second input files
####################
input_files = glob.glob(fdir+"*_pred_seq.csv")
ins = np.loadtxt(input_files[0], delimiter=",")
for i, f in enumerate(input_files):
        if i > 0:
ins = np.concatenate((ins, np.loadtxt(f,delimiter=",")))
num_cols = ins.shape[1]
print("Num cols ", num_cols)
num_samples = ins.shape[0]
print("Num samples ", num_samples)
vocab = | pd.read_csv(vocab_file, delimiter=" ", header=None) | pandas.read_csv |
import csv
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserError
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
import pandas.core.common as com
from pandas.io.common import get_handle
MIXED_FLOAT_DTYPES = ["float16", "float32", "float64"]
MIXED_INT_DTYPES = [
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
]
class TestDataFrameToCSV:
def read_csv(self, path, **kwargs):
params = {"index_col": 0, "parse_dates": True}
params.update(**kwargs)
return read_csv(path, **params)
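    # The helper above re-reads CSVs with index parsing on by default,
    # keeping the round-trip assertions in the tests below terse.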
def test_to_csv_from_csv1(self, float_frame, datetime_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv1__") as path:
float_frame["A"][:5] = np.nan
float_frame.to_csv(path)
float_frame.to_csv(path, columns=["A", "B"])
float_frame.to_csv(path, header=False)
float_frame.to_csv(path, index=False)
# test roundtrip
# freq does not roundtrip
datetime_frame.index = datetime_frame.index._with_freq(None)
datetime_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(datetime_frame, recons)
datetime_frame.to_csv(path, index_label="index")
recons = self.read_csv(path, index_col=None)
assert len(recons.columns) == len(datetime_frame.columns) + 1
# no index
datetime_frame.to_csv(path, index=False)
recons = self.read_csv(path, index_col=None)
tm.assert_almost_equal(datetime_frame.values, recons.values)
# corner case
dm = DataFrame(
{
"s1": Series(range(3), index=np.arange(3)),
"s2": Series(range(2), index=np.arange(2)),
}
)
dm.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(dm, recons)
def test_to_csv_from_csv2(self, float_frame):
with tm.ensure_clean("__tmp_to_csv_from_csv2__") as path:
# duplicate index
df = DataFrame(
np.random.randn(3, 3), index=["a", "a", "b"], columns=["x", "y", "z"]
)
df.to_csv(path)
result = self.read_csv(path)
tm.assert_frame_equal(result, df)
midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])
df = DataFrame(np.random.randn(3, 3), index=midx, columns=["x", "y", "z"])
df.to_csv(path)
result = self.read_csv(path, index_col=[0, 1, 2], parse_dates=False)
tm.assert_frame_equal(result, df, check_names=False)
# column aliases
col_aliases = Index(["AA", "X", "Y", "Z"])
float_frame.to_csv(path, header=col_aliases)
rs = self.read_csv(path)
xp = float_frame.copy()
xp.columns = col_aliases
tm.assert_frame_equal(xp, rs)
msg = "Writing 4 cols but got 2 aliases"
with pytest.raises(ValueError, match=msg):
float_frame.to_csv(path, header=["AA", "X"])
def test_to_csv_from_csv3(self):
with tm.ensure_clean("__tmp_to_csv_from_csv3__") as path:
df1 = DataFrame(np.random.randn(3, 1))
df2 = DataFrame(np.random.randn(3, 1))
df1.to_csv(path)
df2.to_csv(path, mode="a", header=False)
xp = pd.concat([df1, df2])
rs = read_csv(path, index_col=0)
rs.columns = [int(label) for label in rs.columns]
xp.columns = [int(label) for label in xp.columns]
tm.assert_frame_equal(xp, rs)
def test_to_csv_from_csv4(self):
with tm.ensure_clean("__tmp_to_csv_from_csv4__") as path:
# GH 10833 (TimedeltaIndex formatting)
dt = pd.Timedelta(seconds=1)
df = DataFrame(
{"dt_data": [i * dt for i in range(3)]},
index=Index([i * dt for i in range(3)], name="dt_index"),
)
df.to_csv(path)
result = read_csv(path, index_col="dt_index")
result.index = pd.to_timedelta(result.index)
result["dt_data"] = pd.to_timedelta(result["dt_data"])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_to_csv_from_csv5(self, timezone_frame):
# tz, 8260
with tm.ensure_clean("__tmp_to_csv_from_csv5__") as path:
timezone_frame.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=["A"])
converter = (
lambda c: to_datetime(result[c])
.dt.tz_convert("UTC")
.dt.tz_convert(timezone_frame[c].dt.tz)
)
result["B"] = converter("B")
result["C"] = converter("C")
tm.assert_frame_equal(result, timezone_frame)
def test_to_csv_cols_reordering(self):
# GH3454
chunksize = 5
N = int(chunksize * 2.5)
df = tm.makeCustomDataframe(N, 3)
cs = df.columns
cols = [cs[2], cs[0]]
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
tm.assert_frame_equal(df[cols], rs_c, check_names=False)
def test_to_csv_new_dupe_cols(self):
def _check_df(df, cols=None):
with tm.ensure_clean() as path:
df.to_csv(path, columns=cols, chunksize=chunksize)
rs_c = read_csv(path, index_col=0)
# we wrote them in a different order
# so compare them in that order
if cols is not None:
if df.columns.is_unique:
rs_c.columns = cols
else:
indexer, missing = df.columns.get_indexer_non_unique(cols)
rs_c.columns = df.columns.take(indexer)
for c in cols:
obj_df = df[c]
obj_rs = rs_c[c]
if isinstance(obj_df, Series):
tm.assert_series_equal(obj_df, obj_rs)
else:
tm.assert_frame_equal(obj_df, obj_rs, check_names=False)
# wrote in the same order
else:
rs_c.columns = df.columns
tm.assert_frame_equal(df, rs_c, check_names=False)
chunksize = 5
N = int(chunksize * 2.5)
# dupe cols
df = tm.makeCustomDataframe(N, 3)
df.columns = ["a", "a", "b"]
_check_df(df, None)
# dupe cols with selection
cols = ["b", "a"]
_check_df(df, cols)
@pytest.mark.slow
def test_to_csv_dtnat(self):
# GH3437
def make_dtnat_arr(n, nnat=None):
if nnat is None:
nnat = int(n * 0.1) # 10%
s = list(date_range("2000", freq="5min", periods=n))
if nnat:
for i in np.random.randint(0, len(s), nnat):
s[i] = NaT
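            # additionally pin NaT at a random offset from each end of the series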
i = np.random.randint(100)
s[-i] = NaT
s[i] = NaT
return s
chunksize = 1000
# N=35000
s1 = make_dtnat_arr(chunksize + 5)
s2 = make_dtnat_arr(chunksize + 5, 0)
        # s3 = make_dtnat_arr(chunksize + 5, 0)
with tm.ensure_clean("1.csv") as pth:
df = DataFrame({"a": s1, "b": s2})
df.to_csv(pth, chunksize=chunksize)
recons = self.read_csv(pth).apply(to_datetime)
tm.assert_frame_equal(df, recons, check_names=False)
@pytest.mark.slow
def test_to_csv_moar(self):
def _do_test(
df, r_dtype=None, c_dtype=None, rnlvl=None, cnlvl=None, dupe_col=False
):
kwargs = {"parse_dates": False}
if cnlvl:
if rnlvl is not None:
kwargs["index_col"] = list(range(rnlvl))
kwargs["header"] = list(range(cnlvl))
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
else:
kwargs["header"] = 0
with tm.ensure_clean("__tmp_to_csv_moar__") as path:
df.to_csv(path, encoding="utf8", chunksize=chunksize)
recons = self.read_csv(path, **kwargs)
def _to_uni(x):
if not isinstance(x, str):
return x.decode("utf8")
return x
if dupe_col:
                # read_csv disambiguates duplicate columns by labeling them
                # dupe.1, dupe.2, etc., so monkey-patch the columns back
recons.columns = df.columns
if rnlvl and not cnlvl:
delta_lvl = [recons.iloc[:, i].values for i in range(rnlvl - 1)]
ix = MultiIndex.from_arrays([list(recons.index)] + delta_lvl)
recons.index = ix
recons = recons.iloc[:, rnlvl - 1 :]
type_map = {"i": "i", "f": "f", "s": "O", "u": "O", "dt": "O", "p": "O"}
if r_dtype:
if r_dtype == "u": # unicode
r_dtype = "O"
recons.index = np.array(
[_to_uni(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[_to_uni(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "dt": # unicode
r_dtype = "O"
recons.index = np.array(
[Timestamp(label) for label in recons.index], dtype=r_dtype
)
df.index = np.array(
[Timestamp(label) for label in df.index], dtype=r_dtype
)
elif r_dtype == "p":
r_dtype = "O"
idx_list = to_datetime(recons.index)
recons.index = np.array(
[Timestamp(label) for label in idx_list], dtype=r_dtype
)
df.index = np.array(
list(map(Timestamp, df.index.to_timestamp())), dtype=r_dtype
)
else:
r_dtype = type_map.get(r_dtype)
recons.index = np.array(recons.index, dtype=r_dtype)
df.index = np.array(df.index, dtype=r_dtype)
if c_dtype:
if c_dtype == "u":
c_dtype = "O"
recons.columns = np.array(
[_to_uni(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[_to_uni(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "dt":
c_dtype = "O"
recons.columns = np.array(
[Timestamp(label) for label in recons.columns], dtype=c_dtype
)
df.columns = np.array(
[Timestamp(label) for label in df.columns], dtype=c_dtype
)
elif c_dtype == "p":
c_dtype = "O"
col_list = to_datetime(recons.columns)
recons.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
col_list = df.columns.to_timestamp()
df.columns = np.array(
[Timestamp(label) for label in col_list], dtype=c_dtype
)
else:
c_dtype = type_map.get(c_dtype)
recons.columns = np.array(recons.columns, dtype=c_dtype)
df.columns = np.array(df.columns, dtype=c_dtype)
tm.assert_frame_equal(df, recons, check_names=False)
N = 100
chunksize = 1000
ncols = 4
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(nrows, ncols, r_idx_type="dt", c_idx_type="s"),
"dt",
"s",
)
for r_idx_type, c_idx_type in [("i", "i"), ("s", "s"), ("u", "dt"), ("p", "p")]:
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
2,
10,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_type=r_idx_type, c_idx_type=c_idx_type
),
r_idx_type,
c_idx_type,
)
for ncols in [1, 2, 3, 4]:
base = chunksize // ncols
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols))
for nrows in [10, N - 2, N - 1, N, N + 1, N + 2]:
df = tm.makeCustomDataframe(nrows, 3)
cols = list(df.columns)
cols[:2] = ["dupe", "dupe"]
cols[-2:] = ["dupe", "dupe"]
ix = list(df.index)
ix[:2] = ["rdupe", "rdupe"]
ix[-2:] = ["rdupe", "rdupe"]
df.index = ix
df.columns = cols
_do_test(df, dupe_col=True)
_do_test(DataFrame(index=np.arange(10)))
_do_test(
tm.makeCustomDataframe(chunksize // 2 + 1, 2, r_idx_nlevels=2), rnlvl=2
)
for ncols in [2, 3, 4]:
base = int(chunksize // ncols)
for nrows in [
10,
N - 2,
N - 1,
N,
N + 1,
N + 2,
2 * N - 2,
2 * N - 1,
2 * N,
2 * N + 1,
2 * N + 2,
base - 1,
base,
base + 1,
]:
_do_test(tm.makeCustomDataframe(nrows, ncols, r_idx_nlevels=2), rnlvl=2)
_do_test(tm.makeCustomDataframe(nrows, ncols, c_idx_nlevels=2), cnlvl=2)
_do_test(
tm.makeCustomDataframe(
nrows, ncols, r_idx_nlevels=2, c_idx_nlevels=2
),
rnlvl=2,
cnlvl=2,
)
def test_to_csv_from_csv_w_some_infs(self, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["G"] = np.nan
f = lambda x: [np.inf, np.nan][np.random.rand() < 0.5]
float_frame["H"] = float_frame.index.map(f)
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_from_csv_w_all_infs(self, float_frame):
# test roundtrip with inf, -inf, nan, as full columns and mix
float_frame["E"] = np.inf
float_frame["F"] = -np.inf
with tm.ensure_clean() as path:
float_frame.to_csv(path)
recons = self.read_csv(path)
tm.assert_frame_equal(float_frame, recons)
tm.assert_frame_equal(np.isinf(float_frame), np.isinf(recons))
def test_to_csv_no_index(self):
# GH 3624, after appending columns, to_csv fails
with tm.ensure_clean("__tmp_to_csv_no_index__") as path:
df = DataFrame({"c1": [1, 2, 3], "c2": [4, 5, 6]})
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
df["c3"] = Series([7, 8, 9], dtype="int64")
df.to_csv(path, index=False)
result = read_csv(path)
tm.assert_frame_equal(df, result)
def test_to_csv_with_mix_columns(self):
# gh-11637: incorrect output when a mix of integer and string column
# names passed as columns parameter in to_csv
df = DataFrame({0: ["a", "b", "c"], 1: ["aa", "bb", "cc"]})
df["test"] = "txt"
assert df.to_csv() == df.to_csv(columns=[0, 1, "test"])
def test_to_csv_headers(self):
# GH6186, the presence or absence of `index` incorrectly
# causes to_csv to have different header semantics.
from_df = DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
to_df = DataFrame([[1, 2], [3, 4]], columns=["X", "Y"])
with tm.ensure_clean("__tmp_to_csv_headers__") as path:
from_df.to_csv(path, header=["X", "Y"])
recons = self.read_csv(path)
tm.assert_frame_equal(to_df, recons)
from_df.to_csv(path, index=False, header=["X", "Y"])
recons = self.read_csv(path)
return_value = recons.reset_index(inplace=True)
assert return_value is None
tm.assert_frame_equal(to_df, recons)
def test_to_csv_multiindex(self, float_frame, datetime_frame):
frame = float_frame
old_index = frame.index
arrays = np.arange(len(old_index) * 2).reshape(2, -1)
new_index = MultiIndex.from_arrays(arrays, names=["first", "second"])
frame.index = new_index
with tm.ensure_clean("__tmp_to_csv_multiindex__") as path:
frame.to_csv(path, header=False)
frame.to_csv(path, columns=["A", "B"])
# round trip
frame.to_csv(path)
df = self.read_csv(path, index_col=[0, 1], parse_dates=False)
# TODO to_csv drops column name
tm.assert_frame_equal(frame, df, check_names=False)
assert frame.index.names == df.index.names
# needed if setUp becomes a class method
float_frame.index = old_index
# try multiindex with dates
tsframe = datetime_frame
old_index = tsframe.index
new_index = [old_index, np.arange(len(old_index))]
tsframe.index = MultiIndex.from_arrays(new_index)
tsframe.to_csv(path, index_label=["time", "foo"])
recons = self.read_csv(path, index_col=[0, 1])
# TODO to_csv drops column name
| tm.assert_frame_equal(tsframe, recons, check_names=False) | pandas._testing.assert_frame_equal |
# -*- coding: utf-8 -*-
"""Análisis.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1IgGsebdiJRAdXOeW7I9wXXZQAtyPXlXQ
Install dependencies
"""
#%reset -f
#!pip install psycopg2
"""Import libraries"""
import psycopg2
import numpy as np
import pandas as pd
import time
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
"""[Connect to database](https://pynative.com/python-postgresql-tutorial/)"""
def algoritmo():
try:
connection = psycopg2.connect(user = "postgres",
password = "<PASSWORD>",
host = "192.168.3.11",
port = "5432",
database = "dataproject1")
cursor = connection.cursor()
# Print PostgreSQL Connection properties
print ( connection.get_dsn_parameters(),"\n")
# Print PostgreSQL version
cursor.execute("SELECT version();")
record = cursor.fetchone()
print("You are connected to - ", record,"\n")
except (Exception, psycopg2.Error) as error :
print ("Error while connecting to PostgreSQL", error)
"""Obtain "datos" of the cities and columns names"""
cursor.execute("SELECT * FROM datos;")
record = cursor.fetchall()
cursor.execute("SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = N'datos'")
columns_name = cursor.fetchall()
"""Convert array of arrays to single array"""
array_columns_name = np.array(columns_name)
array_columns_name = np.concatenate( array_columns_name, axis=0 )
#print(array_columns_name)
"""Transform result of query to a pandas dataframe"""
df = pd.DataFrame(record, columns=array_columns_name)
df.head()
"""Obtain "clientes" of the clients responses"""
cursor.execute("SELECT * FROM clientes ORDER BY client_id DESC;")
record = cursor.fetchall()
cursor.execute("SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = N'clientes'")
columns_name = cursor.fetchall()
array_columns_name = np.array(columns_name)
array_columns_name = np.concatenate( array_columns_name, axis=0 )
clientes = | pd.DataFrame(record, columns=array_columns_name) | pandas.DataFrame |
import pytest
import numpy as np
import pandas as pd
from datetime import datetime
from pandas.util import testing as tm
from pandas import DataFrame, MultiIndex, compat, Series, bdate_range, Index
def test_apply_issues():
# GH 5788
s = """2011.05.16,00:00,1.40893
2011.05.16,01:00,1.40760
2011.05.16,02:00,1.40750
2011.05.16,03:00,1.40649
2011.05.17,02:00,1.40893
2011.05.17,03:00,1.40760
2011.05.17,04:00,1.40750
2011.05.17,05:00,1.40649
2011.05.18,02:00,1.40893
2011.05.18,03:00,1.40760
2011.05.18,04:00,1.40750
2011.05.18,05:00,1.40649"""
df = pd.read_csv(
compat.StringIO(s), header=None, names=['date', 'time', 'value'],
parse_dates=[['date', 'time']])
df = df.set_index('date_time')
expected = df.groupby(df.index.date).idxmax()
result = df.groupby(df.index.date).apply(lambda x: x.idxmax())
tm.assert_frame_equal(result, expected)
# GH 5789
# don't auto coerce dates
df = pd.read_csv(
compat.StringIO(s), header=None, names=['date', 'time', 'value'])
exp_idx = pd.Index(
['2011.05.16', '2011.05.17', '2011.05.18'
], dtype=object, name='date')
expected = Series(['00:00', '02:00', '02:00'], index=exp_idx)
result = df.groupby('date').apply(
lambda x: x['time'][x['value'].idxmax()])
tm.assert_series_equal(result, expected)
def test_apply_trivial():
# GH 20066
# trivial apply: ignore input and return a constant dataframe.
df = pd.DataFrame({'key': ['a', 'a', 'b', 'b', 'a'],
'data': [1.0, 2.0, 3.0, 4.0, 5.0]},
columns=['key', 'data'])
expected = pd.concat([df.iloc[1:], df.iloc[1:]],
axis=1, keys=['float64', 'object'])
result = df.groupby([str(x) for x in df.dtypes],
axis=1).apply(lambda x: df.iloc[1:])
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason="GH#20066; function passed into apply "
"returns a DataFrame with the same index "
"as the one to create GroupBy object.",
strict=True)
def test_apply_trivial_fail():
# GH 20066
# trivial apply fails if the constant dataframe has the same index
# with the one used to create GroupBy object.
df = pd.DataFrame({'key': ['a', 'a', 'b', 'b', 'a'],
'data': [1.0, 2.0, 3.0, 4.0, 5.0]},
columns=['key', 'data'])
expected = pd.concat([df, df],
axis=1, keys=['float64', 'object'])
result = df.groupby([str(x) for x in df.dtypes],
axis=1).apply(lambda x: df)
tm.assert_frame_equal(result, expected)
def test_fast_apply():
    # make sure that fast apply is correctly called
    # rather than raising any kind of error;
    # otherwise the slower pure-Python path will be called,
    # which slows things down
N = 1000
labels = np.random.randint(0, 2000, size=N)
labels2 = np.random.randint(0, 3, size=N)
df = DataFrame({'key': labels,
'key2': labels2,
'value1': np.random.randn(N),
'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4)})
def f(g):
return 1
g = df.groupby(['key', 'key2'])
grouper = g.grouper
splitter = grouper._get_splitter(g._selected_obj, axis=g.axis)
group_keys = grouper._get_group_keys()
values, mutated = splitter.fast_apply(f, group_keys)
assert not mutated
def test_apply_with_mixed_dtype():
# GH3480, apply with mixed dtype on axis=1 breaks in 0.11
df = DataFrame({'foo1': np.random.randn(6),
'foo2': ['one', 'two', 'two', 'three', 'one', 'two']})
result = df.apply(lambda x: x, axis=1)
tm.assert_series_equal(df.get_dtype_counts(), result.get_dtype_counts())
# GH 3610 incorrect dtype conversion with as_index=False
df = DataFrame({"c1": [1, 2, 6, 6, 8]})
df["c2"] = df.c1 / 2.0
result1 = df.groupby("c2").mean().reset_index().c2
result2 = df.groupby("c2", as_index=False).mean().c2
tm.assert_series_equal(result1, result2)
def test_groupby_as_index_apply(df):
# GH #4648 and #3417
df = DataFrame({'item_id': ['b', 'b', 'a', 'c', 'a', 'b'],
'user_id': [1, 2, 1, 1, 3, 1],
'time': range(6)})
g_as = df.groupby('user_id', as_index=True)
g_not_as = df.groupby('user_id', as_index=False)
res_as = g_as.head(2).index
res_not_as = g_not_as.head(2).index
exp = Index([0, 1, 2, 4])
tm.assert_index_equal(res_as, exp)
tm.assert_index_equal(res_not_as, exp)
res_as_apply = g_as.apply(lambda x: x.head(2)).index
res_not_as_apply = g_not_as.apply(lambda x: x.head(2)).index
# apply doesn't maintain the original ordering
# changed in GH5610 as the as_index=False returns a MI here
exp_not_as_apply = MultiIndex.from_tuples([(0, 0), (0, 2), (1, 1), (
2, 4)])
tp = [(1, 0), (1, 2), (2, 1), (3, 4)]
exp_as_apply = MultiIndex.from_tuples(tp, names=['user_id', None])
tm.assert_index_equal(res_as_apply, exp_as_apply)
tm.assert_index_equal(res_not_as_apply, exp_not_as_apply)
ind = Index(list('abcde'))
df = DataFrame([[1, 2], [2, 3], [1, 4], [1, 5], [2, 6]], index=ind)
res = df.groupby(0, as_index=False).apply(lambda x: x).index
tm.assert_index_equal(res, ind)
def test_apply_concat_preserve_names(three_group):
grouped = three_group.groupby(['A', 'B'])
def desc(group):
result = group.describe()
result.index.name = 'stat'
return result
def desc2(group):
result = group.describe()
result.index.name = 'stat'
result = result[:len(group)]
# weirdo
return result
def desc3(group):
result = group.describe()
# names are different
result.index.name = 'stat_%d' % len(group)
result = result[:len(group)]
# weirdo
return result
result = grouped.apply(desc)
assert result.index.names == ('A', 'B', 'stat')
result2 = grouped.apply(desc2)
assert result2.index.names == ('A', 'B', 'stat')
result3 = grouped.apply(desc3)
assert result3.index.names == ('A', 'B', None)
def test_apply_series_to_frame():
def f(piece):
with np.errstate(invalid='ignore'):
logged = np.log(piece)
return DataFrame({'value': piece,
'demeaned': piece - piece.mean(),
'logged': logged})
dr = bdate_range('1/1/2000', periods=100)
ts = Series(np.random.randn(100), index=dr)
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(f)
assert isinstance(result, DataFrame)
tm.assert_index_equal(result.index, ts.index)
def test_apply_series_yield_constant(df):
result = df.groupby(['A', 'B'])['C'].apply(len)
assert result.index.names[:2] == ('A', 'B')
def test_apply_frame_yield_constant(df):
# GH13568
result = df.groupby(['A', 'B']).apply(len)
assert isinstance(result, Series)
assert result.name is None
result = df.groupby(['A', 'B'])[['C', 'D']].apply(len)
assert isinstance(result, Series)
assert result.name is None
def test_apply_frame_to_series(df):
grouped = df.groupby(['A', 'B'])
result = grouped.apply(len)
expected = grouped.count()['C']
tm.assert_index_equal(result.index, expected.index)
tm.assert_numpy_array_equal(result.values, expected.values)
def test_apply_frame_concat_series():
def trans(group):
return group.groupby('B')['C'].sum().sort_values()[:2]
def trans2(group):
grouped = group.groupby(df.reindex(group.index)['B'])
return grouped.sum().sort_values()[:2]
df = DataFrame({'A': np.random.randint(0, 5, 1000),
'B': np.random.randint(0, 5, 1000),
'C': np.random.randn(1000)})
result = df.groupby('A').apply(trans)
exp = df.groupby('A')['C'].apply(trans2)
tm.assert_series_equal(result, exp, check_names=False)
assert result.name == 'C'
def test_apply_transform(ts):
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(lambda x: x * 2)
expected = grouped.transform(lambda x: x * 2)
tm.assert_series_equal(result, expected)
def test_apply_multikey_corner(tsframe):
grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
def f(group):
return group.sort_values('A')[-5:]
result = grouped.apply(f)
for key, group in grouped:
tm.assert_frame_equal(result.loc[key], f(group))
def test_apply_chunk_view():
# Low level tinkering could be unsafe, make sure not
df = DataFrame({'key': [1, 1, 1, 2, 2, 2, 3, 3, 3],
'value': compat.lrange(9)})
# return view
f = lambda x: x[:2]
result = df.groupby('key', group_keys=False).apply(f)
expected = df.take([0, 1, 3, 4, 6, 7])
tm.assert_frame_equal(result, expected)
def test_apply_no_name_column_conflict():
df = DataFrame({'name': [1, 1, 1, 1, 1, 1, 2, 2, 2, 2],
'name2': [0, 0, 0, 1, 1, 1, 0, 0, 1, 1],
'value': compat.lrange(10)[::-1]})
# it works! #2605
grouped = df.groupby(['name', 'name2'])
grouped.apply(lambda x: x.sort_values('value', inplace=True))
def test_apply_typecast_fail():
df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
'c': np.tile(
['a', 'b', 'c'], 2),
'v': np.arange(1., 7.)})
def f(group):
v = group['v']
group['v2'] = (v - v.min()) / (v.max() - v.min())
return group
result = df.groupby('d').apply(f)
expected = df.copy()
expected['v2'] = np.tile([0., 0.5, 1], 2)
tm.assert_frame_equal(result, expected)
def test_apply_multiindex_fail():
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]
])
df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
'c': np.tile(['a', 'b', 'c'], 2),
'v': np.arange(1., 7.)}, index=index)
def f(group):
v = group['v']
group['v2'] = (v - v.min()) / (v.max() - v.min())
return group
result = df.groupby('d').apply(f)
expected = df.copy()
expected['v2'] = np.tile([0., 0.5, 1], 2)
tm.assert_frame_equal(result, expected)
def test_apply_corner(tsframe):
result = tsframe.groupby(lambda x: x.year).apply(lambda x: x * 2)
expected = tsframe * 2
tm.assert_frame_equal(result, expected)
def test_apply_without_copy():
# GH 5545
# returning a non-copy in an applied function fails
data = DataFrame({'id_field': [100, 100, 200, 300],
'category': ['a', 'b', 'c', 'c'],
'value': [1, 2, 3, 4]})
def filt1(x):
if x.shape[0] == 1:
return x.copy()
else:
return x[x.category == 'c']
def filt2(x):
if x.shape[0] == 1:
return x
else:
return x[x.category == 'c']
expected = data.groupby('id_field').apply(filt1)
result = data.groupby('id_field').apply(filt2)
tm.assert_frame_equal(result, expected)
def test_apply_corner_cases():
# #535, can't use sliding iterator
N = 1000
labels = np.random.randint(0, 100, size=N)
df = DataFrame({'key': labels,
'value1': np.random.randn(N),
'value2': ['foo', 'bar', 'baz', 'qux'] * (N // 4)})
grouped = df.groupby('key')
def f(g):
g['value3'] = g['value1'] * 2
return g
result = grouped.apply(f)
assert 'value3' in result
def test_apply_numeric_coercion_when_datetime():
# In the past, group-by/apply operations have been over-eager
# in converting dtypes to numeric, in the presence of datetime
# columns. Various GH issues were filed, the reproductions
# for which are here.
# GH 15670
df = pd.DataFrame({'Number': [1, 2],
'Date': ["2017-03-02"] * 2,
'Str': ["foo", "inf"]})
expected = df.groupby(['Number']).apply(lambda x: x.iloc[0])
df.Date = pd.to_datetime(df.Date)
result = df.groupby(['Number']).apply(lambda x: x.iloc[0])
tm.assert_series_equal(result['Str'], expected['Str'])
# GH 15421
df = pd.DataFrame({'A': [10, 20, 30],
'B': ['foo', '3', '4'],
'T': [pd.Timestamp("12:31:22")] * 3})
def get_B(g):
return g.iloc[0][['B']]
result = df.groupby('A').apply(get_B)['B']
expected = df.B
expected.index = df.A
tm.assert_series_equal(result, expected)
# GH 14423
def predictions(tool):
out = pd.Series(index=['p1', 'p2', 'useTime'], dtype=object)
if 'step1' in list(tool.State):
out['p1'] = str(tool[tool.State == 'step1'].Machine.values[0])
if 'step2' in list(tool.State):
out['p2'] = str(tool[tool.State == 'step2'].Machine.values[0])
out['useTime'] = str(
tool[tool.State == 'step2'].oTime.values[0])
return out
df1 = pd.DataFrame({'Key': ['B', 'B', 'A', 'A'],
'State': ['step1', 'step2', 'step1', 'step2'],
'oTime': ['', '2016-09-19 05:24:33',
'', '2016-09-19 23:59:04'],
'Machine': ['23', '36L', '36R', '36R']})
df2 = df1.copy()
df2.oTime = pd.to_datetime(df2.oTime)
expected = df1.groupby('Key').apply(predictions).p1
result = df2.groupby('Key').apply(predictions).p1
tm.assert_series_equal(expected, result)
def test_time_field_bug():
# Test a fix for the following error related to GH issue 11324 When
# non-key fields in a group-by dataframe contained time-based fields
# that were not returned by the apply function, an exception would be
# raised.
df = pd.DataFrame({'a': 1, 'b': [datetime.now() for nn in range(10)]})
def func_with_no_date(batch):
return pd.Series({'c': 2})
def func_with_date(batch):
return pd.Series({'b': datetime(2015, 1, 1), 'c': 2})
dfg_no_conversion = df.groupby(by=['a']).apply(func_with_no_date)
dfg_no_conversion_expected = pd.DataFrame({'c': 2}, index=[1])
dfg_no_conversion_expected.index.name = 'a'
dfg_conversion = df.groupby(by=['a']).apply(func_with_date)
dfg_conversion_expected = pd.DataFrame(
{'b': datetime(2015, 1, 1),
'c': 2}, index=[1])
dfg_conversion_expected.index.name = 'a'
tm.assert_frame_equal(dfg_no_conversion, dfg_no_conversion_expected)
tm.assert_frame_equal(dfg_conversion, dfg_conversion_expected)
def test_gb_apply_list_of_unequal_len_arrays():
# GH1738
df = DataFrame({'group1': ['a', 'a', 'a', 'b', 'b', 'b', 'a', 'a', 'a',
'b', 'b', 'b'],
'group2': ['c', 'c', 'd', 'd', 'd', 'e', 'c', 'c', 'd',
'd', 'd', 'e'],
'weight': [1.1, 2, 3, 4, 5, 6, 2, 4, 6, 8, 1, 2],
'value': [7.1, 8, 9, 10, 11, 12, 8, 7, 6, 5, 4, 3]})
df = df.set_index(['group1', 'group2'])
df_grouped = df.groupby(level=['group1', 'group2'], sort=True)
def noddy(value, weight):
out = np.array(value * weight).repeat(3)
return out
# the kernel function returns arrays of unequal length
# pandas sniffs the first one, sees it's an array and not
# a list, and assumed the rest are of equal length
# and so tries a vstack
# don't die
df_grouped.apply(lambda x: noddy(x.value, x.weight))
def test_groupby_apply_all_none():
# Tests to make sure no errors if apply function returns all None
# values. Issue 9684.
test_df = DataFrame({'groups': [0, 0, 1, 1],
'random_vars': [8, 7, 4, 5]})
def test_func(x):
pass
result = test_df.groupby('groups').apply(test_func)
expected = DataFrame()
tm.assert_frame_equal(result, expected)
def test_groupby_apply_none_first():
# GH 12824. Tests if apply returns None first.
test_df1 = | DataFrame({'groups': [1, 1, 1, 2], 'vars': [0, 1, 2, 3]}) | pandas.DataFrame |
import pandas as pd
import difflib as df
from fuzzywuzzy import fuzz
from cleanco import cleanco
import numpy as np
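# NOTE: difflib is imported as `df`, which the DataFrame parameters named
# `df` below will shadow inside the functions.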
def merge_files(df,df1,vendor_name_column,actual_name_column):
try:
merged=pd.merge(df,df1,how='left',left_on=vendor_name_column,right_on=actual_name_column,indicator=True)
merged_leftout=merged[merged['_merge'].isin(['left_only'])].dropna(how='all')
merged_leftout=merged_leftout.drop_duplicates()
merged_leftout= merged_leftout.drop('_merge',axis=1)
merged_leftout=merged_leftout.dropna(subset=[vendor_name_column])
merged_leftout=merged_leftout.drop_duplicates(subset=vendor_name_column)
return merged_leftout
except Exception as e:
print(type(e),e)
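# Sketch of intended usage (hypothetical frames/columns):
# leftovers = merge_files(invoices_df, master_df, "VendorName", "ActualName")
# keeps only the rows whose vendor name found no exact match (left_only).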
def vendor_name_match(vendor_names_file_path, actual_vendor_names_file_path,special_cases,actual_name_column,vendor_name_column,replace=False):
"""
    Matches partial vendor names to the actual vendor names.

    Parameters
    ==========
    - vendor_names_file_path : path of the excel/csv file whose vendor names are to be matched
    - actual_vendor_names_file_path : path of the excel/csv file containing the actual vendor names
    - special_cases : special cases to account for during matching
    - actual_name_column : column name of the file containing the actual vendor names
    - vendor_name_column : column name of the file containing the vendor names to be matched
    - replace : True if vendor names are to be replaced with the actual vendor names

    Returns
    =======
    dataframe
"""
index_to_be_removed=[]
if vendor_names_file_path.endswith('.xlsx'):
try:
data=pd.read_excel(vendor_names_file_path)
except Exception as e:
print(type(e),e)
elif vendor_names_file_path.endswith('.csv'):
try:
data=pd.read_csv(vendor_names_file_path)
except Exception as e:
print(type(e),e)
else:
raise TypeError('File to be matched should be either .xlsx or .csv format')
if actual_vendor_names_file_path.endswith('.xlsx'):
try:
actuals = | pd.read_excel(actual_vendor_names_file_path,usecols=[actual_name_column]) | pandas.read_excel |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 17 10:49:28 2020
@author: <NAME>
"""
import os
import json
import pandas as pd
import numpy as np
import configparser as cp
from tensorflow.keras.models import load_model
from support_modules.readers import log_reader as lr
from support_modules import support as sup
from model_prediction import interfaces as it
from model_prediction.analyzers import sim_evaluator as ev
class ModelPredictor():
"""
    This is the main class in charge of the model evaluation.
"""
def __init__(self, parms):
self.output_route = os.path.join('output_files', parms['folder'])
self.parms = parms
# load parameters
self.load_parameters()
self.model_name, _ = os.path.splitext(parms['model_file'])
self.model = load_model(os.path.join(self.output_route,
parms['model_file']))
self.log = self.load_log_test(self.output_route, self.parms)
self.samples = dict()
self.predictions = None
self.run_num = 0
self.model_def = dict()
self.read_model_definition(self.parms['model_type'])
print(self.model_def)
self.parms['additional_columns'] = self.model_def['additional_columns']
self.acc = self.execute_predictive_task()
def execute_predictive_task(self):
# create examples for next event and suffix
if self.parms['activity'] == 'pred_log':
self.parms['num_cases'] = len(self.log.caseid.unique())
else:
sampler = it.SamplesCreator()
sampler.create(self, self.parms['activity'])
# predict
        self.imp = self.parms['variant']
self.run_num = 0
for i in range(0, self.parms['rep']):
self.predict_values()
self.run_num += 1
# export predictions
self.export_predictions()
# assesment
evaluator = EvaluateTask()
if self.parms['activity'] == 'pred_log':
data = self.append_sources(self.log, self.predictions,
self.parms['one_timestamp'])
data['caseid'] = data['caseid'].astype(str)
return evaluator.evaluate(self.parms, data)
else:
return evaluator.evaluate(self.parms, self.predictions)
def predict_values(self):
# Predict values
executioner = it.PredictionTasksExecutioner()
executioner.predict(self, self.parms['activity'])
@staticmethod
def load_log_test(output_route, parms):
df_test = lr.LogReader(
os.path.join(output_route, 'parameters', 'test_log.csv'),
parms['read_options'])
df_test = pd.DataFrame(df_test.data)
df_test = df_test[~df_test.task.isin(['Start', 'End'])]
return df_test
def load_parameters(self):
# Loading of parameters from training
path = os.path.join(self.output_route,
'parameters',
'model_parameters.json')
with open(path) as file:
data = json.load(file)
if 'activity' in data:
del data['activity']
self.parms = {**self.parms, **{k: v for k, v in data.items()}}
self.parms['dim'] = {k: int(v) for k, v in data['dim'].items()}
if self.parms['one_timestamp']:
self.parms['scale_args'] = {
k: float(v) for k, v in data['scale_args'].items()}
else:
for key in data['scale_args'].keys():
self.parms['scale_args'][key] = {
k: float(v) for k, v in data['scale_args'][key].items()}
self.parms['index_ac'] = {int(k): v
for k, v in data['index_ac'].items()}
self.parms['index_rl'] = {int(k): v
for k, v in data['index_rl'].items()}
file.close()
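        # Build reverse maps (label -> index) for encoding activities and roles.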
self.ac_index = {v: k for k, v in self.parms['index_ac'].items()}
self.rl_index = {v: k for k, v in self.parms['index_rl'].items()}
def sampling(self, sampler):
sampler.register_sampler(self.parms['model_type'],
self.model_def['vectorizer'])
self.samples = sampler.create_samples(
self.parms, self.log, self.ac_index,
self.rl_index, self.model_def['additional_columns'])
def predict(self, executioner):
results = executioner.predict(self.parms,
self.model,
self.samples,
self.imp,
self.model_def['vectorizer'])
results = pd.DataFrame(results)
results['run_num'] = self.run_num
results['implementation'] = self.imp
if self.predictions is None:
self.predictions = results
else:
self.predictions = self.predictions.append(results,
ignore_index=True)
def export_predictions(self):
output_folder = os.path.join(self.output_route, 'results')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
filename = self.model_name + '_' + self.parms['activity'] + '.csv'
self.predictions.to_csv(os.path.join(output_folder, filename),
index=False)
@staticmethod
def append_sources(source_log, source_predictions, one_timestamp):
log = source_log.copy()
columns = ['caseid', 'task', 'end_timestamp', 'role']
if not one_timestamp:
columns += ['start_timestamp']
log = log[columns]
log['run_num'] = 0
log['implementation'] = 'log'
predictions = source_predictions.copy()
columns = log.columns
predictions = predictions[columns]
return log.append(predictions, ignore_index=True)
@staticmethod
def scale_feature(log, feature, parms, replace=False):
"""Scales a number given a technique.
Args:
log: Event-log to be scaled.
feature: Feature to be scaled.
method: Scaling method max, lognorm, normal, per activity.
replace (optional): replace the original value or keep both.
Returns:
Scaleded value between 0 and 1.
"""
method = parms['norm_method']
scale_args = parms['scale_args']
if method == 'lognorm':
log[feature + '_log'] = np.log1p(log[feature])
max_value = scale_args['max_value']
min_value = scale_args['min_value']
log[feature+'_norm'] = np.divide(
np.subtract(log[feature+'_log'], min_value), (max_value - min_value))
log = log.drop((feature + '_log'), axis=1)
elif method == 'normal':
max_value = scale_args['max_value']
min_value = scale_args['min_value']
log[feature+'_norm'] = np.divide(
np.subtract(log[feature], min_value), (max_value - min_value))
elif method == 'standard':
mean = scale_args['mean']
std = scale_args['std']
log[feature + '_norm'] = np.divide(np.subtract(log[feature], mean),
std)
elif method == 'max':
max_value = scale_args['max_value']
log[feature + '_norm'] = (np.divide(log[feature], max_value)
if max_value > 0 else 0)
elif method is None:
log[feature+'_norm'] = log[feature]
else:
raise ValueError(method)
if replace:
log = log.drop(feature, axis=1)
return log
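    # Example (hypothetical args): with parms = {'norm_method': 'max',
    # 'scale_args': {'max_value': 10.0}}, a feature column [2, 5, 10]
    # yields <feature>_norm = [0.2, 0.5, 1.0].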
def read_model_definition(self, model_type):
Config = cp.ConfigParser(interpolation=None)
Config.read('models_spec.ini')
#File name with extension
self.model_def['additional_columns'] = sup.reduce_list(
Config.get(model_type,'additional_columns'), dtype='str')
self.model_def['vectorizer'] = Config.get(model_type, 'vectorizer')
class EvaluateTask():
def evaluate(self, parms, data):
sampler = self._get_evaluator(parms['activity'])
return sampler(data, parms)
def _get_evaluator(self, activity):
if activity == 'predict_next':
return self._evaluate_predict_next
elif activity == 'pred_sfx':
return self._evaluate_pred_sfx
elif activity == 'pred_log':
return self._evaluate_predict_log
else:
raise ValueError(activity)
def _evaluate_predict_next(self, data, parms):
exp_desc = self.clean_parameters(parms.copy())
evaluator = ev.Evaluator(parms['one_timestamp'])
ac_sim = evaluator.measure('accuracy', data, 'ac')
rl_sim = evaluator.measure('accuracy', data, 'rl')
mean_ac = ac_sim.accuracy.mean()
exp_desc = pd.DataFrame([exp_desc])
exp_desc = pd.concat([exp_desc]*len(ac_sim), ignore_index=True)
ac_sim = | pd.concat([ac_sim, exp_desc], axis=1) | pandas.concat |
# Compatible with Python 2 and 3
from __future__ import print_function
# 导入相关python库
import os
import numpy as np
import pandas as pd
# Set the random seed
np.random.seed(36)
# Use the matplotlib library for plotting
import matplotlib
import seaborn
import matplotlib.pyplot as plot
from sklearn import datasets
# Read the data
housing = pd.read_csv('kc_train.csv')
target = pd.read_csv('kc_train2.csv')  # sale prices
t = pd.read_csv('kc_test.csv')  # test data
# Data preprocessing
housing.info()  # check for missing values
# Feature scaling
from sklearn.preprocessing import MinMaxScaler
minmax_scaler = MinMaxScaler()
minmax_scaler.fit(housing)  # fit internally; the scaler's internal parameters are updated
scaler_housing = minmax_scaler.transform(housing)
scaler_housing = | pd.DataFrame(scaler_housing, columns=housing.columns) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 10 04:11:27 2017
@author: konodera
nohup python -u 501_concat.py &
"""
import pandas as pd
import numpy as np
from tqdm import tqdm
import multiprocessing as mp
import gc
import utils
utils.start(__file__)
#==============================================================================
# def
#==============================================================================
def user_feature(df, name):
if 'train' in name:
name_ = 'trainT-0'
elif name == 'test':
name_ = 'test'
df = pd.merge(df, pd.read_pickle('../feature/{}/f101_order.p'.format(name_)),# same
on='order_id', how='left')
# timezone
df = pd.merge(df, pd.read_pickle('../input/mk/timezone.p'),
on='order_hour_of_day', how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f102_user.p'.format(name)),
on='user_id', how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f103_user.p'.format(name)),
on='user_id', how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f104_user.p'.format(name)),
on='user_id', how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f105_order.p'.format(name_)),# same
on='order_id', how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f110_order.p'.format(name_)),# same
on='order_id', how='left')
gc.collect()
return df
def item_feature(df, name):
# aisle = pd.read_pickle('../input/mk/goods.p')[['product_id', 'aisle_id']]
# aisle = pd.get_dummies(aisle.rename(columns={'aisle_id':'item_aisle'}), columns=['item_aisle'])
# df = pd.merge(df, aisle, on='product_id', how='left')
organic = pd.read_pickle('../input/mk/products_feature.p')
df = pd.merge(df, organic, on='product_id', how='left')
# this could be worse
df = pd.merge(df, pd.read_pickle('../feature//{}/f202_product_hour.p'.format(name)),
on=['product_id','order_hour_of_day'], how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f202_uniq_product_hour.p'.format(name)),
on=['product_id','order_hour_of_day'], how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f202_product_dow.p'.format(name)),
on=['product_id','order_dow'], how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f202_uniq_product_dow.p'.format(name)),
on=['product_id','order_dow'], how='left')
gc.collect()
# low importance
df = pd.merge(df, pd.read_pickle('../feature/{}/f202_product_timezone.p'.format(name)),
on=['product_id','timezone'], how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f202_uniq_product_timezone.p'.format(name)),
on=['product_id','timezone'], how='left')
# low importance
df = pd.merge(df, pd.read_pickle('../feature/{}/f202_product_dow-timezone.p'.format(name)),
on=['product_id', 'order_dow', 'timezone'], how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f202_uniq_product_dow-timezone.p'.format(name)),
on=['product_id', 'order_dow', 'timezone'], how='left')
# no boost
df = pd.merge(df, pd.read_pickle('../feature/{}/f202_flat_product.p'.format(name)),
on=['product_id'], how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f203_product.p'.format(name)),
on='product_id', how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f205_order_product.p'.format(name)),
on=['order_id', 'product_id'], how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f207_product.p'.format(name)),
on='product_id', how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f208_product.p'.format(name)),
on='product_id', how='left')
# low imp
df = pd.merge(df, pd.read_pickle('../feature/{}/f209_product.p'.format(name)),
on='product_id', how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f210_product.p'.format(name)),
on='product_id', how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f211_product.p'.format(name)),
on='product_id', how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f212_product.p'.format(name)),
on='product_id', how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f213_product-dow.p'.format(name)),
on=['product_id','order_dow'], how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f214_product.p'.format(name)),
on='product_id', how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f215_product.p'.format(name)),
on='product_id', how='left')
gc.collect()
return df
def user_item_feature(df, name):
df = pd.merge(df, pd.read_pickle('../feature/{}/f301_order-product.p'.format(name)),
on=['order_id', 'product_id'],how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f301_order-product_n5.p'.format(name)),
on=['order_id', 'product_id'],how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f302_order-product_all.p'.format(name)),
on=['order_id', 'product_id'],how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f303_order-product.p'.format(name)),
on=['order_id', 'product_id'],how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f304-1_order-product.p'.format(name)),
on=['order_id', 'product_id'],how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f304-2_order-product.p'.format(name)),
on=['order_id', 'product_id'],how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f304-3_order-product.p'.format(name)),
on=['order_id', 'product_id'],how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f305_order-product.p'.format(name)),
on=['order_id', 'product_id'], how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f306_user-product.p'.format(name)),
on=['user_id', 'product_id'], how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f306_user-product_n5.p'.format(name)),
on=['user_id', 'product_id'], how='left')
gc.collect()
df = pd.merge(df, pd.read_pickle('../feature/{}/f307_user-product-timezone.p'.format(name)),
on=['user_id', 'product_id','timezone'], how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f307_user-product-dow.p'.format(name)),
on=['user_id', 'product_id','order_dow'], how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f308_user-product-timezone.p'.format(name)),
on=['user_id', 'product_id','timezone'], how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f308_user-product-dow.p'.format(name)),
on=['user_id', 'product_id','order_dow'], how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f309_user-product.p'.format(name)),
on=['user_id', 'product_id'], how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f309_user-product_n5.p'.format(name)),
on=['user_id', 'product_id'], how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f310_user-product.p'.format(name)),
on=['user_id', 'product_id'], how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f312_user_product.p'.format(name)),
on=['user_id', 'product_id'], how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f312_user_product_n5.p'.format(name)),
on=['user_id', 'product_id'], how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f313_user_aisle.p'.format(name)),
on=['user_id', 'aisle_id'], how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f313_user_dep.p'.format(name)),
on=['user_id', 'department_id'], how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f314_user-product.p'.format(name)),
on=['user_id', 'product_id'], how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f315-1_order-product.p'.format(name)),
on=['order_id', 'product_id'],how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f315-2_order-product.p'.format(name)),
on=['order_id', 'product_id'],how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f315-3_order-product.p'.format(name)),
on=['order_id', 'product_id'],how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f316_order_product.p'.format(name)),
on=['order_id', 'product_id'],how='left')
gc.collect()
return df
def daytime_feature(df, name):
df = pd.merge(df, pd.read_pickle('../feature/{}/f401_dow.p'.format(name)),
on=['order_dow'], how='left')
df = pd.merge(df, pd.read_pickle('../feature/{}/f401_hour.p'.format(name)),
on=['order_hour_of_day'], how='left')
return df
def concat_pred_item(T, dryrun=False):
if T==-1:
name = 'test'
else:
name = 'trainT-'+str(T)
#==============================================================================
print('load label')
#==============================================================================
# NOTE: order_id is label
print('load t3')
X_base = pd.read_pickle('../feature/X_base_t3.p')
label = pd.read_pickle('../feature/{}/label_reordered.p'.format(name))
# 'inner' for removing t-n_order_id == NaN
if 'train' in name:
df = pd.merge(X_base[X_base.is_train==1], label, on='order_id', how='inner')
elif name == 'test':
df = pd.merge(X_base[X_base.is_train==0], label, on='order_id', how='inner')
if dryrun:
print('dryrun')
df = df.sample(9999)
df = pd.merge(df, pd.read_pickle('../input/mk/goods.p')[['product_id', 'aisle_id', 'department_id']],
on='product_id', how='left')
print('{}.shape:{}\n'.format(name, df.shape))
#==============================================================================
print('user feature')
#==============================================================================
df = user_feature(df, name)
print('{}.shape:{}\n'.format(name, df.shape))
#==============================================================================
print('item feature')
#==============================================================================
df = item_feature(df, name)
print('{}.shape:{}\n'.format(name, df.shape))
#==============================================================================
print('reduce memory')
#==============================================================================
utils.reduce_memory(df)
ix_end = df.shape[1]
#==============================================================================
print('user x item')
#==============================================================================
df = user_item_feature(df, name)
print('{}.shape:{}\n'.format(name, df.shape))
#==============================================================================
print('user x item')
#==============================================================================
def compress(df, key):
"""
key: str
"""
df_ = df.drop_duplicates(key)[[key]].set_index(key)
dtypes = df.dtypes
col = dtypes[dtypes!='O'].index
col = [c for c in col if '_id' not in c]
gr = df.groupby(key)
for c in col:
df_[c+'-min'] = gr[c].min()
df_[c+'-mean'] = gr[c].mean()
df_[c+'-median'] = gr[c].median()
df_[c+'-max'] = gr[c].max()
df_[c+'-std'] = gr[c].std()
var = df_.var()
col = var[var==0].index
df_.drop(col, axis=1, inplace=True)
gc.collect()
return df_.reset_index()
key = 'order_id'
feature = compress(pd.read_pickle('../feature/{}/f301_order-product.p'.format(name)), key)
df = pd.merge(df, feature, on=key, how='left')
feature = compress(pd.read_pickle('../feature/{}/f301_order-product_n5.p'.format(name)), key)
df = pd.merge(df, feature, on=key, how='left')
key = 'order_id'
feature = compress(pd.read_pickle('../feature/{}/f302_order-product_all.p'.format(name)), key)
df = pd.merge(df, feature, on=key, how='left')
key = 'order_id'
feature = compress(pd.read_pickle('../feature/{}/f303_order-product.p'.format(name)), key)
df = pd.merge(df, feature, on=key, how='left')
key = 'order_id'
feature = compress(pd.read_pickle('../feature/{}/f304-1_order-product.p'.format(name)), key)
df = pd.merge(df, feature, on=key, how='left')
key = 'order_id'
feature = compress(pd.read_pickle('../feature/{}/f304-2_order-product.p'.format(name)), key)
df = pd.merge(df, feature, on=key, how='left')
key = 'order_id'
feature = compress(pd.read_pickle('../feature/{}/f304-3_order-product.p'.format(name)), key)
df = pd.merge(df, feature, on=key, how='left')
key = 'order_id'
feature = compress(pd.read_pickle('../feature/{}/f305_order-product.p'.format(name)), key)
df = pd.merge(df, feature, on=key, how='left')
gc.collect()
key = 'user_id'
feature = compress(pd.read_pickle('../feature/{}/f306_user-product.p'.format(name)), key)
df = pd.merge(df, feature, on=key, how='left')
feature = compress(pd.read_pickle('../feature/{}/f306_user-product_n5.p'.format(name)), key)
df = pd.merge(df, feature, on=key, how='left')
key = 'user_id'
feature = compress(pd.read_pickle('../feature/{}/f307_user-product-timezone.p'.format(name)), key)
df = pd.merge(df, feature, on=key, how='left')
key = 'user_id'
feature = compress(pd.read_pickle('../feature/{}/f308_user-product-timezone.p'.format(name)), key)
df = pd.merge(df, feature, on=key, how='left')
key = 'user_id'
feature = compress(pd.read_pickle('../feature/{}/f308_user-product-dow.p'.format(name)), key)
df = pd.merge(df, feature, on=key, how='left')
key = 'user_id'
feature = compress(pd.read_pickle('../feature/{}/f309_user-product.p'.format(name)), key)
df = pd.merge(df, feature, on=key, how='left')
feature = compress(pd.read_pickle('../feature/{}/f309_user-product_n5.p'.format(name)), key)
df = | pd.merge(df, feature, on=key, how='left') | pandas.merge |
#!python
#!/usr/bin/env python
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def plot_stx_mt(nc_mr):
nc_mr = pd.DataFrame.from_records(nc_mr)
    nc_mr.columns = ["Mutation Rate (%)", "NC IR 0", "NC IR 2", "NC DIFF", "NBDM", "NC (PAQ)"]
    # x/y/hue completed from the melted columns; the original statement was cut off
    ax = sns.lineplot(data=pd.melt(nc_mr, ['Mutation Rate (%)'], var_name='Legend', value_name='NC'),
                      x='Mutation Rate (%)', y='NC', hue='Legend')
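# Illustrative call with hypothetical records -- each row pairs a mutation
# rate with the five NC measures plotted above:
# plot_stx_mt([(1, 0.41, 0.40, 0.02, 0.39, 0.38),
#              (5, 0.44, 0.43, 0.05, 0.42, 0.41)])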
import pandas
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
import seaborn as sns
def evaluate_components(clf, x, y, n_iterations=500, check = 100,
evaluate = True, plot = True, thr = 0.95,
metric=None, random_state=123):
if type(x) != type(pandas.DataFrame()):
x = pandas.DataFrame(x)
# fit model
clf.fit(x,y)
n_comps = clf.n_components
# prepare output
results = pandas.DataFrame(index = range(n_comps * (n_iterations+1)),
columns = ['score', 'component', 'model'])
results.loc[:,'component'] = list(range(n_comps))*(n_iterations+1)
results.loc[range(n_comps),'model'] = ['True']*n_comps
results.loc[range(n_comps,n_comps*(n_iterations+1)), 'model'
] = ['Null']*(n_comps*n_iterations)
if not metric:
true_scores = [stats.pearsonr(clf.x_scores_[:,x], clf.y_scores_[:,x]
)[0]**2 for x in range(n_comps)]
else:
true_scores = [metric(clf.x_scores_[:,x], clf.y_scores_[:,x]
) for x in range(n_comps)]
results.loc[results[results.model=='True'].index,'score'] = true_scores
k = clf.n_components
# permute and refit model
rs = np.random.RandomState(random_state)
x.index = range(len(x.index))
for i in range(n_iterations):
new_ind = rs.permutation(x.index)
new_x = x.iloc[new_ind]
newmod = clf.fit(new_x,y)
if not metric:
new_scores = [stats.pearsonr(newmod.x_scores_[:,x],
newmod.y_scores_[:,x]
)[0]**2 for x in range(n_comps)]
else:
new_scores = [metric(newmod.x_scores_[:,x], newmod.y_scores_[:,x]
) for x in range(n_comps)]
results.loc[range(k, k+n_comps), 'score'] = new_scores
if check:
if i % check == 0:
print('finished iteration',i)
k += n_comps
if evaluate:
if plot:
cr = display_results(results, thr)
else:
cr = display_results(results, thr, False)
return results, cr
def display_results(results, thr = 0.95, plot=True):
if plot:
# plot components
sns.set_context('paper')
plt.close()
sns.catplot(x='component', y = 'score', hue='model', data=results,kind='point')
plt.show()
# get p-values
comp_results = pandas.DataFrame(index=results.component.unique(),
columns = ['r','p','sig'])
for i in results.component.unique():
nullz = results[(results.component==i) & (results.model=='Null')
]['score'].sort_values().values
real = results[(results.component==i) & (results.model=='True')]['score'].values[0]
comp_results.loc[i,'r'] = real
p = (len(nullz[nullz>real])+1) / len(nullz)
if p < (1 - thr):
comp_results.loc[i,['p','sig']] = [p, 1]
print('component %s: p = %s ***'%(i,p))
else:
comp_results.loc[i,['p','sig']] = [p, 0]
print('component %s: p = %s'%(i,p))
return comp_results
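# Hedged usage sketch (not part of the original module). It assumes a
# scikit-learn PLSRegression estimator, whose fitted x_scores_/y_scores_ and
# n_components attributes are exactly what evaluate_components relies on;
# the toy data below is made up for illustration.
def _example_evaluate_components():
    from sklearn.cross_decomposition import PLSRegression
    rng = np.random.RandomState(0)
    X = pandas.DataFrame(rng.randn(50, 10))
    y = pandas.DataFrame(rng.randn(50, 3))
    clf = PLSRegression(n_components=3)
    # 100 permutations keeps the sketch fast; check=None silences progress prints
    results, comp_results = evaluate_components(
        clf, X, y, n_iterations=100, check=None, plot=False)
    return comp_results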
def bootstrap_features(clf, fit_model, X, y, n_iterations=500, check = 100, on ='x'):
if type(X) != type(pandas.DataFrame()):
X = pandas.DataFrame(X)
if type(y) != type(pandas.DataFrame()):
y = pandas.DataFrame(y)
# fit model
orig = fit_model
# prepare output
n_feats_x = X.shape[-1]
n_feats_y = y.shape[-1]
all_results_x = {}
all_results_y = {}
for i in range(orig.n_components):
results = pandas.DataFrame(index = range(n_iterations), columns = range(n_feats_x))
all_results_x.update({i: results})
results = pandas.DataFrame(index = range(n_iterations), columns = range(n_feats_y))
all_results_y.update({i: results})
bs_ratio_x = pandas.DataFrame(index = range(orig.n_components),
columns = range(n_feats_x))
bs_ratio_y = pandas.DataFrame(index = range(orig.n_components),
columns = range(n_feats_y))
# bootstrap
for i in range(n_iterations):
n_ind = np.random.choice(X.index, len(X.index))
n_samp = pandas.DataFrame(X.loc[n_ind],copy=True)
ny = | pandas.DataFrame(y.loc[n_ind],copy=True) | pandas.DataFrame |
import pandas as pd
import json
df = | pd.read_csv('baseDesafio.csv') | pandas.read_csv |
import pendulum as pdl
import sys
sys.path.append(".")
# the memoization-related library
import loguru
import itertools
import portion
import klepto.keymaps
import CacheIntervals as ci
from CacheIntervals.utils import flatten
from CacheIntervals.utils import pdl2pd, pd2pdl
from CacheIntervals.utils import Timer
from CacheIntervals.Intervals import pd2po, po2pd
from CacheIntervals.RecordInterval import RecordIntervals, RecordIntervalsPandas
class QueryRecorder:
    '''
    Sentinel: passing an instance to the cached function asks it to return
    the interval recorders for the given non-interval arguments instead of
    executing the user function (see f_cached below).
    '''
    pass
class MemoizationWithIntervals(object):
    '''
    The purpose of this class is to optimise
    the number of calls to a function retrieving
    possibly disjoint intervals:
    - do standard caching for a given function
    - additionally, a call for a date range overlapping one
      already cached is supposed to yield a pandas
      frame which can be obtained by concatenating
      the cached result and a -- hopefully much --
      smaller query
    Maintains, per combination of non-interval arguments,
    a record of the intervals already queried; a new interval
    is split against that record so that only the uncovered
    parts trigger actual calls to the wrapped function.
    '''
keymapper = klepto.keymaps.stringmap(typed=False, flat=False)
def __init__(self,
pos_args=None,
names_kwarg=None,
classrecorder=RecordIntervalsPandas,
aggregation=lambda listdfs: pd.concat(listdfs, axis=0),
debug=False,
# memoization=klepto.lru_cache(
# cache=klepto.archives.hdf_archive(
# f'{pdl.today().to_date_string()}_memoization.hdf5'),
# keymap=keymapper),
memoization=klepto.lru_cache(
cache=klepto.archives.dict_archive(),
keymap=keymapper),
**kwargs):
'''
:param pos_args: the indices of the positional
arguments that will be handled as intervals
:param names_kwarg: the name of the named parameters
that will be handled as intervals
:param classrecorder: the interval recorder type
we want to use
:param memoization: a memoization algorithm
'''
# A dictionary of positional arguments indices
# that are intervals
self.argsi = {}
self.kwargsi = {}
# if pos_args is not None:
# for posarg in pos_args:
# self.argsi[posarg] = classrecorder(**kwargs)
self.pos_args_itvl = pos_args if pos_args is not None else []
#print(self.args)
# if names_kwarg is not None:
# for namedarg in names_kwarg:
# self.kwargsi[namedarg] = classrecorder(**kwargs)
self.names_kwargs_itvl = names_kwarg if names_kwarg is not None else {}
#print(self.kwargs)
self.memoization = memoization
self.aggregation = aggregation
self.debugQ = debug
self.argsdflt = None
self.kwargsdflt = None
self.time_last_call = pdl.today()
self.classrecorder = classrecorder
self.kwargsrecorder = kwargs
self.argssolver = None
self.query_recorder = QueryRecorder()
def __call__(self, f):
        '''
        The interval memoization leads to several calls to the
        standard memoised function and generates a list of return values.
        The aggregation is needed for the doubly-lazy function
        to have the same signature as the original one.
        To access the underlying memoized function, pass
        get_function_cachedQ=True in the kwargs of the
        overloaded call (not of this function).
        :param f: the function to memoize
        :return: the wrapper to the memoized function
        '''
if self.argssolver is None:
self.argssolver = ci.Functions.ArgsSolver(f, split_args_kwargsQ=True)
@self.memoization
def f_cached(*args, **kwargs):
            '''
            The cached function is used for a double purpose:
            1. for standard calls, it acts as the memoised function in the traditional way
            2. additionally, when passed parameters of type QueryRecorder, it creates
            or retrieves the interval recorders associated with the values of
            the non-interval parameters.
            In this context, we use the cached function as we would a dictionary.
            '''
QueryRecorderQ = False
args_new = []
kwargs_new = {}
'''
check whether this is a standard call to the user function
or a request for the interval recorders
'''
for i,arg in enumerate(args):
if isinstance(arg, QueryRecorder):
args_new.append(self.classrecorder(**self.kwargsrecorder))
QueryRecorderQ = True
else:
args_new.append(args[i])
for name in kwargs:
if isinstance(kwargs[name], QueryRecorder):
kwargs_new[name] = self.classrecorder(**self.kwargsrecorder)
QueryRecorderQ = True
else:
kwargs_new[name] = kwargs[name]
if QueryRecorderQ:
return args_new, kwargs_new
return f(*args, **kwargs)
def wrapper(*args, **kwargs):
if kwargs.get('get_function_cachedQ', False):
return f_cached
#loguru.logger.debug(f'function passed: {f_cached}')
loguru.logger.debug(f'args passed: {args}')
loguru.logger.debug(f'kwargs passed: {kwargs}')
# First pass: resolve the recorders
dargs_exp, kwargs_exp = self.argssolver(*args, **kwargs)
# Intervals are identified by position and keyword name
# 1. First get the interval recorders
args_exp = list(dargs_exp.values())
args_exp_copy = args_exp.copy()
kwargs_exp_copy = kwargs_exp.copy()
for i in self.pos_args_itvl:
args_exp_copy[i] = self.query_recorder
for name in self.names_kwargs_itvl:
kwargs_exp_copy[name] = self.query_recorder
args_with_ri, kwargs_with_ri = f_cached(*args_exp_copy, **kwargs_exp_copy)
# 2. Now get the the actual list of intervals
for i in self.pos_args_itvl:
# reuse args_exp_copy to store the list
args_exp_copy[i] = args_with_ri[i](args_exp[i])
for name in self.names_kwargs_itvl:
# reuse kwargs_exp_copy to store the list
kwargs_exp_copy[name] = kwargs_with_ri[name](kwargs_exp[name])
'''3. Then generate all combination of parameters
3.a - args'''
ns_args = range(len(args_exp))
lists_possible_args = [[args_exp[i]] if i not in self.pos_args_itvl else args_exp_copy[i] for i in ns_args]
# Take the cartesian product of these
calls_args = list( map(list,itertools.product(*lists_possible_args)))
'''3.b kwargs'''
#kwargs_exp_vals = kwargs_exp_copy.values()
names_kwargs = list(kwargs_exp_copy.keys())
lists_possible_kwargs = [[kwargs_exp[name]] if name not in self.names_kwargs_itvl
else kwargs_exp_copy[name] for name in names_kwargs]
calls_kwargs = list(map(lambda l: dict(zip(names_kwargs,l)), itertools.product(*lists_possible_kwargs)))
calls = list(itertools.product(calls_args, calls_kwargs))
if self.debugQ:
results = []
for call in calls:
with Timer() as timer:
results.append(f_cached(*call[0], **call[1]) )
print('Timer to demonstrate caching:')
timer.display(printQ=True)
else:
results = [f_cached(*call[0], **call[1]) for call in calls]
result = self.aggregation(results)
return result
return wrapper
if __name__ == "__main__":
import logging
import daiquiri
import pandas as pd
import time
daiquiri.setup(logging.DEBUG)
logging.getLogger('OneTick64').setLevel(logging.WARNING)
logging.getLogger('databnpp.ODCB').setLevel(logging.WARNING)
logging.getLogger('requests_kerberos').setLevel(logging.WARNING)
pd.set_option('display.max_rows', 200)
pd.set_option('display.width', 600)
pd.set_option('display.max_columns', 200)
tssixdaysago = pdl2pd(pdl.yesterday('UTC').add(days=-5))
tsfivedaysago = pdl2pd(pdl.yesterday('UTC').add(days=-4))
tsfourdaysago = pdl2pd(pdl.yesterday('UTC').add(days=-3))
tsthreedaysago = pdl2pd(pdl.yesterday('UTC').add(days=-2))
tstwodaysago = pdl2pd(pdl.yesterday('UTC').add(days=-1))
tsyesterday = pdl2pd(pdl.yesterday('UTC'))
tstoday = pdl2pd(pdl.today('UTC'))
tstomorrow = pdl2pd(pdl.tomorrow('UTC'))
tsintwodays = pdl2pd(pdl.tomorrow('UTC').add(days=1))
tsinthreedays = pdl2pd(pdl.tomorrow('UTC').add(days=2))
def print_calls(calls):
print( list( map( lambda i: (i.left, i.right), calls)))
def print_calls_dates(calls):
print( list( map( lambda i:
(pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()),
calls)))
def display_calls(calls):
loguru.logger.info( list( map( lambda i:
(pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()),
calls)))
# Testing record intervals -> ok
if True:
itvals = RecordIntervals()
calls = itvals(portion.closed(pdl.yesterday(), pdl.today()))
print(list(map( lambda i: (i.lower.to_date_string(), i.upper.to_date_string()), calls)))
print(list(map(lambda i: type(i), calls)))
calls = itvals( portion.closed(pdl.yesterday().add(days=-1), pdl.today().add(days=1)))
#print(calls)
print( list( map( lambda i: (i.lower.to_date_string(), i.upper.to_date_string()),
calls)))
# Testing record intervals pandas -> ok
if True:
itvals = RecordIntervalsPandas()
# yesterday -> today
calls = itvals(pd.Interval(pdl2pd(pdl.yesterday()), pdl2pd(pdl.today()), closed='left'))
print( list( map( lambda i: (pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()), calls)))
# day before yesterday -> tomorrow: should yield 3 intervals
calls = itvals(pd.Interval(pdl2pd(pdl.yesterday().add(days=-1)), pdl2pd(pdl.today().add(days=1))))
print( list( map( lambda i: (pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()), calls)))
# day before yesterday -> day after tomorrow: should yield 4 intervals
calls = itvals(
pd.Interval(pdl2pd(pdl.yesterday().add(days=-1)),
pdl2pd(pdl.tomorrow().add(days=1))))
print(
list(
map(
lambda i:
(pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()),
calls)))
# 2 days before yesterday -> 2day after tomorrow: should yield 6 intervals
calls = itvals(
pd.Interval(pdl2pd(pdl.yesterday().add(days=-2)),
pdl2pd(pdl.tomorrow().add(days=2))))
print(list(map( lambda i:
(pd2pdl(i.left).to_date_string(), pd2pdl(i.right).to_date_string()),
calls)))
# Further tests on record intervals pandas
if False:
itvals = RecordIntervalsPandas()
calls = itvals(pd.Interval(tstwodaysago, tstomorrow, closed='left'))
display_calls(calls)
calls = itvals( pd.Interval(tstwodaysago, tsyesterday))
display_calls(calls)
calls = itvals(
pd.Interval(tstwodaysago, tsintwodays))
display_calls(calls)
calls = itvals(
pd.Interval(pdl2pd(pdl.yesterday().add(days=-2)),
pdl2pd(pdl.tomorrow().add(days=2))))
display_calls(calls)
# proof-of_concept of decorator to modify function parameters
if False:
class dector_arg:
# a toy model
def __init__(self,
pos_arg=None,
f_arg=None,
name_kwarg=None,
f_kwarg=None):
'''
:param pos_arg: the positional argument
:param f_arg: the function to apply to the positional argument
:param name_kwarg: the keyword argument
:param f_kwarg: the function to apply to the keyword argument
'''
self.args = {}
self.kwargs = {}
if pos_arg:
self.args[pos_arg] = f_arg
print(self.args)
if name_kwarg:
self.kwargs[name_kwarg] = f_kwarg
print(self.kwargs)
def __call__(self, f):
'''
the decorator action
:param f: the function to decorate
:return: a function whose arguments
have the function f_args and f_kwargs
pre-applied.
'''
self.f = f
def inner_func(*args, **kwargs):
print(f'function passed: {self.f}')
print(f'args passed: {args}')
print(f'kwargs passed: {kwargs}')
largs = list(args)
for i, f in self.args.items():
print(i)
print(args[i])
largs[i] = f(args[i])
for name, f in self.kwargs.items():
kwargs[name] = f(kwargs[name])
return self.f(*largs, **kwargs)
return inner_func
dec = dector_arg(pos_arg=0,
f_arg=lambda x: x + 1,
name_kwarg='z',
f_kwarg=lambda x: x + 1)
@dector_arg(1, lambda x: x + 1, 'z', lambda x: x + 1)
def g(x, y, z=3):
'''
The decorated function should add one to the second
positional argument and
:param x:
:param y:
:param z:
:return:
'''
print(f'x->{x}')
print(f'y->{y}')
print(f'z->{z}')
g(1, 10, z=100)
if False:
memo = MemoizationWithIntervals()
# testing MemoizationWithIntervals
# typical mechanism
if False:
@MemoizationWithIntervals(
None, ['interval'],
aggregation=list,
debug=True,
memoization=klepto.lru_cache(
maxsize=200,
cache=klepto.archives.hdf_archive(
f'{pdl.today().to_date_string()}_memoisation.hdf5'),
keymap=klepto.keymaps.stringmap(typed=False, flat=False)))
def function_with_interval_param(dummy1,dummy2, kdummy=1,
interval=pd.Interval(tstwodaysago, tstomorrow)):
time.sleep(1)
print('****')
print(f'dummy1: {dummy1}, dummy2: {dummy2}')
print(f'kdummy: {kdummy}')
print(f'interval: {interval}')
return [dummy1, dummy2, kdummy, interval]
print('=*=*=*=* MECHANISM DEMONSTRATION =*=*=*=*')
print('==== First pass ===')
print("initialisation with an interval from yesterday to today")
# function_with_interval_params(pd.Interval(pdl.yesterday(), pdl.today(),closed='left'),
# interval1 = pd.Interval(pdl.yesterday().add(days=0),
# pdl.today(), closed='both')
# )
print( f'Final result:\n{function_with_interval_param(0, 1, interval=pd.Interval(tsyesterday, tstoday))}')
print('==== Second pass ===')
print("request for data from the day before yesterday to today")
print("expected split in two intervals with results from yesterday to today being cached")
print(
f'Final result: {function_with_interval_param(0,1, interval=pd.Interval(tstwodaysago, tstoday))}'
)
print('==== 3rd pass ===')
print("request for data from three days to yesterday")
print("expected split in two intervals")
print(f'Final result:\n {function_with_interval_param(0,1, interval=pd.Interval(tsthreedaysago, tsyesterday))}' )
print('==== 4th pass ===')
print("request for data from three days to tomorrow")
print("expected split in three intervals")
        print(f'Final result:\n\
{function_with_interval_param(0, 1, interval=pd.Interval(tsthreedaysago, tstomorrow))}' )
print('==== 5th pass ===')
print("request for data from two days ago to today with different first argument")
print("No caching expected and one interval")
print( f'Final result:\n{function_with_interval_param(1, 1, interval=pd.Interval(tstwodaysago, tstoday))}' )
print('==== 6th pass ===')
print("request for data from three days ago to today with different first argument")
print("Two intervals expected")
print( f'Final result: {function_with_interval_param(1, 1, interval=pd.Interval(tsthreedaysago, tstoday))}' )
# Testing with an interval as position argument and one interval as keyword argument
if False:
@MemoizationWithIntervals(
[0], ['interval1'],
aggregation=list,
debug=True,
memoization=klepto.lru_cache(
maxsize=200,
cache=klepto.archives.hdf_archive(
f'{pdl.today().to_date_string()}_memoisation.hdf5'),
keymap=klepto.keymaps.stringmap(typed=False, flat=False)))
def function_with_interval_params(interval0,
interval1=pd.Interval(tstwodaysago, tstomorrow)):
time.sleep(1)
print('***')
print(f'interval0: {interval0}')
print(f'interval1: {interval1}')
return (interval0, interval1)
print('=*=*=*=* DEMONSTRATION WITH TWO INTERVAL PARAMETERS =*=*=*=*')
print('==== First pass ===')
print(f'Initialisation: first interval:\nyest to tday - second interval: two days ago to tomorrow')
print(f'Final result:\n{function_with_interval_params(pd.Interval(tsyesterday, tstoday))}')
print('==== Second pass ===')
print(f'Call with first interval:\n3 days ago to tday - second interval: unchanged')
print('Expected caching and split of first interval in two')
print( f'Final result: {function_with_interval_params(pd.Interval(tsthreedaysago, tstoday))}' )
print('==== 3rd pass ===')
print(f'Call with first interval:\nunchanged - second interval: yest to today')
print('Expected only cached results and previous split of first interval')
print(f'Final result:\n {function_with_interval_params(pd.Interval(tsthreedaysago, tstoday), interval1 = pd.Interval(tsyesterday, tstoday))}' )
print('==== 4th pass ===')
print(f'Call with first interval:\n3 days ago to today - second interval: yest to today')
print('Expected only cached results and only split of first interval')
print(f'Final result:\n {function_with_interval_params(pd.Interval(tsthreedaysago, tstoday), interval1 = pd.Interval(tsyesterday, tstoday))}' )
print('==== 5th pass ===')
print(f'Call with first interval:\n3 days ago to yesterday - second interval: 3 days ago to tomorrow')
print('Expected no split of first interval and split of second interval in two. Only one none-cached call')
print(f'Final result:\n\
{function_with_interval_params(pd.Interval(tsthreedaysago, tsyesterday), interval1= pd.Interval(tsthreedaysago, tstomorrow))}'
)
print('==== 6th pass ===')
print(f'Call with first interval:\n3 days ago to today - second interval: 3 days ago to tomorrow')
print('Expected split of first interval in two and split of second interval in two. One non-cached call: today - tomorrow x ')
print(f'Final result:\n\
{function_with_interval_params(pd.Interval(tsthreedaysago, tstoday), interval1=pd.Interval(tsthreedaysago, tstomorrow))}'
)
# Showing the issue with the current version
if False:
@MemoizationWithIntervals(None,
['interval'],
aggregation=list,
debug=True,
memoization=klepto.lru_cache(
maxsize=200,
keymap=klepto.keymaps.stringmap(typed=False, flat=False)))
def function_with_interval_param(valint,
interval=pd.Interval(tstwodaysago, tstomorrow)):
time.sleep(1)
print('**********************************')
print(f'valint: {valint}')
print(f'interval: {interval}')
return (valint, interval)
print('==== First pass ===')
print( f'Final result:\n{function_with_interval_param(2, interval=pd.Interval(tsyesterday, tstoday))}')
print('==== Second pass ===')
        print(f'Final result: {function_with_interval_param(2, interval=pd.Interval(tsthreedaysago, tstoday))}')
import pandas as pd
from io import StringIO
from datetime import timedelta
from portfolio_construction import OptimisationPortfolioConstructionModel
from execution import Execution
from charting import InitCharts, PlotPerformanceChart, PlotExposureChart, PlotCountryExposureChart
def normalise(series, equal_ls=True):
    if equal_ls:
        series -= series.mean()
    total = series.abs().sum()  # avoid shadowing the built-in `sum`
    return series / total
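# Worked example (illustrative): normalise(pd.Series([1.0, 3.0])) demeans to
# [-1.0, 1.0] and divides by abs().sum() == 2, giving [-0.5, 0.5] -- net
# exposure zero and gross exposure one, as expected for an equal long/short book.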
class StockifySentiment(QCAlgorithm):
def Initialize(self):
self.SetStartDate(2017, 1, 1) # Set Start Date
self.SetEndDate(2020, 5, 20)
self.SetCash(100000) # Set Strategy Cash
# Weighting style - normalise or alpha_max (alpha maximisation w/ optimisation)
self.weighting_style = 'normalise'
# Market neutral
self.mkt_neutral = True
# Audio feature to use
self.audio_feature = 'valence'
# Get data
self.data, self.etf_list, self.etf_country = self.DataSetup()
# Add ETFs
for etf in self.etf_list:
self.AddEquity(etf, Resolution.Minute)
# Portfolio construction model
self.CustomPortfolioConstructionModel = OptimisationPortfolioConstructionModel(turnover=1, max_wt=0.2,
longshort=True,
mkt_neutral=self.mkt_neutral)
# Execution model
self.CustomExecution = Execution(liq_tol=0.005)
# Schedule rebalancing
self.Schedule.On(self.DateRules.Every(DayOfWeek.Wednesday), self.TimeRules.BeforeMarketClose('IVV', 210),
Action(self.RebalancePortfolio))
# Init charting
InitCharts(self)
# Schedule charting
self.Schedule.On(self.DateRules.Every(DayOfWeek.Wednesday), self.TimeRules.BeforeMarketClose('IVV', 5),
Action(self.PlotCharts))
def OnData(self, data):
pass
def RebalancePortfolio(self):
df = self.data.loc[self.Time - timedelta(7):self.Time].reset_index().set_index('symbol')
if self.weighting_style == 'normalise':
portfolio = normalise(df['alpha_score'], equal_ls=self.mkt_neutral)
elif self.weighting_style == 'alpha_max':
df = df[['alpha_score']]
portfolio = self.CustomPortfolioConstructionModel.GenerateOptimalPortfolio(self, df)
else:
raise Exception('Invalid weighting style')
self.CustomExecution.ExecutePortfolio(self, portfolio)
def PlotCharts(self):
PlotPerformanceChart(self)
PlotExposureChart(self)
PlotCountryExposureChart(self)
def DataSetup(self):
df = pd.read_csv(StringIO(
self.Download('https://raw.githubusercontent.com/Ollie-Hooper/StockifySentiment/master/data/scores.csv')))
data = df[['date', 'country', f's_{self.audio_feature}']].copy()
data['date'] = pd.to_datetime(data['date'])
data.rename(columns={f's_{self.audio_feature}': 'alpha_score'}, inplace=True)
etf_df = pd.read_csv(StringIO(
self.Download('https://raw.githubusercontent.com/Ollie-Hooper/StockifySentiment/master/data/etf.csv')))
data = | pd.merge(data, etf_df) | pandas.merge |
from collections import deque
from datetime import datetime
import operator
import re
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
import pandas.core.common as com
from pandas.core.computation.expressions import _MIN_ELEMENTS, _NUMEXPR_INSTALLED
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
msgs = [
r"Invalid comparison between dtype=datetime64\[ns\] and ndarray",
"invalid type promotion",
(
# npdev 1.20.0
r"The DTypes <class 'numpy.dtype\[.*\]'> and "
r"<class 'numpy.dtype\[.*\]'> do not have a common DType."
),
]
msg = "|".join(msgs)
with pytest.raises(TypeError, match=msg):
x >= y
with pytest.raises(TypeError, match=msg):
x > y
with pytest.raises(TypeError, match=msg):
x < y
with pytest.raises(TypeError, match=msg):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
msg = (
"'(<|>)=?' not supported between "
"instances of 'numpy.ndarray' and 'Timestamp'"
)
with pytest.raises(TypeError, match=msg):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError, match=msg):
right_f(pd.Timestamp("20010109"), df)
# nats
expected = left_f(df, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
other = pd.DataFrame([["a", "b"], ["c", "d"]])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False], [True, False], [False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(
np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"]
)
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons:
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = pd.DataFrame(data)
other = pd.DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ["eq", "ne", "gt", "lt", "ge", "le"]:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = pd.Series(np.random.randn(5))
col_ser = pd.Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
msg = "|".join(
[
"'>' not supported between instances of '.*' and 'complex'",
r"unorderable types: .*complex\(\)", # PY35
]
)
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df.gt(df2)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df["a"].gt(df2["a"])
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df.values > df2.values
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({"a": arr3})
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df3.gt(2j)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df3["a"].gt(2j)
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df3.values > 2j
def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
result = df1.ne(df2)
exp = pd.DataFrame({"col": [False, True, False]})
tm.assert_frame_equal(result, exp)
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = pd.DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
result = getattr(df, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
def test_df_flex_cmp_ea_dtype_with_ndarray_series(self):
ii = pd.IntervalIndex.from_breaks([1, 2, 3])
df = pd.DataFrame({"A": ii, "B": ii})
ser = pd.Series([0, 0])
res = df.eq(ser, axis=0)
expected = pd.DataFrame({"A": [False, False], "B": [False, False]})
tm.assert_frame_equal(res, expected)
ser2 = pd.Series([1, 2], index=["A", "B"])
res2 = df.eq(ser2, axis=1)
tm.assert_frame_equal(res2, expected)
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic:
def test_floordiv_axis0(self):
# make sure we df.floordiv(ser, axis=0) matches column-wise result
arr = np.arange(3)
ser = pd.Series(arr)
df = pd.DataFrame({"A": ser, "B": ser})
result = df.floordiv(ser, axis=0)
expected = pd.DataFrame({col: df[col] // ser for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = df.floordiv(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
@pytest.mark.skipif(not _NUMEXPR_INSTALLED, reason="numexpr not installed")
@pytest.mark.parametrize("opname", ["floordiv", "pow"])
def test_floordiv_axis0_numexpr_path(self, opname):
# case that goes through numexpr and has to fall back to masked_arith_op
op = getattr(operator, opname)
arr = np.arange(_MIN_ELEMENTS + 100).reshape(_MIN_ELEMENTS // 100 + 1, -1) * 100
df = pd.DataFrame(arr)
df["C"] = 1.0
ser = df[0]
result = getattr(df, opname)(ser, axis=0)
expected = pd.DataFrame({col: op(df[col], ser) for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = getattr(df, opname)(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range("2016-01-01", periods=10)
tdi = pd.timedelta_range("1", periods=10)
tser = pd.Series(tdi)
df = pd.DataFrame({0: dti, 1: tdi})
result = df.add(tser, axis=0)
expected = pd.DataFrame({0: dti + tdi, 1: tdi + tdi})
tm.assert_frame_equal(result, expected)
def test_df_add_flex_filled_mixed_dtypes(self):
# GH 19611
dti = pd.date_range("2016-01-01", periods=3)
ser = pd.Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")
df = | pd.DataFrame({"A": dti, "B": ser}) | pandas.DataFrame |
from __future__ import division
from functools import wraps
import pandas as pd
import numpy as np
import time
import csv, sys
import os.path
import logging
from .ted_functions import TedFunctions
from .ted_aggregate_methods import TedAggregateMethods
from base.uber_model import UberModel, ModelSharedInputs
class TedSpeciesProperties(object):
"""
Listing of species properties that will eventually be read in from a SQL db
"""
def __init__(self):
"""Class representing Species properties"""
super(TedSpeciesProperties, self).__init__()
self.sci_name = pd.Series([], dtype='object')
self.com_name = pd.Series([], dtype='object')
self.taxa = pd.Series([], dtype='object')
self.order = pd.Series([], dtype='object')
self.usfws_id = pd.Series([], dtype='object')
self.body_wgt = pd.Series([], dtype='object')
self.diet_item = pd.Series([], dtype='object')
self.h2o_cont = pd.Series([], dtype='float')
def read_species_properties(self):
# this is a temporary method to initiate the species/diet food items lists (this will be replaced with
# a method to access a SQL database containing the properties
#filename = './ted/tests/TEDSpeciesProperties.csv'
filename = os.path.join(os.path.dirname(__file__),'tests/TEDSpeciesProperties.csv')
try:
with open(filename,'rt') as csvfile:
                # pandas reads the first line of the file as column headings by default
                dr = pd.read_csv(csvfile)  # comma is the default delimiter
        except csv.Error as e:
            sys.exit('file: %s, %s' % (filename, e))
print(dr)
        self.sci_name = dr.loc[:,'Scientific Name']
        self.com_name = dr.loc[:,'Common Name']
        self.taxa = dr.loc[:,'Taxa']
        self.order = dr.loc[:,'Order']
        self.usfws_id = dr.loc[:,'USFWS Species ID (ENTITY_ID)']
        self.body_wgt = dr.loc[:,'BW (g)']
        self.diet_item = dr.loc[:,'Food item']
        self.h2o_cont = dr.loc[:,'Water content of diet']
class TedInputs(ModelSharedInputs):
"""
Required inputs class for Ted.
"""
def __init__(self):
"""Class representing the inputs for Ted"""
super(TedInputs, self).__init__()
# Inputs: Assign object attribute variables from the input Pandas DataFrame
self.chemical_name = pd.Series([], dtype="object", name="chemical_name")
# application parameters for min/max application scenarios
self.crop_min = pd.Series([], dtype="object", name="crop")
self.app_method_min = pd.Series([], dtype="object", name="app_method_min")
self.app_rate_min = pd.Series([], dtype="float", name="app_rate_min")
self.num_apps_min = pd.Series([], dtype="int", name="num_apps_min")
self.app_interval_min = pd.Series([], dtype="int", name="app_interval_min")
self.droplet_spec_min = pd.Series([], dtype="object", name="droplet_spec_min")
        self.boom_hgt_min = pd.Series([], dtype="object", name="boom_hgt_min")
        self.pest_incorp_depth_min = pd.Series([], dtype="object", name="pest_incorp_depth_min")
self.crop_max = pd.Series([], dtype="object", name="crop")
self.app_method_max = pd.Series([], dtype="object", name="app_method_max")
self.app_rate_max = pd.Series([], dtype="float", name="app_rate_max")
        self.num_apps_max = pd.Series([], dtype="int", name="num_apps_max")
self.app_interval_max = pd.Series([], dtype="int", name="app_interval_max")
self.droplet_spec_max = pd.Series([], dtype="object", name="droplet_spec_max")
        self.boom_hgt_max = pd.Series([], dtype="object", name="boom_hgt_max")
        self.pest_incorp_depth_max = pd.Series([], dtype="object", name="pest_incorp_depth_max")
# physical, chemical, and fate properties of pesticide
self.foliar_diss_hlife = pd.Series([], dtype="float", name="foliar_diss_hlife")
self.aerobic_soil_meta_hlife = pd.Series([], dtype="float", name="aerobic_soil_meta_hlife")
self.frac_retained_mamm = pd.Series([], dtype="float", name="frac_retained_mamm")
self.frac_retained_birds = pd.Series([], dtype="float", name="frac_retained_birds")
self.log_kow = pd.Series([], dtype="float", name="log_kow")
self.koc = pd.Series([], dtype="float", name="koc")
self.solubility = pd.Series([], dtype="float", name="solubility")
self.henry_law_const = pd.Series([], dtype="float", name="henry_law_const")
# bio concentration factors (ug active ing/kg-ww) / (ug active ing/liter)
self.aq_plant_algae_bcf_mean = pd.Series([], dtype="float", name="aq_plant_algae_bcf_mean")
self.aq_plant_algae_bcf_upper = pd.Series([], dtype="float", name="aq_plant_algae_bcf_upper")
self.inv_bcf_mean = pd.Series([], dtype="float", name="inv_bcf_mean")
self.inv_bcf_upper = pd.Series([], dtype="float", name="inv_bcf_upper")
self.fish_bcf_mean = pd.Series([], dtype="float", name="fish_bcf_mean")
self.fish_bcf_upper = pd.Series([], dtype="float", name="fish_bcf_upper")
# bounding water concentrations (ug active ing/liter)
self.water_conc_1 = pd.Series([], dtype="float", name="water_conc_1") # lower bound
self.water_conc_2 = pd.Series([], dtype="float", name="water_conc_2") # upper bound
# health value inputs
# naming convention (based on listing from OPP TED Excel spreadsheet 'inputs' worksheet):
# dbt: dose based toxicity
# cbt: concentration-based toxicity
# arbt: application rate-based toxicity
# 1inmill_mort: 1/million mortality (note initial character is numeral 1, not letter l)
# 1inten_mort: 10% mortality (note initial character is numeral 1, not letter l)
# others are self explanatory
# dose based toxicity(dbt): mammals (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort")
self.dbt_mamm_1inten_mort = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort")
self.dbt_mamm_low_ld50 = pd.Series([], dtype="float", name="dbt_mamm_low_ld50")
        self.dbt_mamm_rat_oral_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50")
self.dbt_mamm_rat_derm_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50")
self.dbt_mamm_rat_inhal_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50")
self.dbt_mamm_sub_direct = pd.Series([], dtype="float", name="dbt_mamm_sub_direct")
self.dbt_mamm_sub_indirect = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect")
self.dbt_mamm_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort_wgt")
self.dbt_mamm_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort_wgt")
self.dbt_mamm_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_low_ld50_wgt")
        self.dbt_mamm_rat_oral_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50_wgt")
self.dbt_mamm_rat_derm_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50_wgt")
self.dbt_mamm_rat_inhal_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50_wgt")
self.dbt_mamm_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_direct_wgt")
self.dbt_mamm_sub_indirect_wgt = | pd.Series([], dtype="float", name="dbt_mamm_sub_indirect_wgt") | pandas.Series |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import os
myfile="../files/train_final.csv"
if os.path.isfile(myfile):
os.remove(myfile)
with open("../files/train.csv", "r") as rd:
with open("../files/train_final.csv", "a") as wr:
for i in range(1, 13):
wr.write("x" + str(i))
if (i != 12): wr.write(",")
wr.write("\n")
for line in rd:
line = line.strip()
if (line.startswith("[")):
line = line[1:len(line)]
if (line.endswith("]")):
line = line[:len(line) - 1]
wr.write(line + "\n")
train = pd.read_csv("../files/train_final.csv")
train["x1"] = pd.to_numeric(train["x1"], errors='coerce')
train["x2"] = pd.to_numeric(train["x2"], errors='coerce')
train["x3"] = pd.to_numeric(train["x3"], errors='coerce')
train["x4"] = pd.to_numeric(train["x4"], errors='coerce')
train["x5"] = pd.to_numeric(train["x5"], errors='coerce')
train["x6"] = pd.to_numeric(train["x6"], errors='coerce')
train["x7"] = | pd.to_numeric(train["x7"], errors='coerce') | pandas.to_numeric |
#!/usr/bin/env python3
# Pancancer_Aberrant_Pathway_Activity_Analysis scripts/targene_count_heatmaps.py
import os
import sys
import pandas as pd
import argparse
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'papaa'))
from tcga_util import add_version_argument
parser = argparse.ArgumentParser()
add_version_argument(parser)
parser.add_argument('-g', '--genes', default= 'ERBB2,PIK3CA,KRAS,AKT1',
help='string of the genes to extract or gene list file')
parser.add_argument('-p', '--path_genes',
help='pathway gene list file')
parser.add_argument('-s', '--classifier_decisions',
help='string of the location of classifier decisions file with predictions/scores')
parser.add_argument('-x', '--x_matrix', default=None,
help='Filename of features to use in model')
parser.add_argument( '--filename_mut', default=None,
help='Filename of sample/gene mutations to use in model')
parser.add_argument( '--filename_mut_burden', default=None,
help='Filename of sample mutation burden to use in model')
parser.add_argument( '--filename_sample', default=None,
help='Filename of patient/samples to use in model')
parser.add_argument( '--filename_copy_loss', default=None,
help='Filename of copy number loss')
parser.add_argument( '--filename_copy_gain', default=None,
help='Filename of copy number gain')
parser.add_argument( '--filename_cancer_gene_classification', default=None,
help='Filename of cancer gene classification table')
args = parser.parse_args()
# Load Constants
alt_folder = args.classifier_decisions
rnaseq_file = args.x_matrix
mut_file = args.filename_mut
sample_freeze_file = args.filename_sample
cancer_gene_file = args.filename_cancer_gene_classification
copy_loss_file = args.filename_copy_loss
copy_gain_file = args.filename_copy_gain
mutation_burden_file = args.filename_mut_burden
mutation_df = pd.read_table(mut_file, index_col=0)
sample_freeze = pd.read_table(sample_freeze_file, index_col=0)
copy_loss_df = pd.read_table(copy_loss_file, index_col=0)
copy_gain_df = pd.read_table(copy_gain_file, index_col=0)
cancer_genes_df = pd.read_table(cancer_gene_file)
results_path = alt_folder
try:
genes = args.genes
genes_df = pd.read_table(genes)
genes = genes_df['genes'].tolist()
except:
genes = args.genes.split(',')
# if list of pathway genes are provided in a file
try:
path_genes = args.path_genes
pathgenes_df = pd.read_table(path_genes)
path_genes = pathgenes_df['genes'].tolist()
except:
    path_genes = args.path_genes.split(',')
n = pathgenes_df['og_tsg'].tolist()
n_OG = n.count('OG')
n_TSG = n.count('TSG')
# Subset mutation data
mutation_sub_df = mutation_df.loc[:, pathgenes_df['genes']]
# Find if the input genes are in this master list
genes_sub = cancer_genes_df[cancer_genes_df['Gene Symbol'].isin(pathgenes_df['genes'])]
# Add status to the Y matrix depending on if the gene is a tumor suppressor
# or an oncogene. An oncogene can be activated with copy number gains, but
# a tumor suppressor is inactivated with copy number loss
tumor_suppressor = pathgenes_df[pathgenes_df['og_tsg'] == 'TSG']
oncogene = pathgenes_df[pathgenes_df['og_tsg'] == 'OG']
# Subset copy number information
copy_loss_sub_df = copy_loss_df[tumor_suppressor['genes']]
copy_gain_sub_df = copy_gain_df[oncogene['genes']]
# ## Output Mutation, Copy Number, and Total Heatmap (Gene by Cancer-type)
mutation_sub_total_df = mutation_sub_df.assign(Total=mutation_sub_df.max(axis=1))
mut_disease_df = mutation_sub_total_df.merge(sample_freeze, left_index=True,
right_on='SAMPLE_BARCODE')
mut_heatmap_df = mut_disease_df.groupby('DISEASE').mean()
gene_avg = mut_disease_df.mean()
gene_avg.name = 'Total'
mut_heatmap_df = mut_heatmap_df.append(gene_avg)
sns.set_style("whitegrid")
plt.figure(figsize = (10,10),dpi= 300)
sns.heatmap(mut_heatmap_df, linewidths=0.2, linecolor='black',
cmap='Blues_r', square=True, cbar=True)
plt.autoscale(enable=True, axis ='x', tight = True)
plt.autoscale(enable=True, axis ='y', tight = True)
plt.ylabel('Cancer Types', fontsize=16)
plt.xlabel('Pathway Genes', fontsize=16)
plt.savefig(os.path.join(results_path, 'cancer_type_mutation_heatmap.pdf'))
copy_df = pd.concat([copy_gain_sub_df, copy_loss_sub_df], axis=1)
copy_total_df = copy_df.assign(Total=copy_df.max(axis=1))
copy_disease_df = copy_total_df.merge(sample_freeze, left_index=True,
right_on='SAMPLE_BARCODE')
copy_heatmap_df = copy_disease_df.groupby('DISEASE').mean()
copy_avg = copy_disease_df.mean()
copy_avg.name = 'Total'
copy_heatmap_df = copy_heatmap_df.append(copy_avg)
sns.set_style("whitegrid")
plt.figure(figsize = (10,10),dpi= 300)
sns.heatmap(copy_heatmap_df, linewidths=0.2, linecolor='black',
cmap='Blues_r', square=True)
plt.ylabel('Cancer Types', fontsize=16)
plt.xlabel('Pathway Genes', fontsize=16)
plt.autoscale(enable=True, axis ='x', tight = True)
plt.autoscale(enable=True, axis ='y', tight = True)
plt.savefig(os.path.join(results_path, 'cancer_type_copy_number_heatmap.pdf'))
# Combined heatmap
comb_heat = mutation_sub_df + copy_df
comb_heat[comb_heat == 2] = 1 # Replace duplicates with just one
comb_heat_df = comb_heat.merge(sample_freeze, left_index=True, right_on='SAMPLE_BARCODE')
comb_heat_total_df = comb_heat_df.assign(Total=comb_heat_df.max(axis=1))
comb_heatmap_df = comb_heat_total_df.groupby('DISEASE').mean()
comb_avg = comb_heat_total_df.mean()
comb_avg.name = 'Total'
comb_heatmap_plot = comb_heatmap_df.append(comb_avg)
sns.set_style("whitegrid")
plt.figure(figsize = (10,10),dpi= 300)
sns.heatmap(comb_heatmap_plot, linewidths=0.2, linecolor='black',
cmap='Blues_r', square=True)
plt.ylabel('Cancer Types', fontsize=16)
plt.xlabel('Pathway Genes', fontsize=16)
plt.autoscale(enable=True, axis ='x', tight = True)
plt.autoscale(enable=True, axis ='y', tight = True)
plt.tight_layout()
plt.savefig(os.path.join(results_path, 'cancer_type_combined_total_heatmap.pdf'))
# ## Generating Pathway Mapper Text Files
summary_score_df = (
pd.DataFrame(
[mut_heatmap_df.loc['Total', :], copy_heatmap_df.loc['Total', :]]
)
.transpose()
)
summary_score_df.columns = ['mutation', 'copy_number']
summary_score_df = summary_score_df * 100
summary_score_df = summary_score_df.round(decimals = 1)
# Create negative percentages for tumor suppressors in the Pathway
tum_sup_mult = pd.Series([1] * n_OG + [-1] * n_TSG + [1])
tum_sup_mult.index = summary_score_df.index
summary_score_df = summary_score_df.mul(tum_sup_mult, axis=0)
pathway_mapper_file = os.path.join(results_path, 'tables',
'pathway_mapper_percentages.txt')
summary_score_df.to_csv(pathway_mapper_file, sep='\t')
# ## Output number of targene events per sample
decision_file = os.path.join(results_path, 'classifier_decisions.tsv')
decisions_df = | pd.read_table(decision_file) | pandas.read_table |
# -*- coding: utf-8 -*-
import pandas as pd
import six
from tigeropen.common.response import TigerResponse
from tigeropen.common.util.string_utils import get_string
COLUMNS = ['symbol', 'field', 'date', 'value']
class FinancialDailyResponse(TigerResponse):
def __init__(self):
super(FinancialDailyResponse, self).__init__()
self.financial_daily = None
self._is_success = None
def parse_response_content(self, response_content):
response = super(FinancialDailyResponse, self).parse_response_content(response_content)
if 'is_success' in response:
self._is_success = response['is_success']
if self.data and isinstance(self.data, list):
items = list()
for item in self.data:
item_values = dict()
for key, value in item.items():
if isinstance(value, six.string_types):
value = get_string(value)
item_values[key] = value
items.append(item_values)
self.financial_daily = | pd.DataFrame(items, columns=COLUMNS) | pandas.DataFrame |
"""Filter copy number segments."""
import functools
import logging
import numpy as np
import pandas as pd
import hashlib
from .descriptives import weighted_median
def require_column(*colnames):
"""Wrapper to coordinate the segment-filtering functions.
Verify that the given columns are in the CopyNumArray the wrapped function
takes. Also log the number of rows in the array before and after filtration.
"""
if len(colnames) == 1:
msg = "'{}' filter requires column '{}'"
else:
msg = "'{}' filter requires columns " + \
", ".join(["'{}'"] * len(colnames))
def wrap(func):
@functools.wraps(func)
def wrapped_f(segarr):
filtname = func.__name__
if any(c not in segarr for c in colnames):
raise ValueError(msg.format(filtname, *colnames))
result = func(segarr)
logging.info("Filtered by '%s' from %d to %d rows",
filtname, len(segarr), len(result))
return result
return wrapped_f
return wrap
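# Hedged illustration (not from the original module): a toy filter declaring
# the column it needs via the decorator above. The filter name and threshold
# are hypothetical; the real filters live alongside this module in the package.
@require_column('depth')
def example_min_depth(segarr):
    """Drop segments whose mean bin depth is below 1 (toy threshold)."""
    return segarr[segarr['depth'] >= 1]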
def squash_by_groups(cnarr, levels, by_arm=False):
"""Reduce CopyNumArray rows to a single row within each given level."""
# Enumerate runs of identical values
change_levels = enumerate_changes(levels)
assert (change_levels.index == levels.index).all()
assert cnarr.data.index.is_unique
assert levels.index.is_unique
assert change_levels.index.is_unique
if by_arm:
# Enumerate chromosome arms
arm_levels = []
for i, (_chrom, cnarm) in enumerate(cnarr.by_arm()):
arm_levels.append(np.repeat(i, len(cnarm)))
change_levels += np.concatenate(arm_levels)
else:
# Enumerate chromosomes
chrom_names = cnarr['chromosome'].unique()
chrom_col = (cnarr['chromosome']
.replace(chrom_names, np.arange(len(chrom_names))))
change_levels += chrom_col
data = cnarr.data.assign(_group=change_levels)
groupkey = ['_group']
if 'cn1' in cnarr:
# Keep allele-specific CNAs separate
data['_g1'] = enumerate_changes(cnarr['cn1'])
data['_g2'] = enumerate_changes(cnarr['cn2'])
groupkey.extend(['_g1', '_g2'])
data = (data.groupby(groupkey, as_index=False, group_keys=False, sort=False)
.apply(squash_region)
.reset_index(drop=True))
return cnarr.as_dataframe(data)
def enumerate_changes(levels):
"""Assign a unique integer to each run of identical values.
Repeated but non-consecutive values will be assigned different integers.
"""
return levels.diff().fillna(0).abs().cumsum().astype(int)
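# Worked example: enumerate_changes(pd.Series([1, 1, 2, 2, 1])) yields
# 0, 0, 1, 1, 2 -- each run of identical values gets its own integer, and the
# trailing 1 starts a new run instead of rejoining the first one.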
def squash_region(cnarr):
"""Reduce a CopyNumArray to 1 row, keeping fields sensible.
Most fields added by the `segmetrics` command will be dropped.
"""
assert 'weight' in cnarr
out = {'chromosome': [cnarr['chromosome'].iat[0]],
'start': cnarr['start'].iat[0],
'end': cnarr['end'].iat[-1],
}
region_weight = cnarr['weight'].sum()
if region_weight > 0:
out['log2'] = np.average(cnarr['log2'], weights=cnarr['weight'])
else:
out['log2'] = np.mean(cnarr['log2'])
out['gene'] = ','.join(cnarr['gene'].drop_duplicates())
out['probes'] = cnarr['probes'].sum() if 'probes' in cnarr else len(cnarr)
out['weight'] = region_weight
if 'depth' in cnarr:
if region_weight > 0:
out['depth'] = np.average(cnarr['depth'], weights=cnarr['weight'])
else:
out['depth'] = np.mean(cnarr['depth'])
if 'baf' in cnarr:
if region_weight > 0:
out['baf'] = np.average(cnarr['baf'], weights=cnarr['weight'])
else:
out['baf'] = np.mean(cnarr['baf'])
if 'cn' in cnarr:
if region_weight > 0:
out['cn'] = weighted_median(cnarr['cn'], cnarr['weight'])
else:
out['cn'] = np.median(cnarr['cn'])
if 'cn1' in cnarr:
if region_weight > 0:
out['cn1'] = weighted_median(cnarr['cn1'], cnarr['weight'])
else:
out['cn1'] = np.median(cnarr['cn1'])
out['cn2'] = out['cn'] - out['cn1']
if 'p_bintest' in cnarr:
# Only relevant for single-bin segments, but this seems safe/conservative
out['p_bintest'] = cnarr['p_bintest'].max()
return | pd.DataFrame(out) | pandas.DataFrame |
import pytest
import pandas.util._test_decorators as td
import pandas as pd
import pandas._testing as tm
arrays = [pd.array([1, 2, 3, None], dtype=dtype) for dtype in tm.ALL_EA_INT_DTYPES]
arrays += [pd.array([0.1, 0.2, 0.3, None], dtype=dtype) for dtype in tm.FLOAT_EA_DTYPES]
arrays += [pd.array([True, False, True, None], dtype="boolean")]
@pytest.fixture(params=arrays, ids=[a.dtype.name for a in arrays])
def data(request):
return request.param
@ | td.skip_if_no("pyarrow", min_version="0.15.0") | pandas.util._test_decorators.skip_if_no |
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 14 16:03:03 2019
@author: prasas
"""
import pandas as pd
import statistics as stats
import math as math
import xlwt
def loadData(filename):
# load dataset
    rawdata = pd.read_csv(filename, header=None)
    dataset = pd.DataFrame(rawdata)
    rawY = dataset.iloc[:, 20]
    X = dataset.iloc[:, 0:20]
    # binarize the class output Y: positive values -> 1, otherwise 0
    f = lambda i: 1 if i > 0 else 0
    Y = list(map(f, rawY))
    # binarize each feature around its median
    for feature in X:
        median = stats.median(X[feature])
        X[feature] = list(map(lambda a: 1 if a >= median else 0, X[feature]))
    return X, Y
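# Example call (illustrative; the CSV path is hypothetical):
#     X, Y = loadData('credit_train.csv')
#     # X: 20 median-binarized feature columns; Y: list of 0/1 class labels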
def loadtestData(filename):
# load dataset
    rawdata = pd.read_csv(filename, header=None)
dataset = | pd.DataFrame(rawdata) | pandas.DataFrame |
# What's Cooking? kaggle competition.
# https://www.kaggle.com/c/whats-cooking-kernels-only
#
# Neural network with dense layer using Keras
#
# Input: list of ingredients
# Output: cuisine
#
# Author
# https://www.kaggle.com/vpapenko
import pandas as pd
from keras.preprocessing.text import Tokenizer
from sklearn.preprocessing import LabelBinarizer
from keras.models import Sequential
from keras.layers import Dense
train = | pd.read_json('../input/train.json') | pandas.read_json |
'''
General utility scripts
'''
import os
import ndjson
import pandas as pd
def validate_input(texts) -> list:
'''
Make sure texts are in the right format
before training PMI-SVD embeddings.
If no exceptions are raised, returns a list in
the following format: output[document][word]
'''
# check if empty
if not texts:
raise ValueError('"texts" input is empty!')
# check for string input
elif isinstance(texts, str):
# if there is a single line, parse as one doc
        if texts.count('\n') == 0:
output = [[word.lower() for word in texts.split()]]
# if multiple lines, each line is a new doc
else:
texts = texts.split('\n')
output = [[word for word in doc.split()]
for doc in texts]
# check for input list
elif isinstance(texts, list):
# if the list is nested
if all(isinstance(doc, list) for doc in texts):
# validate that all items of sublists are str
unnest = [word for doc in texts for word in doc]
if all(isinstance(item, str) for item in unnest):
output = texts
else:
raise ValueError(
"input: all items in texts[i][j] must be strings")
# if the list is not nested
elif all(isinstance(doc, str) for doc in texts):
output = [[word for word in doc.split()]
for doc in texts]
# if any texts[i] are other types throw error
else:
raise ValueError("input: incompatible data type in texts[i]")
# break when input is neither str or list
else:
raise ValueError('texts must be str or list')
return output
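# Usage sketch (illustrative) -- all three accepted shapes normalize to a
# list of tokenized documents:
#     validate_input("one doc only")              # [['one', 'doc', 'only']]
#     validate_input("doc one\ndoc two")          # [['doc', 'one'], ['doc', 'two']]
#     validate_input([['already', 'tokenized']])  # [['already', 'tokenized']]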
def load_data(ndjson_path):
'''
    Read a preprocessed .ndjson file & convert each record to a DataFrame.
'''
with open(ndjson_path, 'r') as f:
obj = ndjson.load(f)
obj_dfs = [pd.DataFrame(dat) for dat in obj]
return obj_dfs
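# Usage sketch (illustrative path):
#     dfs = load_data('corpus_preprocessed.ndjson')
#     # dfs[i] is the DataFrame built from the i-th serialized record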
def make_folders(out_dir):
'''
Create folders for saving many models
out_dir : str
path to export models to
'''
# create main folder
if not os.path.exists(out_dir):
os.mkdir(out_dir)
# get output paths
report_dir = os.path.join(out_dir, "report_lines", "")
model_dir = os.path.join(out_dir, "models", "")
plot_dir = os.path.join(out_dir, "plots", "")
doctop_dir = os.path.join(out_dir, "doctop_mats", "")
# create sub-folders
for folder in [report_dir, model_dir, plot_dir, doctop_dir]:
# check if dir already exists
if not os.path.exists(folder):
os.mkdir(folder)
return None
def export_serialized(df, column='text', path=None):
'''
Serialize column to a dictionary,
where keys are ID and values are col.
Parameters
----------
df : pd.DataFrame
dataframe to unpack
column : str (default: 'text')
name of df's column to export
path : str, optional
where to save the resulting .ndjson object
'''
# get ID column
df_id = (
df
.reset_index()
.rename(columns={'index': 'ID'})
)
# convert data to list of dicts
serial_output = []
for i, row in df_id.iterrows():
doc = {'ID': row['ID'], column: row[column]}
serial_output.append(doc)
# if path is specified, save & be silent
if path:
with open(path, 'w') as f:
ndjson.dump(serial_output, f)
return None
# if no path, return list of dicts
else:
return serial_output
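# Usage sketch (illustrative; the file name is hypothetical):
#     df = pd.DataFrame({'text': ['first doc', 'second doc']})
#     export_serialized(df, column='text', path='corpus.ndjson')
#     # writes: {"ID": 0, "text": "first doc"} and {"ID": 1, "text": "second doc"}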
def compile_report(report_dir):
'''
Join partial reports from LDA training into one DF.
Returns a DF sorted in descending order by avg topic coherence in that model.
Parameters
----------
report_dir : str
path to directory, where reports are saved.
Report are serialized tuples in .ndjson format.
See lda training scripts for details.
'''
# get a list of paths to import
report_paths = []
for file in os.listdir(report_dir):
if file.endswith(".ndjson"):
# tuple with whole path and file name
path_and_file = tuple([report_dir + file, file])
# append both
report_paths.append(path_and_file)
# iterate through paths, converting them into DF rows
dfs = | pd.DataFrame([]) | pandas.DataFrame |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os, datetime, requests
from bs4 import BeautifulSoup
import pandas as pd
url = 'https://docs.google.com/spreadsheets/d/e/2PACX-1vQuDj0R6K85sdtI8I-Tc7RCx8CnIxKUQue0TCUdrFOKDw9G3JRtGhl64laDd3apApEvIJTdPFJ9fEUL/pubhtml?gid=0&single=true'
work_path = '/path/to/working/dir'
def get_table():
req = requests.session()
response = req.get(url,headers={'Accept-Language': 'zh-TW'})
soup = BeautifulSoup(response.text, "lxml")
table = soup.find('table', {'class': 'waffle'})
trs = table.find_all('tr')[1:]
rows = list()
for tr in trs:
rows.append([td.text.replace('\n', '') for td in tr.find_all('td')])
columns = rows[0][:]
columns[0] = columns[0][4:]
columns[2:5] = [columns[0],columns[0],columns[0]]
rows = [r[1:] for r in rows]
df = pd.DataFrame(data=rows, columns=columns[1:])
return df
def build_nation():
df = get_table()
    # NOTE: 'columns' from get_table() is not in scope here; the equivalent
    # label is the frame's own second header (the renamed sheet title).
    df_nation = df.drop(columns=df.columns[1])
df_nation.to_csv('nation.csv',index=False)
def build_database():
database = | pd.read_csv('nation.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Wed May 03 15:01:31 2017
@author: jdkern
"""
import pandas as pd
import numpy as np
#read generator parameters into DataFrame
df_gen = pd.read_excel('NEISO_data_file/generators.xlsx',header=0)
#read transmission path parameters into DataFrame
df_paths = pd.read_csv('NEISO_data_file/paths.csv',header=0)
#list zones
zones = ['CT', 'ME', 'NH', 'NEMA', 'RI', 'SEMA', 'VT', 'WCMA']
##time series of load for each zone
df_load_all = pd.read_csv('../Time_series_data/Synthetic_demand_pathflows/Sim_hourly_load.csv',header=0)
df_load_all = df_load_all[zones]
##daily hydropower availability
df_hydro = pd.read_csv('Hydro_setup/NEISO_dispatchable_hydro.csv',header=0)
#must run resources (LFG,ag_waste,nuclear)
df_must = pd.read_excel('NEISO_data_file/must_run.xlsx',header=0)
# must run generation
must_run_CT = np.ones((8760,1))*df_must.loc[0,'CT']
must_run_ME = np.ones((8760,1))*df_must.loc[0,'ME']
must_run_NEMA = np.ones((8760,1))*df_must.loc[0,'NEMA']
must_run_NH = np.ones((8760,1))*df_must.loc[0,'NH']
must_run_RI = np.ones((8760,1))*df_must.loc[0,'RI']
must_run_SEMA = np.ones((8760,1))*df_must.loc[0,'SEMA']
must_run_VT = np.ones((8760,1))*df_must.loc[0,'VT']
must_run_WCMA = np.ones((8760,1))*df_must.loc[0,'WCMA']
must_run = np.column_stack((must_run_CT,must_run_ME,must_run_NEMA,must_run_NH,must_run_RI,must_run_SEMA,must_run_VT,must_run_WCMA))
df_total_must_run = pd.DataFrame(must_run,columns=('CT','ME','NEMA','NH','RI','SEMA','VT','WCMA'))
df_total_must_run.to_csv('NEISO_data_file/must_run_hourly.csv')
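# A more compact, equivalent construction of the same hourly must-run table
# (a sketch, assuming the zone order used above):
#     mr_zones = ['CT','ME','NEMA','NH','RI','SEMA','VT','WCMA']
#     df_total_must_run = pd.DataFrame(
#         {z: np.repeat(df_must.loc[0, z], 8760) for z in mr_zones})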
#natural gas prices
df_ng_all = pd.read_excel('../Time_series_data/Gas_prices/NG.xlsx', header=0)
df_ng_all = df_ng_all[zones]
#oil prices
df_oil_all = pd.read_excel('../Time_series_data/Oil_prices/Oil_prices.xlsx', header=0)
df_oil_all = df_oil_all[zones]
# time series of offshore wind generation for each zone
df_offshore_wind_all = pd.read_excel('../Time_series_data/Synthetic_wind_power/offshore_wind_power_sim.xlsx',header=0)
# time series of solar generation
df_solar = pd.read_excel('NEISO_data_file/hourly_solar_gen.xlsx',header=0)
solar_caps = pd.read_excel('NEISO_data_file/solar_caps.xlsx',header=0)
# time series of onshore wind generation
df_onshore_wind = pd.read_excel('NEISO_data_file/hourly_onshore_wind_gen.xlsx',header=0)
onshore_wind_caps = pd.read_excel('NEISO_data_file/wind_onshore_caps.xlsx',header=0)
def setup(year, Hub_height, Offshore_capacity):
##time series of natural gas prices for each zone
df_ng = globals()['df_ng_all'].copy()
df_ng = df_ng.reset_index()
##time series of oil prices for each zone
df_oil = globals()['df_oil_all'].copy()
df_oil = df_oil.reset_index()
##time series of load for each zone
df_load = globals()['df_load_all'].loc[year*8760:year*8760+8759].copy()
df_load = df_load.reset_index(drop=True)
##time series of operational reserves for each zone
    rv = df_load.values
    reserves = np.zeros((len(rv), 1))
    for i in range(0, len(rv)):
        reserves[i] = np.sum(rv[i, :]) * .04
df_reserves = pd.DataFrame(reserves)
df_reserves.columns = ['reserves']
##daily time series of dispatchable imports by path
df_imports = | pd.read_csv('Path_setup/NEISO_dispatchable_imports.csv',header=0) | pandas.read_csv |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
                if opname == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
        # series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
        expected = 0.0 > pd.Series([0, 1, 2])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16',
'uint8']:
s2 = Series([20, 30, 40], dtype=dtype)
expected = Series(
s1.values.astype(np.int64) * s2.astype(np.int64),
dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
# invalid ops
assert_series_equal(s1 / s2.astype(float),
Series([Timedelta('2 days 22:48:00'), Timedelta(
'1 days 23:12:00'), Timedelta('NaT')]))
assert_series_equal(s1 / 2.0,
Series([Timedelta('29 days 12:00:00'), Timedelta(
'29 days 12:00:00'), Timedelta('NaT')]))
for op in ['__add__', '__sub__']:
sop = getattr(s1, op, None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
for m in [1, 3, 10]:
for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:
# op
expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
result = s1 / np.timedelta64(m, unit)
assert_series_equal(result, expected)
if m == 1 and unit != 'ns':
# astype
result = s1.astype("timedelta64[{0}]".format(unit))
assert_series_equal(result, expected)
# reverse op
expected = s1.apply(
lambda x: Timedelta(np.timedelta64(m, unit)) / x)
result = np.timedelta64(m, unit) / s1
# astype
s = Series(date_range('20130101', periods=3))
result = s.astype(object)
self.assertIsInstance(result.iloc[0], datetime)
self.assertTrue(result.dtype == np.object_)
result = s1.astype(object)
self.assertIsInstance(result.iloc[0], timedelta)
self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'), Timestamp(
'20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
# TODO: unused
# npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
# 'h': 60 * 60 * 1000000,
# 'm': 60 * 1000000,
# 's': 1000000,
# 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
except:
raise AssertionError(
"invalid comparsion [op->{0},d->{1},h->{2},m->{3},"
"s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s,
us, lhs, rhs))
def test_operators_datetimelike(self):
def run_ops(ops, get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
for op_str in ops:
op = getattr(get_ser, op_str, None)
with tm.assertRaisesRegexp(TypeError, 'operate'):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td2 = timedelta(minutes=5, seconds=4)
ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__',
'__rfloordiv__', '__rpow__']
run_ops(ops, td1, td2)
td1 + td2
td2 + td1
td1 - td2
td2 - td1
td1 / td2
td2 / td1
# ## datetime64 ###
dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')])
dt1.iloc[2] = np.nan
dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
Timestamp('20120104')])
ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__radd__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, dt1, dt2)
dt1 - dt2
dt2 - dt1
        # ## datetime64 with timedelta ###
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
run_ops(ops, dt1, td1)
dt1 + td1
td1 + dt1
dt1 - td1
# TODO: Decide if this ought to work.
# td1 - dt1
        # ## timedelta with datetime64 ###
ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',
'__rdiv__', '__rpow__']
run_ops(ops, td1, dt1)
td1 + dt1
dt1 + td1
# 8260, 10763
# datetime64 with tz
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
tz = 'US/Eastern'
dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
tz=tz), name='foo')
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))
td2 = td1.copy()
td2.iloc[1] = np.nan
run_ops(ops, dt1, td1)
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
if not _np_version_under1p8:
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1[0] - dt1)
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td2[0] - dt2)
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1 - dt1)
self.assertRaises(TypeError, lambda: td2 - dt2)
def test_sub_single_tz(self):
# GH12290
s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')])
s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')])
result = s1 - s2
expected = Series([Timedelta('2days')])
assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta('-2days')])
assert_series_equal(result, expected)
def test_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
datetime_series = Series([NaT, Timestamp('19900315')])
nat_series_dtype_timedelta = Series(
[NaT, NaT], dtype='timedelta64[ns]')
nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]')
single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(datetime_series - NaT, nat_series_dtype_timestamp)
assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
assert_series_equal(datetime_series - single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + datetime_series
assert_series_equal(datetime_series - single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta + datetime_series,
nat_series_dtype_timestamp)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
assert_series_equal(nat_series_dtype_timestamp - NaT,
nat_series_dtype_timestamp)
assert_series_equal(-NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
with tm.assertRaises(TypeError):
timedelta_series - single_nat_dtype_datetime
# addition
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_datetime,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_datetime +
nat_series_dtype_timedelta,
nat_series_dtype_timestamp)
# multiplication
assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series * 1, timedelta_series)
assert_series_equal(1 * timedelta_series, timedelta_series)
assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(timedelta_series * nan, nat_series_dtype_timedelta)
assert_series_equal(nan * timedelta_series, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
datetime_series * 1
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1
with tm.assertRaises(TypeError):
datetime_series * 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1.0
# division
assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / nan, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1
def test_ops_datetimelike_align(self):
# GH 7500
# datetimelike ops need to align
dt = Series(date_range('2012-1-1', periods=3, freq='D'))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
assert_series_equal(result, expected)
def test_object_comparisons(self):
s = Series(['a', 'b', np.nan, 'c', 'a'])
result = s == 'a'
expected = Series([True, False, False, False, True])
assert_series_equal(result, expected)
result = s < 'a'
expected = Series([False, False, False, False, False])
assert_series_equal(result, expected)
result = s != 'a'
expected = -(s == 'a')
assert_series_equal(result, expected)
def test_comparison_tuples(self):
# GH11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
s = Series(bdate_range('1/1/2000', periods=10), dtype=object)
s[::2] = np.nan
# test that comparisons work
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
val = s[5]
f = getattr(operator, op)
result = f(s, val)
expected = f(s.dropna(), val).reindex(s.index)
if op == 'ne':
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
assert_series_equal(result, expected)
            # FIXME: the reversed comparisons below do not behave
            # consistently in the presence of NaNs
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
# assert_series_equal(result, expected)
# boolean &, |, ^ should work with object arrays and propagate NAs
ops = ['and_', 'or_', 'xor']
mask = s.isnull()
for bool_op in ops:
f = getattr(operator, bool_op)
filled = s.fillna(s[0])
result = f(s < s[9], s > s[3])
expected = f(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_comparison_object_numeric_nas(self):
s = Series(np.random.randn(10), dtype=object)
shifted = s.shift(2)
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
f = getattr(operator, op)
result = f(s, shifted)
expected = f(s.astype(float), shifted.astype(float))
assert_series_equal(result, expected)
def test_comparison_invalid(self):
# GH4968
# invalid date/int comparisons
s = Series(range(5))
s2 = Series(date_range('20010101', periods=5))
for (x, y) in [(s, s2), (s2, s)]:
self.assertRaises(TypeError, lambda: x == y)
self.assertRaises(TypeError, lambda: x != y)
self.assertRaises(TypeError, lambda: x >= y)
self.assertRaises(TypeError, lambda: x > y)
self.assertRaises(TypeError, lambda: x < y)
self.assertRaises(TypeError, lambda: x <= y)
def test_more_na_comparisons(self):
for dtype in [None, object]:
left = Series(['a', np.nan, 'c'], dtype=dtype)
right = Series(['a', np.nan, 'd'], dtype=dtype)
result = left == right
expected = Series([True, False, False])
assert_series_equal(result, expected)
result = left != right
expected = Series([False, True, True])
assert_series_equal(result, expected)
result = left == np.nan
expected = Series([False, False, False])
assert_series_equal(result, expected)
result = left != np.nan
expected = Series([True, True, True])
assert_series_equal(result, expected)
def test_nat_comparisons(self):
data = [([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]),
([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')],
[pd.NaT, pd.NaT, pd.Timedelta('3 days')]),
([pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')],
[pd.NaT, pd.NaT, pd.Period('2011-03', freq='M')])]
# add lhs / rhs switched data
data = data + [(r, l) for l, r in data]
for l, r in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
# Series, Index
for right in [Series(r, dtype=dtype), Index(r, dtype=dtype)]:
expected = Series([False, False, True])
assert_series_equal(left == right, expected)
expected = Series([True, True, False])
assert_series_equal(left != right, expected)
expected = Series([False, False, False])
assert_series_equal(left < right, expected)
expected = Series([False, False, False])
assert_series_equal(left > right, expected)
expected = Series([False, False, True])
assert_series_equal(left >= right, expected)
expected = Series([False, False, True])
assert_series_equal(left <= right, expected)
def test_nat_comparisons_scalar(self):
data = [[pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')],
[pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')]]
for l in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
expected = Series([False, False, False])
assert_series_equal(left == pd.NaT, expected)
assert_series_equal(pd.NaT == left, expected)
expected = Series([True, True, True])
assert_series_equal(left != pd.NaT, expected)
assert_series_equal(pd.NaT != left, expected)
expected = Series([False, False, False])
assert_series_equal(left < pd.NaT, expected)
assert_series_equal(pd.NaT > left, expected)
assert_series_equal(left <= pd.NaT, expected)
assert_series_equal(pd.NaT >= left, expected)
assert_series_equal(left > pd.NaT, expected)
assert_series_equal(pd.NaT < left, expected)
assert_series_equal(left >= pd.NaT, expected)
assert_series_equal(pd.NaT <= left, expected)
def test_comparison_different_length(self):
a = Series(['a', 'b', 'c'])
b = Series(['b', 'a'])
self.assertRaises(ValueError, a.__lt__, b)
a = Series([1, 2])
b = Series([2, 3, 4])
self.assertRaises(ValueError, a.__eq__, b)
def test_comparison_label_based(self):
# GH 4947
# comparisons should be label based
a = Series([True, False, True], list('bca'))
b = Series([False, True, False], list('abc'))
expected = Series([False, True, False], list('abc'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False], list('abc'))
result = a | b
assert_series_equal(result, expected)
expected = Series([True, False, False], list('abc'))
result = a ^ b
assert_series_equal(result, expected)
# rhs is bigger
a = Series([True, False, True], list('bca'))
b = Series([False, True, False, True], list('abcd'))
expected = Series([False, True, False, False], list('abcd'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False, False], list('abcd'))
result = a | b
assert_series_equal(result, expected)
# filling
# vs empty
result = a & Series([])
expected = Series([False, False, False], list('bca'))
assert_series_equal(result, expected)
result = a | Series([])
expected = Series([True, False, True], list('bca'))
assert_series_equal(result, expected)
# vs non-matching
result = a & Series([1], ['z'])
expected = Series([False, False, False, False], list('abcz'))
assert_series_equal(result, expected)
result = a | Series([1], ['z'])
expected = Series([True, True, False, False], list('abcz'))
assert_series_equal(result, expected)
# identity
# we would like s[s|e] == s to hold for any e, whether empty or not
for e in [Series([]), Series([1], ['z']),
Series(np.nan, b.index), Series(np.nan, a.index)]:
result = a[a | e]
assert_series_equal(result, a[a])
for e in [Series(['z'])]:
if compat.PY3:
with tm.assert_produces_warning(RuntimeWarning):
result = a[a | e]
else:
result = a[a | e]
assert_series_equal(result, a[a])
# vs scalars
index = list('bca')
t = Series([True, False, True])
for v in [True, 1, 2]:
result = Series([True, False, True], index=index) | v
expected = Series([True, True, True], index=index)
assert_series_equal(result, expected)
for v in [np.nan, 'foo']:
self.assertRaises(TypeError, lambda: t | v)
for v in [False, 0]:
result = Series([True, False, True], index=index) | v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [True, 1]:
result = Series([True, False, True], index=index) & v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [False, 0]:
result = Series([True, False, True], index=index) & v
expected = Series([False, False, False], index=index)
assert_series_equal(result, expected)
for v in [np.nan]:
self.assertRaises(TypeError, lambda: t & v)
def test_comparison_flex_basic(self):
left = pd.Series(np.random.randn(10))
right = pd.Series(np.random.randn(10))
tm.assert_series_equal(left.eq(right), left == right)
tm.assert_series_equal(left.ne(right), left != right)
        tm.assert_series_equal(left.le(right), left <= right)
        tm.assert_series_equal(left.lt(right), left < right)
tm.assert_series_equal(left.gt(right), left > right)
tm.assert_series_equal(left.ge(right), left >= right)
# axis
for axis in [0, None, 'index']:
tm.assert_series_equal(left.eq(right, axis=axis), left == right)
tm.assert_series_equal(left.ne(right, axis=axis), left != right)
            tm.assert_series_equal(left.le(right, axis=axis), left <= right)
            tm.assert_series_equal(left.lt(right, axis=axis), left < right)
tm.assert_series_equal(left.gt(right, axis=axis), left > right)
tm.assert_series_equal(left.ge(right, axis=axis), left >= right)
        msg = 'No axis named 1 for object type'
        for op in ['eq', 'ne', 'le', 'lt', 'gt', 'ge']:
with tm.assertRaisesRegexp(ValueError, msg):
getattr(left, op)(right, axis=1)
def test_comparison_flex_alignment(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.eq(right), exp)
exp = pd.Series([True, True, False, True], index=list('abcd'))
tm.assert_series_equal(left.ne(right), exp)
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.le(right), exp)
exp = pd.Series([False, False, False, False], index=list('abcd'))
tm.assert_series_equal(left.lt(right), exp)
exp = pd.Series([False, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right), exp)
exp = pd.Series([False, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right), exp)
def test_comparison_flex_alignment_fill(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.eq(right, fill_value=2), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.ne(right, fill_value=2), exp)
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.le(right, fill_value=0), exp)
exp = pd.Series([False, False, False, True], index=list('abcd'))
tm.assert_series_equal(left.lt(right, fill_value=0), exp)
exp = pd.Series([True, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right, fill_value=0), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right, fill_value=0), exp)
def test_operators_bitwise(self):
# GH 9016: support bitwise op for integer types
index = list('bca')
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_tff = Series([True, False, False], index=index)
s_empty = Series([])
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype='int64')
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_tft & s_empty
expected = s_fff
assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & s_3333
expected = Series(range(4), dtype='int64')
assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype='int64')
assert_series_equal(res, expected)
s_a0b1c0 = Series([1], list('b'))
res = s_tft & s_a0b1c0
expected = s_tff.reindex(list('abc'))
assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft.reindex(list('abc'))
assert_series_equal(res, expected)
n0 = 0
res = s_tft & n0
expected = s_fff
assert_series_equal(res, expected)
res = s_0123 & n0
expected = Series([0] * 4)
assert_series_equal(res, expected)
n1 = 1
res = s_tft & n1
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & n1
expected = Series([0, 1, 0, 1])
assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype='int8')
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype='int64')
assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype='int32')
assert_series_equal(res, expected)
self.assertRaises(TypeError, lambda: s_1111 & 'a')
self.assertRaises(TypeError, lambda: s_1111 & ['a', 'b', 'c', 'd'])
self.assertRaises(TypeError, lambda: s_0123 & np.NaN)
self.assertRaises(TypeError, lambda: s_0123 & 3.14)
self.assertRaises(TypeError, lambda: s_0123 & [0.1, 4, 3.14, 2])
# s_0123 will be all false now because of reindexing like s_tft
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_tft & s_0123, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_tft & s_0123, exp)
# s_tft will be all false now because of reindexing like s_0123
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a'])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_0123 & s_tft, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_0123 & s_tft, exp)
assert_series_equal(s_0123 & False, Series([False] * 4))
assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
assert_series_equal(s_0123 & [False], Series([False] * 4))
assert_series_equal(s_0123 & (False), Series([False] * 4))
assert_series_equal(s_0123 & Series([False, np.NaN, False, False]),
Series([False] * 4))
s_ftft = Series([False, True, False, True])
assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)
s_abNd = Series(['a', 'b', np.NaN, 'd'])
res = s_0123 & s_abNd
expected = s_ftft
assert_series_equal(res, expected)
def test_scalar_na_cmp_corners(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
def tester(a, b):
return a & b
self.assertRaises(TypeError, tester, s, datetime(2005, 1, 1))
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
expected = Series(True, index=s.index)
expected[::2] = False
assert_series_equal(tester(s, list(s)), expected)
d = DataFrame({'A': s})
        # TODO: Fix this exception (see GH5035)
        # (previously this was a TypeError because series returned
        # NotImplemented)
# this is an alignment issue; these are equivalent
# https://github.com/pydata/pandas/issues/5284
self.assertRaises(ValueError, lambda: d.__and__(s, axis='columns'))
self.assertRaises(ValueError, tester, s, d)
# this is wrong as its not a boolean result
# result = d.__and__(s,axis='index')
def test_operators_corner(self):
series = self.ts
empty = Series([], index=Index([]))
result = series + empty
self.assertTrue(np.isnan(result).all())
result = empty + Series([], index=Index([]))
self.assertEqual(len(result), 0)
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = self.ts.astype(int)[:-5]
added = self.ts + int_ts
expected = Series(self.ts.values[:-5] + int_ts.values,
index=self.ts.index[:-5], name='ts')
self.assert_series_equal(added[:-5], expected)
def test_operators_reverse_object(self):
# GH 56
arr = Series(np.random.randn(10), index=np.arange(10), dtype=object)
def _check_op(arr, op):
result = op(1., arr)
expected = op(1., arr.astype(float))
assert_series_equal(result.astype(float), expected)
_check_op(arr, operator.add)
_check_op(arr, operator.sub)
_check_op(arr, operator.mul)
_check_op(arr, operator.truediv)
_check_op(arr, operator.floordiv)
def test_arith_ops_df_compat(self):
# GH 1134
s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')
exp = pd.Series([3.0, 4.0, np.nan, np.nan],
index=list('ABCD'), name='x')
tm.assert_series_equal(s1 + s2, exp)
tm.assert_series_equal(s2 + s1, exp)
exp = pd.DataFrame({'x': [3.0, 4.0, np.nan, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s1.to_frame() + s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() + s1.to_frame(), exp)
# different length
s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')
exp = pd.Series([3, 4, 5, np.nan],
index=list('ABCD'), name='x')
tm.assert_series_equal(s3 + s4, exp)
tm.assert_series_equal(s4 + s3, exp)
exp = pd.DataFrame({'x': [3, 4, 5, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s3.to_frame() + s4.to_frame(), exp)
tm.assert_frame_equal(s4.to_frame() + s3.to_frame(), exp)
def test_comp_ops_df_compat(self):
# GH 1134
s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')
s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')
for l, r in [(s1, s2), (s2, s1), (s3, s4), (s4, s3)]:
msg = "Can only compare identically-labeled Series objects"
with tm.assertRaisesRegexp(ValueError, msg):
l == r
with tm.assertRaisesRegexp(ValueError, msg):
l != r
with tm.assertRaisesRegexp(ValueError, msg):
l < r
msg = "Can only compare identically-labeled DataFrame objects"
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() == r.to_frame()
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() != r.to_frame()
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() < r.to_frame()
def test_bool_ops_df_compat(self):
# GH 1134
s1 = pd.Series([True, False, True], index=list('ABC'), name='x')
s2 = pd.Series([True, True, False], index=list('ABD'), name='x')
exp = pd.Series([True, False, False, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s1 & s2, exp)
tm.assert_series_equal(s2 & s1, exp)
# True | np.nan => True
exp = pd.Series([True, True, True, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s1 | s2, exp)
# np.nan | True => np.nan, filled with False
exp = pd.Series([True, True, False, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s2 | s1, exp)
# DataFrame doesn't fill nan with False
exp = pd.DataFrame({'x': [True, False, np.nan, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s1.to_frame() & s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() & s1.to_frame(), exp)
exp = pd.DataFrame({'x': [True, True, np.nan, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s1.to_frame() | s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() | s1.to_frame(), exp)
# different length
s3 = pd.Series([True, False, True], index=list('ABC'), name='x')
s4 = pd.Series([True, True, True, True], index=list('ABCD'), name='x')
exp = pd.Series([True, False, True, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s3 & s4, exp)
tm.assert_series_equal(s4 & s3, exp)
# np.nan | True => np.nan, filled with False
exp = pd.Series([True, True, True, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s3 | s4, exp)
# True | np.nan => True
exp = pd.Series([True, True, True, True],
index=list('ABCD'), name='x')
tm.assert_series_equal(s4 | s3, exp)
exp = pd.DataFrame({'x': [True, False, True, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s3.to_frame() & s4.to_frame(), exp)
tm.assert_frame_equal(s4.to_frame() & s3.to_frame(), exp)
exp = pd.DataFrame({'x': [True, True, True, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s3.to_frame() | s4.to_frame(), exp)
tm.assert_frame_equal(s4.to_frame() | s3.to_frame(), exp)
def test_series_frame_radd_bug(self):
# GH 353
vals = Series(tm.rands_array(5, 10))
result = 'foo_' + vals
expected = vals.map(lambda x: 'foo_' + x)
assert_series_equal(result, expected)
frame = DataFrame({'vals': vals})
result = 'foo_' + frame
expected = DataFrame({'vals': vals.map(lambda x: 'foo_' + x)})
tm.assert_frame_equal(result, expected)
# really raise this time
with tm.assertRaises(TypeError):
datetime.now() + self.ts
with tm.assertRaises(TypeError):
self.ts + datetime.now()
def test_series_radd_more(self):
data = [[1, 2, 3],
[1.1, 2.2, 3.3],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
pd.NaT],
['x', 'y', 1]]
for d in data:
for dtype in [None, object]:
s = Series(d, dtype=dtype)
with tm.assertRaises(TypeError):
'foo_' + s
for dtype in [None, object]:
res = 1 + pd.Series([1, 2, 3], dtype=dtype)
exp = pd.Series([2, 3, 4], dtype=dtype)
tm.assert_series_equal(res, exp)
res = pd.Series([1, 2, 3], dtype=dtype) + 1
tm.assert_series_equal(res, exp)
res = np.nan + pd.Series([1, 2, 3], dtype=dtype)
exp = pd.Series([np.nan, np.nan, np.nan], dtype=dtype)
tm.assert_series_equal(res, exp)
res = pd.Series([1, 2, 3], dtype=dtype) + np.nan
tm.assert_series_equal(res, exp)
s = pd.Series([pd.Timedelta('1 days'), pd.Timedelta('2 days'),
pd.Timedelta('3 days')], dtype=dtype)
exp = pd.Series([pd.Timedelta('4 days'), pd.Timedelta('5 days'),
pd.Timedelta('6 days')])
tm.assert_series_equal(pd.Timedelta('3 days') + s, exp)
tm.assert_series_equal(s + pd.Timedelta('3 days'), exp)
s = pd.Series(['x', np.nan, 'x'])
tm.assert_series_equal('a' + s, pd.Series(['ax', np.nan, 'ax']))
tm.assert_series_equal(s + 'a', pd.Series(['xa', np.nan, 'xa']))
def test_frame_radd_more(self):
data = [[1, 2, 3],
[1.1, 2.2, 3.3],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
pd.NaT],
['x', 'y', 1]]
for d in data:
for dtype in [None, object]:
s = DataFrame(d, dtype=dtype)
with tm.assertRaises(TypeError):
'foo_' + s
for dtype in [None, object]:
res = 1 + pd.DataFrame([1, 2, 3], dtype=dtype)
exp = pd.DataFrame([2, 3, 4], dtype=dtype)
tm.assert_frame_equal(res, exp)
res = pd.DataFrame([1, 2, 3], dtype=dtype) + 1
tm.assert_frame_equal(res, exp)
res = np.nan + pd.DataFrame([1, 2, 3], dtype=dtype)
exp = pd.DataFrame([np.nan, np.nan, np.nan], dtype=dtype)
tm.assert_frame_equal(res, exp)
res = pd.DataFrame([1, 2, 3], dtype=dtype) + np.nan
tm.assert_frame_equal(res, exp)
df = pd.DataFrame(['x', np.nan, 'x'])
tm.assert_frame_equal('a' + df, pd.DataFrame(['ax', np.nan, 'ax']))
tm.assert_frame_equal(df + 'a', pd.DataFrame(['xa', np.nan, 'xa']))
def test_operators_frame(self):
# rpow does not work with DataFrame
df = DataFrame({'A': self.ts})
tm.assert_series_equal(self.ts + self.ts, self.ts + df['A'],
check_names=False)
tm.assert_series_equal(self.ts ** self.ts, self.ts ** df['A'],
check_names=False)
tm.assert_series_equal(self.ts < self.ts, self.ts < df['A'],
check_names=False)
tm.assert_series_equal(self.ts / self.ts, self.ts / df['A'],
check_names=False)
def test_operators_combine(self):
def _check_fill(meth, op, a, b, fill_value=0):
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = isnull(a)
bmask = isnull(b)
exp_values = []
for i in range(len(exp_index)):
with np.errstate(all='ignore'):
if amask[i]:
if bmask[i]:
exp_values.append(nan)
continue
exp_values.append(op(fill_value, b[i]))
elif bmask[i]:
if amask[i]:
exp_values.append(nan)
continue
exp_values.append(op(a[i], fill_value))
else:
exp_values.append(op(a[i], b[i]))
result = meth(a, b, fill_value=fill_value)
expected = Series(exp_values, exp_index)
assert_series_equal(result, expected)
a = Series([nan, 1., 2., 3., nan], index=np.arange(5))
b = Series([nan, 1, nan, 3, nan, 4.], index=np.arange(6))
pairings = []
for op in ['add', 'sub', 'mul', 'pow', 'truediv', 'floordiv']:
fv = 0
lop = getattr(Series, op)
lequiv = getattr(operator, op)
rop = getattr(Series, 'r' + op)
# bind op at definition time...
requiv = lambda x, y, op=op: getattr(operator, op)(y, x)
pairings.append((lop, lequiv, fv))
pairings.append((rop, requiv, fv))
if compat.PY3:
pairings.append((Series.div, operator.truediv, 1))
pairings.append((Series.rdiv, lambda x, y: operator.truediv(y, x),
1))
else:
pairings.append((Series.div, operator.div, 1))
pairings.append((Series.rdiv, lambda x, y: operator.div(y, x), 1))
for op, equiv_op, fv in pairings:
result = op(a, b)
exp = equiv_op(a, b)
assert_series_equal(result, exp)
_check_fill(op, equiv_op, a, b, fill_value=fv)
# should accept axis=0 or axis='rows'
op(a, b, axis=0)
def test_ne(self):
ts = | Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float) | pandas.Series |
import datetime
import logging
import os
import queue
import threading
import typing
from functools import partial
from io import StringIO
import pandas as pd
import psycopg2
from dateutil import tz
from dateutil.relativedelta import relativedelta
from atpy.data.cache.lmdb_cache import write
from atpy.data.ts_util import slice_periods
class BarsFilter(typing.NamedTuple):
ticker: str
interval_len: int
interval_type: str
bgn_prd: datetime.datetime
create_bars = \
"""
-- Table: public.{0}
DROP TABLE IF EXISTS public.{0};
CREATE TABLE public.{0}
(
"timestamp" timestamp without time zone NOT NULL,
symbol character varying COLLATE pg_catalog."default" NOT NULL,
open real NOT NULL,
high real NOT NULL,
low real NOT NULL,
close real NOT NULL,
volume integer NOT NULL,
"interval" character varying COLLATE pg_catalog."default" NOT NULL
)
WITH (
OIDS = FALSE
)
TABLESPACE pg_default;
"""
bars_indices = \
"""
-- Index: {0}_timestamp_ind
-- DROP INDEX public.{0}_timestamp_ind;
CREATE INDEX {0}_timestamp_ind
ON public.{0} USING btree
("timestamp")
TABLESPACE pg_default;
ALTER TABLE public.{0}
CLUSTER ON {0}_timestamp_ind;
CLUSTER {0};
-- Index: {0}_symbol_ind
-- DROP INDEX public.{0}_symbol_ind;
CREATE INDEX {0}_symbol_ind
ON public.{0} USING btree
(symbol COLLATE pg_catalog."default")
TABLESPACE pg_default;
-- Index: interval_ind
-- DROP INDEX public.{0}_interval_ind;
CREATE INDEX {0}_interval_ind
ON public.{0} USING btree
("interval" COLLATE pg_catalog."default")
TABLESPACE pg_default;
"""
create_json_data = \
"""
-- Table: public.{0}
DROP TABLE IF EXISTS public.{0};
CREATE TABLE public.{0}
(
json_data jsonb NOT NULL
)
WITH (
OIDS = FALSE
)
TABLESPACE pg_default;
"""
def update_to_latest(url: str, bars_table: str, noncache_provider: typing.Callable,
                     symbols: set = None,
                     time_delta_back: relativedelta = relativedelta(years=5),
                     skip_if_older_than: relativedelta = None,
                     cluster: bool = False):
con = psycopg2.connect(url)
con.autocommit = True
cur = con.cursor()
cur.execute("SELECT to_regclass('public.{0}')".format(bars_table))
exists = [t for t in cur][0][0] is not None
if not exists:
cur.execute(create_bars.format(bars_table))
if exists:
logging.getLogger(__name__).info("Skim off the top...")
cur.execute("delete from {0} where (symbol, timestamp, interval) in (select symbol, max(timestamp) as timestamp, interval from {0} group by symbol, interval)".format(bars_table))
logging.getLogger(__name__).info("Ranges...")
ranges = pd.read_sql("select symbol, max(timestamp) as timestamp, interval from {0} group by symbol, interval".format(bars_table), con=con, index_col=['symbol'])
if not ranges.empty:
ranges['timestamp'] = ranges['timestamp'].dt.tz_localize('UTC')
new_symbols = set() if symbols is None else symbols
if skip_if_older_than is not None:
skip_if_older_than = datetime.datetime.utcnow().replace(tzinfo=tz.gettz('UTC')) - skip_if_older_than
filters = dict()
for row in ranges.iterrows():
interval_len, interval_type = int(row[1][1].split('_')[0]), row[1][1].split('_')[1]
if (row[0], interval_len, interval_type) in new_symbols:
new_symbols.remove((row[0], interval_len, interval_type))
bgn_prd = row[1][0].to_pydatetime()
if skip_if_older_than is None or bgn_prd > skip_if_older_than:
filters[BarsFilter(ticker=row[0], bgn_prd=bgn_prd, interval_len=interval_len, interval_type=interval_type)] = None
bgn_prd = datetime.datetime.combine(datetime.datetime.utcnow().date() - time_delta_back, datetime.datetime.min.time()).replace(tzinfo=tz.gettz('UTC'))
for (symbol, interval_len, interval_type) in new_symbols:
filters[BarsFilter(ticker=symbol, bgn_prd=bgn_prd, interval_len=interval_len, interval_type=interval_type)] = None
logging.getLogger(__name__).info("Updating " + str(len(filters)) + " total symbols and intervals; New symbols and intervals: " + str(len(new_symbols)))
q = queue.Queue(maxsize=100)
threading.Thread(target=partial(noncache_provider, filters=filters, q=q), daemon=True).start()
global_counter = {"counter": 0}
def worker():
con = psycopg2.connect(url)
con.autocommit = True
while True:
tupl = q.get()
if tupl is None:
q.put(None)
return
ft, df = filters[tupl[0]], tupl[1]
# Prepare data
for c in [c for c in df.columns if c not in ['symbol', 'open', 'high', 'low', 'close', 'volume']]:
del df[c]
df['interval'] = str(ft.interval_len) + '_' + ft.interval_type
if df.iloc[0].name == ft.bgn_prd:
df = df.iloc[1:]
try:
insert_df(con, bars_table, df)
except Exception as err:
logging.getLogger(__name__).error("Error saving " + ft.ticker)
logging.getLogger(__name__).exception(err)
global_counter['counter'] += 1
i = global_counter['counter']
if i > 0 and (i % 20 == 0 or i == len(filters)):
logging.getLogger(__name__).info("Cached " + str(i) + " queries")
threads = [threading.Thread(target=worker) for _ in range(2)]
for t in threads:
t.start()
for t in threads:
t.join()
logging.getLogger(__name__).info("Done inserting data")
if not exists:
logging.getLogger(__name__).info("Creating indices...")
cur.execute(bars_indices.format(bars_table))
elif cluster:
logging.getLogger(__name__).info("Cluster...")
cur.execute("CLUSTER {0}".format(bars_table))
def request_bars(conn, bars_table: str, interval_len: int, interval_type: str,
                 symbol: typing.Union[list, str] = None,
                 bgn_prd: datetime.datetime = None,
                 end_prd: datetime.datetime = None,
                 ascending=True, selection='*'):
"""
Request bar data
:param conn: connection
:param bars_table: table name
:param interval_len: interval len
:param interval_type: interval type
:param symbol: symbol or a list of symbols
:param bgn_prd: start period (including)
:param end_prd: end period (excluding)
:param ascending: asc/desc
:param selection: what to select
:return: dataframe
"""
where, params = __bars_query_where(interval_len=interval_len, interval_type=interval_type, symbol=symbol, bgn_prd=bgn_prd, end_prd=end_prd)
sort = 'ASC' if ascending else 'DESC'
df = pd.read_sql("SELECT " + selection + " FROM " + bars_table + where + " ORDER BY timestamp " + sort + ", symbol", con=conn, index_col=['timestamp', 'symbol'], params=params)
if not df.empty:
del df['interval']
df = df.tz_localize('UTC', level='timestamp', copy=False)
if isinstance(symbol, str):
df.reset_index(level='symbol', inplace=True, drop=True)
for c in [c for c in ['volume', 'total_volume', 'number_of_trades'] if c in df.columns]:
df[c] = df[c].astype('uint64')
return df
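# A minimal usage sketch (not part of the original module). It assumes a
# reachable Postgres instance with a populated bars table; the DSN and the
# table name 'bars_1d' below are illustrative placeholders.
def _example_request_bars():
    con = psycopg2.connect("dbname=test user=postgres")  # hypothetical DSN
    try:
        # daily bars for two symbols over January 2018, oldest first
        return request_bars(con, bars_table='bars_1d', interval_len=1,
                            interval_type='d', symbol=['AAPL', 'IBM'],
                            bgn_prd=datetime.datetime(2018, 1, 1),
                            end_prd=datetime.datetime(2018, 2, 1))
    finally:
        con.close()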
def request_symbol_counts(conn, bars_table: str, interval_len: int, interval_type: str,
                          symbol: typing.Union[list, str] = None,
                          bgn_prd: datetime.datetime = None,
                          end_prd: datetime.datetime = None):
"""
Request number of bars for each symbol
:param conn: connection
:param bars_table: table name
:param interval_len: interval len
:param interval_type: interval type
:param symbol: symbol or a list of symbols
:param bgn_prd: start period (including)
:param end_prd: end period (excluding)
:return: series
"""
where, params = __bars_query_where(interval_len=interval_len, interval_type=interval_type, symbol=symbol, bgn_prd=bgn_prd, end_prd=end_prd)
result = pd.read_sql("SELECT symbol, count(*) as count FROM " + bars_table + where + " GROUP BY symbol ORDER BY symbol ASC", con=conn, index_col='symbol', params=params)
if not result.empty:
result['count'] = result['count'].astype('uint64')
return result['count']
def __bars_query_where(interval_len: int, interval_type: str,
                       symbol: typing.Union[list, str] = None,
                       bgn_prd: datetime.datetime = None,
                       end_prd: datetime.datetime = None):
where = " WHERE 1=1"
params = list()
if isinstance(symbol, list):
where += " AND symbol IN (%s)" % ','.join(['%s'] * len(symbol))
params += symbol
elif isinstance(symbol, str):
where += " AND symbol = %s"
params.append(symbol)
if interval_len is not None and interval_type is not None:
where += " AND interval = %s"
params.append(str(interval_len) + '_' + interval_type)
if bgn_prd is not None:
where += " AND timestamp >= %s"
params.append(str(bgn_prd))
if end_prd is not None:
where += " AND timestamp <= %s"
params.append(str(end_prd))
return where, params
def insert_df(conn, table_name: str, df: pd.DataFrame):
"""
insert dataframe to the database
:param conn: db connection
:param table_name: table name
:param df: dataframe to insert
"""
# To CSV
output = StringIO()
df.to_csv(output, sep='\t', header=False)
output.seek(0)
# Insert data
cursor = conn.cursor()
if isinstance(df.index, pd.MultiIndex):
columns = list(df.index.names) + list(df.columns)
else:
columns = [df.index.name] + list(df.columns)
cursor.copy_from(output, table_name, sep='\t', null='', columns=columns)
conn.commit()
cursor.close()
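# A minimal usage sketch (not part of the original module), assuming the
# target table already exists with columns matching the frame's index and
# column names; the DSN and table name are hypothetical.
def _example_insert_df():
    con = psycopg2.connect("dbname=test user=postgres")  # hypothetical DSN
    idx = pd.date_range('2018-01-01', periods=2, name='timestamp')
    df = pd.DataFrame({'symbol': ['AAPL', 'AAPL'],
                       'open': [170.0, 171.0], 'high': [171.0, 172.0],
                       'low': [169.0, 170.0], 'close': [170.5, 171.5],
                       'volume': [1000, 1200], 'interval': ['1_d', '1_d']},
                      index=idx)
    insert_df(con, 'bars_1d', df)
    con.close()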
def insert_df_json(conn, table_name: str, df: pd.DataFrame):
"""
insert dataframe in json table
:param conn: db connection
:param table_name: table name
    :param df: dataframe whose rows (index included) are serialized as json records
"""
insert_json(conn=conn, table_name=table_name, data=df.reset_index().to_json(orient='records', lines=True))
def insert_json(conn, table_name: str, data: str):
"""
insert json data
:param conn: db connection
:param table_name: table name
:param data: json string (or strings, separated by new line character)
"""
output = StringIO(data)
# Insert data
cursor = conn.cursor()
cursor.copy_from(output, table_name, null='', columns=['json_data'])
conn.commit()
cursor.close()
def request_adjustments(conn, table_name: str, symbol: typing.Union[list, str] = None,
                        bgn_prd: datetime.datetime = None,
                        end_prd: datetime.datetime = None,
                        adj_type: str = None, provider: str = None):
"""
    request the stored splits/dividends from the database
    :param conn: db connection
    :param table_name: table name
    :param symbol: symbol / list of symbols
    :param bgn_prd: begin period
    :param end_prd: end period
    :param provider: data provider
    :param adj_type: adjustment type (split/dividend)
    :return: dataframe of the matching adjustment records
"""
where = " WHERE 1=1"
params = list()
if isinstance(symbol, list):
where += " AND json_data ->> 'symbol' IN (%s)" % ','.join(['%s'] * len(symbol))
params += symbol
elif isinstance(symbol, str):
where += " AND json_data ->> 'symbol' = %s"
params.append(symbol)
if bgn_prd is not None:
where += " AND CAST(json_data ->> 'timestamp' AS BIGINT) >= %s"
params.append(int(bgn_prd.timestamp() * 1000))
if end_prd is not None:
where += " AND CAST(json_data ->> 'timestamp' AS BIGINT) <= %s"
params.append(int(end_prd.timestamp() * 1000))
if provider is not None:
where += " AND json_data ->> 'provider' = %s"
params.append(provider)
if adj_type is not None:
where += " AND json_data ->> 'type' = %s"
params.append(adj_type)
else:
where += " AND json_data ->> 'type' in ('split', 'dividend')"
cursor = conn.cursor()
cursor.execute("select * from {0} {1}".format(table_name, where), params)
records = cursor.fetchall()
    if len(records) > 0:
        df = pd.DataFrame([x[0] for x in records])
        # the records store epoch-millisecond timestamps (see the filters
        # above); converting them back to datetimes and returning the frame
        # is an assumption about the truncated original
        df['timestamp'] = pd.to_datetime(df['timestamp'], unit='ms')
        return df
import time
import numpy as np
import pandas as pd
from pyomo.environ import *
from sklearn.cluster import AgglomerativeClustering, KMeans
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import MinMaxScaler
def time_model_solve(inputs, renewable, load, weight=None):
# Benchmark method
horizon = len(load) # Time horizon
m = ConcreteModel() # Model preparation and initial parameters
m.eta = inputs["effi"]
m.rps = inputs["rps"]
m.horizon = horizon
m.H = RangeSet(0, m.horizon - 1)
m.renewable = renewable
renewable_type = len(renewable)
m.R = RangeSet(0, renewable_type - 1)
m.load = load
m.c_bat = inputs["c_bat"]
m.c_bat_power = inputs["c_bat_power"]
m.c_renewable = [inputs['c_wind'], inputs['c_pv']]
m.c_gen_inv = inputs["c_gen_inv"]
m.mdc = inputs["mdc"]
m.c_gen_a = inputs["c_gen_a"]
m.c_gen_b = inputs["c_gen_b"]
if weight is None:
weight = np.ones(horizon)
m.weight = weight
    sum_load = sum(m.load[i] * m.weight[i] for i in m.H)  # built-in sum; np.sum over a generator is deprecated
m.renewable_cap = Var(m.R, domain=NonNegativeReals)
m.max_energy = Var(domain=NonNegativeReals) # Battery energy capacity
m.max_power = Var(domain=NonNegativeReals) # Battery power capacity
m.pd = Var(m.H, domain=Reals) # Battery discharging power at time i
m.pc = Var(m.H, domain=Reals) # Battery charging power at time i
m.e = Var(m.H, domain=Reals) # Battery Energy Stored (SOC) at time i
m.curtail = Var(m.H, domain=Reals) # Curtailed renewable energy at time i
m.gen = Var(m.H, domain=Reals) # Thermal generator power output at time i
m.total_cost = Var(domain=Reals)
m.cost_sto_inv = Var(domain=Reals)
m.cost_pv_inv = Var(domain=Reals)
m.cost_gen_inv = Var(domain=Reals)
m.cost_var = Var(domain=Reals)
# Number of online thermal generator units at time i
m.n = Var(m.H, domain=NonNegativeIntegers)
# Number of starting-up thermal generator units at time i
m.n_start = Var(m.H, domain=NonNegativeIntegers)
# Number of shutting-down thermal generator units at time i
m.n_shut = Var(m.H, domain=NonNegativeIntegers)
# Number of thermal generator unit number
m.N = Var(domain=NonNegativeIntegers)
m.gen_cap = inputs["gen_cap"]
m.up_time = inputs["up_time"] # Minimum online time
m.down_time = inputs["down_time"] # Minimum offline time
# constraints set
function_i = 0
function_list = []
def fun(m, i):
return m.gen[i] + sum(m.renewable_cap[r] * renewable[r][i] for r in m.R) + m.pd[i] - m.pc[i] - m.curtail[i] - \
m.load[i] == 0
function_list.append(fun)
m.balance_cons = Constraint(m.H, rule=function_list[function_i]) # Load balance
function_i += 1
def fun(m, i):
if i == 0:
return m.e[i] - m.e[m.horizon - 1] + m.pd[m.horizon - 1] / m.eta - m.pc[m.horizon - 1] * m.eta == 0
else:
return m.e[i] - (m.e[i - 1] - m.pd[i - 1] / m.eta + m.pc[i - 1] * m.eta) == 0
function_list.append(fun)
m.soc3 = Constraint(m.H, rule=function_list[function_i]) # Storage constraints-storage change
function_i += 1
def fun(m, i):
return m.e[i] >= 0
function_list.append(fun)
m.soc1 = Constraint(m.H, rule=function_list[function_i]) # Storage constraints-nonnegative storage
function_i += 1
def fun(m, i):
return m.e[i] - m.max_energy <= 0
function_list.append(fun)
m.soc2 = Constraint(m.H, rule=function_list[function_i]) # Storage constraints-maximum storage
function_i += 1
def fun(m, i):
return m.pc[i] >= 0
function_list.append(fun)
m.var_b6 = Constraint(m.H, rule=function_list[function_i]) # Storage constraints-nonnegative charging
function_i += 1
def fun(m, i):
return m.pd[i] >= 0
function_list.append(fun)
m.var_b7 = Constraint(m.H, rule=function_list[function_i]) # Storage constraints-nonnegative discharging
function_i += 1
def fun(m, i):
return m.pc[i] <= m.max_power
function_list.append(fun)
m.var_b8 = Constraint(m.H, rule=function_list[function_i]) # Storage constraints-maximum charging power
function_i += 1
def fun(m, i):
return m.pd[i] <= m.max_power
function_list.append(fun)
m.var_b9 = Constraint(m.H, rule=function_list[function_i]) # Storage constraints-maximum discharging power
function_i += 1
def fun(m, i):
return m.curtail[i] >= 0
function_list.append(fun)
m.var_b3 = Constraint(m.H, rule=function_list[function_i]) #
function_i += 1
def fun(m, i):
return m.gen[i] - 0.1 * m.gen_cap * m.n[i] >= 0
function_list.append(fun)
m.var_gen1 = Constraint(m.H, rule=function_list[function_i])
# Thermal generator constraints-minimum generation percentage(0.1)
function_i += 1
def fun(m, i):
return m.gen[i] - m.n[i] * m.gen_cap <= 0
function_list.append(fun)
m.var_gen2 = Constraint(m.H, rule=function_list[function_i])
# Thermal generator constraints-maximum generation percentage(1.0)
function_i += 1
def fun(m, i):
return m.n[i] - m.N <= 0
function_list.append(fun)
m.var_gen3 = Constraint(m.H, rule=function_list[function_i]) # Thermal generator constraints-maximum units
function_i += 1
def fun(m, i):
return m.n[i] >= 0
function_list.append(fun)
m.var_gen4 = Constraint(m.H, rule=function_list[function_i]) # Thermal generator constraints-minimum units
function_i += 1
def fun(m, i):
if i == 0:
return m.n[i] - m.n[m.horizon - 1] - (m.n_start[m.horizon - 1] - m.n_shut[m.horizon - 1]) == 0
else:
return m.n[i] - m.n[i - 1] - (m.n_start[i - 1] - m.n_shut[i - 1]) == 0
function_list.append(fun)
m.var_gen5 = Constraint(m.H, rule=function_list[function_i]) # Thermal generator constraints-units change
function_i += 1
def fun(m, i):
return m.n_start[i] >= 0
function_list.append(fun)
m.var_gen6 = Constraint(m.H, rule=function_list[function_i]) # Thermal generator constraints-nonnegative start_up
function_i += 1
def fun(m, i):
if i >= m.up_time:
t_start_list = np.arange(i - m.up_time, i)
else:
t_start_list = np.arange(0, i)
return m.n[i] - sum(m.n_start[k] for k in t_start_list) >= 0
function_list.append(fun)
m.var_gen7 = Constraint(m.H, rule=function_list[function_i]) # Thermal generator constraints-up_time
function_i += 1
def fun(m, i):
if i >= m.down_time:
t_shut_list = np.arange(i - m.down_time, i)
else:
t_shut_list = np.arange(0, i)
return m.N - m.n[i] - sum(m.n_shut[k] for k in t_shut_list) >= 0
function_list.append(fun)
m.var_gen8 = Constraint(m.H, rule=function_list[function_i]) # Thermal generator constraints-down_time
function_i += 1
def fun(m, i):
return m.n_shut[i] >= 0
function_list.append(fun)
m.var_gen9 = Constraint(m.H, rule=function_list[function_i]) # Thermal generator constraints-nonnegative shut_down
function_i += 1
def fun(m):
return sum(m.gen[i]*m.weight[i] for i in m.H) / sum_load - (1 - m.rps) <= 0
function_list.append(fun)
m.rps_limit = Constraint(rule=function_list[function_i]) # Thermal generator constraints-total percentage
function_i += 1
def obj_value(m):
return m.total_cost
def obj_function(m):
return m.total_cost == m.cost_var + m.cost_sto_inv + m.cost_pv_inv + m.cost_gen_inv
m.OF = Constraint(rule=obj_function)
    def cost_var_cal(m):
        return m.cost_var == sum(m.weight[i] * (m.pd[i] + m.pc[i]) for i in m.H) * m.mdc + \
               sum(m.weight[i] * m.gen[i] for i in m.H) * m.c_gen_b  # weight of variable costs
    m.rev = Constraint(rule=cost_var_cal)
def cost_storage_cal(m):
return m.cost_sto_inv == m.max_energy * m.c_bat + m.max_power * m.c_bat_power
m.cost_bat = Constraint(rule=cost_storage_cal) # Storage cost
def cost_renewable_cal(m):
return m.cost_pv_inv == sum(m.renewable_cap[r] * m.c_renewable[r] for r in m.R)
m.cost_pv = Constraint(rule=cost_renewable_cal) # Renewable cost
def cost_gen_cal(m):
return m.cost_gen_inv == m.gen_cap * m.c_gen_inv * m.N
m.cost_gen = Constraint(rule=cost_gen_cal) # Thermal cost
m.OBJ = Objective(rule=obj_value, sense=minimize)
    # Solver: Gurobi when inputs["solver"] == 1, otherwise CPLEX; both share
    # the time limit, and print_log == 1 streams the solver log to stdout
    if inputs["solver"] == 1:
        opt = SolverFactory('gurobi', executable="/usr/local/gurobi/linux64/bin/gurobi.sh")
    else:
        opt = SolverFactory('cplex')
    opt.options['timelimit'] = inputs["timelimit"]
    results = opt.solve(m, tee=(inputs["print_log"] == 1))
    return m
def sim_features(config, wind_all, pv_all, load_all):
"""
Run the optimization model for each period and generate a DataFrame containing the simulated features
specified in settings from config.
"""
inputs = config['inputs']
settings = config['settings']
inputs['c_renewable'] = [inputs['c_wind'], inputs['c_pv']]
features = []
feature_set = settings['feature_set']
period = settings['period']
day_num = settings['day_num']
periods = settings['day_num']*24//period
time_set = np.arange(24*day_num) # Time horizon specified to hour
renewable = [wind_all[time_set, settings['profile_id']], pv_all[time_set, settings['profile_id']]] if 'profile_id' in settings\
else [wind_all[time_set], pv_all[time_set]] # for NE
nrenewable = len(renewable)
period_renewable = [renewable[r].reshape(
periods, period) for r in range(nrenewable)]
period_load = load_all[time_set].reshape(periods, period)
for w in range(periods):
renewable = [r[w] for r in period_renewable]
load = period_load[w]
m = time_model_solve(inputs, renewable, load)
results = {}
for v in feature_set:
var_object = getattr(m, v)
if var_object.is_indexed():
for t in range(len(var_object)):
results[v+'_'+str(t)] = var_object[t].value
else:
results[v] = var_object.value
features.append(results)
return pd.DataFrame(features)
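# A minimal usage sketch (not part of the original module). Every numeric
# value below is an illustrative placeholder, not a calibrated input; the
# arrays are expected to be hourly profiles of length day_num * 24.
def _example_sim_features(wind_all, pv_all, load_all):
    inputs = dict(effi=0.9, rps=0.6, c_bat=20., c_bat_power=90., c_wind=100.,
                  c_pv=70., c_gen_inv=50., mdc=1., c_gen_a=0., c_gen_b=30.,
                  gen_cap=50, up_time=4, down_time=4, solver=2,
                  timelimit=600, print_log=0)
    settings = dict(feature_set=['total_cost', 'max_energy'],
                    period=168, day_num=7)
    return sim_features({'inputs': inputs, 'settings': settings},
                        wind_all, pv_all, load_all)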
def cluster(settings, data, cluster_log=False):
"""
Given a dictionary of settings and a dataframe of data points to cluster, return the weight of
each representative week and the representative renewable and load values.
If cluster_log=True, returns a dictionary mapping cluster labels to points in each cluster.
"""
method = settings['method']
# Used for kmeans random state
init = settings['init'] if 'init' in settings else None
connectivity = settings['connectivity'] if 'connectivity' in settings else False
chronology = settings['chronology'] if 'chronology' in settings else False
ncluster = settings['ncluster']
df = data.copy()
period = settings['period']
periods = settings['periods']
nrenewable = settings['nrenewable']
period_df = settings['period_df']
renewable_range = [np.arange(r*period, (r+1)*period)
for r in range(nrenewable)]
load_range = np.arange(nrenewable*period, (nrenewable+1)*period)
if method == 'kmeans':
kmeans = KMeans(n_clusters=ncluster, random_state=init)
kmeans.fit(df)
centroids = kmeans.cluster_centers_
labels = kmeans.labels_
df['cluster'] = labels
else:
if connectivity:
# generate connectivity matrix
connections = kneighbors_graph(df, 10, include_self=False)
else:
connections = None
agglomerative = AgglomerativeClustering(
n_clusters=ncluster, linkage=method, connectivity=connections)
agglomerative.fit(df)
labels = agglomerative.labels_
n_features = len(df.columns)
df['cluster'] = labels
lens = {}
centroids = {}
for w in range(periods):
label = df.loc[w, 'cluster']
centroids.setdefault(label, [0]*n_features)
centroids[label] += df.loc[w, df.columns != 'cluster']
lens.setdefault(label, 0)
lens[label] += 1
for k in centroids:
centroids[k] /= float(lens[k])
weights = np.bincount(labels) # per period
weight = np.repeat(weights, period)
clusters = {}
for k in range(ncluster):
clusters[k] = df.loc[df['cluster'] == k]
# assuming only for combined case
if 'centroid' in settings and settings['centroid'] and settings['trial'] == 'combined':
rep_renewable = [np.concatenate([centroids[k][r] for k in range(ncluster)]) for r in renewable_range]
rep_load = np.concatenate([centroids[k][load_range]
for k in range(ncluster)])*100
else:
# Find representative points
rep = [None]*ncluster
for k in range(ncluster):
dist = {}
for j in range(weights[k]):
dist[clusters[k].index[j]] = np.linalg.norm(
df.loc[clusters[k].index[j], df.columns != 'cluster']-centroids[k][:])
            rep[k] = min(dist, key=dist.get)  # point closest to the centroid
if chronology:
rep.sort()
#print('representative week indices:', rep)
renewable = [period_df.loc[rep, r] for r in renewable_range]
load = period_df.loc[rep, load_range]*100
rep_renewable = [np.concatenate(
[renewable[r].loc[j, :] for j in rep]) for r in range(nrenewable)]
rep_load = np.concatenate([load.loc[j, :] for j in rep])
if cluster_log:
return weight, rep_renewable, rep_load, clusters
return weight, rep_renewable, rep_load
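# A minimal usage sketch (not part of the original module): cluster 52 weekly
# periods of synthetic wind/PV/load into 4 representative weeks; the shapes
# and settings keys mirror the ones prepared in run_trials below.
def _example_cluster():
    periods, period, nrenewable = 52, 168, 2
    data = np.random.rand(periods, (nrenewable + 1) * period)
    period_df = pd.DataFrame(data)
    settings = dict(method='kmeans', init=0, ncluster=4, period=period,
                    periods=periods, nrenewable=nrenewable,
                    period_df=period_df)
    weight, rep_renewable, rep_load = cluster(settings, period_df)
    return weight, rep_renewable, rep_load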
def test_clustering(inputs, settings, expected, data):
"""
Cluster the data using the settings, run the optimization model with the representative scenario generated,
and calculate the relative error with respect to the benchmark (expected).
"""
feature_set = ['renewable_cap', 'N', 'max_energy', 'max_power', 'total_cost'] \
if inputs['gen_cap'] == 1 else [
'renewable_cap', 'max_energy', 'max_power', 'total_cost']
error_terms = ['renewable_cap_0', 'renewable_cap_1', 'N', 'max_energy', 'max_power', 'total_cost'] \
if inputs['gen_cap'] == 1 else [
'renewable_cap_0', 'renewable_cap_1', 'max_energy', 'max_power', 'total_cost']
weight, rep_renewable, rep_load = cluster(settings, data)
m = time_model_solve(inputs, rep_renewable, rep_load, weight)
opt_results = {}
errors = {}
# for v in m.component_objects(Var, active=True):
for v in feature_set:
var_object = getattr(m, str(v))
if var_object.is_indexed():
opt_results[str(v)] = []
for t in range(len(var_object)):
opt_results[v].append(var_object[t].value)
elif len(var_object) == 1:
opt_results[v] = var_object.value
    for ridx in range(len(opt_results['renewable_cap'])):
        opt_results['renewable_cap_{}'.format(ridx)] = opt_results['renewable_cap'][ridx]
for e in error_terms:
errors[e + '_err'] = abs(opt_results[e] - expected[e])/(expected[e]+0.0001)
results = {**opt_results, **errors, 'mae': sum(value for value in errors.values()) / len(errors)}
return results
def run_trials(config, wind_all, pv_all, load_all, expected, features):
"""
Export a dataframe containing the results of clustering with the specified settings.
"""
inputs = config['inputs']
settings = config['settings']
ranges = config['ranges']
day_num = settings['day_num']
time_set = np.arange(24*day_num)
if 'profile_id' in settings:
profile_id = settings['profile_id']
renewable = [wind_all[time_set, profile_id],pv_all[time_set, profile_id]]
else:
# Denote that first profile is used and no other profile exists.
profile_id = -1
renewable = [wind_all[time_set], pv_all[time_set]]
nrenewable = len(renewable)
settings['nrenewable'] = nrenewable
period = settings['period']
periods = settings['day_num']*24//period
settings['periods'] = periods
period_renewable = [renewable[r].reshape(
periods, period) for r in range(nrenewable)]
period_load = load_all[time_set].reshape(periods, period)
period_data = np.hstack(period_renewable+[period_load])
period_df = | pd.DataFrame(period_data) | pandas.DataFrame |
import pandas as pd
import numpy as np
import h5py
from tqdm import tqdm
class PredExpr:
def __init__(self, fn):
self.fn = fn
with h5py.File(self.fn, 'r') as f:
tmp = f['samples'][:].astype(str)
self.samples = pd.DataFrame({
'eid': tmp,
'idx': [ i for i in range(len(tmp)) ]
})
tmp = f['genes'][:].astype(str)
self.genes = pd.DataFrame({
'gene': tmp,
'idx': [ i for i in range(len(tmp)) ]
})
    @staticmethod
    def _get_range(n, chunksize=500):
        # chunk the half-open range [0, n) into (start, end) boundaries of at
        # most `chunksize`; np.arange never includes the stop value, so n is
        # always appended (this also handles n == 0 without indexing errors)
        tmp = list(np.arange(0, n, chunksize)) + [n]
        return tmp[:-1], tmp[1:]
    def mul_weights(self, df_weight, samples, max_n=None, chunksize=1000):
        """Project predicted expression onto per-gene weights for the samples.

        `df_weight` needs a 'gene' column plus one or more weight columns;
        genes not listed in `df_weight` contribute zero. Returns one row per
        matched sample eid and one score column per weight column.
        """
df_sample_sub = pd.merge(
self.samples,
pd.DataFrame({'eid': samples}),
on='eid'
)
if max_n is not None and max_n < df_sample_sub.shape[0]:
df_sample_sub = df_sample_sub.iloc[:max_n, :].reset_index(drop=True)
df_weight_sub = pd.merge(
self.genes[['gene']],
df_weight,
on='gene', how='left'
)
df_weight_sub.fillna(0, inplace=True)
weight_mat = df_weight_sub.drop(columns=['gene']).values
header = list(df_weight_sub.drop(columns=['gene']).columns)
sample_idx = df_sample_sub.idx.values
starts, ends = self._get_range(sample_idx.shape[0], chunksize=chunksize)
o = []
f = h5py.File(self.fn, 'r')
for s, e in tqdm(zip(starts, ends), total=len(starts)):
mat = f['pred_expr'][:, sample_idx[s:e]]
mat = mat.T @ weight_mat
o.append(mat)
f.close()
o = np.concatenate(o, axis=0)
o = pd.DataFrame(o, columns=header)
o = pd.concat([
pd.DataFrame({'eid': df_sample_sub.eid}),
o
], axis=1)
return o
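# A minimal usage sketch (not part of the original module). The HDF5 path and
# the weight table below are illustrative placeholders.
def _example_mul_weights():
    pred = PredExpr('pred_expr.h5')  # hypothetical file with samples/genes/pred_expr
    df_weight = pd.DataFrame({'gene': ['ENSG0001', 'ENSG0002'],
                              'score': [0.4, -1.2]})
    samples = list(pred.samples.eid[:100])
    return pred.mul_weights(df_weight, samples, chunksize=500)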
def pxcan2weight(df_spxcan, pval_cutoffs, weight_col='effect_size'):
pp = np.sort(np.array(pval_cutoffs))
oo = None
cols = [ 'gene' ]
    for p in pp[::-1]:
        sub = df_spxcan[df_spxcan.pvalue <= p][['gene', weight_col]].copy()
        # one weight column per cutoff so the repeated merges on 'gene' do
        # not collide on column names (the naming scheme is an assumption
        # about the truncated original)
        colname = '{}_pval_{}'.format(weight_col, p)
        sub.columns = ['gene', colname]
        cols.append(colname)
        if oo is None:
            oo = sub
        else:
            oo = pd.merge(oo, sub, on='gene', how='left')
    # genes that drop out under a stricter cutoff get zero weight (assumed
    # behavior of the truncated original)
    return oo[cols].fillna(0)
from __future__ import division
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import locale
import os
import re
from shutil import rmtree
import string
import subprocess
import sys
import tempfile
import traceback
import warnings
import numpy as np
from numpy.random import rand, randn
from pandas._libs import testing as _testing
import pandas.compat as compat
from pandas.compat import (
PY2, PY3, Counter, StringIO, callable, filter, httplib, lmap, lrange, lzip,
map, raise_with_traceback, range, string_types, u, unichr, zip)
from pandas.core.dtypes.common import (
is_bool, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype,
is_datetimelike_v_numeric, is_datetimelike_v_object,
is_extension_array_dtype, is_interval_dtype, is_list_like, is_number,
is_period_dtype, is_sequence, is_timedelta64_dtype, needs_i8_conversion)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical, CategoricalIndex, DataFrame, DatetimeIndex, Index,
IntervalIndex, MultiIndex, Panel, PeriodIndex, RangeIndex, Series,
bdate_range)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArrayMixin as DatetimeArray, ExtensionArray, IntervalArray,
PeriodArray, TimedeltaArrayMixin as TimedeltaArray, period_array)
import pandas.core.common as com
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, compat.ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('always', _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('ignore', _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option('^display.', silent=True)
def round_trip_pickle(obj, path=None):
"""
Pickle an object and then read it again.
Parameters
----------
obj : pandas object
The object to pickle and then re-read.
path : str, default None
The path where the pickled object is written and then read.
Returns
-------
round_trip_pickled_object : pandas object
The original object that was pickled and then re-read.
"""
if path is None:
path = u('__{random_bytes}__.pickle'.format(random_bytes=rands(10)))
with ensure_clean(path) as path:
pd.to_pickle(obj, path)
return pd.read_pickle(path)
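# Doctest-style sketch (not part of the original module):
#
# >>> df = pd.DataFrame({'a': [1, 2, 3]})
# >>> result = round_trip_pickle(df)
# >>> assert_frame_equal(result, df)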
def round_trip_pathlib(writer, reader, path=None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip('pathlib').Path
if path is None:
path = '___pathlib___'
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path=None):
"""
Write an object to file specified by a py.path LocalPath and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip('py.path').local
if path is None:
path = '___localpath___'
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object
Parameters
----------
path : str
The path where the file is read from
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
f : file object
"""
if compression is None:
f = open(path, 'rb')
elif compression == 'gzip':
import gzip
f = gzip.open(path, 'rb')
elif compression == 'bz2':
import bz2
f = bz2.BZ2File(path, 'rb')
elif compression == 'xz':
lzma = compat.import_lzma()
f = lzma.LZMAFile(path, 'rb')
elif compression == 'zip':
import zipfile
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
f = zip_file.open(zip_names.pop())
else:
raise ValueError('ZIP file {} error. Only one file per ZIP.'
.format(path))
else:
msg = 'Unrecognized compression type: {}'.format(compression)
raise ValueError(msg)
try:
yield f
finally:
f.close()
if compression == "zip":
zip_file.close()
def assert_almost_equal(left, right, check_dtype="equiv",
check_less_precise=False, **kwargs):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool / string {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
"""
if isinstance(left, pd.Index):
return assert_index_equal(left, right,
check_exact=False,
exact=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.Series):
return assert_series_equal(left, right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.DataFrame):
return assert_frame_equal(left, right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if (isinstance(left, np.ndarray) or
isinstance(right, np.ndarray)):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
return _testing.assert_almost_equal(
left, right,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
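# Doctest-style sketch of the precision semantics described above (not part
# of the original module): with check_less_precise=True the two floats only
# need to agree to roughly three decimal digits.
#
# >>> assert_almost_equal(1.000001, 1.000002, check_less_precise=True)
# >>> assert_almost_equal(np.array([0.1, 0.2]), np.array([0.1, 0.2]))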
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
err_msg = "{name} Expected type {exp_type}, found {act_type} instead"
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
act_type=type(left)))
if not isinstance(right, cls):
raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
act_type=type(right)))
def assert_dict_equal(left, right, compare_keys=True):
_check_isinstance(left, right, dict)
return _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p=0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
RANDU_CHARS = np.array(list(u("").join(map(unichr, lrange(1488, 1488 + 26))) +
string.digits), dtype=(np.unicode_, 1))
def rands_array(nchars, size, dtype='O'):
"""Generate an array of byte strings."""
retval = (np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def randu_array(nchars, size, dtype='O'):
"""Generate an array of unicode strings."""
retval = (np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return ''.join(np.random.choice(RANDS_CHARS, nchars))
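# Doctest-style sketch (not part of the original module): rands draws from
# RANDS_CHARS, so the output is alphanumeric ASCII.
#
# >>> len(rands(4))
# 4
# >>> rands_array(3, (2, 2)).shape
# (2, 2)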
def randu(nchars):
"""
Generate one random unicode string.
See `randu_array` if you want to create an array of random unicode strings.
"""
return ''.join(np.random.choice(RANDU_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import get_fignums, close as _close
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# locale utilities
def check_output(*popenargs, **kwargs):
# shamelessly taken from Python 2.7 source
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
*popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def _default_locale_getter():
try:
raw_locales = check_output(['locale -a'], shell=True)
except subprocess.CalledProcessError as e:
raise type(e)("{exception}, the 'locale -a' command cannot be found "
"on your system".format(exception=e))
return raw_locales
def get_locales(prefix=None, normalize=True,
locale_getter=_default_locale_getter):
"""Get all the locales that are available on the system.
Parameters
----------
prefix : str
If not ``None`` then return only those locales with the prefix
provided. For example to get all English language locales (those that
start with ``"en"``), pass ``prefix="en"``.
normalize : bool
Call ``locale.normalize`` on the resulting list of available locales.
If ``True``, only locales that can be set without throwing an
``Exception`` are returned.
locale_getter : callable
The function to use to retrieve the current locales. This should return
a string with each locale separated by a newline character.
Returns
-------
locales : list of strings
A list of locale strings that can be set with ``locale.setlocale()``.
For example::
locale.setlocale(locale.LC_ALL, locale_string)
On error will return None (no locale available, e.g. Windows)
"""
try:
raw_locales = locale_getter()
except Exception:
return None
try:
# raw_locales is "\n" separated list of locales
# it may contain non-decodable parts, so split
# extract what we can and then rejoin.
raw_locales = raw_locales.split(b'\n')
out_locales = []
for x in raw_locales:
if PY3:
out_locales.append(str(
x, encoding=pd.options.display.encoding))
else:
out_locales.append(str(x))
except TypeError:
pass
if prefix is None:
return _valid_locales(out_locales, normalize)
pattern = re.compile('{prefix}.*'.format(prefix=prefix))
found = pattern.findall('\n'.join(out_locales))
return _valid_locales(found, normalize)
@contextmanager
def set_locale(new_locale, lc_var=locale.LC_ALL):
"""Context manager for temporarily setting a locale.
Parameters
----------
new_locale : str or tuple
A string of the form <language_country>.<encoding>. For example to set
the current locale to US English with a UTF8 encoding, you would pass
"en_US.UTF-8".
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Notes
-----
This is useful when you want to run a particular block of code under a
particular locale, without globally setting the locale. This probably isn't
thread-safe.
"""
current_locale = locale.getlocale()
try:
locale.setlocale(lc_var, new_locale)
normalized_locale = locale.getlocale()
if com._all_not_none(*normalized_locale):
yield '.'.join(normalized_locale)
else:
yield new_locale
finally:
locale.setlocale(lc_var, current_locale)
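# Doctest-style sketch (not part of the original module); the locale string
# must actually be installed on the system for this to succeed.
#
# >>> with set_locale('en_US.UTF-8') as loc:
# ...     print(loc)
# en_US.UTF-8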
def can_set_locale(lc, lc_var=locale.LC_ALL):
"""
Check to see if we can set a locale, and subsequently get the locale,
without raising an Exception.
Parameters
----------
lc : str
The locale to attempt to set.
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Returns
-------
is_valid : bool
Whether the passed locale can be set
"""
try:
with set_locale(lc, lc_var=lc_var):
pass
except (ValueError,
locale.Error): # horrible name for a Exception subclass
return False
else:
return True
def _valid_locales(locales, normalize):
"""Return a list of normalized locales that do not throw an ``Exception``
when set.
Parameters
----------
locales : str
A string where each locale is separated by a newline.
normalize : bool
Whether to call ``locale.normalize`` on each locale.
Returns
-------
valid_locales : list
A list of valid locales.
"""
if normalize:
normalizer = lambda x: locale.normalize(x.strip())
else:
normalizer = lambda x: x.strip()
return list(filter(can_set_locale, map(normalizer, locales)))
# -----------------------------------------------------------------------------
# Stdout / stderr decorators
@contextmanager
def set_defaultencoding(encoding):
"""
Set default encoding (as given by sys.getdefaultencoding()) to the given
encoding; restore on exit.
Parameters
----------
encoding : str
"""
if not PY2:
raise ValueError("set_defaultencoding context is only available "
"in Python 2.")
orig = sys.getdefaultencoding()
reload(sys) # noqa:F821
sys.setdefaultencoding(encoding)
try:
yield
finally:
sys.setdefaultencoding(orig)
def capture_stdout(f):
r"""
Decorator to capture stdout in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stdout.
Returns
-------
f : callable
The decorated test ``f``, which captures stdout.
Examples
--------
>>> from pandas.util.testing import capture_stdout
>>> import sys
>>>
>>> @capture_stdout
... def test_print_pass():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stdout
... def test_print_fail():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@compat.wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stdout = StringIO()
f(*args, **kwargs)
finally:
sys.stdout = sys.__stdout__
return wrapper
def capture_stderr(f):
r"""
Decorator to capture stderr in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stderr.
Returns
-------
f : callable
The decorated test ``f``, which captures stderr.
Examples
--------
>>> from pandas.util.testing import capture_stderr
>>> import sys
>>>
>>> @capture_stderr
... def test_stderr_pass():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stderr
... def test_stderr_fail():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@compat.wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stderr = StringIO()
f(*args, **kwargs)
finally:
sys.stderr = sys.__stderr__
return wrapper
# -----------------------------------------------------------------------------
# Console debugging tools
def debug(f, *args, **kwargs):
from pdb import Pdb as OldPdb
try:
from IPython.core.debugger import Pdb
kw = dict(color_scheme='Linux')
except ImportError:
Pdb = OldPdb
kw = {}
pdb = Pdb(**kw)
return pdb.runcall(f, *args, **kwargs)
def pudebug(f, *args, **kwargs):
import pudb
return pudb.runcall(f, *args, **kwargs)
def set_trace():
from IPython.core.debugger import Pdb
try:
Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
except Exception:
from pdb import Pdb as OldPdb
OldPdb().set_trace(sys._getframe().f_back)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False):
"""Gets a temporary path and agrees to remove on close.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
"""
filename = filename or ''
fd = None
if return_filelike:
f = tempfile.TemporaryFile(suffix=filename)
try:
yield f
finally:
f.close()
else:
# don't generate tempfile if using a path with directory specified
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(suffix=filename)
except UnicodeEncodeError:
import pytest
pytest.skip('no unicode file names on this system')
try:
yield filename
finally:
try:
os.close(fd)
except Exception:
print("Couldn't close file descriptor: {fdesc} (file: {fname})"
.format(fdesc=fd, fname=filename))
try:
if os.path.exists(filename):
os.remove(filename)
except Exception as e:
print("Exception on removing file: {error}".format(error=e))
@contextmanager
def ensure_clean_dir():
"""
Get a temporary directory path and agrees to remove on close.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix='')
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except Exception:
pass
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
All changes will be undone on close, hence environment variables set
within this contextmanager will neither persist nor change global state.
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2):
"""Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(left, right, exact='equiv', check_names=True,
check_less_precise=False, check_exact=True,
check_categorical=True, obj='Index'):
"""Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
def _check_types(l, r, obj='Index'):
if exact:
assert_class_equal(l, r, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal('dtype', l, r, obj=obj)
# allow string-like to have different inferred_types
if l.inferred_type in ('string', 'unicode'):
assert r.inferred_type in ('string', 'unicode')
else:
assert_attr_equal('inferred_type', l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
labels = index.codes[level]
filled = take_1d(unique.values, labels, fill_value=unique._na_value)
values = unique._shallow_copy(filled, name=index.names[level])
return values
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = '{obj} levels are different'.format(obj=obj)
msg2 = '{nlevels}, {left}'.format(nlevels=left.nlevels, left=left)
msg3 = '{nlevels}, {right}'.format(nlevels=right.nlevels, right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = '{obj} length are different'.format(obj=obj)
msg2 = '{length}, {left}'.format(length=len(left), left=left)
msg3 = '{length}, {right}'.format(length=len(right), right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
    # MultiIndex special comparison for more user-friendly error messages
if left.nlevels > 1:
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = 'MultiIndex level [{level}]'.format(level=level)
assert_index_equal(llevel, rlevel,
exact=exact, check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact, obj=lobj)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = np.sum((left.values != right.values)
.astype(int)) * 100.0 / len(left)
msg = '{obj} values are different ({pct} %)'.format(
obj=obj, pct=np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(left.values, right.values,
check_less_precise=check_less_precise,
check_dtype=exact,
obj=obj, lobj=left, robj=right)
# metadata comparison
if check_names:
assert_attr_equal('names', left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal('freq', left, right, obj=obj)
if (isinstance(left, pd.IntervalIndex) or
isinstance(right, pd.IntervalIndex)):
assert_interval_array_equal(left.values, right.values)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{obj} category'.format(obj=obj))
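# Doctest-style sketch (not part of the original module): with the default
# exact='equiv', a RangeIndex compares equal to an equivalent Int64Index.
#
# >>> assert_index_equal(pd.RangeIndex(3), pd.Index([0, 1, 2]))
# >>> assert_index_equal(pd.Index([0, 1, 2]), pd.RangeIndex(3))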
def assert_class_equal(left, right, exact=True, obj='Input'):
"""checks classes are equal."""
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
try:
return x.__class__.__name__
except AttributeError:
return repr(type(x))
if exact == 'equiv':
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if len(types - {'Int64Index', 'RangeIndex'}):
msg = '{obj} classes are not equivalent'.format(obj=obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
elif exact:
if type(left) != type(right):
msg = '{obj} classes are different'.format(obj=obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
def assert_attr_equal(attr, left, right, obj='Attributes'):
"""checks attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (is_number(left_attr) and np.isnan(left_attr) and
is_number(right_attr) and np.isnan(right_attr)):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = 'Attribute "{attr}" are different'.format(attr=attr)
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = ("one of 'objs' is not a matplotlib Axes instance, type "
"encountered {name!r}").format(name=el.__class__.__name__)
assert isinstance(el, (plt.Axes, dict)), msg
else:
assert isinstance(objs, (plt.Artist, tuple, dict)), (
'objs is neither an ndarray of Artist instances nor a '
'single Artist instance, tuple, or dict, "objs" is a {name!r}'
.format(name=objs.__class__.__name__))
def isiterable(obj):
return hasattr(obj, '__iter__')
def is_sorted(seq):
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
return assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(left, right, check_dtype=True,
check_category_order=True, obj='Categorical'):
"""Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories,
obj='{obj}.categories'.format(obj=obj))
assert_numpy_array_equal(left.codes, right.codes,
check_dtype=check_dtype,
obj='{obj}.codes'.format(obj=obj))
else:
assert_index_equal(left.categories.sort_values(),
right.categories.sort_values(),
obj='{obj}.categories'.format(obj=obj))
assert_index_equal(left.categories.take(left.codes),
right.categories.take(right.codes),
obj='{obj}.values'.format(obj=obj))
assert_attr_equal('ordered', left, right, obj=obj)
def assert_interval_array_equal(left, right, exact='equiv',
obj='IntervalArray'):
"""Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
assert_index_equal(left.left, right.left, exact=exact,
obj='{obj}.left'.format(obj=obj))
    assert_index_equal(left.right, right.right, exact=exact,
                       obj='{obj}.right'.format(obj=obj))
assert_attr_equal('closed', left, right, obj=obj)
def assert_period_array_equal(left, right, obj='PeriodArray'):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data,
obj='{obj}.values'.format(obj=obj))
assert_attr_equal('freq', left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj='DatetimeArray'):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data,
obj='{obj}._data'.format(obj=obj))
assert_attr_equal('freq', left, right, obj=obj)
assert_attr_equal('tz', left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj='TimedeltaArray'):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data,
obj='{obj}._data'.format(obj=obj))
assert_attr_equal('freq', left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None):
__tracebackhide__ = True
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if PY2 and isinstance(left, string_types):
# left needs to be printable in native text type in python2
left = left.encode('utf-8')
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
if PY2 and isinstance(right, string_types):
# right needs to be printable in native text type in python2
right = right.encode('utf-8')
msg = """{obj} are different
{message}
[left]: {left}
[right]: {right}""".format(obj=obj, message=message, left=left, right=right)
if diff is not None:
msg += "\n[diff]: {diff}".format(diff=diff)
raise AssertionError(msg)
def assert_numpy_array_equal(left, right, strict_nan=False,
check_dtype=True, err_msg=None,
check_same=None, obj='numpy array'):
""" Checks that 'np.ndarray' is equivalent
Parameters
----------
left : np.ndarray or iterable
right : np.ndarray or iterable
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype: bool, default True
check dtype if both a and b are np.ndarray
err_msg : str, default None
If provided, used as assertion message
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, 'base', None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == 'same':
if left_base is not right_base:
msg = "{left!r} is not {right!r}".format(
left=left_base, right=right_base)
raise AssertionError(msg)
elif check_same == 'copy':
if left_base is right_base:
msg = "{left!r} is {right!r}".format(
left=left_base, right=right_base)
raise AssertionError(msg)
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(obj, '{obj} shapes are different'
.format(obj=obj), left.shape, right.shape)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = '{obj} values are different ({pct} %)'.format(
obj=obj, pct=np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal('dtype', left, right, obj=obj)
return True
def assert_extension_array_equal(left, right, check_dtype=True,
check_less_precise=False,
check_exact=False):
"""Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare
check_dtype : bool, default True
Whether to check if the ExtensionArray dtypes are identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_exact : bool, default False
Whether to compare number exactly.
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
"""
assert isinstance(left, ExtensionArray), 'left is not an ExtensionArray'
assert isinstance(right, ExtensionArray), 'right is not an ExtensionArray'
if check_dtype:
assert_attr_equal('dtype', left, right, obj='ExtensionArray')
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
assert_numpy_array_equal(left_na, right_na, obj='ExtensionArray NA mask')
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
assert_numpy_array_equal(left_valid, right_valid, obj='ExtensionArray')
else:
_testing.assert_almost_equal(left_valid, right_valid,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
obj='ExtensionArray')
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(left, right, check_dtype=True,
check_index_type='equiv',
check_series_type=True,
check_less_precise=False,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
obj='Series'):
"""Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
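
    Examples
    --------
    A minimal illustration with hypothetical data: equal values with
    differing dtypes compare equal once dtype checking is disabled.

    >>> from pandas.util.testing import assert_series_equal
    >>> s1 = pd.Series([1, 2, 3])
    >>> s2 = pd.Series([1.0, 2.0, 3.0])
    >>> assert_series_equal(s1, s2, check_dtype=False)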
"""
__tracebackhide__ = True
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
# ToDo: There are some tests using rhs is sparse
# lhs is dense. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = '{len}, {left}'.format(len=len(left), left=left.index)
msg2 = '{len}, {right}'.format(len=len(right), right=right.index)
raise_assert_detail(obj, 'Series length are different', msg1, msg2)
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{obj}.index'.format(obj=obj))
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (is_categorical_dtype(left) and is_categorical_dtype(right) and
not check_categorical):
pass
else:
assert_attr_equal('dtype', left, right)
if check_exact:
assert_numpy_array_equal(left.get_values(), right.get_values(),
check_dtype=check_dtype,
obj='{obj}'.format(obj=obj),)
elif check_datetimelike_compat:
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
if (is_datetimelike_v_numeric(left, right) or
is_datetimelike_v_object(left, right) or
needs_i8_conversion(left) or
needs_i8_conversion(right)):
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left.values).equals(Index(right.values)):
msg = ('[datetimelike_compat=True] {left} is not equal to '
'{right}.').format(left=left.values, right=right.values)
raise AssertionError(msg)
else:
assert_numpy_array_equal(left.get_values(), right.get_values(),
check_dtype=check_dtype)
elif is_interval_dtype(left) or is_interval_dtype(right):
assert_interval_array_equal(left.array, right.array)
elif (is_extension_array_dtype(left) and not is_categorical_dtype(left) and
is_extension_array_dtype(right) and not is_categorical_dtype(right)):
return assert_extension_array_equal(left.array, right.array)
else:
_testing.assert_almost_equal(left.get_values(), right.get_values(),
check_less_precise=check_less_precise,
check_dtype=check_dtype,
obj='{obj}'.format(obj=obj))
# metadata comparison
if check_names:
assert_attr_equal('name', left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{obj} category'.format(obj=obj))
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(left, right, check_dtype=True,
check_index_type='equiv',
check_column_type='equiv',
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
obj='DataFrame'):
"""
Check that left and right DataFrame are equal.
This function is intended to compare two DataFrames and output any
    differences. It is mostly intended for use in unit tests.
Additional parameters allow varying the strictness of the
equality checks performed.
Parameters
----------
left : DataFrame
First DataFrame to compare.
right : DataFrame
Second DataFrame to compare.
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
check_index_type : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_column_type : bool / string {'equiv'}, default 'equiv'
Whether to check the columns class, dtype and inferred_type
are identical. Is passed as the ``exact`` argument of
:func:`assert_index_equal`.
check_frame_type : bool, default True
Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_names : bool, default True
Whether to check that the `names` attribute for both the `index`
and `column` attributes of the DataFrame is identical, i.e.
* left.index.names == right.index.names
* left.columns.names == right.columns.names
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If True, ignore the order of index & columns.
Note: index labels must match their respective rows
(same as in columns) - same labels must be with the same data.
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message.
See Also
--------
assert_series_equal : Equivalent method for asserting Series equality.
DataFrame.equals : Check DataFrame equality.
Examples
--------
This example shows comparing two DataFrames that are equal
but with columns of differing dtypes.
>>> from pandas.util.testing import assert_frame_equal
>>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
df1 equals itself.
>>> assert_frame_equal(df1, df1)
df1 differs from df2 as column 'b' is of a different type.
>>> assert_frame_equal(df1, df2)
Traceback (most recent call last):
AssertionError: Attributes are different
Attribute "dtype" are different
[left]: int64
[right]: float64
Ignore differing dtypes in columns with check_dtype.
>>> assert_frame_equal(df1, df2, check_dtype=False)
"""
__tracebackhide__ = True
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
# ToDo: There are some tests using rhs is SparseDataFrame
# lhs is DataFrame. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(obj,
'DataFrame shape mismatch',
'{shape!r}'.format(shape=left.shape),
'{shape!r}'.format(shape=right.shape))
if check_like:
left, right = left.reindex_like(right), right
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{obj}.index'.format(obj=obj))
# column comparison
assert_index_equal(left.columns, right.columns, exact=check_column_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{obj}.columns'.format(obj=obj))
# compare by blocks
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(lblocks[dtype], rblocks[dtype],
check_dtype=check_dtype, obj='DataFrame.blocks')
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(
lcol, rcol, check_dtype=check_dtype,
check_index_type=check_index_type,
check_less_precise=check_less_precise,
check_exact=check_exact, check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
obj='DataFrame.iloc[:, {idx}]'.format(idx=i))
def assert_panel_equal(left, right,
check_dtype=True,
check_panel_type=False,
check_less_precise=False,
check_names=False,
by_blocks=False,
obj='Panel'):
"""Check that left and right Panels are equal.
Parameters
----------
left : Panel (or nd)
right : Panel (or nd)
check_dtype : bool, default True
Whether to check the Panel dtype is identical.
check_panel_type : bool, default False
Whether to check the Panel class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_names : bool, default True
Whether to check the Index names attribute.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
obj : str, default 'Panel'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
if check_panel_type:
assert_class_equal(left, right, obj=obj)
for axis in left._AXIS_ORDERS:
left_ind = getattr(left, axis)
right_ind = getattr(right, axis)
assert_index_equal(left_ind, right_ind, check_names=check_names)
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
array_equivalent(lblocks[dtype].values, rblocks[dtype].values)
else:
# can potentially be slow
for i, item in enumerate(left._get_axis(0)):
msg = "non-matching item (right) '{item}'".format(item=item)
assert item in right, msg
litem = left.iloc[i]
ritem = right.iloc[i]
assert_frame_equal(litem, ritem,
check_less_precise=check_less_precise,
check_names=check_names)
for i, item in enumerate(right._get_axis(0)):
msg = "non-matching item (left) '{item}'".format(item=item)
assert item in left, msg
def assert_equal(left, right, **kwargs):
"""
Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.
Parameters
----------
left : Index, Series, DataFrame, ExtensionArray, or np.ndarray
right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
**kwargs
"""
__tracebackhide__ = True
if isinstance(left, pd.Index):
assert_index_equal(left, right, **kwargs)
elif isinstance(left, pd.Series):
assert_series_equal(left, right, **kwargs)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(left, right, **kwargs)
elif isinstance(left, IntervalArray):
assert_interval_array_equal(left, right, **kwargs)
elif isinstance(left, PeriodArray):
assert_period_array_equal(left, right, **kwargs)
elif isinstance(left, DatetimeArray):
assert_datetime_array_equal(left, right, **kwargs)
elif isinstance(left, TimedeltaArray):
assert_timedelta_array_equal(left, right, **kwargs)
elif isinstance(left, ExtensionArray):
assert_extension_array_equal(left, right, **kwargs)
elif isinstance(left, np.ndarray):
assert_numpy_array_equal(left, right, **kwargs)
else:
raise NotImplementedError(type(left))
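# Illustrative dispatch behavior of assert_equal (hypothetical inputs):
# assert_equal(pd.Series([1, 2]), pd.Series([1, 2]))    -> assert_series_equal
# assert_equal(np.array([1, 2]), np.array([1, 2]))      -> assert_numpy_array_equal
# assert_equal(pd.Index([1, 2]), pd.Index([1, 2]))      -> assert_index_equal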
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.Index:
expected = pd.Index(expected)
elif box_cls is pd.Series:
expected = pd.Series(expected)
elif box_cls is pd.DataFrame:
expected = pd.Series(expected).to_frame()
if transpose:
            # for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length.
expected = expected.T
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
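# Illustrative uses of box_expected (hypothetical inputs):
# box_expected([1, 2], pd.Series) returns pd.Series([1, 2]);
# box_expected([1, 2], pd.DataFrame) returns the single-row (transposed) frame
# used for operating against non-DataFrame vectors of the same length.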
def to_array(obj):
# temporary implementation until we get pd.array in place
if is_period_dtype(obj):
return period_array(obj)
elif is_datetime64_dtype(obj) or is_datetime64tz_dtype(obj):
return DatetimeArray._from_sequence(obj)
elif is_timedelta64_dtype(obj):
return TimedeltaArray._from_sequence(obj)
else:
return np.array(obj)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(left, right, check_dtype=True, check_kind=True,
check_fill_value=True,
consolidate_block_indices=False):
"""Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
check_dtype : bool, default True
Whether to check the data dtype is identical.
check_kind : bool, default True
        Whether to check only the kind of the sparse index for each column.
check_fill_value : bool, default True
Whether to check that left.fill_value matches right.fill_value
consolidate_block_indices : bool, default False
Whether to consolidate contiguous blocks for sparse arrays with
a BlockIndex. Some operations, e.g. concat, will end up with
block indices that could be consolidated. Setting this to true will
create a new BlockIndex for that array, with consolidated
block indices.
"""
_check_isinstance(left, right, pd.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values,
check_dtype=check_dtype)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
if not check_kind:
left_index = left.sp_index.to_block_index()
right_index = right.sp_index.to_block_index()
else:
left_index = left.sp_index
right_index = right.sp_index
if consolidate_block_indices and left.kind == 'block':
# we'll probably remove this hack...
left_index = left_index.to_int_index().to_block_index()
right_index = right_index.to_int_index().to_block_index()
if not left_index.equals(right_index):
raise_assert_detail('SparseArray.index', 'index are not equal',
left_index, right_index)
else:
        # indices are equal; nothing further to check
pass
if check_fill_value:
assert_attr_equal('fill_value', left, right)
if check_dtype:
assert_attr_equal('dtype', left, right)
assert_numpy_array_equal(left.values, right.values,
check_dtype=check_dtype)
def assert_sp_series_equal(left, right, check_dtype=True, exact_indices=True,
check_series_type=True, check_names=True,
check_kind=True,
check_fill_value=True,
consolidate_block_indices=False,
obj='SparseSeries'):
"""Check that the left and right SparseSeries are equal.
Parameters
----------
left : SparseSeries
right : SparseSeries
check_dtype : bool, default True
Whether to check the Series dtype is identical.
exact_indices : bool, default True
check_series_type : bool, default True
Whether to check the SparseSeries class is identical.
check_names : bool, default True
Whether to check the SparseSeries name attribute.
check_kind : bool, default True
        Whether to check only the kind of the sparse index for each column.
check_fill_value : bool, default True
Whether to check that left.fill_value matches right.fill_value
consolidate_block_indices : bool, default False
Whether to consolidate contiguous blocks for sparse arrays with
a BlockIndex. Some operations, e.g. concat, will end up with
block indices that could be consolidated. Setting this to true will
create a new BlockIndex for that array, with consolidated
block indices.
obj : str, default 'SparseSeries'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
_check_isinstance(left, right, pd.SparseSeries)
if check_series_type:
assert_class_equal(left, right, obj=obj)
assert_index_equal(left.index, right.index,
obj='{obj}.index'.format(obj=obj))
assert_sp_array_equal(left.values, right.values,
check_kind=check_kind,
check_fill_value=check_fill_value,
consolidate_block_indices=consolidate_block_indices)
if check_names:
assert_attr_equal('name', left, right)
if check_dtype:
assert_attr_equal('dtype', left, right)
assert_numpy_array_equal(np.asarray(left.values),
np.asarray(right.values))
def assert_sp_frame_equal(left, right, check_dtype=True, exact_indices=True,
check_frame_type=True, check_kind=True,
check_fill_value=True,
consolidate_block_indices=False,
obj='SparseDataFrame'):
"""Check that the left and right SparseDataFrame are equal.
Parameters
----------
left : SparseDataFrame
right : SparseDataFrame
check_dtype : bool, default True
Whether to check the Series dtype is identical.
exact_indices : bool, default True
SparseSeries SparseIndex objects must be exactly the same,
otherwise just compare dense representations.
check_frame_type : bool, default True
Whether to check the SparseDataFrame class is identical.
check_kind : bool, default True
        Whether to check only the kind of the sparse index for each column.
check_fill_value : bool, default True
Whether to check that left.fill_value matches right.fill_value
consolidate_block_indices : bool, default False
Whether to consolidate contiguous blocks for sparse arrays with
a BlockIndex. Some operations, e.g. concat, will end up with
block indices that could be consolidated. Setting this to true will
create a new BlockIndex for that array, with consolidated
block indices.
obj : str, default 'SparseDataFrame'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
_check_isinstance(left, right, pd.SparseDataFrame)
if check_frame_type:
assert_class_equal(left, right, obj=obj)
assert_index_equal(left.index, right.index,
obj='{obj}.index'.format(obj=obj))
assert_index_equal(left.columns, right.columns,
obj='{obj}.columns'.format(obj=obj))
if check_fill_value:
assert_attr_equal('default_fill_value', left, right, obj=obj)
for col, series in compat.iteritems(left):
assert (col in right)
# trade-off?
if exact_indices:
assert_sp_series_equal(
series, right[col],
check_dtype=check_dtype,
check_kind=check_kind,
check_fill_value=check_fill_value,
consolidate_block_indices=consolidate_block_indices
)
else:
assert_series_equal(series.to_dense(), right[col].to_dense(),
check_dtype=check_dtype)
# do I care?
# assert(left.default_kind == right.default_kind)
for col in right:
assert (col in left)
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, "Did not contain item: '{key!r}'".format(key=k)
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
msg = ("Expected object {obj1!r} and object {obj2!r} to be "
"different objects, but they were the same object."
).format(obj1=type(elem1), obj2=type(elem2))
assert elem1 is not elem2, msg
def getCols(k):
return string.ascii_uppercase[:k]
def getArangeMat():
return np.arange(N * K).reshape((N, K))
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(np.random.choice(x, k), name=name, **kwargs)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(lrange(k), name=name)
def makeUIntIndex(k=10, name=None):
    return Index([2**63 + i for i in lrange(k)], name=name)
'''
Author: <NAME>
filename: DORIS_NGS.py
Description: NGS strand analysis for the DORIS project
'''
###GLOBALS###
reverse_primer={
"type1":"CAGGTACGCAGTTAGCACTC" #constant across all file-sets
}
payload={ #dictionary used for payloads
    "type1": "CGTACTGCTCGATGAGTACTCTGCTCGACGAGATGAGACGAGTCTCTCGTAGACGAGAGCAGACTCAGTCATCGCGCTAGAGAGCA", #canonical payload 1, for 5-base barcodes (used in file sets 1-3); G at end is from promoter
    "type2": "ACTGCTCGATGAGTACTCTGCTCGACGAGATGAGACGAGTCTCTCGTAGACGAGAGCAGACTCAGTCATCGCGCTAGAGAGCATAGAGTCGTG", #canonical payload, for 3-base barcodes (used in file sets 1-3)
"type3": "CGTACTGCTCGATGAGTACTCTGCTCGACGAGATGAGACGAGTCTCTCGTAGACGAGAGCA" #payload for file-set 4
}
payload_to_forward={#sequences between the payload and forward sequences, used for file-set 1
"type1":"GCGCGCTATAGTGAGTCGTATTANNNNN",
"type2":"GCGCGCTATAGTGAGTCGTATTA",
"type3":""#empty--> not used
}
forward_primer={#dictionary used for forward primers
    "type1": "TCCGTAGTCATATTGCCACG", #canonical forward primer, set-1 files
"type2": "GCGCGCAAAAAAAAAAAAAA", #forward primer for set-2 files
"type3": "GCGCGCAGTCAGATCAGCTATTACTG", #forward primer for set-3 files
"type4": "GACTCAGTCATCGCGCTAG" #forward primer for set-4 files
}
'''
complete strand = reverse_primer+*5-barcode*+*payload*+*3-barcode*+payload_to_forward+forward_primer
--barcode will depend on what sequence occurs after the reverse_primer. Test if there is an individual base G: indicates 5 base barcode.
Or test for a short sequence after the primer: ACTGCT indicates a 3 base barcode.
--there should be two payload versions for each set corresponding to each barcode
'''
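#Illustrative layouts inferred from the parsing logic below (hedged sketch;
#'N' marks barcode bases, one barcode variant per strand):
# 5-base variant: reverse_primer + 'G' + NNNNN + payload_1 + ... + forward_primer
# 3-base variant: reverse_primer + payload_2 + 'A' + NNN + 'GCGCGC'... + forward_primer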
file_strand_archs={ #holds characteristics for each file studied
"dsLi": {
"reverse_primer":"type1",
"forward_primer":"type1",
"payload_1":"type1",
"payload_2":"type2",
"payload_to_forward_1":"type1",
"payload_to_forward_2":"type2",
"set": 1
},
"hyLi": {
"reverse_primer":"type1",
"forward_primer":"type1",
"payload_1":"type1",
"payload_2":"type2",
"payload_to_forward_1":"type1",
"payload_to_forward_2":"type2",
"set": 1
},
"Pool_Stock": {
"reverse_primer":"type1",
"forward_primer":"type1",
"payload_1":"type1",
"payload_2":"type2",
"payload_to_forward_1":"type1",
"payload_to_forward_2":"type2",
"set": 1
},
"dsLi_cDNA_Tailed": {
"reverse_primer":"type1",
"forward_primer":"type2",
"payload_1":"type1",
"payload_2":"type2",
"payload_to_forward_1":"type3",
"payload_to_forward_2":"type3",
"set": 2
},
"hyLi_cDNA_Tailed": {
"reverse_primer":"type1",
"forward_primer":"type2",
"payload_1":"type1",
"payload_2":"type2",
"payload_to_forward_1":"type3",
"payload_to_forward_2":"type3",
"set": 2
},
"hyLi_cDNA_GC": {
"reverse_primer":"type1",
"forward_primer":"type3",
"payload_1":"type1",
"payload_2":"type2",
"payload_to_forward_1":"type3",
"payload_to_forward_2":"type3",
"set": 3
},
"dsLi_cDNA_GC": {
"reverse_primer":"type1",
"forward_primer":"type3",
"payload_1":"type1",
"payload_2":"type2",
"payload_to_forward_1":"type3",
"payload_to_forward_2":"type3",
"set": 3
},
"hyLi_cDNA_26": {
"reverse_primer":"type1",
"forward_primer":"type4",
"payload_1":"type3",
"payload_2":"type3",
"payload_to_forward_1":"type3",
"payload_to_forward_2":"type3",
"set": 4
},
"dsLi_cDNA_26": {
"reverse_primer":"type1",
"forward_primer":"type4",
"payload_1":"type3",
"payload_2":"type3",
"payload_to_forward_1":"type3",
"payload_to_forward_2":"type3",
"set": 4
}
}
##############END GLOBALS#############################
reverse_table={'A':'T','T':'A','G':'C','C':'G','N':'N', '\x00':'N'}
def calculate_reverse_compliment(strand):
reverse=strand[::-1]
#print strand
return ''.join([reverse_table[nuc] for nuc in reverse])
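#Example (sanity check): calculate_reverse_compliment('ACTG') -> 'CAGT'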
def convertBaseHelper(base,dec,s):
bases = ['A', 'C', 'G', 'T']
m = int(dec % base)
q = int(dec / base)
#print(s)
s = s + bases[m]
#print(s)
if q > 0:
return convertBaseHelper(base,q,s)
else:
return s
def convertBase(base,dec,length):
bases=['A','C','G','T']
s = convertBaseHelper(base,dec,'')
s = s.ljust(length,bases[0])
return s
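#Example: convertBase(4, 5, 3) -> 'CCA' (least-significant base first, padded on
#the right with 'A' to the requested length; callers below reverse the result)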
def get_experiment_base_name(experiment_file):
exper_base=experiment_file.split("_rep_")[0]
assert not exper_base == ""
if "rep" in exper_base:
exper_base=exper_base.split("-rep-")[0]
assert not exper_base == ""
return exper_base
barcode_to_index_dict={}
def init_barcode_to_index_dict():
#for however many barcodes we have (1088) generate the corresponding sequence
#mapping is simply a base 4 mapping
global barcode_to_index_dict
index_array=range(0,1088)
base_3_array=[]
base_5_array=[]
for i in range(0,64):
        barcode_to_index_dict[convertBase(4,i,3)[::-1]]=i #reverse so the most significant digit is read left to right
base_3_array.append(convertBase(4,i,3)[::-1])
for i in range(64,1088):
barcode_to_index_dict[convertBase(4,i-64,5)[::-1]]=i
base_5_array.append(convertBase(4,i-64,5)[::-1])
#print convertBase(4,i-64,5)[::-1]
assert len(barcode_to_index_dict)==1088
assert len(base_3_array)==64
assert len(base_5_array)==1024
return base_3_array+base_5_array
#mapping: barcode --> index
def barcode_to_index(barcode):
global barcode_to_index_dict
#print barcode_to_index_dict
return barcode_to_index_dict[barcode]
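#Example values (these follow from the base-4 scheme above, assuming
#init_barcode_to_index_dict() has been called):
# barcode_to_index('AAA') == 0, barcode_to_index('ACC') == 5,
# barcode_to_index('AAAAA') == 64 (the 5-base barcodes start at index 64)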
def analyze_strands(file_base_name,file_key_name,strand_array,data_dictionary):
global reverse_primer
global file_strand_archs
global payload
    #initialize keys used for data collection; keys into the data dictionary account for the file name
    count_key=file_key_name+"_"+"count"
    error_rate_key=file_key_name+"_"+"error_rate"
for strand in strand_array:
        if len(strand)<100:
            continue #skip reads too short to contain the full construct
reverse_p=reverse_primer[file_strand_archs[file_base_name]["reverse_primer"]]
reverse_p_RC=calculate_reverse_compliment(reverse_p)
find_FW=strand.find(reverse_p)
find_RC=strand.find(reverse_p_RC)
_strand=""
#check if strand is forward or reverse, then find the start and endpoints for the reverse primer
if not find_FW == -1:
#found forward strand
start_point=find_FW
end_point=min(find_FW+len(reverse_p),len(strand))
_strand=strand
#print "forward way start {} end {}".format(start_point,end_point)
assert end_point>=0
assert start_point>=0
elif not find_RC == -1:
#found the reverse complement of a strand, calculate the reverse complement
_strand=calculate_reverse_compliment(strand)
end_point=min(len(strand)-find_RC,len(_strand)-1)
start_point=end_point-len(reverse_p)
#print "reverse way start {} end {}".format(start_point,end_point)
assert end_point>=0
assert start_point>=0
else:
continue #did not find either
assert not _strand == ""
#know the bound points, end_points are non-inclusive, process _strand now
        if _strand[end_point]=='G':#First base after reverse primer should be 'G' for NNNNN barcodes
#calculate the edit distance between the payload of the strand with that of the expected strand
N_5_payload=payload[file_strand_archs[file_base_name]["payload_1"]]
N_5_start=min(end_point+6,len(_strand)-1)
N_5_end=min(end_point+6+len(N_5_payload),len(_strand))
lv_N_5_payload=lv.distance(N_5_payload,_strand[N_5_start:N_5_end])
payload_ld=lv.distance(payload[file_strand_archs[file_base_name]["payload_1"]],payload[file_strand_archs[file_base_name]["payload_2"]])
#compare edit_distance(ngs,N_5) and edit_distance(N_5,N3)
if lv_N_5_payload < payload_ld/2 or (payload_ld==0 and lv_N_5_payload<7):
#we have a NNNNN-5 base barcode strand
barcode_start=end_point+1
barcode_end=barcode_start+5
barcode=_strand[barcode_start:barcode_end]
assert len(barcode)==5
                data_dictionary[count_key][barcode_to_index(barcode)]+=1 #count barcode occurrence
#calculate error rates for the NNNNN barcodes
if lv_N_5_payload < payload_ld/2 or (payload_ld==0 and lv_N_5_payload<7):
edit_operations=lv.editops(N_5_payload,_strand[end_point+6:end_point+6+len(N_5_payload)])
for op in edit_operations:
if data_dictionary[error_rate_key]["NNNNN"][op[0]][op[1]]==-1:
data_dictionary[error_rate_key]["NNNNN"][op[0]][op[1]]=1
else:
data_dictionary[error_rate_key]["NNNNN"][op[0]][op[1]]+=1
data_dictionary[error_rate_key]["NNNNN"]["total"]+=1
else:
#assume we have a NNN-3 base barcode strand
            #use part of the promoter region as the reference closest to the 3-base barcode, common across all the files studied
reference="GCGCGC"
#print "NNN 3 barcode"
reference_start=_strand.find(reference)
if reference_start==-1: continue
#reference occurs after the barcode
#calculate edit distance for ngs strand
NNN_3_payload=payload[file_strand_archs[file_base_name]["payload_2"]]
lv_NNN_3_payload=lv.distance(NNN_3_payload,_strand[end_point:end_point+len(NNN_3_payload)])
pre_barcode_start=reference_start-4
            #calculate edit distance between payload_1 and payload_2
payload_ld=lv.distance(payload[file_strand_archs[file_base_name]["payload_1"]],payload[file_strand_archs[file_base_name]["payload_2"]])
#make sure edit distance is close enough to payload_2 to ensure high confidence in NNN strand
if _strand[pre_barcode_start]=='A' and lv_NNN_3_payload<payload_ld/2:
#high confidence we found a barcode
barcode_start=pre_barcode_start+1
barcode=_strand[barcode_start:reference_start]
assert len(barcode)==3
data_dictionary[count_key][barcode_to_index(barcode)]+=1
#calculate error rates for the NNN barcode
if lv_NNN_3_payload<payload_ld/2:
edit_operations=lv.editops(NNN_3_payload,_strand[end_point:end_point+len(NNN_3_payload)])
for op in edit_operations:
if data_dictionary[error_rate_key]["NNN"][op[0]][op[1]]==-1:
data_dictionary[error_rate_key]["NNN"][op[0]][op[1]]=1
else:
data_dictionary[error_rate_key]["NNN"][op[0]][op[1]]+=1
data_dictionary[error_rate_key]["NNN"]["total"]+=1
if __name__=="__main__":
import argparse
import os
import pandas as pd #going to dump things out using data frames
import pickle as pi
import numpy as np
import Levenshtein as lv
barcode_array=init_barcode_to_index_dict() #this array will be used as row labels in the data frame
parser = argparse.ArgumentParser(description="Analyze strands for Doris")
parser.add_argument('--range', dest="fq_range", action="store", default="1-10", help="Range for fastq files")
parser.add_argument('--fastq_directory',dest="fq_dir", action="store", default=None, help="Directory for stripped fastq files")
args = parser.parse_args()
lower,upper=args.fq_range.split("-")
lower_int = int(lower)
upper_int = int(upper)
sample_files=[]
dir_sorted=os.listdir(args.fq_dir)
dir_sorted.sort()
data_dictionary={}
#get the working set of sample files based on fq_range
for _file in dir_sorted[lower_int:upper_int+1]:
if os.path.isfile(args.fq_dir+'/'+_file):
sample_files.append(_file)
#create output directory for DORIS Data
if not os.path.exists("DORIS_DATA"):
os.mkdir("DORIS_DATA")
for fq_file in sample_files:
print (fq_file)
file_path=args.fq_dir+'/'+fq_file
        file_base_name=get_experiment_base_name(fq_file)
sequence_strands=[line.rstrip('\n') for line in open(file_path)]
#generate result dictionary keys, count_key --> barcode counting, total_reads --> sum of all barcodes, error_rate --> positional error rate within the strand
count_key=fq_file+"_"+"count"
total_reads_key=fq_file+"_"+"total_reads"
error_rate_key=fq_file+"_"+"error_rate"
if count_key not in data_dictionary:
#initialize an array of counters for counting each barcode
data_dictionary[count_key]=[0]*1088
if total_reads_key not in data_dictionary:
data_dictionary[total_reads_key]=['--']*1088
data_dictionary[total_reads_key][0]=len(sequence_strands)
if error_rate_key not in data_dictionary:
data_dictionary[error_rate_key]={}
data_dictionary[error_rate_key]["NNN"]={}
data_dictionary[error_rate_key]["NNNNN"]={}
data_dictionary[error_rate_key]["NNN"]["replace"]=[-1]*200
data_dictionary[error_rate_key]["NNN"]["delete"]=[-1]*200
data_dictionary[error_rate_key]["NNN"]["insert"]=[-1]*200
data_dictionary[error_rate_key]["NNN"]["total"]=0
data_dictionary[error_rate_key]["NNNNN"]["replace"]=[-1]*200
data_dictionary[error_rate_key]["NNNNN"]["delete"]=[-1]*200
data_dictionary[error_rate_key]["NNNNN"]["insert"]=[-1]*200
data_dictionary[error_rate_key]["NNNNN"]["total"]=0
analyze_strands(file_base_name,fq_file,sequence_strands,data_dictionary)
#make a data frame using the collected data
count_dump_path="DORIS_DATA/"+get_experiment_base_name(sample_files[0])+'_count.csv'
error_rate_dump_path="DORIS_DATA/"+get_experiment_base_name(sample_files[0])+'_error_rate.csv'
count_file=open(count_dump_path,'w+')
error_rate_file=open(error_rate_dump_path,'w+')
    #arrange the error-rate and count results into individual dictionaries to dump results as dataframes
count_dict={}
error_rate_dict={}
for key in sorted(data_dictionary.keys()):
if "count" in key or "total_reads" in key:
count_dict[key]=data_dictionary[key]
elif "error_rate" in key:
for strand_type in data_dictionary[key]:
for error_type in data_dictionary[key][strand_type]:
if "total" in error_type: continue
error_rate_dict_key=key.split('error_rate')[0]+error_type+"_"+strand_type
error_rate_dict[error_rate_dict_key]=data_dictionary[key][strand_type][error_type]
for count_index, count in enumerate(error_rate_dict[error_rate_dict_key]):
if count==-1:
error_rate_dict[error_rate_dict_key][count_index]=0
else:
error_rate_dict[error_rate_dict_key][count_index]=float(count)/float(data_dictionary[key][strand_type]["total"])
error_rate_frame=pd.DataFrame(error_rate_dict)
    count_frame=pd.DataFrame(count_dict,index=barcode_array)
"""Step 2: Solving the problem under uncertainty."""
import cvxpy as cp
import fledge
import cobmo
import numpy as np
import os
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import shutil
import time
import tslearn.utils
import tslearn.clustering
def main(
scenario_in_sample_number=None,
scenarios_probability_weighted=None
):
# Settings.
scenario_name = 'course_project_step_2'
results_path = os.path.join(os.path.dirname(os.path.dirname(os.path.normpath(__file__))), 'results', 'step_2')
# Note that the same number of in-sample scenarios may yield different results, due to the clustering algorithm.
scenario_in_sample_number = 30 if scenario_in_sample_number is None else scenario_in_sample_number
scenarios_probability_weighted = True if scenarios_probability_weighted is None else scenarios_probability_weighted
results_path += f'_{scenario_in_sample_number}_weighted{scenarios_probability_weighted}'
# Clear / instantiate results directory.
try:
if os.path.isdir(results_path):
shutil.rmtree(results_path)
os.mkdir(results_path)
except PermissionError:
pass
# STEP 2.0: SETUP MODELS.
# Obtain data & models.
# Flexible DERs.
der_model_set = fledge.der_models.DERModelSet(scenario_name)
# Thermal grid.
thermal_grid_model = fledge.thermal_grid_models.ThermalGridModel(scenario_name)
thermal_grid_model.cooling_plant_efficiency = 10.0 # Change model parameter to incentivize use of thermal grid.
thermal_power_flow_solution_reference = fledge.thermal_grid_models.ThermalPowerFlowSolution(thermal_grid_model)
linear_thermal_grid_model = (
fledge.thermal_grid_models.LinearThermalGridModel(thermal_grid_model, thermal_power_flow_solution_reference)
)
# Define arbitrary operation limits.
node_head_vector_minimum = 1.5 * thermal_power_flow_solution_reference.node_head_vector
branch_flow_vector_maximum = 10.0 * thermal_power_flow_solution_reference.branch_flow_vector
# Electric grid.
electric_grid_model = fledge.electric_grid_models.ElectricGridModelDefault(scenario_name)
power_flow_solution_reference = fledge.electric_grid_models.PowerFlowSolutionFixedPoint(electric_grid_model)
linear_electric_grid_model = (
fledge.electric_grid_models.LinearElectricGridModelGlobal(electric_grid_model, power_flow_solution_reference)
)
# Define arbitrary operation limits.
node_voltage_magnitude_vector_minimum = 0.5 * np.abs(electric_grid_model.node_voltage_vector_reference)
node_voltage_magnitude_vector_maximum = 1.5 * np.abs(electric_grid_model.node_voltage_vector_reference)
branch_power_magnitude_vector_maximum = 10.0 * electric_grid_model.branch_power_vector_magnitude_reference
# Energy price.
price_data_day_ahead = fledge.data_interface.PriceData(scenario_name)
price_data_real_time = fledge.data_interface.PriceData(scenario_name)
# Obtain time step index shorthands.
scenario_data = fledge.data_interface.ScenarioData(scenario_name)
timesteps = scenario_data.timesteps
timestep_interval_hours = (timesteps[1] - timesteps[0]) / pd.Timedelta('1h')
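    # E.g. 30-minute time steps give timestep_interval_hours = 0.5.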
# Invert sign of losses.
# - Power values of loads are negative by convention. Hence, sign of losses should be negative for power balance.
# Thermal grid.
linear_thermal_grid_model.sensitivity_pump_power_by_der_power *= -1.0
linear_thermal_grid_model.thermal_power_flow_solution.pump_power *= -1.0
# Electric grid.
linear_electric_grid_model.sensitivity_loss_active_by_der_power_active *= -1.0
linear_electric_grid_model.sensitivity_loss_active_by_der_power_reactive *= -1.0
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_active *= -1.0
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive *= -1.0
linear_electric_grid_model.power_flow_solution.loss *= -1.0
# Apply base power / voltage scaling.
# - Scale values to avoid numerical issues.
    base_power = 1e6 # 1 MW, expressed in W.
    base_voltage = 1e3 # 1 kV, expressed in V.
# Flexible DERs.
for der_model in der_model_set.flexible_der_models.values():
der_model.mapping_active_power_by_output *= 1 / base_power
der_model.mapping_reactive_power_by_output *= 1 / base_power
der_model.mapping_thermal_power_by_output *= 1 / base_power
# Thermal grid.
linear_thermal_grid_model.sensitivity_node_head_by_der_power *= base_power
linear_thermal_grid_model.sensitivity_branch_flow_by_der_power *= base_power
linear_thermal_grid_model.sensitivity_pump_power_by_der_power *= 1
# Electric grid.
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active *= base_power / base_voltage
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive *= base_power / base_voltage
linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_active *= 1
linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_reactive *= 1
linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_active *= 1
linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_reactive *= 1
linear_electric_grid_model.sensitivity_loss_active_by_der_power_active *= 1
linear_electric_grid_model.sensitivity_loss_active_by_der_power_reactive *= 1
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_active *= 1
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive *= 1
linear_electric_grid_model.power_flow_solution.der_power_vector *= 1 / base_power
linear_electric_grid_model.power_flow_solution.branch_power_vector_1 *= 1 / base_power
linear_electric_grid_model.power_flow_solution.branch_power_vector_2 *= 1 / base_power
linear_electric_grid_model.power_flow_solution.loss *= 1 / base_power
linear_electric_grid_model.power_flow_solution.node_voltage_vector *= 1 / base_voltage
# Limits
node_voltage_magnitude_vector_minimum /= base_voltage
node_voltage_magnitude_vector_maximum /= base_voltage
branch_power_magnitude_vector_maximum /= base_power
# Energy price.
# - Conversion of price values from S$/kWh to S$/p.u. for convenience. Currency S$ is SGD.
# - Power values of loads are negative by convention. Hence, sign of price values is inverted here.
price_data_day_ahead.price_timeseries *= -1.0 * base_power / 1e3 * timestep_interval_hours
price_data_real_time.price_timeseries *= -2.0 * base_power / 1e3 * timestep_interval_hours
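    # Hypothetical unit check: with base_power = 1e6 W and 0.5 h time steps, a
    # day-ahead price of 0.10 S$/kWh maps to 0.10 * -1.0 * 1e6 / 1e3 * 0.5 = -50.0 S$/p.u.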
# STEP 2.1: GENERATE SCENARIOS.
# Load irradiation data from CoBMo.
irradiation_timeseries = (
pd.read_sql(
"SELECT time, irradiation_horizontal FROM weather_timeseries WHERE weather_type = 'singapore_iwec'",
con=cobmo.data_interface.connect_database(),
parse_dates=['time'],
index_col='time'
)
)
# Resample / down-sample if needed.
irradiation_timeseries = (
irradiation_timeseries.resample(
pd.Timedelta(f'{timestep_interval_hours}h'),
label='left' # Using zero-order hold in the simulation.
).mean()
)
# Interpolate / up-sample if needed.
irradiation_timeseries = (
irradiation_timeseries.reindex(
pd.date_range(
irradiation_timeseries.index[0],
irradiation_timeseries.index[-1],
freq=pd.Timedelta(f'{timestep_interval_hours}h')
)
).interpolate(method='linear')
)
# Drop last time step (first hour of next year).
irradiation_timeseries = irradiation_timeseries.iloc[0:-1, :]
# Normalize.
irradiation_timeseries /= irradiation_timeseries.max().max()
# Obtain out-of-sample scenarios.
# - Pivot irradiation timeseries into table with column for each day of the year.
irradiation_timeseries.loc[:, 'dayofyear'] = irradiation_timeseries.index.dayofyear
irradiation_timeseries.loc[:, 'time_string'] = irradiation_timeseries.index.strftime('%H:%M')
irradiation_out_of_sample = (
irradiation_timeseries.pivot_table(
index='time_string',
columns='dayofyear',
values='irradiation_horizontal',
aggfunc=np.nanmean,
fill_value=0.0
)
)
# Append time step to match length of scenario time horizon.
irradiation_out_of_sample.loc['24:00', :] = 0.0
# Obtain scenario index short-hand.
out_of_sample_scenarios = irradiation_out_of_sample.columns
# Obtain in-sample scenarios.
# - Select representative scenarios by time series clustering.
clustering = tslearn.clustering.TimeSeriesKMeans(n_clusters=scenario_in_sample_number)
clustering = clustering.fit((tslearn.utils.to_time_series_dataset(irradiation_out_of_sample.transpose())))
irradiation_in_sample_mapping = (
pd.Index(
clustering.predict(tslearn.utils.to_time_series_dataset(irradiation_out_of_sample.transpose()))
)
)
irradiation_in_sample = (
pd.DataFrame(
clustering.cluster_centers_[:, :, 0].transpose(),
index=irradiation_out_of_sample.index,
columns=range(clustering.cluster_centers_.shape[0])
)
)
# Obtain scenario index short-hand.
in_sample_scenarios = irradiation_in_sample.columns
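    # Hedged sketch (assumption, not part of the original formulation): when
    # `scenarios_probability_weighted` is enabled, a natural choice is to weight
    # each in-sample scenario by the share of out-of-sample days assigned to its
    # cluster; otherwise, all scenarios are weighted equally.
    if scenarios_probability_weighted:
        scenario_probabilities = (
            irradiation_in_sample_mapping.value_counts(normalize=True).reindex(
                in_sample_scenarios, fill_value=0.0
            )
        )
    else:
        scenario_probabilities = (
            pd.Series(1.0 / len(in_sample_scenarios), index=in_sample_scenarios)
        )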
# STEP 2.2: SOLVE STOCHASTIC PROBLEM.
# Instantiate problem.
# - Utility object for optimization problem definition with CVXPY.
in_sample_problem = fledge.utils.OptimizationProblem()
# Define variables.
# - Scenario dimension is added by using dicts.
in_sample_problem.state_vector = dict.fromkeys(in_sample_scenarios)
in_sample_problem.control_vector = dict.fromkeys(in_sample_scenarios)
in_sample_problem.output_vector = dict.fromkeys(in_sample_scenarios)
in_sample_problem.der_thermal_power_vector = dict.fromkeys(in_sample_scenarios)
in_sample_problem.der_active_power_vector = dict.fromkeys(in_sample_scenarios)
in_sample_problem.der_reactive_power_vector = dict.fromkeys(in_sample_scenarios)
in_sample_problem.source_thermal_power_real_time = dict.fromkeys(in_sample_scenarios)
in_sample_problem.source_active_power_real_time = dict.fromkeys(in_sample_scenarios)
for scenario in in_sample_scenarios:
# Flexible DERs: State space vectors.
# - CVXPY only allows for 2-dimensional variables. Using dicts below to represent 3rd dimension.
in_sample_problem.state_vector[scenario] = dict.fromkeys(der_model_set.flexible_der_names)
in_sample_problem.control_vector[scenario] = dict.fromkeys(der_model_set.flexible_der_names)
in_sample_problem.output_vector[scenario] = dict.fromkeys(der_model_set.flexible_der_names)
for der_name in der_model_set.flexible_der_names:
in_sample_problem.state_vector[scenario][der_name] = (
cp.Variable((
len(der_model_set.flexible_der_models[der_name].timesteps),
len(der_model_set.flexible_der_models[der_name].states)
))
)
in_sample_problem.control_vector[scenario][der_name] = (
cp.Variable((
len(der_model_set.flexible_der_models[der_name].timesteps),
len(der_model_set.flexible_der_models[der_name].controls)
))
)
in_sample_problem.output_vector[scenario][der_name] = (
cp.Variable((
len(der_model_set.flexible_der_models[der_name].timesteps),
len(der_model_set.flexible_der_models[der_name].outputs)
))
)
# Flexible DERs: Power vectors.
in_sample_problem.der_thermal_power_vector[scenario] = (
cp.Variable((len(timesteps), len(thermal_grid_model.ders)))
)
in_sample_problem.der_active_power_vector[scenario] = (
cp.Variable((len(timesteps), len(electric_grid_model.ders)))
)
in_sample_problem.der_reactive_power_vector[scenario] = (
cp.Variable((len(timesteps), len(electric_grid_model.ders)))
)
# Source variables: Real time.
in_sample_problem.source_thermal_power_real_time[scenario] = cp.Variable((len(timesteps), 1), nonpos=True)
in_sample_problem.source_active_power_real_time[scenario] = cp.Variable((len(timesteps), 1), nonpos=True)
# Source variables: Day ahead.
# in_sample_problem.source_thermal_power_day_ahead = cp.Variable((len(timesteps), 1), nonpos=True)
in_sample_problem.source_active_power_day_ahead = cp.Variable((len(timesteps), 1), nonpos=True)
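    # - Day-ahead purchases are here-and-now decisions shared across all scenarios,
    #   whereas the per-scenario real-time variables above act as recourse.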
# Define constraints.
for scenario in in_sample_scenarios:
# Flexible DERs.
for der_model in der_model_set.flexible_der_models.values():
# Initial state.
in_sample_problem.constraints.append(
in_sample_problem.state_vector[scenario][der_model.der_name][0, :]
==
der_model.state_vector_initial.values
)
# State equation.
in_sample_problem.constraints.append(
in_sample_problem.state_vector[scenario][der_model.der_name][1:, :]
==
cp.transpose(
der_model.state_matrix.values
@ cp.transpose(in_sample_problem.state_vector[scenario][der_model.der_name][:-1, :])
+ der_model.control_matrix.values
@ cp.transpose(in_sample_problem.control_vector[scenario][der_model.der_name][:-1, :])
+ der_model.disturbance_matrix.values
@ np.transpose(der_model.disturbance_timeseries.iloc[:-1, :].values)
)
)
# Output equation.
in_sample_problem.constraints.append(
in_sample_problem.output_vector[scenario][der_model.der_name]
==
cp.transpose(
der_model.state_output_matrix.values
@ cp.transpose(in_sample_problem.state_vector[scenario][der_model.der_name])
+ der_model.control_output_matrix.values
@ cp.transpose(in_sample_problem.control_vector[scenario][der_model.der_name])
+ der_model.disturbance_output_matrix.values
@ np.transpose(der_model.disturbance_timeseries.values)
)
)
# Output limits.
in_sample_problem.constraints.append(
in_sample_problem.output_vector[scenario][der_model.der_name]
>=
der_model.output_minimum_timeseries.values
)
# For PV power plant, adjust maximum generation limit according to scenario.
if der_model.der_type == 'flexible_generator':
output_maximum_timeseries = (
pd.concat([
der_model.active_power_nominal * irradiation_in_sample.loc[:, scenario].rename('active_power'),
der_model.reactive_power_nominal * irradiation_in_sample.loc[:, scenario].rename('reactive_power')
], axis='columns')
)
in_sample_problem.constraints.append(
in_sample_problem.output_vector[scenario][der_model.der_name]
<=
output_maximum_timeseries.replace(np.inf, 1e3).values
)
else:
in_sample_problem.constraints.append(
in_sample_problem.output_vector[scenario][der_model.der_name]
<=
der_model.output_maximum_timeseries.replace(np.inf, 1e3).values
)
# Power mapping.
der_index = int(fledge.utils.get_index(electric_grid_model.ders, der_name=der_model.der_name))
in_sample_problem.constraints.append(
in_sample_problem.der_active_power_vector[scenario][:, [der_index]]
==
cp.transpose(
der_model.mapping_active_power_by_output.values
@ cp.transpose(in_sample_problem.output_vector[scenario][der_model.der_name])
)
)
in_sample_problem.constraints.append(
in_sample_problem.der_reactive_power_vector[scenario][:, [der_index]]
==
cp.transpose(
der_model.mapping_reactive_power_by_output.values
@ cp.transpose(in_sample_problem.output_vector[scenario][der_model.der_name])
)
)
# - Thermal grid power mapping only for DERs which are connected to the thermal grid.
if der_model.der_name in thermal_grid_model.ders.get_level_values('der_name'):
der_index = int(fledge.utils.get_index(thermal_grid_model.ders, der_name=der_model.der_name))
in_sample_problem.constraints.append(
in_sample_problem.der_thermal_power_vector[scenario][:, [der_index]]
==
cp.transpose(
der_model.mapping_thermal_power_by_output.values
@ cp.transpose(in_sample_problem.output_vector[scenario][der_model.der_name])
)
)
# Thermal grid.
# Node head limit.
in_sample_problem.constraints.append(
np.array([node_head_vector_minimum.ravel()])
<=
cp.transpose(
linear_thermal_grid_model.sensitivity_node_head_by_der_power
@ cp.transpose(in_sample_problem.der_thermal_power_vector[scenario])
)
)
# Branch flow limit.
in_sample_problem.constraints.append(
cp.transpose(
linear_thermal_grid_model.sensitivity_branch_flow_by_der_power
@ cp.transpose(in_sample_problem.der_thermal_power_vector[scenario])
)
<=
np.array([branch_flow_vector_maximum.ravel()])
)
# Power balance.
in_sample_problem.constraints.append(
thermal_grid_model.cooling_plant_efficiency ** -1
* (
# in_sample_problem.source_thermal_power_day_ahead
in_sample_problem.source_thermal_power_real_time[scenario]
+ cp.sum(-1.0 * (
in_sample_problem.der_thermal_power_vector[scenario]
), axis=1, keepdims=True) # Sum along DERs, i.e. sum for each timestep.
)
==
cp.transpose(
linear_thermal_grid_model.sensitivity_pump_power_by_der_power
@ cp.transpose(in_sample_problem.der_thermal_power_vector[scenario])
)
)
# Electric grid.
# Voltage limits.
in_sample_problem.constraints.append(
np.array([node_voltage_magnitude_vector_minimum.ravel()])
<=
np.array([np.abs(linear_electric_grid_model.power_flow_solution.node_voltage_vector.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
@ cp.transpose(
in_sample_problem.der_active_power_vector[scenario]
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
@ cp.transpose(
in_sample_problem.der_reactive_power_vector[scenario]
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
)
in_sample_problem.constraints.append(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.node_voltage_vector.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
@ cp.transpose(
in_sample_problem.der_active_power_vector[scenario]
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
@ cp.transpose(
in_sample_problem.der_reactive_power_vector[scenario]
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
<=
np.array([node_voltage_magnitude_vector_maximum.ravel()])
)
# Branch flow limits.
in_sample_problem.constraints.append(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_1.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_active
@ cp.transpose(
in_sample_problem.der_active_power_vector[scenario]
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_reactive
@ cp.transpose(
in_sample_problem.der_reactive_power_vector[scenario]
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
<=
np.array([branch_power_magnitude_vector_maximum.ravel()])
)
in_sample_problem.constraints.append(
np.array([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_2.ravel())])
+ cp.transpose(
linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_active
@ cp.transpose(
in_sample_problem.der_active_power_vector[scenario]
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_reactive
@ cp.transpose(
in_sample_problem.der_reactive_power_vector[scenario]
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
<=
np.array([branch_power_magnitude_vector_maximum.ravel()])
)
# Power balance.
in_sample_problem.constraints.append(
in_sample_problem.source_active_power_day_ahead
+ in_sample_problem.source_active_power_real_time[scenario]
+ cp.sum(-1.0 * (
in_sample_problem.der_active_power_vector[scenario]
), axis=1, keepdims=True) # Sum along DERs, i.e. sum for each timestep.
- (
in_sample_problem.source_thermal_power_real_time[scenario]
* thermal_grid_model.cooling_plant_efficiency ** -1
)
==
np.real(linear_electric_grid_model.power_flow_solution.loss)
+ cp.transpose(
linear_electric_grid_model.sensitivity_loss_active_by_der_power_active
@ cp.transpose(
in_sample_problem.der_active_power_vector[scenario]
- np.array([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
+ linear_electric_grid_model.sensitivity_loss_active_by_der_power_reactive
@ cp.transpose(
in_sample_problem.der_reactive_power_vector[scenario]
- np.array([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector.ravel())])
)
)
)
# Define objective.
# Define variables for the objective components for convenience.
in_sample_problem.objective_day_ahead = cp.Variable((1,))
in_sample_problem.objective_real_time = cp.Variable((len(in_sample_scenarios,)))
# Day-ahead.
in_sample_problem.constraints.append(
in_sample_problem.objective_day_ahead
==
# (
# price_data_day_ahead.price_timeseries.loc[:, ('active_power', 'source', 'source')].values.T
# @ in_sample_problem.source_thermal_power_day_ahead
# * thermal_grid_model.cooling_plant_efficiency ** -1
# )
(
price_data_day_ahead.price_timeseries.loc[:, ('active_power', 'source', 'source')].values.T
@ in_sample_problem.source_active_power_day_ahead
)
)
in_sample_problem.objective += cp.sum(in_sample_problem.objective_day_ahead)
# Real-time.
for scenario_index, scenario in enumerate(in_sample_scenarios):
in_sample_problem.constraints.append(
in_sample_problem.objective_real_time[scenario_index]
==
# (
# price_data_real_time.price_timeseries.loc[:, ('active_power', 'source', 'source')].values.T
# @ in_sample_problem.source_thermal_power_real_time[scenario]
# * thermal_grid_model.cooling_plant_efficiency ** -1
# )
(
price_data_real_time.price_timeseries.loc[:, ('active_power', 'source', 'source')].values.T
@ in_sample_problem.source_active_power_real_time[scenario]
)
)
if scenarios_probability_weighted:
in_sample_problem.objective += (
(np.sum(irradiation_in_sample_mapping == scenario) / len(irradiation_in_sample_mapping))
* cp.sum(in_sample_problem.objective_real_time[scenario_index])
)
else:
in_sample_problem.objective += (
len(in_sample_scenarios) ** -1 # Assuming equal probability.
* cp.sum(in_sample_problem.objective_real_time[scenario_index])
)
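        # Note (added for clarity): both branches accumulate the expected
        # real-time cost E[cost] = sum_s p_s * cost_s. With probability
        # weighting, p_s is the empirical frequency of scenario s in the
        # in-sample irradiation mapping; otherwise uniform 1/N weights are assumed.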
# Solve problem.
in_sample_time = -1.0 * time.time()
in_sample_problem.solve()
in_sample_time += time.time()
# Obtain results.
in_sample_objective_day_ahead = (
pd.Series(in_sample_problem.objective_day_ahead.value, index=['total'])
)
in_sample_objective_real_time = (
pd.Series(in_sample_problem.objective_real_time.value, index=in_sample_scenarios)
)
# in_sample_source_thermal_power_day_ahead = (
# pd.DataFrame(in_sample_problem.source_thermal_power_day_ahead.value, index=timesteps, columns=['total'])
# )
in_sample_source_active_power_day_ahead = (
pd.DataFrame(in_sample_problem.source_active_power_day_ahead.value, index=timesteps, columns=['total'])
)
in_sample_source_thermal_power_real_time = pd.DataFrame(0.0, index=timesteps, columns=in_sample_scenarios)
in_sample_source_active_power_real_time = | pd.DataFrame(0.0, index=timesteps, columns=in_sample_scenarios) | pandas.DataFrame |
import math
from datetime import timedelta, datetime
from enum import Enum
from typing import Optional
import numpy as np
import pandas as pd
from feast import FeatureView, Feature, ValueType, FeatureStore
from feast.data_source import DataSource
from pytz import FixedOffset, timezone, utc
DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL = "event_timestamp"
class EventTimestampType(Enum):
TZ_NAIVE = 0
TZ_AWARE_UTC = 1
TZ_AWARE_FIXED_OFFSET = 2
TZ_AWARE_US_PACIFIC = 3
def _convert_event_timestamp(event_timestamp: pd.Timestamp, t: EventTimestampType):
if t == EventTimestampType.TZ_NAIVE:
return event_timestamp
elif t == EventTimestampType.TZ_AWARE_UTC:
return event_timestamp.replace(tzinfo=utc)
elif t == EventTimestampType.TZ_AWARE_FIXED_OFFSET:
return event_timestamp.replace(tzinfo=utc).astimezone(FixedOffset(60))
elif t == EventTimestampType.TZ_AWARE_US_PACIFIC:
return event_timestamp.replace(tzinfo=utc).astimezone(timezone("US/Pacific"))
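# Illustrative sketch (not part of the original module): converting one
# arbitrary timestamp into each supported flavor.
def _example_convert_all_flavors():
    ts = pd.Timestamp("2021-03-17 19:31:15")
    return {t.name: _convert_event_timestamp(ts, t) for t in EventTimestampType}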
def create_orders_df(
customers,
drivers,
start_date,
end_date,
order_count,
infer_event_timestamp_col=False,
) -> pd.DataFrame:
"""
Example df generated by this function:
| order_id | driver_id | customer_id | order_is_success | event_timestamp |
+----------+-----------+-------------+------------------+---------------------+
| 100 | 5004 | 1007 | 0 | 2021-03-10 19:31:15 |
| 101 | 5003 | 1006 | 0 | 2021-03-11 22:02:50 |
| 102 | 5010 | 1005 | 0 | 2021-03-13 00:34:24 |
| 103 | 5010 | 1001 | 1 | 2021-03-14 03:05:59 |
"""
df = pd.DataFrame()
df["order_id"] = [order_id for order_id in range(100, 100 + order_count)]
df["driver_id"] = np.random.choice(drivers, order_count)
df["customer_id"] = np.random.choice(customers, order_count)
df["order_is_success"] = np.random.randint(0, 2, size=order_count).astype(np.int32)
if infer_event_timestamp_col:
df["e_ts"] = [
_convert_event_timestamp(
pd.Timestamp(dt, unit="ms", tz="UTC").round("ms"),
EventTimestampType(3),
)
for idx, dt in enumerate(
pd.date_range(start=start_date, end=end_date, periods=order_count)
)
]
df.sort_values(
by=["e_ts", "order_id", "driver_id", "customer_id"], inplace=True,
)
else:
df[DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL] = [
_convert_event_timestamp(
pd.Timestamp(dt, unit="ms", tz="UTC").round("ms"),
EventTimestampType(idx % 4),
)
for idx, dt in enumerate(
pd.date_range(start=start_date, end=end_date, periods=order_count)
)
]
df.sort_values(
by=[
DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL,
"order_id",
"driver_id",
"customer_id",
],
inplace=True,
)
return df
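# Illustrative call (not from the original module); the ID ranges and dates
# below are arbitrary placeholder values:
#   orders_df = create_orders_df(
#       customers=range(1001, 1011), drivers=range(5001, 5011),
#       start_date="2021-03-10", end_date="2021-03-20", order_count=100)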
def create_driver_hourly_stats_df(drivers, start_date, end_date) -> pd.DataFrame:
"""
Example df generated by this function:
| event_timestamp | driver_id | conv_rate | acc_rate | avg_daily_trips | created |
|------------------+-----------+-----------+----------+-----------------+------------------|
| 2021-03-17 19:31 | 5010 | 0.229297 | 0.685843 | 861 | 2021-03-24 19:34 |
| 2021-03-17 20:31 | 5010 | 0.781655 | 0.861280 | 769 | 2021-03-24 19:34 |
| 2021-03-17 21:31 | 5010 | 0.150333 | 0.525581 | 778 | 2021-03-24 19:34 |
| 2021-03-17 22:31 | 5010 | 0.951701 | 0.228883 | 570 | 2021-03-24 19:34 |
| 2021-03-17 23:31 | 5010 | 0.819598 | 0.262503 | 473 | 2021-03-24 19:34 |
| | ... | ... | ... | ... | |
| 2021-03-24 16:31 | 5001 | 0.061585 | 0.658140 | 477 | 2021-03-24 19:34 |
| 2021-03-24 17:31 | 5001 | 0.088949 | 0.303897 | 618 | 2021-03-24 19:34 |
| 2021-03-24 18:31 | 5001 | 0.096652 | 0.747421 | 480 | 2021-03-24 19:34 |
| 2021-03-17 19:31 | 5005 | 0.142936 | 0.707596 | 466 | 2021-03-24 19:34 |
| 2021-03-17 19:31 | 5005 | 0.142936 | 0.707596 | 466 | 2021-03-24 19:34 |
"""
df_hourly = pd.DataFrame(
{
"event_timestamp": [
pd.Timestamp(dt, unit="ms", tz="UTC").round("ms")
for dt in pd.date_range(
start=start_date, end=end_date, freq="1H", closed="left"
)
]
# include a fixed timestamp for get_historical_features in the quickstart
+ [
pd.Timestamp(
year=2021, month=4, day=12, hour=7, minute=0, second=0, tz="UTC"
)
]
}
)
df_all_drivers = pd.DataFrame()
for driver in drivers:
df_hourly_copy = df_hourly.copy()
df_hourly_copy["driver_id"] = driver
df_all_drivers = pd.concat([df_hourly_copy, df_all_drivers])
df_all_drivers.reset_index(drop=True, inplace=True)
rows = df_all_drivers["event_timestamp"].count()
df_all_drivers["conv_rate"] = np.random.random(size=rows).astype(np.float32)
df_all_drivers["acc_rate"] = np.random.random(size=rows).astype(np.float32)
df_all_drivers["avg_daily_trips"] = np.random.randint(0, 1000, size=rows).astype(
np.int32
)
df_all_drivers["created"] = pd.to_datetime(pd.Timestamp.now(tz=None).round("ms"))
# Create duplicate rows that should be filtered by created timestamp
    # TODO: These duplicate rows are indirectly being filtered out by the point in time join already. We need to
# inject a bad row at a timestamp where we know it will get joined to the entity dataframe, and then test that
# we are actually filtering it with the created timestamp
late_row = df_all_drivers[rows // 2 : rows // 2 + 1]
df_all_drivers = pd.concat([df_all_drivers, late_row, late_row], ignore_index=True)
return df_all_drivers
def create_customer_daily_profile_df(customers, start_date, end_date) -> pd.DataFrame:
"""
Example df generated by this function:
| event_timestamp | customer_id | current_balance | avg_passenger_count | lifetime_trip_count | created |
|------------------+-------------+-----------------+---------------------+---------------------+------------------|
| 2021-03-17 19:31 | 1010 | 0.889188 | 0.049057 | 412 | 2021-03-24 19:38 |
| 2021-03-18 19:31 | 1010 | 0.979273 | 0.212630 | 639 | 2021-03-24 19:38 |
| 2021-03-19 19:31 | 1010 | 0.976549 | 0.176881 | 70 | 2021-03-24 19:38 |
| 2021-03-20 19:31 | 1010 | 0.273697 | 0.325012 | 68 | 2021-03-24 19:38 |
| 2021-03-21 19:31 | 1010 | 0.438262 | 0.313009 | 192 | 2021-03-24 19:38 |
| | ... | ... | ... | ... | |
| 2021-03-19 19:31 | 1001 | 0.738860 | 0.857422 | 344 | 2021-03-24 19:38 |
| 2021-03-20 19:31 | 1001 | 0.848397 | 0.745989 | 106 | 2021-03-24 19:38 |
| 2021-03-21 19:31 | 1001 | 0.301552 | 0.185873 | 812 | 2021-03-24 19:38 |
| 2021-03-22 19:31 | 1001 | 0.943030 | 0.561219 | 322 | 2021-03-24 19:38 |
| 2021-03-23 19:31 | 1001 | 0.354919 | 0.810093 | 273 | 2021-03-24 19:38 |
"""
df_daily = pd.DataFrame(
{
"event_timestamp": [
| pd.Timestamp(dt, unit="ms", tz="UTC") | pandas.Timestamp |
import enum
import pandas as pd
import copy
from . import Parser
from .rule import Rule
class NormType(enum.Enum):
RAW = 0
NORM = 1
BASE = 2
class Path:
def __init__(self, word, base, rule):
self.word = word
self.base = base
self.rule = rule
def __repr__(self):
return "<Path:{} {}>".format(self.word, repr(self.rule))
class MorphemeMerger:
def __init__(self, mecab_args=''):
self.rule = None
self.mecab_args = mecab_args
def get_rule_pattern(self, text, norm=NormType.NORM,
skip=True):
"""
:param str text: Target text
:param NormType norm:
:return: (word, poss)
"""
parser = Parser(mecab_args=self.mecab_args)
morphemes = parser.parse(text)
i = 0
words = []
poss = []
n = len(morphemes)
while i < n:
paths, _i = self._rec_tree_check(morphemes, i, norm=norm)
if paths is not None:
if skip:
i = _i
else:
i += 1
words.append(''.join([path.word for path in paths]))
poss.append([path.rule.poss for path in paths])
else:
i += 1
return words, poss
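    # Usage sketch (illustrative, not from the original source):
    #   mm = MorphemeMerger()
    #   mm.set_rule_from_csv('rules.csv')  # 'rules.csv' is a hypothetical path
    #   words, poss = mm.get_rule_pattern('some input text')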
def _default_noun_rule(self):
root = {}
rule = Rule(['名詞', 'nan', 'nan', 'nan', 'nan'])
root[rule] = {rule: {rule: {None: None}}}
return root
def set_rule_from_csv(self, rule_file_path, sep=','):
"""Create rule tree from csv file.
:param rule_file_path: Rule file path
:param str sep: default=','
:return: None
"""
rules = | pd.read_csv(rule_file_path, sep=sep) | pandas.read_csv |
import argparse
import glob
import csv
import numpy as np
import pandas as pd
def parse_arguments():
parser = argparse.ArgumentParser("Trains a simple BiLSTM to detect sentential arguments across multiple topics.")
parser.add_argument("--data", type=str, help="The path to the folder containing the TSV files with the training data.")
return parser.parse_args()
def read_data(data_path):
data = | pd.read_csv(data_path, sep="\t", names=["motion", "hypothesis", "evidence", "evidenceclass"], index_col=0) | pandas.read_csv |
# This file contains the functions used to fit the PV curves
# and other useful helper functions
############################################################### LIBRARIES:
import numpy as np # for math and matrix manipulation
import pandas as pd # for building DataFrames (database-style tables)
from pathlib import Path # for working with directories and files
import pickle # for saving and loading data
import matplotlib.pyplot as plt # for plots
import seaborn as sns # for plots with DataFrames
from scipy.optimize import curve_fit # for fitting the model curves
import math # for erf()
from scipy.interpolate import interp1d # for interpolating the PV points
############################################################### MODELS:
# function used to fit the sigmoid PV model (diseased lung)
#                      b                           b
# V(x) = a + --------------------- = a + ------------------------
#             1 + exp(-(x-c)/d)           1 + exp(-x/d)*exp(c/d)
#
# lim (x -> inf) V(x) = a + b
def sigmoidvenegas1(x, a, b, c, d):
return a + b/(1 + np.exp(-(x-c)/d))
########## paiva
def sigmoidpaiva(x,TLC,k1,k2):
return TLC/(1+(k1*np.exp(-k2*x)))
# our modification: including an offset
def sigmoidpaivaoffset1(x,TLC,k1,k2,offset):
return TLC/(1+(k1*np.exp(-k2*x))) + offset
# baseado no artigo original do paiva1975, e incluindo offset:
def sigmoidpaivaoffset(x,TLC,k1,k2,offset):
return TLC/(1+(k1*TLC*np.exp(-k2*x))) + offset
######### venegas2
def sigmoidvenegas2(x,TLC,B,k,c,d):
return (TLC-(B*np.exp(-k*x)))/(1 + np.exp(-(x-c)/d))
# our modification: including an offset
def sigmoidvenegas2offset(x,TLC,B,k,c,d,offset):
return (TLC-(B*np.exp(-k*x)))/(1 + np.exp(-(x-c)/d)) + offset
# original sign: incorrect, since here when P -> c, V -> infinity
def sigmoidvenegas2original(x,TLC,B,k,c,d):
return (TLC-(B*np.exp(-k*x)))/(1 - np.exp(-(x-c)/d))
######### murphy and engel
def sigmoidmurphy(x,VM,Vm,k1,k2,k3): ### CAUTION: P = f(V) !!!
    return ( k1/(VM-x) ) + ( k2/(Vm-x) ) + k3
# our modification: including an offset
def sigmoidmurphyoffset(x,TLC,offset,k1,k2,k3): ### CAUTION: P = f(V) !!!
return ( k1/((TLC+offset)-x) ) + ( k2/(offset-x) ) + k3
######### recruit_unit
# Simple exponential model of the pulmonary PV curve (Salazar 1964)
# Volume = Vmax*(1-e^(-K*Paw))
# Paw = airway pressure
# K = 'time constant' of the exponential
def expsalazar(x,Vo,K):
return Vo*(1-np.exp(-K*x))
# recruited-units model using erf()
# adapting the function for an array input (for curve_fit)
def meu_erf_vec(Paw,mi,sigma):
saida_lst = []
for x_in in Paw:
x = (x_in-mi)/(sigma*1.5)
merf = math.erf(x)
saida_lst.append((merf/2)+0.5)
return np.array(saida_lst)
# model proposed by our group
def sigmoid_recruit_units(Paw,K,Vmax,mi,sigma,offset):
Vmax_recrutado = Vmax*meu_erf_vec(Paw,mi,sigma)
V = Vmax_recrutado*(1-np.exp(-K*Paw)) + offset
return V
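# Illustrative sketch (not from the original file): how one of the models above
# can be fitted with scipy's curve_fit. The initial guess p0 is a made-up
# heuristic, not a validated choice.
def _example_fit_sigmoidvenegas1(pressures, volumes):
    # p0 = [a, b, c, d]: baseline volume, amplitude, inflection pressure, slope
    p0 = [np.min(volumes), np.max(volumes) - np.min(volumes),
          np.mean(pressures), 1.0]
    popt, pcov = curve_fit(sigmoidvenegas1, pressures, volumes, p0=p0, maxfev=10000)
    return popt, pcov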
############################################################### FUNCTIONS:
'''
Loads the .pickle files from the subfolders of the './porquinhos/' folder
and returns a DataFrame with the data.
C maneuvers contain only 4 steps, and D maneuvers only 5 steps.
'''
def carrega_pickles(folder = 'porquinhos'):
    dataframes_lst = [] # list of dataframes: each element corresponds to the dataframe of one pig/maneuver/PV dataset
for file_name in Path(folder).rglob('*.pickle'):
print(f"\rLendo {file_name.name}\t\t\t")
with open(file_name, "rb") as file: # abre o arquivo.pickle
porquinho = pickle.load(file)
        for manobra in porquinho: #For each maneuver
            if manobra == "D": # Can use 3, 4 or 5 steps
                n_steps = 5
            elif manobra == "C": # Can use 3 or 4 steps
                n_steps = 4
            elif manobra == "B": # Can use 3 steps
                n_steps = 3
            # Format the input data
format_data = []
for pi, pe, wi, we in zip(porquinho[manobra]["p_i"], porquinho[manobra]["p_e"],
porquinho[manobra]["w_i"], porquinho[manobra]["w_e"]):
format_data.extend([pi,wi,pe,we])
            format_data = np.array(format_data).reshape(-1,2) # build a matrix of N rows and 2 columns
##########################################################
caso = []
caso.append(porquinho.name)
caso.append(manobra)
caso.append(format_data)
caso.append(n_steps)
casodf = pd.DataFrame(caso, index = ['Animal', 'Manobra', 'Dados', 'n_steps']).T
dataframes_lst.append(casodf)
    # Concatenate all the dataframes in the list into a single DataFrame:
dadosdf = pd.concat(dataframes_lst, ignore_index=True)
    # Extract the pressure and volume data from the raw data in the pickle files:
pv_lst = []
for idx,caso in dadosdf.iterrows():
pv = []
ps,vs = Data2PV(caso.Dados)
pv.append(ps)
pv.append(vs)
pvdf = pd.DataFrame([pv], columns = ['Pressoes', 'Volumes'])
pv_lst.append(pvdf)
pvdf_all = | pd.concat(pv_lst, ignore_index=True) | pandas.concat |
#!/usr/bin/env python3
import h5py
import os
import pandas as pd
import tempfile
import random
import math
from tqdm import tqdm
import numpy as np
import time
import cdt
# cdt.SETTINGS
import networkx as nx
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier as CLF
import torch
from torch import nn, optim
from cdt.data import load_dataset
from cdt.causality.pairwise import ANM, NCC, RCC
from cdt.causality.graph import GES, LiNGAM, PC, SAM, CAM
from baseline_common import compute_metrics
def test_networkx():
g = nx.DiGraph() # initialize a directed graph
l = list(g.nodes()) # list of nodes in the graph
a = nx.adj_matrix(g).todense() # Output the adjacency matrix of the graph
e = list(g.edges()) # list of edges in the graph
def test_ANM():
data, labels = load_dataset('tuebingen')
obj = ANM()
# This example uses the predict() method
# NOTE This is too slow
output = obj.predict(data)
# This example uses the orient_graph() method. The dataset used
# can be loaded using the cdt.data module
data, graph = load_dataset('sachs')
output = obj.orient_graph(data, nx.DiGraph(graph))
# To view the directed graph run the following command
nx.draw_networkx(output, font_size=8)
plt.show()
def test_NCC():
data, labels = load_dataset('tuebingen')
X_tr, X_te, y_tr, y_te = train_test_split(data, labels, train_size=.5)
obj = NCC()
obj.fit(X_tr, y_tr)
# This example uses the predict() method
output = obj.predict(X_te)
# NOTE: I'll need to compare with this
# This example uses the orient_graph() method. The dataset used
# can be loaded using the cdt.data module
data, graph = load_dataset("sachs")
output = obj.orient_graph(data, nx.Graph(graph))
#To view the directed graph run the following command
nx.draw_networkx(output, font_size=8)
plt.show()
def test():
os.chdir('julia/src')
fname = 'data/SF-10/d=10_k=1_gtype=SF_noise=Gaussian_mat=COR.hdf5'
f = h5py.File(fname, 'r')
raw_x = f['raw_x']
raw_y = f['raw_y']
x = raw_x[0]
y = raw_y[0]
df = pd.DataFrame(x.transpose())
# 1. make training corpus
train_dfx, train_dfy, test_dfx, test_dfy = construct_df(raw_x, raw_y)
# train
# test for performance
#
# CAUTION this is the training corpus
#
# FIXME this is even slower than training
sum(np.array(test_dfy ==1))
test_dfx[np.array(test_dfy == 1).ravel()]
pred = obj.predict(test_dfx[np.array(test_dfy == 1).ravel()].sample(20))
pred = obj.predict(test_dfx[np.array(test_dfy == 0).ravel()].sample(20))
pred = obj.predict(train_dfx[np.array(train_dfy == 1).ravel()].sample(20))
pred = obj.predict(train_dfx[np.array(train_dfy == 0).ravel()].sample(20))
# NOTE: it cannot even fit the training data well
#
# TODO I'm going to implement a neural network to fit the feature vector
obj.predict(aug_dfx[np.array(aug_dfy == 1).ravel()].sample(20))
obj.predict(aug_dfx[np.array(aug_dfy == 0).ravel()].sample(20))
obj.predict_NN_preprocess(aug_dfx[np.array(aug_dfy == 1).ravel()].sample(20))
obj.predict_NN_preprocess(aug_dfx[np.array(aug_dfy == 0).ravel()].sample(20))
pred = obj.predict(test_dfx[0:10])
type(pred)
pred_v = np.array(pred).reshape(-1)
    y_v = np.array(dfy).reshape(-1)
    # wrong for about half of the training data
    (pred_v == y_v)
tmp = [obj.featurize_row(row.iloc[0],
row.iloc[1]) for idx, row in aug_dfx[0:8].iterrows()]
train = np.array([obj.featurize_row(row.iloc[0],
row.iloc[1])
for idx, row in aug_dfx.iterrows()])
class MyRCC(RCC):
def __init__(self):
super().__init__()
def preprocess(self, dfx, dfy):
        # this is very slow, so I'm adding a separate method for computing this
print('constructing x (featurizing might be very slow) ..')
# FIXME this is very slow
x = np.vstack((np.array([self.featurize_row(row.iloc[0],
row.iloc[1])
for idx, row in dfx.iterrows()]),))
print(x.shape)
print('constructing labels ..')
y = np.vstack((dfy,)).ravel()
return x, y
def fit(self, x, y):
# CAUTION this x and y should not be dataframe, but preprocessed above
print('training CLF ..')
verbose = 1 if self.verbose else 0
        # FIXME: the training data is also very imbalanced
self.clf = CLF(verbose=verbose,
min_samples_leaf=self.L,
n_estimators=self.E,
max_depth=self.max_depth,
n_jobs=self.njobs).fit(x, y)
def fit_NN(self, x, y, num_epochs=1000):
d = x.shape[1]
# tx = torch.Tensor(x)
# ty = torch.Tensor(y).type(torch.long)
# ty = torch.Tensor(y)
model = nn.Sequential(nn.Linear(d, 100),
nn.Sigmoid(),
nn.Linear(100, 1),
nn.Sigmoid())
self.fc = model
# fit the fc model
opt = optim.Adam(model.parameters(), lr=1e-3)
        # FIXME whether to apply sigmoid first for this loss?
        # FIXME binary or n-class?
        # loss_fn = nn.CrossEntropyLoss()
        # FIXME this requires y to be float
        # FIXME does this need sigmoid?
loss_fn = nn.BCELoss()
for i in tqdm(range(num_epochs)):
            outputs = model(torch.Tensor(x))  # the model already ends in Sigmoid; avoid applying it twice
loss = loss_fn(outputs, torch.unsqueeze(torch.Tensor(y), 1))
opt.zero_grad()
loss.backward()
opt.step()
running_loss = loss.item()
# UPDATE disabled because this is distracting from tqdm progressbar
# if i % 100 == 0:
# print('running loss:', running_loss)
def predict(self, npx):
_dfx = pd.DataFrame(npx.transpose())
_dfx = construct_df_mat(_dfx)
print('featurizing x ..')
_x = np.vstack((np.array([self.featurize_row(row.iloc[0],
row.iloc[1])
for idx, row in _dfx.iterrows()]),))
# run on this
mat = self.clf.predict(_x)
# FIXME change this into a adjacency matrix
d = int(math.sqrt(mat.shape[0]))
mat = mat.reshape(d, d)
# set diagonal to 0
np.diag(mat)
np.fill_diagonal(mat, 0)
# TODO return networkx graph instance
graph = nx.DiGraph(mat)
# (outputs.squeeze() > 0.5).numpy().astype(np.int)
return graph
def predict_NN(self, npx):
# I want a whole graph to be predicted
# npx = test_x[0]
_dfx = pd.DataFrame(npx.transpose())
_dfx = construct_df_mat(_dfx)
print('featurizing x ..')
_x = np.vstack((np.array([self.featurize_row(row.iloc[0],
row.iloc[1])
for idx, row in _dfx.iterrows()]),))
# I'll directly return the adjacency matrix
# FIXME or just return a networkx graph instance?
tx = torch.Tensor(_x)
outputs = self.fc(tx).detach().numpy()
mat = outputs.squeeze() > 0.5
# FIXME change this into a adjacency matrix
d = int(math.sqrt(mat.shape[0]))
mat = mat.reshape(d, d)
# set diagonal to 0
np.diag(mat)
np.fill_diagonal(mat, 0)
# TODO return networkx graph instance
graph = nx.DiGraph(mat)
# (outputs.squeeze() > 0.5).numpy().astype(np.int)
return graph
def balance_df(dfx, dfy):
dfx.shape
dfy.shape
    # about 10% of the labels are 1; rebalance towards 50% by duplicating the positive rows
sum(np.array(dfy == 1).ravel())
one_index = np.array(dfy == 1)
zero_index = np.array(dfy == 0)
    # OPTION 1: duplicate the positive rows. However, the trained model still
    # seems biased towards 0, even on training data, probably because of the
    # duplicated data; OPTION 2 below instead trains on a smaller dataset.
aug_dfx = dfx.append([dfx[one_index]] * 9)
aug_dfy = dfy.append([dfy[one_index]] * 9)
# OPTION 2: reduce the number of 0 labels
num_1 = len(dfx[one_index])
num_0 = len(dfx[zero_index])
num_1
num_0
sample_index = random.sample(range(num_0), num_1)
aug_dfx = dfx[zero_index].take(sample_index).append(dfx[one_index])
aug_dfy = dfy[zero_index].take(sample_index).append(dfy[one_index])
aug_dfx.shape
aug_dfy.shape
sum(np.array(aug_dfy == 1))
return aug_dfx, aug_dfy
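# Note (added for clarity): OPTION 2 above is plain random under-sampling of
# the majority class; ready-made samplers such as imbalanced-learn's
# RandomUnderSampler implement the same idea and could replace this
# hand-rolled version.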
def construct_df_mat(x):
dfx = pd.DataFrame(columns={'A', 'B'})
d = x.shape[1]
ct = 0
for a in range(d):
for b in range(d):
name = "pair{}".format(ct)
ct+=1
dfx.loc[name] = pd.Series({'A': np.array(x[a]),
'B': np.array(x[b])})
return dfx
def construct_df(raw_x, raw_y):
# the data format should be:
#
# X: cols: variables
# rows: name: pairID, value: vector for each variable
# Y: cols: target, 0 or 1
# rows: name: pairID, it should be "whether there's an edge from A to B?"
#
    # UPDATE: the internals automatically train on the reversed edge using -y.
    # I can add 0 as a label, and the reverse will be 0 as well; that
    # duplicated training should be fine.
#
# I'll use 10 graphs from raw_x to make training pairs, and use the 10
# graphs for testing
#
ct = 0
dfx = | pd.DataFrame(columns={'A', 'B'}) | pandas.DataFrame |
# coding: utf-8
# # --------------------------- Libraries Used ---------------------------
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
from sklearn import tree
from sklearn.svm import SVC
import sklearn.metrics as metrics
import sklearn.preprocessing as preprocessing
import pandas as pd
import numpy as np
pd.options.display.max_rows = 999
# # --------------------------- Read Files ---------------------------
# In[2]:
# Load in the data with `read_csv()`
horsesDataSet = pd.read_csv('horse.csv', header=0, delimiter=',')
horsesDataSetTest = pd.read_csv("horseTest.csv", header=0, delimiter=',')
#description of dataSet
descriptionHorsesDataSet = horsesDataSet.describe(include='all')
descriptionHorsesDataSetTest = horsesDataSetTest.describe(include='all')
# In[3]:
print('Training set (file horse.csv)\n')
descriptionHorsesDataSet
# In[4]:
print('\nTest set (file horseTest.csv)\n')
descriptionHorsesDataSetTest
# # --------------------------- Exploratory analysis ---------------------------
# In[5]:
#first 5 and last 5 entries in dataSet
firstRowsDataSet = horsesDataSet.head(5)
lastRowsDataSet = horsesDataSet.tail(5)
# In[6]:
firstRowsDataSet
# In[7]:
lastRowsDataSet
# In[8]:
# sampling data
# Take a sample of 5
horsesDataSetSample = horsesDataSet.sample(5)
horsesDataSetSample
# In[9]:
#Nulls
result = pd.isnull(horsesDataSet)
result
# # --------------------------- Pre processing ---------------------------
# In[10]:
# iterate through each attribute and compute the percentage of missing values
# populate array with zeros with column dimensions of dataset
qtd_nan = [0 for x in range(horsesDataSet.shape[1])]
# populate array with zeros with column dimensions of dataset
qtd_total = [0 for x in range(horsesDataSet.shape[1])]
i = 0
while i < horsesDataSet.shape[1]:
# get array of boolean describing each line as null or not for i attribute
attributeLinesIsNA = pd.isna(horsesDataSet.iloc[:, i])
# get current attribute label name
currentAttributeLabel = list(horsesDataSet)[i]
qtd_nan[i] = horsesDataSet.loc[attributeLinesIsNA, currentAttributeLabel].shape[0]
qtd_total[i] = horsesDataSet.loc[:, currentAttributeLabel].shape[0]
i = i+1
percentageArray = np.divide(qtd_nan, qtd_total)
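# Note (added for clarity): the loop above is equivalent to the vectorized
# expression horsesDataSet.isna().mean(), which yields the per-column
# fraction of missing values directly.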
# In[11]:
# dropping atributes
threshold = 0.5
PreProcessedHorseDataSet = horsesDataSet
PreProcessedHorseDataSetTest = horsesDataSetTest
i = 0
while i < horsesDataSet.shape[1]:
if percentageArray[i] > threshold:
# get current attribute label name
currentAttributeLabel = list(horsesDataSet)[i]
# drop attribute column if na values > threshold
PreProcessedHorseDataSet = PreProcessedHorseDataSet.drop(columns=currentAttributeLabel)
#drop from test
PreProcessedHorseDataSetTest = PreProcessedHorseDataSetTest.drop(columns=currentAttributeLabel)
i = i + 1
# In[12]:
# fill remaining lines with mean values (only numerical)
PreProcessedHorseDataSet = PreProcessedHorseDataSet.fillna(horsesDataSet.mean())
#PreProcessedHorseDataSetTest = PreProcessedHorseDataSetTest.fillna(horsesDataSetTest.mean())
# Show Statistics of DataSet
StatisticsPreProcessedHorseDataSet = PreProcessedHorseDataSet.describe(include='all')
# Altering categorical missing values to the mode value (the value that appears most often)
i = 0
while i < PreProcessedHorseDataSet.shape[1]:
# return the most frequent value (first index because mode() returns a DataFrame)
attributeMode = PreProcessedHorseDataSet.mode().iloc[0, i]
currentAttributeLabel = list(PreProcessedHorseDataSet)[i]
PreProcessedHorseDataSet[currentAttributeLabel] = PreProcessedHorseDataSet[currentAttributeLabel].fillna(attributeMode)
i = i+1
# Altering missing values [DATASET TEST]
#Saving values from train to insert into TEST with variable v
v = [0 for x in range(horsesDataSet.shape[1])]
i=0
while i < PreProcessedHorseDataSet.shape[1]:
if PreProcessedHorseDataSet.dtypes[i] == 'O':
v[i] = PreProcessedHorseDataSet.mode().iloc[0, i]
else:
        v[i] = PreProcessedHorseDataSet.iloc[:, i].mean()  # column mean, not a single cell
currentAttributeLabel = list(PreProcessedHorseDataSetTest)[i]
PreProcessedHorseDataSetTest[currentAttributeLabel] = PreProcessedHorseDataSetTest[currentAttributeLabel].fillna(v[i])
i = i+1
#i = 0
#while i < PreProcessedHorseDataSetTest.shape[1]:
# attributeMode = PreProcessedHorseDataSetTest.mode().iloc[0, i]
# currentAttributeLabel = list(PreProcessedHorseDataSetTest)[i]
# PreProcessedHorseDataSetTest[currentAttributeLabel] = PreProcessedHorseDataSetTest[currentAttributeLabel].fillna(attributeMode)
# i = i+1
# In[13]:
# categorical attribute binarization
categoricalHorseDataSet = PreProcessedHorseDataSet.select_dtypes(include='object')
categoricalHorseDataSet = categoricalHorseDataSet.drop('outcome', axis=1)
categoricalHorseDataSetDummy = pd.get_dummies(categoricalHorseDataSet)
PreProcessedHorseDataSet = pd.concat([categoricalHorseDataSetDummy, PreProcessedHorseDataSet.loc[:, 'outcome']], axis=1)
# categorical attribute binarization [DATASET TEST]
categoricalHorseDataSetTest = PreProcessedHorseDataSetTest.select_dtypes(include='object')
categoricalHorseDataSetTest = categoricalHorseDataSetTest.drop('outcome', axis=1)
categoricalHorseDataSetDummy = pd.get_dummies(categoricalHorseDataSetTest)
PreProcessedHorseDataSetTest = pd.concat([categoricalHorseDataSetDummy, PreProcessedHorseDataSetTest.loc[:, 'outcome']], axis=1)
# In[14]:
# Change values from euthanized to died
AttributesHorseDataSet = PreProcessedHorseDataSet.drop('outcome', axis=1)
TargetHorseDataSet = PreProcessedHorseDataSet.loc[:, 'outcome']
# mapping 'euthanized' values to 'died' to tune fitting
TargetHorseDataSet = TargetHorseDataSet.map(lambda x: 'died' if x == 'euthanized' else x)
PreProcessedHorseDataSet = pd.concat([AttributesHorseDataSet, TargetHorseDataSet], axis=1)
# Change values from euthanized to died [DATASET TEST]
AttributesHorseDataSetTest = PreProcessedHorseDataSetTest.drop('outcome', axis=1)
TargetHorseDataSetTest = PreProcessedHorseDataSetTest.loc[:, 'outcome']
# mapping 'euthanized' values to 'died' to tune fitting
TargetHorseDataSetTest = TargetHorseDataSetTest.map(lambda x: 'died' if x == 'euthanized' else x)
PreProcessedHorseDataSetTest = | pd.concat([AttributesHorseDataSetTest, TargetHorseDataSetTest], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import operator
from collections import OrderedDict
from datetime import datetime
from itertools import chain
import warnings
import numpy as np
from pandas import (notna, DataFrame, Series, MultiIndex, date_range,
Timestamp, compat)
import pandas as pd
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.apply import frame_apply
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.conftest import _get_cython_table_params
from pandas.tests.frame.common import TestData
class TestDataFrameApply(TestData):
def test_apply(self):
with np.errstate(all='ignore'):
# ufunc
applied = self.frame.apply(np.sqrt)
tm.assert_series_equal(np.sqrt(self.frame['A']), applied['A'])
# aggregator
applied = self.frame.apply(np.mean)
assert applied['A'] == np.mean(self.frame['A'])
d = self.frame.index[0]
applied = self.frame.apply(np.mean, axis=1)
assert applied[d] == np.mean(self.frame.xs(d))
assert applied.index is self.frame.index # want this
# invalid axis
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
pytest.raises(ValueError, df.apply, lambda x: x, 2)
# see gh-9573
df = DataFrame({'c0': ['A', 'A', 'B', 'B'],
'c1': ['C', 'C', 'D', 'D']})
df = df.apply(lambda ts: ts.astype('category'))
assert df.shape == (4, 2)
assert isinstance(df['c0'].dtype, CategoricalDtype)
assert isinstance(df['c1'].dtype, CategoricalDtype)
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
df = DataFrame({'A': date_range('20130101', periods=3),
'B': pd.to_timedelta(np.arange(3), unit='s')})
result = df.apply(lambda x: x, axis=1)
assert_frame_equal(result, df)
def test_apply_empty(self):
# empty
applied = self.empty.apply(np.sqrt)
assert applied.empty
applied = self.empty.apply(np.mean)
assert applied.empty
no_rows = self.frame[:0]
result = no_rows.apply(lambda x: x.mean())
expected = Series(np.nan, index=self.frame.columns)
assert_series_equal(result, expected)
no_cols = self.frame.loc[:, []]
result = no_cols.apply(lambda x: x.mean(), axis=1)
expected = Series(np.nan, index=self.frame.index)
assert_series_equal(result, expected)
# 2476
xp = DataFrame(index=['a'])
rs = xp.apply(lambda x: x['a'], axis=1)
assert_frame_equal(xp, rs)
def test_apply_with_reduce_empty(self):
# reduce with an empty DataFrame
x = []
result = self.empty.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, self.empty)
result = self.empty.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
empty_with_cols = DataFrame(columns=['a', 'b', 'c'])
result = empty_with_cols.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, empty_with_cols)
result = empty_with_cols.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
# Ensure that x.append hasn't been called
assert x == []
def test_apply_deprecate_reduce(self):
with warnings.catch_warnings(record=True):
x = []
self.empty.apply(x.append, axis=1, result_type='reduce')
def test_apply_standard_nonunique(self):
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
rs = df.apply(lambda s: s[0], axis=1)
xp = Series([1, 4, 7], ['a', 'a', 'c'])
assert_series_equal(rs, xp)
rs = df.T.apply(lambda s: s[0], axis=0)
assert_series_equal(rs, xp)
def test_with_string_args(self):
for arg in ['sum', 'mean', 'min', 'max', 'std']:
result = self.frame.apply(arg)
expected = getattr(self.frame, arg)()
tm.assert_series_equal(result, expected)
result = self.frame.apply(arg, axis=1)
expected = getattr(self.frame, arg)(axis=1)
tm.assert_series_equal(result, expected)
def test_apply_broadcast_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
self.frame.apply(np.mean, broadcast=True)
def test_apply_broadcast(self):
# scalars
result = self.frame.apply(np.mean, result_type='broadcast')
expected = DataFrame([self.frame.mean()], index=self.frame.index)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(np.mean, axis=1, result_type='broadcast')
m = self.frame.mean(axis=1)
expected = DataFrame({c: m for c in self.frame.columns})
tm.assert_frame_equal(result, expected)
# lists
result = self.frame.apply(
lambda x: list(range(len(self.frame.columns))),
axis=1,
result_type='broadcast')
m = list(range(len(self.frame.columns)))
expected = DataFrame([m] * len(self.frame.index),
dtype='float64',
index=self.frame.index,
columns=self.frame.columns)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: list(range(len(self.frame.index))),
result_type='broadcast')
m = list(range(len(self.frame.index)))
expected = DataFrame({c: m for c in self.frame.columns},
dtype='float64',
index=self.frame.index)
tm.assert_frame_equal(result, expected)
# preserve columns
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: [1, 2, 3],
axis=1,
result_type='broadcast')
tm.assert_frame_equal(result, df)
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: Series([1, 2, 3], index=list('abc')),
axis=1,
result_type='broadcast')
expected = df.copy()
tm.assert_frame_equal(result, expected)
def test_apply_broadcast_error(self):
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
# > 1 ndim
with pytest.raises(ValueError):
df.apply(lambda x: np.array([1, 2]).reshape(-1, 2),
axis=1,
result_type='broadcast')
# cannot broadcast
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2],
axis=1,
result_type='broadcast')
with pytest.raises(ValueError):
df.apply(lambda x: Series([1, 2]),
axis=1,
result_type='broadcast')
def test_apply_raw(self):
result0 = self.frame.apply(np.mean, raw=True)
result1 = self.frame.apply(np.mean, axis=1, raw=True)
expected0 = self.frame.apply(lambda x: x.values.mean())
expected1 = self.frame.apply(lambda x: x.values.mean(), axis=1)
assert_series_equal(result0, expected0)
assert_series_equal(result1, expected1)
# no reduction
result = self.frame.apply(lambda x: x * 2, raw=True)
expected = self.frame * 2
assert_frame_equal(result, expected)
def test_apply_axis1(self):
d = self.frame.index[0]
tapplied = self.frame.apply(np.mean, axis=1)
assert tapplied[d] == np.mean(self.frame.xs(d))
def test_apply_ignore_failures(self):
result = frame_apply(self.mixed_frame,
np.mean, 0,
ignore_failures=True).apply_standard()
expected = self.mixed_frame._get_numeric_data().apply(np.mean)
assert_series_equal(result, expected)
def test_apply_mixed_dtype_corner(self):
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df[:0].apply(np.mean, axis=1)
# the result here is actually kind of ambiguous, should it be a Series
# or a DataFrame?
expected = Series(np.nan, index=pd.Index([], dtype='int64'))
assert_series_equal(result, expected)
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df.apply(lambda x: x['A'], axis=1)
expected = Series(['foo'], index=[0])
assert_series_equal(result, expected)
result = df.apply(lambda x: x['B'], axis=1)
expected = Series([1.], index=[0])
assert_series_equal(result, expected)
def test_apply_empty_infer_type(self):
no_cols = DataFrame(index=['a', 'b', 'c'])
no_index = DataFrame(columns=['a', 'b', 'c'])
def _check(df, f):
with warnings.catch_warnings(record=True):
test_res = f(np.array([], dtype='f8'))
is_reduction = not isinstance(test_res, np.ndarray)
def _checkit(axis=0, raw=False):
res = df.apply(f, axis=axis, raw=raw)
if is_reduction:
agg_axis = df._get_agg_axis(axis)
assert isinstance(res, Series)
assert res.index is agg_axis
else:
assert isinstance(res, DataFrame)
_checkit()
_checkit(axis=1)
_checkit(raw=True)
_checkit(axis=0, raw=True)
with np.errstate(all='ignore'):
_check(no_cols, lambda x: x)
_check(no_cols, lambda x: x.mean())
_check(no_index, lambda x: x)
_check(no_index, lambda x: x.mean())
result = no_cols.apply(lambda x: x.mean(), result_type='broadcast')
assert isinstance(result, DataFrame)
def test_apply_with_args_kwds(self):
def add_some(x, howmuch=0):
return x + howmuch
def agg_and_add(x, howmuch=0):
return x.mean() + howmuch
def subtract_and_divide(x, sub, divide=1):
return (x - sub) / divide
result = self.frame.apply(add_some, howmuch=2)
exp = self.frame.apply(lambda x: x + 2)
assert_frame_equal(result, exp)
result = self.frame.apply(agg_and_add, howmuch=2)
exp = self.frame.apply(lambda x: x.mean() + 2)
assert_series_equal(result, exp)
res = self.frame.apply(subtract_and_divide, args=(2,), divide=2)
exp = self.frame.apply(lambda x: (x - 2.) / 2.)
assert_frame_equal(res, exp)
def test_apply_yield_list(self):
result = self.frame.apply(list)
assert_frame_equal(result, self.frame)
def test_apply_reduce_Series(self):
self.frame.loc[::2, 'A'] = np.nan
expected = self.frame.mean(1)
result = self.frame.apply(np.mean, axis=1)
assert_series_equal(result, expected)
def test_apply_differently_indexed(self):
df = DataFrame(np.random.randn(20, 10))
result0 = df.apply(Series.describe, axis=0)
expected0 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(result0, expected0)
result1 = df.apply(Series.describe, axis=1)
expected1 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df.T)),
columns=df.index).T
assert_frame_equal(result1, expected1)
def test_apply_modify_traceback(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
data.loc[4, 'C'] = np.nan
def transform(row):
if row['C'].startswith('shin') and row['A'] == 'foo':
row['D'] = 7
return row
def transform2(row):
if (notna(row['C']) and row['C'].startswith('shin') and
row['A'] == 'foo'):
row['D'] = 7
return row
try:
data.apply(transform, axis=1)
except AttributeError as e:
assert len(e.args) == 2
assert e.args[1] == 'occurred at index 4'
assert e.args[0] == "'float' object has no attribute 'startswith'"
def test_apply_bug(self):
# GH 6125
positions = pd.DataFrame([[1, 'ABC0', 50], [1, 'YUM0', 20],
[1, 'DEF0', 20], [2, 'ABC1', 50],
[2, 'YUM1', 20], [2, 'DEF1', 20]],
columns=['a', 'market', 'position'])
def f(r):
return r['market']
expected = positions.apply(f, axis=1)
positions = DataFrame([[datetime(2013, 1, 1), 'ABC0', 50],
[datetime(2013, 1, 2), 'YUM0', 20],
[datetime(2013, 1, 3), 'DEF0', 20],
[datetime(2013, 1, 4), 'ABC1', 50],
[datetime(2013, 1, 5), 'YUM1', 20],
[datetime(2013, 1, 6), 'DEF1', 20]],
columns=['a', 'market', 'position'])
result = positions.apply(f, axis=1)
assert_series_equal(result, expected)
def test_apply_convert_objects(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
result = data.apply(lambda x: x, axis=1)
assert_frame_equal(result._convert(datetime=True), data)
def test_apply_attach_name(self):
result = self.frame.apply(lambda x: x.name)
expected = Series(self.frame.columns, index=self.frame.columns)
assert_series_equal(result, expected)
result = self.frame.apply(lambda x: x.name, axis=1)
expected = Series(self.frame.index, index=self.frame.index)
assert_series_equal(result, expected)
# non-reductions
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)))
expected = DataFrame(np.tile(self.frame.columns,
(len(self.frame.index), 1)),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)),
axis=1)
expected = Series(np.repeat(t[0], len(self.frame.columns))
for t in self.frame.itertuples())
expected.index = self.frame.index
assert_series_equal(result, expected)
def test_apply_multi_index(self):
index = MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'd']])
s = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['col1', 'col2'])
result = s.apply(
lambda x: Series({'min': min(x), 'max': max(x)}), 1)
expected = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['min', 'max'])
assert_frame_equal(result, expected, check_like=True)
def test_apply_dict(self):
# GH 8735
A = DataFrame([['foo', 'bar'], ['spam', 'eggs']])
A_dicts = Series([dict([(0, 'foo'), (1, 'spam')]),
dict([(0, 'bar'), (1, 'eggs')])])
B = DataFrame([[0, 1], [2, 3]])
B_dicts = Series([dict([(0, 0), (1, 2)]), dict([(0, 1), (1, 3)])])
fn = lambda x: x.to_dict()
for df, dicts in [(A, A_dicts), (B, B_dicts)]:
reduce_true = df.apply(fn, result_type='reduce')
reduce_false = df.apply(fn, result_type='expand')
reduce_none = df.apply(fn)
assert_series_equal(reduce_true, dicts)
assert_frame_equal(reduce_false, df)
assert_series_equal(reduce_none, dicts)
def test_applymap(self):
applied = self.frame.applymap(lambda x: x * 2)
tm.assert_frame_equal(applied, self.frame * 2)
self.frame.applymap(type)
# gh-465: function returning tuples
result = self.frame.applymap(lambda x: (x, x))
assert isinstance(result['A'][0], tuple)
# gh-2909: object conversion to float in constructor?
df = DataFrame(data=[1, 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
df = DataFrame(data=[1., 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
# see gh-2786
df = DataFrame(np.random.random((3, 4)))
df2 = df.copy()
cols = ['a', 'a', 'a', 'a']
df.columns = cols
expected = df2.applymap(str)
expected.columns = cols
result = df.applymap(str)
tm.assert_frame_equal(result, expected)
# datetime/timedelta
df['datetime'] = Timestamp('20130101')
df['timedelta'] = pd.Timedelta('1 min')
result = df.applymap(str)
for f in ['datetime', 'timedelta']:
assert result.loc[0, f] == str(df.loc[0, f])
# see gh-8222
empty_frames = [pd.DataFrame(),
pd.DataFrame(columns=list('ABC')),
pd.DataFrame(index=list('ABC')),
pd.DataFrame({'A': [], 'B': [], 'C': []})]
for frame in empty_frames:
for func in [round, lambda x: x]:
result = frame.applymap(func)
tm.assert_frame_equal(result, frame)
def test_applymap_box_timestamps(self):
# #2689, #2627
ser = pd.Series(date_range('1/1/2000', periods=10))
def func(x):
return (x.hour, x.day, x.month)
# it works!
pd.DataFrame(ser).applymap(func)
def test_applymap_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
df = pd.DataFrame({'a': [pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02')],
'b': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')],
'c': [pd.Timedelta('1 days'),
pd.Timedelta('2 days')],
'd': [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]})
res = df.applymap(lambda x: '{0}'.format(x.__class__.__name__))
exp = pd.DataFrame({'a': ['Timestamp', 'Timestamp'],
'b': ['Timestamp', 'Timestamp'],
'c': ['Timedelta', 'Timedelta'],
'd': ['Period', 'Period']})
tm.assert_frame_equal(res, exp)
def test_frame_apply_dont_convert_datetime64(self):
from pandas.tseries.offsets import BDay
df = DataFrame({'x1': [datetime(1996, 1, 1)]})
df = df.applymap(lambda x: x + BDay())
df = df.applymap(lambda x: x + BDay())
assert df.x1.dtype == 'M8[ns]'
def test_apply_non_numpy_dtype(self):
# See gh-12244
df = DataFrame({'dt': pd.date_range(
"2015-01-01", periods=3, tz='Europe/Brussels')})
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
result = df.apply(lambda x: x + pd.Timedelta('1day'))
expected = DataFrame({'dt': pd.date_range(
"2015-01-02", periods=3, tz='Europe/Brussels')})
assert_frame_equal(result, expected)
df = DataFrame({'dt': ['a', 'b', 'c', 'a']}, dtype='category')
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
def test_apply_dup_names_multi_agg(self):
# GH 21063
df = pd.DataFrame([[0, 1], [2, 3]], columns=['a', 'a'])
expected = pd.DataFrame([[0, 1]], columns=['a', 'a'], index=['min'])
result = df.agg(['min'])
tm.assert_frame_equal(result, expected)
class TestInferOutputShape(object):
# the user has supplied an opaque UDF where
# they are transforming the input that requires
# us to infer the output
def test_infer_row_shape(self):
# gh-17437
# if row shape is changing, infer it
df = pd.DataFrame(np.random.rand(10, 2))
result = df.apply(np.fft.fft, axis=0)
assert result.shape == (10, 2)
result = df.apply(np.fft.rfft, axis=0)
assert result.shape == (6, 2)
def test_with_dictlike_columns(self):
# gh 17602
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1)
expected = Series([{'s': 3} for t in df.itertuples()])
assert_series_equal(result, expected)
df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
pd.Timestamp('2017-05-02 00:00:00')]
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1)
assert_series_equal(result, expected)
# compose a series
result = (df['a'] + df['b']).apply(lambda x: {'s': x})
expected = Series([{'s': 3}, {'s': 3}])
assert_series_equal(result, expected)
# gh-18775
df = DataFrame()
df["author"] = ["X", "Y", "Z"]
df["publisher"] = ["BBC", "NBC", "N24"]
df["date"] = pd.to_datetime(['17-10-2010 07:15:30',
'13-05-2011 08:20:35',
'15-01-2013 09:09:09'])
result = df.apply(lambda x: {}, axis=1)
expected = Series([{}, {}, {}])
assert_series_equal(result, expected)
def test_with_dictlike_columns_with_infer(self):
# gh 17602
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1, result_type='expand')
expected = DataFrame({'s': [3, 3]})
assert_frame_equal(result, expected)
df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
pd.Timestamp('2017-05-02 00:00:00')]
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1, result_type='expand')
assert_frame_equal(result, expected)
def test_with_listlike_columns(self):
# gh-17348
df = DataFrame({'a': Series(np.random.randn(4)),
'b': ['a', 'list', 'of', 'words'],
'ts': date_range('2016-10-01', periods=4, freq='H')})
result = df[['a', 'b']].apply(tuple, axis=1)
expected = Series([t[1:] for t in df[['a', 'b']].itertuples()])
assert_series_equal(result, expected)
result = df[['a', 'ts']].apply(tuple, axis=1)
expected = Series([t[1:] for t in df[['a', 'ts']].itertuples()])
assert_series_equal(result, expected)
# gh-18919
df = DataFrame({'x': Series([['a', 'b'], ['q']]),
'y': Series([['z'], ['q', 't']])})
df.index = MultiIndex.from_tuples([('i0', 'j0'), ('i1', 'j1')])
result = df.apply(
lambda row: [el for el in row['x'] if el in row['y']],
axis=1)
expected = Series([[], ['q']], index=df.index)
assert_series_equal(result, expected)
def test_infer_output_shape_columns(self):
# gh-18573
df = DataFrame({'number': [1., 2.],
'string': ['foo', 'bar'],
'datetime': [pd.Timestamp('2017-11-29 03:30:00'),
pd.Timestamp('2017-11-29 03:45:00')]})
result = df.apply(lambda row: (row.number, row.string), axis=1)
expected = Series([(t.number, t.string) for t in df.itertuples()])
assert_series_equal(result, expected)
def test_infer_output_shape_listlike_columns(self):
# gh-16353
df = DataFrame(np.random.randn(6, 3), columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1)
expected = Series([[1, 2, 3] for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1)
expected = Series([[1, 2] for t in df.itertuples()])
assert_series_equal(result, expected)
# gh-17970
df = DataFrame({"a": [1, 2, 3]}, index=list('abc'))
result = df.apply(lambda row: np.ones(1), axis=1)
expected = Series([np.ones(1) for t in df.itertuples()],
index=df.index)
assert_series_equal(result, expected)
result = df.apply(lambda row: np.ones(2), axis=1)
expected = Series([np.ones(2) for t in df.itertuples()],
index=df.index)
assert_series_equal(result, expected)
# gh-17892
df = pd.DataFrame({'a': [pd.Timestamp('2010-02-01'),
pd.Timestamp('2010-02-04'),
pd.Timestamp('2010-02-05'),
pd.Timestamp('2010-02-06')],
'b': [9, 5, 4, 3],
'c': [5, 3, 4, 2],
'd': [1, 2, 3, 4]})
def fun(x):
return (1, 2)
result = df.apply(fun, axis=1)
expected = Series([(1, 2) for t in df.itertuples()])
assert_series_equal(result, expected)
def test_consistent_coerce_for_shapes(self):
# we want column names to NOT be propagated
# just because the shape matches the input shape
df = DataFrame(np.random.randn(4, 3), columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1)
expected = Series([[1, 2, 3] for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1)
expected = Series([[1, 2] for t in df.itertuples()])
assert_series_equal(result, expected)
def test_consistent_names(self):
# if a Series is returned, we should use the resulting index names
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: Series([1, 2, 3],
index=['test', 'other', 'cols']),
axis=1)
expected = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['test', 'other', 'cols'])
assert_frame_equal(result, expected)
result = df.apply(
lambda x: pd.Series([1, 2], index=['test', 'other']), axis=1)
expected = DataFrame(
np.tile(np.arange(2, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['test', 'other'])
assert_frame_equal(result, expected)
def test_result_type(self):
# result_type should be consistent no matter which
# path we take in the code
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1, result_type='expand')
expected = df.copy()
expected.columns = [0, 1, 2]
assert_frame_equal(result, expected)
result = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
expected = df[['A', 'B']].copy()
expected.columns = [0, 1]
assert_frame_equal(result, expected)
# broadcast result
result = df.apply(lambda x: [1, 2, 3], axis=1, result_type='broadcast')
expected = df.copy()
assert_frame_equal(result, expected)
columns = ['other', 'col', 'names']
result = df.apply(
lambda x: pd.Series([1, 2, 3],
index=columns),
axis=1,
result_type='broadcast')
expected = df.copy()
assert_frame_equal(result, expected)
# series result
result = df.apply(lambda x: Series([1, 2, 3], index=x.index), axis=1)
expected = df.copy()
assert_frame_equal(result, expected)
# series result with other index
columns = ['other', 'col', 'names']
result = df.apply(
lambda x: pd.Series([1, 2, 3], index=columns),
axis=1)
expected = df.copy()
expected.columns = columns
assert_frame_equal(result, expected)
@pytest.mark.parametrize("result_type", ['foo', 1])
def test_result_type_error(self, result_type):
# allowed result_type
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2, 3],
axis=1,
result_type=result_type)
@pytest.mark.parametrize(
"box",
[lambda x: list(x),
lambda x: tuple(x),
lambda x: np.array(x, dtype='int64')],
ids=['list', 'tuple', 'array'])
def test_consistency_for_boxed(self, box):
# passing an array or list should not affect the output shape
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
result = df.apply(lambda x: box([1, 2]), axis=1)
expected = Series([box([1, 2]) for t in df.itertuples()])
assert_series_equal(result, expected)
result = df.apply(lambda x: box([1, 2]), axis=1, result_type='expand')
expected = DataFrame(
np.tile(np.arange(2, dtype='int64'), 6).reshape(6, -1) + 1)
assert_frame_equal(result, expected)
def zip_frames(frames, axis=1):
"""
    take a list of frames, zip them together under the
    assumption that they all share the first frame's index/columns.
Returns
-------
new_frame : DataFrame
"""
if axis == 1:
columns = frames[0].columns
zipped = [f.loc[:, c] for c in columns for f in frames]
return pd.concat(zipped, axis=1)
else:
index = frames[0].index
zipped = [f.loc[i, :] for i in index for f in frames]
return pd.DataFrame(zipped)
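# Illustrative note (sketch, not part of the original suite): for two frames
# f and g that share columns ['A', 'B'], zip_frames([f, g]) interleaves the
# columns as f['A'], g['A'], f['B'], g['B'] - the same order that
# df.apply([func1, func2], axis=0) produces, which is why the tests below can
# build their expected values with it.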
class TestDataFrameAggregate(TestData):
def test_agg_transform(self, axis):
other_axis = 1 if axis in {0, 'index'} else 0
with np.errstate(all='ignore'):
f_abs = np.abs(self.frame)
f_sqrt = np.sqrt(self.frame)
# ufunc
result = self.frame.transform(np.sqrt, axis=axis)
expected = f_sqrt.copy()
assert_frame_equal(result, expected)
result = self.frame.apply(np.sqrt, axis=axis)
assert_frame_equal(result, expected)
result = self.frame.transform(np.sqrt, axis=axis)
assert_frame_equal(result, expected)
# list-like
result = self.frame.apply([np.sqrt], axis=axis)
expected = f_sqrt.copy()
if axis in {0, 'index'}:
expected.columns = pd.MultiIndex.from_product(
[self.frame.columns, ['sqrt']])
else:
expected.index = pd.MultiIndex.from_product(
[self.frame.index, ['sqrt']])
assert_frame_equal(result, expected)
result = self.frame.transform([np.sqrt], axis=axis)
assert_frame_equal(result, expected)
# multiple items in list
# these are in the order as if we are applying both
# functions per series and then concatting
result = self.frame.apply([np.abs, np.sqrt], axis=axis)
expected = zip_frames([f_abs, f_sqrt], axis=other_axis)
if axis in {0, 'index'}:
expected.columns = pd.MultiIndex.from_product(
[self.frame.columns, ['absolute', 'sqrt']])
else:
expected.index = pd.MultiIndex.from_product(
[self.frame.index, ['absolute', 'sqrt']])
assert_frame_equal(result, expected)
result = self.frame.transform([np.abs, 'sqrt'], axis=axis)
assert_frame_equal(result, expected)
def test_transform_and_agg_err(self, axis):
# cannot both transform and agg
def f():
self.frame.transform(['max', 'min'], axis=axis)
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
self.frame.agg(['max', 'sqrt'], axis=axis)
pytest.raises(ValueError, f)
def f():
with np.errstate(all='ignore'):
self.frame.transform(['max', 'sqrt'], axis=axis)
pytest.raises(ValueError, f)
df = pd.DataFrame({'A': range(5), 'B': 5})
def f():
with np.errstate(all='ignore'):
df.agg({'A': ['abs', 'sum'], 'B': ['mean', 'max']}, axis=axis)
@pytest.mark.parametrize('method', [
'abs', 'shift', 'pct_change', 'cumsum', 'rank',
])
def test_transform_method_name(self, method):
# https://github.com/pandas-dev/pandas/issues/19760
df = pd.DataFrame({"A": [-1, 2]})
result = df.transform(method)
expected = operator.methodcaller(method)(df)
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
import datetime
import json
import math
import os
from dash import Dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
from dash.dependencies import Input, Output, State, ALL
from dash.exceptions import PreventUpdate
import dash_html_components as html
import dash_table
from dateutil import tz
import numpy as np
import pandas as pd
import plotly.graph_objs as go
from application import util
from application.models import db, Activity
def add_dashboard_to_flask(server):
"""Create a Plotly Dash dashboard with the specified server.
This is actually used to create a dashboard that piggybacks on a
    Flask app, using that app as its server.
"""
dash_app = Dash(
__name__,
server=server,
routes_pathname_prefix='/dash-log/',
external_stylesheets=[
dbc.themes.BOOTSTRAP,
# '/static/css/styles.css', # Not yet.
],
)
dash_app.layout = dbc.Container(
[
html.H1('Activity Summary (Training Log)'),
html.Hr(),
html.H2('Training Stress'),
dcc.Graph(
id='tss-graph',
figure=go.Figure(),
),
html.Hr(),
html.H2('Weekly Log'),
dbc.Row(
[
dbc.Col(
dcc.Dropdown(
id='bubble-dropdown',
options=[
{'label': x, 'value': x} for x in ['Distance', 'Time', 'Elevation', 'TSS']
],
value='Distance',
searchable=False,
clearable=False,
style={'font-size': '12px'}
),
width=2,
),
dbc.Col(
dbc.Row(
[
dbc.Col(
day,
style={'text-align': 'center', 'font-size': '11px'}
)
for day in ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
],
# justify='around',
# justify='center',
no_gutters=True,
),
align='center',
width=10,
)
],
id='calendar-header',
className='mb-3 pb-2 border-bottom',
style={'position': 'sticky', 'top': 0, 'zIndex': 1, 'background-color': 'white'}
),
# html.Hr(),
html.Div(id='calendar-rows'),
dbc.Row(
dbc.Button('Add more weeks', id='add-weeks', color='primary'),
justify='center',
className='mb-2',
),
dcc.Location(id='url'),
# dcc.Store(id='activity-data'), # data streams
# dcc.Store(id='activity-stats'), # could be strava response etc
# dcc.Store(id='calc-stats'), # storage for DF-to-stats calc
],
id='dash-container',
fluid=True,
)
@dash_app.callback(
Output('tss-graph', 'figure'),
Input('url', 'pathname')
)
def update_figure(path):
# Load dates and TSS from db in to DF.
activities=Activity.query.all()
fields = ['recorded', 'tss', 'title', 'elapsed_time_s']
df = pd.DataFrame(
[[getattr(a, field) for field in fields] for a in activities],
columns=fields
)
df = df.sort_values(by='recorded', axis=0)
        # For now, convert to my tz - this suggests the TZ should be set
        # per user, not per activity.
df['recorded'] = df['recorded'].dt.tz_localize(tz.tzutc()).dt.tz_convert(tz.gettz('America/Denver'))
calc_ctl_atl(df)
return create_tss_fig(df)
@dash_app.callback(
Output('calendar-rows', 'children'),
# Input('url', 'pathname'),
Input('add-weeks', 'n_clicks'),
# Input('bubble-dropdown', 'value'),
State('calendar-rows', 'children'),
)
def update_calendar(n_clicks, children):
n_clicks = n_clicks or 0
# Load dates and TSS from db in to DF.
# TODO: Consider querying the database for dates, rather than
# loading them all into a DataFrame.
activities=Activity.query.all()
fields = ['id', 'recorded', 'tss', 'title', 'description',
'elapsed_time_s', 'moving_time_s', 'distance_m', 'elevation_m']
df = pd.DataFrame(
[[getattr(a, field) for field in fields] for a in activities],
columns=fields
)
df = df.sort_values(by='recorded', axis=0)
        # For now, convert to my tz - this suggests the TZ should be set
        # per user, not per activity.
df['recorded'] = df['recorded'].dt.tz_localize(tz.tzutc()).dt.tz_convert(tz.gettz('America/Denver'))
df['weekday'] = df['recorded'].dt.weekday
# ** Coming soon: Special calendar view for current week **
children = children or []
today = datetime.datetime.today().date()
idx = today.weekday() # MON = 0, SUN = 6
# idx = (today.weekday() + 1) % 7 # MON = 0, SUN = 6 -> SUN = 0 .. SAT = 6
for i in range(3 * n_clicks, 3 * (n_clicks + 1)):
ix = idx + 7 * (i - 1)
mon_latest = today - datetime.timedelta(ix) # 0-6 days ago
mon_last = today - datetime.timedelta(ix+7) # 1+ weeks ago
df_week = df[
(df['recorded'].dt.date < mon_latest)
& (df['recorded'].dt.date >= mon_last)
]
children.append(dbc.Row([
dbc.Col(
children=create_week_sum(df_week, mon_last),
id=f'week-summary-{i}',
width=2,
),
dbc.Col(
# Eventually this will be just one part of one row.
dcc.Graph(
# id=f'week-cal-{i}',
id={'type': 'week-cal', 'index': i},
figure=create_week_cal(df_week),
config=dict(displayModeBar=False),
),
width=10,
)
]))
return children
@dash_app.callback(
Output({'type': 'week-cal', 'index': ALL}, 'figure'),
# Input('url', 'pathname'),
Input('bubble-dropdown', 'value'),
# Input('bubble-dropdown', 'value'),
State({'type': 'week-cal', 'index': ALL}, 'figure'),
# if I do this, adding rows does not work right if not using distance:
# prevent_initial_call=True,
)
    def update_calendar_bubbles(bubble_type, figures):  # renamed from a second 'update_calendar' so the two callbacks don't share a name
figures = [update_week_cal(figure, bubble_type) for figure in figures]
return figures
# init_callbacks(dash_app)
return dash_app.server
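# Hypothetical wiring sketch (module path and names assumed, not shown in
# this file): the return value is the Flask server itself, so an app factory
# could mount the dashboard like this:
#
#   from flask import Flask
#   from application.dash_log import add_dashboard_to_flask  # assumed path
#
#   app = Flask(__name__)
#   app = add_dashboard_to_flask(app)  # Dash pages now served at /dash-log/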
def calc_ctl_atl(df):
"""Add power-related columns to the DataFrame.
For more, see boulderhikes.views.ActivityListView
"""
# atl_pre = [0.0]
atl_0 = 0.0
atl_pre = [atl_0]
atl_post = [ df['tss'].iloc[0] / 7.0 + atl_0]
# ctl_pre = [0.0]
ctl_0 = 0.0
ctl_pre = [ctl_0]
ctl_post = [ df['tss'].iloc[0] / 42.0 + ctl_0]
for i in range(1, len(df)):
delta_t_days = (
df['recorded'].iloc[i] - df['recorded'].iloc[i-1]
).total_seconds() / (3600 * 24)
atl_pre.append(
(atl_pre[i-1] + df['tss'].iloc[i-1] / 7.0) * (6.0 / 7.0) ** delta_t_days
)
atl_post.append(
df['tss'].iloc[i] / 7.0 + atl_post[i-1] * (6.0 / 7.0) ** delta_t_days
)
ctl_pre.append(
(ctl_pre[i-1] + df['tss'].iloc[i-1] / 42.0) * (41.0 / 42.0) ** delta_t_days
)
ctl_post.append(
df['tss'].iloc[i] / 42.0 + ctl_post[i-1] * (41.0 / 42.0) ** delta_t_days
)
df['ATL_pre'] = atl_pre
df['CTL_pre'] = ctl_pre
df['ATL_post'] = atl_post
df['CTL_post'] = ctl_post
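# Sanity check on the recursion above (sketch): with a constant daily TSS and
# delta_t_days == 1, A_i = tss/7 + A_{i-1} * (6/7), whose fixed point is
# A* = (tss/7) / (1 - 6/7) = tss; likewise C* = (tss/42) / (1 - 41/42) = tss.
# So under steady training both ATL and CTL converge to the daily TSS itself,
# with ATL (7-day constant) adapting roughly six times faster than CTL (42-day).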
def create_tss_fig(df):
"""Catch-all controller function for dashboard layout logic.
Args:
df (pd.DataFrame): A DataFrame representing a time-indexed DataFrame
containing TSS for each recorded activity.
Returns:
plotly.graph_objs.Figure: fig to be used as child of a html.Div element.
"""
df_stress = pd.DataFrame.from_dict({
'ctl': | pd.concat([df['CTL_pre'], df['CTL_post']]) | pandas.concat |
"""HGNC."""
import re
import sys
import json
import logging
import numpy as np
import pandas as pd
from tqdm import tqdm
from typing import Dict
from pyorient import OrientDB
from collections import namedtuple
from ebel.tools import get_file_path
from ebel.manager.orientdb.constants import HGNC
from ebel.manager.orientdb import odb_meta, urls, odb_structure
from ebel.manager.rdbms.models import hgnc
logger = logging.getLogger(__name__)
HgncEntry4Update = namedtuple("HgncEntry4Update", ['hgnc_rid', 'label', 'location', 'symbol', 'suggested_corrections'])
class Hgnc(odb_meta.Graph):
"""HGNC class definition."""
def __init__(self, client: OrientDB = None):
"""Init HGNC."""
self.client = client
self.biodb_name = HGNC
self.urls = {self.biodb_name: urls.HGNC_JSON, 'human_ortholog': urls.HCOP_GZIP}
super().__init__(generics=odb_structure.hgnc_generics,
tables_base=hgnc.Base,
indices=odb_structure.hgnc_indices,
nodes=odb_structure.hgnc_nodes,
urls=self.urls,
biodb_name=self.biodb_name)
def __contains__(self, hgnc_id: object) -> bool:
"""Test existence of hgnc_id."""
if isinstance(hgnc_id, int):
hgnc_id = "HGNC:{}".format(hgnc_id)
r = self.execute("Select count(*) from bel where hgnc.id = '{}' limit 1".format(hgnc_id))
return bool(len(r[0].oRecordData['count']))
def __len__(self):
"""Count number of hgnc links in BEL graph."""
r = self.execute("Select count(*) from bel where hgnc IS NOT NULL")
return r[0].oRecordData['count']
def __repr__(self) -> str:
"""Represent HGNC."""
template = "{{BioDatabase:Hgnc}}[url:{url}, nodes:{nodes}, generics:{generics}]"
representation = template.format(
url=self.urls,
nodes=self.number_of_nodes,
generics=self.number_of_generics
)
return representation
def insert_data(self) -> Dict[str, int]:
"""Check if files missing for download or generic table empty. If True then insert data."""
inserted = dict()
inserted['hgnc'] = self.import_hgnc()
inserted['hgnc_rdbms'] = self.import_hgnc_into_rdbms()
inserted['human_orthologs'] = self.insert_orthologs()
self.session.commit()
return inserted
def import_hgnc_into_rdbms(self) -> int:
"""Insert HGNC database into RDBMS."""
logger.info('Insert HGNC database into RDBMS.')
file_path = get_file_path(self.urls[self.biodb_name], self.biodb_name)
df = pd.DataFrame(json.loads(open(file_path, 'r').read())['response']['docs'])
self._standardize_dataframe(df)
columns = ['hgnc_id', 'version', 'bioparadigms_slc', 'cd', 'cosmic', 'date_approved_reserved', 'date_modified',
'date_name_changed', 'date_symbol_changed', 'ensembl_gene_id', 'entrez_id', 'homeodb', 'horde_id',
'imgt', 'intermediate_filament_db', 'iuphar', 'lncipedia', 'lncrnadb',
'location', 'location_sortable', 'locus_group', 'locus_type', 'mamit_trnadb', 'merops', 'mirbase',
'name', 'orphanet', 'pseudogene_org', 'snornabase', 'status', 'symbol', 'ucsc_id', 'uuid',
'vega_id', 'agr']
df['id'] = pd.to_numeric(df.hgnc_id.str.split(':').str[1])
df.set_index('id', inplace=True)
df[columns].to_sql(hgnc.Hgnc.__tablename__, self.engine, if_exists='append')
df.hgnc_id = pd.to_numeric(df.hgnc_id.str.split(':').str[1])
for df_col, model, m_col in (('prev_symbol', hgnc.PrevSymbol, None),
('alias_symbol', hgnc.AliasSymbol, None),
('alias_name', hgnc.AliasName, None),
('ccds_id', hgnc.Ccds, 'identifier'),
('ena', hgnc.Ena, 'identifier'),
('enzyme_id', hgnc.Enzyme, 'ec_number'),
('gene_group', hgnc.GeneGroupName, 'name'),
('gene_group_id', hgnc.GeneGroupId, 'identifier'),
('uniprot_ids', hgnc.UniProt, 'accession'),
('rna_central_id', hgnc.RnaCentral, 'identifier'),
('rgd_id', hgnc.Rgd, 'identifier'),
('refseq_accession', hgnc.RefSeq, 'accession'),
('pubmed_id', hgnc.PubMed, 'pmid'),
('prev_name', hgnc.PrevName, None),
('omim_id', hgnc.Omim, 'identifier'),
('mgd_id', hgnc.Mgd, 'identifier'),
('lsdb', hgnc.Lsdb, 'identifier')):
df_1n_table = df[[df_col, 'hgnc_id']].explode(df_col).dropna()
if m_col:
df_1n_table.rename(columns={df_col: m_col}, inplace=True)
df_1n_table.to_sql(
model.__tablename__,
self.engine,
if_exists='append',
index=False)
return df.shape[0]
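    # Pattern note (illustrative, not in the original): DataFrame.explode turns
    # a column of lists into one row per element, e.g.
    #   pd.DataFrame({'hgnc_id': [5], 'alias_symbol': [['A', 'B']]}).explode('alias_symbol')
    # yields rows (5, 'A') and (5, 'B') - exactly the 1:n link-table shape
    # written to SQL above.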
def import_hgnc(self) -> int:
"""Import HGNC into OrientDB."""
# if new hgnc is imported all hgnc links should be reset and hgnc table should be empty
self.execute('Update genetic_flow set hgnc=null')
self.execute('Delete from hgnc')
file_path = get_file_path(self.urls[self.biodb_name], self.biodb_name)
rows = json.loads(open(file_path, 'r').read().replace(u"\xa0", u" "))['response']['docs']
df = | pd.DataFrame(rows) | pandas.DataFrame |
# Built with python 3, dependencies installed with pip
# library to generate images - Pillow
# https://pillow.readthedocs.io/en/stable/installation.html
from PIL import Image
# library to work with arrays and dataframe
# https://numpy.org/
# https://pandas.pydata.org/
import numpy as np
import pandas as pd
import csv
import json
# library to interact with the operating system
import os
# library to generate random integer values
from random import seed
from random import randint
import sys
#print(sys.getrecursionlimit())
sys.setrecursionlimit(10000)
#print(sys.getrecursionlimit())
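# The higher limit is presumably needed because the (truncated) generation
# loop further below retries createCombo recursively until it finds an unused
# trait combination; long retry chains would otherwise exceed Python's
# default recursion limit of 1000.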
# get the directory containing this script, used by the image-creation mechanism (via os)
dirname = os.path.dirname(os.path.abspath(__file__))
Races = ["Unknown","Halflings", "Men", "Elves", "Dwarves", "Gobelins", "Orcs", "Wizards", "Daemons", "Wraiths", "Dark Riders", "Dark Lord"]
Types = ["Male", "Female","Firebeards","Blacklocks","Broadbeams","Stiffbeards","Stonefoots","Ironfists","Longbeards","White", "Grey", "Wood", "Blue", "Tower", "None"]
Skins = ["Red","Eggplant","Granite","Dark Grey","Charcoal","Albino","Light","Mid","Dark","Purple","Camel","Wattle","Smokey Grey","Moon Grey","Sand","Green","Peach","Dust","Bone","Silk","None"]
Ears = ["Earring", "None"]
Haircolors = ["Black","Bronze","Mango","Dark Grey","Persian Blue","Sapphire","Indigo","Topaz","Burning Orange","Taupe & Cookie Brown","Brown & Cookie Brown","Taupe & Graphite","Brown & Graphite","Seashell & Grey","Seashell & Carbon Grey","Smokey Grey & Charcoal","Grey & Carbon Grey","Dark Grey & Silver","Granite & Seashell","Dark Grey & Black","Black & Granite","Carbon Grey","Seashell","Silver","Granite","Grey Goose","Mango & Brown","Ginger & Fair","Bronze & Chocolate","Fair & Wattle","Orange & Black Rose","Dark Grey & Silver","Butter","Red","Blond","Blonde","Orange","Fair","Grey","Ginger","Black Rose","Brown","None"]
Haircuts = ["Braids","Long Hair","Medium Layers","The Bob","Left Side Hair","Right Side Hair","Curly Hair","Prince Hair","King Hair","Straight Hair","Grunge Hair","Wild Hair","Perm Hair","Bedhead","Hockey Hair","Bald","Wedge Hair","Feathered Hair","Ponytail","None"]
Hairprops = ["Orc Helmet","Gobelins Crown","Dwarf Helmet","Elfic Tiara","Elfic Crown","Circlet","Punk Hat","Beanie","Fedora","Bandana","Knitted Cap","Men Crown","Police","Top Hat","Cap Forward","Cowboy Hat","Cap","Tiara","Flower","Shire Hat","Headband","Pilot Helmet","None"]
Necks = ["Choker","Gold Chain","Silver Chain","Ring Onchain","Brooch","None"]
Facialhairs = ["Big Beard","Muttonchops","Mustache","Handlebars","Front Beard Dark","Front Beard","Normal Beard","Normal Beard Black","Luxurious Beard","Goat","Chinstrap","Shadow Beard","None"]
Mouthprops = ["Cigarette","Medical Mask","Pipe","Vape","None"]
Eyecolors = ["Orange Eye Shadow","Orange","Purple","Blue Eye Shadow","Purple Eye Shadow","Green Eye Shadow","Black","Peach","Blue","White","Yellow","Red","None"]
Eyeprops = ["3D Glasses","VR","Classic Shades","Small Shades","Eye Patch","Nerd Glasses","Big Shades","Eye Mask","Horned Rim Glasses","Regular Shades","Welding Goggles","None"]
Noses = ["Clown Nose","None"]
Blemishes = ["Scare","Rosy Cheeks","Mole","None"]
Toothcolors = ["Brown","White","Gold","Blood","None"]
Mouths = ["Smile","Frown","None","Black Lipstick","Hot Lipstick","Purple Lipstick","Orange Lipstick"]
# Metadata prep
def createCombo():
trait = {}
#trait["Name"] = name_ep
trait["Race"] = race_ep
trait["Type"] = type_ep
trait["Skin Tone"] = skin_ep
trait["Ears"] = ears_ep
trait["Hair Color"] = hair_color_ep
trait["Haircut"] = haircut_ep
trait["Hair Prop"] = hair_prop_ep
trait["Neck"] = neck_ep
trait["Facial Hair"] = facial_hair_ep
trait["Mouth Prop"] = mouth_prop_ep
trait["Eyes Color"] = eyes_color_ep
trait["Eyes Prop"] = eyes_prop_ep
trait["Nose"] = nose_ep
trait["Blemishe"] = blemishe_ep
trait["Tooth Color"] = tooth_color_ep
trait["Mouth"] = mouth_ep
    # 'traits', 'filterlist1' and the loop index 'x' are module-level names
    # from the (truncated) generation loop further below; on a duplicate
    # combo the index is recorded and the function implicitly returns None
    # so the caller can retry.
    if trait in traits:
        filterlist1.append(x)
    else:
        return trait
traits = []
# sets final image dimensions as 480x480 pixels
# the original 24x24 pixel image will be expanded to these dimensions
dimensions = 480, 480
s=(24,24)
none = np.zeros(s)
# Variables to define the colors with the RGB system
nr = (0,0,0)
bl = (255,255,255)
BG1 = (0,110,110)
FR1 = nr
FR2 = bl
BR1 = nr
BR2 = bl
FR3 = nr
DE1 = bl
SK3 = bl
BE1 = nr
BE2 = (204,154,39)
BE3 = (102,28,51)
BE4 = (128,97,21)
BE7 = (104,70,31)
CG2 = (198,198,198)
CG3 = (241,68,0)
CG4 = (157,178,187)
CG1 = (0,0,0)
PI2 = (139,78,0)
PI3 = (109,57,0)
PI1 = (0,0,0)
PI4 = (139,160,169)
MO1 = (156,141,138)
MO2 = (148,118,83)
MO3 = (121,95,64)
MO4 = (86,48,21)
SM1 = (0,0,0)
FW1 = (0,0,0)
VP3 = (89,89,89)
VP2 = (57,0,255)
VP1 = (0,0,0)
CN1 = (231,0,0)
RC1 = (215,154,104)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
MK1 = (201,201,201)
MK2 = (177,177,177)
ER2 = (255,221,0)
ER1 = (0,0,0)
GC1 = (255,203,0)
RG1 = (255,160,0)
BO1 = (35,165,115)
SV1 = (223,223,223)
KR1 = (0,0,0)
HL1 = (212,0,0)
PL1 = (226,0,203)
NL1 = (122,0,0)
BL1 = (0,0,0)
CH1 = (127,73,0)
CH2 = (84,45,0)
CA1 = (145,0,185)
CA2 = (194,60,221)
BN1 = (2,85,198)
BN3 = (221,244,0)
BN2 = (231,0,0)
BN4 = (0,208,0)
BN5 = (0,0,0)
TH1 = (0,0,0)
TH2 = (238,0,0)
KC2 = (216,56,0)
KC3 = (157,39,0)
KC1 = (0,0,0)
HB1 = (255,255,255)
HB2 = (25,100,216)
FC2 = (81,81,81)
FC3 = (53,53,53)
FC1 = (0,0,0)
BA1 = (48,36,203)
BA2 = (39,31,167)
BA3 = (30,29,126)
FD1 = (63,47,28)
FD2 = (0,0,0)
PC2 = (38,47,75)
PC4 = (255,220,0)
PC1 = (0,0,0)
PC3 = (255,255,255)
TD1 = (240,240,240)
TD3 = (44,131,255)
TD2 = (255,0,0)
VR2 = (180,180,180)
VR3 = (141,141,141)
VR1 = (0,0,0)
CSH2 = (96,55,4)
CSH3 = (209,111,0)
CSH1 = (0,0,0)
SSH1 = (0,0,0)
EP1 = (0,0,0)
ND1 = (97,224,220)
ND2 = (0,0,0)
BSH2 = (115,0,67)
BSH3 = (153,0,89)
BSH4 = (188,0,92)
BSH1 = (0,0,0)
EM2 = (215,215,215)
EM1 = (0,0,0)
RSH1 = (0,0,0)
TI1 = (255,186,0)
TI2 = (255,0,0)
MH2 = (255,255,255)
MH1 = (0,0,0)
PH2 = (97,224,220)
PH1 = (250,128,114)
PH3 = (0,0,0)
WG3 = (97,224,220)
WG2 = (82,78,0)
WG1 = (28,27,0)
OH2 = (50,40,40)
OH1 = (90,65,55)
ETI = SV1 #(0,223,138)
HOB1 = (255,192,0)
HOB2 = (255,255,0)
HOB3 = (255,0,0)
HOB4 = (146,208,80)
HOB5 = (192,0,0)
GCR1 = (191,191,191)
GCR2 = (128,128,128)
GCR3 = (219,219,219)
GCR4 = (219,227,115)
GCR5 = (255,192,0)
KGC = (159,109,9)
FL1 = (219,227,115)
FL2 = (255,192,0)
FL3 = (146,208,80)
FL4 = (255,255,0)
EOY1 = (255,192,0)
EOY2 = (255,255,0)
ELT = (255,192,0)
DHL1 = (190,130,70)
DHL2 = (80,50,30)
DHL3 = (0,0,0)
THR1=(200,140,90)
# The pixel matrix for each attribute ("atty")
ORC_HELMET=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,OH2,OH2,OH2,OH2,OH2,OH2,OH2,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,OH2,OH2,OH2,OH2,OH2,OH2,OH2,OH2,OH2,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,OH2,OH2,OH2,OH2,OH2,OH2,OH2,OH2,OH2,OH2,OH2,0,0,0,0,0,0],
[0,0,0,0,0,0,OH2,OH2,OH2,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH2,OH2,OH2,0,0,0,0,0],
[0,0,0,0,0,0,0,0,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,0,OH1,0,0,0,0],
[0,0,0,0,0,0,0,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,0,0,0,0,0],
[0,0,0,0,0,0,0,OH1,0,0,OH1,OH1,OH1,0,0,OH1,OH1,OH1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,OH1,0,0,OH1,OH1,OH1,0,0,OH1,OH1,OH1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,0,OH1,0,0,0,0],
[0,0,0,0,0,0,0,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,0,OH1,0,0,0,0],
[0,0,0,0,0,0,0,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,0,0,0,0],
[0,0,0,0,0,0,0,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,OH1,0,OH1,0,0,0,0],
[0,0,0,0,0,0,0,OH1,OH1,0,0,0,0,0,0,OH1,OH1,OH1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,OH1,OH1,0,0,0,0,0,0,OH1,OH1,OH1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,OH1,OH1,0,0,0,0,0,0,OH1,OH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,OH1,OH1,0,0,0,0,OH1,OH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
CIGARETTE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,CG4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,CG4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,CG4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,CG4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,CG4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,CG4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,CG1,CG1,CG1,CG1,CG1,CG1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,CG1,CG3,CG2,CG2,CG2,CG2,CG2,CG1,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,CG1,CG1,CG1,CG1,CG1,CG1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
PIPE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,PI4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,PI4,PI4,PI4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,PI4,PI4,PI4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,PI4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,PI4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,PI1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,PI1,PI1,PI1,PI1,PI1,0,0,PI1,PI2,PI1,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,PI1,PI2,PI2,PI2,PI1,0,PI1,PI2,PI1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,PI1,PI3,PI2,PI3,PI1,PI1,PI2,PI1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,PI1,PI3,PI2,PI2,PI2,PI1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,PI1,PI1,PI1,PI1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
MOLE_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SMILE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SM1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
FROWN=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,FW1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
VAPE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,VP1,VP1,VP1,VP1,VP1,VP1,VP1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,VP1,VP2,VP3,VP3,VP3,VP3,VP3,VP1,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,VP1,VP1,VP1,VP1,VP1,VP1,VP1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
NOSE_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,CN1,CN1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,CN1,CN1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
NOSE_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,CN1,CN1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,CN1,CN1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
NOSE_3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,CN1,CN1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,CN1,CN1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,RC1,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
MASK_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MK1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MK1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,MK1,0,0,0,0,0,0,MK1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,MK1,MK1,MK2,MK1,MK1,MK1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,MK1,MK1,MK1,MK1,MK1,MK1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,MK2,MK1,MK1,MK1,MK1,MK2,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,MK1,MK1,MK1,MK1,MK1,MK1,MK1,MK1,MK1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,MK1,MK1,MK1,MK1,MK1,MK1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,MK1,MK1,MK1,MK1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
MASK_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MK1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,MK1,0,0,0,0,0,0,MK1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,MK1,MK1,MK2,MK1,MK1,MK1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,MK1,MK1,MK1,MK1,MK1,MK1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,MK2,MK1,MK1,MK1,MK1,MK2,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,MK1,MK1,MK1,MK1,MK1,MK1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,MK1,MK1,MK1,MK1,MK1,MK1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,MK1,MK1,MK1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
EARS_0=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,ER1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,ER1,ER2,ER1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,ER1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
EARS_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,ER1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,ER1,ER2,ER1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,ER1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
EARS_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,ER1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,ER1,ER2,ER1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,ER1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
EARS_3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,ER1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,ER1,ER2,ER1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,ER1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
EARS_4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,ER1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,ER1,ER2,ER1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,ER1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
GoldChain_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,GC1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,GC1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,GC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCR1 = (20,20,20)
SCARE_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
GoldChain_4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,GC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,GC1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,GC1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
RING_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,RG1,0,RG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,RG1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
BROCHE_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BO1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,BO1,BO1,BO1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BO1,0,0,0,0,0,0,0]
]
BROCHE_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,BO1,BO1,BO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BO1,0,0,0,0,0,0,0,0]
]
BROCHE_3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,BO1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,BO1,BO1,BO1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,BO1,0,0,0,0,0,0,0,0,0]
]
SilverChain_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SV1,SV1,SV1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SilverChain_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SV1,SV1,SV1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
GoldChain_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,GC1,GC1,GC1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
RING_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,RG1,0,RG1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,RG1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SilverChain_3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SV1,SV1,SV1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
GoldChain_3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,GC1,GC1,GC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
CHOKER=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,KR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,KR1,KR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,KR1,KR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
RING_3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,RG1,0,RG1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,RG1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
CAP_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,CA1,CA1,CA1,CA1,CA1,CA1,CA1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,CA1,CA1,CA2,CA1,CA1,CA1,CA1,CA1,CA1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,CA1,CA2,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,BG1,0,0,0,0,0],
[0,0,0,0,0,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,BG1,0,0,0,0,0],
[0,0,0,0,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
CAP_7=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,BG1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,BG1,CA1,CA1,CA2,CA1,CA1,CA1,CA1,CA1,CA1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,BG1,BG1,BG1,CA1,CA2,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,BG1,0,0,0,0,0],
[0,0,0,0,0,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,BG1,0,0,0,0,0],
[0,0,0,0,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
BEANI_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BG1,BN1,BN1,BN1,BN1,BN1,BG1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BG1,BG1,BG1,BG1,BN5,BG1,BG1,BG1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,BG1,BN2,BN2,BN3,BN3,BN3,BN1,BN1,BG1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,BG1,BN2,BN2,BN2,BN3,BN3,BN3,BN1,BN1,BN1,BG1,BG1,0,0,0,0,0],
[0,0,0,0,0,0,BG1,BN2,BN2,BN2,BN3,BN3,BN3,BN3,BN3,BN1,BN1,BN1,BG1,0,0,0,0,0],
[0,0,0,0,0,0,BG1,BN2,BN2,BN3,BN3,BN3,BN3,BN3,BN3,BN3,BN1,BN1,BG1,0,0,0,0,0],
[0,0,0,0,0,0,BG1,0,0,BN4,BN4,BN4,BN4,BN4,BN4,BN4,0,0,0,BG1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
BEANI_7=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,BG1,BG1,BN1,BN1,BN1,BN1,BN1,BG1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,BG1,BG1,BG1,BG1,BG1,BN5,BG1,BG1,BG1,BG1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,BG1,BG1,BG1,BG1,BN2,BN2,BN3,BN3,BN3,BN1,BN1,BG1,BG1,BG1,0,0,0,0,0],
[0,0,0,0,0,BG1,BG1,BG1,BN2,BN2,BN2,BN3,BN3,BN3,BN1,BN1,BN1,BG1,BG1,0,0,0,0,0],
[0,0,0,0,0,BG1,BG1,BN2,BN2,BN2,BN3,BN3,BN3,BN3,BN3,BN1,BN1,BN1,BG1,0,0,0,0,0],
[0,0,0,0,0,BG1,BG1,BN2,BN2,BN3,BN3,BN3,BN3,BN3,BN3,BN3,BN1,BN1,BG1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BN4,BN4,BN4,BN4,BN4,BN4,BN4,0,0,BG1,BG1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
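# ---------------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the original generator):
# a minimal rendering helper assuming (a) Pillow is available and (b) each
# non-zero token (CA1, BG1, BN1, ...) resolves to an RGBA 4-tuple; both are
# assumptions about this codebase, and the magenta fallback is purely a
# hypothetical debugging aid for tokens of any other type.
from PIL import Image  # assumption: Pillow is installed in this environment

def render_layer(grid, scale=10):
    """Render one 24x24 token grid to an upscaled RGBA image (0 = clear)."""
    img = Image.new("RGBA", (len(grid[0]), len(grid)), (0, 0, 0, 0))
    for y, row in enumerate(grid):
        for x, token in enumerate(row):
            if token != 0:
                rgba = token if isinstance(token, tuple) else (255, 0, 255, 255)
                img.putpixel((x, y), rgba)
    # Nearest-neighbour upscaling keeps the pixel-art edges crisp.
    return img.resize((img.width * scale, img.height * scale), Image.NEAREST)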
TOPHAT_1=[
[0,0,0,0,0,0,0,0,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TH2,TH2,TH2,TH2,TH2,TH2,TH2,TH2,TH2,TH2,TH2,0,0,0,0,0,0],
[0,0,0,0,0,0,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,0,0,0,0,0],
[0,0,0,0,0,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,0,0,0,0],
[0,0,0,0,0,BG1,0,0,0,0,0,0,0,0,0,0,0,0,BG1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
TOPHAT_7=[
[0,0,0,0,0,0,0,0,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,0,0,0,0,0,0],
[0,0,0,0,0,BG1,BG1,TH2,TH2,TH2,TH2,TH2,TH2,TH2,TH2,TH2,TH2,TH2,BG1,0,0,0,0,0],
[0,0,0,0,0,BG1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,0,0,0,0,0],
[0,0,0,0,0,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
KNITTED_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BG1,KC1,KC1,KC1,KC1,KC1,KC1,KC1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,KC1,KC2,KC2,KC2,KC2,KC2,KC2,KC2,KC1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,KC1,KC2,KC2,KC2,KC2,KC2,KC2,KC2,KC2,KC2,KC1,BG1,0,0,0,0,0],
[0,0,0,0,0,BG1,KC1,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC1,BG1,0,0,0,0],
[0,0,0,0,0,BG1,KC1,KC3,KC2,KC3,KC2,KC3,KC2,KC3,KC2,KC3,KC2,KC3,KC1,BG1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
KNITTED_7=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,BG1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,BG1,BG1,KC1,KC1,KC1,KC1,KC1,KC1,KC1,BG1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,BG1,BG1,BG1,KC1,KC2,KC2,KC2,KC2,KC2,KC2,KC2,KC1,BG1,BG1,0,0,0,0,0],
[0,0,0,0,0,BG1,BG1,KC1,KC2,KC2,KC2,KC2,KC2,KC2,KC2,KC2,KC2,KC1,BG1,0,0,0,0,0],
[0,0,0,0,0,BG1,KC1,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC1,BG1,0,0,0,0],
[0,0,0,0,0,BG1,KC1,KC3,KC2,KC3,KC2,KC3,KC2,KC3,KC2,KC3,KC2,KC3,KC1,BG1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HEADBAND_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HB1,HB1,HB1,HB1,HB1,HB1,HB1,HB1,HB1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HB2,HB2,HB2,HB2,HB2,HB2,HB2,HB2,HB2,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
COWBOY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,CH1,CH1,0,0,0,CH1,CH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,BG1,0,0,0,0,0,0],
[0,0,0,CH1,0,0,BG1,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,BG1,0,0,CH1,0,0],
[0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0],
[0,0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BG1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
COWBOY_7=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,CH1,CH1,0,0,0,CH1,CH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,BG1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,BG1,BG1,BG1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,BG1,BG1,0,0,0,0,0],
[0,0,0,CH1,0,BG1,BG1,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,BG1,0,0,CH1,0,0],
[0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0],
[0,0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BG1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
FORCAP_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,FC1,FC1,FC1,FC1,FC1,FC1,FC1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,FC1,FC2,FC2,FC2,FC2,FC2,FC2,FC3,FC1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,FC1,FC2,FC2,FC2,FC2,FC2,FC2,FC2,FC2,FC3,FC1,0,0,0,0,0,0],
[0,0,0,0,0,0,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC2,FC2,FC2,FC1,BG1,0,0,0,0,0],
[0,0,0,0,0,FC1,FC3,FC3,FC3,FC3,FC3,FC3,FC3,FC3,FC1,FC2,FC2,FC1,0,0,0,0,0,0],
[0,0,0,0,0,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
FORCAP_7=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,BG1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,BG1,FC1,FC2,FC2,FC2,FC2,FC2,FC2,FC3,FC1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,BG1,BG1,FC1,FC2,FC2,FC2,FC2,FC2,FC2,FC2,FC2,FC3,FC1,BG1,0,0,0,0,0],
[0,0,0,0,0,BG1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC2,FC2,FC2,FC1,BG1,0,0,0,0,0],
[0,0,0,0,0,FC1,FC3,FC3,FC3,FC3,FC3,FC3,FC3,FC3,FC1,FC2,FC2,FC1,0,0,0,0,0,0],
[0,0,0,0,0,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
BANDANA_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BA2,BA2,BA2,BA2,BA2,BA2,BA2,BA2,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BA2,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA2,BG1,0,0,0,0,0,0],
[0,0,0,0,0,BG1,0,BA2,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA2,BG1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BA2,BA1,BA1,BA1,BA2,BA2,BA2,BA2,BA1,BA3,BA2,BA1,BA2,BA1,0,0],
[0,0,0,0,0,0,0,0,0,BA2,BA2,BA2,0,0,0,0,0,0,BA3,BA2,BA1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BA3,BA1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BA1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
BANDANA_7=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,BG1,BA2,BA2,BA2,BA2,BA2,BA2,BA2,BA2,BG1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,BG1,BG1,BA2,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA2,BG1,BG1,0,0,0,0,0],
[0,0,0,0,0,BG1,BG1,BA2,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA2,BG1,0,0,0,0,0],
[0,0,0,0,0,BG1,0,0,BA2,BA1,BA1,BA1,BA2,BA2,BA2,BA2,BA1,BA3,BA2,BA1,BA2,BA1,0,0],
[0,0,0,0,0,0,0,0,0,BA2,BA2,BA2,0,0,0,0,0,0,BA3,BA2,BA1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BA3,BA1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BA1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
FEDORA_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,FD1,FD1,FD1,FD1,FD1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,FD1,FD1,FD1,FD1,FD1,FD1,FD1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BG1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,FD2,FD2,FD2,FD2,FD2,FD2,FD2,FD2,FD2,FD2,FD2,BG1,0,0,0,0,0],
[0,0,0,0,0,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,0,0,0,0],
[0,0,0,0,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
FEDORA_7=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,FD1,FD1,FD1,FD1,FD1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,BG1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,BG1,BG1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,BG1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,BG1,BG1,BG1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,BG1,BG1,0,0,0,0,0],
[0,0,0,0,0,BG1,BG1,FD2,FD2,FD2,FD2,FD2,FD2,FD2,FD2,FD2,FD2,FD2,BG1,0,0,0,0,0],
[0,0,0,0,0,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,0,0,0,0],
[0,0,0,0,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
POLICE_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,PC1,PC1,PC1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PC1,PC1,PC1,PC1,PC2,PC2,PC2,PC1,PC1,PC1,PC1,0,0,0,0,0,0],
[0,0,0,0,0,0,PC1,PC2,PC2,PC2,PC2,PC2,PC4,PC2,PC2,PC2,PC2,PC2,PC1,0,0,0,0,0],
[0,0,0,0,0,0,PC1,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC1,0,0,0,0,0],
[0,0,0,0,0,0,BG1,PC1,PC3,PC1,PC3,PC1,PC3,PC1,PC3,PC1,PC3,PC1,0,0,0,0,0,0],
[0,0,0,0,0,0,PC1,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC1,PC1,PC1,0,0,0,0,0,0],
[0,0,0,0,0,0,PC1,PC1,PC1,PC1,PC1,PC1,PC1,PC1,PC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
POLICE_7=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,PC1,PC1,PC1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PC1,PC1,PC1,PC1,PC2,PC2,PC2,PC1,PC1,PC1,PC1,0,0,0,0,0,0],
[0,0,0,0,0,0,PC1,PC2,PC2,PC2,PC2,PC2,PC4,PC2,PC2,PC2,PC2,PC2,PC1,0,0,0,0,0],
[0,0,0,0,0,BG1,PC1,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC1,0,0,0,0,0],
[0,0,0,0,0,0,0,PC1,PC3,PC1,PC3,PC1,PC3,PC1,PC3,PC1,PC3,PC1,0,0,0,0,0,0],
[0,0,0,0,0,0,PC1,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC1,PC1,PC1,0,0,0,0,0,0],
[0,0,0,0,0,0,PC1,PC1,PC1,PC1,PC1,PC1,PC1,PC1,PC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
CAP_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,BG1,BG1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,BG1,BG1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,BG1,BG1,BG1,BG1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BG1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,0,0,0,0,0,0,0,0],
[0,0,0,0,BG1,BG1,BG1,BG1,CA1,CA1,CA2,CA1,CA1,CA1,CA1,CA1,CA1,BG1,BG1,BG1,BG1,0,0,0],
[0,0,0,0,BG1,BG1,BG1,BG1,CA1,CA2,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,BG1,BG1,BG1,0,0,0],
[0,0,0,0,0,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,BG1,BG1,0,0,0,0],
[0,0,0,0,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,0,BG1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
BEANI_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BG1,BN1,BN1,BN1,BN1,BN1,BG1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BG1,BG1,BG1,BN5,BG1,BG1,BG1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BG1,BN2,BN2,BN3,BN3,BN3,BN1,BN1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,BN2,BN2,BN2,BN3,BN3,BN3,BN1,BN1,BN1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,BN2,BN2,BN2,BN3,BN3,BN3,BN3,BN3,BN1,BN1,BN1,BG1,0,0,0,0,0],
[0,0,0,0,0,0,BG1,BN2,BN2,BN3,BN3,BN3,BN3,BN3,BN3,BN3,BN1,BN1,BG1,0,0,0,0,0],
[0,0,0,0,0,0,BG1,0,0,BN4,BN4,BN4,BN4,BN4,BN4,BN4,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
BEANI_3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,BN1,BN1,BN1,BN1,BN1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,BN5,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BN2,BN2,BN3,BN3,BN1,BN1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BN2,BN2,BN2,BN3,BN3,BN1,BN1,BN1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BN2,BN2,BN2,BN3,BN3,BN3,BN3,BN1,BN1,BN1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BN2,BN2,BN3,BN3,BN3,BN3,BN3,BN3,BN1,BN1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BN4,BN4,BN4,BN4,BN4,BN4,BN4,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
TOPHAT_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,0,0,0,0,0,0],
[0,0,0,0,BG1,BG1,BG1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,BG1,BG1,BG1,0,0,0],
[0,0,0,0,BG1,BG1,BG1,TH2,TH2,TH2,TH2,TH2,TH2,TH2,TH2,TH2,TH2,TH2,BG1,BG1,BG1,0,0,0],
[0,0,0,0,0,BG1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,BG1,0,0,0,0],
[0,0,0,0,0,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
TOPHAT_3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TH2,TH2,TH2,TH2,TH2,TH2,TH2,TH2,TH2,TH2,0,0,0,0,0,0,0],
[0,0,0,0,0,0,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,0,0,0,0,0,0],
[0,0,0,0,0,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
KNITTED_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BG1,KC1,KC1,KC1,KC1,KC1,KC1,KC1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,KC1,KC2,KC2,KC2,KC2,KC2,KC2,KC2,KC1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,KC1,KC2,KC2,KC2,KC2,KC2,KC2,KC2,KC2,KC2,KC1,BG1,0,0,0,0,0],
[0,0,0,0,0,BG1,KC1,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC1,BG1,0,0,0,0],
[0,0,0,0,0,BG1,KC1,KC3,KC2,KC3,KC2,KC3,KC2,KC3,KC2,KC3,KC2,KC3,KC1,BG1,0,0,0,0],
[0,0,0,0,0,BG1,BG1,0,0,0,0,0,0,0,0,0,0,0,0,BG1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
KNITTED_6=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BG1,KC1,KC1,KC1,KC1,KC1,KC1,KC1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,KC1,KC2,KC2,KC2,KC2,KC2,KC2,KC2,KC1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,KC1,KC2,KC2,KC2,KC2,KC2,KC2,KC2,KC2,KC2,KC1,BG1,0,0,0,0,0],
[0,0,0,0,0,BG1,KC1,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC1,0,0,0,0,0],
[0,0,0,0,0,BG1,KC1,KC3,KC2,KC3,KC2,KC3,KC2,KC3,KC2,KC3,KC2,KC3,KC1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
KNITTED_5=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BG1,KC1,KC1,KC1,KC1,KC1,KC1,KC1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,KC1,KC2,KC2,KC2,KC2,KC2,KC2,KC2,KC1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,KC1,KC2,KC2,KC2,KC2,KC2,KC2,KC2,KC2,KC2,KC1,BG1,0,0,0,0,0],
[0,0,0,0,0,BG1,KC1,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC1,BG1,0,0,0,0],
[0,0,0,0,0,BG1,KC1,KC3,KC2,KC3,KC2,KC3,KC2,KC3,KC2,KC3,KC2,KC3,KC1,BG1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HEADBAND_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HB1,HB1,HB1,HB1,HB1,HB1,HB1,HB1,HB1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HB2,HB2,HB2,HB2,HB2,HB2,HB2,HB2,HB2,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
COWBOY_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,BG1,BG1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,BG1,BG1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,CH1,CH1,BG1,BG1,BG1,CH1,CH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0,0,0,0,0,0],
[0,0,0,0,BG1,BG1,BG1,BG1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,BG1,BG1,BG1,BG1,0,0,0],
[0,0,0,0,BG1,BG1,BG1,BG1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,BG1,BG1,BG1,BG1,0,0,0],
[0,0,0,CH1,0,BG1,BG1,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,BG1,BG1,0,CH1,0,0],
[0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0],
[0,0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0,0],
[0,0,0,0,0,BG1,0,0,0,0,0,0,0,0,0,0,0,0,0,BG1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
COWBOY_4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,CH1,CH1,0,0,0,CH1,CH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,BG1,0,0,0,0,0,0],
[0,0,0,CH1,0,0,BG1,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,BG1,0,0,CH1,0,0],
[0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0],
[0,0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
COWBOY_3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,CH1,CH1,0,0,CH1,CH1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,BG1,0,0,0,0,0,0,0],
[0,0,0,CH1,0,0,BG1,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,BG1,0,0,CH1,0,0,0],
[0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0,0],
[0,0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
COWBOY_5=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,CH1,CH1,0,0,0,CH1,CH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,BG1,0,0,0,0,0,0],
[0,0,0,CH1,0,0,BG1,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,BG1,0,0,CH1,0,0],
[0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0],
[0,0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
FORCAP_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,BG1,BG1,BG1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,BG1,BG1,BG1,BG1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BG1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,BG1,BG1,BG1,BG1,FC1,FC2,FC2,FC2,FC2,FC2,FC2,FC3,FC1,BG1,BG1,BG1,BG1,0,0,0],
[0,0,0,0,BG1,BG1,BG1,FC1,FC2,FC2,FC2,FC2,FC2,FC2,FC2,FC2,FC3,FC1,BG1,BG1,BG1,0,0,0],
[0,0,0,0,0,BG1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC2,FC2,FC2,FC1,BG1,BG1,0,0,0,0],
[0,0,0,0,0,FC1,FC3,FC3,FC3,FC3,FC3,FC3,FC3,FC3,FC1,FC2,FC2,FC1,0,BG1,0,0,0,0],
[0,0,0,0,0,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,0,0,0,0,0,0],
[0,0,0,0,0,BG1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
FORCAP_3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,FC1,FC1,FC1,FC1,FC1,FC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,FC1,FC2,FC2,FC2,FC2,FC2,FC3,FC1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,FC1,FC2,FC2,FC2,FC2,FC2,FC2,FC2,FC3,FC1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC2,FC2,FC2,FC1,0,0,0,0,0,0,0],
[0,0,0,0,0,FC1,FC3,FC3,FC3,FC3,FC3,FC3,FC3,FC1,FC2,FC2,FC1,0,0,0,0,0,0,0],
[0,0,0,0,0,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
BANDANA_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,BG1,BG1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,BG1,BG1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,BG1,BG1,BG1,BG1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0,0,0,0,0,0],
[0,0,0,0,BG1,BG1,BG1,BG1,BA2,BA2,BA2,BA2,BA2,BA2,BA2,BA2,BG1,BG1,BG1,BG1,BG1,0,0,0],
[0,0,0,0,BG1,BG1,BG1,BA2,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA2,BG1,BG1,BG1,BG1,0,0,0],
[0,0,0,0,0,BG1,BG1,BA2,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA2,BG1,BG1,0,0,0,0],
[0,0,0,0,0,BG1,BG1,0,BA2,BA1,BA1,BA1,BA2,BA2,BA2,BA2,BA1,BA3,BA2,BA1,BA2,BA1,0,0],
[0,0,0,0,0,BG1,0,0,0,BA2,BA2,BA2,0,0,0,0,0,0,BA3,BA2,BA1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BA3,BA1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BA1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
FEDORA_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,BG1,BG1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,BG1,BG1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BG1,FD1,FD1,FD1,FD1,FD1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BG1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,0,0,0,0,0,0,0,0],
[0,0,0,0,BG1,BG1,BG1,BG1,BG1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,BG1,BG1,BG1,BG1,BG1,0,0,0],
[0,0,0,0,BG1,BG1,BG1,BG1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,BG1,BG1,BG1,BG1,0,0,0],
[0,0,0,0,0,BG1,BG1,FD2,FD2,FD2,FD2,FD2,FD2,FD2,FD2,FD2,FD2,FD2,BG1,BG1,0,0,0,0],
[0,0,0,0,0,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,0,0,0,0],
[0,0,0,0,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BG1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
FEDORA_5=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,FD1,FD1,FD1,FD1,FD1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,FD1,FD1,FD1,FD1,FD1,FD1,FD1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BG1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,FD2,FD2,FD2,FD2,FD2,FD2,FD2,FD2,FD2,FD2,FD2,BG1,0,0,0,0,0],
[0,0,0,0,0,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,0,0,0,0],
[0,0,0,0,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
POLICE_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,BG1,BG1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,BG1,BG1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,BG1,PC1,PC1,PC1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PC1,PC1,PC1,PC1,PC2,PC2,PC2,PC1,PC1,PC1,PC1,0,0,0,0,0,0],
[0,0,0,0,BG1,BG1,PC1,PC2,PC2,PC2,PC2,PC2,PC4,PC2,PC2,PC2,PC2,PC2,PC1,BG1,BG1,0,0,0],
[0,0,0,0,BG1,BG1,PC1,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC1,BG1,BG1,0,0,0],
[0,0,0,0,0,BG1,BG1,PC1,PC3,PC1,PC3,PC1,PC3,PC1,PC3,PC1,PC3,PC1,BG1,BG1,0,0,0,0],
[0,0,0,0,0,BG1,PC1,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC1,PC1,PC1,0,BG1,0,0,0,0],
[0,0,0,0,0,0,PC1,PC1,PC1,PC1,PC1,PC1,PC1,PC1,PC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
POLICE_3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,BG1,BG1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,BG1,BG1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,BG1,PC1,PC1,PC1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PC1,PC1,PC1,PC1,PC2,PC2,PC2,PC1,PC1,PC1,PC1,0,0,0,0,0,0],
[0,0,0,0,BG1,BG1,PC1,PC2,PC2,PC2,PC2,PC2,PC4,PC2,PC2,PC2,PC2,PC2,PC1,BG1,BG1,0,0,0],
[0,0,0,0,BG1,BG1,PC1,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC1,BG1,BG1,0,0,0],
[0,0,0,0,0,BG1,BG1,PC1,PC3,PC1,PC3,PC1,PC3,PC1,PC3,PC1,PC3,PC1,BG1,BG1,0,0,0,0],
[0,0,0,0,0,0,PC1,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC1,PC1,PC1,0,BG1,0,0,0,0],
[0,0,0,0,0,0,PC1,PC1,PC1,PC1,PC1,PC1,PC1,PC1,PC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
CAP_6=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,CA1,CA1,CA1,CA1,CA1,CA1,CA1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,CA1,CA1,CA2,CA1,CA1,CA1,CA1,CA1,CA1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,CA1,CA2,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,0,0,0,0,0,0],
[0,0,0,0,0,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,0,0,0,0,0,0],
[0,0,0,0,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
CAP_8=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BG1,BG1,BG1,0,BG1,0,BG1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BG1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,BG1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,BG1,CA1,CA1,CA2,CA1,CA1,CA1,CA1,CA1,CA1,BG1,BG1,0,0,0,0,0],
[0,0,0,0,0,BG1,0,BG1,CA1,CA2,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,BG1,BG1,0,0,0,0],
[0,0,0,0,0,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,0,0,BG1,0,0,0],
[0,0,0,0,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,0,0,0,BG1,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
TOPHAT_6=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,BG1,BG1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,BG1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,BG1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,BG1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,BG1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,BG1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,BG1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,BG1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,BG1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,BG1,TH2,TH2,TH2,TH2,TH2,TH2,TH2,TH2,TH2,TH2,TH2,BG1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,TH1,BG1,BG1,BG1,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HEADBAND_6=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HB1,HB1,HB1,HB1,HB1,HB1,HB1,HB1,HB1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HB2,HB2,HB2,HB2,HB2,HB2,HB2,HB2,HB2,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
COWBOY_6=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,CH1,CH1,0,0,0,CH1,CH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0,0,0,0,0,0],
[0,0,0,CH1,0,0,0,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,0,0,0,CH1,0,0],
[0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0],
[0,0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
COWBOY_8=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,CH1,CH1,BG1,0,BG1,CH1,CH1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,BG1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,BG1,BG1,0,0,0,0,0],
[0,0,0,0,0,BG1,BG1,BG1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,BG1,BG1,BG1,0,0,0,0],
[0,0,0,CH1,0,BG1,BG1,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,BG1,BG1,BG1,CH1,0,0],
[0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0],
[0,0,0,0,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
FORCAP_6=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FC1,FC2,FC2,FC2,FC2,FC2,FC2,FC3,FC1,BG1,BG1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,BG1,FC1,FC2,FC2,FC2,FC2,FC2,FC2,FC2,FC2,FC3,FC1,BG1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC2,FC2,FC2,FC1,0,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,FC1,FC3,FC3,FC3,FC3,FC3,FC3,FC3,FC3,FC1,FC2,FC2,FC1,0,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,0,BG1,BG1,BG1,BG1,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
FORCAP_8=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FC1,FC2,FC2,FC2,FC2,FC2,FC2,FC3,FC1,BG1,BG1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,BG1,FC1,FC2,FC2,FC2,FC2,FC2,FC2,FC2,FC2,FC3,FC1,BG1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC2,FC2,FC2,FC1,0,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,FC1,FC3,FC3,FC3,FC3,FC3,FC3,FC3,FC3,FC1,FC2,FC2,FC1,0,0,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,FC1,0,0,0,BG1,BG1,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
BANDANA_6=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BA2,BA2,BA2,BA2,BA2,BA2,BA2,BA2,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BA2,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA2,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BA2,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA2,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BA2,BA1,BA1,BA1,BA2,BA2,BA2,BA2,BA1,BA3,BA2,BA1,BA2,BA1,0,0],
[0,0,0,0,0,0,0,0,0,BA2,BA2,BA2,0,0,0,0,0,0,BA3,BA2,BA1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BA3,BA1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BA1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
FEDORA_6=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,FD1,FD1,FD1,FD1,FD1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,FD1,FD1,FD1,FD1,FD1,FD1,FD1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,FD1,FD1,FD1,FD1,FD1,FD1,FD1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,FD2,FD2,FD2,FD2,FD2,FD2,FD2,FD2,FD2,FD2,FD2,0,0,0,0,0,0],
[0,0,0,0,0,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,0,0,0,0],
[0,0,0,0,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,FD1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
POLICE_6=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,PC1,PC1,PC1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,BG1,PC1,PC1,PC1,PC1,PC2,PC2,PC2,PC1,PC1,PC1,PC1,BG1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,PC1,PC2,PC2,PC2,PC2,PC2,PC4,PC2,PC2,PC2,PC2,PC2,PC1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,PC1,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC1,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,BG1,PC1,PC3,PC1,PC3,PC1,PC3,PC1,PC3,PC1,PC3,PC1,0,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,PC1,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC2,PC1,PC1,PC1,0,BG1,BG1,BG1,BG1,0],
[0,BG1,BG1,BG1,BG1,BG1,PC1,PC1,PC1,PC1,PC1,PC1,PC1,PC1,PC1,0,0,0,0,0,BG1,BG1,BG1,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
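# The remaining blocks switch from headwear to eyewear sprites (TD appears to
# be 3D glasses, VR a VR headset, plus the various shades, patches and masks).
# The numbered suffixes (_1, _2, _6, ...) hold the same sprite at different
# vertical row offsets, presumably to fit different base head shapes.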
TD_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TD1,TD2,TD2,TD2,TD1,TD3,TD3,TD3,TD1,TD1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TD1,TD2,TD2,TD2,TD1,TD3,TD3,TD3,TD1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
VR_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,VR1,VR3,VR2,VR2,VR2,VR2,VR2,VR2,VR2,VR3,VR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,VR1,VR2,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR2,VR3,VR1,0,0,0,0,0,0],
[0,0,0,0,0,0,VR1,VR2,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR2,VR3,VR1,0,0,0,0,0,0],
[0,0,0,0,0,0,VR1,VR3,VR2,VR2,VR2,VR2,VR2,VR2,VR2,VR3,VR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ClassicShades_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,CSH1,CSH2,CSH2,CSH1,0,CSH1,CSH2,CSH2,CSH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,CSH1,CSH3,CSH3,CSH1,0,CSH1,CSH3,CSH3,CSH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,CSH1,CSH1,0,0,0,CSH1,CSH1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SmallShades_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,SSH1,SSH1,SSH1,SSH1,SSH1,SSH1,SSH1,SSH1,SSH1,SSH1,SSH1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,SSH1,SSH1,0,0,0,SSH1,SSH1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,SSH1,SSH1,0,0,0,SSH1,SSH1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
EyePatch_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,EP1,EP1,EP1,EP1,EP1,EP1,EP1,EP1,EP1,EP1,EP1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,EP1,EP1,EP1,EP1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,EP1,EP1,EP1,EP1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,EP1,EP1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
NerdGlasses_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,ND2,ND2,ND2,ND2,0,ND2,ND2,ND2,ND2,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,ND2,ND1,ND1,ND2,ND2,ND2,ND1,ND1,ND2,ND2,ND2,0,0,0,0,0,0],
[0,0,0,0,0,0,0,ND2,ND1,ND1,ND2,0,ND2,ND1,ND1,ND2,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,ND2,ND2,ND2,ND2,0,ND2,ND2,ND2,ND2,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
BigShades_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BSH1,BSH1,BSH1,BSH1,BSH1,0,BSH1,BSH1,BSH1,BSH1,BSH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BSH1,BSH2,BSH2,BSH2,BSH1,BSH1,BSH1,BSH2,BSH2,BSH2,BSH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BSH1,BSH3,BSH3,BSH3,BSH1,0,BSH1,BSH3,BSH3,BSH3,BSH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BSH1,BSH4,BSH4,BSH4,BSH1,0,BSH1,BSH4,BSH4,BSH4,BSH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BSH1,BSH1,BSH1,0,0,0,BSH1,BSH1,BSH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
EyeMask_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,EM1,0,0,EM1,EM1,EM1,0,0,EM1,EM1,EM1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,EM1,0,EM2,EM1,EM1,EM1,0,EM2,EM1,EM1,EM1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HornedRimGlasses_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,0,0,0,0,0,0],
[0,0,0,0,0,0,HRG1,HRG1,HRG2,HRG2,HRG3,HRG1,HRG1,HRG2,HRG2,HRG3,HRG1,HRG1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HRG4,HRG5,HRG3,0,0,HRG4,HRG5,HRG3,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HRG3,HRG3,HRG3,0,0,HRG3,HRG3,HRG3,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HornedRimGlasses_7=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,0,0,0,0,0,0],
[0,0,0,0,0,0,HRG1,HRG1,HRG2,HRG2,HRG3,HRG1,HRG1,HRG2,HRG2,HRG3,HRG1,HRG1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HRG4,HRG5,HRG3,0,0,HRG4,HRG5,HRG3,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HRG3,HRG3,HRG3,0,0,HRG3,HRG3,HRG3,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
RegularShades_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,0,0,0,0,0,0],
[0,0,0,0,0,0,RSH1,RSH1,RSH1,RSH1,0,0,RSH1,RSH1,RSH1,RSH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,RSH1,RSH1,0,0,0,0,RSH1,RSH1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
TD_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TD1,TD2,TD2,TD2,TD1,TD3,TD3,TD3,TD1,TD1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TD1,TD2,TD2,TD2,TD1,TD3,TD3,TD3,TD1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
VR_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,VR1,VR3,VR2,VR2,VR2,VR2,VR2,VR2,VR2,VR3,VR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,VR1,VR2,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR2,VR3,VR1,0,0,0,0,0,0],
[0,0,0,0,0,0,VR1,VR2,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR2,VR3,VR1,0,0,0,0,0,0],
[0,0,0,0,0,0,VR1,VR3,VR2,VR2,VR2,VR2,VR2,VR2,VR2,VR3,VR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ClassicShades_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,CSH1,CSH2,CSH2,CSH1,0,CSH1,CSH2,CSH2,CSH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,CSH1,CSH3,CSH3,CSH1,0,CSH1,CSH3,CSH3,CSH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,CSH1,CSH1,0,0,0,CSH1,CSH1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SmallShades_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,SSH1,SSH1,SSH1,SSH1,SSH1,SSH1,SSH1,SSH1,SSH1,SSH1,SSH1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,SSH1,SSH1,0,0,0,SSH1,SSH1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,SSH1,SSH1,0,0,0,SSH1,SSH1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
EyePatch_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,EP1,EP1,EP1,EP1,EP1,EP1,EP1,EP1,EP1,EP1,EP1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,EP1,EP1,EP1,EP1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,EP1,EP1,EP1,EP1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,EP1,EP1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
NerdGlasses_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,ND2,ND2,ND2,ND2,0,ND2,ND2,ND2,ND2,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,ND2,ND1,ND1,ND2,ND2,ND2,ND1,ND1,ND2,ND2,ND2,0,0,0,0,0,0],
[0,0,0,0,0,0,0,ND2,ND1,ND1,ND2,0,ND2,ND1,ND1,ND2,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,ND2,ND2,ND2,ND2,0,ND2,ND2,ND2,ND2,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
BigShades_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BSH1,BSH1,BSH1,BSH1,BSH1,0,BSH1,BSH1,BSH1,BSH1,BSH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BSH1,BSH2,BSH2,BSH2,BSH1,BSH1,BSH1,BSH2,BSH2,BSH2,BSH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BSH1,BSH3,BSH3,BSH3,BSH1,0,BSH1,BSH3,BSH3,BSH3,BSH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BSH1,BSH4,BSH4,BSH4,BSH1,0,BSH1,BSH4,BSH4,BSH4,BSH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BSH1,BSH1,BSH1,0,0,0,BSH1,BSH1,BSH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
EyeMask_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,EM1,0,0,EM1,EM1,EM1,0,0,EM1,EM1,EM1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,EM1,0,EM2,EM1,EM1,EM1,0,EM2,EM1,EM1,EM1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HornedRimGlasses_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,0,0,0,0,0,0],
[0,0,0,0,0,0,HRG1,HRG1,HRG2,HRG2,HRG3,HRG1,HRG1,HRG2,HRG2,HRG3,HRG1,HRG1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HRG4,HRG5,HRG3,0,0,HRG4,HRG5,HRG3,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HRG3,HRG3,HRG3,0,0,HRG3,HRG3,HRG3,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
RegularShades_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,0,0,0,0,0,0],
[0,0,0,0,0,0,RSH1,RSH1,RSH1,RSH1,0,0,RSH1,RSH1,RSH1,RSH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,RSH1,RSH1,0,0,0,0,RSH1,RSH1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
TD_6=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TD1,TD2,TD2,TD2,TD1,TD3,TD3,TD3,TD1,TD1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TD1,TD2,TD2,TD2,TD1,TD3,TD3,TD3,TD1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
VR_6=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,VR1,VR3,VR2,VR2,VR2,VR2,VR2,VR2,VR2,VR3,VR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,VR1,VR2,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR2,VR3,VR1,0,0,0,0,0,0],
[0,0,0,0,0,0,VR1,VR2,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR2,VR3,VR1,0,0,0,0,0,0],
[0,0,0,0,0,0,VR1,VR3,VR2,VR2,VR2,VR2,VR2,VR2,VR2,VR3,VR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ClassicShades_6=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,CSH1,CSH2,CSH2,CSH1,0,CSH1,CSH2,CSH2,CSH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,CSH1,CSH3,CSH3,CSH1,0,CSH1,CSH3,CSH3,CSH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,CSH1,CSH1,0,0,0,CSH1,CSH1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SmallShades_6=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,SSH1,SSH1,SSH1,SSH1,SSH1,SSH1,SSH1,SSH1,SSH1,SSH1,SSH1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,SSH1,SSH1,0,0,0,SSH1,SSH1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,SSH1,SSH1,0,0,0,SSH1,SSH1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
EyePatch_6=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,EP1,EP1,EP1,EP1,EP1,EP1,EP1,EP1,EP1,EP1,EP1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,EP1,EP1,EP1,EP1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,EP1,EP1,EP1,EP1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,EP1,EP1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
NerdGlasses_6=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,ND2,ND2,ND2,ND2,0,ND2,ND2,ND2,ND2,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,ND2,ND1,ND1,ND2,ND2,ND2,ND1,ND1,ND2,ND2,ND2,0,0,0,0,0,0],
[0,0,0,0,0,0,0,ND2,ND1,ND1,ND2,0,ND2,ND1,ND1,ND2,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,ND2,ND2,ND2,ND2,0,ND2,ND2,ND2,ND2,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
BigShades_6=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BSH1,BSH1,BSH1,BSH1,BSH1,0,BSH1,BSH1,BSH1,BSH1,BSH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BSH1,BSH2,BSH2,BSH2,BSH1,BSH1,BSH1,BSH2,BSH2,BSH2,BSH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BSH1,BSH3,BSH3,BSH3,BSH1,0,BSH1,BSH3,BSH3,BSH3,BSH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BSH1,BSH4,BSH4,BSH4,BSH1,0,BSH1,BSH4,BSH4,BSH4,BSH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BSH1,BSH1,BSH1,0,0,0,BSH1,BSH1,BSH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
EyeMask_6=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,EM1,0,0,EM1,EM1,EM1,0,0,EM1,EM1,EM1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,EM1,0,EM2,EM1,EM1,EM1,0,EM2,EM1,EM1,EM1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HornedRimGlasses_6=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,0,0,0,0,0,0],
[0,0,0,0,0,0,HRG1,HRG1,HRG2,HRG2,HRG3,HRG1,HRG1,HRG2,HRG2,HRG3,HRG1,HRG1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HRG4,HRG1,HRG3,0,0,HRG4,HRG1,HRG3,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HRG3,HRG3,HRG3,0,0,HRG3,HRG3,HRG3,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
RegularShades_6=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,0,0,0,0,0,0],
[0,0,0,0,0,0,RSH1,RSH1,RSH1,RSH1,0,0,RSH1,RSH1,RSH1,RSH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,RSH1,RSH1,0,0,0,0,RSH1,RSH1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
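# The numeric suffix on each layer name appears to select a base-head variant:
# e.g. RegularShades_6 above sits two rows lower (and starts one column
# further left) than RegularShades_3 further down, presumably to fit a
# differently shaped face. This is an inference from the row/column offsets,
# not something stated in the source.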
TIARA_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,TI1,TI1,0,TI1,TI1,TI1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,TI1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,TI1,TI2,TI1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,TI1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
KNITTED_3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BG1,KC1,KC1,KC1,KC1,KC1,KC1,BG1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,KC1,KC2,KC2,KC2,KC2,KC2,KC2,KC1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,KC1,KC2,KC2,KC2,KC2,KC2,KC2,KC2,KC2,KC1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,BG1,KC1,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC1,BG1,0,0,0,0,0],
[0,0,0,0,0,BG1,KC1,KC2,KC3,KC2,KC3,KC2,KC3,KC2,KC3,KC2,KC3,KC1,BG1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HEADBAND_3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HB1,HB1,HB1,HB1,HB1,HB1,HB1,HB1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HB2,HB2,HB2,HB2,HB2,HB2,HB2,HB2,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HEADBAND_5=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HB1,HB1,HB1,HB1,HB1,HB1,HB1,HB1,HB1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HB2,HB2,HB2,HB2,HB2,HB2,HB2,HB2,HB2,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
MILICAP_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BG1,MH1,MH1,MH1,MH1,MH1,MH1,BG1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,MH1,MH1,MH1,MH1,MH2,MH1,MH1,MH1,BG1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,MH1,MH1,MH1,MH1,MH2,MH1,MH1,MH1,MH1,MH1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,MH1,MH1,MH1,MH1,MH1,MH1,MH1,MH1,MH1,MH1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,0,0,0,0,0,0,0,0,0,0,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
BANDANA_3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BG1,BG1,BG1,BG1,BG1,BG1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BA2,BA2,BA2,BA2,BA2,BA2,BA2,BA2,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BA2,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA2,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BA2,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA2,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BA2,BA1,BA1,BA1,BA2,BA2,BA2,BA2,BA1,BA3,BA2,BA1,BA2,BA1,0,0],
[0,0,0,0,0,0,0,0,0,BA2,BA2,BA2,0,0,0,0,0,0,BA3,BA2,BA1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BA3,BA1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BA1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
PILOT_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,PH1,PH1,PH1,PH1,PH1,PH1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,PH3,PH3,PH3,PH3,PH3,PH3,PH3,PH3,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH3,PH2,PH2,PH2,PH3,PH3,PH2,PH2,PH2,PH3,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH3,PH2,PH2,PH3,PH3,PH3,PH3,PH2,PH2,PH3,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH3,PH3,PH3,PH3,PH1,PH1,PH3,PH3,PH3,PH3,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH1,PH1,PH1,PH1,PH1,PH1,PH1,PH1,PH1,PH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0,0,PH1,PH1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0,0,PH1,PH1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0,0,PH1,PH1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
CAP_3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,CA1,CA1,CA1,CA1,CA1,CA1,CA1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,CA1,CA1,CA2,CA1,CA1,CA1,CA1,CA1,CA1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,CA1,CA2,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,BG1,0,0,0,0,0,0],
[0,0,0,0,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,BG1,0,0,0,0,0,0],
[0,0,0,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
EyePatch_3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,EP1,EP1,EP1,EP1,EP1,EP1,EP1,EP1,EP1,EP1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,EP1,EP1,EP1,EP1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,EP1,EP1,EP1,EP1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,EP1,EP1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
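# "GOGOLES" is presumably a misspelling of "goggles"; the WG tokens suggest a
# welding-goggles palette.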
GOGOLES_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,WG1,WG2,WG2,WG1,WG1,WG1,WG2,WG2,WG1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,WG2,WG3,WG3,WG2,WG1,WG2,WG3,WG3,WG2,WG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,WG2,WG3,WG3,WG2,WG1,WG2,WG3,WG3,WG2,WG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,WG1,WG2,WG2,WG1,0,WG1,WG2,WG2,WG1,WG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,WG1,0,0,0,0,0,0,0,0,WG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
VR_3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,VR1,VR3,VR2,VR2,VR2,VR2,VR2,VR2,VR2,VR3,VR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,VR1,VR2,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR2,VR3,VR1,0,0,0,0,0,0],
[0,0,0,0,0,0,VR1,VR2,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR2,VR3,VR1,0,0,0,0,0,0],
[0,0,0,0,0,0,VR1,VR3,VR2,VR2,VR2,VR2,VR2,VR2,VR2,VR3,VR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
RegularShades_3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,RSH1,RSH1,RSH1,RSH1,0,RSH1,RSH1,RSH1,RSH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RSH1,RSH1,0,0,0,RSH1,RSH1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
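# TD_* appears to be a two-tone "3D glasses" layer: TD2 and TD3 presumably
# colour the left and right lenses (an assumption from the pixel pattern).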
TD_3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TD1,TD2,TD2,TD2,TD1,TD3,TD3,TD3,TD1,TD1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TD1,TD2,TD2,TD2,TD1,TD3,TD3,TD3,TD1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
NerdGlasses_3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,ND2,ND2,ND2,ND2,0,ND2,ND2,ND2,ND2,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,ND2,ND1,ND1,ND2,ND2,ND2,ND1,ND1,ND2,ND2,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,ND2,ND1,ND1,ND2,0,ND2,ND1,ND1,ND2,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,ND2,ND2,0,0,0,ND2,ND2,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ClassicShades_3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,CSH1,CSH2,CSH2,CSH1,0,CSH1,CSH2,CSH2,CSH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,CSH1,CSH3,CSH3,CSH1,0,CSH1,CSH3,CSH3,CSH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,CSH1,CSH1,0,0,0,CSH1,CSH1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HornedRimGlasses_3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HRG1,HRG1,HRG2,HRG2,HRG3,HRG1,HRG1,HRG2,HRG2,HRG3,HRG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HRG4,HRG1,HRG3,0,0,HRG4,HRG1,HRG3,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HRG3,HRG3,HRG3,0,0,HRG3,HRG3,HRG3,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
BigShades_3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BSH1,BSH1,BSH1,BSH1,BSH1,0,BSH1,BSH1,BSH1,BSH1,BSH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BSH1,BSH2,BSH2,BSH2,BSH1,BSH1,BSH1,BSH2,BSH2,BSH2,BSH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BSH1,BSH3,BSH3,BSH3,BSH1,0,BSH1,BSH3,BSH3,BSH3,BSH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BSH1,BSH4,BSH4,BSH4,BSH1,0,BSH1,BSH4,BSH4,BSH4,BSH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BSH1,BSH1,BSH1,0,0,0,BSH1,BSH1,BSH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
EyeMask_3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,EM1,0,0,EM1,EM1,EM1,0,0,EM1,EM1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,EM1,0,EM2,EM1,EM1,EM1,0,EM2,EM1,EM1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
TIARA_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,TI1,TI1,0,TI1,TI1,TI1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,TI1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,TI1,TI2,TI1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,TI1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
TIARA_3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,TI1,TI1,TI1,0,TI1,TI1,TI1,TI1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,TI1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,TI1,TI2,TI1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,TI1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
KNITTED_4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BG1,KC1,KC1,KC1,KC1,KC1,KC1,BG1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,KC1,KC2,KC2,KC2,KC2,KC2,KC2,KC1,BG1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,KC1,KC2,KC2,KC2,KC2,KC2,KC2,KC2,KC2,KC1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,BG1,KC1,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC3,KC1,BG1,0,0,0,0,0],
[0,0,0,0,0,BG1,KC1,KC2,KC3,KC2,KC3,KC2,KC3,KC2,KC3,KC2,KC3,KC1,BG1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HEADBAND_4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HB1,HB1,HB1,HB1,HB1,HB1,HB1,HB1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HB2,HB2,HB2,HB2,HB2,HB2,HB2,HB2,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HEADBAND_7=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HB1,HB1,HB1,HB1,HB1,HB1,HB1,HB1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HB2,HB2,HB2,HB2,HB2,HB2,HB2,HB2,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
MILICAP_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BG1,MH1,MH1,MH1,MH1,MH1,MH1,BG1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,MH1,MH1,MH1,MH1,MH2,MH1,MH1,MH1,BG1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,MH1,MH1,MH1,MH1,MH2,MH1,MH1,MH1,MH1,MH1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,MH1,MH1,MH1,MH1,MH1,MH1,MH1,MH1,MH1,MH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
BANDANA_4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,BA2,BA2,BA2,BA2,BA2,BA2,BA2,BA2,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,BA2,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA2,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,BA2,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA2,BG1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BA2,BA1,BA1,BA1,BA2,BA2,BA2,BA2,BA1,BA3,BA2,BA1,BA2,BA1,0,0],
[0,0,0,0,0,0,0,0,0,BA2,BA2,BA2,0,0,0,0,0,0,BA3,BA2,BA1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BA3,BA1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BA1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
BANDANA_5=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BG1,BA2,BA2,BA2,BA2,BA2,BA2,BA2,BA2,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,BA2,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA2,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,BA2,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA1,BA2,BG1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BA2,BA1,BA1,BA1,BA2,BA2,BA2,BA2,BA1,BA3,BA2,BA1,BA2,BA1,0,0],
[0,0,0,0,0,0,0,0,0,BA2,BA2,BA2,0,0,0,0,0,0,BA3,BA2,BA1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BA3,BA1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BA1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
PILOT_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,PH1,PH1,PH1,PH1,PH1,PH1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,PH3,PH3,PH3,PH3,PH3,PH3,PH3,PH3,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH3,PH2,PH2,PH2,PH3,PH3,PH2,PH2,PH2,PH3,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH3,PH2,PH2,PH3,PH3,PH3,PH3,PH2,PH2,PH3,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH3,PH3,PH3,PH3,PH1,PH1,PH3,PH3,PH3,PH3,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH1,PH1,PH1,PH1,PH1,PH1,PH1,PH1,PH1,PH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0,0,PH1,PH1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0,0,PH1,PH1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0,0,PH1,PH1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0,0,PH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
CAP_4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,CA1,CA1,CA1,CA1,CA1,CA1,CA1,BG1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,CA1,CA1,CA2,CA1,CA1,CA1,CA1,CA1,CA1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,CA1,CA2,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,BG1,0,0,0,0,0,0],
[0,0,0,0,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,BG1,0,0,0,0,0,0],
[0,0,0,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
CAP_5=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,CA1,CA1,CA1,CA1,CA1,CA1,CA1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,CA1,CA1,CA2,CA1,CA1,CA1,CA1,CA1,CA1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BG1,CA1,CA2,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,BG1,0,0,0,0,0,0],
[0,0,0,0,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,BG1,0,0,0,0,0,0],
[0,0,0,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,CA1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
EyePatch_4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,EP1,EP1,EP1,EP1,EP1,EP1,EP1,EP1,EP1,EP1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,EP1,EP1,EP1,EP1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,EP1,EP1,EP1,EP1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,EP1,EP1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
GOGOLES_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,WG1,WG2,WG2,WG1,WG1,WG1,WG2,WG2,WG1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,WG2,WG3,WG3,WG2,WG1,WG2,WG3,WG3,WG2,WG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,WG2,WG3,WG3,WG2,WG1,WG2,WG3,WG3,WG2,WG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,WG1,WG2,WG2,WG1,0,WG1,WG2,WG2,WG1,WG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,WG1,0,0,0,0,0,0,0,0,WG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
VR_4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,VR1,VR3,VR2,VR2,VR2,VR2,VR2,VR2,VR2,VR3,VR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,VR1,VR2,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR2,VR3,VR1,0,0,0,0,0,0],
[0,0,0,0,0,0,VR1,VR2,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR2,VR3,VR1,0,0,0,0,0,0],
[0,0,0,0,0,0,VR1,VR3,VR2,VR2,VR2,VR2,VR2,VR2,VR2,VR3,VR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR1,VR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
RegularShades_4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,RSH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,RSH1,RSH1,RSH1,RSH1,0,RSH1,RSH1,RSH1,RSH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RSH1,RSH1,0,0,0,RSH1,RSH1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
TD_4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TD1,TD2,TD2,TD2,TD1,TD3,TD3,TD3,TD1,TD1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TD1,TD2,TD2,TD2,TD1,TD3,TD3,TD3,TD1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,TD1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
NerdGlasses_4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,ND2,ND2,ND2,ND2,0,ND2,ND2,ND2,ND2,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,ND2,ND1,ND1,ND2,ND2,ND2,ND1,ND1,ND2,ND2,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,ND2,ND1,ND1,ND2,0,ND2,ND1,ND1,ND2,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,ND2,ND2,0,0,0,ND2,ND2,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ClassicShades_4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,CSH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,CSH1,CSH2,CSH2,CSH1,0,CSH1,CSH2,CSH2,CSH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,CSH1,CSH3,CSH3,CSH1,0,CSH1,CSH3,CSH3,CSH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,CSH1,CSH1,0,0,0,CSH1,CSH1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HornedRimGlasses_4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,HRG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HRG1,HRG1,HRG2,HRG2,HRG3,HRG1,HRG1,HRG2,HRG2,HRG3,HRG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HRG4,HRG1,HRG3,0,0,HRG4,HRG1,HRG3,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HRG3,HRG3,HRG3,0,0,HRG3,HRG3,HRG3,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
BigShades_4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BSH1,BSH1,BSH1,BSH1,BSH1,0,BSH1,BSH1,BSH1,BSH1,BSH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BSH1,BSH2,BSH2,BSH2,BSH1,BSH1,BSH1,BSH2,BSH2,BSH2,BSH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BSH1,BSH3,BSH3,BSH3,BSH1,0,BSH1,BSH3,BSH3,BSH3,BSH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BSH1,BSH4,BSH4,BSH4,BSH1,0,BSH1,BSH4,BSH4,BSH4,BSH1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BSH1,BSH1,BSH1,0,0,0,BSH1,BSH1,BSH1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
EyeMask_4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,EM1,0,0,EM1,EM1,EM1,0,0,EM1,EM1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,EM1,0,EM2,EM1,EM1,EM1,0,EM2,EM1,EM1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,EM1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
BigBeard=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BE1,BE2,BE2,BE2,BE2,BE2,BE2,BE2,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,BE1,BE2,BE2,BE2,BE2,BE2,BE2,BE2,BE2,BE2,BE2,BE1,0,0,0,0,0,0],
[0,0,0,0,0,0,BE1,BE2,BE2,BE2,BE1,BE1,BE1,BE2,BE2,BE2,BE2,BE2,BE1,0,0,0,0,0],
[0,0,0,0,0,0,BE1,BE2,BE2,BE2,BE2,BE2,BE2,BE2,BE2,BE2,BE2,BE2,BE1,0,0,0,0,0],
[0,0,0,0,0,0,BE1,BE2,BE2,BE2,BE2,BE2,BE2,BE2,BE2,BE2,BE2,BE2,BE1,0,0,0,0,0],
[0,0,0,0,0,0,BE1,BE2,BE2,BE2,BE2,BE2,BE2,BE2,BE2,BE2,BE1,BE1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BE1,BE2,BE2,BE2,BE2,BE2,BE2,BE1,BE1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BE1,BE1,BE1,BE1,BE1,BE1,0,0,0,0,0,0,0,0,0,0]
]
NormalBeardBlack=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BE1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BE1,0,0,0,0,0,0,0,0,BE1,BE1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BE1,BE1,0,0,0,0,0,0,BE1,BE1,BE1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BE1,BE1,BE1,BE1,BE1,BE1,BE1,BE1,BE1,BE1,BE1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BE1,BE1,BE1,BE3,BE3,BE3,BE1,BE1,BE1,BE1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BE1,BE1,BE1,BE1,BE1,BE1,BE1,BE1,BE1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BE1,BE1,BE1,BE1,BE1,BE1,BE1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BE1,BE1,BE1,BE1,BE1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
FrontBeard=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BE2,BE2,BE2,BE2,BE2,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BE2,0,0,0,BE2,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BE2,BE2,BE2,BE2,BE2,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,BE2,BE2,BE2,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BE1,BE2,BE2,BE2,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,BE1,BE1,BE1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
Handlebars=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BE2,BE4,BE4,BE4,BE2,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BE4,0,0,0,BE4,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BE4,0,0,0,BE4,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
Muttonchops=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BE2,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BE2,0,0,0,0,0,0,BE2,BE2,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BE2,0,0,0,0,0,BE2,BE2,BE2,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BE2,0,0,0,0,0,BE2,BE2,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
Mustache=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BE2,BE2,BE2,BE2,BE2,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
NormalBeard=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BE2,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BE2,0,0,0,0,0,0,BE2,BE2,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BE2,BE2,BE2,BE2,BE2,BE2,BE2,BE2,BE2,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BE2,BE2,0,0,0,BE2,BE2,BE2,BE2,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BE2,BE2,BE2,BE2,BE2,BE2,BE2,BE2,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BE2,BE2,BE2,BE2,BE2,BE2,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
Chinstrap=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BE2,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BE2,0,0,0,0,0,0,BE2,BE2,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BE2,0,0,0,0,0,0,BE2,BE2,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BE2,0,0,0,0,0,BE2,BE2,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BE2,0,0,0,0,0,BE2,BE2,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BE1,BE2,BE2,BE2,BE2,BE2,BE2,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BE1,BE2,BE2,BE2,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,BE1,BE1,BE1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
Goat=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BE1,0,BE2,BE2,BE2,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BE1,BE2,BE2,BE2,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,BE1,BE2,BE1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,BE1,0,0,0,0,0,0,0,0,0,0,0,0]
]
FrontBeardDark=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BE7,BE7,BE7,BE7,BE7,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BE7,0,0,0,BE7,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BE7,BE7,BE7,BE7,BE7,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,BE7,BE7,BE7,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BE1,BE7,BE7,BE7,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,BE1,BE1,BE1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
LuxuriousBeard=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BE1,0,0,0,0,0,0,0,0,BE1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BE1,BE1,0,0,0,0,0,0,BE1,BE1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BE1,BE1,BE1,BE1,BE1,BE1,BE1,BE1,BE1,BE1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BE1,BE1,BE1,BE3,BE3,BE3,BE1,BE1,BE1,BE1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,BE1,BE1,BE1,BE1,BE1,BE1,BE1,BE1,BE1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BE1,BE1,BE1,BE1,BE1,BE1,BE1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BE1,BE1,BE1,BE1,BE1,BE1,BE1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BE1,BE1,BE1,BE1,BE1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
Elfe_Tiara =[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,ETI,0,0,0,0,0,0,0,0,ETI,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,ETI,0,0,0,0,0,0,ETI,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,ETI,ETI,0,ETI,ETI,ETI,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,ETI,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
Hob_Hat =[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HOB2,HOB3,HOB4,HOB2,HOB3,HOB4,HOB2,HOB3,HOB4,HOB2,0,0,0,0,0,0,0],
[0,0,0,0,0,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,0,0,0,0,0],
[0,0,0,0,0,0,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,0,0,0,0,0,0],
[0,0,0,0,0,BG1,0,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,BG1,0,0,0,0,0,0],
[0,0,0,0,BG1,0,BG1,HOB5,HOB5,HOB5,HOB5,HOB5,HOB5,HOB5,HOB5,HOB5,HOB5,0,BG1,0,0,0,0,0],
[0,0,0,BG1,BG1,BG1,HOB2,HOB2,HOB2,HOB2,HOB2,HOB2,HOB2,HOB2,HOB2,HOB2,HOB2,HOB2,BG1,BG1,0,0,0,0],
[0,0,0,0,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,0,0,0,0],
[0,0,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,HOB1,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
Gondor_Crown =[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,GCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,GCR1,GCR1,GCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,GCR1,GCR2,GCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,GCR1,BG1,BG1,BG1,BG1,GCR1,GCR2,GCR3,GCR2,GCR1,BG1,BG1,BG1,BG1,BG1,GCR1,0,0,0,0],
[0,0,0,0,0,GCR1,GCR1,GCR1,GCR1,GCR2,GCR3,GCR2,GCR3,GCR2,GCR1,GCR1,GCR1,GCR1,GCR1,0,0,0,0,0],
[0,0,0,0,0,0,GCR1,GCR1,GCR2,GCR3,GCR2,GCR4,GCR2,GCR3,GCR2,GCR1,GCR1,GCR1,0,0,0,0,0,0],
[0,0,0,0,0,0,GCR1,GCR2,GCR3,GCR2,GCR4,GCR5,GCR4,GCR2,GCR3,GCR2,GCR1,GCR1,0,0,0,0,0,0],
[0,0,0,0,0,0,GCR1,GCR1,GCR2,GCR4,GCR5,GCR5,GCR5,GCR4,GCR2,GCR1,GCR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,GCR5,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
Gobelin_Crown =[
[0,0,0,0,0,0,0,0,0,0,0,0,KGC,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,KGC,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,KGC,0,0,0,KGC,0,0,0,KGC,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,KGC,0,0,0,KGC,0,0,0,KGC,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,KGC,0,KGC,0,KGC,0,KGC,0,KGC,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,KGC,0,KGC,0,KGC,0,KGC,0,KGC,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,KGC,KGC,KGC,KGC,KGC,KGC,KGC,KGC,KGC,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,KGC,KGC,KGC,KGC,KGC,KGC,KGC,KGC,KGC,KGC,KGC,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
Flower =[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,FL3,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,FL4,FL2,FL4,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,FL3,FL2,FL1,FL2,FL3,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,FL4,FL2,FL4,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,FL3,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
Wo_Crown =[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,EOY2,EOY1,EOY2,EOY1,EOY2,EOY1,EOY2,EOY1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,EOY1,0,0,0,0,0,0,0,0,EOY2,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,EOY1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
Elf_Crown =[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,ELT,ELT,ELT,0,0,0,ELT,ELT,ELT,ELT,ELT,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,ELT,0,ELT,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,ELT,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
Helmet =[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,BG1,BG1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,BG1,BG1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,BG1,BG1,BG1,BG1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BG1,DHL3,DHL1,DHL3,DHL3,DHL1,DHL3,BG1,0,0,0,0,0,0,0,0],
[0,0,0,0,BG1,BG1,BG1,BG1,DHL3,DHL2,DHL1,DHL2,DHL2,DHL2,DHL1,DHL3,BG1,BG1,BG1,BG1,BG1,0,0,0],
[0,0,0,0,BG1,BG1,BG1,DHL3,DHL2,DHL2,DHL1,DHL2,DHL2,DHL2,DHL1,DHL2,DHL3,BG1,BG1,BG1,BG1,0,0,0],
[0,0,0,0,0,BG1,DHL3,DHL2,DHL2,DHL1,DHL2,DHL2,DHL2,DHL2,DHL2,DHL1,DHL2,DHL3,BG1,BG1,0,0,0,0],
[0,0,0,0,0,BG1,DHL3,DHL2,DHL2,DHL1,DHL2,DHL2,DHL2,DHL2,DHL2,DHL1,DHL2,DHL3,0,BG1,0,0,0,0],
[0,0,0,0,0,BG1,DHL3,DHL1,DHL1,DHL1,DHL1,DHL1,DHL1,DHL1,DHL1,DHL1,DHL1,DHL3,0,0,0,0,0,0],
[0,0,0,0,0,0,DHL3,DHL1,0,DHL1,0,0,0,0,0,DHL1,DHL1,DHL3,0,0,0,0,0,0],
[0,0,0,0,0,0,DHL3,DHL1,0,0,0,0,0,0,0,DHL1,DHL1,DHL3,0,0,0,0,0,0],
[0,0,0,0,0,0,DHL3,DHL1,0,0,0,0,0,0,0,DHL1,DHL1,DHL3,0,0,0,0,0,0],
[0,0,0,0,0,0,DHL3,DHL1,0,0,0,0,0,0,DHL1,DHL1,DHL3,DHL3,0,0,0,0,0,0],
[0,0,0,0,0,0,0,DHL1,DHL1,0,0,0,0,0,DHL1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
Elfic_Krown=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,THR1,0,0,0,0,0,0,0,0,0,THR1,0,0,0,0,0,0],
[0,0,0,0,0,THR1,0,0,THR1,0,0,0,0,0,0,0,THR1,0,0,THR1,0,0,0,0],
[0,0,0,0,0,0,THR1,0,0,0,0,0,0,0,0,0,0,0,THR1,0,0,0,0,0],
[0,0,0,0,THR1,0,0,THR1,0,0,0,0,0,0,0,0,0,THR1,0,0,THR1,0,0,0],
[0,0,0,0,0,THR1,0,0,0,0,0,0,0,0,0,0,0,0,0,THR1,0,0,0,0],
[0,0,0,0,0,THR1,0,0,0,0,0,0,0,0,0,0,0,0,0,THR1,0,0,0,0],
[0,0,0,THR1,0,0,THR1,0,0,0,0,0,0,0,0,0,0,0,THR1,0,0,THR1,0,0],
[0,0,0,0,THR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,THR1,0,0,0],
[0,0,0,0,0,THR1,0,0,0,0,0,0,0,0,0,0,0,0,0,THR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
#Initialize the variables
# list1 sets how many times to iterate through the following mechanism,
# which equals the number of MidPunks; e.g. for x in range(201)
# would generate 201 MidPunks numbered 0-200
list1 = range(11984)
filterlist1 = []
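# NOTE: `seed` and `randint` below are assumed to be Python's random.seed and
# random.randint, imported earlier in this file. seed(x+a) pins the whole
# attribute chain to the token index x, and each later draw reseeds with the
# previous draw (seed(b), seed(c), ...), so every token is fully reproducible.
#
# A minimal sketch of the trait-selection pattern repeated throughout the
# loop below: each attribute draws a value in [0, 1000000] and walks a
# descending threshold ladder, so the gap between two thresholds is that
# trait's rarity in parts-per-million. `_pick_trait` is illustrative only and
# is not called by the generator; `table` is a hypothetical list of
# (threshold, trait) pairs sorted from highest threshold to lowest.
def _pick_trait(value, table, default):
    for threshold, trait in table:
        if value > threshold:
            return trait
    return default
# Example: _pick_trait(954321, [(970000, 'Police'), (950000, 'Top Hat')],
#                      'None') returns 'Top Hat'.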
for x in list1:
a = 13080698
seed(x+a)
titi=0
titin=0
titine=0
toto=0
tata=0
tutu=0
tyty=0
tete=0
toutou=0
toctoc=0
tactac=0
tuctuc=0
tonton=0
tantan=0
neyo=0
neye=0
neya=0
neyh=0
neyu=0
neyw=0
b = randint(0,1000000)
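# b selects the race/gender bucket for this token: the top 5% of draws
# (b > 950000) become male Halflings, the next 5% (b > 900000) female
# Halflings; lower bands (handled further below) cover the remaining races.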
if b > 950000:
race_ep = 'Halflings'
type_ep = 'Male'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
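# c picks one of four skin palettes with roughly 20%/30%/30%/20% odds
# (Albino/Light/Mid/Dark). Each branch also retunes the greyscale HRG*
# glasses tones, the cheek color RC1, and the mole/scar colors MO1/SCR1
# so overlays match the skin tone.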
if c > 800000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 200000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
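# d gives 25% of male Halflings an earring; the rest get bare ears.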
if d > 750000:
ears = EARS_2
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
HR0 = (255,193,0)
HR2 = (251,114,7)
HR3 = (210,154,0)
HR4 = (166,165,165)
HR5 = (165,108,0)
HR6 = (111,0,48)
HR7 = (85,57,23)
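# e maps eight equal 12.5% bands to hair colors; `nr` is assumed to be a
# black color tuple defined earlier in the file.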
if e > 875000:
HR1 = HR0
hair_color_ep ='Blond'
elif e > 750000:
HR1 = nr
hair_color_ep='Black'
elif e > 625000:
HR1 = HR2
hair_color_ep ='Orange'
elif e > 500000:
HR1 = HR3
hair_color_ep ='Fair'
elif e > 375000:
HR1 = HR4
hair_color_ep ='Grey'
elif e > 250000:
HR1 = HR5
hair_color_ep ='Ginger'
elif e > 125000:
HR1 = HR6
hair_color_ep ='Black Rose'
else:
HR1 = HR7
hair_color_ep ='Brown'
HALFIN_HR1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,0,HR1,HR1,HR1,0,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,HR1,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,HR1,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,HR1,0,HR1,0,0,HR1,HR1,HR1,HR1,0,HR1,0,0],
[0,0,0,0,0,HR1,HR1,HR1,HR1,0,HR1,HR1,0,0,HR1,0,HR1,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0],
[0,0,0,0,HR1,0,HR1,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,HR1,0,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,0,HR1,HR1,0,0],
[0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HALFIN_HR2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,HR1,0,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,0,HR1,HR1,HR1,0,HR1,HR1,0,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,HR1,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,HR1,0,0,0],
[0,0,0,0,HR1,0,HR1,HR1,HR1,0,0,HR1,0,HR1,0,0,HR1,HR1,HR1,HR1,0,HR1,0,0],
[0,0,0,0,0,HR1,HR1,HR1,HR1,0,HR1,HR1,0,0,HR1,0,HR1,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0],
[0,0,0,0,HR1,0,HR1,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0],
[0,0,0,0,HR1,0,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,HR1,HR1,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0],
[0,0,0,0,HR1,0,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,0,HR1,HR1,0,0],
[0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,HR1,HR1,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,HR1,HR1,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,HR1,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HALFIN_HR3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,HR1,HR1,0,HR1,0,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,HR1,HR1,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,HR1,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,HR1,HR1,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HALFIN_HR4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,HR1,0,HR1,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,HR1,0,0,HR1,0,0,HR1,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,HR1,0,0,0,HR1,HR1,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,HR1,0,0,0,0,0,0,0,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HALFIN_HR5=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(e)
f=randint(0,1000000)
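# f picks one of five male Halfling haircuts in equal 20% bands:
# Wild Hair, Perm Hair, Bedhead, Hockey Hair, or Bald.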
if f > 800000:
hair = HALFIN_HR1
haircut_ep ='Wild Hair'
elif f > 600000:
hair = HALFIN_HR2
haircut_ep ='Perm Hair'
elif f > 400000:
hair = HALFIN_HR3
haircut_ep ='Bedhead'
elif f > 200000:
hair = HALFIN_HR4
haircut_ep ='Hockey Hair'
else:
hair = HALFIN_HR5
haircut_ep ='Bald'
seed(f)
g=randint(0,1000000)
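# g assigns headwear: Police 3%, Top Hat 2%, Headband 5%, Cap Forward 5%,
# Cowboy Hat 2%, Cap 4%, and no headwear for the remaining ~79%.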
if g > 970000:
hair_prop = POLICE_6
hair_prop_ep = 'Police'
elif g > 950000:
hair_prop = TOPHAT_6
hair_prop_ep = 'Top Hat'
elif g > 900000:
hair_prop = HEADBAND_6
hair_prop_ep = 'Headband'
elif g > 850000:
hair_prop = FORCAP_8
hair_prop_ep = 'Cap Forward'
elif g > 830000:
hair_prop = COWBOY_8
hair_prop_ep = 'Cowboy Hat'
elif g > 790000:
hair_prop = CAP_8
hair_prop_ep = 'Cap'
else:
hair_prop = none
hair_prop_ep = 'None'
seed(g)
h=randint(0,1000000)
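# h assigns the neck accessory: Gold Chain 10%, Silver Chain 8%,
# Ring Onchain 2%, Brooch 2%, otherwise None (~78%).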
if h > 900000:
neck = GoldChain_1
neck_ep = 'Gold Chain'
elif h > 820000:
neck = SilverChain_1
neck_ep = 'Silver Chain'
elif h > 800000:
neck = RING_1
neck_ep = 'Ring Onchain'
elif h > 780000:
neck = BROCHE_1
neck_ep = 'Brooch'
else:
neck = none
neck_ep = 'None'
seed(h)
i=randint(0,1000000)
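# i assigns the mouth prop: Cigarette 10%, Medical Mask 2% (which also
# clears any facial hair so the mask renders cleanly), Pipe 6%, Vape 4%,
# otherwise None (~78%).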
if i > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif i > 880000:
mouth_prop = MASK_1
facial_hair = none
mouth_prop_ep = 'Medical Mask'
elif i > 820000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif i > 780000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(i)
j=randint(0,1000000)
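# j assigns eyewear from ten options totalling ~45% of draws
# (3D Glasses 3%, VR 4%, Classic Shades 5%, Small Shades 5%, Eye Patch 5%,
# Nerd Glasses 5%, Big Shades 5%, Eye Mask 3%, Horned Rim Glasses 5%,
# Regular Shades 5%); the remaining ~55% get no eyewear.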
if j > 970000:
eyes = TD_6
eyes_prop_ep ='3D Glasses'
elif j > 930000:
eyes = VR_6
eyes_prop_ep ='VR'
elif j > 880000:
eyes = ClassicShades_6
eyes_prop_ep ='Classic Shades'
elif j >830000:
eyes = SmallShades_6
eyes_prop_ep ='Small Shades'
elif j > 780000:
eyes = EyePatch_6
eyes_prop_ep ='Eye Patch'
elif j > 730000:
eyes = NerdGlasses_6
eyes_prop_ep ='Nerd Glasses'
elif j > 680000:
eyes = BigShades_6
eyes_prop_ep ='Big Shades'
elif j > 650000:
eyes = EyeMask_6
eyes_prop_ep ='Eye Mask'
elif j > 600000:
eyes = HornedRimGlasses_6
eyes_prop_ep ='Horned Rim Glasses'
elif j > 550000:
eyes = RegularShades_6
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
seed(j)
k=randint(0,1000000)
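# k sets the mouth expression: Smile 2.5%, Frown 2.5%, otherwise neutral.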
if k > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif k > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,RC1,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(k)
l=randint(0,1000000)
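# l assigns blemishes: Rosy Cheeks 3%, Mole 7%, Scare 3%, otherwise None (~87%).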
if l > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif l > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
elif l > 870000:
blemishes = SCARE_2
blemishe_ep = 'Scare'
else:
blemishes = none
blemishe_ep = 'None'
HALFIN=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,FR1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,FR1,FR1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
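# HALFIN is the 24x24 base face template for the male Halfling: FR2 forms
# the outer frame, BG1 the background, FR1 the dark outline, SK1 the skin
# fill, with SC1 brow and EY1 eye pixels; the attribute layers chosen above
# are presumably composited over this grid later in the pipeline.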
pixels = HALFIN
elif b > 900000:
race_ep = 'Halflings'
type_ep = 'Female'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 800000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
LI1 = (113,28,17)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
LI1 = (113,28,17)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 200000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
LI1 = (95,29,13)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
LI1 = (74,18,8)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_3
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
HR0 = (255,193,0)
HR2 = (251,114,7)
HR3 = (210,154,0)
HR4 = (166,165,165)
HR5 = (165,108,0)
HR6 = (111,0,48)
HR7 = (85,57,23)
red = (255,0,0)
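# In the female branch every band below reassigns HR2 to red after the
# chosen band has (where applicable) been read into HR1; HR2 is then reused
# as the red accent pixels in HALFINE_HR5.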
if e > 875000:
HR1 = HR0
HR2 = red
hair_color_ep ='Blonde'
elif e > 750000:
HR1 = nr
HR2 = red
hair_color_ep ='Black'
elif e > 625000:
HR1 = HR2
HR2 = red
hair_color_ep ='Orange'
elif e > 500000:
HR1 = HR3
HR2 = red
hair_color_ep ='Fair'
elif e > 375000:
HR1 = HR4
HR2 = red
hair_color_ep ='Grey'
elif e > 250000:
HR1 = HR5
HR2 = red
hair_color_ep ='Ginger'
elif e > 125000:
HR1 = HR6
HR2 = red
hair_color_ep ='Black Rose'
else:
HR1 = HR7
HR2 = red
hair_color_ep ='Brown'
HALFINE_HR1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,HR1,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,0,0,HR1,HR1,0,HR1,HR1,HR1,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,HR1,0,HR1,HR1,HR1,HR1,HR1,0,HR1,0,HR1,0,HR1,0,HR1,0,0,0,0,0],
[0,0,0,HR1,HR1,HR1,HR1,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,HR1,0,HR1,HR1,HR1,HR1,0,HR1,0,HR1,0,HR1,HR1,0,HR1,HR1,0,0,0,0],
[0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,HR1,0,0,0,HR1,HR1,0,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,HR1,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,HR1,0,HR1,HR1,HR1,0,0,0,0],
[0,0,HR1,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,HR1,HR1,HR1,0,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,0,HR1,0,0,0],
[0,0,0,HR1,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,HR1,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,HR1,0,0,0],
[0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0],
[0,0,HR1,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,HR1,0,0],
[0,HR1,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,HR1,HR1,0,0],
[0,HR1,HR1,0,HR1,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,HR1,0,HR1,0,HR1,0,0,0,0,0,0,0,0,0,HR1,0,HR1,0,HR1,0,0],
[0,0,0,0,HR1,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,0,HR1,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HALFINE_HR2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,0,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,HR1,0,HR1,0,HR1,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,HR1,0,0,0,HR1,HR1,HR1,0,HR1,0,0,0,0],
[0,0,0,0,HR1,0,HR1,0,0,0,0,HR1,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,HR1,0,0,0,0,0,0,HR1,0,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HALFINE_HR3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,HR1,HR1,HR1,0,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,HR1,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HALFINE_HR4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0],
[0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HALFINE_HR5=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR2,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR2,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,HR1,HR1,HR1,HR1,0,0],
[0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,HR1,HR1,HR1,0,HR1,HR1,HR1,HR1,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,HR1,HR1,HR1,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
MOLE_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,RC1,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
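# Trait rolls below are chained: every randint() draw reseeds the RNG with
# the previous draw (seed(e) -> f, seed(f) -> g, ...), so a single starting
# seed deterministically fixes the whole character. Thresholds over
# 0..1,000,000 encode rarity, e.g. `f > 800000` is a 20% bucket.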
# initialise the combo flags read by the compatibility checks further down
# (titine/toctoc, neya/neyh, tactac/tuctuc); without this, a NameError is
# raised whenever the corresponding trait was not rolled
titine = toctoc = neya = neyh = tactac = tuctuc = 0
seed(e)
f=randint(0,1000000)
if f > 800000:
hair = HALFINE_HR1
haircut_ep ='Perm Hair'
elif f > 600000:
hair = HALFINE_HR2
haircut_ep ='Wild Hair'
elif f > 400000:
hair = HALFINE_HR3
haircut_ep ='Wedge Hair'
elif f > 200000:
hair = HALFINE_HR4
haircut_ep ='Feathered Hair'
else:
hair = HALFINE_HR5
haircut_ep ='Ponytail'
toto = 99 # note: set unconditionally, so the 'Shire Hat' branch below can never fire
seed(f)
g=randint(0,1000000)
if g > 990000:
hair_prop = TIARA_3
hair_prop_ep = 'Tiara'
titine = 99
elif g > 940000:
hair_prop = Flower
hair_prop_ep = 'Flower'
elif g > 900000 and toto != 99:
hair_prop = Hob_Hat
hair_prop_ep = 'Shire Hat'
elif g > 860000:
hair_prop = HEADBAND_4
hair_prop_ep = 'Headband'
elif g > 850000:
hair = none
hair_prop = PILOT_2
hair_prop_ep = 'Pilot Helmet'
titine = 99
else:
hair_prop = none
hair_prop_ep = 'None'
seed(g)
h=randint(0,1000000)
if h > 900000:
EY1 = (110,152,77)
SC1 = (92,133,57)
eyes_color_ep = 'Green Eye Shadow'
elif h > 800000:
EY1 = (93,121,117)
SC1 = (80,106,101)
eyes_color_ep = 'Blue Eye Shadow'
elif h > 700000:
EY1 = (176,61,133)
SC1 = (164,55,117)
eyes_color_ep = 'Purple Eye Shadow'
elif h > 600000:
EY1 = (214,92,26)
SC1 = (194,79,17)
eyes_color_ep = 'Orange Eye Shadow'
else:
eyes_color_ep = 'None'
neya = 99
seed(h)
i=randint(0,1000000)
if i > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif i > 880000:
mouth_prop = MASK_2
mouth_prop_ep = 'Medical Mask'
tactac = 99 # flag: medical mask rolled (checked against the clown nose below)
elif i > 820000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif i > 780000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(i)
j=randint(0,1000000)
if j > 970000:
eyes = TD_3
eyes_prop_ep ='3D Glasses'
elif j > 930000:
eyes = VR_3
eyes_prop_ep ='VR'
elif j > 880000:
eyes = ClassicShades_4
eyes_prop_ep ='Classic Shades'
elif j > 830000:
eyes = EyePatch_4
eyes_prop_ep ='Eye Patch'
neyh = 99
elif j > 780000:
eyes = NerdGlasses_4
eyes_prop_ep ='Nerd Glasses'
elif j > 730000:
eyes = BigShades_4
eyes_prop_ep ='Big Shades'
elif j > 700000:
eyes = EyeMask_4
eyes_prop_ep ='Eye Mask'
neyh = 99
elif j > 650000:
eyes = HornedRimGlasses_4
eyes_prop_ep ='Horned Rim Glasses'
elif j > 600000:
eyes = RegularShades_4
eyes_prop_ep ='Regular Shades'
elif j > 590000:
eyes = GOGOLES_2
eyes_prop_ep ='Welding Goggles'
hair_prop = none
hair_prop_ep = 'None'
toctoc = 99
else:
eyes=none
eyes_prop_ep ='None'
neyh = 99
if titine == 99 and toctoc != 99: # tiara/pilot helmet drops eyewear unless welding goggles were rolled
eyes = none
eyes_prop_ep ='None'
if neya != 99 and neyh != 99: # eye shadow wins over glasses-style eyewear
eyes = none
eyes_prop_ep ='None'
seed(j)
k=randint(0,1000000)
if k > 975000:
nose = NOSE_2
nose_ep = 'Clown Nose'
tuctuc = 99
else:
nose = none
nose_ep = 'None'
if tactac == 99 and tuctuc == 99: # medical mask and clown nose never stack
mouth_prop = none
mouth_prop_ep = 'None'
seed(k)
l=randint(0,1000000)
if l > 970000:
blemishes = ROSY_2
blemishe_ep = 'Rosy Cheeks'
elif l > 900000:
blemishes = MOLE_2
blemishe_ep = 'Mole'
elif l > 870000:
blemishes = SCARE_2
blemishe_ep = 'Scare'
else:
blemishes = none
blemishe_ep = 'None'
seed(l)
m=randint(0,1000000)
if m > 930000:
LI1 = nr
mouth_ep = 'Black Lipstick'
elif m > 860000:
LI1 = (255,0,0)
mouth_ep = 'Hot Lipstick'
elif m > 790000:
LI1 = (208,82,203)
mouth_ep = 'Purple Lipstick'
elif m > 720000:
LI1 = (214,92,26)
mouth_ep = 'Orange Lipstick'
else:
mouth = none
mouth_ep = 'None'
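# Lipstick works by recolouring LI1, the lip cells already present in the
# base grid, rather than adding an extra layer; the else branch keeps the
# default lip colour.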
seed(m)
n=randint(0,1000000)
if n > 900000:
neck = GoldChain_3
neck_ep = 'Gold Chain'
elif n > 820000:
neck = SilverChain_3
neck_ep = 'Silver Chain'
elif n > 800000:
neck = RING_3
neck_ep = 'Ring Onchain'
else:
neck = none
neck_ep = 'None'
HALFINE=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,LI1,LI1,LI1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2]
]
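# HALFINE is the 24x24 base portrait: FR2 frames the card, BG1 fills the
# background, FR1 draws the outline, SK1 the skin, SC1/EY1 the eye-shadow
# and iris cells, and LI1 the lip pixels recoloured by the lipstick roll.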
pixels = HALFINE
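# A minimal rendering sketch (kept as a comment so it does not disturb the
# generator): it shows how a resolved 24x24 matrix such as `pixels` could
# be rasterised, assuming Pillow is installed. `scale` and the output
# filename are illustrative choices, not something this script defines.
#
#   from PIL import Image
#
#   def render(grid, scale=16):
#       img = Image.new('RGB', (24, 24))
#       for y, row in enumerate(grid):
#           for x, cell in enumerate(row):
#               if cell != 0:  # 0 means "leave this pixel unpainted"
#                   img.putpixel((x, y), cell)
#       return img.resize((24 * scale, 24 * scale), Image.NEAREST)
#
#   render(pixels).save('character.png')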
elif b > 750000:
race_ep = 'Men'
type_ep = 'Male'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
BE6 = (40,27,9)
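# The c roll picks one of four skin palettes (Albino 20%, Light 30%,
# Mid 30%, Dark 20%); MO1 and SCR1 are aliased to EY1 in every palette so
# moles and scars automatically match the chosen skin tone.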
seed(b)
c=randint(0,1000000)
if c > 800000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
BE5 = (163,151,131)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
BE5 = (153,124,89)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 200000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
BE5 = (121,97,68)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
BE5 = (79,44,20)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_2
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
HR0 = (255,193,0)
HR2 = (251,114,7)
HR3 = (210,154,0)
HR4 = (166,165,165)
HR5 = (165,108,0)
HR6 = (111,0,48)
HR7 = (85,57,23)
red = (255,0,0)
if e > 875000:
HR1 = HR0
HR2 = red
hair_color_ep ='Blonde'
elif e > 750000:
HR1 = nr
HR2 = red
hair_color_ep ='Black'
elif e > 625000:
HR1 = HR2
HR2 = red
hair_color_ep ='Orange'
elif e > 500000:
HR1 = HR3
HR2 = red
hair_color_ep ='Fair'
elif e > 375000:
HR1 = HR4
HR2 = red
hair_color_ep ='Grey'
elif e > 250000:
HR1 = HR5
HR2 = red
hair_color_ep ='Ginger'
elif e > 125000:
HR1 = HR6
HR2 = red
hair_color_ep ='Black Rose'
else:
HR1 = HR7
HR2 = red
hair_color_ep ='Brown'
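# Eight hair colours at 12.5% each. HR2 is forced to plain red in every
# branch; the men's hair layers below never reference it, so the red accent
# is presumably used by other sections' layers (e.g. the ponytail tie).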
MAN_HR1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,HR1,0,HR1,0,HR1,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,HR1,0,0,HR1,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,HR1,0,0,0,HR1,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,HR1,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,HR1,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,HR1,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0],
[0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
MAN_HR2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0],
[0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
MAN_HR3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,HR1,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0,0,HR1,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,HR1,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,HR1,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
MAN_HR4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,0,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
MAN_HR5=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,0,HR1,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,HR1,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,HR1,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(e)
f=randint(0,1000000)
if f > 800000:
hair = MAN_HR1
haircut_ep = 'Grunge Hair'
elif f > 600000:
hair = MAN_HR2
haircut_ep = 'Prince Hair'
elif f > 400000:
hair = MAN_HR3
haircut_ep = 'King Hair'
elif f > 200000:
hair = MAN_HR4
haircut_ep = 'Bald'
else:
hair = MAN_HR5
haircut_ep = 'Straight Hair'
seed(f)
g=randint(0,1000000)
if g > 960000:
hair_prop = CAP_2
hair_prop_ep = 'Cap'
elif g > 940000:
hair_prop = COWBOY_2
hair_prop_ep = 'Cowboy Hat'
elif g > 930000:
hair_prop = TOPHAT_2
hair_prop_ep = 'Top Hat'
elif g > 910000:
hair_prop = Gondor_Crown
hair_prop_ep = 'Men Crown'
elif g > 870000:
hair_prop = KNITTED_2
hair_prop_ep = 'Knitted Cap'
elif g > 820000:
hair_prop = HEADBAND_2
hair_prop_ep = 'Headband'
elif g > 790000:
hair_prop = FORCAP_2
hair_prop_ep = 'Cap Forward'
elif g > 760000:
hair_prop = BANDANA_2
hair_prop_ep = 'Bandana'
elif g > 740000:
hair_prop = FEDORA_2
hair_prop_ep = 'Fedora'
elif g > 710000:
hair_prop = POLICE_2
hair_prop_ep = 'Police'
elif g > 700000:
hair_prop = BEANI_2
hair_prop_ep = 'Beanie'
else:
hair_prop = none
hair_prop_ep = 'None'
seed(g)
h=randint(0,1000000)
if h > 900000:
neck = GoldChain_1
neck_ep = 'Gold Chain'
elif h > 820000:
neck = SilverChain_1
neck_ep = 'Silver Chain'
elif h > 800000:
neck = RING_1
neck_ep = 'Ring Onchain'
elif h > 780000:
neck = BROCHE_1
neck_ep = 'Brooch'
else:
neck = none
neck_ep = 'None'
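# ShadowBeard is drawn with the palette's BE5/BE6 stubble colours, so the
# shadow beard matches whatever skin tone was rolled above.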
ShadowBeard=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BE5,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BE5,0,0,0,0,0,0,BE5,BE5,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BE5,BE5,BE5,BE5,BE5,BE5,BE5,BE5,BE5,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BE5,BE5,BE6,BE6,BE6,BE5,BE5,BE5,BE5,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BE5,BE5,BE5,BE5,BE5,BE5,BE5,BE5,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BE5,BE5,BE5,BE5,BE5,BE5,BE5,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BE5,BE5,BE5,BE5,BE5,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(h)
i=randint(0,1000000)
if i > 950000:
facial_hair = BigBeard
facial_hair_ep = 'Big Beard'
elif i >900000:
facial_hair = Muttonchops
facial_hair_ep = 'Muttonchops'
elif i > 850000:
facial_hair = Mustache
facial_hair_ep = 'Mustache'
elif i > 800000:
facial_hair = Handlebars
facial_hair_ep = 'Handlebars'
elif i > 750000:
facial_hair = FrontBeardDark
facial_hair_ep = 'Front Beard Dark'
elif i > 700000:
facial_hair = FrontBeard
facial_hair_ep = 'Front Beard'
elif i > 650000:
facial_hair = NormalBeard
facial_hair_ep = 'Normal Beard'
elif i > 600000:
facial_hair = NormalBeardBlack
facial_hair_ep = 'Normal Beard Black'
elif i > 550000:
facial_hair = LuxuriousBeard
facial_hair_ep = 'Luxurious Beard'
elif i > 500000:
facial_hair = Goat
facial_hair_ep = 'Goat'
elif i > 450000:
facial_hair = Chinstrap
facial_hair_ep = 'Chinstrap'
elif i > 400000:
facial_hair = ShadowBeard
facial_hair_ep = 'Shadow Beard'
else:
facial_hair = none
facial_hair_ep = 'None'
seed(i)
j=randint(0,1000000)
if j > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif j > 880000:
mouth_prop = MASK_1
mouth_prop_ep = 'Medical Mask'
facial_hair = none
elif j > 820000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif j > 780000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(j)
k=randint(0,1000000)
if k > 970000:
eyes = TD_2
eyes_prop_ep ='3D Glasses'
elif k > 930000:
eyes = VR_2
eyes_prop_ep ='VR'
elif k > 880000:
eyes = ClassicShades_2
eyes_prop_ep ='Classic Shades'
elif k > 830000:
eyes = SmallShades_2
eyes_prop_ep ='Small Shades'
hair = MAN_HR3 # this branch also overrides whatever haircut was rolled
haircut_ep = 'King Hair'
elif k > 780000:
eyes = EyePatch_2
eyes_prop_ep ='Eye Patch'
elif k > 730000:
eyes = NerdGlasses_2
eyes_prop_ep ='Nerd Glasses'
elif k > 680000:
eyes = BigShades_2
eyes_prop_ep ='Big Shades'
elif k > 650000:
eyes = EyeMask_2
eyes_prop_ep ='Eye Mask'
elif k > 600000:
eyes = HornedRimGlasses_2
eyes_prop_ep ='Horned Rim Glasses'
elif k > 550000:
eyes = RegularShades_2
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
seed(k)
l=randint(0,1000000)
if l > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
seed(l)
m=randint(0,1000000)
if m > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif m > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(m)
n=randint(0,1000000)
if n > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif n > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
elif n > 870000:
blemishes = SCARE_1
blemishe_ep = 'Scare'
else:
blemishes = none
blemishe_ep = 'None'
MAN=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,FR1,FR1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = MAN
elif b > 600000:
race_ep = 'Men'
type_ep = 'Female'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 800000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
LI1 = (113,28,17)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
LI1 = (113,28,17)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 200000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
LI1 = (95,29,13)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
LI1 = (74,18,8)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_3
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
HR0 = (255,193,0)
HR2 = (251,114,7)
HR3 = (210,154,0)
HR4 = (166,165,165)
HR5 = (165,108,0)
HR6 = (111,0,48)
HR7 = (85,57,23)
red = (255,0,0)
if e > 875000:
HR1 = HR0
HR2 = red
hair_color_ep ='Blonde'
elif e > 750000:
HR1 = nr
HR2 = red
hair_color_ep ='Black'
elif e > 625000:
HR1 = HR2
HR2 = red
hair_color_ep ='Orange'
elif e > 500000:
HR1 = HR3
HR2 = red
hair_color_ep ='Fair'
elif e > 375000:
HR1 = HR4
HR2 = red
hair_color_ep ='Grey'
elif e > 250000:
HR1 = HR5
HR2 = red
hair_color_ep ='Ginger'
elif e > 125000:
HR1 = HR6
HR2 = red
hair_color_ep ='Black Rose'
else:
HR1 = HR7
HR2 = red
hair_color_ep ='Brown'
WOMAN_HR1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,HR1,HR1,0,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,0,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,HR1,0,0,0,0],
[0,0,0,0,0,HR1,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,0,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,0,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,HR1,0,0,0,0],
[0,0,0,0,0,HR1,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,HR1,0,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,HR1,0,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,0,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,0,HR1,0,0,0,0],
[0,0,0,0,0,HR1,0,HR1,HR1,HR1,0,0,0,0,0,0,HR1,HR1,0,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,0,HR1,HR1,HR1,0,0,0,0,0,HR1,0,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
WOMAN_HR2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
WOMAN_HR3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,HR1,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
WOMAN_HR4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
WOMAN_HR5=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
MOLE_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,RC1,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
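# Women of the 'Men' race run the same roll pipeline as the section above:
# haircut, headwear, eye shadow, mouth prop, eyewear, nose, blemishes,
# lipstick, then neck accessory.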
# initialise the combo flags read by the compatibility checks further down
# (titi/tata, neyu/neyw, tactac/tuctuc) so they never raise a NameError
titi = tata = neyu = neyw = tactac = tuctuc = 0
seed(e)
f=randint(0,1000000)
if f > 800000:
hair = WOMAN_HR1
haircut_ep = 'Curly Hair'
elif f > 600000:
hair = WOMAN_HR2
haircut_ep = 'Right Side Hair'
elif f > 400000:
hair = WOMAN_HR3
haircut_ep = 'Left Side Hair'
elif f > 200000:
hair = WOMAN_HR4
haircut_ep = 'The Bob'
else:
hair = WOMAN_HR5
haircut_ep = 'Straight Hair'
seed(f)
g=randint(0,1000000)
if g > 960000:
hair_prop = CAP_4
hair_prop_ep = 'Cap'
elif g > 950000:
hair_prop = TIARA_2
hair_prop_ep = 'Tiara'
titi = 99
elif g > 930000:
hair_prop = MILICAP_2
hair_prop_ep = 'Punk Hat'
elif g > 890000:
hair_prop = KNITTED_4
hair_prop_ep = 'Knitted Cap'
elif g > 850000:
hair_prop = HEADBAND_4
hair_prop_ep = 'Headband'
elif g > 840000:
hair = none
hair_prop = PILOT_2
hair_prop_ep = 'Pilot Helmet'
titi = 99
elif g > 810000:
hair_prop = BANDANA_4
hair_prop_ep = 'Bandana'
elif g > 750000:
hair_prop = Wo_Crown
hair_prop_ep = 'Circlet'
else:
hair_prop = none
hair_prop_ep = 'None'
seed(g)
h=randint(0,1000000)
if h > 900000:
EY1 = (110,152,77)
SC1 = (92,133,57)
eyes_color_ep = 'Green Eye Shadow'
elif h > 800000:
EY1 = (93,121,117)
SC1 = (80,106,101)
eyes_color_ep = 'Blue Eye Shadow'
elif h > 700000:
EY1 = (176,61,133)
SC1 = (164,55,117)
eyes_color_ep = 'Purple Eye Shadow'
elif h > 600000:
EY1 = (214,92,26)
SC1 = (194,79,17)
eyes_color_ep = 'Orange Eye Shadow'
else:
eyes_color_ep = 'None'
neyu = 99
seed(h)
i=randint(0,1000000)
if i > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif i > 880000:
mouth_prop = MASK_2
mouth_prop_ep = 'Medical Mask'
tactac = 99
elif i > 820000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif i > 780000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(i)
j=randint(0,1000000)
if j > 970000:
eyes = TD_3
eyes_prop_ep ='3D Glasses'
elif j > 930000:
eyes = VR_3
eyes_prop_ep ='VR'
elif j > 880000:
eyes = ClassicShades_4
eyes_prop_ep ='Classic Shades'
elif j > 830000:
eyes = EyePatch_4
eyes_prop_ep ='Eye Patch'
neyw = 99
elif j > 780000:
eyes = NerdGlasses_4
eyes_prop_ep ='Nerd Glasses'
elif j > 730000:
eyes = BigShades_4
eyes_prop_ep ='Big Shades'
elif j > 700000:
eyes = EyeMask_4
eyes_prop_ep ='Eye Mask'
neyw = 99
elif j > 650000:
eyes = HornedRimGlasses_4
eyes_prop_ep ='Horned Rim Glasses'
elif j > 600000:
eyes = RegularShades_4
eyes_prop_ep ='Regular Shades'
elif j > 590000:
eyes = GOGOLES_2
eyes_prop_ep ='Welding Goggles'
hair_prop = none
hair_prop_ep = 'None'
tata = 99
else:
eyes=none
eyes_prop_ep ='None'
neyw = 99
if titi == 99 and tata != 99: # tiara/pilot helmet drops eyewear unless welding goggles were rolled
eyes = none
eyes_prop_ep ='None'
if neyu != 99 and neyw != 99: # eye shadow wins over glasses-style eyewear
eyes = none
eyes_prop_ep ='None'
seed(j)
k=randint(0,1000000)
if k > 975000:
nose = NOSE_2
nose_ep = 'Clown Nose'
tuctuc = 99
else:
nose = none
nose_ep = 'None'
if tactac == 99 and tuctuc == 99: # medical mask and clown nose never stack
mouth_prop = none
mouth_prop_ep = 'None'
seed(k)
l=randint(0,1000000)
if l > 970000:
blemishes = ROSY_2
blemishe_ep = 'Rosy Cheeks'
elif l > 900000:
blemishes = MOLE_2
blemishe_ep = 'Mole'
elif l > 870000:
blemishes = SCARE_2
blemishe_ep = 'Scare'
else:
blemishes = none
blemishe_ep = 'None'
seed(l)
m=randint(0,1000000)
if m > 930000:
LI1 = nr
mouth_ep = 'Black Lipstick'
elif m > 860000:
LI1 = (255,0,0)
mouth_ep = 'Hot Lipstick'
elif m > 790000:
LI1 = (208,82,203)
mouth_ep = 'Purple Lipstick'
elif m > 720000:
LI1 = (214,92,26)
mouth_ep = 'Orange Lipstick'
else:
mouth = none
mouth_ep = 'None'
seed(m)
n=randint(0,1000000)
if n > 900000:
neck = GoldChain_3
neck_ep = 'Gold Chain'
elif n > 820000:
neck = SilverChain_3
neck_ep = 'Silver Chain'
elif n > 800000:
neck = RING_3
neck_ep = 'Ring Onchain'
elif n > 790000:
neck = CHOKER
neck_ep = 'Choker'
elif n > 770000:
neck = BROCHE_3
neck_ep = 'Brooch'
else:
neck = none
neck_ep = 'None'
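# The women's neck chain adds a Choker (1%) that no other section rolls,
# plus the Brooch shared with the men's chain.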
WOMAN=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,LI1,LI1,LI1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = WOMAN
elif b > 535000:
race_ep = 'Elves'
type_ep = 'Male'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 800000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 200000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_1
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
HR0 = (255,227,72)
HR2 = (255,255,153)
HR3 = (165,108,0)
HR4 = (61,35,32)
HR5 = (111,0,48)
HR6 = (255,0,0)
if e > 850000:
HR1 = HR0
hair_color_ep ='Blond'
elif e > 700000:
HR1 = HR2
hair_color_ep ='Butter'
elif e > 650000:
HR1 = HR3
hair_color_ep ='Ginger'
elif e > 500000:
HR1 = HR4
hair_color_ep ='Brown'
elif e > 350000:
HR1 = HR5
hair_color_ep ='Black Rose'
elif e > 200000:
HR1 = nr
hair_color_ep='Black'
else:
HR1 = HR6
hair_color_ep ='Red'
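# Elves use their own seven-colour hair palette: Blond 15%, Butter 15%,
# Ginger 5%, Brown 15%, Black Rose 15%, Black 15%, Red 20%.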
ELF_HR1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ELF_HR2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,HR1,HR1,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,HR1,HR1,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ELF_HR3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BG1,HR1,HR1,HR1,HR1,BG1,BG1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,HR1,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ELF_HR4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,HR1,HR1,HR1,HR1,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,HR1,HR1,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0]
]
ELF_HR5=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0]
]
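# Trait rolls are chained: each draw reseeds the RNG with the previous roll
# (seed(e) -> f, seed(f) -> g, ...), so the whole character is a deterministic
# function of the first seed value and can be reproduced from it.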
seed(e)
f=randint(0,1000000)
if f > 800000:
hair = ELF_HR1
haircut_ep = 'Straight Hair'
elif f > 600000:
hair = ELF_HR2
haircut_ep = 'Braids'
elif f > 400000:
hair = ELF_HR3
haircut_ep = 'Left Side Hair'
elif f > 200000:
hair = ELF_HR4
haircut_ep = 'Long Hair'
else:
hair = ELF_HR5
haircut_ep = 'Medium Layers'
seed(f)
g=randint(0,1000000)
if g > 960000:
hair_prop = CAP_1
hair_prop_ep = 'Cap'
elif g > 940000:
hair_prop = COWBOY_1
hair_prop_ep = 'Cowboy Hat'
elif g > 910000:
hair_prop = TOPHAT_1
hair_prop_ep = 'Top Hat'
elif g > 870000:  # fixed: this chain rolls g, not e
hair_prop = KNITTED_1
hair_prop_ep = 'Knitted Cap'
elif g > 865000:
hair_prop = HEADBAND_1
hair_prop_ep = 'Headband'
hair = ELF_HR1
haircut_ep = 'Straight Hair'
elif g > 850000:
hair_prop = HEADBAND_1
hair_prop_ep = 'Headband'
hair = ELF_HR2
haircut_ep = 'Braids'
elif g > 835000:
hair_prop = HEADBAND_1
hair_prop_ep = 'Headband'
hair = ELF_HR4
haircut_ep = 'Long Hair'
elif g > 820000:
hair_prop = HEADBAND_1
hair_prop_ep = 'Headband'
hair = ELF_HR5
haircut_ep = 'Medium Layers'
elif g > 790000:
hair_prop = FORCAP_1
hair_prop_ep = 'Cap Forward'
elif g > 760000:
hair_prop = BANDANA_1
hair_prop_ep = 'Bandana'
elif g > 750000:
hair_prop = Elf_Crown
hair_prop_ep = 'Elfic Crown'
hair = ELF_HR1
haircut_ep = 'Straight Hair'
elif g > 740000:
hair_prop = Elf_Crown
hair_prop_ep = 'Elfic Crown'
hair = ELF_HR2
haircut_ep = 'Braids'
elif g > 730000:
hair_prop = Elf_Crown
hair_prop_ep = 'Elfic Crown'
hair = ELF_HR4
haircut_ep = 'Long Hair'
elif g > 720000:
hair_prop = Elf_Crown
hair_prop_ep = 'Elfic Crown'
hair = ELF_HR5
haircut_ep = 'Medium Layers'
elif g > 700000:
hair_prop = FEDORA_1
hair_prop_ep = 'Fedora'
elif g > 670000:
hair_prop = POLICE_1
hair_prop_ep = 'Police'
elif g > 660000:
hair_prop = BEANI_1
hair_prop_ep = 'Beanie'
else:
hair_prop = none
hair_prop_ep = 'None'
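# Headwear rarity falls out of the cutoffs above (top slices of a 0..1,000,000
# roll). Note the Headband and Elfic Crown branches also force a compatible
# haircut so the accessory lines up with the hair bitmap.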
seed(g)
h=randint(0,1000000)
if h > 900000:
neck = GoldChain_1
neck_ep = 'Gold Chain'
elif h > 820000:
neck = SilverChain_1
neck_ep = 'Silver Chain'
elif h > 800000:
neck = RING_1
neck_ep = 'Ring Onchain'
elif h > 780000:
neck = BROCHE_1
neck_ep = 'Brooch'
else:
neck = none
neck_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif i > 880000:
mouth_prop = MASK_1
mouth_prop_ep = 'Medical Mask'
elif i > 820000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif i > 780000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
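# Worked rarity example for the roll above: P(Cigarette) = P(i > 900000)
# = 100,000 / 1,000,001 (randint is inclusive) ~ 10%; Medical Mask ~ 2%,
# Pipe ~ 6%, Vape ~ 4%, and the remainder gets 'None'.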
seed(i)
j=randint(0,1000000)
if j > 970000:
eyes = TD_1
eyes_prop_ep ='3D Glasses'
elif j > 930000:
eyes = VR_1
eyes_prop_ep ='VR'
elif j > 880000:
eyes = ClassicShades_1
eyes_prop_ep ='Classic Shades'
elif j > 830000:
eyes = EyePatch_1
eyes_prop_ep ='Eye Patch'
elif j > 780000:
eyes = NerdGlasses_1
eyes_prop_ep ='Nerd Glasses'
elif j > 730000:
eyes = BigShades_1
eyes_prop_ep ='Big Shades'
elif j > 700000:
eyes = EyeMask_1
eyes_prop_ep ='Eye Mask'
elif j > 650000:
eyes = HornedRimGlasses_1
eyes_prop_ep ='Horned Rim Glasses'
elif j > 600000:
eyes = RegularShades_1
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
seed(j)
k=randint(0,1000000)
if k > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
seed(k)
l=randint(0,1000000)
if l > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif l > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
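# Blemish overlay bitmaps: a one-pixel MOLE, a diagonal SCARE_1 scar and
# ROSY_1 cheeks, drawn in the skin-matched shades (MO1 / SCR1 / RC1) that
# were set together with the skin tone above.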
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(l)
m=randint(0,1000000)
if m > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif m > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
elif m > 870000:
blemishes = SCARE_1
blemishe_ep = 'Scare'
else:
blemishes = none
blemishe_ep = 'None'
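# Base 24x24 male-elf face. Legend: FR2 frame border, BG1 background,
# FR1 outline, SK1 skin, SC1 brow shading, EY1 eyes. The trait layers
# rolled above are (presumably) composited over `pixels` later on.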
ELF=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,FR1,FR1,FR1,FR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,SK1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,FR1,FR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,FR1,FR1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = ELF
elif b > 470000:
race_ep = 'Elves'
type_ep = 'Female'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
# reset the conflict-check sentinels used later in this branch, so stale
# values from a previously generated character (or an undefined name on
# the first run) cannot leak into the checks
titin = toutou = neyo = neye = tactac = tuctuc = 0
seed(b)
c=randint(0,1000000)
if c > 800000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = SK1
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
LI1 = (113,28,17)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = SK1
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
LI1 = (113,28,17)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 200000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = SK1
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
LI1 = (95,29,13)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = SK1
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
LI1 = (74,18,8)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_3
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
HR0 = (255,227,72)
HR2 = (249,255,0)
HR3 = (165,108,0)
HR4 = (61,35,32)
HR5 = (111,0,48)
HR6 = (255,0,0)
if e > 850000:
HR1 = HR0
hair_color_ep ='Blond'
elif e > 700000:
HR1 = HR2
hair_color_ep ='Butter'
elif e > 650000:
HR1 = HR3
hair_color_ep ='Ginger'
elif e > 500000:
HR1 = HR4
hair_color_ep ='Brown'
elif e > 350000:
HR1 = HR5
hair_color_ep ='Black Rose'
elif e > 200000:
HR1 = nr
hair_color_ep='Black'
else:
HR1 = HR6
hair_color_ep ='Red'
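# Female-elf counterparts of the hairstyle bitmaps: same 24x24 convention,
# 0 transparent, HR1 painted with the colour chosen above.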
ELFE_HR1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ELFE_HR2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,HR1,HR1,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0]
]
ELFE_HR3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,HR1,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ELFE_HR4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,HR1,HR1,HR1,HR1,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,HR1,HR1,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0]
]
ELFE_HR5=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0]
]
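# Female-elf blemish overlays (MOLE_2 / SCARE_2 / ROSY_2): same idea as the
# male set, with the cells repositioned for the narrower ELFE face.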
MOLE_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,RC1,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(e)
f=randint(0,1000000)
if f > 800000:
hair = ELFE_HR1
haircut_ep = 'Straight Hair'
elif f > 600000:
hair = ELFE_HR2
haircut_ep = 'Braids'
elif f > 400000:
hair = ELFE_HR3
haircut_ep = 'Left Side Hair'
elif f > 200000:
hair = ELFE_HR4
haircut_ep = 'Long Hair'
else:
hair = ELFE_HR5
haircut_ep = 'Medium Layers'
seed(f)
g=randint(0,1000000)
if g > 900000:
hair_prop = CAP_3
hair_prop_ep = 'Cap'
elif g > 700000:
hair_prop = MILICAP_1
hair_prop_ep = 'Punk Hat'
elif g > 600000:  # fixed: this chain rolls g, not e
hair_prop = KNITTED_3
hair_prop_ep = 'Knitted Cap'
elif g > 500000:
hair_prop = HEADBAND_3
hair_prop_ep = 'Headband'
elif g > 400000:
hair = none
hair_prop = PILOT_1
hair_prop_ep = 'Pilot Helmet'
titin = 99
elif g > 300000:
hair_prop = BANDANA_3
hair_prop_ep = 'Bandana'
elif g > 100000:
hair_prop = Elfe_Tiara
hair_prop_ep = 'Elfic Tiara'
else:
hair_prop = none
hair_prop_ep = 'None'
seed(g)
h=randint(0,1000000)
if h > 900000:
EY1 = (110,152,77)
SC1 = (92,133,57)
eyes_color_ep = 'Green Eye Shadow'
elif h > 800000:
EY1 = (93,121,117)
SC1 = (80,106,101)
eyes_color_ep = 'Blue Eye Shadow'
elif h > 700000:
EY1 = (176,61,133)
SC1 = (164,55,117)
eyes_color_ep = 'Purple Eye Shadow'
elif h > 600000:
EY1 = (214,92,26)
SC1 = (194,79,17)
eyes_color_ep = 'Orange Eye Shadow'
else:
eyes_color_ep = 'None'
neyo = 99
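# For females the eye colours double as eye shadow: EY1 tints the lids and
# SC1 the brow shading; neyo marks the no-shadow case for the conflict check
# that runs after the eyewear roll.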
seed(h)
i=randint(0,1000000)
if i > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif i > 880000:
mouth_prop = MASK_2
mouth_prop_ep = 'Medical Mask'
tactac = 99
elif i > 820000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif i > 780000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(i)
j=randint(0,1000000)
if j > 970000:
eyes = TD_3
eyes_prop_ep ='3D Glasses'
elif j > 930000:
eyes = VR_3
eyes_prop_ep ='VR'
elif j > 880000:
eyes = ClassicShades_3
eyes_prop_ep ='Classic Shades'
elif j > 830000:
eyes = EyePatch_3
eyes_prop_ep ='Eye Patch'
neye = 99
elif j > 780000:
eyes = NerdGlasses_3
eyes_prop_ep ='Nerd Glasses'
elif j > 730000:
eyes = BigShades_3
eyes_prop_ep ='Big Shades'
elif j > 700000:
eyes = EyeMask_3
eyes_prop_ep ='Eye Mask'
neye = 99
elif j > 650000:
eyes = HornedRimGlasses_3
eyes_prop_ep ='Horned Rim Glasses'
elif j > 600000:
eyes = RegularShades_3
eyes_prop_ep ='Regular Shades'
elif j > 590000:
eyes = GOGOLES_1
eyes_prop_ep ='Welding Goggles'
hair_prop = none
hair_prop_ep = 'None'
toutou = 99
else:
eyes=none
eyes_prop_ep ='None'
neye = 99
# Pilot Helmet (titin) hides any eyewear unless Welding Goggles (toutou) were rolled.
if titin == 99 and toutou != 99:
eyes = none
eyes_prop_ep = 'None'
# Eye shadow (neyo unset) clashes with glasses-style props; Eye Patch, Eye Mask
# and 'None' (neye) already leave the lids visible, so only the rest are dropped.
if neyo != 99 and neye != 99:
eyes = none
eyes_prop_ep = 'None'
seed(j)
k=randint(0,1000000)
if k > 975000:
nose = NOSE_2
nose_ep = 'Clown Nose'
tuctuc = 99
else:
nose = none
nose_ep = 'None'
# Medical Mask (tactac) and Clown Nose (tuctuc) overlap, so the mouth prop is dropped.
if tactac == 99 and tuctuc == 99:
mouth_prop = none  # fixed: was `mouthprop`, a dead assignment
mouth_prop_ep = 'None'
seed(k)
l=randint(0,1000000)
if l > 970000:
blemishes = ROSY_2
blemishe_ep = 'Rosy Cheeks'
elif l > 900000:
blemishes = MOLE_2
blemishe_ep = 'Mole'
elif l > 870000:
blemishes = SCARE_2
blemishe_ep = 'Scare'
else:
blemishes = none
blemishe_ep = 'None'
seed(l)
m=randint(0,1000000)
if m > 930000:
LI1 = nr
mouth_ep = 'Black Lipstick'
elif m > 860000:
LI1 = (255,0,0)
mouth_ep = 'Hot Lipstick'
elif m > 790000:
LI1 = (208,82,203)
mouth_ep = 'Purple Lipstick'
elif m > 720000:
LI1 = (214,92,26)
mouth_ep = 'Orange Lipstick'
else:
mouth = none
mouth_ep = 'None'
seed(m)
n=randint(0,1000000)
if n > 900000:
neck = GoldChain_2
neck_ep = 'Gold Chain'
elif n > 820000:
neck = SilverChain_2
neck_ep = 'Silver Chain'
elif n > 800000:
neck = RING_2
neck_ep = 'Ring Onchain'
elif n > 780000:
neck = BROCHE_2
neck_ep = 'Brooch'
else:
neck = none
neck_ep = 'None'
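# Base 24x24 female-elf face; same legend as the male bitmap plus LI1 for
# the lip row (recoloured by the lipstick roll above).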
ELFE=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK2,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,FR1,FR1,FR1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,SK1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,FR1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,LI1,LI1,LI1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = ELFE
elif b > 460000:
race_ep = 'Dwarves'
type_ep = 'Firebeards'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
tete = 0  # reset the helmet sentinel checked after the eyewear roll
seed(b)
c=randint(0,1000000)
if c > 800000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 200000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_2
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
HR0 = (255,193,0)
HR8 = (251,114,7)
HR3 = (210,154,0)
HR4 = (166,165,165)
HR5 = (165,108,0)
HR6 = (111,0,48)
HR7 = (85,57,23)
if e > 860000:
HR1 = (50,50,50) #Ok
HR2 = (200,200,200)
hair_color_ep = 'Dark Grey & Silver'
elif e > 720000:
HR1 = HR8
HR2 = (111,0,48) #ok
hair_color_ep = 'Orange & Black Rose'
elif e > 580000:
HR1 = HR3 #ok
HR2 = (210,210,0)
hair_color_ep = 'Fair & Wattle'
elif e > 440000:
HR1 = (80,50,30) #Ok
HR2 = (44,4,9)
hair_color_ep = 'Bronze & Chocolate'
elif e > 300000:
HR1 = HR5
HR2 = HR3
hair_color_ep = 'Ginger & Fair'
elif e > 150000:
HR1 = (220,130,0) #ok
HR2 = (70,40,10)
hair_color_ep = 'Mango & Brown'
else:
HR1 = (210,210,210) #Ok
HR2 = (210,210,210)
hair_color_ep = 'Grey Goose'
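# Dwarves use a two-tone palette: HR1 is the primary hair/beard colour and
# HR2 the secondary (e.g. 'Dark Grey & Silver'), so each named colour is a pair.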
seed(e)
f=randint(0,1000000)
seed(f)
g=randint(0,1000000)
if g > 960000:
hair_prop = CAP_2
hair_prop_ep = 'Cap'
elif g > 940000:
hair_prop = COWBOY_2
hair_prop_ep = 'Cowboy Hat'
elif g > 920000:
hair_prop = TOPHAT_2
hair_prop_ep = 'Top Hat'
elif g > 890000:
hair_prop = Helmet
hair_prop_ep = 'Dwarf Helmet'
tete = 99
elif g > 870000:
hair_prop = FORCAP_2
hair_prop_ep = 'Cap Forward'
elif g > 850000:
hair_prop = BANDANA_2
hair_prop_ep = 'Bandana'
elif g > 830000:
hair_prop = FEDORA_2
hair_prop_ep = 'Fedora'
elif g > 800000:
hair_prop = POLICE_2
hair_prop_ep = 'Police'
else:
hair_prop = none
hair_prop_ep = 'None'
seed(g)
h=randint(0,1000000)
if h > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif h > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif h > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 970000:
eyes = TD_2
eyes_prop_ep ='3D Glasses'
elif i > 930000:
eyes = VR_2
eyes_prop_ep ='VR'
elif i > 880000:
eyes = ClassicShades_2
eyes_prop_ep ='Classic Shades'
elif i > 830000:
eyes = SmallShades_2
eyes_prop_ep ='Small Shades'
elif i > 780000:
eyes = EyePatch_2
eyes_prop_ep ='Eye Patch'
elif i > 730000:
eyes = NerdGlasses_2
eyes_prop_ep ='Nerd Glasses'
elif i > 680000:
eyes = BigShades_2
eyes_prop_ep ='Big Shades'
elif i > 650000:
eyes = EyeMask_2
eyes_prop_ep ='Eye Mask'
elif i > 600000:
eyes = HornedRimGlasses_2
eyes_prop_ep ='Horned Rim Glasses'
elif i > 550000:
eyes = RegularShades_2
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
# The Dwarf Helmet (tete) covers the eyes, so any rolled eyewear is removed.
if tete == 99:
eyes = none
eyes_prop_ep = 'None'
seed(i)
j=randint(0,1000000)
if j > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
seed(j)
k=randint(0,1000000)
if k > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif k > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
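# The blemish bitmaps are redefined inside each race branch on purpose: the
# lists capture the current MO1 / SCR1 / RC1 tuples at definition time, so
# they must be rebuilt after the branch's skin tone is chosen.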
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(k)
l=randint(0,1000000)
if l > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif l > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
elif l > 870000:
blemishes = SCARE_1
blemishe_ep = 'Scare'
else:
blemishes = none
blemishe_ep = 'None'
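# Firebeards base bitmap: HR1 frames the face and long beard, HR2 picks out
# the braids and beard highlights over the usual face legend.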
DWARF_1=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR2,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR2,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR2,HR1,HR1,HR1,SK1,SK1,SK1,HR1,HR1,HR1,HR2,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR2,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR2,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR2,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,HR1,HR2,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR2,HR1,FR1,HR1,SK1,SK1,SK1,SK1,SK1,HR1,SK1,SK1,FR1,HR1,HR1,HR2,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR2,HR1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,SK1,FR1,HR1,HR1,HR2,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR2,HR1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,SK1,FR1,HR1,HR1,HR2,BG1,FR2],
[FR2,BG1,BG1,BG1,HR2,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,FR1,HR1,HR1,HR2,BG1,FR2],
[FR2,BG1,BG1,BG1,HR2,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,HR1,HR1,HR1,BG1,HR2,FR2],
[FR2,BG1,BG1,HR2,BG1,HR1,HR1,FR1,SK1,SK1,FR1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,HR1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,HR2,BG1,HR1,HR1,FR1,HR1,HR2,HR2,HR2,HR2,HR1,HR1,SK1,SK1,FR1,HR1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,HR2,BG1,BG1,HR1,FR1,HR1,HR2,HR1,HR1,HR1,HR1,HR2,HR1,HR1,HR1,FR1,HR1,HR1,HR1,HR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,FR1,HR1,HR2,HR1,FR1,FR1,FR1,HR1,HR2,HR1,HR1,HR1,FR1,HR1,HR1,HR1,HR1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,FR1,HR1,HR2,HR1,HR1,HR1,HR1,HR1,HR2,HR1,HR1,HR1,FR1,HR1,HR1,HR1,HR1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,HR2,HR1,HR1,HR1,HR1,HR1,HR2,HR1,HR1,HR1,FR1,HR1,HR1,HR1,HR1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR2,FR1,HR1,HR1,HR1,HR1,HR1,HR1,HR2,FR1,FR1,BG1,BG1,HR1,HR1,HR1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR2,HR1,BG1,FR1,HR1,HR1,FR1,FR1,FR1,FR1,HR2,FR1,BG1,BG1,BG1,HR1,HR1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,FR1,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = DWARF_1
elif b > 450000:
race_ep = 'Dwarves'
type_ep = 'Blacklocks'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
tete = 0  # reset the helmet sentinel checked after the eyewear roll
seed(b)
c=randint(0,1000000)
if c > 800000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 200000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_2
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
HR0 = (255,193,0)
HR8 = (251,114,7)
HR3 = (210,154,0)
HR4 = (166,165,165)
HR5 = (165,108,0)
HR6 = (111,0,48)
HR7 = (85,57,23)
if e > 860000:
HR1 = (50,50,50) #Ok
HR2 = (200,200,200)
hair_color_ep = 'Dark Grey & Silver'
elif e > 720000:
HR1 = HR8
HR2 = (111,0,48) #ok
hair_color_ep = 'Orange & Black Rose'
elif e > 580000:
HR1 = HR3 #ok
HR2 = (210,210,0)
hair_color_ep = 'Fair & Wattle'
elif e > 440000:
HR1 = (80,50,30) #Ok
HR2 = (44,4,9)
hair_color_ep = 'Bronze & Chocolate'
elif e > 300000:
HR1 = HR5
HR2 = HR3
hair_color_ep = 'Ginger & Fair'
elif e > 150000:
HR1 = (220,130,0) #ok
HR2 = (70,40,10)
hair_color_ep = 'Mango & Brown'
else:
HR1 = (210,210,210) #Ok
HR2 = (210,210,210)
hair_color_ep = 'Grey Goose'
seed(e)
f=randint(0,1000000)
seed(f)
g=randint(0,1000000)
if g > 960000:
hair_prop = CAP_2
hair_prop_ep = 'Cap'
elif g > 940000:
hair_prop = COWBOY_2
hair_prop_ep = 'Cowboy Hat'
elif g > 920000:
hair_prop = TOPHAT_2
hair_prop_ep = 'Top Hat'
elif g > 890000:
hair_prop = Helmet
hair_prop_ep = 'Dwarf Helmet'
tete = 99
elif g > 870000:
hair_prop = FORCAP_2
hair_prop_ep = 'Cap Forward'
elif g > 850000:
hair_prop = BANDANA_2
hair_prop_ep = 'Bandana'
elif g > 830000:
hair_prop = FEDORA_2
hair_prop_ep = 'Fedora'
elif g > 800000:
hair_prop = POLICE_2
hair_prop_ep = 'Police'
else:
hair_prop = none
hair_prop_ep = 'None'
seed(g)
h=randint(0,1000000)
if h > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif h > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif h > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 970000:
eyes = TD_2
eyes_prop_ep ='3D Glasses'
elif i > 930000:
eyes = VR_2
eyes_prop_ep ='VR'
elif i > 880000:
eyes = ClassicShades_2
eyes_prop_ep ='Classic Shades'
elif i > 830000:
eyes = SmallShades_2
eyes_prop_ep ='Small Shades'
elif i > 780000:
eyes = EyePatch_2
eyes_prop_ep ='Eye Patch'
elif i > 730000:
eyes = NerdGlasses_2
eyes_prop_ep ='Nerd Glasses'
elif i > 680000:
eyes = BigShades_2
eyes_prop_ep ='Big Shades'
elif i > 650000:
eyes = EyeMask_2
eyes_prop_ep ='Eye Mask'
elif i > 600000:
eyes = HornedRimGlasses_2
eyes_prop_ep ='Horned Rim Glasses'
elif i > 550000:
eyes = RegularShades_2
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
# The Dwarf Helmet (tete) covers the eyes, so any rolled eyewear is removed.
if tete == 99:
eyes = none
eyes_prop_ep = 'None'
seed(i)
j=randint(0,1000000)
if j > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
seed(j)
k=randint(0,1000000)
if k > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif k > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(k)
l=randint(0,1000000)
if l > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif l > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
elif l > 870000:
blemishes = SCARE_1
blemishe_ep = 'Scare'
else:
blemishes = none
blemishe_ep = 'None'
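# Blacklocks base bitmap: a top-knot of HR1 hair with an HR2 beard hanging
# below the face; same legend otherwise.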
DWARF_2=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,HR1,HR1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,HR1,HR1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SK1,SK1,FR1,FR1,SK1,SK1,SK1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SK1,HR2,HR2,HR2,HR2,HR2,SK1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR2,HR2,HR2,HR2,HR2,HR2,HR2,HR2,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR2,SK1,FR1,FR1,FR1,SK1,HR2,HR2,HR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR2,HR2,HR2,SK1,HR2,HR2,HR2,SK1,SK1,HR2,HR2,HR2,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR2,BG1,HR2,HR2,SK1,SK1,HR2,SK1,SK1,SK1,HR2,HR2,FR1,HR2,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR2,BG1,HR2,HR2,FR1,FR1,HR2,FR1,FR1,SK1,HR2,HR2,FR1,HR2,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR2,BG1,BG1,HR2,HR2,BG1,BG1,HR2,BG1,FR1,SK1,HR2,HR2,FR1,BG1,HR2,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,HR2,HR2,FR2,FR2,HR2,FR2,FR1,SK1,HR2,HR2,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = DWARF_2
elif b > 440000:
race_ep = 'Dwarves'
type_ep = 'Broadbeams'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
tete = 0  # reset the helmet sentinel checked after the eyewear roll
seed(b)
c=randint(0,1000000)
if c > 800000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 200000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_2
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
HR0 = (255,193,0)
HR8 = (251,114,7)
HR3 = (210,154,0)
HR4 = (166,165,165)
HR5 = (165,108,0)
HR6 = (111,0,48)
HR7 = (85,57,23)
if e > 860000:
HR1 = (50,50,50) #Ok
HR2 = (200,200,200)
hair_color_ep = 'Dark Grey & Silver'
elif e > 720000:
HR1 = HR8
HR2 = (111,0,48) #ok
hair_color_ep = 'Orange & Black Rose'
elif e > 580000:
HR1 = HR3 #ok
HR2 = (210,210,0)
hair_color_ep = 'Fair & Wattle'
elif e > 440000:
HR1 = (80,50,30) #Ok
HR2 = (44,4,9)
hair_color_ep = 'Bronze & Chocolate'
elif e > 300000:
HR1 = HR5
HR2 = HR3
hair_color_ep = 'Ginger & Fair'
elif e > 150000:
HR1 = (220,130,0) #ok
HR2 = (70,40,10)
hair_color_ep = 'Mango & Brown'
else:
HR1 = (210,210,210) #Ok
HR2 = (210,210,210)
hair_color_ep = 'Grey Goose'
seed(e)
f=randint(0,1000000)
seed(f)
g=randint(0,1000000)
if g > 960000:
hair_prop = CAP_2
hair_prop_ep = 'Cap'
elif g > 940000:
hair_prop = COWBOY_2
hair_prop_ep = 'Cowboy Hat'
elif g > 920000:
hair_prop = TOPHAT_2
hair_prop_ep = 'Top Hat'
elif g > 890000:
hair_prop = Helmet
hair_prop_ep = 'Dwarf Helmet'
tete = 99
elif g > 870000:
hair_prop = FORCAP_2
hair_prop_ep = 'Cap Forward'
elif g > 850000:
hair_prop = BANDANA_2
hair_prop_ep = 'Bandana'
elif g > 830000:
hair_prop = FEDORA_2
hair_prop_ep = 'Fedora'
elif g > 800000:
hair_prop = POLICE_2
hair_prop_ep = 'Police'
else:
hair_prop = none
hair_prop_ep = 'None'
seed(g)
h=randint(0,1000000)
if h > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif h > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif h > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
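# (disabled) an earlier eye-color roll, kept for reference: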
# seed(h)
# i=randint(0,1000000)
# if i > 300000:
# EY1 = (255,255,255)
# elif i > 50000:
# EY1 = (0,0,255)
# else:
# EY1 = (0,255,0)
seed(h)
i=randint(0,1000000)
if i > 970000:
eyes = TD_2
eyes_prop_ep ='3D Glasses'
elif i > 930000:
eyes = VR_2
eyes_prop_ep ='VR'
elif i > 880000:
eyes = ClassicShades_2
eyes_prop_ep ='Classic Shades'
elif i > 830000:
eyes = SmallShades_2
eyes_prop_ep ='Small Shades'
elif i > 780000:
eyes = EyePatch_2
eyes_prop_ep ='Eye Patch'
elif i > 730000:
eyes = NerdGlasses_2
eyes_prop_ep ='Nerd Glasses'
elif i > 680000:
eyes = BigShades_2
eyes_prop_ep ='Big Shades'
elif i > 650000:
eyes = EyeMask_2
eyes_prop_ep ='Eye Mask'
elif i > 600000:
eyes = HornedRimGlasses_2
eyes_prop_ep ='Horned Rim Glasses'
elif i > 550000:
eyes = RegularShades_2
eyes_prop_ep ='Regular Shades'
else:
eyes = none
eyes_prop_ep ='None'
if tete == 99:
eyes = none
eyes_prop_ep ='None'
seed(i)
j=randint(0,1000000)
if j > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
seed(j)
k=randint(0,1000000)
if k > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif k > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
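# Blemish overlays, redefined inside every branch so each block stays self-contained;
# they reuse this branch's MO1/SCR1/RC1 colors.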
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(k)
l=randint(0,1000000)
if l > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif l > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
elif l > 870000:
blemishes = SCARE_1
blemishe_ep = 'Scare'
else:
blemishes = none
blemishe_ep = 'None'
DWARF_3=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,SK1,HR1,HR1,SK1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR1,HR1,FR1,HR1,SK1,FR1,FR1,SK1,SK1,SK1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR1,BG1,FR1,HR2,HR2,HR2,HR2,HR2,HR2,HR2,FR1,HR1,FR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,BG1,FR1,HR2,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR2,FR1,FR1,BG1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,HR1,HR1,FR1,HR2,HR1,HR1,HR1,FR1,FR1,FR1,HR1,HR1,HR1,HR2,FR1,BG1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,HR1,HR1,FR1,HR2,HR1,HR1,HR1,HR1,HR2,HR1,HR1,HR1,HR1,HR2,FR1,BG1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,HR1,HR1,FR1,HR2,HR1,HR1,HR1,HR2,HR2,HR2,HR1,HR1,HR1,HR2,FR1,BG1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,HR1,HR1,BG1,FR1,HR2,HR1,HR2,HR2,HR2,HR2,HR2,HR1,HR2,FR1,FR1,BG1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,HR1,HR1,BG1,BG1,FR1,HR2,FR1,FR1,FR1,FR1,FR1,HR2,FR1,SK1,FR1,BG1,HR1,HR1,BG1,BG1,FR2],
[FR2,FR2,FR2,HR1,HR1,FR2,FR2,FR2,FR1,FR2,FR2,FR2,FR2,FR1,FR1,SK1,SK1,FR1,FR2,HR1,HR1,FR2,FR2,FR2]
]
pixels = DWARF_3
elif b > 430000:
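# Stiffbeards: same trait pipeline as the other dwarf clans, rendered with the DWARF_4 sprite.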
race_ep = 'Dwarves'
type_ep = 'Stiffbeards'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
# tooth color (DE1) is a plain RGB value, not an overlay layer, so there is nothing to reset
mouth = none
facial_hair = none
rod = none
mouth_prop = none
# eye color (EY1) likewise is a plain RGB value with no overlay layer to reset
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 800000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 200000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_2
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
HR0 = (255,193,0)
HR8 = (251,114,7)
HR3 = (210,154,0)
HR4 = (166,165,165)
HR5 = (165,108,0)
HR6 = (111,0,48)
HR7 = (85,57,23)
if e > 860000:
HR1 = (50,50,50) #Ok
HR2 = (200,200,200)
hair_color_ep = 'Dark Grey'
elif e > 720000:
HR1 = HR8
HR2 = (111,0,48) #ok
hair_color_ep = 'Orange'
elif e > 580000:
HR1 = HR3 #ok
HR2 = (210,210,0)
hair_color_ep = 'Fair'
elif e > 440000:
HR1 = (80,50,30) #Ok
HR2 = (44,4,9)
hair_color_ep = 'Bronze'
elif e > 300000:
HR1 = HR5
HR2 = HR3
hair_color_ep = 'Ginger'
elif e > 150000:
HR1 = (220,130,0) #ok
HR2 = (70,40,10)
hair_color_ep = 'Mango'
else:
HR1 = (210,210,210) #Ok
HR2 = (210,210,210)
hair_color_ep = 'Grey Goose'
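# Stiffbeards get single-color hair names: the DWARF_4 sprite below only ever
# references HR1 (HR2 is still set, but never drawn).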
seed(e)
f=randint(0,1000000)
seed(f)
g=randint(0,1000000)
if g > 960000:
hair_prop = CAP_2
hair_prop_ep = 'Cap'
elif g > 940000:
hair_prop = COWBOY_2
hair_prop_ep = 'Cowboy Hat'
elif g > 920000:
hair_prop = TOPHAT_2
hair_prop_ep = 'Top Hat'
elif g > 890000:
hair_prop = Helmet
hair_prop_ep = 'Dwarf Helmet'
tete = 99
elif g > 870000:
hair_prop = FORCAP_2
hair_prop_ep = 'Cap Forward'
elif g > 850000:
hair_prop = BANDANA_2
hair_prop_ep = 'Bandana'
elif g > 830000:
hair_prop = FEDORA_2
hair_prop_ep = 'Fedora'
elif g > 800000:
hair_prop = POLICE_2
hair_prop_ep = 'Police'
else:
hair_prop = none
hair_prop_ep = 'None'
seed(g)
h=randint(0,1000000)
if h > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif h > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif h > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
# seed(h)
# i=randint(0,1000000)
# if i > 300000:
# EY1 = (255,255,255)
# elif i > 50000:
# EY1 = (0,0,255)
# else:
# EY1 = (0,255,0)
seed(h)
i=randint(0,1000000)
if i > 970000:
eyes = TD_2
eyes_prop_ep ='3D Glasses'
elif i > 930000:
eyes = VR_2
eyes_prop_ep ='VR'
elif i > 880000:
eyes = ClassicShades_2
eyes_prop_ep ='Classic Shades'
elif i > 830000:
eyes = SmallShades_2
eyes_prop_ep ='Small Shades'
elif i > 780000:
eyes = EyePatch_2
eyes_prop_ep ='Eye Patch'
elif i > 730000:
eyes = NerdGlasses_2
eyes_prop_ep ='Nerd Glasses'
elif i > 680000:
eyes = BigShades_2
eyes_prop_ep ='Big Shades'
elif i > 650000:
eyes = EyeMask_2
eyes_prop_ep ='Eye Mask'
elif i > 600000:
eyes = HornedRimGlasses_2
eyes_prop_ep ='Horned Rim Glasses'
elif i > 550000:
eyes = RegularShades_2
eyes_prop_ep ='Regular Shades'
else:
eyes = none
eyes_prop_ep ='None'
if tete == 99:
eyes = none
eyes_prop_ep ='None'
seed(i)
j=randint(0,1000000)
if j > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
seed(j)
k=randint(0,1000000)
if k > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif k > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(k)
l=randint(0,1000000)
if l > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif l > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
elif l > 870000:
blemishes = SCARE_1
blemishe_ep = 'Scare'
else:
blemishes = none
blemishe_ep = 'None'
DWARF_4=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,SK1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,SK1,SK1,FR1,FR1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,SK1,FR1,FR1,FR1,SK1,HR1,HR1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,SK1,SK1,SK1,HR1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,SK1,HR1,HR1,HR1,SK1,SK1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = DWARF_4
elif b > 420000:
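# Stonefoots: same trait pipeline, rendered with the DWARF_5 sprite.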
race_ep = 'Dwarves'
type_ep = 'Stonefoots'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
# tooth color (DE1) is a plain RGB value, not an overlay layer, so there is nothing to reset
mouth = none
facial_hair = none
rod = none
mouth_prop = none
# eye color (EY1) likewise is a plain RGB value with no overlay layer to reset
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 800000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 200000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_2
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
HR0 = (255,193,0)
HR8 = (251,114,7)
HR3 = (210,154,0)
HR4 = (166,165,165)
HR5 = (165,108,0)
HR6 = (111,0,48)
HR7 = (85,57,23)
if e > 860000:
HR1 = (50,50,50) #Ok
HR2 = (200,200,200)
hair_color_ep = 'Dark Grey & Silver'
elif e > 720000:
HR1 = HR8
HR2 = (111,0,48) #ok
hair_color_ep = 'Orange & Black Rose'
elif e > 580000:
HR1 = HR3 #ok
HR2 = (210,210,0)
hair_color_ep = 'Fair & Wattle'
elif e > 440000:
HR1 = (80,50,30) #Ok
HR2 = (44,4,9)
hair_color_ep = 'Bronze & Chocolate'
elif e > 300000:
HR1 = HR5
HR2 = HR3
hair_color_ep = 'Ginger & Fair'
elif e > 150000:
HR1 = (220,130,0) #ok
HR2 = (70,40,10)
hair_color_ep = 'Mango & Brown'
else:
HR1 = (210,210,210) #Ok
HR2 = (210,210,210)
hair_color_ep = 'Grey Goose'
seed(e)
f=randint(0,1000000)
seed(f)
g=randint(0,1000000)
if g > 960000:
hair_prop = CAP_2
hair_prop_ep = 'Cap'
elif g > 940000:
hair_prop = COWBOY_2
hair_prop_ep = 'Cowboy Hat'
elif g > 920000:
hair_prop = TOPHAT_2
hair_prop_ep = 'Top Hat'
elif g > 890000:
hair_prop = Helmet
hair_prop_ep = 'Dwarf Helmet'
tete = 99
elif g > 870000:
hair_prop = FORCAP_2
hair_prop_ep = 'Cap Forward'
elif g > 850000:
hair_prop = BANDANA_2
hair_prop_ep = 'Bandana'
elif g > 830000:
hair_prop = FEDORA_2
hair_prop_ep = 'Fedora'
elif g > 800000:
hair_prop = POLICE_2
hair_prop_ep = 'Police'
else:
hair_prop = none
hair_prop_ep = 'None'
seed(g)
h=randint(0,1000000)
if h > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif h > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif h > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
# seed(h)
# i=randint(0,1000000)
# if i > 300000:
# EY1 = (255,255,255)
# elif i > 50000:
# EY1 = (0,0,255)
# else:
# EY1 = (0,255,0)
seed(h)
i=randint(0,1000000)
if i > 970000:
eyes = TD_2
eyes_prop_ep ='3D Glasses'
elif i > 930000:
eyes = VR_2
eyes_prop_ep ='VR'
elif i > 880000:
eyes = ClassicShades_2
eyes_prop_ep ='Classic Shades'
elif i > 830000:
eyes = SmallShades_2
eyes_prop_ep ='Small Shades'
elif i > 780000:
eyes = EyePatch_2
eyes_prop_ep ='Eye Patch'
elif i > 730000:
eyes = NerdGlasses_2
eyes_prop_ep ='Nerd Glasses'
elif i > 680000:
eyes = BigShades_2
eyes_prop_ep ='Big Shades'
elif i > 650000:
eyes = EyeMask_2
eyes_prop_ep ='Eye Mask'
elif i > 600000:
eyes = HornedRimGlasses_2
eyes_prop_ep ='Horned Rim Glasses'
elif i > 550000:
eyes = RegularShades_2
eyes_prop_ep ='Regular Shades'
else:
eyes = none
eyes_prop_ep ='None'
if tete == 99:
eyes = none
eyes_prop_ep ='None'
seed(i)
j=randint(0,1000000)
if j > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
seed(j)
k=randint(0,1000000)
if k > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif k > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(k)
l=randint(0,1000000)
if l > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif l > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
elif l > 870000:
blemishes = SCARE_1
blemishe_ep = 'Scare'
else:
blemishes = none
blemishe_ep = 'None'
DWARF_5=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,SK1,SK1,SK1,HR1,HR1,HR1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SC1,SC1,HR1,SK1,HR1,SC1,SC1,HR1,SK1,HR1,FR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,HR1,SK1,FR1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,FR1,FR1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,FR1,BG1,HR1,HR1,HR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SK1,SK1,FR1,FR1,SK1,SK1,SK1,SK1,HR1,FR1,BG1,HR1,HR1,HR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SK1,SK1,HR2,HR2,SK1,SK1,SK1,HR1,HR1,FR1,BG1,HR1,HR1,HR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR2,HR2,HR2,HR2,HR2,HR1,HR1,HR1,FR1,BG1,HR1,HR1,HR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR2,HR2,FR1,FR1,FR1,HR2,HR2,HR1,HR1,FR1,BG1,HR1,HR1,HR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR2,HR2,HR1,HR2,HR2,HR2,HR1,HR2,HR2,HR1,FR1,BG1,HR1,HR1,HR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR2,HR2,HR1,HR2,HR2,HR2,HR2,HR2,HR1,HR2,HR2,FR1,BG1,HR1,HR1,HR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR2,BG1,HR2,HR2,HR2,HR1,HR2,HR2,HR2,HR1,HR2,FR1,BG1,HR1,HR1,HR1,HR1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR2,BG1,HR2,HR2,BG1,BG1,BG1,HR2,HR2,HR1,HR2,FR1,BG1,BG1,HR1,HR1,HR1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,HR2,FR2,FR2,FR2,FR2,FR1,HR2,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = DWARF_5
elif b > 410000:
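# Ironfists: same trait pipeline, rendered with the DWARF_6 sprite.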
race_ep = 'Dwarves'
type_ep = 'Ironfists'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
# tooth color (DE1) is a plain RGB value, not an overlay layer, so there is nothing to reset
mouth = none
facial_hair = none
rod = none
mouth_prop = none
# eye color (EY1) likewise is a plain RGB value with no overlay layer to reset
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 800000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 200000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_2
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
HR0 = (255,193,0)
HR8 = (251,114,7)
HR3 = (210,154,0)
HR4 = (166,165,165)
HR5 = (165,108,0)
HR6 = (111,0,48)
HR7 = (85,57,23)
if e > 860000:
HR1 = (50,50,50) #Ok
HR2 = (200,200,200)
hair_color_ep = 'Dark Grey & Silver'
elif e > 720000:
HR1 = HR8
HR2 = (111,0,48) #ok
hair_color_ep = 'Orange & Black Rose'
elif e > 580000:
HR1 = HR3 #ok
HR2 = (210,210,0)
hair_color_ep = 'Fair & Wattle'
elif e > 440000:
HR1 = (80,50,30) #Ok
HR2 = (44,4,9)
hair_color_ep = 'Bronze & Chocolate'
elif e > 300000:
HR1 = HR5
HR2 = HR3
hair_color_ep = 'Ginger & Fair'
elif e > 150000:
HR1 = (220,130,0) #ok
HR2 = (70,40,10)
hair_color_ep = 'Mango & Brown'
else:
HR1 = (210,210,210) #Ok
HR2 = (210,210,210)
hair_color_ep = 'Grey Goose'
seed(e)
f=randint(0,1000000)
seed(f)
g=randint(0,1000000)
if g > 960000:
hair_prop = CAP_2
hair_prop_ep = 'Cap'
elif g > 940000:
hair_prop = COWBOY_2
hair_prop_ep = 'Cowboy Hat'
elif g > 920000:
hair_prop = TOPHAT_2
hair_prop_ep = 'Top Hat'
elif g > 890000:
hair_prop = Helmet
hair_prop_ep = 'Dwarf Helmet'
tete = 99
elif g > 870000:
hair_prop = FORCAP_2
hair_prop_ep = 'Cap Forward'
elif g > 850000:
hair_prop = BANDANA_2
hair_prop_ep = 'Bandana'
elif g > 830000:
hair_prop = FEDORA_2
hair_prop_ep = 'Fedora'
elif g > 800000:
hair_prop = POLICE_2
hair_prop_ep = 'Police'
else:
hair_prop = none
hair_prop_ep = 'None'
seed(g)
h=randint(0,1000000)
if h > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif h > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif h > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
# seed(h)
# i=randint(0,1000000)
# if i > 300000:
# EY1 = (255,255,255)
# elif i > 50000:
# EY1 = (0,0,255)
# else:
# EY1 = (0,255,0)
seed(h)
i=randint(0,1000000)
if i > 970000:
eyes = TD_2
eyes_prop_ep ='3D Glasses'
elif i > 930000:
eyes = VR_2
eyes_prop_ep ='VR'
elif i > 880000:
eyes = ClassicShades_2
eyes_prop_ep ='Classic Shades'
elif i > 830000:
eyes = SmallShades_2
eyes_prop_ep ='Small Shades'
elif i > 780000:
eyes = EyePatch_2
eyes_prop_ep ='Eye Patch'
elif i > 730000:
eyes = NerdGlasses_2
eyes_prop_ep ='Nerd Glasses'
elif i > 680000:
eyes = BigShades_2
eyes_prop_ep ='Big Shades'
elif i > 650000:
eyes = EyeMask_2
eyes_prop_ep ='Eye Mask'
elif i > 600000:
eyes = HornedRimGlasses_2
eyes_prop_ep ='Horned Rim Glasses'
elif i > 550000:
eyes = RegularShades_2
eyes_prop_ep ='Regular Shades'
else:
eyes = none
eyes_prop_ep ='None'
if tete == 99:
eyes = none
eyes_prop_ep ='None'
seed(i)
j=randint(0,1000000)
if j > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
seed(j)
k=randint(0,1000000)
if k > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif k > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(k)
l=randint(0,1000000)
if l > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif l > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
elif l > 870000:
blemishes = SCARE_1
blemishe_ep = 'Scare'
else:
blemishes = none
blemishe_ep = 'None'
DWARF_6=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,HR2,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR2,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,HR1,HR2,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR2,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR2,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR2,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR2,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR2,HR1,FR1,SK1,SK1,FR1,FR1,SK1,SK1,SK1,SK1,SK1,HR2,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR2,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,HR2,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR2,BG1,FR1,SK1,SK1,HR1,HR1,HR1,SK1,SK1,SK1,SK1,FR1,HR1,HR2,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR2,HR1,BG1,FR1,SK1,HR1,FR1,FR1,FR1,HR1,SK1,SK1,SK1,FR1,BG1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,BG1,BG1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR2,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR2,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR2,HR1,HR2,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR2,HR1,HR2,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR2,HR1,HR2,HR1,HR1,HR1,HR1,BG1,HR1,HR1,HR1,HR1,HR2,HR1,HR2,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,HR1,HR2,HR1,FR2,HR1,HR1,FR2,FR2,FR1,HR1,HR1,SK1,HR1,HR2,HR1,FR2,FR2,FR2,FR2]
]
pixels = DWARF_6
elif b > 400000:
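# Longbeards: same trait pipeline, rendered with the DWARF_7 sprite.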
race_ep = 'Dwarves'
type_ep = 'Longbeards'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
# tooth color (DE1) is a plain RGB value, not an overlay layer, so there is nothing to reset
mouth = none
facial_hair = none
rod = none
mouth_prop = none
# eye color (EY1) likewise is a plain RGB value with no overlay layer to reset
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 800000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 200000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_2
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
HR0 = (255,193,0)
HR8 = (251,114,7)
HR3 = (210,154,0)
HR4 = (166,165,165)
HR5 = (165,108,0)
HR6 = (111,0,48)
HR7 = (85,57,23)
if e > 860000:
HR1 = (50,50,50) #Ok
HR2 = (200,200,200)
hair_color_ep = 'Dark Grey & Silver'
elif e > 720000:
HR1 = HR8
HR2 = (111,0,48) #ok
hair_color_ep = 'Orange & Black Rose'
elif e > 580000:
HR1 = HR3 #ok
HR2 = (210,210,0)
hair_color_ep = 'Fair & Wattle'
elif e > 440000:
HR1 = (80,50,30) #Ok
HR2 = (44,4,9)
hair_color_ep = 'Bronze & Chocolate'
elif e > 300000:
HR1 = HR5
HR2 = HR3
hair_color_ep = 'Ginger & Fair'
elif e > 150000:
HR1 = (220,130,0) #ok
HR2 = (70,40,10)
hair_color_ep = 'Mango & Brown'
else:
HR1 = (210,210,210) #Ok
HR2 = (210,210,210)
hair_color_ep = 'Grey Goose'
seed(e)
f=randint(0,1000000)
seed(f)
g=randint(0,1000000)
if g > 960000:
hair_prop = CAP_2
hair_prop_ep = 'Cap'
elif g > 940000:
hair_prop = COWBOY_2
hair_prop_ep = 'Cowboy Hat'
elif g > 920000:
hair_prop = TOPHAT_2
hair_prop_ep = 'Top Hat'
elif g > 890000:
hair_prop = Helmet
hair_prop_ep = 'Dwarf Helmet'
tete = 99
elif g > 870000:
hair_prop = FORCAP_2
hair_prop_ep = 'Cap Forward'
elif g > 850000:
hair_prop = BANDANA_2
hair_prop_ep = 'Bandana'
elif g > 830000:
hair_prop = FEDORA_2
hair_prop_ep = 'Fedora'
elif g > 800000:
hair_prop = POLICE_2
hair_prop_ep = 'Police'
else:
hair_prop = none
hair_prop_ep = 'None'
seed(g)
h=randint(0,1000000)
if h > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif h > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif h > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
# seed(h)
# i=randint(0,1000000)
# if i > 300000:
# EY1 = (255,255,255)
# elif i > 50000:
# EY1 = (0,0,255)
# else:
# EY1 = (0,255,0)
seed(h)
i=randint(0,1000000)
if i > 970000:
eyes = TD_2
eyes_prop_ep ='3D Glasses'
elif i > 930000:
eyes = VR_2
eyes_prop_ep ='VR'
elif i > 880000:
eyes = ClassicShades_2
eyes_prop_ep ='Classic Shades'
elif i > 830000:
eyes = SmallShades_2
eyes_prop_ep ='Small Shades'
elif i > 780000:
eyes = EyePatch_2
eyes_prop_ep ='Eye Patch'
elif i > 730000:
eyes = NerdGlasses_2
eyes_prop_ep ='Nerd Glasses'
elif i > 680000:
eyes = BigShades_2
eyes_prop_ep ='Big Shades'
elif i > 650000:
eyes = EyeMask_2
eyes_prop_ep ='Eye Mask'
elif i > 600000:
eyes = HornedRimGlasses_2
eyes_prop_ep ='Horned Rim Glasses'
elif i > 550000:
eyes = RegularShades_2
eyes_prop_ep ='Regular Shades'
else:
eyes = none
eyes_prop_ep ='None'
if tete == 99:
eyes = none
eyes_prop_ep ='None'
seed(i)
j=randint(0,1000000)
if j > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
seed(j)
k=randint(0,1000000)
if k > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif k > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(k)
l=randint(0,1000000)
if l > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif l > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
elif l > 870000:
blemishes = SCARE_1
blemishe_ep = 'Scare'
else:
blemishes = none
blemishe_ep = 'None'
DWARF_7=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,SK1,HR1,HR1,HR1,HR1,SK1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR2,SK1,SK1,SK1,HR1,HR1,SK1,SK1,SK1,SK1,HR2,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR2,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR2,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR2,HR1,HR2,SK1,SK1,SK1,SK1,SK1,HR2,SK1,SK1,HR1,HR2,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR2,HR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,HR1,HR2,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR2,HR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,HR1,HR2,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR2,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR2,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR2,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR2,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR2,HR1,HR1,SK1,SK1,FR1,FR1,SK1,SK1,SK1,SK1,HR2,HR1,HR1,HR2,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR2,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR2,HR1,HR1,HR2,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR2,HR1,HR1,SK1,HR2,HR2,HR2,HR2,HR2,SK1,HR2,HR2,HR1,HR1,HR2,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR2,HR1,HR1,SK1,HR2,FR1,FR1,FR1,HR2,SK1,HR2,HR2,HR1,HR1,HR2,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR2,HR1,HR1,HR2,HR2,HR2,HR2,HR2,HR2,HR2,HR2,SK1,HR1,HR1,HR2,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR2,HR1,HR1,FR1,HR2,SK1,HR2,SK1,HR2,SK1,SK1,SK1,HR1,HR1,HR2,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR2,HR1,HR1,HR1,HR2,FR1,FR1,FR1,FR1,FR1,HR2,SK1,SK1,HR1,HR1,HR1,HR2,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR2,HR1,BG1,HR2,BG1,BG1,BG1,BG1,BG1,FR1,SK1,HR2,SK1,FR1,BG1,HR1,HR2,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = DWARF_7
elif b > 250000:
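# Gobelins: a different trait set from the dwarves, with four skin tones, neck chains,
# a tooth color roll (DE1) and an eye color roll (EY1), rendered with the GOBELIN sprite.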
race_ep = 'Gobelins'
type_ep = 'None'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
# tooth color (DE1) is a plain RGB value, not an overlay layer, so there is nothing to reset
mouth = none
facial_hair = none
rod = none
mouth_prop = none
# eye color (EY1) likewise is a plain RGB value with no overlay layer to reset
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 800000:
SK1 = (112,168,104) # zombie green
SC1 = (88,117,83)
MO1 = SC1
SCR1 = SC1
skin_ep = 'Green'
elif c > 700000:
SK1 = (145,0,185) #PURPLE
SC1 = (120,0,160)
MO1 = SC1
SCR1 = SC1
skin_ep = 'Purple'
elif c > 400000:
SK1 = (185,160,60) # camel
SC1 = (150,125,25)
MO1 = SC1
SCR1 = SC1
skin_ep = 'Camel'
else:
SK1 = (205,205,57) # yellow
SC1 = (130,119,23)
MO1 = SC1
SCR1 = SC1
skin_ep = 'Wattle'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_2
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
if e > 960000:
hair_prop = CAP_2
hair_prop_ep = 'Cap'
elif e > 940000:
hair_prop = COWBOY_5
hair_prop_ep = 'Cowboy Hat'
elif e > 920000:
hair_prop = TOPHAT_2
hair_prop_ep = 'Top Hat'
elif e > 870000:
hair_prop = KNITTED_5
hair_prop_ep = 'Knitted Cap'
elif e > 850000:
hair_prop = Gobelin_Crown
hair_prop_ep = 'Gobelins Crown'
elif e > 830000:
hair_prop = FORCAP_2
hair_prop_ep = 'Cap Forward'
elif e > 800000:
hair_prop = BANDANA_2
hair_prop_ep = 'Bandana'
elif e > 780000:
hair_prop = FEDORA_5
hair_prop_ep = 'Fedora'
elif e > 750000:
hair_prop = POLICE_2
hair_prop_ep = 'Police'
elif e > 740000:
hair_prop = BEANI_2
hair_prop_ep = 'Beanie'
else:
hair_prop = none
hair_prop_ep = 'None'
seed(e)
f=randint(0,1000000)
if f > 900000:
neck = GoldChain_1
neck_ep = 'Gold Chain'
elif f > 820000:
neck = SilverChain_1
neck_ep = 'Silver Chain'
elif f > 800000:
neck = RING_1
neck_ep = 'Ring Onchain'
else:
neck = none
neck_ep = 'None'
seed(f)
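# g picks the tooth color DE1, which is painted directly into the GOBELIN sprite's mouth row below.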
g=randint(0,1000000)
if g > 300000:
DE1 = (255,255,255)
tooth_color_ep = 'White'
elif g > 200000:
DE1 = (163,110,16)
tooth_color_ep = 'Brown'
elif g > 80000:
DE1 = (255,203,0)
tooth_color_ep = 'Gold'
else:
DE1 = (200,0,0)
tooth_color_ep = 'Blood'
seed(g)
h=randint(0,1000000)
if h > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif h > 880000:
mouth_prop = MASK_1
mouth_prop_ep = 'Medical Mask'
elif h > 820000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif h > 780000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 400000:
EY1 = (255,255,255)
eyes_color_ep = 'White'
elif i > 300000:
EY1 = (214,92,26)
eyes_color_ep = 'Orange'
elif i > 200000:
EY1 = (176,61,133)
eyes_color_ep = 'Purple'
elif i > 100000:
EY1 = (255,255,0)
eyes_color_ep = 'Yellow'
else:
EY1 = (255,0,0)
eyes_color_ep = 'Red'
seed(i)
j=randint(0,1000000)
if j > 970000:
eyes = TD_2
eyes_prop_ep ='3D Glasses'
elif j > 930000:
eyes = VR_2
eyes_prop_ep ='VR'
elif j > 880000:
eyes = ClassicShades_2
eyes_prop_ep ='Classic Shades'
elif j > 830000:
eyes = SmallShades_2
eyes_prop_ep ='Small Shades'
elif j > 780000:
eyes = EyePatch_2
eyes_prop_ep ='Eye Patch'
elif j > 730000:
eyes = NerdGlasses_2
eyes_prop_ep ='Nerd Glasses'
elif j > 680000:
eyes = BigShades_2
eyes_prop_ep ='Big Shades'
elif j > 650000:
eyes = EyeMask_2
eyes_prop_ep ='Eye Mask'
elif j > 600000:
eyes = HornedRimGlasses_2
eyes_prop_ep ='Horned Rim Glasses'
elif j > 550000:
eyes = RegularShades_2
eyes_prop_ep ='Regular Shades'
else:
eyes = none
eyes_prop_ep ='None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
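# ROSY_1 adds RC1 rosy-cheek pixels under both eyes.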
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(j)
k=randint(0,1000000)
if k > 970000:
blemishes = MOLE
blemishe_ep = 'Mole'
elif k > 940000:
blemishes = SCARE_1
blemishe_ep = 'Scar'
else:
blemishes = none
blemishe_ep = 'None'
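# Base 24x24 Gobelin sprite. Palette keys set above: FR2 outer frame, BG1
# background, FR1 outline, SK1 skin, SC1 brow shading, EY1 eyes, DE1 tooth.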
GOBELIN=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,FR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,FR1,SK1,FR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,SK1,FR1,SK1,FR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,SK1,FR1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,DE1,SK1,SK1,DE1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,FR1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = GOBELIN
elif b > 150000:
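# Orc branch: fires for b > 150000 once the bands above are exhausted. Every
# trait layer and label is cleared first, then re-rolled with Orc assets.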
race_ep = 'Orcs'
type_ep = 'None'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 850000:
SK1 = (112,112,112) #grey
SC1 = (64,64,64)
MO1 = SC1
SCR1 = SC1
skin_ep = 'Smokey Grey'
elif c > 600000:
SK1 = (220,220,220) #brown
SC1 = (180,180,180)
MO1 = SC1
SCR1 = SC1
skin_ep = 'Moon Grey'
elif c > 100000:
SK1 = (180,145,115) #Sand
SC1 = (120,100,60)
MO1 = SC1
SCR1 = SC1
skin_ep = 'Sand'
else:
SK1 = (153,0,0) #red
SC1 = (102,0,0)
MO1 = SC1
SCR1 = SC1
skin_ep = 'Red'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_1
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
if e > 960000:
hair_prop = CAP_2
hair_prop_ep = 'Cap'
elif e > 940000:
hair_prop = COWBOY_4
hair_prop_ep = 'Cowboy Hat'
elif e > 920000:
hair_prop = TOPHAT_2
hair_prop_ep = 'Top Hat'
elif e > 870000:
hair_prop = KNITTED_6
hair_prop_ep = 'Knitted Cap'
elif e > 860000:
hair_prop = HEADBAND_2
hair_prop_ep = 'Headband'
elif e > 830000:
hair_prop = FORCAP_2
hair_prop_ep = 'Cap Forward'
elif e > 800000:
hair_prop = BANDANA_2
hair_prop_ep = 'Bandana'
elif e > 780000:
hair_prop = FEDORA_2
hair_prop_ep = 'Fedora'
elif e > 750000:
hair_prop = POLICE_2
hair_prop_ep = 'Police'
elif e > 740000:
hair_prop = BEANI_2
hair_prop_ep = 'Beanie'
elif e > 700000:
hair_prop = ORC_HELMET
hair_prop_ep = 'Orc Helmet'
tonton = 99
else:
hair_prop = none
hair_prop_ep = 'None'
seed(e)
f=randint(0,1000000)
if f > 900000:
neck = GoldChain_1
neck_ep = 'Gold Chain'
elif f > 820000:
neck = SilverChain_1
neck_ep = 'Silver Chain'
elif f > 800000:
neck = RING_1
neck_ep = 'Ring Onchain'
else:
neck = none
neck_ep = 'None'
seed(f)
g=randint(0,1000000)
if g > 300000:
DE1 = (255,255,255)
tooth_color_ep = 'White'
elif g > 200000:
DE1 = (163,110,16)
tooth_color_ep = 'Brown'
elif g > 80000:
DE1 = (255,203,0)
tooth_color_ep = 'Gold'
else:
DE1 = (200,0,0)
tooth_color_ep = 'Blood'
seed(g)
h=randint(0,1000000)
if h > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif h > 880000:
mouth_prop = MASK_1
mouth_prop_ep = 'Medical Mask'
elif h > 820000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif h > 780000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 400000:
EY1 = (255,255,255)
eyes_color_ep = 'White'
elif i > 300000:
EY1 = (214,92,26)
eyes_color_ep = "Orange"
elif i > 200000:
EY1 = (176,61,133)
eyes_color_ep = "Purple"
elif i > 100000:
EY1 = (255,255,0)
eyes_color_ep = 'Yellow'
else:
EY1 = (255,0,0)
eyes_color_ep = 'Red'
seed(i)
j=randint(0,1000000)
if j > 970000:
eyes = TD_2
eyes_prop_ep ='3D Glasses'
elif j > 930000:
eyes = VR_2
eyes_prop_ep ='VR'
elif j > 880000:
eyes = ClassicShades_2
eyes_prop_ep ='Classic Shades'
elif j > 830000:
eyes = SmallShades_2
eyes_prop_ep ='Small Shades'
elif j > 780000:
eyes = EyePatch_2
eyes_prop_ep ='Eye Patch'
elif j > 730000:
eyes = BigShades_2
eyes_prop_ep ='Big Shades'
elif j > 700000:
eyes = EyeMask_2
eyes_prop_ep ='Eye Mask'
elif j > 650000:
eyes = HornedRimGlasses_2
eyes_prop_ep ='Horned Rim Glasses'
elif j > 600000:
eyes = RegularShades_2
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
tantan = 99
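# Conflict guard: if the Orc Helmet was rolled (tonton == 99) together with any
# eyewear (tantan stays != 99, since tantan is only set when no eyewear is
# chosen), the eyewear is dropped so the two sprites do not overlap. Both flags
# are assumed to be initialised earlier in the script.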
if tonton == 99 and tantan != 99:
eyes = none
eyes_prop_ep = 'None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(j)
k=randint(0,1000000)
if k > 970000:
blemishes = MOLE
blemishe_ep = 'Mole'
else:
blemishes = none
blemishe_ep = 'None'
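# Base 24x24 Orc sprite; same palette keys as the Gobelin grid above, with the
# tusks drawn in DE1.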
ORC=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,FR1,SK1,SK1,SK1,FR1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,SK1,SK1,FR1,SK1,FR1,SK1,SK1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,SK1,SK1,SK1,FR1,SK1,SK1,FR1,BG1,BG1,BG1,FR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,FR1,FR1,SK1,FR1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,FR1,FR1,SK1,SK1,FR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,SK1,SK1,FR1,FR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,SK1,FR1,SK1,SK1,SK1,SK1,FR1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,DE1,SK1,SK1,DE1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = ORC
elif b > 135000:
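# White Wizard branch: 135000 < b <= 150000, about 1.5% of draws assuming b is
# uniform on 0..1000000 like the other rolls.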
race_ep = 'Wizards'
type_ep = 'White'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 750000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 250000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_1
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
if e > 750000:
HR1 = (140,140,140)
hair_color_ep = 'Granite'
elif e > 500000:
HR1 = (90,90,90)
hair_color_ep = 'Carbon Grey'
elif e > 250000:
HR1 = (240,240,240)
hair_color_ep = 'Seashell'
else:
HR1 = (190,190,190)
hair_color_ep = 'Silver'
seed(e)
f=randint(0,1000000)
if f > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif f > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif f > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(f)
g=randint(0,1000000)
if g > 950000:
hair_prop = COWBOY_7
hair_prop_ep = 'Cowboy Hat'
elif g > 900000:
hair_prop = TOPHAT_7
hair_prop_ep = 'Top Hat'
elif g > 850000:
hair_prop = KNITTED_7
hair_prop_ep = 'Knitted Cap'
elif g > 800000:
hair_prop = FORCAP_7
hair_prop_ep = 'Cap Forward'
elif g > 750000:
hair_prop = FEDORA_7
hair_prop_ep = 'Fedora'
elif g > 700000:
hair_prop = BANDANA_7
hair_prop_ep = 'Bandana'
elif g > 650000:
hair_prop = POLICE_7
hair_prop_ep = 'Police'
elif g > 600000:
hair_prop = CAP_7
hair_prop_ep = 'Cap'
else:
hair_prop = none
hair_prop_ep = 'None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(g)
h=randint(0,1000000)
if h > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif h > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
else:
blemishes = none
blemishe_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 970000:
eyes = TD_1
eyes_prop_ep ='3D Glasses'
elif i > 930000:
eyes = VR_1
eyes_prop_ep ='VR'
elif i > 880000:
eyes = ClassicShades_1
eyes_prop_ep ='Classic Shades'
elif i > 830000:
eyes = EyePatch_1
eyes_prop_ep ='Eye Patch'
elif i > 780000:
eyes = NerdGlasses_1
eyes_prop_ep ='Nerd Glasses'
elif i > 730000:
eyes = BigShades_1
eyes_prop_ep ='Big Shades'
elif i > 700000:
eyes = EyeMask_1
eyes_prop_ep ='Eye Mask'
elif i > 650000:
eyes = HornedRimGlasses_1
eyes_prop_ep ='Horned Rim Glasses'
elif i > 600000:
eyes = RegularShades_1
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
seed(i)
j=randint(0,1000000)
if j > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
seed(j)
k=randint(0,1000000)
if k > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif k > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
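# White Wizard base sprite: HR1 forms the long hair and beard framing the SK1
# face.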
WIZ_WHITE=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,BG1,BG1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,SK1,SK1,HR1,HR1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,FR1,FR1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,HR1,HR1,HR1,HR1,HR1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,FR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,FR1,HR1,HR1,HR1,FR1,FR1,FR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,FR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR1,FR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,FR1,FR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,FR1,HR1,HR1,FR1,FR1,FR1,FR1,SK1,FR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,FR1,FR2,FR1,FR2,FR2,FR2,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = WIZ_WHITE
elif b > 110000:
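# Grey Wizard branch: 110000 < b <= 135000 (~2.5% of draws).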
race_ep = 'Wizards'
type_ep = 'Grey'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 750000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 250000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_1
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
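# CH1/CH2 colour the two-tone wizard hat, HR1 the hair and BR1 the beard in the
# sprite below; nr is assumed to be a black RGB constant defined earlier in the
# script.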
if e > 750000:
CH1 = nr
CH2= (130,130,130)
HR1 = (160,160,160)
BR1 = (190,190,190)
hair_color_ep = 'Black & Granite'
elif e > 500000:
CH2 = (10,10,10)
CH1= (50,50,50)
HR1 = (160,160,160)
BR1 = (190,190,190)
hair_color_ep = 'Dark Grey & Black'
elif e > 250000:
CH1 = (130,130,130)
CH2= (230,230,230)
HR1 = (160,160,160)
BR1 = (190,190,190)
hair_color_ep = 'Granite & Seashell'
else:
CH1 = (50,50,50)
CH2= (200,200,200)
HR1 = (160,160,160)
BR1 = (190,190,190)
hair_color_ep = 'Dark Grey & Silver'
seed(e)
f=randint(0,1000000)
if f > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif f > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif f > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(f)
g=randint(0,1000000)
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(g)
h=randint(0,1000000)
if h > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif h > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
else:
blemishes = none
blemishe_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 970000:
eyes = TD_1
eyes_prop_ep ='3D Glasses'
elif i > 930000:
eyes = VR_1
eyes_prop_ep ='VR'
elif i > 880000:
eyes = ClassicShades_1
eyes_prop_ep ='Classic Shades'
elif i > 830000:
eyes = EyePatch_1
eyes_prop_ep ='Eye Patch'
elif i > 780000:
eyes = NerdGlasses_1
eyes_prop_ep ='Nerd Glasses'
elif i > 730000:
eyes = BigShades_1
eyes_prop_ep ='Big Shades'
elif i > 700000:
eyes = EyeMask_1
eyes_prop_ep ='Eye Mask'
elif i > 650000:
eyes = HornedRimGlasses_1
eyes_prop_ep ='Horned Rim Glasses'
elif i > 600000:
eyes = RegularShades_1
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
seed(i)
j=randint(0,1000000)
if j > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
seed(j)
k=randint(0,1000000)
if k > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif k > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
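# Grey Wizard base sprite: CH1/CH2 wide-brimmed hat on the top rows, HR1 hair
# at the sides, BR1 beard below the face.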
WIZ_GREY=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,CH1,CH1,CH1,CH1,CH1,CH1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,CH1,CH1,CH1,CH1,CH1,BG1,CH1,CH1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,BG1,BG1,CH1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,BG1,BG1,FR2],
[FR2,BG1,BG1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,FR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,SK1,FR1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,FR1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,HR1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,HR1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,BR1,BR1,BR1,BR1,BR1,SK1,SK1,SK1,FR1,HR1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,FR1,HR1,HR1,HR1,HR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,BR1,BR1,BR1,FR1,FR1,FR1,BR1,BR1,BR1,BR1,BR1,FR1,HR1,HR1,HR1,HR1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,FR1,HR1,HR1,HR1,HR1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,FR1,HR1,HR1,HR1,HR1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,FR1,FR1,BG1,BG1,HR1,HR1,HR1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,BR1,BR1,FR1,FR1,FR1,FR1,SK1,FR1,BG1,BG1,BG1,HR1,HR1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,FR1,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = WIZ_GREY
elif b > 85000:
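# Tower Wizard branch: 85000 < b <= 110000 (~2.5% of draws).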
race_ep = 'Wizards'
type_ep = 'Tower'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 750000:
SK1 = (234,217,217)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 250000:
SK1 = (174,139,97)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_1
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
if e > 750000:
SC1 = (80,80,80)
BR1 = (80,80,80)
HR1 = (160,160,160)
hair_color_ep = 'Grey & Carbon Grey'
elif e > 500000:
SC1 = (30,30,30)
BR1 = (30,30,30)
HR1 = (110,110,110)
hair_color_ep = 'Smokey Grey & Charcoal'
elif e > 250000:
SC1 = (80,80,80)
BR1 = (80,80,80)
HR1 = (235,235,235)
hair_color_ep = 'Seashell & Carbon Grey'
else:
SC1 = (155,155,155)
BR1 = (155,155,155)
HR1 = (235,235,235)
hair_color_ep = 'Seashell & Grey'
seed(e)
f=randint(0,1000000)
if f > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif f > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif f > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(f)
g=randint(0,1000000)
if g > 950000:
hair_prop = COWBOY_7
hair_prop_ep = 'Cowboy Hat'
elif g > 900000:
hair_prop = TOPHAT_7
hair_prop_ep = 'Top Hat'
elif g > 850000:
hair_prop = KNITTED_7
hair_prop_ep = 'Knitted Cap'
elif g > 800000:
hair_prop = FORCAP_7
hair_prop_ep = 'Cap Forward'
elif g > 750000:
hair_prop = FEDORA_7
hair_prop_ep = 'Fedora'
elif g > 700000:
hair_prop = BANDANA_7
hair_prop_ep = 'Bandana'
elif g > 650000:
hair_prop = POLICE_7
hair_prop_ep = 'Police'
elif g > 600000:
hair_prop = CAP_7
hair_prop_ep = 'Cap'
else:
hair_prop = none
hair_prop_ep = 'None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(g)
h=randint(0,1000000)
if h > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif h > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
else:
blemishes = none
blemishe_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 970000:
eyes = TD_1
eyes_prop_ep ='3D Glasses'
elif i > 930000:
eyes = VR_1
eyes_prop_ep ='VR'
elif i > 880000:
eyes = ClassicShades_1
eyes_prop_ep ='Classic Shades'
elif i > 830000:
eyes = EyePatch_1
eyes_prop_ep ='Eye Patch'
elif i > 780000:
eyes = NerdGlasses_1
eyes_prop_ep ='Nerd Glasses'
elif i > 730000:
eyes = BigShades_1
eyes_prop_ep ='Big Shades'
elif i > 700000:
eyes = EyeMask_1
eyes_prop_ep ='Eye Mask'
elif i > 650000:
eyes = HornedRimGlasses_1
eyes_prop_ep ='Horned Rim Glasses'
elif i > 600000:
eyes = RegularShades_1
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
seed(i)
j=randint(0,1000000)
if j > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
seed(j)
k=randint(0,1000000)
if k > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif k > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
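# Tower Wizard base sprite: a tall HR1 hood of hair frames the SK1 face, with
# the BR1 beard tapering below it.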
WIZ_TOWER=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR1,SK1,SK1,HR1,HR1,HR1,SK1,SK1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,SK1,HR1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SC1,SC1,SC1,SK1,SK1,SC1,SC1,SC1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,FR1,FR1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,BR1,BR1,BR1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,FR1,BR1,BR1,BR1,FR1,FR1,FR1,BR1,BR1,BR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,BG1,FR1,BR1,BR1,BR1,BR1,BR1,BR1,FR1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,BG1,BG1,BG1,FR1,BR1,BR1,BR1,BR1,FR1,SK1,SK1,FR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,BR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = WIZ_TOWER
elif b > 60000:
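# Wood Wizard branch: 60000 < b <= 85000 (~2.5% of draws).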
race_ep = 'Wizards'
type_ep = 'Wood'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 750000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 250000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_1
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
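# HR1/HR2 colour the branch-like headwear, BR1 the beard and BR2 the accent
# pixels in the sprite below; BE2 is assumed to be a colour constant defined
# earlier in the script.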
if e > 750000:
HR1 = (160,110,30)
HR2 = (130,60,20)
BR2 = (200,230,180)
BR1 = BE2
hair_color_ep = 'Taupe & Cookie Brown'
elif e > 500000:
HR1 = (130,90,10)
HR2 = (70,50,10)
BR2 = (200,230,180)
hair_color_ep = 'Brown & Cookie Brown'
BR1 = BE2
elif e > 250000:
HR1 = (160,110,30)
HR2 = (130,60,20)
BR2 = (60,200,180)
BR1 = (30,20,5)
hair_color_ep = 'Taupe & Graphite'
else:
HR1 = (130,90,10)
HR2 = (70,50,10)
BR2 = (60,200,180)
BR1 = (30,20,5)
hair_color_ep = 'Brown & Graphite'
seed(e)
f=randint(0,1000000)
if f > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif f > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif f > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(f)
g=randint(0,1000000)
if g > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif g > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(g)
h=randint(0,1000000)
if h > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif h > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
else:
blemishes = none
blemishe_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 970000:
eyes = TD_1
eyes_prop_ep ='3D Glasses'
elif i > 930000:
eyes = VR_1
eyes_prop_ep ='VR'
elif i > 880000:
eyes = ClassicShades_1
eyes_prop_ep ='Classic Shades'
elif i > 830000:
eyes = EyePatch_1
eyes_prop_ep ='Eye Patch'
elif i > 780000:
eyes = NerdGlasses_1
eyes_prop_ep ='Nerd Glasses'
elif i > 730000:
eyes = BigShades_1
eyes_prop_ep ='Big Shades'
elif i > 700000:
eyes = EyeMask_1
eyes_prop_ep ='Eye Mask'
elif i > 650000:
eyes = HornedRimGlasses_1
eyes_prop_ep ='Horned Rim Glasses'
elif i > 600000:
eyes = RegularShades_1
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
seed(i)
j=randint(0,1000000)
if j > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
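# Wood Wizard base sprite: a branch-like HR1/HR2 headpiece over the SK1 face,
# with BR2 accents and a BR1 beard.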
WIZ_WOODEN=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR2,HR2,HR2,HR2,HR2,HR2,HR2,HR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR2,HR2,HR2,HR2,HR2,HR2,HR2,HR2,HR2,HR2,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR2,HR2,HR2,HR1,HR1,HR1,HR1,HR2,HR2,HR2,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,HR1,HR1,HR2,HR2,HR2,HR2,HR1,HR1,HR1,HR1,HR1,HR1,HR2,HR2,HR2,HR2,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,HR1,HR1,HR1,HR1,HR2,HR2,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR2,HR2,HR1,HR1,HR1,HR1,BG1,FR2],
[FR2,BG1,HR1,HR1,HR1,HR1,HR1,HR2,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR2,HR1,HR1,HR1,HR1,HR1,BG1,FR2],
[FR2,BG1,HR1,HR1,HR1,HR1,HR1,BR2,HR2,HR1,HR1,HR1,HR1,HR1,HR1,HR2,HR1,HR1,HR1,HR1,HR1,HR1,BG1,FR2],
[FR2,BG1,HR1,BG1,BG1,HR1,BR2,HR1,HR1,HR1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,HR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,BR2,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BR2,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BR2,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BR2,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,BR1,FR1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BR2,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,BR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,BR2,SK1,FR1,FR1,SK1,SK1,SK1,BR1,BR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,BR1,BR2,SK1,SK1,SK1,SK1,BR1,BR1,BR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,BR2,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,BR1,BR1,FR1,FR1,FR1,BR1,BR1,BR1,BR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,BR1,BR1,BR1,BR1,BR1,FR1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,FR1,FR1,FR1,FR1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = WIZ_WOODEN
elif b > 35000:
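# Blue Wizard branch: 35000 < b <= 60000 (~2.5% of draws).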
race_ep = 'Wizards'
type_ep = 'Blue'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 750000:
HR1 = (30,25,200)
HR2 = (255,218,0)
SK1 = (234,217,217)
SC1 = (190,215,240)
BR1 = (190,215,240)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
skin_ep = 'Albino'
MO1 = EY1
SCR1 = EY1
hair_color_ep = 'Persian Blue'
elif c > 500000:
HR1 = (10,50,100)
HR2 = (216,214,203)
SK1 = (219,177,128)
SC1 = (190,215,240)
BR1 = (190,215,240)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
hair_color_ep = 'Sapphire'
elif c > 250000:
HR1 = (60,10,145)
HR2 = (255,218,0)
SK1 = (174,139,97)
SC1 = (190,215,240)
BR1 = (190,215,240)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
hair_color_ep = 'Indigo'
else:
HR1 = (30,180,220)
HR2 = (216,214,203)
SK1 = (113,63,29)
SC1 = (190,215,240)
BR1 = (190,215,240)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
hair_color_ep = 'Topaz'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_1
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
#if e > 900000:
# neck = GoldChain_1
#elif e > 700000:
# neck = SilverChain_1
#elif e > 500000:
# neck = RING_1
#else:
# neck = none
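# The neck roll above is disabled for Blue Wizards, but e is still drawn and
# reseeded below so the trait chain stays aligned with the other branches.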
seed(e)
f=randint(0,1000000)
if f > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif f > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif f > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(f)
g=randint(0,1000000)
if g > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif g > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(g)
h=randint(0,1000000)
if h > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif h > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
else:
blemishes = none
blemishe_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 970000:
eyes = TD_1
eyes_prop_ep ='3D Glasses'
elif i > 930000:
eyes = VR_1
eyes_prop_ep ='VR'
elif i > 880000:
eyes = ClassicShades_1
eyes_prop_ep ='Classic Shades'
elif i > 830000:
eyes = EyePatch_1
eyes_prop_ep ='Eye Patch'
elif i > 780000:
eyes = NerdGlasses_1
eyes_prop_ep ='Nerd Glasses'
elif i > 730000:
eyes = BigShades_1
eyes_prop_ep ='Big Shades'
elif i > 700000:
eyes = EyeMask_1
eyes_prop_ep ='Eye Mask'
elif i > 650000:
eyes = HornedRimGlasses_1
eyes_prop_ep ='Horned Rim Glasses'
elif i > 600000:
eyes = RegularShades_1
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
seed(i)
j=randint(0,1000000)
if j > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
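# Blue Wizard base sprite: HR1 hat and robe with HR2 trim lines around the SK1
# face, BR1 beard.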
WIZ_BLUE=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR2,HR2,HR2,HR2,HR2,HR2,HR2,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR2,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR2,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR2,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR2,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR2,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR2,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR2,SC1,SC1,SC1,SK1,SK1,SC1,SC1,SC1,SK1,HR2,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR2,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,HR2,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR2,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR2,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR2,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR2,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR2,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR2,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR2,SK1,SK1,FR1,FR1,SK1,SK1,SK1,SK1,SK1,HR2,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR1,HR2,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,HR2,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR1,HR2,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,HR2,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR1,HR2,BR1,BR1,BR1,FR1,FR1,FR1,BR1,BR1,BR1,BR1,BR1,HR2,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR1,HR2,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,HR2,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR1,HR2,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,FR1,FR1,HR2,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR2,BR1,BR1,BR1,BR1,BR1,BR1,BR1,FR1,SK1,HR2,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR2,FR1,FR1,BR1,BR1,BR1,FR1,FR1,SK1,HR2,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,HR1,HR1,HR2,FR2,FR1,BR1,FR1,FR1,FR2,HR2,HR1,HR1,HR1,HR1,FR2,FR2,FR2,FR2]
]
pixels = WIZ_BLUE
elif b > 19000:
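# 'Unknown Male' (Gollum-like) branch: 19000 < b <= 35000 (~1.6% of draws).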
race_ep = 'Unknown'
type_ep = 'Male'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 750000:
SK1 = (250,200,170)
HR1 = (130,130,130)
skin_ep = 'Peach'
elif c > 500000:
SK1 = (200,170,140)
HR1 = (125,110,90)
skin_ep = 'Dust'
elif c > 250000:
SK1 = (240,210,190)
HR1 = (170,150,120)
skin_ep = 'Bone'
else:
SK1 = (195,175,165)
HR1 = (100,95,85)
skin_ep = 'Silk'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_4
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
if e > 950000:
hair_prop = CAP_5
hair_prop_ep = 'Cap'
elif e > 900000:
hair_prop = KNITTED_4
hair_prop_ep = 'Knitted Cap'
elif e > 850000:
hair_prop = HEADBAND_7
hair_prop_ep = 'Headband'
elif e > 800000:
hair_prop = FORCAP_3
hair_prop_ep = 'Cap Forward'
elif e > 750000:
hair_prop = COWBOY_3
hair_prop_ep = 'Cowboy Hat'
elif e > 700000:
hair_prop = TOPHAT_3
hair_prop_ep = 'Top Hat'
else:
hair_prop = none
hair_prop_ep = 'None'
seed(e)
f=randint(0,1000000)
if f > 980000:
neck = RING_3
neck_ep = 'Ring Onchain'
elif f > 880000:
neck = GoldChain_4
neck_ep = 'Gold Chain'
tutu = 99
elif f > 800000:
neck = SilverChain_3
neck_ep = 'Silver Chain'
else:
neck = none
neck_ep = 'None'
seed(f)
g=randint(0,1000000)
if g > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif g > 950000:
mouth = FROWN
mouth_ep = 'Frown'
tyty = 99
else:
mouth = none
mouth_ep = 'None'
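# tutu == 99 marks that a Gold Chain was rolled and tyty == 99 marks a Frown;
# when both are set the neck trait is reset, so Gold Chain and Frown never co-occur.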
if tutu == 99 and tyty == 99:
neck = none
neck_ep = 'None'
seed(g)
h=randint(0,1000000)
if h > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif h > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif h > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 200000:
EY1 = (255,255,255)
eyes_color_ep = 'White'
elif i > 80000:
EY1 = (230,180,100)
eyes_color_ep = 'Peach'
else:
EY1 = (78,154,197)
eyes_color_ep = 'Blue'
seed(i)
j=randint(0,1000000)
if j > 950000:
eyes = ClassicShades_4
eyes_prop_ep ='Classic Shades'
elif j > 900000:
eyes = EyePatch_4
eyes_prop_ep ='Eye Patch'
elif j > 850000:
eyes = RegularShades_4
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
GOLLUN=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SK1,HR1,SK1,HR1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SK1,SK1,HR1,SK1,HR1,SK1,HR1,SK1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SK1,HR1,SK1,SK1,HR1,SK1,HR1,SK1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SK1,HR1,SK1,HR1,SK1,SK1,HR1,SK1,HR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,EY1,EY1,SK1,SK1,SK1,EY1,EY1,SK1,HR1,SK1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,HR1,SK1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SK1,HR1,SK1,SK1,SK1,SK1,HR1,SK1,HR1,SK1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SK1,SK1,SK1,FR1,SK1,SK1,SK1,SK1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,bl,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = GOLLUN
elif b > 10000:
race_ep = 'Wraiths'
type_ep = 'None'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 500000:
SK1 = (50,50,50)
HR1 = (100,100,100)
SC1 = nr
MO1 = nr
skin_ep = 'Dark Grey'
elif c > 400000:
SK1 = (128,128,128)
HR1 = (255,193,7) #OR
SC1 = nr
MO1 = nr
skin_ep = 'Granite'
elif c > 300000:
SK1 = (128,128,128)
HR1 = (200,130,40) #BRONZE
SC1 = nr
MO1 = nr
skin_ep = 'Granite'
elif c > 250000:
SK1 = (142,36,170) #VIOLET
HR1 = (40,5,55)
SC1 = (74,20,140)
MO1 = SC1
skin_ep = 'Eggplant'
else:
SK1 = (128,128,128)
HR1 = (230,230,230)
SC1 = (30,30,30)
MO1 = SC1
skin_ep = 'Granite'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_2
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(d)
e=randint(0,1000000)
if e > 930000:
blemishes = MOLE
blemishe_ep = 'Mole'
else:
blemishes = none
blemishe_ep = 'None'
seed(e)
f=randint(0,1000000)
if f > 900000:
neck = GoldChain_1
neck_ep = 'Gold Chain'
elif f > 820000:
neck = SilverChain_1
neck_ep = 'Silver Chain'
elif f > 800000:
neck = RING_1
neck_ep = 'Ring Onchain'
else:
neck = none
neck_ep = 'None'
seed(f)
g=randint(0,1000000)
seed(g)
h=randint(0,1000000)
if h > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif h > 880000:
mouth_prop = MASK_1
mouth_prop_ep = 'Medical Mask'
elif h > 820000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif h > 780000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 400000:
EY1 = (255,255,255)
EY2 = nr
eyes_color_ep = 'White'
elif i > 300000:
EY1 = (214,92,26)
EY2 = nr
eyes_color_ep = "Orange"
elif i > 200000:
EY1 = (176,61,133)
EY2 = nr
eyes_color_ep = "Purple"
elif i > 100000:
EY1 = (255,255,0)
EY2 = nr
eyes_color_ep = 'Yellow'
else:
EY1 = (255,0,0)
EY2 = nr
eyes_color_ep = 'Red'
seed(i)
j=randint(0,1000000)
if j > 970000:
eyes = TD_2
eyes_prop_ep ='3D Glasses'
elif j > 930000:
eyes = VR_2
eyes_prop_ep ='VR'
elif j > 880000:
eyes = ClassicShades_2
eyes_prop_ep ='Classic Shades'
elif j > 830000:
eyes = SmallShades_2
eyes_prop_ep ='Small Shades'
elif j > 780000:
eyes = EyePatch_2
eyes_prop_ep ='Eye Patch'
elif j > 730000:
eyes = NerdGlasses_2
eyes_prop_ep ='Nerd Glasses'
elif j > 700000:
eyes = EyeMask_2
eyes_prop_ep ='Eye Mask'
elif j > 650000:
eyes = RegularShades_2
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
seed(j)
k=randint(0,1000000)
if k > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
SPECTRE=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,HR1,HR1,HR1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,HR1,FR1,FR1,FR1,HR1,HR1,FR1,FR1,FR1,HR1,HR1,HR1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,HR1,FR1,SK1,SK1,SK1,FR1,FR1,SK1,SK1,SK1,FR1,HR1,HR1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,HR1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,HR1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,FR1,HR1,HR1,FR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,HR1,FR1,EY1,EY2,SK1,SK1,SK1,EY1,EY2,SK1,SK1,SK1,FR1,HR1,FR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,HR1,FR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,HR1,FR1,SK1,SK1,SK1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,FR1,HR1,FR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,HR1,FR1,SK1,SK1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,HR1,HR1,FR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,HR1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,HR1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,HR1,FR1,SK1,SK1,FR1,FR1,FR1,SK1,SK1,SK1,SK1,FR1,HR1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,SK1,FR1,HR1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,SK1,SK1,FR1,HR1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,HR1,FR1,FR1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,HR1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,HR1,HR1,HR1,HR1,FR1,FR1,SK1,FR1,FR1,HR1,FR1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,FR1,FR1,HR1,HR1,HR1,FR1,HR1,HR1,HR1,FR1,FR2,FR2,FR2,FR2]
]
pixels = SPECTRE
elif b > 7000:
race_ep = 'Dark Riders'
type_ep = 'None'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
SK1 = (118,113,113)
SK2 = (191,191,191)
SK3 = (223,223,223)
skin_ep = 'None'
seed(b)
c=randint(0,1000000)
if c > 750000:
ears = EARS_1
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(c)
d=randint(0,1000000)
if d > 900000:
neck = GoldChain_1
neck_ep = 'Gold Chain'
elif d > 820000:
neck = SilverChain_1
neck_ep = 'Silver Chain'
elif d > 800000:
neck = RING_1
neck_ep = 'Ring Onchain'
else:
neck = none
neck_ep = 'None'
seed(d)
e=randint(0,1000000)
if e > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif e > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif e > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(e)
f=randint(0,1000000)
if f > 400000:
EY1 = (255,255,255)
eyes_color_ep = 'White'
elif f > 300000:
EY1 = (214,92,26)
eyes_color_ep = "Orange"
elif f > 200000:
EY1 = (176,61,133)
eyes_color_ep = "Purple"
elif f > 100000:
EY1 = (255,255,0)
eyes_color_ep = 'Yellow'
else:
EY1 = (255,0,0)
eyes_color_ep = 'Red'
seed(f)
g=randint(0,1000000)
if g > 970000:
eyes = TD_1
eyes_prop_ep ='3D Glasses'
elif g > 930000:
eyes = VR_1
eyes_prop_ep ='VR'
elif g > 880000:
eyes = ClassicShades_1
eyes_prop_ep ='Classic Shades'
elif g > 830000:
eyes = EyePatch_1
eyes_prop_ep ='Eye Patch'
elif g > 780000:
eyes = NerdGlasses_1
eyes_prop_ep ='Nerd Glasses'
elif g > 730000:
eyes = BigShades_1
eyes_prop_ep ='Big Shades'
elif g > 700000:
eyes = EyeMask_1
eyes_prop_ep ='Eye Mask'
elif g > 650000:
eyes = HornedRimGlasses_1
eyes_prop_ep ='Horned Rim Glasses'
elif g > 600000:
eyes = RegularShades_1
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
DARK_RIDER=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,FR1,FR1,SK1,FR1,FR1,SK1,SK1,SK1,FR1,FR1,SK1,FR1,FR1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,EY1,SK1,SK1,SK1,FR1,EY1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,FR1,FR1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,FR1,FR1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,FR1,FR1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,FR1,FR1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,FR1,FR1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,FR1,FR1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,FR1,FR1,FR1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = DARK_RIDER
elif b > 1000:
race_ep = 'Daemons'
type_ep = 'None'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
SK1 = (90,90,90)
FR4 = (166,166,166)
FR5 = (225,63,0)
FR3 = (240,114,48)
FR1 = nr
FR2 = bl
seed(b)
c=randint(0,1000000)
seed(c)
d=randint(0,1000000)
if d > 900000:
neck = GoldChain_1
neck_ep = 'Gold Chain'
elif d > 820000:
neck = SilverChain_1
neck_ep = 'Silver Chain'
elif d > 800000:
neck = RING_1
neck_ep = 'Ring Onchain'
else:
neck = none
neck_ep = 'None'
seed(d)
e=randint(0,1000000)
if e > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif e > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif e > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(e)
f=randint(0,1000000)
if f > 500000:
EY1 = bl
eyes_color_ep = 'White'
else:
EY1 = nr
eyes_color_ep = 'Black'
seed(f)
g=randint(0,1000000)
if g > 970000:
eyes = TD_1
eyes_prop_ep ='3D Glasses'
elif g > 930000:
eyes = VR_1
eyes_prop_ep ='VR'
elif g > 880000:
eyes = ClassicShades_1
eyes_prop_ep ='Classic Shades'
elif g > 830000:
eyes = EyePatch_1
eyes_prop_ep ='Eye Patch'
elif g > 780000:
eyes = NerdGlasses_1
eyes_prop_ep ='Nerd Glasses'
elif g > 730000:
eyes = BigShades_1
eyes_prop_ep ='Big Shades'
elif g > 700000:
eyes = EyeMask_1
eyes_prop_ep ='Eye Mask'
elif g > 650000:
eyes = RegularShades_1
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
seed(g)
h=randint(0,1000000)
if h > 750000:
SK1 = (60,60,60)
FR4 = (166,166,166)
FR5 = (225,63,0)
FR3 = (240,160,0)
FR1 = nr
FR2 = bl
skin_ep = 'Dark Grey'
hair_color_ep = 'Orange'
elif h > 500000:
SK1 = (30,30,30)
FR4 = (166,166,166)
FR5 = (225,63,0)
FR3 = (240,160,0)
FR1 = nr
FR2 = bl
skin_ep = 'Charcoal'
hair_color_ep = 'Orange'
elif h > 250000:
SK1 = (60,60,60)
FR4 = (166,166,166)
FR5 = (225,63,0)
FR3 = (240,114,48)
FR1 = nr
FR2 = bl
skin_ep = 'Dark Grey'
hair_color_ep = 'Burning Orange'
else:
SK1 = (30,30,30)
FR4 = (166,166,166)
FR5 = (225,63,0)
FR3 = (240,114,48)
FR1 = nr
FR2 = bl
skin_ep = 'Charcoal'
hair_color_ep = 'Burning Orange'
DEAMON=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR3,FR3,FR3,BG1,BG1,BG1,BG1,BG1,FR3,FR3,FR3,FR3,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR3,FR1,FR1,FR1,FR3,BG1,BG1,BG1,FR3,FR1,FR1,FR1,FR1,FR3,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,FR3,FR1,FR1,FR1,FR1,FR1,FR3,FR3,FR3,FR1,FR1,FR1,FR1,FR1,FR1,FR3,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,FR3,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR3,BG1,BG1,FR2],
[FR2,BG1,FR3,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR3,BG1,FR2],
[FR2,FR3,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR3,FR1,FR1,FR1,FR3,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR3,FR2],
[FR2,FR3,FR1,FR1,FR1,FR1,FR1,FR3,FR3,SK1,FR3,FR1,FR3,SK1,FR3,FR3,FR1,FR1,FR1,FR1,FR1,FR1,FR3,FR2],
[FR2,FR3,FR1,FR1,FR1,FR1,FR3,FR1,SK1,SK1,SK1,FR3,SK1,SK1,SK1,SK1,FR3,FR1,FR1,FR1,FR1,FR1,FR3,FR2],
[FR2,FR3,FR1,FR1,FR3,FR3,BG1,FR1,FR4,FR4,SK1,SK1,SK1,FR4,FR4,SK1,SK1,FR3,FR3,FR3,FR1,FR1,FR3,FR2],
[FR2,FR3,FR1,FR1,FR3,BG1,BG1,FR1,FR5,EY1,SK1,SK1,SK1,FR5,EY1,SK1,SK1,FR1,BG1,FR3,FR1,FR1,FR3,FR2],
[FR2,FR3,FR1,FR1,FR3,BG1,BG1,FR1,SK1,SK1,FR3,SK1,FR3,SK1,SK1,SK1,SK1,FR1,BG1,FR3,FR1,FR1,FR3,FR2],
[FR2,FR3,FR1,FR1,FR3,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,FR3,FR1,FR1,FR3,FR2],
[FR2,BG1,FR3,FR1,FR1,FR3,BG1,FR1,SK1,FR3,FR3,FR3,FR3,FR3,SK1,SK1,SK1,FR1,FR3,FR1,FR1,FR3,BG1,FR2],
[FR2,BG1,BG1,FR3,FR1,FR1,FR3,FR1,SK1,FR3,FR3,FR3,FR3,FR3,SK1,SK1,SK1,FR3,FR1,FR1,FR3,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,FR3,FR1,FR3,FR1,SK1,FR3,FR3,FR3,FR3,FR3,SK1,SK1,SK1,FR3,FR1,FR3,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR3,BG1,FR1,SK1,SK1,FR3,FR3,FR3,SK1,SK1,SK1,SK1,FR1,FR3,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR3,FR3,FR3,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR3,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = DEAMON
else:
race_ep = 'Dark Lord'
type_ep = 'None'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
SK1 = (113,113,113)
SK2 = (160,160,160)
SK3 = (223,223,223)
skin_ep = 'None'
seed(b)
c=randint(0,1000000)
if c > 750000:
ears = EARS_0
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(c)
d=randint(0,1000000)
if d > 700000:
neck = GoldChain_1
neck_ep = 'Gold Chain'
elif d > 400000:
neck = SilverChain_1
neck_ep = 'Silver Chain'
elif d > 100000:
neck = RING_1
neck_ep = 'Ring Onchain'
else:
neck = none
neck_ep = 'None'
seed(d)
e=randint(0,1000000)
if e > 800000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif e > 600000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif e > 400000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(e)
f=randint(0,1000000)
if f > 400000:
EY1 = (255,255,255)
eyes_color_ep = 'White'
elif f > 300000:
EY1 = (214,92,26)
eyes_color_ep = "Orange"
elif f > 200000:
EY1 = (176,61,133)
eyes_color_ep = "Purple"
elif f > 100000:
EY1 = (255,255,0)
eyes_color_ep = 'Yellow'
else:
EY1 = (255,0,0)
eyes_color_ep = 'Red'
DARK_LORD=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR1,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,BG1,BG1,BG1,BG1,BG1,FR1,BG1,BG1,BG1,BG1,BG1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,FR1,BG1,BG1,BG1,BG1,FR1,BG1,BG1,BG1,BG1,FR1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,FR1,BG1,FR1,BG1,BG1,FR1,BG1,BG1,FR1,BG1,FR1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,FR1,BG1,FR1,BG1,BG1,FR1,BG1,BG1,FR1,BG1,FR1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,FR1,BG1,FR1,FR1,BG1,FR1,BG1,FR1,FR1,BG1,FR1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,FR1,FR1,FR1,FR1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,EY1,SK1,SK1,FR1,SK1,FR1,EY1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,EY1,FR1,SK1,SK1,FR1,SK1,EY1,FR1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,SK3,FR1,SK1,SK1,SK1,SK1,FR1,SK1,SK1,SK1,SK1,FR1,SK3,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,SK3,FR1,SK2,SK1,SK1,SK1,FR1,SK1,SK1,SK1,SK2,FR1,SK3,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,SK3,SK1,SK2,SK1,SK1,FR1,SK1,SK1,SK2,SK1,SK3,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK3,SK1,SK2,SK1,FR1,SK1,SK2,SK1,SK3,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK3,SK1,SK2,FR1,SK2,SK1,SK3,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK3,SK1,SK2,FR1,SK2,SK1,SK3,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK3,SK1,SK2,FR1,SK2,SK1,SK3,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK3,SK1,SK2,SK1,FR1,SK1,SK2,SK1,SK3,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,SK3,FR1,SK1,SK2,SK1,FR1,SK1,SK2,SK1,SK1,SK3,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,SK2,SK1,SK1,FR1,SK1,SK1,SK2,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,SK2,BG1,FR1,FR1,FR1,FR1,FR1,SK1,SK2,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = DARK_LORD
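# Lower tail of this pass's rarity ladder: b > 19000 -> Unknown (GOLLUN base),
# b > 10000 -> Wraiths (SPECTRE), b > 7000 -> Dark Riders, b > 1000 -> Daemons,
# and the remaining b <= 1000 -> Dark Lord, the rarest base in the chain.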
newtraitcombo = createCombo()
traits.append(newtraitcombo)
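# First-pass bookkeeping: TR01 counts every generated combination (duplicate
# combos come back from createCombo as None but are still appended), FL01 counts
# the duplicates collected in filterlist1, so RESU1 = TR01 - FL01 is the number
# of unique midpunks found.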
FL01 = len(filterlist1)
TR01 = len(traits)
RESU1 = TR01 - FL01
print(RESU1)
print(FL01)
#########################################
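# createCombo2 mirrors createCombo from the first pass: it assembles the
# attribute dictionary for the current token and returns it, or, when that exact
# combination has already been seen in traits2, records the loop index (it reads
# the global x) in filterlist2 and returns None.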
def createCombo2():
trait = {}
#trait["Name"] = name_ep
trait["Race"] = race_ep
trait["Type"] = type_ep
trait["Skin Tone"] = skin_ep
trait["Ears"] = ears_ep
trait["Hair Color"] = hair_color_ep
trait["Haircut"] = haircut_ep
trait["Hair Prop"] = hair_prop_ep
trait["Neck"] = neck_ep
trait["Facial Hair"] = facial_hair_ep
trait["Mouth Prop"] = mouth_prop_ep
trait["Eyes Color"] = eyes_color_ep
trait["Eyes Prop"] = eyes_prop_ep
trait["Nose"] = nose_ep
trait["Blemishe"] = blemishe_ep
trait["Tooth Color"] = tooth_color_ep
trait["Mouth"] = mouth_ep
if trait in traits2:
filterlist2.append(x)
else:
return trait
traits2 = []
list2 = range(11984)
#To avoid duplicates: the first loop existed only to fill filterlist1 with the indices of all duplicate midpunks.
#Always put the same number in list2 (and in the first loop's range) and increase it until you reach the desired number of midpunks.
#Always use the same seed "a" in both loops; here we need 11984 iterations to end up with 10K unique midpunks.
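# Sketch of the two-pass scheme (names as in the code above):
#   pass 1: for x in range(11984): seed(x + a); roll traits; createCombo()
#           -> filterlist1 collects the indices whose combo already occurred
#   pass 2: keep only the indices not in filterlist1, replay the same seeds,
#           and render -> exactly the unique midpunks remain.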
dupes = set(filterlist1)
filtered = [item for item in list2 if item not in dupes]
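# filterlist1 is converted to a set once, so each membership test is O(1)
# instead of scanning the whole duplicate list for every candidate index.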
a = 13080698
jpeg = -1
for x in filtered:
jpeg += 1
seed(x + a)
titi=0
titin=0
titine=0
toto=0
tata=0
tutu=0
tyty=0
tete=0
toutou=0
toctoc=0
tactac=0
tuctuc=0
tonton=0
tantan=0
neyo=0
neye=0
neya=0
neyh=0
neyu=0
neyw=0
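# The flags above (titi, toto, tutu, tyty, ...) are reset for every token and
# are set to 99 further down to veto trait combinations that must not co-occur
# (e.g. toto == 99 for Ponytail blocks the Shire Hat roll below).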
b = randint(0,1000000)
if b > 950000:
race_ep = 'Halflings'
type_ep = 'Male'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 800000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 200000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_2
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
HR0 = (255,193,0)
HR2 = (251,114,7)
HR3 = (210,154,0)
HR4 = (166,165,165)
HR5 = (165,108,0)
HR6 = (111,0,48)
HR7 = (85,57,23)
if e > 875000:
HR1 = HR0
hair_color_ep ='Blond'
elif e > 750000:
HR1 = nr
hair_color_ep='Black'
elif e > 625000:
HR1 = HR2
hair_color_ep ='Orange'
elif e > 500000:
HR1 = HR3
hair_color_ep ='Fair'
elif e > 375000:
HR1 = HR4
hair_color_ep ='Grey'
elif e > 250000:
HR1 = HR5
hair_color_ep ='Ginger'
elif e > 125000:
HR1 = HR6
hair_color_ep ='Black Rose'
else:
HR1 = HR7
hair_color_ep ='Brown'
HALFIN_HR1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,0,HR1,HR1,HR1,0,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,HR1,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,HR1,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,HR1,0,HR1,0,0,HR1,HR1,HR1,HR1,0,HR1,0,0],
[0,0,0,0,0,HR1,HR1,HR1,HR1,0,HR1,HR1,0,0,HR1,0,HR1,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0],
[0,0,0,0,HR1,0,HR1,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,HR1,0,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,0,HR1,HR1,0,0],
[0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HALFIN_HR2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,HR1,0,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,0,HR1,HR1,HR1,0,HR1,HR1,0,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,HR1,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,HR1,0,0,0],
[0,0,0,0,HR1,0,HR1,HR1,HR1,0,0,HR1,0,HR1,0,0,HR1,HR1,HR1,HR1,0,HR1,0,0],
[0,0,0,0,0,HR1,HR1,HR1,HR1,0,HR1,HR1,0,0,HR1,0,HR1,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0],
[0,0,0,0,HR1,0,HR1,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0],
[0,0,0,0,HR1,0,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,HR1,HR1,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0],
[0,0,0,0,HR1,0,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,0,HR1,HR1,0,0],
[0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,HR1,HR1,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,HR1,HR1,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,HR1,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HALFIN_HR3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,HR1,HR1,0,HR1,0,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,HR1,HR1,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,HR1,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,HR1,HR1,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HALFIN_HR4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,HR1,0,HR1,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,HR1,0,0,HR1,0,0,HR1,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,HR1,0,0,0,HR1,HR1,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,HR1,0,0,0,0,0,0,0,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HALFIN_HR5=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(e)
f=randint(0,1000000)
if f > 800000:
hair = HALFIN_HR1
haircut_ep ='Wild Hair'
elif f > 600000:
hair = HALFIN_HR2
haircut_ep ='Perm Hair'
elif f > 400000:
hair = HALFIN_HR3
haircut_ep ='Bedhead'
elif f > 200000:
hair = HALFIN_HR4
haircut_ep ='Hockey Hair'
else:
hair = HALFIN_HR5
haircut_ep ='Bald'
seed(f)
g=randint(0,1000000)
if g > 970000:
hair_prop = POLICE_6
hair_prop_ep = 'Police'
elif g > 950000:
hair_prop = TOPHAT_6
hair_prop_ep = 'Top Hat'
elif g > 900000:
hair_prop = HEADBAND_6
hair_prop_ep = 'Headband'
elif g > 850000:
hair_prop = FORCAP_8
hair_prop_ep = 'Cap Forward'
elif g > 830000:
hair_prop = COWBOY_8
hair_prop_ep = 'Cowboy Hat'
elif g > 790000:
hair_prop = CAP_8
hair_prop_ep = 'Cap'
else:
hair_prop = none
hair_prop_ep = 'None'
seed(g)
h=randint(0,1000000)
if h > 900000:
neck = GoldChain_1
neck_ep = 'Gold Chain'
elif h > 820000:
neck = SilverChain_1
neck_ep = 'Silver Chain'
elif h > 800000:
neck = RING_1
neck_ep = 'Ring Onchain'
elif h > 780000:
neck = BROCHE_1
neck_ep = 'Brooch'
else:
neck = none
neck_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif i > 880000:
mouth_prop = MASK_1
facial_hair = none
mouth_prop_ep = 'Medical Mask'
elif i > 820000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif i > 780000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(i)
j=randint(0,1000000)
if j > 970000:
eyes = TD_6
eyes_prop_ep ='3D Glasses'
elif j > 930000:
eyes = VR_6
eyes_prop_ep ='VR'
elif j > 880000:
eyes = ClassicShades_6
eyes_prop_ep ='Classic Shades'
elif j > 830000:
eyes = SmallShades_6
eyes_prop_ep ='Small Shades'
elif j > 780000:
eyes = EyePatch_6
eyes_prop_ep ='Eye Patch'
elif j > 730000:
eyes = NerdGlasses_6
eyes_prop_ep ='Nerd Glasses'
elif j > 680000:
eyes = BigShades_6
eyes_prop_ep ='Big Shades'
elif j > 650000:
eyes = EyeMask_6
eyes_prop_ep ='Eye Mask'
elif j > 600000:
eyes = HornedRimGlasses_6
eyes_prop_ep ='Horned Rim Glasses'
elif j > 550000:
eyes = RegularShades_6
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
seed(j)
k=randint(0,1000000)
if k > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif k > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,RC1,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(k)
l=randint(0,1000000)
if l > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif l > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
elif l > 870000:
blemishes = SCARE_2
blemishe_ep = 'Scare'
else:
blemishes = none
blemishe_ep = 'None'
HALFIN=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,FR1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,FR1,FR1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = HALFIN
elif b > 900000:
race_ep = 'Halflings'
type_ep = 'Female'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 800000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
LI1 = (113,28,17)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
LI1 = (113,28,17)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 200000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
LI1 = (95,29,13)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
LI1 = (74,18,8)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_3
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
HR0 = (255,193,0)
HR2 = (251,114,7)
HR3 = (210,154,0)
HR4 = (166,165,165)
HR5 = (165,108,0)
HR6 = (111,0,48)
HR7 = (85,57,23)
red = (255,0,0)
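# Note: in the 'Orange' branch below, HR1 = HR2 must execute before HR2 is
# reassigned to red, since HR2 still holds the orange tone defined above.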
if e > 875000:
HR1 = HR0
HR2 = red
hair_color_ep ='Blonde'
elif e > 750000:
HR1 = nr
HR2 = red
hair_color_ep ='Black'
elif e > 625000:
HR1 = HR2
HR2 = red
hair_color_ep ='Orange'
elif e > 500000:
HR1 = HR3
HR2 = red
hair_color_ep ='Fair'
elif e > 375000:
HR1 = HR4
HR2 = red
hair_color_ep ='Grey'
elif e > 250000:
HR1 = HR5
HR2 = red
hair_color_ep ='Ginger'
elif e > 125000:
HR1 = HR6
HR2 = red
hair_color_ep ='Black Rose'
else:
HR1 = HR7
HR2 = red
hair_color_ep ='Brown'
HALFINE_HR1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,HR1,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,0,0,HR1,HR1,0,HR1,HR1,HR1,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,HR1,0,HR1,HR1,HR1,HR1,HR1,0,HR1,0,HR1,0,HR1,0,HR1,0,0,0,0,0],
[0,0,0,HR1,HR1,HR1,HR1,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,HR1,0,HR1,HR1,HR1,HR1,0,HR1,0,HR1,0,HR1,HR1,0,HR1,HR1,0,0,0,0],
[0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,HR1,0,0,0,HR1,HR1,0,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,HR1,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,HR1,0,HR1,HR1,HR1,0,0,0,0],
[0,0,HR1,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,HR1,HR1,HR1,0,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,0,HR1,0,0,0],
[0,0,0,HR1,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,HR1,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,HR1,0,0,0],
[0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0],
[0,0,HR1,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,HR1,0,0],
[0,HR1,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,HR1,HR1,0,0],
[0,HR1,HR1,0,HR1,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,HR1,0,HR1,0,HR1,0,0,0,0,0,0,0,0,0,HR1,0,HR1,0,HR1,0,0],
[0,0,0,0,HR1,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,0,HR1,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HALFINE_HR2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,0,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,HR1,0,HR1,0,HR1,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,HR1,0,0,0,HR1,HR1,HR1,0,HR1,0,0,0,0],
[0,0,0,0,HR1,0,HR1,0,0,0,0,HR1,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,HR1,0,0,0,0,0,0,HR1,0,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HALFINE_HR3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,HR1,HR1,HR1,0,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,HR1,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HALFINE_HR4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0],
[0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
HALFINE_HR5=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR2,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR2,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,HR1,HR1,HR1,HR1,0,0],
[0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,HR1,HR1,HR1,0,HR1,HR1,HR1,HR1,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,HR1,HR1,HR1,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
MOLE_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,RC1,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
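# Trait selection below follows the script's chained-seed pattern: each seed()
# call is fed the previous draw, so every randint(0,1000000) roll -- and thus
# the whole trait set -- is reproducible from the first seed. The threshold
# bands on each roll define how rare each trait is.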
seed(e)
f=randint(0,1000000)
if f > 800000:
hair = HALFINE_HR1
haircut_ep ='Perm Hair'
elif f > 600000:
hair = HALFINE_HR2
haircut_ep ='Wild Hair'
elif f > 400000:
hair = HALFINE_HR3
haircut_ep ='Wedge Hair'
elif f > 200000:
hair = HALFINE_HR4
haircut_ep ='Feathered Hair'
else:
hair = HALFINE_HR5
haircut_ep ='Ponytail'
toto = 99
seed(f)
g=randint(0,1000000)
if g > 990000:
hair_prop = TIARA_3
hair_prop_ep = 'Tiara'
titine = 99
elif g > 940000:
hair_prop = Flower
hair_prop_ep = 'Flower'
elif g > 900000 and toto != 99:
hair_prop = Hob_Hat
hair_prop_ep = 'Shire Hat'
elif g > 860000:
hair_prop = HEADBAND_4
hair_prop_ep = 'Headband'
elif g > 850000:
hair = none
hair_prop = PILOT_2
hair_prop_ep = 'Pilot Helmet'
titine = 99
else:
hair_prop = none
hair_prop_ep = 'None'
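# Eye shadow is applied by overriding the EY1 (eye) and SC1 (socket) palette
# entries referenced by the base face grid, not by adding an overlay layer.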
seed(g)
h=randint(0,1000000)
if h > 900000:
EY1 = (110,152,77)
SC1 = (92,133,57)
eyes_color_ep = 'Green Eye Shadow'
elif h > 800000:
EY1 = (93,121,117)
SC1 = (80,106,101)
eyes_color_ep = 'Blue Eye Shadow'
elif h > 700000:
EY1 = (176,61,133)
SC1 = (164,55,117)
eyes_color_ep = 'Purple Eye Shadow'
elif h > 600000:
EY1 = (214,92,26)
SC1 = (194,79,17)
eyes_color_ep = 'Orange Eye Shadow'
else:
eyes_color_ep = 'None'
neya = 99
seed(h)
i=randint(0,1000000)
if i > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif i > 880000:
mouth_prop = MASK_2
mouth_prop_ep = 'Medical Mask'
tactac=99
elif i > 820000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif i > 780000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(i)
j=randint(0,1000000)
if j > 970000:
eyes = TD_3
eyes_prop_ep ='3D Glasses'
elif j > 930000:
eyes = VR_3
eyes_prop_ep ='VR'
elif j > 880000:
eyes = ClassicShades_4
eyes_prop_ep ='Classic Shades'
elif j > 830000:
eyes = EyePatch_4
eyes_prop_ep ='Eye Patch'
neyh = 99
elif j > 780000:
eyes = NerdGlasses_4
eyes_prop_ep ='Nerd Glasses'
elif j > 730000:
eyes = BigShades_4
eyes_prop_ep ='Big Shades'
elif j > 700000:
eyes = EyeMask_4
eyes_prop_ep ='Eye Mask'
neyh = 99
elif j > 650000:
eyes = HornedRimGlasses_4
eyes_prop_ep ='Horned Rim Glasses'
elif j > 600000:
eyes = RegularShades_4
eyes_prop_ep ='Regular Shades'
elif j > 590000:
eyes = GOGOLES_2
eyes_prop_ep ='Welding Goggles'
hair_prop = none
hair_prop_ep = 'None'
toctoc = 99
else:
eyes=none
eyes_prop_ep ='None'
neyh = 99
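# Compatibility checks: titine flags a Tiara or Pilot Helmet, toctoc flags
# Welding Goggles, neya flags "no eye shadow", and neyh flags eyewear that
# either fully covers the eyes or is absent. Glasses are dropped when they
# would clash with the headwear or sit over eye shadow.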
if titine == 99 and toctoc != 99:
eyes = none
eyes_prop_ep ='None'
if neya != 99 and neyh != 99:
eyes = none
eyes_prop_ep ='None'
seed(j)
k=randint(0,1000000)
if k > 975000:
nose = NOSE_2
nose_ep = 'Clown Nose'
tuctuc = 99
else:
nose = none
nose_ep = 'None'
if tactac == 99 and tuctuc == 99:  # medical mask and clown nose both drawn
mouth_prop = none  # was 'mouthprop', a typo that left the mask in place
mouth_prop_ep = 'None'
seed(k)
l=randint(0,1000000)
if l > 970000:
blemishes = ROSY_2
blemishe_ep = 'Rosy Cheeks'
elif l > 900000:
blemishes = MOLE_2
blemishe_ep = 'Mole'
elif l > 870000:
blemishes = SCARE_2
blemishe_ep = 'Scare'
else:
blemishes = none
blemishe_ep = 'None'
seed(l)
m=randint(0,1000000)
if m > 930000:
LI1 = nr
mouth_ep = 'Black Lipstick'
elif m > 860000:
LI1 = (255,0,0)
mouth_ep = 'Hot Lipstick'
elif m > 790000:
LI1 = (208,82,203)
mouth_ep = 'Purple Lipstick'
elif m > 720000:
LI1 = (214,92,26)
mouth_ep = 'Orange Lipstick'
else:
mouth = none
mouth_ep = 'None'
seed(m)
n=randint(0,1000000)
if n > 900000:
neck = GoldChain_3
neck_ep = 'Gold Chain'
elif n > 820000:
neck = SilverChain_3
neck_ep = 'Silver Chain'
elif n > 800000:
neck = RING_3
neck_ep = 'Ring Onchain'
else:
neck = none
neck_ep = 'None'
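# 24x24 base-face grid. Each cell is an RGB tuple drawn as one pixel: FR2 is
# the outer frame, BG1 the background, FR1 the face outline, SK1 skin, SC1/EY1
# the eye sockets and eyes, and LI1 the lip pixels tinted by the lipstick roll.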
HALFINE=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,LI1,LI1,LI1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = HALFINE
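# Each race/type branch ends by assigning its base grid to `pixels`; the
# overlay layers rolled above (hair, eyes, neck, ...) are presumably
# composited onto it further down in the script.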
elif b > 750000:
race_ep = 'Men'
type_ep = 'Male'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
BE6 = (40,27,9)
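# Skin tone roll: four palettes (Albino / Light / Mid / Dark). Each palette
# sets the skin, socket and eye colors plus matching greys (HRG*), rosy-cheek
# (RC1) and stubble (BE5) shades; MO1 and SCR1 (mole and scar marks) reuse the
# eye shade so every overlay matches the chosen tone.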
seed(b)
c=randint(0,1000000)
if c > 800000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
BE5 = (163,151,131)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
BE5 = (153,124,89)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 200000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
BE5 = (121,97,68)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
BE5 = (79,44,20)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_2
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
HR0 = (255,193,0)
HR2 = (251,114,7)
HR3 = (210,154,0)
HR4 = (166,165,165)
HR5 = (165,108,0)
HR6 = (111,0,48)
HR7 = (85,57,23)
red = (255,0,0)
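# Hair color roll: eight even 125000-wide bands, i.e. 12.5% each. HR1 is the
# color actually painted by the hair grids; HR2 is forced to red here, but the
# MAN_HR* grids below only paint HR1.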
if e > 875000:
HR1 = HR0
HR2 = red
hair_color_ep ='Blonde'
elif e > 750000:
HR1 = nr
HR2 = red
hair_color_ep ='Black'
elif e > 625000:
HR1 = HR2
HR2 = red
hair_color_ep ='Orange'
elif e > 500000:
HR1 = HR3
HR2 = red
hair_color_ep ='Fair'
elif e > 375000:
HR1 = HR4
HR2 = red
hair_color_ep ='Grey'
elif e > 250000:
HR1 = HR5
HR2 = red
hair_color_ep ='Ginger'
elif e > 125000:
HR1 = HR6
HR2 = red
hair_color_ep ='Black Rose'
else:
HR1 = HR7
HR2 = red
hair_color_ep ='Brown'
MAN_HR1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,HR1,0,HR1,0,HR1,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,HR1,0,0,HR1,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,HR1,0,0,0,HR1,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,HR1,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,HR1,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,HR1,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0],
[0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
MAN_HR2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0],
[0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
MAN_HR3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,HR1,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0,0,HR1,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,HR1,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,HR1,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
MAN_HR4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,0,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0],
[0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
MAN_HR5=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,0,HR1,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,HR1,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,HR1,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(e)
f=randint(0,1000000)
if f > 800000:
hair = MAN_HR1
haircut_ep = 'Grunge Hair'
elif f > 600000:
hair = MAN_HR2
haircut_ep = 'Prince Hair'
elif f > 400000:
hair = MAN_HR3
haircut_ep = 'King Hair'
elif f > 200000:
hair = MAN_HR4
haircut_ep = 'Bald'
else:
hair = MAN_HR5
haircut_ep = 'Straight Hair'
seed(f)
g=randint(0,1000000)
if g > 960000:
hair_prop = CAP_2
hair_prop_ep = 'Cap'
elif g > 940000:
hair_prop = COWBOY_2
hair_prop_ep = 'Cowboy Hat'
elif g > 930000:
hair_prop = TOPHAT_2
hair_prop_ep = 'Top Hat'
elif g > 910000:
hair_prop = Gondor_Crown
hair_prop_ep = 'Men Crown'
elif g > 870000:
hair_prop = KNITTED_2
hair_prop_ep = 'Knitted Cap'
elif g > 820000:
hair_prop = HEADBAND_2
hair_prop_ep = 'Headband'
elif g > 790000:
hair_prop = FORCAP_2
hair_prop_ep = 'Cap Forward'
elif g > 760000:
hair_prop = BANDANA_2
hair_prop_ep = 'Bandana'
elif g > 740000:
hair_prop = FEDORA_2
hair_prop_ep = 'Fedora'
elif g > 710000:
hair_prop = POLICE_2
hair_prop_ep = 'Police'
elif g > 700000:
hair_prop = BEANI_2
hair_prop_ep = 'Beanie'
else:
hair_prop = none
hair_prop_ep = 'None'
seed(g)
h=randint(0,1000000)
if h > 900000:
neck = GoldChain_1
neck_ep = 'Gold Chain'
elif h > 820000:
neck = SilverChain_1
neck_ep = 'Silver Chain'
elif h > 800000:
neck = RING_1
neck_ep = 'Ring Onchain'
elif h > 780000:
neck = BROCHE_1
neck_ep = 'Brooch'
else:
neck = none
neck_ep = 'None'
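# ShadowBeard overlay: a 24x24 stubble layer using the skin-matched beard
# shade BE5, with darker BE6 pixels around the mouth.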
ShadowBeard=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,BE5,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BE5,0,0,0,0,0,0,BE5,BE5,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BE5,BE5,BE5,BE5,BE5,BE5,BE5,BE5,BE5,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BE5,BE5,BE6,BE6,BE6,BE5,BE5,BE5,BE5,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BE5,BE5,BE5,BE5,BE5,BE5,BE5,BE5,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,BE5,BE5,BE5,BE5,BE5,BE5,BE5,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BE5,BE5,BE5,BE5,BE5,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(h)
i=randint(0,1000000)
if i > 950000:
facial_hair = BigBeard
facial_hair_ep = 'Big Beard'
elif i > 900000:
facial_hair = Muttonchops
facial_hair_ep = 'Muttonchops'
elif i > 850000:
facial_hair = Mustache
facial_hair_ep = 'Mustache'
elif i > 800000:  # was '> 890000', unreachable after the '> 850000' branch above
facial_hair = Handlebars
facial_hair_ep = 'Handlebars'
elif i > 750000:
facial_hair = FrontBeardDark
facial_hair_ep = 'Front Beard Dark'
elif i > 700000:
facial_hair = FrontBeard
facial_hair_ep = 'Front Beard'
elif i > 650000:
facial_hair = NormalBeard
facial_hair_ep = 'Normal Beard'
elif i > 600000:
facial_hair = NormalBeardBlack
facial_hair_ep = 'Normal Beard Black'
elif i > 550000:
facial_hair = LuxuriousBeard
facial_hair_ep = 'Luxurious Beard'
elif i > 500000:
facial_hair = Goat
facial_hair_ep = 'Goat'
elif i > 450000:
facial_hair = Chinstrap
facial_hair_ep = 'Chinstrap'
elif i > 400000:
facial_hair = ShadowBeard
facial_hair_ep = 'Shadow Beard'
else:
facial_hair = none
facial_hair_ep = 'None'
seed(i)
j=randint(0,1000000)
if j > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif j > 880000:
mouth_prop = MASK_1
mouth_prop_ep = 'Medical Mask'
facial_hair = none
elif j > 820000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif j > 780000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(j)
k=randint(0,1000000)
if k > 970000:
eyes = TD_2
eyes_prop_ep ='3D Glasses'
elif k > 930000:
eyes = VR_2
eyes_prop_ep ='VR'
elif k > 880000:
eyes = ClassicShades_2
eyes_prop_ep ='Classic Shades'
elif k > 830000:
eyes = SmallShades_2
eyes_prop_ep ='Small Shades'
hair = MAN_HR3
haircut_ep = 'King Hair'
elif k > 780000:
eyes = EyePatch_2
eyes_prop_ep ='Eye Patch'
elif k > 730000:
eyes = NerdGlasses_2
eyes_prop_ep ='Nerd Glasses'
elif k > 680000:
eyes = BigShades_2
eyes_prop_ep ='Big Shades'
elif k > 650000:
eyes = EyeMask_2
eyes_prop_ep ='Eye Mask'
elif k > 600000:
eyes = HornedRimGlasses_2
eyes_prop_ep ='Horned Rim Glasses'
elif k > 550000:
eyes = RegularShades_2
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
seed(k)
l=randint(0,1000000)
if l > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
seed(l)
m=randint(0,1000000)
if m > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif m > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(m)
n=randint(0,1000000)
if n > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif n > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
elif n > 870000:
blemishes = SCARE_1
blemishe_ep = 'Scare'
else:
blemishes = none
blemishe_ep = 'None'
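# 24x24 base-face grid for the Men/Male type; same palette conventions as the
# grid above (FR2 frame, BG1 background, FR1 outline, SK1 skin, SC1/EY1 eyes).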
MAN=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,FR1,FR1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = MAN
elif b > 600000:
race_ep = 'Men'
type_ep = 'Female'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 800000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
LI1 = (113,28,17)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
LI1 = (113,28,17)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 200000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
LI1 = (95,29,13)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
LI1 = (74,18,8)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_3
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
HR0 = (255,193,0)
HR2 = (251,114,7)
HR3 = (210,154,0)
HR4 = (166,165,165)
HR5 = (165,108,0)
HR6 = (111,0,48)
HR7 = (85,57,23)
red = (255,0,0)
if e > 875000:
HR1 = HR0
HR2 = red
hair_color_ep ='Blonde'
elif e > 750000:
HR1 = nr
HR2 = red
hair_color_ep ='Black'
elif e > 625000:
HR1 = HR2
HR2 = red
hair_color_ep ='Orange'
elif e > 500000:
HR1 = HR3
HR2 = red
hair_color_ep ='Fair'
elif e > 375000:
HR1 = HR4
HR2 = red
hair_color_ep ='Grey'
elif e > 250000:
HR1 = HR5
HR2 = red
hair_color_ep ='Ginger'
elif e > 125000:
HR1 = HR6
HR2 = red
hair_color_ep ='Black Rose'
else:
HR1 = HR7
HR2 = red
hair_color_ep ='Brown'
WOMAN_HR1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,HR1,HR1,0,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,0,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,HR1,0,0,0,0],
[0,0,0,0,0,HR1,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,0,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,0,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,HR1,0,0,0,0],
[0,0,0,0,0,HR1,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,HR1,0,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,HR1,0,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,0,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,0,HR1,0,0,0,0],
[0,0,0,0,0,HR1,0,HR1,HR1,HR1,0,0,0,0,0,0,HR1,HR1,0,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,0,HR1,HR1,HR1,0,0,0,0,0,HR1,0,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
WOMAN_HR2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
WOMAN_HR3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,HR1,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
WOMAN_HR4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
WOMAN_HR5=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
MOLE_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,RC1,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(e)
f=randint(0,1000000)
if f > 800000:
hair = WOMAN_HR1
haircut_ep = 'Curly Hair'
elif f > 600000:
hair = WOMAN_HR2
haircut_ep = 'Right Side Hair'
elif f > 400000:
hair = WOMAN_HR3
haircut_ep = 'Left Side Hair'
elif f > 200000:
hair = WOMAN_HR4
haircut_ep = 'The Bob'
else:
hair = WOMAN_HR5
haircut_ep = 'Straight Hair'
seed(f)
g=randint(0,1000000)
if g > 960000:
hair_prop = CAP_4
hair_prop_ep = 'Cap'
elif g > 950000:
hair_prop = TIARA_2
hair_prop_ep = 'Tiara'
titi = 99
elif g > 930000:
hair_prop = MILICAP_2
hair_prop_ep = 'Punk Hat'
elif g > 890000:  # was 'e', which tested the hair color roll instead of this one
hair_prop = KNITTED_4
hair_prop_ep = 'Knitted Cap'
elif g > 850000:
hair_prop = HEADBAND_4
hair_prop_ep = 'Headband'
elif g > 840000:
hair = none
hair_prop = PILOT_2
hair_prop_ep = 'Pilot Helmet'
titi = 99
elif g > 810000:
hair_prop = BANDANA_4
hair_prop_ep = 'Bandana'
elif g > 750000:
hair_prop = Wo_Crown
hair_prop_ep = 'Circlet'
else:
hair_prop = none
hair_prop_ep = 'None'
seed(g)
h=randint(0,1000000)
if h > 900000:
EY1 = (110,152,77)
SC1 = (92,133,57)
eyes_color_ep = 'Green Eye Shadow'
elif h > 800000:
EY1 = (93,121,117)
SC1 = (80,106,101)
eyes_color_ep = 'Blue Eye Shadow'
elif h > 700000:
EY1 = (176,61,133)
SC1 = (164,55,117)
eyes_color_ep = 'Purple Eye Shadow'
elif h > 600000:
EY1 = (214,92,26)
SC1 = (194,79,17)
eyes_color_ep = 'Orange Eye Shadow'
else:
eyes_color_ep = 'None'
neyu = 99
seed(h)
i=randint(0,1000000)
if i > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif i > 880000:
mouth_prop = MASK_2
mouth_prop_ep = 'Medical Mask'
tactac = 99
elif i > 820000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif i > 780000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(i)
j=randint(0,1000000)
if j > 970000:
eyes = TD_3
eyes_prop_ep ='3D Glasses'
elif j > 930000:
eyes = VR_3
eyes_prop_ep ='VR'
elif j > 880000:
eyes = ClassicShades_4
eyes_prop_ep ='Classic Shades'
elif j > 830000:
eyes = EyePatch_4
eyes_prop_ep ='Eye Patch'
neyw = 99
elif j > 780000:
eyes = NerdGlasses_4
eyes_prop_ep ='Nerd Glasses'
elif j > 730000:
eyes = BigShades_4
eyes_prop_ep ='Big Shades'
elif j > 700000:
eyes = EyeMask_4
eyes_prop_ep ='Eye Mask'
neyw = 99
elif j > 650000:
eyes = HornedRimGlasses_4
eyes_prop_ep ='Horned Rim Glasses'
elif j > 600000:
eyes = RegularShades_4
eyes_prop_ep ='Regular Shades'
elif j > 590000:
eyes = GOGOLES_2
eyes_prop_ep ='Welding Goggles'
hair_prop = none
hair_prop_ep = 'None'
tata = 99
else:
eyes=none
eyes_prop_ep ='None'
neyw = 99
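# Same compatibility scheme as the other female branch: titi flags a Tiara or
# Pilot Helmet, tata flags Welding Goggles, neyu flags "no eye shadow", and
# neyw flags eyewear that either fully covers the eyes or is absent.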
if titi == 99 and tata != 99:
eyes = none
eyes_prop_ep ='None'
if neyu != 99 and neyw != 99:
eyes = none
eyes_prop_ep ='None'
seed(j)
k=randint(0,1000000)
if k > 975000:
nose = NOSE_2
nose_ep = 'Clown Nose'
tuctuc = 99
else:
nose = none
nose_ep = 'None'
if tactac == 99 and tuctuc == 99:  # medical mask and clown nose both drawn
mouth_prop = none  # was 'mouthprop', a typo that left the mask in place
mouth_prop_ep = 'None'
seed(k)
l=randint(0,1000000)
if l > 970000:
blemishes = ROSY_2
blemishe_ep = 'Rosy Cheeks'
elif l > 900000:
blemishes = MOLE_2
blemishe_ep = 'Mole'
elif l > 870000:
blemishes = SCARE_2
blemishe_ep = 'Scare'
else:
blemishes = none
blemishe_ep = 'None'
seed(l)
m=randint(0,1000000)
if m > 930000:
LI1 = nr
mouth_ep = 'Black Lipstick'
elif m > 860000:
LI1 = (255,0,0)
mouth_ep = 'Hot Lipstick'
elif m > 790000:
LI1 = (208,82,203)
mouth_ep = 'Purple Lipstick'
elif m > 720000:
LI1 = (214,92,26)
mouth_ep = 'Orange Lipstick'
else:
mouth = none
mouth_ep = 'None'
seed(m)
n=randint(0,1000000)
if n > 900000:
neck = GoldChain_3
neck_ep = 'Gold Chain'
elif n > 820000:
neck = SilverChain_3
neck_ep = 'Silver Chain'
elif n > 800000:
neck = RING_3
neck_ep = 'Ring Onchain'
elif n > 790000:
neck = CHOKER
neck_ep = 'Choker'
elif n > 770000:
neck = BROCHE_3
neck_ep = 'Brooch'
else:
neck = none
neck_ep = 'None'
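# 24x24 base-face grid for the Men/Female type; the LI1 cells on the mouth
# row pick up the lipstick color rolled above.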
WOMAN=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,LI1,LI1,LI1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = WOMAN
elif b > 535000:
race_ep = 'Elves'
type_ep = 'Male'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 800000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 200000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_1
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
HR0 = (255,227,72)
HR2 = (255,255,153)
HR3 = (165,108,0)
HR4 = (61,35,32)
HR5 = (111,0,48)
HR6 = (255,0,0)
if e > 850000:
HR1 = HR0
hair_color_ep ='Blond'
elif e > 700000:
HR1 = HR2
hair_color_ep ='Butter'
elif e > 650000:
HR1 = HR3
hair_color_ep ='Ginger'
elif e > 500000:
HR1 = HR4
hair_color_ep ='Brown'
elif e > 350000:
HR1 = HR5
hair_color_ep ='Black Rose'
elif e > 200000:
HR1 = nr
hair_color_ep='Black'
else:
HR1 = HR6
hair_color_ep ='Red'
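# Elf hair colors use uneven bands (Ginger spans only 650k-700k, i.e. 5%;
# Red takes the bottom 20%), unlike the even 12.5% bands of the human types.
# Five elf-specific 24x24 hair grids follow.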
ELF_HR1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ELF_HR2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,HR1,HR1,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0,HR1,HR1,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ELF_HR3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,BG1,HR1,HR1,HR1,HR1,BG1,BG1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,HR1,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ELF_HR4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,HR1,HR1,HR1,HR1,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,HR1,HR1,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0]
]
ELF_HR5=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0]
]
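# Trait rolls below are chained: each randint() result seeds the next roll,
# so a single starting seed deterministically fixes every trait of the
# character. Haircut: five styles at roughly 20% each.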
seed(e)
f=randint(0,1000000)
if f > 800000:
hair = ELF_HR1
haircut_ep = 'Straight Hair'
elif f > 600000:
hair = ELF_HR2
haircut_ep = 'Braids'
elif f > 400000:
hair = ELF_HR3
haircut_ep = 'Left Side Hair'
elif f > 200000:
hair = ELF_HR4
haircut_ep = 'Long Hair'
else:
hair = ELF_HR5
haircut_ep = 'Medium Layers'
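# Head-prop roll. The Headband and Elfic Crown branches also force a
# compatible haircut, overriding the style rolled just above.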
seed(f)
g=randint(0,1000000)
if g > 960000:
hair_prop = CAP_1
hair_prop_ep = 'Cap'
elif g > 940000:
hair_prop = COWBOY_1
hair_prop_ep = 'Cowboy Hat'
elif g > 910000:
hair_prop = TOPHAT_1
hair_prop_ep = 'Top Hat'
elif g > 870000:
hair_prop = KNITTED_1
hair_prop_ep = 'Knitted Cap'
elif g > 865000:
hair_prop = HEADBAND_1
hair_prop_ep = 'Headband'
hair = ELF_HR1
haircut_ep = 'Straight Hair'
elif g > 850000:
hair_prop = HEADBAND_1
hair_prop_ep = 'Headband'
hair = ELF_HR2
haircut_ep = 'Braids'
elif g > 835000:
hair_prop = HEADBAND_1
hair_prop_ep = 'Headband'
hair = ELF_HR4
haircut_ep = 'Long Hair'
elif g > 820000:
hair_prop = HEADBAND_1
hair_prop_ep = 'Headband'
hair = ELF_HR5
haircut_ep = 'Medium Layers'
elif g > 790000:
hair_prop = FORCAP_1
hair_prop_ep = 'Cap Forward'
elif g > 760000:
hair_prop = BANDANA_1
hair_prop_ep = 'Bandana'
elif g > 750000:
hair_prop = Elf_Crown
hair_prop_ep = 'Elfic Crown'
hair = ELF_HR1
haircut_ep = 'Straight Hair'
elif g > 740000:
hair_prop = Elf_Crown
hair_prop_ep = 'Elfic Crown'
hair = ELF_HR2
haircut_ep = 'Braids'
elif g > 730000:
hair_prop = Elf_Crown
hair_prop_ep = 'Elfic Crown'
hair = ELF_HR4
haircut_ep = 'Long Hair'
elif g > 720000:
hair_prop = Elf_Crown
hair_prop_ep = 'Elfic Crown'
hair = ELF_HR5
haircut_ep = 'Medium Layers'
elif g > 700000:
hair_prop = FEDORA_1
hair_prop_ep = 'Fedora'
elif g > 670000:
hair_prop = POLICE_1
hair_prop_ep = 'Police'
elif g > 660000:
hair_prop = BEANI_1
hair_prop_ep = 'Beanie'
else:
hair_prop = none
hair_prop_ep = 'None'
seed(g)
h=randint(0,1000000)
if h > 900000:
neck = GoldChain_1
neck_ep = 'Gold Chain'
elif h > 820000:
neck = SilverChain_1
neck_ep = 'Silver Chain'
elif h > 800000:
neck = RING_1
neck_ep = 'Ring Onchain'
elif h > 780000:
neck = BROCHE_1
neck_ep = 'Brooch'
else:
neck = none
neck_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif i > 880000:
mouth_prop = MASK_1
mouth_prop_ep = 'Medical Mask'
elif i > 820000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif i > 780000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(i)
j=randint(0,1000000)
if j > 970000:
eyes = TD_1
eyes_prop_ep ='3D Glasses'
elif j > 930000:
eyes = VR_1
eyes_prop_ep ='VR'
elif j > 880000:
eyes = ClassicShades_1
eyes_prop_ep ='Classic Shades'
elif j > 830000:
eyes = EyePatch_1
eyes_prop_ep ='Eye Patch'
elif j > 780000:
eyes = NerdGlasses_1
eyes_prop_ep ='Nerd Glasses'
elif j > 730000:
eyes = BigShades_1
eyes_prop_ep ='Big Shades'
elif j > 700000:
eyes = EyeMask_1
eyes_prop_ep ='Eye Mask'
elif j > 650000:
eyes = HornedRimGlasses_1
eyes_prop_ep ='Horned Rim Glasses'
elif j > 600000:
eyes = RegularShades_1
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
seed(j)
k=randint(0,1000000)
if k > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
seed(k)
l=randint(0,1000000)
if l > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif l > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
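# Blemish overlays (24x24): 0 = transparent; MO1 (mole), SCR1 (scar) and
# RC1 (rosy cheeks) pixels are drawn on top of the base face, using colours
# derived from the skin tone chosen above.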
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(l)
m=randint(0,1000000)
if m > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif m > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
elif m > 870000:
blemishes = SCARE_1
blemishe_ep = 'Scar'
else:
blemishes = none
blemishe_ep = 'None'
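# Base male-elf face. Legend: FR2 = outer frame, BG1 = background,
# FR1 = outline, SK1 = skin, SC1 = brow/socket shading, EY1 = eyes.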
ELF=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,FR1,FR1,FR1,FR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,SK1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,FR1,FR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,FR1,FR1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = ELF
elif b > 470000:
race_ep = 'Elves'
type_ep = 'Female'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 800000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = SK1
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
LI1 = (113,28,17)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = SK1
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
LI1 = (113,28,17)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 200000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = SK1
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
LI1 = (95,29,13)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = SK1
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
LI1 = (74,18,8)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_3
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
HR0 = (255,227,72)
HR2 = (249,255,0)
HR3 = (165,108,0)
HR4 = (61,35,32)
HR5 = (111,0,48)
HR6 = (255,0,0)
if e > 850000:
HR1 = HR0
hair_color_ep ='Blond'
elif e > 700000:
HR1 = HR2
hair_color_ep ='Butter'
elif e > 650000:
HR1 = HR3
hair_color_ep ='Ginger'
elif e > 500000:
HR1 = HR4
hair_color_ep ='Brown'
elif e > 350000:
HR1 = HR5
hair_color_ep ='Black Rose'
elif e > 200000:
HR1 = nr
hair_color_ep='Black'
else:
HR1 = HR6
hair_color_ep ='Red'
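# Female-elf haircut maps (24x24), same 0 = transparent / HR1 = hair
# convention as the male set above.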
ELFE_HR1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ELFE_HR2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,HR1,HR1,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,HR1,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,HR1,0,0]
]
ELFE_HR3=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,HR1,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ELFE_HR4=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,HR1,HR1,HR1,HR1,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,HR1,HR1,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0]
]
ELFE_HR5=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0,HR1,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0],
[0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,HR1,HR1,0,0,0,0,HR1,HR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,HR1,0,0,0,0,HR1,0,0,0,0,0,0,0,0,0]
]
MOLE_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_2=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,RC1,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(e)
f=randint(0,1000000)
if f > 800000:
hair = ELFE_HR1
haircut_ep = 'Straight Hair'
elif f > 600000:
hair = ELFE_HR2
haircut_ep = 'Braids'
elif f > 400000:
hair = ELFE_HR3
haircut_ep = 'Left Side Hair'
elif f > 200000:
hair = ELFE_HR4
haircut_ep = 'Long Hair'
else:
hair = ELFE_HR5
haircut_ep = 'Medium Layers'
# Conflict flags (0 = unset, 99 = set), checked after the rolls below.
# Initialised here so a value left over from a previous character cannot
# leak into those checks.
titin = 0   # Pilot Helmet chosen
toutou = 0  # Welding Goggles chosen
neyo = 0    # becomes 99 when NO eye shadow is applied
neye = 0    # becomes 99 when the eyewear leaves the eyes visible
tactac = 0  # Medical Mask chosen
tuctuc = 0  # Clown Nose chosen
seed(f)
g=randint(0,1000000)
if g > 900000:
hair_prop = CAP_3
hair_prop_ep = 'Cap'
elif g > 700000:
hair_prop = MILICAP_1
hair_prop_ep = 'Punk Hat'
elif g > 600000:
hair_prop = KNITTED_3
hair_prop_ep = 'Knitted Cap'
elif g > 500000:
hair_prop = HEADBAND_3
hair_prop_ep = 'Headband'
elif g > 400000:
hair = none
hair_prop = PILOT_1
hair_prop_ep = 'Pilot Helmet'
titin = 99
elif g > 300000:
hair_prop = BANDANA_3
hair_prop_ep = 'Bandana'
elif g > 100000:
hair_prop = Elfe_Tiara
hair_prop_ep = 'Elfic Tiara'
else:
hair_prop = none
hair_prop_ep = 'None'
seed(g)
h=randint(0,1000000)
if h > 900000:
EY1 = (110,152,77)
SC1 = (92,133,57)
eyes_color_ep = 'Green Eye Shadow'
elif h > 800000:
EY1 = (93,121,117)
SC1 = (80,106,101)
eyes_color_ep = 'Blue Eye Shadow'
elif h > 700000:
EY1 = (176,61,133)
SC1 = (164,55,117)
eyes_color_ep = 'Purple Eye Shadow'
elif h > 600000:
EY1 = (214,92,26)
SC1 = (194,79,17)
eyes_color_ep = 'Orange Eye Shadow'
else:
eyes_color_ep = 'None'
neyo = 99
seed(h)
i=randint(0,1000000)
if i > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif i > 880000:
mouth_prop = MASK_2
mouth_prop_ep = 'Medical Mask'
tactac = 99
elif i > 820000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif i > 780000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(i)
j=randint(0,1000000)
if j > 970000:
eyes = TD_3
eyes_prop_ep ='3D Glasses'
elif j > 930000:
eyes = VR_3
eyes_prop_ep ='VR'
elif j > 880000:
eyes = ClassicShades_3
eyes_prop_ep ='Classic Shades'
elif j > 830000:
eyes = EyePatch_3
eyes_prop_ep ='Eye Patch'
neye = 99
elif j > 780000:
eyes = NerdGlasses_3
eyes_prop_ep ='Nerd Glasses'
elif j > 730000:
eyes = BigShades_3
eyes_prop_ep ='Big Shades'
elif j > 700000:
eyes = EyeMask_3
eyes_prop_ep ='Eye Mask'
neye = 99
elif j > 650000:
eyes = HornedRimGlasses_3
eyes_prop_ep ='Horned Rim Glasses'
elif j > 600000:
eyes = RegularShades_3
eyes_prop_ep ='Regular Shades'
elif j > 590000:
eyes = GOGOLES_1
eyes_prop_ep ='Welding Goggles'
hair_prop = none
hair_prop_ep = 'None'
toutou = 99
else:
eyes=none
eyes_prop_ep ='None'
neye = 99
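# Reconcile conflicting traits: the Pilot Helmet hides any eyewear except
# the Welding Goggles, and eye shadow is kept visible by dropping eyewear
# that would cover the eyes.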
if titin == 99 and toutou != 99:
eyes = none
eyes_prop_ep ='None'
if neyo != 99 and neye !=99:
eyes = none
eyes_prop_ep ='None'
seed(j)
k=randint(0,1000000)
if k > 975000:
nose = NOSE_2
nose_ep = 'Clown Nose'
tuctuc = 99
else:
nose = none
nose_ep = 'None'
# A Medical Mask and a Clown Nose cannot both be drawn: drop the mask.
if tactac == 99 and tuctuc == 99:
mouth_prop = none
mouth_prop_ep = 'None'
seed(k)
l=randint(0,1000000)
if l > 970000:
blemishes = ROSY_2
blemishe_ep = 'Rosy Cheeks'
elif l > 900000:
blemishes = MOLE_2
blemishe_ep = 'Mole'
elif l > 870000:
blemishes = SCARE_2
blemishe_ep = 'Scar'
else:
blemishes = none
blemishe_ep = 'None'
seed(l)
m=randint(0,1000000)
if m > 930000:
LI1 = nr
mouth_ep = 'Black Lipstick'
elif m > 860000:
LI1 = (255,0,0)
mouth_ep = 'Hot Lipstick'
elif m > 790000:
LI1 = (208,82,203)
mouth_ep = 'Purple Lipstick'
elif m > 720000:
LI1 = (214,92,26)
mouth_ep = 'Orange Lipstick'
else:
mouth = none
mouth_ep = 'None'
seed(m)
n=randint(0,1000000)
if n > 900000:
neck = GoldChain_2
neck_ep = 'Gold Chain'
elif n > 820000:
neck = SilverChain_2
neck_ep = 'Silver Chain'
elif n > 800000:
neck = RING_2
neck_ep = 'Ring Onchain'
elif n > 780000:
neck = BROCHE_2
neck_ep = 'Brooch'
else:
neck = none
neck_ep = 'None'
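# Base female-elf face. Same legend as the male elf, plus LI1 for the lips
# (recoloured by the lipstick roll above) and SK2 for a skin highlight.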
ELFE=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK2,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,FR1,FR1,FR1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,SK1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,FR1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,LI1,LI1,LI1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = ELFE
elif b > 460000:
race_ep = 'Dwarves'
type_ep = 'Firebeards'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 800000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 200000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_2
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
HR0 = (255,193,0)
HR8 = (251,114,7)
HR3 = (210,154,0)
HR4 = (166,165,165)
HR5 = (165,108,0)
HR6 = (111,0,48)
HR7 = (85,57,23)
if e > 860000:
HR1 = (50,50,50) #Ok
HR2 = (200,200,200)
hair_color_ep = 'Dark Grey & Silver'
elif e > 720000:
HR1 = HR8
HR2 = (111,0,48) #ok
hair_color_ep = 'Orange & Black Rose'
elif e > 580000:
HR1 = HR3 #ok
HR2 = (210,210,0)
hair_color_ep = 'Fair & Wattle'
elif e > 440000:
HR1 = (80,50,30) #Ok
HR2 = (44,4,9)
hair_color_ep = 'Bronze & Chocolate'
elif e > 300000:
HR1 = HR5
HR2 = HR3
hair_color_ep = 'Ginger & Fair'
elif e > 150000:
HR1 = (220,130,0) #ok
HR2 = (70,40,10)
hair_color_ep = 'Mango & Brown'
else:
HR1 = (210,210,210) #Ok
HR2 = (210,210,210)
hair_color_ep = 'Grey Goose'
seed(e)
f=randint(0,1000000)  # rolled but unused for dwarves; kept so the seed chain stays aligned
seed(f)
tete = 0  # becomes 99 when the Dwarf Helmet is rolled; eyewear is then suppressed
g=randint(0,1000000)
if g > 960000:
hair_prop = CAP_2
hair_prop_ep = 'Cap'
elif g > 940000:
hair_prop = COWBOY_2
hair_prop_ep = 'Cowboy Hat'
elif g > 920000:
hair_prop = TOPHAT_2
hair_prop_ep = 'Top Hat'
elif g > 890000:
hair_prop = Helmet
hair_prop_ep = 'Dwarf Helmet'
tete = 99
elif g > 870000:
hair_prop = FORCAP_2
hair_prop_ep = 'Cap Forward'
elif g > 850000:
hair_prop = BANDANA_2
hair_prop_ep = 'Bandana'
elif g > 830000:
hair_prop = FEDORA_2
hair_prop_ep = 'Fedora'
elif g > 800000:
hair_prop = POLICE_2
hair_prop_ep = 'Police'
else:
hair_prop = none
hair_prop_ep = 'None'
seed(g)
h=randint(0,1000000)
if h > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif h > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif h > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 970000:
eyes = TD_2
eyes_prop_ep ='3D Glasses'
elif i > 930000:
eyes = VR_2
eyes_prop_ep ='VR'
elif i > 880000:
eyes = ClassicShades_2
eyes_prop_ep ='Classic Shades'
elif i > 830000:
eyes = SmallShades_2
eyes_prop_ep ='Small Shades'
elif i > 780000:
eyes = EyePatch_2
eyes_prop_ep ='Eye Patch'
elif i > 730000:
eyes = NerdGlasses_2
eyes_prop_ep ='Nerd Glasses'
elif i > 680000:
eyes = BigShades_2
eyes_prop_ep ='Big Shades'
elif i > 650000:
eyes = EyeMask_2
eyes_prop_ep ='Eye Mask'
elif i > 600000:
eyes = HornedRimGlasses_2
eyes_prop_ep ='Horned Rim Glasses'
elif i > 550000:
eyes = RegularShades_2
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
if tete == 99:
eyes = none
eyes_prop_ep ='None'
seed(i)
j=randint(0,1000000)
if j > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
seed(j)
k=randint(0,1000000)
if k > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif k > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
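# NOTE: MOLE / SCARE_1 / ROSY_1 are redefined verbatim inside every race
# branch; hoisting them above the branch chain would shrink this script.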
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(k)
l=randint(0,1000000)
if l > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif l > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
elif l > 870000:
blemishes = SCARE_1
blemishe_ep = 'Scar'
else:
blemishes = none
blemishe_ep = 'None'
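# Base Firebeard dwarf. HR1 is the main hair/beard colour and HR2 the
# highlight/braid colour picked by the two-tone roll above.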
DWARF_1=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR2,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR2,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR2,HR1,HR1,HR1,SK1,SK1,SK1,HR1,HR1,HR1,HR2,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR2,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR2,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR2,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,HR1,HR2,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR2,HR1,FR1,HR1,SK1,SK1,SK1,SK1,SK1,HR1,SK1,SK1,FR1,HR1,HR1,HR2,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR2,HR1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,SK1,FR1,HR1,HR1,HR2,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR2,HR1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,SK1,FR1,HR1,HR1,HR2,BG1,FR2],
[FR2,BG1,BG1,BG1,HR2,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,FR1,HR1,HR1,HR2,BG1,FR2],
[FR2,BG1,BG1,BG1,HR2,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,HR1,HR1,HR1,BG1,HR2,FR2],
[FR2,BG1,BG1,HR2,BG1,HR1,HR1,FR1,SK1,SK1,FR1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,HR1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,HR2,BG1,HR1,HR1,FR1,HR1,HR2,HR2,HR2,HR2,HR1,HR1,SK1,SK1,FR1,HR1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,HR2,BG1,BG1,HR1,FR1,HR1,HR2,HR1,HR1,HR1,HR1,HR2,HR1,HR1,HR1,FR1,HR1,HR1,HR1,HR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,FR1,HR1,HR2,HR1,FR1,FR1,FR1,HR1,HR2,HR1,HR1,HR1,FR1,HR1,HR1,HR1,HR1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,FR1,HR1,HR2,HR1,HR1,HR1,HR1,HR1,HR2,HR1,HR1,HR1,FR1,HR1,HR1,HR1,HR1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,HR2,HR1,HR1,HR1,HR1,HR1,HR2,HR1,HR1,HR1,FR1,HR1,HR1,HR1,HR1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR2,FR1,HR1,HR1,HR1,HR1,HR1,HR1,HR2,FR1,FR1,BG1,BG1,HR1,HR1,HR1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR2,HR1,BG1,FR1,HR1,HR1,FR1,FR1,FR1,FR1,HR2,FR1,BG1,BG1,BG1,HR1,HR1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,FR1,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = DWARF_1
elif b > 450000:
race_ep = 'Dwarves'
type_ep = 'Blacklocks'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 800000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 200000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_2
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
HR0 = (255,193,0)
HR8 = (251,114,7)
HR3 = (210,154,0)
HR4 = (166,165,165)
HR5 = (165,108,0)
HR6 = (111,0,48)
HR7 = (85,57,23)
if e > 860000:
HR1 = (50,50,50) #Ok
HR2 = (200,200,200)
hair_color_ep = 'Dark Grey & Silver'
elif e > 720000:
HR1 = HR8
HR2 = (111,0,48) #ok
hair_color_ep = 'Orange & Black Rose'
elif e > 580000:
HR1 = HR3 #ok
HR2 = (210,210,0)
hair_color_ep = 'Fair & Wattle'
elif e > 440000:
HR1 = (80,50,30) #Ok
HR2 = (44,4,9)
hair_color_ep = 'Bronze & Chocolate'
elif e > 300000:
HR1 = HR5
HR2 = HR3
hair_color_ep = 'Ginger & Fair'
elif e > 150000:
HR1 = (220,130,0) #ok
HR2 = (70,40,10)
hair_color_ep = 'Mango & Brown'
else:
HR1 = (210,210,210) #Ok
HR2 = (210,210,210)
hair_color_ep = 'Grey Goose'
seed(e)
f=randint(0,1000000)  # rolled but unused for dwarves; kept so the seed chain stays aligned
seed(f)
tete = 0  # becomes 99 when the Dwarf Helmet is rolled; eyewear is then suppressed
g=randint(0,1000000)
if g > 960000:
hair_prop = CAP_2
hair_prop_ep = 'Cap'
elif g > 940000:
hair_prop = COWBOY_2
hair_prop_ep = 'Cowboy Hat'
elif g > 920000:
hair_prop = TOPHAT_2
hair_prop_ep = 'Top Hat'
elif g > 890000:
hair_prop = Helmet
hair_prop_ep = 'Dwarf Helmet'
tete = 99
elif g > 870000:
hair_prop = FORCAP_2
hair_prop_ep = 'Cap Forward'
elif g > 850000:
hair_prop = BANDANA_2
hair_prop_ep = 'Bandana'
elif g > 830000:
hair_prop = FEDORA_2
hair_prop_ep = 'Fedora'
elif g > 800000:
hair_prop = POLICE_2
hair_prop_ep = 'Police'
else:
hair_prop = none
hair_prop_ep = 'None'
seed(g)
h=randint(0,1000000)
if h > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif h > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif h > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 970000:
eyes = TD_2
eyes_prop_ep ='3D Glasses'
elif i > 930000:
eyes = VR_2
eyes_prop_ep ='VR'
elif i > 880000:
eyes = ClassicShades_2
eyes_prop_ep ='Classic Shades'
elif i > 830000:
eyes = SmallShades_2
eyes_prop_ep ='Small Shades'
elif i > 780000:
eyes = EyePatch_2
eyes_prop_ep ='Eye Patch'
elif i > 730000:
eyes = NerdGlasses_2
eyes_prop_ep ='Nerd Glasses'
elif i > 680000:
eyes = BigShades_2
eyes_prop_ep ='Big Shades'
elif i > 650000:
eyes = EyeMask_2
eyes_prop_ep ='Eye Mask'
elif i > 600000:
eyes = HornedRimGlasses_2
eyes_prop_ep ='Horned Rim Glasses'
elif i > 550000:
eyes = RegularShades_2
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
if tete == 99:
eyes = none
eyes_prop_ep ='None'
seed(i)
j=randint(0,1000000)
if j > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
seed(j)
k=randint(0,1000000)
if k > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif k > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(k)
l=randint(0,1000000)
if l > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif l > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
elif l > 870000:
blemishes = SCARE_1
blemishe_ep = 'Scar'
else:
blemishes = none
blemishe_ep = 'None'
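# Base Blacklock dwarf, using the same HR1/HR2 two-tone palette.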
DWARF_2=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,HR1,HR1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,HR1,HR1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SK1,SK1,FR1,FR1,SK1,SK1,SK1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SK1,HR2,HR2,HR2,HR2,HR2,SK1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR2,HR2,HR2,HR2,HR2,HR2,HR2,HR2,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR2,SK1,FR1,FR1,FR1,SK1,HR2,HR2,HR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR2,HR2,HR2,SK1,HR2,HR2,HR2,SK1,SK1,HR2,HR2,HR2,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR2,BG1,HR2,HR2,SK1,SK1,HR2,SK1,SK1,SK1,HR2,HR2,FR1,HR2,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR2,BG1,HR2,HR2,FR1,FR1,HR2,FR1,FR1,SK1,HR2,HR2,FR1,HR2,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR2,BG1,BG1,HR2,HR2,BG1,BG1,HR2,BG1,FR1,SK1,HR2,HR2,FR1,BG1,HR2,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,HR2,HR2,FR2,FR2,HR2,FR2,FR1,SK1,HR2,HR2,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = DWARF_2
elif b > 440000:
race_ep = 'Dwarves'
type_ep = 'Broadbeams'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 800000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 200000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_2
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
HR0 = (255,193,0)
HR8 = (251,114,7)
HR3 = (210,154,0)
HR4 = (166,165,165)
HR5 = (165,108,0)
HR6 = (111,0,48)
HR7 = (85,57,23)
if e > 860000:
HR1 = (50,50,50) #Ok
HR2 = (200,200,200)
hair_color_ep = 'Dark Grey & Silver'
elif e > 720000:
HR1 = HR8
HR2 = (111,0,48) #ok
hair_color_ep = 'Orange & Black Rose'
elif e > 580000:
HR1 = HR3 #ok
HR2 = (210,210,0)
hair_color_ep = 'Fair & Wattle'
elif e > 440000:
HR1 = (80,50,30) #Ok
HR2 = (44,4,9)
hair_color_ep = 'Bronze & Chocolate'
elif e > 300000:
HR1 = HR5
HR2 = HR3
hair_color_ep = 'Ginger & Fair'
elif e > 150000:
HR1 = (220,130,0) #ok
HR2 = (70,40,10)
hair_color_ep = 'Mango & Brown'
else:
HR1 = (210,210,210) #Ok
HR2 = (210,210,210)
hair_color_ep = 'Grey Goose'
seed(e)
f=randint(0,1000000)  # rolled but unused for dwarves; kept so the seed chain stays aligned
seed(f)
tete = 0  # becomes 99 when the Dwarf Helmet is rolled; eyewear is then suppressed
g=randint(0,1000000)
if g > 960000:
hair_prop = CAP_2
hair_prop_ep = 'Cap'
elif g > 940000:
hair_prop = COWBOY_2
hair_prop_ep = 'Cowboy Hat'
elif g > 920000:
hair_prop = TOPHAT_2
hair_prop_ep = 'Top Hat'
elif g > 890000:
hair_prop = Helmet
hair_prop_ep = 'Dwarf Helmet'
tete = 99
elif g > 870000:
hair_prop = FORCAP_2
hair_prop_ep = 'Cap Forward'
elif g > 850000:
hair_prop = BANDANA_2
hair_prop_ep = 'Bandana'
elif g > 830000:
hair_prop = FEDORA_2
hair_prop_ep = 'Fedora'
elif g > 800000:
hair_prop = POLICE_2
hair_prop_ep = 'Police'
else:
hair_prop = none
hair_prop_ep = 'None'
seed(g)
h=randint(0,1000000)
if h > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif h > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif h > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 970000:
eyes = TD_2
eyes_prop_ep ='3D Glasses'
elif i > 930000:
eyes = VR_2
eyes_prop_ep ='VR'
elif i > 880000:
eyes = ClassicShades_2
eyes_prop_ep ='Classic Shades'
elif i > 830000:
eyes = SmallShades_2
eyes_prop_ep ='Small Shades'
elif i > 780000:
eyes = EyePatch_2
eyes_prop_ep ='Eye Patch'
elif i > 730000:
eyes = NerdGlasses_2
eyes_prop_ep ='Nerd Glasses'
elif i > 680000:
eyes = BigShades_2
eyes_prop_ep ='Big Shades'
elif i > 650000:
eyes = EyeMask_2
eyes_prop_ep ='Eye Mask'
elif i > 600000:
eyes = HornedRimGlasses_2
eyes_prop_ep ='Horned Rim Glasses'
elif i > 550000:
eyes = RegularShades_2
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
if tete == 99:
eyes = none
eyes_prop_ep ='None'
seed(i)
j=randint(0,1000000)
if j > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
seed(j)
k=randint(0,1000000)
if k > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif k > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(k)
l=randint(0,1000000)
if l > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif l > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
elif l > 870000:
blemishes = SCARE_1
blemishe_ep = 'Scare'
else:
blemishes = none
blemishe_ep = 'None'
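# Base sprite for this dwarf type, built from the palette chosen above:
# FR2 = outer frame, BG1 = background, HR1/HR2 = hair & beard tones,
# SK1 = skin, SC1 = darker skin shading (used for the brows), EY1 = eyes,
# FR1 = (apparently) the dark outline colour.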
DWARF_3=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,SK1,HR1,HR1,SK1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR1,HR1,FR1,HR1,SK1,FR1,FR1,SK1,SK1,SK1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR1,BG1,FR1,HR2,HR2,HR2,HR2,HR2,HR2,HR2,FR1,HR1,FR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,BG1,FR1,HR2,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR2,FR1,FR1,BG1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,HR1,HR1,FR1,HR2,HR1,HR1,HR1,FR1,FR1,FR1,HR1,HR1,HR1,HR2,FR1,BG1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,HR1,HR1,FR1,HR2,HR1,HR1,HR1,HR1,HR2,HR1,HR1,HR1,HR1,HR2,FR1,BG1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,HR1,HR1,FR1,HR2,HR1,HR1,HR1,HR2,HR2,HR2,HR1,HR1,HR1,HR2,FR1,BG1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,HR1,HR1,BG1,FR1,HR2,HR1,HR2,HR2,HR2,HR2,HR2,HR1,HR2,FR1,FR1,BG1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,HR1,HR1,BG1,BG1,FR1,HR2,FR1,FR1,FR1,FR1,FR1,HR2,FR1,SK1,FR1,BG1,HR1,HR1,BG1,BG1,FR2],
[FR2,FR2,FR2,HR1,HR1,FR2,FR2,FR2,FR1,FR2,FR2,FR2,FR2,FR1,FR1,SK1,SK1,FR1,FR2,HR1,HR1,FR2,FR2,FR2]
]
pixels = DWARF_3
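# 'pixels' now holds the 24x24 base grid; the overlay layers rolled above
# (blemishes, eyes, hair_prop, mouth_prop, ...) are presumably composited
# onto it further down, replacing a base cell wherever the overlay cell
# is non-zero.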
elif b > 430000:
race_ep = 'Dwarves'
type_ep = 'Stiffbeards'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
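# Every race branch starts by resetting all trait layers to 'none' and all
# trait labels to 'None' so nothing leaks in from a previously taken branch.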
seed(b)
c=randint(0,1000000)
if c > 800000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
MO1 = EY1 # mole overlay colour follows the eye tone
SCR1 = EY1 # scar overlay colour too
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 200000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_2
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
HR0 = (255,193,0)
HR8 = (251,114,7)
HR3 = (210,154,0)
HR4 = (166,165,165)
HR5 = (165,108,0)
HR6 = (111,0,48)
HR7 = (85,57,23)
if e > 860000:
HR1 = (50,50,50) #Ok
HR2 = (200,200,200)
hair_color_ep = 'Dark Grey & Silver'
elif e > 720000:
HR1 = HR8
HR2 = (111,0,48) #ok
hair_color_ep = 'Orange & Black Rose'
elif e > 580000:
HR1 = HR3 #ok
HR2 = (210,210,0)
hair_color_ep = 'Fair & Wattle'
elif e > 440000:
HR1 = (80,50,30) #Ok
HR2 = (44,4,9)
hair_color_ep = 'Bronze & Chocolate'
elif e > 300000:
HR1 = HR5
HR2 = HR3
hair_color_ep = 'Ginger & Fair'
elif e > 150000:
HR1 = (220,130,0) #ok
HR2 = (70,40,10)
hair_color_ep = 'Mango & Brown'
else:
HR1 = (210,210,210) #Ok
HR2 = (210,210,210)
hair_color_ep = 'Grey Goose'
seed(e)
f=randint(0,1000000)
seed(f)
g=randint(0,1000000)
if g > 960000:
hair_prop = CAP_2
hair_prop_ep = 'Cap'
elif g > 940000:
hair_prop = COWBOY_2
hair_prop_ep = 'Cowboy Hat'
elif g > 920000:
hair_prop = TOPHAT_2
hair_prop_ep = 'Top Hat'
elif g > 890000:
hair_prop = Helmet
hair_prop_ep = 'Dwarf Helmet'
tete = 99
elif g > 870000:
hair_prop = FORCAP_2
hair_prop_ep = 'Cap Forward'
elif g > 850000:
hair_prop = BANDANA_2
hair_prop_ep = 'Bandana'
elif g > 830000:
hair_prop = FEDORA_2
hair_prop_ep = 'Fedora'
elif g > 800000:
hair_prop = POLICE_2
hair_prop_ep = 'Police'
else:
hair_prop = none
hair_prop_ep = 'None'
seed(g)
h=randint(0,1000000)
if h > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif h > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif h > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 970000:
eyes = TD_2
eyes_prop_ep ='3D Glasses'
elif i > 930000:
eyes = VR_2
eyes_prop_ep ='VR'
elif i > 880000:
eyes = ClassicShades_2
eyes_prop_ep ='Classic Shades'
elif i > 830000:
eyes = SmallShades_2
eyes_prop_ep ='Small Shades'
elif i > 780000:
eyes = EyePatch_2
eyes_prop_ep ='Eye Patch'
elif i > 730000:
eyes = NerdGlasses_2
eyes_prop_ep ='Nerd Glasses'
elif i > 680000:
eyes = BigShades_2
eyes_prop_ep ='Big Shades'
elif i > 650000:
eyes = EyeMask_2
eyes_prop_ep ='Eye Mask'
elif i > 600000:
eyes = HornedRimGlasses_2
eyes_prop_ep ='Horned Rim Glasses'
elif i > 550000:
eyes = RegularShades_2
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
if tete == 99:
eyes = none
eyes_prop_ep ='None'
seed(i)
j=randint(0,1000000)
if j > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
seed(j)
k=randint(0,1000000)
if k > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif k > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(k)
l=randint(0,1000000)
if l > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif l > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
elif l > 870000:
blemishes = SCARE_1
blemishe_ep = 'Scare'
else:
blemishes = none
blemishe_ep = 'None'
DWARF_4=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,SK1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,SK1,SK1,FR1,FR1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,SK1,FR1,FR1,FR1,SK1,HR1,HR1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,SK1,SK1,SK1,HR1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,SK1,HR1,HR1,HR1,SK1,SK1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = DWARF_4
elif b > 420000:
race_ep = 'Dwarves'
type_ep = 'Stonefoots'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 800000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 200000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_2
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
HR0 = (255,193,0)
HR8 = (251,114,7)
HR3 = (210,154,0)
HR4 = (166,165,165)
HR5 = (165,108,0)
HR6 = (111,0,48)
HR7 = (85,57,23)
if e > 860000:
HR1 = (50,50,50) #Ok
HR2 = (200,200,200)
hair_color_ep = 'Dark Grey & Silver'
elif e > 720000:
HR1 = HR8
HR2 = (111,0,48) #ok
hair_color_ep = 'Orange & Black Rose'
elif e > 580000:
HR1 = HR3 #ok
HR2 = (210,210,0)
hair_color_ep = 'Fair & Wattle'
elif e > 440000:
HR1 = (80,50,30) #Ok
HR2 = (44,4,9)
hair_color_ep = 'Bronze & Chocolate'
elif e > 300000:
HR1 = HR5
HR2 = HR3
hair_color_ep = 'Ginger & Fair'
elif e > 150000:
HR1 = (220,130,0) #ok
HR2 = (70,40,10)
hair_color_ep = 'Mango & Brown'
else:
HR1 = (210,210,210) #Ok
HR2 = (210,210,210)
hair_color_ep = 'Grey Goose'
seed(e)
f=randint(0,1000000)
seed(f)
g=randint(0,1000000)
if g > 960000:
hair_prop = CAP_2
hair_prop_ep = 'Cap'
elif g > 940000:
hair_prop = COWBOY_2
hair_prop_ep = 'Cowboy Hat'
elif g > 920000:
hair_prop = TOPHAT_2
hair_prop_ep = 'Top Hat'
elif g > 890000:
hair_prop = Helmet
hair_prop_ep = 'Dwarf Helmet'
tete = 99
elif g > 870000:
hair_prop = FORCAP_2
hair_prop_ep = 'Cap Forward'
elif g > 850000:
hair_prop = BANDANA_2
hair_prop_ep = 'Bandana'
elif g > 830000:
hair_prop = FEDORA_2
hair_prop_ep = 'Fedora'
elif g > 800000:
hair_prop = POLICE_2
hair_prop_ep = 'Police'
else:
hair_prop = none
hair_prop_ep = 'None'
seed(g)
h=randint(0,1000000)
if h > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif h > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif h > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 970000:
eyes = TD_2
eyes_prop_ep ='3D Glasses'
elif i > 930000:
eyes = VR_2
eyes_prop_ep ='VR'
elif i > 880000:
eyes = ClassicShades_2
eyes_prop_ep ='Classic Shades'
elif i > 830000:
eyes = SmallShades_2
eyes_prop_ep ='Small Shades'
elif i > 780000:
eyes = EyePatch_2
eyes_prop_ep ='Eye Patch'
elif i > 730000:
eyes = NerdGlasses_2
eyes_prop_ep ='Nerd Glasses'
elif i > 680000:
eyes = BigShades_2
eyes_prop_ep ='Big Shades'
elif i > 650000:
eyes = EyeMask_2
eyes_prop_ep ='Eye Mask'
elif i > 600000:
eyes = HornedRimGlasses_2
eyes_prop_ep ='Horned Rim Glasses'
elif i > 550000:
eyes = RegularShades_2
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
if tete == 99:
eyes = none
eyes_prop_ep ='None'
seed(i)
j=randint(0,1000000)
if j > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
seed(j)
k=randint(0,1000000)
if k > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif k > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(k)
l=randint(0,1000000)
if l > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif l > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
elif l > 870000:
blemishes = SCARE_1
blemishe_ep = 'Scare'
else:
blemishes = none
blemishe_ep = 'None'
DWARF_5=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,SK1,SK1,SK1,HR1,HR1,HR1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SC1,SC1,HR1,SK1,HR1,SC1,SC1,HR1,SK1,HR1,FR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,HR1,SK1,FR1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,FR1,FR1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,FR1,BG1,HR1,HR1,HR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SK1,SK1,FR1,FR1,SK1,SK1,SK1,SK1,HR1,FR1,BG1,HR1,HR1,HR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SK1,SK1,HR2,HR2,SK1,SK1,SK1,HR1,HR1,FR1,BG1,HR1,HR1,HR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR2,HR2,HR2,HR2,HR2,HR1,HR1,HR1,FR1,BG1,HR1,HR1,HR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR2,HR2,FR1,FR1,FR1,HR2,HR2,HR1,HR1,FR1,BG1,HR1,HR1,HR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR2,HR2,HR1,HR2,HR2,HR2,HR1,HR2,HR2,HR1,FR1,BG1,HR1,HR1,HR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR2,HR2,HR1,HR2,HR2,HR2,HR2,HR2,HR1,HR2,HR2,FR1,BG1,HR1,HR1,HR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR2,BG1,HR2,HR2,HR2,HR1,HR2,HR2,HR2,HR1,HR2,FR1,BG1,HR1,HR1,HR1,HR1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR2,BG1,HR2,HR2,BG1,BG1,BG1,HR2,HR2,HR1,HR2,FR1,BG1,BG1,HR1,HR1,HR1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,HR2,FR2,FR2,FR2,FR2,FR1,HR2,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = DWARF_5
elif b > 410000:
race_ep = 'Dwarves'
type_ep = 'Ironfists'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 800000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 200000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_2
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
HR0 = (255,193,0)
HR8 = (251,114,7)
HR3 = (210,154,0)
HR4 = (166,165,165)
HR5 = (165,108,0)
HR6 = (111,0,48)
HR7 = (85,57,23)
if e > 860000:
HR1 = (50,50,50) #Ok
HR2 = (200,200,200)
hair_color_ep = 'Dark Grey & Silver'
elif e > 720000:
HR1 = HR8
HR2 = (111,0,48) #ok
hair_color_ep = 'Orange & Black Rose'
elif e > 580000:
HR1 = HR3 #ok
HR2 = (210,210,0)
hair_color_ep = 'Fair & Wattle'
elif e > 440000:
HR1 = (80,50,30) #Ok
HR2 = (44,4,9)
hair_color_ep = 'Bronze & Chocolate'
elif e > 300000:
HR1 = HR5
HR2 = HR3
hair_color_ep = 'Ginger & Fair'
elif e > 150000:
HR1 = (220,130,0) #ok
HR2 = (70,40,10)
hair_color_ep = 'Mango & Brown'
else:
HR1 = (210,210,210) #Ok
HR2 = (210,210,210)
hair_color_ep = 'Grey Goose'
seed(e)
f=randint(0,1000000)
seed(f)
g=randint(0,1000000)
if g > 960000:
hair_prop = CAP_2
hair_prop_ep = 'Cap'
elif g > 940000:
hair_prop = COWBOY_2
hair_prop_ep = 'Cowboy Hat'
elif g > 920000:
hair_prop = TOPHAT_2
hair_prop_ep = 'Top Hat'
elif g > 890000:
hair_prop = Helmet
hair_prop_ep = 'Dwarf Helmet'
tete = 99
elif g > 870000:
hair_prop = FORCAP_2
hair_prop_ep = 'Cap Forward'
elif g > 850000:
hair_prop = BANDANA_2
hair_prop_ep = 'Bandana'
elif g > 830000:
hair_prop = FEDORA_2
hair_prop_ep = 'Fedora'
elif g > 800000:
hair_prop = POLICE_2
hair_prop_ep = 'Police'
else:
hair_prop = none
hair_prop_ep = 'None'
seed(g)
h=randint(0,1000000)
if h > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif h > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif h > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 970000:
eyes = TD_2
eyes_prop_ep ='3D Glasses'
elif i > 930000:
eyes = VR_2
eyes_prop_ep ='VR'
elif i > 880000:
eyes = ClassicShades_2
eyes_prop_ep ='Classic Shades'
elif i > 830000:
eyes = SmallShades_2
eyes_prop_ep ='Small Shades'
elif i > 780000:
eyes = EyePatch_2
eyes_prop_ep ='Eye Patch'
elif i > 730000:
eyes = NerdGlasses_2
eyes_prop_ep ='Nerd Glasses'
elif i > 680000:
eyes = BigShades_2
eyes_prop_ep ='Big Shades'
elif i > 650000:
eyes = EyeMask_2
eyes_prop_ep ='Eye Mask'
elif i > 600000:
eyes = HornedRimGlasses_2
eyes_prop_ep ='Horned Rim Glasses'
elif i > 550000:
eyes = RegularShades_2
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
if tete == 99:
eyes = none
eyes_prop_ep ='None'
seed(i)
j=randint(0,1000000)
if j > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
seed(j)
k=randint(0,1000000)
if k > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif k > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(k)
l=randint(0,1000000)
if l > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif l > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
elif l > 870000:
blemishes = SCARE_1
blemishe_ep = 'Scare'
else:
blemishes = none
blemishe_ep = 'None'
DWARF_6=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,HR2,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR2,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,HR1,HR2,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR2,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR2,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR2,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR2,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR2,HR1,FR1,SK1,SK1,FR1,FR1,SK1,SK1,SK1,SK1,SK1,HR2,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR2,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,HR2,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR2,BG1,FR1,SK1,SK1,HR1,HR1,HR1,SK1,SK1,SK1,SK1,FR1,HR1,HR2,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR2,HR1,BG1,FR1,SK1,HR1,FR1,FR1,FR1,HR1,SK1,SK1,SK1,FR1,BG1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,BG1,BG1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR2,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR2,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR2,HR1,HR2,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR2,HR1,HR2,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR2,HR1,HR2,HR1,HR1,HR1,HR1,BG1,HR1,HR1,HR1,HR1,HR2,HR1,HR2,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,HR1,HR2,HR1,FR2,HR1,HR1,FR2,FR2,FR1,HR1,HR1,SK1,HR1,HR2,HR1,FR2,FR2,FR2,FR2]
]
pixels = DWARF_6
elif b > 400000:
race_ep = 'Dwarves'
type_ep = 'Longbeards'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 800000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 200000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_2
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
HR0 = (255,193,0)
HR8 = (251,114,7)
HR3 = (210,154,0)
HR4 = (166,165,165)
HR5 = (165,108,0)
HR6 = (111,0,48)
HR7 = (85,57,23)
if e > 860000:
HR1 = (50,50,50) #Ok
HR2 = (200,200,200)
hair_color_ep = 'Dark Grey & Silver'
elif e > 720000:
HR1 = HR8
HR2 = (111,0,48) #ok
hair_color_ep = 'Orange & Black Rose'
elif e > 580000:
HR1 = HR3 #ok
HR2 = (210,210,0)
hair_color_ep = 'Fair & Wattle'
elif e > 440000:
HR1 = (80,50,30) #Ok
HR2 = (44,4,9)
hair_color_ep = 'Bronze & Chocolate'
elif e > 300000:
HR1 = HR5
HR2 = HR3
hair_color_ep = 'Ginger & Fair'
elif e > 150000:
HR1 = (220,130,0) #ok
HR2 = (70,40,10)
hair_color_ep = 'Mango & Brown'
else:
HR1 = (210,210,210) #Ok
HR2 = (210,210,210)
hair_color_ep = 'Grey Goose'
seed(e)
f=randint(0,1000000)
seed(f)
g=randint(0,1000000)
if g > 960000:
hair_prop = CAP_2
hair_prop_ep = 'Cap'
elif g > 940000:
hair_prop = COWBOY_2
hair_prop_ep = 'Cowboy Hat'
elif g > 920000:
hair_prop = TOPHAT_2
hair_prop_ep = 'Top Hat'
elif g > 890000:
hair_prop = Helmet
hair_prop_ep = 'Dwarf Helmet'
tete = 99
elif g > 870000:
hair_prop = FORCAP_2
hair_prop_ep = 'Cap Forward'
elif g > 850000:
hair_prop = BANDANA_2
hair_prop_ep = 'Bandana'
elif g > 830000:
hair_prop = FEDORA_2
hair_prop_ep = 'Fedora'
elif g > 800000:
hair_prop = POLICE_2
hair_prop_ep = 'Police'
else:
hair_prop = none
hair_prop_ep = 'None'
seed(g)
h=randint(0,1000000)
if h > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif h > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif h > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 970000:
eyes = TD_2
eyes_prop_ep ='3D Glasses'
elif i > 930000:
eyes = VR_2
eyes_prop_ep ='VR'
elif i > 880000:
eyes = ClassicShades_2
eyes_prop_ep ='Classic Shades'
elif i > 830000:
eyes = SmallShades_2
eyes_prop_ep ='Small Shades'
elif i > 780000:
eyes = EyePatch_2
eyes_prop_ep ='Eye Patch'
elif i > 730000:
eyes = NerdGlasses_2
eyes_prop_ep ='Nerd Glasses'
elif i > 680000:
eyes = BigShades_2
eyes_prop_ep ='Big Shades'
elif i > 650000:
eyes = EyeMask_2
eyes_prop_ep ='Eye Mask'
elif i > 600000:
eyes = HornedRimGlasses_2
eyes_prop_ep ='Horned Rim Glasses'
elif i > 550000:
eyes = RegularShades_2
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
if tete == 99:
eyes = none
eyes_prop_ep ='None'
seed(i)
j=randint(0,1000000)
if j > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
seed(j)
k=randint(0,1000000)
if k > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif k > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(k)
l=randint(0,1000000)
if l > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif l > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
elif l > 870000:
blemishes = SCARE_1
blemishe_ep = 'Scare'
else:
blemishes = none
blemishe_ep = 'None'
DWARF_7=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,SK1,HR1,HR1,HR1,HR1,SK1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR2,SK1,SK1,SK1,HR1,HR1,SK1,SK1,SK1,SK1,HR2,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR2,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR2,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR2,HR1,HR2,SK1,SK1,SK1,SK1,SK1,HR2,SK1,SK1,HR1,HR2,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR2,HR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,HR1,HR2,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR2,HR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,HR1,HR2,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR2,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR2,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR2,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR2,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR2,HR1,HR1,SK1,SK1,FR1,FR1,SK1,SK1,SK1,SK1,HR2,HR1,HR1,HR2,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR2,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR2,HR1,HR1,HR2,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR2,HR1,HR1,SK1,HR2,HR2,HR2,HR2,HR2,SK1,HR2,HR2,HR1,HR1,HR2,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR2,HR1,HR1,SK1,HR2,FR1,FR1,FR1,HR2,SK1,HR2,HR2,HR1,HR1,HR2,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR2,HR1,HR1,HR2,HR2,HR2,HR2,HR2,HR2,HR2,HR2,SK1,HR1,HR1,HR2,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR2,HR1,HR1,FR1,HR2,SK1,HR2,SK1,HR2,SK1,SK1,SK1,HR1,HR1,HR2,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR2,HR1,HR1,HR1,HR2,FR1,FR1,FR1,FR1,FR1,HR2,SK1,SK1,HR1,HR1,HR1,HR2,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR2,HR1,BG1,HR2,BG1,BG1,BG1,BG1,BG1,FR1,SK1,HR2,SK1,FR1,BG1,HR1,HR2,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = DWARF_7
elif b > 250000:
race_ep = 'Gobelins'
type_ep = 'None'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 800000:
SK1 = (112,168,104) #GREEN
SC1 = (88,117,83)
MO1 = SC1
SCR1 = SC1
skin_ep = 'Green'
elif c > 700000:
SK1 = (145,0,185) #PURPLE
SC1 = (120,0,160)
MO1 = SC1
SCR1 = SC1
skin_ep = 'Purple'
elif c > 400000:
SK1 = (185,160,60) #CAMEL
SC1 = (150,125,25)
MO1 = SC1
SCR1 = SC1
skin_ep = 'Camel'
else:
SK1 = (205,205,57) #YELLOW
SC1 = (130,119,23)
MO1 = SC1
SCR1 = SC1
skin_ep = 'Wattle'
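# Goblin skins are flat single tones; the shading colour SC1 doubles as the
# mole (MO1) and scar (SCR1) overlay colour, unlike the dwarf branches where
# those overlays reuse the eye tone EY1.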
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_2
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
if e > 960000:
hair_prop = CAP_2
hair_prop_ep = 'Cap'
elif e > 940000:
hair_prop = COWBOY_5
hair_prop_ep = 'Cowboy Hat'
elif e > 920000:
hair_prop = TOPHAT_2
hair_prop_ep = 'Top Hat'
elif e > 870000:
hair_prop = KNITTED_5
hair_prop_ep = 'Knitted Cap'
elif e > 850000:
hair_prop = Gobelin_Crown
hair_prop_ep = 'Gobelins Crown'
elif e > 830000:
hair_prop = FORCAP_2
hair_prop_ep = 'Cap Forward'
elif e > 800000:
hair_prop = BANDANA_2
hair_prop_ep = 'Bandana'
elif e > 780000:
hair_prop = FEDORA_5
hair_prop_ep = 'Fedora'
elif e > 750000:
hair_prop = POLICE_2
hair_prop_ep = 'Police'
elif e > 740000:
hair_prop = BEANI_2
hair_prop_ep = 'Beanie'
else:
hair_prop = none
hair_prop_ep = 'None'
seed(e)
f=randint(0,1000000)
if f > 900000:
neck = GoldChain_1
neck_ep = 'Gold Chain'
elif f > 820000:
neck = SilverChain_1
neck_ep = 'Silver Chain'
elif f > 800000:
neck = RING_1
neck_ep = 'Ring Onchain'
else:
neck = none
neck_ep = 'None'
seed(f)
g=randint(0,1000000)
if g > 300000:
DE1 = (255,255,255)
tooth_color_ep = 'White'
elif g > 200000:
DE1 = (163,110,16)
tooth_color_ep = 'Brown'
elif g > 80000:
DE1 = (255,203,0)
tooth_color_ep = 'Gold'
else:
DE1 = (200,0,0)
tooth_color_ep = 'Blood'
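# DE1 colours the two visible teeth in the GOBELIN grid below.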
seed(g)
h=randint(0,1000000)
if h > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif h > 880000:
mouth_prop = MASK_1
mouth_prop_ep = 'Medical Mask'
elif h > 820000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif h > 780000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 400000:
EY1 = (255,255,255)
eyes_color_ep = 'White'
elif i > 300000:
EY1 = (214,92,26)
eyes_color_ep = "Orange"
elif i > 200000:
EY1 = (176,61,133)
eyes_color_ep = "Purple"
elif i > 100000:
EY1 = (255,255,0)
eyes_color_ep = 'Yellow'
else:
EY1 = (255,0,0)
eyes_color_ep = 'Red'
seed(i)
j=randint(0,1000000)
if j > 970000:
eyes = TD_2
eyes_prop_ep ='3D Glasses'
elif j > 930000:
eyes = VR_2
eyes_prop_ep ='VR'
elif j > 880000:
eyes = ClassicShades_2
eyes_prop_ep ='Classic Shades'
elif j > 830000:
eyes = SmallShades_2
eyes_prop_ep ='Small Shades'
elif j > 780000:
eyes = EyePatch_2
eyes_prop_ep ='Eye Patch'
elif j > 730000:
eyes = NerdGlasses_2
eyes_prop_ep ='Nerd Glasses'
elif j > 680000:
eyes = BigShades_2
eyes_prop_ep ='Big Shades'
elif j > 650000:
eyes = EyeMask_2
eyes_prop_ep ='Eye Mask'
elif j > 600000:
eyes = HornedRimGlasses_2
eyes_prop_ep ='Horned Rim Glasses'
elif j > 550000:
eyes = RegularShades_2
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
SCARE_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,SCR1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(j)
k=randint(0,1000000)
if k > 970000: # note: ROSY_1 is declared above but is never selectable in this goblin branch
blemishes = MOLE
blemishe_ep = 'Mole'
elif k > 940000:
blemishes = SCARE_1
blemishe_ep = 'Scare'
else:
blemishes = none
blemishe_ep = 'None'
GOBELIN=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,FR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,FR1,SK1,FR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,SK1,FR1,SK1,FR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,SK1,FR1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,DE1,SK1,SK1,DE1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,FR1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = GOBELIN
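# ---- Orcs branch (b > 150000): reset every trait slot, then roll orc traits. ----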
elif b > 150000:
race_ep = 'Orcs'
type_ep = 'None'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
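# Orc skin tone: 15% Smokey Grey, 25% Moon Grey, 50% Sand, 10% Red.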
seed(b)
c=randint(0,1000000)
if c > 850000:
SK1 = (112,112,112) #grey
SC1 = (64,64,64)
MO1 = SC1
SCR1 = SC1
skin_ep = 'Smokey Grey'
elif c > 600000:
SK1 = (220,220,220) # light grey (Moon Grey)
SC1 = (180,180,180)
MO1 = SC1
SCR1 = SC1
skin_ep = 'Moon Grey'
elif c > 100000:
SK1 = (180,145,115) #Sand
SC1 = (120,100,60)
MO1 = SC1
SCR1 = SC1
skin_ep = 'Sand'
else:
SK1 = (153,0,0) #red
SC1 = (102,0,0)
MO1 = SC1
SCR1 = SC1
skin_ep = 'Red'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_1
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
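# Orc head-wear roll on e; drawing ORC_HELMET sets tonton = 99 (tonton and
# tantan are assumed to be initialised earlier in the full script) so that
# clashing eyewear can be dropped further below.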
e=randint(0,1000000)
if e > 960000:
hair_prop = CAP_2
hair_prop_ep = 'Cap'
elif e > 940000:
hair_prop = COWBOY_4
hair_prop_ep = 'Cowboy Hat'
elif e > 920000:
hair_prop = TOPHAT_2
hair_prop_ep = 'Top Hat'
elif e > 870000:
hair_prop = KNITTED_6
hair_prop_ep = 'Knitted Cap'
elif e > 860000:
hair_prop = HEADBAND_2
hair_prop_ep = 'Headband'
elif e > 830000:
hair_prop = FORCAP_2
hair_prop_ep = 'Cap Forward'
elif e > 800000:
hair_prop = BANDANA_2
hair_prop_ep = 'Bandana'
elif e > 780000:
hair_prop = FEDORA_2
hair_prop_ep = 'Fedora'
elif e > 750000:
hair_prop = POLICE_2
hair_prop_ep = 'Police'
elif e > 740000:
hair_prop = BEANI_2
hair_prop_ep = 'Beanie'
elif e > 700000:
hair_prop = ORC_HELMET
hair_prop_ep = 'Orc Helmet'
tonton = 99
else:
hair_prop = none
hair_prop_ep = 'None'
seed(e)
f=randint(0,1000000)
if f > 900000:
neck = GoldChain_1
neck_ep = 'Gold Chain'
elif f > 820000:
neck = SilverChain_1
neck_ep = 'Silver Chain'
elif f > 800000:
neck = RING_1
neck_ep = 'Ring Onchain'
else:
neck = none
neck_ep = 'None'
seed(f)
g=randint(0,1000000)
if g > 300000:
DE1 = (255,255,255)
tooth_color_ep = 'White'
elif g > 200000:
DE1 = (163,110,16)
tooth_color_ep = 'Brown'
elif g > 80000:
DE1 = (255,203,0)
tooth_color_ep = 'Gold'
else :
DE1 = (200,0,0)
tooth_color_ep = 'Blood'
seed(g)
h=randint(0,1000000)
if h > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif h > 880000:
mouth_prop = MASK_1
mouth_prop_ep = 'Medical Mask'
elif h > 820000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif h > 780000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 400000:
EY1 = (255,255,255)
eyes_color_ep = 'White'
elif i > 300000:
EY1 = (214,92,26)
eyes_color_ep = "Orange"
elif i > 200000:
EY1 = (176,61,133)
eyes_color_ep = "Purple"
elif i > 100000:
EY1 = (255,255,0)
eyes_color_ep = 'Yellow'
else:
EY1 = (255,0,0)
eyes_color_ep = 'Red'
seed(i)
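# Orc eyewear roll on j; the ~60% 'None' outcome flags tantan = 99 for the
# helmet-conflict check after this block.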
j=randint(0,1000000)
if j > 970000:
eyes = TD_2
eyes_prop_ep ='3D Glasses'
elif j > 930000:
eyes = VR_2
eyes_prop_ep ='VR'
elif j > 880000:
eyes = ClassicShades_2
eyes_prop_ep ='Classic Shades'
elif j > 830000:
eyes = SmallShades_2
eyes_prop_ep ='Small Shades'
elif j > 780000:
eyes = EyePatch_2
eyes_prop_ep ='Eye Patch'
elif j > 730000:
eyes = BigShades_2
eyes_prop_ep ='Big Shades'
elif j > 700000:
eyes = EyeMask_2
eyes_prop_ep ='Eye Mask'
elif j > 650000:
eyes = HornedRimGlasses_2
eyes_prop_ep ='Horned Rim Glasses'
elif j > 600000:
eyes = RegularShades_2
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
tantan = 99
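# If an orc helmet was drawn (tonton == 99) but eyewear was also drawn
# (tantan != 99), drop the eyewear so the two sprites do not overlap.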
if tonton == 99 and tantan != 99:
eyes = none
eyes_prop_ep = 'None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
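# The orc blemish roll below only ever selects MOLE (~3%); ROSY_1 is defined
# here but not used for this race.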
seed(j)
k=randint(0,1000000)
if k > 970000:
blemishes = MOLE
blemishe_ep = 'Mole'
else:
blemishes = none
blemishe_ep = 'None'
ORC=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,FR1,SK1,SK1,SK1,FR1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,SK1,SK1,FR1,SK1,FR1,SK1,SK1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,SK1,SK1,SK1,FR1,SK1,SK1,FR1,BG1,BG1,BG1,FR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,FR1,FR1,SK1,FR1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,FR1,FR1,SK1,SK1,FR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,SK1,SK1,FR1,FR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,SK1,FR1,SK1,SK1,SK1,SK1,FR1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,DE1,SK1,SK1,DE1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = ORC
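# ---- Wizards, White variant (b > 135000) ----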
elif b > 135000:
race_ep = 'Wizards'
type_ep = 'White'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
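# Wizard skin tone: uniform 25% each of Albino / Light / Mid / Dark; MO1 and
# SCR1 reuse the eye colour for the blemish pixels.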
seed(b)
c=randint(0,1000000)
if c > 750000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 250000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_1
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
if e > 750000:
HR1 = (140,140,140)
hair_color_ep = 'Granite'
elif e > 500000:
HR1 = (90,90,90)
hair_color_ep = 'Carbon Grey'
elif e > 250000:
HR1 = (240,240,240)
hair_color_ep = 'Seashell'
else:
HR1 = (190,190,190)
hair_color_ep = 'Silver'
seed(e)
f=randint(0,1000000)
if f > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif f > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif f > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(f)
g=randint(0,1000000)
if g > 950000:
hair_prop = COWBOY_7
hair_prop_ep = 'Cowboy Hat'
elif g > 900000:
hair_prop = TOPHAT_7
hair_prop_ep = 'Top Hat'
elif g > 850000:
hair_prop = KNITTED_7
hair_prop_ep = 'Knitted Cap'
elif g > 800000:
hair_prop = FORCAP_7
hair_prop_ep = 'Cap Forward'
elif g > 750000:
hair_prop = FEDORA_7
hair_prop_ep = 'Fedora'
elif g > 700000:
hair_prop = BANDANA_7
hair_prop_ep = 'Bandana'
elif g > 650000:
hair_prop = POLICE_7
hair_prop_ep = 'Police'
elif g > 600000:
hair_prop = CAP_7
hair_prop_ep = 'Cap'
else:
hair_prop = none
hair_prop_ep = 'None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(g)
h=randint(0,1000000)
if h > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif h > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
else:
blemishes = none
blemishe_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 970000:
eyes = TD_1
eyes_prop_ep ='3D Glasses'
elif i > 930000:
eyes = VR_1
eyes_prop_ep ='VR'
elif i > 880000:
eyes = ClassicShades_1
eyes_prop_ep ='Classic Shades'
elif i > 830000:
eyes = EyePatch_1
eyes_prop_ep ='Eye Patch'
elif i > 780000:
eyes = NerdGlasses_1
eyes_prop_ep ='Nerd Glasses'
elif i > 730000:
eyes = BigShades_1
eyes_prop_ep ='Big Shades'
elif i > 700000:
eyes = EyeMask_1
eyes_prop_ep ='Eye Mask'
elif i > 650000:
eyes = HornedRimGlasses_1
eyes_prop_ep ='Horned Rim Glasses'
elif i > 600000:
eyes = RegularShades_1
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
seed(i)
j=randint(0,1000000)
if j > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
seed(j)
k=randint(0,1000000)
if k > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif k > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
WIZ_WHITE=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,BG1,BG1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,SK1,SK1,HR1,HR1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,FR1,FR1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,HR1,HR1,HR1,HR1,HR1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,FR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,FR1,HR1,HR1,HR1,FR1,FR1,FR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,FR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR1,FR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,FR1,FR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,FR1,HR1,HR1,FR1,FR1,FR1,FR1,SK1,FR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,FR1,FR2,FR1,FR2,FR2,FR2,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = WIZ_WHITE
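# ---- Wizards, Grey variant (b > 110000) ----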
elif b > 110000:
race_ep = 'Wizards'
type_ep = 'Grey'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 750000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 250000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_1
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
if e > 750000:
CH1 = nr
CH2= (130,130,130)
HR1 = (160,160,160)
BR1 = (190,190,190)
hair_color_ep = 'Black & Granite'
elif e > 500000:
CH2 = (10,10,10)
CH1= (50,50,50)
HR1 = (160,160,160)
BR1 = (190,190,190)
hair_color_ep = 'Dark Grey & Black'
elif e > 250000:
CH1 = (130,130,130)
CH2= (230,230,230)
HR1 = (160,160,160)
BR1 = (190,190,190)
hair_color_ep = 'Granite & Seashell'
else:
CH1 = (50,50,50)
CH2= (200,200,200)
HR1 = (160,160,160)
BR1 = (190,190,190)
hair_color_ep = 'Dark Grey & Silver'
seed(e)
f=randint(0,1000000)
if f > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif f > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif f > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(f)
g=randint(0,1000000)
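# Note: g is rolled here but never tested in this branch; the grey wizard's
# hat appears to be baked into the WIZ_GREY sprite (the CH1/CH2 rows), so no
# separate head-wear roll is made.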
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(g)
h=randint(0,1000000)
if h > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif h > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
else:
blemishes = none
blemishe_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 970000:
eyes = TD_1
eyes_prop_ep ='3D Glasses'
elif i > 930000:
eyes = VR_1
eyes_prop_ep ='VR'
elif i > 880000:
eyes = ClassicShades_1
eyes_prop_ep ='Classic Shades'
elif i > 830000:
eyes = EyePatch_1
eyes_prop_ep ='Eye Patch'
elif i > 780000:
eyes = NerdGlasses_1
eyes_prop_ep ='Nerd Glasses'
elif i > 730000:
eyes = BigShades_1
eyes_prop_ep ='Big Shades'
elif i > 700000:
eyes = EyeMask_1
eyes_prop_ep ='Eye Mask'
elif i > 650000:
eyes = HornedRimGlasses_1
eyes_prop_ep ='Horned Rim Glasses'
elif i > 600000:
eyes = RegularShades_1
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
seed(i)
j=randint(0,1000000)
if j > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
seed(j)
k=randint(0,1000000)
if k > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif k > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
WIZ_GREY=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,CH1,CH1,CH1,CH1,CH1,CH1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,CH1,CH1,CH1,CH1,CH1,BG1,CH1,CH1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,BG1,BG1,CH1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,CH2,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,BG1,BG1,FR2],
[FR2,BG1,BG1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,CH1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,FR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,SK1,FR1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,FR1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,HR1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,HR1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,BR1,BR1,BR1,BR1,BR1,SK1,SK1,SK1,FR1,HR1,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,FR1,HR1,HR1,HR1,HR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,BR1,BR1,BR1,FR1,FR1,FR1,BR1,BR1,BR1,BR1,BR1,FR1,HR1,HR1,HR1,HR1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,FR1,HR1,HR1,HR1,HR1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,FR1,HR1,HR1,HR1,HR1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,FR1,FR1,BG1,BG1,HR1,HR1,HR1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,BR1,BR1,FR1,FR1,FR1,FR1,SK1,FR1,BG1,BG1,BG1,HR1,HR1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,FR1,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = WIZ_GREY
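# ---- Wizards, Tower variant (b > 85000) ----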
elif b > 85000:
race_ep = 'Wizards'
type_ep = 'Tower'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 750000:
SK1 = (234,217,217)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 250000:
SK1 = (174,139,97)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_1
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
if e > 750000:
SC1 = (80,80,80)
BR1 = (80,80,80)
HR1 = (160,160,160)
hair_color_ep = 'Grey & Carbon Grey'
elif e > 500000:
SC1 = (30,30,30)
BR1 = (30,30,30)
HR1 = (110,110,110)
hair_color_ep = 'Smokey Grey & Charcoal'
elif e > 250000:
SC1 = (80,80,80)
BR1 = (80,80,80)
HR1 = (235,235,235)
hair_color_ep = 'Seashell & Carbon Grey'
else:
SC1 = (155,155,155)
BR1 = (155,155,155)
HR1 = (235,235,235)
hair_color_ep = 'Seashell & Grey'
seed(e)
f=randint(0,1000000)
if f > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif f > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif f > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(f)
g=randint(0,1000000)
if g > 950000:
hair_prop = COWBOY_7
hair_prop_ep = 'Cowboy Hat'
elif g > 900000:
hair_prop = TOPHAT_7
hair_prop_ep = 'Top Hat'
elif g > 850000:
hair_prop = KNITTED_7
hair_prop_ep = 'Knitted Cap'
elif g > 800000:
hair_prop = FORCAP_7
hair_prop_ep = 'Cap Forward'
elif g > 750000:
hair_prop = FEDORA_7
hair_prop_ep = 'Fedora'
elif g > 700000:
hair_prop = BANDANA_7
hair_prop_ep = 'Bandana'
elif g > 650000:
hair_prop = POLICE_7
hair_prop_ep = 'Police'
elif g > 600000:
hair_prop = CAP_7
hair_prop_ep = 'Cap'
else:
hair_prop = none
hair_prop_ep = 'None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(g)
h=randint(0,1000000)
if h > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif h > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
else:
blemishes = none
blemishe_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 970000:
eyes = TD_1
eyes_prop_ep ='3D Glasses'
elif i > 930000:
eyes = VR_1
eyes_prop_ep ='VR'
elif i > 880000:
eyes = ClassicShades_1
eyes_prop_ep ='Classic Shades'
elif i > 830000:
eyes = EyePatch_1
eyes_prop_ep ='Eye Patch'
elif i > 780000:
eyes = NerdGlasses_1
eyes_prop_ep ='Nerd Glasses'
elif i > 730000:
eyes = BigShades_1
eyes_prop_ep ='Big Shades'
elif i > 700000:
eyes = EyeMask_1
eyes_prop_ep ='Eye Mask'
elif i > 650000:
eyes = HornedRimGlasses_1
eyes_prop_ep ='Horned Rim Glasses'
elif i > 600000:
eyes = RegularShades_1
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
seed(i)
j=randint(0,1000000)
if j > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
seed(j)
k=randint(0,1000000)
if k > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif k > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
WIZ_TOWER=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR1,SK1,SK1,HR1,HR1,HR1,SK1,SK1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,SK1,HR1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SC1,SC1,SC1,SK1,SK1,SC1,SC1,SC1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,FR1,FR1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,SK1,SK1,BR1,BR1,BR1,SK1,SK1,SK1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,FR1,BR1,BR1,BR1,FR1,FR1,FR1,BR1,BR1,BR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,FR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,BG1,FR1,BR1,BR1,BR1,BR1,BR1,BR1,FR1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,BG1,BG1,BG1,FR1,BR1,BR1,BR1,BR1,FR1,SK1,SK1,FR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,BR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = WIZ_TOWER
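# ---- Wizards, Wood variant (b > 60000) ----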
elif b > 60000:
race_ep = 'Wizards'
type_ep = 'Wood'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 750000:
SK1 = (234,217,217)
SC1 = (165,141,141)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Albino'
elif c > 500000:
SK1 = (219,177,128)
SC1 = (166,110,44)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
elif c > 250000:
SK1 = (174,139,97)
SC1 = (134,88,30)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
else:
SK1 = (113,63,29)
SC1 = (86,39,10)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_1
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
if e > 750000:
HR1 = (160,110,30)
HR2 = (130,60,20)
BR2 = (200,230,180)
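# BE2 is assumed to be a beard colour constant defined earlier in the full
# script; it is reused for BR1 in this and the next colour branch.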
BR1 = BE2
hair_color_ep = 'Taupe & Cookie Brown'
elif e > 500000:
HR1 = (130,90,10)
HR2 = (70,50,10)
BR2 = (200,230,180)
hair_color_ep = 'Brown & Cookie Brown'
BR1 = BE2
elif e > 250000:
HR1 = (160,110,30)
HR2 = (130,60,20)
BR2 = (60,200,180)
BR1 = (30,20,5)
hair_color_ep = 'Taupe & Graphite'
else:
HR1 = (130,90,10)
HR2 = (70,50,10)
BR2 = (60,200,180)
BR1 = (30,20,5)
hair_color_ep = 'Brown & Graphite'
seed(e)
f=randint(0,1000000)
if f > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif f > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif f > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(f)
g=randint(0,1000000)
if g > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif g > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(g)
h=randint(0,1000000)
if h > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif h > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
else:
blemishes = none
blemishe_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 970000:
eyes = TD_1
eyes_prop_ep ='3D Glasses'
elif i > 930000:
eyes = VR_1
eyes_prop_ep ='VR'
elif i > 880000:
eyes = ClassicShades_1
eyes_prop_ep ='Classic Shades'
elif i > 830000:
eyes = EyePatch_1
eyes_prop_ep ='Eye Patch'
elif i > 780000:
eyes = NerdGlasses_1
eyes_prop_ep ='Nerd Glasses'
elif i > 730000:
eyes = BigShades_1
eyes_prop_ep ='Big Shades'
elif i > 700000:
eyes = EyeMask_1
eyes_prop_ep ='Eye Mask'
elif i > 650000:
eyes = HornedRimGlasses_1
eyes_prop_ep ='Horned Rim Glasses'
elif i > 600000:
eyes = RegularShades_1
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
seed(i)
j=randint(0,1000000)
if j > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
WIZ_WOODEN=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR2,HR2,HR2,HR2,HR2,HR2,HR2,HR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR2,HR2,HR2,HR2,HR2,HR2,HR2,HR2,HR2,HR2,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR2,HR2,HR2,HR1,HR1,HR1,HR1,HR2,HR2,HR2,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,HR1,HR1,HR2,HR2,HR2,HR2,HR1,HR1,HR1,HR1,HR1,HR1,HR2,HR2,HR2,HR2,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,HR1,HR1,HR1,HR1,HR2,HR2,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR2,HR2,HR1,HR1,HR1,HR1,BG1,FR2],
[FR2,BG1,HR1,HR1,HR1,HR1,HR1,HR2,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR2,HR1,HR1,HR1,HR1,HR1,BG1,FR2],
[FR2,BG1,HR1,HR1,HR1,HR1,HR1,BR2,HR2,HR1,HR1,HR1,HR1,HR1,HR1,HR2,HR1,HR1,HR1,HR1,HR1,HR1,BG1,FR2],
[FR2,BG1,HR1,BG1,BG1,HR1,BR2,HR1,HR1,HR1,SK1,SK1,SK1,SK1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,HR1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,BR2,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BR2,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BR2,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BR2,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,BR1,FR1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BR2,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,BR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,BR2,SK1,FR1,FR1,SK1,SK1,SK1,BR1,BR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,BR1,BR2,SK1,SK1,SK1,SK1,BR1,BR1,BR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,BR2,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,BR1,BR1,FR1,FR1,FR1,BR1,BR1,BR1,BR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,BR1,BR1,BR1,BR1,BR1,FR1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,FR1,FR1,FR1,FR1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = WIZ_WOODEN
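# ---- Wizards, Blue variant (b > 35000) ----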
elif b > 35000:
race_ep = 'Wizards'
type_ep = 'Blue'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 750000:
HR1 = (30,25,200)
HR2 = (255,218,0)
SK1 = (234,217,217)
SC1 = (190,215,240)
BR1 = (190,215,240)
EY1 = (201,178,178)
SK2 = (255,255,255)
HRG3 = (220,222,234)
HRG2 = (183,179,191)
HRG4 = (203,200,212)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (226,187,185)
skin_ep = 'Albino'
MO1 = EY1
SCR1 = EY1
hair_color_ep = 'Persian Blue'
elif c > 500000:
HR1 = (10,50,100)
HR2 = (216,214,203)
SK1 = (219,177,128)
SC1 = (190,215,240)
BR1 = (190,215,240)
EY1 = (210,157,96)
SK2 = (235,203,166)
HRG3 = (213,200,183)
HRG2 = (184,163,135)
HRG4 = (209,189,164)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (215,154,104)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Light'
hair_color_ep = 'Sapphire'
elif c > 250000:
HR1 = (60,10,145)
HR2 = (255,218,0)
SK1 = (174,139,97)
SC1 = (190,215,240)
BR1 = (190,215,240)
EY1 = (167,124,71)
SK2 = (178,138,93)
HRG3 = (188,179,165)
HRG2 = (166,150,128)
HRG4 = (184,171,151)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (191,105,71)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Mid'
hair_color_ep = 'Indigo'
else:
HR1 = (30,180,220)
HR2 = (216,214,203)
SK1 = (113,63,29)
SC1 = (190,215,240)
BR1 = (190,215,240)
EY1 = (114,55,17)
SK2 = (146,79,35)
HRG3 = (155,135,127)
HRG2 = (139,121,111)
HRG4 = (156,131,115)
HRG5 = (87,101,113)
HRG1 = (0,0,0)
RC1 = (142,36,2)
MO1 = EY1
SCR1 = EY1
skin_ep = 'Dark'
hair_color_ep = 'Topaz'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_1
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
#if e > 900000:
# neck = GoldChain_1
#elif e > 700000:
# neck = SilverChain_1
#elif e > 500000:
# neck = RING_1
#else:
# neck = none
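# Neck-wear roll left commented out for this variant; neck stays None from
# the reset block above.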
seed(e)
f=randint(0,1000000)
if f > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif f > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif f > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(f)
g=randint(0,1000000)
if g > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif g > 950000:
mouth = FROWN
mouth_ep = 'Frown'
else:
mouth = none
mouth_ep = 'None'
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
ROSY_1=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,RC1,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,RC1,0,0,0,0,0,RC1,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(g)
h=randint(0,1000000)
if h > 970000:
blemishes = ROSY_1
blemishe_ep = 'Rosy Cheeks'
elif h > 900000:
blemishes = MOLE
blemishe_ep = 'Mole'
else:
blemishes = none
blemishe_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 970000:
eyes = TD_1
eyes_prop_ep ='3D Glasses'
elif i > 930000:
eyes = VR_1
eyes_prop_ep ='VR'
elif i > 880000:
eyes = ClassicShades_1
eyes_prop_ep ='Classic Shades'
elif i > 830000:
eyes = EyePatch_1
eyes_prop_ep ='Eye Patch'
elif i > 780000:
eyes = NerdGlasses_1
eyes_prop_ep ='Nerd Glasses'
elif i > 730000:
eyes = BigShades_1
eyes_prop_ep ='Big Shades'
elif i > 700000:
eyes = EyeMask_1
eyes_prop_ep ='Eye Mask'
elif i > 650000:
eyes = HornedRimGlasses_1
eyes_prop_ep ='Horned Rim Glasses'
elif i > 600000:
eyes = RegularShades_1
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
seed(i)
j=randint(0,1000000)
if j > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
WIZ_BLUE=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR2,HR2,HR2,HR2,HR2,HR2,HR2,HR1,HR1,HR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR2,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR2,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR2,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR2,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR2,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR2,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR2,SC1,SC1,SC1,SK1,SK1,SC1,SC1,SC1,SK1,HR2,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR2,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,HR2,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR2,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR2,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR2,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR2,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR2,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,HR2,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR2,SK1,SK1,FR1,FR1,SK1,SK1,SK1,SK1,SK1,HR2,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR1,HR2,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,HR2,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR1,HR2,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,HR2,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR1,HR2,BR1,BR1,BR1,FR1,FR1,FR1,BR1,BR1,BR1,BR1,BR1,HR2,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR1,HR2,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,HR2,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,HR1,HR1,HR2,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,BR1,FR1,FR1,HR2,HR1,HR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR2,BR1,BR1,BR1,BR1,BR1,BR1,BR1,FR1,SK1,HR2,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,HR1,HR1,HR2,FR1,FR1,BR1,BR1,BR1,FR1,FR1,SK1,HR2,HR1,HR1,HR1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,HR1,HR1,HR2,FR2,FR1,BR1,FR1,FR1,FR2,HR2,HR1,HR1,HR1,HR1,FR2,FR2,FR2,FR2]
]
pixels = WIZ_BLUE
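# ---- Unknown race, Male (GOLLUN sprite, b > 19000) ----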
elif b > 19000:
race_ep = 'Unknown'
type_ep = 'Male'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 750000:
SK1 = (250,200,170)
HR1 = (130,130,130)
skin_ep = 'Peach'
elif c > 500000:
SK1 = (200,170,140)
HR1 = (125,110,90)
skin_ep = 'Dust'
elif c > 250000:
SK1 = (240,210,190)
HR1 = (170,150,120)
skin_ep = 'Bone'
else:
SK1 = (195,175,165)
HR1 = (100,95,85)
skin_ep = 'Silk'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_4
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(d)
e=randint(0,1000000)
if e > 950000:
hair_prop = CAP_5
hair_prop_ep = 'Cap'
elif e > 900000:
hair_prop = KNITTED_4
hair_prop_ep = 'Knitted Cap'
elif e > 850000:
hair_prop = HEADBAND_7
hair_prop_ep = 'Headband'
elif e > 800000:
hair_prop = FORCAP_3
hair_prop_ep = 'Cap Forward'
elif e > 750000:
hair_prop = COWBOY_3
hair_prop_ep = 'Cowboy Hat'
elif e > 700000:
hair_prop = TOPHAT_3
hair_prop_ep = 'Top Hat'
else:
hair_prop = none
hair_prop_ep = 'None'
seed(e)
f=randint(0,1000000)
if f > 980000:
neck = RING_3
neck_ep = 'Ring Onchain'
elif f > 880000:
neck = GoldChain_4
neck_ep = 'Gold Chain'
tutu = 99
elif f > 800000:
neck = SilverChain_3
neck_ep = 'Silver Chain'
else:
neck = none
neck_ep = 'None'
seed(f)
g=randint(0,1000000)
if g > 975000:
mouth = SMILE
mouth_ep = 'Smile'
elif g > 950000:
mouth = FROWN
mouth_ep = 'Frown'
tyty = 99
else:
mouth = none
mouth_ep = 'None'
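# Conflict check: a Gold Chain (tutu == 99) combined with a Frown (tyty == 99)
# removes the chain (tutu/tyty are assumed to be initialised earlier in the
# full script).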
if tutu == 99 and tyty == 99:
neck = none
neck_ep = 'None'
seed(g)
h=randint(0,1000000)
if h > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif h > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif h > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 200000:
EY1 = (255,255,255)
eyes_color_ep = 'White'
elif i > 80000:
EY1 = (230,180,100)
eyes_color_ep = 'Peach'
else:
EY1 = (78,154,197)
eyes_color_ep = 'Blue'
seed(i)
j=randint(0,1000000)
if j > 950000:
eyes = ClassicShades_4
eyes_prop_ep ='Classic Shades'
elif j > 900000:
eyes = EyePatch_4
eyes_prop_ep ='Eye Patch'
elif j > 850000:
eyes = RegularShades_4
eyes_prop_ep ='Regular Shades'
else:
eyes=none
eyes_prop_ep ='None'
GOLLUN=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,HR1,HR1,HR1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SK1,HR1,SK1,HR1,SK1,HR1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SK1,SK1,HR1,SK1,HR1,SK1,HR1,SK1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SK1,HR1,SK1,SK1,HR1,SK1,HR1,SK1,HR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SK1,HR1,SK1,HR1,SK1,SK1,HR1,SK1,HR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,EY1,EY1,SK1,SK1,SK1,EY1,EY1,SK1,HR1,SK1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,HR1,SK1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SK1,HR1,SK1,SK1,SK1,SK1,HR1,SK1,HR1,SK1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,HR1,SK1,SK1,SK1,FR1,SK1,SK1,SK1,SK1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,bl,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = GOLLUN
elif b > 10000:
race_ep = 'Wraiths'
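# reset every trait slot to a neutral default; this branch then re-rolls only
# the traits a Wraith can carry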
type_ep = 'None'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
seed(b)
c=randint(0,1000000)
if c > 500000:
SK1 = (50,50,50)
HR1 = (100,100,100)
SC1 = nr
MO1 = nr
skin_ep = 'Dark Grey'
elif c > 400000:
SK1 = (128,128,128)
HR1 = (255,193,7) #GOLD
SC1 = nr
MO1 = nr
skin_ep = 'Granite'
elif c > 300000:
SK1 = (128,128,128)
HR1 = (200,130,40) #BRONZE
SC1 = nr
MO1 = nr
skin_ep = 'Granite'
elif c > 250000:
SK1 = (142,36,170) #VIOLET
HR1 = (40,5,55)
SC1 = (74,20,140)
MO1 = SC1
skin_ep = 'Eggplant'
else:
SK1 = (128,128,128)
HR1 = (230,230,230)
SC1 = (30,30,30)
MO1 = SC1
skin_ep = 'Granite'
seed(c)
d=randint(0,1000000)
if d > 750000:
ears = EARS_2
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
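# blemish overlay grid: 0 marks a transparent cell; the single MO1 cell below
# paints the mole when the blemish roll succeeds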
MOLE=[
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,MO1,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
]
seed(d)
e=randint(0,1000000)
if e > 930000:
blemishes = MOLE
blemishe_ep = 'Mole'
else:
blemishes = none
blemishe_ep = 'None'
seed(e)
f=randint(0,1000000)
if f > 900000:
neck = GoldChain_1
neck_ep = 'Gold Chain'
elif f > 820000:
neck = SilverChain_1
neck_ep = 'Silver Chain'
elif f > 800000:
neck = RING_1
neck_ep = 'Ring Onchain'
else:
neck = none
neck_ep = 'None'
seed(f)
g=randint(0,1000000)
seed(g)
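# note: g is rolled above but never mapped to a trait (Wraiths skip the mouth
# roll); re-seeding with it appears to keep the chain shape consistent across races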
h=randint(0,1000000)
if h > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif h > 880000:
mouth_prop = MASK_1
mouth_prop_ep = 'Medical Mask'
elif h > 820000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif h > 780000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(h)
i=randint(0,1000000)
if i > 400000:
EY1 = (255,255,255)
EY2 = nr
eyes_color_ep = 'White'
elif i > 300000:
EY1 = (214,92,26)
EY2 = nr
eyes_color_ep = 'Orange'
elif i > 200000:
EY1 = (176,61,133)
EY2 = nr
eyes_color_ep = 'Purple'
elif i > 100000:
EY1 = (255,255,0)
EY2 = nr
eyes_color_ep = 'Yellow'
else:
EY1 = (255,0,0)
EY2 = nr
eyes_color_ep = 'Red'
seed(i)
j=randint(0,1000000)
if j > 970000:
eyes = TD_2
eyes_prop_ep = '3D Glasses'
elif j > 930000:
eyes = VR_2
eyes_prop_ep = 'VR'
elif j > 880000:
eyes = ClassicShades_2
eyes_prop_ep = 'Classic Shades'
elif j > 830000:
eyes = SmallShades_2
eyes_prop_ep = 'Small Shades'
elif j > 780000:
eyes = EyePatch_2
eyes_prop_ep = 'Eye Patch'
elif j > 730000:
eyes = NerdGlasses_2
eyes_prop_ep = 'Nerd Glasses'
elif j > 700000:
eyes = EyeMask_2
eyes_prop_ep = 'Eye Mask'
elif j > 650000:
eyes = RegularShades_2
eyes_prop_ep = 'Regular Shades'
else:
eyes = none
eyes_prop_ep = 'None'
seed(j)
k=randint(0,1000000)
if k > 975000:
nose = NOSE_1
nose_ep = 'Clown Nose'
else:
nose = none
nose_ep = 'None'
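# the SPECTRE grid also draws SC1 (eye-socket shading from the skin roll) and
# EY2 (secondary eye tone from the eye roll) on top of the usual layers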
SPECTRE=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,HR1,HR1,HR1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,HR1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,HR1,FR1,FR1,FR1,HR1,HR1,FR1,FR1,FR1,HR1,HR1,HR1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,HR1,FR1,SK1,SK1,SK1,FR1,FR1,SK1,SK1,SK1,FR1,HR1,HR1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,HR1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,HR1,FR1,SC1,SC1,SK1,SK1,SK1,SC1,SC1,SK1,SK1,FR1,HR1,HR1,FR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,HR1,FR1,EY1,EY2,SK1,SK1,SK1,EY1,EY2,SK1,SK1,SK1,FR1,HR1,FR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,HR1,FR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,HR1,FR1,SK1,SK1,SK1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,FR1,HR1,FR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,HR1,FR1,SK1,SK1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,HR1,HR1,FR1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,HR1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,HR1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,HR1,FR1,SK1,SK1,FR1,FR1,FR1,SK1,SK1,SK1,SK1,FR1,HR1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,SK1,FR1,HR1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,HR1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,SK1,SK1,FR1,HR1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,HR1,FR1,FR1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,HR1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,HR1,HR1,HR1,HR1,FR1,FR1,SK1,FR1,FR1,HR1,FR1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,FR1,FR1,HR1,HR1,HR1,FR1,HR1,HR1,HR1,FR1,FR2,FR2,FR2,FR2]
]
pixels = SPECTRE
elif b > 7000:
race_ep = 'Dark Riders'
type_ep = 'None'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
SK1 = (118,113,113)
SK2 = (191,191,191)
SK3 = (223,223,223)
skin_ep = 'None'
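# Dark Riders use a fixed grey palette rather than a per-token skin roll;
# SK2/SK3 are defined but the DARK_RIDER grid below only references SK1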
seed(b)
c=randint(0,1000000)
if c > 750000:
ears = EARS_1
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(c)
d=randint(0,1000000)
if d > 900000:
neck = GoldChain_1
neck_ep = 'Gold Chain'
elif d > 820000:
neck = SilverChain_1
neck_ep = 'Silver Chain'
elif d > 800000:
neck = RING_1
neck_ep = 'Ring Onchain'
else:
neck = none
neck_ep = 'None'
seed(d)
e=randint(0,1000000)
if e > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif e > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif e > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(e)
f=randint(0,1000000)
if f > 400000:
EY1 = (255,255,255)
eyes_color_ep = 'White'
elif f > 300000:
EY1 = (214,92,26)
eyes_color_ep = 'Orange'
elif f > 200000:
EY1 = (176,61,133)
eyes_color_ep = 'Purple'
elif f > 100000:
EY1 = (255,255,0)
eyes_color_ep = 'Yellow'
else:
EY1 = (255,0,0)
eyes_color_ep = 'Red'
seed(f)
g=randint(0,1000000)
if g > 970000:
eyes = TD_1
eyes_prop_ep = '3D Glasses'
elif g > 930000:
eyes = VR_1
eyes_prop_ep = 'VR'
elif g > 880000:
eyes = ClassicShades_1
eyes_prop_ep = 'Classic Shades'
elif g > 830000:
eyes = EyePatch_1
eyes_prop_ep = 'Eye Patch'
elif g > 780000:
eyes = NerdGlasses_1
eyes_prop_ep = 'Nerd Glasses'
elif g > 730000:
eyes = BigShades_1
eyes_prop_ep = 'Big Shades'
elif g > 700000:
eyes = EyeMask_1
eyes_prop_ep = 'Eye Mask'
elif g > 650000:
eyes = HornedRimGlasses_1
eyes_prop_ep = 'Horned Rim Glasses'
elif g > 600000:
eyes = RegularShades_1
eyes_prop_ep = 'Regular Shades'
else:
eyes = none
eyes_prop_ep = 'None'
DARK_RIDER=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,FR1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,FR1,FR1,SK1,FR1,FR1,SK1,SK1,SK1,FR1,FR1,SK1,FR1,FR1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,EY1,SK1,SK1,SK1,FR1,EY1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,EY1,FR1,SK1,SK1,SK1,EY1,FR1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,FR1,FR1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,FR1,FR1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,FR1,FR1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,FR1,FR1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,FR1,FR1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,FR1,FR1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,FR1,FR1,FR1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = DARK_RIDER
elif b > 1000:
race_ep = 'Daemons'
type_ep = 'None'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
SK1 = (90,90,90)
FR4 = (166,166,166)
FR5 = (225,63,0)
FR3 = (240,114,48)
FR1 = nr
FR2 = bl
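# note: FR1/FR2 are reassigned for this race (nr and bl are the black/white
# constants, cf. the eye-colour roll below), overriding the frame colours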
seed(b)
c=randint(0,1000000)
seed(c)
d=randint(0,1000000)
if d > 900000:
neck = GoldChain_1
neck_ep = 'Gold Chain'
elif d > 820000:
neck = SilverChain_1
neck_ep = 'Silver Chain'
elif d > 800000:
neck = RING_1
neck_ep = 'Ring Onchain'
else:
neck = none
neck_ep = 'None'
seed(d)
e=randint(0,1000000)
if e > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif e > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif e > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(e)
f=randint(0,1000000)
if f > 500000:
EY1 = bl
eyes_color_ep = 'White'
else:
EY1 = nr
eyes_color_ep = 'Black'
seed(f)
g=randint(0,1000000)
if g > 970000:
eyes = TD_1
eyes_prop_ep = '3D Glasses'
elif g > 930000:
eyes = VR_1
eyes_prop_ep = 'VR'
elif g > 880000:
eyes = ClassicShades_1
eyes_prop_ep = 'Classic Shades'
elif g > 830000:
eyes = EyePatch_1
eyes_prop_ep = 'Eye Patch'
elif g > 780000:
eyes = NerdGlasses_1
eyes_prop_ep = 'Nerd Glasses'
elif g > 730000:
eyes = BigShades_1
eyes_prop_ep = 'Big Shades'
elif g > 700000:
eyes = EyeMask_1
eyes_prop_ep = 'Eye Mask'
elif g > 650000:
eyes = RegularShades_1
eyes_prop_ep = 'Regular Shades'
else:
eyes = none
eyes_prop_ep = 'None'
seed(g)
h=randint(0,1000000)
if h > 750000:
SK1 = (60,60,60)
FR4 = (166,166,166)
FR5 = (225,63,0)
FR3 = (240,160,0)
FR1 = nr
FR2 = bl
skin_ep = 'Dark Grey'
hair_color_ep = 'Orange'
elif h > 500000:
SK1 = (30,30,30)
FR4 = (166,166,166)
FR5 = (225,63,0)
FR3 = (240,160,0)
FR1 = nr
FR2 = bl
skin_ep = 'Charcoal'
hair_color_ep = 'Orange'
elif h > 250000:
SK1 = (60,60,60)
FR4 = (166,166,166)
FR5 = (225,63,0)
FR3 = (240,114,48)
FR1 = nr
FR2 = bl
skin_ep = 'Dark Grey'
hair_color_ep = 'Burning Orange'
else:
SK1 = (30,30,30)
FR4 = (166,166,166)
FR5 = (225,63,0)
FR3 = (240,114,48)
FR1 = nr
FR2 = bl
skin_ep = 'Charcoal'
hair_color_ep = 'Burning Orange'
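# flame palette: FR3/FR4/FR5 appear to drive the horn/flame, brow and eye-glow
# highlights in the DEAMON grid below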
DEAMON=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR3,FR3,FR3,BG1,BG1,BG1,BG1,BG1,FR3,FR3,FR3,FR3,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR3,FR1,FR1,FR1,FR3,BG1,BG1,BG1,FR3,FR1,FR1,FR1,FR1,FR3,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,FR3,FR1,FR1,FR1,FR1,FR1,FR3,FR3,FR3,FR1,FR1,FR1,FR1,FR1,FR1,FR3,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,FR3,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR3,BG1,BG1,FR2],
[FR2,BG1,FR3,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR3,BG1,FR2],
[FR2,FR3,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR3,FR1,FR1,FR1,FR3,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR3,FR2],
[FR2,FR3,FR1,FR1,FR1,FR1,FR1,FR3,FR3,SK1,FR3,FR1,FR3,SK1,FR3,FR3,FR1,FR1,FR1,FR1,FR1,FR1,FR3,FR2],
[FR2,FR3,FR1,FR1,FR1,FR1,FR3,FR1,SK1,SK1,SK1,FR3,SK1,SK1,SK1,SK1,FR3,FR1,FR1,FR1,FR1,FR1,FR3,FR2],
[FR2,FR3,FR1,FR1,FR3,FR3,BG1,FR1,FR4,FR4,SK1,SK1,SK1,FR4,FR4,SK1,SK1,FR3,FR3,FR3,FR1,FR1,FR3,FR2],
[FR2,FR3,FR1,FR1,FR3,BG1,BG1,FR1,FR5,EY1,SK1,SK1,SK1,FR5,EY1,SK1,SK1,FR1,BG1,FR3,FR1,FR1,FR3,FR2],
[FR2,FR3,FR1,FR1,FR3,BG1,BG1,FR1,SK1,SK1,FR3,SK1,FR3,SK1,SK1,SK1,SK1,FR1,BG1,FR3,FR1,FR1,FR3,FR2],
[FR2,FR3,FR1,FR1,FR3,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,SK1,FR1,BG1,FR3,FR1,FR1,FR3,FR2],
[FR2,BG1,FR3,FR1,FR1,FR3,BG1,FR1,SK1,FR3,FR3,FR3,FR3,FR3,SK1,SK1,SK1,FR1,FR3,FR1,FR1,FR3,BG1,FR2],
[FR2,BG1,BG1,FR3,FR1,FR1,FR3,FR1,SK1,FR3,FR3,FR3,FR3,FR3,SK1,SK1,SK1,FR3,FR1,FR1,FR3,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,FR3,FR1,FR3,FR1,SK1,FR3,FR3,FR3,FR3,FR3,SK1,SK1,SK1,FR3,FR1,FR3,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,FR3,BG1,FR1,SK1,SK1,FR3,FR3,FR3,SK1,SK1,SK1,SK1,FR1,FR3,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR3,FR3,FR3,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR3,SK1,SK1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,SK1,SK1,FR1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = DEAMON
else:
race_ep = 'Dark Lord'
type_ep = 'None'
hair_color_ep = 'None'
haircut_ep = 'None'
hair_prop_ep = 'None'
eyes_prop_ep = 'None'
blemishe_ep = 'None'
eyes_color_ep = 'None'
facial_hair_ep = 'None'
mouth_prop_ep = 'None'
mouth_ep = 'None'
tooth_color_ep = 'None'
nose_ep = 'None'
neck_ep = 'None'
ears_ep = 'None'
skin_ep = 'None'
ears = none
hair = none
hair_prop = none
neck = none
blemishes = none
#tooth color
mouth = none
facial_hair = none
rod = none
mouth_prop = none
#eye color
eyes = none
nose = none
SK1 = (113,113,113)
SK2 = (160,160,160)
SK3 = (223,223,223)
skin_ep = 'None'
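# SK1/SK2/SK3 give the base, mid and light grey tones used for the Dark Lord's
# shading in the grid below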
seed(b)
c=randint(0,1000000)
if c > 750000:
ears = EARS_0
ears_ep = 'Earring'
else:
ears = none
ears_ep = 'None'
seed(c)
d=randint(0,1000000)
if d > 900000:
neck = GoldChain_1
neck_ep = 'Gold Chain'
elif d > 820000:
neck = SilverChain_1
neck_ep = 'Silver Chain'
elif d > 800000:
neck = RING_1
neck_ep = 'Ring Onchain'
else:
neck = none
neck_ep = 'None'
seed(d)
e=randint(0,1000000)
if e > 900000:
mouth_prop = CIGARETTE
mouth_prop_ep = 'Cigarette'
elif e > 840000:
mouth_prop = PIPE
mouth_prop_ep = 'Pipe'
elif e > 800000:
mouth_prop = VAPE
mouth_prop_ep = 'Vape'
else:
mouth_prop = none
mouth_prop_ep = 'None'
seed(e)
f=randint(0,1000000)
if f > 400000:
EY1 = (255,255,255)
eyes_color_ep = 'White'
elif f > 300000:
EY1 = (214,92,26)
eyes_color_ep = 'Orange'
elif f > 200000:
EY1 = (176,61,133)
eyes_color_ep = 'Purple'
elif f > 100000:
EY1 = (255,255,0)
eyes_color_ep = 'Yellow'
else:
EY1 = (255,0,0)
eyes_color_ep = 'Red'
DARK_LORD=[
[FR2,FR2,FR2,FR2,FR2,FR2,FR1,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,FR2,FR2,FR2,FR2,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,BG1,BG1,BG1,BG1,BG1,FR1,BG1,BG1,BG1,BG1,BG1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,FR1,BG1,BG1,BG1,BG1,FR1,BG1,BG1,BG1,BG1,FR1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,FR1,BG1,FR1,BG1,BG1,FR1,BG1,BG1,FR1,BG1,FR1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,FR1,BG1,FR1,BG1,BG1,FR1,BG1,BG1,FR1,BG1,FR1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,FR1,FR1,BG1,FR1,FR1,BG1,FR1,BG1,FR1,FR1,BG1,FR1,FR1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,FR1,FR1,FR1,FR1,FR1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,FR1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,FR1,EY1,SK1,SK1,FR1,SK1,FR1,EY1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,EY1,FR1,SK1,SK1,FR1,SK1,EY1,FR1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,SK3,FR1,SK1,SK1,SK1,SK1,FR1,SK1,SK1,SK1,SK1,FR1,SK3,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,SK3,FR1,SK2,SK1,SK1,SK1,FR1,SK1,SK1,SK1,SK2,FR1,SK3,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,SK3,SK1,SK2,SK1,SK1,FR1,SK1,SK1,SK2,SK1,SK3,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK3,SK1,SK2,SK1,FR1,SK1,SK2,SK1,SK3,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK3,SK1,SK2,FR1,SK2,SK1,SK3,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK3,SK1,SK2,FR1,SK2,SK1,SK3,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK3,SK1,SK2,FR1,SK2,SK1,SK3,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK3,SK1,SK2,SK1,FR1,SK1,SK2,SK1,SK3,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,SK3,FR1,SK1,SK2,SK1,FR1,SK1,SK2,SK1,SK1,SK3,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,SK2,SK1,SK1,FR1,SK1,SK1,SK2,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,SK2,BG1,FR1,FR1,FR1,FR1,FR1,SK1,SK2,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,BG1,FR1,SK1,SK1,SK1,FR1,BG1,BG1,BG1,BG1,BG1,FR2],
[FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR2,FR1,SK1,SK1,SK1,FR1,FR2,FR2,FR2,FR2,FR2,FR2]
]
pixels = DARK_LORD
newtraitcombo1 = createCombo2()
traits2.append(newtraitcombo1)
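# createCombo2() (a helper defined earlier in the script) presumably snapshots
# the *_ep trait strings rolled above; traits2 collects one combo per token,
# e.g. for building rarity tables after the loop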
######################
# store every attribute layer in its own pandas DataFrame for this loop pass
df = pd.DataFrame(pixels)
df2 = pd.DataFrame(ears)
df3 = pd.DataFrame(hair)
df31 = pd.DataFrame(hair_prop)
df4 = pd.DataFrame(neck)
df5 = pd.DataFrame(blemishes)
df6 = pd.DataFrame(facial_hair)
df7 = pd.DataFrame(mouth)
df8 = pd.DataFrame(rod)
df9 = pd.DataFrame(mouth_prop)
df10 = pd.DataFrame(eyes)
df11 = pd.DataFrame(nose)
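# ------------------------------------------------------------------
# minimal compositing sketch (an assumption, not this script's own code):
# a later step could flatten the layer DataFrames onto the base grid by
# overwriting every cell where a layer holds an RGB tuple, e.g.
#
#   for layer in (df2, df3, df31, df4, df5, df6, df7, df8, df9, df10, df11):
#       mask = layer.applymap(lambda v: isinstance(v, tuple))  # coloured cells only
#       df = df.mask(mask, layer)                              # paint them over the base
# ------------------------------------------------------------------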