path (string, length 7-265)
concatenated_notebook (string, length 46-17M)
Prace_domowe/Praca_domowa1/Grupa1/KozminskiPawel/WUM_PracaDomowa1_KozminskiPawel.ipynb
###Markdown Praca domowa 1 - eksploracja danych German Credit Data Paweł Koźmiński ###Code import pandas as pd import numpy as np import sklearn import matplotlib as plt import seaborn as sns import matplotlib.pyplot as plt gcdata = pd.read_csv("https://www.mldata.io/download-csv-weka/german_credit_data/") print(gcdata.head()) np.random.seed(123) gcdata.info() ###Output <class 'pandas.core.frame.DataFrame'> RangeIndex: 1000 entries, 0 to 999 Data columns (total 21 columns): checking_account_status 1000 non-null object duration 1000 non-null int64 credit_history 1000 non-null object purpose 1000 non-null object credit_amount 1000 non-null float64 savings 1000 non-null object present_employment 1000 non-null object installment_rate 1000 non-null float64 personal 1000 non-null object other_debtors 1000 non-null object present_residence 1000 non-null float64 property 1000 non-null object age 1000 non-null float64 other_installment_plans 1000 non-null object housing 1000 non-null object existing_credits 1000 non-null float64 job 1000 non-null object dependents 1000 non-null int64 telephone 1000 non-null object foreign_worker 1000 non-null object customer_type 1000 non-null int64 dtypes: float64(5), int64(3), object(13) memory usage: 164.1+ KB ###Markdown Wprawdzie braków brak - za to większość z danych nie jest liczbowa ###Code gcdata.describe() ###Output _____no_output_____ ###Markdown Warto zauważyć, że kolumny `installment_rate`, `present_residence`, `dependents`, `customer_type` są najprawdopodniej kategoryczne (dokumentacja na stronie nie przekazuje pełni informacji nt. danych) ###Code age_dens = sns.distplot(gcdata.age, bins = 75-18) age_dens.set_title('Rozklad wieku wsrod osob starajacych sie o kredyt') ###Output _____no_output_____ ###Markdown Widzimy, że rozkład wieku wśród biorących kredyty charakteryzuje się wysoką, dodatnią skośnością, a najwięcej jest osób w wieku ok. 23-28 lat. ###Code gcdata.savings.value_counts() ###Output _____no_output_____ ###Markdown Oznaczenia: A61 - =1000Ponieważ obserwacji jest dokładnie 1000 - łatwo policzyć sobie procenty udziałów poszczególnych kategorii. ###Code gcd_violin = sns.violinplot(gcdata.present_employment, gcdata.credit_amount, order = ['A71', 'A72', 'A73', 'A74', 'A75']) #gcd_violin.set_title('Rozklad kwot zaciaganych kredytow z podzialem na zatrudnienie') plt.text(x=-1, y=23000, s='Kwoty kredytow z podzialem na zatrudnienie', fontsize=16, weight='bold') plt.text(x=-0.6, y=21500, s='A71-bezrobotni, A72 - <1 rok zatrudnienia, A73 - <1, 4), A74 - <4, 7), A75 - 7+', fontsize=9, alpha=0.75) plt.show() ###Output _____no_output_____ ###Markdown Wśród wszystkich grup podziału zatrudnienia, najwięcej kredytów branych jest na kwotę 0-5000 DM, jendak najmniejszy procent kreedytów branych w tym przedziale jest wśród osób... bezrobotnych. ###Code sns.pairplot(gcdata[["duration", "credit_amount", "age"]]) ###Output _____no_output_____ ###Markdown Trudno dopatrzeć się korelacji między wartościami pól "Czas trwania kredytu", "Wysokość kredytu" oraz "Wiek". Interesujaca kwestia: Jak historia kredytowa wpływa na ocenę klienta? 
###Code cat = sns.catplot(y = "credit_history", hue = "customer_type", kind = "count", data = gcdata, order = ["A30", "A31", "A32", "A33", "A34"]) cat.fig.suptitle("Liczba ocen wiarygodnosci klientow z podzialem na grupy historii kredytowej") ###Output _____no_output_____ ###Markdown A30 - brak kredytów w historii lub wszystkie spłacane w terminie A31 - wszystkie kredyty w danym banku spłacane terminowo A32 - aktualne kredyty dotychczas spłacane terminowo A33 - opóźnienie w spłacaniu w przeszłości A34 - konto niepokojący / istnieją kredyty w innych bankach ###Code print(gcdata.groupby("credit_history")["customer_type"].mean()-1) ###Output credit_history A30 0.625000 A31 0.571429 A32 0.318868 A33 0.318182 A34 0.170648 Name: customer_type, dtype: float64 ###Markdown Powyżej widzimy odsetek osób, które zostały negatywnie ocenione w poszczególnych grupach klientów ze względu na historię kredytową. ###Code from scipy.stats import chi2_contingency for i in gcdata.select_dtypes(include=['float64', 'int64']).columns: d=np.hstack([pd.DataFrame(gcdata.customer_type), gcdata[i].values.reshape(-1,1)]) p_value=chi2_contingency(d)[1] if p_value<0.1: print('%s - ISTOTNA' %i) else: print ('%s - NIEISTOTNA' %i) cor = gcdata.corr() corplot = sns.heatmap(cor, annot = True) bottom, top = corplot.get_ylim() corplot.set_ylim(bottom + 0.5, top - 0.5) corplot.set_title('Korelacja zmiennych') plt.show() ###Output _____no_output_____ ###Markdown Oczywiscie jest to korelacja tylko zmiennych liczbowych, żadna para jednak nie jest od siebie silnie zależna. Próba zamiany zmiennych kategorycznych na liczbowe factory ###Code from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder le = LabelEncoder() integer_encoded = le.fit_transform(gcdata["savings"]) print(integer_encoded) #invert #print(le.inverse_transform(integer_encoded)) cat_encoded = gcdata.select_dtypes(exclude = "object") for i in gcdata.select_dtypes(include = "object").columns: f = pd.Series(le.fit_transform(gcdata[i])) cat_encoded = pd.concat([cat_encoded.reset_index(drop = True), f], axis = 1) cat_encoded.columns = cat_encoded.columns[0:8].tolist() + gcdata.select_dtypes(include = "object").columns.tolist() print(cat_encoded.head()) cor = cat_encoded.corr() cor = cor>0.5 print(cor.sum()-1) ###Output duration 1 credit_amount 1 installment_rate 0 present_residence 0 age 0 existing_credits 0 dependents 0 customer_type 0 checking_account_status 0 credit_history 0 purpose 0 savings 0 present_employment 0 personal 0 other_debtors 0 property 0 other_installment_plans 0 housing 0 job 0 telephone 0 foreign_worker 0 dtype: int64 ###Markdown Z powyższej tabeli możemy odczytać, że spośród wszystkich kolumn, jedynie pomiędzy duration i credit_amount występuje korelacja o wartości współczynnika większej niż pół, co zgodne jest z naszymi poprzednimi obserwacjami. ###Code import lens ls = lens.summarise(gcdata) explorer = lens.explore(ls) explorer.describe() explorer.column_details("age") explorer.correlation() #explorer.correlation_plot() #explorer.distribution_plot('credit_amount') #niestety przy wywołaniu pojawił się błąd - issue: # https://github.com/facultyai/lens/issues/44 # nawet po instalowaniu plotly >4.0 #explorer.pairwise_density_plot('age', 'dependents') explorer.crosstab('present_employment', 'savings') ###Output _____no_output_____ ###Markdown Powyższa tabelka wydaje się świetnym rozwiązaniem do porównania zależności między zmiennymi! 
It is clear that group A71, the unemployed, tend to have rather small savings if they have any at all (and certainly not above 1000 DM). Among the other employment groups, the distribution of savings looks different. ###Code lens.interactive_explore(ls) ###Output _____no_output_____
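###Markdown A note on the significance loop above: `chi2_contingency` is normally applied to a contingency table rather than to the raw value columns stacked side by side. A minimal sketch of the conventional approach for one of the columns the author flags as probably categorical, assuming `gcdata` is still in memory: ###Code
from scipy.stats import chi2_contingency
import pandas as pd

# cross-tabulate a (likely categorical) numeric column against the target
ct = pd.crosstab(gcdata["installment_rate"], gcdata["customer_type"])
chi2, p_value, dof, expected = chi2_contingency(ct)
print("installment_rate vs customer_type: p = %.4f" % p_value)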
.ipynb_checkpoints/V1-checkpoint.ipynb
###Markdown Reflect Tables into SQLAlchemy ORM ###Code # Python SQL toolkit and Object Relational Mapper import sqlalchemy from sqlalchemy.ext.automap import automap_base from sqlalchemy.orm import Session from sqlalchemy import create_engine, func from sqlalchemy import create_engine, inspect from sqlalchemy import Column, Integer, String, Float, Date engine = create_engine("sqlite:///Resources/hawaii.sqlite") # reflect an existing database into a new model Base = automap_base() # reflect the tables Base.prepare(engine, reflect=True) # We can view all of the classes that automap found Base.classes.keys() # Save references to each table Measurement = Base.classes.measurement Station = Base.classes.station #Create our session (link) from Python to the DB session = Session(engine) ###Output _____no_output_____ ###Markdown Exploratory Climate Analysis ###Code inspector = inspect(engine) columns = inspector.get_columns('measurement') for column in columns: print(column["name"], column["type"]) columns = inspector.get_columns('station') for column in columns: print(column["name"], column["type"]) Base.metadata.create_all(engine) engine = create_engine("sqlite:///Resources/hawaii.sqlite") conn = engine.connect() Base.metadata.create_all(engine) from sqlalchemy.orm import Session session = Session(bind=engine) last_year = session.query(Measurement) for measure in last_year: print (measure.prcp) # Design a query to retrieve the last 12 months of precipitation data and plot the results # Calculate the date 1 year ago from the last data point in the database # Perform a query to retrieve the data and precipitation scores # Save the query results as a Pandas DataFrame and set the index to the date column # Sort the dataframe by date # Use Pandas Plotting with Matplotlib to plot the data ###Output _____no_output_____ ###Markdown ![precipitation](Images/precipitation.png) ###Code # Use Pandas to calcualte the summary statistics for the precipitation data ###Output _____no_output_____ ###Markdown ![describe](Images/describe.png) ###Code # Design a query to show how many stations are available in this dataset? # What are the most active stations? (i.e. what stations have the most rows)? # List the stations and the counts in descending order. # Using the station id from the previous query, calculate the lowest temperature recorded, # highest temperature recorded, and average temperature most active station? # Choose the station with the highest number of temperature observations. # Query the last 12 months of temperature observation data for this station and plot the results as a histogram ###Output _____no_output_____ ###Markdown ![precipitation](Images/station-histogram.png) ###Code # This function called `calc_temps` will accept start date and end date in the format '%Y-%m-%d' # and return the minimum, average, and maximum temperatures for that range of dates def calc_temps(start_date, end_date): """TMIN, TAVG, and TMAX for a list of dates. Args: start_date (string): A date string in the format %Y-%m-%d end_date (string): A date string in the format %Y-%m-%d Returns: TMIN, TAVE, and TMAX """ return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\ filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all() # function usage example print(calc_temps('2012-02-28', '2012-03-05')) # Use your previous function `calc_temps` to calculate the tmin, tavg, and tmax # for your trip using the previous year's data for those same dates. 
# Plot the results from your previous query as a bar chart. # Use "Trip Avg Temp" as your Title # Use the average temperature for the y value # Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr) # Calculate the total amount of rainfall per weather station for your trip dates using the previous year's matching dates. # Sort this in descending order by precipitation amount and list the station, name, latitude, longitude, and elevation ###Output _____no_output_____ ###Markdown Optional Challenge Assignment ###Code # Create a query that will calculate the daily normals # (i.e. the averages for tmin, tmax, and tavg for all historic data matching a specific month and day) def daily_normals(date): """Daily Normals. Args: date (str): A date string in the format '%m-%d' Returns: A list of tuples containing the daily normals, tmin, tavg, and tmax """ sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)] return session.query(*sel).filter(func.strftime("%m-%d", Measurement.date) == date).all() daily_normals("01-01") # calculate the daily normals for your trip # push each tuple of calculations into a list called `normals` # Set the start and end date of the trip # Use the start and end date to create a range of dates # Stip off the year and save a list of %m-%d strings # Loop through the list of %m-%d strings and calculate the normals for each date # Load the previous query results into a Pandas DataFrame and add the `trip_dates` range as the `date` index # Plot the daily normals as an area plot with `stacked=False` class Station(Base): __tablename__ = 'location' id = Column(Integer, primary_key=True) station = Column(String(100)) name = Column(String(100)) latitude = Column(Float) longitude = Column(Float) elevation = Column(Float) class Measurement(Base): __tablename__ = 'measure' id = Column(Integer, primary_key=True) station = Column(String(100)) date = Column(Date) prcp = Column(Float) tobs = Column(Float) ###Output _____no_output_____
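###Markdown One possible way to fill in the precipitation query outlined in the comments above, assuming the reflected `Measurement` class and the `session` created at the top of the notebook, and that `Measurement.date` is stored as 'YYYY-MM-DD' strings (as in the standard hawaii.sqlite file): ###Code
import datetime as dt
import pandas as pd

# most recent date in the measurement table
latest = session.query(func.max(Measurement.date)).scalar()
one_year_ago = dt.datetime.strptime(latest, "%Y-%m-%d") - dt.timedelta(days=365)

# last 12 months of precipitation, sorted and indexed by date
results = session.query(Measurement.date, Measurement.prcp) \
    .filter(Measurement.date >= one_year_ago.strftime("%Y-%m-%d")).all()
prcp_df = pd.DataFrame(results, columns=["date", "prcp"]).set_index("date").sort_index()
prcp_df.plot(rot=90)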
Fakenews-buster-website-crawler.ipynb
###Markdown Samayam - Malayalam News * Article listing : https://malayalam.samayam.com/latest-news/fact-check/articlelist/66765139.cms?curpg=1 ###Code language_code = "ml" # Parse the first page and identify how many pages are there article_list_page ="https://malayalam.samayam.com/latest-news/fact-check/articlelist/66765139.cms" response = requests.get(article_list_page) soup = BeautifulSoup(response.text, "html.parser") pages = int(soup.select("#printpage>a")[-1].string) print("Number of pages:", pages) # Now go to all pages and create a list of articles to download, # I'm bruteforcing it in a single thread for now article_list = [] for page in range(1, pages+1): # we've alreaedy fetched the first page, ideally we don't have to fetch it again. # but I'm doing it anyway since I'm lazy to handle it :P url = article_list_page + f"?curpg={page}" print(url) response = requests.get(url) soup = BeautifulSoup(response.text, "html.parser") for item in soup.select(".textsec>a"): article_list.append(item['href']) print(f"\n\nTotal {len(article_list)} articles needs to be fetched\n\n") # create ./data/ml/samayam directory if it is not there. Path(f"{data_dir}{language_code}/samayam").mkdir(parents=True, exist_ok=True) # Now go to each articles and save them for idx, article in enumerate(article_list): try: r = requests.get(article) except Exception: print("Exception while getting the article, trying next one.") article_id = article.split('/')[-1].split('.')[0] with open(f'{data_dir}{language_code}/samayam/article{article_id}.txt', 'w') as file: file.write(r.text) print(f"{idx+1}: Saved article: ", article) time.sleep(1) ###Output _____no_output_____
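###Markdown The download loop above catches request errors but then still writes `r.text`, which at that point holds the previous article's response (or is undefined if the very first request fails). A sketch of the same loop with the failed article skipped explicitly; the timeout value is an arbitrary choice: ###Code
for idx, article in enumerate(article_list):
    try:
        r = requests.get(article, timeout=30)
        r.raise_for_status()
    except Exception as exc:
        print(f"Skipping {article}: {exc}")
        continue  # without this, the previous response would be written under the new id
    article_id = article.split('/')[-1].split('.')[0]
    with open(f'{data_dir}{language_code}/samayam/article{article_id}.txt', 'w') as file:
        file.write(r.text)
    print(f"{idx+1}: Saved article: ", article)
    time.sleep(1)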
examples/01-hypercat-basics.ipynb
###Markdown Hypercat - BasicsThis introductory notebook shows some of the basic functionality of the HYPERCAT software. Advanced topics are than explored in accompanying notebooks. Disclaimer and AttributionHYPERCAT / CLUMPY models and the accompanyin software were published in two papers. If you decide to make use of either the models, the software, the example notebooks, or the included pupil images of telescopes included with HYPERCAT, please cite these two papers:* **Nikutta, Lopez-Rodriguez, Ichikawa, Levenson, Packham, Hönig, Alonso-Herrero, "Hypercubes of AGN Tori (Hypercat) -- I. Models and Image Morphology", ApJ (2021, accepted)*** **Nikutta, Lopez-Rodriguez, Ichikawa, Levenson, Packham, Hönig, Alonso-Herrero, "Hypercubes of AGN Tori (Hypercat) -- II. Resolving thetorus with Extremely Large Telescopes", ApJ (2021, under review)** Table of contents* [Imports & setup](import)* [Loading a model hypercube](load)* [Your first image](first)* [A different model](different)* [Multiple wavelengths](multiwave)* [Multiple anything](multiany)* [Accessing cloud maps](clouds)* [Further information](info) Imports and setup ###Code import warnings warnings.filterwarnings("ignore", message="dataset.value has been deprecated") from itertools import product %matplotlib inline import hypercat as hc from hypercat import plotting print(hc.__version__) ###Output 0.1.5 ###Markdown Loading a model hypercube To create a model hypercube only the location of the hdf5 file is needed. ###Code fname = 'hypercat_20200830_all.hdf5' # use your local location to the HDF5 model file (or a symlink) cube = hc.ModelCube(fname,hypercube='imgdata') ###Output Opening HDF5 file: hypercat_20200830_all.hdf5 Loading sampling parameters. Closing HDF5 file. Loading hypercube 'imgdata' [shape: (5,10,16,12,5,7,25,121,241)] to RAM (912.52 GB required) ... Inspect the loaded hypercube with .print_sampling() ------------------------------------------------------- Parameter Range Nvalues Sampled values ------------------------------------------------------- sig* [ 15.000 - 75.000] ( 5) 15.000, 30.000, 45.000, 60.000, 75.000 i* [ 0.000 - 90.000] ( 10) 0.000, 10.000, 20.000, 30.000, 40.000, 50.000, 60.000, 70.000, ... Y* [ 5.000 - 20.000] ( 16) 5.000, 6.000, 7.000, 8.000, 9.000, 10.000, 11.000, 12.000, ... N* [ 1.000 - 12.000] ( 12) 1.000, 2.000, 3.000, 4.000, 5.000, 6.000, 7.000, 8.000, ... q* [ 0.000 - 2.000] ( 5) 0.000, 0.500, 1.000, 1.500, 2.000 tv* [ 10.000 - 160.000] ( 7) 10.000, 20.000, 40.000, 60.000, 80.000,120.000,160.000 wave* [ 1.200 - 945.000] ( 25) 1.200, 2.200, 3.500, 4.800, 8.700, 9.300, 9.800, 10.000, ... x [ 0.000 - 120.000] (121) 0.000, 1.000, 2.000, 3.000, 4.000, 5.000, 6.000, 7.000, ... y [ 0.000 - 240.000] (241) 0.000, 1.000, 2.000, 3.000, 4.000, 5.000, 6.000, 7.000, ... ------------------------------------------------------- Parameters printed in bold and/or marked with an asterisk (*) are interpolable. Hypercube size: 912.519 (GB) ###Markdown This didn't really load hundreds of GBs into your memory, but has simply memory-mapped the content of the HDF5 file.The printed table lists the parameters/axes of the loaded hypercube `imgdata`, and the model parameter values each axis represents. All model images will be interplated in this 9-dimensional space. Note that only parameters with a `*` printed next to them are selectable by the user. 
`x` and `y` simply correspond to the image x and y axes, and will be interpolated implicitly (no need to worry about them, you will always receive the full image for all other parameters that you specify).For a more detailed description of the CLUMPY torus model parameters, please see https://www.clumpy.org/model-description/index.html Your first imageTo generate your first model image, you just need to pick a single location in the 9 dimensions spanned by the parameter axes for `sig, i, Y, N, q, tv` and `wave` paramaters, i.e. to construct a parameter vector. Please make sure that none of your values falls outside the range stored in the hypercube.To then generate the model image, simple call the cube object with the parameter vector as argument. ###Code vec1 = (40,85,20,4,0,20,10.0) # thes are the values for: sig [degrees], i [degrees], Y, N, q, tv, wave [micron] img1 = cube(vec1) ###Output Loading a subcube of 14.2388 MB into RAM. Squeezing all dim-1 axes... Instantiating n-dim interpolation object ... Done. ###Markdown We can now inspect what `img` is: ###Code print("Shape:", img1.shape) print("Sum of all values:", img1.sum()) ###Output Shape: (241, 241) Sum of all values: 9.295261 ###Markdown Apparently `img` is a 2D array (an image) of size 242x241 pixels with floating point values. In this case, it is the brightness map of a CLUMPY torus with parameters:- sigma = 40 degrees torus angular height)- i = 85 degrees (almost edge-on) viewing angle- Y = 20 torus radial extent- N = 4 avg. number of clouds along radial ray in the equatorial plane- q = 0 radial cloud fall-off power-law exponent- tv = 20 optical depth of single cloud in the V band (0.55 micron)- wave = 10.0 micron wavelength We can plot the image, for instance using a simple plotting helper function from Hypercat: ###Code plotting.multiplot(img1,panelsize=6,cmaps='jet'); ###Output _____no_output_____ ###Markdown A different modelTo generate a model image with different parameters, simply provide a new parameter vector: ###Code vec2 = (20,30,20,8,1,60,200.0) img2 = cube(vec2) plotting.multiplot(img2,panelsize=6,cmaps='jet'); ###Output Loading a subcube of 14.2388 MB into RAM. Squeezing all dim-1 axes... Instantiating n-dim interpolation object ... Done. ###Markdown We can also plot the images together: ###Code title = "sig = %g, i = %g, Y = %g, N = %g, q = %g, tv = %g, wave = %g" titles = (title % vec1, title % vec2) plotting.multiplot((img1,img2),panelsize=6,cmaps='jet',titles=titles,fontsize=10); ###Output _____no_output_____ ###Markdown Multiple wavelengthsSo far we saw the model interpolation with single value per parameter, but multi-valued entries per-parameter are allowed. We can use this for instance to quickly produce images of the same model but for several wavelengths. ###Code wave = (4.8,10.0,53) vec3 = (40,85,20,4,0,20,wave) img3 = cube(vec3) print("Shape:",img3.shape) ###Output Loading a subcube of 113.91 MB into RAM. Squeezing all dim-1 axes... Instantiating n-dim interpolation object ... Done. Shape: (3, 241, 241) ###Markdown The result is a mini-cube holding 3 slices (images at the different wavelengths), each 241x241 pixels in size. ###Code nslices = img3.shape[0] print(nslices) images = [img3[j,...] for j in range(nslices)] plotting.multiplot(images,cmaps='jet',panelsize=5,titles=["wave = %g mic" % wave for wave in vec3[-1]]); ###Output 3 ###Markdown Multiple anythingThe multi-value capability is available for any of the free parameters. 
For instance, let's plot a model with changing `q` values, at 10 micron: ###Code q = (0,0.5,1,1.5,2) vec4 = (40,85,20,4,q,20,10) img4 = cube(vec4) nslices = img4.shape[0] images = [img4[j,...] for j in range(nslices)] # Let's also add countour lines plotting.multiplot(images,cmaps='jet',panelsize=4,titles=["q = %g" % wave for wave in q],colorbars=None,contours='log'); ###Output Loading a subcube of 35.5969 MB into RAM. Squeezing all dim-1 axes... Instantiating n-dim interpolation object ... Done. ###Markdown Even multiple parameters can be multi-valued. Here for instance we plot a model at several values for `sig` (torus azimuthal thickness) and `i` (viewing angle) values: ###Code sig = (15,45,75) i = (0,30,60,90) vec5 = (sig,i,20,4,0,20,10) img5 = cube(vec5) print(img5.shape) ###Output Loading a subcube of 177.985 MB into RAM. Squeezing all dim-1 axes... Instantiating n-dim interpolation object ... Done. (3, 4, 241, 241) ###Markdown The result is a hypercube with 3x4 slices (images) of 241x241 pixels each. Let's plot them: ###Code images = [img5[j,k,...] for j in range(len(sig)) for k in range(len(i))] titles = ["sig = %g, i = %g" % pair for pair in product(sig,i)] fig, axes = plotting.multiplot(images,geometry=(3,4),direction='y', cmaps='inferno',panelsize=3,colorbars=None,titles=titles,\ sharex=True,sharey=True,fontsize=14) fig.subplots_adjust(wspace=0.1,hspace=0.1) ###Output _____no_output_____ ###Markdown Accessing clouds mapsThe HYPERCAT `hdf5` files also contain the projected 2D cloud maps for each model parameter vector. The pixel values in these maps encode how many dust clouds there are along the entire line of sight (LOS). Note that the number can be fractional -- it's the statistical mean over many possible realizations of the model. ###Code fname = 'hypercat_20200830_all.hdf5' # same local location to the HDF5 model file cube_clouds = hc.ModelCube(fname,hypercube='clddata') # note 'clddata' for 'cloud data', rather than 'imgdata' as before ###Output Opening HDF5 file: hypercat_20200830_all.hdf5 Loading sampling parameters. Closing HDF5 file. Loading hypercube 'clddata' [shape: (5,10,16,5,121,241)] to RAM (444.96 MB required) ... Inspect the loaded hypercube with .print_sampling() ------------------------------------------------------- Parameter Range Nvalues Sampled values ------------------------------------------------------- sig* [ 15.000 - 75.000] ( 5) 15.000, 30.000, 45.000, 60.000, 75.000 i* [ 0.000 - 90.000] ( 10) 0.000, 10.000, 20.000, 30.000, 40.000, 50.000, 60.000, 70.000, ... Y* [ 5.000 - 20.000] ( 16) 5.000, 6.000, 7.000, 8.000, 9.000, 10.000, 11.000, 12.000, ... q* [ 0.000 - 2.000] ( 5) 0.000, 0.500, 1.000, 1.500, 2.000 x [ 0.000 - 120.000] (121) 0.000, 1.000, 2.000, 3.000, 4.000, 5.000, 6.000, 7.000, ... y [ 0.000 - 240.000] (241) 0.000, 1.000, 2.000, 3.000, 4.000, 5.000, 6.000, 7.000, ... ------------------------------------------------------- Parameters printed in bold and/or marked with an asterisk (*) are interpolable. Hypercube size: 444.962 (MB) ###Markdown Observe that this hypercube of dust cloud maps is smaller: it has 6 dimensions (`imgdata` had 9), and the axes for optical depth `tv` and wavelength `wave` are missing -- the maps of how many clouds there are along a line of sight don't depend on optical depth and wavelength.This entire cube was computed assuming `N_0 = 1` for the avg. number of clouds per radial ray in the equatorial plane. 
If you are investigating a model with a different `N_0`, simply multiply the dust map by `N_0`.Let's compare a model's cloud distribution and emission morphology at a given wavelength: ###Code sig, i, Y, N, q, tv, wave = 30, 90, 20, 6.5, 0, 30, 10 vec_img = (sig,i,Y,N,q,tv,wave) vec_cld = (sig,i,Y,q) img = cube(vec_img) cld = cube_clouds(vec_cld) plotting.multiplot((img,cld*N),cmaps='gray',panelsize=5,fontsize=14,\ titles=('Image at %g micron' % wave,'Cloud number per LOS')); ###Output Loading a subcube of 14.2388 MB into RAM. Squeezing all dim-1 axes... Instantiating n-dim interpolation object ... Done. Loading a subcube of 1.77985 MB into RAM. Squeezing all dim-1 axes... Instantiating n-dim interpolation object ... Done.
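###Markdown Because `cube(vec)` accepts several values per parameter, the same call can be used for quick derived quantities such as a rough SED (total image flux as a function of wavelength). A sketch reusing only the calls demonstrated above; the parameter values are arbitrary but lie inside the sampled ranges: ###Code
waves = (2.2, 4.8, 10.0, 18.5)
sed_cube = cube((43, 75, 18, 4, 0, 40, waves))
total_flux = [sed_cube[j, ...].sum() for j in range(sed_cube.shape[0])]
for w, f in zip(waves, total_flux):
    print(f"{w:6.1f} micron : total flux {f:.3f}")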
wandb/run-20210519_214225-2viv4znv/tmp/code/00-main.ipynb
###Markdown Loading the data Clean the data Feature Enginnering Modelling Save model ###Code ## Load the data import pandas as pd data = pd.read_csv('./data.csv') data.sample(frac=1) ## Clean the data data.columns data.drop(['sqft_living','sqft_lot','waterfront','view','condition','sqft_above','sqft_basement','street','city','statezip','country'],axis=1,inplace=True) data.drop('date',axis=1,inplace=True) data.head() ## Feature Enginnering def fe(data,col): print(len(data)) max_no = data[col].quantile(0.99) min_no = data[col].quantile(0.05) data = data[data[col] > min_no] data = data[data[col] < max_no] print(len(data)) return data for col in list(data.columns): print(col) data = fe(data,'price') data.head() X = data.drop('price',axis=1) y = data['price'] from sklearn.model_selection import train_test_split X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.25) len(X_train),len(X_test) ## Modelling import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F class BaseLine_Model(nn.Module): def __init__(self,input_shape,output_shape): super().__init__() self.fc1 = nn.Linear(input_shape,64) self.fc2 = nn.Linear(64,128) self.fc3 = nn.Linear(128,256) self.fc4 = nn.Linear(256,128) self.fc5 = nn.Linear(128,output_shape) def forward(self,X): preds = self.fc1(X) preds = F.relu(preds) preds = self.fc2(preds) preds = F.relu(preds) preds = self.fc3(preds) preds = F.relu(preds) preds = self.fc4(preds) preds = F.relu(preds) preds = self.fc5(preds) return preds import wandb BATCH_SIZE = 32 PROJECT_NAME = 'House-Price-Pred' from tqdm import tqdm device = torch.device('cuda') def get_loss(criterion,X,y,model): model.eval() with torch.no_grad(): preds = model(X.float().to(device)) preds = preds.to(device) y = y.to(device) loss = criterion(preds,y) model.train() return loss.item() def get_accuracy(X,y,model): model.eval() with torch.no_grad(): correct = 0 total = 0 for i in range(len(X)): pred = model(X[i].float().to(device)) pred.to(device) if round(int(pred[0])) == round(int(y[i])): correct += 1 total += 1 if correct == 0: correct += 1 model.train() return round(correct/total,3) import numpy as np X_train = torch.from_numpy(np.array(X_train)) y_train = torch.from_numpy(np.array(y_train)) X_test = torch.from_numpy(np.array(X_test)) y_test = torch.from_numpy(np.array(y_test)) get_accuracy(X_test,y_test,model) EPOCHS = 112 # wandb.init(project=PROJECT_NAME,name='baseline') # for _ in tqdm(range(EPOCHS)): # model.to(device) # preds = model(X_train.float().to(device)) # preds = preds.view(len(preds),) # preds.to(device) # loss = criterion(preds.float(),y_train.float().to(device)) # optimizer.zero_grad() # loss.backward() # optimizer.step() # wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(X_train,y_train,model),'val_accuracy':get_accuracy(X_test,y_test,model)}) # preds # y_train # torch.round(preds) # torch.round(y_train) import matplotlib.pyplot as plt # preds[0] # y_train[0] # model.eval() # with torch.no_grad(): # # preds = model(X_test.float().to(device)) # for index in range(12): # print(preds[index][0]) # print(y_test[index]) # print('\n') ###Output _____no_output_____ ###Markdown Test modelling ###Code class Test_Model(nn.Module): def __init__(self,input_shape=5,output_shape=1,num_of_layers=2,activations=F.relu,fc1_output=64,fc2_output=128,fc3_output=256): super().__init__() self.activation = activations self.num_of_layers = num_of_layers self.fc1 = nn.Linear(input_shape,fc1_output) self.fc2 = 
nn.Linear(fc1_output,fc2_output) self.fc3 = nn.Linear(fc2_output,fc3_output) self.fc4 = nn.Linear(fc3_output,fc2_output) self.fc5 = nn.Linear(fc2_output,fc2_output) self.fc6 = nn.Linear(fc2_output,output_shape) def forward(self,X): preds = self.fc1(X) preds = self.activation(preds) preds = self.fc2(preds) preds = self.activation(preds) preds = self.fc3(preds) preds = self.activation(preds) preds = self.fc4(preds) for _ in range(self.num_of_layers): preds = self.fc5(preds) preds = self.activation(preds) preds = self.fc6(preds) return preds # num_of_layers # activations # fc1_output # fc2_output # fc3_output # criterion # optimizer activations = [nn.ELU(),nn.LeakyReLU(),nn.PReLU(),nn.ReLU(),nn.ReLU6(),nn.RReLU(),nn.SELU(),nn.CELU(),nn.GELU(),nn.SiLU(),nn.Tanh()] for activation in activations: model = Test_Model().to(device) criterion = nn.MSELoss() optimizer = optim.Adam(model.parameters(),lr=0.1) wandb.init(project=PROJECT_NAME,name=f'activation-{activation}') for _ in tqdm(range(EPOCHS)): model.to(device) preds = model(X_train.float().to(device)) preds = preds.view(len(preds),) preds.to(device) loss = criterion(preds.float(),y_train.float().to(device)) optimizer.zero_grad() loss.backward() optimizer.step() wandb.log({'loss':loss.item(),'val_loss':get_loss(criterion,X_test,y_test,model),'accuracy':get_accuracy(X_train,y_train,model),'val_accuracy':get_accuracy(X_test,y_test,model)}) ###Output wandb: Currently logged in as: ranuga-d (use `wandb login --relogin` to force relogin)
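###Markdown The `get_accuracy` helper above counts a prediction as correct only when the rounded price equals the rounded target exactly, which is almost never true for a continuous target. A hedged alternative metric (the 10% relative tolerance is an arbitrary choice), assuming the tensors, `model` and `device` defined above: ###Code
def get_tolerance_accuracy(X, y, model, tol=0.10):
    """Fraction of predictions within `tol` relative error of the true price."""
    model.eval()
    with torch.no_grad():
        preds = model(X.float().to(device)).view(-1).cpu().numpy()
    model.train()
    y_true = y.numpy()
    rel_err = np.abs(preds - y_true) / np.abs(y_true)
    return float((rel_err <= tol).mean())

# e.g. get_tolerance_accuracy(X_test, y_test, model)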
notebooks/04_01_numpy.ipynb
###Markdown Why do we need numpy?* You may have heard "Python is slow", this is true when it concerns looping over many small python objects* Python is dynamically typed and everything is an object, even an `int`. There are no primitive types.* Numpy's main feature is the `ndarray` class, a fixed length, homogeniously typed array class.* Numpy implements a lot of functionality in fast c, cython and fortran code to work on these arrays* python with vectorized operations using numpy can be blazingly fastSee: [Python is not C](https://www.ibm.com/developerworks/community/blogs/jfp/entry/Python_Is_Not_C?lang=en)But the most important reason:* More beautiful code ###Code import numpy as np ###Output _____no_output_____ ###Markdown More beautiful code through vectorisationpure python with list comprehension ###Code voltages = [10.1, 15.1, 9.5] currents = [1.2, 2.4, 5.2] resistances = [U * I for U, I in zip(voltages, currents)] resistances ###Output _____no_output_____ ###Markdown Using numpy ###Code U = np.array([10.1, 15.1, 9.5]) I = np.array([1.2, 2.4, 5.2]) R = U * I R ###Output _____no_output_____ ###Markdown Finding the point with the smallest distance ###Code import math def euclidean_distance(p1, p2): return math.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2) point = (1, 2) points = [(3, 2), (4, 2), (3, 0)] min_distance = float('inf') for other in points: distance = euclidean_distance(point, other) if distance < min_distance: closest = other min_distance = distance print(min_distance, closest) point = np.array([1, 2]) points = np.array([(3, 2), (4, 2), (3, 0)]) distance = np.linalg.norm(point - points, axis=1) idx = np.argmin(distance) print(distance[idx], points[idx]) ###Output 2.0 [3 2] ###Markdown Small example timings ###Code import math def var(data): ''' knuth's algorithm for one-pass calculation of the variance Avoids rounding errors of large numbers when doing the naive approach of `sum(v**2 for v in data) - sum(v)**2` ''' n = 0 mean = 0.0 m2 = 0.0 if len(data) < 2: return float('nan') for value in data: n += 1 delta = value - mean mean += delta / n delta2 = value - mean m2 += delta * delta2 return m2 / n list(range(10)) %%timeit l = list(range(1000)) var(l) %%timeit a = np.arange(1000) # array with numbers 0,...,999 np.var(a) ###Output 31.1 µs ± 7.2 µs per loop (mean ± std. dev. 
of 7 runs, 10000 loops each) ###Markdown Basic math: vectorizedOperations on numpy arrays work vectorized, element-by-element** Lose your loops ** ###Code # create a numpy array from a python a python list a = np.array([1.0, 3.5, 7.1, 4, 6]) 2 * a a**2 a**a np.cos(a) ###Output _____no_output_____ ###Markdown **Attention: You need the `cos` from numpy!** ###Code math.cos(a) ###Output _____no_output_____ ###Markdown Most normal python functions with basic operators like `*`, `+`, `**` simply work becauseof operator overloading: ###Code def poly(x): return x + 2 * x**2 - x**3 poly(a) poly(np.e), poly(np.pi) ###Output _____no_output_____ ###Markdown Useful properties ###Code len(a) a.shape a.dtype a.ndim ###Output _____no_output_____ ###Markdown Arbitrary dimension arrays ###Code # two-dimensional array y = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) y + y ## since python 3.5 @ is matrix product y @ y # Broadcasting, changing array dimensions to fit the larger one y + np.array([1, 2, 3]) ###Output _____no_output_____ ###Markdown Reduction operationsNumpy has many operations, which reduce dimensionality of arrays ###Code x = np.random.normal(0, 1, 1000) np.sum(x) np.prod(x) np.mean(x) ###Output _____no_output_____ ###Markdown Standard Deviation ###Code np.std(x) ###Output _____no_output_____ ###Markdown Standard error of the mean ###Code np.std(x, ddof=1) / np.sqrt(len(x)) ###Output _____no_output_____ ###Markdown Sample Standard Deviation ###Code np.std(x, ddof=1) ###Output _____no_output_____ ###Markdown Most of the numpy functions are also methods of the array ###Code x.mean(), x.std(), x.max(), x.min() ###Output _____no_output_____ ###Markdown Difference between neighbor elements ###Code z = np.arange(10)**2 diff_z = np.diff(z) print(z) print(diff_z) ###Output [ 0 1 4 9 16 25 36 49 64 81] [ 1 3 5 7 9 11 13 15 17] ###Markdown Reductions on multi-dimensional arrays ###Code array2d = np.arange(20).reshape(4, 5) array2d np.sum(array2d, axis=0) np.var(array2d, axis=1) ###Output _____no_output_____ ###Markdown Exercise 1Write a function that calculates the analytical linear regression for a set ofx and y values.Reminder:$$ f(x) = a \cdot x + b$$with $$\hat{a} = \frac{\mathrm{Cov}(x, y)}{\mathrm{Var}(x)} \\\hat{b} = \bar{y} - \hat{a} \cdot \bar{x}$$ ###Code # %load 04_01_numpy_solutions/exercise_linear.py x = np.linspace(0, 1, 50) y = 5 * np.random.normal(x, 0.1) + 2 # see section on random numbers later a, b = linear_regression(x, y) a, b ###Output _____no_output_____ ###Markdown Helpers for creating arrays ###Code np.zeros(10) np.ones((5, 2)) np.full(5, np.nan) np.empty(10) # attention, uninitialised memory, be carefull np.linspace(-2, 1, 1) # like range() for arrays: np.arange(5) np.arange(2, 10, 2) np.logspace(-4, 5, 10) np.logspace(1, 4, 4, base=2) ###Output _____no_output_____ ###Markdown Numpy Indexing* Element access* Slicing ###Code x = np.arange(0, 10) # like lists: x # like lists: x[0] # all elements with indices ≥1 and <4: x[1:4] # negative indices count from the end x[-1], x[-2] # combination: x[3:-2] # step size x[::2] # trick for reversal: negative step x[::-1] y = np.array([x, x + 10, x + 20, x + 30]) y # only one index ⇒ one-dimensional array y[2] # other axis: (: alone means the whole axis) y[:, 3] # inspecting the number of elements per axis: y[:, 1:3].shape ###Output _____no_output_____ ###Markdown Changing array content ###Code y y[:, 3] = 0 y ###Output _____no_output_____ ###Markdown Using slices on both sides ###Code y[:,0] = x[3:7] y ###Output _____no_output_____ 
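###Markdown Returning to Exercise 1 above (the `%load` line hides the bundled solution file): one possible implementation of the analytical fit, matching the `a, b = linear_regression(x, y)` call that follows it: ###Code
def linear_regression(x, y):
    a = np.cov(x, y, ddof=0)[0, 1] / np.var(x)
    b = np.mean(y) - a * np.mean(x)
    return a, b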
###Markdown Transposing inverts the order of the dimensions ###Code y y.shape y.T y.T.shape ###Output _____no_output_____ ###Markdown Masks* A boolean array can be used to select only the element where it contains `True`.* Very powerfull tool to select certain elements that fullfill a certain condition ###Code a = np.linspace(0, 2, 11) b = np.random.normal(0, 1, 11) print(b >= 0) print(a[b >= 0]) a[[0, 2]] = np.nan a a[np.isnan(a)] = -1 a ###Output _____no_output_____ ###Markdown Random numbers* numpy has a larger number of distributions builtin ###Code np.random.uniform(-1, 1, 10) np.random.normal(0, 5, (2, 10)) np.random.poisson(5, 2) ###Output _____no_output_____ ###Markdown Calculating pi through monte-carlo simulation* We draw random numbers in a square with length of the sides of 2* We count the points which are inside the circle of radius 1The area of the square is$$A_\mathrm{square} = a^2 = 4$$The area of the circle is$$A_\mathrm{circle} = \pi r^2 = \pi$$With $$\frac{n_\mathrm{circle}}{n_\mathrm{square}} = \frac{A_\mathrm{circle}}{A_\mathrm{square}}$$We can calculate pi:$$\pi = 4 \frac{n_\mathrm{circle}}{n_\mathrm{square}}$$ ###Code n_square = 10000000 x = np.random.uniform(-1, 1, n_square) y = np.random.uniform(-1, 1, n_square) radius = np.sqrt(x**2 + y**2) n_circle = np.sum(radius <= 1.0) print(4 * n_circle / n_square) ###Output 3.1424804 ###Markdown Exercise1. Draw 10000 gaussian random numbers with mean of $\mu = 2$ and standard deviation of $\sigma = 3$2. Calculate the mean and the standard deviation of the sample3. What percentage of the numbers are outside of $[\mu - \sigma, \mu + \sigma]$?4. How many of the numbers are $> 0$?5. Calculate the mean and the standard deviation of all numbers ${} > 0$ ###Code # %load 04_01_numpy_solutions/exercise_gaussian.py ###Output _____no_output_____ ###Markdown ExerciseMonte-Carlo uncertainty propagation* The hubble constant as measured by PLANCK is$$H_0 = (67.74 \pm 0.47)\,\frac{\mathrm{km}}{\mathrm{s}\cdot\mathrm{Mpc}}$$* Estimate mean and the uncertainty of the velocity of a galaxy which is measured to be $(500 \pm 100)\,\mathrm{Mpc}$ awayusing monte carlo methods ###Code # %load 04_01_numpy_solutions/exercise_hubble.py ###Output _____no_output_____ ###Markdown Simple io functions ###Code idx = np.arange(100) x = np.random.normal(0, 1e5, 100) y = np.random.normal(0, 1, 100) n = np.random.poisson(20, 100) idx.shape, x.shape, y.shape, n.shape np.savetxt( 'data.txt', np.column_stack([idx, x, y, n]), ) !head data.txt # Load back the data, unpack=True is needed to read the data columnwise and not row-wise idx, x, y, n = np.genfromtxt('data.txt', unpack=True) idx.dtype, x.dtype ###Output _____no_output_____ ###Markdown Problems* Everything is a float* Way larger file than necessary because of too much digits for floats* No column names Numpy recarrays* Numpy recarrays can store columns of different types* Rows are addressed by integer index* Columns are addressed by stringsSolution for our io problem → Column names, different types ###Code # for more options on formatting see # https://docs.scipy.org/doc/numpy/reference/generated/numpy.savetxt.html data = np.savetxt( 'data.csv', np.column_stack([idx, x, y, n]), delimiter=',', # true csv file header=','.join(['idx', 'x', 'y', 'n']), fmt=['%d', '%.4g', '%.4g', '%d'], # One formatter for each column ) !head data.csv data = np.genfromtxt( 'data.csv', names=True, # load column names from first row dtype=None, # Automagically determince best data type for each column delimiter=',', ) data[:10] 
data[0] data['n'] data.dtype ###Output _____no_output_____ ###Markdown Linear algebraNumpy offers a lot of linear algebra functionality, mostly wrapping LAPACK ###Code # symmetric matrix, use eigh # If not symmetric, use eig mat = np.array([ [4, 2, 0], [2, 1, -3], [0, -3, 4] ]) eig_vals, eig_vecs = np.linalg.eigh(mat) eig_vals, eig_vecs np.linalg.inv(mat) ###Output _____no_output_____ ###Markdown Numpy matricesNumpy also has a matrix class, with operator overloading suited for matrices ###Code mat = np.matrix(mat) mat.T mat ** 2 mat * 5 mat.I mat * np.matrix([1, 2, 3]).T ###Output _____no_output_____
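###Markdown `np.matrix`, used in the last cells above, is discouraged in current NumPy releases in favour of plain arrays combined with the `@` operator. The same operations with an ordinary `ndarray`: ###Code
m = np.array([[4, 2, 0],
              [2, 1, -3],
              [0, -3, 4]])
m.T                            # transpose
m @ m                          # matrix product (what ** 2 does on a matrix object)
np.linalg.matrix_power(m, 2)   # equivalent power form
np.linalg.inv(m)               # inverse (the .I attribute)
m @ np.array([1, 2, 3])        # matrix-vector product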
KNN+Breast+Cancer+Modeling.ipynb
###Markdown drop the first column from the data frame. This is Id column which is not used in modeling ###Code # The first column is id column which is patient id and nothing to do with the model attriibutes. So drop it. bc_df = bc_df.drop(labels = "id", axis = 1) bc_df.shape # Create a separate dataframe consisting only of the features i.e independent attributes bc_feature_df = bc_df.drop(labels= "diagnosis" , axis = 1) bc_feature_df.head() # convert the features into z scores as we do not know what units / scales were used and store them in new dataframe # It is always adviced to scale numeric attributes in models that calculate distances. bc_feature_df_z = bc_feature_df.apply(zscore) # convert all attributes to Z scale bc_feature_df_z.describe() # Capture the class values from the 'diagnosis' column into a pandas series akin to array bc_labels = bc_df["diagnosis"] # store the normalized features data into np array X = np.array(bc_feature_df_z) X.shape # store the bc_labels data into a separate np array y = np.array(bc_labels) y.shape # Split X and y into training and test set in 75:25 ratio X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=1) # Call Nearest Neighbour algorithm NNH.fit(X_train, y_train) # For every test data point, predict it's label based on 5 nearest neighbours in this model. The majority class will # be assigned to the test data point predicted_labels = NNH.predict(X_test) NNH.score(X_test, y_test) # calculate accuracy measures and confusion matrix from sklearn import metrics print(metrics.confusion_matrix(y_test, predicted_labels)) # To improve performance ------------------------- Iteration 2 ----------------------------------- # Let us analyze the different attributes for distribution and the correlation by using scatter matrix #sns.pairplot(bc_df) # As is evident from the scatter matrix, many dimensions have strong correlation and that is not surprising # Area and Perimeter are function of radius, so they will have strong correlation. Why take multiple dimensions # when they convey the same information to the model? # To to drop dependent columns from bc_df bc_features_pruned_df_z = bc_feature_df_z.drop(['radius_mean'], axis=1) X = np.array(bc_features_pruned_df_z) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=1) # Call Nearest Neighbour algorithm NNH = KNeighborsClassifier(n_neighbors= 20 , weights = 'distance' ) NNH.fit(X_train, y_train) # For every test data point, predict it's label based on 5 nearest neighbours in this model. The majority class will # be assigned to the test data point predicted_labels = NNH.predict(X_test) # get the accuracy score which is how many test cases were correctly predicted as a ratio of total number of test cases NNH.score(X_test, y_test) # calculate accuracy measures and confusion matrix from sklearn import metrics print(pd.DataFrame(metrics.confusion_matrix(y_test, predicted_labels, labels=["M" ,"B"]), index=['true:yes', 'true:no'], columns=['pred:yes', 'pred:no'])) # peformance has dropped! So, be careful about the dimensions you drop. #Domain expertise is a must to know whether dropping radius or dropping area #will be better. 
The area may be a stronger predictor than radius and the #way they are calculated under a electron microscope may be effecting the outcome from sklearn.model_selection import KFold from sklearn.model_selection import cross_val_score # creating odd list of K for KNN myList = list(range(1,50)) # empty list that will hold cv scores cv_scores = [] k_neighbors = [] # perform 10-fold cross validation for k in myList: knn = KNeighborsClassifier(n_neighbors=k) scores = cross_val_score(knn, X_train, y_train, cv=10, scoring='accuracy') print(scores.mean()) cv_scores.append(scores.mean()) k_neighbors.append(k) MSE = [1 - x for x in cv_scores] print(min(MSE)) MSE.index(min(MSE)) best_k = myList[MSE.index(min(MSE))] print ("The optimal number of neighbors is %d" % best_k) %matplotlib inline import matplotlib.pyplot as plt fig_size = plt.rcParams["figure.figsize"] fig_size[0] = 18 fig_size[1] = 9 plt.rcParams["figure.figsize"] = fig_size plt.xlim(0,25) # plot misclassification error vs k plt.plot(k_neighbors, MSE) plt.xlabel('Number of Neighbors K') plt.ylabel('Misclassification Error') plt.show() ###Output _____no_output_____
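###Markdown With `best_k` chosen by cross-validation above, a natural follow-up is to refit on the training split and score the held-out test set, reusing the objects already defined: ###Code
knn_best = KNeighborsClassifier(n_neighbors=best_k)
knn_best.fit(X_train, y_train)
print("Test accuracy with k = %d : %.4f" % (best_k, knn_best.score(X_test, y_test)))
print(metrics.confusion_matrix(y_test, knn_best.predict(X_test), labels=["M", "B"]))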
assignments/PythonAdvanceTheory/Python_Advance_11.ipynb
###Markdown Assignment_11 Q1. What is the concept of a metaclass?In python everything is associated with type. In other programming languages, int, str, array are data type. But, in python, they are objects of the class int, str, list, dict. We can create user defined type by creating a class and then object of that class.A Class is also an object, and just like any other object it’s a instance of something called Metaclass. A special class type creates these Class object. The type class is default metaclass which is responsible for making classes. Q2. What is the best way to declare a class's metaclass?When defining a class and no metaclass is defined the default type metaclass will be used. If a metaclass is given and it is not an instance of type(), then it is used directly as the metaclass. ###Code class A(type): pass class B(metaclass=A): # for class B, class A is a metaclass pass class C(B): pass print(type(A)) print(type(B)) print(type(C)) ###Output <class 'type'> <class '__main__.A'> <class '__main__.A'>
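###Markdown A short practical illustration of the Q2 answer: because the metaclass controls class creation, it can, for example, register every class built from it, which is a common real-world use. ###Code
class RegistryMeta(type):
    registry = {}
    def __new__(mcls, name, bases, namespace):
        cls = super().__new__(mcls, name, bases, namespace)
        mcls.registry[name] = cls          # record every class this metaclass creates
        return cls

class Plugin(metaclass=RegistryMeta):
    pass

class CsvPlugin(Plugin):
    pass

print(RegistryMeta.registry)               # both Plugin and CsvPlugin are registered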
notebooks/problem_solved/problem_004_Largest_palindrome_product.ipynb
###Markdown Problem_004 Largest palindrome productA palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 × 99.Find the largest palindrome made from the product of two 3-digit numbers. ###Code palindromic = [] for i in range(999, 0, -1): for j in range(999, 0, -1): x = int(i*j) y = int(str(x)[::-1]) # print('x={}, y={}'.format(x, y)) if x==y: palindromic.append(x) # print(x) palindromic.sort(reverse=True) print(palindromic[0]) ###Output 906609
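###Markdown The brute-force double loop above multiplies every ordered pair down to 1 and sorts the whole list at the end. A sketch of the same search restricted to three-digit factors, skipping symmetric pairs and exiting early once no remaining product can beat the running maximum: ###Code
best = 0
for i in range(999, 99, -1):
    if i * i < best:            # no later row can beat the current best
        break
    for j in range(i, 99, -1):
        p = i * j
        if p <= best:           # products only shrink as j decreases
            break
        if str(p) == str(p)[::-1]:
            best = p
print(best)                     # 906609, as found above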
lesson_1/Slides.ipynb
###Markdown * Привет! Меня зовут Николай Марков. * Я работаю на позиции Senior Data Engineer в компании Aligned Research Group.* Создаю пайплайны для анализа данных.* Пишу на Python суммарно уже лет шесть-семь.* А еще мы организуем в России встречи и конференции под брендом PyData (http://pydata.org/). Присоединяйтесь: https://www.meetup.com/PyData-Moscow/* У меня везде один и тот же ник - @enchantner, пишите в твиттере, телеграме или фейсбуке.* И в http://ods.ai/ тоже заходите. Что такое Python?* Язык общего назначения* Интерпретируемый* С динамической "утиной" типизацией* С автоматическим управлением памятью и Garbage Collector'ом* Эталонная реализация - CPython (есть также PyPy, Jython, IronPython и т.д.) Что можно писать на Python? Сайты и сервисы, например.![web](img/web.png "Сайты, например") А также:* Оконные приложения (как ни странно)* Ботов для игр и Telegram* Приложения для анализа данных* Облачные платформы* Утилиты для администрирования* Что угодно еще Лучше не брать Python для* Оконных приложений (как ни странно)* Фронтенд-задач* Мобильных приложений* Сложных распределенных систем Что такое "интерпретируемый язык"? 1) Это как компилируемый, только быстро 2) Это исполняемый на виртуальной машине 3) Это позволяющий получить ошибки во время исполнения 4) Это язык, у которого зачастую есть REPL Как происходит запуск нашей программы (скрипта)?![lexer](img/interpreter.png "lexer") А что потом?![ast](img/ast.png "AST") И как в конце?![bytecode](img/bytecode.png "Bytecode") Давайте поставим интерпретатор Python* Экосистема Python состоит из * интерпретатора и стандартной библиотеки * сторонних модулей * окружения и IDE* Python 2 умер, ставим Python 3* “ванильный” дистрибутив: https://www.python.org/* дистрибутив для data science https://www.anaconda.com/distribution/ Основные типы данных- bool (логический, True/False)- string/bytes (строковый)- int, float, complex (числовой) Основные типы контейнеров- list (список элементов разных типов)- tuple (кортеж, неизменяемый список)- dict (словарь, набор пар ключ-значение)- set (множество)Еще: https://docs.python.org/3/library/datatypes.html Что такое "динамическая типизация"? ###Code s = 'abc' # строка s = "abc" # тоже строка n = 2 # int n = 3.5 # float l = [] # пустой список l = [1, 2, 3] # тоже список, но с числами l = ['a', 1, None] # разные типы в одном списке t = (1,) # кортеж с одним элементом (не забудьте про запятую!) 
d = {} # пустой словарь d = {'a': 1, 'b': 2} # словарь с парами ключ-значение s = set([1, 2]) # множество, еще можно {1, 2} ###Output _____no_output_____ ###Markdown Список - непрерывный кусок памяти!https://wiki.python.org/moin/TimeComplexity ###Code l = [3,2,1] len(l) # посчитать длину списка или строки sum(l) # просуммировать элементы списка sorted(l) # вернуть отсортированный список, reverse=True для сортировки в обратном порядке max(l) # максимальный элемент; min(l) # минимальный ###Output _____no_output_____ ###Markdown Методы самого списка ###Code l.append(9) # добавить элемент в конец l.extend([3, 4, 5]) # расширить один список другим l.insert(3, 4) # вставить элемент 4 в позицию 3 l.remove(3) # удалить первый элемент со значением 3 l.count(3) # посчитать число элементов со значением 3 ###Output _____no_output_____ ###Markdown Строки, списки, срезы ###Code # s - строка или список s = "string" s[0] # нулевой элемент (индексация с нуля) s[2:4] # элементы 2 и 3 s[1:8:2] # элементы 1, 3, 5, 7 (шаг 2) s[-1] # обратный индекс - последний элемент s[::2] # все элементы с шагом 2 s[::-1] # развернутая строка/список ###Output _____no_output_____ ###Markdown Разделение - объединение ###Code s.split("a") # разделяем строку по "a" s.split("\t") # разделяем строку по табуляции "\t".join(list_of_strings) # объединяем ###Output _____no_output_____ ###Markdown Попробуем сами ###Code s = "У Мэри есть овечка" print(s[0] == ...) print(s[7:10] == ...) print(s[1:8:2] == ...) print(s[-3] == ...) print(s[::4] == ...) print(s[::-1] == ...) ###Output _____no_output_____ ###Markdown Циклы ###Code lst = [3,2,1] for i in lst: # перебираем элементы в контейнере print(i) for i, item in enumerate(lst): # перебираем вместе с индексами print(str(i) + ". " + str(item)) for j in range(1, 10, 2): # что напечатает? print(j) ###Output _____no_output_____ ###Markdown Цикл с условием останова ###Code a = 0 while a < 100: # проверка условия a += 1 # инкремент (увеличение значения на 1) print(a) if a == 20: break # выход из цикла ###Output _____no_output_____ ###Markdown Упражнения 1) Для чисел от 0 до 100 вывести само число, а затем«fizz», если число делится на 3, и «buzz», если оно делитсяна 5. Подсказка: оператор взятия остатка - %2) Даны два списка равной длины с целыми числами - например, время в секундах, проведенное на сайте, для каждого пользователя в группе A/B тестирования. Нужно найти разницу между максимальным элементом первого списка и минимальным элементом второго за один проход (O(n)). ###Code # сгенерировать список можно, например, так: import random a = [random.randint(-10, 10) for _ in range(10)] b = [random.randint(-10, 10) for _ in range(10)] mx = a[0] mn = b[0] ###Output _____no_output_____ ###Markdown Set- Вставка и проверка наличия элемента - гораздо быстрее, чем в списке!- Все элементы уникальны- Элементы сортируются в Python 3.5+, но на это полагаться не стоит ###Code s = {1, 2, 3} s.add(6) # добавление элемента s.remove(2) # удаление элемента 3 in s # проверка наличия элемента 4 not in s # проверка отсутствия элемента ###Output _____no_output_____ ###Markdown Set как математическое множество ###Code s1, s2 = {1, 2, 3}, {2, 3} s2.issubset(s1) # является ли s2 подсетом s1? s1.issuperset(s2) # является ли s1 суперсетом над s2? 
s1.union(s2) # объединить два множества в одно s1.intersection(s2) # пересечение множеств s1.difference(s2) # разность множеств # сокращенные версии (не рекомендуются к использованию) s2 <= s1 # можно также s1<s2 s1 >= s2 # можно также s1>s2 s1 | s2 s1 & s2 s1 - s2 ###Output _____no_output_____ ###Markdown Dictionary- Структура данных - hash table (хэш-таблица)- На упорядоченность полагаться тоже не стоит ###Code d = {"foo": 1, 42: "bar"} d["a"] = 125 # добавить значение по ключу в словарь del d["a"] # удалить значение по ключу из словаря d.keys() # список ключей (в Python 3 - итератор) d.values() # список значений (в Python 3 - итератор) d.get(12, "freedom") # значение по умолчанию d.update({42: "zord", "obama": "trump"}) # обновить значения из другого словаря ###Output _____no_output_____ ###Markdown Шаблонизация и форматирование ###Code s = "ТеКсТ ДоМиКом" print(s.lower()) # нижний регистр print(s.upper()) # верхний регистр print("У Пети было {0} яблок".format(15)) print("Что лучше - {0} ящиков пива или {1} ящиков водки?".format(30, 20)) print("У Пети было {how_much} яблок".format(how_much="дофига")) """ Взвесьте мне {0:.5f} килограмм хурмы """.format(21.35236172) ###Output _____no_output_____ ###Markdown Еще пара примеров ###Code # f-строки (Python 3.6+): a = 2 b = 8 f"a + b = {a + b}" # старый формат: printf-нотация: "Жили в квартире %d веселых чижа" % 44 ###Output _____no_output_____
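###Markdown A possible completion of the second exercise from the slides (the one-pass difference between the maximum of the first list and the minimum of the second), continuing the `mx`/`mn` stub given there: ###Code
mx = a[0]
mn = b[0]
for x, y in zip(a, b):          # single O(n) pass over both lists
    if x > mx:
        mx = x
    if y < mn:
        mn = y
print(mx - mn)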
gs_quant/documentation/06_baskets/examples/03_basket_creation/0000_clone_basket_position_set.ipynb
###Markdown If you'd like, you may now create a new basket in Marquee using this composition. See the Basket Create Tutorial for a more nuanced example/explanation on how to do this (you may skip step 3 in this case). ###Code new_basket.ticker = 'GSMBCLNE' new_basket.name = 'Clone of GSMBXXXX' new_basket.currency = 'USD' new_basket.return_type = ReturnType.PRICE_RETURN new_basket.create() ###Output _____no_output_____
scATACseq_females_0.ipynb
###Markdown ATAC + MULTIOME (females) Build anndata from cellatac output ###Code # Load libraries import pandas as pd import scanpy as sc import numpy as np import scipy.sparse # Define variables outdir = "/nfs/team292/vl6/my_MULTIOME_dir/females_apr2021/" experiment_prefix = 'females_' cellatac_outdir = '/lustre/scratch117/cellgen/cellgeni/TIC-atacseq/tic-1004/female/results200k-b-female/' input={'cnts': cellatac_outdir + 'peak_matrix/peaks_bc_matrix.mmtx.gz', "bcs": cellatac_outdir + 'peak_matrix/bc.txt', "peaks": cellatac_outdir + 'peak_matrix/peaks.txt', 'clusters': cellatac_outdir + 'qc/seurat-clades.tsv'} output = { "cnt_mmtx":outdir + experiment_prefix + '_ATAC_cisTopic.mmtx', "cnt_peaks":outdir + experiment_prefix + '_ATAC_cisTopic.peaks.tsv', "cnt_cells":outdir + experiment_prefix + '_ATAC_cisTopic.cells.tsv', "h5ad":outdir + experiment_prefix + '_ATAC_raw.h5ad', } ###Output _____no_output_____ ###Markdown Make anndata object ###Code adata = sc.read_mtx(input["cnts"]).T bc = pd.read_table(input["bcs"], header=None) feat = pd.read_table(input["peaks"], header=None) adata.obs_names = bc[0] adata.var_names = feat[0] adata # Code from cell barcodes adata.obs['code'] = [name[0:2] for name in adata.obs_names] adata.obs['code'].value_counts(dropna = False) # Sample from code sample_dictionary = {'01' : 'FCA_GND8046539', '02' : 'FCA_GND8768483', '03' : 'FCA_GND8768484', '04' : 'FCA_GND8768485', '05' : 'HD_F_GON9479871', '06' : 'HD_F_GON9479873', '07' : 'HD_F_GON9479874', '08' : 'HD_F_GON9479875', '09' : 'HD_F_GON9525613', '10' : 'HD_F_GON9525614', '11' : 'HD_F_GON9883959', '12' : 'HD_F_GON9883960'} adata.obs['sample'] = adata.obs['code'].map(sample_dictionary) adata.obs['sample'].value_counts(dropna = False) ###Output _____no_output_____ ###Markdown Load peak annotations (done with R script by Emma) ###Code peak_anno_df = pd.read_csv(outdir + "ATACpeaks_annotation.csv", index_col=0) peak_anno_df.index = peak_anno_df["peak_id"] peak_anno_df.drop("peak_id",1, inplace=True) adata.var = pd.concat([adata.var, peak_anno_df], 1) ###Output _____no_output_____ ###Markdown Save binary data to layers ###Code adata.layers["binary_raw"] = adata.X adata.layers["binary_raw"][adata.layers["binary_raw"] > 1] = 1 adata ###Output _____no_output_____ ###Markdown **Peak filtering** ###Code adata.var.hist(column = 'peak_width', bins = 200, grid = False, figsize = (25,6), color = '#870052') var_qc = sc.pp.calculate_qc_metrics(adata, layer = "binary_raw")[1] adata.var = pd.concat([adata.var, var_qc], 1) adata adata.var.head() adata.var.hist(column = 'total_counts', bins = 200, grid = False, figsize = (25,6), color = '#870052') thirty_percent = len(adata.obs_names) / 100 * 30 point_one_percent = len(adata.obs_names) / 100 * 0.1 print("30% : {}".format(thirty_percent)) print("0.1% : {}".format(point_one_percent)) # Accessible in at least k cells adata = adata[:,adata.var.total_counts > point_one_percent] adata = adata[:,adata.var.total_counts < thirty_percent] adata # Remove peaks in ENCODE blacklist adata = adata[:, adata.var.ENCODE_blacklist == 0] adata # Filter by width (remove peaks at the lowest end, closest to min peak width in MACS2) adata = adata[:, adata.var.peak_width > 210] adata = adata[:, adata.var.peak_width < 1500] adata adata.var.head() adata.var['annotation'].value_counts() adata.obs.head() ### Filter peaks that are not accessible in at least 4% of cells from a coarse cluster min_frac=0.04 # Load cluster information from cellatac outputs clusters = pd.read_table(input["clusters"], header = None, 
index_col = 0) clusters.columns = ["cellatac_clusters"] adata.obs = clusters.loc[adata.obs_names] adata adata.obs.head() n_clusters = len(np.unique(adata.obs[["cellatac_clusters"]])) clus_mat = np.empty([adata.n_obs, n_clusters]) for cl in np.unique(adata.obs[["cellatac_clusters"]]): clus_mat[np.where(adata.obs['cellatac_clusters']==cl)[0],cl] = 1 clus_mat = scipy.sparse.csr_matrix(clus_mat) clus_mat[clus_mat != 1 ] = 0 cl_peak_mat = np.dot(clus_mat.T, adata.layers["binary_raw"]) cl_peak_frac = cl_peak_mat/clus_mat.sum(0).T cl_peak_frac.max(0).shape bool_matrix = cl_peak_frac.max(0) > min_frac bool_matrix.shape bool_vector = np.squeeze(np.asarray(bool_matrix)) bool_vector.shape adata = adata[:, bool_vector] adata sc.pp.calculate_qc_metrics(adata, layer = "binary_raw", inplace = True) adata.var.hist(column = 'log1p_total_counts', bins = 200, grid = False, figsize = (25,6), color = '#870052') adata.obs.hist(column = 'log1p_total_counts', bins = 200, grid = False, figsize = (25,6), color = '#870052') adata = adata[adata.obs.log1p_total_counts >= 5.5] adata ## Write output anndata adata.write_h5ad(output["h5ad"]) ###Output /opt/conda/envs/atac_env/lib/python3.8/site-packages/anndata/_core/anndata.py:1207: ImplicitModificationWarning: Initializing view as actual. warnings.warn( Trying to set attribute `.var` of view, copying. ... storing 'annotation' as categorical Trying to set attribute `.var` of view, copying. ... storing 'gene_name' as categorical Trying to set attribute `.var` of view, copying. ... storing 'gene_id' as categorical
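###Markdown
Not part of the original pipeline: a minimal sanity check, sketched under the assumption that the `output` paths defined above are unchanged, to confirm the saved object reads back with the expected dimensions and a strictly binary layer.
###Code
import scanpy as sc

adata_check = sc.read_h5ad(output["h5ad"])
print(adata_check.shape)  # cells x filtered peaks
print(adata_check.obs["cellatac_clusters"].value_counts().head())
# the binarised layer should contain nothing larger than 1
assert adata_check.layers["binary_raw"].max() <= 1
###Output
_____no_output_____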
optim/error-feedback-SGD/notebooks/report_plots.ipynb
###Markdown \begin{table}[]\begin{tabular}{cc|c|c|c|c|}\cline{3-6}\multicolumn{1}{l}{} & & \multicolumn{4}{c|}{Algorithm} \\ \cline{3-6} \multicolumn{1}{l}{} & & SGDm & Signum & SSGDf & sSSGD \\ \hline\multicolumn{1}{|c|}{\multirow{3}{*}{\begin{tabular}[c]{@{}c@{}}Batch\\ size\end{tabular}}}& 128 & 75.35 & -3.15 & -0.92 & -2.21 \\ \cline{2-6}\multicolumn{1}{|c|}{}& 32 & 76.22 & -3.57 & -0.79 & -3.04 \\ \cline{2-6} \multicolumn{1}{|c|}{}& 8 & 74.91 & -6.6 & -0.64 & -36.35 \\ \hline\end{tabular}\caption[short]{Best mean test accuracy over the 3 repetitions for each batch size and each algorithm on the Resnet architecture}\end{table} ###Code max_table(batch_size=8, name='vgg-cifar10') max_table(batch_size=32, name='vgg-cifar10') max_table(batch_size=128, name='vgg-cifar10') ###Output Batchsize (8) Opt (sgdm) Maxacc (93.09) Batchsize (8) Opt (signum) Maxacc (-2.75) Batchsize (8) Opt (ssgdf) Maxacc (-0.27) Batchsize (8) Opt (sssgd) Maxacc (-20.22) Batchsize (32) Opt (sgdm) Maxacc (93.42) Batchsize (32) Opt (signum) Maxacc (-1.54) Batchsize (32) Opt (ssgdf) Maxacc (-0.71) Batchsize (32) Opt (sssgd) Maxacc (-1.49) Batchsize (128) Opt (sgdm) Maxacc (93.38) Batchsize (128) Opt (signum) Maxacc (-0.94) Batchsize (128) Opt (ssgdf) Maxacc (-0.68) Batchsize (128) Opt (sssgd) Maxacc (-1.31) ###Markdown \begin{table}[]\begin{tabular}{cc|c|c|c|c|}\cline{3-6}\multicolumn{1}{l}{} & & \multicolumn{4}{c|}{Algorithm} \\ \cline{3-6} \multicolumn{1}{l}{} & & SGDm & Signum & SSGDf & sSSGD \\ \hline\multicolumn{1}{|c|}{\multirow{3}{*}{\begin{tabular}[c]{@{}c@{}}Batch\\ size\end{tabular}}}& 128 & 93.38 & -0.94 & -0.68 & -1.31 \\ \cline{2-6}\multicolumn{1}{|c|}{}& 32 & 93.42 & -1.54 & -0.71 & -1.49 \\ \cline{2-6} \multicolumn{1}{|c|}{}& 8 & 93.09 & -2.75 & -0.27 & -20.22 \\ \hline\end{tabular}\caption[short]{Best mean test accuracy over the 3 repetitions for each batch size and each algorithm on the VGG architecture}\end{table} ###Code # mean of max def max_table(batch_size=8, name='resnet-cifar100'): optimizers = ['sgdm', 'signum','ssgdf', 'sssgd'] folders_list = [base_folder + 'batchsize-' + str(batch_size) + '/' + name + '-' + optimizer for optimizer in optimizers] results_list = load(folders_list) index = 3 for i, l in enumerate(results_list[index]): max_ = np.mean(np.max(l, axis=1)) print('Batchsize ({0}) Opt ({1}) Maxacc ({2})'.format(batch_size, optimizers[i], np.round(max_, 3))) max_table(batch_size=8) max_table(batch_size=32) max_table(batch_size=128) from models import VGG, ResNet18 vgg_net = VGG('VGG19', num_classes=2) vgg_param_dims = [] for name, param in vgg_net.named_parameters(): vgg_param_dims.append(param.nelement()) vgg_param_dims = np.array(vgg_param_dims, dtype=float) res_net = ResNet18(100) resnet_param_dims = [] for name, param in res_net.named_parameters(): resnet_param_dims.append(param.nelement()) resnet_param_dims = np.array(resnet_param_dims, dtype=float) ratios_vgg_grad = load_obj('results/norm_ratio_experiments/batchsize-128/7vgg-cifar10-ssgdf/gradient_norm_ratios') ratios_vgg_g = load_obj('results/norm_ratio_experiments/batchsize-128/7vgg-cifar10-ssgdf/g_norm_ratios') ratios_resnet_grad = load_obj('results/norm_ratio_experiments/batchsize-32/3resnet-cifar100-ssgdf/gradient_norm_ratios') ratios_resnet_g = load_obj('results/norm_ratio_experiments/batchsize-32/3resnet-cifar100-ssgdf/g_norm_ratios') def normalize_results(res): res = np.array(res) for i in range(res.shape[0]): for j in range(res.shape[1]): res[i, j] = float(res[i, j]) res = np.array(res, dtype=float) return res 
ratios_vgg_grad = normalize_results(ratios_vgg_grad) ratios_vgg_g = normalize_results(ratios_vgg_g) ratios_resnet_grad = normalize_results(ratios_resnet_grad) ratios_resnet_g = normalize_results(ratios_resnet_g) fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(3.3, 2.3)) ax.plot(ratios_vgg_grad[:,-1], label='$g_t$') ax.plot(ratios_vgg_g[:,-1], label='$g_t + e_t$') ax.set_title('') ax.set_xlabel('Epoch', fontsize=12.5) ax.set_ylabel('$\phi(\cdot)$', fontsize=12.5) ax.legend(prop={'size': 12.5}, loc='lower left') print(np.min(ratios_vgg_g[:,-1])) plt.tight_layout() fig.savefig('../figs/vgg_gradients_corrected_norms_ratios_comparison.pdf') fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(3.3, 2.3)) ax.plot(np.average(ratios_resnet_grad, weights=resnet_param_dims, axis=1), label='Gradients') ax.plot(np.average(ratios_resnet_g, weights=resnet_param_dims, axis=1), label='Corrected gradients') ax.set_title('') ax.set_xlabel('Epoch', fontsize=12.5) ax.set_ylabel('$\phi(\cdot)$', fontsize=12.5) ax.legend(prop={'size': 8}, loc='lower left') plt.tight_layout() fig.savefig('../figs/resnet_gradients_norms_ratios_comparison.pdf') ###Output _____no_output_____ ###Markdown Appendix lr tuning ###Code from tune_lr import get_tuned_learning_rate def Round_To_n(x, n): return round(x, -int(np.floor(np.sign(x) * np.log10(abs(x)))) + n) lr_space = np.logspace(-5, 1, 9) print(lr_space) for i, x in enumerate(lr_space): lr_space[i] = Round_To_n(x, 1) print(lr_space) print(get_tuned_learning_rate('vgg', 'cifar10', 'sgdm')) print(get_tuned_learning_rate('vgg', 'cifar10', 'ssgdf')) print(get_tuned_learning_rate('vgg', 'cifar10', 'signum')) print(get_tuned_learning_rate('vgg', 'cifar10', 'sssgd')) print(get_tuned_learning_rate('resnet', 'cifar100', 'sgdm')) print(get_tuned_learning_rate('resnet', 'cifar100', 'ssgdf')) print(get_tuned_learning_rate('resnet', 'cifar100', 'signum')) print(get_tuned_learning_rate('resnet', 'cifar100', 'sssgd')) ###Output 0.01 0.05623413251903491 5.623413251903491e-05 0.05623413251903491 0.01 0.05623413251903491 0.00031622776601683794 0.05623413251903491
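###Markdown
For clarity, a small illustration (dummy numbers, not experiment results) of how the "best mean test accuracy" entries in the tables above are aggregated by `max_table`: take the best epoch within each repetition, then average over the repetitions.
###Code
import numpy as np

dummy_acc = np.array([[70.1, 74.8, 75.2],   # repetition 1, accuracy per epoch
                      [71.0, 75.9, 75.4],   # repetition 2
                      [69.5, 74.1, 75.6]])  # repetition 3
best_per_rep = np.max(dummy_acc, axis=1)    # best epoch of each repetition
print(np.round(np.mean(best_per_rep), 3))   # the value a table cell would report
###Output
_____no_output_____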
notebooks/stack_models_from_N_features.ipynb
###Markdown Prepare folding scheme (do i need it ?) and load the pre-trained metafeatures ###Code # Get the K fold indexes n_folds =6 kf = KFold(n_splits=n_folds, shuffle=False, random_state=156) with open("../models/test_features_rougher.pkl", "rb") as f: test_rougher = pickle.load(f) with open("../models/train_features_rougher.pkl", "rb") as f: train_rougher = pickle.load(f) with open("../models/test_features_final.pkl", "rb") as f: test_final = pickle.load(f) with open("../models/train_features_final.pkl", "rb") as f: train_final = pickle.load(f) ## For linear models: with open("../models/test_features_rougher_linear.pkl", "rb") as f: test_rougher_lm = pickle.load(f) with open("../models/train_features_rougher_linear.pkl", "rb") as f: train_rougher_lm = pickle.load(f) with open("../models/test_features_final_linear.pkl", "rb") as f: test_final_lm = pickle.load(f) with open("../models/train_features_final_linear.pkl", "rb") as f: train_final_lm = pickle.load(f) ###Output _____no_output_____ ###Markdown Define the N features for which to pull the level0 features for; separately for rougher & final ###Code N_final = [50,150,250,350,450,550] N_rougher = [50,150,250,350] # Retrieve rougher predictions: level0_rougher_train = np.hstack([train_rougher[Nr] for Nr in N_rougher]) level0_final_train = np.hstack([train_final[Nf] for Nf in N_final]) level0_rougher_test = np.hstack([test_rougher[Nr] for Nr in N_rougher]) level0_final_test = np.hstack([test_final[Nf] for Nf in N_final]) tgt = 'rougher.output.recovery' predictions = level0_rougher_train yTrue = y[tgt].values print(f'ROUGHER Shape y : {yTrue.shape}, preds : {level0_rougher_train.shape}' ) print(f'FINAL Shape y : {yTrue.shape}, preds : {level0_final_train.shape}' ) r= mase(level0_rougher_train.mean(axis = 1),y['rougher.output.recovery'].values) f=mase(level0_final_train.mean(axis = 1),y['final.output.recovery'].values) print(f'R {r} F {f} sum = {r*0.25+0.75*f}') print(r) # for Linear N_final_lm = [50,150,250,350] N_rougher_lm = [50,150] # Retrieve rougher predictions: level0_rougher_train_lm = np.hstack([train_rougher_lm[Nr] for Nr in N_rougher_lm]) level0_final_train_lm = np.hstack([train_final_lm[Nf] for Nf in N_final_lm]) level0_rougher_test_lm = np.hstack([test_rougher_lm[Nr] for Nr in N_rougher_lm]) level0_final_test_lm = np.hstack([test_final_lm[Nf] for Nf in N_final_lm]) tgt = 'rougher.output.recovery' predictions_lm = level0_rougher_train_lm yTrue = y[tgt].values print(f'ROUGHER Shape y : {yTrue.shape}, preds : {level0_rougher_train_lm.shape}' ) print(f'FINAL Shape y : {yTrue.shape}, preds : {level0_final_train_lm.shape}' ) r= mase(level0_rougher_train_lm.mean(axis = 1),y['rougher.output.recovery'].values) f=mase(level0_final_train_lm.mean(axis = 1),y['final.output.recovery'].values) print(f'R {r} F {f} sum = {r*0.25+0.75*f}') print(r) #predictions.mean(axis = 1)*r+ ###Output _____no_output_____ ###Markdown Run an optimization with constraints on positive weights ###Code from scipy.optimize import minimize predictions_r = level0_rougher_train ytrue =y['rougher.output.recovery'].values def f(weights): valid_preds_r = np.average(predictions_r, axis=1, weights=weights).reshape((-1,1)) return np.hstack([valid_preds_r]) def loss_function(weights,C = 0.1): y_valid_pred = f(weights) sc = mase(y_valid_pred[:,0],ytrue) + C*(weights**2).sum() #print('loss', sc, 'current weights', weights) return sc opt_weights = minimize(loss_function, [1/predictions_r.shape[1]] * predictions_r.shape[1], constraints=({'type': 'eq','fun': lambda w: 
1-sum(w)}), method= 'SLSQP', bounds=[(0.0, 1.0)] * predictions_r.shape[1], options = {'ftol':1e-10}, )['x'] print('Optimum weights = ', opt_weights, 'with loss', loss_function(opt_weights)) def acc_function(weights): y_valid_pred = f(weights) sc = mase(y_valid_pred[:,0],ytrue) return sc print('Ensembled Accuracy =', acc_function(opt_weights)) w_rougher = opt_weights predictions_r = level0_final_train ytrue =y['final.output.recovery'].values opt_weights = minimize(loss_function, [1/predictions_r.shape[1]] * predictions_r.shape[1], constraints=({'type': 'eq','fun': lambda w: 1-sum(w)}), method= 'SLSQP', bounds=[(0.0, 1.0)] * predictions_r.shape[1], options = {'ftol':1e-10}, )['x'] print('Optimum weights = ', opt_weights, 'with loss', loss_function(opt_weights)) def acc_function(weights): y_valid_pred = f(weights) sc = mase(y_valid_pred[:,0],ytrue) return sc print('Ensembled Accuracy =', acc_function(opt_weights)) w_final = opt_weights # 0.00000000e+00 1.53911667e-15 4.63643997e-02 0.00000000e+00 # 7.57377707e-02 8.77897830e-01] # rougher = [0.06394941 0.04131424 0.03608431 0.04175253 0.04084764 0.04474683 0.18637792 0.07544563 0.07143145 0.07306282 0.16714796 0.15783926] # final = [0.03389247 0.0309956 0.05860959 0.0303532 0.03120939 0.034088 0.04629192 0.04832187 0.03993592 0.05727612 0.03868644 0.04596449 0.05082181 0.04129324 0.0481723 0.04222097 0.05384205 0.03739259 0.03535463 0.02802449 0.043193 0.04099059 0.04402116 0.03904817] plt.plot(w_rougher) ###Output _____no_output_____ ###Markdown Train a random forest on top of individual predictions ###Code from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import cross_val_predict from skgarden import RandomForestQuantileRegressor nt = 100 rf_quantile = RandomForestQuantileRegressor(n_estimators=nt,max_depth = 2,min_samples_split = 10) rf = RandomForestRegressor(n_estimators=nt,max_depth = 2,min_samples_split = 10) rf_quantile_final = RandomForestQuantileRegressor(n_estimators=nt,max_depth = 2,min_samples_split = 10) rf_final =RandomForestRegressor(n_estimators=nt,max_depth = 2,min_samples_split = 10) X_train_r = np.hstack([level0_rougher_train,level0_rougher_train_lm,X_filt[feature_subset_rougher_df['feature']]]) y_train_r = y['rougher.output.recovery'].values X_train_f = np.hstack([level0_final_train,level0_rougher_train_lm,X[feature_subset_final_df['feature']]]) y_train_f = y['final.output.recovery'].values sc_normal = cross_val_predict(rf,X_train_r,y_train_r,cv = kf,n_jobs = 6) sc_quantile = cross_val_predict(rf_quantile,X_train_r,y_train_r,cv = kf,n_jobs = 6) sc_normal = mase(sc_normal,y_train_r) sc_quantile = mase(sc_quantile,y_train_r) print(f'Scores: normal : {sc_normal} , quantile: {sc_quantile}') rf_quantile.fit(X_train_r,y_train_r) rf.fit(X_train_r,y_train_r) rf_quantile_final.fit(X_train_f,y_train_f) rf_final.fit(X_train_f,y_train_f) pred_qrf_final = rf_quantile_final.predict(np.hstack([level0_final_test,level0_final_test_lm,X_test[feature_subset_final_df['feature']]])) pred_rf_final =rf_final.predict(np.hstack([level0_final_test,level0_final_test_lm,X_test[feature_subset_final_df['feature']]])) pred_qrf_rougher = rf_quantile.predict(np.hstack([level0_rougher_test,level0_rougher_test_lm,X_test[feature_subset_rougher_df['feature']]])) pred_rf_rougher =rf.predict(np.hstack([level0_rougher_test,level0_rougher_test_lm,X_test[feature_subset_rougher_df['feature']]])) ###Output Scores: normal : 2.3637073637325736 , quantile: 2.3663198099860514 ###Markdown Make a submission: ###Code preds = pd.DataFrame(data 
= {'date':X_test.index,'rougher.output.recovery':level0_rougher_test.mean(axis=1), 'final.output.recovery':level0_final_test.mean(axis=1)}) stacked_preds_sub = preds stacked_preds_sub['date'] = stacked_preds_sub['date'].dt.strftime('%Y-%m-%dT%H:%M:%SZ') stacked_preds_sub.set_index('date',inplace=True) #stacked_preds_sub.drop_duplicates(inplace=True) stacked_preds_sub.to_csv('../results/stacked_sub_lgb_lasso_base_alldata_r_250_f350_averaged_minmax_slope_interactions_proddiff.csv') stacked_preds_sub.plot(style=['-','-'],figsize = (16,8),alpha=0.9,ylim = [35,95]) preds = pd.DataFrame(data = {'date':X_test.index,'rougher.output.recovery':level0_rougher_test.dot(w_rougher)*1.04, 'final.output.recovery':level0_final_test.dot(w_final)*1.04}) stacked_preds_sub = preds stacked_preds_sub['date'] = stacked_preds_sub['date'].dt.strftime('%Y-%m-%dT%H:%M:%SZ') stacked_preds_sub.set_index('date',inplace=True) #stacked_preds_sub.drop_duplicates(inplace=True) stacked_preds_sub.to_csv('../results/stacked_sub_lgb_lasso_base_alldata_r_250_f350_averaged_minmax_slope_interactions_proddiff_optimized_1_04.csv') stacked_preds_sub.plot(style=['-','-'],figsize = (16,8),alpha=0.9,ylim = [35,99]) # for linear model level0_rougher_test_lm[(level0_rougher_test_lm>98) | (level0_rougher_test_lm<30)] = np.median(level0_rougher_test_lm) level0_final_test_lm[(level0_final_test_lm>80) | (level0_final_test_lm<30)] = np.median(level0_final_test_lm) # ,'rougher.output.recovery':np.median(level0_rougher_test_lm,axis=1), 'final.output.recovery':np.median(level0_final_test_lm,axis=1) preds = pd.DataFrame(data = {'date':X_test.index,'qrf':pred_qrf_rougher,'rf':pred_rf_rougher,'rougher.output.recovery':level0_rougher_test.mean(axis=1), 'final.output.recovery':level0_final_test.mean(axis=1), 'qrf_final':pred_qrf_final,'rf_final':pred_rf_final}) stacked_preds_sub = preds stacked_preds_sub['date'] = stacked_preds_sub['date'].dt.strftime('%Y-%m-%dT%H:%M:%SZ') stacked_preds_sub.set_index('date',inplace=True) #stacked_preds_sub.drop_duplicates(inplace=True) stacked_preds_sub.to_csv('../results/stacked_sub_lgb_lasso_base_alldata_averaged_minmax_slope_interactions_proddiff_linear.csv') stacked_preds_sub.plot(style=['-','-'],figsize = (16,8),alpha=0.8,ylim = [35,95]) r = np.power(level0_testFeatures_rougher.prod(axis=1),1/level0_testFeatures_rougher.shape[1]) f = np.power(level0_testFeatures_final.prod(axis=1),1/level0_testFeatures_final.shape[1]) preds_av = pd.DataFrame(data = {'date':X_test.index,'rougher.output.recovery':r, 'final.output.recovery':f}) preds_av['date'] = preds_av['date'].dt.strftime('%Y-%m-%dT%H:%M:%SZ') preds_av.set_index('date',inplace=True) preds_av.plot(figsize = (20,10),style=['o','o'],alpha=0.9) ###Output _____no_output_____
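###Markdown
A toy check of the constrained weight optimisation used above, on synthetic data where one base model is clearly stronger. `toy_loss` (mean absolute error) stands in for the notebook's `mase`; all numbers are made up.
###Code
import numpy as np
from scipy.optimize import minimize

rng = np.random.RandomState(0)
y_true = rng.rand(500) * 100
preds = np.column_stack([y_true + rng.randn(500) * 1.0,    # strong base model
                         y_true + rng.randn(500) * 10.0])  # weak base model

def toy_loss(w):
    return np.mean(np.abs(preds.dot(w) - y_true))

w0 = np.full(preds.shape[1], 1.0 / preds.shape[1])
res = minimize(toy_loss, w0,
               constraints=({'type': 'eq', 'fun': lambda w: 1 - sum(w)}),
               method='SLSQP',
               bounds=[(0.0, 1.0)] * preds.shape[1])
print(res.x)  # most of the weight should land on the first (stronger) model
###Output
_____no_output_____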
Naive Bayes(important).ipynb
###Markdown Naive Bayes ###Code # Importing the libraries import numpy as np import matplotlib.pyplot as plt import pandas as pd import seaborn as sb plt.rcParams['figure.figsize']=(20.0,10.0) #importing the dataset dataset=pd.read_csv('C:/Users/Bharathi/Downloads/purchase_salary.csv') dataset.shape dataset.info() dataset.head() X=dataset.iloc[:,[2,3]].values y=dataset.iloc[:,4].values pd.unique(dataset['Purchased']) sb.distplot(dataset['EstimatedSalary']) sb.countplot(x='Purchased',data=dataset) #splitting the dataset into the training set and test set from sklearn.model_selection import train_test_split X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.10,random_state=101) #Feature scaling from sklearn.preprocessing import StandardScaler sc=StandardScaler() X_train=sc.fit_transform(X_train) X_test=sc.transform(X_test) #Fitting Naive bayes to the training set from sklearn.naive_bayes import GaussianNB classifier=GaussianNB() classifier.fit(X_train,y_train) #Predicting the Test set results y_pred=classifier.predict(X_test) y_pred classifier.score(X_test,y_test) X_new=sc.transform([[42,100000]]) classifier.predict(X_new) #Evaluating the algorithm from sklearn.metrics import classification_report,confusion_matrix print(confusion_matrix(y_test,y_pred)) print(classification_report(y_test,y_pred)) ###Output [[22 5] [ 1 12]] precision recall f1-score support 0 0.96 0.81 0.88 27 1 0.71 0.92 0.80 13 micro avg 0.85 0.85 0.85 40 macro avg 0.83 0.87 0.84 40 weighted avg 0.88 0.85 0.85 40
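###Markdown
An optional follow-up, not in the original analysis: with only 40 test samples the hold-out score above is noisy, so a quick 5-fold cross-validation of the same scaler + GaussianNB combination (assuming `X` and `y` from the cells above) gives a steadier accuracy estimate.
###Code
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import cross_val_score

nb_pipeline = make_pipeline(StandardScaler(), GaussianNB())
cv_scores = cross_val_score(nb_pipeline, X, y, cv=5, scoring='accuracy')
print(cv_scores)
print('Mean CV accuracy:', cv_scores.mean())
###Output
_____no_output_____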
v1.52/Functions/4. User comparison.ipynb
###Markdown User comparison Table of Contents1. [Preparation](preparation)2. [Functions](functions)3. [Tests](tests) Preparation ###Code %run "../Functions/3. Per session and per user analysis.ipynb" ###Output _____no_output_____ ###Markdown Functions ###Code def getAllUsers( dataframe ): allUserIds = np.array(dataframe['userId'].unique()) allUserIds = [i for i in allUserIds if not i in ['nan', np.nan, 'null']] return allUserIds # _source is used as correction source, if we want to include answers to these questions def getAllUserVectorData( userIds, _source = correctAnswers, _rmDF = rmdf152 ): # result isInitialized = False allData = [] f = FloatProgress(min=0, max=len(userIds)) display(f) for userId in userIds: #print(str(userId)) f.value += 1 dataVector = getUserDataVector(userId, _source = _source, _rmDF = _rmDF) if not isInitialized: isInitialized = True allData = dataVector else: allData = pd.concat([allData, dataVector], axis=1) #print('done') return allData def getAllUserVectorDataCustom(before, after, gfMode = False, rmMode = True, sessionCount = 1, _rmDF = rmdf152): userIds = [] if (before and after): userIds = getSurveysOfUsersWhoAnsweredBoth(sample, gfMode = gfMode, rmMode = rmMode) elif before: if rmMode: userIds = getRMBefores(sample) else: userIds = getGFBefores(sample) elif after: if rmMode: userIds = getRMAfters(sample) else: userIds = getGFormAfters(sample) if(len(userIds) > 0): userIds = userIds[localplayerguidkey] allUserVectorData = getAllUserVectorData(userIds, _rmDF = _rmDF) allUserVectorData = allUserVectorData.T result = allUserVectorData[allUserVectorData['sessionsCount'] == sessionCount].T return result else: print("no matching user") return [] methods = ['pearson', 'kendall', 'spearman'] def plotAllUserVectorDataCorrelationMatrix( _allUserVectorData, _method = methods[0], _title='RedMetrics Correlations', _abs=False, _clustered=False, _figsize = (20,20) ): _progress = FloatProgress(min=0, max=3) display(_progress) # computation of correlation matrix _m = _method if(not (_method in methods)): _m = methods[0] _correlation = _allUserVectorData.astype(float).corr(_m) _progress.value += 1 if(_abs): _correlation = _correlation.abs() _progress.value += 1 # plot if(_clustered): sns.clustermap(_correlation,cmap=plt.cm.jet,square=True,figsize=_figsize) else: _fig = plt.figure(figsize=_figsize) _ax = plt.subplot(111) _ax.set_title(_title) sns.heatmap(_correlation,ax=_ax,cmap=plt.cm.jet,square=True) _progress.value += 1 ###Output _____no_output_____
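###Markdown
Hypothetical usage sketch for the helpers above. It assumes, as these functions' defaults do, that `rmdf152` and `correctAnswers` are loaded by the earlier notebooks; the argument values are illustrative only.
###Code
userIds = getAllUsers(rmdf152)
allUserVectorData = getAllUserVectorData(userIds, _source=correctAnswers, _rmDF=rmdf152)
plotAllUserVectorDataCorrelationMatrix(
    allUserVectorData.T,     # transpose so that the RedMetrics features become columns
    _method='spearman',      # any of the three methods listed above
    _abs=True,               # compare correlation strength regardless of sign
    _clustered=True,
)
###Output
_____no_output_____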
rcnn_depth/image generation.ipynb
###Markdown CNN PrototypingDec 2018Regression based grasp predictor* Input is black and white image* Net is 2 conv layers, 3 FC layers* Output is x,y, theta of grasp ###Code from PIL import Image, ImageDraw, ImageFont %matplotlib inline import numpy as np import torch import torch.nn as nn from skimage import io import math from torch.utils.data import Dataset, DataLoader import torch.nn.functional as F import os from IPython.display import Audio import matplotlib.pyplot as plt IMG_X, IMG_Y = 200,200 # length and width of blocks (fixed for now) block_l, block_w = 20, 30 # img_list = [] # Calc rectangle vertices. makeRectangle() credit Sparkler, stackoverflow, feb 17 def makeRectangle(l, w, theta, offset=(0, 0)): c, s = math.cos(theta), math.sin(theta) rectCoords = [(l/2.0, w/2.0), (l/2.0, -w/2.0), (-l/2.0, -w/2.0), (-l/2.0, w/2.0)] return [(c*x-s*y+offset[0], s*x+c*y+offset[1]) for (x, y) in rectCoords] # ---- Make depth images --- def make_dataset(dirname, num_images): true_coords = [] newpath = './' + dirname if not os.path.exists(newpath): os.makedirs(newpath) print(newpath) for i in range(num_images): #orient = 0 # degrees img = Image.new('RGB', (IMG_X, IMG_Y), 'black') # block_l and _w offset so blocks don't run off edge of image rand_x = int(np.random.rand() * (IMG_X-2*block_l)) + block_l rand_y = int(np.random.rand() * (IMG_Y-2*block_w)) + block_w orient = int(np.random.rand() * 180) # .random() is range [0.0, 1.0). orient = math.radians(orient) # math.cos takes radians! true_coords.append(np.array((rand_x, rand_y, orient))) rect_vertices = makeRectangle(block_l, block_w, orient, offset=(rand_x, rand_y)) idraw = ImageDraw.Draw(img) idraw.polygon(rect_vertices, fill='white') # use a truetype font #font = imagefont.truetype("dejavusans.ttf", 15) #font = imagefont.truetype("arial.ttf",14) #idraw.text((10, 25), '('+ str(rand_x) + ', ' + str(rand_y) +')') img.save(newpath + '/rect'+str(i)+'.png') return true_coords train_truth = make_dataset('data1', 1500) print(len(train_truth)) test_truth = make_dataset('./data1/test', 300) class RectDepthImgsDataset(Dataset): """Artificially generated depth images dataset""" def __init__(self, img_dir, coords, transform=None): """ """ self.img_dir = img_dir self.true_coords = coords self.transform = transform def __len__(self): #print('true coord len', len(self.true_coords)) return len(self.true_coords) def __getitem__(self, idx): # image = self.images[idx] image = io.imread(self.img_dir + '/rect'+str(idx)+'.png') image = torch.FloatTensor(image).permute(2, 0, 1) #PIL and torch expect difft orders coords = torch.FloatTensor(self.true_coords[idx]) if self.transform: image = self.transform(image) # sample = {'image': image, 'grasp': str(coords[0]) + str(coords[1])} sample = {'image': image, 'grasp': coords} sample = image, coords return sample # Hyper parameters num_epochs = 20 num_classes = 3 # predicting x,y,orientation learning_rate = 0.001 batch_size = 32 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") print("CUDA available? 
device: ", device) # Dataset is depth images of rectangular blocks train_dataset = RectDepthImgsDataset(img_dir='./data1', coords=train_truth) # Data loader train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True) test_dataset = RectDepthImgsDataset(img_dir='./data1/test', coords=test_truth) # Data loader test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True) class Net(nn.Module): # CIFAR is 32x32x3, MNIST is 28x28x1) def __init__(self, IMG_X, IMG_Y): super(Net, self).__init__() self._imgx = IMG_X self._imgy = IMG_Y _pool = 2 _stride = 5 _outputlayers = 16 def _calc(val): layer_size = (val- (_stride-1)) / _pool return layer_size #print(self._imgx) self._const = _calc(_calc(self._imgx)) self._const *= _calc(_calc(self._imgy)) self._const *= _outputlayers #print(self._const) self._const = int(self._const) self.conv1 = nn.Conv2d(3, 6, _stride).to(device) self.pool = nn.MaxPool2d(_pool, _pool).to(device) self.conv2 = nn.Conv2d(6, _outputlayers, _stride).to(device) self.fc1 = nn.Linear(self._const, 120).to(device) self.fc2 = nn.Linear(120, 84).to(device) self.fc3 = nn.Linear(84, num_classes).to(device) def forward(self, x): #print(x.size()) x = x.to(device) x = x.view(-1, 3, IMG_X, IMG_Y) x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, self._const) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x model = Net(IMG_X, IMG_Y) model = model.to(device) # ONLY FOR DEBUGGING (check if code runs at all) #images = iter(train_loader) ##outputs = model(images.next()[0]) criterion = nn.MSELoss() optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) losses_list = [] ct = 0 print('Training model now...') total_step = len(train_loader) for epoch in range(num_epochs): for i_batch, (images, labels) in enumerate(train_loader): images, labels = images.to(device), labels.to(device) optimizer.zero_grad() #print('This is batch', i_batch, ' with len images ', len(images)) # Forward pass outputs = model(images).to(device) loss = criterion(outputs, labels) # Backward and optimize loss.backward() optimizer.step() #if (i_batch+1) % 1 == 0: if (i_batch) % 25 == 0: print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, i_batch+1, total_step, loss.item())) losses_list.append(loss.item()) ###Output Training model now... 
Epoch [1/20], Step [1/47], Loss: 7477.4780 Epoch [1/20], Step [26/47], Loss: 199.8231 Epoch [2/20], Step [1/47], Loss: 112.1143 Epoch [2/20], Step [26/47], Loss: 33.0154 Epoch [3/20], Step [1/47], Loss: 16.2047 Epoch [3/20], Step [26/47], Loss: 16.7992 Epoch [4/20], Step [1/47], Loss: 7.2733 Epoch [4/20], Step [26/47], Loss: 7.4274 Epoch [5/20], Step [1/47], Loss: 3.2121 Epoch [5/20], Step [26/47], Loss: 3.7564 Epoch [6/20], Step [1/47], Loss: 3.7327 Epoch [6/20], Step [26/47], Loss: 2.5129 Epoch [7/20], Step [1/47], Loss: 2.6528 Epoch [7/20], Step [26/47], Loss: 2.6656 Epoch [8/20], Step [1/47], Loss: 1.4304 Epoch [8/20], Step [26/47], Loss: 2.2827 Epoch [9/20], Step [1/47], Loss: 1.6282 Epoch [9/20], Step [26/47], Loss: 1.1718 Epoch [10/20], Step [1/47], Loss: 1.5665 Epoch [10/20], Step [26/47], Loss: 1.9583 Epoch [11/20], Step [1/47], Loss: 0.9557 Epoch [11/20], Step [26/47], Loss: 1.3573 Epoch [12/20], Step [1/47], Loss: 1.3503 Epoch [12/20], Step [26/47], Loss: 1.2977 Epoch [13/20], Step [1/47], Loss: 1.2210 Epoch [13/20], Step [26/47], Loss: 7.6422 Epoch [14/20], Step [1/47], Loss: 3.6326 Epoch [14/20], Step [26/47], Loss: 1.5281 Epoch [15/20], Step [1/47], Loss: 2.2789 Epoch [15/20], Step [26/47], Loss: 2.8319 Epoch [16/20], Step [1/47], Loss: 6.0924 Epoch [16/20], Step [26/47], Loss: 30.1451 Epoch [17/20], Step [1/47], Loss: 9.1859 Epoch [17/20], Step [26/47], Loss: 7.4994 Epoch [18/20], Step [1/47], Loss: 11.4720 Epoch [18/20], Step [26/47], Loss: 9.3305 Epoch [19/20], Step [1/47], Loss: 9.0590 Epoch [19/20], Step [26/47], Loss: 12.3741 Epoch [20/20], Step [1/47], Loss: 4.4955 Epoch [20/20], Step [26/47], Loss: 6.6464 ###Markdown evaluate ###Code # alert when training is done sound_file = '/home/rui/Downloads/newyear.ogg' Audio(sound_file, autoplay=True) plt.plot(range(len(losses_list)), losses_list) print(len(losses_list)) plt.xlim([0,2350]) plt.ylim([0,100]) model.to(device).eval() # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance) # evaluation: MSE loss of center predictions # evaluation: MSE loss of degree predictions criterion = nn.MSELoss() print(len(test_loader)) print(len(train_loader)) print(len(test_loader)) print(len(test_loader)) with torch.no_grad(): total_err = 0 n_total = 0 for i_batch, (images, labels) in enumerate(test_loader): #for i_batch, (images, labels) in enumerate(train_loader): print('i_batch', i_batch) images = images.to(device) labels = labels.to(device) outputs = model(images).to(device) diff = outputs - labels diff = torch.sum(diff, 0) #column sum total_err += diff #loss = criterion(outputs, labels) #print(loss) #x, y, orient = outputs.squeeze() # remove extra dim, so not nested list of list #true_x, true_y, true_orient = labels.squeeze() #xy_err +=criterion( torch.Tensor([x,y]), torch.Tensor([true_x, true_y])) #orient_err += criterion(orient, true_orient) #print(n_total) #n_total += 1 #_, predicted = torch.max(outputs.data, 1) #total += labels.size(0) #correct += (predicted == labels).sum().item() print(n_total * batch_size) n_total = len(test_loader.dataset) print(n_total) from IPython.display import display # to display images import torchvision img = torchvision.transforms.ToPILImage()(images[0].cpu()) display(img) avg_err = total_err / n_total print(total_err) print(avg_err) xerr, yerr, orienterr = avg_err print('\n ----------------------------') print('Across a total of %d images, the average error was %0.2f and %0.2f pixels for x and y, \ and %0.2f degrees for orientation' % (n_total, xerr, yerr, 
math.degrees(orienterr))) print('\n ----------------------------') ###Output 300 ###Markdown diagnostics ###Code from IPython.display import display # to display images #font = ImageFont.truetype("Arial.ttf",14) # OSError: cannot open resource font = ImageFont.truetype("/usr/share/fonts/dejavu/DejaVuSans.ttf",14) def imshow_coord(img, a_label): img = torchvision.transforms.ToPILImage()(img) draw = ImageDraw.Draw(img) #draw.text((10, 25), '(' + np.array2string(np.around(a_label.numpy()), separator=', ') + ')') x,y,orient = a_label rect_vertices = makeRectangle(5, 15, orient, offset=(x,y)) draw.polygon(rect_vertices, fill='red') draw = draw_crosspointer(a_label, draw, 'green', 'white', length=8) display(img) def draw_crosspointer(xy, draw, fillcolor_X='green', fillcolor='white', length=2): a,b, orient = tuple(xy) draw.line((a-length, b+length, a+length, b-length), fill=fillcolor_X) draw.line((a-length, b-length, a+length, b+length), fill=fillcolor_X) draw.point((a,b)) return draw import torchvision import matplotlib.pyplot as plt import copy # get some random training images with torch.no_grad(): dataiter = iter(train_loader) images, labels = dataiter.next() outputs = model(images) # show images #imshow_coord(torchvision.utils.make_grid(images), (outputs)) # print('Predicted: ', ' '.join('%5s' % classes[predicted[j]] # for j in range(4))) loss = criterion(outputs.to(device), labels.to(device)) n =0 #print(len(labels)) print('x,y truth', labels[n]) print('x,y guess by net', outputs[n]) images_copy = copy.deepcopy(images) print('image size: ', images_copy[n].size()) print(loss) print('net output: ') imshow_coord(images_copy[n], outputs[n]) print('truth label: ') imshow_coord(images_copy[n], labels[n]) #imagePIL = torchvision.transforms.ToPILImage()(images[n]) #print('PIL image size', imagePIL.size) #imagePIL.save('test.png') #display(imagePIL) #display(Image.open('./data/rect'+str(n)+'.png')) %matplotlib inline #im = Image.open("Mew.jpg") n = 12 n = np.random.rand()*len(images) n = int(n) image = io.imread('./data/rect'+str(n)+'.png') image_tensor = torch.FloatTensor(image).permute(2, 0, 1) #PIL and torch expect difft orders #coords = torch.FloatTensor(true_coords[n]) with torch.no_grad(): output = model(image_tensor) #print(true_coords[n]) print(output) x,y,orient = output.cpu().numpy().flatten() imshow_coord(image, (x,y, orient)) plt.imshow(image) #img = Image.new('RGB', (300,400), 'gray') import seaborn as sns sns.set_style("ticks") n = 0 #imshow_coord(images[n], outputs[n], labels[n]) print(images.size()) a = torchvision.utils.make_grid(images) print(a.max(), a.min(), a.size()) #a = a / 2 + 0.5 # unnormalize a = a.cpu().numpy() a = np.transpose(a,(1,2,0)) print(a.shape) print(a.ndim) #ran = a.max() - a.min() #a = (a/ran ) plt.rcParams['figure.figsize'] = [50,10] fig,ax = plt.isubplots() # Display the image #x,y = labels[0].numpy() #plt.scatter(x,y, color='g', marker='x', linewidth='1') labels = labels.cpu() print('x,y guess by net', outputs[n]) import matplotlib.patches as patches for i in range(len(labels)-20): x,y, orient = labels[i].numpy() x1, y1, orient1 = outputs[i].cpu().numpy() orient = np.rad2deg(orient) orient1 = np.rad2deg(orient1) # class matplotlib.patches.Rectangle(xy, width, height, angle=0.0, **kwargs)[source] #rect = patches.Rectangle((IMG_X, y), 30,20,angle=orient, fill=True, color='orange') rect = patches.Rectangle((x+i*IMG_X, y), 30,20,angle=orient, fill=True, color='black') rect1 = patches.Rectangle((x1+i*IMG_X, y1), 30,20,angle=orient1, fill=True, color='orange') 
rect2 = patches.Rectangle((i*IMG_X, 0), 200, 200,angle=0, fill=False, color='black') ax.add_patch(rect) ax.add_patch(rect1) ax.add_patch(rect2) ax.scatter(x + i*IMG_X, y, color='r', marker='x', linewidth='1') #plt.imshow(np.transpose(a, (2,0,1))) ax.set_aspect('equal', 'box') plt.show() #ax.imshow(a) import seaborn as sns sns.set() f = plt.subplot() f.plot(range(len(losses_list)), losses_list) plt.rcParams['figure.figsize'] = [30, 5] xposition = np.array(range(num_epoch)) * (total_step) #print(xposition) for xc in xposition: #plt.axvline(x=xc, color='k', linestyle='--') pass g = plt.subplot() trunc = 100 g.plot(range(trunc), losses_list[-trunc:]) plt.show() ###Output _____no_output_____ ###Markdown Don't run the following cell unless needed (the hyperparameter sweep takes a long time) ###Code num_epochs = [10,20,50,70,100,150] num_classes = 3 # predicting x,y,orientation batch_size = 5 learning_rates = [0.001, 0.005, 0.01, 0.1] meta_losses_list = [] for i in range(len(num_epochs)): num_epoch = num_epochs[i] learning_rate = learning_rates[0] losses_list = [] print('Training model now... with %d epochs and learning rate %g' % (num_epoch, learning_rate)) total_step = len(train_loader) for epoch in range(num_epoch): for i_batch, (images, labels) in enumerate(train_loader): images, labels = images.to(device), labels.to(device) optimizer.zero_grad() # Forward pass outputs = model(images) # magnify orientation loss (column 2 holds theta for every sample in the batch) outputs[:, 2] *= 10 labels[:, 2] *= 10 loss = criterion(outputs, labels) # Backward and optimize loss.backward() optimizer.step() if (i_batch+1) % 1 == 0: print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epoch, i_batch+1, total_step, loss.item())) losses_list.append(loss.item()) meta_losses_list.append(losses_list) fig = plt.subplots() trunc = 100 for n_ep, l in zip(num_epochs, meta_losses_list): plt.plot(range(len(l[-trunc:])), l[-trunc:], label='%d epochs' % n_ep) plt.legend() plt.show() ###Output _____no_output_____
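###Markdown
Not in the original notebook: a minimal sketch for saving and reloading the trained weights so the diagnostics above can be rerun without retraining. The file name is arbitrary.
###Code
import torch

torch.save(model.state_dict(), 'grasp_cnn.pth')

# later, or in a fresh session with the Net class defined:
model_reloaded = Net(IMG_X, IMG_Y)
model_reloaded.load_state_dict(torch.load('grasp_cnn.pth', map_location=device))
model_reloaded.to(device).eval()
###Output
_____no_output_____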
module1/Kaggle_random_forest.ipynb
###Markdown Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 1 Assignment- [ ] Do train/validate/test split with the Tanzania Waterpumps data.- [ ] Define a function to wrangle train, validate, and test sets in the same way. Clean outliers and engineer features. (For example, [what other columns have zeros and shouldn't?](https://github.com/Quartz/bad-data-guidezeros-replace-missing-values) What other columns are duplicates, or nearly duplicates? Can you extract the year from date_recorded? Can you engineer new features, such as the number of years from waterpump construction to waterpump inspection?)- [ ] Select features. Use a scikit-learn pipeline to encode categoricals, impute missing values, and fit a decision tree classifier.- [ ] Get your validation accuracy score.- [ ] Get and plot your feature importances.- [ ] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.)- [ ] Commit your notebook to your fork of the GitHub repo. Stretch Goals Reading- A Visual Introduction to Machine Learning - [Part 1: A Decision Tree](http://www.r2d3.us/visual-intro-to-machine-learning-part-1/) - [Part 2: Bias and Variance](http://www.r2d3.us/visual-intro-to-machine-learning-part-2/)- [Decision Trees: Advantages & Disadvantages](https://christophm.github.io/interpretable-ml-book/tree.htmladvantages-2)- [How a Russian mathematician constructed a decision tree — by hand — to solve a medical problem](http://fastml.com/how-a-russian-mathematician-constructed-a-decision-tree-by-hand-to-solve-a-medical-problem/)- [How decision trees work](https://brohrer.github.io/how_decision_trees_work.html)- [Let’s Write a Decision Tree Classifier from Scratch](https://www.youtube.com/watch?v=LDRbO9a6XPU) — _Don’t worry about understanding the code, just get introduced to the concepts. This 10 minute video has excellent diagrams and explanations._- [Random Forests for Complete Beginners: The definitive guide to Random Forests and Decision Trees](https://victorzhou.com/blog/intro-to-random-forests/) Doing- [ ] Add your own stretch goal(s) !- [ ] Try other [scikit-learn imputers](https://scikit-learn.org/stable/modules/impute.html).- [ ] Try other [scikit-learn scalers](https://scikit-learn.org/stable/modules/preprocessing.html).- [ ] Make exploratory visualizations and share on Slack. Exploratory visualizationsVisualize the relationships between feature(s) and target. I recommend you do this with your training set, after splitting your data. For this problem, you may want to create a new column to represent the target as a number, 0 or 1. For example:```pythontrain['functional'] = (train['status_group']=='functional').astype(int)```You can try [Seaborn "Categorical estimate" plots](https://seaborn.pydata.org/tutorial/categorical.html) for features with reasonably few unique values. (With too many unique values, the plot is unreadable.)- Categorical features. (If there are too many unique values, you can replace less frequent values with "OTHER.")- Numeric features. (If there are too many unique values, you can [bin with pandas cut / qcut functions](https://pandas.pydata.org/pandas-docs/stable/getting_started/basics.html?highlight=qcutdiscretization-and-quantiling).)You can try [Seaborn linear model plots](https://seaborn.pydata.org/tutorial/regression.html) with numeric features. 
For this problem, you may want to use the parameter `logistic=True`You do _not_ need to use Seaborn, but it's nice because it includes confidence intervals to visualize uncertainty. High-cardinality categoricalsThis code from a previous assignment demonstrates how to replace less frequent values with 'OTHER'```python Reduce cardinality for NEIGHBORHOOD feature ... Get a list of the top 10 neighborhoodstop10 = train['NEIGHBORHOOD'].value_counts()[:10].index At locations where the neighborhood is NOT in the top 10, replace the neighborhood with 'OTHER'train.loc[~train['NEIGHBORHOOD'].isin(top10), 'NEIGHBORHOOD'] = 'OTHER'test.loc[~test['NEIGHBORHOOD'].isin(top10), 'NEIGHBORHOOD'] = 'OTHER'``` ###Code #Imports import pandas as pd import numpy as np import category_encoders as ce from sklearn.model_selection import train_test_split from sklearn.impute import SimpleImputer from sklearn.ensemble import RandomForestClassifier from sklearn.pipeline import make_pipeline # If you're in Colab... import os, sys in_colab = 'google.colab' in sys.modules if in_colab: # Install required python packages: # category_encoders, version >= 2.0 # pandas-profiling, version >= 2.0 # plotly, version >= 4.0 !pip install --upgrade category_encoders pandas-profiling plotly # Pull files from Github repo os.chdir('/content') !git init . !git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge.git !git pull origin master # Change into directory for module os.chdir('module1') #Load dataset train = pd.merge(pd.read_csv('../data/tanzania/train_features.csv'), pd.read_csv('../data/tanzania/train_labels.csv')) test = pd.read_csv('../data/tanzania/test_features.csv') sample_submission = pd.read_csv('../data/tanzania/sample_submission.csv') train.shape, test.shape ###Output _____no_output_____ ###Markdown - [x] Do train/validate/test split with the Tanzania Waterpumps data.- [x] Define a function to wrangle train, validate, and test sets in the same way. Clean outliers and engineer features. (For example, [what other columns have zeros and shouldn't?](https://github.com/Quartz/bad-data-guidezeros-replace-missing-values) What other columns are duplicates, or nearly duplicates? Can you extract the year from date_recorded? Can you engineer new features, such as the number of years from waterpump construction to waterpump inspection?)- [x] Select features. Use a scikit-learn pipeline to encode categoricals, impute missing values, and fit a decision tree classifier.- [x] Get your validation accuracy score.- [ ] Get and plot your feature importances.- [ ] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.)- [ ] Commit your notebook to your fork of the GitHub repo. ###Code #Train/Val/Test split train, val = train_test_split(train, train_size = 0.8, test_size = 0.2, stratify=train['status_group'], random_state = 42) train.shape, val.shape, test.shape def wrangle(X): """Used to wrangle train, val, and test""" #Prevent SettingWithCopyWarning X = X.copy() X['latitude'] = X['latitude'].replace(-2e-08, 0) cols_with_zeros = ['longitude', 'latitude', 'construction_year', 'gps_height', 'population'] for col in cols_with_zeros: X[col] = X[col].replace(0,np.nan) X[col+'_MISSING'] = X[col].isnull() #What does this line do???????????? 
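# The _MISSING column is a boolean flag marking the rows where the zero placeholder was just replaced with NaN, so the model can learn from the missingness itself.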
#Drop duplicate cols duplicates = ['quantity_group', 'payment_type'] X = X.drop(columns=duplicates) #Drop non-informative features: recorded_by (never varies) unusable_variance = ['recorded_by'] X = X.drop(columns=unusable_variance) #Convert date_recorded to datetime X['date_recorded'] = pd.to_datetime(X['date_recorded'], infer_datetime_format=True) #Extract components from date_recorded, then drop the original column X['year_recorded'] = X['date_recorded'].dt.year X['month_recorded'] = X['date_recorded'].dt.month X['day_recorded'] = X['date_recorded'].dt.day X = X.drop(columns='date_recorded') #Engineer feature: how many years from construction_year to date_recorded X['years'] = X['year_recorded'] - X['construction_year'] X['years_MISSING'] = X['years'].isnull() #Flag rows where construction_year (and therefore years) is missing, so missingness is available to the model # return the wrangled dataframe return X train = wrangle(train) val = wrangle(val) test = wrangle(test) ###Output _____no_output_____ ###Markdown TODO: Clean and wrangle further, explore with pandas-profiling, and calculate and plot feature importances ###Code #Target and feature selection target = 'status_group' train_features = train.drop(columns=[target]) numeric_features = train_features.select_dtypes(include='number').columns.tolist() cardinality = train_features.select_dtypes(exclude='number').nunique() categorical_features = cardinality[cardinality < 50].index.tolist() features = numeric_features + categorical_features #Split into features and targets X_train = train[features] y_train = train[target] X_val = val[features] y_val = val[target] X_test = test[features] ###Output _____no_output_____ ###Markdown Random Forests ###Code #%%time pipeline = make_pipeline( ce.OneHotEncoder(use_cat_names=True), SimpleImputer(strategy='median'), RandomForestClassifier(n_estimators=5, random_state=42, n_jobs=-1) ) pipeline.fit(X_train, y_train) print('Validation Accuracy', pipeline.score(X_val, y_val)) ###Output Validation Accuracy 0.7828282828282829 ###Markdown Submission ###Code sample_submission.head() test_predictions = pipeline.predict(X_test) test_predictions.shape submission = pd.DataFrame(test['id'], columns=['id']) submission['status_group'] = test_predictions submission.head() #Download prediction from google.colab import files submission.to_csv('submission.csv', index=False) files.download('submission.csv') ###Output _____no_output_____
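###Markdown
Sketch for the remaining checklist item ("Get and plot your feature importances"). The step names below assume `make_pipeline`'s default naming (lower-cased class names); adjust them if the pipeline is built differently.
###Code
import pandas as pd
import matplotlib.pyplot as plt

encoder = pipeline.named_steps['onehotencoder']
rf = pipeline.named_steps['randomforestclassifier']
encoded_columns = encoder.transform(X_val).columns  # category_encoders returns a DataFrame
importances = pd.Series(rf.feature_importances_, index=encoded_columns)

plt.figure(figsize=(10, 12))
importances.sort_values().tail(30).plot.barh()  # 30 most important encoded features
plt.title('Random forest feature importances')
plt.show()
###Output
_____no_output_____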
src/tv-script-generation/dlnd_tv_script_generation.ipynb
###Markdown TV Script GenerationIn this project, you'll generate your own [Seinfeld](https://en.wikipedia.org/wiki/Seinfeld) TV scripts using RNNs. You'll be using part of the [Seinfeld dataset](https://www.kaggle.com/thec03u5/seinfeld-chroniclesscripts.csv) of scripts from 9 seasons. The Neural Network you'll build will generate a new ,"fake" TV script, based on patterns it recognizes in this training data. Get the DataThe data is already provided for you in `./data/Seinfeld_Scripts.txt` and you're encouraged to open that file and look at the text. >* As a first step, we'll load in this data and look at some samples. * Then, you'll be tasked with defining and training an RNN to generate a new script! ###Code """ DON'T MODIFY ANYTHING IN THIS CELL """ # load in data import helper data_dir = './data/Seinfeld_Scripts.txt' text = helper.load_data(data_dir) ###Output _____no_output_____ ###Markdown Explore the DataPlay around with `view_line_range` to view different parts of the data. This will give you a sense of the data you'll be working with. You can see, for example, that it is all lowercase text, and each new line of dialogue is separated by a newline character `\n`. ###Code view_line_range = (0, 10) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ import numpy as np print('Dataset Stats') print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()}))) lines = text.split('\n') print('Number of lines: {}'.format(len(lines))) word_count_line = [len(line.split()) for line in lines] print('Average number of words in each line: {}'.format(np.average(word_count_line))) print() print('The lines {} to {}:'.format(*view_line_range)) print('\n'.join(text.split('\n')[view_line_range[0]:view_line_range[1]])) ###Output Dataset Stats Roughly the number of unique words: 46367 Number of lines: 109233 Average number of words in each line: 5.544240293684143 The lines 0 to 10: jerry: do you know what this is all about? do you know, why were here? to be out, this is out...and out is one of the single most enjoyable experiences of life. people...did you ever hear people talking about we should go out? this is what theyre talking about...this whole thing, were all out now, no one is home. not one person here is home, were all out! there are people trying to find us, they dont know where we are. (on an imaginary phone) did you ring?, i cant find him. where did he go? he didnt tell me where he was going. he must have gone out. you wanna go out you get ready, you pick out the clothes, right? you take the shower, you get all ready, get the cash, get your friends, the car, the spot, the reservation...then youre standing around, what do you do? you go we gotta be getting back. once youre out, you wanna get back! you wanna go to sleep, you wanna get up, you wanna go out again tomorrow, right? where ever you are in life, its my feeling, youve gotta go. jerry: (pointing at georges shirt) see, to me, that button is in the worst possible spot. the second button literally makes or breaks the shirt, look at it. its too high! its in no-mans-land. you look like you live with your mother. george: are you through? jerry: you do of course try on, when you buy? george: yes, it was purple, i liked it, i dont actually recall considering the buttons. ###Markdown --- Implement Pre-processing FunctionsThe first thing to do to any dataset is pre-processing. 
Implement the following pre-processing functions below:- Lookup Table- Tokenize Punctuation Lookup TableTo create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:- Dictionary to go from the words to an id, we'll call `vocab_to_int`- Dictionary to go from the id to word, we'll call `int_to_vocab`Return these dictionaries in the following **tuple** `(vocab_to_int, int_to_vocab)` ###Code import problem_unittests as tests from collections import Counter def create_lookup_tables(text): """ Create lookup tables for vocabulary :param text: The text of tv scripts split into words :return: A tuple of dicts (vocab_to_int, int_to_vocab) """ # TODO: Implement Function word_counts = Counter(text) sorted_vocab = sorted(word_counts, key=word_counts.get, reverse=True) int_to_vocab = {ii: word for ii, word in enumerate(sorted_vocab)} vocab_to_int = {word: ii for ii, word in int_to_vocab.items()} # return tuple return (vocab_to_int, int_to_vocab) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_create_lookup_tables(create_lookup_tables) ###Output Tests Passed ###Markdown Tokenize PunctuationWe'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks can create multiple ids for the same word. For example, "bye" and "bye!" would generate two different word ids.Implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:- Period ( **.** )- Comma ( **,** )- Quotation Mark ( **"** )- Semicolon ( **;** )- Exclamation mark ( **!** )- Question mark ( **?** )- Left Parentheses ( **(** )- Right Parentheses ( **)** )- Dash ( **-** )- Return ( **\n** )This dictionary will be used to tokenize the symbols and add the delimiter (space) around it. This separates each symbols as its own word, making it easier for the neural network to predict the next word. Make sure you don't use a value that could be confused as a word; for example, instead of using the value "dash", try using something like "||dash||". ###Code def token_lookup(): """ Generate a dict to turn punctuation into a token. :return: Tokenized dictionary where the key is the punctuation and the value is the token """ # TODO: Implement Function tokens = dict() tokens['.'] = '<PERIOD>' tokens[','] = '<COMMA>' tokens['"'] = '<QUOTATION_MARK>' tokens[';'] = '<SEMICOLON>' tokens['!'] = '<EXCLAMATION_MARK>' tokens['?'] = '<QUESTION_MARK>' tokens['('] = '<LEFT_PAREN>' tokens[')'] = '<RIGHT_PAREN>' tokens['-'] = '<DASH>' tokens['\n'] = '<NEW_LINE>' return tokens """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_tokenize(token_lookup) ###Output Tests Passed ###Markdown Pre-process all the data and save itRunning the code cell below will pre-process all the data and save it to file. You're encouraged to lok at the code for `preprocess_and_save_data` in the `helpers.py` file to see what it's doing in detail, but you do not need to change this code. ###Code """ DON'T MODIFY ANYTHING IN THIS CELL """ # pre-process training data helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables) ###Output _____no_output_____ ###Markdown Check PointThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. 
The preprocessed data has been saved to disk. ###Code """ DON'T MODIFY ANYTHING IN THIS CELL """ import helper import problem_unittests as tests int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess() ###Output _____no_output_____ ###Markdown Build the Neural NetworkIn this section, you'll build the components necessary to build an RNN by implementing the RNN Module and forward and backpropagation functions. Check Access to GPU ###Code """ DON'T MODIFY ANYTHING IN THIS CELL """ import torch # Check for a GPU train_on_gpu = torch.cuda.is_available() if not train_on_gpu: print('No GPU found. Please use a GPU to train your neural network.') ###Output _____no_output_____ ###Markdown InputLet's start with the preprocessed input data. We'll use [TensorDataset](http://pytorch.org/docs/master/data.htmltorch.utils.data.TensorDataset) to provide a known format to our dataset; in combination with [DataLoader](http://pytorch.org/docs/master/data.htmltorch.utils.data.DataLoader), it will handle batching, shuffling, and other dataset iteration functions.You can create data with TensorDataset by passing in feature and target tensors. Then create a DataLoader as usual.```data = TensorDataset(feature_tensors, target_tensors)data_loader = torch.utils.data.DataLoader(data, batch_size=batch_size)``` BatchingImplement the `batch_data` function to batch `words` data into chunks of size `batch_size` using the `TensorDataset` and `DataLoader` classes.>You can batch words using the DataLoader, but it will be up to you to create `feature_tensors` and `target_tensors` of the correct size and content for a given `sequence_length`.For example, say we have these as input:```words = [1, 2, 3, 4, 5, 6, 7]sequence_length = 4```Your first `feature_tensor` should contain the values:```[1, 2, 3, 4]```And the corresponding `target_tensor` should just be the next "word"/tokenized word value:```5```This should continue with the second `feature_tensor`, `target_tensor` being:```[2, 3, 4, 5] features6 target``` ###Code from torch.utils.data import TensorDataset, DataLoader def batch_data(words, sequence_length, batch_size): """ Batch the neural network data using DataLoader :param words: The word ids of the TV scripts :param sequence_length: The sequence length of each batch :param batch_size: The size of each batch; the number of sequences in a batch :return: DataLoader with batched data """ # TODO: Implement function n_batches = len(words)//batch_size words = words[:n_batches*batch_size] y_len = len(words) - sequence_length x, y = [], [] for idx in range(0, y_len): idx_end = sequence_length + idx x_batch = words[idx:idx_end] #print("feature: ",x_batch) x.append(x_batch) batch_y = words[idx_end] #print("target: ", batch_y) y.append(batch_y) data = TensorDataset(torch.from_numpy(np.asarray(x)), torch.from_numpy(np.asarray(y))) data_loader = DataLoader(data, batch_size=batch_size) return data_loader # there is no test for this function, but you are encouraged to create # print statements and tests of your own ###Output _____no_output_____ ###Markdown Test your dataloader You'll have to modify this code to test a batching function, but it should look fairly similar.Below, we're generating some test text data and defining a dataloader using the function you defined, above. 
Then, we are getting some sample batch of inputs `sample_x` and targets `sample_y` from our dataloader.Your code should return something like the following (likely in a different order, if you shuffled your data):```torch.Size([10, 5])tensor([[ 28, 29, 30, 31, 32], [ 21, 22, 23, 24, 25], [ 17, 18, 19, 20, 21], [ 34, 35, 36, 37, 38], [ 11, 12, 13, 14, 15], [ 23, 24, 25, 26, 27], [ 6, 7, 8, 9, 10], [ 38, 39, 40, 41, 42], [ 25, 26, 27, 28, 29], [ 7, 8, 9, 10, 11]])torch.Size([10])tensor([ 33, 26, 22, 39, 16, 28, 11, 43, 30, 12])``` SizesYour sample_x should be of size `(batch_size, sequence_length)` or (10, 5) in this case and sample_y should just have one dimension: batch_size (10). ValuesYou should also notice that the targets, sample_y, are the *next* value in the ordered test_text data. So, for an input sequence `[ 28, 29, 30, 31, 32]` that ends with the value `32`, the corresponding output should be `33`. ###Code # test dataloader test_text = range(50) t_loader = batch_data(test_text, sequence_length=5, batch_size=10) data_iter = iter(t_loader) sample_x, sample_y = data_iter.next() print(sample_x.shape) print(sample_x) print() print(sample_y.shape) print(sample_y) ###Output torch.Size([10, 5]) tensor([[ 0, 1, 2, 3, 4], [ 1, 2, 3, 4, 5], [ 2, 3, 4, 5, 6], [ 3, 4, 5, 6, 7], [ 4, 5, 6, 7, 8], [ 5, 6, 7, 8, 9], [ 6, 7, 8, 9, 10], [ 7, 8, 9, 10, 11], [ 8, 9, 10, 11, 12], [ 9, 10, 11, 12, 13]]) torch.Size([10]) tensor([ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]) ###Markdown --- Build the Neural NetworkImplement an RNN using PyTorch's [Module class](http://pytorch.org/docs/master/nn.htmltorch.nn.Module). You may choose to use a GRU or an LSTM. To complete the RNN, you'll have to implement the following functions for the class: - `__init__` - The initialize function. - `init_hidden` - The initialization function for an LSTM/GRU hidden state - `forward` - Forward propagation function. The initialize function should create the layers of the neural network and save them to the class. The forward propagation function will use these layers to run forward propagation and generate an output and a hidden state.**The output of this model should be the *last* batch of word scores** after a complete sequence has been processed. That is, for each input sequence of words, we only want to output the word scores for a single, most likely, next word. Hints1. Make sure to stack the outputs of the lstm to pass to your fully-connected layer, you can do this with `lstm_output = lstm_output.contiguous().view(-1, self.hidden_dim)`2. 
You can get the last batch of word scores by shaping the output of the final, fully-connected layer like so:``` reshape into (batch_size, seq_length, output_size)output = output.view(batch_size, -1, self.output_size) get last batchout = output[:, -1]``` ###Code import torch.nn as nn class RNN(nn.Module): def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5): """ Initialize the PyTorch RNN Module :param vocab_size: The number of input dimensions of the neural network (the size of the vocabulary) :param output_size: The number of output dimensions of the neural network :param embedding_dim: The size of embeddings, should you choose to use them :param hidden_dim: The size of the hidden layer outputs :param dropout: dropout to add in between LSTM/GRU layers """ super(RNN, self).__init__() # TODO: Implement function # set class variables self.output_size = output_size self.hidden_dim = hidden_dim self.n_layers = n_layers # define model layers self.embedding = nn.Embedding(vocab_size, embedding_dim) self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers, dropout=dropout, batch_first=True) self.fc = nn.Linear(hidden_dim, output_size) def forward(self, nn_input, hidden): """ Forward propagation of the neural network :param nn_input: The input to the neural network :param hidden: The hidden state :return: Two Tensors, the output of the neural network and the latest hidden state """ # TODO: Implement function batch_size = nn_input.size(0) #embedding embeds = self.embedding(nn_input) #lstm output output, hidden = self.lstm(embeds, hidden) #stack up lstm output output = output.contiguous().view(-1, self.hidden_dim) #fully connected layer output = self.fc(output) #reshape output = output.view(batch_size, -1, self.output_size) #get last batch out = output[:, -1] # return one batch of output word scores and the hidden state return out, hidden def init_hidden(self, batch_size): ''' Initialize the hidden state of an LSTM/GRU :param batch_size: The batch_size of the hidden state :return: hidden state of dims (n_layers, batch_size, hidden_dim) ''' # Implement function # initialize hidden state with zero weights, and move to GPU if available weight = next(self.parameters()).data if (train_on_gpu): hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda(), weight.new(self.n_layers, batch_size, self.hidden_dim).zero_().cuda()) else: hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(), weight.new(self.n_layers, batch_size, self.hidden_dim).zero_()) return hidden """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_rnn(RNN, train_on_gpu) ###Output Tests Passed ###Markdown Define forward and backpropagationUse the RNN class you implemented to apply forward and back propagation. This function will be called, iteratively, in the training loop as follows:```loss = forward_back_prop(decoder, decoder_optimizer, criterion, inp, target)```And it should return the average loss over a batch and the hidden state returned by a call to `RNN(inp, hidden)`. 
Recall that you can get this loss by computing it, as usual, and calling `loss.item()`.**If a GPU is available, you should move your data to that GPU device, here.** ###Code def forward_back_prop(rnn, optimizer, criterion, inp, target, hidden): """ Forward and backward propagation on the neural network :param decoder: The PyTorch Module that holds the neural network :param decoder_optimizer: The PyTorch optimizer for the neural network :param criterion: The PyTorch loss function :param inp: A batch of input to the neural network :param target: The target output for the batch of input :return: The loss and the latest hidden state Tensor """ # TODO: Implement Function # move data to GPU, if available if train_on_gpu: rnn.cuda() inp, target = inp.cuda(), target.cuda() hidden_state = tuple([each.data for each in hidden]) #zero gradients rnn.zero_grad() #output from the model output, hidden_state = rnn(inp, hidden_state) #calculate the loss loss = criterion(output, target) #backpropagate loss.backward() # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs. clip = 5 nn.utils.clip_grad_norm_(rnn.parameters(), clip) #perform step optimizer.step() # perform backpropagation and optimization # return the loss over a batch and the hidden state produced by our model return loss.item(), hidden_state # Note that these tests aren't completely extensive. # they are here to act as general checks on the expected outputs of your functions """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_forward_back_prop(RNN, forward_back_prop, train_on_gpu) ###Output Tests Passed ###Markdown Neural Network TrainingWith the structure of the network complete and data ready to be fed in the neural network, it's time to train it. Train LoopThe training loop is implemented for you in the `train_decoder` function. This function will train the network over all the batches for the number of epochs given. The model progress will be shown every number of batches. This number is set with the `show_every_n_batches` parameter. You'll set this parameter along with other parameters in the next section. ###Code """ DON'T MODIFY ANYTHING IN THIS CELL """ def train_rnn(rnn, batch_size, optimizer, criterion, n_epochs, show_every_n_batches=100): batch_losses = [] rnn.train() print("Training for %d epoch(s)..." 
% n_epochs) for epoch_i in range(1, n_epochs + 1): # initialize hidden state hidden = rnn.init_hidden(batch_size) for batch_i, (inputs, labels) in enumerate(train_loader, 1): # make sure you iterate over completely full batches, only n_batches = len(train_loader.dataset)//batch_size if(batch_i > n_batches): break # forward, back prop loss, hidden = forward_back_prop(rnn, optimizer, criterion, inputs, labels, hidden) # record loss batch_losses.append(loss) # printing loss stats if batch_i % show_every_n_batches == 0: print('Epoch: {:>4}/{:<4} Loss: {}\n'.format( epoch_i, n_epochs, np.average(batch_losses))) batch_losses = [] # returns a trained rnn return rnn ###Output _____no_output_____ ###Markdown HyperparametersSet and train the neural network with the following parameters:- Set `sequence_length` to the length of a sequence.- Set `batch_size` to the batch size.- Set `num_epochs` to the number of epochs to train for.- Set `learning_rate` to the learning rate for an Adam optimizer.- Set `vocab_size` to the number of uniqe tokens in our vocabulary.- Set `output_size` to the desired size of the output.- Set `embedding_dim` to the embedding dimension; smaller than the vocab_size.- Set `hidden_dim` to the hidden dimension of your RNN.- Set `n_layers` to the number of layers/cells in your RNN.- Set `show_every_n_batches` to the number of batches at which the neural network should print progress.If the network isn't getting the desired results, tweak these parameters and/or the layers in the `RNN` class. ###Code # Data params # Sequence Length sequence_length = 25 # of words in a sequence # Batch Size batch_size = 128 # data loader - do not change train_loader = batch_data(int_text, sequence_length, batch_size) # Training parameters # Number of Epochs num_epochs = 50 # Learning Rate learning_rate = 0.001 # Model parameters # Vocab size vocab_size = len(vocab_to_int) # Output size output_size = vocab_size # Embedding Dimension embedding_dim = 300 # Hidden Dimension hidden_dim = 512 # Number of RNN Layers n_layers = 2 # Show stats for every n number of batches show_every_n_batches = 500 ###Output _____no_output_____ ###Markdown TrainIn the next cell, you'll train the neural network on the pre-processed data. If you have a hard time getting a good loss, you may consider changing your hyperparameters. In general, you may get better results with larger hidden and n_layer dimensions, but larger models take a longer time to train. > **You should aim for a loss less than 3.5.** You should also experiment with different sequence lengths, which determine the size of the long range dependencies that a model can learn. ###Code """ DON'T MODIFY ANYTHING IN THIS CELL """ import workspace_utils from workspace_utils import active_session #keep cell running longer than 30 min with active_session(): # create model and move to gpu if available rnn = RNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5) if train_on_gpu: rnn.cuda() # defining loss and optimization functions for training optimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate) criterion = nn.CrossEntropyLoss() # training the model trained_rnn = train_rnn(rnn, batch_size, optimizer, criterion, num_epochs, show_every_n_batches) # saving the trained model helper.save_model('./save/trained_rnn', trained_rnn) print('Model Trained and Saved') ###Output Training for 10 epoch(s)... 
Epoch: 1/10 Loss: 5.463116533756256 Epoch: 1/10 Loss: 4.815075745582581 Epoch: 1/10 Loss: 4.557721925258637 Epoch: 1/10 Loss: 4.425225018024444 Epoch: 1/10 Loss: 4.433761734485627 Epoch: 1/10 Loss: 4.466397776126861 Epoch: 1/10 Loss: 4.361214367389679 Epoch: 1/10 Loss: 4.244606667041778 Epoch: 1/10 Loss: 4.234342841148377 Epoch: 1/10 Loss: 4.15948775100708 Epoch: 1/10 Loss: 4.284430130958557 Epoch: 1/10 Loss: 4.3135910339355465 Epoch: 1/10 Loss: 4.312585735321045 Epoch: 2/10 Loss: 4.101212255964595 Epoch: 2/10 Loss: 3.907897990703583 Epoch: 2/10 Loss: 3.8182256827354433 Epoch: 2/10 Loss: 3.7704698147773743 Epoch: 2/10 Loss: 3.8073702754974366 Epoch: 2/10 Loss: 3.888620740413666 Epoch: 2/10 Loss: 3.8122659192085266 Epoch: 2/10 Loss: 3.7161363649368284 Epoch: 2/10 Loss: 3.7071275944709776 Epoch: 2/10 Loss: 3.6744387125968934 Epoch: 2/10 Loss: 3.787210060596466 Epoch: 2/10 Loss: 3.8204187088012693 Epoch: 2/10 Loss: 3.8137642922401427 Epoch: 3/10 Loss: 3.717030230386198 Epoch: 3/10 Loss: 3.6147490787506102 Epoch: 3/10 Loss: 3.542679307937622 Epoch: 3/10 Loss: 3.5034243454933165 Epoch: 3/10 Loss: 3.5151408071517944 Epoch: 3/10 Loss: 3.622207152843475 Epoch: 3/10 Loss: 3.5719383759498595 Epoch: 3/10 Loss: 3.499570921421051 Epoch: 3/10 Loss: 3.4688178272247314 Epoch: 3/10 Loss: 3.4380863952636718 Epoch: 3/10 Loss: 3.559805054664612 Epoch: 3/10 Loss: 3.584144694805145 Epoch: 3/10 Loss: 3.5536965546607973 Epoch: 4/10 Loss: 3.4980972515157434 Epoch: 4/10 Loss: 3.426353521823883 Epoch: 4/10 Loss: 3.3432546753883363 Epoch: 4/10 Loss: 3.3282262744903566 Epoch: 4/10 Loss: 3.32100039434433 Epoch: 4/10 Loss: 3.42879892206192 Epoch: 4/10 Loss: 3.3863004660606384 Epoch: 4/10 Loss: 3.3304165029525756 Epoch: 4/10 Loss: 3.3014186444282534 Epoch: 4/10 Loss: 3.2815961422920226 Epoch: 4/10 Loss: 3.3862370467185974 Epoch: 4/10 Loss: 3.4094378209114073 Epoch: 4/10 Loss: 3.382577163219452 Epoch: 5/10 Loss: 3.3412521075118673 Epoch: 5/10 Loss: 3.289597149848938 Epoch: 5/10 Loss: 3.220576942920685 Epoch: 5/10 Loss: 3.202632845878601 Epoch: 5/10 Loss: 3.179095251560211 Epoch: 5/10 Loss: 3.29287628698349 Epoch: 5/10 Loss: 3.2485314683914184 Epoch: 5/10 Loss: 3.1897765288352966 Epoch: 5/10 Loss: 3.1792118697166445 Epoch: 5/10 Loss: 3.163952871799469 Epoch: 5/10 Loss: 3.255647312641144 Epoch: 5/10 Loss: 3.2762828822135925 Epoch: 5/10 Loss: 3.2566953349113463 Epoch: 6/10 Loss: 3.223505116444974 Epoch: 6/10 Loss: 3.1746608338356017 Epoch: 6/10 Loss: 3.1163034696578977 Epoch: 6/10 Loss: 3.09956164598465 Epoch: 6/10 Loss: 3.080168787479401 Epoch: 6/10 Loss: 3.195700294017792 Epoch: 6/10 Loss: 3.145141996383667 Epoch: 6/10 Loss: 3.096443654060364 Epoch: 6/10 Loss: 3.076122142791748 Epoch: 6/10 Loss: 3.075185565948486 Epoch: 6/10 Loss: 3.1641750059127807 Epoch: 6/10 Loss: 3.1733086495399476 Epoch: 6/10 Loss: 3.159344530582428 Epoch: 7/10 Loss: 3.1316622840471506 Epoch: 7/10 Loss: 3.091824433326721 Epoch: 7/10 Loss: 3.0386316900253294 Epoch: 7/10 Loss: 3.0239725489616394 Epoch: 7/10 Loss: 2.9985297055244446 Epoch: 7/10 Loss: 3.1066632194519044 Epoch: 7/10 Loss: 3.0647981162071227 Epoch: 7/10 Loss: 3.0170075173377993 Epoch: 7/10 Loss: 2.9962269282341003 Epoch: 7/10 Loss: 3.0025529990196227 Epoch: 7/10 Loss: 3.089940724372864 Epoch: 7/10 Loss: 3.1057039036750793 Epoch: 7/10 Loss: 3.089015625 Epoch: 8/10 Loss: 3.053129531750994 Epoch: 8/10 Loss: 3.024715522766113 Epoch: 8/10 Loss: 2.974336974143982 Epoch: 8/10 Loss: 2.960674822330475 Epoch: 8/10 Loss: 2.929070911884308 Epoch: 8/10 Loss: 3.041597698688507 Epoch: 8/10 Loss: 
2.99731020116806 Epoch: 8/10 Loss: 2.949739360332489 Epoch: 8/10 Loss: 2.9399639530181885 Epoch: 8/10 Loss: 2.9455386881828307 Epoch: 8/10 Loss: 3.0234020719528196 Epoch: 8/10 Loss: 3.0415712413787843 Epoch: 8/10 Loss: 3.02445863199234 Epoch: 9/10 Loss: 2.993773799420388 Epoch: 9/10 Loss: 2.9743737683296203 Epoch: 9/10 Loss: 2.916193949222565 Epoch: 9/10 Loss: 2.903786855220795 Epoch: 9/10 Loss: 2.8716085147857666 Epoch: 9/10 Loss: 2.977797413825989 Epoch: 9/10 Loss: 2.9386006093025205 Epoch: 9/10 Loss: 2.892512547969818 Epoch: 9/10 Loss: 2.8755614318847655 Epoch: 9/10 Loss: 2.8885124263763426 Epoch: 9/10 Loss: 2.9685447483062744 Epoch: 9/10 Loss: 2.9702795548439025 Epoch: 9/10 Loss: 2.9620283942222594 Epoch: 10/10 Loss: 2.940785269968766 Epoch: 10/10 Loss: 2.922002249240875 Epoch: 10/10 Loss: 2.87406214427948 Epoch: 10/10 Loss: 2.8591037855148316 Epoch: 10/10 Loss: 2.821552659034729 Epoch: 10/10 Loss: 2.9248615193367002 Epoch: 10/10 Loss: 2.8878612599372864 Epoch: 10/10 Loss: 2.838347276687622 Epoch: 10/10 Loss: 2.830324348449707 Epoch: 10/10 Loss: 2.8384297332763673 Epoch: 10/10 Loss: 2.910147524356842 Epoch: 10/10 Loss: 2.913524629116058 Epoch: 10/10 Loss: 2.9071058940887453 ###Markdown Question: How did you decide on your model hyperparameters? For example, did you try different sequence_lengths and find that one size made the model converge faster? What about your hidden_dim and n_layers; how did you decide on those? **Answer:** Based upon the course material about hyperparameters and some google research, I figure out that there is no magic number. You just have to do some experiment on your dataset.During my first training attempts I tried smaller sequence length and batch size parameters but after just a few iterations, the loss was quite large and not dropping significantly, so I increased the parameters a number of times and finally settled on sequence_length = 30 and batch_size = 128.I tried:- Sequence_length=10, batch_size = 128, hidden_dim=256, embedding_dim=300, n_layers=2, learning_rate = 0.001- Sequence_length=50, batch_size = 128, hidden_dim=256, embedding_dim=300, n_layers=2, learning_rate = 0.001 - Sequence_length=50, batch_size = 128, hidden_dim=512, embedding_dim=300, n_layers=2, learning_rate = 0.001 - Sequence_length=10, batch_size = 128, hidden_dim=512, embedding_dim=300, n_layers=2, learning_rate = 0.001- Sequence_length=30, batch_size = 128, hidden_dim=512, embedding_dim=300, n_layers=2, learning_rate = 0.001I stick with the last experiment. --- CheckpointAfter running the above training cell, your model will be saved by name, `trained_rnn`, and if you save your notebook progress, **you can pause here and come back to this code at another time**. You can resume your progress by running the next cell, which will load in our word:id dictionaries _and_ load in your saved model by name! ###Code """ DON'T MODIFY ANYTHING IN THIS CELL """ import torch import helper import problem_unittests as tests _, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess() trained_rnn = helper.load_model('./save/trained_rnn') ###Output _____no_output_____ ###Markdown Generate TV ScriptWith the network trained and saved, you'll use it to generate a new, "fake" Seinfeld TV script in this section. Generate TextTo generate the text, the network needs to start with a single word and repeat its predictions until it reaches a set length. You'll be using the `generate` function to do this. It takes a word id to start with, `prime_id`, and generates a set length of text, `predict_len`. 
Also note that it uses topk sampling to introduce some randomness in choosing the most likely next word, given an output set of word scores! ###Code """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ import torch.nn.functional as F def generate(rnn, prime_id, int_to_vocab, token_dict, pad_value, predict_len=100): """ Generate text using the neural network :param decoder: The PyTorch Module that holds the trained neural network :param prime_id: The word id to start the first prediction :param int_to_vocab: Dict of word id keys to word values :param token_dict: Dict of puncuation tokens keys to puncuation values :param pad_value: The value used to pad a sequence :param predict_len: The length of text to generate :return: The generated text """ rnn.eval() # create a sequence (batch_size=1) with the prime_id current_seq = np.full((1, sequence_length), pad_value) current_seq[-1][-1] = prime_id predicted = [int_to_vocab[prime_id]] for _ in range(predict_len): if train_on_gpu: current_seq = torch.LongTensor(current_seq).cuda() else: current_seq = torch.LongTensor(current_seq) # initialize the hidden state hidden = rnn.init_hidden(current_seq.size(0)) # get the output of the rnn output, _ = rnn(current_seq, hidden) # get the next word probabilities p = F.softmax(output, dim=1).data if(train_on_gpu): p = p.cpu() # move to cpu # use top_k sampling to get the index of the next word top_k = 5 p, top_i = p.topk(top_k) top_i = top_i.numpy().squeeze() # select the likely next word index with some element of randomness p = p.numpy().squeeze() word_i = np.random.choice(top_i, p=p/p.sum()) # retrieve that word from the dictionary word = int_to_vocab[word_i] predicted.append(word) # the generated word becomes the next "current sequence" and the cycle can continue current_seq = np.roll(current_seq, -1, 1) current_seq[-1][-1] = word_i gen_sentences = ' '.join(predicted) # Replace punctuation tokens for key, token in token_dict.items(): ending = ' ' if key in ['\n', '(', '"'] else '' gen_sentences = gen_sentences.replace(' ' + token.lower(), key) gen_sentences = gen_sentences.replace('\n ', '\n') gen_sentences = gen_sentences.replace('( ', '(') # return all the sentences return gen_sentences ###Output _____no_output_____ ###Markdown Generate a New ScriptIt's time to generate the text. Set `gen_length` to the length of TV script you want to generate and set `prime_word` to one of the following to start the prediction:- "jerry"- "elaine"- "george"- "kramer"You can set the prime word to _any word_ in our dictionary, but it's best to start with a name for generating a TV script. (You can also start with any other names you find in the original text file!) ###Code # run the cell multiple times to get different results! gen_length = 400 # modify the length to your preference prime_word = 'kramer' # name for starting the script """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ pad_word = helper.SPECIAL_WORDS['PADDING'] generated_script = generate(trained_rnn, vocab_to_int[prime_word + ':'], int_to_vocab, token_dict, vocab_to_int[pad_word], gen_length) print(generated_script) ###Output /opt/conda/lib/python3.6/site-packages/ipykernel_launcher.py:43: UserWarning: RNN module weights are not part of single contiguous chunk of memory. This means they need to be compacted at every call, possibly greatly increasing memory usage. To compact weights again call flatten_parameters(). 
###Markdown Save your favorite scriptsOnce you have a script that you like (or find interesting), save it to a text file! ###Code # save script to a text file f = open("generated_script_1.txt","w") f.write(generated_script) f.close() ###Output _____no_output_____
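###Markdown As an optional extra, the same `generate` function can be reused to produce one script per main character. This is only a sketch, assuming `trained_rnn`, `generate`, `gen_length`, `helper`, and the vocabulary dictionaries from the cells above are still in scope; the output file names are illustrative. ###Code
# generate and save one script for each of several prime words
pad_word = helper.SPECIAL_WORDS['PADDING']
for prime_word in ['jerry', 'elaine', 'george', 'kramer']:
    script = generate(trained_rnn, vocab_to_int[prime_word + ':'], int_to_vocab,
                      token_dict, vocab_to_int[pad_word], gen_length)
    # write each script to its own text file
    with open('generated_script_{}.txt'.format(prime_word), 'w') as f:
        f.write(script)
###Output _____no_output_____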
2_extract_ans.ipynb
###Markdown Chapter 2: Extraction. Extraction by specifying data columns. First, we remove the data that is not needed for the analysis and narrow the table down to only the required columns. This reduces the amount of data per row and makes the analysis easier to work with. Q: reserve_tb contains the columns listed below. Remove 'people_num' and 'total_price' from the reserve_tb columns. 'reserve_id', 'hotel_id', 'customer_id', 'reserve_datetime','checkin_date', 'checkin_time', 'checkout_date', 'people_num','total_price' ###Code # The answer can be written in the following three ways reserve_tb[['reserve_id', 'hotel_id', 'customer_id' , 'reserve_datetime', 'checkin_date', 'checkin_time' , 'checkout_date']] # Passing a list of the column names to extract as the second dimension of the loc indexer selects those columns reserve_tb.loc[:, ['reserve_id', 'hotel_id', 'customer_id' , 'reserve_datetime', 'checkin_date', 'checkin_time' , 'checkout_date']] # The drop function deletes the unneeded columns # axis=1 specifies that columns (not rows) are dropped # inplace=True overwrites reserve_tb with the result reserve_tb.drop(['people_num', 'total_price'], axis=1, inplace=True) ###Output _____no_output_____ ###Markdown Extraction by condition. Here we narrow the data down with a filter condition. Q: The dataset is a table of hotel reservation records. From the reservation table, extract the rows whose checkin_date falls between 2016-10-12 and 2016-10-13. The query function makes this easy. ###Code reserve_tb.query('"2016-10-12" <= checkin_date <= "2016-10-13"') ###Output _____no_output_____ ###Markdown Data sampling. When the extracted data is too large to handle comfortably during analysis, reducing it by sampling is effective. The code below draws a random sample of roughly 50% of the rows from the hotel reservation records. ###Code reserve_tb.sample(frac=0.5) ###Output _____no_output_____ ###Markdown Sampling based on aggregation. Fair sampling is the most important requirement. However, the row-level sampling above can introduce a bias across customers. For example, suppose every customer in these hotel reservation records had exactly two reservations. With a 50% row sample, about 25% of customers would lose both of their reservations and another 25% would keep both. To sample fairly, we instead randomly sample customer IDs from the reservation table and keep only the reservation records of the sampled customers. Q: For the hotel reservation records, extract roughly 50% of the rows from the reservation table by random sampling at the customer level. The following functions are useful: - .unique() returns the unique customer_id values - pd.Series() converts the result so that the sample function can be used - .sample() samples the customer IDs - .isin() keeps only the rows whose column value matches one of the values in the given list ###Code # reserve_tb['customer_id'].unique() returns the customer_ids with duplicates removed # convert to pandas.Series (a pandas list-like object) so that the sample function can be used # sample the customer IDs with the sample function target = pd.Series(reserve_tb['customer_id'].unique()).sample(frac=0.5) # isin keeps the rows whose customer_id matches one of the sampled customer IDs reserve_tb[reserve_tb['customer_id'].isin(target)] ###Output _____no_output_____
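###Markdown As a quick sanity check on the customer-level sampling, we can compare how many unique customers were kept (which should be close to 50% by construction) with the fraction of reservation rows kept (which can deviate from 50%). A minimal sketch, assuming `reserve_tb` and `target` from the cell above are still defined: ###Code
sampled = reserve_tb[reserve_tb['customer_id'].isin(target)]
# fraction of unique customers kept (~0.5 by construction)
print(len(target) / reserve_tb['customer_id'].nunique())
# fraction of reservation rows kept (can differ from 0.5)
print(len(sampled) / len(reserve_tb))
###Output _____no_output_____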
AI Class/KNN Example/wine.ipynb
###Markdown -------- ###Code # Import the k-nearest neighbors classifier model from sklearn.neighbors import KNeighborsClassifier # Create a KNN classifier with k=5 knn = KNeighborsClassifier(n_neighbors=5) # Train the model using the training sets knn.fit(X_train, y_train) # Predict the response for the test dataset y_pred = knn.predict(X_test) # Import the scikit-learn metrics module for accuracy calculation from sklearn import metrics # Model accuracy: how often is the classifier correct? print("Accuracy:",metrics.accuracy_score(y_test, y_pred)) ###Output Accuracy: 0.7592592592592593 ###Markdown ------- ###Code # Import the k-nearest neighbors classifier model from sklearn.neighbors import KNeighborsClassifier # Create a KNN classifier with k=7 knn = KNeighborsClassifier(n_neighbors=7) # Train the model using the training sets knn.fit(X_train, y_train) # Predict the response for the test dataset y_pred = knn.predict(X_test) # Import the scikit-learn metrics module for accuracy calculation from sklearn import metrics # Model accuracy: how often is the classifier correct? print("Accuracy:",metrics.accuracy_score(y_test, y_pred)) ###Output Accuracy: 0.7222222222222222
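###Markdown Rather than checking k=5 and k=7 one cell at a time, we could scan a range of k values and keep the best one. A minimal sketch, assuming the train/test split (`X_train`, `X_test`, `y_train`, `y_test`) from earlier in the notebook is still in scope: ###Code
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics

best_k, best_acc = None, 0.0
for k in range(1, 26):
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, y_train)
    acc = metrics.accuracy_score(y_test, knn.predict(X_test))
    if acc > best_acc:
        best_k, best_acc = k, acc
print("best k:", best_k, "accuracy:", best_acc)
###Output _____no_output_____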
notebooks/02-data-manipulation/.ipynb_checkpoints/2.2-aggregate-functions-checkpoint.ipynb
###Markdown Aggregate functionsTwo aggregate functions:* `aggregate()`* `aggregateByKey()` `aggregate(zeroValue, seqOp, combOp)`* **zeroValue** is like a data container. Its structure should match the data structure of the values returned by the seqOp function.* **seqOp** is a function that takes two arguments: the first argument is the zeroValue and the second argument is an element from the RDD. The zeroValue gets updated with the returned value after every run.* **combOp** is a function that takes two arguments: the first argument is the final zeroValue from one partition and the second is the final zeroValue from another partition.The code below calculates the total sum of squares for **mpg** and **disp** in the **mtcars** data set. Step 1: get some data. ###Code mtcars_df = spark.read.csv('../../data/mtcars.csv', inferSchema=True, header=True).select(['mpg', 'disp']) mtcars_df.take(5) ###Output _____no_output_____ ###Markdown Step 2: calculate averages of mpg and disp ###Code mpg_mean = mtcars_df.select('mpg').rdd.map(lambda x: x[0]).mean() disp_mean = mtcars_df.select('disp').rdd.map(lambda x: x[0]).mean() print('mpg mean = ', mpg_mean, '; ' 'disp mean = ', disp_mean) ###Output mpg mean = 20.090625000000003 ; disp mean = 230.721875 ###Markdown Step 3: build **zeroValue, seqOp** and **combOp**We are calculating two TSS values, so we create a tuple to store the two running sums. ###Code zeroValue = (0, 0) ###Output _____no_output_____ ###Markdown The **z** below refers to `zeroValue`. Its values get updated after every run. The **x** refers to an element in an RDD partition. In this case, both **z** and **x** have two values. ###Code seqOp = lambda z, x: (z[0] + (x[0] - mpg_mean)**2, z[1] + (x[1] - disp_mean)**2) ###Output _____no_output_____ ###Markdown The `combOp` function simply aggregates all `zeroValues` into one. ###Code combOp = lambda px, py: ( px[0] + py[0], px[1] + py[1] ) ###Output _____no_output_____ ###Markdown Implement the `aggregate()` function. ###Code mtcars_df.rdd.aggregate(zeroValue, seqOp, combOp) ###Output _____no_output_____ ###Markdown `aggregateByKey(zeroValue, seqOp, combOp)`This function is similar to `aggregate()`, but while `aggregate()` combines all results into a single final value, `aggregateByKey()` merges the results per key. Import data ###Code iris_rdd = sc.textFile('../../data/iris.csv', use_unicode=True) iris_rdd.take(2) ###Output _____no_output_____ ###Markdown Transform data to a tuple RDD ###Code iris_rdd_2 = iris_rdd.map(lambda x: x.split(',')).\ filter(lambda x: x[0] != 'sepal_length').\ map(lambda x: (x[-1], [*map(float, x[:-1])])) iris_rdd_2.take(5) ###Output _____no_output_____ ###Markdown Define initial values, seqOp and combOp ###Code zero_value = (0, 0) seqOp = (lambda x, y: (x[0] + (y[0])**2, x[1] + (y[1])**2)) combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1])) ###Output _____no_output_____ ###Markdown Implement `aggregateByKey()` ###Code iris_rdd_2.aggregateByKey(zero_value, seqOp, combOp).collect() ###Output _____no_output_____
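###Markdown The same per-key sums of squares can also be written with `map` plus `reduceByKey`, which is sometimes easier to read when the zero value is just a tuple of zeros. A minimal sketch, assuming `iris_rdd_2` from above is still available: ###Code
# square the first two features of each record, then sum them per key
squares = iris_rdd_2.map(lambda kv: (kv[0], (kv[1][0]**2, kv[1][1]**2)))
squares.reduceByKey(lambda a, b: (a[0] + b[0], a[1] + b[1])).collect()
###Output _____no_output_____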
python/notebooks/image_registration/two_frame/07_compare_transport_plans.ipynb
###Markdown Compare Gromov-Wasserstein and traditional OT Imports and magics ###Code %load_ext autoreload %autoreload 2 %matplotlib inline import numpy as np import matplotlib.pyplot as plt import ipywidgets as ipyw from ot import dist, emd from ot.gromov import gromov_wasserstein from IPython.display import HTML from otimage import imagerep, io from otimage.utils import plot_maxproj ###Output _____no_output_____ ###Markdown Transport plans ###Code def normalized_dist_mtx(pts_1, pts_2, metric): """Return distance matrix normalized by median.""" mtx_nn = dist(pts_1, pts_2, metric=metric) return mtx_nn / np.median(mtx_nn) def compute_ot(pts_1, pts_2, wts_1, wts_2): """Normalize weights and compute OT matrix.""" # Normalize weights p_1 = wts_1 / np.sum(wts_1) p_2 = wts_2 / np.sum(wts_2) # Normalized distance matrix c_mtx = normalized_dist_mtx(pts_1, pts_2, 'sqeuclidean') # Compute transport plan return emd(p_1, p_2, c_mtx, log=True) def compute_gw(pts_1, pts_2, wts_1, wts_2): """Normalize weights and compute OT matrix.""" # Normalize weights p_1 = wts_1 / np.sum(wts_1) p_2 = wts_2 / np.sum(wts_2) # Normalized distance matrices c_1 = normalized_dist_mtx(pts_1, pts_1, metric='sqeuclidean') c_2 = normalized_dist_mtx(pts_2, pts_2, metric='sqeuclidean') # Compute transport plan return gromov_wasserstein(c_1, c_2, p_1, p_2, 'square_loss', log=True) ###Output _____no_output_____ ###Markdown Load data ###Code # Frame index t1 = 3 t2 = 30 # Load two successive frames from dataset img_path = '/home/mn2822/Desktop/WormOT/data/zimmer/raw/mCherry_v00065-00115.hdf5' with io.ZimmerReader(img_path) as reader: img_1 = reader.get_frame(t1) img_2 = reader.get_frame(t2) # Load MP components mp_path = '/home/mn2822/Desktop/WormOT/data/zimmer/mp_components/mp_0000_0050.mat' with io.MPReader(mp_path) as reader: mp_1 = reader.get_frame(t1) mp_2 = reader.get_frame(t2) # Reconstruct images from MPs rec_1 = imagerep.reconstruct_mp_image(mp_1) rec_2 = imagerep.reconstruct_mp_image(mp_2) plt.figure(figsize=(10, 10)) plt.subplot(221) plot_maxproj(img_1) plt.title(f'frame: {t1}') plt.axis('off') plt.subplot(222) plot_maxproj(img_2) plt.title(f'frame: {t2}') plt.axis('off'); plt.subplot(223) plot_maxproj(rec_1) plt.title('MP recon') plt.axis('off') plt.subplot(224) plot_maxproj(rec_2) plt.title('MP recon') plt.axis('off'); ###Output _____no_output_____ ###Markdown Compute transport plans ###Code pts_1, wts_1 = (mp_1.pts, mp_1.wts) pts_2, wts_2 = (mp_2.pts, mp_2.wts) t_ot, ot_log = compute_ot(pts_1, pts_2, wts_1, wts_2) t_gw, gw_log = compute_gw(pts_1, pts_2, wts_1, wts_2) ###Output _____no_output_____ ###Markdown Examine transport plans Widget for viewing rows of transport plans ###Code def plot_rows(idx): fig, (ax_1, ax_2) = plt.subplots(2, 1, sharex=True, sharey=True) ax_1.plot(t_ot[idx, :]) ax_1.set_title('OT') ax_1.get_xaxis().set_visible(False) ax_2.plot(t_gw[idx, :]) ax_2.set_title('GW'); ipyw.interact( plot_rows, idx=ipyw.IntSlider( min=0, max=pts_1.shape[0], step=1, continuous_update=False, description='MP:' ) ); ###Output _____no_output_____ ###Markdown Widget for viewing pushforwards of spatial locations ###Code # Pushforward matrix for OT plan q_ot = t_ot / np.sum(t_ot, 1)[:, np.newaxis] pf_means_ot = q_ot @ pts_2 pf_modes_ot = pts_2[np.argmax(q_ot, 1)] # Pushforward matrix for GW plan q_gw = t_gw / np.sum(t_gw, 1)[:, np.newaxis] pf_means_gw = q_gw @ pts_2 pf_modes_gw = pts_2[np.argmax(q_gw, 1)] def plot_pushforward_compare(idx): pt_1 = pts_1[idx, :] mean_pf_ot = pf_means_ot[idx, :] mode_pf_ot = pf_modes_ot[idx, 
:] mean_pf_gw = pf_means_gw[idx, :] mode_pf_gw = pf_modes_gw[idx, :] fig, (ax_1, ax_2, ax_3) = plt.subplots(1, 3, figsize=(15, 15)) ax_1.imshow(np.max(rec_1, 2).T, origin='lower') ax_1.plot(pt_1[0], pt_1[1], marker='*', color='red', markersize=7) ax_1.get_xaxis().set_visible(False) ax_1.get_yaxis().set_visible(False) ax_1.set_title(f'MP: {idx}') ax_2.imshow(np.max(rec_2, 2).T, origin='lower') ax_2.plot(mean_pf_ot[0], mean_pf_ot[1], marker='*', color='red', markersize=7) ax_2.plot(mode_pf_ot[0], mode_pf_ot[1], marker='+', color='red', markersize=7) ax_2.get_xaxis().set_visible(False) ax_2.get_yaxis().set_visible(False) ax_2.set_title('pushforward (OT)') ax_3.imshow(np.max(rec_2, 2).T, origin='lower') ax_3.plot(mean_pf_gw[0], mean_pf_gw[1], marker='*', color='red', markersize=7) ax_3.plot(mode_pf_gw[0], mode_pf_gw[1], marker='+', color='red', markersize=7) ax_3.get_xaxis().set_visible(False) ax_3.get_yaxis().set_visible(False) ax_3.set_title('pushforward (GW)') ipyw.interact( plot_pushforward_compare, idx=ipyw.IntSlider( min=0, max=pts_1.shape[0], step=1, continuous_update=False, description='MP:' ) ); ###Output _____no_output_____
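###Markdown One rough way to summarize the two plans numerically is to compare how far each MP component is moved by its pushforward mean under each plan. Since Gromov-Wasserstein only matches intra-image distances, its pushforward need not stay close to the original coordinates, so this is only a coarse comparison. A minimal sketch, assuming `pts_1`, `pf_means_ot`, and `pf_means_gw` from the cells above are still defined: ###Code
# mean Euclidean displacement of each point under the two pushforwards
disp_ot = np.linalg.norm(pf_means_ot - pts_1, axis=1)
disp_gw = np.linalg.norm(pf_means_gw - pts_1, axis=1)
print('mean displacement (OT):', disp_ot.mean())
print('mean displacement (GW):', disp_gw.mean())
###Output _____no_output_____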
New_Thoughts_Lab_Spam_classification.ipynb
###Markdown sklearn CountVectorizer: it tokenizes the string and gives an integer ID to each token, and it counts the occurrences of each of those tokens. 1. It converts to lower case automatically: **lowercase = True** by default 2. It ignores punctuation: **token_pattern** 3. It will ignore all words that are commonly used, like 'am', 'an', 'the', etc., if **stop_words='english'**. Turn all to lowercase ###Code import string str1 = 'Hello! How are you?' str1_lower = str1.lower() print(str1_lower) ###Output hello! how are you? ###Markdown Remove punctuation ###Code translator = str1_lower.maketrans('', '', string.punctuation) str1_lower = str1_lower.translate(translator) print(str1_lower) ###Output hello how are you ###Markdown Count the frequency of the words ###Code from collections import Counter pre_str = str1_lower.split() count = Counter(pre_str) print(count) ###Output Counter({'hello': 1, 'how': 1, 'are': 1, 'you': 1})
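###Markdown The manual steps above (lowercasing, stripping punctuation, counting) are roughly what `CountVectorizer` does in one call. A minimal sketch of the sklearn version, using the same toy sentence: ###Code
from sklearn.feature_extraction.text import CountVectorizer

docs = ['Hello! How are you?']
vectorizer = CountVectorizer(lowercase=True)  # punctuation is dropped by the default token_pattern
X = vectorizer.fit_transform(docs)
print(vectorizer.vocabulary_)  # token -> integer ID
print(X.toarray())             # counts per token
###Output _____no_output_____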
tutorials/operon_distance_exploration.ipynb
###Markdown Operon analysisWe don't yet have an exhaustive annotation of all operons in _M. buryatense_ but there are a small handful of known operons (curated by Mary Lidstrom, saved in `/data/operon_ncbi_ids,txt`). We examined the spacing between genes within this known set and used this to help us decide how to set the `min_dist` parameter when estimating operons in our main framework. Basic feature file loading/parsing ###Code from Bio import SeqIO import altair as alt import pandas as pd import matplotlib.pyplot as plt import numpy as np import seaborn as sns import sys sys.path.append('../') # use modules in main directory import genbank_utils as gu # genbank files gbFile_5G = '../data/5GB1c_sequence.gb' gb_5G = SeqIO.parse(gbFile_5G, "genbank").__next__() print("___ 5G ____") print("Genome length:", len(gb_5G.seq), "bps") print("num features:", len(gb_5G.features)) print("num CDS features:", len([x for x in gb_5G.features if x.type=='CDS'])) print("num gene features:", len([x for x in gb_5G.features if x.type=='gene'])) print("num other features:", len([x for x in gb_5G.features if x.type not in ['gene', 'CDS']])) # get feature coords from genbanks file feats_5G = gu.get_feature_tuples_from_genbank(gbFile_5G) feats_5G_filt = [x for x in feats_5G if x[5] not in ['gene']] feats_5G_filt[:10] # feat list indices LEFT_IDX = 0 RIGHT_IDX = 1 STRAND_IDX = 2 LOCUS_IDX = 3 GENE_IDX = 4 TYPE_IDX = 5 ###Output _____no_output_____ ###Markdown Curated operon analysis ###Code # load file of curated operons op_df = pd.read_csv("../data/operon_ncbi_ids.txt", sep='\t') op_df ###Output _____no_output_____ ###Markdown Mary curatd the above 30 operons as respresentative examples of operons in _M. buryatense_. We analyzed these examples to better understand the typical distance between genes known to be in an operon. 
###Code # get list of ids of genes in operons all_op_genes = set(",".join(op_df['all_genes'].values).split(',')) # as well as a list of first-genes in operons op_first_genes = set(op_df['ncbi_locus'].values) print("Total genes in example operons:",len(all_op_genes)) ###Output Total genes in example operons: 175 ###Markdown Build a dict of all genes in Mary's list to its upstream distance to its nearest neighbor ###Code upd = {} # key: gene, value: upstream_distance # loop through all features for i,(g_left,g_right,strand,locus,gene,typee) in enumerate(feats_5G_filt): # if this is a gene in one of Mary's operons, get it's upstream dist if locus in all_op_genes: # if we're on the negative strand, look to the right if strand == -1: # make sure we're not the very last gene if i < len(feats_5G_filt) -1: # get the FOLLOWING feature (because on -1 strand) upstream_gene = feats_5G_filt[i+1] upstream_dist = upstream_gene[LEFT_IDX] - g_right # otherwise, we're on the positive strand so look left else: # make sure we're not the very first gene if i != 0: # get the PREVIOUS feature (because on +1 strand) upstream_gene = feats_5G_filt[i-1] upstream_dist = g_left - upstream_gene[RIGHT_IDX] upd[locus] = upstream_dist # print a few examples from the usptream distance dict [(x,upd[x]) for x in list(upd.keys())[:10]] ###Output _____no_output_____ ###Markdown For every example operon, visualize the upstream distance for all genes inside, as well as the upstream distance from the first gene in the operon to its nearest neighor NOT in the operon ###Code # Make a full grid of operon upstream distances all_dfs = [] for i,row in op_df.iterrows(): start_gene = row['ncbi_locus'] other_genes = [x for x in row['all_genes'].split(',') if x !=start_gene] start_up_dist = upd[start_gene] other_dists = [upd[x] for x in other_genes] df = pd.DataFrame() df['gene'] = row['all_genes'].split(',') df['upstream_dist'] = df['gene'].apply(lambda x: upd[x]) df['start?'] = df['gene'].apply(lambda x: True if x == start_gene else False) df['operon'] = row['shortd'] all_dfs.append(df) all_ops_df = pd.concat(all_dfs) all_ops_df.head(10) # We can ask for ALL THE AXES and put them into axes fig, axes = plt.subplots(nrows=6, ncols=6, sharex=False, sharey='row', figsize=(15,15)) axes_list = [item for sublist in axes for item in sublist] for operon, selection in all_ops_df.groupby("operon"): ax = axes_list.pop(0) sns.barplot(data=selection,x='gene',y='upstream_dist',hue='start?',dodge=False, ax=ax) ax.set_title(operon,fontsize=14) ax.tick_params( right=False, top=False ) ax.get_legend().remove() ax.grid(linewidth=0.25) ax.set_ylim((-50, 1700)) ax.set_xlabel("") ax.set_xticklabels("") ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) # Now use the matplotlib .remove() method to # delete anything we didn't use for ax in axes_list: ax.remove() plt.tight_layout() ###Output _____no_output_____ ###Markdown Each operon has it's leading gene in orange and all internal genes in blue. The height of the bar is the distance between each gene and it's nearest upstream neighbor on the same strand. 
Overall, leading genes have the biggest upstream distance, though some internal genes (such as those for the "hydrogenase" operon) have large distances (over 250) Calculate the average distance between genes inside operons ###Code all_non_start_dists = [] mean_dist_per_operon = [] for i,row in op_df.iterrows(): start_gene = row['ncbi_locus'] other_genes = [x for x in row['all_genes'].split(',') if x !=start_gene] start_up_dist = upd[start_gene] other_dists = [upd[x] for x in other_genes] # collect averages op_ave = np.mean(other_dists) all_non_start_dists += other_dists mean_dist_per_operon.append(op_ave) # build dataframe for seaborn df = pd.DataFrame() df['gene'] = row['all_genes'].split(',') df['upstream_dist'] = df['gene'].apply(lambda x: upd[x]) df['start?'] = df['gene'].apply(lambda x: True if x == start_gene else False) # average over all non-start genes print("Mean:",np.mean(all_non_start_dists)) print("Median:",np.median(all_non_start_dists)) ###Output Mean: 44.675862068965515 Median: 16.0 ###Markdown Visualize the distibution of upstream distances for all inside-operon genes ###Code plt.figure(figsize=(10,5)) ax = sns.swarmplot(all_non_start_dists,s=6) ax.set_xticks(np.arange(-50,400,50)) plt.xlabel("Upstream distance") plt.title("Every non-starting operon gene's upstream distance",fontsize=20) plt.show() non_start_df = all_ops_df[all_ops_df['start?']==False] # interavtive version with some jitter to simulate swarm plot selection = alt.selection_multi(fields=['operon'], bind='legend') area = alt.Chart(non_start_df).transform_density( 'upstream_dist', as_=['upstream_dist', 'density'], ).mark_area(orient='vertical', color='gray', opacity=0.2 ).encode( x='upstream_dist:Q', y=alt.Y( 'density:Q', stack='center', impute=None, title=None, axis=alt.Axis(labels=False, values=[0],grid=False, ticks=True), scale=alt.Scale(nice=False,zero=False), ), ).properties( height=300, width=500, ) stripplot = alt.Chart( non_start_df, ).mark_point().encode( y=alt.Y( 'jitter:Q', title=None, axis=alt.Axis(values=[0], ticks=True, grid=False, labels=False), scale=alt.Scale() ), x=alt.X( 'upstream_dist:Q', axis=alt.Axis(title="Upstream Distance")), color=alt.Color('operon:N',scale=alt.Scale(scheme="sinebow")), size=alt.condition(selection, alt.value(100), alt.value(10)), tooltip=["operon:N","gene:N", "upstream_dist:Q"], opacity=alt.condition(selection, alt.value(1), alt.value(0.2)), ).transform_calculate( # Generate Gaussian jitter with a Box-Muller transform #jitter='sqrt(-2*log(random()))*cos(2*PI*random())' jitter='0.001*sqrt(-10*log(random()))*cos(10*PI*random())+0.007' ).properties( height=300, width=500, title="Every non-starting operon gene's upstream distance" ).add_selection( selection, ).interactive() chart = area + stripplot chart ###Output _____no_output_____ ###Markdown Interactive plot!* Click operon in the legend to highlight points in the scatter plot * + click to select multiple points in the legend* Hover over points to see more detail* Use mouse scroll to zoom in and out Most of the distribution of upstream distance for internal operon genes is shorter than 50 bases but there are a number of genes that do have larger distances. In particular the, distances for genes in the pMMO operon is 93 and 108. Ultimately we chose 120 as our default value for the miniumum distance genes must be within to be considered "possibly in an operon." 120 includes most genes from this hand curated set but misses a few. ###Code #chart.save('mbur_operon_dist.html') ###Output _____no_output_____
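###Markdown As a quick check of that choice, we can compute what fraction of the internal (non-leading) operon genes fall within the 120 bp cutoff. A minimal sketch, assuming `all_non_start_dists` from the cells above is still in scope: ###Code
# fraction of internal operon genes whose upstream distance is within the chosen cutoff
dists = np.array(all_non_start_dists)
print('fraction of internal operon genes within 120 bp:', np.mean(dists <= 120))
###Output _____no_output_____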
ex04_extra/mateus_oliveira/Exercicio_aula4_plot_otimo_batch_iris_mateus_oliveira.ipynb
###Markdown Name:> Mateus Oliveira da Silva ###Code import numpy as np import torch from torch.utils.data import TensorDataset from torch.utils.data import DataLoader from torchvision import transforms from sklearn.datasets import load_iris %matplotlib inline import matplotlib.pyplot as plt iris = load_iris() data = iris.data[iris.target==1,::2] # sepal and petal lengths, indices 0 and 2 x_data = data[:,0:1] target = data[:,1:2] n_samples = x_data.shape[0] print('dimensions of x_data:', x_data.shape) print('dimensions of target:', target.shape) x_train = torch.FloatTensor(x_data) y_train = torch.FloatTensor(target) xt_min = x_train.min() xt_max = x_train.max() x_train_n = (x_train - xt_min)/(xt_max - xt_min) yt_min = y_train.min() yt_max = y_train.max() y_train_n = (y_train - yt_min)/(yt_max - yt_min) x_train_bias = torch.cat([torch.ones(size=(n_samples,1)), x_train_n], dim=1) print(x_train_bias.shape) dataset = TensorDataset(x_train_bias, y_train_n) data_loader = DataLoader(dataset, batch_size=10, # size of the data mini-batch shuffle=True) # if True, shuffles the data at the start of each iteration ###Output _____no_output_____ ###Markdown Optimal solution ###Code x_bias, y = next(iter(data_loader)) w_opt = (torch.inverse(x_bias.t().mm(x_bias)).mm(x_bias.t())).mm(y) print(w_opt.t(), w_opt.shape) ###Output tensor([[0.5065, 0.2817]]) torch.Size([2, 1]) ###Markdown The exercise is to code the next cell: ###Code # write a loop here to # compute the optimal values associated with each batch # a plot similar to the reference plot is expected n_epochs=2500 wlist=torch.Tensor([]) for epoch in range(n_epochs): x_bias, y = next(iter(data_loader)) x_T = x_bias.t() w_opt = (torch.inverse(x_T.mm(x_bias)).mm(x_T)).mm(y) wlist = torch.cat((wlist, w_opt.t()), dim=0) w_opt = wlist plt.scatter(w_opt[:,0], w_opt[:,1]) ###Output _____no_output_____
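###Markdown To see how the per-batch solutions scatter around the full-data solution, we could also compute the optimal weights on the whole normalized training set and overlay that point on the scatter plot. A minimal sketch, assuming `x_train_bias`, `y_train_n`, and `wlist` from above are still defined: ###Code
# closed-form least-squares solution using all samples at once
X = x_train_bias
w_full = (torch.inverse(X.t().mm(X)).mm(X.t())).mm(y_train_n)
plt.scatter(wlist[:,0], wlist[:,1], alpha=0.3, label='per-batch optima')
plt.scatter(w_full[0].item(), w_full[1].item(), color='red', marker='x', s=100, label='full-data optimum')
plt.legend()
###Output _____no_output_____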
Quickstart/REST/azure-search-quickstart.ipynb
###Markdown Create a search index using REST APIs and Python This Jupyter Notebook demonstrates index creation, data ingestion, and queries of an Azure Cognitive Search index by calling the REST APIs from Python code. This notebook is a companion document to this [Python quickstart](https://docs.microsoft.com/azure/search/search-get-started-python). As a first step, load the libraries used for working with JSON and formulating HTTP requests. ###Code import json import requests from pprint import pprint ###Output _____no_output_____ ###Markdown In the second cell, input the request elements that will be constants on every request. Replace the search service name (YOUR-SEARCH-SERVICE-NAME) and admin API key (YOUR-ADMIN-API-KEY) with valid values. If you get ConnectionError "Failed to establish a new connection", verify that the api-key is a primary or secondary admin key, and that all leading and trailing characters (? and /) are in place. ###Code endpoint = 'https://<YOUR-SEARCH-SERVICE-NAME>.search.windows.net/' api_version = '?api-version=2019-05-06' headers = {'Content-Type': 'application/json', 'api-key': '<YOUR-ADMIN-API-KEY>' } ###Output _____no_output_____ ###Markdown In the third cell, formulate the request. This GET request targets the indexes collection of your search service and selects the name property of existing indexes so that you can see which indexes already exist. Index names must be unique. Check the list to make sure "hotels-quickstart" isn't listed.. ###Code url = endpoint + "indexes" + api_version + "&$select=name" response = requests.get(url, headers=headers) index_list = response.json() pprint(index_list) ###Output _____no_output_____ ###Markdown Specify the index definition, including the fields that define each search document. Fields have a name type, and attributes that determine how you can use the field. For example, "searchable" enables full text search on the field, "retrievable" means it can be returned in results, and "filterable" allows the field to be used in a filter expression. 
###Code index_schema = { "name": "hotels-quickstart", "fields": [ {"name": "HotelId", "type": "Edm.String", "key": "true", "filterable": "true"}, {"name": "HotelName", "type": "Edm.String", "searchable": "true", "filterable": "false", "sortable": "true", "facetable": "false"}, {"name": "Description", "type": "Edm.String", "searchable": "true", "filterable": "false", "sortable": "false", "facetable": "false", "analyzer": "en.lucene"}, {"name": "Description_fr", "type": "Edm.String", "searchable": "true", "filterable": "false", "sortable": "false", "facetable": "false", "analyzer": "fr.lucene"}, {"name": "Category", "type": "Edm.String", "searchable": "true", "filterable": "true", "sortable": "true", "facetable": "true"}, {"name": "Tags", "type": "Collection(Edm.String)", "searchable": "true", "filterable": "true", "sortable": "false", "facetable": "true"}, {"name": "ParkingIncluded", "type": "Edm.Boolean", "filterable": "true", "sortable": "true", "facetable": "true"}, {"name": "LastRenovationDate", "type": "Edm.DateTimeOffset", "filterable": "true", "sortable": "true", "facetable": "true"}, {"name": "Rating", "type": "Edm.Double", "filterable": "true", "sortable": "true", "facetable": "true"}, {"name": "Address", "type": "Edm.ComplexType", "fields": [ {"name": "StreetAddress", "type": "Edm.String", "filterable": "false", "sortable": "false", "facetable": "false", "searchable": "true"}, {"name": "City", "type": "Edm.String", "searchable": "true", "filterable": "true", "sortable": "true", "facetable": "true"}, {"name": "StateProvince", "type": "Edm.String", "searchable": "true", "filterable": "true", "sortable": "true", "facetable": "true"}, {"name": "PostalCode", "type": "Edm.String", "searchable": "true", "filterable": "true", "sortable": "true", "facetable": "true"}, {"name": "Country", "type": "Edm.String", "searchable": "true", "filterable": "true", "sortable": "true", "facetable": "true"} ] } ] } ###Output _____no_output_____ ###Markdown In the following cell, formulate the request. This POST request targets the indexes collection of your search service and creates an index based on the index schema you provided in the previous cell. ###Code url = endpoint + "indexes" + api_version response = requests.post(url, headers=headers, json=index_schema) index = response.json() pprint(index) ###Output _____no_output_____ ###Markdown Next, provide four documents that conform to the index schema. Specify an upload action for each document. ###Code documents = { "value": [ { "@search.action": "upload", "HotelId": "1", "HotelName": "Secret Point Motel", "Description": "The hotel is ideally located on the main commercial artery of the city in the heart of New York. A few minutes away is Time's Square and the historic centre of the city, as well as other places of interest that make New York one of America's most attractive and cosmopolitan cities.", "Description_fr": "L'hôtel est idéalement situé sur la principale artère commerciale de la ville en plein cœur de New York. 
A quelques minutes se trouve la place du temps et le centre historique de la ville, ainsi que d'autres lieux d'intérêt qui font de New York l'une des villes les plus attractives et cosmopolites de l'Amérique.", "Category": "Boutique", "Tags": [ "pool", "air conditioning", "concierge" ], "ParkingIncluded": "false", "LastRenovationDate": "1970-01-18T00:00:00Z", "Rating": 3.60, "Address": { "StreetAddress": "677 5th Ave", "City": "New York", "StateProvince": "NY", "PostalCode": "10022", "Country": "USA" } }, { "@search.action": "upload", "HotelId": "2", "HotelName": "Twin Dome Motel", "Description": "The hotel is situated in a nineteenth century plaza, which has been expanded and renovated to the highest architectural standards to create a modern, functional, and first-class hotel in which art and unique historical elements coexist with the most modern comforts.", "Description_fr": "L'hôtel est situé dans une place du XIXe siècle, qui a été agrandie et rénovée aux plus hautes normes architecturales pour créer un hôtel moderne, fonctionnel et de première classe dans lequel l'art et les éléments historiques uniques coexistent avec le confort le plus moderne.", "Category": "Boutique", "Tags": [ "pool", "free wifi", "concierge" ], "ParkingIncluded": "false", "LastRenovationDate": "1979-02-18T00:00:00Z", "Rating": 3.60, "Address": { "StreetAddress": "140 University Town Center Dr", "City": "Sarasota", "StateProvince": "FL", "PostalCode": "34243", "Country": "USA" } }, { "@search.action": "upload", "HotelId": "3", "HotelName": "Triple Landscape Hotel", "Description": "The Hotel stands out for its gastronomic excellence under the management of William Dough, who advises on and oversees all of the Hotel’s restaurant services.", "Description_fr": "L'hôtel est situé dans une place du XIXe siècle, qui a été agrandie et rénovée aux plus hautes normes architecturales pour créer un hôtel moderne, fonctionnel et de première classe dans lequel l'art et les éléments historiques uniques coexistent avec le confort le plus moderne.", "Category": "Resort and Spa", "Tags": [ "air conditioning", "bar", "continental breakfast" ], "ParkingIncluded": "true", "LastRenovationDate": "2015-09-20T00:00:00Z", "Rating": 4.80, "Address": { "StreetAddress": "3393 Peachtree Rd", "City": "Atlanta", "StateProvince": "GA", "PostalCode": "30326", "Country": "USA" } }, { "@search.action": "upload", "HotelId": "4", "HotelName": "Sublime Cliff Hotel", "Description": "Sublime Cliff Hotel is located in the heart of the historic center of Sublime in an extremely vibrant and lively area within short walking distance to the sites and landmarks of the city and is surrounded by the extraordinary beauty of churches, buildings, shops and monuments. Sublime Cliff is part of a lovingly restored 1800 palace.", "Description_fr": "Le sublime Cliff Hotel est situé au coeur du centre historique de sublime dans un quartier extrêmement animé et vivant, à courte distance de marche des sites et monuments de la ville et est entouré par l'extraordinaire beauté des églises, des bâtiments, des commerces et Monuments. 
Sublime Cliff fait partie d'un Palace 1800 restauré avec amour.", "Category": "Boutique", "Tags": [ "concierge", "view", "24-hour front desk service" ], "ParkingIncluded": "true", "LastRenovationDate": "1960-02-06T00:00:00Z", "Rating": 4.60, "Address": { "StreetAddress": "7400 San Pedro Ave", "City": "San Antonio", "StateProvince": "TX", "PostalCode": "78216", "Country": "USA" } } ] } ###Output _____no_output_____ ###Markdown Formulate the request. This POST request targets the docs collection of the hotels-quickstart index and pushes the documents provided in the previous step. ###Code url = endpoint + "indexes/hotels-quickstart/docs/index" + api_version response = requests.post(url, headers=headers, json=documents) index_content = response.json() pprint(index_content) ###Output _____no_output_____ ###Markdown You are now ready to run some queries. The next cell contains a query expression that executes an empty search (search=*), returning an unranked list (search score = 1.0) of arbitrary documents. By default, Azure Cognitive Search returns 50 matches at a time. As structured, this query returns an entire document structure and values. Add $count=true to get a count of all documents (4) in the results. ###Code searchstring = '&search=*&$count=true' url = endpoint + "indexes/hotels-quickstart/docs" + api_version + searchstring response = requests.get(url, headers=headers, json=searchstring) query = response.json() pprint(query) ###Output _____no_output_____ ###Markdown The next query adds whole terms to the search expression ("hotels" and "wifi") and selects just a few fields to return in the results. ###Code searchstring = '&search=hotels wifi&$count=true&$select=HotelId,HotelName' url = endpoint + "indexes/hotels-quickstart/docs" + api_version + searchstring response = requests.get(url, headers=headers, json=searchstring) query = response.json() pprint(query) ###Output _____no_output_____ ###Markdown This query adds a $filter expression, returning only those hotels with a rating greater than 4. ###Code searchstring = '&search=*&$filter=Rating gt 4&$select=HotelId,HotelName,Description' url = endpoint + "indexes/hotels-quickstart/docs" + api_version + searchstring response = requests.get(url, headers=headers, json=searchstring) query = response.json() pprint(query) ###Output _____no_output_____ ###Markdown By default, the search engine returns the top 50 documents but you can use top and skip to add pagination and choose how many documents in each result. This query returns two documents in each result set. ###Code searchstring = '&search=boutique&$top=2&$select=HotelId,HotelName,Description' url = endpoint + "indexes/hotels-quickstart/docs" + api_version + searchstring response = requests.get(url, headers=headers, json=searchstring) query = response.json() pprint(query) ###Output _____no_output_____ ###Markdown In this last example, use $orderby to sort results by city. This example includes fields from the Address collection. ###Code searchstring = '&search=pool&$orderby=Address/City&$select=HotelId, HotelName, Address/City, Address/StateProvince' url = endpoint + "indexes/hotels-quickstart/docs" + api_version + searchstring response = requests.get(url, headers=headers, json=searchstring) query = response.json() pprint(query) ###Output _____no_output_____ ###Markdown If you are finished with this index, you can delete it by running the following lines. Deleting unnecessary indexes frees up space for steeping through more quickstarts and tutorials. 
###Code url = endpoint + "indexes/hotels-quickstart" + api_version response = requests.delete(url, headers=headers) ###Output _____no_output_____ ###Markdown Confirm the index deletion by running the following script that lists all of the indexes on your search service. If hotels-quickstart is not listed, you've successfully deleted the index and have completed this quickstart. ###Code url = endpoint + "indexes" + api_version + "&$select=name" response = requests.get(url, headers=headers) index_list = response.json() pprint(index_list) ###Output _____no_output_____
casestudyreview/training.ipynb
###Markdown Create training and test ###Code from sklearn.model_selection import train_test_split training = {} data = tqdm_notebook(T.items()) for k, v in data: X = csr_matrix(v[0]) y = [D.loc[int(x)].score for x in v[1]] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) training[k] = (X_train, X_test, y_train, y_test) ###Output /home/nick/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:2: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0 Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook` ###Markdown Training classification models ###Code from sklearn.tree import DecisionTreeClassifier from sklearn.neighbors import KNeighborsClassifier classifiers = {'DTC': DecisionTreeClassifier(), 'KNN': KNeighborsClassifier()} trained = defaultdict(lambda: {}) experiments = tqdm_notebook(training.items()) for k, (x_train, x_test, y_train, y_test) in experiments: for cl, model in classifiers.items(): m = model.__class__() m.fit(x_train, y_train) trained[k][cl] = m ###Output /home/nick/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:2: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0 Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook` ###Markdown DNN ###Code from sklearn.preprocessing import OneHotEncoder from keras.utils import np_utils from keras.models import Sequential from keras.layers import Dense def get_dnn(x_train): model = Sequential() model.add(Dense(100, input_dim=x_train.shape[1], activation='relu')) model.add(Dense(5, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) return model experiments = tqdm_notebook(training.items()) for k, (x_train, x_test, y_train, y_test) in experiments: y_e = OneHotEncoder().fit_transform(np.array(y_train).reshape(-1, 1)) m = get_dnn(x_train) m.fit(x_train, y_e, batch_size=50, epochs=6, verbose=0) trained[k]['DNN'] = m ###Output /home/nick/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:1: TqdmDeprecationWarning: This function will be removed in tqdm==5.0.0 Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook` """Entry point for launching an IPython kernel. 
###Markdown Save ###Code with open('../data/yelp_classification_training.pkl', 'wb') as out: pickle.dump(training, out) to_save = {} for k, v in trained.items(): s = {} for model_name, model in v.items(): if model_name == 'DNN': m_json = model.to_json() model.save_weights("../data/{}_{}.h5".format(k, model_name)) s['DNN'] = m_json else: s[model_name] = model to_save[k] = s with open('../data/yelp_classification_experiments.pkl', 'wb') as out: pickle.dump(to_save, out) ###Output _____no_output_____ ###Markdown Exercize: do the same process for regression ###Code from sklearn.model_selection import train_test_split training = {} data = tqdm_notebook(T.items()) for k, v in data: X = csr_matrix(v[0]) y = [D.loc[int(x)].avgstars for x in v[1]] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) training[k] = (X_train, X_test, y_train, y_test) from sklearn.tree import DecisionTreeRegressor from sklearn.neighbors import KNeighborsRegressor from sklearn.linear_model import LinearRegression regressors = {'DTR': DecisionTreeRegressor(), 'NNR': KNeighborsRegressor(), 'LR': LinearRegression() } trained = defaultdict(lambda: {}) experiments = tqdm_notebook(training.items()) for k, (x_train, x_test, y_train, y_test) in experiments: for cl, model in regressors.items(): m = model.__class__() m.fit(x_train, y_train) trained[k][cl] = m def get_regressor(x_train): model = Sequential() model.add(Dense(100, input_dim=x_train.shape[1], activation='relu')) model.add(Dense(1, activation='softmax')) model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy']) return model experiments = tqdm_notebook(training.items()) for k, (x_train, x_test, y_train, y_test) in experiments: #y_e = OneHotEncoder().fit_transform(np.array(y_train).reshape(-1, 1)) m = get_regressor(x_train) m.fit(x_train, y_train, batch_size=50, epochs=6, verbose=0) trained[k]['DNR'] = m with open('../data/yelp_regression_training.pkl', 'wb') as out: pickle.dump(training, out) to_save = {} for k, v in trained.items(): s = {} for model_name, model in v.items(): if model_name == 'DNR': m_json = model.to_json() model.save_weights("../data/{}_{}.h5".format(k, model_name)) s['DNR'] = m_json else: s[model_name] = model to_save[k] = s with open('../data/yelp_regression_experiments.pkl', 'wb') as out: pickle.dump(to_save, out) ###Output _____no_output_____
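###Markdown A sketch of how the saved regression experiments could be loaded back, assuming the pickle file and the .h5 weight files written above exist on disk: ###Code
import pickle
from keras.models import model_from_json

with open('../data/yelp_regression_experiments.pkl', 'rb') as f:
    saved = pickle.load(f)

loaded = {}
for k, models in saved.items():
    loaded[k] = {}
    for name, model in models.items():
        if name == 'DNR':
            m = model_from_json(model)                       # the Keras model was stored as a JSON string
            m.load_weights("../data/{}_{}.h5".format(k, name))
            loaded[k][name] = m
        else:
            loaded[k][name] = model                          # sklearn models were pickled directly
###Output _____no_output_____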
week1_embeddings/homework-Copy1.ipynb
###Markdown Homework: Multilingual Embedding-based Machine Translation (7 points) **In this homework** **YOU** will make machine translation system without using parallel corpora, alignment, attention, 100500 depth super-cool recurrent neural network and all that kind superstuff.But even without parallel corpora this system can be good enough (hopefully). For our system we choose two kindred Slavic languages: Ukrainian and Russian. Feel the difference!(_синій кіт_ vs. _синій кит_) ![blue_cat_blue_whale.png](https://github.com/yandexdataschool/nlp_course/raw/master/resources/blue_cat_blue_whale.png) Frament of the Swadesh list for some slavic languagesThe Swadesh list is a lexicostatistical stuff. It's named after American linguist Morris Swadesh and contains basic lexis. This list are used to define subgroupings of languages, its relatedness.So we can see some kind of word invariance for different Slavic languages.| Russian | Belorussian | Ukrainian | Polish | Czech | Bulgarian ||-----------------|--------------------------|-------------------------|--------------------|-------------------------------|-----------------------|| женщина | жанчына, кабета, баба | жінка | kobieta | žena | жена || мужчина | мужчына | чоловік, мужчина | mężczyzna | muž | мъж || человек | чалавек | людина, чоловік | człowiek | člověk | човек || ребёнок, дитя | дзіця, дзіцёнак, немаўля | дитина, дитя | dziecko | dítě | дете || жена | жонка | дружина, жінка | żona | žena, manželka, choť | съпруга, жена || муж | муж, гаспадар | чоловiк, муж | mąż | muž, manžel, choť | съпруг, мъж || мать, мама | маці, матка | мати, матір, неня, мама | matka | matka, máma, 'стар.' mateř | майка || отец, тятя | бацька, тата | батько, тато, татусь | ojciec | otec | баща, татко || много | шмат, багата | багато | wiele | mnoho, hodně | много || несколько | некалькі, колькі | декілька, кілька | kilka | několik, pár, trocha | няколко || другой, иной | іншы | інший | inny | druhý, jiný | друг || зверь, животное | жывёла, звер, істота | тварина, звір | zwierzę | zvíře | животно || рыба | рыба | риба | ryba | ryba | риба || птица | птушка | птах, птиця | ptak | pták | птица || собака, пёс | сабака | собака, пес | pies | pes | куче, пес || вошь | вош | воша | wesz | veš | въшка || змея, гад | змяя | змія, гад | wąż | had | змия || червь, червяк | чарвяк | хробак, черв'як | robak | červ | червей || дерево | дрэва | дерево | drzewo | strom, dřevo | дърво || лес | лес | ліс | las | les | гора, лес || палка | кій, палка | палиця | patyk, pręt, pałka | hůl, klacek, prut, kůl, pálka | палка, пръчка, бастун | But the context distribution of these languages demonstrates even more invariance. And we can use this fact for our for our purposes. Data ###Code import gensim import numpy as np from gensim.models import KeyedVectors ###Output _____no_output_____ ###Markdown Download embeddings here:* [cc.uk.300.vec.zip](https://yadi.sk/d/9CAeNsJiInoyUA)* [cc.ru.300.vec.zip](https://yadi.sk/d/3yG0-M4M8fypeQ) Load embeddings for ukrainian and russian. ###Code uk_emb = KeyedVectors.load_word2vec_format("data/cc.uk.300.vec") ru_emb = KeyedVectors.load_word2vec_format("data/cc.ru.300.vec") ru_emb.most_similar([ru_emb["август"]], topn=10) uk_emb.most_similar([uk_emb["серпень"]]) ru_emb.most_similar([uk_emb["серпень"]]) ###Output /usr/local/lib/python3.5/dist-packages/gensim/matutils.py:737: FutureWarning: Conversion of the second argument of issubdtype from `int` to `np.signedinteger` is deprecated. In future, it will be treated as `np.int64 == np.dtype(int).type`. 
if np.issubdtype(vec.dtype, np.int): ###Markdown Load small dictionaries for correspoinding words pairs as trainset and testset. ###Code def load_word_pairs(filename): uk_ru_pairs = [] uk_vectors = [] ru_vectors = [] with open(filename, "r", encoding='utf-8') as inpf: for line in inpf: uk, ru = line.rstrip().split("\t") if uk not in uk_emb or ru not in ru_emb: continue uk_ru_pairs.append((uk, ru)) uk_vectors.append(uk_emb[uk]) ru_vectors.append(ru_emb[ru]) return uk_ru_pairs, np.array(uk_vectors), np.array(ru_vectors) uk_ru_train, X_train, Y_train = load_word_pairs("ukr_rus.train.txt") uk_ru_test, X_test, Y_test = load_word_pairs("ukr_rus.test.txt") ###Output _____no_output_____ ###Markdown Embedding space mapping Let $x_i \in \mathrm{R}^d$ be the distributed representation of word $i$ in the source language, and $y_i \in \mathrm{R}^d$ is the vector representation of its translation. Our purpose is to learn such linear transform $W$ that minimizes euclidian distance between $Wx_i$ and $y_i$ for some subset of word embeddings. Thus we can formulate so-called Procrustes problem:$$W^*= \arg\min_W \sum_{i=1}^n||Wx_i - y_i||_2$$or$$W^*= \arg\min_W ||WX - Y||_F$$where $||*||_F$ - Frobenius norm.In Greek mythology, Procrustes or "the stretcher" was a rogue smith and bandit from Attica who attacked people by stretching them or cutting off their legs, so as to force them to fit the size of an iron bed. We make same bad things with source embedding space. Our Procrustean bed is target embedding space. ![embedding_mapping.png](https://github.com/yandexdataschool/nlp_course/raw/master/resources/embedding_mapping.png) ![procrustes.png](https://github.com/yandexdataschool/nlp_course/raw/master/resources/procrustes.png) But wait...$W^*= \arg\min_W \sum_{i=1}^n||Wx_i - y_i||_2$ looks like simple multiple linear regression (without intercept fit). So let's code. ###Code from sklearn.linear_model import LinearRegression mapping = LinearRegression(fit_intercept=False) mapping.fit(X_train, Y_train) ###Output _____no_output_____ ###Markdown Let's take a look at neigbours of the vector of word _"серпень"_ (_"август"_ in Russian) after linear transform. ###Code august = mapping.predict(uk_emb["серпень"].reshape(1, -1)) ru_emb.most_similar(august) ###Output /usr/local/lib/python3.5/dist-packages/gensim/matutils.py:737: FutureWarning: Conversion of the second argument of issubdtype from `int` to `np.signedinteger` is deprecated. In future, it will be treated as `np.int64 == np.dtype(int).type`. if np.issubdtype(vec.dtype, np.int): ###Markdown We can see that neighbourhood of this embedding cosists of different months, but right variant is on the ninth place. As quality measure we will use precision top-1, top-5 and top-10 (for each transformed Ukrainian embedding we count how many right target pairs are found in top N nearest neighbours in Russian embedding space). ###Code def precision(pairs, mapped_vectors, topn=1): """ :args: pairs = list of right word pairs [(uk_word_0, ru_word_0), ...] mapped_vectors = list of embeddings after mapping from source embedding space to destination embedding space topn = the number of nearest neighbours in destination embedding space to choose from :returns: precision_val, float number, total number of words for those we can find right translation at top K. 
""" assert len(pairs) == len(mapped_vectors) num_matches = 0 for i, (_, ru) in enumerate(pairs): nearest = ru_emb.most_similar(mapped_vectors[i].reshape(1,-1), topn=topn) nearest = [word[0] for word in nearest] if ru in nearest: num_matches += 1 precision_val = num_matches / len(pairs) return precision_val assert precision([("серпень", "август")], august, topn=5) == 0.0 assert precision([("серпень", "август")], august, topn=9) == 1.0 assert precision([("серпень", "август")], august, topn=10) == 1.0 assert precision(uk_ru_test, X_test) == 0.0 assert precision(uk_ru_test, Y_test) == 1.0 precision_top1 = precision(uk_ru_test, mapping.predict(X_test), 1) precision_top5 = precision(uk_ru_test, mapping.predict(X_test), 5) assert precision_top1 >= 0.635 assert precision_top5 >= 0.813 ###Output /usr/local/lib/python3.5/dist-packages/gensim/matutils.py:737: FutureWarning: Conversion of the second argument of issubdtype from `int` to `np.signedinteger` is deprecated. In future, it will be treated as `np.int64 == np.dtype(int).type`. if np.issubdtype(vec.dtype, np.int): ###Markdown Making it better (orthogonal Procrustean problem) It can be shown (see original paper) that a self-consistent linear mapping between semantic spaces should be orthogonal. We can restrict transform $W$ to be orthogonal. Then we will solve next problem:$$W^*= \arg\min_W ||WX - Y||_F \text{, where: } W^TW = I$$$$I \text{- identity matrix}$$Instead of making yet another regression problem we can find optimal orthogonal transformation using singular value decomposition. It turns out that optimal transformation $W^*$ can be expressed via SVD components:$$X^TY=U\Sigma V^T\text{, singular value decompostion}$$$$W^*=UV^T$$ ###Code from sklearn.decomposition import TruncatedSVD def learn_transform(X_train, Y_train): """ :returns: W* : float matrix[emb_dim x emb_dim] as defined in formulae above """ U, S, Vh = np.linalg.svd(X_train.T.dot(Y_train)) W_star = U.dot(Vh) return W_star W = learn_transform(X_train, Y_train) ru_emb.most_similar([np.matmul(uk_emb["серпень"], W)]) precision(uk_ru_test, np.matmul(X_test, W), 5) assert precision(uk_ru_test, np.matmul(X_test, W)) >= 0.653 assert precision(uk_ru_test, np.matmul(X_test, W), 5) >= 0.824 ###Output /usr/local/lib/python3.5/dist-packages/gensim/matutils.py:737: FutureWarning: Conversion of the second argument of issubdtype from `int` to `np.signedinteger` is deprecated. In future, it will be treated as `np.int64 == np.dtype(int).type`. if np.issubdtype(vec.dtype, np.int): ###Markdown UK-RU Translator Now we are ready to make simple word-based translator: for earch word in source language in shared embedding space we find the nearest in target language. ###Code with open("fairy_tale.txt", "r", encoding='utf-8') as inpf: uk_sentences = [line.rstrip().lower() for line in inpf] uk_sentences[:2] def translate(sentence): """ :args: sentence - sentence in Ukrainian (str) :returns: translation - sentence in Russian (str) * find ukrainian embedding for each word in sentence * transform ukrainian embedding vector * find nearest russian word and replace """ result = [] for word in sentence.split(' '): word_emb = uk_emb[word] if word in uk_emb else np.zeros(300) word_emb_ru = np.matmul(word_emb, W).reshape(1, -1) word_ru = ru_emb.most_similar(word_emb_ru) result += [word_ru[0][0]] return ' '.join(result) assert translate(".") == "." 
assert translate("1 , 3") == "1 , 3" assert translate("кіт зловив мишу") == "кот поймал мышку" for sentence in uk_sentences: print("src: {}\ndst: {}\n".format(sentence, translate(sentence))) ###Output /usr/local/lib/python3.5/dist-packages/gensim/matutils.py:737: FutureWarning: Conversion of the second argument of issubdtype from `int` to `np.signedinteger` is deprecated. In future, it will be treated as `np.int64 == np.dtype(int).type`. if np.issubdtype(vec.dtype, np.int):
NTPU_05_Network_Training.ipynb
###Markdown Lab 5: Training Techniques and ResNet Part 0: Preliminaries ###Code !pip uninstall -y tensorflow !pip install -U tensorflow-gpu==2.0.0-beta1 # Python ≥3.5 is required import sys assert sys.version_info >= (3, 5) # Scikit-Learn ≥0.20 is required import sklearn assert sklearn.__version__ >= "0.20" # TensorFlow ≥2.0-preview is required import tensorflow as tf from tensorflow import keras assert tf.__version__ >= "2.0" # Common imports import numpy as np import os # to make this notebook's output stable across runs np.random.seed(42) # To plot pretty figures %matplotlib inline import matplotlib as mpl import matplotlib.pyplot as plt mpl.rc('axes', labelsize=14) mpl.rc('xtick', labelsize=12) mpl.rc('ytick', labelsize=12) # Where to save the figures PROJECT_ROOT_DIR = "." CHAPTER_ID = "deep" IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID) os.makedirs(IMAGES_PATH, exist_ok=True) def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300): path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension) print("Saving figure", fig_id) if tight_layout: plt.tight_layout() plt.savefig(path, format=fig_extension, dpi=resolution) ###Output _____no_output_____ ###Markdown Part 1: Activation FunctionsLet's list all the activation functions. ###Code [m for m in dir(keras.activations) if not m.startswith("_")] ###Output _____no_output_____ ###Markdown Certain advanced activations are treated as layers in Keras. For these, do not specify any specific activation for the preceding layer. Simply add these advanced activations as a layer after a normal layer in your model. ###Code [m for m in dir(keras.layers) if "relu" in m.lower()] ###Output _____no_output_____ ###Markdown SoftmaxWe've used the softmax activation many times now without explaining what it is. So let's talk about it now. Let's say we have a *k* class classification problem. For multinomial classifcation problems, we often use the softmax activation function to calculate the probability that an input belongs to a certain class, p_k. The way softmax works is, for each class, we will have a score:![alt text](https://drive.google.com/uc?id=1CN_oTjvwxTU_Nu4y1W8bipFv46UhEHmx)If you look at this closely, the score is just the weight multiplied by the input (so basically just what the neuron does before activation).Now, the probability that the input belongs to class *k* is:![alt text](https://drive.google.com/uc?id=1rSxk2Gp-VE2BFE_dzHtBRi8w-NushF8Z)where *K* is the total number of classes. So basically you divide the exponential score of this class by the sum of all the output exponentials of each class. This is different than, say, the linear ratio between class *k*'s score and the sum of all the scores. Namely, the softmax function makes the probability of the higher scored classes **further** apart from the ones with lower scores. Leaky ReLULet's revisit the fashion MNIST problem using leaky ReLU. 
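###Markdown Before moving on to Leaky ReLU, here is a tiny numeric sketch of the softmax computation described above, with made-up scores (plain NumPy, illustrative only): ###Code
import numpy as np

scores = np.array([2.0, 1.0, 0.1])               # hypothetical class scores s_k(x)
probs = np.exp(scores) / np.exp(scores).sum()    # p_k = exp(s_k) / sum over K of exp(s_K)
print(probs)        # roughly [0.659, 0.242, 0.099]
print(probs.sum())  # 1.0 -- a valid probability distribution over the classes
###Output _____no_output_____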
###Code # define the leaky relu activation function z = np.linspace(-5, 5, 200) def relu(z): return np.where(z < 0, 0, z) def leaky_relu(z, alpha=0.01): return np.maximum(alpha*z, z) plt.plot(z, leaky_relu(z, 0.05), "b-", linewidth=2) plt.plot([-5, 5], [0, 0], 'k-') plt.plot([0, 0], [-0.5, 4.2], 'k-') plt.grid(True) props = dict(facecolor='black', shrink=0.1) plt.annotate('Leak', xytext=(-3.5, 0.5), xy=(-5, -0.2), arrowprops=props, fontsize=14, ha="center") plt.title("Leaky ReLU activation function", fontsize=14) plt.axis([-5, 5, -0.5, 4.2]) plt.show() (X_train_full, y_train_full), (X_test, y_test) = keras.datasets.fashion_mnist.load_data() X_train_full = X_train_full / 255.0 X_test = X_test / 255.0 X_valid, X_train = X_train_full[:5000], X_train_full[5000:] y_valid, y_train = y_train_full[:5000], y_train_full[5000:] tf.random.set_seed(42) np.random.seed(42) model = keras.models.Sequential([ keras.layers.Flatten(input_shape=[28, 28]), keras.layers.Dense(300, kernel_initializer="he_normal"), keras.layers.LeakyReLU(), keras.layers.Dense(100, kernel_initializer="he_normal"), keras.layers.LeakyReLU(), keras.layers.Dense(10, activation="softmax") ]) model.compile(loss="sparse_categorical_crossentropy", optimizer=keras.optimizers.SGD(lr=1e-3), metrics=["accuracy"]) history = model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid)) ###Output WARNING: Logging before flag parsing goes to stderr. W0708 16:29:38.266940 140483582592896 deprecation.py:323] From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_grad.py:1250: add_dispatch_support.<locals>.wrapper (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version. Instructions for updating: Use tf.where in 2.0, which has the same broadcast rule as np.where ###Markdown SELUSELU is a scaled ELU activation function. It's special because for a network consisting of purely dense layers, the SELU activation helps the outputs *self-normalize*. That means, the output will preserve mean 0 and std 1 during training, which automatically deals with the vanishing gradient problem. Sufficiently deep dense nets will therefore work nicely with SELU. 
###Code from scipy.special import erfc # alpha and scale to self normalize with mean 0 and standard deviation 1 # (see equation 14 in the paper): alpha_0_1 = -np.sqrt(2 / np.pi) / (erfc(1/np.sqrt(2)) * np.exp(1/2) - 1) scale_0_1 = (1 - erfc(1 / np.sqrt(2)) * np.sqrt(np.e)) * np.sqrt(2 * np.pi) * (2 * erfc(np.sqrt(2))*np.e**2 + np.pi*erfc(1/np.sqrt(2))**2*np.e - 2*(2+np.pi)*erfc(1/np.sqrt(2))*np.sqrt(np.e)+np.pi+2)**(-1/2) # define the elu and selu activations def elu(z, alpha=1): return np.where(z < 0, alpha * (np.exp(z) - 1), z) def selu(z, scale=scale_0_1, alpha=alpha_0_1): return scale * elu(z, alpha) plt.plot(z, elu(z), "b-", linewidth=2) plt.plot([-5, 5], [0, 0], 'k-') plt.plot([-5, 5], [-1, -1], 'k--') plt.plot([0, 0], [-2.2, 3.2], 'k-') plt.grid(True) plt.title(r"ELU activation function ($\alpha=1$)", fontsize=14) plt.axis([-5, 5, -2.2, 3.2]) plt.show() plt.plot(z, selu(z), "b-", linewidth=2) plt.plot([-5, 5], [0, 0], 'k-') plt.plot([-5, 5], [-1.758, -1.758], 'k--') plt.plot([0, 0], [-2.2, 3.2], 'k-') plt.grid(True) plt.title("SELU activation function", fontsize=14) plt.axis([-5, 5, -2.2, 3.2]) plt.show() def try_1000_activations(activation): np.random.seed(42) Z = np.random.normal(size=(500, 100)) # standardized inputs for layer in range(1000): W = np.random.normal(size=(100, 100), scale=np.sqrt(1 / 100)) # LeCun initialization Z = activation(np.dot(Z, W)) means = np.mean(Z, axis=0).mean() stds = np.std(Z, axis=0).mean() if layer % 100 == 0: print("Layer {}: mean {:.2f}, std deviation {:.2f}".format(layer, means, stds)) try_1000_activations(selu) try_1000_activations(relu) try_1000_activations(leaky_relu) try_1000_activations(elu) ###Output Layer 0: mean 0.16, std deviation 0.79 Layer 100: mean -0.00, std deviation 0.02 Layer 200: mean -0.00, std deviation 0.01 Layer 300: mean -0.00, std deviation 0.00 Layer 400: mean 0.00, std deviation 0.00 Layer 500: mean -0.00, std deviation 0.00 Layer 600: mean 0.00, std deviation 0.00 Layer 700: mean 0.00, std deviation 0.00 Layer 800: mean -0.00, std deviation 0.00 Layer 900: mean -0.00, std deviation 0.00 ###Markdown Let's try using the SELU activation on fashion MNIST. ###Code np.random.seed(42) tf.random.set_seed(42) model = keras.models.Sequential() model.add(keras.layers.Flatten(input_shape=[28, 28])) model.add(keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal")) for layer in range(99): model.add(keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal")) model.add(keras.layers.Dense(10, activation="softmax")) model.compile(loss="sparse_categorical_crossentropy", optimizer=keras.optimizers.SGD(lr=1e-3), metrics=["accuracy"]) ###Output _____no_output_____ ###Markdown Before training it, remember to scale the inputs to mean 0 and std 1 first. 
###Code pixel_means = X_train.mean(axis=0, keepdims=True) pixel_stds = X_train.std(axis=0, keepdims=True) X_train_scaled = (X_train - pixel_means) / pixel_stds X_valid_scaled = (X_valid - pixel_means) / pixel_stds X_test_scaled = (X_test - pixel_means) / pixel_stds history = model.fit(X_train_scaled, y_train, epochs=5, validation_data=(X_valid_scaled, y_valid)) ###Output Train on 55000 samples, validate on 5000 samples Epoch 1/5 55000/55000 [==============================] - 34s 617us/sample - loss: 1.2710 - accuracy: 0.5080 - val_loss: 1.0708 - val_accuracy: 0.5594 Epoch 2/5 55000/55000 [==============================] - 29s 534us/sample - loss: 0.8509 - accuracy: 0.6779 - val_loss: 0.6995 - val_accuracy: 0.7412 Epoch 3/5 55000/55000 [==============================] - 30s 537us/sample - loss: 0.7195 - accuracy: 0.7356 - val_loss: 0.7018 - val_accuracy: 0.7498 Epoch 4/5 55000/55000 [==============================] - 30s 538us/sample - loss: 0.7286 - accuracy: 0.7386 - val_loss: 0.8595 - val_accuracy: 0.7112 Epoch 5/5 55000/55000 [==============================] - 29s 535us/sample - loss: 0.6805 - accuracy: 0.7522 - val_loss: 0.5807 - val_accuracy: 0.8048 ###Markdown Let's do the same thing with ReLU to compare. ###Code np.random.seed(42) tf.random.set_seed(42) model = keras.models.Sequential() model.add(keras.layers.Flatten(input_shape=[28, 28])) model.add(keras.layers.Dense(300, activation="relu", kernel_initializer="he_normal")) for layer in range(99): model.add(keras.layers.Dense(100, activation="relu", kernel_initializer="he_normal")) model.add(keras.layers.Dense(10, activation="softmax")) model.compile(loss="sparse_categorical_crossentropy", optimizer=keras.optimizers.SGD(lr=1e-3), metrics=["accuracy"]) history = model.fit(X_train_scaled, y_train, epochs=5, validation_data=(X_valid_scaled, y_valid)) ###Output Train on 55000 samples, validate on 5000 samples Epoch 1/5 55000/55000 [==============================] - 21s 381us/sample - loss: 1.8279 - accuracy: 0.2662 - val_loss: 1.2287 - val_accuracy: 0.4560 Epoch 2/5 55000/55000 [==============================] - 19s 339us/sample - loss: 1.1299 - accuracy: 0.5147 - val_loss: 1.0086 - val_accuracy: 0.5574 Epoch 3/5 55000/55000 [==============================] - 19s 340us/sample - loss: 1.0200 - accuracy: 0.5735 - val_loss: 1.0418 - val_accuracy: 0.5538 Epoch 4/5 55000/55000 [==============================] - 19s 341us/sample - loss: 0.9309 - accuracy: 0.6126 - val_loss: 0.7873 - val_accuracy: 0.6586 Epoch 5/5 55000/55000 [==============================] - 19s 343us/sample - loss: 0.7924 - accuracy: 0.6719 - val_loss: 0.7257 - val_accuracy: 0.7084 ###Markdown Part 2: Batch Normalization ###Code model = keras.models.Sequential([ keras.layers.Flatten(input_shape=[28, 28]), keras.layers.BatchNormalization(), keras.layers.Dense(300, activation="relu"), keras.layers.BatchNormalization(), keras.layers.Dense(100, activation="relu"), keras.layers.BatchNormalization(), keras.layers.Dense(10, activation="softmax") ]) model.summary() bn1 = model.layers[1] [(var.name, var.trainable) for var in bn1.variables] model.compile(loss="sparse_categorical_crossentropy", optimizer=keras.optimizers.SGD(lr=1e-3), metrics=["accuracy"]) history = model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid)) ###Output Train on 55000 samples, validate on 5000 samples Epoch 1/10 55000/55000 [==============================] - 6s 115us/sample - loss: 0.8756 - accuracy: 0.7140 - val_loss: 0.5515 - val_accuracy: 0.8216 Epoch 2/10 55000/55000 
[==============================] - 6s 105us/sample - loss: 0.5765 - accuracy: 0.8033 - val_loss: 0.4743 - val_accuracy: 0.8434 Epoch 3/10 55000/55000 [==============================] - 6s 105us/sample - loss: 0.5146 - accuracy: 0.8213 - val_loss: 0.4382 - val_accuracy: 0.8534 Epoch 4/10 55000/55000 [==============================] - 6s 105us/sample - loss: 0.4821 - accuracy: 0.8322 - val_loss: 0.4171 - val_accuracy: 0.8594 Epoch 5/10 55000/55000 [==============================] - 6s 108us/sample - loss: 0.4590 - accuracy: 0.8403 - val_loss: 0.4002 - val_accuracy: 0.8658 Epoch 6/10 55000/55000 [==============================] - 6s 105us/sample - loss: 0.4428 - accuracy: 0.8459 - val_loss: 0.3884 - val_accuracy: 0.8690 Epoch 7/10 55000/55000 [==============================] - 6s 105us/sample - loss: 0.4219 - accuracy: 0.8521 - val_loss: 0.3792 - val_accuracy: 0.8718 Epoch 8/10 55000/55000 [==============================] - 6s 105us/sample - loss: 0.4150 - accuracy: 0.8549 - val_loss: 0.3695 - val_accuracy: 0.8758 Epoch 9/10 55000/55000 [==============================] - 6s 106us/sample - loss: 0.4014 - accuracy: 0.8588 - val_loss: 0.3630 - val_accuracy: 0.8740 Epoch 10/10 55000/55000 [==============================] - 6s 106us/sample - loss: 0.3932 - accuracy: 0.8614 - val_loss: 0.3581 - val_accuracy: 0.8770 ###Markdown Let's try putting the batch normalization before the activations. ###Code model = keras.models.Sequential([ keras.layers.Flatten(input_shape=[28, 28]), keras.layers.BatchNormalization(), keras.layers.Dense(300, use_bias=False), keras.layers.BatchNormalization(), keras.layers.Activation("relu"), keras.layers.Dense(100, use_bias=False), keras.layers.Activation("relu"), keras.layers.BatchNormalization(), keras.layers.Dense(10, activation="softmax") ]) model.compile(loss="sparse_categorical_crossentropy", optimizer=keras.optimizers.SGD(lr=1e-3), metrics=["accuracy"]) history = model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid)) ###Output Train on 55000 samples, validate on 5000 samples Epoch 1/10 55000/55000 [==============================] - 6s 115us/sample - loss: 0.8618 - accuracy: 0.7093 - val_loss: 0.5659 - val_accuracy: 0.8084 Epoch 2/10 55000/55000 [==============================] - 6s 107us/sample - loss: 0.5809 - accuracy: 0.8008 - val_loss: 0.4832 - val_accuracy: 0.8350 Epoch 3/10 55000/55000 [==============================] - 6s 108us/sample - loss: 0.5161 - accuracy: 0.8199 - val_loss: 0.4463 - val_accuracy: 0.8474 Epoch 4/10 55000/55000 [==============================] - 6s 103us/sample - loss: 0.4850 - accuracy: 0.8301 - val_loss: 0.4255 - val_accuracy: 0.8550 Epoch 5/10 55000/55000 [==============================] - 6s 103us/sample - loss: 0.4580 - accuracy: 0.8406 - val_loss: 0.4109 - val_accuracy: 0.8594 Epoch 6/10 55000/55000 [==============================] - 6s 102us/sample - loss: 0.4410 - accuracy: 0.8458 - val_loss: 0.3977 - val_accuracy: 0.8620 Epoch 7/10 55000/55000 [==============================] - 6s 103us/sample - loss: 0.4299 - accuracy: 0.8489 - val_loss: 0.3917 - val_accuracy: 0.8652 Epoch 8/10 55000/55000 [==============================] - 6s 103us/sample - loss: 0.4128 - accuracy: 0.8558 - val_loss: 0.3829 - val_accuracy: 0.8666 Epoch 9/10 55000/55000 [==============================] - 6s 103us/sample - loss: 0.4012 - accuracy: 0.8589 - val_loss: 0.3761 - val_accuracy: 0.8686 Epoch 10/10 55000/55000 [==============================] - 6s 103us/sample - loss: 0.3932 - accuracy: 0.8625 - val_loss: 0.3706 - val_accuracy: 0.8712 
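###Markdown In both runs the model ends up at a very similar validation accuracy (roughly 0.877 with BatchNormalization after the activations versus 0.871 with it before them), so for this fairly shallow network the placement makes little practical difference. To inspect the learning curves of the most recent run, a small sketch (assumes `history` still refers to the last `fit` call above): ###Code
import pandas as pd

# history.history is a dict of per-epoch metrics; plot them all together
pd.DataFrame(history.history).plot(figsize=(8, 5))
plt.grid(True)
plt.show()
###Output _____no_output_____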
###Markdown Part 3: Gradient Clipping ###Code optimizer = keras.optimizers.SGD(clipvalue=1.0) optimizer = keras.optimizers.SGD(clipnorm=1.0) #model.compile(loss="mse", optimizer=optimizer) ###Output _____no_output_____ ###Markdown Part 4: Transfer learning Using the fashion MNIST dataset, let's pretrain a network to classify 8 classes, then reuse parts of that network to perform binary classification on the remaining 2 classes.Let's start by splitting the dataset into two parts.* ```X_train_A```: all classes except class 5 and 6 (sandals and shirts)* ```X_train_B```: just the first 200 images of sandals and shirts.We also split the validation and test sets. ###Code def split_dataset(X, y): y_5_or_6 = (y == 5) | (y == 6) # sandals or shirts y_A = y[~y_5_or_6] y_A[y_A > 6] -= 2 # class indices 7, 8, 9 should be moved to 5, 6, 7 y_B = (y[y_5_or_6] == 6).astype(np.float32) # binary classification task: is it a shirt (class 6)? return ((X[~y_5_or_6], y_A), (X[y_5_or_6], y_B)) (X_train_A, y_train_A), (X_train_B, y_train_B) = split_dataset(X_train, y_train) (X_valid_A, y_valid_A), (X_valid_B, y_valid_B) = split_dataset(X_valid, y_valid) (X_test_A, y_test_A), (X_test_B, y_test_B) = split_dataset(X_test, y_test) X_train_B = X_train_B[:200] y_train_B = y_train_B[:200] # reshape the inputs for CNN data = [X_train_A, X_valid_A, X_test_A, X_train_B, X_valid_B, X_test_B] for i in range(len(data)): data[i].resize(*data[i].shape, 1) X_train_A.shape X_train_B.shape y_train_A[:30] y_train_B[:30] tf.random.set_seed(42) np.random.seed(42) from functools import partial DefaultConv2D = partial(keras.layers.Conv2D, kernel_size=3, activation='relu', padding="SAME") model_A = keras.models.Sequential([ DefaultConv2D(filters=64, kernel_size=7, input_shape=[28, 28, 1]), keras.layers.MaxPooling2D(pool_size=2), DefaultConv2D(filters=128), DefaultConv2D(filters=128), keras.layers.MaxPooling2D(pool_size=2), DefaultConv2D(filters=256), DefaultConv2D(filters=256), keras.layers.MaxPooling2D(pool_size=2), keras.layers.Flatten(), keras.layers.Dense(units=128, activation='relu'), keras.layers.Dense(units=64, activation='relu'), keras.layers.Dense(units=8, activation='softmax'), ]) model_A.compile(loss="sparse_categorical_crossentropy", optimizer="sgd", metrics=["accuracy"]) history = model_A.fit(X_train_A, y_train_A, epochs=10, validation_data=[X_valid_A, y_valid_A]) score = model_A.evaluate(X_test_A, y_test_A) model_A.save("my_model_A.h5") model_B = keras.models.Sequential([ DefaultConv2D(filters=64, kernel_size=7, input_shape=[28, 28, 1]), keras.layers.MaxPooling2D(pool_size=2), DefaultConv2D(filters=128), DefaultConv2D(filters=128), keras.layers.MaxPooling2D(pool_size=2), DefaultConv2D(filters=256), DefaultConv2D(filters=256), keras.layers.MaxPooling2D(pool_size=2), keras.layers.Flatten(), keras.layers.Dense(units=128, activation='relu'), keras.layers.Dense(units=64, activation='relu'), keras.layers.Dense(units=1, activation='sigmoid'), ]) model_B.compile(loss="binary_crossentropy", optimizer="sgd", metrics=["accuracy"]) history = model_B.fit(X_train_B, y_train_B, epochs=10, validation_data=[X_valid_B, y_valid_B]) score = model_B.evaluate(X_test_B, y_test_B) ###Output Train on 200 samples, validate on 986 samples Epoch 1/10 200/200 [==============================] - 1s 3ms/sample - loss: 0.6898 - accuracy: 0.4950 - val_loss: 0.6869 - val_accuracy: 0.5000 Epoch 2/10 200/200 [==============================] - 0s 630us/sample - loss: 0.6850 - accuracy: 0.5050 - val_loss: 0.6826 - val_accuracy: 0.5304 Epoch 3/10 200/200 
[==============================] - 0s 593us/sample - loss: 0.6807 - accuracy: 0.5400 - val_loss: 0.6784 - val_accuracy: 0.7373 Epoch 4/10 200/200 [==============================] - 0s 619us/sample - loss: 0.6763 - accuracy: 0.7500 - val_loss: 0.6740 - val_accuracy: 0.7414 Epoch 5/10 200/200 [==============================] - 0s 601us/sample - loss: 0.6716 - accuracy: 0.7550 - val_loss: 0.6689 - val_accuracy: 0.7110 Epoch 6/10 200/200 [==============================] - 0s 586us/sample - loss: 0.6660 - accuracy: 0.7950 - val_loss: 0.6631 - val_accuracy: 0.6714 Epoch 7/10 200/200 [==============================] - 0s 580us/sample - loss: 0.6595 - accuracy: 0.7500 - val_loss: 0.6562 - val_accuracy: 0.8763 Epoch 8/10 200/200 [==============================] - 0s 597us/sample - loss: 0.6514 - accuracy: 0.8600 - val_loss: 0.6467 - val_accuracy: 0.6653 Epoch 9/10 200/200 [==============================] - 0s 601us/sample - loss: 0.6410 - accuracy: 0.7800 - val_loss: 0.6346 - val_accuracy: 0.7708 Epoch 10/10 200/200 [==============================] - 0s 625us/sample - loss: 0.6273 - accuracy: 0.7800 - val_loss: 0.6212 - val_accuracy: 0.8895 2000/2000 [==============================] - 0s 114us/sample - loss: 0.6219 - accuracy: 0.8860 ###Markdown Load model A and replace the output layer. ###Code model_A = keras.models.load_model("my_model_A.h5") model_B_on_A = keras. models.Sequential(model_A.layers[:-1]) model_B_on_A.add(keras.layers.Dense(1, activation="sigmoid")) ###Output _____no_output_____ ###Markdown Because the layers for ```model_B_on_A``` are shared with ```model_A```, training performed on ```model_B_on_A``` will also affect ```model_A```. If you want to avoid that, you can make a copy of ```model_A```, which will not be affected. ###Code model_A_clone = keras.models.clone_model(model_A) model_A_clone.set_weights(model_A.get_weights()) ###Output _____no_output_____ ###Markdown Let's freeze all of the reused layers first. ###Code for layer in model_B_on_A.layers[:-1]: layer.trainable = False model_B_on_A.compile(loss="binary_crossentropy", optimizer=keras.optimizers.SGD(lr=1e-3), metrics=["accuracy"]) history = model_B_on_A.fit(X_train_B, y_train_B, epochs=4, validation_data=(X_valid_B, y_valid_B)) for layer in model_B_on_A.layers[:-1]: layer.trainable = True model_B_on_A.compile(loss="binary_crossentropy", optimizer=keras.optimizers.SGD(lr=1e-3), metrics=["accuracy"]) history = model_B_on_A.fit(X_train_B, y_train_B, epochs=16, validation_data=(X_valid_B, y_valid_B)) model_B.evaluate(X_test_B, y_test_B) model_B_on_A.evaluate(X_test_B, y_test_B) ###Output 2000/2000 [==============================] - 0s 101us/sample - loss: 0.0557 - accuracy: 0.9945
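###Markdown Comparing the two evaluations above: `model_B`, trained from scratch on only 200 examples, reaches about 88.6% test accuracy, while `model_B_on_A`, which reuses the layers pretrained on the other eight classes, reaches about 99.45% on the same test set. In terms of error rate that is roughly a twenty-fold reduction — a quick back-of-the-envelope check using the reported scores: ###Code
err_scratch = 1 - 0.8860    # model_B test accuracy reported above
err_transfer = 1 - 0.9945   # model_B_on_A test accuracy reported above
print(err_scratch / err_transfer)  # about 20.7x fewer errors with the reused layers
###Output _____no_output_____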
Foxtrot_Finale_svm.ipynb
###Markdown OVERVIEW Algarve resort and Lisbon City Hotel are 2 hotels located in Portugal. The two hotels wish to find out the most popular time of year that hotel rooms are booked, so they can adequately plan themselves in terms of supplies and staffing. This will ensure that they are able to cater to their guests more efficiently. IMPORTING LIBRARIES ###Code # Importing important libraries import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt ###Output _____no_output_____ ###Markdown LOADING DATASET ###Code # Loading the datasets names = ['hotel', 'is_canceled', 'lead_time', 'arrival_date_year', 'arrival_date_month', 'arrival_date_week_number', 'arrival_date_day_of_month', 'stays_in_weekend_nights', 'stays_in_week_nights', 'adults', 'children', 'babies', 'meal', 'country', 'market_segment', 'distribution_channel', 'is_repeated_guest', 'previous_cancellations', 'previous_bookings_not_canceled', 'reserved_room_type', 'assigned_room_type', 'booking_changes', 'deposit_type', 'agent', 'company', 'days_in_waiting_list', 'customer_type', 'adr', 'required_car_parking_spaces', 'total_of_special_requests', 'reservation_status', 'reservation_status_date'] hotel_df = pd.read_csv('hotel_bookings.csv', usecols=names) hotel_df.head() ###Output _____no_output_____ ###Markdown CHECKING THE DATA ###Code # Determing the number of records in our dataset hotel_df.shape # Previewing the first records of our dataset hotel_df.head() # Previewing the last records of our dataset hotel_df.tail() #consice summary of train dataset hotel_df.info() ###Output <class 'pandas.core.frame.DataFrame'> RangeIndex: 29264 entries, 0 to 29263 Data columns (total 32 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 hotel 29264 non-null object 1 is_canceled 29264 non-null int64 2 lead_time 29264 non-null int64 3 arrival_date_year 29264 non-null int64 4 arrival_date_month 29264 non-null object 5 arrival_date_week_number 29264 non-null int64 6 arrival_date_day_of_month 29264 non-null int64 7 stays_in_weekend_nights 29264 non-null int64 8 stays_in_week_nights 29264 non-null int64 9 adults 29264 non-null int64 10 children 29264 non-null int64 11 babies 29264 non-null int64 12 meal 29264 non-null object 13 country 28803 non-null object 14 market_segment 29264 non-null object 15 distribution_channel 29264 non-null object 16 is_repeated_guest 29264 non-null int64 17 previous_cancellations 29264 non-null int64 18 previous_bookings_not_canceled 29264 non-null int64 19 reserved_room_type 29264 non-null object 20 assigned_room_type 29264 non-null object 21 booking_changes 29264 non-null int64 22 deposit_type 29264 non-null object 23 agent 23221 non-null float64 24 company 2351 non-null float64 25 days_in_waiting_list 29264 non-null int64 26 customer_type 29264 non-null object 27 adr 29264 non-null float64 28 required_car_parking_spaces 29264 non-null int64 29 total_of_special_requests 29264 non-null int64 30 reservation_status 29264 non-null object 31 reservation_status_date 29264 non-null object dtypes: float64(3), int64(17), object(12) memory usage: 7.1+ MB ###Markdown **observation**: our datset has 119390 observations and 32 variables, it has 4 floats, 16int and 12 object datatype. 
###Code # checking the target variable hotel_df.is_canceled.value_counts() #getting the above in parcentage # hotel_df.is_canceled.value_counts() / len(hotel_df) * 100 ###Output _____no_output_____ ###Markdown Tidying the data ###Code #creating a copy of the dataset to be be used on cleaning process hotel_df1=hotel_df.copy() # #getting the irrelevant varaibles columns = ['arrival_date_year','agent','arrival_date_week_number','lead_time', 'market_segment', 'distribution_channel','booking_changes','days_in_waiting_list', 'adr','reservation_status_date'] # #dropping the irrelevant colunms drop_colunms= hotel_df1.drop(columns, axis = 1, inplace=True) # #previewing the dataset hotel_df1.head() # Check for missing values # hotel_df1.isnull().sum() #getting the percentage of missing data in each column # hotel_df1.isnull().sum()/len(hotel_df1)* 100 # as a role of thumb, drop any colunm with more than 25% of the missing data. # hotel_df1 = hotel_df1[hotel_df1.columns[hotel_df1.isnull().mean() < 0.25]] hotel_df1.shape hotel_df1.isnull().sum() ###Output _____no_output_____ ###Markdown **observation**: the data type are appropriate now and there is a clue that we have null values. ###Code #filling the missing values of children variable with the mean # hotel_df1['children']= hotel_df1['children'].fillna(hotel_df1['children'].mean(), axis = 0) #filling the missing values in the country variable with mode # hotel_df1.fillna({'country': hotel_df1['country'].mode()[0]}, inplace=True) hotel_df1.columns #confriming no missing values. # hotel_df1.isnull().sum().any() # Checking if there are duplicates # hotel_df1.duplicated().sum() # Dropping duplicates hotel_df1.drop_duplicates(inplace=True) # #confirm no duplicates # hotel_df1.duplicated().any() #To remove white spaces in our columns hotel_df1.columns.str.replace(" "," ") # To check for outliers through boxplots # fig, ((ax1, ax2), (ax3, ax4),(ax5, ax6), (ax7, ax8)) = plt.subplots(4,2, figsize=(15, 15)) fig.suptitle('Boxplots') sns.boxplot(hotel_df1['stays_in_weekend_nights'], ax=ax1) sns.boxplot(hotel_df1['stays_in_week_nights'], ax=ax2) sns.boxplot(hotel_df1['adults'], ax=ax3) sns.boxplot(hotel_df1['babies'], ax=ax4) sns.boxplot(hotel_df1['is_repeated_guest'], ax=ax5) sns.boxplot(hotel_df1['previous_cancellations'], ax=ax6) sns.boxplot(hotel_df1['previous_bookings_not_canceled'], ax=ax7) plt.show() # Check for outliers through IQR Score # we first get the interquantile range for each column Q1 = hotel_df1.quantile(0.25) Q3 = hotel_df1.quantile(0.75) IQR = Q3 - Q1 print(IQR) # We then get the outliers as follows: # True means presence of outliers (hotel_df1 < (Q1 - 1.5 * IQR)) |(hotel_df1 > (Q3 + 1.5 * IQR)) # We then filter out outliers and remain with valid values as follows df_clean = hotel_df1[~((hotel_df1 < (Q1 - 1.5 * IQR)) |(hotel_df1 > (Q3 + 1.5 * IQR))).any(axis=1)] df_clean.shape ###Output _____no_output_____ ###Markdown **observation**: clean dataset has 43962 records and 21 variables. 
###Code #converting the data to the correct datatype- float # df_clean[["is_canceled", "arrival_date_day_of_month", "stays_in_weekend_nights", "adults", "adults", "previous_cancellations","previous_cancellations", "previous_bookings_not_canceled","required_car_parking_spaces", "total_of_special_requests" ]] = df_clean[["is_canceled", "arrival_date_day_of_month", "stays_in_weekend_nights", "adults", "adults", "previous_cancellations","previous_cancellations", "previous_bookings_not_canceled","required_car_parking_spaces", "total_of_special_requests"]].astype(float) # #converting categorical variables to the correct datatype-category # df_clean[["hotel","arrival_date_month","meal", "country", "reserved_room_type","assigned_room_type", "deposit_type", "customer_type", "reservation_status" ]] = df_clean[["hotel","arrival_date_month","meal", "country", "reserved_room_type","assigned_room_type", "deposit_type", "customer_type", "reservation_status"]].astype('category') df_clean.info() ###Output <class 'pandas.core.frame.DataFrame'> Int64Index: 12025 entries, 0 to 29262 Data columns (total 21 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 hotel 12025 non-null category 1 is_canceled 12025 non-null float64 2 arrival_date_month 12025 non-null category 3 arrival_date_day_of_month 12025 non-null float64 4 stays_in_weekend_nights 12025 non-null float64 5 stays_in_week_nights 12025 non-null int64 6 adults 12025 non-null float64 7 children 12025 non-null int64 8 babies 12025 non-null int64 9 meal 12025 non-null category 10 country 12025 non-null category 11 is_repeated_guest 12025 non-null int64 12 previous_cancellations 12025 non-null float64 13 previous_bookings_not_canceled 12025 non-null float64 14 reserved_room_type 12025 non-null category 15 assigned_room_type 12025 non-null category 16 deposit_type 12025 non-null category 17 customer_type 12025 non-null category 18 required_car_parking_spaces 12025 non-null float64 19 total_of_special_requests 12025 non-null float64 20 reservation_status 12025 non-null category dtypes: category(9), float64(8), int64(4) memory usage: 1.3 MB ###Markdown 6. Exploratory Data Analysis Univariate analysisWe will now perform univariate analysis. This is a type of analysis done only one variable. This type of analysis will be helpful in understanding the characteristics of each variables. ###Code # preview of our dataset # df_clean.head() #univariate analysis: hotel variable piechart # plt.figure(figsize = (6, 6)) df_clean.hotel.value_counts().plot(kind = 'pie', autopct = '%0.1f%%') plt.show() ###Output _____no_output_____ ###Markdown **observation**: city hotel registered a higher reservation of 63.7% ###Code #univariate analysis: is cancelled variable piechart # plt.figure(figsize = (6, 6)) df_clean.is_canceled.value_counts().plot(kind = 'pie', autopct = '%0.1f%%') plt.show() #0=> not cancelled #1=> cancelled ###Output _____no_output_____ ###Markdown **observation**: only 31% of reversation was cancelled. ###Code # univariate analysis: month of arrival variable histogram # plt.hist(df_clean['arrival_date_month'], bins=12, histtype='bar', rwidth=0.9) plt.xticks(rotation=90) plt.xlabel('months') plt.ylabel('frequency') plt.title('Histogram of months of the year') plt.show() ###Output _____no_output_____ ###Markdown **observation**: The most popular months of travel were between May and August with the peak being recorded at August. 
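###Markdown One caveat with the month histogram above: month names tend to be ordered alphabetically rather than chronologically in a quick plot like that. A small illustrative sketch to tabulate the bookings per arrival month in calendar order instead (assumes `df_clean` as defined above): ###Code
# count bookings per arrival month and put them in calendar order
month_order = ['January', 'February', 'March', 'April', 'May', 'June',
               'July', 'August', 'September', 'October', 'November', 'December']
monthly_counts = (df_clean['arrival_date_month']
                  .astype(str)
                  .value_counts()
                  .reindex(month_order)
                  .fillna(0))
monthly_counts.plot(kind='bar', rot=90, title='Bookings per arrival month')
plt.show()
###Output _____no_output_____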
###Code # univariate analysis: meal variable bar graph df_clean.meal.value_counts().plot(kind = 'bar', rot = 0) plt.show() ###Output _____no_output_____ ###Markdown **observation**: many guests went for bed and breakfast option. ###Code # univariate analysis: month of arrival frequency count # df_clean.country.value_counts(ascending=False) ###Output _____no_output_____ ###Markdown **observation**: Most of the hotel guests came from Portugal and Great Britain. ###Code # univariate analysis: customer type variable bar graph df_clean.customer_type.value_counts().plot(kind = 'bar', rot = 0) plt.show() ###Output _____no_output_____ ###Markdown **observation**:The most frequent type of guest was the transient. **observation**: Most popular arrival dates for customers is 17th and 2nd early of the month mostly because that's when people get their salaries ###Code # univariate analysis: room type variable bar graph df_clean.assigned_room_type.value_counts().plot(kind = 'bar', rot = 0) plt.show() ###Output _____no_output_____ ###Markdown **observation**: most of the clients went for romm type A Observations from Univariate Analysis1. Guests preferred the city hotels to the resort hotels2. Most guests went for the Bed and Break fast option while the Full board option had the least customers3.Most of the hotel guests came from Portugal and Great Britain while very few came from Germany4.The most frequent type of guest was the transient type who came for a short term visit while the group type of guests were the least5.The most popular months of travel were between May and August with the peak being recorded at July mainly because it is summer in Europe during these months6.Most popular arrival dates for customers is during end month mostly because that's when people get their salaries 7.The most popular room type both for bookings and reservations is Type A while the least popular L Bivariate Analysis Bar Charts to show the relationship between booking cancellation status and other variables ###Code df_clean['is_canceled'].value_counts() # 0 = not canceled # 1 = canceled #plotting the target variable against other features. 
# ignoring the warnings import warnings warnings.filterwarnings('ignore') f, axes = plt.subplots(7, 1, figsize=[5, 25]) sns.countplot('hotel', hue= 'is_canceled', data=df_clean, ax=axes[0]) sns.countplot('customer_type', hue= 'is_canceled', data=df_clean, ax=axes[1]) sns.countplot('deposit_type', hue= 'is_canceled', data=df_clean, ax=axes[2]) sns.countplot('is_repeated_guest', hue= 'is_canceled', data=df_clean, ax=axes[3]) sns.countplot('previous_cancellations', hue= 'is_canceled', data=df_clean, ax=axes[4]) sns.countplot('previous_bookings_not_canceled', hue= 'is_canceled', data=df_clean, ax=axes[5]) sns.countplot('required_car_parking_spaces', hue= 'is_canceled', data=df_clean, ax=axes[6]) #Distribution of cancellation and Deposit Type¶ plt.figure(figsize=(7,7)) sns.barplot(x=df_clean['deposit_type'], y=df_clean['is_canceled'], palette=sns.color_palette("icefire")) #It Seems more bookings were non refundable and were cancelled more as well # Special and non Special Request of the guest fig, ax = plt.subplots(1, 2, figsize=[25, 6]) special = df_clean[df_clean.total_of_special_requests != 0] non_special = df_clean[df_clean.total_of_special_requests == 0] special # Plots spec = sns.countplot(special.reservation_status, ax = ax[0], palette = 'winter') spec.set(title = 'Guest with special requests', xlabel = 'Reservation_status') n_spec = sns.countplot(non_special.reservation_status, ax = ax[1], palette = 'winter') n_spec.set(title = 'Guest without special requests', ylabel = "", xlabel = 'Reservation_status') #Stays in weekend Nights and Week Nights for Cancelled and non cancelled bookings fig, ax = plt.subplots(1, 2, figsize=[18, 8]) sns.violinplot( x=df_clean["is_canceled"], y=df_clean["stays_in_weekend_nights"], ax = ax[0] ) sns.violinplot( x=df_clean["is_canceled"], y=df_clean["stays_in_week_nights"], ax = ax[1] ) #sns.pairplot(df_clean,kind="scatter", diag_kind="kde", hue="is_canceled") # Computing the correlation matrix # # This is the association between numerical variables in the dataset # ###Output _____no_output_____ ###Markdown Observations from the Bivariate Analysis1. We see that it is more likely for customers from the city hotel to cancel their booking than the ones from the resort hotel2. For customer type, the transient customers were most likely to cancel while groups cancelled least3. We also saw that the guests who put no deposit were very likely to cancel because they had nothing to lose while the ones who put a refundable deposit didn't cancel at all 4. 
The bivariate analysis also shows that it was by far less likely for repeated guests to cancel compared to their new counterparts ###Code df_clean2=df_clean.copy() # Label encoding to change our categorical variables to numerical to be able to create our correlation matrix from sklearn.preprocessing import LabelEncoder en = LabelEncoder() df_clean['hotel'] = en.fit_transform(df_clean['hotel']) df_clean['arrival_date_month'] = en.fit_transform(df_clean['arrival_date_month']) df_clean['country'] = en.fit_transform(df_clean['country']) df_clean['meal'] = en.fit_transform(df_clean['meal']) df_clean['reserved_room_type'] = en.fit_transform(df_clean['reserved_room_type']) df_clean['assigned_room_type'] = en.fit_transform(df_clean['assigned_room_type']) df_clean['deposit_type'] = en.fit_transform(df_clean['deposit_type']) df_clean['customer_type'] = en.fit_transform(df_clean['customer_type']) df_clean['reservation_status'] = en.fit_transform(df_clean['reservation_status']) df_clean from sklearn.model_selection import train_test_split # Splitting data into Input and Target Variable y = df_clean[['is_canceled']] # target variable X = df_clean.drop(['is_canceled'], axis=1) # input variables # Assign variables to capture train test split output X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=101) # Standardising the X_train and the X_test to the same scale # from sklearn.preprocessing import StandardScaler scaler = StandardScaler() X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test) #creating model. creat regressor object and train it using train data # # Building the Logistic Regression model from sklearn.linear_model import LogisticRegression clf_lr = LogisticRegression(random_state = 1) lr_baseline_model = clf_lr.fit(X_train,y_train) # Noting down accuracy of the baseline model from sklearn.metrics import accuracy_score, f1_score # finding our predicted values y_pred = lr_baseline_model.predict(X_test) # Calculating the accuracy and F1 Score by comparing the actual and predicted values ac = accuracy_score(y_test, y_pred) f_score = f1_score(y_test ,y_pred) print("Baseline Model Accuracy:", ac) print("Baseline Model F1 Score:", f_score) from sklearn.feature_selection import RFE #importing RFE class from sklearn library rfe = RFE(estimator= clf_lr , step = 1) # estimator clf_lr is the baseline model (basic model) that we have created under "Base line Model" selection # step = 1: removes one feature at a time and then builds a model on the remaining features # It uses the model accuracy to identify which features (and combination of features) contribute the most to predicting the target variable. # we can even provide no. 
of features as an argument # Fit the function for ranking the features fit = rfe.fit(X_train, y_train) print("Num Features: %d" % fit.n_features_) print("Selected Features: %s" % fit.support_) print("Feature Ranking: %s" % fit.ranking_) X.info()#0,1,4,8,9,13,14,15,18,19 selected_rfe_features = pd.DataFrame({'Feature':list(X.iloc[:, 0:]), 'Ranking':rfe.ranking_}) selected_rfe_features.sort_values(by='Ranking') df2 = df_clean[['hotel', 'deposit_type','assigned_room_type','reserved_room_type','total_of_special_requests','meal','country','stays_in_week_nights','arrival_date_month','reservation_status']] df2.info() df2.columns df_clean2=df_clean2[['hotel', 'deposit_type', 'assigned_room_type', 'reserved_room_type', 'total_of_special_requests', 'meal', 'country', 'stays_in_week_nights', 'arrival_date_month', 'reservation_status']] df_clean2.info() df_clean2_copy = df_clean2.copy() #creating dummies for the categorical columns columns = df_clean2.columns df_clean2 = pd.get_dummies(df_clean2, columns=columns, drop_first=True) df_clean2.head() ##Defining our dependent and Independent Variables y = df_clean[['is_canceled']] X = df_clean2 # Implementing the Solution import numpy as np from sklearn import metrics from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor #Scaling the data from sklearn.preprocessing import StandardScaler scaler = StandardScaler() X = scaler.fit_transform(X) # Using the 80-20 to train and test the model. from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) X_train.shape, y_train.shape from sklearn.svm import SVC from sklearn import metrics ###Output _____no_output_____ ###Markdown Linear SVM ###Code X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42) # Instance of svm model linear = SVC(kernel = 'linear') # Train the model using the training set print(linear.fit(X_train,y_train)) # Predict the response for the test set linear.fit(X_train,y_train) y_pred_linear = linear.predict(X_test) #Checking performance our model with ROC Score. 
from sklearn.metrics import roc_auc_score roc_a = roc_auc_score(y_test, y_pred_linear) f1_a = f1_score(y_test, y_pred_linear) print('The roc score for the linear model is:',roc_a) print('The f1 score for the linear model is:', f1_a) ###Output The roc score for the linear model is: 1.0 The f1 score for the linear model is: 1.0 ###Markdown Poly SVM ###Code poly_classifier = SVC(kernel = 'poly') # Train the model using the training set print(poly_classifier.fit(X_train,y_train)) # Predict the response for the test set # poly_classifier.fit(X_train,y_train) y_pred_poly = poly_classifier.predict(X_test) # Calculating the roc_auc scores and F1 Score by comparing the actual and predicted values roc_b = roc_auc_score(y_test, y_pred_poly) f1_b = f1_score(y_test, y_pred_poly) print('The roc score for the linear model is:',roc_b) print('The f1 score for the linear model is:', f1_b) ###Output SVC(C=1.0, break_ties=False, cache_size=200, class_weight=None, coef0=0.0, decision_function_shape='ovr', degree=3, gamma='scale', kernel='poly', max_iter=-1, probability=False, random_state=None, shrinking=True, tol=0.001, verbose=False) The roc score for the linear model is: 0.6918261467329464 The f1 score for the linear model is: 0.5558194774346794 ###Markdown RBF model ###Code # Building and training model rbf = SVC(kernel='rbf') # Train the model using the training set print(rbf.fit(X_train, y_train)) rbf.fit(X_train, y_train) # y prediction y_pred_rbf = rbf.predict(X_test) #Calculating the roc_auc scores and F1 Score by comparing the actual and predicted values roc_c = roc_auc_score(y_test, y_pred_rbf) f1_c = f1_score(y_test, y_pred_rbf) print('The roc score for the rbf model is:',roc_c) print('The f1 score for the rbf model is:', f1_c) # Reviewing the Solution # Models vs f1 score. models = ['linear model', 'polynomial model', 'rbf model'] f1_scores = [f1_a,f1_b,f1_c] roc_auc_score = [roc_a,roc_b,roc_c] model_results = pd.DataFrame({'SVM models': models, 'f1_scores': f1_scores, 'roc_auc_score':roc_auc_score}) print(model_results) print("\n") print("**************") print("\n") print("The mean f1 scores is:", model_results.mean()) #model_results.sort_values(by = 'f1_scores',axis=0,ascending=False) ###Output SVM models f1_scores roc_auc_score 0 linear model 1.000000 1.000000 1 polynomial model 0.555819 0.691826 2 rbf model 0.975610 0.979442 ************** The mean f1 scores is: f1_scores 0.843810 roc_auc_score 0.890423 dtype: float64 ###Markdown Notice that the liear model tends to overfit and thus the 100% accuracy may not be as accurate as possible. Comparing between the polynomial and rbf models, the rbf model outperforms the polynomial model. I will take the this model and tune the parameters to see how it affects our model's performance. > We did not need to plot a graph but we were able to get some pretty accurate results. One major resaon behind this is that we were dealing with multiple features so it would have been pretty hard to visualize. Hyperparameter tuning The RBF model is very sensitive to Gamma. >If gamma is too large, may lead to overfitting >If gamma is too small, the model is too constrained ie underfitted. >We'll use the random search cv to get the best parameters. ###Code #Grid Search Technique. 
Create a scorer function by using make_scorer from sklearn.metrics import f1_score, make_scorer from sklearn.model_selection import GridSearchCV f1_scorer=metrics.make_scorer(f1_score) #create a grid of parameters param_grid={ 'kernel': ['rbf'], 'C': [0.1, 1.0, 8, 16, 32], 'gamma': [0.1,1,10,100,1000,10000] } classifier=SVC() model=GridSearchCV(estimator=classifier, param_grid=param_grid, scoring=f1_scorer, verbose=10, n_jobs=4, cv=5) model.fit(X_train, y_train) print("Best score: %0.3f" % model.best_score_) print("Best Parameters set:") best_parameters=model.best_estimator_.get_params() for param_name in sorted(param_grid.keys()): print("\t%s: %r" % (param_name, best_parameters[param_name])) ###Output Fitting 5 folds for each of 30 candidates, totalling 150 fits ###Markdown Implementing the best parameters ###Code # Building and training model rbf = SVC(kernel='rbf', C=8, gamma=0.1) # Train the model using the training set print(rbf.fit(X_train, y_train)) rbf.fit(X_train, y_train) # y prediction y_pred_rbf = rbf.predict(X_test) f1_rbf_a= f1_score(y_test, y_pred_rbf) print('\n The f1 score for the tuned model is:', f1_rbf_a) ###Output SVC(C=8, break_ties=False, cache_size=200, class_weight=None, coef0=0.0, decision_function_shape='ovr', degree=3, gamma=0.1, kernel='rbf', max_iter=-1, probability=False, random_state=None, shrinking=True, tol=0.001, verbose=False) The f1 score for the tuned model is: 0.9153936545240894
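###Markdown It is worth noting that the tuned model's test f1 score (about 0.915) is actually lower than the default rbf kernel's (about 0.976), a reminder that parameters selected by cross-validation on the training folds do not always improve the held-out score. To put the tuned model on the same footing as the earlier comparison table, its ROC AUC can be reported as well. A small sketch — `roc_auc_score` has to be re-imported here because that name was reassigned to a plain list in the results cell above: ###Code
from sklearn.metrics import roc_auc_score  # re-import: the name was shadowed by a list earlier

roc_rbf_tuned = roc_auc_score(y_test, y_pred_rbf)
print('The roc score for the tuned rbf model is:', roc_rbf_tuned)
print('The f1 score for the tuned rbf model is:', f1_rbf_a)
###Output _____no_output_____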
_infty/2018/01/jp/11.ipynb
###Markdown 11. Limits, Convergence, and Estimation [Inference Theory 1](https://lamastex.github.io/scalable-data-science/infty/2018/01/)&copy;2018 Raazesh Sainudiin. [Attribution 4.0 International (CC BY 4.0)](https://creativecommons.org/licenses/by/4.0/) Inference and Estimation: The Big Picture- Limits - Limits of Sequences of Real Numbers - Limits of Functions - Limit of a Sequence of Random Variables- Convergence in Distribution- Convergence in Probability- Some Basic Limit Laws in Statistics- Weak Law of Large Numbers- Central Limit Theorem Inference and Estimation: The Big PictureThe Markov Chains we discussed earlier fit into our Big Picture, which is about inference and estimation and especially inference and estimation problems where computational techniques are helpful. &nbsp;Point estimationSet estimationParametric&nbsp;MLE of finitely many parametersdoneConfidence intervals, via the central limit theoremNon-parametric (infinitely many parameters)coming up ... &nbsp;coming up ...One/Many-dimensional Integrals (finite-dimensional)coming up ... &nbsp;coming up ...But before we move on we have to discuss what makes it all work: the idea of limits - where do you get to if you just keep going? LimitsLast week we described a Markov Chain, informally, as a system which "jumps" among several states, with the next state depending (probabilistically) only on the current state. Since the system changes randomly, it is generally impossible to predict the exact state of the system in the future. However, the statistical and probailistic properties of the system's future can be predicted. In many applications it is these statistical properties that are important. We saw how we could find a steady state vector:$$\mathbf{s} = \lim_{n \to \infty} \mathbf{p}^{(n)}$$(And we noted that $\mathbf{p}^{(n)}$ only converges to a strictly positive vector if $\mathbf{P}$ is a regular transition matrix.)The week before, we talked about the likelihood function and maximum likelihood estimators for making point estimates of model parameters. For example for the $Bernoulli(\theta^*)$ RV (a $Bernoulli$ RV with true but possibly unknown parameter $\theta^*$, we found that the likelihood function was $L_n(\theta) = \theta^{t_n}(1-\theta)^{(n-t_n)}$ where $t_n = \displaystyle\sum_{i=1}^n x_i$. We also found the maxmimum likelihood estimator (MLE) for the $Bernoulli$ model, $\widehat{\theta}_n = \frac{1}{n}\displaystyle\sum_{i=1}^n x_i$. We demonstrated these ideas using samples simulated from a $Bernoulli$ process with a secret $\theta^*$. We had an interactive plot of the likelihood function where we could increase $n$, the number of simulated samples or the amount of data we had to base our estimate on, and see the effect on the shape of the likelihood function. The animation belows shows the changing likelihood function for the Bernoulli process with unknown $\theta^*$ as $n$ (the amount of data) increases. Likelihood function for Bernoulli process, as $n$ goes from 1 to 1000 in a continuous loop. For large $n$, you can probably make your own guess about the true value of $\theta^*$ even without knowing $t_n$. As the animation progresses, we can see the likelihood function 'homing in' on $\theta = 0.3$. We can see this in another way, by just looking at the sample mean as $n$ increases. 
An easy way to do this is with running means: generate a very large sample and then calculate the mean first over just the first observation in the sample, then the first two, first three, etc etc (running means were discussed in an earlier worksheet if you want to go back and review them in detail in your own time). Here we just define a function so that we can easily generate sequences of running means for our $Bernoulli$ process with the unknown $\theta^*$. Preparation: Let's just evaluate the next cel and focus on concepts.You can see what they are as you need to. ###Code def likelihoodBernoulli(theta, n, tStatistic): '''Bernoulli likelihood function. theta in [0,1] is the theta to evaluate the likelihood at. n is the number of observations. tStatistic is the sum of the n Bernoulli observations. return a value for the likelihood of theta given the n observations and tStatistic.''' retValue = 0 # default return value if (theta >= 0 and theta <= 1): # check on theta mpfrTheta = RR(theta) # make sure we use a Sage mpfr retValue = (mpfrTheta^tStatistic)*(1-mpfrTheta)^(n-tStatistic) return retValue def bernoulliFInverse(u, theta): '''A function to evaluate the inverse CDF of a bernoulli. Param u is the value to evaluate the inverse CDF at. Param theta is the distribution parameters. Returns inverse CDF under theta evaluated at u''' return floor(u + theta) def bernoulliSample(n, theta, simSeed=None): '''A function to simulate samples from a bernoulli distribution. Param n is the number of samples to simulate. Param theta is the bernoulli distribution parameter. Param simSeed is a seed for the random number generator, defaulting to 30. Returns a simulated Bernoulli sample as a list.''' set_random_seed(simSeed) us = [random() for i in range(n)] set_random_seed(None) return [bernoulliFInverse(u, theta) for u in us] # use bernoulliFInverse in a list comprehension def bernoulliSampleSecretTheta(n, theta=0.30, simSeed=30): '''A function to simulate samples from a bernoulli distribution. Param n is the number of samples to simulate. Param theta is the bernoulli distribution parameter. Param simSeed is a seed for the random number generator, defaulting to 30. Returns a simulated Bernoulli sample as a list.''' set_random_seed(simSeed) us = [random() for i in range(n)] set_random_seed(None) return [bernoulliFInverse(u, theta) for u in us] # use bernoulliFInverse in a list comprehension def bernoulliRunningMeans(n, myTheta, mySeed = None): '''Function to give a list of n running means from bernoulli with specified theta. Param n is the number of running means to generate. Param myTheta is the theta for the Bernoulli distribution Param mySeed is a value for the seed of the random number generator, defaulting to None.''' sample = bernoulliSample(n, theta=myTheta, simSeed = mySeed) from pylab import cumsum # we can import in the middle of code csSample = list(cumsum(sample)) samplesizes = range(1, n+1,1) return [RR(csSample[i])/samplesizes[i] for i in range(n)] #return a plot object for BernoulliLikelihood using the secret theta bernoulli generator def plotBernoulliLikelihoodSecretTheta(n): '''Return a plot object for BernoulliLikelihood using the secret theta bernoulli generator. 
Param n is the number of simulated samples to generate and do likelihood plot for.''' thisBSample = bernoulliSampleSecretTheta(n) # make sample tn = sum(thisBSample) # summary statistic from pylab import arange ths = arange(0,1,0.01) # get some values to plot against liks = [likelihoodBernoulli(t,n,tn) for t in ths] # use the likelihood function to generate likelihoods redshade = 1*n/1000 # fancy colours blueshade = 1 - redshade return line(zip(ths, liks), rgbcolor = (redshade, 0, blueshade)) def cauchyFInverse(u): '''A function to evaluate the inverse CDF of a standard Cauchy distribution. Param u is the value to evaluate the inverse CDF at.''' return RR(tan(pi*(u-0.5))) def cauchySample(n): '''A function to simulate samples from a standard Cauchy distribution. Param n is the number of samples to simulate.''' us = [random() for i in range(n)] return [cauchyFInverse(u) for u in us] def cauchyRunningMeans(n): '''Function to give a list of n running means from standardCauchy. Param n is the number of running means to generate.''' sample = cauchySample(n) from pylab import cumsum csSample = list(cumsum(sample)) samplesizes = range(1, n+1,1) return [RR(csSample[i])/samplesizes[i] for i in range(n)] def twoRunningMeansPlot(nToPlot, iters): '''Function to return a graphics array containing plots of running means for Bernoulli and Standard Cauchy. Param nToPlot is the number of running means to simulate for each iteration. Param iters is the number of iterations or sequences of running means or lines on each plot to draw. Returns a graphics array object containing both plots with titles.''' xvalues = range(1, nToPlot+1,1) for i in range(iters): shade = 0.5*(iters - 1 - i)/iters # to get different colours for the lines bRunningMeans = bernoulliSecretThetaRunningMeans(nToPlot) cRunningMeans = cauchyRunningMeans(nToPlot) bPts = zip(xvalues, bRunningMeans) cPts = zip(xvalues, cRunningMeans) if (i < 1): p1 = line(bPts, rgbcolor = (shade, 0, 1)) p2 = line(cPts, rgbcolor = (1-shade, 0, shade)) cauchyTitleMax = max(cRunningMeans) # for placement of cauchy title else: p1 += line(bPts, rgbcolor = (shade, 0, 1)) p2 += line(cPts, rgbcolor = (1-shade, 0, shade)) if max(cRunningMeans) > cauchyTitleMax: cauchyTitleMax = max(cRunningMeans) titleText1 = "Bernoulli running means" # make title text t1 = text(titleText1, (nToGenerate/2,1), rgbcolor='blue',fontsize=10) titleText2 = "Standard Cauchy running means" # make title text t2 = text(titleText2, (nToGenerate/2,ceil(cauchyTitleMax)+1), rgbcolor='red',fontsize=10) return graphics_array((p1+t1,p2+t2)) def pmfPointMassPlot(theta): '''Returns a pmf plot for a point mass function with parameter theta.''' ptsize = 10 linethick = 2 fudgefactor = 0.07 # to fudge the bottom line drawing pmf = points((theta,1), rgbcolor="blue", pointsize=ptsize) pmf += line([(theta,0),(theta,1)], rgbcolor="blue", linestyle=':') pmf += points((theta,0), rgbcolor = "white", faceted = true, pointsize=ptsize) pmf += line([(min(theta-2,-2),0),(theta-0.05,0)], rgbcolor="blue",thickness=linethick) pmf += line([(theta+.05,0),(theta+2,0)], rgbcolor="blue",thickness=linethick) pmf+= text("Point mass f", (theta,1.1), rgbcolor='blue',fontsize=10) pmf.axes_color('grey') return pmf def cdfPointMassPlot(theta): '''Returns a cdf plot for a point mass function with parameter theta.''' ptsize = 10 linethick = 2 fudgefactor = 0.07 # to fudge the bottom line drawing cdf = line([(min(theta-2,-2),0),(theta-0.05,0)], rgbcolor="blue",thickness=linethick) # padding cdf += points((theta,1), rgbcolor="blue", 
pointsize=ptsize) cdf += line([(theta,0),(theta,1)], rgbcolor="blue", linestyle=':') cdf += line([(theta,1),(theta+2,1)], rgbcolor="blue", thickness=linethick) # padding cdf += points((theta,0), rgbcolor = "white", faceted = true, pointsize=ptsize) cdf+= text("Point mass F", (theta,1.1), rgbcolor='blue',fontsize=10) cdf.axes_color('grey') return cdf def uniformFInverse(u, theta1, theta2): '''A function to evaluate the inverse CDF of a uniform(theta1, theta2) distribution. u, u should be 0 <= u <= 1, is the value to evaluate the inverse CDF at. theta1, theta2, theta2 > theta1, are the uniform distribution parameters.''' return theta1 + (theta2 - theta1)*u def uniformSample(n, theta1, theta2): '''A function to simulate samples from a uniform distribution. n > 0 is the number of samples to simulate. theta1, theta2 (theta2 > theta1) are the uniform distribution parameters.''' us = [random() for i in range(n)] return [uniformFInverse(u, theta1, theta2) for u in us] def exponentialFInverse(u, lam): '''A function to evaluate the inverse CDF of a exponential distribution. u is the value to evaluate the inverse CDF at. lam is the exponential distribution parameter.''' # log without a base is the natural logarithm return (-1.0/lam)*log(1 - u) def exponentialSample(n, lam): '''A function to simulate samples from an exponential distribution. n is the number of samples to simulate. lam is the exponential distribution parameter.''' us = [random() for i in range(n)] return [exponentialFInverse(u, lam) for u in us] ###Output _____no_output_____ ###Markdown To get back to our running means of Bernoullin RVs: ###Code def bernoulliSecretThetaRunningMeans(n, mySeed = None): '''Function to give a list of n running means from Bernoulli with unknown theta. Param n is the number of running means to generate. Param mySeed is a value for the seed of the random number generator, defaulting to None Note: the unknown theta parameter for the Bernoulli process is defined in bernoulliSampleSecretTheta Return a list of n running means.''' sample = bernoulliSampleSecretTheta(n, simSeed = mySeed) from pylab import cumsum # we can import in the middle of code csSample = list(cumsum(sample)) samplesizes = range(1, n+1,1) return [RR(csSample[i])/samplesizes[i] for i in range(n)] ###Output _____no_output_____ ###Markdown Now we can use this function to look at say 5 different sequences of running means (they will be different, because for each iteration, we will simulate a different sample of $Bernoulli$ observations). ###Code nToGenerate = 1500 iterations = 5 xvalues = range(1, nToGenerate+1,1) for i in range(iterations): redshade = 0.5*(iterations - 1 - i)/iterations # to get different colours for the lines bRunningMeans = bernoulliSecretThetaRunningMeans(nToGenerate) pts = zip(xvalues,bRunningMeans) if (i == 0): p = line(pts, rgbcolor = (redshade,0,1)) else: p += line(pts, rgbcolor = (redshade,0,1)) show(p, figsize=[5,3], axes_labels=['n','sample mean']) ###Output _____no_output_____ ###Markdown What we notice is how the different lines **converge** on a sample mean of close to 0.3. Is life always this easy? Unfortunately no. In the plot below we show the well-behaved running means for the $Bernoulli$ and beside them the running means for simulated standard $Cauchy$ random variables. They are all over the place, and each time you re-evaluate the cell you'll get different all-over-the-place behaviour. 
###Code nToGenerate = 15000 iterations = 5 g = twoRunningMeansPlot(nToGenerate, iterations) # uses above function to make plot show(g,figsize=[10,5]) ###Output _____no_output_____ ###Markdown We talked about the Cauchy in more detail in an earlier notebook. If you cannot recall the detail and are interested, go back to that in your own time. The message here is that although with the Bernoulli process, the sample means converge as the number of observations increases, with the Cauchy they do not. We talked about $\mathbf{p}^{(n)}$, for $\mathbf{p}$ our Markov Chain transition matrix, converging. We talked about sample means converging (or not). What do we actually mean by *converge*? These ideas of convergence and limits are fundamental to data science: we need to be able to justify that the way we are attacking a problem will give us the *right* answer. (At its very simplest, how do we justify that, by generating lots of simulations, we can get to some good approximation for a probability or an integral or a sum?) The advantages of an MLE as a point estimate in parametric estimation all come back to limits and convergence (remember how the likelihood function 'homed in' as the amount of data increased). And, as we will see when we do non-parametric estimation, limits and convergence are also fundamental there. Limits of a Sequence of Real NumbersA sequence of real numbers $x_1, x_2, x_3, \ldots $ (which we can also write as $\{ x_i\}_{i=1}^\infty$) is said to converge to a limit $a \in \mathbb{R}$,$$\underset{i \rightarrow \infty}{\lim} x_i = a$$if for every natural number $m \in \mathbb{N}$, a natural number $N_m \in \mathbb{N}$ exists such that for every $j \geq N_m$, $\left|x_j - a\right| \leq \frac{1}{m}$What is this saying? $\left|x_j - a\right|$ is measuring the closeness of the $j$th value in the sequence to $a$. If we pick bigger and bigger $m$, $\frac{1}{m}$ will get smaller and smaller. The definition of the limit is saying that if $a$ is the limit of the sequence then we can get the sequence to become as close as we want ('arbitrarily close') to $a$, and to stay that close, by going far enough into the sequence ('for every $j \geq N_m$, $\left|x_j - a\right| \leq \frac{1}{m}$')($\mathbb{N}$, the natural numbers, are just the 'counting numbers' $\{1, 2, 3, \ldots\}$.) Take a trivial example, the sequence $\{x_i\}_{i=1}^\infty = 17, 17, 17, \ldots$Clearly, $\underset{i \rightarrow \infty}{\lim} x_i = 17$, but let's do this formally:For every $m \in \mathbb{N}$, take $N_m =1$, then$\forall$ $j \geq N_m=1, \left|x_j -17\right| = \left|17 - 17\right| = 0 \leq \frac{1}{m}$, as required.($\forall$ is mathspeak for 'for all' or 'for every')What about $\{x_i\}_{i=1}^\infty = \displaystyle\frac{1}{1}, \frac{1}{2}, \frac{1}{3}, \ldots$, i.e., $x_i = \frac{1}{i}$?$\underset{i \rightarrow \infty}{\lim} x_i = \underset{i \rightarrow \infty}{\lim}\frac{1}{i} = 0$For every $m \in \mathbb{N}$, take $N_m = m$, then $\forall$ $j \geq m$, $\left|x_j - 0\right| \leq \left |\frac{1}{m} - 0\right| = \frac{1}{m}$ YouTryThink about $\{x_i\}_{i=1}^\infty = \frac{1}{1^p}, \frac{1}{2^p}, \frac{1}{3^p}, \ldots$ with $p > 0$. The limit$\underset{i \rightarrow \infty}{\lim} \displaystyle\frac{1}{i^p} = 0$, provided $p > 0$.You can draw the plot of this very easily using the Sage symbolic expressions we have already met (`f.subs(...)` allows us to substitute a particular value for one of the symbolic variables in the symbolic function `f`, in this case a value to use for $p$). 
###Code var('i, p') f = 1/(i^p) # make and show plot, note we can use f in the label plot(f.subs(p=1), (x, 0.1, 3), axes_labels=('i',f)).show(figsize=[6,3]) ###Output _____no_output_____ ###Markdown What about $\{x_i\}_{i=1}^\infty = 1^{\frac{1}{1}}, 2^{\frac{1}{2}}, 3^{\frac{1}{3}}, \ldots$. The limit$\underset{i \rightarrow \infty}{\lim} i^{\frac{1}{i}} = 1$.This one is not as easy to see intuitively, but again we can plot it with SageMath. ###Code var('i') f = i^(1/i) n=500 p=plot(f.subs(p=1), (x, 0, n), axes_labels=('i',f)) # main plot p+=line([(0,1),(n,1)],linestyle=':') # add a dotted line at height 1 p.show(figsize=[6,3]) # show the plot ###Output _____no_output_____ ###Markdown Finally, $\{x_i\}_{i=1}^\infty = p^{\frac{1}{1}}, p^{\frac{1}{2}}, p^{\frac{1}{3}}, \ldots$, with $p > 0$. The limit$\underset{i \rightarrow \infty}{\lim} p^{\frac{1}{i}} = 1$ provided $p > 0$.You can cut and paste (with suitable adaptations) to try to plot this one as well ... ###Code x ###Output _____no_output_____ ###Markdown (end of You Try)---*back to the real stuff ...* Limits of FunctionsWe say that a function $f(x): \mathbb{R} \rightarrow \mathbb{R}$ has a limit $L \in \mathbb{R}$ as $x$ approaches $a$:$$\underset{x \rightarrow a}{\lim} f(x) = L$$provided $f(x)$ is arbitrarily close to $L$ for all ($\forall$) values of $x$ that are sufficiently close to but not equal to $a$.For exampleConsider the function $f(x) = (1+x)^{\frac{1}{x}}$$\underset{x \rightarrow 0}{\lim} f(x) = \underset{x \rightarrow 0}{\lim} (1+x)^{\frac{1}{x}} = e \approx 2.71828\cdots$even though $f(0) = (1+0)^{\frac{1}{0}}$ is undefined! ###Code # x is defined as a symbolic variable by default by Sage so we do not need var('x') f = (1+x)^(1/x) f.subs(x=0) # this will give you an error message ###Output _____no_output_____ ###Markdown BUT: If you are intersted in the "Art of dividing by zero" talk to Professor Warwick Tucker in Maths Department! You can get some idea of what is going on with two plots on different scales ###Code f = (1+x)^(1/x) n1=5 p1=plot(f.subs(p=1), (x, 0.001, n1), axes_labels=('x',f)) # main plot t1 = text("Large scale plot", (n1/2,e), rgbcolor='blue',fontsize=10) n2=0.1 p2=plot(f.subs(p=1), (x, 0.0000001, n2), axes_labels=('x',f)) # main plot p2+=line([(0,e),(n2,e)],linestyle=':') # add a dotted line at height e t2 = text("Small scale plot", (n2/2,e+.01), rgbcolor='blue',fontsize=10) show(graphics_array((p1+t1,p2+t2)),figsize=[6,3]) # show the plot ###Output _____no_output_____ ###Markdown all this has been laying the groundwork for the topic of real interest to us ... Limit of a Sequence of Random VariablesWe want to be able to say things like $\underset{i \rightarrow \infty}{\lim} X_i = X$ in some sensible way. $X_i$ are some random variables, $X$ is some 'limiting random variable', but what do we mean by 'limiting random variable'?To help us, lets introduce a very very simple random variable, one that puts all its mass in one place. 
###Code theta = 2.0 show(graphics_array((pmfPointMassPlot(theta),cdfPointMassPlot(theta))),\ figsize=[8,2]) # show the plots ###Output _____no_output_____ ###Markdown This is known as the $Point\,Mass(\theta)$ random variable, $\theta \in \mathbb(R)$: the density $f(x)$ is 1 if $x=\theta$ and 0 everywhere else$$f(x;\theta) =\begin{cases}0 & \text{ if } x \neq \theta \\1 & \text{ if } x = \theta\end{cases}$$$$F(x;\theta) =\begin{cases}0 & \text{ if } x < \theta \\1 & \text{ if } x \geq \theta\end{cases}$$So, if we had some sequence $\{\theta_i\}_{i=1}^\infty$ and $\underset{i \rightarrow \infty}{\lim} \theta_i = \theta$and we had a sequence of random variables $X_i \sim Point\,Mass(\theta_i)$, $i = 1, 2, 3, \ldots$then we could talk about a limiting random variable as $X \sim Point\,Mass(\theta)$:i.e., we could talk about $\underset{i \rightarrow \infty}{\lim} X_i = X$ ###Code # mock up a picture of a sequence of point mass rvs converging on theta = 0 ptsize = 20 i = 1 theta_i = 1/i p = points((theta_i,1), rgbcolor="blue", pointsize=ptsize) p += line([(theta_i,0),(theta_i,1)], rgbcolor="blue", linestyle=':') while theta_i > 0.01: i+=1 theta_i = 1/i p += points((theta_i,1), rgbcolor="blue", pointsize=ptsize) p += line([(theta_i,0),(theta_i,1)], rgbcolor="blue", linestyle=':') p += points((0,1), rgbcolor="red", pointsize=ptsize) p += line([(0,0),(0,1)], rgbcolor="red", linestyle=':') p.show(xmin=-1, xmax = 2, ymin=0, ymax = 1.1, axes=false, gridlines=[None,[0]], \ figsize=[7,2]) ###Output _____no_output_____ ###Markdown Now, we want to generalise this notion of a limit to other random variables (that are not necessarily $Point\,Mass(\theta_i)$ RVs)What about one many of you will be familiar with - the 'bell-shaped curve' The $Gaussian(\mu, \sigma^2)$ or $Normal(\mu, \sigma^2)$ RV?The probability density function (PDF) $f(x)$ is given by$$f(x ;\mu, \sigma) = \displaystyle\frac{1}{\sigma\sqrt{2\pi}}\exp\left(\frac{-1}{2\sigma^2}(x-\mu)^2\right)$$The two parameters, $\mu$ and $\sigma$, are sometimes referred to as the location and scale parameters.To see why this is, use the interactive plot below to have a look at what happens to the shape of the density function $f(x)$ when you change $\mu$ or increase or decrease $\sigma$: ###Code @interact def _(my_mu=input_box(0, label='mu') ,my_sigma=input_box(1,label='sigma')): '''Interactive function to plot the normal pdf and ecdf.''' if my_sigma > 0: html('<h4>Normal('+str(my_mu)+','+str(my_sigma)+'<sup>2</sup>)</h4>') var('mu sigma') f = (1/(sigma*sqrt(2.0*pi)))*exp(-1.0/(2*sigma^2)*(x - mu)^2) p1=plot(f.subs(mu=my_mu,sigma=my_sigma), (x, my_mu - 3*my_sigma - 2, my_mu + 3*my_sigma + 2), axes_labels=('x','f(x)')) show(p1,figsize=[8,3]) else: print "sigma must be greater than 0" ###Output _____no_output_____ ###Markdown Consider the sequence of random variables $X_1, X_2, X_3, \ldots$, where- $X_1 \sim Normal(0, 1)$- $X_2 \sim Normal(0, \frac{1}{2})$- $X_3 \sim Normal(0, \frac{1}{3})$- $X_4 \sim Normal(0, \frac{1}{4})$- $\vdots$- $X_i \sim Normal(0, \frac{1}{i})$- $\vdots$We can use the animation below to see how the PDF $f_{i}(x)$ looks as we move through the sequence of $X_i$ (the animation only goes to $i = 25$, $\sigma = 0.04$ but you get the picture ...) 
Normal curve animation, looping through $\sigma = \frac{1}{i}$ for $i = 1, \dots, 25$ We can see that the probability mass of $X_i \sim Normal(0, \frac{1}{i})$ increasingly concentrates about 0 as $i \rightarrow \infty$ and $\frac{1}{i} \rightarrow 0$Does this mean that $\underset{i \rightarrow \infty}{\lim} X_i = Point\,Mass(0)$?No, because for any $i$, however large, $P(X_i = 0) = 0$ because $X_i$ is a continuous RV (for any continous RV $X$, for any $x \in \mathbb{R}$, $P(X=x) = 0$).So, we need to refine our notions of convergence when we are dealing with random variables Convergence in DistributionLet $X_1, X_2, \ldots$ be a sequence of random variables and let $X$ be another random variable. Let $F_i$ denote the distribution function (DF) of $X_i$ and let $F$ denote the distribution function of $X$.Now, if for any real number $t$ at which $F$ is continuous,$$\underset{i \rightarrow \infty}{\lim} F_i(t) = F(t)$$(in the sense of the convergence or limits of functions we talked about earlier)Then we can say that the sequence or RVs $X_i$, $i = 1, 2, \ldots$ **converges to $X$ in distribution** and write $X_i \overset{d}{\rightarrow} X$.An equivalent way of defining convergence in distribution is to go right back to the meaning of the probabilty space 'under the hood' of a random variable, a random variable $X$ as a mapping from the sample space $\Omega$ to the real line ($X: \Omega \rightarrow \mathbb{R}$), and the sample points or outcomes in the sample space, the $\omega \in \Omega$. For $\omega \in \Omega$, $X(\omega)$ is the mapping of $\omega$ to the real line $\mathbb{R}$. We could look at the set of $\omega$ such that $X(\omega) \leq t$, i.e. the set of $\omega$ that map to some value on the real line less than or equal to $t$, $\{\omega: X(\omega) \leq t \}$. Saying that for any $t \in \mathbb{R}$, $\underset{i \rightarrow \infty}{\lim} F_i(t) = F(t)$ is the equivalent of saying that for any $t \in \mathbb{R}$, $$\underset{i \rightarrow \infty}{\lim} P\left(\{\omega:X_i(\omega) \leq t \}\right) = P\left(\{\omega: X(\omega) \leq t\right)$$Armed with this, we can go back to our sequence of $Normal$ random variables $X_1, X_2, X_3, \ldots$, where- $X_1 \sim Normal(0, 1)$- $X_2 \sim Normal(0, \frac{1}{2})$- $X_3 \sim Normal(0, \frac{1}{3})$- $X_4 \sim Normal(0, \frac{1}{4})$- $\vdots$- $X_i \sim Normal(0, \frac{1}{i})$- $\vdots$and let $X \sim Point\,Mass(0)$,and say that the $X_i$ **converge in distribution** to the $x \sim Point\,Mass$ RV $X$,$$X_i \overset{d}{\rightarrow} X$$What we are saying with convergence in distribution, informally, is that as $i$ increases, we increasingly expect to see the next outcome in a sequence of random experiments becoming better and better modeled by the limiting random variable. In this case, as $i$ increases, the $Point\,Mass(0)$ is becoming a better and better model for the next outcome of a random experiment with outcomes $\sim Normal(0,\frac{1}{i})$. 
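###Markdown To make the definition concrete for this example (a short added calculation): if $X_i \sim Normal(0, \frac{1}{i})$ then its DF is $F_i(t) = \Phi\left(t\sqrt{i}\right)$, where $\Phi$ is the standard $Normal$ DF. For any fixed $t > 0$, $t\sqrt{i} \rightarrow \infty$ so $F_i(t) \rightarrow 1$, and for any fixed $t < 0$, $t\sqrt{i} \rightarrow -\infty$ so $F_i(t) \rightarrow 0$. This matches the $Point\,Mass(0)$ DF $F(t)$ at every $t \neq 0$, that is, at every point where $F$ is continuous, which is all the definition asks for.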
###Code # mock up a picture of a sequence of converging normal distributions my_mu = 0 upper = my_mu + 5; lower = -upper; # limits for plot var('mu sigma') stop_i = 12 html('<h4>N(0,1) to N(0, 1/'+str(stop_i)+')</h4>') f = (1/(sigma*sqrt(2.0*pi)))*exp(-1.0/(2*sigma^2)*(x - mu)^2) p=plot(f.subs(mu=my_mu,sigma=1.0), (x, lower, upper), rgbcolor = (0,0,1)) for i in range(2, stop_i, 1): # just do a few of them shade = 1-11/i # make them different colours p+=plot(f.subs(mu=my_mu,sigma=1/i), (x, lower, upper), rgbcolor = (1-shade, 0, shade)) textOffset = -0.2 # offset for placement of text - may need adjusting p+=text("0",(0,textOffset),fontsize = 10, rgbcolor='grey') p+=text(str(upper.n(digits=2)),(upper,textOffset),fontsize = 10, rgbcolor='grey') p+=text(str(lower.n(digits=2)),(lower,textOffset),fontsize = 10, rgbcolor='grey') p.show(axes=false, gridlines=[None,[0]], figsize=[7,3]) ###Output _____no_output_____ ###Markdown There is an interesting point to note about this convergence: We have said that the $X_i \sim Normal(0,\frac{1}{i})$ with distribution functions $F_i$ converge in distribution to $X \sim Point\,Mass(0)$ with distribution function $F$, which means that we must be able to show that for any real number $t$ at which $F$ is continuous,$$\underset{i \rightarrow \infty}{\lim} F_i(t) = F(t)$$Note that for any of the $X_i \sim Normal(0, \frac{1}{i})$, $F_i(0) = \frac{1}{2}$, and also note that for $X \sim Point,Mass(0)$, $F(0) = 1$, so clearly $F_i(0) \neq F(0)$. What has gone wrong? Nothing: we said that we had to be able to show that $\underset{i \rightarrow \infty}{\lim} F_i(t) = F(t)$ for any $t \in \mathbb{R}$ at which $F$ is continuous, but the $Point\,Mass(0)$ distribution function $F$ is not continous at 0! ###Code theta = 0.0 # show the plots show(graphics_array((pmfPointMassPlot(theta),cdfPointMassPlot(theta))),figsize=[8,2]) ###Output _____no_output_____ ###Markdown Convergence in ProbabilityLet $X_1, X_2, \ldots$ be a sequence of random variables and let $X$ be another random variable. Let $F_i$ denote the distribution function (DF) of$X_i$ and let $F$ denote the distribution function of $X$.Now, if for any real number $\varepsilon > 0$,$$\underset{i \rightarrow \infty}{\lim} P\left(|X_i - X| > \varepsilon\right) = 0$$Then we can say that the sequence $X_i$, $i = 1, 2, \ldots$ **converges to $X$ in probability** and write $X_i \overset{P}{\rightarrow} X$.Or, going back again to the probability space 'under the hood' of a random variable, we could look the way the $X_i$ maps each outcome $\omega \in \Omega$, $X_i(\omega)$, which is some point on the real line, and compare this to mapping $X(\omega)$. Saying that for any $\varepsilon \in \mathbb{R}$, $\underset{i \rightarrow \infty}{\lim} P\left(|X_i - X| > \varepsilon\right) = 0$ is the equivalent of saying that for any $\varepsilon \in \mathbb{R}$, $$\underset{i \rightarrow \infty}{\lim} P\left(\{\omega:|X_i(\omega) - X(\omega)| > \varepsilon \}\right) = 0$$Informally, we are saying $X$ is a limit in probabilty if, by going far enough into the sequence $X_i$, we can ensure that the mappings $X_i(\omega)$ and $X(\omega)$ will be arbitrarily close to each other on the real line for all $\omega \in \Omega$.**Note** that convergence in distribution is implied by convergence in probability: convergence in distribution is the weakest form of convergence; any sequence of RV's that converges in probability to some RV $X$ also converges in distribution to $X$ (but not necessarily vice versa). 
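###Markdown We can also see this definition in action numerically (a small added sketch, using only the standard library): for $X_i \sim Normal(0, \frac{1}{i})$ and $X \sim Point\,Mass(0)$, $P(|X_i - X| > \varepsilon) = 2\left(1 - \Phi(\varepsilon \sqrt{i})\right)$, which we can evaluate for increasing $i$ and watch shrink towards 0. ###Code import math

def stdNormalCDF(x):
    '''Standard normal DF via the error function.'''
    return 0.5*(1.0 + math.erf(x/math.sqrt(2.0)))

epsilon = 0.1
for i in [1, 10, 100, 1000, 10000]:
    # X_i ~ Normal(0, 1/i), so P(|X_i| > epsilon) = 2*(1 - Phi(epsilon*sqrt(i)))
    probOutside = 2.0*(1.0 - stdNormalCDF(epsilon*math.sqrt(i)))
    print(str(i) + ": " + str(probOutside))
###Output
_____no_output_____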
###Code # mock up a picture of a sequence of converging normal distributions my_mu = 0 var('mu sigma') upper = 0.2; lower = -upper i = 20 # start part way into the sequence lim = 100 # how far to go stop_i = 12 html('<h4>N(0,1/'+str(i)+') to N(0, 1/'+str(lim)+')</h4>') f = (1/(sigma*sqrt(2.0*pi)))*exp(-1.0/(2*sigma^2)*(x - mu)^2) p=plot(f.subs(mu=my_mu,sigma=1.0/i), (x, lower, upper), rgbcolor = (0,0,1)) for j in range(i, lim+1, 4): # just do a few of them shade = 1-(j-i)/(lim-i) # make them different colours p+=plot(f.subs(mu=my_mu,sigma=1/j), (x, lower,upper), rgbcolor = (1-shade, 0, shade)) textOffset = -1.5 # offset for placement of text - may need adjusting p+=text("0",(0,textOffset),fontsize = 10, rgbcolor='grey') p+=text(str(upper.n(digits=2)),(upper,textOffset),fontsize = 10, rgbcolor='grey') p+=text(str(lower.n(digits=2)),(lower,textOffset),fontsize = 10, rgbcolor='grey') p.show(axes=false, gridlines=[None,[0]], figsize=[7,3]) ###Output _____no_output_____ ###Markdown For our sequence of $Normal$ random variables $X_1, X_2, X_3, \ldots$, where- $X_1 \sim Normal(0, 1)$- $X_2 \sim Normal(0, \frac{1}{2})$- $X_3 \sim Normal(0, \frac{1}{3})$- $X_4 \sim Normal(0, \frac{1}{4})$- $\vdots$- $X_i \sim Normal(0, \frac{1}{i})$- $\vdots$and $X \sim Point\,Mass(0)$,It can be shown that the $X_i$ converge in probability to $X \sim Point\,Mass(0)$ RV $X$,$$X_i \overset{P}{\rightarrow} X$$(the formal proof of this involves Markov's Inequality, which is beyond the scope of this course). Some Basic Limit Laws in StatisticsIntuition behind Law of Large Numbers and Central Limit TheoremTake a look at the Khan academy videos on the Law of Large Numbers and the Central Limit Theorem. This will give you a working idea of these theorems. In the sequel, we will strive for a deeper understanding of these theorems on the basis of the two notions of convergence of sequences of random variables we just saw. Weak Law of Large NumbersRemember that a statistic is a random variable, so a sample mean is a random variable. If we are given a sequence of independent and identically distributed RVs, $X_1,X_2,\ldots \overset{IID}{\sim} X_1$, then we can also think of a sequence of random variables $\overline{X}_1, \overline{X}_2, \ldots, \overline{X}_n, \ldots$ ($n$ being the sample size). Since $X_1, X_2, \ldots$ are $IID$, they all have the same expection, say $E(X_1)$ by convention.If $E(X_1)$ exists, then the sample mean $\overline{X}_n$ converges in probability to $E(X_1)$ (i.e., to the expectatation of any one of the individual RVs):$$\text{If} \quad X_1,X_2,\ldots \overset{IID}{\sim} X_1 \ \text{and if } \ E(X_1) \ \text{exists, then } \ \overline{X}_n \overset{P}{\rightarrow} E(X_1) \ .$$Going back to our definition of convergence in probability, we see that this means that for any real number $\varepsilon > 0$, $\underset{n \rightarrow \infty}{\lim} P\left(|\overline{X}_n - E(X_1)| > \varepsilon\right) = 0$Informally, this means that means that, by taking larger and larger samples we can make the probability that the average of the observations is more than $\varepsilon$ away from the expected value get smaller and smaller.Proof of this is beyond the scope of this course, but we have already seen it in action when we looked at the $Bernoulli$ running means. Have another look, this time with only one sequence of running means. You can increase $n$, the sample size, and change $\theta$. Note that the seed for the random number generator is also under your control. 
This means that you can get replicable samples: in particular, in this interact, when you increase the sample size it looks as though you are just adding more to an existing sample rather than starting from scratch with a new one. ###Code @interact def _(nToGen=slider(1,1500,1,100,label='n'),my_theta=input_box(0.3,label='theta'),rSeed=input_box(1234,label='random seed')): '''Interactive function to plot running mean for a Bernoulli with specified n, theta and random number seed.''' if my_theta >= 0 and my_theta <= 1: html('<h4>Bernoulli('+str(my_theta.n(digits=2))+')</h4>') xvalues = range(1, nToGen+1,1) bRunningMeans = bernoulliRunningMeans(nToGen, myTheta=my_theta, mySeed=rSeed) pts = zip(xvalues, bRunningMeans) p = line(pts, rgbcolor = (0,0,1)) p+=line([(0,my_theta),(nToGen,my_theta)],linestyle=':',rgbcolor='grey') show(p, figsize=[5,3], axes_labels=['n','sample mean'],ymax=1) else: print 'Theta must be between 0 and 1' ###Output _____no_output_____ ###Markdown Central Limit TheoremYou have probably all heard of the Central Limit Theorem before, but now we can relate it to our definition of convergence in distribution. Let $X_1,X_2,\ldots \overset{IID}{\sim} X_1$ and suppose $E(X_1)$ and $V(X_1)$ both exist,then$$\overline{X}_n = \frac{1}{n} \sum_{i=1}^n X_i \overset{d}{\rightarrow} X \sim Normal \left(E(X_1),\frac{V(X_1)}{n} \right)$$And remember $Z \sim Normal(0,1)$?Consider $Z_n := \displaystyle\frac{\overline{X}_n-E(\overline{X}_n)}{\sqrt{V(\overline{X}_n)}} = \displaystyle\frac{\sqrt{n} \left( \overline{X}_n -E(X_1) \right)}{\sqrt{V(X_1)}}$If $\overline{X}_n = \displaystyle\frac{1}{n} \displaystyle\sum_{i=1}^n X_i \overset{d}{\rightarrow} X \sim Normal \left(E(X_1),\frac{V(X_1)}{n} \right)$, then $\overline{X}_n -E(X_1) \overset{d}{\rightarrow} X-E(X_1) \sim Normal \left( 0,\frac{V(X_1)}{n} \right)$and $\sqrt{n} \left( \overline{X}_n -E(X_1) \right) \overset{d}{\rightarrow} \sqrt{n} \left( X-E(X_1) \right) \sim Normal \left( 0,V(X_1) \right)$so $Z_n := \displaystyle \frac{\overline{X}_n-E(\overline{X}_n)}{\sqrt{V(\overline{X}_n)}} = \displaystyle\frac{\sqrt{n} \left( \overline{X}_n -E(X_1) \right)}{\sqrt{V(X_1)}} \overset{d}{\rightarrow} Z \sim Normal \left( 0,1 \right)$Thus, for sufficiently large $n$ (say $n>30$), probability statements about $\overline{X}_n$ can be approximated using the $Normal$ distribution. The beauty of the CLT, as you have probably seen from other courses, is that $\overline{X}_n \overset{d}{\rightarrow} Normal \left( E(X_1), \frac{V(X_1)}{n} \right)$ does not require the $X_i$ to be normally distributed. We can try this with our $Bernoulli$ RV generator. First, a small number of samples: ###Code theta, n, samples = 0.6, 10, 5 # concise way to set some variable values sampleMeans=[] # empty list for i in range(0, samples, 1): # loop thisMean = QQ(sum(bernoulliSample(n, theta)))/n # get a sample and find the mean sampleMeans.append(thisMean) # add mean to the list of means sampleMeans # disclose the sample means ###Output _____no_output_____ ###Markdown You can use the interactive plot to increase the number of samples and make a histogram of the sample means. According to the CLT, for lots of reasonably-sized samples we should get a nice symmetric bell-curve-ish histogram centred on $\theta$. You can adjust the number of bins in the histogram as well as the number of samples, sample size, and $\theta$. 
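###Markdown Before playing with the sliders it may help to note what the CLT actually predicts here (a short added calculation): for a $Bernoulli(\theta)$ RV, $E(X_1) = \theta$ and $V(X_1) = \theta(1-\theta)$, so the CLT says $\overline{X}_n$ is approximately $Normal\left(\theta, \frac{\theta(1-\theta)}{n}\right)$. For example, with $\theta = 0.3$ and $n = 100$ the histogram of sample means should be centred near $0.3$ with standard deviation about $\sqrt{\frac{0.3 \times 0.7}{100}} \approx 0.046$.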
###Code import pylab @interact def _(samples=slider(1,3000,1,100,label='number of samples'), nToGen=slider(1,1500,1,100,label='sample size n'),my_theta=input_box(0.3,label='theta'),Bins=5): '''Interactive function to plot distribution of sample means for a Bernoulli process.''' if my_theta >= 0 and my_theta <= 1 and samples > 0: sampleMeans=[] # empty list for i in range(0, samples, 1): thisMean = RR(sum(bernoulliSample(nToGen, my_theta)))/nToGen sampleMeans.append(thisMean) pylab.clf() # clear current figure n, bins, patches = pylab.hist(sampleMeans, Bins, normed=true) pylab.ylabel('normalised count') pylab.title('Normalised histogram for Bernoulli sample means') pylab.savefig('myHist') # to actually display the figure pylab.show() #show(p, figsize=[5,3], axes_labels=['n','sample mean'],ymax=1) else: print 'Theta must be between 0 and 1, and samples > 0' ###Output _____no_output_____ ###Markdown Increase the sample size and the numbe rof bins in the above interact and see if the histograms of the sample means are looking more and more normal as the CLT would have us believe. But although the $X_i$ do not have to be $\sim Normal$ for $\overline{X}_n = \overset{d}{\rightarrow} X \sim Normal\left(E(X_1),\frac{V(X_1)}{n} \right)$, remember that we said "Let $X_1,X_2,\ldots \overset{IID}{\sim} X_1$ and suppose $E(X_1)$ and $V(X_1)$ both exist", then,$$\overline{X}_n = \frac{1}{n} \sum_{i=1}^n X_i \overset{d}{\rightarrow} X \sim Normal \left(E(X_1),\frac{V(X_1)}{n} \right)$$This is where is all goes horribly wrong for the standard $Cauchy$ distribution (any $Cauchy$ distribution in fact): neither the expectation nor the variance exist for this distribution. The Central Limit Theorem cannot be applied here. In fact, if $X_1,X_2,\ldots \overset{IID}{\sim}$ standard $Cauchy$, then $\overline{X}_n = \displaystyle \frac{1}{n} \sum_{i=1}^n X_i \sim$ standard $Cauchy$. 
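###Markdown We can check this claim with a quick simulation (an added sketch, reusing the `cauchySample` function defined earlier in this worksheet): if the mean of $n$ standard $Cauchy$ observations is itself standard $Cauchy$, then the spread of the sample means should not shrink as $n$ grows. ###Code for n in [10, 100, 1000]:
    # 200 independent sample means, each from n standard Cauchy observations
    sampleMeansCauchy = [sum(cauchySample(n))/n for rep in range(200)]
    sampleMeansCauchy.sort()
    # a rough interquartile range of those 200 sample means
    roughIQR = sampleMeansCauchy[149] - sampleMeansCauchy[49]
    print(str(n) + ": " + str(roughIQR))
###Output
_____no_output_____
 ###Markdown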
YouTryTry looking at samples from two other RVs where the expectation and variance do exist, the $Uniform$ and the $Exponential$: ###Code import pylab @interact def _(samples=input_box(100,label='number of samples'), nToGen=slider(1,1500,1,100,label='sample size n'),my_theta1=input_box(2,label='theta1'),my_theta2=input_box(4,label='theta1'),Bins=5): '''Interactive function to plot distribution of sample means for a Uniform(theta1, theta2) process.''' if (my_theta1 < my_theta2) and samples > 0: sampleMeans=[] # empty list for i in range(0, samples, 1): thisMean = RR(sum(uniformSample(nToGen, my_theta1, my_theta2)))/nToGen sampleMeans.append(thisMean) pylab.clf() # clear current figure n, bins, patches = pylab.hist(sampleMeans, Bins, normed=true) pylab.ylabel('normalised count') pylab.title('Normalised histogram for Uniform sample means') pylab.savefig('myHist') # to actually display the figure pylab.show() #show(p, figsize=[5,3], axes_labels=['n','sample mean'],ymax=1) else: print 'theta1 must be less than theta2, and samples > 0' import pylab @interact def _(samples=input_box(100,label='number of samples'), nToGen=slider(1,1500,1,100,label='sample size n'),my_lambda=input_box(2,label='lambda'),Bins=5): '''Interactive function to plot distribution of sample means for an Exponential(lambda) process.''' if my_lambda > 0 and samples > 0: sampleMeans=[] # empty list for i in range(0, samples, 1): thisMean = RR(sum(exponentialSample(nToGen, my_lambda)))/nToGen sampleMeans.append(thisMean) pylab.clf() # clear current figure n, bins, patches = pylab.hist(sampleMeans, Bins, normed=true) pylab.ylabel('normalised count') pylab.title('Normalised histogram for Exponential sample means') pylab.savefig('myHist') # to actually display the figure pylab.show() #show(p, figsize=[5,3], axes_labels=['n','sample mean'],ymax=1) else: print 'lambda must be greater than 0, and samples > 0' ###Output _____no_output_____ ###Markdown YouTry LaterPython's `random` for sampling and sequence manipulationThe Python `random` module, available in SageMath, provides a useful way of taking samples if you have already generated a 'population' to sample from, or otherwise playing around with the elements in a sequence. See http://docs.python.org/library/random.html for more details. Here we will try a few of them.The aptly-named sample function allows us to take a sample of a specified size from a sequence. We will use a list as our sequence: ###Code pop = range(1, 101, 1) # make a population sample(pop, 10) # sample 10 elements from it at random ###Output _____no_output_____ ###Markdown Each call to sample will select unique elements in the list (note that 'unique' here means that it will not select the element at any particular position in the list more than once, but if there are duplicate elements in the list, such as with a list [1,2,4,2,5,3,1,3], then you may well get any of the repeated elements in your sample more than once). sample samples with replacement, which means that repeated calls to sample may give you samples with the same elements in. 
###Code popWithDuplicates = range(1, 11, 1)*4 # make a population with repeated elements print(popWithDuplicates) for i in range (5): print sample(popWithDuplicates, 10) ###Output [8, 1, 8, 4, 2, 4, 9, 2, 5, 2] [7, 1, 6, 4, 1, 5, 2, 6, 3, 5] [10, 1, 8, 4, 1, 6, 3, 3, 2, 1] [7, 9, 9, 3, 10, 7, 2, 1, 6, 5] [8, 1, 7, 1, 5, 2, 4, 6, 4, 9] ###Markdown Try experimenting with choice, which allows you to select one element at random from a sequence, and shuffle, which shuffles the sequence in place (i.e, the ordering of the sequence itself is changed rather than you being given a re-ordered copy of the list). It is probably easiest to use lists for your sequences. See how `shuffle` is creating permutations of the list. You could use `sample` and `shuffle` to emulate *permuations of k objects out of n* ...You may need to check the documentation to see how use these functions. ###Code ?sample ?shuffle ?choice ###Output _____no_output_____
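###Markdown A sketch of that last suggestion (added; it assumes the same `sample` and `shuffle` functions explored above): `sample` picks $k$ distinct objects out of $n$, and `shuffle` then randomises their order, giving one random permutation of $k$ objects out of $n$. ###Code n, k = 10, 4
population = range(1, n+1)             # the n objects
arrangement = sample(population, k)    # k distinct objects, no repeats
shuffle(arrangement)                   # randomise their order in place
print(arrangement)                     # one random permutation of k out of n
###Output
_____no_output_____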
Analytics/Comb Class 36 (2 cars, 2 trucks).ipynb
###Markdown Here -* Select representative for each component* save json to file for each component (class_2_2_comp_39823484994949)* draw graph with d3* What I really want is real time back and forth to pull out components on the fly rather than build them all and save mass files to harddrive. ###Code #df.describe() #%matplotlib inline df_solvable.hist(bins=20) df_unsolvable.hist(bins=20) comp = comps[0] min_int = min(comp.node[node]['board_int'] for node in comp.nodes()) min_int df_solvable.describe() df_unsolvable.describe() #node = 0 comp_repr_node = [comp[node] for node in comp.nodes() if comp.node[node]['board_int'] == min_int] comp_repr_node len([g for g in comps if len(g.nodes())==1]) n = solvable_comps[40].node[0] #HTML(RHdisplay.svg_from_state(solvable_comps[40].nodes(0))) HTML(RHdisplay.svg_from_state(n['board_int'],n['red_col'])) df = pd.DataFrame( [ [len(g.nodes()),g.graph['solvable'] ] for g in comps]) g = solvable_comps[40] g RHdistance_partition.distance_partition(g) df_dist = pd.DataFrame( [g.node[n]['soln_distance'] for n in g.nodes() ], columns = ['distance']) df_dist.groupby(['distance']).size() HTML(RHdisplay.svg_from_state( g.node[0]['board_int'] , g.node[0]['red_col'])) len(solvable_comps) , len(unsolvable_comps) len(solvable_comps) + len(unsolvable_comps) importlib.reload(RHdistance_partition) for g in solvable_comps: RHdistance_partition.distance_partition(g) for g in solvable_comps: g.graph['max_distance'] = max(g.graph['distance_partition'].keys()) for node in g.graph['distance_partition'][g.graph['max_distance']]: break node_dict = g.node[node] split_board_int = RHutilities.split_int( node_dict['board_int']) g.graph['repr_board_int_s1'] = split_board_int[0] g.graph['repr_board_int_s2'] = split_board_int[1] g.graph['repr_red_col'] = node_dict['red_col'] df_max_dist = pd.DataFrame( \ [ [g.graph['max_distance'],\ g.graph['repr_board_int_s1'],\ g.graph['repr_board_int_s2'],\ g.graph['repr_red_col']\ ,len(g.nodes())\ ,g.size()\ ,nx.density(g)\ ]\ for g in solvable_comps], columns=['distance','int_s1','int_s2','repr_red_col','num_nodes','num_edges','density']) df_max_dist.sort_values(['distance'],ascending=False) board_int = RHutilities.combine_ints(21990316605814,11276634271252480) red_col = 5 HTML(RHdisplay.svg_from_state(board_int,red_col)) df_max_dist.groupby(['distance']).agg(['count']) df_max_dist.sort_values(by=['distance'] , ascending = False) df_max_dist.loc[df_max_dist['distance'] == 19] #HTML(RHdisplay.svg_from_state(n['board_int'],n['red_col'])) board_int = RHutilities.combine_ints(2748789558696,240243301154856) red_col = 4 HTML(RHdisplay.svg_from_state(board_int,red_col)) ###Output _____no_output_____
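###Markdown One possible way to tackle the "save json to file for each component" item above (a sketch only; it assumes the `solvable_comps` list and the `repr_board_int_s1` graph attribute set earlier in this notebook, and that the node attributes are plain ints): ###Code import json
from networkx.readwrite import json_graph

def save_component(g, fname):
    data = json_graph.node_link_data(g)   # nodes, links and node attributes as plain dicts
    data['graph'] = {}                    # drop graph-level attrs (e.g. distance_partition) that may not serialise
    with open(fname, 'w') as f:
        json.dump(data, f)

# e.g. one file per solvable component, named after its representative board integer
for g in solvable_comps:
    save_component(g, 'class_2_2_comp_%s.json' % g.graph['repr_board_int_s1'])
###Output
_____no_output_____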
03_IST_basic_stats.ipynb
###Markdown Hypothesis testingIn the intial "International Stroke Trial" the researchers wanted to test the effect of aspirin and/or heparin on stroke patients. Here, we want to repeat some basic statistical tests to get a feeling for the data. We will not go into detail about the different treatments. Aspirin versus Avoid aspirin Outcome after 14 daysThe endpoint after 14 days is 'DDEAD'. It includes information whether a patient has passed away within 14 days or not.First we grouped the patients and their status concerning Aspirin treatment (Y/N) - and their outcome after 14 days. ###Code list_asp14 = df.groupby('DASP14')['DDEAD'].apply(list) list_asp14 stat, p = kruskal(list_asp14[0], list_asp14[1]) print('Kruskal Wallis') print_stats(stat, p) group_asp = df.groupby('DASP14') group_asp['DDEAD'].value_counts() asp14_nn = group_asp['DDEAD'].value_counts()[0] asp14_ny = group_asp['DDEAD'].value_counts()[1] asp14_yn = group_asp['DDEAD'].value_counts()[2] asp14_yy = group_asp['DDEAD'].value_counts()[3] print(f"--- No aspirin during study ---") print(f'{round((asp14_nn / (asp14_nn + asp14_ny) * 100),1)} % of patients living 14 days after their stroke') print() print(f"--- Aspirin during study ---") print(f'{round((asp14_yn/ (asp14_yn + asp14_yy) * 100),1)} % of patients living 14 days after their stroke') ###Output --- No aspirin during study --- 88.3 % of patients living 14 days after their stroke --- Aspirin during study --- 89.8 % of patients living 14 days after their stroke ###Markdown When comparing the groups by implementing the Kruskal Wallis Test, we see that there is a significant difference between patients who were given Aspirin in comparison to those who didn't, after 14 days. More patients that did not receive Aspirin passed away compared to those who received Aspirin. Outcome after 6 months There are two endpoints that are interesting after 6 months, that is 'FRECOVER', which is the information of full recovery, as well as 'FDEAD', which is information on patients who have passed away. ###Code # FRECOVER (Y) is not equal to "recovered" in OCCODE r_group = df.groupby('FRECOVER') r_group.OCCODE.value_counts() ###Output _____no_output_____ ###Markdown FRECOVER (Y) is not equal to "recovered" in OCCODE. So we create a new variable based on OCCODE. ###Code df['RECO'] = (df.OCCODE == 'Recovered') list_asp6 = df.groupby('DASP14')['RECO'].apply(list) print(list_asp6) print() stat, p = kruskal(list_asp6[0], list_asp6[1]) print('Kruskal Wallis') print_stats(stat, p) group_asp['RECO'].value_counts() asp6_nf = group_asp['RECO'].value_counts()[0] asp6_nt = group_asp['RECO'].value_counts()[1] asp6_yf = group_asp['RECO'].value_counts()[2] asp6_yt = group_asp['RECO'].value_counts()[3] print(f"--- No aspirin during study ---") print(f'{round((asp6_nf / (asp6_nf + asp6_nt) * 100),1)} % of patients living 14 days after their stroke') print() print(f"--- Aspirin during study ---") print(f'{round((asp6_yf / (asp6_yf + asp6_yt) * 100),1)} % of patients living 14 days after their stroke') ###Output --- No aspirin during study --- 84.2 % of patients living 14 days after their stroke --- Aspirin during study --- 81.8 % of patients living 14 days after their stroke ###Markdown Patients who did not receive aspirin during the study were more likely to recover within 6 months after their stroke. In slight contrast, there were more patients alive after 6 months who did receive aspirin. 
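###Markdown An optional cross-check on these comparisons (an added sketch; it assumes `df`, `pd`, `scipy` and the `print_stats` helper from earlier in this notebook): for a binary outcome against a binary treatment flag, a chi-square test on the 2x2 contingency table is a common alternative to the Kruskal-Wallis test used above. ###Code from scipy.stats import chi2_contingency

# aspirin flag vs. the 6-month recovery indicator created above
ct = pd.crosstab(df['DASP14'], df['RECO'])
chi2, p, dof, expected = chi2_contingency(ct)
print_stats(chi2, p)
###Output
_____no_output_____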
###Code list_asp6 = df.groupby('DASP14')['FDEAD'].apply(list) print(list_asp6) print() stat, p = kruskal(list_asp6[0], list_asp6[1]) print('Kruskal Wallis') print_stats(stat, p) group_asp['FDEAD'].value_counts() asp6_nn = group_asp['FDEAD'].value_counts()[0] asp6_ny = group_asp['FDEAD'].value_counts()[1] asp6_yn = group_asp['FDEAD'].value_counts()[2] asp6_yy = group_asp['FDEAD'].value_counts()[3] print(f"--- No aspirin during study ---") print(f'{round((asp6_nn / (asp6_nn + asp6_ny) * 100),1)} % of patients living 6 months after their stroke') print() print(f"--- Aspirin during study ---") print(f'{round((asp6_yn/ (asp6_yn + asp6_yy) * 100),1)} % of patients living 6 months after their stroke') ###Output --- No aspirin during study --- 75.1 % of patients living 6 months after their stroke --- Aspirin during study --- 78.0 % of patients living 6 months after their stroke ###Markdown Heparin vs non-heparin Outcome after 14 days ###Code list_hep14 = df.groupby(by = ['RXHEP14'])['DDEAD'].apply(list) list_hep14 stat, p = kruskal(list_hep14[0], list_hep14[1]) print_stats(stat, p) ###Output stat = 0.657, p = 0.418 Probably the same distribution ###Markdown After 14 days: No difference between heparin and no heparin. Outcome after 6 months ###Code list_hep6 = df.groupby(by = ['RXHEP14'])['RECO'].apply(list) stat, p = kruskal(list_hep6[0] , list_hep6[1]) print_stats(stat, p) group_hep = df.groupby('RXHEP14') group_hep['RECO'].value_counts() hep6_nf = group_hep['RECO'].value_counts()[0] hep6_nt = group_hep['RECO'].value_counts()[1] hep6_yf = group_hep['RECO'].value_counts()[2] hep6_yt = group_hep['RECO'].value_counts()[3] print(f"--- No heparin during study ---") print(f'{round((hep6_nf / (hep6_nf + hep6_nt) * 100),1)} % of patients living 6 months after their stroke') print() print(f"--- Heparin during study ---") print(f'{round((hep6_yf / (hep6_yf + hep6_yt) * 100),1)} % of patients living 6 months after their stroke') ###Output --- No heparin during study --- 83.7 % of patients living 6 months after their stroke --- Heparin during study --- 82.3 % of patients living 6 months after their stroke ###Markdown Patients who did not receive heparin during the study were more likely to be alive after 6 months. Though, the difference seems to be minor. Heparin treatment did not have an effect on the outcome 'FDEAD'. ###Code list_hep6 = df.groupby(by = ['RXHEP14'])['FDEAD'].apply(list) stat, p = kruskal(list_hep6[0] , list_hep6[1]) print_stats(stat, p) ###Output stat = 0.683, p = 0.409 Probably the same distribution ###Markdown No versus low versus medium heparin Outcome after 14 days ###Code list2_hep14 = df.groupby(by = ['HEP14'])['DDEAD'].apply(list) list2_hep14 stat, p = kruskal(list2_hep14[0], list2_hep14[1], list2_hep14[2]) print_stats(stat, p) ###Output stat = 1.507, p = 0.471 Probably the same distribution ###Markdown After 14 days: No difference between low, medium and no heparin. 
Outcome after 6 months ###Code list2_hep6 = df.groupby(by = ['HEP14'])['RECO'].apply(list) stat, p = kruskal(list2_hep6[0], list2_hep6[1], list2_hep6[2]) print_stats(stat, p) group_hep2 = df.groupby('HEP14') group_hep2['RECO'].value_counts() hep6_lf = group_hep2['RECO'].value_counts()[0] hep6_lt = group_hep2['RECO'].value_counts()[1] hep6_mf = group_hep2['RECO'].value_counts()[2] hep6_mt = group_hep2['RECO'].value_counts()[3] hep6_nf = group_hep2['RECO'].value_counts()[4] hep6_nt = group_hep2['RECO'].value_counts()[5] print(f"--- No heparin during study ---") print(f'{round((hep6_nf / (hep6_nf + hep6_nt) * 100),1)} % of patients living 6 months after their stroke') print() print(f"--- Low heparin during study ---") print(f'{round((hep6_lf / (hep6_lf + hep6_lt) * 100),1)} % of patients living 6 months after their stroke') print() print(f"--- Medium heparin during study ---") print(f'{round((hep6_mf / (hep6_mf + hep6_mt) * 100),1)} % of patients living 6 months after their stroke') ###Output --- No heparin during study --- 83.7 % of patients living 6 months after their stroke --- Low heparin during study --- 82.4 % of patients living 6 months after their stroke --- Medium heparin during study --- 82.2 % of patients living 6 months after their stroke ###Markdown Slightly more patients were alive after 6 months when they received neither low nor medium heparin. Again, the different heparin treatments did not have an effect on outcome 'FDEAD'. ###Code list2_hep6 = df.groupby(by = ['HEP14'])['FDEAD'].apply(list) stat, p = kruskal(list2_hep6[0], list2_hep6[1], list2_hep6[2]) print_stats(stat, p) ###Output stat = 0.724, p = 0.696 Probably the same distribution ###Markdown Combination of aspirin and heparin(significantly) different outcome than separate effects of aspirin and heparin ###Code df.TREAT14.unique() ###Output _____no_output_____ ###Markdown Outcome after 14 days ###Code list_treat14 = df.groupby(by = ['TREAT14'])['DDEAD'].apply(list) list_treat14 stat, p = kruskal(list_treat14[0], list_treat14[1], list_treat14[2], list_treat14[3],list_treat14[4], list_treat14[5]) print_stats(stat, p) iterator = product(enumerate(list_treat14), enumerate(list_treat14)) for (first_idx, first_group), (second_idx, second_group) in iterator: print(first_idx, second_idx) stat, p = mannwhitneyu(first_group, second_group) print_stats(stat, p) print() group_treat = df.groupby(by = ['TREAT14']) group_treat['DDEAD'].value_counts() treat14_a_n = group_treat['DDEAD'].value_counts()[0] treat14_a_y = group_treat['DDEAD'].value_counts()[1] treat14_alh_n = group_treat['DDEAD'].value_counts()[2] treat14_alh_y = group_treat['DDEAD'].value_counts()[3] treat14_amh_n = group_treat['DDEAD'].value_counts()[4] treat14_amh_y = group_treat['DDEAD'].value_counts()[5] treat14_c_n = group_treat['DDEAD'].value_counts()[6] treat14_c_y = group_treat['DDEAD'].value_counts()[7] treat14_lh_n = group_treat['DDEAD'].value_counts()[8] treat14_lh_y = group_treat['DDEAD'].value_counts()[9] treat14_mh_n = group_treat['DDEAD'].value_counts()[10] treat14_mh_y = group_treat['DDEAD'].value_counts()[11] print(f"--- Aspirin during study ---") print(f'{round((treat14_a_n / (treat14_a_n + treat14_a_y) * 100),1)} % of patients living 14 days after their stroke') print(f'-- Significantly different compared to the control group (p < 0.05)') print() print(f"--- Aspirin + Low Heparin during study ---") print(f'{round((treat14_alh_n / (treat14_alh_n + treat14_alh_y) * 100),1)} % of patients living 14 days after their stroke') print(f'-- Significantly 
different compared to both Heparin groups and the control group (p < 0.05)') print() print(f"--- Aspirin + Medium Heparin during study ---") print(f'{round((treat14_amh_n / (treat14_amh_n + treat14_amh_y) * 100),1)} % of patients living 14 days after their stroke') print() print(f"--- Low Heparin during study ---") print(f'{round((treat14_lh_n / (treat14_lh_n + treat14_lh_y) * 100),1)} % of patients living 14 days after their stroke') print() print(f"--- Medium Heparin during study ---") print(f'{round((treat14_mh_n / (treat14_mh_n + treat14_mh_y) * 100),1)} % of patients living 14 days after their stroke') print() print(f"--- Control (neither Aspirin nor Heparin during study) ---") print(f'{round((treat14_c_n / (treat14_c_n + treat14_c_y) * 100),1)} % of patients living 14 days after their stroke') print() ###Output --- Aspirin during study --- 89.9 % of patients living 14 days after their stroke -- Significantly different compared to the control group (p < 0.05) --- Aspirin + Low Heparin during study --- 90.3 % of patients living 14 days after their stroke -- Significantly different compared to both Heparin groups and the control group (p < 0.05) --- Aspirin + Medium Heparin during study --- 89.1 % of patients living 14 days after their stroke --- Low Heparin during study --- 88.8 % of patients living 14 days after their stroke --- Medium Heparin during study --- 88.7 % of patients living 14 days after their stroke --- Control (neither Aspirin nor Heparin during study) --- 87.9 % of patients living 14 days after their stroke ###Markdown Outcome after 6 months ###Code list_treat6 = df.groupby(by = ['TREAT14'])['RECO'].apply(list) stat, p = kruskal(list_treat6[0], list_treat6[1], list_treat6[2], list_treat6[3],list_treat6[4], list_treat6[5]) print_stats(stat, p) iterator = product(enumerate(list_treat6), enumerate(list_treat6)) for (first_idx, first_group), (second_idx, second_group) in iterator: print(first_idx, second_idx) stat, p = mannwhitneyu(first_group, second_group) print_stats(stat, p) print() group_treat['RECO'].value_counts() treat6_a_f = group_treat['RECO'].value_counts()[0] treat6_a_t = group_treat['RECO'].value_counts()[1] treat6_alh_f = group_treat['RECO'].value_counts()[2] treat6_alh_t = group_treat['RECO'].value_counts()[3] treat6_amh_f = group_treat['RECO'].value_counts()[4] treat6_amh_t = group_treat['RECO'].value_counts()[5] treat6_c_f = group_treat['RECO'].value_counts()[6] treat6_c_t = group_treat['RECO'].value_counts()[7] treat6_lh_f = group_treat['RECO'].value_counts()[8] treat6_lh_t = group_treat['RECO'].value_counts()[9] treat6_mh_f = group_treat['RECO'].value_counts()[10] treat6_mh_t = group_treat['RECO'].value_counts()[11] print(f"--- Aspirin during study ---") print(f'{round((treat6_a_f / (treat6_a_f + treat6_a_t) * 100),1)} % of patients living 6 months after their stroke') print(f'-- Significantly different compared to control (p < 0.01)') print() print(f"--- Aspirin + Low Heparin during study ---") print(f'{round((treat6_alh_f / (treat6_alh_f + treat6_alh_t) * 100),1)} % of patients living 6 months after their stroke') print(f'-- Significantly different compared to control (p < 0.01)') print() print(f"--- Aspirin + Medium Heparin during study ---") print(f'{round((treat6_amh_f / (treat6_amh_f + treat6_amh_t) * 100),1)} % of patients living 6 months after their stroke') print(f'-- Significantly different compared to control (p < 0.01)') print() print(f"--- Low Heparin during study ---") print(f'{round((treat6_lh_f / (treat6_lh_f + treat6_lh_t) * 100),1)} % of 
patients living 6 months after their stroke') print(f'-- Significantly different compared to control (p < 0.01)') print() print(f"--- Medium Heparin during study ---") print(f'{round((treat6_mh_f / (treat6_mh_f + treat6_mh_t) * 100),1)} % of patients living 6 monthsafter their stroke') print(f'-- Significantly different compared to control (p < 0.01)') print() print(f"--- Control (neither Aspirin nor Heparin during study) ---") print(f'{round((treat6_c_f / (treat6_c_f + treat6_c_t) * 100),1)} % of patients living 6 months after their stroke') print(f'-- Significantly different compared to all treatments (p < 0.01)') print() ###Output --- Aspirin during study --- 81.9 % of patients living 6 months after their stroke -- Significantly different compared to control (p < 0.01) --- Aspirin + Low Heparin during study --- 82.0 % of patients living 6 months after their stroke -- Significantly different compared to control (p < 0.01) --- Aspirin + Medium Heparin during study --- 81.3 % of patients living 6 months after their stroke -- Significantly different compared to control (p < 0.01) --- Low Heparin during study --- 82.8 % of patients living 6 months after their stroke -- Significantly different compared to control (p < 0.01) --- Medium Heparin during study --- 82.9 % of patients living 6 monthsafter their stroke -- Significantly different compared to control (p < 0.01) --- Control (neither Aspirin nor Heparin during study) --- 85.2 % of patients living 6 months after their stroke -- Significantly different compared to all treatments (p < 0.01) ###Markdown Patients who did receive neither of the treatments were more likely to recover within 6 months. Though, the group itself was much larger (over 5000 patients, compared to about 2000 patients in the other groups), which may have an impact on the outcome. When looking at the 'FDEAD' outcome, we see the same as before. The patients who did not receive either treatment were more likely to be dead after 6 months. 
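###Markdown One caveat worth keeping in mind for the many pairwise Mann-Whitney comparisons above (an added sketch; it assumes `statsmodels` is available and reuses `list_treat6` and `mannwhitneyu` from this notebook): without a multiple-testing correction, some of the "significant" pairs may be false positives. A Holm (or Bonferroni) adjustment over each unordered pair keeps the family-wise error rate in check. ###Code from itertools import combinations
from statsmodels.stats.multitest import multipletests

pvals = []
pairs = []
for (i, a), (j, b) in combinations(enumerate(list_treat6), 2):  # each unordered pair once
    stat, p = mannwhitneyu(a, b)
    pairs.append((i, j))
    pvals.append(p)

reject, p_adj, _, _ = multipletests(pvals, alpha=0.05, method='holm')
for pair, p_raw, p_corr in zip(pairs, pvals, p_adj):
    print(pair, round(p_raw, 4), round(p_corr, 4))
###Output
_____no_output_____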
###Code list_treat6 = df.groupby(by = ['TREAT14'])['FDEAD'].apply(list) stat, p = kruskal(list_treat6[0], list_treat6[1], list_treat6[2], list_treat6[3],list_treat6[4], list_treat6[5]) print_stats(stat, p) list_treat6 iterator = product(enumerate(list_treat6), enumerate(list_treat6)) for (first_idx, first_group), (second_idx, second_group) in iterator: print(first_idx, second_idx) stat, p = mannwhitneyu(first_group, second_group) print_stats(stat, p) print() group_treat['FDEAD'].value_counts() treat6_a_n = group_treat['FDEAD'].value_counts()[0] treat6_a_y = group_treat['FDEAD'].value_counts()[1] treat6_alh_n = group_treat['FDEAD'].value_counts()[2] treat6_alh_y = group_treat['FDEAD'].value_counts()[3] treat6_amh_n = group_treat['FDEAD'].value_counts()[4] treat6_amh_y = group_treat['FDEAD'].value_counts()[5] treat6_c_n = group_treat['FDEAD'].value_counts()[6] treat6_c_y = group_treat['FDEAD'].value_counts()[7] treat6_lh_n = group_treat['FDEAD'].value_counts()[8] treat6_lh_y = group_treat['FDEAD'].value_counts()[9] treat6_mh_n = group_treat['FDEAD'].value_counts()[10] treat6_mh_y = group_treat['FDEAD'].value_counts()[11] print(f"--- Aspirin during study ---") print(f'{round((treat6_a_n / (treat6_a_n + treat6_a_y) * 100),1)} % of patients living 6 months after their stroke') print(f'-- Significantly different compared to both Heparin groups and the control group (p < 0.05)') print() print(f"--- Aspirin + Low Heparin during study ---") print(f'{round((treat6_alh_n / (treat6_alh_n + treat6_alh_y) * 100),1)} % of patients living 6 months after their stroke') print(f'-- Significantly different compared to both Heparin groups and the control group(p < 0.05)') print() print(f"--- Aspirin + Medium Heparin during study ---") print(f'{round((treat6_amh_n / (treat6_amh_n + treat6_amh_y) * 100),1)} % of patients living 6 months after their stroke') print(f'-- Significantly different compared to the control group (p < 0.05)') print() print(f"--- Low Heparin during study ---") print(f'{round((treat6_lh_n / (treat6_lh_n + treat6_lh_y) * 100),1)} % of patients living 6 months after their stroke') print() print(f"--- Medium Heparin during study ---") print(f'{round((treat6_mh_n / (treat6_mh_n + treat6_mh_y) * 100),1)} % of patients living 6 monthsafter their stroke') print() print(f"--- Control (neither Aspirin nor Heparin during study) ---") print(f'{round((treat6_c_n / (treat6_c_n + treat6_c_y) * 100),1)} % of patients living 6 months after their stroke') print() ###Output --- Aspirin during study --- 78.0 % of patients living 6 months after their stroke -- Significantly different compared to both Heparin groups and the control group (p < 0.05) --- Aspirin + Low Heparin during study --- 78.4 % of patients living 6 months after their stroke -- Significantly different compared to both Heparin groups and the control group(p < 0.05) --- Aspirin + Medium Heparin during study --- 77.5 % of patients living 6 months after their stroke -- Significantly different compared to the control group (p < 0.05) --- Low Heparin during study --- 75.4 % of patients living 6 months after their stroke --- Medium Heparin during study --- 75.9 % of patients living 6 monthsafter their stroke --- Control (neither Aspirin nor Heparin during study) --- 74.7 % of patients living 6 months after their stroke
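###Markdown The cells above pull each percentage out of `value_counts()` by positional index and type the significance notes by hand. As a compact cross-check (just a sketch, reusing the `group_treat` grouping already defined in this notebook), the same survival shares per treatment arm can be computed in one step: ###Code
# Cross-check (sketch): survival share per treatment arm in one step,
# reusing the group_treat grouping defined earlier in this notebook.
survival_share = (
    group_treat['FDEAD']
    .value_counts(normalize=True)   # fraction of each FDEAD value (e.g. 'N'/'Y') per arm
    .mul(100)
    .round(1)
)
print(survival_share)
###Output _____no_output_____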
module2-sql-for-analysis/DSPT1_Postgres_Demo.ipynb
###Markdown **POSTGRES DEMO** ###Code #!pip install psycopg2-binary import psycopg2 #dir(psycopg2) #help(psycopg2) ###Output _____no_output_____ ###Markdown 1. Establish Connection2. Cursor3. Execute Query4. Get RESULTS! ###Code dbname = 'lnucdsrb' user = 'lnucdsrb' password = 'b4hmmkgTuxqEwulnohtUqo2BGhizvp7V' host = 'salt.db.elephantsql.com' pg_conn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host) pg_conn pg_cursor = pg_conn.cursor() pg_cursor pg_cursor.execute('SELECT * FROM playground;') pg_cursor.fetchall() #!wget https://github.com/bruno-janota/DS-Unit-3-Sprint-2-SQL-and-Databases/blob/master/module1-introduction-to-sql/rpg_db.sqlite3?raw=true #!mv 'rpg_db.sqlite3?raw=true' rpg_db.sqlite3 import sqlite3 sl_conn = sqlite3.connect('rpg_db.sqlite3') sl_cursor = sl_conn.cursor() count = sl_cursor.execute('SELECT COUNT(*) FROM charactercreator_character;').fetchall() count characters = sl_cursor.execute('SELECT * FROM charactercreator_character;').fetchall() #characters characters[0] # Needed because pg_cursor will now be used to create a table, so we need to free memory by ROLLBACK # "ROLLBACK: Rolls back an explicit or implicit transaction to the beginning of the transaction, # or to a savepoint inside the transaction. You can use ROLLBACK TRANSACTION to erase # all data modifications made from the start of the transaction or to a savepoint. # It also frees resources held by the transaction." pg_cursor.execute("ROLLBACK") create_character_table = ''' CREATE TABLE character_table ( character_id SERIAL PRIMARY KEY, name VARCHAR(30), level INT, exp INT, hp INT, strength INT, intelligence INT, dexterity INT, wisdom INT ) ''' pg_cursor.execute(create_character_table) # Testing - Add one row pg_cursor.execute("INSERT INTO character_table VALUES (1, 'Aliquid iste optio reiciendi', 0,0,10,1,1,1,1);") # Delete that row so that we can add all characters pg_cursor.execute("DELETE FROM character_table WHERE character_id=1;") # Add all characters for character in characters: pg_cursor.execute("INSERT INTO character_table VALUES " + str(character) + ";") pg_cursor.execute('SELECT * FROM character_table LIMIT 10') pg_cursor.fetchall() ###Output _____no_output_____
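###Markdown Side note on the insert loop above: building SQL by concatenating `str(character)` works here, but psycopg2 can substitute the values itself via `%s` placeholders, which avoids quoting problems, and a final `commit()` makes the rows permanent. A minimal sketch of that variant, using the same table and rows: ###Code
# Parameterised bulk insert (sketch) - same table and rows as above, but letting
# psycopg2 substitute the values instead of concatenating SQL strings.
insert_sql = "INSERT INTO character_table VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s);"
pg_cursor.executemany(insert_sql, characters)
pg_conn.commit()  # make the inserted rows permanent
###Output _____no_output_____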
notebooks/02_SizingScenariosDefinitions.ipynb
###Markdown Sizing scenarios definitions*Written by Marc Budinger (INSA Toulouse) and Scott Delbecq (ISAE-SUPAERO), Toulouse, France.*Before sizing a system, it is important to define all the constraints that could have an effect on the design. It is therefore advisable to list meticulously:- the power components to size in the architecture - criteria and parameters useful for the selection of components - system usage scenarios that can "activate" these sizing scenarios Drone architecture We will represent first here the Work Breakdown Structure of the drone system with the components to be selected. Remark: Python can be used to represent the Work Breakdown Structure (WBS) of an architecture thanks to Graphviz graph diagram. - The Graphviz python wrapper can be installed with `conda install anaconda graphviz --force` and `conda install python-graphviz` for Anaconda python solution. - And you need also to install the [Graphviz visualization solution](https://www.graphviz.org/download/) (use the .msi install to have the 'PATH' well set-up) ###Code # Plot the WBS of the system from graphviz import Digraph dot = Digraph(comment='Drone system') # System dot.node('S', 'Multi-rotor drone', shape='rectangle', color='grey') # Components dot.node('C1', 'Propellers', shape='rectangle') dot.node('C2', 'Motors', shape='rectangle') dot.node('C3', 'ESC', shape='rectangle') dot.node('C4', 'Batteries', shape='rectangle') dot.node('C5', 'Frame', shape='rectangle') dot.edge('S','C1') dot.edge('S','C2') dot.edge('S','C3') dot.edge('S','C4') dot.edge('S','C5') # Render the graph into the notebook dot ###Output _____no_output_____ ###Markdown Design drivers We are going to determine here the possible reasons for the degradation in system’s components. These degrations can be:- Fast and come from transient power demands (such as permanent deformation, rupture for mechanical components)- Gradual and come from continuous power demands which often reduce the component’s and therefore the system’s lifetime (such as mechanical fatigue for mechanical components)The component can also have imperfections which can increase the mechanical stresses on itself or other components.For example, the inertia of an electrical motor increases the torque that this same motor has to deliver or can destroy mechanical component when a sudden stop occurs.We will complete the graph with possible sources of degradation (represented as ellipes) of the components of the DC/DC converter. ###Code ## Design drivers # Propeller dot.node('DD1', 'Max\nThrust') dot.edge('C1', 'DD1') dot.node('DD2', 'Efficiency') dot.edge('C1', 'DD2') # Motor dot.node('DD3', 'Temperature\nrise') dot.edge('C2', 'DD3') dot.node('DD4', 'Max\nvoltage') dot.edge('C2', 'DD4') # ESC dot.node('DD5', 'Temperature\nrise') dot.edge('C3', 'DD5') # Batteries dot.node('DD7', 'Voltage') dot.edge('C4', 'DD7') dot.node('DD8', 'Energy') dot.edge('C4', 'DD8') dot.node('DD9', 'Power') dot.edge('C4', 'DD9') # Frame dot.node('DD10', 'Stress') dot.edge('C5', 'DD10') dot.node('DD11', 'Vibration') dot.edge('C5', 'DD11') # Render the graph into the notebook dot ###Output _____no_output_____ ###Markdown Sizing scenarios We have listed the different degradation risks for our application. Now we have to determine the system usage cases which can create this degradation risks. These sizing scenarios are added on the graph as hexagon. 
A first version of the sizing code will take into account 2 main sizing scenarios:- the hover flight which will define the flight time- the take off which will define the maximum power operating points ###Code # Sizing scenarios dot.node('SiS1', 'Hover \n flight', shape='hexagon') dot.node('SiS2', 'Take off', shape='hexagon') # Connections between design drivers and sizing scenarios dot.edge('DD1', 'SiS2') dot.edge('DD2', 'SiS1') dot.edge('DD3', 'SiS1') dot.edge('DD4', 'SiS2') dot.edge('DD5', 'SiS2') dot.edge('DD7', 'SiS2') dot.edge('DD8', 'SiS1') dot.edge('DD9', 'SiS2') dot.edge('DD10', 'SiS2') dot.edge('DD11', 'SiS2') # Render the graph into the notebook dot ###Output _____no_output_____ ###Markdown A second version of the sizing code will take into account 3 main sizing scenarios:- the hover flight which will define the flight time- the take off which will define the maximum power operating points- the vertical flight which will define, with the take off phase, the high power mission segments. ###Code # Sizing scenarios dot.node('SiS3', 'Vertical \nflight', shape='hexagon') # Connections between design drivers and sizing scenarios dot.edge('DD1', 'SiS3') dot.edge('DD4', 'SiS3') dot.edge('DD5', 'SiS3') dot.edge('DD7', 'SiS3') dot.edge('DD9', 'SiS3') # Render the graph into the notebook dot ###Output _____no_output_____
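###Markdown If the diagram is needed outside the notebook (for a report, for example), the graphviz wrapper can also write it to a file. This is just a sketch: the file name is an example, and `format='png'` assumes the Graphviz binaries mentioned earlier are on the PATH. ###Code
# Write the final diagram to disk (sketch); this produces sizing_scenarios.png
# next to the notebook, assuming the Graphviz binaries are on the PATH.
dot.render('sizing_scenarios', format='png', cleanup=True)
###Output _____no_output_____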
notebooks/1.0_ResNetGanDetection.ipynb
###Markdown **AmilGan Detection using ResNet152** Amil Khan | March 1, 2019 | Version 2*** ###Code import numpy as np from collections import OrderedDict import math import torch import torch.nn as nn import torch.optim as optim from torch.autograd import Variable from functools import reduce import torch.utils.model_zoo as model_zoo import torch.nn.functional as F import torchvision.transforms as transforms from torch.utils.data.sampler import SubsetRandomSampler from torchvision import datasets import matplotlib.pyplot as plt %matplotlib inline plt.rcParams['figure.dpi'] = 200 %config InlineBackend.figure_format = 'retina' train_on_gpu = torch.cuda.is_available() ###Output _____no_output_____ ###Markdown Data Loading, Preprocessing, Wrangling, CleansingI chose to functionalize everything in the data preprocessing pipeline for two reasons:- **Reproducibility** Many times, there is a large body of beautiful code that has been written, but no documentation. The data processing step is usually where people get stuck.- **Iterability** I wanted to iterate fast when chnaging parameters in the module, as well as have one block of code that will take care of everything after restarting the kernal.**Inputs**: - `path_to_train`: Path to your training set folder (I am using PyTorch's `ImageFolder` module)- `path_to_test`: Path to your test set folder- `num_workers`: number of subprocesses to use for data loading- `batch_size`: how many samples per batch to load- `valid_size`: percentage of training set to use as validation ###Code def DataConstructor2000(path_to_train, path_to_test, classes=None, num_workers=4, batch_size=32, valid_size = 0.2): # Transformations to the image, edit as need be transform = transforms.Compose([ transforms.RandomHorizontalFlip(), transforms.Resize([224, 224]), transforms.ToTensor()]) train_dataset = datasets.ImageFolder(path_to_train, transform=transform) print("Successfully Loaded Training Set.") test_dataset = datasets.ImageFolder(path_to_test, transform=transform) print("Successfully Loaded Test Set.") # obtain training indices that will be used for validation num_train = len(train_dataset) indices = list(range(num_train)) np.random.shuffle(indices) split = int(np.floor(valid_size * num_train)) train_idx, valid_idx = indices[split:], indices[:split] # define samplers for obtaining training and validation batches train_sampler = SubsetRandomSampler(train_idx) valid_sampler = SubsetRandomSampler(valid_idx) train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, sampler=train_sampler, num_workers=num_workers) valid_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, sampler=valid_sampler, num_workers=num_workers) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=20, num_workers=num_workers, shuffle=True) if classes != None: print("Number of Classes:", len(classes)) return train_loader, valid_loader, test_loader, classes train_loader, valid_loader,test_loader, classes = DataConstructor2000(path_to_train='/workspace/Documents/pretrained-models.pytorch-master/pretrainedmodels/training/',path_to_test='/workspace/Documents/pretrained-models.pytorch-master/pretrainedmodels/test/', classes=['Fake','Real'],num_workers=40, batch_size=100, valid_size = 0.3) ###Output _____no_output_____ ###Markdown Visualize for ConfirmationYou do not need to change anything here. It should run right out of the box. But feel free to change what you need. 
###Code # helper function to un-normalize and display an image def imshow(img): # img = img / 2 + 0.5 # unnormalize if you added normalization in the transformation step plt.imshow(np.transpose(img, (1, 2, 0))) # convert from Tensor image # obtain one batch of training images dataiter = iter(train_loader) images, labels = dataiter.next() images = images.numpy() # convert images to numpy for display print(images.shape) # plot the images in the batch, along with the corresponding labels fig = plt.figure(figsize=(25, 4)) # display 20 images for idx in np.arange(20): ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[]) imshow(images[idx]) ax.set_title(classes[labels[idx]]) ###Output _____no_output_____ ###Markdown Time to Define the ModelIn this notebook, I will use `ResNet152`, as it enabled me to test a very deep network for this problem.__Abstract.__ Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. ###Code def conv3x3(in_planes, out_planes, stride=1): "3x3 convolution with padding" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True) class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = nn.BatchNorm2d(planes) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = nn.BatchNorm2d(planes) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=True) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=True) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class FBResNet(nn.Module): def __init__(self, block, layers, num_classes=2): self.inplanes = 64 # Special attributs self.input_space = None self.input_size = (299, 299, 3) self.mean = None self.std = None super(FBResNet, self).__init__() # Modules self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=True) self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 
= self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) self.avgpool = nn.AvgPool2d(7) self.last_linear = nn.Linear(512 * block.expansion, num_classes) for m in self.modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. / n)) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def _make_layer(self, block, planes, blocks, stride=1): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=True), nn.BatchNorm2d(planes * block.expansion), ) layers = [] layers.append(block(self.inplanes, planes, stride, downsample)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) def features(self, input): x = self.conv1(input) self.conv1_input = x.clone() x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) return x def logits(self, features): x = self.avgpool(features) x = x.view(x.size(0), -1) x = self.last_linear(x) return x def forward(self, input): x = self.features(input) x = self.logits(x) return x def fbresnet18(num_classes=1000): """Constructs a ResNet-18 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = FBResNet(BasicBlock, [2, 2, 2, 2], num_classes=num_classes) return model def fbresnet34(num_classes=1000): """Constructs a ResNet-34 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = FBResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes) return model def fbresnet50(num_classes=1000): """Constructs a ResNet-50 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = FBResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes) return model def fbresnet101(num_classes=1000): """Constructs a ResNet-101 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = FBResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes) return model def fbresnet152(num_classes=2): """Constructs a ResNet-152 model. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet """ model = FBResNet(Bottleneck, [3, 8, 36, 3], num_classes=num_classes) return model.cuda() model = fbresnet152(num_classes=2) model = nn.DataParallel(model) # model ###Output _____no_output_____ ###Markdown Choosing a Loss Function and OptimizerI went with Cross-Entropy Loss. __Cross-entropy loss__, or log loss, measures the performance of a classification model whose output is a probability value between 0 and 1. Cross-entropy loss increases as the predicted probability diverges from the actual label. Hence, predicting a probability of .012 when the actual observation label is 1 would be bad and result in a high loss value. A perfect model would have a log loss of 0. $$\text{loss}(x, class) = -\log\left(\frac{\exp(x[class])}{\sum_j \exp(x[j])}\right) = -x[class] + \log\left(\sum_j \exp(x[j])\right)$$ I opted with good old __Stochastic Gradient Descent__. Nuff said. 
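###Markdown Before wiring the loss into the training loop, here is a quick numeric check (a sketch with made-up logits) that `nn.CrossEntropyLoss` applied to raw scores matches the formula above. ###Code
# Minimal numeric check of the cross-entropy formula above (toy values).
logits = torch.tensor([[2.0, 0.5]])   # one sample, two classes (raw scores)
target = torch.tensor([0])            # true class index
manual = -logits[0, target[0]] + torch.logsumexp(logits[0], dim=0)
auto = nn.CrossEntropyLoss()(logits, target)
print(manual.item(), auto.item())     # the two numbers agree
###Output _____no_output_____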
###Code # specify loss function (categorical cross-entropy) criterion = nn.CrossEntropyLoss() # specify optimizer optimizer = optimizer = torch.optim.SGD([ {'params': list(model.parameters())[:-1], 'lr': 1e-3, 'momentum': 0.9, 'weight_decay': 1e-3}, {'params': list(model.parameters())[-1], 'lr': 5e-5, 'momentum': 0.9, 'weight_decay': 1e-5} ]) ###Output _____no_output_____ ###Markdown Time to TrainThis is where stripes are earned. ###Code # number of epochs to train the model n_epochs = 40 valid_loss_min = np.Inf # track change in validation loss training_vis = [] valid_vis = [] for epoch in range(1, n_epochs+1): # keep track of training and validation loss train_loss = 0.0 valid_loss = 0.0 ################### # train the model # ################### model.train() for data, target in train_loader: # move tensors to GPU if CUDA is available if train_on_gpu: data, target = data.cuda(), target.cuda() # clear the gradients of all optimized variables optimizer.zero_grad() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the batch loss loss = criterion(output, target) # backward pass: compute gradient of the loss with respect to model parameters loss.backward() # perform a single optimization step (parameter update) optimizer.step() # update training loss train_loss += loss.item()*data.size(0) ###################### # validate the model # ###################### model.eval() for data, target in valid_loader: # move tensors to GPU if CUDA is available if train_on_gpu: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # calculate the batch loss loss = criterion(output, target) # update average validation loss valid_loss += loss.item()*data.size(0) # calculate average losses train_loss = train_loss/len(train_loader.dataset) valid_loss = valid_loss/len(valid_loader.dataset) training_vis.append(train_loss) valid_vis.append(valid_loss) # print training/validation statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss)) # save model if validation loss has decreased if valid_loss <= valid_loss_min: print('\nValidation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format( valid_loss_min, valid_loss)) torch.save(model.state_dict(), 'ResNet35656_gan-detector_after82.pt') valid_loss_min = valid_loss ###Output _____no_output_____ ###Markdown Load ModelI included here the ability to load a model from previous training runs. Uncomment/Modify what you need to and go HAM. In this case, load the model, the optimizer and the criterion. ###Code # model = TheModelClass(*args, **kwargs) # model.load_state_dict(torch.load('ResNet35656_gan-detector_after82.pt')) ###Output _____no_output_____ ###Markdown Test Set Evaluation Earlier we loaded in our test data under the name `test_loader`. ###Code # track test loss test_loss = 0.0 class_correct = list(0. for i in range(2)) class_total = list(0. 
for i in range(2)) model.eval() # iterate over test data for data, target in test_loader: # print(target) # move tensors to GPU if CUDA is available if train_on_gpu: data, target = data.cuda(), target.cuda() # forward pass: compute predicted outputs by passing inputs to the model output = model(data) # print(output) # calculate the batch loss loss = criterion(output, target) # update test loss test_loss += loss.item()*data.size(0) # convert output probabilities to predicted class _, pred = torch.max(output, 1) # print(pred) # compare predictions to true label correct_tensor = pred.eq(target.data.view_as(pred)) correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(correct_tensor.cpu().numpy()) # calculate test accuracy for each object class for i in range(2): # print(i) label = target.data[i] class_correct[label] += correct[i].item() class_total[label] += 1 # average test loss test_loss = test_loss/len(test_loader.dataset) print('Test Loss: {:.6f}\n'.format(test_loss)) for i in range(2): if class_total[i] > 0: print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % ( classes[i], 100 * class_correct[i] / class_total[i], np.sum(class_correct[i]), np.sum(class_total[i]))) else: print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i])) print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % ( 100. * np.sum(class_correct) / np.sum(class_total), np.sum(class_correct), np.sum(class_total))) ###Output _____no_output_____ ###Markdown *** Visualize Model PerformanceMost likely, we will want to see how our model performed throughout each epoch. In this plot, we are visualizing training and validation loss. ###Code plt.plot(range(epoch), training_vis) plt.scatter(range(epoch), training_vis) plt.scatter(range(epoch), valid_vis) plt.plot(range(epoch), valid_vis) plt.savefig('Resnet_77_scatter.svg') ###Output _____no_output_____ ###Markdown **Save the Training and Validation Losses** ###Code np.savetxt('Resnet_77.txt', np.array([training_vis, valid_vis])) ###Output _____no_output_____ ###Markdown Visualize MisclassifiedSimilarly, we will want to see which types of images it correctly classified. In our case, we plot a randomly sampled batch of our test set and place the correct label in parentheses, and the predicted without. ###Code # obtain one batch of test images dataiter = iter(test_loader) images, labels = dataiter.next() # get sample outputs output = model(images) # convert output probabilities to predicted class _, preds = torch.max(output, 1) # prep images for display images = images.numpy() labels = labels.numpy() print(images.shape) # plot the images in the batch, along with predicted and true labels fig = plt.figure(figsize=(25, 4)) for idx in np.arange(20): ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[]) ax.imshow(np.swapaxes((images[idx]),axis1=0, axis2=2)) ax.set_title("{} ({})".format(classes[preds[idx]], classes[labels[idx]]), color=("green" if classes[preds[idx]]==classes[labels[idx]] else "red")) plt.savefig('rESNET_miss.pdf') ###Output _____no_output_____ ###Markdown Bonus Sanity Check VisualizationHere, we plot the `RGB` channels of the image, but with a twist. We plot the corresponding RGB value inside the color. Super cool sanity check and overall visualization. 
###Code rgb_img = np.squeeze(images[3]) channels = ['red channel', 'green channel', 'blue channel'] fig = plt.figure(figsize = (36, 36)) for idx in np.arange(rgb_img.shape[0]): ax = fig.add_subplot(1, 3, idx + 1) img = rgb_img[idx] ax.imshow(img, cmap='gray') ax.set_title(channels[idx]) width, height = img.shape thresh = img.max()/2.5 for x in range(width): for y in range(height): val = round(img[x][y],2) if img[x][y] !=0 else 0 ax.annotate(str(val), xy=(y,x), horizontalalignment='center', verticalalignment='center', size=8, color='white' if img[x][y]<thresh else 'black') ###Output _____no_output_____
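###Markdown Closing note: the checkpoint written by the training loop can be restored as sketched below, which fills in the commented-out "Load Model" cell above. The model was saved while wrapped in `nn.DataParallel`, so wrapping it again before `load_state_dict` keeps the parameter names consistent; a GPU is assumed, as in the rest of this notebook. ###Code
# Sketch: restore the best checkpoint written by the training loop above.
model = fbresnet152(num_classes=2)   # same architecture as before (moved to GPU)
model = nn.DataParallel(model)       # match the wrapper used when saving
model.load_state_dict(torch.load('ResNet35656_gan-detector_after82.pt'))
model.eval()
###Output _____no_output_____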
Core/NotebookToScript/ImageBlur.ipynb
###Markdown This notebook blurs an image using a Gaussian blur. We'll use numpy, the Python Image Library, and Matplotlib for this. You'll need to make sure these are installed on the Notebook server, or if you download the notebook or script, that they are installed on the machine you try to run on. To get PIL, the current package name is "pillow" ###Code import numpy as np from PIL import Image import matplotlib.pyplot as plt ###Output _____no_output_____ ###Markdown First we're going to load in an image, convert it to greyscale (a single channel), and resize it for convenience. I am using an image of a penguin (Photo by Pixabay from Pexels) but any jpg will work. ###Code # CHANGE the image file on this line for a different image img = Image.open('penguin.jpg').convert('L') # We'll work with a resized version of this image # If your image is not square, it will be squashed dim = 2000 img_resized = img.resize((dim,dim)) # ... and convert it into a numpy array of floats img_data = np.asarray(img_resized,dtype=float) # Show the image plt.figure(figsize = [6, 6]) plt.imshow(img_data,cmap='gray'); ###Output _____no_output_____ ###Markdown A simple image blur is done by convolving the image with a Gaussian, so we need to create one of the right size. Convolution is most easily done by converting to Fourier space and multiplying, then converting back into real space. ###Code #CHANGE this line to adjust blur amount. Higher is more blurred width = 0.2 # Define a Gaussian in 1D on a grid with the same number of points as the image domain = np.linspace(-5,5,dim) gauss = np.exp(-0.5*domain**2/(width*width)) # Roll this around the 1D boundary so that the Gaussian is centered on grid 0,0 shift = int(dim/2) gauss = np.roll(gauss,shift) # Turn into a 2D Gaussian gauss2D = gauss[:,np.newaxis] * gauss[np.newaxis,:] # Fourier transform the image and the Gaussian using standard numpy functions img_fft = np.fft.fft2(img_data) gauss_fft = np.fft.fft2(gauss2D) # Multiplication in Fourier space img_conv = img_fft*gauss_fft # Transform the result back into real space using the inverse transform img_ifft = np.fft.ifft2(img_conv) # Display the result of blurring the picture plt.figure(figsize = [6, 6]) plt.imshow(img_ifft.real,cmap='gray'); ###Output _____no_output_____
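###Markdown To keep the result on disk (a sketch; the output file name is just an example): dividing by the sum of the Gaussian normalises the brightness, since the kernel above is not normalised, and the pixels are clipped to the 8-bit range before saving with PIL. ###Code
# Normalise the brightness (the Gaussian above does not sum to 1), clip to the
# 8-bit range and save the blurred image. The output file name is just an example.
blurred = np.clip(img_ifft.real / gauss2D.sum(), 0, 255).astype(np.uint8)
Image.fromarray(blurred).save('penguin_blurred.jpg')
###Output _____no_output_____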
Interactive-Sky.ipynb
###Markdown Simple star field. Moving your mouse pointer across the display below will rotate the celestial sphere, showing the brightest stars down to magnitude 5. ###Code %load_ext autoreload %autoreload 2 from lib.downloader import download_all download_all() from lib import sky sky.starfield() ###Output _____no_output_____
class3/adam.ipynb
###Markdown ###Code # 利用鸢尾花数据集,实现前向传播、反向传播,可视化loss曲线 # 导入所需模块 import tensorflow as tf from sklearn import datasets from matplotlib import pyplot as plt import numpy as np import time ##1## # 导入数据,分别为输入特征和标签 x_data = datasets.load_iris().data y_data = datasets.load_iris().target # 随机打乱数据(因为原始数据是顺序的,顺序不打乱会影响准确率) # seed: 随机数种子,是一个整数,当设置之后,每次生成的随机数都一样(为方便教学,以保每位同学结果一致) np.random.seed(116) # 使用相同的seed,保证输入特征和标签一一对应 np.random.shuffle(x_data) np.random.seed(116) np.random.shuffle(y_data) tf.random.set_seed(116) # 将打乱后的数据集分割为训练集和测试集,训练集为前120行,测试集为后30行 x_train = x_data[:-30] y_train = y_data[:-30] x_test = x_data[-30:] y_test = y_data[-30:] # Python的每个对象都分为可变和不可变,主要的核心类型中,数字、字符串、元组是不可变的,列表、字典是可变的。 # 对不可变类型的变量重新赋值,实际上是重新创建一个不可变类型的对象, # 并将原来的变量重新指向新创建的对象(如果没有其他变量引用原有对象的话(即引用计数为0),原有对象就会被回收) # 执行 i += 1 时,内存地址都会变化,因为int 类型是不可变的。 print(hex(id(x_data))) print(hex(id(x_train))) print(hex(id(x_test))) # 转换x的数据类型,否则后面矩阵相乘时会因数据类型不一致报错 x_train = tf.cast(x_train, tf.float32) x_test = tf.cast(x_test, tf.float32) # from_tensor_slices函数使输入特征和标签值一一对应。(把数据集分批次,每个批次batch组数据) train_db = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(32) test_db = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32) # 生成神经网络的参数,4个输入特征,故输入层为4个输入节点;因为3分类,故输出层为3个神经元 # 用tf.Variable()标记参数可训练 # 使用seed使每次生成的随机数相同(方便教学,使大家结果都一致,在现实使用时不写seed) w1 = tf.Variable(tf.random.truncated_normal([4, 3], stddev=0.1, seed=1)) b1 = tf.Variable(tf.random.truncated_normal([3], stddev=0.1, seed=1)) lr = 0.1 # 学习率为0.1 train_loss_results = [] # 将每轮的loss记录在此列表中,为后续画loss曲线提供数据 test_acc = [] # 将每轮的acc记录在此列表中,为后续画acc曲线提供数据 epoch = 500 # 循环500轮 loss_all = 0 # 每轮分4个step,loss_all记录四个step生成的4个loss的和 # sgdm ########################################################################## # m_w, m_b = 0, 0 # beta = 0.9 ########################################################################## # adagrad ########################################################################## #v_w, v_b = 0, 0 ########################################################################## # rmsprop ########################################################################## # v_w, v_b = 0, 0 # beta = 0.9 ########################################################################## # adam ########################################################################## m_w, m_b = 0, 0 v_w, v_b = 0, 0 beta1, beta2 = 0.9, 0.999 delta_w, delta_b = 0, 0 global_step = 0 ########################################################################## # 训练部分 now_time = time.time() ##2## for epoch in range(epoch): # 数据集级别的循环,每个epoch循环一次数据集 for step, (x_train, y_train) in enumerate(train_db): # batch级别的循环 ,每个step循环一个batch ########################################################################## global_step += 1 ########################################################################## with tf.GradientTape() as tape: # with结构记录梯度信息 y = tf.matmul(x_train, w1) + b1 # 神经网络乘加运算 y = tf.nn.softmax(y) # 使输出y符合概率分布(此操作后与独热码同量级,可相减求loss) y_ = tf.one_hot(y_train, depth=3) # 将标签值转换为独热码格式,方便计算loss和accuracy loss = tf.reduce_mean(tf.square(y_ - y)) # 采用均方误差损失函数mse = mean(sum(y-out)^2) loss_all += loss.numpy() # 将每个step计算出的loss累加,为后续求loss平均值提供数据,这样计算的loss更准确 # 计算loss对各个参数的梯度 grads = tape.gradient(loss, [w1, b1]) # 实现梯度更新 w1 = w1 - lr * w1_grad b = b - lr * b_grad # w1.assign_sub(lr * grads[0]) # 参数w1自更新 # b1.assign_sub(lr * grads[1]) # 参数b自更新 ########################################################################## # sgd-momentun # m_w = beta * m_w + (1 - beta) * grads[0] # m_b = beta * m_b + (1 - beta) * grads[1] # 
w1.assign_sub(lr * m_w) # b1.assign_sub(lr * m_b) ########################################################################## ########################################################################## #adagrad # v_w += tf.square(grads[0]) # v_b += tf.square(grads[1]) # w1.assign_sub(lr * grads[0] / tf.sqrt(v_w)) # b1.assign_sub(lr * grads[1] / tf.sqrt(v_b)) ########################################################################## ########################################################################## # rmsprop # v_w = beta * v_w + (1 - beta) * tf.square(grads[0]) # v_b = beta * v_b + (1 - beta) * tf.square(grads[1]) # w1.assign_sub(lr * grads[0] / tf.sqrt(v_w)) # b1.assign_sub(lr * grads[1] / tf.sqrt(v_b)) ########################################################################## ########################################################################## # adam m_w = beta1 * m_w + (1 - beta1) * grads[0] m_b = beta1 * m_b + (1 - beta1) * grads[1] v_w = beta2 * v_w + (1 - beta2) * tf.square(grads[0]) v_b = beta2 * v_b + (1 - beta2) * tf.square(grads[1]) m_w_correction = m_w / (1 - tf.pow(beta1, int(global_step))) m_b_correction = m_b / (1 - tf.pow(beta1, int(global_step))) v_w_correction = v_w / (1 - tf.pow(beta2, int(global_step))) v_b_correction = v_b / (1 - tf.pow(beta2, int(global_step))) w1.assign_sub(lr * m_w_correction / tf.sqrt(v_w_correction)) b1.assign_sub(lr * m_b_correction / tf.sqrt(v_b_correction)) ########################################################################## # 每个epoch,打印loss信息 # print("Epoch {}, loss: {}".format(epoch, loss_all / 4)) train_loss_results.append(loss_all / 4) # 将4个step的loss求平均记录在此变量中 loss_all = 0 # loss_all归零,为记录下一个epoch的loss做准备 # 测试部分 # total_correct为预测对的样本个数, total_number为测试的总样本数,将这两个变量都初始化为0 total_correct, total_number = 0, 0 for x_test, y_test in test_db: # 使用更新后的参数进行预测 y = tf.matmul(x_test, w1) + b1 y = tf.nn.softmax(y) pred = tf.argmax(y, axis=1) # 返回y中最大值的索引,即预测的分类 # 将pred转换为y_test的数据类型 pred = tf.cast(pred, dtype=y_test.dtype) # 若分类正确,则correct=1,否则为0,将bool型的结果转换为int型 correct = tf.cast(tf.equal(pred, y_test), dtype=tf.int32) # 将每个batch的correct数加起来 correct = tf.reduce_sum(correct) # 将所有batch中的correct数加起来 total_correct += int(correct) # total_number为测试的总样本数,也就是x_test的行数,shape[0]返回变量的行数 total_number += x_test.shape[0] # 总的准确率等于total_correct/total_number acc = total_correct / total_number test_acc.append(acc) # print("Test_acc:", acc) # print("--------------------------") total_time = time.time() - now_time ##3## print("total_time", total_time) ##4## # 绘制 loss 曲线 plt.title('Loss Function Curve') # 图片标题 plt.xlabel('Epoch') # x轴变量名称 plt.ylabel('Loss') # y轴变量名称 plt.plot(train_loss_results, label="$Loss$") # 逐点画出trian_loss_results值并连线,连线图标是Loss plt.legend() # 画出曲线图标 plt.show() # 画出图像 # 绘制 Accuracy 曲线 plt.title('Acc Curve') # 图片标题 plt.xlabel('Epoch') # x轴变量名称 plt.ylabel('Acc') # y轴变量名称 plt.plot(test_acc, label="$Accuracy$") # 逐点画出test_acc值并连线,连线图标是Accuracy plt.legend() plt.show() ###Output _____no_output_____
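###Markdown For comparison (a sketch, not part of the original exercise): the hand-written first/second moment estimates and bias correction above are exactly what `tf.keras.optimizers.Adam` maintains internally, so the whole update block could be replaced by an optimizer object plus `apply_gradients`. ###Code
# Equivalent update with the built-in optimizer (sketch).
opt = tf.keras.optimizers.Adam(learning_rate=lr, beta_1=0.9, beta_2=0.999)
# Inside the training step, right after grads = tape.gradient(loss, [w1, b1]):
#     opt.apply_gradients(zip(grads, [w1, b1]))
###Output _____no_output_____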
Python/2. Python Basics (cont.)/6.Thoroughly Bringing It Together From Scratch Each - Data Cleaning, Parsing, Visualizing in Python/Introduction to Network Science and Analysis.ipynb
###Markdown Functions ###Code def create_dir_save_file(dir_path: Path, url: str): """ Check if the path exists and create it if it does not. Check if the file exists and download it if it does not. """ if not dir_path.parents[0].exists(): dir_path.parents[0].mkdir(parents=True) print(f'Directory Created: {dir_path.parents[0]}') else: print('Directory Exists') if not dir_path.exists(): r = requests.get(url, allow_redirects=True) open(dir_path, 'wb').write(r.content) print(f'File Created: {dir_path.name}') else: print('File Exists') data_dir = Path('data/') images_dir = Path('images/') ###Output _____no_output_____ ###Markdown Datasets ###Code twitter = 'data/ego-twitter.p' github = 'data/github_users.p' datasets = [twitter, github] data_paths = list() for data in datasets: file_name = data.split('/')[-1].replace('?raw=true', '') data_path = data_dir / file_name create_dir_save_file(data_path, data) data_paths.append(data_path) ###Output Directory Exists File Exists Directory Exists File Exists ###Markdown Data ###Code T = nx.read_gpickle(data_paths[0]) Gh = nx.read_gpickle(data_paths[1]) ###Output _____no_output_____ ###Markdown Introduction to networksIn this chapter, you'll be introduced to fundamental concepts in network analytics while exploring a real-world Twitter network dataset. You'll also learn about [NetworkX][1], a library that allows you to manipulate, analyze, and model graph data. You'll learn about the different types of graphs and how to rationally visualize them. [1]: https://networkx.github.io/documentation/stable/index.html Networks Examples of Networks1. Social - In a social network, we're modeling the relationship between people.1. Transportation - In a transportation network, we're modeling the connectivity between locations, as determined by the roads or flight paths connection them.- Networks are a useful tool for modeling relationships between entities. Insights1. Important entities: influencers in social networks1. Pathfinding: most efficient transportation path1. Clustering: finding communities- By modeling the data as a network, you can gain insight into what entities (or nodes) are important, such as broadcasters or influencers in a social network.- You can start to think about optimizing transportation between cities.- Leverage the network structure to find communities in the network. Network Structure![network_structure][1]- Networks are described by two sets of items, which form a "network". - Nodes - Edges- In mathematical terms, this is a graph.- Nodes and edges can have metadata associated with them. - Lets say there are two friends, Hugo and myself, who met on May 21, 2016. - The nodes may be "Hugo" and myself, with metadata stored in a `key-value` pair as `id` and `age`. - The friendship is represented as a line between two nodes, and may have metadata such as `date`, which represents the date we first met. 
![social graph][2] NetworkX- This python library allows us to manipulate, analyze, and model, graph data.- Using `nx.Graph()`, we initialize an empty graph, to which we can add nodes and edges- The integers 1, 2, and 3 can be entered as nodes, using the `add_nodes_from` method, passing in the list `[1, 2, 3]`, as an argument.- Use the `.nodes` method to see the nodes present in the graph.- Similarly, use `.add_edges` and `.edges` to add and see the edges present in the graph.- Edges between nodes are represented as a tuple, in which each tuple shows the nodes that are present on that edge.```pythonimport networkx as nxG = nx.Graph()G.add_nodes_from([1, 2, 3])G.nodes()>>> [1, 2, 3]G.add_edge(1, 2)G.edges()>>> [(1, 2)]```- Metadata can be stored on the graph as well.- For example, I can add to the node `1`, a `label` key with the value `blue`, just as I would assign a value to the key of a dictionary.- The node list can be retrieved with `G.nodes()` and passing the `data=True` parameter. - This returns a list of tuples, in which the first element of each tuple is the node, and the second element is a dictionary, in which the `key-value` pairs correspond to the metadata.```pythonG.node[1]['label'] = 'blue'G.nodes(data=True)>>>[(1, {'label': 'blue}), (2, {}), (3, {})]```- `networkx` as provides basic drawing functionality, using the `nx.draw()` function, which takes in a graph `G` as an argument.- In the IPyhton shell, you'll also have to call `plt.show()` function in order to display the graph screen.- With this graph, the `nx.draw()` function will draw to screen what we call a **node-link diagram** rendering of the graph.```pythonimport matplotlib.pyplot as pltnx.draw(G)plt.show()```- The first set of exercises we'll be doing is essentially exploratory data analysis on graphs. Other Resources1. [Wikipedia: Network Theory][3] [1]: https://raw.githubusercontent.com/trenton3983/DataCamp/master/Images/2020-05-21_intro_to_network_analysis_in_python/network_structure_1.JPG [2]: https://raw.githubusercontent.com/trenton3983/DataCamp/master/Images/2020-05-21_intro_to_network_analysis_in_python/network_structure_2.JPG [3]: https://en.wikipedia.org/wiki/Network_theory What is a network?Let's think again about examples of networks. Which of the following data is least easily modeled as a network?- ~~Airplane transportation.~~- **Phone numbers in a telephone directory.**- ~~Co-authorship of papers.~~- ~~Atoms in a molecule.~~1. Compared to the other options, it would not be as easy to model phone numbers in a telephone directory as a network. Basics of NetworkX API, using Twitter networkTo get you up and running with the NetworkX API, we will run through some basic functions that let you query a Twitter network that has been pre-loaded for you and is available in the IPython Shell as `T`. The Twitter network comes from [KONECT][1], and shows a snapshot of a subset of Twitter users. It is an anonymized Twitter network with metadata.You're now going to use the NetworkX API to explore some basic properties of the network, and are encouraged to experiment with the data in the IPython Shell.Wait for the IPython shell to indicate that the graph that has been preloaded under the variable name T (representing a Twitter network), and then answer the following question:What is the size of the graph `T`, the type of `T.nodes()`, and the data structure of the third element of the last edge listed in `T.edges(data=True)`? The `len()` and `type()` functions will be useful here. 
To access the last entry of `T.edges(data=True)`, you can use `list(T.edges(data=True))[-1]`.- **23369, `networkx.classes.reportviews.NodeView`, `dict`.**- ~~32369, `tuple`, `datetime`.~~- ~~23369, `networkx.classes.reportviews.NodeView`, `datetime`.~~- ~~22339, `dict`, `dict`.~~ [1]: http://konect.uni-koblenz.de/ ###Code print(len(T)) print(type(T.nodes())) print(list(T.edges(data=True))[-1]) print(type(list(T.edges(data=True))[-1][2])) ###Output 23369 <class 'networkx.classes.reportviews.NodeView'> (23324, 23336, {'date': datetime.date(2010, 9, 20)}) <class 'dict'> ###Markdown Basic drawing of a network using NetworkXNetworkX provides some basic drawing functionality that works for small graphs. We have selected a subset of nodes from the graph for you to practice using NetworkX's drawing facilities. It has been pre-loaded as `T_sub`.**Instructions**- Import `matplotlib.pyplot` as `plt` and `networkx` as `nx`.- Draw `T_sub` to the screen by using the `nx.draw()` function, and don't forget to also use `plt.show()` to display it. Creating `T_sub`- Use [DiGraph][1] instead of [Graph][2]- [Why don't edges added with G.add_edges_from(), match G.edges()?][3] [1]: https://networkx.github.io/documentation/networkx-1.10/reference/classes.digraph.htmlnetworkx.DiGraph [2]: https://networkx.github.io/documentation/networkx-1.10/reference/classes.graph.htmlnetworkx.Graph [3]: https://stackoverflow.com/questions/62032404/why-dont-edges-added-with-g-add-edges-from-match-g-edges ###Code T_sub = nx.DiGraph() edges_from_T = [x for x in T.edges(list(range(50)), data=True) if x[0] in [1, 16, 18, 19, 28, 36, 37, 39, 42, 43, 45] if x[1] < 50] T_sub.add_edges_from(edges_from_T) plt.figure(figsize=(8, 8)) nx.draw(T_sub, with_labels=True) plt.show() ###Output _____no_output_____ ###Markdown Queries on a graphNow that you know some basic properties of the graph and have practiced using NetworkX's drawing facilities to visualize components of it, it's time to explore how you can query it for nodes and edges. Specifically, you're going to look for "nodes of interest" and "edges of interest". To achieve this, you'll make use of the `.nodes()` and `.edges()` methods that Eric went over in the video. The `.nodes()` method returns a list of nodes, while the `.edges()` method returns a list of tuples, in which each tuple shows the nodes that are present on that edge. Recall that passing in the keyword argument data=True in these methods retrieves the corresponding metadata associated with the nodes and edges as well.You'll write list comprehensions to effectively build these queries in one line. For a refresher on list comprehensions, refer to [Part 2][1] of DataCamp's Python Data Science Toolbox course. Here's the recipe for a list comprehension:`[` *output expression* `for` *iterator variable* `in` *iterable* `if` *predicate expression* `]`.You have to fill in the `_iterable_` and the `_predicate expression_`. Feel free to prototype your answer by exploring the graph in the IPython Shell before submitting your solution.**Instructions**- Use a list comprehension to get a **list of nodes** from the graph `T` that have the `'occupation'` label of `'scientist'`. - The _output expression_ `n` has been specified for you, along with the _iterator variables_ `n` and `d`. Your task is to fill in the _iterable_ and the _conditional expression_. - Use the `.nodes()` method of `T` access its nodes, and be sure to specify `data=True` to obtain the metadata for the nodes. - The iterator variable `d` is a dictionary. 
The key of interest here is `'occupation'` and value of interest is `'scientist'`.- Use a list comprehension to get a **list of edges** from the graph T that were formed for at least 6 years, i.e., from before **1 Jan 2010**. - Your task once again is to fill in the _iterable_ and _conditional expression_. - Use the `.edges()` method of `T` to access its edges. Be sure to obtain the metadata for the edges as well. - The dates are stored as `datetime.date` objects in the metadata dictionary `d`, under the key `'date'`. To access the date 1 Jan 2009, for example, the dictionary value would be `date(2009, 1, 1)`. [1]: https://www.datacamp.com/courses/python-data-science-toolbox-part-2 ###Code # Use a list comprehension to get the nodes of interest: noi noi = [n for n, d in T.nodes(data=True) if d['occupation'] == 'scientist'] # Use a list comprehension to get the edges of interest: eoi eoi = [(u, v) for u, v, d in T.edges(data=True) if d['date'].year < 2010] print(noi[:10]) print(eoi[:10]) ###Output [5, 9, 13, 15, 17, 19, 20, 22, 23, 27] [(1, 5), (1, 9), (1, 13), (1, 15), (1, 16), (1, 17), (1, 18), (1, 19), (1, 24), (1, 27)] ###Markdown Types of Graphs1. Undirected graphs - Facebook - They are comprised of edges that don't have any inherent directionality associated with them. - With Facebook, for example, when one user befriends another, the two are automatically connected with an edge. - This is commonly drawn as a line with no arrows between two circles. - Undirected graphs have the type `Graph` ```pythonimport networkx as nxG = nx.Graph()type(G)>>> networkx.classes.graph.Graph``` 2. Directed graphs - Twitter - This is because of the nature of how users interact with one another. - For example, one user may follow another, but that other user may not follow back. - As such, there's an inherent directionality associated with the graph - Directed grasphs have the type `DiGraph` ```pythonD = nx.DiGraph()type(D)>>> networkx.classes.digraph.DiGraph```3. Muti-edge (Directed) graphs - Graph in which there are multiple edges permitted between the nodes - For example, we may want to model trips between bike sharing stations - Each trip may be one edge between the pair of stations - Sometimes, for practical reasons, it may be too memory-intensive too model multiple edges per pair of nodes, and so one may choose to collapse the edges into a single edge that contains a metadata summary of the original. - For example, we may want to collapse these three edges into a single one and give them a _weight_ metadata with the value _3_, indicating that it was originally 3 edges between the pair of nodes.![multi-edge][1]```pythonM = nx.MultiGraphtype(M)>>> networkx.classes.multigraph.MultiGraphMD = nx.MultiDiGraphtype(MD)>>> networkx.classes.multidigraph.MultiDiGraph```4. Self-loops - Self-loops can be used in certain scenarios, such as in bike sharing data, where a trip begins at a station and ends at the same station. [1]: https://raw.githubusercontent.com/trenton3983/DataCamp/master/Images/2020-05-21_intro_to_network_analysis_in_python/multi-edge.JPG Checking the un/directed status of a graphIn the video, Eric described to you different types of graphs. Which type of graph do you think the Twitter network data you have been working with corresponds to? Use Python's built-in `type()` function in the IPython Shell to find out. 
The network, as before, has been pre-loaded as `T`.Of the four below choices below, which one corresponds to the type of graph that `T` is?- ~~Undirected Graph.~~- **Directed Graph.**- ~~Undirected Multi-Edge Graph.~~- ~~Directed Multi-Edge Graph.~~ Specifying a weight on edgesWeights can be added to edges in a graph, typically indicating the "strength" of an edge. In NetworkX, the weight is indicated by the `'weight'` key in the metadata dictionary.Before attempting the exercise, use the IPython Shell to access the dictionary metadata of `T` and explore it, for instance by running the commands `T.edges[1, 10]` and then `T.edges[10, 1]`. Note how there's only one field, and now you're going to add another field, called `'weight'`.**Instructions**- Set the `'weight'` attribute of the edge between node `1` and `10` of `T` to be equal to `2`. Refer to the following template to set an attribute of an edge: `network_name.edges[node1, node2]['attribute'] = value`. Here, the `'attribute'` is `'weight'`.- Set the weight of every edge involving node `293` to be equal to `1.1`. To do this: - Using a `for` loop, iterate over all the edges of `T`, including the `metadata`. - If `293` is involved in the list of nodes `[u, v]`: - Set the weight of the edge between `u` and `v` to be `1.1`. ###Code # Set the weight of the edge T.edges[1, 10]['weight'] = 2 # Iterate over all the edges (with metadata) for u, v, d in T.edges(data=True): # Check if node 293 is involved if 293 in [u, v]: # Set the weight to 1.1 T.edges[u, v]['weight'] = 1.1 ###Output _____no_output_____ ###Markdown Checking whether there are self-loops in the graphAs Eric discussed, NetworkX also allows edges that begin and end on the same node; while this would be non-intuitive for a social network graph, it is useful to model data such as trip networks, in which individuals begin at one location and end in another.It is useful to check for this before proceeding with further analyses, and NetworkX graphs provide a method for this purpose: `.number_of_selfloops().`In this exercise as well as later ones, you'll find the `assert` statement useful. An `assert`-ions checks whether the statement placed after it evaluates to True, otherwise it will throw an `AssertionError`.To begin, use the `.number_of_selfloops()` method on `T` in the IPython Shell to get the number of edges that begin and end on the same node. A number of self-loops have been synthetically added to the graph. Your job in this exercise is to write a function that returns these edges.**Instructions**- Define a function called `find_selfloop_nodes()` which takes one argument: `G`. - Using a `for` loop, iterate over all the edges in `G` (excluding the metadata). - If node `u` is equal to node `v`: - Append `u` to the list `nodes_in_selfloops`. - Return the list `nodes_in_selfloops`.- Check that the number of self loops in the graph equals the number of nodes in self loops. This has been done for you, so hit 'Submit Answer' to see the result! ###Code # Define find_selfloop_nodes() def find_selfloop_nodes(G): """ Finds all nodes that have self-loops in the graph G. 
""" nodes_in_selfloops = [] # Iterate over all the edges of G for u, v in G.edges(): # Check if node u and node v are the same if u == v: # Append node u to nodes_in_selfloops nodes_in_selfloops.append(u) return nodes_in_selfloops # Check whether number of self loops equals the number of nodes in self loops # number_of_selfloops() is deprecated assert len(list(nx.selfloop_edges(T))) == len(find_selfloop_nodes(T)) len(list(nx.selfloop_edges(T))) ###Output _____no_output_____ ###Markdown **There are 42 nodes in T that have self-loops.** Network Visualization1. Matrix plots - ![matrix_plots][1] - Nodes are the rows and columns of a matrix, and cells are filled in according to whether an edge exists between the pairs of nodes. - In an undirected graph, the matrix is symmetrical around the diagonal, which is highlighted in gray. - Figure 3: edge (A, B) is equivalent to edge (B, A). Highlighted in yellow. - Figure 4: edge (A, C) is equivalent to edge (C, A), because there's no directionality associated with that edge. - With directed graphs, the matrix representation is not necessarily going to be symmetrical. - Figure 5: there's a bidirectional edge between A and C, but only an edge from A to B and not B to A. - (A, B) will be filled in, but not (B, A) - If the nodes are ordered along the rows and columns, such that neighbors are listed close to one another, then a matrix plot can be used to visualized clusters, or communities, of nodes.1. Arc plots - ![arc_plots][2] - Are a transformation of the node-link diagram layout, in which nodes are ordered along one axis of the plot, and edges are drawn using circular arcs from one node to another. - If the nodes are ordered according to some sortable rule, such as age in a social network of users, or otherwise grouped together, by geographic location in map for a transportation network, then it will be possible to visualized the relationship between connectivity and the sorted (or grouped) property. - Arc plots are a good starting point for visualizing a network, as it forms the basis of the later plots that we'll take a look at.1. Circos plots - ![circo_plot][3] - Are a transformation of the Arc Plot, such that two ends of the Arc Plot are joined together into a circle. - Were originally designed for use in genomics, and you can think of them as an aesthetic and compact alternative to Arc Plots.- You will be using [nxviz][4] to plot the graphs.```pythonimport nxviz as nvimport matplotlib.pyplot as pltap = nv.ArcPlot(G)ap.draw()plt.show()``` [1]: https://raw.githubusercontent.com/trenton3983/DataCamp/master/Images/2020-05-21_intro_to_network_analysis_in_python/matrix_plots.JPG [2]: https://raw.githubusercontent.com/trenton3983/DataCamp/master/Images/2020-05-21_intro_to_network_analysis_in_python/arc_plots.JPG [3]: https://raw.githubusercontent.com/trenton3983/DataCamp/master/Images/2020-05-21_intro_to_network_analysis_in_python/circo_plots.JPG [4]: https://github.com/ericmjl/nxviz Visualizing using Matrix plotsIt is time to try your first "fancy" graph visualization method: a matrix plot. To do this, `nxviz` provides a `MatrixPlot` object.`nxviz` is a package for visualizing graphs in a rational fashion. Under the hood, the `MatrixPlo`t utilizes `nx.to_numpy_matrix(G)`, which returns the matrix form of the graph. Here, each node is one column and one row, and an edge between the two nodes is indicated by the value 1. 
In doing so, however, only the `weight` metadata is preserved; all other metadata is lost, as you'll verify using an `assert` statement.A corresponding `nx.from_numpy_matrix(A)` allows one to quickly create a graph from a NumPy matrix. The default graph type is `Graph()`; if you want to make it a `DiGraph()`, that has to be specified using the `create_using` keyword argument, e.g. (`nx.from_numpy_matrix(A, create_using=nx.DiGraph`)).One final note, `matplotlib.pyplot` and `networkx` have already been imported as `plt` and `nx`, respectively, and the graph `T` has been pre-loaded. For simplicity and speed, we have sub-sampled only 100 edges from the network.**Instructions**- Import `nxviz` as `nv`.- Plot the graph `T` as a matrix plot. To do this: - Create the `MatrixPlot` object called `m` using the `nv.MatrixPlot()` function with `T` passed in as an argument. - Draw the `m` to the screen using the `.draw()` method. - Display the plot using `plt.show()`.- Convert the graph to a matrix format, and then convert the graph back to the NetworkX form from the matrix as a directed graph.- Check that the `category` metadata field is lost from each node. ###Code data = [(13829, 1386, {'date': date(2012, 3, 13)}), (3086, 669, {'date': date(2009, 3, 1)}), (3086, 3100, {'date': date(2013, 8, 10)}), (3086, 3117, {'date': date(2007, 12, 20)}), (4111, 4133, {'date': date(2007, 10, 28)}), (4111, 12582, {'date': date(2013, 6, 1)}), (6163, 7331, {'date': date(2013, 2, 3)}), (6163, 7336, {'date': date(2007, 9, 12)}), (6163, 6168, {'date': date(2008, 4, 2)}), (6163, 1326, {'date': date(2009, 2, 16)}), (4636, 4666, {'date': date(2012, 3, 20)}), (543, 12123, {'date': date(2014, 8, 12)}), (36, 24, {'date': date(2009, 4, 23)}), (12836, 12856, {'date': date(2014, 2, 21)}), (10790, 10854, {'date': date(2009, 8, 11)}), (9262, 9338, {'date': date(2007, 11, 11)}), (4160, 4161, {'date': date(2012, 4, 14)}), (4160, 3681, {'date': date(2013, 8, 10)}), (7746, 3396, {'date': date(2007, 2, 23)}), (7746, 543, {'date': date(2011, 7, 14)}), (7746, 5073, {'date': date(2012, 2, 25)}), (7746, 1130, {'date': date(2013, 8, 4)}), (7746, 3265, {'date': date(2008, 11, 8)}), (19013, 21248, {'date': date(2009, 3, 21)}), (19013, 21253, {'date': date(2007, 8, 17)}), (14916, 14930, {'date': date(2012, 4, 10)}), (5717, 5752, {'date': date(2009, 2, 2)}), (3159, 3176, {'date': date(2011, 8, 15)}), (3159, 3177, {'date': date(2008, 2, 1)}), (3159, 3193, {'date': date(2014, 1, 2)}), (1627, 1648, {'date': date(2011, 11, 7)}), (19550, 19570, {'date': date(2009, 11, 13)}), (5215, 5217, {'date': date(2011, 1, 7)}), (4198, 16203, {'date': date(2014, 12, 1)}), (3177, 3159, {'date': date(2010, 6, 2)}), (3177, 3176, {'date': date(2014, 11, 10)}), (1134, 2406, {'date': date(2011, 7, 10)}), (1134, 543, {'date': date(2011, 10, 20)}), (4206, 21962, {'date': date(2014, 4, 17)}), (6770, 2671, {'date': date(2010, 5, 18)}), (11384, 4955, {'date': date(2007, 2, 12)}), (11384, 2406, {'date': date(2007, 10, 13)}), (11384, 11402, {'date': date(2009, 11, 27)}), (10877, 10917, {'date': date(2014, 7, 17)}), (10877, 7886, {'date': date(2009, 1, 4)}), (10877, 36, {'date': date(2008, 10, 25)}), (10877, 10936, {'date': date(2008, 12, 26)}), (10877, 1326, {'date': date(2012, 1, 19)}), (10373, 1386, {'date': date(2011, 4, 21)}), (10373, 2501, {'date': date(2012, 8, 13)}), (3213, 3214, {'date': date(2008, 4, 19)}), (3213, 3265, {'date': date(2007, 8, 8)}), (3213, 3266, {'date': date(2009, 1, 4)}), (5626, 16654, {'date': date(2011, 3, 5)}), (20122, 20177, {'date': date(2014, 
12, 21)}), (667, 669, {'date': date(2014, 2, 3)}), (667, 747, {'date': date(2008, 2, 23)}), (161, 18159, {'date': date(2014, 9, 22)}), (7331, 6163, {'date': date(2012, 10, 15)}), (7331, 6168, {'date': date(2012, 3, 17)}), (7331, 7606, {'date': date(2008, 11, 7)}), (7331, 24, {'date': date(2009, 9, 21)}), (7331, 36, {'date': date(2010, 5, 26)}), (10917, 10877, {'date': date(2011, 9, 7)}), (10917, 24, {'date': date(2014, 10, 16)}), (10917, 7886, {'date': date(2011, 7, 13)}), (10917, 36, {'date': date(2013, 8, 8)}), (10917, 1326, {'date': date(2009, 5, 23)}), (22184, 22188, {'date': date(2012, 11, 3)}), (6318, 6301, {'date': date(2011, 4, 3)}), (8367, 8421, {'date': date(2012, 7, 10)}), (21167, 3931, {'date': date(2012, 7, 9)}), (3265, 4955, {'date': date(2012, 6, 6)}), (3265, 4811, {'date': date(2014, 9, 18)}), (3265, 3266, {'date': date(2008, 9, 24)}), (3266, 4811, {'date': date(2007, 2, 16)}), (3266, 1926, {'date': date(2008, 3, 16)}), (3266, 3265, {'date': date(2014, 5, 25)}), (4291, 4306, {'date': date(2014, 9, 9)}), (4291, 4310, {'date': date(2007, 9, 11)}), (4811, 4955, {'date': date(2007, 4, 19)}), (4811, 3265, {'date': date(2013, 8, 23)}), (4811, 3266, {'date': date(2008, 8, 25)}), (7886, 10877, {'date': date(2012, 9, 5)}), (7886, 10917, {'date': date(2008, 9, 19)}), (7886, 24, {'date': date(2014, 8, 17)}), (7886, 36, {'date': date(2012, 9, 21)}), (7886, 1326, {'date': date(2012, 3, 23)}), (6863, 14981, {'date': date(2012, 8, 7)}), (13012, 13017, {'date': date(2012, 3, 4)}), (16597, 24, {'date': date(2012, 5, 2)}), (16597, 36, {'date': date(2011, 4, 19)}), (16597, 16610, {'date': date(2011, 11, 11)}), (12507, 12523, {'date': date(2012, 3, 11)}), (16618, 16619, {'date': date(2013, 3, 3)}), (13036, 15161, {'date': date(2012, 4, 23)}), (3826, 669, {'date': date(2014, 11, 9)}), (3826, 3100, {'date': date(2009, 2, 24)}), (3826, 3860, {'date': date(2013, 7, 27)}), (3826, 3681, {'date': date(2013, 2, 14)}), (7410, 7437, {'date': date(2012, 5, 2)}), (15092, 2148, {'date': date(2014, 11, 11)}), (15092, 20816, {'date': date(2009, 4, 8)}), (2820, 2826, {'date': date(2011, 10, 1)}), (2820, 2871, {'date': date(2010, 3, 26)}), (2311, 2371, {'date': date(2008, 8, 26)}), (11020, 11042, {'date': date(2008, 5, 12)}), (13070, 13079, {'date': date(2009, 10, 15)}), (20755, 24, {'date': date(2010, 11, 26)}), (20755, 1326, {'date': date(2014, 5, 2)}), (20756, 669, {'date': date(2012, 9, 7)}), (20756, 3100, {'date': date(2013, 3, 17)}), (20756, 3681, {'date': date(2010, 12, 9)}), (14613, 14617, {'date': date(2009, 6, 2)}), (10004, 10030, {'date': date(2008, 3, 1)}), (10004, 10111, {'date': date(2011, 7, 15)}), (4892, 1130, {'date': date(2011, 12, 10)}), (4892, 4947, {'date': date(2012, 3, 6)}), (2871, 15372, {'date': date(2014, 4, 27)}), (6968, 669, {'date': date(2010, 2, 6)}), (6968, 1386, {'date': date(2012, 9, 20)}), (6968, 4133, {'date': date(2012, 9, 22)}), (3385, 3396, {'date': date(2008, 3, 4)}), (8001, 23190, {'date': date(2008, 6, 26)}), (19787, 13581, {'date': date(2012, 4, 3)}), (21325, 21371, {'date': date(2009, 8, 2)}), (16720, 16734, {'date': date(2007, 10, 26)}), (16720, 16739, {'date': date(2008, 10, 24)}), (12119, 24, {'date': date(2013, 6, 1)}), (12119, 36, {'date': date(2007, 5, 1)}), (22872, 22918, {'date': date(2014, 7, 15)}), (7519, 16041, {'date': date(2012, 7, 8)}), (12640, 18082, {'date': date(2007, 4, 20)}), (12640, 18086, {'date': date(2008, 7, 8)}), (5987, 4957, {'date': date(2010, 12, 4)}), (5987, 492, {'date': date(2009, 6, 25)}), (19316, 19344, {'date': date(2013, 3, 15)}), 
(19316, 2501, {'date': date(2011, 11, 15)}), (13690, 4955, {'date': date(2014, 4, 5)}), (12162, 15560, {'date': date(2011, 1, 4)}), (2443, 3564, {'date': date(2014, 1, 9)}), (401, 492, {'date': date(2010, 11, 25)}), (401, 502, {'date': date(2013, 6, 27)}), (9106, 9107, {'date': date(2012, 5, 4)}), (8598, 8616, {'date': date(2011, 2, 6)}), (21919, 4161, {'date': date(2007, 7, 18)}), (21919, 669, {'date': date(2011, 12, 12)}), (21919, 3100, {'date': date(2012, 10, 15)}), (21919, 3681, {'date': date(2011, 1, 14)}), (10143, 4306, {'date': date(2009, 5, 26)}), (4517, 6414, {'date': date(2008, 7, 15)}), (20906, 2406, {'date': date(2012, 9, 5)}), (20906, 492, {'date': date(2013, 8, 12)}), (6059, 20368, {'date': date(2008, 11, 20)}), (2501, 1386, {'date': date(2009, 5, 1)}), (2501, 10373, {'date': date(2012, 7, 15)}), (13257, 13286, {'date': date(2008, 11, 4)}), (1484, 1492, {'date': date(2008, 6, 5)}), (1484, 1499, {'date': date(2013, 8, 9)}), (1996, 6038, {'date': date(2012, 7, 28)}), (20947, 669, {'date': date(2011, 7, 19)}), (20947, 3100, {'date': date(2010, 11, 23)}), (20947, 3681, {'date': date(2008, 6, 20)}), (5590, 5601, {'date': date(2012, 8, 17)}), (5590, 5626, {'date': date(2013, 8, 18)}), (18917, 19012, {'date': date(2013, 12, 15)}), (18917, 19013, {'date': date(2014, 1, 17)}), (10730, 24, {'date': date(2010, 1, 21)}), (10730, 36, {'date': date(2014, 1, 11)}), (10730, 1326, {'date': date(2008, 11, 4)}), (23024, 669, {'date': date(2010, 6, 15)}), (23024, 3100, {'date': date(2012, 6, 1)}), (23024, 23091, {'date': date(2011, 5, 20)}), (5109, 5112, {'date': date(2007, 5, 10)}), (13306, 13334, {'date': date(2014, 5, 18)})] attr = {13829: {'category': 'D', 'occupation': 'celebrity'}, 15372: {'category': 'D', 'occupation': 'celebrity'}, 3086: {'category': 'P', 'occupation': 'scientist'}, 4111: {'category': 'D', 'occupation': 'scientist'}, 6163: {'category': 'P', 'occupation': 'scientist'}, 13334: {'category': 'P', 'occupation': 'scientist'}, 6168: {'category': 'P', 'occupation': 'celebrity'}, 24: {'category': 'P', 'occupation': 'politician'}, 3100: {'category': 'D', 'occupation': 'scientist'}, 4636: {'category': 'D', 'occupation': 'celebrity'}, 543: {'category': 'D', 'occupation': 'politician'}, 36: {'category': 'D', 'occupation': 'scientist'}, 12836: {'category': 'I', 'occupation': 'celebrity'}, 4133: {'category': 'D', 'occupation': 'scientist'}, 10790: {'category': 'D', 'occupation': 'scientist'}, 3117: {'category': 'P', 'occupation': 'politician'}, 9262: {'category': 'I', 'occupation': 'scientist'}, 23091: {'category': 'P', 'occupation': 'celebrity'}, 12856: {'category': 'D', 'occupation': 'politician'}, 4666: {'category': 'P', 'occupation': 'politician'}, 4160: {'category': 'P', 'occupation': 'politician'}, 4161: {'category': 'I', 'occupation': 'politician'}, 7746: {'category': 'P', 'occupation': 'scientist'}, 19012: {'category': 'D', 'occupation': 'politician'}, 19013: {'category': 'I', 'occupation': 'politician'}, 14916: {'category': 'I', 'occupation': 'politician'}, 14930: {'category': 'P', 'occupation': 'celebrity'}, 5717: {'category': 'P', 'occupation': 'politician'}, 3159: {'category': 'I', 'occupation': 'scientist'}, 1627: {'category': 'I', 'occupation': 'scientist'}, 19550: {'category': 'D', 'occupation': 'politician'}, 5215: {'category': 'P', 'occupation': 'politician'}, 5217: {'category': 'D', 'occupation': 'scientist'}, 3681: {'category': 'I', 'occupation': 'politician'}, 2148: {'category': 'D', 'occupation': 'politician'}, 4198: {'category': 'D', 'occupation': 'celebrity'}, 
10854: {'category': 'D', 'occupation': 'politician'}, 3176: {'category': 'D', 'occupation': 'scientist'}, 3177: {'category': 'I', 'occupation': 'scientist'}, 1130: {'category': 'I', 'occupation': 'celebrity'}, 1134: {'category': 'P', 'occupation': 'scientist'}, 4206: {'category': 'I', 'occupation': 'scientist'}, 2671: {'category': 'P', 'occupation': 'scientist'}, 1648: {'category': 'P', 'occupation': 'politician'}, 19570: {'category': 'P', 'occupation': 'scientist'}, 6770: {'category': 'D', 'occupation': 'celebrity'}, 5752: {'category': 'D', 'occupation': 'scientist'}, 11384: {'category': 'D', 'occupation': 'politician'}, 3193: {'category': 'P', 'occupation': 'politician'}, 9338: {'category': 'I', 'occupation': 'politician'}, 10877: {'category': 'P', 'occupation': 'celebrity'}, 10373: {'category': 'I', 'occupation': 'politician'}, 14981: {'category': 'P', 'occupation': 'celebrity'}, 11402: {'category': 'I', 'occupation': 'politician'}, 3213: {'category': 'D', 'occupation': 'politician'}, 3214: {'category': 'P', 'occupation': 'celebrity'}, 5626: {'category': 'P', 'occupation': 'politician'}, 23190: {'category': 'P', 'occupation': 'celebrity'}, 20122: {'category': 'D', 'occupation': 'politician'}, 667: {'category': 'P', 'occupation': 'politician'}, 6301: {'category': 'I', 'occupation': 'scientist'}, 669: {'category': 'I', 'occupation': 'celebrity'}, 161: {'category': 'D', 'occupation': 'politician'}, 18082: {'category': 'I', 'occupation': 'celebrity'}, 7331: {'category': 'I', 'occupation': 'celebrity'}, 10917: {'category': 'P', 'occupation': 'scientist'}, 18086: {'category': 'D', 'occupation': 'scientist'}, 22184: {'category': 'P', 'occupation': 'scientist'}, 7336: {'category': 'D', 'occupation': 'scientist'}, 16041: {'category': 'P', 'occupation': 'scientist'}, 22188: {'category': 'I', 'occupation': 'scientist'}, 6318: {'category': 'P', 'occupation': 'politician'}, 8367: {'category': 'I', 'occupation': 'politician'}, 21167: {'category': 'D', 'occupation': 'celebrity'}, 10936: {'category': 'P', 'occupation': 'scientist'}, 3265: {'category': 'I', 'occupation': 'celebrity'}, 3266: {'category': 'P', 'occupation': 'politician'}, 4291: {'category': 'P', 'occupation': 'politician'}, 15560: {'category': 'P', 'occupation': 'celebrity'}, 4811: {'category': 'P', 'occupation': 'politician'}, 7886: {'category': 'P', 'occupation': 'politician'}, 6863: {'category': 'I', 'occupation': 'scientist'}, 20177: {'category': 'P', 'occupation': 'scientist'}, 4306: {'category': 'P', 'occupation': 'scientist'}, 13012: {'category': 'D', 'occupation': 'celebrity'}, 16597: {'category': 'P', 'occupation': 'celebrity'}, 4310: {'category': 'D', 'occupation': 'scientist'}, 13017: {'category': 'I', 'occupation': 'politician'}, 12507: {'category': 'I', 'occupation': 'politician'}, 16610: {'category': 'I', 'occupation': 'celebrity'}, 8421: {'category': 'I', 'occupation': 'politician'}, 16618: {'category': 'D', 'occupation': 'politician'}, 16619: {'category': 'I', 'occupation': 'celebrity'}, 13036: {'category': 'D', 'occupation': 'politician'}, 12523: {'category': 'P', 'occupation': 'politician'}, 747: {'category': 'D', 'occupation': 'politician'}, 18159: {'category': 'P', 'occupation': 'celebrity'}, 3826: {'category': 'P', 'occupation': 'politician'}, 7410: {'category': 'D', 'occupation': 'scientist'}, 15092: {'category': 'I', 'occupation': 'scientist'}, 21248: {'category': 'I', 'occupation': 'politician'}, 2820: {'category': 'I', 'occupation': 'celebrity'}, 21253: {'category': 'P', 'occupation': 'celebrity'}, 2311: 
{'category': 'I', 'occupation': 'politician'}, 2826: {'category': 'D', 'occupation': 'celebrity'}, 11020: {'category': 'D', 'occupation': 'politician'}, 7437: {'category': 'P', 'occupation': 'celebrity'}, 16654: {'category': 'I', 'occupation': 'scientist'}, 6414: {'category': 'D', 'occupation': 'scientist'}, 13070: {'category': 'I', 'occupation': 'politician'}, 13581: {'category': 'D', 'occupation': 'politician'}, 20755: {'category': 'D', 'occupation': 'celebrity'}, 20756: {'category': 'I', 'occupation': 'scientist'}, 14613: {'category': 'I', 'occupation': 'celebrity'}, 10004: {'category': 'I', 'occupation': 'politician'}, 3860: {'category': 'D', 'occupation': 'celebrity'}, 13079: {'category': 'D', 'occupation': 'celebrity'}, 14617: {'category': 'P', 'occupation': 'celebrity'}, 4892: {'category': 'P', 'occupation': 'celebrity'}, 11042: {'category': 'I', 'occupation': 'celebrity'}, 12582: {'category': 'P', 'occupation': 'politician'}, 1326: {'category': 'D', 'occupation': 'scientist'}, 10030: {'category': 'I', 'occupation': 'celebrity'}, 2871: {'category': 'P', 'occupation': 'scientist'}, 6968: {'category': 'P', 'occupation': 'celebrity'}, 3385: {'category': 'D', 'occupation': 'scientist'}, 15161: {'category': 'D', 'occupation': 'scientist'}, 8001: {'category': 'P', 'occupation': 'celebrity'}, 2371: {'category': 'D', 'occupation': 'scientist'}, 3396: {'category': 'P', 'occupation': 'scientist'}, 16203: {'category': 'D', 'occupation': 'politician'}, 19787: {'category': 'D', 'occupation': 'politician'}, 21325: {'category': 'P', 'occupation': 'scientist'}, 20816: {'category': 'I', 'occupation': 'scientist'}, 16720: {'category': 'I', 'occupation': 'scientist'}, 4947: {'category': 'I', 'occupation': 'celebrity'}, 12119: {'category': 'D', 'occupation': 'politician'}, 22872: {'category': 'I', 'occupation': 'politician'}, 12123: {'category': 'D', 'occupation': 'celebrity'}, 4955: {'category': 'P', 'occupation': 'celebrity'}, 4957: {'category': 'D', 'occupation': 'celebrity'}, 16734: {'category': 'D', 'occupation': 'politician'}, 7519: {'category': 'I', 'occupation': 'politician'}, 12640: {'category': 'P', 'occupation': 'celebrity'}, 3931: {'category': 'I', 'occupation': 'celebrity'}, 5987: {'category': 'D', 'occupation': 'scientist'}, 16739: {'category': 'I', 'occupation': 'celebrity'}, 2406: {'category': 'I', 'occupation': 'scientist'}, 1386: {'category': 'I', 'occupation': 'politician'}, 19316: {'category': 'I', 'occupation': 'politician'}, 13690: {'category': 'I', 'occupation': 'celebrity'}, 21371: {'category': 'P', 'occupation': 'scientist'}, 10111: {'category': 'I', 'occupation': 'celebrity'}, 12162: {'category': 'I', 'occupation': 'celebrity'}, 22918: {'category': 'I', 'occupation': 'celebrity'}, 1926: {'category': 'D', 'occupation': 'celebrity'}, 2443: {'category': 'I', 'occupation': 'scientist'}, 20368: {'category': 'I', 'occupation': 'politician'}, 19344: {'category': 'D', 'occupation': 'politician'}, 401: {'category': 'P', 'occupation': 'celebrity'}, 9106: {'category': 'P', 'occupation': 'scientist'}, 9107: {'category': 'D', 'occupation': 'celebrity'}, 6038: {'category': 'I', 'occupation': 'celebrity'}, 8598: {'category': 'P', 'occupation': 'politician'}, 21919: {'category': 'I', 'occupation': 'celebrity'}, 10143: {'category': 'P', 'occupation': 'celebrity'}, 4517: {'category': 'D', 'occupation': 'celebrity'}, 8616: {'category': 'D', 'occupation': 'celebrity'}, 20906: {'category': 'D', 'occupation': 'celebrity'}, 6059: {'category': 'D', 'occupation': 'scientist'}, 7606: {'category': 'P', 
'occupation': 'scientist'}, 2501: {'category': 'P', 'occupation': 'scientist'}, 13257: {'category': 'D', 'occupation': 'celebrity'}, 21962: {'category': 'D', 'occupation': 'politician'}, 1484: {'category': 'I', 'occupation': 'politician'}, 1996: {'category': 'D', 'occupation': 'celebrity'}, 5073: {'category': 'P', 'occupation': 'celebrity'}, 20947: {'category': 'I', 'occupation': 'celebrity'}, 1492: {'category': 'D', 'occupation': 'celebrity'}, 5590: {'category': 'I', 'occupation': 'politician'}, 1499: {'category': 'P', 'occupation': 'celebrity'}, 5601: {'category': 'P', 'occupation': 'scientist'}, 18917: {'category': 'D', 'occupation': 'celebrity'}, 13286: {'category': 'I', 'occupation': 'scientist'}, 10730: {'category': 'D', 'occupation': 'celebrity'}, 492: {'category': 'I', 'occupation': 'politician'}, 3564: {'category': 'P', 'occupation': 'scientist'}, 23024: {'category': 'P', 'occupation': 'politician'}, 5109: {'category': 'I', 'occupation': 'politician'}, 502: {'category': 'P', 'occupation': 'scientist'}, 5112: {'category': 'I', 'occupation': 'celebrity'}, 13306: {'category': 'D', 'occupation': 'scientist'}} t_131 = nx.DiGraph() t_131.add_nodes_from(attr) t_131.add_edges_from(data) nx.set_node_attributes(t_131, attr) # Create the MatrixPlot object: m m = nv.MatrixPlot(t_131) # Draw m to the screen m.draw() # Display the plot plt.show() # Convert T to a matrix format: A A = nx.to_numpy_matrix(t_131) # Convert A back to the NetworkX form as a directed graph: T_conv T_conv = nx.from_numpy_matrix(A, create_using=nx.DiGraph()) # Check that the `category` metadata field is lost from each node for n, d in T_conv.nodes(data=True): assert 'category' not in d.keys() ###Output _____no_output_____ ###Markdown Visualizing using Circos plotsCircos plots are a rational, non-cluttered way of visualizing graph data, in which nodes are ordered around the circumference in some fashion, and the edges are drawn within the circle that results, giving a beautiful as well as informative visualization about the structure of the network.In this exercise, you'll continue getting practice with the nxviz API, this time with the `CircosPlot` object. `matplotlib.pyplot` has been imported for you as `plt`.**Instructions**- Import `CircosPlot` from `nxviz`.- Plot the Twitter network `T` as a Circos plot without any styling. Use the `CircosPlot()` function to do this. Don't forget to draw it to the screen using `.draw()` and then display it using `plt.show()`. ###Code # Create the CircosPlot object: c c = nv.CircosPlot(t_131, figsize=(8, 8)) # Draw c to the screen c.draw() # Display the plot plt.show() ###Output _____no_output_____ ###Markdown Visualizing using Arc plotsFollowing on what you've learned about the nxviz API, now try making an ArcPlot of the network. Two keyword arguments that you will try here are `node_order='keyX'` and `node_color='keyX'`, in which you specify a key in the node metadata dictionary to color and order the nodes by.`matplotlib.pyplot` has been imported for you as `plt`.**Instructions**- Import `ArcPlot` from `nxviz`.- Create an un-customized ArcPlot of `T`. To do this, use the `ArcPlot()` function with just `T` as the argument.- Create another ArcPlot of `T` in which the nodes are ordered and colored by the `'category'` keyword. You'll have to specify the `node_order` and `node_color` parameters to do this. For both plots, be sure to draw them to the screen and display them with `plt.show()`. 
###Code # Create the un-customized ArcPlot object: a a = nv.ArcPlot(t_131, figsize=(20, 20)) # Draw a to the screen a.draw() # Display the plot plt.show() # Create the customized ArcPlot object: a2 a2 = nv.ArcPlot(t_131, node_order='category', node_color='category', figsize=(20, 20)) # Draw a2 to the screen a2.draw() # Display the plot plt.show() ###Output _____no_output_____ ###Markdown **Notice the node coloring in the customized ArcPlot compared to the uncustomized version. In the customized ArcPlot, the nodes in each of the categories - `'I'`, `'D'`, and `'P'` - have their own color. If it's difficult to see on your screen, you can expand the plot into a new window by clicking on the pop-out icon on the top-left next to 'Plots'.** Important nodesYou'll learn about ways to identify nodes that are important in a network. In doing so, you'll be introduced to more advanced concepts in network analysis as well as the basics of path-finding algorithms. The chapter concludes with a deep dive into the Twitter network dataset which will reinforce the concepts you've learned, such as degree centrality and betweenness centrality. Degree Centrality- How to determine which nodes are important- Which center node might be more important? - ![star_graph][1] - The center node of the left graph is more important because it is connected to more nodes. - Being connected to other nodes means other nodes are considered a neighbor of that node.- From the concept of neighbors, we can now introduce the concept of _degree centrality_1. Degree centrality - This is one of many metrics we can use to evaluate the importance of a node, and is simply defined as the number of neighbors that a node has divided by the total number of neighbors that the node could possible have: $\frac{\text{Number of Neighbors I Have}}{\text{Number of Neighbors I could Possibly Have}}$ - There are two scenarios possible here: - if self-loops are allowed, such as in a network mapping of all bike trips in a bike sharing system, then the number of neighbors that I could possibly have, is every single node in the graph, including myself. - if self-loops are not allowed, such as in the Twitter social network, where, by definition, my account cannot follow itself, the the number of neighbors I could possibly have is every other node in the graph, excluding myself. - In real life, example of nodes in a graph that have high degree centrality might be: - Twitter broadcasters, that is, users that are followed by many other users - Airport transportation hubs, such as New York, London or Tokyo - Disease super-spreaders, who are the individuals that epidemiologists would want to track down to help stop the spread of a disease1. Betweenness centrality Number of Neighbors```pythonG.edges()>>> [(1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (1, 9)]G.neighbors(1)>>> [2, 3, 4, 5, 6, 7, 8, 9]G.neighbors(8)>>> [1]G.neighbors(10)>>> NetworkXError: The node 10 is not in the graph.``` Degree Centrality- self-loops are not considered```pythonnx.degree_centrality(G)>>> {1: 1.0, 2: 0.125, 3: 0.125, 4: 0.125, 5: 0.125, 6: 0.125, 7: 0.125, 8: 0.125, 9: 0.125}``` [1]: https://raw.githubusercontent.com/trenton3983/DataCamp/master/Images/2020-05-21_intro_to_network_analysis_in_python/star_graphs.JPG Compute number of neighbors for each nodeHow do you evaluate whether a node is an important one or not? 
There are a few ways to do so, and here, you're going to look at one metric: the number of neighbors that a node has.Every NetworkX graph `G` exposes a `.neighbors(n)` method that returns a list of nodes that are the neighbors of the node `n`. To begin, use this method in the IPython Shell on the Twitter network `T` to get the neighbors of of node `1`. This will get you familiar with how the function works. Then, your job in this exercise is to write a function that returns all nodes that have `m` neighbors.**Instructions**- Write a function called `nodes_with_m_nbrs()` that has two parameters - `G` and `m` - and returns all nodes that have `m` neighbors. To do this: - Iterate over all nodes in `G` (**not** including the metadata). - Use the `len()` and `list()` functions together with the `.neighbors()` method to calculate the total number of neighbors that node `n` in graph `G` has. - If the number of neighbors of node `n` is equal to `m`, add `n` to the set `nodes` using the `.add()` method. - After iterating over all the nodes in `G`, return the set `nodes`.- Use your `nodes_with_m_nbrs()` function to retrieve all the nodes that have 6 neighbors in the graph `T`. ###Code # Define nodes_with_m_nbrs() def nodes_with_m_nbrs(G, m): """ Returns all nodes in graph G that have m neighbors. """ nodes = set() # Iterate over all nodes in G for n in G.nodes(): # Check if the number of neighbors of n matches m if len(list(G.neighbors(n))) == m: # Add the node n to the set nodes.add(n) # Return the nodes with m neighbors return nodes # Compute and print all nodes in T that have 6 neighbors six_nbrs = nodes_with_m_nbrs(T, 6) print(six_nbrs) ###Output {22533, 1803, 11276, 11279, 6161, 4261, 10149, 3880, 16681, 5420, 14898, 64, 14539, 6862, 20430, 9689, 475, 1374, 6112, 9186, 17762, 14956, 2927, 11764, 4725} ###Markdown **The number of neighbors a node has is one way to identify important nodes. It looks like 25 nodes in graph `T` have 6 neighbors.** Compute degree distributionThe number of neighbors that a node has is called its "degree", and it's possible to compute the degree distribution across the entire graph. In this exercise, your job is to compute the degree distribution across `T`.**Instructions**- Use a list comprehension along with the `.neighbors(n)` method to get the degree of every node. The result should be a list of integers. - Use `n` as your _iterator variable_. - The _output expression_ of your list comprehension should be the number of neighbors that node `n` has - that is, its degree. Use the `len()` and `list()` functions together with the `.neighbors()` method to compute this. - The _iterable_ in your list comprehension is all the nodes in `T`, accessed using the `.nodes()` method.- Print the degrees. ###Code degrees = [len(list(T.neighbors(n))) for n in T.nodes()] degrees[:20] ###Output _____no_output_____ ###Markdown Degree centrality distributionThe degree of a node is the number of neighbors that it has. The degree centrality is the number of neighbors divided by all possible neighbors that it could have. 
Depending on whether self-loops are allowed, the set of possible neighbors a node could have could also include the node itself.The `nx.degree_centrality(G)` function returns a dictionary, where the keys are the nodes and the values are their degree centrality values.The degree distribution `degrees` you computed in the previous exercise using the list comprehension has been pre-loaded.**Instructions**- Compute the degree centrality of the Twitter network `T`.- Using `plt.hist()`, plot a histogram of the degree centrality distribution of `T`. This can be accessed using `list(deg_cent.values())`.- Plot a histogram of the degree distribution `degrees` of `T`. This is the same list you computed in the last exercise.- Create a scatter plot with `degrees` on the x-axis and the degree centrality distribution `list(deg_cent.values())` on the y-axis. ###Code # Compute the degree centrality of the Twitter network: deg_cent deg_cent = nx.degree_centrality(T) # Plot a histogram of the degree centrality distribution of the graph. plt.figure() plt.hist(list(deg_cent.values())) plt.show() # Plot a histogram of the degree distribution of the graph plt.figure() plt.hist(degrees) plt.show() # Plot a scatter plot of the centrality distribution and the degree distribution plt.figure() plt.scatter(degrees, list(deg_cent.values())) plt.show() ###Output _____no_output_____ ###Markdown **Given the similarities of their histograms, it should not surprise you to see a perfect correlation between the centrality distribution and the degree distribution.** Graph Algorithms Finding Paths- Pathfinding is important for: - Optimization: - finding the shortest transportation path between two nodes - shortest transport paths - Modeling: - the spread of things - disease spread - information spread in a social network- How do we find out if there's a path between two nodes?- If there's a path, how do we find out what the shortest path is?- Algorithm: Breadth-first search- Developed in the 1950s as a way of finding the shortest path out of a maze.- Given the following network comprised of 11 nodes, and we want to find the shortest path between the yellow and red nodes.- If we start at the yellow node, we first ask for the yellow node's neighbors.- We then ask if the destination node is present in the set of yellow node's neighbors- If not, we continue on.- Going out a 2nd degree of separation, we ask for the neighbors of our neighbors.- The destination node is still not present, so we continue on.- On our 3rd degree of separation out, we see that the destination node is present.- At this point, we can stop and ignore the next degree of separation.- Note there was one other path possible, but it was longer.- As such, with the breadth-first search algorithm, we have achieved our goal of finding the _shortest_ path between the pair of nodes.- ![bfs][1]- If we do `G.neighbors(1)`, we get back a list containing the nodes that are neighbors of `1`- Let's go one degree out, to the first node in the list of node `1`'s neighbors, which is node `10`.- Let's check the neighbors of `10`: note that we have `1`, which is correct, and then `19`, and then a whole slew of other nodes.- Since `19`, our destination node, is present in the neighbors of node `10`, we can stop there.- If `19` wasn't there, we would go on to check the neighbors of node `5`, which was the next node in the list of node `1`'s neighbors.```pythonG>>> len(G.edges())>>> 57len(G.nodes())>>> 20list(G.neighbors(1))>>> [10, 5, 14, 7]list(G.neighborys(10))>>> [1, 19, 5, 17, 
8, 9, 13, 14]```- This was a manual check, but in the exercises, you'll be implementing an automatic version of the breadth-first search algorithm. [1]: https://raw.githubusercontent.com/trenton3983/DataCamp/master/Images/2020-05-21_intro_to_network_analysis_in_python/bfs.JPG Shortest Path IYou can leverage what you know about finding neighbors to try finding paths in a network. One algorithm for path-finding between two nodes is the "breadth-first search" (BFS) algorithm. In a BFS algorithm, you start from a particular node and iteratively search through its neighbors and neighbors' neighbors until you find the destination node.Pathfinding algorithms are important because they provide another way of assessing node importance; you'll see this in a later exercise.In this set of 3 exercises, you're going to build up slowly to get to the final BFS algorithm. The problem has been broken into 3 parts that, if you complete in succession, will get you to a first pass implementation of the BFS algorithm.**Instructions**- Create a function called `path_exists()` that has 3 parameters - `G`, `node1`, and `node2` - and returns whether or not a path exists between the two nodes.- Initialize the queue of nodes to visit with the first node, `node1`. `queue` should be a list.- Iterate over the nodes in `queue`.- Get the neighbors of the node using the `.neighbors()` method of the graph `G`.- Check to see if the destination node `node2` is in the set of `neighbors`. If it is, return `True`. ###Code # Define path_exists() def path_exists(G, node1, node2): """ This function checks whether a path exists between two nodes (node1, node2) in graph G. """ visited_nodes = set() # Initialize the queue of nodes to visit with the first node: queue queue = [node1] # Iterate over the nodes in the queue for node in queue: # Get neighbors of the node neighbors = G.neighbors(node) # Check to see if the destination node is in the set of neighbors if node2 in neighbors: print('Path exists between nodes {0} and {1}'.format(node1, node2)) return True break ###Output _____no_output_____ ###Markdown Shortest Path IINow that you've got the code for checking whether the destination node is present in neighbors, next up, you're going to extend the same function to write the code for the condition where the destination node is not present in the neighbors.All the code you need to write is in the `else` condition; that is, if `node2` is not in `neighbors`.**Instructions**- Using the `.add()` method, add the current node `node` to the set `visited_nodes` to keep track of what nodes have already been visited.- Add the _neighbors_ of the current node `node` that have not yet been visited to `queue`. To do this, you'll need to use the `.extend()` method of `queue` together with a list comprehension. The `.extend()` method appends all the items in a given list. - The _output expression_ and _iterator variable_ of the list comprehension are both `n`. The iterable is the list `neighbors`, and the conditional is if `n` is not in the visited nodes. ###Code def path_exists(G, node1, node2): """ This function checks whether a path exists between two nodes (node1, node2) in graph G. 
""" visited_nodes = set() queue = [node1] for node in queue: neighbors = G.neighbors(node) if node2 in neighbors: print('Path exists between nodes {0} and {1}'.format(node1, node2)) return True else: # Add current node to visited nodes visited_nodes.add(node) # Add neighbors of current node that have not yet been visited queue.extend([n for n in neighbors if n not in visited_nodes]) ###Output _____no_output_____ ###Markdown Shortest Path IIIThis is the final exercise of this trio! You're now going to complete the problem by writing the code that returns `False` if there's no path between two nodes.**Instructions**- Check to see if the queue has been emptied. You can do this by inspecting the last element of queue with `[-1]`.- Place the appropriate return statement for indicating whether there's a path between these two nodes. ###Code def path_exists(G, node1, node2): """ This function checks whether a path exists between two nodes (node1, node2) in graph G. """ visited_nodes = set() queue = [node1] for node in queue: neighbors = G.neighbors(node) if node2 in neighbors: print('Path exists between nodes {0} and {1}'.format(node1, node2)) return True break else: visited_nodes.add(node) queue.extend([n for n in neighbors if n not in visited_nodes]) # Check to see if the final element of the queue has been reached if node == queue[-1]: print('Path does not exist between nodes {0} and {1}'.format(node1, node2)) # Place the appropriate return statement return False ###Output _____no_output_____ ###Markdown Betweenness Centrality- Let's revisit our notions of what it means to be an _important node_, but leveraging what we know about paths- We're now going to learn about betweenness centrality, but before we talk about that, we need to extend our knowledge with one key concept - all shortest paths All Shortest Paths- In the previous section, we learned about how to find the shortest path between any pair of nodes, using the breadth-first search algorithm.- Imagine now we used the BFS to find every shortest path between every pair of nodes.- What we would get back is the **set of _all shortest paths_** in a graph.- In other word, _all shortest paths_ are the set of paths in a graph, such that **each path is the shortest path between a given pair of nodes**, done **for all pairs of nodes**. Betweenness Centrality- Definition: $\frac{\text{num. shortest paths through node}}{\text{all possible shortest paths}}$ - The number of shortest paths in a graph that pass through a node, divided by the number of shortest paths that exist between every pair of nodes in a graph. - This metric captures a different view of importance - in essence, it captures bottleneck nodes in a graph, rather that highly connected nodes; this will become much clearer as we go on.- Where might betweenness centrality be useful?- Application: - One example would be individuals that bridge between to communities - An individual bridging liberal-leaning and conservative-leaning Twitter users - Alternatively, consider the Internet, where there are crucial links that bridge two network of computers. - If we removed those crucial nodes in the Internet, then information will not flow (or at least not as easily) between subnetworks. Examples- Singapore: Raffles & Jurong East - Let's look at the Singapore subway system to make this more concrete - Take a look at two sets of stations that have been circled with purple. 
- In the south, there's a cluster of stations in the central business district that serve as connectors between different lines, but there's also this other station called Jurong East, which is only connected to three other stations, but serves as a major transit connector point between the red and green lines. - ![singapore_map][1]- Hight betweenness centrality, low degree centrality- We have a graph `G` that is a barbell graph, which can be created as in the following code cells.- `m1` is the number of nodes in the barbell ends, and `m2` is the number of nodes in the barbell bridge.- The `keys` are the nodes, and the `values` are the `betweenness centrality` scores.- Some values are `0` because they're located at the ends of the barbell graph and the nodes within each end are fully connected with one another.- With the exception of the bridge node and the two nodes it's connected to, there's no shortest path that has to run through any of those nodes to get to other nodes. [1]: https://raw.githubusercontent.com/trenton3983/DataCamp/master/Images/2020-05-21_intro_to_network_analysis_in_python/singapore_station_map.jpg ###Code G = nx.barbell_graph(m1=5, m2=1) nx.betweenness_centrality(G) nx.draw(G) plt.show() ###Output _____no_output_____ ###Markdown NetworkX betweenness centrality on a social networkBetweenness centrality is a node importance metric that uses information about the shortest paths in a network. It is defined as the fraction of all possible shortest paths between any pair of nodes that pass through the node.NetworkX provides the `nx.betweenness_centrality(G)` function for computing the betweenness centrality of every node in a graph, and it returns a dictionary where the keys are the nodes and the values are their betweenness centrality measures.**Instructions**- Compute the betweenness centrality `bet_cen` of the nodes in the graph `T`.- Compute the degree centrality `deg_cen` of the nodes in the graph `T`.- Compare betweenness centrality to degree centrality by creating a scatterplot of the two, with `list(bet_cen.values())` on the x-axis and `list(deg_cen.values())` on the y-axis. ###Code # Compute the betweenness centrality of T: bet_cen bet_cen = nx.betweenness_centrality(t_131) # Compute the degree centrality of T: deg_cen deg_cen = nx.degree_centrality(t_131) # Create a scatter plot of betweenness centrality and degree centrality plt.scatter(list(bet_cen.values()), list(deg_cen.values())) # Display the plot plt.xlim(-0.00005, 0.0002) plt.xticks(rotation=45) plt.show() ###Output _____no_output_____ ###Markdown Deep dive - Twitter networkYou're going to now take a deep dive into a Twitter network, which will help reinforce what you've learned earlier. First, you're going to find the nodes that can broadcast messages very efficiently to lots of people one degree of separation away.NetworkX has been pre-imported for you as `nx`.**Instructions**- Write a function `find_nodes_with_highest_deg_cent(G)` that returns the node(s) with the highest degree centrality using the following steps: - Compute the degree centrality of `G`. - Compute the maximum degree centrality using the `max()` function on `list(deg_cent.values())`. - Iterate over the degree centrality dictionary, `deg_cent.items()`. - If the degree centrality value `v` of the current node `k` is equal to `max_dc`, add it to the set of nodes.- Use your function to find the node(s) that has the highest degree centrality in `T`.- Write an assertion statement that checks that the node(s) is/are correctly identified. 
This has been done for you, so hit 'Submit Answer' to see the result! ###Code # Define find_nodes_with_highest_deg_cent() def find_nodes_with_highest_deg_cent(G): # Compute the degree centrality of G: deg_cent deg_cent = nx.degree_centrality(G) # Compute the maximum degree centrality: max_dc max_dc = max(list(deg_cent.values())) nodes = set() # Iterate over the degree centrality dictionary for k, v in deg_cent.items(): # Check if the current value has the maximum degree centrality if v == max_dc: # Add the current node to the set of nodes nodes.add(k) return nodes # Find the node(s) that has the highest degree centrality in T: top_dc top_dc = find_nodes_with_highest_deg_cent(T) print(f'It looks like node {top_dc} has the highest degree centrality') # Write the assertion statement for node in top_dc: assert nx.degree_centrality(T)[node] == max(nx.degree_centrality(T).values()) ###Output It looks like node {11824} has the highest degree centrality ###Markdown Deep dive - Twitter network part IINext, you're going to do an analogous deep dive on betweenness centrality! Just a few hints to help you along: remember that betweenness centrality is computed using `nx.betweenness_centrality(G)`.**Instructions**- Write a function `find_node_with_highest_bet_cent(G)` that returns the node(s) with the highest betweenness centrality. - Compute the betweenness centrality of `G`. - Compute the maximum betweenness centrality using the `max()` function on `list(bet_cent.values())`. - Iterate over the degree centrality dictionary, `bet_cent.items()`. - If the degree centrality value `v` of the current node `k` is equal to `max_bc`, add it to the set of nodes.- Use your function to find the node(s) that has the highest betweenness centrality in `T`.- Write an assertion statement that you've got the right node. This has been done for you, so hit 'Submit Answer' to see the result! ###Code # Define find_node_with_highest_bet_cent() def find_node_with_highest_bet_cent(G): # Compute betweenness centrality: bet_cent bet_cent = nx.betweenness_centrality(G) # Compute maximum betweenness centrality: max_bc max_bc = max(list(bet_cent.values())) nodes = set() # Iterate over the betweenness centrality dictionary for k, v in bet_cent.items(): # Check if the current value has the maximum betweenness centrality if v == max_bc: # Add the current node to the set of nodes nodes.add(k) return nodes # Use that function to find the node(s) that has the highest betweenness centrality in the network: top_bc top_bc = find_node_with_highest_bet_cent(t_131) print(f'Node {top_bc} has the highest betweenness centrality.') # Write an assertion statement that checks that the node(s) is/are correctly identified. for node in top_bc: assert nx.betweenness_centrality(t_131)[node] == max(nx.betweenness_centrality(t_131).values()) ###Output Node {3265} has the highest betweenness centrality. ###Markdown StructuresThis chapter is all about finding interesting structures within network data. You'll learn about essential concepts such as cliques, communities, and subgraphs, which will leverage all of the skills you acquired in Chapter 2. By the end of this chapter, you'll be ready to apply the concepts you've learned to a real-world case study. 
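Before moving on to cliques, note that the two deep-dive helpers above differ only in which centrality function they call. The sketch below generalizes them; the helper name `nodes_with_max_metric` and the barbell demo graph are illustrative choices, not part of the course code, and the only assumption is that the metric returns a `{node: score}` dictionary, as both `nx.degree_centrality()` and `nx.betweenness_centrality()` do.

```python
import networkx as nx

def nodes_with_max_metric(G, metric=nx.degree_centrality):
    """Return the set of nodes that maximize the given centrality metric.

    `metric` is any function mapping a graph to a {node: score} dict,
    e.g. nx.degree_centrality or nx.betweenness_centrality.
    """
    scores = metric(G)
    max_score = max(scores.values())
    return {node for node, score in scores.items() if score == max_score}

# Demo on a small barbell graph (not the Twitter network): the bridge node wins
# on betweenness, while the clique nodes attached to it win on degree
G_demo = nx.barbell_graph(m1=5, m2=1)
print(nodes_with_max_metric(G_demo, nx.degree_centrality))       # {4, 6}
print(nodes_with_max_metric(G_demo, nx.betweenness_centrality))  # {5}
```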
Communities & Cliques- We're now going to explore the concepts of structures and subgraphs, using NetworkX.- A lot of this is going to leverage what you've learned in the 2nd chapter, particularly on path finding and the use of neighbors.- Have you ever been in a clique?- How would you characterize the clique?- It was probably made up of people who know everybody else in the group to a pretty strong degree, right?- Now imagine if there was another new person not originally part of the clique that joined in - now, in this group, it wouldn't feel like a _clique_, because the new person doesn't really know everybody else, and vice versa.- ![cliques][1]- In network theory, a _clique_ is essentially modeled on the social version of a clique: - A set of nodes that are completely connected by an edge to every other node in the set. - It is, then, a completely connected graph.- What might be the _simplest clique_?- By the definition of a clique, an edge is the simplest clique possible.- Now, what would be the simplest _complex clique_? - 3 nodes fully connected, a triangle- What are some applications of finding _triangles_ in a network? - One example is a friend recommendation system. - If `A` knows `B` and `A` knows `C`, but `B` and `C` are not yet connected, then there's a good chance that `B` knows `C` as well, and may want to be connected online, by doing what we call triangle closures. Clique Code- How might you write code that finds all triangles that a node is involved in?- That will be the exercise that you're going to go through.- Suppose you had a graph `G`, and you wanted to iterate over every pair of nodes, and not only every edge.- Rather than writing a nested for-loop, you might want to use the `combinations` function from the Python `itertools` module.- In this way, your for-loop can iterate over every pair of nodes in the network, by using the following code [1]: https://raw.githubusercontent.com/trenton3983/DataCamp/master/Images/2020-05-21_intro_to_network_analysis_in_python/cliques.JPG ###Code t_131 for n in list(combinations(t_131.nodes(), 2))[:10]: print(n[0], n[1]) ###Output 13829 15372 13829 3086 13829 4111 13829 6163 13829 13334 13829 6168 13829 24 13829 3100 13829 4636 13829 543 ###Markdown Identifying triangle relationshipsNow that you've learned about cliques, it's time to try leveraging what you know to find structures in a network. Triangles are what you'll go for first. We may be interested in triangles because they're the simplest complex clique. Let's write a few functions; these exercises will bring you through the fundamental logic behind network algorithms.In the Twitter network, each node has an `'occupation'` label associated with it, in which the Twitter user's work occupation is divided into `celebrity`, `politician` and `scientist`. One potential application of triangle-finding algorithms is to find out whether users that have similar occupations are more likely to be in a clique with one another.**Instructions**- Import `combinations` from `itertools`.- Write a function `is_in_triangle()` that has two parameters - `G` and `n` - and checks whether a given node is in a triangle relationship or not. - `combinations(iterable, n)` returns combinations of size `n` from `iterable`. This will be useful here, as you want combinations of size `2` from `G.neighbors(n)`. - To check whether an edge exists between two nodes, use the `.has_edge(node1, node2)` method. If an edge exists, then the given node is in a triangle relationship, and you should return `True`.
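Before looking at the solution, it may help to see the two building blocks named in the instructions, `combinations()` and `.has_edge()`, in isolation. The four-node toy graph below is made up for illustration and is not the course's Twitter network:

```python
from itertools import combinations
import networkx as nx

# Toy graph: a closed triangle 1-2-3 plus a pendant node 4 attached to node 1
G_toy = nx.Graph([(1, 2), (2, 3), (1, 3), (1, 4)])

# combinations() yields every unordered pair of node 1's neighbors exactly once;
# has_edge() then tells us whether that pair closes a triangle with node 1
for n1, n2 in combinations(G_toy.neighbors(1), 2):
    print(n1, n2, G_toy.has_edge(n1, n2))

# Expected: (2, 3) -> True (closed triangle); (2, 4) and (3, 4) -> False (open triangles)
```

The solution below wraps exactly this loop in a function and returns as soon as one closing edge is found.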
###Code # Define is_in_triangle() def is_in_triangle(G, n): """ Checks whether a node `n` in graph `G` is in a triangle relationship or not. Returns a boolean. """ in_triangle = False # Iterate over all possible triangle relationship combinations for n1, n2 in combinations(G.neighbors(n), 2): # Check if an edge exists between n1 and n2 if G.has_edge(n1, n2): in_triangle = True break return in_triangle is_in_triangle(T, 1) for node in sorted(list(T.nodes())[:60]): x = is_in_triangle(T, node) if x == True: print(f'{node}: {x}') ###Output 1: True 16: True 18: True 19: True 28: True 36: True 39: True 43: True 45: True ###Markdown Finding nodes involved in trianglesNetworkX provides an API for counting the number of triangles that every node is involved in: `nx.triangles(G)`. It returns a dictionary of nodes as the keys and number of triangles as the values. Your job in this exercise is to modify the function defined earlier to extract all of the nodes involved in a triangle relationship with a given node.**Instructions**- Write a function `nodes_in_triangle()` that has two parameters - `G` and `n` - and identifies all nodes in a triangle relationship with a given node. - In the `for` loop, iterate over all possible triangle relationship combinations. - Check whether the nodes `n1` and `n2` have an edge between them. If they do, add both nodes to the set ``triangle_nodes``.- Use your function in an `assert` statement to check that the number of nodes involved in a triangle relationship with node `1` of graph `T` is equal to `35`. ###Code # Write a function that identifies all nodes in a triangle relationship with a given node. def nodes_in_triangle(G, n): """ Returns the nodes in a graph `G` that are involved in a triangle relationship with the node `n`. """ triangle_nodes = set([n]) # Iterate over all possible triangle relationship combinations for n1, n2 in combinations(G.neighbors(n), 2): # Check if n1 and n2 have an edge between them if G.has_edge(n1, n2): # Add n1 to triangle_nodes triangle_nodes.add(n1) # Add n2 to triangle_nodes triangle_nodes.add(n2) return triangle_nodes # Write the assertion statement assert len(nodes_in_triangle(T, 1)) == 23 ###Output _____no_output_____ ###Markdown Finding open trianglesLet us now move on to finding open triangles! Recall that they form the basis of friend recommendation systems; if "A" knows "B" and "A" knows "C", then it's probable that "B" also knows "C".**Instructions**- Write a function `node_in_open_triangle()` that has two parameters - `G` and `n` - and identifies whether a node is present in an open triangle with its neighbors. - In the `for` loop, iterate over all possible triangle relationship combinations. - If the nodes `n1` and `n2` do not have an edge between them, set `in_open_triangle` to `True`, break out from the `if` statement and return `in_open_triangle`.- Use this function to count the number of open triangles that exist in `T`. - In the `for` loop, iterate over all the nodes in `T`. - If the current node `n` is in an open triangle, increment `num_open_triangles`. ###Code # Define node_in_open_triangle() def node_in_open_triangle(G, n): """ Checks whether pairs of neighbors of node `n` in graph `G` are in an 'open triangle' relationship with node `n`. 
""" in_open_triangle = False # Iterate over all possible triangle relationship combinations for n1, n2 in combinations(G.neighbors(n), 2): # Check if n1 and n2 do NOT have an edge between them if not G.has_edge(n1, n2): in_open_triangle = True break return in_open_triangle # Compute the number of open triangles in T num_open_triangles = 0 # Iterate over all the nodes in T for n in T.nodes(): # Check if the current node is in an open triangle if node_in_open_triangle(T, n): # Increment num_open_triangles num_open_triangles += 1 print(f'{num_open_triangles} nodes in graph T are in open triangles.') ###Output 908 nodes in graph T are in open triangles. ###Markdown Maximal Cliques- ![maximal_cliques][1]- They are a clique, but that clique can't be extended by adding another node in the graph.- For example, the sub-clique of the 3 green nodes can be extended by one blue node to form a large clique.- As such, these 3 green nodes do not form a _maximal clique_ in the graph.- The 4 nodes connected as a clique together cannot be extended and still remain a clique, as the remaining node is not fully connected to the other four nodes.- As such, these four nodes constitute a _maximal clique_. Communities- The concept of _maximal cliques_ has its uses in community finding algorithms.- Without going into the myriad of details possible, I hope to define, at a basic level, what a community is.- Cliques form a good starting point for finding communities, as they are fully connected subgraphs within a larger graph.- By identifying these _maximal cliques_, one naive way of identifying communities might be identifying the unions of maximal cliques that share some number of members, but are also of some minimum size. NetworkX API- NetworkX provides a function for finding all maximal cliques, which is the `find_clique` function. - Works for `Graph`, but doesn't work for `DiGraph`- `find_cliqeus` will not just find any clique, but the set of _maximal cliques_.- There are two _maximal cliques_ of size 5 in the graph.- There are also two _maximal cliques_ of size 2 - these are the edges between nodes `4` and `5`, and nodes `5` and `6`.- Recall that edges are also a clique. [1]: https://raw.githubusercontent.com/trenton3983/DataCamp/master/Images/2020-05-21_intro_to_network_analysis_in_python/max_cliques.JPG ###Code G = nx.barbell_graph(m1=5, m2=1) nx.find_cliques(G) gl = list(nx.find_cliques(G)) print(gl) nx.draw(G, with_labels=True) G2 = nx.barbell_graph(m1=5, m2=2) gl2 = list(nx.find_cliques(G2)) print(gl2) nx.draw(G2, with_labels=True) ###Output [[4, 0, 1, 2, 3], [4, 5], [6, 5], [6, 7], [7, 8, 9, 10, 11]] ###Markdown Finding all maximal cliques of size "n"Now that you've explored triangles (and open triangles), let's move on to the concept of maximal cliques. Maximal cliques are cliques that cannot be extended by adding an adjacent edge, and are a useful property of the graph when finding communities. NetworkX provides a function that allows you to identify the nodes involved in each maximal clique in a graph: `nx.find_cliques(G)`. Play around with the function by using it on `T` in the IPython Shell, and then try answering the exercise.**Instructions**- Write a function `maximal_cliques()` that has two parameters - `G` and `size` - and finds all maximal cliques of size `n`. - In the `for` loop, iterate over all the cliques in `G` using the `nx.find_cliques()` function. 
- If the current clique is of size `size`, append it to the list `mcs`.- Use an assert statement and your `maximal_cliques()` function to check that there are `33` maximal cliques of size `3` in the graph `T`. ###Code nodes = {1: {'category': 'I', 'occupation': 'politician'}, 3: {'category': 'D', 'occupation': 'celebrity'}, 4: {'category': 'I', 'occupation': 'politician'}, 5: {'category': 'I', 'occupation': 'scientist'}, 6: {'category': 'D', 'occupation': 'politician'}, 7: {'category': 'I', 'occupation': 'politician'}, 8: {'category': 'I', 'occupation': 'celebrity'}, 9: {'category': 'D', 'occupation': 'scientist'}, 10: {'category': 'D', 'occupation': 'celebrity'}, 11: {'category': 'I', 'occupation': 'celebrity'}, 12: {'category': 'I', 'occupation': 'celebrity'}, 13: {'category': 'P', 'occupation': 'scientist'}, 14: {'category': 'D', 'occupation': 'celebrity'}, 15: {'category': 'P', 'occupation': 'scientist'}, 16: {'category': 'P', 'occupation': 'politician'}, 17: {'category': 'I', 'occupation': 'scientist'}, 18: {'category': 'I', 'occupation': 'celebrity'}, 19: {'category': 'I', 'occupation': 'scientist'}, 20: {'category': 'P', 'occupation': 'scientist'}, 21: {'category': 'I', 'occupation': 'celebrity'}, 22: {'category': 'D', 'occupation': 'scientist'}, 23: {'category': 'D', 'occupation': 'scientist'}, 24: {'category': 'P', 'occupation': 'politician'}, 25: {'category': 'I', 'occupation': 'celebrity'}, 26: {'category': 'P', 'occupation': 'celebrity'}, 27: {'category': 'D', 'occupation': 'scientist'}, 28: {'category': 'P', 'occupation': 'celebrity'}, 29: {'category': 'I', 'occupation': 'celebrity'}, 30: {'category': 'P', 'occupation': 'scientist'}, 31: {'category': 'D', 'occupation': 'scientist'}, 32: {'category': 'P', 'occupation': 'politician'}, 33: {'category': 'I', 'occupation': 'politician'}, 34: {'category': 'D', 'occupation': 'celebrity'}, 35: {'category': 'P', 'occupation': 'scientist'}, 36: {'category': 'D', 'occupation': 'scientist'}, 37: {'category': 'I', 'occupation': 'scientist'}, 38: {'category': 'P', 'occupation': 'celebrity'}, 39: {'category': 'D', 'occupation': 'celebrity'}, 40: {'category': 'I', 'occupation': 'celebrity'}, 41: {'category': 'I', 'occupation': 'celebrity'}, 42: {'category': 'P', 'occupation': 'scientist'}, 43: {'category': 'I', 'occupation': 'celebrity'}, 44: {'category': 'I', 'occupation': 'politician'}, 45: {'category': 'D', 'occupation': 'scientist'}, 46: {'category': 'I', 'occupation': 'politician'}, 47: {'category': 'I', 'occupation': 'celebrity'}, 48: {'category': 'P', 'occupation': 'celebrity'}, 49: {'category': 'P', 'occupation': 'politician'}} edges = [(1, 3, {'date': date(2012, 11, 16)}), (1, 4, {'date': date(2013, 6, 7)}), (1, 5, {'date': date(2009, 7, 27)}), (1, 6, {'date': date(2014, 12, 18)}), (1, 7, {'date': date(2010, 10, 18)}), (1, 8, {'date': date(2012, 4, 18)}), (1, 9, {'date': date(2007, 10, 14)}), (1, 10, {'date': date(2012, 9, 8)}), (1, 11, {'date': date(2010, 1, 6)}), (1, 12, {'date': date(2012, 12, 27)}), (1, 13, {'date': date(2008, 12, 18)}), (1, 14, {'date': date(2014, 5, 25)}), (1, 15, {'date': date(2009, 11, 12)}), (1, 16, {'date': date(2008, 8, 6)}), (1, 17, {'date': date(2007, 8, 11)}), (1, 18, {'date': date(2009, 10, 7)}), (1, 19, {'date': date(2008, 7, 24)}), (1, 20, {'date': date(2013, 11, 18)}), (1, 21, {'date': date(2011, 3, 28)}), (1, 22, {'date': date(2013, 3, 4)}), (1, 23, {'date': date(2012, 4, 20)}), (1, 24, {'date': date(2009, 6, 6)}), (1, 25, {'date': date(2013, 6, 18)}), (1, 26, {'date': date(2014, 11, 20)}), (1, 27, 
{'date': date(2007, 4, 28)}), (1, 28, {'date': date(2014, 3, 28)}), (1, 29, {'date': date(2014, 1, 23)}), (1, 30, {'date': date(2007, 10, 9)}), (1, 31, {'date': date(2009, 2, 17)}), (1, 32, {'date': date(2009, 10, 14)}), (1, 33, {'date': date(2010, 5, 19)}), (1, 34, {'date': date(2009, 12, 21)}), (1, 35, {'date': date(2014, 11, 16)}), (1, 36, {'date': date(2010, 2, 25)}), (1, 37, {'date': date(2010, 9, 23)}), (1, 38, {'date': date(2007, 4, 28)}), (1, 39, {'date': date(2007, 4, 8)}), (1, 40, {'date': date(2010, 5, 15)}), (1, 41, {'date': date(2009, 8, 12)}), (1, 42, {'date': date(2013, 3, 9)}), (1, 43, {'date': date(2011, 11, 14)}), (1, 44, {'date': date(2013, 4, 6)}), (1, 45, {'date': date(2010, 1, 18)}), (1, 46, {'date': date(2011, 8, 20)}), (1, 47, {'date': date(2014, 8, 3)}), (1, 48, {'date': date(2010, 3, 15)}), (1, 49, {'date': date(2007, 9, 2)}), (5, 19, {'date': date(2013, 6, 12)}), (5, 28, {'date': date(2010, 12, 4)}), (5, 36, {'date': date(2013, 4, 7)}), (7, 28, {'date': date(2011, 11, 21)}), (8, 19, {'date': date(2010, 11, 5)}), (8, 28, {'date': date(2007, 6, 26)}), (11, 19, {'date': date(2012, 4, 16)}), (11, 28, {'date': date(2011, 6, 21)}), (13, 19, {'date': date(2012, 12, 13)}), (14, 28, {'date': date(2013, 12, 18)}), (15, 19, {'date': date(2008, 12, 13)}), (15, 28, {'date': date(2014, 6, 3)}), (16, 18, {'date': date(2008, 8, 5)}), (16, 35, {'date': date(2014, 6, 4)}), (16, 36, {'date': date(2008, 10, 10)}), (16, 48, {'date': date(2014, 1, 27)}), (17, 19, {'date': date(2007, 11, 11)}), (17, 28, {'date': date(2012, 10, 11)}), (18, 24, {'date': date(2009, 2, 4)}), (18, 35, {'date': date(2008, 12, 1)}), (18, 36, {'date': date(2013, 2, 6)}), (19, 20, {'date': date(2008, 11, 9)}), (19, 21, {'date': date(2007, 7, 23)}), (19, 24, {'date': date(2013, 12, 13)}), (19, 30, {'date': date(2012, 6, 6)}), (19, 31, {'date': date(2011, 1, 27)}), (19, 35, {'date': date(2014, 3, 3)}), (19, 36, {'date': date(2007, 10, 22)}), (19, 37, {'date': date(2008, 4, 20)}), (19, 48, {'date': date(2010, 12, 23)}), (20, 28, {'date': date(2012, 4, 15)}), (21, 28, {'date': date(2014, 4, 27)}), (24, 28, {'date': date(2013, 1, 27)}), (24, 36, {'date': date(2009, 4, 23)}), (24, 37, {'date': date(2008, 6, 27)}), (24, 39, {'date': date(2007, 1, 27)}), (24, 43, {'date': date(2014, 2, 12)}), (25, 28, {'date': date(2014, 5, 9)}), (27, 28, {'date': date(2007, 8, 9)}), (28, 29, {'date': date(2012, 4, 3)}), (28, 30, {'date': date(2007, 12, 2)}), (28, 31, {'date': date(2008, 6, 1)}), (28, 35, {'date': date(2012, 11, 16)}), (28, 36, {'date': date(2012, 9, 26)}), (28, 37, {'date': date(2014, 11, 12)}), (28, 44, {'date': date(2007, 11, 18)}), (28, 48, {'date': date(2008, 5, 25)}), (28, 49, {'date': date(2011, 12, 19)}), (29, 43, {'date': date(2014, 6, 4)}), (33, 39, {'date': date(2011, 9, 5)}), (35, 36, {'date': date(2008, 12, 1)}), (35, 37, {'date': date(2014, 5, 7)}), (35, 39, {'date': date(2007, 6, 17)}), (35, 43, {'date': date(2009, 6, 10)}), (36, 37, {'date': date(2014, 5, 13)}), (36, 39, {'date': date(2014, 12, 6)}), (36, 43, {'date': date(2013, 12, 17)}), (37, 43, {'date': date(2012, 1, 22)}), (38, 39, {'date': date(2009, 5, 15)}), (39, 40, {'date': date(2011, 6, 3)}), (39, 41, {'date': date(2009, 10, 5)}), (39, 45, {'date': date(2011, 1, 12)}), (41, 45, {'date': date(2009, 9, 7)}), (43, 47, {'date': date(2014, 12, 21)}), (43, 48, {'date': date(2013, 1, 28)})] t_321 = nx.Graph() t_321.add_nodes_from(nodes) t_321.add_edges_from(edges) nx.set_node_attributes(t_321, nodes) # Define maximal_cliques() def maximal_cliques(G, 
n): """ Finds all maximal cliques in graph `G` that are of size `size`. """ mcs = [] for clique in nx.find_cliques(G): if len(clique) == n: mcs.append(clique) return mcs # Check that there are 33 maximal cliques of size 3 in the graph T assert len(maximal_cliques(t_321, 3)) == 33 ###Output _____no_output_____ ###Markdown Subgraphs- When you have a large graph, and you want to visualize just a small portion of it, it can be helpful to extract those nodes and their associated edges as a separate graph object.- For example, you might want to visualized a particular path through the network, or you might want to visualized a particular community or clique.- Alternatively, you might just want to explore the structure of the graph around a node out of a number of degrees of separation.- In all of these scenarios, it's useful to be able to "slice out" the nodes and edges of interest, and visualize them. ###Code random.seed(121) G = nx.erdos_renyi_graph(n=20, p=0.2) G.nodes() nx.draw(G, with_labels=True) nodes = list(G.neighbors(8)) nodes nodes.append(8) G_eight = G.subgraph(nodes) G_eight.edges() display(G_eight) display(G) nx.draw(G_eight, with_labels=True) ###Output _____no_output_____ ###Markdown Subgraphs IThere may be times when you just want to analyze a subset of nodes in a network. To do so, you can copy them out into another graph object using `G.subgraph(nodes)`, which returns a new `graph` object (of the same type as the original graph) that is comprised of the iterable of `nodes` that was passed in.`matplotlib.pyplot` has been imported for you as `plt`.**Instructions**- Write a function `get_nodes_and_nbrs(G, nodes_of_interest)` that extracts the subgraph from graph `G` comprised of the `nodes_of_interest` and their neighbors. - In the first `for` loop, iterate over `nodes_of_interest` and append the current node `n` to `nodes_to_draw`. - In the second `for` loop, iterate over the neighbors of `n`, and append all the neighbors `nbr` to `nodes_to_draw`.- Use the function to extract the subgraph from `T` comprised of nodes 29, 38, and 42 (contained in the pre-defined list `nodes_of_interest`) and their neighbors. Save the result as `T_draw`.- Draw the subgraph `T_draw` to the screen. ###Code nodes_of_interest = [29, 38, 42] # Define get_nodes_and_nbrs() def get_nodes_and_nbrs(G, nodes_of_interest): """ Returns a subgraph of the graph `G` with only the `nodes_of_interest` and their neighbors. """ nodes_to_draw = [] # Iterate over the nodes of interest for n in nodes_of_interest: # Append the nodes of interest to nodes_to_draw nodes_to_draw.append(n) # Iterate over all the neighbors of node n for nbr in G.neighbors(n): # Append the neighbors of n to nodes_to_draw nodes_to_draw.append(nbr) return G.subgraph(nodes_to_draw) # Extract the subgraph with the nodes of interest: T_draw T_draw = get_nodes_and_nbrs(T, nodes_of_interest) # Draw the subgraph to the screen nx.draw(T_draw, with_labels=True) plt.show() ###Output _____no_output_____ ###Markdown Subgraphs IIIn the previous exercise, we gave you a list of nodes whose neighbors we asked you to extract.Let's try one more exercise in which you extract nodes that have a particular metadata property and their neighbors. This should hark back to what you've learned about using list comprehensions to find nodes. 
The exercise will also build your capacity to compose functions that you've already written before.**Instructions**- Using a list comprehension, extract nodes that have the metadata `'occupation'` as `'celebrity'` alongside their neighbors: - The _output expression_ of the list comprehension is `n`, and there are two _iterator variables_: `n` and `d`. The iterable is the list of nodes of `T` (including the metadata, which you can specify using `data=True`) and the conditional expression is if the `'occupation'` key of the metadata dictionary `d` equals `'celebrity'`.- Place them in a new subgraph called `T_sub`. To do this: - Iterate over the nodes, compute the neighbors of each node, and add them to the set of nodes `nodeset` by using the `.union()` method. This last part has been done for you. - Use `nodeset` along with the `T.subgraph()` method to calculate `T_sub`.- Draw `T_sub` to the screen. ###Code # Extract the nodes of interest: nodes nodes = [n for n, d in t_321.nodes(data=True) if d['occupation'] == 'celebrity'] # Create the set of nodes: nodeset nodeset = set(nodes) # Iterate over nodes for n in nodes: # Compute the neighbors of n: nbrs nbrs = t_321.neighbors(n) # Compute the union of nodeset and nbrs: nodeset nodeset = nodeset.union(nbrs) # Compute the subgraph using nodeset: T_sub T_sub = t_321.subgraph(nodeset) # Draw T_sub to the screen nx.draw(T_sub, with_labels=True) plt.show() ###Output _____no_output_____ ###Markdown Bringing it all togetherIn this final chapter of the course, you'll consolidate everything you've learned through an in-depth case study of GitHub collaborator network data. This is a great example of real-world social network data, and your newly acquired skills will be fully tested. By the end of this chapter, you'll have developed your very own recommendation system to connect GitHub users who should collaborate together. Case Study Data- This dataset is a GitHub user collaboration network.- GitHub is a social coding site, where users can collaborate on code repositories.- In this network, nodes are users, and edges indicate that two users are collaborators on at least one GitHub repository.- What you'll be accomplishing by the end of the exercises is the following: - You will have analyzed the structure of the graph, including its basic properties. - You will visualize the graph using `nxviz` - You will build a simple recommendation system. - A recommendation system in social networks recommends users to "connect" with one another in some fashion. - In the GitHub context, we will try writing a recommender that suggests users that should collaborate together. Graph properties- Recall from the first chapter about some basic functions for getting a graph's size. ###Code random.seed(121) G = nx.erdos_renyi_graph(n=20, p=0.2) print(len(G.edges())) print(len(G.nodes())) ###Output 45 20 ###Markdown - Are you able to recall what the function names are for computing the degree and betweenness centralities of each node in the graph? 
- `nx.degree_centrality(G)` - `nx.betweenness_centrality(G)` - In both cases, the key is the node name and the value is the centrality score ###Code pp(nx.degree_centrality(G)) print('\n') pp(nx.betweenness_centrality(G)) ###Output {0: 0.10526315789473684, 1: 0.3157894736842105, 2: 0.21052631578947367, 3: 0.05263157894736842, 4: 0.2631578947368421, 5: 0.21052631578947367, 6: 0.3157894736842105, 7: 0.15789473684210525, 8: 0.2631578947368421, 9: 0.21052631578947367, 10: 0.3684210526315789, 11: 0.15789473684210525, 12: 0.21052631578947367, 13: 0.2631578947368421, 14: 0.3157894736842105, 15: 0.21052631578947367, 16: 0.21052631578947367, 17: 0.10526315789473684, 18: 0.5263157894736842, 19: 0.2631578947368421} {0: 0.003759398496240601, 1: 0.08607630186577553, 2: 0.044040657198551936, 3: 0.0, 4: 0.07446393762183234, 5: 0.11647173489278752, 6: 0.08880534670008351, 7: 0.0051239209133945975, 8: 0.11155666945140628, 9: 0.024477861319966585, 10: 0.10698969646338068, 11: 0.01189083820662768, 12: 0.0339459760512392, 13: 0.09337231968810916, 14: 0.05116959064327485, 15: 0.05334168755221386, 16: 0.06688944583681425, 17: 0.0, 18: 0.22528543581175156, 19: 0.02456140350877193} ###Markdown Characterizing the network (I)To start out, let's do some basic characterization of the network, by looking at the number of nodes and number of edges in a network. It has been pre-loaded as `G` and is available for exploration in the IPython Shell. Your job in this exercise is to identify how many nodes and edges are present in the network. You can use the functions `len(G.nodes())` and `len(G.edges())` to calculate the number of nodes and edges respectively.- ~~72900 nodes, 56519 edges.~~- **56519 nodes, 72900 edges.**- ~~47095 nodes, 65789 edges.~~- ~~63762 nodes, 71318 edges.~~ ###Code G = nx.Graph(Gh) print(len(G.nodes())) print(len(G.edges())) ###Output 56519 72900 ###Markdown Characterizing the network (II)Let's continue recalling what you've learned before about node importances, by plotting the degree distribution of a network. This is the distribution of node degrees computed across all nodes in a network.**Instructions**- Plot the degree distribution of the GitHub collaboration network `G`. Recall that there are four steps involved here: - Calculating the degree centrality of `G`. - Using the `.values()` method of `G` and converting it into a list. - Passing the list of degree distributions to `plt.hist()`. - Displaying the histogram with `plt.show()`. ###Code # Plot the degree distribution of the GitHub collaboration network plt.hist(list(nx.degree_centrality(G).values())) plt.show() ###Output _____no_output_____ ###Markdown Characterizing the network (III)The last exercise was on degree centrality; this time round, let's recall betweenness centrality!A small note: if executed correctly, this exercise may need about 5 seconds to execute.**Instructions**- Plot the betweenness centrality distribution of the GitHub collaboration network. You have to follow exactly the same four steps as in the previous exercise, substituting `nx.betweenness_centrality()` in place of `nx.degree_centrality()`. 
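As a quick reference before the full solution below, here is the same four-step pattern on a small random graph (a sketch on a toy graph only -- the exercise itself uses the GitHub subgraph defined in the solution cells that follow): ###Code
# Minimal sketch: compute a centrality dict, take its values, histogram them
import networkx as nx
import matplotlib.pyplot as plt

toy = nx.erdos_renyi_graph(n=50, p=0.1, seed=42)

# Step 1: compute the centrality scores (a dict keyed by node)
bet_cent = nx.betweenness_centrality(toy)

# Step 2: convert the dict values to a list
scores = list(bet_cent.values())

# Step 3: pass the list to plt.hist()
plt.hist(scores)

# Step 4: display the histogram
plt.show()
###Output _____no_output_____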
###Code nodes = {'u41': {'bipartite': 'users', 'grouping': 0}, 'u69': {'bipartite': 'users', 'grouping': 0}, 'u96': {'bipartite': 'users', 'grouping': 0}, 'u156': {'bipartite': 'users', 'grouping': 0}, 'u297': {'bipartite': 'users', 'grouping': 0}, 'u298': {'bipartite': 'users', 'grouping': 0}, 'u315': {'bipartite': 'users', 'grouping': 0}, 'u322': {'bipartite': 'users', 'grouping': 0}, 'u435': {'bipartite': 'users', 'grouping': 0}, 'u440': {'bipartite': 'users', 'grouping': 0}, 'u640': {'bipartite': 'users', 'grouping': 0}, 'u655': {'bipartite': 'users', 'grouping': 0}, 'u698': {'bipartite': 'users', 'grouping': 0}, 'u821': {'bipartite': 'users', 'grouping': 0}, 'u863': {'bipartite': 'users', 'grouping': 0}, 'u901': {'bipartite': 'users', 'grouping': 0}, 'u914': {'bipartite': 'users', 'grouping': 0}, 'u1254': {'bipartite': 'users', 'grouping': 0}, 'u1407': {'bipartite': 'users', 'grouping': 0}, 'u1468': {'bipartite': 'users', 'grouping': 0}, 'u1908': {'bipartite': 'users', 'grouping': 0}, 'u2022': {'bipartite': 'users', 'grouping': 0}, 'u2066': {'bipartite': 'users', 'grouping': 0}, 'u2137': {'bipartite': 'users', 'grouping': 0}, 'u2289': {'bipartite': 'users', 'grouping': 0}, 'u2482': {'bipartite': 'users', 'grouping': 0}, 'u2552': {'bipartite': 'users', 'grouping': 0}, 'u2643': {'bipartite': 'users', 'grouping': 0}, 'u2737': {'bipartite': 'users', 'grouping': 0}, 'u2906': {'bipartite': 'users', 'grouping': 0}, 'u3083': {'bipartite': 'users', 'grouping': 0}, 'u3174': {'bipartite': 'users', 'grouping': 0}, 'u3231': {'bipartite': 'users', 'grouping': 0}, 'u3243': {'bipartite': 'users', 'grouping': 0}, 'u3271': {'bipartite': 'users', 'grouping': 0}, 'u3658': {'bipartite': 'users', 'grouping': 0}, 'u3974': {'bipartite': 'users', 'grouping': 0}, 'u3979': {'bipartite': 'users', 'grouping': 0}, 'u4159': {'bipartite': 'users', 'grouping': 0}, 'u4199': {'bipartite': 'users', 'grouping': 0}, 'u4329': {'bipartite': 'users', 'grouping': 0}, 'u4412': {'bipartite': 'users', 'grouping': 0}, 'u4513': {'bipartite': 'users', 'grouping': 0}, 'u4710': {'bipartite': 'users', 'grouping': 0}, 'u4761': {'bipartite': 'users', 'grouping': 0}, 'u4953': {'bipartite': 'users', 'grouping': 0}, 'u5082': {'bipartite': 'users', 'grouping': 0}, 'u5337': {'bipartite': 'users', 'grouping': 0}, 'u5693': {'bipartite': 'users', 'grouping': 0}, 'u5993': {'bipartite': 'users', 'grouping': 0}, 'u6081': {'bipartite': 'users', 'grouping': 0}, 'u7418': {'bipartite': 'users', 'grouping': 0}, 'u7623': {'bipartite': 'users', 'grouping': 0}, 'u7963': {'bipartite': 'users', 'grouping': 0}, 'u8135': {'bipartite': 'users', 'grouping': 0}, 'u9866': {'bipartite': 'users', 'grouping': 0}, 'u9869': {'bipartite': 'users', 'grouping': 0}, 'u9997': {'bipartite': 'users', 'grouping': 0}, 'u10090': {'bipartite': 'users', 'grouping': 0}, 'u10340': {'bipartite': 'users', 'grouping': 0}, 'u10500': {'bipartite': 'users', 'grouping': 0}, 'u10603': {'bipartite': 'users', 'grouping': 0}, 'u14964': {'bipartite': 'users', 'grouping': 1}} edges = [('u41', 'u2022', {}), ('u41', 'u69', {}), ('u41', 'u5082', {}), ('u41', 'u298', {}), ('u41', 'u901', {}), ('u69', 'u315', {}), ('u69', 'u4513', {}), ('u69', 'u5082', {}), ('u69', 'u901', {}), ('u69', 'u298', {}), ('u69', 'u2022', {}), ('u96', 'u315', {}), ('u96', 'u2482', {}), ('u96', 'u10500', {}), ('u96', 'u2022', {}), ('u96', 'u863', {}), ('u96', 'u9997', {}), ('u96', 'u297', {}), ('u96', 'u698', {}), ('u96', 'u2066', {}), ('u96', 'u7963', {}), ('u96', 'u156', {}), ('u96', 'u2906', {}), ('u96', 'u2552', {}), 
('u156', 'u315', {}), ('u156', 'u2482', {}), ('u156', 'u10500', {}), ('u156', 'u863', {}), ('u156', 'u2022', {}), ('u156', 'u297', {}), ('u156', 'u9997', {}), ('u156', 'u698', {}), ('u156', 'u2066', {}), ('u156', 'u7963', {}), ('u156', 'u2906', {}), ('u156', 'u2552', {}), ('u297', 'u315', {}), ('u297', 'u2482', {}), ('u297', 'u863', {}), ('u297', 'u2022', {}), ('u297', 'u9997', {}), ('u297', 'u698', {}), ('u297', 'u10500', {}), ('u297', 'u2066', {}), ('u297', 'u7963', {}), ('u297', 'u2906', {}), ('u297', 'u2552', {}), ('u298', 'u5082', {}), ('u298', 'u901', {}), ('u298', 'u2022', {}), ('u315', 'u2482', {}), ('u315', 'u10500', {}), ('u315', 'u2022', {}), ('u315', 'u863', {}), ('u315', 'u9997', {}), ('u315', 'u698', {}), ('u315', 'u2066', {}), ('u315', 'u7963', {}), ('u315', 'u2906', {}), ('u315', 'u4513', {}), ('u315', 'u2552', {}), ('u322', 'u3174', {}), ('u322', 'u3974', {}), ('u322', 'u7623', {}), ('u322', 'u8135', {}), ('u322', 'u2022', {}), ('u322', 'u4953', {}), ('u322', 'u640', {}), ('u322', 'u4412', {}), ('u322', 'u10340', {}), ('u322', 'u4159', {}), ('u322', 'u1908', {}), ('u322', 'u3979', {}), ('u322', 'u435', {}), ('u322', 'u821', {}), ('u322', 'u9869', {}), ('u322', 'u9866', {}), ('u322', 'u10603', {}), ('u322', 'u3083', {}), ('u322', 'u4710', {}), ('u435', 'u7623', {}), ('u435', 'u8135', {}), ('u435', 'u2022', {}), ('u435', 'u10603', {}), ('u435', 'u10340', {}), ('u435', 'u9866', {}), ('u440', 'u5693', {}), ('u440', 'u2643', {}), ('u440', 'u10090', {}), ('u440', 'u3271', {}), ('u640', 'u1908', {}), ('u640', 'u3174', {}), ('u640', 'u3979', {}), ('u640', 'u3974', {}), ('u640', 'u821', {}), ('u640', 'u4953', {}), ('u640', 'u4412', {}), ('u640', 'u9869', {}), ('u640', 'u4159', {}), ('u640', 'u4710', {}), ('u640', 'u3083', {}), ('u655', 'u2643', {}), ('u655', 'u2906', {}), ('u655', 'u2137', {}), ('u655', 'u914', {}), ('u655', 'u4513', {}), ('u698', 'u10500', {}), ('u698', 'u2066', {}), ('u698', 'u2906', {}), ('u698', 'u2552', {}), ('u698', 'u9997', {}), ('u698', 'u7963', {}), ('u698', 'u2482', {}), ('u698', 'u863', {}), ('u698', 'u2022', {}), ('u821', 'u3174', {}), ('u821', 'u3974', {}), ('u821', 'u4953', {}), ('u821', 'u4412', {}), ('u821', 'u4159', {}), ('u821', 'u1908', {}), ('u821', 'u3979', {}), ('u821', 'u9869', {}), ('u821', 'u4710', {}), ('u821', 'u3083', {}), ('u863', 'u10500', {}), ('u863', 'u2022', {}), ('u863', 'u7963', {}), ('u863', 'u2906', {}), ('u863', 'u2482', {}), ('u863', 'u9997', {}), ('u863', 'u2552', {}), ('u863', 'u2066', {}), ('u901', 'u2022', {}), ('u901', 'u5082', {}), ('u914', 'u2022', {}), ('u914', 'u3231', {}), ('u1254', 'u2737', {}), ('u1254', 'u2289', {}), ('u1254', 'u2643', {}), ('u1254', 'u4329', {}), ('u1254', 'u4761', {}), ('u1407', 'u2643', {}), ('u1407', 'u6081', {}), ('u1407', 'u3658', {}), ('u1468', 'u5993', {}), ('u1468', 'u2643', {}), ('u1468', 'u2022', {}), ('u1468', 'u7418', {}), ('u1468', 'u5337', {}), ('u1468', 'u9869', {}), ('u1908', 'u3174', {}), ('u1908', 'u3979', {}), ('u1908', 'u3974', {}), ('u1908', 'u4953', {}), ('u1908', 'u4412', {}), ('u1908', 'u9869', {}), ('u1908', 'u4159', {}), ('u1908', 'u4710', {}), ('u1908', 'u3083', {}), ('u2022', 'u2482', {}), ('u2022', 'u5993', {}), ('u2022', 'u7623', {}), ('u2022', 'u8135', {}), ('u2022', 'u10500', {}), ('u2022', 'u10340', {}), ('u2022', 'u9997', {}), ('u2022', 'u3231', {}), ('u2022', 'u2643', {}), ('u2022', 'u2906', {}), ('u2022', 'u5082', {}), ('u2022', 'u4199', {}), ('u2022', 'u9869', {}), ('u2022', 'u2066', {}), ('u2022', 'u9866', {}), ('u2022', 'u7418', {}), ('u2022', 'u7963', {}), 
('u2022', 'u5337', {}), ('u2022', 'u10603', {}), ('u2022', 'u2552', {}), ('u2066', 'u10500', {}), ('u2066', 'u7963', {}), ('u2066', 'u2906', {}), ('u2066', 'u2482', {}), ('u2066', 'u9997', {}), ('u2066', 'u2552', {}), ('u2137', 'u2643', {}), ('u2137', 'u4513', {}), ('u2289', 'u4329', {}), ('u2289', 'u4761', {}), ('u2289', 'u2643', {}), ('u2289', 'u2737', {}), ('u2482', 'u10500', {}), ('u2482', 'u7963', {}), ('u2482', 'u2906', {}), ('u2482', 'u9997', {}), ('u2482', 'u2552', {}), ('u2552', 'u10500', {}), ('u2552', 'u9997', {}), ('u2552', 'u2906', {}), ('u2552', 'u7963', {}), ('u2643', 'u10090', {}), ('u2643', 'u5993', {}), ('u2643', 'u5693', {}), ('u2643', 'u4329', {}), ('u2643', 'u4761', {}), ('u2643', 'u9869', {}), ('u2643', 'u6081', {}), ('u2643', 'u2737', {}), ('u2643', 'u3658', {}), ('u2643', 'u3243', {}), ('u2643', 'u7418', {}), ('u2643', 'u5337', {}), ('u2643', 'u4513', {}), ('u2643', 'u3271', {}), ('u2737', 'u4329', {}), ('u2737', 'u4761', {}), ('u2906', 'u10500', {}), ('u2906', 'u9997', {}), ('u2906', 'u7963', {}), ('u3083', 'u3174', {}), ('u3083', 'u3974', {}), ('u3083', 'u4953', {}), ('u3083', 'u4412', {}), ('u3083', 'u4159', {}), ('u3083', 'u3979', {}), ('u3083', 'u9869', {}), ('u3083', 'u4710', {}), ('u3174', 'u3974', {}), ('u3174', 'u4953', {}), ('u3174', 'u4412', {}), ('u3174', 'u4159', {}), ('u3174', 'u3979', {}), ('u3174', 'u9869', {}), ('u3174', 'u4710', {}), ('u3231', 'u4159', {}), ('u3243', 'u3271', {}), ('u3271', 'u10090', {}), ('u3271', 'u5693', {}), ('u3658', 'u6081', {}), ('u3974', 'u4953', {}), ('u3974', 'u4412', {}), ('u3974', 'u4159', {}), ('u3974', 'u3979', {}), ('u3974', 'u9869', {}), ('u3974', 'u4710', {}), ('u3979', 'u4953', {}), ('u3979', 'u4412', {}), ('u3979', 'u4159', {}), ('u3979', 'u9869', {}), ('u3979', 'u4710', {}), ('u4159', 'u4412', {}), ('u4159', 'u9869', {}), ('u4159', 'u4710', {}), ('u4159', 'u4953', {}), ('u4329', 'u4761', {}), ('u4412', 'u4953', {}), ('u4412', 'u9869', {}), ('u4412', 'u4710', {}), ('u4710', 'u4953', {}), ('u4710', 'u9869', {}), ('u4953', 'u9869', {}), ('u5337', 'u5993', {}), ('u5337', 'u7418', {}), ('u5337', 'u9869', {}), ('u5693', 'u10090', {}), ('u5993', 'u7418', {}), ('u5993', 'u9869', {}), ('u7418', 'u9869', {}), ('u7623', 'u8135', {}), ('u7623', 'u10603', {}), ('u7623', 'u10340', {}), ('u7623', 'u9866', {}), ('u7963', 'u10500', {}), ('u7963', 'u9997', {}), ('u8135', 'u10603', {}), ('u8135', 'u10340', {}), ('u8135', 'u9866', {}), ('u9866', 'u10603', {}), ('u9866', 'u10340', {}), ('u9997', 'u10500', {}), ('u10340', 'u10603', {})] g_413 = nx.Graph() g_413.add_nodes_from(nodes) g_413.add_edges_from(edges) nx.set_node_attributes(g_413, nodes) # Plot the degree distribution of the GitHub collaboration network plt.hist(list(nx.betweenness_centrality(g_413).values())) plt.show() ###Output _____no_output_____ ###Markdown Case Study: Visualization- Practicing drawing Circos, Hive and Arc plots Connected component subgraphs- The following figure contains two connected component subgraphs- The one on the left, containing the yellow node, and the one on the right containing the purple node.- There are no edges connecting the left graph to the right graph.- A connected component subgraph is a set of nodes connected to one another by some path in the subgraph, and not connected to other nodes in the larger graph.- ![connected_component_subgraph][1] [1]: https://raw.githubusercontent.com/trenton3983/DataCamp/master/Images/2020-05-21_intro_to_network_analysis_in_python/con_com_sub.JPG ###Code random.seed(121) G = nx.erdos_renyi_graph(n=20, 
p=0.3) circ = nv.CircosPlot(g_413) # node_color='key', node_group='key') circ.draw() random.seed(60) G = nx.erdos_renyi_graph(n=100, p=0.03) nx.connected_components(G) # connected_componet_subgraphs() is deprecated subgraphs = [G.subgraph(c) for c in nx.connected_components(G)] subgraphs for sg in subgraphs: print(f'Subgraph has {len(sg.nodes())} nodes') nx.draw(sg, with_labels=True) plt.show() ###Output _____no_output_____ ###Markdown MatrixPlotLet's now practice making some visualizations. The first one will be the MatrixPlot. In a MatrixPlot, the matrix is the representation of the edges.**Instructions**- Make a MatrixPlot visualization of the largest connected component subgraph, with authors grouped by their user group number. - First, calculate the largest connected component subgraph by using the` nx.connected_component_subgraphs(G)` inside the provided `sorted()` function. Python's built-in [sorted()][1] **function** takes an iterable and returns a sorted list (in ascending order, by default). Therefore, to access the largest connected component subgraph, the statement is sliced with `[-1]`. - Create the `MatrixPlot` object `h`. You have to specify the parameters `graph` and `node_grouping` to be the largest connected component subgraph and `'grouping'`, respectively. - Draw the `MatrixPlot` object to the screen and display the plot. [1]: https://docs.python.org/2/library/functions.htmlsorted ###Code subgraphs = [g_413.subgraph(c) for c in nx.connected_components(g_413)] subgraphs # Calculate the largest connected component subgraph: largest_ccs largest_ccs = sorted(subgraphs, key=lambda x: len(x))[-1] # Create the customized MatrixPlot object: h h = nv.MatrixPlot(largest_ccs, node_grouping='grouping') # Draw the MatrixPlot to the screen h.draw() plt.show() ###Output _____no_output_____ ###Markdown **Recall that in a MatrixPlot, nodes are the rows and columns of the matrix, and cells are filled in according to whether an edge exists between the pairs of nodes.** ###Code plt.figure(figsize=(15, 15)) nx.draw(largest_ccs, with_labels=True, figsize=(15, 15)) plt.show() ###Output _____no_output_____ ###Markdown ArcPlotNext up, let's use the ArcPlot to visualize the network. You're going to practice sorting the nodes in the graph as well.Note: this exercise may take about 4-7 seconds to execute if done correctly.**Instructions**- Make an ArcPlot of the GitHub collaboration network, with authors sorted by degree. To do this: - Iterate over all the nodes in `G`, including the metadata (by specifying `data=True`). - In each iteration of the loop, calculate the degree of each node `n` with `nx.degree()` and set its `'degree'` attribute. `nx.degree()` accepts two arguments: A graph and a node. - Create the `ArcPlot` object `a` by specifying two parameters: the `graph`, which is `G`, and the `node_order`, which is `'degree'`, so that the nodes are sorted. - Draw the `ArcPlot` object to the screen. 
###Code # Iterate over all the nodes in G, including the metadata for n, d in g_413.nodes(data=True): # Calculate the degree of each node: G.node[n]['degree'] g_413.nodes[n]['degree'] = nx.degree(g_413, n) # Create the ArcPlot object: a a = nv.ArcPlot(g_413, node_order='degree', node_grouping='grouping', node_color='grouping', figsize=(8, 8)) # Draw the ArcPlot to the screen a.draw() plt.show() ###Output _____no_output_____ ###Markdown CircosPlotFinally, you're going to make a CircosPlot of the network!**Instructions**- Make a CircosPlot of the network, again, with GitHub users sorted by their degree, and grouped and `coloured` by their 'grouping' key. To do this: - Iterate over all the nodes in `G`, including the metadata (by specifying `data=True`). - In each iteration of the loop, calculate the degree of each node `n` with `nx.degree()` and set its `'degree'` attribute. - Create the `CircosPlot` object `c` by specifying three parameters in addition to the graph `G`: the `node_order`, which is `'degree'`, the `node_grouping` and the `node_color`, which are both `'grouping'`. - Draw the `CircosPlot` object to the screen. ###Code # Iterate over all the nodes in G, including the metadata for n, d in g_413.nodes(data=True): # Calculate the degree of each node: G.node[n]['degree'] g_413.nodes[n]['degree'] = nx.degree(g_413, n) # Create the ArcPlot object: a a = nv.CircosPlot(g_413, node_order='degree', node_grouping='grouping', node_color='grouping', figsize=(8, 8)) # Draw the ArcPlot to the screen a.draw() plt.show() ###Output _____no_output_____ ###Markdown **This CircosPlot provides a compact alternative to the ArcPlot. It is easy to see in this plot that most users belong to one group.** Case Study: Cliques- A group of nodes that are fully connected to one another.- The simplest clique is an edge and the simplest "complex" clique is a triangle.- A maximal clique is a clique that cannot be extended by adding another node in the graph. ###Code G = nx.erdos_renyi_graph(n=100, p=0.15) print(nx.find_cliques(G)) clique_lens = Counter([len(clique) for clique in nx.find_cliques(G)]) clique_lens ###Output <generator object find_cliques at 0x128785660> ###Markdown Finding cliques (I)You're now going to practice finding cliques in `G`. Recall that cliques are "groups of nodes that are fully connected to one another", while a maximal clique is a clique that cannot be extended by adding another node in the graph.**Instructions**- Count the number of maximal cliques present in the graph and print it. - Use the `nx.find_cliques()` function of `G` to find the maximal cliques. - The `nx.find_cliques()` function returns a generator object. To count the number of maximal cliques, you need to first convert it to a list with `list()` and then use the `len()` function. Place this inside a `print()` function to print it. ###Code cliques = sorted([len(cl) for cl in nx.find_cliques(g_413)], reverse=True) print(cliques) print(f'There are {len(cliques)} cliques.') ###Output [14, 13, 8, 7, 6, 6, 5, 4, 4, 3, 3, 3, 3, 3, 2, 2, 2, 2, 1] There are 19 cliques. ###Markdown Finding cliques (II)Let's continue by finding a particular maximal clique, and then plotting that clique.**Instructions**- Find the author(s) that are part of the largest maximal clique, and plot the subgraph of that/one of those clique(s) using a CircosPlot. To do this: - Use the `nx.find_cliques()` function to calculate the maximal cliques in `G`. Place this within the provided `sorted()` function to calculate the largest maximal clique. 
- Create the subgraph consisting of the largest maximal clique using the `.subgraph()` method and `largest_clique`. - Create the `CircosPlot` object using the subgraph `G_lc` (without any other arguments) and plot it. ###Code # Find the author(s) that are part of the largest maximal clique: largest_clique largest_clique = sorted(nx.find_cliques(g_413), key=lambda x:len(x))[-1] # Create the subgraph of the largest_clique: G_lc G_lc = g_413.subgraph(largest_clique) # Create the CircosPlot object: c c = nv.CircosPlot(G_lc, node_labels=True, figsize=(8, 8)) # Draw the CircosPlot to the screen print(f'The largest maximal clique consists of {len(largest_clique)} users') c.draw() plt.show() ###Output _____no_output_____ ###Markdown Case Study: Final Tasks- Find important GitHub users based on their collaborative relationships - Look for GitHub users that share collaborations with the most number of other users. - _degree centrality_- Find the largest communities of collaborators. - You'll be looking for the largest communities of collaborators. - _maximal clique_- Guild a recommendation system for GitHub users based on the concept of open triangles. - _open triangles_: I'm connected to two people who are not connected to one another Finding important collaboratorsYou'll now look at important nodes once more. Here, you'll make use of the `degree_centrality()` and `betweenness_centrality()` functions in NetworkX to compute each of the respective centrality scores, and then use that information to find the "important nodes". In other words, your job in this exercise is to find the user(s) that have collaborated with the most number of users.**Instructions**- Compute the degree centralities of `G`. Store the result as `deg_cent`.- Compute the maximum degree centrality. Since `deg_cent` is a dictionary, you'll have to use the `.values()` method to get a list of its values before computing the maximum degree centrality with `max()`.- Identify the most prolific collaborators using a list comprehension: - Iterate over the degree centrality dictionary `deg_cent` that was computed earlier using its `.items()` method. What condition should be satisfied if you are seeking to find user(s) that have collaborated with the most number of users? _Hint_: It has do to with the maximum degree centrality.- Hit 'Submit Answer' to see who the most prolific collaborator(s) is/are! ###Code # Compute the degree centralities of G: deg_cent deg_cent = nx.degree_centrality(g_413) # Sorting the dictionary deg_cent = {k: v for k, v in sorted(deg_cent.items(), key=lambda item: item[1], reverse=True)} # Compute the maximum degree centrality: max_dc max_dc = max(list(deg_cent.values())) # Find the user(s) that have collaborated the most: prolific_collaborators prolific_collaborators = [n for n, dc in deg_cent.items() if dc == max_dc] # Print the most prolific collaborator(s) print(f'The most prolific collaborator(s): {prolific_collaborators}') max_dc deg_cent ###Output _____no_output_____ ###Markdown Characterizing editing communitiesYou're now going to combine what you've learned about the BFS algorithm and concept of maximal cliques to visualize the network with an ArcPlot.The largest maximal clique in the Github user collaboration network has been assigned to the subgraph `G_lmc`. Note that for `NetworkX` version 2.x and later, `G.subgraph(nodelist)` returns only an immutable view on the original graph. 
We must explicitly ask for a `.copy()` of the graph to obtain a mutatable version.**Instructions**- Go out 1 degree of separation from the clique, and add those users to the subgraph. Inside the first `for` loop:- Add nodes to `G_lmc` from the neighbors of `G` using the `.add_nodes_from()` and `.neighbors()` methods.- Using the `.add_edges_from()`, method, add edges to `G_lmc` between the current node and all its neighbors. To do this, you'll have create a list of tuples using the `zip()` function consisting of the current node and each of its neighbors. The first argument to `zip()` should be `[node]*len(list(G.neighbors(node)))`, and the second argument should be the neighbors of `node`.- Record each node's degree centrality score in its node metadata.- Do this by assigning `nx.degree_centrality(G_lmc)[n]` to `G_lmc.node[n]['degree centrality']` in the second `for` loop.- Visualize this network with an ArcPlot sorting the nodes by degree centrality (you can do this using the keyword argument `node_order='degree centrality'`). ###Code # Identify the largest maximal clique: largest_max_clique largest_max_clique = set(sorted(nx.find_cliques(g_413), key=lambda x: len(x))[-1]) print(largest_max_clique) # Create a subgraph from the largest_max_clique: G_lmc G_lmc = g_413.subgraph(largest_max_clique).copy() display(G_lmc) # Go out 1 degree of separation for node in list(G_lmc.nodes()): G_lmc.add_nodes_from(g_413.neighbors(node)) G_lmc.add_edges_from(zip([node]*len(list(g_413.neighbors(node))), g_413.neighbors(node))) # Record each node's degree centrality score for n in G_lmc.nodes(): G_lmc.nodes[n]['degree centrality'] = nx.degree_centrality(G_lmc)[n] # Create the ArcPlot object: a a = nv.ArcPlot(G_lmc, node_order='degree centrality') # Draw the ArcPlot to the screen a.draw() plt.show() ###Output {'u10500', 'u9997', 'u2066', 'u96', 'u297', 'u2906', 'u863', 'u2482', 'u2022', 'u698', 'u2552', 'u156', 'u7963', 'u315'} ###Markdown Recommending co-editors who have yet to edit togetherFinally, you're going to leverage the concept of open triangles to recommend users on GitHub to collaborate!**Instructions**- Compile a list of GitHub users that should be recommended to collaborate with one another. To do this: - In the first `for` loop, iterate over all the nodes in `G`, including the metadata (by specifying `data=True`). - In the second `for` loop, iterate over all the possible triangle `combinations`, which can be identified using the combinations() function with a `size` of `2`. - If `n1` and `n2` **do not** have an edge between them, a collaboration between these two nodes (users) **should** be recommended, so increment the `(n1), (n2)` value of the `recommended` dictionary in this case. You can check whether or not `n1` and `n2` have an edge between them using the `.has_edge()` method.- Using a list comprehension, identify the top 10 pairs of users that should be recommended to collaborate. The _iterable_ should be the key-value pairs of the `recommended` dictionary (which can be accessed with the `.items()` method), while the conditional should be satisfied if `count` is _greater_ than the top 10 in `all_counts`. Note that `all_counts` is sorted in ascending order, so you can access the top 10 with `all_counts[-10]`. 
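To make the idea concrete before the full solution below, here is a minimal sketch of an "open triangle" on a tiny hand-built graph (toy data only; the exercise itself uses the GitHub graph): ###Code
import networkx as nx
from itertools import combinations
from collections import defaultdict

# A and C both collaborate with B, but not with each other -> an open triangle centred on B
toy = nx.Graph()
toy.add_edges_from([('A', 'B'), ('B', 'C')])

recommended_toy = defaultdict(int)
for node in toy.nodes():
    # every pair of neighbours of `node` that lacks an edge is a candidate recommendation
    for n1, n2 in combinations(toy.neighbors(node), 2):
        if not toy.has_edge(n1, n2):
            recommended_toy[(n1, n2)] += 1

print(recommended_toy)  # expected: {('A', 'C'): 1}
###Output _____no_output_____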
###Code G = nx.Graph(Gh) # Initialize the defaultdict: recommended recommended = defaultdict(int) # Iterate over all the nodes in G for n, d in g_413.nodes(data=True): # Iterate over all possible triangle relationship combinations for n1, n2 in combinations(g_413.neighbors(n), 2): # Check whether n1 and n2 do not have an edge if not g_413.has_edge(n1, n2): # Increment recommended recommended[((n1), (n2))] += 1 # Identify the top 10 pairs of users all_counts = sorted(recommended.values()) top10_pairs = [pair for pair, count in recommended.items() if count > all_counts[-10]] print(f'Pairs of users who should collaborate: {top10_pairs}') plt.figure(figsize=(15, 15)) nx.draw(g_413, with_labels=True, figsize=(20, 20)) plt.show() ###Output _____no_output_____
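###Markdown As a follow-up sanity check (a small addition that reuses the `recommended` dictionary built above), the pairs can also be ranked explicitly by shared-collaborator count, which avoids ties being hidden by the `all_counts[-10]` cutoff: ###Code
# Rank recommended pairs by the number of shared collaborators (descending)
ranked_pairs = sorted(recommended.items(), key=lambda kv: kv[1], reverse=True)

for (user_a, user_b), shared in ranked_pairs[:10]:
    print(f'{user_a} and {user_b} share {shared} collaborators but are not yet connected')
###Output _____no_output_____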
notebooks/processing/postprocess.ipynb
###Markdown Post processing of aboveground biomass dataset InputRandom forest model prediction results from inference.ipynb. These are parquet files (1 for eachlandsat scene x year) with columns x, y, biomass. x, y are in lat/lon coordinates, and biomass is inunit of Mg biomass / ha and only accounts for aboveground, live, woody biomass. ProcessesFor each 10x10 degree tile in our template1. merge and mosaic all landsat scenes within a 10x10 degree tile for all years available and store the data in zarr format2. fill gaps within the biomass dataset by xarray interpolate_na with linear method (first through dim time, then through dim x, then dim y)3. mask with MODIS MCD12Q1 land cover dataset to only select the forest pixels4. calculate belowground biomass and deadwood and litter ###Code %load_ext autoreload %autoreload 2 from pyproj import CRS import boto3 from rasterio.session import AWSSession from s3fs import S3FileSystem aws_session = AWSSession(boto3.Session(),#profile_name='default'), requester_pays=True) fs = S3FileSystem(requester_pays=True) import xgboost as xgb from osgeo.gdal import VSICurlClearCache import rasterio as rio import numpy as np import xarray as xr import dask import os import fsspec import rioxarray # for the extension to load import pandas as pd from datetime import datetime from dask_gateway import Gateway from carbonplan_trace.v1.landsat_preprocess import access_credentials, test_credentials from carbonplan_trace.v1.inference import predict, predict_delayed from carbonplan_trace.v1 import utils, postprocess, load from carbonplan_trace.tiles import tiles from carbonplan_trace.v1.landsat_preprocess import access_credentials, test_credentials import prefect from prefect import task, Flow, Parameter from prefect.executors import DaskExecutor from prefect.utilities.debug import raise_on_exception from datetime import datetime as time from carbonplan_trace import version %reload_ext watermark print(version) watermark -d -n -t -u -v -p carbonplan_trace -h -m -g -r -b dask.config.set({"array.slicing.split_large_chunks": False}) dask.config.set({"distributed.comm.timeouts.tcp": "50s"}) dask.config.set({"distributed.comm.timeouts.connect": "50s"}) kind_of_cluster = "local" if kind_of_cluster == "local": # spin up local cluster. 
must be on big enough machine from dask.distributed import Client local_cluster_client = Client(n_workers=5, threads_per_worker=1, resources={"workertoken": 1}) local_cluster_client elif kind_of_cluster == "remote": gateway = Gateway() options = gateway.cluster_options() options.environment = { "AWS_REQUEST_PAYER": "requester", "AWS_REGION_NAME": "us-west-2", "DASK_DISTRIBUTED__WORKER__RESOURCES__WORKERTOKEN": "1", } options.worker_cores = 1 options.worker_memory = 31 options.image = "carbonplan/trace-python-notebook:latest" cluster = gateway.new_cluster(cluster_options=options) cluster.adapt(minimum=0, maximum=150) # cluster.scale(200) postprocess._set_thread_settings() # cluster.shutdown() # local_cluster_client.shutdown() # gateway = Gateway() # clusters = gateway.list_clusters() # cluster = gateway.connect(clusters[0].name) # cluster.shutdown() # cluster # client = cluster.get_client() # client local_cluster_client # cluster.shutdown() access_key_id, secret_access_key = access_credentials() tasks = [] # define starting and ending years (will want to go back to 2014 but that might not be ready right now) year0, year1 = 2014, 2021 # define the size of subtile you want to work in (2 degrees recommended) tile_degree_size = 2 # if you want to write the metadata for the zarr store write_tile_metadata = True chunks_dict = {"lat": 1000, "lon": 1000} log_bucket = "s3://carbonplan-climatetrace/v1.2/postprocess_log/" completed_subtiles = fs.ls(log_bucket) completed_subtiles = [subtile.split("/")[-1].split(".txt")[0] for subtile in completed_subtiles] len(completed_subtiles) len(completed_subtiles) - 6995 print(datetime.now()) running_tiles = [tile for tile in tiles] # if ("E" in tile and "N" in tile) # running_tiles = ["40N_120W"] len(running_tiles) parameters_list = [] # for tile in tiles: for tile in running_tiles: lat_tag, lon_tag = utils.get_lat_lon_tags_from_tile_path(tile) lat_lon_box = utils.parse_bounding_box_from_lat_lon_tags(lat_tag, lon_tag) # find the lat_lon_box for that tile min_lat, max_lat, min_lon, max_lon = lat_lon_box # initialize empty dataset. only need to do this once, and not if the tile has already been processed data_path = postprocess.initialize_empty_dataset( lat_tag, lon_tag, year0, year1, write_tile_metadata=write_tile_metadata ) # now we'll split up each of those tiles into smaller subtiles of length `tile_degree_size` # and run through those. 
In this case since we've specified 2, we'll have 25 in each box prefect_parameters = { "MIN_LAT": min_lat, "MIN_LON": min_lon, "YEAR_0": year0, "YEAR_1": year1, "TILE_DEGREE_SIZE": tile_degree_size, "DATA_PATH": data_path, "ACCESS_KEY_ID": access_key_id, "SECRET_ACCESS_KEY": secret_access_key, "CHUNKS_DICT": chunks_dict, } for lat_increment in np.arange(0, 10, tile_degree_size): for lon_increment in np.arange(0, 10, tile_degree_size): task_tag = "{}_{}_{}_{}".format(min_lat, min_lon, lat_increment, lon_increment) if task_tag in completed_subtiles: continue else: increment_parameters = prefect_parameters.copy() increment_parameters["LAT_INCREMENT"] = lat_increment increment_parameters["LON_INCREMENT"] = lon_increment parameters_list.append(increment_parameters) # tasks.append(client.compute(postprocess_delayed(subtile_ul_lat, subtile_ul_lon, year0, year1, tile_degree_size, mapper))) len(parameters_list) # print(datetime.now()) # postprocess.postprocess_subtile(parameters_list[4]) print(datetime.now()) import random random.shuffle(parameters_list) if len(parameters_list) > 1000: parameters_list = parameters_list[:900] # postprocess.postprocess_subtile(parameters_list[4]) if kind_of_cluster == "local": executor = DaskExecutor(address=local_cluster_client.scheduler.address) elif kind_of_cluster == "remote": executor = DaskExecutor( address=client.scheduler.address, client_kwargs={"security": cluster.security}, debug=True, ) def fail_nicely(task, old_state, new_state): if new_state.is_running(): print("running!") if new_state.is_failed(): print("this task {} failed".format(task)) raise ValueError("OH NO") # function that sends a notification return new_state # prefect.engine.signals.state.Skipped() postprocess_task = task( postprocess.postprocess_subtile, # .test_to_zarr,# tags=["dask-resource:workertoken=1"], state_handlers=[fail_nicely], ) with Flow("Postprocessing") as flow: # Run postprocess postprocess_task.map(parameters_list) # with raise_on_exception(): # if running locally (no cluster) # flow.run() # if running on cluster flow.run(executor=executor) cluster.shutdown() ###Output _____no_output_____
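###Markdown A small bookkeeping sketch (not part of the original pipeline): since each finished task leaves a log file whose name follows the `{min_lat}_{min_lon}_{lat_increment}_{lon_increment}` tag built above, the same tags can be grouped to see how many of the 25 subtiles per 10x10 tile have completed. The parsing below assumes that tag format holds for every file in the log bucket. ###Code
from collections import Counter

# Group completed subtile tags by their parent tile (min_lat, min_lon)
tile_progress = Counter()
for tag in completed_subtiles:
    parts = tag.split("_")
    if len(parts) == 4:  # skip any unexpected file names
        min_lat, min_lon = parts[0], parts[1]
        tile_progress[(min_lat, min_lon)] += 1

# With tile_degree_size = 2 there are (10 / 2) ** 2 = 25 subtiles per tile
expected_subtiles = int((10 / tile_degree_size) ** 2)
for (min_lat, min_lon), n_done in sorted(tile_progress.items()):
    print(f"tile ({min_lat}, {min_lon}): {n_done}/{expected_subtiles} subtiles complete")
###Output _____no_output_____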
AI-and-Analytics/Jupyter/Numba_DPPY_Essentials_training/00_DPPY_Prerequisites/Setup_Instructions_Numba.ipynb
###Markdown Introduction to JupyterLab and Notebooks If you are already familiar with Jupyter, skip below and head to the first exercise. A __JupyterLab__ notebook is a sequence of boxes referred to as "cells". Each cell contains either text, like this one, or C++ or Python code that may be executed as part of this tutorial. As you proceed, please note the following: * The active cell is indicated by the blue bar on the left. Click on a cell to select it. * Use the __"run"__ ▶ button at the top or __Shift+Enter__ to execute a selected cell, starting with this one. * Note: If you mistakenly press just Enter, you will enter editing mode for the cell. To exit editing mode and continue, press Shift+Enter. * Unless stated otherwise, the cells containing code within this tutorial MUST be executed in sequence. * You may save the tutorial at any time, which will save the output, but not the state. Saved Jupyter Notebooks keep their execution sequence numbers, which may make a cell appear to have been executed when it has not been run in the new session. Because state is not saved, re-opening or __restarting a Jupyter Notebook__ will require re-executing all the executable steps, in order from the beginning. * If for any reason you need to restart the tutorial from the beginning, you may reset the state of the Jupyter Notebook and clear all output. Use the menu at the top to select __Kernel -> "Restart Kernel and Clear All Outputs"__ * Cells containing Markdown can be executed and will render. However, there is no indication of execution, and it is not necessary to explicitly execute Markdown cells. * Cells containing executable code will have "[ ]:" to the left of the cell: * __[ ]__ blank indicates that the cell has not yet been executed. * __[\*]__ indicates that the cell is currently executing. * Once a cell is done executing, a number will appear in the small brackets to indicate where in the sequence the cell was executed. Any output (e.g. print()'s) from the code will appear below the cell. Code editing, Compiling and Running in Jupyter Notebooks This cell contains a simple Python "Hello World" program. No modifications are necessary: 1. Inspect the code cell below and click run ▶ to run the file ###Code
def main():
    print("Hello World")

if __name__ == "__main__":
    main()
###Output _____no_output_____
sound_negation/.ipynb_checkpoints/negate-sound-checkpoint.ipynb
###Markdown Negating ###Code
# "Negate" the first channel by reflecting it about 0.4 (0.8 - x)
neg = [0.8 - x for x in data[:, 0]]

time = np.linspace(0., length, data.shape[0])
plt.plot(time, neg[:], label="Negated signal")
plt.legend()
plt.xlabel("Time [s]")
plt.ylabel("Amplitude")
plt.show()
###Output _____no_output_____ ###Markdown Summing ###Code
neg = [0.8 - x for x in data[:, 0]]
combined = np.add(data[:, 0], neg)

time = np.linspace(0., length, data.shape[0])
plt.plot(time, combined, label="Original + negated")  # plot the combined signal, not the built-in `sum`
plt.legend()
plt.xlabel("Time [s]")
plt.ylabel("Amplitude")
plt.show()

from scipy.io.wavfile import write

sample_rate = data.shape[0]  # note: this is the sample count; the WAV file's actual sample rate is probably what is wanted here
samples = data[:, 0]
write('output/original.wav', sample_rate, data[:, 0])
write('output/neg.wav', sample_rate, np.asarray(neg))  # scipy's write expects an ndarray
write('output/sum.wav', sample_rate, combined)
###Output _____no_output_____
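###Markdown For comparison, a short sketch of a true phase inversion (an addition, assuming `data` holds samples centred on zero): reflecting the signal about zero rather than about 0.4 makes the sum cancel exactly, which is the usual "anti-noise" behaviour, whereas `0.8 - x` reflects about 0.4, so the sum above is a constant 0.8 offset rather than silence. ###Code
import numpy as np

inverted = -data[:, 0]              # phase-inverted copy of the channel
cancelled = data[:, 0] + inverted   # element-wise sum

# The sum of a signal and its phase-inverted copy is silence (all zeros)
print(np.allclose(cancelled, 0.0))
###Output _____no_output_____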
Exercise_2_Vulnerability.ipynb
###Markdown Exercise 2 - Building vulnerability functions In this exercise, we will build a simple vulnerability function using the impact function form from Emanuel (2011), available as equation 12 here https://gmd.copernicus.org/articles/12/3085/2019/ Import Python libraries ###Code
import math
import pandas as pd
###Output _____no_output_____ ###Markdown Retrieve intensity bins ###Code
df_intensity = pd.read_csv('model_data/intensity_bin_dict.csv')
df_intensity
###Output _____no_output_____ ###Markdown Apply the equation ###Code
# function to calculate damage factor f_ij from max windspeed v_ij
def get_damage_factor(v_ij, v_thresh, v_half):
    v_ij = max(v_ij - v_thresh, 0) / (v_half - v_thresh)
    f_ij = (v_ij**3) / (1 + v_ij**3)
    return f_ij

# vulnerability functions (thresholds given in m/s)
vulnerability_functions = {'residential': {'vulnerability_id': 1, 'v_thresh': 25.7, 'v_half': 74.7}}
vulnerability_functions

# generate vulnerability file
mps_to_knots = 1.94384
v_lst = []
d_lst = []
for v in vulnerability_functions:
    v_id = vulnerability_functions[v]['vulnerability_id']
    v_thresh = vulnerability_functions[v]['v_thresh'] * mps_to_knots
    v_half = vulnerability_functions[v]['v_half'] * mps_to_knots
    for index, row in df_intensity.iterrows():
        intensity = row['intensity_bin_index']
        damage_bin = intensity
        ws_from = (row['max_windspeed'] - 2.5)
        ws_to = (row['max_windspeed'] + 2.5)
        damage_from = get_damage_factor(ws_from, v_thresh, v_half)
        damage_to = get_damage_factor(ws_to, v_thresh, v_half)
        v_row = [v_id, intensity, damage_bin]
        v_lst.append(v_row)
        d_row = [damage_bin, damage_from, damage_to]
        d_lst.append(d_row)

df_vuln = pd.DataFrame(data=v_lst, columns=['vulnerability_id', 'intensity_bin_index', 'damage_bin_index'], dtype='int')
df_vuln['prob'] = 1
df_vuln.to_csv('model_data/vulnerability.csv', index=False)
df_vuln
###Output _____no_output_____ ###Markdown Discretise the damage factors ###Code
# generate damage file
df_damage = pd.DataFrame(data=d_lst, columns=['damage_bin_index', 'damage_from', 'damage_to'])
df_damage['interpolation'] = df_damage['damage_from'] + (df_damage['damage_to'] - df_damage['damage_from']) / 2
df_damage.to_csv('model_data/damage_bin_dict.csv', index=False)
df_damage
###Output _____no_output_____
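###Markdown As a quick visual sanity check (an optional addition, assuming `matplotlib` is available in the environment), the damage factor can be plotted against wind speed: it should stay at zero below `v_thresh`, pass through 0.5 at `v_half`, and flatten towards 1 for very strong winds. ###Code
import numpy as np
import matplotlib.pyplot as plt

v_thresh_knots = 25.7 * mps_to_knots
v_half_knots = 74.7 * mps_to_knots

windspeeds = np.arange(0, 200, 1)  # knots
factors = [get_damage_factor(v, v_thresh_knots, v_half_knots) for v in windspeeds]

plt.plot(windspeeds, factors)
plt.axvline(v_half_knots, linestyle='--', label='v_half')
plt.xlabel('Max windspeed (knots)')
plt.ylabel('Damage factor')
plt.legend()
plt.show()
###Output _____no_output_____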
test_demo.ipynb
###Markdown Load dataset ###Code DATASET_DIR = "data/keypoints_data" annotations = os.path.join(DATASET_DIR, "annotations/ann.csv") def parse_record(raw_record): out_dict = {} raw_data = raw_record.split(";") out_dict["file_path"] = raw_data[0] tmp_keypoints = [data.split(",") for data in raw_data[1:9]] out_dict["keypoints"] = [] for keypoint in tmp_keypoints: keypoint = [int(elem) for elem in keypoint] out_dict["keypoints"].append(keypoint) out_dict["position"] = [float(data) for data in raw_data[9:12]] out_dict["rotation"] = [float(data) for data in raw_data[12:]] return out_dict class CubeDataset(Dataset): def __init__(self, dataset_dir, transform=None): self.records = [] with open(os.path.join(dataset_dir, "annotations/ann.csv"), "r") as ann: line = ann.readline().rstrip() while line: record = parse_record(line) self.records.append(record) line = ann.readline().rstrip() self.transform = transform def __len__(self): return len(self.records) def __getitem__(self, idx): if torch.is_tensor(idx): idx = idx.tolist() img_path = self.records[idx]["file_path"] img = Image.open(img_path).convert("RGB") mask_path = self.records[idx]["file_path"].replace("images", "annotations/masks") mask = Image.open(mask_path) mask = np.array(mask) # instances are encoded as different colors obj_ids = np.unique(mask) # first id is the background, so remove it obj_ids = obj_ids[1:] # split the color-encoded mask into a set # of binary masks masks = mask == obj_ids[:, None, None] # get bounding box coordinates for each mask num_objs = len(obj_ids) boxes = [] for i in range(num_objs): pos = np.where(masks[i]) xmin = np.min(pos[1]) xmax = np.max(pos[1]) ymin = np.min(pos[0]) ymax = np.max(pos[0]) boxes.append([xmin, ymin, xmax, ymax]) boxes = torch.as_tensor(boxes, dtype=torch.float32) labels = torch.ones((num_objs,), dtype=torch.int64) masks = torch.as_tensor(masks, dtype=torch.uint8) image_id = torch.tensor([idx]) area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0]) # suppose all instances are not crowd iscrowd = torch.zeros((num_objs,), dtype=torch.int64) keypoints = np.array(self.records[idx]["keypoints"]) position = self.records[idx]["position"] rotation = self.records[idx]["rotation"] target = {} target["boxes"] = boxes target["labels"] = labels target["masks"] = masks target["image_id"] = image_id target["area"] = area target["iscrowd"] = iscrowd target["keypoints"] = torch.tensor(keypoints[None, :, :]) print(target["keypoints"]) if self.transform: img, target = self.transform(img, target) return img, target class ToTensor(object): def __call__(self, img, target): image, key_pts = img, target if(len(image.shape) == 2): image = image.reshape(image.shape[0], image.shape[1], 1) image = image.transpose((2, 0, 1)) return {'image': torch.from_numpy(image), 'keypoints': torch.from_numpy(key_pts)} transformations = transforms.Compose([ToTensor()]) cube_dataset = CubeDataset( "data/keypoints_data", transformations ) print("Dataset with {} samples loaded".format(len(cube_dataset))) ###Output Dataset with 967 samples loaded ###Markdown Transfer Learning ###Code from torchvision.models.detection.rpn import AnchorGenerator from torchvision.models.detection.keypoint_rcnn import KeypointRCNNPredictor model = torchvision.models.detection.keypointrcnn_resnet50_fpn( pretrained=True) model.roi_heads.keypoint_predictor = KeypointRCNNPredictor(512, 8) device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") model.train() model.to(device) batch_size = 2 train_loader = DataLoader(cube_dataset, 
batch_size=batch_size, shuffle=True, num_workers=0) # construct an optimizer params = [p for p in model.parameters() if p.requires_grad] optimizer = torch.optim.SGD(params, lr=0.005, momentum=0.9, weight_decay=0.0005) # and a learning rate scheduler lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.1) def train_model(model, criterion, optimizer, scheduler, num_epochs=25): since = time.time() best_model_wts = copy.deepcopy(model.state_dict()) best_acc = 0.0 for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch, num_epochs - 1)) print('-' * 10) # Each epoch has a training and validation phase phase = "train" for phase in ['train', 'val']: if phase == 'train': model.train() # Set model to training mode else: model.eval() # Set model to evaluate mode running_loss = 0.0 running_corrects = 0 # Iterate over data. for inputs, labels in train_loader: inputs = inputs.to(device) labels = labels.to(device) # zero the parameter gradients optimizer.zero_grad() # forward # track history if only in train with torch.set_grad_enabled(phase == 'train'): outputs = model(inputs) _, preds = torch.max(outputs, 1) loss = criterion(outputs, labels) # backward + optimize only if in training phase if phase == 'train': loss.backward() optimizer.step() scheduler.step() # statistics running_loss += loss.item() * inputs.size(0) running_corrects += torch.sum(preds == labels.data) epoch_loss = running_loss / dataset_sizes[phase] epoch_acc = running_corrects.double() / dataset_sizes[phase] print('{} Loss: {:.4f} Acc: {:.4f}'.format( phase, epoch_loss, epoch_acc)) # deep copy the model if phase == 'val' and epoch_acc > best_acc: best_acc = epoch_acc best_model_wts = copy.deepcopy(model.state_dict()) print() time_elapsed = time.time() - since print('Training complete in {:.0f}m {:.0f}s'.format( time_elapsed // 60, time_elapsed % 60)) print('Best val Acc: {:4f}'.format(best_acc)) # load best model weights model.load_state_dict(best_model_wts) return model criterion = nn.MSELoss() model = train_model(model, criterion, optimizer, lr_scheduler, num_epochs=25) ###Output Epoch 0/24 ---------- tensor([[[595, 269, 1], [581, 283, 1], [547, 204, 1], [562, 193, 1], [521, 299, 0], [504, 314, 1], [470, 236, 1], [488, 224, 1]]], dtype=torch.int32)
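###Markdown The `train_model()` helper above follows a classification-style loop (`outputs = model(inputs)` with a separate criterion and `dataset_sizes`), whereas a torchvision Keypoint R-CNN in training mode takes a list of image tensors plus a list of target dicts and returns a dictionary of losses that are summed and backpropagated. Below is a minimal sketch of that style of loop (an addition, not the original training code; it assumes the dataset yields `(image_tensor, target_dict)` pairs, and the custom `collate_fn` is needed because detection targets vary in size between samples). The epoch count is illustrative only. ###Code
import torch
from torch.utils.data import DataLoader

def detection_collate(batch):
    # keep images and targets as lists instead of stacking them into one tensor
    return tuple(zip(*batch))

loader = DataLoader(cube_dataset, batch_size=2, shuffle=True, num_workers=0,
                    collate_fn=detection_collate)

model.train()
for epoch in range(10):
    epoch_loss = 0.0
    for images, targets in loader:
        images = [img.to(device) for img in images]
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        # In train mode the detection model returns a dict of losses
        loss_dict = model(images, targets)
        loss = sum(loss_dict.values())

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
    lr_scheduler.step()
    print(f'epoch {epoch}: total loss {epoch_loss:.4f}')
###Output _____no_output_____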
4_TestData.ipynb
###Markdown Load libraries ###Code import numpy as np import pandas as pd from xgboost import XGBClassifier from sklearn.metrics import matthews_corrcoef, roc_auc_score from sklearn.model_selection import cross_val_score, StratifiedKFold import matplotlib.pyplot as plt import seaborn as sns import category_encoders as ce ###Output _____no_output_____ ###Markdown Load data ###Code # Load pickle file with the categorical features selected import pickle with open("C://Users/luisgasco/Documents/bosh_kaggle_comp/data/final_selected_features.pkl", 'rb') as f: df_importance_150 = pickle.load(f) list_features = list(df_importance_150.feature_name) dat_cols = list(pd.read_csv("C://Users/luisgasco/Documents/bosh_kaggle_comp/data/train_date.csv", nrows=0, dtype=np.float32).columns) num_cols = list(pd.read_csv("C://Users/luisgasco/Documents/bosh_kaggle_comp/data/train_numeric.csv", nrows=0, dtype=np.float32).columns) cat_cols = list(pd.read_csv("C://Users/luisgasco/Documents/bosh_kaggle_comp/data/train_categorical.csv", nrows=0).columns) # Python program to find the common elements # in two lists def common_member(a, b): a_set = set(a) b_set = set(b) if (a_set & b_set): lista = list(a_set & b_set) else: print("No common elements") return lista sel_cat = common_member(list_features, cat_cols) sel_num = common_member(list_features, num_cols) sel_dat = common_member(list_features, dat_cols) date_chunks = pd.read_csv("C://Users/luisgasco/Documents/bosh_kaggle_comp/data/train_date.csv", usecols = sel_dat, chunksize=100000, dtype=np.float32) num_chunks = pd.read_csv("C://Users/luisgasco/Documents/bosh_kaggle_comp/data/train_numeric.csv", chunksize=100000, dtype=np.float32, usecols = sel_num) cat_chunks = pd.read_csv("C://Users/luisgasco/Documents/bosh_kaggle_comp/data/train_categorical.csv", usecols = sel_cat, chunksize=100000) X = pd.concat([pd.concat([dchunk, nchunk, cchunk], axis=1) for dchunk, nchunk, cchunk in zip(date_chunks, num_chunks, cat_chunks)]) ###Output C:\Users\luisgasco\.conda\envs\tensorflow\lib\site-packages\IPython\core\interactiveshell.py:3337: DtypeWarning: Columns (1987) have mixed types.Specify dtype option on import or set low_memory=False. if (await self.run_code(code, result, async_=asy)): ###Markdown Transform the categorical variable ###Code import category_encoders as ce # version 1.2.8 filename = "C://Users/luisgasco/Documents/bosh_kaggle_comp/data/encoder_for_testing.sav" ce_target = pickle.load(open(filename, 'rb')) ce_target X_trans = ce_target.transform(X) ###Output _____no_output_____ ###Markdown Load prediction model ###Code # open a file, where you ant to store the data filename = "C://Users/luisgasco/Documents/bosh_kaggle_comp/data/final_prediction_model.sav" model_fin = pickle.load(open(filename, 'rb')) # Make predictions with the threshold 0.246 predicciones_final = (model_fin.predict_proba(X_trans)[:,1] > 0.246).astype(np.int8) ###Output _____no_output_____ ###Markdown Save to the submission csv ###Code sub = pd.read_csv("C://Users/luisgasco/Documents/bosh_kaggle_comp/data/sample_submission.csv", index_col=0) sub.shape sub["Response"] = predicciones_final sub.to_csv("C://Users/luisgasco/Documents/bosh_kaggle_comp/data/submission.csv") ###Output _____no_output_____
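###Markdown Two quick consistency checks before uploading (an addition): the chunked readers above point at the `train_*` CSV files, so for a real submission the same selected columns would presumably need to be read from the corresponding `test_*` files; and it is worth confirming that the number of predictions matches the submission template and that the predicted positive rate looks plausible before writing the file. ###Code
# Basic consistency checks before uploading (a sketch)
print(f"predictions: {len(predicciones_final)}, submission rows: {len(sub)}")
print(f"predicted positive rate: {predicciones_final.mean():.4f}")
assert len(predicciones_final) == len(sub), (
    "row count mismatch - the features above were read from the train_* files, "
    "so the same columns need to be loaded from the matching test_* files for a valid submission"
)
###Output _____no_output_____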
torch_v1_total.ipynb
###Markdown Load Data ###Code df_train = pd.read_csv('input/train.csv', dtype=np.float32) df_test = pd.read_csv('input/test.csv', dtype=np.float32) print(df_train.shape, df_test.shape) layer_cols = [c for c in df_train.columns if 'layer_' in c] fea_cols = [c for c in df_train.columns if c not in layer_cols] len(fea_cols), len(layer_cols) df_model = df_train ###Output _____no_output_____ ###Markdown Model ###Code class DNNModel(torch.nn.Module): def __init__(self, input_size, dropout_probability=0.3): super(DNNModel,self).__init__() relu = torch.nn.ReLU() dropout = torch.nn.Dropout(p=dropout_probability) self.model = torch.nn.Sequential( # torch.nn.Linear(input_size, 4), # torch.nn.Linear(input_size, 1), torch.nn.Linear(input_size, 200), relu, torch.nn.BatchNorm1d(200), dropout, torch.nn.Linear(200, 150), relu, torch.nn.BatchNorm1d(150), dropout, torch.nn.Linear(150, 100), relu, torch.nn.BatchNorm1d(100), dropout, torch.nn.Linear(100, 64), relu, torch.nn.BatchNorm1d(64), dropout, torch.nn.Linear(64, 32), relu, torch.nn.BatchNorm1d(32), dropout, torch.nn.Linear(32, 4) ) def forward(self, x): return self.model(x) class CNNModel(torch.nn.Module): def __init__(self, dropout_probability=0.3): super().__init__() relu = torch.nn.ReLU() dropout = torch.nn.Dropout(p=dropout_probability) self.cnn = torch.nn.Sequential( torch.nn.Conv1d(1, 2, 31, stride=1, padding=0), #196 relu, torch.nn.MaxPool1d(2), #98 torch.nn.Conv1d(2, 4, 19, stride=1, padding=0), #80 relu, torch.nn.MaxPool1d(2), #40 torch.nn.Conv1d(4, 8, 11, stride=1, padding=0), #30 relu, torch.nn.MaxPool1d(2), #15 torch.nn.Conv1d(8, 16, 6, stride=1, padding=1), #12 relu, torch.nn.MaxPool1d(2), #6 ) # # torch.nn.Linear(input_size, 4), # torch.nn.Linear(input_size, 200), relu, #torch.nn.BatchNorm1d(200), dropout, # torch.nn.Linear(200, 200), relu, #torch.nn.BatchNorm1d(200), dropout, # torch.nn.Linear(200, 200), relu, #torch.nn.BatchNorm1d(200), dropout, # torch.nn.Linear(200, 150), relu, #torch.nn.BatchNorm1d(200), dropout, # torch.nn.Linear(150, 128), relu, #torch.nn.BatchNorm1d(128), dropout, # torch.nn.Linear(128, 128), relu, #torch.nn.BatchNorm1d(128), dropout, # torch.nn.Linear(128, 100), relu, #torch.nn.BatchNorm1d(128), dropout, # torch.nn.Linear(100, 64), relu, #torch.nn.BatchNorm1d(128), dropout, # torch.nn.Linear(64, 32), relu, #torch.nn.BatchNorm1d(128), dropout, # torch.nn.Linear(32, 16), relu, #torch.nn.BatchNorm1d(128), dropout, # torch.nn.Linear(16, 8), relu, #torch.nn.BatchNorm1d(128), dropout, self.fc = torch.nn.Sequential( torch.nn.Linear(16*6, 64), relu, #torch.nn.BatchNorm1d(128), dropout, torch.nn.Linear(64, 4) ) def forward(self, x): x = x.unsqueeze(1) out = self.cnn(x) dim = 1 for d in out.size()[1:]: #24, 4, 4 dim = dim * d out = out.view(-1, dim) out = self.fc(out) return out return self.model(x) ###Output _____no_output_____ ###Markdown Train ###Code model_ts = datetime.now().strftime('%Y%m%dT%H%M%S') print(model_ts) # layer_cols = [layer_cols[0]] print(f'fea_size {len(fea_cols)} layer_cols {layer_cols}') model = DNNModel(input_size=len(fea_cols), dropout_probability=0.5).to(device) # model = CNNModel(dropout_probability=0.5).to(device) criterion = nn.L1Loss(reduction='mean').to(device) optimizer = torch.optim.Adam(model.parameters(), lr = 0.01) scheduler = StepLR(optimizer, step_size=400, gamma=0.97) X_batch = torch.Tensor(df_model[fea_cols].values).float().to(device) y_batch = torch.Tensor(df_model[layer_cols].values).float().to(device) # model.load_state_dict(torch.load('checkpoint.pt')) total_epoch = 5000 
model.train() for e in tqdm_notebook(range(total_epoch), total=total_epoch, desc='Epoch'): y_pred = model(X_batch) # print(y_pred, y_batch) loss = criterion(y_pred, y_batch) print(f'Epock {e} / {total_epoch} loss: {loss.item()}') optimizer.zero_grad() loss.backward() optimizer.step() scheduler.step() if e % 1000 == 0: torch.save(model.state_dict(), 'checkpoint_1nn.pt') torch.save(model.state_dict(), 'checkpoint_1nn.pt') ###Output _____no_output_____
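###Markdown A minimal sketch of reloading the saved checkpoint for inference on the test features — it assumes `df_test` carries the same `fea_cols` used for training and uses the `checkpoint_1nn.pt` file written above: ###Code
# Rebuild the architecture, load the trained weights, and predict the four layer targets.
model = DNNModel(input_size=len(fea_cols), dropout_probability=0.5).to(device)
model.load_state_dict(torch.load('checkpoint_1nn.pt', map_location=device))
model.eval()

with torch.no_grad():
    X_test = torch.Tensor(df_test[fea_cols].values).float().to(device)
    preds = model(X_test).cpu().numpy()  # shape: (n_rows, 4)

df_pred = pd.DataFrame(preds, columns=layer_cols)
###Output _____no_output_____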
tutorials/Continuous optimization.ipynb
###Markdown Most of the time, we want our optimizer to quit when it converges or when the parameter space has been fully explored. Other times, we'd rather optimize continuously, keeping the coordinates near a (potentially drifting) maximum. In these cases, we can pass the "continuous=True" flag into certain optimizers. This overrides a specified number of iterations and will instead run the optimization algorithm until interrupted.Let's see an example of continuous optimization. We define a Gaussian objective function with a second (uncontrolled) parameter determining the location of the peak. We'll start continuous optimization and watch how the GradientDescent optimizer responds when we change the peak location: ###Code import numpy as np from parametric import Parameter from optimistic import experiment, GridSearch, GradientDescent import time x = Parameter('x', 0.5) x0 = Parameter('x0', 0) @experiment def gaussian(): time.sleep(0.0005) return np.exp(-(x-x0)**2) gd = GradientDescent(gaussian, show_progress=False, record_data=False, display=True, continuous=True, learning_rate=8e-2, threaded=True).add_parameter(x, bounds=(-10, 10)) gd.run() x0(0.2) ###Output _____no_output_____ ###Markdown Behind the scenesTo streamline the code, we'd like to choose between "while True" and "for i in range(iterations)" statements depending on the truth value of "continuous." This can be done very cleanly using a generator overriding range: ###Code def custom_range(iterations, continuous=False): if not continuous: yield from range(iterations) else: i = 0 while True: yield i i = (i+1) % iterations for i in custom_range(10, continuous=False): print(i) ###Output _____no_output_____ ###Markdown If continuous==False, then this behaves exactly like a list. However, if continuous==True, then the generator will repeatedly reset i to 0 and repeat the incrementing until the execution is interrupted. This function is defined in the Algorithm base class, so algorithms using "for i in range(iterations)" will automatically run forever if the "continuous" flag is set to True. We also have an "iterate" function which replaces the default Python behavior of "for x in X", where X is a list. If self.continuous is True, the list will be iterated through repeatedly. This function looks like this: ###Code def iterate(lst, continuous=False): if not continuous: yield from list(lst) else: i = 0 while True: yield lst[i] i = (i+1) % len(lst) points = [0, 2, 3, 5] for p in iterate(points, continuous=False): print(p) ###Output _____no_output_____
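###Markdown Since the continuous variants never stop on their own, an easy way to inspect their behaviour is to slice a finite number of items off the generator. A short sketch using the standard-library `itertools.islice` (not part of the optimistic API): ###Code
from itertools import islice

# Take the first 9 values from the wrapping iterator: 0, 2, 3, 5, 0, 2, 3, 5, 0
for p in islice(iterate([0, 2, 3, 5], continuous=True), 9):
    print(p)

# The same idea works for custom_range: indices cycle 0..9, then start over
print(list(islice(custom_range(10, continuous=True), 12)))
###Output _____no_output_____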
13_twelve_days/twelve_days.ipynb
###Markdown Twelve Days of Christmas Write a program that will generate the verses of the song "The Twelve Days of Christmas" ###Code (./twelve_days.ps1).split("`n") | select -last 10 ###Output _____no_output_____ ###Markdown The program should accept a `-n` or `--number` option (default 12) to control the number of verses that are generated ###Code ./twelve_days.ps1 -n 2 ###Output _____no_output_____

FigS2_S3_map_ohc_ishii_iap.ipynb
###Markdown Figures S2 and S3: OHC hindcats vs observations- Columns of figures are plotted separately- For differences in S2 and S3 adjust variable nyears in respective column to change number of years used for the last panel ###Code import xarray as xr import matplotlib.pyplot as plt from matplotlib.patches import Rectangle import matplotlib.patches as mpatches import cmocean as cmo import numpy as np import cartopy.crs as ccrs import cartopy import pandas as pd from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER import matplotlib.ticker as mticker import warnings warnings.filterwarnings("ignore") import pandas as pd from scipy.interpolate import griddata from scipy.io import loadmat import datetime # from datetime import datetime import string import sys sys.path.append("./") # adds upper level to working directory\n from utils_iohc_ummenhofer2020 import deseason,ohc_anomaly,cut_indo,plot_map,finished_plot,add_ipo_bar,monte_carlo # where to save plots plotsave = 'plots/' datapath = '/vortexfs1/share/clidex/data/' # baseline period for anomalies base = ['1960-01-01','2012-12-31'] # paper # values for heat content calculation cp = 3994 # heat capacity rho = 1029 # reference density # cp and rho from https://xgcm.readthedocs.io/en/latest/example_eccov4.html ###Output _____no_output_____ ###Markdown Ishii Derive and save OHC anomaly ###Code # Ishii ishii = xr.open_dataset(datapath + 'obs/Ishii/ishii_20E_180E_72S_35N.nc') #associated cell area area = xr.open_dataset(datapath + 'obs/Ishii/ishii_gridarea_20E_180E_72S_35N.nc') # need mask for Indian ocean xm,ym = np.meshgrid(np.arange(-179.5,180.5,1),np.arange(-89.5,90.5,1)) mask_ind = griddata((mask['NAV_LON'].values.ravel(),mask['NAV_LAT'].values.ravel()), mask['tmaskind'].values.ravel(),(xm,ym),'linear') # set nan to 0 mask_ind[np.isnan(mask_ind)]=0 mask_ind.shape # rearrange data to match longitudes of oras5 mask1 = np.squeeze(mask_ind[:,np.where(xm[0,:]>=0)]) mask2 = np.squeeze(mask_ind[:,np.where(xm[0,:]<0)]) mask_ind2 = np.concatenate((mask1,mask2),axis=1) # create xarray for mask mask_ishii = xr.DataArray(mask_ind2, dims=['lat', 'lon'],coords={'lon': np.arange(0.5,360.5,1),'lat': ym[:,0]}) mask_ishii = mask_ishii.sel(lon=slice(20,180),lat=slice(-72,35)) # cut out Indian Ocean and multiply by area # ishii_ind_ext = ishii*mask.values*area['cell_area'].values # ishii_ind_ext = ishii_ind_ext.where(ishii_ind_ext['var80']>0,drop=True) ############################################################### ############################################################### ############################################################### # derive mid-layer value and layer thickness for integration dz = np.diff(ishii['depth']) tempdz = (ishii['var80'][:,:-1,:,:].values + ishii['var80'][:,1:,:,:].values)/2 depthdz = (ishii.depth[:-1].values + ishii.depth[1:].values)/2 # create array for multiplication dummy = np.tile(dz,(ishii['var80'].shape[0],ishii['var80'].shape[2],ishii['var80'].shape[3],1)) dzm = np.moveaxis(dummy,-1,1) # multiply with layer thickness and create xarray tempz2 = tempdz*dzm temp = xr.DataArray(tempz2, dims=['time','depth','lat', 'lon'], coords={'lon': ishii.lon.values, 'lat': ishii.lat.values, 'time': ishii.time.values, 'depth': depthdz}).to_dataset(name='temp') ############################################################### ############################################################### ############################################################### # cut upper 700m & derive OHC # base = ['1958-01-01','2012-01-01'] 
dummy = temp.sel(depth=slice(0,701)) ohc_ishii = dummy.sum(dim=['depth'])*cp*rho ohca_ishii = ohc_ishii.groupby('time.month')-ohc_ishii.sel( time=slice(*base)).groupby('time.month').mean('time') # somehow needed to do that in a separate step ohca_ishii = ohca_ishii.drop('month') ######################################### # save to netcdf # ohca_ishii.rename({'temp':'OHC700'}).to_netcdf('./data/ohca700_ishii.nc') # del ishii_ind,ohc_ishii,dummy,temp ###Output _____no_output_____ ###Markdown Load saved OHC anomaly & monte-carlo for significance ###Code ohca_ishii = xr.open_dataset('../data/ohca700_ishii.nc') # monte carlo simulation for significance ishii_p5 = {} ishii_p95 = {} [ishii_p5['10yr'],ishii_p95['10yr']] = monte_carlo(ohca_ishii['OHC700'],duration=10*12,n=1000,pval=5,timevar='time') [ishii_p5['7yr'],ishii_p95['7yr']] = monte_carlo(ohca_ishii['OHC700'],duration=7*12,n=1000,pval=5,timevar='time') [ishii_p5['3yr'],ishii_p95['3yr']] = monte_carlo(ohca_ishii['OHC700'],duration=3*12,n=1000,pval=5,timevar='time') # save to file np.save('../data/ishii_percentiles_two_tail_90p_base_1960_2012.npy',[ishii_p5,ishii_p95]) ishii_p5,ishii_p95 = np.load('../data/ishii_percentiles_two_tail_90p_base_1960_2012.npy',allow_pickle=True) ###Output _____no_output_____ ###Markdown IAP (Cheng) ###Code # load data iap = xr.open_dataset(datapath+'obs/OHC_temp_Chen/ohc/OHC700_IAP_20E_160W_72S_35N.nc') # somehow time did not read properly --> create new time vector time = pd.date_range('1940-01-01','2019-12-01' , freq='MS') iap['time'] = time # derive anomaly & deseason iap_ind = deseason(iap,timevar='time',refperiod=base) # # monte carlo simulation for significance # iap_p5 = {} # iap_p95 = {} # [iap_p5['10yr'],iap_p95['10yr']] = monte_carlo(iap_ind['OHC700'],duration=10*12,n=1000,pval=5,timevar='time') # [iap_p5['7yr'],iap_p95['7yr']] = monte_carlo(iap_ind['OHC700'],duration=7*12,n=1000,pval=5,timevar='time') # [iap_p5['3yr'],iap_p95['3yr']] = monte_carlo(iap_ind['OHC700'],duration=3*12,n=1000,pval=5,timevar='time') # # save to file # np.save('../data/iap_percentiles_two_tail_90p_base_1960_2012.npy',[iap_p5,iap_p95]) iap_p5,iap_p95 = np.load('../data/iap_percentiles_two_tail_90p_base_1960_2012.npy',allow_pickle=True) ###Output _____no_output_____ ###Markdown Plot decadal averages (like Fig3) ###Code def plot_decave(ds,p95,p5,var,timevar,nyears): """ Plots panels for decadal averages (stippling for significance) INPUT: ds (xarray): data array containing quantitiy to plot p95 (dict): dictionary with numpy arrays containing upper threshold for significance p5 (dict): dictionary with numpy arrays containing lower threshold for significance nyears (int): number of years considered for last panel (varies due to different lengths of datasets) """ plt.rcParams.update({'font.size': 8}) fig,ax=plt.subplots(nrows=6,figsize=(5,10), subplot_kw = dict(projection=ccrs.PlateCarree(central_longitude=120))) plt.subplots_adjust(hspace=0.2) vmin=-1.5 vmax=1.5 cmap = plt.get_cmap('RdBu_r',len(np.arange(-1.2,1.2,0.1))) # loop over datasets and plot for year,i in zip(np.arange(1960,2020,10),range(6)): if i==5: year2 = year+nyears-1 dummy_p95 = p95[str(nyears)+'yr'] dummy_p5 = p5[str(nyears)+'yr'] else: year2 = year+9 dummy_p95 = p95['10yr'] dummy_p5 = p5['10yr'] #print(year,year2) time_bnds = [str(year) + '-01-01',str(year2) + '-12-31'] cc= (ds.sel(time=slice(*time_bnds)).mean(timevar)[var]/1e09).plot(ax=ax[i],cmap=cmap, vmin=vmin,vmax=vmax, transform=ccrs.PlateCarree(), add_colorbar=False) # significance mask = 
(xr.zeros_like(ds[var].sel(time=slice(*time_bnds)).mean('time')).values)*np.nan dummy = (ds[var].sel(time=slice(*time_bnds)).mean('time').values-dummy_p95) mask[dummy>=0]=1 dummy = (ds[var].sel(time=slice(*time_bnds)).mean('time').values-dummy_p5) mask[dummy<=0]=1 mask[np.isnan(ds[var][0,::])] = np.nan ax[i].pcolor(ds.lon,ds.lat,mask,hatch='...',alpha=0.,transform=ccrs.PlateCarree()) # make plot pretty ax[i].coastlines(resolution='50m') ax[i].add_feature(cartopy.feature.LAND, color='lightgray') ax[i].set_title(str(year) + '-' + str(year2),fontsize=8,loc='left',weight='bold') ax[i].tick_params(axis="x", direction="out") ax[i].tick_params(axis="y",direction="out") ax[i].set_extent((30,181,-40,31),crs=ccrs.PlateCarree()) ############ adjust labels for subplots ######################### ax[i].gridlines(crs=ccrs.PlateCarree(),draw_labels=False, xlocs=[40,80,120,160,200,-160],ylocs=range(-60,60,30)) gl = ax[i].gridlines(crs=ccrs.PlateCarree(),draw_labels=True, xlocs=[40,80,120,160],ylocs=range(-60,60,30)) gl.ylabels_right = False gl.xlabels_top = False # ylabels gl.yformatter = LATITUDE_FORMATTER gl.ylabel_style = {'size':10} # xlabels if i==5: gl.xformatter = LONGITUDE_FORMATTER gl.xlabel_style = {'size':10} else: gl.xlabels_bottom = False t = ax[i].text(0.02, 0.82, string.ascii_lowercase[i]+')', transform=ax[i].transAxes, size=8, weight='bold') t.set_bbox(dict(facecolor='w',boxstyle='square,pad=0.2')) # add colorbar cbaxes = fig.add_axes([0.78, 0.3, 0.02, 0.4]) cb = plt.colorbar(cc,orientation='vertical', cax = cbaxes,extend='both', label='OHC anomaly [$10^{9}\,$J]') # fix title for bottom panels # ax[5].set_title('2010-'+str(int(ds['time'][-1].dt.year.values)),fontsize=8,loc='left',weight='bold') ax[5].set_title('2010-'+str(2009+nyears),fontsize=8,loc='left',weight='bold') return fig # finished_plot(fig,plotsave+'ohc_map_decadal_ishii.png') # call plotting function fig = plot_decave(iap_ind,iap_p95,iap_p5,var='OHC700',timevar='time',nyears=7) # finished_plot(fig,plotsave+'ohc_map_decadal_iap_base_1960_2012_0_700m_stippling_until_2016_V2.png') ###Output _____no_output_____ ###Markdown IshiiSomehow the stippling did not work with function above. Not sure why (something to do with mask I think)! 
###Code plt.rcParams.update({'font.size': 8}) fig,ax=plt.subplots(nrows=6,figsize=(5,10), subplot_kw = dict(projection=ccrs.PlateCarree(central_longitude=120))) plt.subplots_adjust(hspace=0.2) # vmin=-0.8 # vmax=0.8 vmin=-1.5 vmax=1.5 cmap = plt.get_cmap('RdBu_r',len(np.arange(-1.2,1.2,0.1))) var='OHC700' ds = ohca_ishii nyears = 7 # numbr of years for last panel # loop over datasets and plot for year,i in zip(np.arange(1960,2020,10),range(6)): if i==5: year2 = year+nyears-1 dummy_p95 = ishii_p95[str(nyears)+'yr'] dummy_p5 = ishii_p5[str(nyears)+'yr'] else: year2 = year+9 dummy_p95 = ishii_p95['10yr'] dummy_p5 = ishii_p5['10yr'] #print(year,year2) time_bnds = [str(year) + '-01-01',str(year2) + '-12-31'] cc= (ds.sel(time=slice(*time_bnds)).mean('time')[var]/1e09).plot(ax=ax[i],cmap=cmap, vmin=vmin,vmax=vmax, transform=ccrs.PlateCarree(), add_colorbar=False) # significance mask = (xr.zeros_like(ds[var].sel(time=slice(*time_bnds)).mean('time')).values)*np.nan dummy = (ds[var].sel(time=slice(*time_bnds)).mean('time').values-dummy_p95) mask[dummy>=0]=1 dummy = (ds[var].sel(time=slice(*time_bnds)).mean('time').values-dummy_p5) mask[dummy<=0]=1 mask[ds[var][0,::].values==0] = np.nan mask[ds[var][0,::].values==0] = np.nan ax[i].pcolor(ds.lon,ds.lat,mask,hatch='...',alpha=0.,transform=ccrs.PlateCarree()) ax[i].coastlines(resolution='50m') ax[i].add_feature(cartopy.feature.LAND, color='lightgray') ax[i].set_title(str(year) + '-' + str(year2),fontsize=8,loc='left',weight='bold') ax[i].tick_params(axis="x", direction="out") ax[i].tick_params(axis="y",direction="out") ax[i].set_extent((30,181,-40,31),crs=ccrs.PlateCarree()) ############ adjust labels for subplots ######################### ############ adjust labels for subplots ######################### ax[i].gridlines(crs=ccrs.PlateCarree(),draw_labels=False, xlocs=[40,80,120,160,200,-160],ylocs=range(-60,60,30)) gl = ax[i].gridlines(crs=ccrs.PlateCarree(),draw_labels=True, xlocs=[40,80,120,160],ylocs=range(-60,60,30)) gl.ylabels_right = False gl.xlabels_top = False # ylabels gl.yformatter = LATITUDE_FORMATTER gl.ylabel_style = {'size':10} # xlabels if i==5: gl.xformatter = LONGITUDE_FORMATTER gl.xlabel_style = {'size':10} else: gl.xlabels_bottom = False t = ax[i].text(0.02, 0.82, string.ascii_lowercase[i+6]+')', transform=ax[i].transAxes, size=8, weight='bold') t.set_bbox(dict(facecolor='w',boxstyle='square,pad=0.2')) # add colorbar cbaxes = fig.add_axes([0.78, 0.3, 0.02, 0.4]) cb = plt.colorbar(cc,orientation='vertical', cax = cbaxes,extend='both',label='OHC anomaly [$10^{9}\,$J]') # fix title for bottom panels # ax[5].set_title('2010-'+str(int(ds['time'][-1].dt.year.values)),fontsize=8,loc='left',weight='bold') ax[5].set_title('2010-'+str(2009+nyears),fontsize=8,loc='left',weight='bold') # fig,mask = plot_decave(ohca_ishii,ishii_p5,ishii_p95,var='temp',timevar='time') # finished_plot(fig,plotsave+'ohc_map_decadal_ishii_base_1960_2012_0_700m_stippling_until_2012_V2.png') ###Output _____no_output_____ ###Markdown K003 ###Code # load data datapath2 = datapath+'publications/IOHC_Ummenhofer/' ohc_k003_700 = deseason(xr.open_dataset(datapath2+'k003_ohc_zint_700m.nc'),refperiod=base)['votemper'].sel(y=slice(200,None)) plt.rcParams.update({'font.size': 8}) fig,ax=plt.subplots(nrows=6,figsize=(5,10), subplot_kw = dict(projection=ccrs.PlateCarree(central_longitude=120))) plt.subplots_adjust(hspace=0.2) vmin=-1.5 vmax=1.5 cmap = plt.get_cmap('RdBu_r',len(np.arange(-1.2,1.2,0.1))) nyears = 7 # how many years for last panel # need to change longitude values in 
order to have hatching plotted properly lon = ohc_k003_700.nav_lon.values lon[lon<0] = lon[lon<0]+360 lon=lon[300,:] lat=ohc_k003_700.nav_lat[:,200].values # loop over datasets and plot j=0 ll = 0 ds = ohc_k003_700 p95 = dict(np.load('../data/fig3_k003_p95_two_tail_90p_base_1960_2012_n1000.npz')) p5 = dict(np.load('../data/fig3_k003_p5_two_tail_90p_base_1960_2012_n1000.npz')) # loop over time for year,i in zip(np.arange(1960,2020,10),range(6)): if i==5: year2 = year+nyears-1 dummy_p95 = p95[str(nyears)+'yr'] dummy_p5 = p5[str(nyears)+'yr'] else: year2 = year+9 dummy_p95 = p95['10yr'] dummy_p5 = p5['10yr'] #print(year,year2) time_bnds = [str(year) + '-01-01',str(year2) + '-12-31'] hh = ax[i].pcolormesh(lon,lat,(ds/1e09).sel(time_counter=slice(*time_bnds)).mean('time_counter'), transform=ccrs.PlateCarree(),cmap=cmap,vmin=vmin,vmax=vmax) # print('ohc done, now stippling') # significance mask = (xr.zeros_like(ds.sel(time_counter=slice(*time_bnds)).mean('time_counter')).values)*np.nan dummy = (ds.sel(time_counter=slice(*time_bnds)).mean('time_counter').values-dummy_p95) mask[dummy>=0]=1 dummy = (ds.sel(time_counter=slice(*time_bnds)).mean('time_counter').values-dummy_p5) mask[dummy<=0]=1 mask[ds[0,::].values==0] = np.nan ax[i].pcolor(lon,ds.nav_lat,mask,hatch='...',alpha=0.,transform=ccrs.PlateCarree()) # print('stippling done') ax[i].gridlines(crs=ccrs.PlateCarree(),draw_labels=False, xlocs=[40,80,120,160,200],ylocs=range(-60,60,30)) ax[i].coastlines(resolution='50m') ax[i].add_feature(cartopy.feature.LAND, color='lightgray') ax[i].set_title(str(year) + '-' + str(year2),fontsize=8,loc='left',weight='bold') ax[i].tick_params(axis="x", direction="out") ax[i].tick_params(axis="y",direction="out") ax[i].set_extent((30,181,-40,31),crs=ccrs.PlateCarree()) ############ adjust labels for subplots ######################### gl = ax[i].gridlines(crs=ccrs.PlateCarree(),draw_labels=True, xlocs=[40,80,120,160],ylocs=range(-60,60,30)) gl.ylabels_right = False gl.xlabels_top = False # ylabels gl.yformatter = LATITUDE_FORMATTER gl.ylabel_style = {'size':10} # xlabels if i==5: gl.xformatter = LONGITUDE_FORMATTER gl.xlabel_style = {'size':10} else: gl.xlabels_bottom = False t = ax[i].text(0.02, 0.82, string.ascii_lowercase[i+12]+')', transform=ax[i].transAxes, size=8, weight='bold') t.set_bbox(dict(facecolor='w',boxstyle='square,pad=0.2')) # print(str(year)) # add colorbar cbaxes = fig.add_axes([0.78, 0.3, 0.02, 0.4]) cb = plt.colorbar(cc,orientation='vertical', cax = cbaxes,extend='both',label='OHC anomaly [$10^{9}\,$J]') # fix title for bottom panels # ax[5].set_title('2010-'+str(int(ds['time_counter'][-1].dt.year.values)),fontsize=8,loc='left',weight='bold') ax[5].set_title('2010-'+str(2009+nyears),fontsize=8,loc='left',weight='bold') # fig,mask = plot_decave(ohca_ishii,ishii_p5,ishii_p95,var='temp',timevar='time_counter') # finished_plot(fig,plotsave+'ohc_map_decadal_hindcast_base_1960_2012_0_700m_stippling_until_2012.png') ###Output _____no_output_____
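###Markdown The significance stippling relies on the `monte_carlo` helper imported from `utils_iohc_ummenhofer2020`, whose implementation is not shown in this notebook. Purely as an illustration of the idea — averaging many randomly placed windows of length `duration` and taking point-wise percentiles — a sketch could look like the following; the function name and details are assumptions, not the actual utility: ###Code
import numpy as np

def bootstrap_percentiles(da, duration, n=1000, pval=5, timevar='time'):
    """Illustrative only: mean of n randomly placed windows of length `duration`,
    returning the (pval, 100-pval) percentiles at every grid point."""
    ntime = da.sizes[timevar]
    means = []
    for _ in range(n):
        start = np.random.randint(0, ntime - duration)
        means.append(da.isel({timevar: slice(start, start + duration)}).mean(timevar))
    stacked = np.stack([m.values for m in means])
    return (np.nanpercentile(stacked, pval, axis=0),
            np.nanpercentile(stacked, 100 - pval, axis=0))
###Output _____no_output_____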
BPNN.ipynb
###Markdown 1s ###Code new_model = learn(22, 1000, 50) new_model = learn(22, 1000, 100) new_model = learn(22, 1000, 150) new_model = learn(22, 1000, 200) new_model = learn(22, 1000, 250) new_model = learn(22, 1000, 300) new_model = learn(22, 1000, 350) new_model = learn(22, 1000, 400) new_model = learn(22, 1000, 450) new_model = learn(22, 1000, 500) new_model = learn(22, 1000, 550) new_model = learn(22, 1000, 600) new_model = learn(22, 1000, 650) new_model = learn(22, 1000, 700) ###Output Data is ready best model hidden size: 250 0.015570552685085889 17.65191231969109 ###Markdown 1.5s ###Code new_model = learn(141, 1500, 50) new_model = learn(141, 1500, 100) new_model = learn(141, 1500, 150) new_model = learn(141, 1500, 200) new_model = learn(141, 1500, 250) new_model = learn(141, 1500, 300) new_model = learn(141, 1500, 350) new_model = learn(141, 1500, 400) new_model = learn(141, 1500, 450) new_model = learn(141, 1500, 500) new_model = learn(141, 1500, 550) new_model = learn(141, 1500, 600) new_model = learn(141, 1500, 650) new_model = learn(141, 1500, 700) ###Output Data is ready best model hidden size: 50 0.0006799881906174894 5.59064380371526 ###Markdown 2s ###Code new_model = learn(902, 2000, 50) new_model = learn(902, 2000, 100) new_model = learn(902, 2000, 150) new_model = learn(902, 2000, 200) new_model = learn(902, 2000, 250) new_model = learn(902, 2000, 300) new_model = learn(902, 2000, 350) new_model = learn(902, 2000, 400) new_model = learn(902, 2000, 450) new_model = learn(902, 2000, 500) new_model = learn(902, 2000, 550) new_model = learn(902, 2000, 600) new_model = learn(902, 2000, 650) new_model = learn(902, 2000, 700) new_model = learn(902, 2000, 750) new_model = learn(902, 2000, 800) new_model = learn(902, 2000, 850) new_model = learn(902, 2000, 900) new_model = learn(902, 2000, 950) new_model = learn(902, 2000, 1000) ###Output Data is ready best model hidden size: 50 0.009025376821136816 18.64384591159633 ###Markdown 5s ###Code new_model = learn(19, 5000, 50) new_model = learn(19, 5000, 100) new_model = learn(19, 5000, 150) new_model = learn(19, 5000, 200) new_model = learn(19, 5000, 250) new_model = learn(19, 5000, 300) new_model = learn(19, 5000, 350) new_model = learn(19, 5000, 400) new_model = learn(19, 5000, 450) new_model = learn(19, 5000, 500) new_model = learn(19, 5000, 550) new_model = learn(19, 5000, 600) new_model = learn(19, 5000, 650) new_model = learn(19, 5000, 700) new_model = learn(19, 5000, 750) new_model = learn(19, 5000, 800) new_model = learn(19, 5000, 850) new_model = learn(19, 5000, 900) new_model = learn(19, 5000, 950) new_model = learn(19, 5000, 1000) ###Output Data is ready best model hidden size: 50 0.009815506084718107 117.11642641141019 ###Markdown 15s ###Code new_model = learn(20, 15000, 50) new_model = learn(20, 15000, 100) new_model = learn(20, 15000, 150) new_model = learn(20, 15000, 200) new_model = learn(20, 15000, 250) new_model = learn(20, 15000, 300) new_model = learn(20, 15000, 350) new_model = learn(20, 15000, 400) new_model = learn(20, 15000, 450) new_model = learn(20, 15000, 500) new_model = learn(20, 15000, 550) new_model = learn(20, 15000, 600) new_model = learn(20, 15000, 650) new_model = learn(20, 15000, 700) new_model = learn(20, 15000, 750) new_model = learn(20, 15000, 800) new_model = learn(20, 15000, 850) new_model = learn(20, 15000, 900) new_model = learn(20, 15000, 950) new_model = learn(20, 15000, 1000) num = [str(i) for i in range(22)] p1 = [read_csv('ics_data_preprocessed/06_Smart_Meter_45/P1/1000/' + n + '.csv', 
squeeze=True, parse_dates=['ts'], index_col='ts') for n in num] p1 = [prepare_data(df, 250) for df in p1] p1 = sum(p1, []) p1 = [df.values.tolist() for df in p1] train, test = train_test_split(p1, test_size=0.2, random_state=42) train_data_normalized = train test_data_normalized = test train_data, val_data = train_test_split(train_data_normalized, test_size=0.15, random_state=42) train_data = torch.FloatTensor(train_data) val_data = torch.FloatTensor(val_data) test_data = torch.FloatTensor(test_data_normalized) X_train = train_data[:, :-10] y_train = train_data[:, -10:] X_val = val_data[:, :-10] y_val = val_data[:, -10:] X_test = test_data[:, :-10] y_test = test_data[:, -10:] print("Data is ready") loss_function = nn.MSELoss() val_losses = [] best_hidden_layer_size = 50 losses = [] best_model = BPNN(250 - 10, best_hidden_layer_size) model = BPNN(250 - 10, best_hidden_layer_size) optimizer = torch.optim.Adam(model.parameters(), lr=0.2) epochs = 1000 for i in range(epochs): model.train() optimizer.zero_grad() y_pred = model(X_train) single_loss = loss_function(y_pred, y_train) losses.append(single_loss) single_loss.backward() optimizer.step() model.eval() with torch.no_grad(): y_pred = model(X_val) single_loss = loss_function(y_pred, y_val) if (len(val_losses) == 0) or (single_loss < min(val_losses)): best_model = model val_losses.append(single_loss) print("best model hidden size:", best_hidden_layer_size) predictions = [] best_model.eval() with torch.no_grad(): predictions = [best_model(seq) for (seq, labels) in zip(X_test, y_test)] pred = [torch.cat((i, j)) for (i, j) in zip(X_test, predictions)] actual_predictions = np.array([np.array(i) for i in pred]) mape = [np.mean(np.abs(actual_predictions[i] - test[i])/np.abs(test[i])) for i in range(len(test))] print(sum(mape) / len(mape)) rmse = [np.mean((actual_predictions[i] - test[i])**2)**.5 for i in range(len(test))] print(sum(rmse) / len(rmse)) predictions = [] new_model.eval() with torch.no_grad(): predictions = [new_model(seq) for (seq, labels) in zip(X_test, y_test)] pred = [torch.cat((i, j)) for (i, j) in zip(X_test, predictions)] actual_predictions = np.array([np.array(i) for i in pred]) mape = [np.mean(np.abs(actual_predictions[i] - test[i])/np.abs(test[i])) for i in range(len(test))] print(sum(mape) / len(mape)) rmse = [np.mean((actual_predictions[i] - test[i])**2)**.5 for i in range(len(test))] print(sum(rmse) / len(rmse)) actual_predictions plt.plot(actual_predictions[1]) plt.plot( n = 10 plt.plot(test_data[n].numpy()) plt.plot(actual_predictions[n]) # 1.5, 350 n = 13 plt.plot(test_data[n].numpy()) plt.plot(actual_predictions[n]) # 1.5, 350 predictions = [] new_model.eval() with torch.no_grad(): predictions = [new_model(seq) for (seq, labels) in zip(X_test, y_test)] pred = [torch.cat((i, j)) for (i, j) in zip(X_test, predictions)] actual_predictions = np.array([np.array(i) for i in pred]) mape = [np.mean(np.abs(actual_predictions[i] - test[i])/np.abs(test[i])) for i in range(len(test))] print(sum(mape) / len(mape)) rmse = [np.mean((actual_predictions[i] - test[i])**2)**.5 for i in range(len(test))] print(sum(rmse) / len(rmse)) n = 1 plt.plot(test_data[n].numpy()) plt.plot(actual_predictions[n]) # 1, 400 num = [str(i) for i in range(902)] p1 = [read_csv('ics_data_preprocessed/06_Smart_Meter_45/P1/2000/' + n + '.csv', squeeze=True, parse_dates=['ts'], index_col='ts') for n in num] p1 = [prepare_data(df, 1000) for df in p1] p1 = sum(p1, []) p1 = [df.values.tolist() for df in p1] train, test = train_test_split(p1, test_size=0.2, 
random_state=42) train_data_normalized = train test_data_normalized = test train_data, val_data = train_test_split(train_data_normalized, test_size=0.15, random_state=42) train_data = torch.FloatTensor(train_data) val_data = torch.FloatTensor(val_data) test_data = torch.FloatTensor(test_data_normalized) X_train = train_data[:, :-10] y_train = train_data[:, -10:] X_val = val_data[:, :-10] y_val = val_data[:, -10:] X_test = test_data[:, :-10] y_test = test_data[:, -10:] loss_function = nn.MSELoss() val_losses = [] best_hidden_layer_size = 50 losses = [] best_model = BPNN(1000 - 10, best_hidden_layer_size) model = BPNN(1000 - 10, best_hidden_layer_size) optimizer = torch.optim.Adam(model.parameters(), lr=0.2) epochs = 1000 for i in range(epochs): model.train() optimizer.zero_grad() y_pred = model(X_train) single_loss = loss_function(y_pred, y_train) losses.append(single_loss) single_loss.backward() optimizer.step() model.eval() with torch.no_grad(): y_pred = model(X_val) single_loss = loss_function(y_pred, y_val) if (len(val_losses) == 0) or (single_loss < min(val_losses)): best_model = model val_losses.append(single_loss) print("best model hidden size:", best_hidden_layer_size) predictions = [] best_model.eval() with torch.no_grad(): predictions = [best_model(seq) for (seq, labels) in zip(X_test, y_test)] pred = [torch.cat((i, j)) for (i, j) in zip(X_test, predictions)] actual_predictions_bpnn = np.array([np.array(i) for i in pred]) mape = [np.mean(np.abs(actual_predictions_bpnn[i] - test[i])/np.abs(test[i])) for i in range(len(test))] print(sum(mape) / len(mape)) rmse = [np.mean((actual_predictions_bpnn[i] - test[i])**2)**.5 for i in range(len(test))] print(sum(rmse) / len(rmse)) class BPNN2(nn.Module): def __init__(self, input_size, hidden_size1, hidden_size2): super(BPNN2, self).__init__() # an affine operation: y = Wx + b self.fc1 = nn.Linear(input_size, hidden_size1) self.fc2 = nn.Linear(hidden_size1, hidden_size2) self.fc3 = nn.Linear(hidden_size2, 10) def forward(self, x): x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x loss_function = nn.MSELoss() val_losses = [] best_hidden_layer_size = 100 losses = [] best_model = BPNN2(1000 - 10, 100, 150) model = BPNN2(1000 - 10, 100, 150) optimizer = torch.optim.Adam(model.parameters(), lr=0.2) epochs = 1000 for i in range(epochs): model.train() optimizer.zero_grad() y_pred = model(X_train) single_loss = loss_function(y_pred, y_train) losses.append(single_loss) single_loss.backward() optimizer.step() model.eval() with torch.no_grad(): y_pred = model(X_val) single_loss = loss_function(y_pred, y_val) if (len(val_losses) == 0) or (single_loss < min(val_losses)): best_model = model val_losses.append(single_loss) print("best model hidden size:", best_hidden_layer_size) predictions = [] best_model.eval() with torch.no_grad(): predictions = [best_model(seq) for (seq, labels) in zip(X_test, y_test)] pred = [torch.cat((i, j)) for (i, j) in zip(X_test, predictions)] actual_predictions_bpnn2 = np.array([np.array(i) for i in pred]) mape = [np.mean(np.abs(actual_predictions_bpnn2[i] - test[i])/np.abs(test[i])) for i in range(len(test))] print(sum(mape) / len(mape)) rmse = [np.mean((actual_predictions_bpnn2[i] - test[i])**2)**.5 for i in range(len(test))] print(sum(rmse) / len(rmse)) n = 19 plt.plot(test_data[n].numpy()) plt.plot(actual_predictions_bpnn[n], label='BPNN') plt.plot(actual_predictions_bpnn2[n], label='BPNN2') plt.plot(actual_prediction_arima, label='ARIMA') plt.legend(loc='center left', bbox_to_anchor=(1, 0.5)) n = 1 
plt.plot(test_data[n].numpy()) num = [str(i) for i in range(902)] p1 = [read_csv('ics_data_preprocessed/06_Smart_Meter_45/P1/' + '2000' + '/' + n + '.csv', squeeze=True, parse_dates=['ts'], index_col='ts') for n in num] p1 = [prepare_data(df, 1000) for df in p1] p1 = sum(p1, []) df = pd.Series(test_data[n].numpy()) plt.plot(df) test_data[n] import pmdarima as pm mape_list = [] rmse_list = [] def forecast_accuracy(forecast, actual): mape = np.mean(np.abs(forecast - actual)/np.abs(actual)) # MAPE mape_list.append(mape) me = np.mean(forecast - actual) # ME mae = np.mean(np.abs(forecast - actual)) # MAE mpe = np.mean((forecast - actual)/actual) # MPE rmse = np.mean((forecast - actual)**2)**.5 # RMSE rmse_list.append(rmse) corr = np.corrcoef(forecast, actual)[0,1] # corr return({'mape':mape, 'me':me, 'mae': mae, 'mpe': mpe, 'rmse':rmse, 'corr':corr}) max_m = 5 train = df[:-10] test = df[-10:] best_model = pm.auto_arima(train.values, start_p=0, start_q=0, test='adf', # use adftest to find optimal 'd' max_p=3, max_q=3, # maximum p and q m=1, # frequency of series d=None, # let model determine 'd' seasonal=True, # Seasonality start_P=0, D=1, trace=False, error_action='ignore', suppress_warnings=True, stepwise=True) for i in range(2, max_m): model = pm.auto_arima(train.values, start_p=0, start_q=0, test='adf', # use adftest to find optimal 'd' max_p=3, max_q=3, # maximum p and q m=i, # frequency of series d=None, # let model determine 'd' seasonal=True, # Seasonality start_P=0, D=1, trace=False, error_action='ignore', suppress_warnings=True, stepwise=True) # Find the best model with MSE if np.mean((model.predict(n_periods=test.shape[0]) - test.values)**2) < np.mean((best_model.predict(n_periods=test.shape[0]) - test.values)**2): best_model = model forecast_accuracy(best_model.predict(n_periods=test.shape[0]), test.values) preds, conf_int = best_model.predict(n_periods=test.shape[0], return_conf_int=True, alpha=0.6) lower_series = pd.Series(conf_int[:, 0], index=test.index) upper_series = pd.Series(conf_int[:, 1], index=test.index) actual_prediction_arima = train.append(pd.Series(best_model.predict(n_periods=test.shape[0]), index=test.index)).values plt.plot(df.values) plt.plot(actual_prediction_arima) plt.plot(actual_prediction_arima) ###Output _____no_output_____
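###Markdown The MAPE and RMSE blocks above are repeated for every model variant; a small helper with the same formulas keeps those comparisons in one place. A sketch, assuming the same prediction/actual list structure used throughout the notebook: ###Code
import numpy as np

def score_forecasts(predictions, actuals):
    """Mean MAPE and RMSE over paired forecast/actual sequences."""
    mape = [np.mean(np.abs(p - np.array(a)) / np.abs(np.array(a)))
            for p, a in zip(predictions, actuals)]
    rmse = [np.mean((p - np.array(a)) ** 2) ** 0.5
            for p, a in zip(predictions, actuals)]
    return sum(mape) / len(mape), sum(rmse) / len(rmse)

# e.g. score_forecasts(actual_predictions_bpnn, test)
###Output _____no_output_____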
notebooks/108/1 - Les operations IO et la serialisation.ipynb
###Markdown Les opérations I/O et la sérialisation Les opérations I/O.NET dispose d’une espace de nom complet dédié aux opérations I/O: `System.IO`.Cette espace de nom permet d’interagir avec la structure de dossier et de fichier de la machine de façon programmatique.De plus, elle permet de manipuler du data binaire en mémoire à l’aide de plusieurs classes spécialisées `System.IO`![image.png](attachment:image.png) Directory et DirectoryInfoLes classes `Directory` et `DirectoryInfo` facilitent l’interaction et la récupération d’informations sur un ou plusieurs dossiers physiques.La classe `Directory` comprend que des méthodes statiques tandis que la classe `DirectoryInfo` comprend que des méthodes membres (un dossier est donc représenté en tant qu’instance de `DirectoryInfo`). La classe `Directory`Les méthodes statiques les plus utilisées sont :- `CreateDirectory(path)` : Crée le dossier spécifié- `Delete(path)` : Supprime le dossier spécifié- `Exists(path)` : Indique si le dossier spécifié existe- `GetCurrentDirectory()` : Retourne la location complète du dossier courant (« current working directory »)- `GetDirectories(path)` : Retourne un tableau de string de tous les sous-dossiers du dossier spécifié- `GetFiles(path)` : Retourne un tableau de string de tous les sous-fichiers du dossier spécifié ![image.png](attachment:image.png) ###Code using System.IO; // Cree un dossier s'il n'existe pas if (!Directory.Exists("test")) Directory.CreateDirectory("test"); // Qu'est-ce que le dossier courant comprend? var cwd = Directory.GetCurrentDirectory(); Console.WriteLine(cwd); Console.WriteLine("[Fichiers]"); foreach (var file in Directory.GetFiles(".")) { Console.WriteLine(file); } Console.WriteLine("[Dossiers]"); foreach (var dir in Directory.GetDirectories(".")) { Console.WriteLine(dir); } // Supprime le dossier Directory.Delete("test"); ###Output /home/jovyan/notebooks/108 [Fichiers] ./Untitled.ipynb [Dossiers] ./.ipynb_checkpoints ./test ###Markdown La classe `DirectoryInfo`Une instance de `DirectoryInfo` peut être créée en passant le nom du dossier en paramètre au constructeur.![image.png](attachment:image.png)Les membres les plus utilisés sont :- `CreateSubdirectory(path)` : Crée (et retourne) un sous-dossier dans le dossier courant (représenté par le `DirectoryInfo`)- `Delete()` : Supprime le dossier courant- `Exists` : Indique si le dossier courant existe- `FullName` : Retourne la location complète du dossier courant- `GetFiles()` : Retourne un tableau de string de tous les sous- fichiers du dossier courant- `GetDirectories()` : Retourne un tableau de string tous les sous- dossiers du dossier courant- `Name` : Retourne le nom du dossier courant- `Parent` : Retourne le `DirectoryInfo` représentant le dossier parant ![image.png](attachment:image.png) ###Code using System.IO; // Cree une instance de DirectoryInfo var di = new DirectoryInfo("."); //var t = new DirectoryInfo(@"C:\33"); // Cree un dossier s'il n'existe pas var diTest = new DirectoryInfo("test"); if (!diTest.Exists) diTest = di.CreateSubdirectory("test"); // Qu'est-ce que le dossier courant comprend? 
Console.WriteLine(di.FullName); Console.WriteLine("[Fichiers]"); foreach (var file in di.GetFiles()) { Console.WriteLine(file.Name); } Console.WriteLine("[Dossiers]"); foreach (var dir in di.GetDirectories()) { Console.WriteLine(dir.Name); } // Supprime le dossier diTest.Delete(); ###Output /home/jovyan/notebooks/108 [Fichiers] 1 - Les operations IO et la serialisation.ipynb data.bin personne.bin [Dossiers] .ipynb_checkpoints test ###Markdown `File` et `FileInfo`Les classes `File` et `FileInfo` facilitent l’interaction et la récupération d’informations sur un ou plusieurs fichiers physiques.La classe `File` comprend que des méthodes statiques, tandis que la classe `FileInfo` comprend que des méthodes membres (un fichier est donc représenté en tant qu’instance de `FileInfo`) La classe `File`Les méthodes statiques les plus utilisées sont :- `Create(path)` : Crée le fichier spécifié et retourne un `FileStream`- `Copy(source, dest)` : Fait une copie du fichier spécifié- `Delete(path)` : Supprime le fichier spécifié- `Exists(path)` : Indique si le fichier spécifié existe- `Move(source, dest)` : Déplace ou renomme le fichier spécifié![image.png](attachment:image.png) ###Code using System.IO; // Cree un nouveau fichier if (!File.Exists("test.txt")) File.Create("test.txt").Close(); // Copie le fichier File.Copy("test.txt", "test_copie.txt"); // Renome le fichier File.Move("test.txt", "super_fichier.txt"); // Supprime les fichiers File.Delete("super_fichier.txt"); File.Delete("test_copie.txt"); #!powershell ls -al ###Output total 936 drwxr-xr-x 1 jovyan users 4096 Mar 11 03:03 . drwxrwxrwx 1 root root 4096 Mar 11 01:33 .. -rw-r--r-- 1 jovyan users 954379 Mar 11 03:03 1 - Les operations IO et la serialisation.ipynb drwxr-xr-x 1 jovyan users 4096 Mar 11 03:02 .ipynb_checkpoints ###Markdown La classe `FileInfo`Une instance de `FileInfo` peut être créée en passant le nom du fichier en paramètre au constructeur.![image.png](attachment:image.png)Les membres les plus utilisés sont :- `CopyTo(dest)` : Copie le fichier courant (représenté par le `FileInfo`)- `Create()` : Crée le fichier courant et retourne un `FileStream`- `Delete()` : Supprime le fichier courant- `Directory` : Retourne le `DirectoryInfo` du dossier contenant le fichier courant- `Exists` : Indique si le fichier courant existe- `FullName` : Retourne la location complète du fichier courant- `MoveTo(dest)` : Déplace ou renomme le fichier courant- `Name` : Retourne le nom du fichier courant![image-2.png](attachment:image-2.png) ###Code using System.IO; // Cree une instance de FileInfo var fi = new FileInfo("test.txt"); // Cree un nouveau fichier if (!fi.Exists) fi.Create().Close(); // Copie le fichier var fiCopie = fi.CopyTo("test_copie.txt"); // Renome le fichier fi.MoveTo("super_fichier.txt"); // Supprime les fichiers fi.Delete(); fiCopie.Delete(); #!powershell ls -al ###Output total 936 drwxr-xr-x 1 jovyan users 4096 Mar 11 03:03 . drwxrwxrwx 1 root root 4096 Mar 11 01:33 .. 
-rw-r--r-- 1 jovyan users 954379 Mar 11 03:03 1 - Les operations IO et la serialisation.ipynb drwxr-xr-x 1 jovyan users 4096 Mar 11 03:02 .ipynb_checkpoints ###Markdown Stream Tout flux de données en .NET est représenté par la classe abstraite « `System.IO.Stream` »Toute classe héritant de « `Stream` » peut possiblement (déterminée par le programmeur ayant crée la classe) :- Être utilisée pour lire (`CanRead`)- Être utilisée pour écrire (`CanWrite`)- Être utilisée pour se déplacer à l’intérieur du flux (`CanSeek`)La classe « `Stream` » implémente l’interface `IDisposable`; toute instance d’une classe « `Stream` » devrait être faite à l’aide d’une instruction « `using` »![image.png](attachment:image.png)![image-2.png](attachment:image-2.png) Les membres les plus utilisés sont :- `Close()` : Ferme le flux ainsi que toutes autres ressources associées au flux (tel qu’un « I/O Lock » avec un `FileStream`)- `Flush()` : Prend le contenu du buffer et le transpose (donc vide le buffer) dans la source de donnée sous-jacente- `Length` : Indique le montant de bytes dans le flux- `Position` : Indique la position courante du flux- `Read()` : Retourne une séquence de bytes dans un tableau et avance la position du flux- `ReadByte()` : Retourne un byte et avance la position du flux de un- `Seek()` : Change la position du flux- `Write()` : Écrit une séquence de bytes dans le flux tout en avançant la position du flux- `WriteByte()` : Écrit un byte et avance la position du flux de un La classe `FileStream`Représente un flux de données d’un fichier physique. La classe `MemoryStream`Représente un flux de données stocké en mémoire (donc aucune référence physique). La classe `StreamReader`Un `StreamReader` permet de lire un flux de données représentant du texte (donc une `string`).Un `StreamReader` est la classe recommandée pour la lecture d’un fichier (ou données) texte.Les membres les plus utilisés sont :- `EndOfStream` : Indique si on est à la fin du flux de données- `ReadBlock()` : Retourne un nombre spécifique de caractères du flux- `ReadLine()` : Retourne une `string` délimitée par un `\n`- `ReadToEnd()` : Retourne une `string` représentant le flux de données complet![image.png](attachment:image.png)![image-2.png](attachment:image-2.png) La classe `StreamWriter`Un `StreamWriter` permet d’écrire à un flux de données représentant du texte (donc une `string`).Un `StreamWriter` est la classe recommandée pour l’écriture à un fichier (ou données) texte.Les membres les plus utilisés sont :- `AutoFlush` : Indique si le buffer du flux devrait être vidé à chaque instruction d’écriture (`Write` ou `WriteLine`)- `Write()` : Écrit au flux- `WriteLine()` : Écrit au flux en terminant avec une nouvelle ligne![image.png](attachment:image.png)![image-2.png](attachment:image-2.png) ###Code using System.IO; // Ecrire a un fichier using (var fs = new FileStream("test.txt", FileMode.Create, FileAccess.Write)) { using (var sw = new StreamWriter(fs) { AutoFlush = true }) { sw.WriteLine("Premiere ligne"); sw.WriteLine("Deuxieme ligne"); sw.Write("Sur la meme "); sw.Write("ligne!"); } } #!powershell ls -al #!powershell cat test.txt ###Output Premiere ligne Deuxieme ligne Sur la meme ligne! 
###Markdown Les classe `BinaryReader` et `BinaryReader`Les classes `BinaryReader` et `BinaryWriter` sont souvent utilisées pour faire de la sérialisation d’objets de façon manuelle.Ces classes permettent de transformer des types primaires en data binaire pouvant donc être stocké dans un fichier physique ou transférer électroniquement.Un `BinaryReader` comprend plusieurs méthodes différentes commençant par « `Read` » afin de permettre la lecture de tous les types primaires.Un `BinaryWriter` comprend plusieurs signatures différentes de la méthode « `Write` » afin de permettre l’écriture de tous les types primaires. `BinaryWriter`![image.png](attachment:image.png) `BinaryReader`![image.png](attachment:image.png) ###Code // Ecrire a un fichier binaire using (var fs = new FileStream("data.bin", FileMode.Create, FileAccess.Write)) { using (var bw = new BinaryWriter(fs)) { // John Doe bw.Write("John"); // Prenom bw.Write("Doe"); // Nom bw.Write(15); // Age bw.Write(150.74); // Poid // Silvie LaChapelle bw.Write("Silvie"); // Prenom bw.Write("LaChapelle"); // Nom bw.Write(23); // Age bw.Write(114.47); // Poid } } #!powershell ls -al #!powershell cat data.bin // Lire un fichier binaire using (var fs = new FileStream("data.bin", FileMode.Open, FileAccess.Read)) { using (var br = new BinaryReader(fs)) { // Premiere personne Console.WriteLine("{0} {1} a {2} ans et pese {3} lb.", br.ReadString(), br.ReadString(), br.ReadInt32(), br.ReadDouble() ); // Deuxieme personne Console.WriteLine("{0} {1} a {2} ans et pese {3} lb.", br.ReadString(), br.ReadString(), br.ReadInt32(), br.ReadDouble() ); } } ###Output John Doe a 15 ans et pese 150.74 lb. Silvie LaChapelle a 23 ans et pese 114.47 lb. ###Markdown La sérialisation Afin qu’un type puisse être sérialisé, toutes ses propriétés et variables membres doivent être sérialisables elles-mêmesUn membre peut être exclu de la sérialisation en ajoutant l’attribut « `NonSerialized` ».La sérialisation peut être faite en plusieurs formats différents dont:- Binaire (`BinaryFormatter`)- SOAP (en .NET Framework `SoapFormatter`)- XML (`XmlSerializer`)- JSON (`Newtonsoft.Json` ou `System.Text.Json`)La sérialisation d’un objet sérialisable est faite à l’aide d’un flux quelconque (`MemoryStream`, `FileStream`, etc.) `BinaryFormatter`Afin de sérialiser un objet en format binaire, on doit initialiser un objet de type `BinaryFormatter`L’espace de nom « `System.Runtime.Serialization.Formatters.Binary` » doit être rajoutée au fichier .cs. 
> Plus supporté en .NET Core ou .NET 5.0 > > https://docs.microsoft.com/en-us/dotnet/standard/serialization/binaryformatter-security-guide La méthode « `Serialize` » du `BinaryFormatter` permet de sérialiser un objet à un flux directement.![image.png](attachment:image.png)La méthode « `Deserialize` » du `BinaryFormatter` retourne un objet (de type « `object` » donc une conversion est requise) à partir d’un flux.![image-2.png](attachment:image-2.png) ###Code using System.IO; using System.Runtime.Serialization.Formatters.Binary; public class Personne { public Personne() { } public Personne(string prenom, string nom, int age, double poids) { this.Prenom = prenom; this.Nom = nom; this.Age = age; this.Poids = poids; } public string Prenom { get; set; } public string Nom { get; set; } public int Age { get; set; } public double Poids { get; set; } } // Cree une Personne var p = new Personne("John", "Doe", 15, 150.74); // Cree le serialiseur binaire var bf = new BinaryFormatter(); // Serialise la classe using (var fs = new FileStream("personne.bin", FileMode.Create, FileAccess.Write)) { bf.Serialize(fs, p); } // De-serialise la classe using (var fs = new FileStream("personne.bin", FileMode.Open, FileAccess.Read)) { p = bf.Deserialize(fs) as Personne; } ###Output (31,5): warning SYSLIB0011: 'BinaryFormatter.Serialize(Stream, object)' is obsolete: 'BinaryFormatter serialization is obsolete and should not be used. See https://aka.ms/binaryformatter for more information.' (37,9): warning SYSLIB0011: 'BinaryFormatter.Deserialize(Stream)' is obsolete: 'BinaryFormatter serialization is obsolete and should not be used. See https://aka.ms/binaryformatter for more information.'
etl/home/8_parsingdates_exercise/8_parsingdates_exercise-solution.ipynb
###Markdown Parsing DatesAnother common data transformation involves parsing dates. Parsing generally means that you start with a string and then transform that string into a different data type. In this case, that means taking a date in the format of a string and transforming the string into a date type. Run the next cell to see an example. ###Code import pandas as pd parsed_date = pd.to_datetime('January 1st, 2017') parsed_date parsed_date.month parsed_date.year parsed_date.second ###Output _____no_output_____ ###Markdown Sometimes date string are formatted in unexpected ways. For example, in the United States, dates are given with the month first and then the day. That is what pandas expects by default. However, some countries write the date with the day first and then the month. Run the next three examples to see Panda's default behavior and how you can specify the date formatting. ###Code parsed_date = pd.to_datetime('5/3/2017 5:30') parsed_date.month parsed_date = pd.to_datetime('3/5/2017 5:30', format='%d/%m/%Y %H:%M') parsed_date.month parsed_date = pd.to_datetime('5/3/2017 5:30', format='%m/%d/%Y %H:%M') parsed_date.month ###Output _____no_output_____ ###Markdown The formatting abbreviations are actually part of the python standard. You can see examples at [this link](http://strftime.org/). Part 1 - Practice Parsing DatesRun the code cells below to import the World Bank projects data. The last line of the code outputs all of the column names in the data frame. ###Code # read in the projects data set with all columns type string df_projects = pd.read_csv('../data/projects_data.csv', dtype=str) df_projects.drop(['Unnamed: 56'], axis=1, inplace=True) df_projects.columns ###Output _____no_output_____ ###Markdown Notice there are three columns associated with dates: boardapprovaldate, board_approval_month, and closingdate. Run the code cell below to see what these values look like. ###Code # Run this code cell df_projects.head(15)[['boardapprovaldate', 'board_approval_month', 'closingdate']] ###Output _____no_output_____ ###Markdown Use the pandas to_datetime method to convert the boardapprovaldate and closingdate columns into datetime objects. ###Code # TODO: Use the pandas to_datetime method to convert these two columns # (boardapprovaldate, closingdate) into date times. # HINT: It's easier to do this one column at a time df_projects['boardapprovaldate'] = pd.to_datetime(df_projects['boardapprovaldate']) df_projects['closingdate'] = pd.to_datetime(df_projects['closingdate']) # Run the code cells below to see how you can access the different parts of the datetime objects # Series.dt gives access to the datetime object as explained here: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.dt.html df_projects['boardapprovaldate'].dt.second # Run this code cell to see the output df_projects['boardapprovaldate'].dt.month # Run this code to see the output # weekday represents the day of the week from 0 (Monday) to 6 (Sunday). 
df_projects['boardapprovaldate'].dt.weekday ###Output _____no_output_____ ###Markdown Part 2 - Create new columnsNow that the boardapprovaldate and closingdates are in datetime formats, create a few new columns in the df_projects data frame:* approvalyear* approvalday* approvalweekday* closingyear* closingday* closingweekday ###Code ### # TODO create the follwing new columns in the df_projects data frame # # approvalyear # approvalday # approvalweekday # closingyear # closingday # closingweekday # # ### df_projects['approvalyear'] = df_projects['boardapprovaldate'].dt.year df_projects['approvalday'] = df_projects['boardapprovaldate'].dt.day df_projects['approvalweekday'] = df_projects['boardapprovaldate'].dt.weekday df_projects['closingyear'] = df_projects['closingdate'].dt.year df_projects['closingday'] = df_projects['closingdate'].dt.day df_projects['closingweekday'] = df_projects['closingdate'].dt.weekday ###Output _____no_output_____ ###Markdown Part 3 (Challenge)Use what you've practiced in this exercise to make a visualization of year on the x-axis and the sum of the totalamt columns per year on the y-axis. You'll first need to clean the totalamt column to get rid of commas and convert the values to numeric. Then you'll need to use pandas' groupby method to sum the totalamt column for each year. Finally, you can use the pandas plot() method to output the visualization. ###Code ### # TODO: Make a visualization with year on the x-axis and the sum of the totalamt columns per year on the y-axis # HINT: The totalamt column is currently a string with commas. For example 100,250,364. You'll need to remove the # commas and convert the column to a numeric variable. # HINT: pandas groupby, sum, and plot methods should also be helpful #### import matplotlib.pyplot as plt %matplotlib inline df_projects['totalamt'] = pd.to_numeric(df_projects['totalamt'].str.replace(',','')) ax = df_projects.groupby('approvalyear')['totalamt'].sum().plot(x='approvalyear', y='totalamt', title ='Total Amount Approved per Year') ax.set_xlabel('year') ax.set_ylabel('amount $') plt.show() ###Output _____no_output_____
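###Markdown One more parsing detail worth knowing: `pd.to_datetime` raises on strings it cannot parse unless `errors='coerce'` is passed, in which case the bad values become `NaT`. A short sketch of standard pandas behaviour (not specific to the projects data set): ###Code
raw = pd.Series(['2017-01-01', 'not a date', '2018-06-30'])
parsed = pd.to_datetime(raw, errors='coerce')

print(parsed.isna().sum())  # 1 -> the malformed entry became NaT
print(parsed.dt.year)       # year is missing only where parsing failed
###Output _____no_output_____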
jupyter/05 - Functions, Data, and Classes.ipynb
###Markdown **Functions** are bits of code that are packaged so you can easily reuse that code from anywhere. Here is an example where I am finding the maximum of two numbers (we often do this when we want to know if any drive motor speed is greater than 1 and we need to scale the speeds), and another where I am clipping a value so it fits within the range 0.0 to 1.0 (we often need to do this when we are tuning parameters): ###Code // What is the maximum motor speed
double rightMotorSpeed = 0.5;
double leftMotorSpeed = 1.3;
double maxMotorSpeed = rightMotorSpeed > leftMotorSpeed ? rightMotorSpeed : leftMotorSpeed;
System.out.println("maxMotorSpeed = " + maxMotorSpeed);

// clip a value to be in the range 0.0 to 1.0
double tunedValue = 1.1;
double useValue = tunedValue;
if (useValue < 0.0) {
    useValue = 0.0;
} else if (useValue > 1.0) {
    useValue = 1.0;
}
System.out.println("useValue = " + useValue);
###Output maxMotorSpeed = 1.3 useValue = 1.0 ###Markdown Since these are the kinds of things we do often, we don't want to write that same code everywhere for several reasons:* There would be a lot of repeated code, increasing the code we need to maintain;* It would be harder to read the code;* If there is a problem in the code it would be repeated all over the code. This is where **functions** come into play. In Java, the syntax of a function is: ```<modifiers> <return datatype> <functionName>(<arguments>) { // code to do something return <value>; or return; }``` And describing this syntax:* The `<modifiers>` will be explained when we talk about **classes**;* the `<return datatype>` is needed because a function can return a value and the datatype for that value must be declared, or **`void`** should be specified if the operation does not return anything;* the `<functionName>` is a name that you make up. By convention it is camel case and should describe what the function does;* the `<arguments>` is a comma-separated list of values passed into the function; each argument has a `datatype` and a name following the camel case convention;* then a block of code ending with a **`return`** of a value of the correct `datatype`. Let's look at how functions would be used in the cell above: ###Code // What is the maximum motor speed
double max(double value1, double value2) {
    return value1 > value2 ? value1 : value2;
}

double rightMotorSpeed = 0.5;
double leftMotorSpeed = 1.3;
double maxMotorSpeed = max(rightMotorSpeed, leftMotorSpeed);
System.out.println("maxMotorSpeed = " + maxMotorSpeed);

// clip a value to be in the range 0.0 to 1.0
double clip(double value) {
    if (value < 0.0) {
        value = 0.0;
    } else if (value > 1.0) {
        value = 1.0;
    }
    return value;
}

double tunedValue = 1.1;
double useValue = clip(1.1);
System.out.println("useValue = " + useValue);
###Output maxMotorSpeed = 1.3 useValue = 1.0 ###Markdown A function can have multiple returns: ###Code // clip a value to be in the range 0.0 to 1.0
double clip(double value) {
    if (value < 0.0) {
        return 0.0;
    } else if (value > 1.0) {
        return 1.0;
    }
    return value;
}

double tunedValue = 1.1;
double useValue = clip(1.1);
System.out.println("useValue = " + useValue);
###Output useValue = 1.0 ###Markdown When we make functions, we should think about the versatility of the function.
For example, if we find we are sometimes clipping to a range other than 0.0 to 1.0, we can specify the range in the arguments: ###Code
// clip a value to be in the range min to max
double clip(double value, double min, double max) {
    if (value < min) {
        return min;
    } else if (value > max) {
        return max;
    }
    return value;
}

System.out.println("clip(1.1, 0.0, 1.0) = " + clip(1.1, 0.0, 1.0));
System.out.println("clip(1.1, 0.5, 1.0) = " + clip(1.1, 0.5, 1.0));
System.out.println("clip(1.1, 0.0, 0.5) = " + clip(1.1, 0.0, 0.5));
System.out.println("clip(1.1, 0.0, 2.0) = " + clip(1.1, 0.0, 2.0));
###Output clip(1.1, 0.0, 1.0) = 1.0 clip(1.1, 0.5, 1.0) = 1.0 clip(1.1, 0.0, 0.5) = 0.5 clip(1.1, 0.0, 2.0) = 1.1 ###Markdown **Advanced Concept:** The combination of name, arguments, and return datatype is referred to as the signature of the function and must be unique. In the case above, we might note that we occasionally clip to some range other than 0.0 to 1.0 - so, we would like a version, `clip(double value)`, that clips to 0.0 to 1.0 by default. Note that this is a different signature because the arguments are different, so it would be valid Java to have both forms as: ###Code
// clip a value to be in the range 0.0 to 1.0
double clip(double value) {
    return clip(value, 0.0, 1.0);
}

// clip a value to be in the range min to max
double clip(double value, double min, double max) {
    if (value < min) {
        return min;
    } else if (value > max) {
        return max;
    }
    return value;
}

System.out.println("clip(1.1, 0.0, 1.0) = " + clip(1.1, 0.0, 1.0));
System.out.println("clip(1.1, 0.5, 1.0) = " + clip(1.1, 0.5, 1.0));
System.out.println("clip(1.1, 0.0, 0.5) = " + clip(1.1, 0.0, 0.5));
System.out.println("clip(1.1, 0.0, 2.0) = " + clip(1.1, 0.0, 2.0));
System.out.println("clip(-0.1) = " + clip(-0.1));
System.out.println("clip(0.5) = " + clip(0.5));
System.out.println("clip(1.0) = " + clip(1.0));
System.out.println("clip(2.5) = " + clip(2.5));
###Output clip(1.1, 0.0, 1.0) = 1.0 clip(1.1, 0.5, 1.0) = 1.0 clip(1.1, 0.0, 0.5) = 0.5 clip(1.1, 0.0, 2.0) = 1.1 clip(-0.1) = 0.0 clip(0.5) = 0.5 clip(1.0) = 1.0 clip(2.5) = 1.0 ###Markdown **Note:** It is best practice, when you have multiple signatures for the same functionality:* to use the same function name;* to put as much of the code as possible in the function that takes all the arguments, which lets you exhaustively test only one function;* to make the alternate methods simple wrappers that defer to the method with the actual code for the operation.
###Code class SwerveModule { static long s_NavxID = 12; public static void setNavxID(long NavxID) { s_NavxID = NavxID; } // ============ final String m_name; final long m_driveMotorID; final long m_spinMotorID; final long m_analogEncoderId; private double m_power = 0.0; public SwerveModule(String name, long driveMotorID, long spinMotorID, long analogEncoderId) { m_name = name; m_driveMotorID = driveMotorID; m_spinMotorID = spinMotorID; m_analogEncoderId = analogEncoderId; } public void setPower(double power) { double motorControllerValue = 2 * power; // set motor controller power // m_power = power; } public void printInfo() { System.out.println("Module " + m_name + ":"); System.out.println(" driveMotorID = " + m_driveMotorID); System.out.println(" spinMotorID " + m_spinMotorID); System.out.println(" analogEncoderId " + m_analogEncoderId); System.out.println(" speed " + m_power); System.out.println(" NavxID " + s_NavxID); } } SwerveModule rightFront = new SwerveModule("Right Front", 0, 1, 10); SwerveModule leftFront = new SwerveModule("Left Front", 2, 3, 11); rightFront.setPower(0.5); SwerveModule.setNavxID(55); rightFront.printInfo(); leftFront.printInfo(); long rfd = rightFront.m_driveMotorID; // rightFront.driveMotorID = 5; rightFront.m_power = 0.75; rightFront.printInfo(); ###Output Module Right Front: driveMotorID = 0 spinMotorID 1 analogEncoderId 10 speed 0.5 NavxID 55 Module Left Front: driveMotorID = 2 spinMotorID 3 analogEncoderId 11 speed 0.0 NavxID 55
jupyter/Watson Studio Public/Model a Golomb ruler using DO.ipynb
###Markdown Golomb RulerThis tutorial includes everything you need to set up decision optimization engines, build constraint programming models.Table of contents:- [Describe the business problem](Describe-the-business-problem)* [How decision optimization (prescriptive analytics) can help](How--decision-optimization-can-help)* [Use decision optimization](Use-decision-optimization) * [Step 1: Model the Data](Step-1:-Model-the-data) * [Step 2: Set up the prescriptive model](Step-2:-Set-up-the-prescriptive-model) * [Define the decision variables](Define-the-decision-variables) * [Express the business constraints](Express-the-business-constraints) * [Express the objective](Express-the-objective) * [Solve with Decision Optimization solve service](Solve-with-Decision-Optimization-solve-service) * [Step 3: Investigate the solution and run an example analysis](Step-3:-Investigate-the-solution-and-then-run-an-example-analysis)* [Summary](Summary)**** Describe the business problem* A detailed description (from which this paragraph comes from) is available on Wikipedia at https://en.wikipedia.org/wiki/Golomb_ruler.* In mathematics, a Golomb ruler is a set of marks at integer positions along an imaginary ruler such that no two pairs of marks are the same distance apart. The number of marks on the ruler is its order, and the largest distance between two of its marks is its length. Following is an example of Golomb ruler of order 4 and length 6.This problem is not only an intellectual problem. It has a lot of practical applications: within Information Theory related to error correcting codes, the selection of radio frequencies to reduce the effects of intermodulation interference, the design of conference rooms, to maximize the number of possible configurations with a minimum of partitions: ***** How decision optimization can help* Prescriptive analytics technology recommends actions based on desired outcomes, taking into account specific scenarios, resources, and knowledge of past and current events. This insight can help your organization make better decisions and have greater control of business outcomes. * Prescriptive analytics is the next step on the path to insight-based actions. It creates value through synergy with predictive analytics, which analyzes data to predict future outcomes. * Prescriptive analytics takes that insight to the next level by suggesting the optimal way to handle that future situation. Organizations that can act fast in dynamic conditions and make superior decisions in uncertain environments gain a strong competitive advantage. + For example: + Automate complex decisions and trade-offs to better manage limited resources. + Take advantage of a future opportunity or mitigate a future risk. + Proactively update recommendations based on changing events. + Meet operational goals, increase customer loyalty, prevent threats and fraud, and optimize business processes. Modeling the problemConstraint Programming is a programming paradigm that allows to express a problem using:* the unknowns of the problem (the variables),* the constraints/laws/rules of the problem, mathematical expressions linking variables together (the constraints),* what is to be optimized (the objective function).All this information, plus some configuration parameters, is aggregated into a single object called model. The remainder of this notebook describes in details how to build and solve this problem with IBM CP Optimizer, using its DOcplex Python modeling API. 
Use decision optimization Step 1: Model the data ###Code # Import Constraint Programming modelization functions from docplex.cp.model import CpoModel ###Output _____no_output_____ ###Markdown Define model input dataThe first thing to define is the model input data.In the case of the Golomb Ruler problem, there is only one input which is the order of the ruler, that is the number of marks: ###Code # Define required number of marks on the ruler ORDER = 7 ###Output _____no_output_____ ###Markdown Step 2: Set up the prescriptive model Create the model containerThe model is represented by a Python object that is filled with the different model elements (variables, constraints, objective function, etc). The first thing to do is then to create such an object: ###Code # Create model object mdl = CpoModel(name="GolombRuler") ###Output _____no_output_____ ###Markdown Define the decision variables* Now, you need to define the variables of the problem. As the expected problem result is the list of mark positions, the simplest choice is to create one integer variable to represent the position of each mark on the ruler.* Each variable has a a set of possible values called his domain. To reduce the search space, it is important to reduce this domain as far as possible.* In our case, we can naively estimate that the maximum distance between two adjacent marks is the order of the ruler minus one. Then the maximal position of a mark is (ORDER - 1)². Each variable domain is then limited to an interval [0..(ORDER - 1)²].* A list of integer variables can be defined using method integer_var_list(). In our case, defining one variable for each mark can be created as follows: ###Code # Create array of variables corresponding to ruler marks marks = mdl.integer_var_list(ORDER, 0, (ORDER - 1) ** 2, "M") ###Output _____no_output_____ ###Markdown Express the business constraints* To express that all possible distances between two marks must be different, create an array that contains all these distances: ###Code # Create an array with all distances between all marks dist = [marks[i] - marks[j] for i in range(1, ORDER) for j in range(0, i)] ###Output _____no_output_____ ###Markdown The operator '-' is used to express the difference between variables. This might appear strange as the variables are not instantiated at that time, but the Python operator has been overloaded to construct a CP expression instead of attempting to compute the arithmetic difference. All other standard Python operators can be used to make operations between CP objects (, =, ==, !=, +, -, /, *, &, |, //, **, ...). See documentation for details.To force all these distances to be different, use the special all_diff() constraint as follows: ###Code # Force all distances to be different mdl.add(mdl.all_diff(dist)) ###Output _____no_output_____ ###Markdown The call mdl.add(...) is necessary to express that the constraint must be added to the model. Remove symmetriesThe constraint you have expressed above is theoretically sufficient, and the model can be solved as it is.However, it does not differentiate between all possible permutations of the different mark positions that are solutions to the problem, for example, 0-1-4-6, 4-6-1-0, 6-0-1-4, etc. As there are ORDER! 
(factorial of ORDER) such permutations, the search space would be drastically reduced by removing them.You can do that by forcing an order between marks, for example the order of their index: ###Code # Avoid symmetric solutions by ordering marks for i in range(1, ORDER): mdl.add(marks[i] > marks[i - 1]) ###Output _____no_output_____ ###Markdown You also know that first mark is at the beginning of the ruler: ###Code # Force first mark position to zero mdl.add(marks[0] == 0) ###Output _____no_output_____ ###Markdown Avoid mirror solutionsEach optimal solution has a mirror, with all mark distances in the reverse order, for example, 0-1-4-6 and 0-2-5-6. The following constraint can be added to avoid this: ###Code # Avoid mirror solution mdl.add((marks[1] - marks[0]) < (marks[ORDER - 1] - marks[ORDER - 2])) ###Output _____no_output_____ ###Markdown Express the objective* Finally, to get the shortest Golomb Ruler, this can be expressed by minimizing the position of the last mark.As you have ordered the marks, you can do this using: ###Code # Minimize ruler size mdl.add(mdl.minimize(marks[ORDER - 1])) ###Output _____no_output_____ ###Markdown If the marks were not ordered, you could have instead used: mdl.add(mdl.minimize(mdl.max(marks))) Solve with Decision Optimization solve serviceBy default, the modeling layer looks for a local runtime, but other solving environments, such as *docloud*, are also available.Refer to the documentation for a good understanding of the various solving/generation modes.If you're using a Community Edition of CPLEX runtimes, depending on the size of the problem, the solve stage might fail and will need a paying subscription or product installation. The model can be solved by calling: ###Code # Solve the model print("Solving model....") msol = mdl.solve(TimeLimit=10) ###Output _____no_output_____ ###Markdown Step 3: Investigate the solution and then run an example analysisThe shortest way to output the solution that has been found by the solver is to call the method print_solution() as follows: ###Code # Print solution print("Solution: ") msol.write() ###Output _____no_output_____ ###Markdown This output is totally generic and simply prints the value of all model variables, the objective value, and some other solution information.A more specific output can be generated by writing more code. The following example illustrates how to access specific elements of the solution. ###Code # Print solution from sys import stdout if msol: # Print found solution stdout.write("Solution: " + msol.get_solve_status() + "\n") stdout.write("Position of ruler marks: ") for v in marks: stdout.write(" " + str(msol[v])) stdout.write("\n") stdout.write("Solve time: " + str(round(msol.get_solve_time(), 2)) + "s\n") else: # No solution found stdout.write("No solution found. Search status: " + msol.get_solve_status() + "\n") ###Output _____no_output_____ ###Markdown Another possibility is for example to simulate a real ruler using characters, as follows: ###Code # Print solution as a ruler if msol: stdout.write("Ruler: +") for i in range(1, ORDER): stdout.write('-' * (msol[marks[i]] - msol[marks[i - 1]] - 1) + '+') stdout.write("\n") ###Output _____no_output_____
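###Markdown As a quick sanity check (a minimal sketch that only uses the `msol[...]` accessor already shown above), we can recompute every pairwise distance from the solved mark positions and confirm that they are all different, which is exactly the Golomb property: ###Code
# Recompute all pairwise distances from the solution and check they are distinct
if msol:
    positions = [msol[v] for v in marks]
    distances = [positions[i] - positions[j] for i in range(1, ORDER) for j in range(0, i)]
    print("Mark positions:", positions)
    print("All distances distinct:", len(distances) == len(set(distances)))
###Output _____no_output_____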
Intro to python/IntroToPy.ipynb
###Markdown Welcome to the "Fundamental of Information Retrieval & Web Search" Course!I'm Parsa KamaliPour, your TA for this semester.In this session we're going to practice Python together, so let's begin! Hello World ###Code print("Hello World!!") ###Output Hello World!! ###Markdown Simple data types ###Code var1 = 20 var2 = 20.2 var3 = 'H' var4 = 'hey' var5 = "hey" print(var1) print(type(var1)) type(var1) print(var2) print(type(var2)) type(var2) print(var3) print(type(var3)) type(var3) print(var4) print(type(var4)) type(var4) print(var5) print(type(var5)) type(var5) var5 var4 var6 = 5 + 2j print(var6) type(var6) var7 = range(10) print(var7) var8 = True var8 var9 = b"hi there" var9 ###Output _____no_output_____ ###Markdown Operators ###Code op1 , op2, op3, op4 = 1, 2, 3, 4 print(op1, op2, op3, op4) print(op1, op2) op1, op2 = op2, op1 print(op1, op2) op3 * op4 op3 ** op4 op4 / op3 op4 // op3 op4 % op3 op3 is op4 op3 is not op4 op3 in [1,2,3,4] op3 in [1, 2, 4] op3.__eq__(op4) op3 != op4 op3 == op4 op3 >= op4 op3 <= op4 op3 > op4 op3 < op4 op3 *= 2 op3 op3 -= 1 op3 "%d" %op4 "%f" %op4 "%.1f" %op4 "%.0f" %op4 "%2.0f" %op4 "%4.1f" %op4 "%e" %op4 "%2e" %op4 "%20e" %op4 "%.20e" %op4 "{}".format(op4) "{}".format(op4) "{}".format(float(op4)) "{}".format(complex(op4)) "{}".format(int(op4)) ###Output _____no_output_____ ###Markdown Input system ###Code inp1 = input("enter a number:") type(inp1) inp2 = eval(input("enter a number:")) type(inp2) print("hi \nhow are you?") print(r"hi \n how are you?") ###Output hi how are you? hi \n how are you? ###Markdown Control flow ###Code op = eval(input()) if op > 10: print("Bigger than 10") elif op == 10: print("Equal to 10") else: print("less than 10") ###Output Bigger than 10 ###Markdown Loop ###Code flag = True while flag: print("hiiiii") flag = False li = [1, 2.0, "three"] for item in li: print(item) li = [1, 2.0, "three"] for index in range(0, 3): print(li[index]) for index in range(len(li)): print(li[index]) for index in range(0, 3, 2): print(li[index]) for index in range(3, 0, -1): print(index) ###Output 3 2 1
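###Markdown One more loop form that is worth knowing (a small addition to the examples above): `enumerate` yields the index and the item together, so there is no need for manual `range(len(li))` indexing: ###Code
li = [1, 2.0, "three"]

# enumerate gives (index, item) pairs
for index, item in enumerate(li):
    print(index, item)
###Output _____no_output_____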
_notebooks/2020-06-17-House Price Prediction.ipynb
###Markdown "House Prices"> "Predict the sales price for each house"- toc: false- branch: master- badges: true- comments: true- categories: [fastpages, jupyter]- image: images/some_folder/your_image.png- hide: false- search_exclude: true- metadata_key1: metadata_value1- metadata_key2: metadata_value2 Import Libraries ###Code import pandas as pd import numpy as np import seaborn as sns from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestRegressor from sklearn import metrics ###Output _____no_output_____ ###Markdown Read datasets into data frames ###Code train = pd.read_csv('./data/train.csv') test = pd.read_csv('./data/test.csv') df = pd.concat([train,test],keys=['train','test'],sort=False) ###Output _____no_output_____ ###Markdown Data Cleaning ###Code df.info() df.isnull().sum()/df.isnull().count() ###Output _____no_output_____ ###Markdown Numerical Features ###Code df_num = df.select_dtypes('number') df_num.info() df_num = df_num.fillna(df.mean()) df_num.info() df_num['log_sp'] = np.log(df_num['SalePrice']) df_num.info() ###Output <class 'pandas.core.frame.DataFrame'> MultiIndex: 2919 entries, (train, 0) to (test, 1458) Data columns (total 39 columns): Id 2919 non-null int64 MSSubClass 2919 non-null int64 LotFrontage 2919 non-null float64 LotArea 2919 non-null int64 OverallQual 2919 non-null int64 OverallCond 2919 non-null int64 YearBuilt 2919 non-null int64 YearRemodAdd 2919 non-null int64 MasVnrArea 2919 non-null float64 BsmtFinSF1 2919 non-null float64 BsmtFinSF2 2919 non-null float64 BsmtUnfSF 2919 non-null float64 TotalBsmtSF 2919 non-null float64 1stFlrSF 2919 non-null int64 2ndFlrSF 2919 non-null int64 LowQualFinSF 2919 non-null int64 GrLivArea 2919 non-null int64 BsmtFullBath 2919 non-null float64 BsmtHalfBath 2919 non-null float64 FullBath 2919 non-null int64 HalfBath 2919 non-null int64 BedroomAbvGr 2919 non-null int64 KitchenAbvGr 2919 non-null int64 TotRmsAbvGrd 2919 non-null int64 Fireplaces 2919 non-null int64 GarageYrBlt 2919 non-null float64 GarageCars 2919 non-null float64 GarageArea 2919 non-null float64 WoodDeckSF 2919 non-null int64 OpenPorchSF 2919 non-null int64 EnclosedPorch 2919 non-null int64 3SsnPorch 2919 non-null int64 ScreenPorch 2919 non-null int64 PoolArea 2919 non-null int64 MiscVal 2919 non-null int64 MoSold 2919 non-null int64 YrSold 2919 non-null int64 SalePrice 2919 non-null float64 log_sp 2919 non-null float64 dtypes: float64(13), int64(26) memory usage: 909.4+ KB ###Markdown Categorical Variables ###Code df_cat = df.select_dtypes('object') df_cat.info() df_cat = pd.get_dummies(df_cat,dummy_na=True,drop_first=True) df_cat.columns df_all = pd.concat([df_num,df_cat],axis=1) ###Output _____no_output_____ ###Markdown Model Building ###Code X = df_all.drop(['SalePrice','log_sp','Id'],axis=1) y = df_num['log_sp'] X_train, X_test, y_train, y_test = train_test_split(X[:'train'],y[:'train'],test_size=0.3) clf = RandomForestRegressor(n_jobs=-1) clf.fit(X_train,y_train) clf.score(X_train,y_train) clf.score(X_test, y_test) feature_importances = pd.DataFrame(clf.feature_importances_, index = X_train.columns, columns=['importance']).sort_values('importance',ascending=False) feature_importances test['log_sp']= clf.predict(X.loc['test']) test['SalePrice']= np.exp(test.log_sp) submission = test[['Id','SalePrice']] submission.to_csv('./data/submission.csv',index=False) ###Output _____no_output_____
official_quickstart/basic_classification.ipynb
###Markdown Train your first neural network: basic classification View on TensorFlow.org Run in Google Colab View source on GitHub This guide trains a neural network model to classify images of clothing, like sneakers and shirts. It's okay if you don't understand all the details, this is a fast-paced overview of a complete TensorFlow program with the details explained as we go.This guide uses [tf.keras](https://www.tensorflow.org/guide/keras), a high-level API to build and train models in TensorFlow. ###Code # Disable GPU, use to compare CPU-only with GPU # import os # os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # TensorFlow and tf.keras import tensorflow as tf from tensorflow import keras # Helper libraries import numpy as np import matplotlib.pyplot as plt print(tf.__version__) ###Output _____no_output_____ ###Markdown Import the Fashion MNIST dataset This guide uses the [Fashion MNIST](https://github.com/zalandoresearch/fashion-mnist) dataset which contains 70,000 grayscale images in 10 categories. The images show individual articles of clothing at low resolution (28 by 28 pixels), as seen here: <img src="https://tensorflow.org/images/fashion-mnist-sprite.png" alt="Fashion MNIST sprite" width="600"> Figure 1. Fashion-MNIST samples (by Zalando, MIT License).&nbsp; Fashion MNIST is intended as a drop-in replacement for the classic [MNIST](http://yann.lecun.com/exdb/mnist/) dataset—often used as the "Hello, World" of machine learning programs for computer vision. The MNIST dataset contains images of handwritten digits (0, 1, 2, etc) in an identical format to the articles of clothing we'll use here.This guide uses Fashion MNIST for variety, and because it's a slightly more challenging problem than regular MNIST. Both datasets are relatively small and are used to verify that an algorithm works as expected. They're good starting points to test and debug code. We will use 60,000 images to train the network and 10,000 images to evaluate how accurately the network learned to classify images. You can access the Fashion MNIST directly from TensorFlow, just import and load the data: ###Code # load data locally because can't download online behind GFW import os import sys module_path = os.path.abspath(os.path.join('..')) if module_path not in sys.path: sys.path.append(module_path) from utils.local_datasets import load_data_fashion_mnist (train_images, train_labels), (test_images, test_labels) = load_data_fashion_mnist() ###Output _____no_output_____ ###Markdown Loading the dataset returns four NumPy arrays:* The `train_images` and `train_labels` arrays are the *training set*—the data the model uses to learn.* The model is tested against the *test set*, the `test_images`, and `test_labels` arrays.The images are 28x28 NumPy arrays, with pixel values ranging between 0 and 255. The *labels* are an array of integers, ranging from 0 to 9. These correspond to the *class* of clothing the image represents: Label Class 0 T-shirt/top 1 Trouser 2 Pullover 3 Dress 4 Coat 5 Sandal 6 Shirt 7 Sneaker 8 Bag 9 Ankle boot Each image is mapped to a single label. Since the *class names* are not included with the dataset, store them here to use later when plotting the images: ###Code class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] ###Output _____no_output_____ ###Markdown Explore the dataLet's explore the format of the dataset before training the model. 
The following shows there are 60,000 images in the training set, with each image represented as 28 x 28 pixels: ###Code train_images.shape ###Output _____no_output_____ ###Markdown Likewise, there are 60,000 labels in the training set: ###Code len(train_labels) ###Output _____no_output_____ ###Markdown Each label is an integer between 0 and 9: ###Code train_labels ###Output _____no_output_____ ###Markdown There are 10,000 images in the test set. Again, each image is represented as 28 x 28 pixels: ###Code test_images.shape ###Output _____no_output_____ ###Markdown And the test set contains 10,000 images labels: ###Code len(test_labels) ###Output _____no_output_____ ###Markdown Preprocess the dataThe data must be preprocessed before training the network. If you inspect the first image in the training set, you will see that the pixel values fall in the range of 0 to 255: ###Code plt.figure() plt.imshow(train_images[0]) plt.colorbar() plt.grid(False) ###Output _____no_output_____ ###Markdown We scale these values to a range of 0 to 1 before feeding to the neural network model. For this, cast the datatype of the image components from an integer to a float, and divide by 255. Here's the function to preprocess the images: It's important that the *training set* and the *testing set* are preprocessed in the same way: ###Code train_images = train_images / 255.0 test_images = test_images / 255.0 ###Output _____no_output_____ ###Markdown Display the first 25 images from the *training set* and display the class name below each image. Verify that the data is in the correct format and we're ready to build and train the network. ###Code plt.figure(figsize=(10,10)) for i in range(25): plt.subplot(5,5,i+1) plt.xticks([]) plt.yticks([]) plt.grid(False) plt.imshow(train_images[i], cmap=plt.cm.binary) plt.xlabel(class_names[train_labels[i]]) ###Output _____no_output_____ ###Markdown Build the modelBuilding the neural network requires configuring the layers of the model, then compiling the model. Setup the layersThe basic building block of a neural network is the *layer*. Layers extract representations from the data fed into them. And, hopefully, these representations are more meaningful for the problem at hand.Most of deep learning consists of chaining together simple layers. Most layers, like `tf.keras.layers.Dense`, have parameters that are learned during training. ###Code model = keras.Sequential([ keras.layers.Flatten(input_shape=(28, 28)), keras.layers.Dense(128, activation=tf.nn.relu), keras.layers.Dense(10, activation=tf.nn.softmax) ]) ###Output _____no_output_____ ###Markdown The first layer in this network, `tf.keras.layers.Flatten`, transforms the format of the images from a 2d-array (of 28 by 28 pixels), to a 1d-array of 28 * 28 = 784 pixels. Think of this layer as unstacking rows of pixels in the image and lining them up. This layer has no parameters to learn; it only reformats the data.After the pixels are flattened, the network consists of a sequence of two `tf.keras.layers.Dense` layers. These are densely-connected, or fully-connected, neural layers. The first `Dense` layer has 128 nodes (or neurons). The second (and last) layer is a 10-node *softmax* layer—this returns an array of 10 probability scores that sum to 1. Each node contains a score that indicates the probability that the current image belongs to one of the 10 classes. Compile the modelBefore the model is ready for training, it needs a few more settings. 
These are added during the model's *compile* step:* *Loss function* —This measures how accurate the model is during training. We want to minimize this function to "steer" the model in the right direction.* *Optimizer* —This is how the model is updated based on the data it sees and its loss function.* *Metrics* —Used to monitor the training and testing steps. The following example uses *accuracy*, the fraction of the images that are correctly classified. ###Code model.compile(optimizer=tf.train.AdamOptimizer(), loss='sparse_categorical_crossentropy', metrics=['accuracy']) ###Output _____no_output_____ ###Markdown Train the modelTraining the neural network model requires the following steps:1. Feed the training data to the model—in this example, the `train_images` and `train_labels` arrays.2. The model learns to associate images and labels.3. We ask the model to make predictions about a test set—in this example, the `test_images` array. We verify that the predictions match the labels from the `test_labels` array. To start training, call the `model.fit` method—the model is "fit" to the training data: ###Code model.fit(train_images, train_labels, epochs=5) ###Output _____no_output_____ ###Markdown As the model trains, the loss and accuracy metrics are displayed. This model reaches an accuracy of about 0.88 (or 88%) on the training data. Evaluate accuracyNext, compare how the model performs on the test dataset: ###Code test_loss, test_acc = model.evaluate(test_images, test_labels) print('Test accuracy:', test_acc) ###Output _____no_output_____ ###Markdown It turns out, the accuracy on the test dataset is a little less than the accuracy on the training dataset. This gap between training accuracy and test accuracy is an example of *overfitting*. Overfitting is when a machine learning model performs worse on new data than on their training data. Make predictionsWith the model trained, we can use it to make predictions about some images. ###Code predictions = model.predict(test_images) ###Output _____no_output_____ ###Markdown Here, the model has predicted the label for each image in the testing set. Let's take a look at the first prediction: ###Code predictions[0] ###Output _____no_output_____ ###Markdown A prediction is an array of 10 numbers. These describe the "confidence" of the model that the image corresponds to each of the 10 different articles of clothing. We can see which label has the highest confidence value: ###Code np.argmax(predictions[0]) ###Output _____no_output_____ ###Markdown So the model is most confident that this image is an ankle boot, or `class_names[9]`. 
And we can check the test label to see this is correct: ###Code test_labels[0] ###Output _____no_output_____ ###Markdown We can graph this to look at the full set of 10 channels ###Code def plot_image(i, predictions_array, true_label, img): predictions_array, true_label, img = predictions_array[i], true_label[i], img[i] plt.grid(False) plt.xticks([]) plt.yticks([]) plt.imshow(img, cmap=plt.cm.binary) predicted_label = np.argmax(predictions_array) if predicted_label == true_label: color = 'blue' else: color = 'red' plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label], 100*np.max(predictions_array), class_names[true_label]), color=color) def plot_value_array(i, predictions_array, true_label): predictions_array, true_label = predictions_array[i], true_label[i] plt.grid(False) plt.xticks([]) plt.yticks([]) thisplot = plt.bar(range(10), predictions_array, color="#777777") plt.ylim([0, 1]) predicted_label = np.argmax(predictions_array) thisplot[predicted_label].set_color('red') thisplot[true_label].set_color('blue') ###Output _____no_output_____ ###Markdown Let's look at the 0th image, predictions, and prediction array. ###Code i = 0 plt.figure(figsize=(6,3)) plt.subplot(1,2,1) plot_image(i, predictions, test_labels, test_images) plt.subplot(1,2,2) plot_value_array(i, predictions, test_labels) i = 12 plt.figure(figsize=(6,3)) plt.subplot(1,2,1) plot_image(i, predictions, test_labels, test_images) plt.subplot(1,2,2) plot_value_array(i, predictions, test_labels) ###Output _____no_output_____ ###Markdown Let's plot several images with their predictions. Correct prediction labels are blue and incorrect prediction labels are red. The number gives the percent (out of 100) for the predicted label. Note that it can be wrong even when very confident. ###Code # Plot the first X test images, their predicted label, and the true label # Color correct predictions in blue, incorrect predictions in red num_rows = 5 num_cols = 3 num_images = num_rows*num_cols plt.figure(figsize=(2*2*num_cols, 2*num_rows)) for i in range(num_images): plt.subplot(num_rows, 2*num_cols, 2*i+1) plot_image(i, predictions, test_labels, test_images) plt.subplot(num_rows, 2*num_cols, 2*i+2) plot_value_array(i, predictions, test_labels) ###Output _____no_output_____ ###Markdown Finally, use the trained model to make a prediction about a single image. ###Code # Grab an image from the test dataset img = test_images[0] print(img.shape) ###Output _____no_output_____ ###Markdown `tf.keras` models are optimized to make predictions on a *batch*, or collection, of examples at once. So even though we're using a single image, we need to add it to a list: ###Code # Add the image to a batch where it's the only member. img = (np.expand_dims(img,0)) print(img.shape) ###Output _____no_output_____ ###Markdown Now predict the image: ###Code predictions_single = model.predict(img) print(predictions_single) plot_value_array(0, predictions_single, test_labels) _ = plt.xticks(range(10), class_names, rotation=45) ###Output _____no_output_____ ###Markdown `model.predict` returns a list of lists, one for each image in the batch of data. Grab the predictions for our (only) image in the batch: ###Code np.argmax(predictions_single[0]) ###Output _____no_output_____
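###Markdown As a final check (not part of the original guide), we can also convert the full `predictions` array into class labels with `np.argmax` and recompute the test accuracy by hand; the result should agree with the value reported by `model.evaluate` earlier: ###Code
# Turn each row of class probabilities into a predicted label,
# then compare against the true labels
predicted_labels = np.argmax(predictions, axis=1)
manual_accuracy = np.mean(predicted_labels == test_labels)
print('Manually computed test accuracy:', manual_accuracy)
###Output _____no_output_____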
linked_commons_graph_analysis/k_cores.ipynb
###Markdown HITS Algorithm Analysis ###Code import collections import logging import time import json import copy import matplotlib.pyplot as plt import networkx as nx import numpy as np import scipy import cc_graph_ops import graph_tool.all as gt import graph_tool.topology as topo with open('fdg_input_file.json', 'r') as myfile: data = json.load(myfile) g = cc_graph_ops.create_graph_from_file('fdg_input_file.json') hubs5, authorities5 = nx.hits(g, max_iter=5, tol=0.5) # running HITS for 5 iterations takes ~1.5 minutes # Let's look at what domains have the highest hub scores # Generally small blogs print("URL/domain", "\tHub score") sorted_hubs5 = dict() for key in hubs5.keys(): if hubs5[key] > 5e-3 and hubs5[key] != 0: sorted_hubs5.update({key: hubs5[key]}) sorted_hubs5 = dict(sorted(sorted_hubs5.items(), key = lambda item: item[1], reverse=True)) # print(len(sorted_hubs5.keys())) for key in sorted_hubs5.keys(): template="{:<30}{:>10.17f}" print(template.format(key, sorted_hubs5[key])) # Let's look at what domains have the highest authority scores # They seem to be large reference websites high_authorities_dict = {} print("URL/domain", "\tAuthority score", "\tHub Score") for key in authorities5.keys(): if authorities5[key] > 5e-3 and authorities5[key] != 0: high_authorities_dict[key] = authorities5[key] print(key, " \t", authorities5[key], "\t", hubs5[key], sep='') def get_license_qty(g): """Takes a graph and returns the license quantity of any node.""" licenses = [] for node_id, license_count in g.nodes(data='licenses_qty'): licenses.append(int(license_count)) return np.array(licenses) def get_cc_license_qty(g): """Takes a graph and returns the number of cc licenses of any node.""" license_count = [] for node_id, cc_licenses in g.nodes(data='cc_licenses'): counter = 0 if type(cc_licenses)==dict: vals = cc_licenses.values() for x in vals: counter += x license_count.append(counter) return np.array(license_count) def filter_all_cc_licenses(g): """Takes a graph g and returns a list of nodes that have only CC licenses.""" invalid_node_ids = [] for node_id, node_metadata in g.nodes(data=True): # format: (node_id, {'cc_licenses': n, 'licenses_qty': m, 'other attrs': 'attr_data'}), ... 
counter = 0 if type(node_metadata['cc_licenses']) == dict: for val in node_metadata['cc_licenses'].values(): counter += val if int(counter) != int(node_metadata['licenses_qty']): invalid_node_ids.append(node_id) g.remove_nodes_from(invalid_node_ids) return g # Make a copy of g to construct a subgraph with nodes that use exclusively CC licenses g_copy = copy.deepcopy(g) cc_subgraph = filter_all_cc_licenses(g_copy) # Run HITS on the reduced graph start = time.perf_counter() hubs_cc, authorities_cc = nx.hits(cc_subgraph, tol=0.1) end = time.perf_counter() print((end-start)/60, "minutes") # Let's look at what domains have the highest hub/authority scores in this reduced graph print("URL/domain", "\t\tHub score", "\t\tAuthority Score") template3="{:<24}{:<23.17f}{:>20.17f}" for key in hubs_cc.keys(): if hubs_cc[key] > 5e-3 and hubs_cc[key] != 0: print(template3.format(key, hubs_cc[key], authorities_cc[key])) print("\n") print("Hub scores of high-authority sites:") for key in high_cc_authorities_dict.keys(): template="{:<24}{:>10.17f}" print(template.format(key, hubs_cc[key])) ###Output URL/domain Hub score Authority Score toidicodedao 0.00698701379254315 0.00000027502001664 startups-list 0.00652515712003559 0.00556081629708550 marilynsclosetblog 0.00545838176724295 0.00000000000000000 skidkaonline 0.00550085985526573 0.00000000000000000 brasildefato 0.00695338465179413 0.00089290084941206 laespadaenlatinta 0.00663046055069237 0.00000003075921341 solitariosinvisibles 0.01738148994489122 0.00000000000000000 bulgarian-football 0.00551804690517656 0.00000000716533863 bashkortostan 0.00735433360179777 0.00000069038174975 Hub scores of high-authority sites: startups-list 0.00652515712003559 avc 0.00000063559951764 rada 0.00004111166829409 ok 0.00001424617180565 flaticon 0.00000515708318046 feedblitz 0.00000015959980068 studentdoctor 0.00056254159742869 twitter 0.00000000000000000 bgclubs 0.00000007061175104 facebook 0.00000000000000000 mail 0.00000000000000000 steampowered 0.00000000000000000 ###Markdown Results from the HITS AlgorithmThe HITS algorithm is a ranking algorithm which assigns "hub" and "authority" scores to websites. The idea behind the algorithm is that websites that are large websites with authoritative information, as well as hubs, which may not contain much information themselves but link to other websites with authoritative information. Upon running the HITS algorithm and looking at the nodes with the largest scores, we see that the nodes with the highest authority scores are large reference websites, like google, StackOverflow, Twitter, and Wikipedia. However, the nodes with the highest hub scores (with the exception of blogspot) were small blogs, usually run by individuals with niche interests. Just by spot-checking the websites, they seemed to have many links and images, many of which probably link to nodes with higher authority scores. We spot-checked the top 14 websites, and we noticed that 5 of them were dead, 1 was a website for a small company that made vehicle-driving simulators, and the others were all blogs.Out of curiosity, we decided to check the hub scores of the most "authoritative" websites. The only websites that had high hub scores were Wikipedia/Wikimedia and Wordpress. Shockingly, Facebook had a hub score of 0 (due to how Python floating-points work, its hub score is below $10^{-50}$!). 100%-CC-licensed websites with high hub/authority scoresThe results were extremely different from when we ran HITS on the entire graph. 
We believe this is because the larger websites have a more diverse set of sources, and thus are likely to use licenses other than CC licenses. Namely, running HITS on the entire graph produced mostly food or lifestyle blogs as the highest hub scoring domains, likely due to the fact that those blogs used a lot of original photos. On the flip side, running HITS on this graph gave almost all relatively-unknown websites.A lot of the high hub scoring, 100% CC licensed websites were in non-English languages. Out of the top 10 that we spot-checked, 5 were for news sources. There was also 1 shopping site, 2 blogs, and 2 professional development sites. Similarly, news sites may have high hub scores because they're likely to put CC licenses on media that they produce, and link to other reputable sources in their news articles.We believe that the websites with the highest authority score in the 100% CC-licensed graph are likely to be the experts in their respective domains, which means they might indicate small communities! These are worth investigating further. k-cores and degree analysis ###Code start = time.perf_counter() k_cores = nx.k_core(g) end = time.perf_counter() print((end-start)/60, "minutes") gt_graph = cc_graph_ops.create_graph_from_file('fdg_input_file.json') test_subgraph = cc_graph_ops.restrict_graph_by_license(gt_graph, "('by-nc', '3.0')") start = time.perf_counter() k_cores = nx.k_core(test_subgraph) end = time.perf_counter() print((end-start)/60, "minutes") # nx.draw_kamada_kawai(k_cores) # turn this into graph-tool graph test_subgraph_gt = cc_graph_ops.nx2gt(test_subgraph) # graph_tool is faster for nitty-gritty graph manipulations def d_cores(k, l, gt_digraph): """Takes integers k and l and graph-tools directed graph gt_digraph and returns its (k,l) d-cores""" # remove all vertices that have indegree < k or outdegree < l f = gt_digraph.copy() to_remove = [] for node in f.vertices(): if f.get_in_degrees([node]) < k or f.get_out_degrees([node]) < l: to_remove.append(node) f.remove_vertex(to_remove) # do 1 more pass to remove isolated vertices lone_nodes = [] for node in f.vertices(): if f.get_in_degrees([node]) == 0 and f.get_out_degrees([node]) == 0: lone_nodes.append(node) f.remove_vertex(lone_nodes, fast=True) return f print("original graph has", len(test_subgraph_gt.get_vertices()), "vertices and", len(test_subgraph_gt.get_edges()), "edges") d = d_cores(2,2, test_subgraph_gt) print("new graph has", len(d.get_vertices()), "vertices and", len(d.get_edges()), "edges") gt.graph_draw(d) # for node in topo.extract_largest_component(d, directed=False).vertices(): # print(d.vertex_properties['provider_domain'][node]) # print(len(list(topo.extract_largest_component(d, directed=False).vertices()))) comp, hist = topo.label_components(d) plt.hist([hist[i] for i in comp]) # make a set of component labels, and grab the nodes with the same component label # the above code currently doesn't work -- the array of vertices is strangely... empty? 
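# One possible way to group vertices by component label (an illustrative sketch that
# only uses the comp/hist values returned by label_components above; not a verified fix
# for the issue noted in the comment):
components = collections.defaultdict(list)
for v in d.vertices():
    components[int(comp[v])].append(int(v))
largest_label = int(np.argmax(hist))
print("number of components:", len(components))
print("largest component label:", largest_label, "with", len(components[largest_label]), "vertices")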
def summary_np_array(graph):
    max_indegree = max(list(graph.get_in_degrees(list(graph.vertices()))))
    max_outdegree = max(list(graph.get_out_degrees(list(graph.vertices()))))
    edge_dist = np.empty(shape=(max_indegree, max_outdegree), dtype='int32')
    vertex_dist = np.empty(shape=(max_indegree, max_outdegree), dtype='int32')
    for i in range(max_indegree):
        for j in range(max_outdegree):
            d = d_cores(i, j, graph)
            # print("graph has", len(d.get_vertices()), "vertices and", len(d.get_edges()), "edges")
            vertex_dist[i][j] = len(d.get_vertices())
            edge_dist[i][j] = len(d.get_edges())
    return vertex_dist, edge_dist

start = time.time()
print(summary_np_array(test_subgraph_gt)[1])
end = time.time()
print(end - start, "seconds")
# print(summary_np_array(test_subgraph_gt)[1])

# Collect the in- and out-degree of every node in the full graph
node_indegrees = np.empty(0)
node_outdegrees = np.empty(0)
start = time.perf_counter()
for node in g.nodes:
    node_indegrees = np.append(node_indegrees, g.in_degree(node))
    node_outdegrees = np.append(node_outdegrees, g.out_degree(node))
end = time.perf_counter()
print(end - start, "seconds")

# Indegree distribution
plt.hist([np.log(i) for i in node_indegrees if i != 0], bins=35)

# Outdegree distribution
plt.hist([np.log(i) for i in node_outdegrees if i != 0], bins=35)
###Output _____no_output_____
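###Markdown To complement the degree histograms, networkx can also report each node's core number (the largest k for which the node belongs to a k-core). The sketch below is illustrative; `core_number` requires a graph without self-loops, so they are dropped on a copy first: ###Code
# Core number of each node; self-loops are removed on a copy because
# nx.core_number does not allow them
g_simple = g.copy()
g_simple.remove_edges_from(nx.selfloop_edges(g_simple))
core_numbers = nx.core_number(g_simple)
plt.hist(list(core_numbers.values()), bins=35)
###Output _____no_output_____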
docs/source/notebooks/00-parameterized.ipynb
###Markdown ![seQuencing logo](../images/sequencing-logo.svg) Parameters Short introduction to `attrs``sequencing` relies on the [attrs package](https://www.attrs.org/en/stable/index.html) for defining and instantiating classes with many parameters. `attrs` allows you to define classes with minimal boilerplate by automating the creation of important methods like `__init__`, `__repr__`, `__str__`, and comparison methods like `__eq__`. For full details on `attrs`, check out the [documentation](https://www.attrs.org/en/stable/overview.html).A class defined using `attrs` must be decorated with `@attr.s` (or `attr.attrs`). In place of an explicit `__init__` method, attributes of a class decorated with `@attr.s` can have instance attributes defined at the class level using `attr.ib()` (or `attr.attrib()`). Adding attributes to a class using `attr.ib()` has many advantages:- Attributes may be required or have a default value- Defaults can be defined either with a specific value or with a "factory" function that generates a default value when called- Attributes can have a `type` associated with them, or a `converter` function that converts the user-specified value into the desired format- Attributes can have `validators` which raise an error if an invalid value is providedFor the full list of options see [attr.ib()](https://www.attrs.org/en/stable/api.htmlattr.ib) `Parameterized``sequencing` builds on the functionality of `attrs` using a class called [Parameterized](../api/classes.rstParameterized). `Parameterized` objects must have a `name` and can have any number of `parameters`, which can be created using the functions defined in `sequencing.parameters`, or by using `attrs` directly via [attr.ib()](https://www.attrs.org/en/stable/api.htmlattr.ib).**Parameterized offers the following convenient features:**- Recursive `get()` and `set()` methods for getting and setting attributes of nested `Parameterized` objects.- Methods for converting a `Parameterized` object into a Python `dict`, and creating a new `Parameterized` object from a `dict`.- Methods for serializing a `Parameterized` object to `json` and creating a new `Parameterized` object from `json`.**Notes:**- Subclasses of `Parameterized` must be decorated with `@attr.s`- Subclasses of `Parameterized` can define an `initialize()` method, which takes no arguments. It will be called on instantiation after the `attrs`-generated `__init__` method (see [__attrs_post_init__](https://www.attrs.org/en/stable/examples.html?highlight=__attrsother-goodies) for more details). If defined, the subclass' `initialize()` method should always call `super().initialize()` to ensure that the superclass is correctly initialized. 
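###Markdown Before the full `Parameterized` example below, here is a minimal attrs-only sketch of the features just described (a default, a converter, and a validator). The class and attribute names here are made up purely for illustration: ###Code
import attr

@attr.s
class Oscillator:
    # required: no default, so it must be provided at construction time
    name = attr.ib(type=str)
    # optional: default value plus a converter that coerces input to float
    frequency = attr.ib(default=5.0, converter=float)
    # optional: a validator restricts the allowed values
    shape = attr.ib(default="gaussian", validator=attr.validators.in_(["gaussian", "square"]))

osc = Oscillator("qubit_drive", frequency="7.2")
print(osc)  # attrs generates the __repr__ automatically
###Output _____no_output_____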
###Code import json from pprint import pprint import attr from sequencing.parameters import ( Parameterized, StringParameter, BoolParameter, ListParameter, DictParameter, IntParameter, FloatParameter, NanosecondParameter, GigahertzParameter, RadianParameter, ) @attr.s class Engine(Parameterized): cylinders = IntParameter(4) displacement = FloatParameter(2, unit='liter') current_rpm = FloatParameter(0, unit='rpm') turbo_charged = BoolParameter(False) @attr.s class Transmission(Parameterized): manual = BoolParameter(False) num_gears = IntParameter(5) current_gear = IntParameter(1) def initialize(self): super().initialize() # Add private attributes in initialize() self._is_broken = True @property def has_clutch(self): return self.manual def shift_to(self, gear): if gear not in range(self.num_gears+1): # 0 is reverse raise ValueError(f'Cannot shift into gear {gear}') if abs(gear - self.current_gear) > 1: raise ValueError('Cannot skip gears') self.current_gear = gear @attr.s class Car(Parameterized): VALID_CHASSIS = ['sedan', 'coupe', 'hatchback', 'suv'] chassis = StringParameter('sedan', validator=attr.validators.in_(VALID_CHASSIS)) num_doors = IntParameter(4, validator=attr.validators.in_([2,4])) miles_per_gallon = FloatParameter(30, unit='mpg') engine = attr.ib(factory=lambda: Engine('engine')) transmission = attr.ib(factory=lambda: Transmission('transmission')) car = Car('car') # All parameters other than name are optional because they have defaults print(car) pprint(car.as_dict()) car2 = Car.from_dict(car.as_dict()) print(car == car2) car.get('engine.displacement') == {'engine.displacement': car.engine.displacement} car.set_param('engine.displacement', 2.5) car.get_param('engine.displacement') == car.engine.displacement == 2.5 car.set(engine__displacement=3.0) print(car.get('engine.displacement')) print(f'RPM: {car.engine.current_rpm}, gear: {car.transmission.current_gear}') with car.temporarily_set(engine__current_rpm=4000, transmission__current_gear=3): print(f'RPM: {car.engine.current_rpm}, gear: {car.transmission.current_gear}') print(f'RPM: {car.engine.current_rpm}, gear: {car.transmission.current_gear}') try: convertible = Car('convertible', chassis='convertible') except ValueError as e: print('ValueError:', e) try: three_door = Car('three_door', num_doors=3) except ValueError as e: print('ValueError:', e) from qutip.ipynbtools import version_table version_table() ###Output _____no_output_____
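###Markdown Returning to the serialization point from the notes above: without assuming the exact names of the json helper methods, the same round trip can be sketched with the stdlib `json` module and the `as_dict()` / `from_dict()` methods demonstrated earlier (assuming, as the pprint output above suggests, that `as_dict()` contains only JSON-serializable values): ###Code
# Round-trip a Parameterized object through a JSON string using only
# as_dict()/from_dict() and the stdlib json module
car_json = json.dumps(car.as_dict())
car_from_json = Car.from_dict(json.loads(car_json))
print(car == car_from_json)
###Output _____no_output_____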
workshops/ODSC_timeline_generator.ipynb
###Markdown **State-of-the-art NLP Made Easy with [AdaptNLP](https://www.github.com/novetta/adaptnlp)** 1. Today's Objective: Generate Enriched Data from Unstructured Text *Prerequisite: Install AdaptNLP* ###Code !pip install adaptnlp ###Output Requirement already satisfied: adaptnlp in /home/andrew/Documents/github/adaptnlp
Requirement already satisfied: pickleshare in /home/andrew/Documents/github/adaptnlp/venv-adaptnlp/lib/python3.6/site-packages (from ipython->jupyter-console->jupyter->adaptnlp) Requirement already satisfied: backcall in /home/andrew/Documents/github/adaptnlp/venv-adaptnlp/lib/python3.6/site-packages (from ipython->jupyter-console->jupyter->adaptnlp) Requirement already satisfied: webencodings in /home/andrew/Documents/github/adaptnlp/venv-adaptnlp/lib/python3.6/site-packages (from bleach->nbconvert->jupyter->adaptnlp) Requirement already satisfied: pyrsistent>=0.14.0 in /home/andrew/Documents/github/adaptnlp/venv-adaptnlp/lib/python3.6/site-packages (from jsonschema>=3.0.1->jupyterlab-server~=1.0.0->jupyterlab->adaptnlp) Requirement already satisfied: docutils<0.16,>=0.10 in /home/andrew/Documents/github/adaptnlp/venv-adaptnlp/lib/python3.6/site-packages (from botocore<1.16.0,>=1.15.2->boto3->transformers==2.8.0->adaptnlp) Requirement already satisfied: parso>=0.5.2 in /home/andrew/Documents/github/adaptnlp/venv-adaptnlp/lib/python3.6/site-packages (from jedi>=0.10->ipython->jupyter-console->jupyter->adaptnlp) ###Markdown *Prerequisite: Show Unstructured Text Example* ###Code from IPython.core.display import HTML, display example_text = """ The history (and prehistory) of the United States, started with the arrival of Native Americans before 15,000 B.C. Numerous indigenous cultures formed, and many disappeared before 1500. The arrival of Christopher Columbus in the year 1492 started the European colonization of the Americas. Most colonies were formed after 1600, and the early records and writings of John Winthrop make the United States the first nation whose most distant origins are fully recorded.[1] By the 1760s, the thirteen British colonies contained 2.5 million people along the Atlantic Coast east of the Appalachian Mountains. After defeating France, the British government imposed a series of taxes, including the Stamp Act of 1765, rejecting the colonists' constitutional argument that new taxes needed their approval. Resistance to these taxes, especially the Boston Tea Party in 1773, led to Parliament issuing punitive laws designed to end self-government in Massachusetts. Armed conflict began in 1775. In 1776, in Philadelphia, the Second Continental Congress declared the independence of the colonies as the United States. Led by General George Washington, it won the Revolutionary War with large support from France. The peace treaty of 1783 gave the land east of the Mississippi River (except Canada and Florida) to the new nation. The Articles of Confederation established a central government, but it was ineffectual at providing stability as it could not collect taxes and had no executive officer. A convention in 1787 wrote a new Constitution that was adopted in 1789. In 1791, a Bill of Rights was added to guarantee inalienable rights. With Washington as the first president and Alexander Hamilton his chief adviser, a strong central government was created. Purchase of the Louisiana Territory from France in 1803 doubled the size of the United States. A second and final war with Britain was fought in 1812, which solidified national pride. Encouraged by the notion of manifest destiny, U.S. territory expanded all the way to the Pacific Coast. While the United States was large in terms of area, by 1790 its population was only 4 million. It grew rapidly, however, reaching 7.2 million in 1810, 32 million in 1860, 76 million in 1900, 132 million in 1940, and 321 million in 2015. 
Economic growth in terms of overall GDP was even greater. Compared to European powers, though, the nation's military strength was relatively limited in peacetime before 1940. Westward expansion was driven by a quest for inexpensive land for yeoman farmers and slave owners. The expansion of slavery was increasingly controversial and fueled political and constitutional battles, which were resolved by compromises. Slavery was abolished in all states north of the Mason–Dixon line by 1804, but the South continued to profit from the institution, mostly from the production of cotton. Republican Abraham Lincoln was elected president in 1860 on a platform of halting the expansion of slavery. Seven Southern slave states rebelled and created the foundation of the Confederacy. Its attack of Fort Sumter against the Union forces there in 1861 started the Civil War. Defeat of the Confederates in 1865 led to the impoverishment of the South and the abolition of slavery. In the Reconstruction era following the war, legal and voting rights were extended to freed slaves. The national government emerged much stronger, and because of the Fourteenth Amendment in 1868, it gained explicit duty to protect individual rights. However, when white Democrats regained their power in the South in 1877, often by paramilitary suppression of voting, they passed Jim Crow laws to maintain white supremacy, as well as new disenfranchising state constitutions that prevented most African Americans and many poor whites from voting. This continued until the gains of the civil rights movement in the 1960s and the passage of federal legislation to enforce uniform constitutional rights for all citizens. The United States became the world's leading industrial power at the turn of the 20th century, due to an outburst of entrepreneurship and industrialization in the Northeast and Midwest and the arrival of millions of immigrant workers and farmers from Europe. A national railroad network was completed and large-scale mines and factories were established. Mass dissatisfaction with corruption, inefficiency, and traditional politics stimulated the Progressive movement, from the 1890s to 1920s. This era led to many reforms, including the Sixteenth to Nineteenth constitutional amendments, which brought the federal income tax, direct election of Senators, prohibition, and women's suffrage. Initially neutral during World War I, the United States declared war on Germany in 1917 and funded the Allied victory the following year. Women obtained the right to vote in 1920, with Native Americans obtaining citizenship and the right to vote in 1924. After a prosperous decade in the 1920s, the Wall Street Crash of 1929 marked the onset of the decade-long worldwide Great Depression. Democratic President Franklin D. Roosevelt ended the Republican dominance of the White House and implemented his New Deal programs, which included relief for the unemployed, support for farmers, Social Security and a minimum wage. The New Deal defined modern American liberalism. After the Japanese attack on Pearl Harbor in 1941, the United States entered World War II and financed the Allied war effort and helped defeat Nazi Germany in the European theater. Its involvement culminated in using newly-invented nuclear weapons on two Japanese cities to defeat Imperial Japan in the Pacific theater. The United States and the Soviet Union emerged as rival superpowers in the aftermath of World War II. 
During the Cold War, the two countries confronted each other indirectly in the arms race, the Space Race, proxy wars, and propaganda campaigns. The goal of the United States in this was to stop the spread of communism. In the 1960s, in large part due to the strength of the civil rights movement, another wave of social reforms was enacted which enforced the constitutional rights of voting and freedom of movement to African Americans and other racial minorities. The Cold War ended when the Soviet Union was officially dissolved in 1991, leaving the United States as the world's only superpower. After the Cold War, the United States's foreign policy has focused on modern conflicts in the Middle East. The beginning of the 21st century saw the September 11 attacks carried out by Al-Qaeda in 2001, which was later followed by wars in Afghanistan and Iraq. In 2007, the United States entered its worst economic crisis since the Great Depression, which was followed by slower-than-usual rates of economic growth during the early 2010s. Economic growth and unemployment rates recovered by the late 2010s, however new economic disruption began in 2020 due to the 2019-20 coronavirus pandemic. """ example_text_html = f""" <!DOCTYPE html> <html> <head> <meta name="viewport" content="width=device-width, initial-scale=1"> <style> .collapsible {{ background-color: #777; color: white; cursor: pointer; padding: 18px; width: 100%; border: none; text-align: left; outline: none; font-size: 15px; }} .active, .collapsible:hover {{ background-color: #555; }} .content {{ padding: 0 18px; display: none; overflow: hidden; background-color: #f1f1f1; }} </style> </head> <body> <button type="button" class="collapsible">Example Unstructured Text</button> <div class="content"> <p>{example_text}</p> </div> <script> var coll = document.getElementsByClassName("collapsible"); var i; for (i = 0; i < coll.length; i++) {{ coll[i].addEventListener("click", function() {{ this.classList.toggle("active"); var content = this.nextElementSibling; if (content.style.display === "block") {{ content.style.display = "none"; }} else {{ content.style.display = "block"; }} }}); }} </script> </body> </html> """ display(HTML(example_text_html)) ###Output _____no_output_____ ###Markdown *Prerequisite: Download Models and Generate Final Timeline* ###Code from adaptnlp import ( EasyTokenTagger, EasySequenceClassifier, EasyQuestionAnswering, EasySummarizer, EasyTranslator, EasyDocumentEmbeddings, ) from dateutil.parser import parse import matplotlib.pyplot as plt import numpy as np import matplotlib.dates as mdates import pprint # Summary summarizer = EasySummarizer() summary = summarizer.summarize(text=example_text, model_name_or_path="t5-base", mini_batch_size=1, num_beams=4, min_length=100, max_length=200) summary = summary[0] # Translation of Summary translator = EasyTranslator() translated_summary = translator.translate(text=summary.split(" . "), model_name_or_path="t5-base", t5_prefix="translate English to French", mini_batch_size=3, min_length=0, max_length=200) translated_summary = " . 
".join(translated_summary) # NER nl = "\n" # For f-string formatting tagger = EasyTokenTagger() sentences = tagger.tag_text(text=example_text, model_name_or_path="ner-ontonotes-fast", mini_batch_size=32) ner_dict = sentences[0].to_dict("ner") ner_dict = [f"<b>{i+1}.</b> {pprint.pformat(ent).replace(nl,'<br>')}" for i, ent in enumerate(ner_dict["entities"][:6])] ner_html = "<br>" + "<br>".join(ner_dict) # QA qa = EasyQuestionAnswering() _, top_n = qa.predict_qa(query="What happened in 1776?", context=example_text, model_name_or_path="bert-large-cased-whole-word-masking-finetuned-squad", n_best_size=5, mini_batch_size=1) top_n = [f"<b>{i+1}.</b> {pprint.pformat(dict(ans)).replace(nl,'<br>')}" for i, ans in enumerate(top_n)] top_n_html = "<br>" + "<br>".join(top_n) # Timeline dates = [] for span in sentences[0].get_spans("ner"): if span.tag == "DATE": dates.append(span.text) dates = sorted(dates) dates_map = {} for d in dates: try: dates_map[d] = parse(d, fuzzy=True) except: pass answers_map = {} answer, _ = qa.predict_qa(query=[f"What happened in {t}" for t in dates_map.keys()], context = [example_text]*len(dates_map.keys()), model_name_or_path="bert-large-cased-whole-word-masking-finetuned-squad", n_best_size=7, mini_batch_size=10) def generate_timeline(names_mat: list, dates_mat: list): # Choose levels levels = np.tile([-30, 30, -20, 20, -12, 12, -7, 7, -1, 1], int(np.ceil(len(dates_mat)/10)))[:len(dates_mat)] # Create figure and plot a stem plot with the date fig, ax = plt.subplots(figsize=(20, 6), constrained_layout=True) ax.set_title("Timeline of Significant Events in U.S. History", fontsize=30, fontweight='bold') markerline, stemline, baseline = ax.stem(dates_mat, levels, linefmt="C3-", basefmt="k-", use_line_collection=True) plt.setp(markerline, mec="k", mfc="w", zorder=3) # Shift the markers to the baseline by replacing the y-data by zeros. 
markerline.set_ydata(np.zeros(len(dates_mat))) # Annotate lines vert = np.array(['top', 'bottom'])[(levels > 0).astype(int)] for d, l, r, va in zip(dates_mat, levels, names_mat, vert): ax.annotate(r, xy=(d, l), xytext=(-3, np.sign(l)*3), textcoords="offset points", va=va, ha="right") # Format xaxis with AutoDateLocator ax.get_xaxis().set_major_locator(mdates.AutoDateLocator()) ax.get_xaxis().set_major_formatter(mdates.DateFormatter("%b %Y")) plt.setp(ax.get_xticklabels(), rotation=30, ha="right") # Remove y axis and spines ax.get_yaxis().set_visible(False) for spine in ["left", "top", "right"]: ax.spines[spine].set_visible(False) ax.margins(y=0.1) plt.show() names_mat = list(answer.values()) [:30] dates_mat = list(dates_map.values()) [:30] generate_timeline(names_mat=names_mat, dates_mat=dates_mat) html = f""" <!DOCTYPE html> <html> <head> <style> .item0 {{ grid-area: timeline; }} .item1 {{ grid-area: header; }} .item2 {{ grid-area: menu; }} .item3 {{ grid-area: main; }} .item4 {{ grid-area: right; }} .grid-container {{ display: grid; grid-template: 'timeline timeline timeline timeline timeline timeline' 'header header main main right right' 'menu menu main main right right'; grid-gap: 5px; background-color: #777; padding: 5px; }} .grid-container > div {{ background-color: rgba(255, 255, 255, .9); text-align: center; padding: 20px; font-size: 12px; }} </style> </head> <body> <div class="grid-container"> <div class="item0"> <h2>Extracted Metadata using AdaptNLP</h2> </div> <div class="item1"> <h3>Summary: </h3> <p style="text-align: center">{summary}</p> </div> <div class="item2"> <h3>Translated French Summary: </h3> <p style="text-align: center">{translated_summary}</p> </div> <div class="item3"> <h3>Extracted Entities: </h3> <p style="text-align: left">{ner_html}</p> </div> <div class="item4"> <h3>Top Answers to the Question: <br><em>"What happened in 1776?"</em></h3> <p style="text-align: left">{top_n_html}</p> </div> </div> </body> </html> """ display(HTML(html)) ###Output PyTorch version 1.6.0 available. ###Markdown 2. Run NLP Tasks: Summarization, Translation, Named Entity Recognition (NER), and Question Answering (QA) [Documentation and Guides](http://novetta.github.io/adaptnlp) *Import "Easy" NLP Task Modules with AdaptNLP* ###Code from adaptnlp import EasySummarizer, EasyTranslator, EasyTokenTagger, EasyQuestionAnswering ###Output _____no_output_____ ###Markdown *Set Example Text* ###Code text = """ The history (and prehistory) of the United States, started with the arrival of Native Americans before 15,000 B.C. Numerous indigenous cultures formed, and many disappeared before 1500. The arrival of Christopher Columbus in the year 1492 started the European colonization of the Americas. Most colonies were formed after 1600, and the early records and writings of John Winthrop make the United States the first nation whose most distant origins are fully recorded.[1] By the 1760s, the thirteen British colonies contained 2.5 million people along the Atlantic Coast east of the Appalachian Mountains. After defeating France, the British government imposed a series of taxes, including the Stamp Act of 1765, rejecting the colonists' constitutional argument that new taxes needed their approval. Resistance to these taxes, especially the Boston Tea Party in 1773, led to Parliament issuing punitive laws designed to end self-government in Massachusetts. Armed conflict began in 1775. 
In 1776, in Philadelphia, the Second Continental Congress declared the independence of the colonies as the United States. Led by General George Washington, it won the Revolutionary War with large support from France. The peace treaty of 1783 gave the land east of the Mississippi River (except Canada and Florida) to the new nation. The Articles of Confederation established a central government, but it was ineffectual at providing stability as it could not collect taxes and had no executive officer. A convention in 1787 wrote a new Constitution that was adopted in 1789. In 1791, a Bill of Rights was added to guarantee inalienable rights. With Washington as the first president and Alexander Hamilton his chief adviser, a strong central government was created. Purchase of the Louisiana Territory from France in 1803 doubled the size of the United States. A second and final war with Britain was fought in 1812, which solidified national pride. Encouraged by the notion of manifest destiny, U.S. territory expanded all the way to the Pacific Coast. While the United States was large in terms of area, by 1790 its population was only 4 million. It grew rapidly, however, reaching 7.2 million in 1810, 32 million in 1860, 76 million in 1900, 132 million in 1940, and 321 million in 2015. Economic growth in terms of overall GDP was even greater. Compared to European powers, though, the nation's military strength was relatively limited in peacetime before 1940. Westward expansion was driven by a quest for inexpensive land for yeoman farmers and slave owners. The expansion of slavery was increasingly controversial and fueled political and constitutional battles, which were resolved by compromises. Slavery was abolished in all states north of the Mason–Dixon line by 1804, but the South continued to profit from the institution, mostly from the production of cotton. Republican Abraham Lincoln was elected president in 1860 on a platform of halting the expansion of slavery. Seven Southern slave states rebelled and created the foundation of the Confederacy. Its attack of Fort Sumter against the Union forces there in 1861 started the Civil War. Defeat of the Confederates in 1865 led to the impoverishment of the South and the abolition of slavery. In the Reconstruction era following the war, legal and voting rights were extended to freed slaves. The national government emerged much stronger, and because of the Fourteenth Amendment in 1868, it gained explicit duty to protect individual rights. However, when white Democrats regained their power in the South in 1877, often by paramilitary suppression of voting, they passed Jim Crow laws to maintain white supremacy, as well as new disenfranchising state constitutions that prevented most African Americans and many poor whites from voting. This continued until the gains of the civil rights movement in the 1960s and the passage of federal legislation to enforce uniform constitutional rights for all citizens. The United States became the world's leading industrial power at the turn of the 20th century, due to an outburst of entrepreneurship and industrialization in the Northeast and Midwest and the arrival of millions of immigrant workers and farmers from Europe. A national railroad network was completed and large-scale mines and factories were established. Mass dissatisfaction with corruption, inefficiency, and traditional politics stimulated the Progressive movement, from the 1890s to 1920s. 
This era led to many reforms, including the Sixteenth to Nineteenth constitutional amendments, which brought the federal income tax, direct election of Senators, prohibition, and women's suffrage. Initially neutral during World War I, the United States declared war on Germany in 1917 and funded the Allied victory the following year. Women obtained the right to vote in 1920, with Native Americans obtaining citizenship and the right to vote in 1924. After a prosperous decade in the 1920s, the Wall Street Crash of 1929 marked the onset of the decade-long worldwide Great Depression. Democratic President Franklin D. Roosevelt ended the Republican dominance of the White House and implemented his New Deal programs, which included relief for the unemployed, support for farmers, Social Security and a minimum wage. The New Deal defined modern American liberalism. After the Japanese attack on Pearl Harbor in 1941, the United States entered World War II and financed the Allied war effort and helped defeat Nazi Germany in the European theater. Its involvement culminated in using newly-invented nuclear weapons on two Japanese cities to defeat Imperial Japan in the Pacific theater. The United States and the Soviet Union emerged as rival superpowers in the aftermath of World War II. During the Cold War, the two countries confronted each other indirectly in the arms race, the Space Race, proxy wars, and propaganda campaigns. The goal of the United States in this was to stop the spread of communism. In the 1960s, in large part due to the strength of the civil rights movement, another wave of social reforms was enacted which enforced the constitutional rights of voting and freedom of movement to African Americans and other racial minorities. The Cold War ended when the Soviet Union was officially dissolved in 1991, leaving the United States as the world's only superpower. After the Cold War, the United States's foreign policy has focused on modern conflicts in the Middle East. The beginning of the 21st century saw the September 11 attacks carried out by Al-Qaeda in 2001, which was later followed by wars in Afghanistan and Iraq. In 2007, the United States entered its worst economic crisis since the Great Depression, which was followed by slower-than-usual rates of economic growth during the early 2010s. Economic growth and unemployment rates recovered by the late 2010s, however new economic disruption began in 2020 due to the 2019-20 coronavirus pandemic. """ ###Output _____no_output_____ ###Markdown *Summarize* ###Code summarizer = EasySummarizer() summary = summarizer.summarize( text = text, model_name_or_path = "t5-base", mini_batch_size = 1, num_beams = 4, min_length = 100, max_length = 200, ) summary[0] summary = summary[0].split(" . 
") summary ###Output _____no_output_____ ###Markdown *Translate* ###Code translator = EasyTranslator() translated_summary = translator.translate( text = summary, model_name_or_path = "t5-base", t5_prefix = "translate English to French", mini_batch_size = 3, num_beams = 1, min_length = 0, max_length = 200, ) translated_summary ###Output _____no_output_____ ###Markdown *Named Entity Recognition (NER)* ###Code tagger = EasyTokenTagger() sentences = tagger.tag_text( text = text, model_name_or_path = "ner-ontonotes-fast", mini_batch_size = 32, ) sentences[0].get_spans("ner") ###Output _____no_output_____ ###Markdown *Question Answering* ###Code qa = EasyQuestionAnswering() answer, top_n = qa.predict_qa( context = text, query = "What happened in 1776?", model_name_or_path = "bert-large-cased-whole-word-masking-finetuned-squad", mini_batch_size = 1, n_best_size = 5, ) answer, top_n ###Output _____no_output_____ ###Markdown 3. Generate the Timeline: NER and QA *Run NER Task to Extract "Date" Tagged Entities* ###Code sentences = tagger.tag_text( text = text, model_name_or_path = "ner-ontonotes-fast", mini_batch_size = 1, ) spans = sentences[0].get_spans("ner") spans dates = [] for s in spans: if s.tag == "DATE": dates.append(s.text) dates = sorted(dates) dates from dateutil.parser import parse dates_map = {} for d in dates: try: dates_map[d] = parse(d, fuzzy=True) except: pass dates_map ###Output _____no_output_____ ###Markdown *Run QA Task to Extract Information on "What happened in..." Extracted Dates* ###Code query_texts = [f"What happened in {d}?" for d in dates_map.keys()] context_texts = [text]*len(query_texts) query_texts answers, _ = qa.predict_qa( context = context_texts, query = query_texts, model_name_or_path = "bert-large-cased-whole-word-masking-finetuned-squad", n_best_size = 7, mini_batch_size = 10 ) answers ###Output convert squad examples to features: 0%| | 0/34 [00:00<?, ?it/s]/home/andrew/Documents/github/adaptnlp/venv-adaptnlp/lib/python3.6/site-packages/transformers/tokenization_utils_base.py:1321: FutureWarning: The `max_len` attribute has been deprecated and will be removed in a future version, use `model_max_length` instead. FutureWarning, convert squad examples to features: 100%|██████████| 34/34 [00:02<00:00, 13.73it/s] add example index and unique id: 100%|██████████| 34/34 [00:00<00:00, 168964.85it/s] Predicting answer: 100%|██████████| 28/28 [00:13<00:00, 2.07it/s] ###Markdown *Generate Text Timeline* ###Code for d, a in zip(dates_map.keys(), answers.values()): print(d, a) ###Output 1765 the Stamp Act of 1765 1773 Boston Tea Party 1775 Armed conflict 1776 the Second Continental Congress declared the independence of the colonies 1783 The peace treaty of 1783 1787 A convention in 1787 wrote a new Constitution 1789 A convention in 1787 wrote a new Constitution 1790 by 1790 its population was only 4 million. 
1791 a Bill of Rights was added 1803 Purchase of the Louisiana Territory from France 1804 Slavery was abolished 1810 It grew rapidly 1812 A second and final war with Britain 1860 32 million in 1860 1861 the Civil War 1865 Defeat of the Confederates 1868 the Fourteenth Amendment 1877 white Democrats regained their power in the South 1900 76 million in 1900 1917 the United States declared war on Germany 1920 Women obtained the right to vote 1924 Native Americans obtaining citizenship and the right to vote 1929 Wall Street Crash 1940 132 million 1941 Japanese attack on Pearl Harbor 1991 the Soviet Union was officially dissolved 2001 September 11 attacks 2007 the United States entered its worst economic crisis 2015 321 million 2020 new economic disruption September 11 the September 11 attacks carried out by Al-Qaeda the 21st century wars in Afghanistan and Iraq the turn of the 20th century United States became the world's leading industrial power the year 1492 The arrival of Christopher Columbus ###Markdown *Generate Stem Timeline with Matplotlib* ###Code import matplotlib.pyplot as plt import numpy as np import matplotlib.dates as mdates from datetime import datetime def generate_timeline(names_mat: list, dates_mat: list): # Choose levels levels = np.tile([-30, 30, -20, 20, -12, 12, -7, 7, -1, 1], int(np.ceil(len(dates_mat)/10)))[:len(dates_mat)] # Create figure and plot a stem plot with the date fig, ax = plt.subplots(figsize=(20, 6), constrained_layout=True) ax.set_title("Timeline of Significant Events in U.S. History", fontsize=30, fontweight='bold') markerline, stemline, baseline = ax.stem(dates_mat, levels, linefmt="C3-", basefmt="k-", use_line_collection=True) plt.setp(markerline, mec="k", mfc="w", zorder=3) # Shift the markers to the baseline by replacing the y-data by zeros. markerline.set_ydata(np.zeros(len(dates_mat))) # Annotate lines vert = np.array(['top', 'bottom'])[(levels > 0).astype(int)] for d, l, r, va in zip(dates_mat, levels, names_mat, vert): ax.annotate(r, xy=(d, l), xytext=(-3, np.sign(l)*3), textcoords="offset points", va=va, ha="right") # Format xaxis with AutoDateLocator ax.get_xaxis().set_major_locator(mdates.AutoDateLocator()) ax.get_xaxis().set_major_formatter(mdates.DateFormatter("%b %Y")) plt.setp(ax.get_xticklabels(), rotation=30, ha="right") # Remove y axis and spines ax.get_yaxis().set_visible(False) for spine in ["left", "top", "right"]: ax.spines[spine].set_visible(False) ax.margins(y=0.1) plt.show() names_mat = list(answers.values())[:30] dates_mat = list(dates_map.values())[:30] generate_timeline(names_mat=names_mat, dates_mat=dates_mat) ###Output _____no_output_____
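###Markdown *Aside: fuzzy date parsing used for the timeline* The timeline cells above keep only the NER spans tagged `DATE` and convert them with `dateutil.parser.parse(..., fuzzy=True)`, silently skipping any phrase that does not parse. The next cell is a minimal, self-contained sketch of just that conversion step; it is not part of the original pipeline, and the sample phrases are simply copied from the NER and QA output above. ###Code from dateutil.parser import parse

# Phrases as they appear among the DATE-tagged spans extracted above.
date_phrases = ["the year 1492", "1775", "the turn of the 20th century", "September 11"]

parsed = {}
for phrase in date_phrases:
    try:
        # fuzzy=True lets dateutil ignore surrounding words and pick out the date tokens.
        parsed[phrase] = parse(phrase, fuzzy=True)
    except (ValueError, OverflowError):
        # A phrase with no recognizable date is dropped, mirroring the bare try/except above.
        pass

parsed
###Output _____no_output_____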
aoc_2020.ipynb
###Markdown Advent of Code 2020This solution (Jupyter notebook; python 3.7) by kannix68, @ 2020-12. \Using anaconda distro, conda v4.9.2. installation on MacOS v10.14.6 "Mojave". Generic AoC code ###Code import sys import logging import itertools #from operator import mul import re import numpy as np import lib.aochelper as aoc from lib.aochelper import map_list as mapl from lib.aochelper import filter_list as filterl print("Python version:", sys.version) print("Version info:", sys.version_info) log = aoc.getLogger(__name__) print(f"initial log-level={log.getEffectiveLevel()}") EXEC_RESOURCE_HOGS = False EXEC_EXTRAS = False ###Output _____no_output_____ ###Markdown Problem domain code Day 1: Report Repair ###Code print("Day 1 a") THIS_YEAR = 2020 # "Last christmas, I gave you my heart... this year..." - Wham! test_str = """ 1721 979 366 299 675 1456""".strip() tests = list(map(int, test_str.split("\n"))) log.warning(tests) def solve01a(l): for v in itertools.combinations(l, 2): v = np.array(v) # using numpy for elegance, array "object" methods .sum() and .prod() #print(v) if v.sum() == THIS_YEAR: log.info(f"found {v}") p = v.prod() log.debug(f"product={p}") break return p result = solve01a(tests) print("tests solution", result) ins = list(map(int, aoc.read_file_to_list('./in/day01.in'))) #ins result = solve01a(ins) print("Day 1 a solution:", result) def solve01b(l): for v in itertools.combinations(l, 3): v = np.array(v) #print(v) if v.sum() == THIS_YEAR: log.info(f"found {v}") p = v.prod() #np.prod(np.array(v)) log.debug(f"product={p}") break return p print("Day 1 b") print("test results:", solve01b(tests)) print("Day 1 b solution:", solve01b(ins)) ###Output _____no_output_____ ###Markdown Day 2: Password Philosophy ###Code print("Day 2 a") test_str = """ 1-3 a: abcde 1-3 b: cdefg 2-9 c: ccccccccc """.strip() tests = test_str.split("\n") #tests def solve02a(l): ct = 0 for line in l: rules, pwd = line.split(': ') nums, char = rules.split(' ') min_num, max_num = map(int, nums.split('-')) #print(min_num, max_num, char, pwd) num_ocur = pwd.count(char) if num_ocur >= min_num and num_ocur <= max_num: #print(" pwd is valid") ct += 1 #else: # print(" pwd is INvalid") log.debug(f"num of valid passwords={ct}") return ct result = solve02a(tests) print("tests result:", result) ins = aoc.read_file_to_list('./in/day02.in') print("Day 2 a solution:", solve02a(ins)) def solve02b(l): ct = 0 for line in l: rules, pwd = line.split(': ') nums, char = rules.split(' ') min_num, max_num = map(int, nums.split('-')) #print(min_num, max_num, char, pwd) num_ocur = pwd[min_num-1].count(char) + pwd[max_num-1].count(char) if num_ocur == 1: #print(" pwd is valid") ct += 1 #else: # print(" pwd is INvalid") log.debug(f"num of valid passwords={ct}") return ct print("Day 2 b") print("assert day 2 b test conditions") assert( 1 == solve02b([tests[0]]) ) assert( 0 == solve02b([tests[1]]) ) assert( 0 == solve02b([tests[2]]) ) print("assertions were ok.") print("tests result:", solve02b(tests)) print("Day 2 b solution:", solve02b(ins)) ###Output _____no_output_____ ###Markdown Day 3: Toboggan Trajectory ###Code print("Day 3 a") test_str = """ ..##....... #...#...#.. .#....#..#. ..#.#...#.# .#...##..#. ..#.##..... .#.#.#....# .#........# #.##...#... 
#...##....# .#..#...#.# """.strip() tests = test_str.split("\n") log.debug(tests) def prepare_input(l): outlist = [] for line in l: outlist.append(list(map(lambda it: 1 if it == '#' else 0, list(line)))) return outlist tests = prepare_input(tests) log.debug(tests) def solve03a(l2d): num_rows = len(l2d) num_cols = len(l2d[0]) log.info(f"num rows={num_rows}, cols={num_cols}") posx, posy = [0, 0] dx, dy = [3, 1] ct = 0 tpath = '' for iter in range(1, num_rows+2): #print(f"iter {iter}") if l2d[posy][posx%num_cols] == 1: ct += 1 tpath += 'X' else: tpath += '0' posx += dx posy += dy #print(f"new pos={[posx, posy]}") if posy > num_rows-1: log.debug(f"break at iter#={iter}") break else: iter += 1 outstr = f"encountered {ct} trees." if log.getEffectiveLevel() <= logging.DEBUG: outstr += f"Path={tpath}" log.info(outstr) return ct print("Day 3 a tests:") print(solve03a(tests)) ins = prepare_input(aoc.read_file_to_list('./in/day03.in')) result = solve03a(ins) print("Day 3 a solution:", result) def solve03b(l2d, vec): num_rows = len(l2d) num_cols = len(l2d[0]) log.debug(f"num rows={num_rows}, cols={num_cols}, vector={vec}") posx, posy = [0, 0] dx, dy = vec #reversed(vec) ct = 0 for iter in range(0, num_rows+1): #print(f"i={iter} @{[posx, posy]} : {l2d[posy][posx%num_cols]}") if l2d[posy][posx%num_cols] == 1: ct += 1 posx += dx posy += dy if posy > num_rows-1: log.debug(f"break at iter#={iter}") break else: iter += 1 log.debug(f"encountered {ct} trees.") return ct print("Day 3 b") #print("number of trees encountered:", solve3b(tests, [3, 1])) print("assert day 3 b test conditions:") assert( 2 == solve03b(tests, [1, 1])) assert( 7 == solve03b(tests, [3, 1])) assert( 3 == solve03b(tests, [5, 1])) assert( 4 == solve03b(tests, [7, 1])) assert( 2 == solve03b(tests, [1, 2])) print("assertions were ok.") p = solve03b(tests, [1, 1]) * solve03b(tests, [3, 1]) * solve03b(tests, [5, 1]) \ * solve03b(tests, [7, 1]) * solve03b(tests, [1, 2]) print("day 3 b test result (product):", p) p = solve03b(ins, [1, 1]) * solve03b(ins, [3, 1]) * solve03b(ins, [5, 1]) \ * solve03b(ins, [7, 1]) * solve03b(ins, [1, 2]) print("day 3 b solution (product):", p) ###Output _____no_output_____ ###Markdown Day 4: Passport Processing ###Code fields_mandat = {'byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid'} fields_opt = {'cid'} test_str = """ ecl:gry pid:860033327 eyr:2020 hcl:#fffffd byr:1937 iyr:2017 cid:147 hgt:183cm iyr:2013 ecl:amb cid:350 eyr:2023 pid:028048884 hcl:#cfa07d byr:1929 hcl:#ae17e1 iyr:2013 eyr:2024 ecl:brn pid:760753108 byr:1931 hgt:179cm hcl:#cfa07d eyr:2025 pid:166559648 iyr:2011 ecl:brn hgt:59in """.strip() tests = test_str.split("\n\n") log.debug(tests) def passport_valid(passport): entries = re.split(r'\s+', passport) log.debug(entries) fields = [] for entry in entries: field = entry.split(':')[0] fields.append(field) #log.debug(sorted(fields)) b = fields_mandat.issubset(fields) log.debug(f"valid?: {b}") return b def solve04a(passports): ct = 0 for passport in passports: if passport_valid(passport): ct +=1 log.debug(f"valid-count: {ct}") return ct print("tests valid-count:", solve04a(tests)) ins = aoc.read_file_to_str('./in/day04.in').split("\n\n") print("Day 4 a solution: valid-count:", solve04a(ins)) def passport_valid2(passport): entries = re.split(r'\s+', passport) log.debug(entries) fields = [] values = [] for entry in entries: field, val = entry.split(':') fields.append(field) values.append(val) #log.debug(sorted(fields)) if not fields_mandat.issubset(fields): log.debug("invalid: mandatory fields missing") 
return False for idx, field in enumerate(fields): val = values[idx] if field == 'byr': # byr (Birth Year) - four digits; at least 1920 and at most 2002. ival = int(val) if not (ival >= 1920 and ival <= 2002): log.debug(f"invalid: byr value {val}") return False elif field == 'iyr': # iyr (Issue Year) - four digits; at least 2010 and at most 2020. ival = int(val) if not (ival >= 2010 and ival <= THIS_YEAR): log.debug(f"invalid: iyr value {val}") return False elif field == 'eyr': # eyr (Expiration Year) - four digits; at least 2020 and at most 2030 ival = int(val) if not (ival >= THIS_YEAR and ival <= 2030): log.debug(f"invalid: eyr value {val}") return False elif field == 'hgt': # hgt (Height) - a number followed by either cm or in: # - If cm, the number must be at least 150 and at most 193. # - If in, the number must be at least 59 and at most 76. # py-regex: ^(\d+)(?=cm|in)(cm|in)$ if not re.match(r'^\d+(cm|in)$', val): log.debug(f"invalid: hgt val={val}, form.") return False numstr, unit = re.split(r'(?=cm|in)', val) num = int(numstr) if unit == 'cm': if not (num >= 150 and num <= 193): log.debug(f"invalid: hgt val={val} num={num}") return False elif unit == 'in': if not (num >= 59 and num <= 76): log.debug(f"invalid: hgt val={val} num={num}") return False else: log.debug(f"invalid: hgt val={val} unit={unit}") return False elif field == 'hcl': # hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f. if not re.match(r'^#[0-9a-f]{6}$', val): log.debug(f"invalid: hcl value {val}") return False elif field == 'ecl': # ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth. if not val in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']: log.debug(f"invalid: ecl value {val}") return False elif field == 'pid': # pid (Passport ID) - a nine-digit number, including leading zeroes. if not re.match(r'^[0-9]{9}$', val): log.debug(f"invalid: pid value {val}") return False log.debug("valid!") return True tests_invalid = """ eyr:1972 cid:100 hcl:#18171d ecl:amb hgt:170 pid:186cm iyr:2018 byr:1926 iyr:2019 hcl:#602927 eyr:1967 hgt:170cm ecl:grn pid:012533040 byr:1946 hcl:dab227 iyr:2012 ecl:brn hgt:182cm pid:021572410 eyr:2020 byr:1992 cid:277 hgt:59cm ecl:zzz eyr:2038 hcl:74454a iyr:2023 pid:3556412378 byr:2 """.strip().split("\n\n") print("tests, all invalid:") for passport in tests_invalid: print(passport.replace("\n", " ")) print("valid?:", passport_valid2(passport)) print() tests_valid = """ pid:087499704 hgt:74in ecl:grn iyr:2012 eyr:2030 byr:1980 hcl:#623a2f eyr:2029 ecl:blu cid:129 byr:1989 iyr:2014 pid:896056539 hcl:#a97842 hgt:165cm hcl:#888785 hgt:164cm byr:2001 iyr:2015 cid:88 pid:545766238 ecl:hzl eyr:2022 iyr:2010 hgt:158cm hcl:#b6652a ecl:blu byr:1944 eyr:2021 pid:093154719 """.strip().split("\n\n") print("tests, all valid:") for passport in tests_valid: print(passport.replace("\n", " ")) print("valid?:", passport_valid2(passport)) print() def solve04b(passports): ct = 0 for passport in passports: log.debug(passport) if passport_valid2(passport): ct +=1 log.debug(f"valid-count: {ct}") return ct assert( 0 == solve04b(tests_invalid) ) assert( 4 == solve04b(tests_valid) ) result = solve04b(ins) print("Day 4 b result:", result) ###Output _____no_output_____ ###Markdown Day 5: Binary Boarding ###Code import functools import operator # see: [python - How to make a flat list out of list of lists? 
- Stack Overflow](https://stackoverflow.com/questions/952914/how-to-make-a-flat-list-out-of-list-of-lists) def flatten_list(l): """Flatten a list.""" return functools.reduce(operator.iconcat, l, []) def get_seat_id(s): rows = aoc.range_list(0, 128) cols = aoc.range_list(0, 8) #log.debug(cols) for c in s: if c == 'F': rows = rows[:len(rows)//2] elif c == 'B': rows = rows[len(rows)//2:] elif c == 'L': cols = cols[:len(cols)//2] elif c == 'R': cols = cols[len(cols)//2:] result_list = flatten_list([rows, cols]) log.debug(result_list) return result_list[0]*8 + result_list[1] boardingpass = 'FBFBBFFRLR' get_seat_id(boardingpass) # Given tests: assert(357 == get_seat_id('FBFBBFFRLR')) assert(567 == get_seat_id('BFFFBBFRRR')) assert(119 == get_seat_id('FFFBBBFRRR')) assert(820 == get_seat_id('BBFFBBFRLL')) ins = aoc.read_file_to_list('./in/day05.in') print( "Day 5 a solution:", max(map(get_seat_id, ins)) ) print("number of boarding passes given:", (len(ins))) #print("number of used rows in plane:", (len(ins)+1)/8.0) min_seat_id = 0*8 + 0 # from min row and min column/seat max_seat_id = 127*8 + 7 # from max row and max column/seat print("seat_id min/max", [min_seat_id, max_seat_id]) seat_ids = aoc.range_list(min_seat_id, max_seat_id+1) for boardingpass in ins: # remove used/given seat_id seat_ids.remove(get_seat_id(boardingpass)) log.debug("ids remain unseen:") log.debug(seat_ids) for seat_id in seat_ids: if not( (seat_id-1) in seat_ids and (seat_id>min_seat_id) ) \ and not( (seat_id+1) in seat_ids and (seat_id<max_seat_id) ): print("(Day 5 b solution) found id:", seat_id) ###Output _____no_output_____ ###Markdown Day 6: Custom Customs ###Code test_str = """ abcx abcy abcz """.strip() test = test_str.split("\n") log.debug(test) from collections import defaultdict def get_group_answers(answers_in): answers = defaultdict(int) for tanswers in answers_in: for tanswer in tanswers: answers[tanswer] += 1 #log.debug(answers) #log.debug(f"len={len(answers.keys())}, vals={answers.keys()}") return answers print("testing...", get_group_answers(test)) assert( 6 == len(get_group_answers(test).keys()) ) test_str = """ abc a b c ab ac a a a a b """.strip() tests = test_str.split("\n\n") log.debug(tests) def solve06a(groupanswers): i = 0 for groupanswer in groupanswers: result = get_group_answers(groupanswer.split("\n")).keys() #log.debug(f"distinctanswers={result} for {groupanswer}") i += len(result) log.info(f"answerssum={i}") return i assert( 11 == solve06a(tests) ) print("test assertion ok.") ins = aoc.read_file_to_str('./in/day06.in').split("\n\n") print("Day 6 a solution: groupanwers-sum:", solve06a(ins)) print("Day 6 b") def get_group_answers2(answers_in): answers = defaultdict(int) num_persons = len(answers_in) for tanswers in answers_in: for tanswer in tanswers: answers[tanswer] += 1 #log.debug(answers) #log.debug(len(answers.keys()), answers.keys()) ct = 0 #for idx, (key, val) in enumerate(d.items()): for key, val in answers.items(): if val == num_persons: ct += 1 return ct def solve06b(groupanswers): i = 0 for groupanswer in groupanswers: result = get_group_answers2(groupanswer.split("\n")) #log.debug(f"all-answers={result} for {groupanswer}") i += result log.info(f"all-answers-sum={i}") return i assert( 6 == solve06b(tests) ) print("test assertion ok.") print("Day 6 b solution: groupanwers-sum:", solve06b(ins)) ###Output _____no_output_____ ###Markdown Day 7: Handy Haversacks ###Code import networkx as nx test_str = """ light red bags contain 1 bright white bag, 2 muted yellow bags. 
dark orange bags contain 3 bright white bags, 4 muted yellow bags. bright white bags contain 1 shiny gold bag. muted yellow bags contain 2 shiny gold bags, 9 faded blue bags. shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags. dark olive bags contain 3 faded blue bags, 4 dotted black bags. vibrant plum bags contain 5 faded blue bags, 6 dotted black bags. faded blue bags contain no other bags. dotted black bags contain no other bags. """.strip() tests = test_str.split("\n") log.debug(test) def get_bag_graph(l): graph = nx.DiGraph() for line in l: try: src, trg = line.split(" bags contain ") except ValueError: log.error(f"parse error, input=>{line}<") bags_contained = trg.replace(".", "").split(", ") if not (len(bags_contained) == 1 and bags_contained[0].startswith("no other")): graph.add_node(src) for idx, bag_in in enumerate(bags_contained): rxm = re.match(r"^(\d+)\s+(.*?)\s+bag", bag_in) res = [int(rxm.group(1)), rxm.group(2)] #log.debug("src:", src, "; trg:", res) bags_contained[idx] = res graph.add_node(res[1]) #log.debug(f"add_edge {src} => {res[0]} {res[1]}") graph.add_edge(src, res[1], weight=res[0]) else: graph.add_edge(src, "END", weight=0) #print(src, bags_contained) log.info( f"graph # of nodes: {len(graph.nodes())}" ) log.info( f"graph # of edges: {len(graph.edges())}" ) return graph graph = get_bag_graph(tests) for e in graph.edges(): log.debug(f" edge: {e} attrs={nx.get_edge_attributes(graph, 'weight')[e]}") def get_paths_to(graph, trg): paths = [] for src in graph.nodes(): #log.debug("src:", src) for p in nx.all_simple_paths(graph, src, trg): paths.append(p) return paths def solve07a(l, trg): graph = get_bag_graph(l) sources = aoc.map_list(lambda it: it[0], get_paths_to(graph, trg)) num_sources = len(set(sources)) return num_sources trg = 'shiny gold' assert( 4 == solve07a(tests, trg) ) ins = aoc.read_file_to_str('./in/day07.in').strip().split("\n") print("Day 7 a solution: num-distinct-src-colors", solve07a(ins, 'shiny gold')) print("Day 7 b") edge_weights = nx.get_edge_attributes(graph, 'weight') #for p in nx.all_simple_edge_paths(graph, 'shiny gold', "END"): # not available seen_subpaths = [] for p in nx.all_simple_paths(graph, 'shiny gold', "END"): log.debug(p) for snode_idx in range(len(p)-1): tup = tuple([p[snode_idx], p[snode_idx+1]]) subpath = tuple(p[0:snode_idx+2]) log.debug(f"subpath: {subpath}") if not subpath in seen_subpaths: seen_subpaths.append(subpath) log.debug(" new subpath") else: log.debug(" already SEEN subpath") log.debug(f" path-edge#{snode_idx}: {tup} {edge_weights[tup]}") log.debug(seen_subpaths) # see: [python - Getting subgraph of nodes between two nodes? 
- Stack Overflow](https://stackoverflow.com/questions/32531117/getting-subgraph-of-nodes-between-two-nodes) def subgraph_between(graph, start_node, end_node): paths_between_generator = nx.all_simple_paths(graph, source=start_node,target=end_node) nodes_between_set = {node for path in paths_between_generator for node in path} return( graph.subgraph(nodes_between_set) ) subgraph = subgraph_between(graph, 'shiny gold', 'END') for p in subgraph.edges: log.debug(p) log.info("sub-paths for shiny gold:") for p in nx.all_simple_paths(subgraph, 'shiny gold', "END"): log.info(p) edge_weights = nx.get_edge_attributes(graph, 'weight') seen_subpaths = [] for p in nx.all_simple_paths(graph, 'shiny gold', "END"): log.debug(p) for start_idx in reversed(range(len(p)-2)): seen = False subpath = tuple(p[0:start_idx+2]) if not subpath in seen_subpaths: seen_subpaths.append(subpath) else: seen = True tup = tuple([p[start_idx], p[start_idx+1]]) w = edge_weights[tup] log.debug(f" subedge={tup}, weight={w}; subpath={subpath}, seen={seen}") # Personal solution to day 7 a UNFINISHED. clr = 'shiny gold' clr_edges = filter(lambda it: it[0]==clr, list(graph.edges)) for edge in clr_edges: log.debug(f"edge={edge}, edge-weight={edge_weights[edge]}") # "Inspiration" soltion, copied/stolen from user el-guish's solution in: # [- 2020 Day 07 Solutions - : adventofcode](https://www.reddit.com/r/adventofcode/comments/k8a31f/2020_day_07_solutions/) # Using recursion. rules = open('in/day07.in').readlines() def parse_rule(r): parent, contents = r[:-2].split(' bags contain ') childs = [parse_child_bag(c) for c in contents.split(',') if c != 'no other bags' and c != 'no other bag'] return (parent, childs) def parse_child_bag(child_st): cparts = child_st.split() qty = int(cparts[0]) color = ' '.join(cparts[1:-1]) return (color, qty) def required_contents(bag_color): return sum(q + q * required_contents(color) for color, q in contains[bag_color] ) contains = dict(parse_rule(r) for r in test_str.split("\n")) log.debug("test rules (parsed):", contains) print("tests result", required_contents('shiny gold')) contains = dict(parse_rule(r) for r in rules) print("Day 7 b solution", required_contents('shiny gold')) ###Output _____no_output_____ ###Markdown Day 8: Handheld Halting ###Code def read_prog(l): outlst = aoc.map_list(lambda it: it.split(' '), l) for instr in outlst: instr[1] = int(instr[1]) return outlst def run_cpu_prog(prog): cpuct = 0 pptr = 0 prog_len = len(prog) seen = [] acc = 0 while True: cpuct += 1 if pptr in seen: log.info(f"found inf-loop @cpuct={cpuct} @instr#={pptr} : {instr}") break elif pptr == prog_len: log.info(f"found prog-term @cpuct={cpuct} @instr#={pptr} : {instr}") break else: seen.append(pptr) instr = prog[pptr] op, par = instr log.debug(f"instr#{cpuct} instr={instr}") if cpuct > 10_000: raise Exception("failsafe") if op == 'nop': pptr += 1 #log.debug(f" new pptr={pptr}") elif op == 'acc': acc += par pptr += 1 #log.debug(f" new acc={acc}") elif op == 'jmp': pptr += par #log.debug(f" jmp for={par} to={pptr}") else: raise Exception(f"unknown opcode in {instr}") return acc tests = """ nop +0 acc +1 jmp +4 acc +3 jmp -3 acc -99 acc +1 jmp -4 acc +6 """.strip().split("\n") log.debug(tests) test_prog = read_prog(tests) log.debug(test_prog) run_cpu_prog(test_prog) ins = aoc.read_file_to_str('./in/day08.in').strip().split("\n") prog = read_prog(ins) print("Day 8 a solution: acc:", run_cpu_prog(prog)) print("Day 8 b") def check_cpu_prog(prog): prog_len = len(prog) cpuct = 0 pptr = 0 seen = [] acc = 0 while True: if 
pptr == prog_len: log.debug(f"OK: prog terminates! @cpuct={cpuct} @instr#={pptr} : last-instr={instr}") return True cpuct += 1 instr = prog[pptr] op, par = instr #log.debug(f"instr#{cpuct} {instr}") if pptr in seen: log.debug(f"Fail: found inf-loop @cpuct={cpuct} @instr#={pptr} : {instr}") return False else: seen.append(pptr) if cpuct > 10_000: raise Exception("failsafe") if op == 'nop': pptr += 1 #log.debug(f" new pptr={pptr}") elif op == 'acc': acc += par pptr += 1 #log.debug(f" new acc={acc}") elif op == 'jmp': pptr += par #log.debug(f" jmp for={par} to={pptr}") else: raise Exception(f"unknown opcode in {instr}") return acc print("test result: check-cpu-prog", check_cpu_prog(test_prog)) from copy import deepcopy def check_prog_variations(prog): base_prog = deepcopy(prog) altinstrs = [] for idx, instr in enumerate(base_prog): if instr[0] in ['nop', 'jmp']: altinstrs.append([idx, instr]) log.debug(f"alternate instructions: {altinstrs}") if check_cpu_prog(base_prog): #log.debug("prog=", base_prog) acc = run_cpu_prog(base_prog) log.debug(f"prog ok, acc={acc}") for elem in altinstrs: #log.debug("elem:", elem) idx, v = elem instr, par = v prog = deepcopy(base_prog) if instr == 'nop': prog[idx][0] = 'jmp' elif instr == 'jmp': prog[idx][0] = 'nop' #log.debug(f"new-instr @{idx}={prog[idx][0]}") #log.debug("new-prog=", prog) if check_cpu_prog(prog): acc = run_cpu_prog(prog) log.info(f"prog ok, acc={acc}") break return acc result = check_prog_variations(test_prog) print("test result: check-prog-variations", result) result = check_prog_variations(read_prog(ins)) print("Day 8 b result: check-prog-variations", result) ###Output _____no_output_____ ###Markdown Day 9: Encoding Error ###Code tests = """ 35 20 15 25 47 40 62 55 65 95 102 117 150 182 127 219 299 277 309 576 """.strip().split("\n") from typing import List def pp_lst(lst): return "[" + str.join(',', aoc.map_list(str, lst)) + "]" def check_xmas_data(xmas_data: int, preamble: List[int]) -> bool: preamble_len = len(preamble) #log.debug("[check_xmas_data] xmas_data:", xmas_data, ", preamble_len;:", len(preamble)) ok = False for combi in itertools.combinations(preamble, 2): # for entries no combination with itself! 
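    # XMAS rule: a value is valid when some pair of distinct entries in the current preamble sums to it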
if sum(combi) == xmas_data: ok = True #log.info(f"[check_xmas_data] OK: xmas-data-elem {xmas_data} is sum of prev-elems:{combi}") break return ok def check_xmas_data_seq(xmas_data_seq: List[int], preamble: List[int]) -> bool: #log.debug("[check_xmas_data_seq] xmas_data_seq:", xmas_data_seq, ", preamble_len;:", len(preamble)) preamble_len = len(preamble) all_ok = True for xmas_data in xmas_data_seq: #log.info(f"[check_xmas_data_seq] elem={xmas_data} preamble={pp_lst(preamble)}") ok = check_xmas_data(xmas_data, preamble) preamble.pop(0) preamble.append(xmas_data) #log.info(f" p appended={xmas_data}, removed={remvd}, preamble={str.join(',', lmap(str, preamble))}") all_ok &= ok return all_ok preamble0 = list(range(1, 25+1)) # numbers 1..25 log.debug(preamble0) assert( True == check_xmas_data(26, preamble0) ) assert( True == check_xmas_data(49, preamble0) ) assert( False == check_xmas_data(100, preamble0) ) assert( False == check_xmas_data(50, preamble0) ) preamble1 = flatten_list( [[20], list(range(1, 20)), list(range(21, 26))] ) log.debug(preamble1) assert( True == check_xmas_data_seq([45, 26], preamble1) ) assert( False == check_xmas_data_seq([45, 65], preamble1) ) assert( True == check_xmas_data_seq([45, 64], preamble1) ) assert( True == check_xmas_data_seq([45, 66], preamble1) ) def verify_xmas_data_seq(xmas_data_rawseq: List[int], preamble_len=25) -> bool: """List `xmas_data_rawseq` contains the preamble as head.""" preamble = xmas_data_rawseq[0:preamble_len] xmas_data_seq = xmas_data_rawseq[preamble_len:] log.debug(f"[verify_xmas_data_seq] xmas_data_seq:{pp_lst(xmas_data_seq)}, preamble:{pp_lst(preamble)}") preamble_len = len(preamble) oks = [] for xmas_data in xmas_data_seq: #log.info(f"[check_xmas_data_seq] elem={xmas_data} preamble={str.join(',', lmap(str, preamble))}") ok = check_xmas_data(xmas_data, preamble) oks.append([xmas_data, ok]) preamble.pop(0) preamble.append(xmas_data) return oks raw_testdata = aoc.map_list(int, tests) res = verify_xmas_data_seq(raw_testdata, preamble_len=5) res = aoc.map_list(lambda it: it[0], aoc.filter_list(lambda it: it[1] == False, res)) log.info(f"test False results: {res}") assert( [127] == res ) ins = aoc.map_list(int, aoc.read_file_to_list('./in/day09.in')) log.debug(ins) res = verify_xmas_data_seq(ins, preamble_len=25) res = aoc.map_list(lambda it: it[0], aoc.filter_list(lambda it: it[1] == False, res)) log.info(f"found invalid number(s): {res}") invalid_num = res[0] print("Day 8 a solution:", invalid_num) # see: [python - List all contiguous sub-arrays](https://stackoverflow.com/questions/41576911/list-all-contiguous-sub-arrays) def get_all_windows(lst, min_win_len=1): """Generator yielding all sub-windows (contiguous sublists) of given list with min_win_len.""" for win_len in range(min_win_len, len(lst)+1): for idx in range(len(lst)-win_len+1): yield lst[idx:idx+win_len] test_invalidnum = 127 raw_testdata2 = raw_testdata.copy() raw_testdata2.remove(test_invalidnum) for subl in get_all_windows(raw_testdata2): if sum(subl) == test_invalidnum: log.info(f"found fulfilling-window: {subl}") break ins2 = ins.copy() ins2.remove(invalid_num) for subl in get_all_windows(ins2): if sum(subl) == invalid_num: log.info(f"found fulfilling-window: {subl}") min_elem = min(subl) max_elem = max(subl) solut = min_elem+max_elem log.info(f"min, max, sum: {[min_elem, max_elem, solut]}") break ###Output _____no_output_____ ###Markdown Day 10: Adapter Array ###Code def solve10a(loi): current = 0 remainders = loi.copy() chain = [current] jolts = [] for i in 
range(len(remainders)): targets = filterl(lambda it: it >= current and it <= current + 3, remainders) target = min(targets) remainders.remove(target) #log.debug(f"#{i} from={current} targets={targets}, target={target}, djolt={target-current}, remainders={remainders}") chain.append(target) jolts.append(target-current) current = target if len(remainders) == 0: jolts.append(3) # final device 3 jolts higher than lasta dapter in chain j1 = jolts.count(1) j3 = jolts.count(3) res = j1*j3 log.info(f"chain {aoc.cl(chain)} terminated ok, jolts={aoc.cl(jolts)}, j1#={j1}, j3#={j3}, res={res}") return j1*j3 raise Exception("solution not found") tests = """ 16 10 15 5 1 11 7 19 6 12 4 """.strip().split("\n") tests1 = aoc.map_list(int, tests) log.debug(f"test1={tests1}") res = solve10a(tests1) aoc.assert_msg("test 1", 7*5 == res) log.info(f"tests1 solution: {res}") tests = """ 28 33 18 42 31 14 46 20 48 47 24 23 49 45 19 38 39 11 1 32 25 35 8 17 7 9 4 2 34 10 3 """.strip().split("\n") log.setLevel( logging.INFO ) tests2 = mapl(int, tests) res = solve10a(tests2) aoc.assert_msg("test 2", 220 == res) log.info(f"tests2 solution: {res}") ins = mapl(int, aoc.read_file_to_list('./in/day10.in')) res = solve10a(ins) log.info(f"Day 10 a solution: {res}") import time def find_paths(loi): # loi is a list of ints (input) start_tm = int(time.time()) end_elem = max(loi) partials = {0: [[0]]} found_num = 0 current = 0 iter = 0 lastlevel_partials = 0 # just only for printing (debugging) last_partials = [[0, 1]] elems_avail = loi.copy() for lvl in range(1, len(loi)+1): last_partials_keys = mapl(lambda it: it[0], last_partials) min_last_elem = min(last_partials_keys) elems_avail = filterl(lambda it: it > min_last_elem, elems_avail) filtered_elems = {} last_partials_count = {} for src in sorted(set(last_partials_keys)): filtered_elems[src] = filterl(lambda it: it > src and it <= src + 3, elems_avail) last_partials_count[src] = sum(mapl(lambda it: it[1], filterl(lambda it: it[0]==src, last_partials))) partials_diff = len(last_partials_keys)-lastlevel_partials needed_tm = int(time.time()) - start_tm log.debug(f"level={lvl} @{needed_tm}s, found={found_num}, paths-diff={partials_diff:,} before-partials-#={len(last_partials):,}, min-last-elem={min_last_elem}, elems_avail#={len(elems_avail)}") log.debug(f" last-partials-ct={last_partials_count}") lastlevel_partials = len(last_partials) partials = [] for partial in sorted(set(last_partials_keys)): #last_partials: iter += 1 if iter % 100_000_000 == 0: log.debug(f"at iter#={iter:,}, found#={found_num}, level={lvl}") #if iter > 10_000_000_000: # FAILSAFE # return found targets = filtered_elems[partial] for target in targets: if target == end_elem: if found_num % 100_000 == 0: log.debug(f"at found# {found_num}") found_num += last_partials_count[partial] else: partials.append( [target, last_partials_count[partial]] ) last_partials = partials log.info(f"level={lvl} @{needed_tm}s, found={found_num}, paths-diff={partials_diff:,} before-partials-#={len(last_partials):,}, min-last-elem={min_last_elem}, elems_avail#={len(elems_avail)}") return found_num #log.setLevel( aoc.LOGLEVEL_TRACE ) log.debug(f"effective-log-level={log.getEffectiveLevel()}") found = find_paths(tests1) log.info(f"tests1 found {found} from {tests1}") assert( 8 == found ) #found == 8 found = find_paths(tests2) log.info(f"test2 found {found} paths") # 19208 assert( 19208 == found ) found = find_paths(ins) log.info(f"Day 10 b solution: found {found} paths") ###Output _____no_output_____ ###Markdown Day 11: Seating System 
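Both parts below are Game-of-Life-style cellular automata on the seat grid: in part a an empty seat `L` becomes occupied when no adjacent seat is occupied, and an occupied seat `#` is vacated again once four or more adjacent seats are occupied; part b instead uses the first seat visible in each of the eight directions and a threshold of five. Iteration stops as soon as the grid hash no longer changes between steps.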
###Code #log.setLevel( aoc.LOGLEVEL_TRACE ) log.debug(f"effective-log-level={log.getEffectiveLevel()}") import copy # for deepcopy import hashlib class CellularWorld: def __init__(self, world, store_hashes=False): """World object constructor, world has to be given as a list-of-lists of chars.""" self.world = world self.dim = [len(world[0]), len(world)] self.iter_num = 0 log.info(f'[CellularWorld] new dim={self.dim}') self.world = world self.store_hashes = store_hashes if self.store_hashes: self.world_hashes = [self.get_hash()] def repr(self): """Return representation str (can be used for printing).""" return str.join("\n", map(lambda it: str.join('', it), self.world)) def set_world(self, world): self.world = world self.dim = [len(world[0]), len(world)] def get_hash(self): return hashlib.sha1(self.repr().encode()).hexdigest() def get_neighbors8(self, x, y): """Get cell's surrounding 8 neighbors, omitting boundaries.""" log.trace(f"[CellularWorld]:get_neighbors8({x},{y})") dim_x = self.dim[0] dim_y = self.dim[1] neighbors = '' for nx in range(x-1, x+2): for ny in range(y-1, y+2): if (nx >= 0 and nx < dim_x) and (ny >= 0 and ny < dim_y) and not (nx == x and ny == y): #log.info(f" neighb={[nx, ny]}") neighbors += self.world[ny][nx] return neighbors def iterate(self, steps=1): for i in range(steps): world2 = copy.deepcopy(self.world) for y in range(self.dim[1]): for x in range(self.dim[0]): val = self.world[y][x] neighbors = self.get_neighbors8(x, y) #log.trace(f"[{x},{y}]='{val}' nbs='{neighbors}'") if val == 'L' and neighbors.count('#') == 0: world2[y][x] = '#' elif val == '#' and neighbors.count('#') >= 4: world2[y][x] = 'L' self.iter_num += 1 self.set_world(world2) if self.store_hashes: self.world_hashes.append(self.get_hash()) def find_cycle(self, max_iter=1_000): """This may only be called at initial state, before any previous iterations.""" seen = [world.repr] for i in range(max_iter): if i % 1_000 == 0: log.debug(f"iter# {i}, still running") world.iterate() world_repr = world.repr() if world_repr in seen: start_idx = seen.index(world_repr) log.info(f"found cycle @ iter={i+1}, seen-idx={start_idx}") return([start_idx, i+1]) else: seen.append(world_repr) raise Exception("no world iter cycle found") def find_stable(self, max_iter=1_000): last_hash = self.get_hash() #log.info(f"cworld initial state: (hash={last_hash}).") #log.debug("world-repr=\n{cworld.repr()}") for i in range(1, max_iter+1): self.iterate() this_hash = self.get_hash() #log.debug(f"cworld state after iter#{i}, hash={this_hash}") #":\n{self.repr()}") if this_hash == last_hash: log.info(f"[CellularWorld:find_stable] BREAK on stable beginning @{i-1}") return True else: last_hash = this_hash raise Exception(f"[CellularWorld:find_stable] NO stable world iter found, after break on {max_iter} steps") tests = """ L.LL.LL.LL LLLLLLL.LL L.L.L..L.. LLLL.LL.LL L.LL.LL.LL L.LLLLL.LL ..L.L..... 
LLLLLLLLLL L.LLLLLL.L L.LLLLL.LL """.strip().split("\n") tests = mapl(list, tests) cworld = CellularWorld(tests) #, store_hashes=True) cworld.find_stable() seats_occ = cworld.repr().count('#') log.info(f"test stable occupied-seats={seats_occ}") ins = aoc.read_file_to_list('./in/day11.in') ins = mapl(list, ins) cworld = CellularWorld(ins) cworld.find_stable() seats_occ = cworld.repr().count('#') log.info(f"Day 11 a solution: stable occupied-seats={seats_occ} after {cworld.iter_num} iterations") print("Day 11 b") class CellularWorldDirected(CellularWorld): def iterate(self, steps=1): for i in range(steps): world2 = copy.deepcopy(self.world) for y in range(self.dim[1]): for x in range(self.dim[0]): val = self.world[y][x] neighbors = self.get_seen_occuppied_seats(x, y) if val == 'L' and neighbors == 0: world2[y][x] = '#' elif val == '#' and neighbors >= 5: world2[y][x] = 'L' self.iter_num += 1 self.set_world(world2) if self.store_hashes: self.world_hashes.append(self.get_hash()) def get_seen_occuppied_seats(self, x, y): directions = [ [1,0], [-1,0], [0,1], [0,-1], [1,1], [-1,1], [1,-1], [-1,-1], ] seen = 0 for d in directions: #dseen = 0 dx, dy = d # directions nx, ny = [x, y] # startpoint while(True): # loop handling one direction vector nx, ny = [nx+dx, ny+dy] if nx < 0 or ny < 0 or nx >= self.dim[0] or ny >= self.dim[1]: break if "#" == self.world[ny][nx]: #dseen += 1 seen += 1 break # in each direction, only 1 occupied can bee seen elif "L" == self.world[ny][nx]: break # empty seats block view return seen def find_cell(self, val): """Find first cell containing given value, return it's `[x, y]` coordinates.""" for y in range(self.dim[1]): for x in range(self.dim[0]): if self.world[y][x] == val: return [x, y] tests = """ .......#. ...#..... .#....... ......... ..#L....# ....#.... ......... #........ ...#..... """.strip().split("\n") tests = mapl(list, tests) cworld = CellularWorldDirected(tests) log.info(f"world repr:\n{cworld.repr()}") c = cworld.find_cell('L') n = cworld.get_seen_occuppied_seats(c[0], c[1]) log.info(f" empty spectator cell={c}, neib-#={n}") assert( 8 == n ) tests = """ ............. .L.L.#.#.#.#. ............. """.strip().split("\n") tests = mapl(list, tests) cworld = CellularWorldDirected(tests) c = cworld.find_cell('L') assert( 0 == cworld.get_seen_occuppied_seats(c[0], c[1]) ) tests = """ .##.##. #.#.#.# ##...## ...L... ##...## #.#.#.# .##.##. """.strip().split("\n") tests = mapl(list, tests) cworld = CellularWorldDirected(tests) c = cworld.find_cell('L') assert( 0 == cworld.get_seen_occuppied_seats(c[0], c[1]) ) tests = """ L.LL.LL.LL LLLLLLL.LL L.L.L..L.. LLLL.LL.LL L.LL.LL.LL L.LLLLL.LL ..L.L..... 
LLLLLLLLLL L.LLLLLL.L L.LLLLL.LL """.strip().split("\n") tests = mapl(list, tests) cworld = CellularWorldDirected(tests) #for i in range(12): # log.info(f"before: 0,0 val={cworld.world[0][0]} seen-occupied-#={cworld.get_seen_occuppied_seats(0,0)}") # cworld.iterate() # log.info(f"after {cworld.iter_num} iters, hash={cworld.get_hash()}: repr:\n{cworld.repr()}") cworld.find_stable() log.info(f"world stable after {cworld.iter_num} iters.") #": repr:\n{cworld.repr()}") seats_occ = cworld.repr().count('#') assert(26 == seats_occ) log.info(f"test stable occupied-seats={seats_occ}") cworld = CellularWorldDirected(ins) cworld.find_stable() log.info(f"world stable after {cworld.iter_num} iters.") #": repr:\n{cworld.repr()}") seats_occ = cworld.repr().count('#') log.info(f"Day 11 b solution: stable occupied-seats={seats_occ} after {cworld.iter_num} iters") ###Output _____no_output_____ ###Markdown Day 12: Rain Risks ###Code directions = ['N', 'W', 'S', 'E'] direct_vecs = {'N': [0, 1], 'W': [-1, 0], 'S': [0, -1], 'E': [1, 0]} def dist_manhattan(pos, pos_ref): return abs(pos[0]-pos_ref[0]) + abs(pos[1]-pos_ref[1]) def move_ship(los): ship_direct = 'E' ship_vec = direct_vecs[ship_direct] pos_ref = [0, 0] pos = pos_ref.copy() for cmd_str in los: cmd, val = [cmd_str[0], int(cmd_str[1:])] log.debug(f"cmd={[cmd, val]}") if cmd in directions: vec = direct_vecs[cmd] pos[0] += val * vec[0] pos[1] += val * vec[1] log.debug(f" new pos: {pos}") elif cmd == 'F': pos[0] += val * ship_vec[0] pos[1] += val * ship_vec[1] log.debug(f" new pos: {pos}; ship_direct={ship_direct}") elif cmd == 'R' or cmd == 'L': turns = val//90 if cmd == 'R': new_direct_idx = directions.index(ship_direct)-turns elif cmd == 'L': new_direct_idx = (directions.index(ship_direct)+turns) % len(directions) log.debug(f"cur_direct={ship_direct}:{directions.index(ship_direct)}, new_direct_idx={new_direct_idx}; cmd={cmd_str}; turns={turns}") ship_direct = directions[new_direct_idx] ship_vec = direct_vecs[ship_direct] log.debug(f" new ship_direct: {ship_direct}; from turn:{cmd}") return dist_manhattan(pos, pos_ref) tests = """ F10 N3 F7 R90 F11 """.strip().split("\n") assert( 25 == move_ship(tests) ) ins = aoc.read_file_to_list('./in/day12.in') res = move_ship(ins) log.info(f"Day 12 a solution: {res}") print("Day 12 b") def move_ship_by_waypoint(los): pos_ref = [0, 0] waypt_pos = [10, 1] pos = pos_ref.copy() for cmd_str in los: cmd, val = [cmd_str[0], int(cmd_str[1:])] log.debug(f"cmd={[cmd, val]}") if cmd in directions: vec = direct_vecs[cmd] dpos = [val * vec[0], val * vec[1]] waypt_pos[0] += dpos[0] waypt_pos[1] += dpos[1] log.debug(f" new waypt-rpos: {waypt_pos}") elif cmd == 'F': dpos = [val * waypt_pos[0], val * waypt_pos[1]] pos[0] += dpos[0] pos[1] += dpos[1] log.debug(f" new pos: {pos}; waypt-rpos={waypt_pos}") elif cmd == 'R' or cmd == 'L': # rotate cartesian coordinates around origin in 90 degrees steps if cmd_str in ['R90', 'L270']: # rotate RIGHT cx, cy = waypt_pos waypt_pos = [cy, -cx] elif cmd_str in ['L90', 'R270']: # rotate LEFT cx, cy = waypt_pos waypt_pos = [-cy, cx] elif cmd_str in ['L180', 'R180']: # invert 180 cx, cy = waypt_pos waypt_pos = [-cx, -cy] elif cmd_str in ['L180', 'R180']: cx, cy = waypt_pos waypt_pos = [-cx, -cy] else: raise Exception(f"unknown cmd_str={cmd_str}") log.debug(f" new waypt-rpos={waypt_pos} from {[cx, cy]}") dist = dist_manhattan(pos, pos_ref) log.info(f"dist={dist}") return dist assert( 286 == move_ship_by_waypoint(tests) ) log.setLevel( logging.INFO ) res = move_ship_by_waypoint(ins) log.info(f"Day 12 
b solution: {res}") ###Output _____no_output_____ ###Markdown Day 13: Shuttle search ###Code tests = """ 939 7,13,x,x,59,x,31,19 """.strip().split("\n") def find_shuttle(los): min_wait_tm, min_bus = [99_999_999, -1] start_tm = int(los[0]) shuttles = los[1].split(',') log.info(f"[find_shuttle] {start_tm} {shuttles}") for bus in shuttles: if bus == 'x': continue bus = int(bus) remainder = start_tm % bus if remainder == 0: wait_tm = 0 else: wait_tm = bus - remainder if wait_tm < min_wait_tm: min_wait_tm, min_bus = [wait_tm, bus] log.info(f"new_min: wait_tm={wait_tm}, 4bus={bus}, rmd={remainder}, res={wait_tm * bus}") if wait_tm == 0: break log.debug(f"wait_tm={wait_tm}, 4bus={bus}, rmd={remainder}, res={wait_tm * bus}") res = min_wait_tm * min_bus log.info(f"MIN: wait_tm={min_wait_tm}, 4bus={min_bus}, res={res}") return res find_shuttle(tests) ins = aoc.read_file_to_list('./in/day13.in') find_shuttle(ins) print("Day 13 b") def find_shuttle_offsetted(s): """Semi-optimized brute-force algorithm implementation.""" start_tm = int(time.time()) log.info(f"[find_shuttle_offsetted] {s}") offsets = {} values = {} for idx, val in enumerate(s.split(',')): if val == 'x': continue val = int(val) values[idx] =val # by offset offsets[val] = idx # by value srtvalues = list(reversed(sorted(list(values.values())))) max_iterator = max(srtvalues) max_iterator_offset = offsets[max_iterator] log.info(f"max_it={max_iterator}->ofst={max_iterator_offset}; srtvalues={srtvalues}, offsets={offsets}, values={values}") #values_len = len(srtvalues) iterator2 = srtvalues[1] iterator2_offset = offsets[iterator2] iterator3 = srtvalues[2] iterator3_offset = offsets[iterator3] print_mod_interval = 100_000_000_000 next_print_mod = print_mod_interval for t in map(lambda it: it * max_iterator -max_iterator_offset, range(1, 9_000_000_000_000_000//max_iterator)): if (t + iterator2_offset) % iterator2 != 0 \ or (t + iterator3_offset) % iterator3 != 0: continue # "FAST EXIT" this loop-item if t >= next_print_mod: #idx >= next_print_mod: log.info(f" calculating @{int(time.time())-start_tm:,}s ...: t#={t:,}") next_print_mod += print_mod_interval loop_ok = True for val in srtvalues[3:]: if (t + offsets[val]) % val != 0: loop_ok = False break if loop_ok: log.info(f"loop-OK for t#={t:,} @{int(time.time())-start_tm:,}s") return t raise Exception(f"No matching shuttle found after step t={t}") test = "7,13,x,x,59,x,31,19" assert( 1068781 == find_shuttle_offsetted(test) ) test = "17,x,13,19" assert( 3417 == find_shuttle_offsetted(test) ) test = "67,7,59,61" assert( 754018 == find_shuttle_offsetted(test) ) test = "67,x,7,59,61" assert( 779210 == find_shuttle_offsetted(test) ) test = "67,7,x,59,61" assert( 1261476 == find_shuttle_offsetted(test) ) test = "1789,37,47,1889" assert( 1202161486 == find_shuttle_offsetted(test) ) print(f"known: solution larger than {100000000000000:,} <= 100000000000000") def find_shuttle_offsetted6(s): """Semi-optimized brute-force algorithm implementation.""" start_tm = int(time.time()) log.info(f"[find_shuttle_offsetted] {s}") offsets = {} values = {} for idx, val in enumerate(s.split(',')): if val == 'x': continue val = int(val) values[idx] = val # by offset offsets[val] = idx # by value srtvalues = list(reversed(sorted(list(values.values())))) iterator1 = max(srtvalues) iterator1_offset = offsets[iterator1] log.info(f"max_it={iterator1}->ofst={iterator1_offset}; srtvalues={srtvalues}, offsets={offsets}, values={values}") #values_len = len(srtvalues) iterator2 = srtvalues[1] iterator2_offset = offsets[iterator2] 
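    # Note: the main loop below only visits multiples of the largest bus id (shifted by its
    # offset); the second- and third-largest moduli are pre-extracted here so they can be
    # checked first as a cheap reject filter before the remaining bus ids are tested.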
iterator3 = srtvalues[2] iterator3_offset = offsets[iterator3] iterator4 = srtvalues[3] iterator4_offset = offsets[iterator4] iterator5 = srtvalues[4] iterator5_offset = offsets[iterator5] iterator6 = srtvalues[5] iterator6_offset = offsets[iterator6] print_mod_interval = 100_000_000_000 next_print_mod = print_mod_interval for idx in range(1, 9_000_000_000_000_000//iterator1): t = idx * iterator1 - iterator1_offset if (t + iterator2_offset) % iterator2 != 0: continue # "FAST EXIT" this loop-item elif (t + iterator3_offset) % iterator3 != 0: continue # "FAST EXIT" this loop-item elif (t + iterator4_offset) % iterator4 != 0: continue # "FAST EXIT" this loop-item elif (t + iterator5_offset) % iterator5 != 0: continue # "FAST EXIT" this loop-item elif (t + iterator6_offset) % iterator6 != 0: continue # "FAST EXIT" this loop-item else: if t >= next_print_mod: #idx >= next_print_mod: log.info(f" calculating @{int(time.time())-start_tm:,}s ...: t#={t:,}; {t//(int(time.time())-start_tm):,} Ts/s") next_print_mod += print_mod_interval loop_ok = True for val in srtvalues[6:]: if (t + offsets[val]) % val != 0: loop_ok = False break if loop_ok: log.info(f"loop-OK for t#={t:,} @{int(time.time())-start_tm:,}s") return t raise Exception(f"No matching shuttle found after step t={t}") in13b = ins[1] #EXEC_RESOURCE_HOGS = True if EXEC_RESOURCE_HOGS: res = find_shuttle_offsetted6(in13b) print(f"Day 13 b solution={res}") # 2,448,348,017 Ts/s # 3,163,888,049 Ts/s explicit t calc else: print("Omitting day 13 b resource expensive solution") # Inspiration base: [- 2020 Day 13 Solutions - : adventofcode](https://www.reddit.com/r/adventofcode/comments/kc4njx/2020_day_13_solutions/) # One solution: [adventofcode2020/main.py at master · r0f1/adventofcode2020](https://github.com/r0f1/adventofcode2020/blob/master/day13/main.py) # Math Explanation: [Chinese Remainder Theorem | Brilliant Math & Science Wiki](https://brilliant.org/wiki/chinese-remainder-theorem/) # a wonderful walk-through: [aoc/README.md at master · mebeim/aoc](https://github.com/mebeim/aoc/blob/master/2020/README.md#day-13---shuttle-search) import numpy as np #from math import prod # python 3.8 ? 
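# Minimal self-contained sketch of the stepping/CRT idea, using the small test case
# "17,x,13,19" from above (expected t = 3417): we need t % 17 == 0, (t+2) % 13 == 0 and
# (t+3) % 19 == 0. Once a congruence holds, t is only advanced in steps equal to the
# product of the moduli fixed so far (pairwise coprime here), which keeps the earlier
# congruences intact. The helper names (_t, _step, ...) are illustrative only.
_example_buses = [(0, 17), (2, 13), (3, 19)]  # (offset, bus id) pairs
_t, _step = 0, 1
for _ofs, _bus in _example_buses:
    while (_t + _ofs) % _bus != 0:
        _t += _step
    _step *= _bus
assert _t == 3417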
def egcd(a, b): if a == 0: return (b, 0, 1) else: g, y, x = egcd(b % a, a) return (g, x - (b // a) * y, y) #def modinv(a, m): # g, x, y = egcd(a, m) # if g != 1: # raise Exception('modular inverse does not exist') # else: # return x % m def modinv(x, m): g, inv, y = egcd(x, m) assert g == 1, 'modular inverse does not exist' return inv % m def pow38(g,w,p): #log.info(f"pow38({g},{w},{p}) called") if w >= 0: return pow(g, w ,p) else: return modinv(g, p) #, -w, p with open('./in/day13.in') as f: lines = [x.strip() for x in f] arrival = int(lines[0]) buses = [(i, int(e)) for i, e in enumerate(lines[1].split(",")) if e.isdigit()] times = [t for _, t in buses] b = [e - (arrival % e) for e in times] res = np.min(b) * times[np.argmin(b)] print("Day 13 a solution:", res) # Python-3.7 ERROR: pow() 2nd argument cannot be negative when 3rd argument specified def crt(ns, bs): """Solve: Chinese Remainder "problem" using Chinese Remainder Theorem.""" # Chinese Remainder Theorem # https://brilliant.org/wiki/chinese-remainder-theorem/ #N = prod(ns) N = np.prod(ns).item() #x = sum(b * (N // n) * pow(N // n, -1, n) for b, n in zip(bs, ns)) x = sum(b * (N // n) * pow38(N // n, -1, n) for b, n in zip(bs, ns)) return x % N offsets = [time-idx for idx, time in buses] res = crt(times, offsets) print(f"Day 13 b solution: {res:,} <-- {res}") # cool solution from user Rtchaik; this is my preferred!: # at: [- 2020 Day 13 Solutions - : adventofcode](https://www.reddit.com/r/adventofcode/comments/kc4njx/2020_day_13_solutions/) from itertools import count def solve_day13_part2(buses): log.info(f"[solve_day13_part2] {buses}") start_idx, steps = 0, 1 log.info(f" initial startid={start_idx}, steps-delta={steps}") for bus, offset in sorted(buses.items(), reverse=True): for tstamp in count(start_idx, steps): if not (tstamp + offset) % bus: start_idx = tstamp steps *= bus log.info(f" new startid={start_idx}, steps-delta={steps}, tstamp={tstamp}") break log.info(f"found-OK: {tstamp}") return tstamp def prepare_buses(s): buses = {} for idx, val in enumerate(s.split(',')): if val == 'x': continue val = int(val) buses[val] = idx return buses test = "1789,37,47,1889" assert( 1202161486 == solve_day13_part2(prepare_buses(test)) ) #ins = aoc.read_file_to_list('./in/day13.in') res = solve_day13_part2(prepare_buses(ins[1])) log.info(f"Day 13 b solution: {res:,} <-- {res}") ###Output _____no_output_____ ###Markdown Day 14: Docking Data ###Code def solve_day14_a(los): log.info(f"[solve_day14_a] #-instructions={len(los)}") addrs = {} for line in los: if line.startswith('mask'): mask = line.split(' ')[-1] mask_or = mask.replace('0','X').replace('X','0') mask_and = mask.replace('1','X').replace('X','1') num_or = int(mask_or, 2) num_and = int(mask_and, 2) log.debug(f"mask={mask}") log.trace(f" mask_or ={mask_or }; num_or ={num_or}") log.trace(f" mask_and={mask_and}; num_and={num_and}") else: addr, val = mapl(int, filterl(lambda it: it != '', re.split(r'[^\d]', line))) new_val = (val | num_or) & num_and addrs[addr] = new_val log.debug(f"instruct={[addr, val]} new_val={new_val}") res = sum(addrs.values()) log.info(f"[solve_day14_a] value-sum={res} from num-addrs={len(addrs.keys())} addrs[#1-#3]={list(addrs.items())[0:3]}") return res tests = """ mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X mem[8] = 11 mem[7] = 101 mem[8] = 0 """.strip().split("\n") #log.setLevel(logging.DEBUG) solve_day14_a(tests) log.setLevel(logging.INFO) ins = aoc.read_file_to_list('./in/day14.in') solve_day14_a(ins) print("Day 14 b") import itertools # this function by 
reddit User semicolonator # @ [adventofcode2020/main.py at master · r0f1/adventofcode2020](https://github.com/r0f1/adventofcode2020/blob/master/day14/main.py) def get_possible_addrs(mask, addr): mask2 = "".join(v if m == "0" else m for m, v in zip(mask, f"{addr:036b}")) res = [] for t in itertools.product("01", repeat=mask2.count("X")): it = iter(t) res.append(int("".join(next(it) if c == "X" else c for c in mask2), 2)) return res def solve_day14_b(los): log.info(f"[solve_day14_b] #-instructions={len(los)}") addrs = {} for line in los: if line.startswith('mask'): mask = line.split(' ')[-1] mask_float = mask.replace('1','0') mask_or = mask.replace('X','0') #mask.replace('0','X').replace('X','0') num_or = int(mask_or, 2) log.debug(f"mask={mask}") log.trace(f" mask_float={mask_float}") log.trace(f" mask_or ={mask_or }; num_or ={num_or}") else: new_addrs = {} addr, val = mapl(int, filterl(lambda it: it != '', re.split(r'[^\d]', line))) #new_val = (val | num_or) & num_and # NOP?: If the bitmask bit is 0, the corresponding memory address bit is unchanged. # OR!: If the bitmask bit is 1, the corresponding memory address bit is overwritten with 1. new_addr = addr | num_or log.trace(f" addr={ addr:>8b} ; := { addr}") #log.trace(f" num_or={num_or:>8b} ; := {num_or}") ##log.trace(f" addr-ORd={new_addr:>8b}") log.trace(f" new-addr={new_addr:>8b} ; := {new_addr}") for addr2 in get_possible_addrs(mask, addr): addrs[addr2] = val res = sum(addrs.values()) log.info(f"[solve_day14_b] value-sum={res} from addrs-#={len(addrs.keys())} addrs[#1-#3]={list(addrs.items())[0:3]}") log.trace(f" {addrs}") return res tests = """ mask = 000000000000000000000000000000X1001X mem[42] = 100 mask = 00000000000000000000000000000000X0XX mem[26] = 1 """.strip().split("\n") #log.setLevel(aoc.LOGLEVEL_TRACE) # logging.DEBUG log.setLevel(logging.INFO) solve_day14_b(tests) solve_day14_b(ins) ###Output _____no_output_____ ###Markdown Day 15: Rambunctious Recitation ###Code def solve15a(l, steps=10): log.debug(f"[solve15a(l)] called with l={l}") seen = {} last_spoken = None for idx, n in enumerate(l): last_spoken = n if n in seen: seen[n].append(idx+1) else: seen[n] = [idx+1] log.debug(f"idx#{idx+1}, n={n}, * seen[n]={seen[n]}") #log.trace(f" seen={seen}") for idx in range(idx+2, steps+len(l)-idx): #log.debug(f"idx#{idx}, last_spoken={last_spoken}, seen-len={len(seen)}") #log.trace(f" seen={seen}") if len(seen[last_spoken])==1: n = 0 else: n = seen[last_spoken][-1] - seen[last_spoken][-2] if n in seen: seen[n].append(idx) else: seen[n] = [idx] log.trace(f" new n={n}; seen={seen}") log.debug(f"idx#{idx}, n={n}, last_spoken={last_spoken}, seen-len={len(seen)}") last_spoken = n log.info(f"[solve15a] idx#{idx}, n={n}, last_spoken={last_spoken}, seen-len={len(seen)}") return n tests = "0,3,6" #log.setLevel(aoc.LOGLEVEL_TRACE) #log.setLevel(logging.DEBUG) log.setLevel(logging.INFO) res = solve15a(mapl(int, tests.split(',')), steps=10) # 0*, 3*, 6*, 0, 3, 3, 1, 0, 4, 0 log.info(f"testing result={0}") res = solve15a([1, 3, 2], steps=2020) assert( 1 == res ) res = solve15a([2, 1, 3], steps=2020) assert( 10 == res ) res = solve15a([1, 2, 3], steps=2020) assert( 27 == res ) res = solve15a([2, 3, 1], steps=2020) assert( 78 == res ) res = solve15a([3, 2, 1], steps=2020) assert( 438 == res ) res = solve15a([3, 1, 2], steps=2020) assert( 1836 == res ) ins = aoc.read_file_to_str('./in/day15.in').strip().split(',') ins = mapl(int, ins) res = solve15a(ins, steps=2020) #log.setLevel(logging.DEBUG) log.info(f"Day 15 a solution: {res} from {ins}") def 
solve15b(l, steps=10): log.info(f"[solve15b(l)] called with list-len={len(l)}, steps={steps:,}") seen = {} last_spoken = None for idx, n in enumerate(l): last_spoken = n if n in seen: #seen[n].append(idx+1) seen[n] = [seen[n][-1], idx+1] else: seen[n] = [idx+1] #log.debug(f"idx#{idx+1}, n={n}, * seen[n]={seen[n]}") seen_lens = {} for n in seen: seen_lens[n] = len(seen[n]) for idx in range(idx+2, steps+len(l)-idx): if idx % 10_000_000 == 0 and idx < steps: log.info(f" calculating, @ idx={idx:,}") if seen_lens[last_spoken] == 1: #len(seen[last_spoken]) == 1: n = 0 else: n = seen[last_spoken][-1] - seen[last_spoken][-2] if n in seen: #seen[n].append(idx) seen[n] = [seen[n][-1], idx] seen_lens[n] = 2 else: seen[n] = [idx] seen_lens[n] = 1 #log.debug(f"idx#{idx}, n={n}, last_spoken={last_spoken}, seen-len={len(seen)}") last_spoken = n log.info(f"[solve15b] idx#{idx:,}, n={n}, last_spoken={last_spoken}, seen-len={len(seen)}") return n # Part a soltions still valid ! res = solve15b([1, 3, 2], steps=2020) assert( 1 == res ) res = solve15b([2, 1, 3], steps=2020) assert( 10 == res ) res = solve15b([1, 2, 3], steps=2020) assert( 27 == res ) res = solve15b([2, 3, 1], steps=2020) assert( 78 == res ) res = solve15b([3, 2, 1], steps=2020) assert( 438 == res ) res = solve15b([3, 1, 2], steps=2020) assert( 1836 == res ) #nsteps = 30000000 nsteps = 30_000_000 def run15b(l, steps, cond): if cond is not None and not EXEC_RESOURCE_HOGS: # omit resource intensive tests return start_tm = int(time.time()) res = solve15b(l, steps=nsteps) if cond is not None: assert( cond == res ) took_tm = int(time.time()) - start_tm log.info(f"result={res} took {took_tm}s") # Given 0,3,6, the 30000000th number spoken is 175594. run15b([0, 3, 6], nsteps, 175594) # Given 1,3,2, the 30000000th number spoken is 2578. run15b([1, 3, 2], nsteps, 2578) # Given 2,1,3, the 30000000th number spoken is 3544142. run15b([2, 1, 3], nsteps, 3544142) # Given 1,2,3, the 30000000th number spoken is 261214. run15b([1, 2, 3], nsteps, 261214) # Given 2,3,1, the 30000000th number spoken is 6895259. run15b([2, 3, 1], nsteps, 6895259) # Given 3,2,1, the 30000000th number spoken is 18. run15b([3, 2, 1], nsteps, 18) # Given 3,1,2, the 30000000th number spoken is 362. 
run15b([3, 1, 2], nsteps, 362) if EXEC_RESOURCE_HOGS: log.info("Day 15 b solution:") run15b(ins, nsteps, None) else: log.info("Day 15 b solution: [[already solved]] - omitting") ### Day 16: Ticket Translation tests = """ class: 1-3 or 5-7 row: 6-11 or 33-44 seat: 13-40 or 45-50 your ticket: 7,1,14 nearby tickets: 7,3,47 40,4,50 55,2,20 38,6,12 """.strip() def parse_day16_input(s): los = s.split("\n") md = 'fields' myticket = [] other_tickets = [] fields = {} for line in los: if line == '': continue if line == 'your ticket:': md = 'my_ticket' continue elif line == 'nearby tickets:': md = 'other_tickets' continue if md == 'fields': fld, vals = line.split(':') avals = mapl(lambda it: it.strip() , vals.split(' or ')) for idx, aval in enumerate(avals): aval = mapl(int, aval.split('-')) avals[idx] = aval fields[fld] = avals elif md == 'my_ticket' or md == 'other_tickets': this_ticket = mapl(int, line.split(',')) if md == 'my_ticket': my_ticket = this_ticket else: other_tickets.append(this_ticket) return {'fields':fields, 'my_ticket':my_ticket, 'other_tickets':other_tickets} def solve16a(ticket_info): #log.info(f"ticket_info={ticket_info}") valid_nums = [] for field in ticket_info['fields'].keys(): for entry in ticket_info['fields'][field]: min, max = entry for n in range(min, max+1): valid_nums.append(n) valid_nums = sorted(set(valid_nums)) #log.info(f"valid_nums={valid_nums}") invalid_nums = [] for this_ticket in ticket_info['other_tickets']: for n in this_ticket: if not n in valid_nums: invalid_nums.append(n) ticket_error_rate = sum(invalid_nums) log.info(f"ticket_error_rate={ticket_error_rate} invalid_nums={invalid_nums}") return ticket_error_rate ticket_info = parse_day16_input(tests) solve16a(ticket_info) ins = aoc.read_file_to_str('./in/day16.in') ticket_info = parse_day16_input(ins) solve16a(ticket_info) print("Day 16 b") tests2 = """ class: 0-1 or 4-19 row: 0-5 or 8-19 seat: 0-13 or 16-19 your ticket: 11,12,13 nearby tickets: 3,9,18 15,1,5 5,14,9 """.strip() def solve16b(ticket_info): #log.info(f"ticket_info={ticket_info}") fields = ticket_info['fields'] my_ticket = ticket_info['my_ticket'] other_tickets = ticket_info['other_tickets'] all_tickets = other_tickets.copy() all_tickets.append(my_ticket) log.info(f"[solve16b] start all_tickets_len={len(all_tickets)}") all_valid_nums = [] valid_nums = {} for field in fields.keys(): valid_nums[field] = [] for entry in fields[field]: min, max = entry for n in range(min, max+1): valid_nums[field].append(n) all_valid_nums.append(n) for field in valid_nums.keys(): valid_nums[field] = sorted(set(valid_nums[field])) all_valid_nums = sorted(set(all_valid_nums)) log.trace(f"valid_nums={valid_nums}") invalid_tickets = [] for this_ticket in all_tickets: for n in this_ticket: if not n in all_valid_nums: invalid_tickets.append(this_ticket) break for this_ticket in invalid_tickets: log.debug(f"removing invalid ticket {this_ticket}") other_tickets.remove(this_ticket) all_tickets.remove(this_ticket) log.info(f"[solve16b] weedd all_tickets_len={len(all_tickets)}") num_fields = len(ticket_info['fields']) log.info(f"[solve16b] num_fields={num_fields}") assert( len(my_ticket) == num_fields) idx_maybe_field = {} for idx in range(num_fields): idx_maybe_field[idx] = [] ticket_nums_at_idx = mapl(lambda it: it[idx], all_tickets) for field in fields: if set(ticket_nums_at_idx).issubset(set(valid_nums[field])): log.debug(f"idx={idx} field={field} OK for values={ticket_nums_at_idx}") idx_maybe_field[idx].append(field) idx_map = {} for i in range(1, 1001): lens = 
mapl(lambda it: len(it[1]), idx_maybe_field.items()) # index-order is implcit log.trace(lens) found_this_loop = [] for idx, l in enumerate(lens): if l == 0: continue #if not idx in idx_maybe_field.keys(): # already found # continue if l == 1: fieldnm = idx_maybe_field[idx][0] found_this_loop.append(fieldnm) idx_map[fieldnm] = idx idx_maybe_field[idx] = [] log.debug(f"loop {i} idx_map={idx_map}") for f in found_this_loop: for k in idx_maybe_field.keys(): if f in idx_maybe_field[k]: idx_maybe_field[k].remove(f) if len(idx_map.keys()) >= num_fields: break if i >= 1000: raise Exception("FAILSAFE") return idx_map ticket_info = parse_day16_input(tests) solve16b(ticket_info) ticket_info = parse_day16_input(tests2) solve16b(ticket_info) ticket_info = parse_day16_input(ins) idx_map = solve16b(ticket_info) my_ticket = ticket_info['my_ticket'] f = 1 for k,v in idx_map.items(): if k.startswith('departure'): log.info(f"field-idx={[k, v]} myticket-val={my_ticket[v]}") f *= my_ticket[v] log.info(f"Day 16 b solution: {f}") # not 930240 ###Output _____no_output_____ ###Markdown Day 17: Conway Cubes ###Code tests = """ .#. ..# ### """.strip() tests = mapl(list, tests.split("\n")) log.info(tests) # solution TODO class Grid3d: """Grid of 3d-cells, discrete 3d space, each cell represents a cube.""" def __init__(self): log.debug("[Grid3d] constructor.") def initialize(self, pattern): self.pattern0 = pattern self.points = [] z = 0 for y in range(len(pattern)): for x in range(len(pattern[0])): if pattern[y][x] == '#': self.points.append( (x, y, z) ) def report(self): return f"#pts={len(self.points)} {self.points}" def get_layer(self, z): return filterl(lambda it: z == it[2], self.points) def get_zrange(self): zs = mapl(lambda it: it[2], self.points) return range(min(zs), max(zs)-min(zs)+1) def get_layer_repr(self, z): xs = mapl(lambda it: it[0], self.points) ys = mapl(lambda it: it[1], self.points) extent2d = [[min(xs), max(xs)], [min(ys), max(ys)]] dim_x, dim_y = [ max(xs) - min(xs) + 1, max(ys) - min(ys) + 1 ] x_ofst = -min(xs) y_ofst = -min(ys) rows = [] for y in range(0, max(ys)+y_ofst+1): s = '' for x in range(0, max(xs)+x_ofst+1): if (x-x_ofst, y-y_ofst, z) in self.points: s += '#' else: s += '.' 
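            # one text row is emitted per y value; '#' marks an active cube, '.' an inactive one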
rows.append(s) return f"grid-lvl@z={z} dims={[dim_x, dim_y]} extents={self.get_extents()} x-ofst={-x_ofst} y-ofst={-y_ofst}\n" +str.join("\n", rows) def get_num_neighbors(self, pt): xp, yp, zp = pt num_neighbors = 0 for z in range(zp-1, zp+2): for y in range(yp-1, yp+2): for x in range(xp-1, xp+2): if (x, y, z) == pt: # identity, given point itself continue if (x, y, z) in self.points: num_neighbors += 1 return num_neighbors def get_extents(self): xs = mapl(lambda it: it[0], self.points) ys = mapl(lambda it: it[1], self.points) zs = mapl(lambda it: it[2], self.points) return [[min(xs), max(xs)], [min(ys), max(ys)], [min(zs), max(zs)]] class ConwayCubeGrid(Grid3d): """Conway cellular automaton in 3d, inheriting from class Grid3d.""" def __init__(self): log.debug("[ConwayCubeGrid] constructor.") self.t = 0 def iterate(self, steps=1): for i in range(steps): exts = self.get_extents() new_pts = copy.deepcopy(self.points) for x in range(exts[0][0]-1, exts[0][1]+2): for y in range(exts[1][0]-1, exts[1][1]+2): #if x == 0: # log.trace(f"iter-row {y}") for z in range(exts[2][0]-1, exts[2][1]+2): pt = (x, y, z) is_active = pt in self.points #if is_active: # log.info(f"iterate: pt={pt} was active") nn = self.get_num_neighbors(pt) if is_active: if not (nn in [2, 3]): #log.trace(f"iter-remove {pt}") new_pts.remove( pt ) else: if nn == 3: #log.trace(f"iter-append {pt}") new_pts.append( pt ) self.points = new_pts self.t += 1 grid = Grid3d() log.info(f"grid={grid}") grid.initialize(tests) log.info(f"grid rpt:\n{grid.report()}") assert 1 == grid.get_num_neighbors( (0,0,0) ) assert 2 == grid.get_num_neighbors( (2,0,0) ) assert 5 == grid.get_num_neighbors( (1,1,0) ) assert 0 == grid.get_num_neighbors( (-2,-1,0) ) grid.get_extents() grid = ConwayCubeGrid() log.info(f"grid={grid}") grid.initialize(tests) #log.info(f"grid rpt:\n{grid.report()}") assert 1 == grid.get_num_neighbors( (0,0,0) ) assert 2 == grid.get_num_neighbors( (2,0,0) ) assert 5 == grid.get_num_neighbors( (1,1,0) ) assert 0 == grid.get_num_neighbors( (-2,-1,0) ) grid.get_extents() log.info(f"grid @ t={grid.t} extents={grid.get_extents()} numpts={len(grid.points)}") log.info(grid.get_layer_repr(0)) #res = grid.get_layer(0) for i in range(1, 7): grid.iterate() log.info(f"Iterated: grid @ t={grid.t} extents={grid.get_extents()} numpts={len(grid.points)}") for z in grid.get_zrange(): ##log.info(f"grid @ t={grid.t} pts@z=0 {res}") #log.info(grid.get_layer_repr(z)) True grid = ConwayCubeGrid() grid.initialize(tests) grid.iterate(steps=6) assert( 6 == grid.t ) assert( 112 == len(grid.points) ) ins = aoc.read_file_to_str('in/day17.in').strip() log.info(f"pattern=\n{ins}") ins = mapl(list, ins.split("\n")) grid = ConwayCubeGrid() grid.initialize(ins) grid.iterate(steps=6) assert( 6 == grid.t ) res = len(grid.points) log.info(f"Day 18 a solution: num points after 6 iterations: {res}") class Grid4d: """Grid of 4d-cells, each cell represents a 4d-cube, a hypercube, a tesseract.""" def __init__(self): log.debug("[Grid4d] constructor.") def initialize(self, pattern): self.pattern0 = pattern self.points = [] z, w = 0, 0 for y in range(len(pattern)): for x in range(len(pattern[0])): if pattern[y][x] == '#': self.points.append( (x, y, z, w) ) def report(self): return f"#pts={len(self.points)} {self.points}" def get_layer(self, z, w): return filterl(lambda it: z == it[2] and w == it[3], self.points) def get_zrange(self): zs = mapl(lambda it: it[2], self.points) return range(min(zs), max(zs)+1) def get_wrange(self): ws = mapl(lambda it: it[3], self.points) return 
range(min(ws), max(ws)+1) def get_layer_repr(self, z, w): xs = mapl(lambda it: it[0], self.points) ys = mapl(lambda it: it[1], self.points) extent2d = [[min(xs), max(xs)], [min(ys), max(ys)]] dim_x, dim_y = [ max(xs) - min(xs) + 1, max(ys) - min(ys) + 1 ] x_ofst = -min(xs) y_ofst = -min(ys) rows = [] for y in range(0, max(ys)+y_ofst+1): s = '' for x in range(0, max(xs)+x_ofst+1): if (x-x_ofst, y-y_ofst, z, w) in self.points: s += '#' else: s += '.' rows.append(s) return f"grid-lvl@[z,w]={[z,w]} dims={[dim_x, dim_y]} extents={self.get_extents()}" \ + f"x-ofst={-x_ofst} y-ofst={-y_ofst}\n" +str.join("\n", rows) def get_num_neighbors(self, pt): xp, yp, zp, wp = pt num_neighbors = 0 for w in range(wp-1, wp+2): for z in range(zp-1, zp+2): for y in range(yp-1, yp+2): for x in range(xp-1, xp+2): if (x, y, z, w) == pt: # identity, given point itself continue if (x, y, z, w) in self.points: num_neighbors += 1 return num_neighbors def get_extents(self): xs = mapl(lambda it: it[0], self.points) ys = mapl(lambda it: it[1], self.points) zs = mapl(lambda it: it[2], self.points) ws = mapl(lambda it: it[3], self.points) return [[min(xs), max(xs)], [min(ys), max(ys)], [min(zs), max(zs)], [min(ws), max(ws)]] class ConwayTesseractGrid(Grid4d): """Conway cellular automaton in 4d, inheriting from class Grid4d.""" def __init__(self): log.debug("[ConwayTesseractGrid] constructor.") self.t = 0 def iterate(self, steps=1): for i in range(steps): exts = self.get_extents() new_pts = copy.deepcopy(self.points) for x in range(exts[0][0]-1, exts[0][1]+2): for y in range(exts[1][0]-1, exts[1][1]+2): #if x == 0: # log.trace(f"iter-row {y}") for w in range(exts[3][0]-1, exts[3][1]+2): for z in range(exts[2][0]-1, exts[2][1]+2): pt = (x, y, z, w) is_active = pt in self.points #if is_active: # log.info(f"iterate: pt={pt} was active") nn = self.get_num_neighbors(pt) if is_active: if not (nn in [2, 3]): #log.trace(f"iter-remove {pt}") new_pts.remove( pt ) else: if nn == 3: #log.trace(f"iter-append {pt}") new_pts.append( pt ) self.points = new_pts self.t += 1 grid = ConwayTesseractGrid() log.info(f"grid={grid}") grid.initialize(tests) #log.info(f"grid rpt:\n{grid.report()}") assert 1 == grid.get_num_neighbors( (0,0,0,0) ) assert 2 == grid.get_num_neighbors( (2,0,0,0) ) assert 5 == grid.get_num_neighbors( (1,1,0,0) ) assert 0 == grid.get_num_neighbors( (-2,-1,0,0) ) grid.get_extents() log.info(f"grid @ t={grid.t} extents={grid.get_extents()} numpts={len(grid.points)}") log.info(grid.get_layer_repr(0, 0)) #res = grid.get_layer(0) grid.iterate() log.info(grid.get_layer_repr(-1, -1)) log.info(grid.get_layer_repr(0, 0)) log.info(grid.get_layer_repr(1, 1)) grid.iterate() log.info(grid.get_layer_repr(-2, -2)) log.info(grid.get_layer_repr(0, 0)) log.info(grid.get_layer_repr(2, 0)) grid.iterate(steps=4) assert( 6 == grid.t ) assert( 848 == len(grid.points) ) if EXEC_RESOURCE_HOGS: # took 226 seconds on my notebook grid = ConwayTesseractGrid() grid.initialize(ins) start_tm = int(time.time()) for i in range(1, 7): grid.iterate(steps=1) npts = len(grid.points) took_tm = int(time.time()) - start_tm log.info(f"after grid iteration {i}: num-points={npts:,} after {took_tm}s") assert( 6 == grid.t ) res = len(grid.points) log.info(f"Day 18 b solution: num points after 6 iterations: {res}") ###Output _____no_output_____ ###Markdown Day 18: : Operation Order ###Code # This definitely is/would be LISP territory ! 
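# Quick self-contained illustration of the lookaround split used in parse_equation18a
# below: the pattern splits before and after every operator/parenthesis, after which
# blanks are stripped and empty tokens dropped (ints are converted in a later step).
import re
_toks = re.split(r'(?=[\+\-\*\/\(\)])|(?<=[\+\-\*\/\(\)])', "1 + (2 * 3)")
_toks = [t.strip() for t in _toks if t.strip() != '']
assert _toks == ['1', '+', '(', '2', '*', '3', ')']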
def parse_equation18a(s): """Parse / tokenize a single "equation".""" l = re.split(r'(?=[\+\-\*\/\(\)])|(?<=[\+\-\*\/\(\)])', s) l = filterl(lambda it: it != '', mapl(lambda it: it.strip(), l)) l = mapl(lambda it: int(it) if not (it in ['+','-','*','/','(',')']) else it, l) log.debug(f"[parse_equation18a] returns={l}") return l def rindex_list(elem, l): """Return the index of the rightmost element in list.""" return len(l) - list(reversed(l)).index(elem) - 1 def find_matching_close_paren_idx(lst): """Assumes input list starting with '(', finds matching ')' and returns it's index. If not found, returns -1.""" tgtcount = 0 tgtidx = -1 for idx in range(len(lst)): if lst[idx] == ')': tgtcount -= 1 elif lst[idx] == '(': tgtcount += 1 if tgtcount < 1: tgtidx = idx break return tgtidx def calc18a(l): log.debug(f"[calc18a] l={l}") rest = l ict = 0 while( len(rest)>1 ): ict += 1 lval, rest = [rest[0], rest[1:]] log.trace(f" in [lval, rest]={[lval, rest]} rest-len={len(rest)}") if lval == '(': rest = [lval] + rest # re-assemble ridx = find_matching_close_paren_idx(rest) sublst = rest[1:ridx] # last/rightmost index of closing parens new_rest = rest[ridx+1:] log.trace(f"calcparen lval={lval} sublst={sublst} new-rest={new_rest} from={rest}") lval = calc18a(sublst.copy()) rest = [lval] + new_rest else: op, rest = [rest[0], rest[1:]] rval = rest[0] log.trace(f" op-mode {[op, rest]} lval={lval} op={op} rval={rval} all-rest={rest}") if rval == '(': idx = find_matching_close_paren_idx(rest) sublst = rest[1:idx] new_rest = rest[idx+1:] log.trace(f"calcparen (lval={lval}) rval sublst={sublst} new-rest={new_rest} from {rest}") rval = calc18a(sublst.copy()) rest = [op] + new_rest log.trace(f" calcparen rval={rval} sublst={sublst} new-rest={new_rest} from {rest}") if op == '+': lval += rval rest = [lval] + rest[1:] elif op == '*': lval *= rval rest = [lval] + rest[1:] else: raise Exception(f"unhandled operator {op}") log.trace(f" loop-end: lval={lval}; new-list={rest}") if len(rest)>1 and rest[1] == ')': # found result of parns in val log.debug(" next is ')' group closing, break") break log.debug(f" returning val={lval}; from={l}") return lval #log.setLevel(aoc.LOGLEVEL_TRACE) #log.setLevel(logging.INFO) test = """ 1 + 2 * 3 + 4 * 5 + 6 """.strip() testlst = parse_equation18a(test) res = calc18a(testlst) print("test result", res) test = """ 1 + (2 * 3) + (4 * (5 + 6)) """.strip() assert( 51 == calc18a(parse_equation18a(test))) test = """ 2 * 3 + (4 * 5) """.strip() res = calc18a(parse_equation18a(test)) assert( 26 == calc18a(parse_equation18a(test))) test = """ 5 + (8 * 3 + 9 + 3 * 4 * 3) """.strip() expectd = 437 assert( expectd == calc18a(parse_equation18a(test))) test = """ 5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4)) """.strip() expectd = 12240 assert( expectd == calc18a(parse_equation18a(test))) test = """ (1 + 2) """.strip() expectd = 3 assert( expectd == calc18a(parse_equation18a(test))) test = """ ((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2 """.strip() expectd = 13632 assert( expectd == calc18a(parse_equation18a(test))) ins = aoc.read_file_to_list('./in/day18.in') csum = 0 for eqstr in ins: csum += calc18a(parse_equation18a(eqstr)) log.info(f"Day 18 a solution: equations cumsum={csum}") print("Day 18 b") def calc18b(l): log.debug(f"[calc18b] l={l}") rest = l ict = 0 while( len(rest)>1 ): ict += 1 lval, rest = [rest[0], rest[1:]] log.trace(f" >in [lval, rest]={[lval, rest]} rest-len={len(rest)}") if lval == '(': rest = [lval] + rest # re-assemble ridx = find_matching_close_paren_idx(rest) sublst = 
rest[1:ridx] # last/rightmost index of closing parens new_rest = rest[ridx+1:] log.trace(f"calcparen lval={lval} sublst={sublst} new-rest={new_rest} from={rest}") lval = calc18b(sublst.copy()) rest = [lval] + new_rest log.trace(f" cprv new-rest={rest}") else: op, rest = [rest[0], rest[1:]] rval = rest[0] log.trace(f" op-mode {[op, rest]} lval={lval} op={op} rval={rval} all-rest={rest}") if rval == '(': idx = find_matching_close_paren_idx(rest) sublst = rest[1:idx] new_rest = rest[idx+1:] log.trace(f"calcparen (lval={lval}) rval sublst={sublst} new-rest={new_rest} from {rest}") rval = calc18b(sublst.copy()) rest = [rval] + new_rest log.trace(f" calcparen rval={rval} sublst={sublst} new-rest={new_rest} from {rest}") if op == '+': lval += rval rest = [lval] + rest[1:] log.debug(f" (+)=> rval={rval}, lval={lval}, new rest={rest}") elif op == '*': # postpone multiplication ! Rather, recurse fun-call for r-value log.debug(f" PROD in [lval, op, rest]={[lval, op, rest]} rest-len={len(rest)}") if len(rest) > 1: rval = calc18b(rest.copy()) lval *= rval rest = [] log.debug(f" (*)=> rval={rval}, lval={lval}, new rest={rest}") else: raise Exception(f"unhandled operator {op}") log.trace(f" loop-end: lval={lval}; new-list={rest}") if len(rest)>1 and rest[1] == ')': # found result of parens in val log.debug(" next is ')' group closing, break") break log.debug(f"[calc18b] RC={lval} from {l}") return lval test = """ 1 + 2 * 3 + 4 * 5 + 6 """.strip() testlst = parse_equation18a(test) res = calc18b(testlst) print("test result", res) test = """ 1 + (2 * 3) + (4 * (5 + 6)) """.strip() expectd = 51 res = calc18b(parse_equation18a(test)) assert( expectd == res) log.info(f"test result={res}") test = """ 2 * 3 + (4 * 5) """.strip() expectd = 46 res = calc18b(parse_equation18a(test)) assert( expectd == res) log.info(f"test result={res}") test = """ 5 + (8 * 3 + 9 + 3 * 4 * 3) """.strip() expectd = 1445 res = calc18b(parse_equation18a(test)) assert( expectd == res) log.info(f"test result={res}") test = """ 5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4)) """.strip() expectd = 669060 res = calc18b(parse_equation18a(test)) assert( expectd == res) log.info(f"test result={res}") test = """ ((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2 """.strip() expectd = 23340 res = calc18b(parse_equation18a(test)) assert( expectd == res) log.info(f"test result={res}") ins = aoc.read_file_to_list('./in/day18.in') csum = 0 for eqstr in ins: csum += calc18b(parse_equation18a(eqstr)) log.info(f"Day 18 b solution: equations cumsum={csum}") ###Output _____no_output_____ ###Markdown Day 19: Monster MessagesThe most simple/elegant would be to create a grammar for this problem and parse the rules (lexx/yacc) etc.But as a shortcut today I did a fallback on only using/constructing text regular expressions. 
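For part 2 the two recursive rules are still folded into a plain regex: rule `8: 42 | 42 8` is just one or more repetitions of rule 42, i.e. `(<rule42>)+`, while rule `11: 42 31 | 42 11 31` needs the same repeat count on both sides and is approximated below by an alternation of n copies of rule 42 followed by n copies of rule 31, for n from 1 to 5, which turns out to be enough for the given messages.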
###Code def parse_day19_rules(s): rules = s.split("\n") rules = mapl(lambda it: it.split(': '), rules) return rules def parse_day19(s): rules, samples = s.strip().split("\n\n") rules = parse_day19_rules(rules) samples = samples.split("\n") log.debug(f"parsed:\n rules=\n{rules}\n samples=\n{samples}") return rules, samples def solve_day19(rules, max_depth=30, part=1): log.debug(f"[solve19b] started") pd = {} rules_keys = [] for rule in rules: rule_num, rule_expr = rule rules_keys.append(rule_num) if rule_expr.startswith('"') and rule_expr.endswith(''): log.debug(f" added key={rule_num} rule={rule_expr}") pd[rule_num] = rule_expr.replace('"', '') missing_rules_keys = rules_keys.copy() for k in pd.keys(): missing_rules_keys.remove(k) for i in range(1, max_depth+2): log.debug(f" loop#={i}") found_new_key = False for rule in rules: rule_num, rule_expr = rule if part == 2: # apply part 2 conditions: if rule_num == '8': rule_expr = '42 | 42 8' elif rule_num == '11': rule_expr = '42 31 | 42 11 31' if not rule_num in pd.keys(): ree = rule_expr.split(' ') rules_contained = set(filterl(lambda it: it != '|', ree)) log.trace(f"unparsed rule {rule}, rules_contained={rules_contained}") if set(rules_contained).issubset(pd.keys()): log.trace(f"can add {ree}") r = str.join('', mapl(lambda it: pd[it] if it in pd.keys() else it, ree)) pd[rule_num] = '(' + r + ')' found_new_key = True missing_rules_keys.remove(rule_num) log.debug(f" added key={rule_num} rule={r}") else: log.trace(f"can't add {ree}") if not found_new_key: if not '0' in pd.keys(): log.debug(f"rule0 not found after {i} loops, rules-found={sorted(pd.keys())}") log.debug(f" rules_missing={sorted(missing_rules_keys)}") if part == 2: log.debug(f" rules[42]={pd['42']}") log.debug(f" rules[31]={pd['31']}") # THIS is the re secret sauce expressing ca. 
conditions: # > rule_expr = '42 | 42 8' :: 1..n of pattern 42 pd['8'] = f"({pd['42']})+" # > rule_expr = '42 31 | 42 11 31' :: 1..n of pattern 42 followd by 31 #pd['11'] = f"({pd['42']})+({pd['31']})+" # the first and second + repeat count have to be same ors = [] for i in range(1, 6): pl = pd['42'] pr = pd['31'] ors.append(pl*i+pr*i) pd['11'] = f"({str.join('|', ors)})" # > 8 11 pd['0'] = f"{pd['8']}{pd['11']}" log.debug(f" rules[8]={pd['8']}") log.debug(f" len(rules[11])={len(pd['11'])}") log.debug(f" len(rules[0])={len(pd['0'])}") break log.debug(f"[solve19b] parsed-dict={pd}") return pd tests = """ 0: 1 2 1: "a" 2: 1 3 | 3 1 3: "b" """.strip() log.setLevel(logging.INFO) rules = parse_day19_rules(tests) log.info(f"test: parsed rules\n{rules}") pd = solve_day19(rules) rule0 = pd['0'] assert( re.match(rule0, "aab") ) assert( re.match(rule0, "aba") ) assert( not re.match(rule0, "bba") ) tests = """ 0: 4 1 5 1: 2 3 | 3 2 2: 4 4 | 5 5 3: 4 5 | 5 4 4: "a" 5: "b" """.strip() rules = parse_day19_rules(tests) log.info(f"test: parsed rules\n{rules}") pd = solve_day19(rules) log.info(f"tests parse-dir={pd}") rule0='^' + pd['0'] + '$' samples = "aaaabb,aaabab,abbabb,abbbab,aabaab,aabbbb,abaaab,ababbb".split(',') for sample in samples: assert( re.match(rule0, sample) ) assert( not re.match(rule0, "baaabb") ) assert( not re.match(rule0, "ababba") ) tests = """ 0: 4 1 5 1: 2 3 | 3 2 2: 4 4 | 5 5 3: 4 5 | 5 4 4: "a" 5: "b" ababbb bababa abbbab aaabbb aaaabbb """.strip() #ababbb and abbbab match #bababa, aaabbb, and aaaabbb rules, samples = parse_day19(tests) pd = solve_day19(rules) log.info(f"rule0={pd['0']}") rule0='^' + pd['0'] + '$' smatching = 0 for sample in samples: if re.match(rule0, sample): #log.info(f"{sample} matches {rule0}") smatching +=1 else: #log.info(f"{sample} NOmatch {rule0}") True log.info(f"matching-samples-#={smatching}") assert ( smatching == 2) ins = aoc.read_file_to_str('./in/day19.in') rules, samples = parse_day19(ins) pd = solve_day19(rules) log.debug(f"rule0={pd['0']}") rule0='^' + pd['0'] + '$' log.info(f"parsed-rules, len(rule0)={len(rule0)}") smatching = 0 for sample in samples: if re.match(rule0, sample): smatching +=1 #log.info(f"{sample} matches {rule0}") #else: # log.info(f"{sample} NOmatch {rule0}") log.info(f"matching-samples-#={smatching}") print("Day 19 b") tests = """ 42: 9 14 | 10 1 9: 14 27 | 1 26 10: 23 14 | 28 1 1: "a" 11: 42 31 5: 1 14 | 15 1 19: 14 1 | 14 14 12: 24 14 | 19 1 16: 15 1 | 14 14 31: 14 17 | 1 13 6: 14 14 | 1 14 2: 1 24 | 14 4 0: 8 11 13: 14 3 | 1 12 15: 1 | 14 17: 14 2 | 1 7 23: 25 1 | 22 14 28: 16 1 4: 1 1 20: 14 14 | 1 15 3: 5 14 | 16 1 27: 1 6 | 14 18 14: "b" 21: 14 1 | 1 14 25: 1 1 | 1 14 22: 14 14 8: 42 26: 14 22 | 1 20 18: 15 15 7: 14 5 | 1 21 24: 14 1 abbbbbabbbaaaababbaabbbbabababbbabbbbbbabaaaa bbabbbbaabaabba babbbbaabbbbbabbbbbbaabaaabaaa aaabbbbbbaaaabaababaabababbabaaabbababababaaa bbbbbbbaaaabbbbaaabbabaaa bbbababbbbaaaaaaaabbababaaababaabab ababaaaaaabaaab ababaaaaabbbaba baabbaaaabbaaaababbaababb abbbbabbbbaaaababbbbbbaaaababb aaaaabbaabaaaaababaa aaaabbaaaabbaaa aaaabbaabbaaaaaaabbbabbbaaabbaabaaa babaaabbbaaabaababbaabababaaab aabbbbbaabbbaaaaaabbbbbababaaaaabbaaabba """.strip() log.setLevel(logging.INFO) rules, samples = parse_day19(tests) max_samples_len = max(mapl(len, samples)) log.debug(f"max_samples_len={max_samples_len}") pd = solve_day19(rules, part=2, max_depth=max_samples_len) log.debug(f"rule0={pd['0']}") rule0='^' + pd['0'] + '$' log.info(f"parsed-rules, len(rule0)={len(rule0)}") smatching = 0 for sample in 
samples: if re.match(rule0, sample): smatching +=1 log.info(f"matching-samples-#={smatching}") assert( 12 == smatching ) log.setLevel(logging.INFO) rules, samples = parse_day19(ins) max_samples_len = max(mapl(len, samples)) log.debug(f"max_samples_len={max_samples_len}") pd = solve_day19(rules, part=2, max_depth=max_samples_len) log.debug(f"rule0={pd['0']}") rule0='^' + pd['0'] + '$' log.info(f"parsed-rules, len(rule0)={len(rule0)}") smatching = 0 for sample in samples: if re.match(rule0, sample): smatching +=1 log.info(f"matching-samples-#={smatching}") ###Output _____no_output_____ ###Markdown Day 20: Jurassic Jigsaw ###Code tests = """ Tile 2311: ..##.#..#. ##..#..... #...##..#. ####.#...# ##.##.###. ##...#.### .#.#.#..## ..#....#.. ###...#.#. ..###..### Tile 1951: #.##...##. #.####...# .....#..## #...###### .##.#....# .###.##### ###.##.##. .###....#. ..#.#..#.# #...##.#.. Tile 1171: ####...##. #..##.#..# ##.#..#.#. .###.####. ..###.#### .##....##. .#...####. #.##.####. ####..#... .....##... Tile 1427: ###.##.#.. .#..#.##.. .#.##.#..# #.#.#.##.# ....#...## ...##..##. ...#.##### .#.####.#. ..#..###.# ..##.#..#. Tile 1489: ##.#.#.... ..##...#.. .##..##... ..#...#... #####...#. #..#.#.#.# ...#.#.#.. ##.#...##. ..##.##.## ###.##.#.. Tile 2473: #....####. #..#.##... #.##..#... ######.#.# .#...#.#.# .######### .###.#..#. ########.# ##...##.#. ..###.#.#. Tile 2971: ..#.#....# #...###... #.#.###... ##.##..#.. .#####..## .#..####.# #..#.#..#. ..####.### ..#.#.###. ...#.#.#.# Tile 2729: ...#.#.#.# ####.#.... ..#.#..... ....#..#.# .##..##.#. .#.####... ####.#.#.. ##.####... ##..#.##.. #.##...##. Tile 3079: #.#.#####. .#..###### ..#....... ######.... ####.#..#. .#...#.##. #.#####.## ..#.###... ..#....... ..#.###... """.strip() def get_dimens(num_tiles): for gridx in range(1, num_tiles+1): for gridy in range(1, num_tiles+1): if gridx * gridy == num_tiles: if gridx > 1 and gridy > 1: log.info(f"[get_dimens] {gridx}x{gridy} dimen possible.") def get_borders(tile): borders = set() rows = tile.split("\n") borders.add(rows[0]) borders.add(rows[0][::-1]) # reversed borders.add(rows[-1]) borders.add(rows[-1][::-1]) # reversed col0 = str.join('', mapl(lambda it: it[0], rows)) col_last = str.join('', mapl(lambda it: it[-1], rows) ) borders.add(col0) borders.add(col0[::-1]) # reversed borders.add(col_last) borders.add(col_last[::-1]) # reversed return borders def find_corner_tiles(tiles): tile_keys = tiles.keys() borders = {} bsects = {} for key in tile_keys: borders[key] = get_borders(tiles[key]) bsects[key] = [] for combi in itertools.permutations(tile_keys, 2): key1, key2 = combi b1 = borders[key1] b2 = borders[key2] bsects[key1].append( len( b1 & b2 ) ) corner_tiles = set() for key in tile_keys: #log.info(f"key: {key} {bsects[key]}") bct = len( filterl(lambda it: it > 0, bsects[key]) ) if bct < 3: #log.info(f"border-tile: {key}") corner_tiles.add(key) #elif bct == 4: # log.info(f"middle-tile: {key}") return corner_tiles def find_border_tiles(tiles): tile_keys = tiles.keys() borders = {} bsects = {} for key in tile_keys: borders[key] = get_borders(tiles[key]) bsects[key] = [] for combi in itertools.permutations(tile_keys, 2): key1, key2 = combi b1 = borders[key1] b2 = borders[key2] bsects[key1].append( len( b1 & b2 ) ) border_tiles = set() for key in tile_keys: bct = len( filterl(lambda it: it > 0, bsects[key]) ) if bct == 3: border_tiles.add(key) return border_tiles def parse_tiles(s): d = {} for tile_str in s.split("\n\n"): tile_repr = '' for idx, line in enumerate(tile_str.split("\n")): if idx == 0: 
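                # the first line of each tile block is the 'Tile <id>:' header line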
tile_id = int( line.replace('Tile ','').replace(':','') ) else: tile_repr += line + "\n" d[tile_id] = tile_repr.strip() return d tiles = parse_tiles(tests) num_tiles = len(tiles.keys()) tile_keys = tiles.keys() log.info(f"tests num-tiles={num_tiles}") get_dimens(num_tiles) find_corner_tiles(tiles) ins = aoc.read_file_to_str('in/day20.in').strip() tiles = parse_tiles(ins) num_tiles = len(tiles.keys()) log.info(f"input num-tiles={num_tiles}") res = find_corner_tiles(tiles) log.info(f"ins corner-tiles={res}") res = np.prod(list(res)) log.info(f"Day 20 a solution: border-tiles-product={res}") print("Day 20 b") from math import sqrt def flip_vert_tile(s): """Flip a tile vertically, return str repr.""" return str.join("\n", list(reversed(s.split("\n")))) def flip_horiz_tile(s): """Flip a tile horizontally, return str repr.""" new_los = [] for line in s.split("\n"): new_los.append(str.join('', reversed(line))) return str.join("\n", new_los) def rotate_tile(s): """Left-rotate of tile representation, return str repr.""" lol = mapl(lambda it: list(it), s.split("\n")) new_los = [] for islice in reversed(range(len(lol))): line = str.join('', mapl(lambda it: it[islice], lol)) new_los.append(line) log.trace("rot-repr=\n"+str.join("\n", new_los)) return str.join("\n", new_los) def get_tile_transforms(s): """Provide all transforms of a tile as list, including identity.""" transforms = [s] # start with identity as first elem current_repr = s for rot_num in range(3): current_repr = rotate_tile(current_repr) transforms.append(current_repr) current_repr = flip_vert_tile(s) transforms.append(current_repr) for rot_num in range(3): current_repr = rotate_tile(current_repr) transforms.append(current_repr) current_repr = flip_horiz_tile(s) transforms.append(current_repr) for rot_num in range(3): current_repr = rotate_tile(current_repr) transforms.append(current_repr) return set(transforms) def fits_horiz(lefts, rights): lhs = str.join('', mapl(lambda it: it[-1], lefts.split("\n"))) rhs = str.join('', mapl(lambda it: it[0], rights.split("\n"))) return lhs == rhs def fits_vert(tops, bottoms): lhs = tops.split("\n")[-1] rhs = bottoms.split("\n")[0] return lhs == rhs def get_next_coord(coord, image_width): x, y = coord nx = (x+1) % image_width if x == image_width-1: ny = y+1 else: ny = y log.trace(f"next-coord={(nx, ny)}") return (nx, ny) def is_corner(coord, image_width): b = (coord[0] in [0, image_width-1]) and (coord[1] in [0, image_width-1]) if b: log.trace(f"{coord} is corner; image_width={image_width}") return b def is_border(coord, image_width): log.trace(f"{coord} is border image_width={image_width}") b = not is_corner(coord, image_width) and \ ((coord[0] in [0, image_width-1]) or (coord[1] in [0, image_width-1])) if b: log.info(f"{coord} is border; image_width={image_width}") return b def create_image(tiles, tilekeys_left, img, imgidx, coord, corner_tiles, border_tiles, image_width): x, y = coord log.debug(f"[create_image] tks-left={len(tilekeys_left)}, @{coord}") if x >= image_width or y >= image_width: log.debug(f"FOUND\n{np.array(imgidx)}") return True, img, imgidx if y > 0 and x > 0: #log.info(f" check h+v") #if is_corner(coord, image_width): # @ corner # tkl2 = tilekeys_left & corner_tiles #elif is_border(coord, image_width): # @border # tkl2 = tilekeys_left & border_tiles #else: # tkl2 = tilekeys_left #for tk in tkl2: for tk in tilekeys_left: for tvari in get_tile_transforms( tiles[tk] ): if fits_horiz(img[y][x-1], tvari) and fits_vert(img[y-1][x], tvari): tkl_new = tilekeys_left.copy(); 
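                    # this orientation matches the right edge of the left neighbour and the
                    # bottom edge of the upper neighbour: place it and continue with the next cell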
tkl_new.remove(tk) img_new = copy.deepcopy(img); img_new[y][x] = tvari log.debug(f"found h+v match for tilekey={tk} @{coord}") imgidx_new = copy.deepcopy(imgidx); imgidx_new[y][x] = tk return create_image(tiles, tkl_new, img_new, imgidx_new, get_next_coord(coord, image_width), corner_tiles, border_tiles, image_width) elif y > 0: #log.info(f" check v") #if is_corner(coord, image_width): # @ corner # tkl2 = tilekeys_left & corner_tiles #else: # @border # tkl2 = tilekeys_left & border_tiles #for tk in tkl2: for tk in tilekeys_left: for tvari in get_tile_transforms( tiles[tk] ): if fits_vert(img[y-1][x], tvari): tkl_new = tilekeys_left.copy(); tkl_new.remove(tk) img_new = copy.deepcopy(img); img_new[y][x] = tvari imgidx_new = copy.deepcopy(imgidx); imgidx_new[y][x] = tk log.debug(f"found h+v match for tilekey={tk} @{coord}") return create_image(tiles, tkl_new, img_new, imgidx_new, get_next_coord(coord, image_width), corner_tiles, border_tiles, image_width) elif x > 0: #log.info(f" check h") #if is_corner(coord, image_width): # @ corner # tkl2 = tilekeys_left & corner_tiles #else: # @border # tkl2 = tilekeys_left & border_tiles #for tk in tkl2: for tk in tilekeys_left: for tvari in get_tile_transforms( tiles[tk] ): if fits_horiz(img[y][x-1], tvari): tkl_new = tilekeys_left.copy(); tkl_new.remove(tk) img_new = copy.deepcopy(img); img_new[y][x] = tvari imgidx_new = copy.deepcopy(imgidx); imgidx_new[y][x] = tk log.debug(f"found h+v match for tilekey={tk} @{coord}") return create_image(tiles, tkl_new, img_new, imgidx_new, get_next_coord(coord, image_width), corner_tiles, border_tiles, image_width) log.trace("[create_image] fall-out") return False, img, imgidx def assemble_image(tiles): tiles_keys = tiles.keys() num_tiles = len(tiles) image_width = int(sqrt(num_tiles)) corner_tiles = find_corner_tiles(tiles) log.info(f"[assemble_image] corner-tiles-#={len(corner_tiles)}") assert( 4 == len(corner_tiles) ) border_tiles = find_border_tiles(tiles) log.info(f"[assemble_image] border-tiles-#={len(border_tiles)}; image_width={image_width}") assert( 4*(image_width-2) == len(border_tiles) ) start_tile = list(corner_tiles)[0] log.info(f"[assemble_image] starting; tiles_set={set(tiles_keys)}") tilekeys_left = set(tiles_keys) - set([start_tile]) for vari in get_tile_transforms( tiles[start_tile] ): img = [[None for x in range(image_width)] for y in range(image_width)] imgidx = [[None for x in range(image_width)] for y in range(image_width)] img[0][0] = vari imgidx[0][0] = start_tile log.debug(f"first corner tile img=\n{vari}") img_found, img_final, imgidx_final = create_image(tiles, tilekeys_left, img, imgidx, get_next_coord((0,0), image_width), corner_tiles, border_tiles, image_width) if img_found: log.info(f"IMG found, idxs=\n{imgidx_final}") break assert( img_found ) return img_found, img_final, imgidx_final def get_image_repr(img): img_len = len(img) tile_len = len(img[0][0].split("\n")) log.debug(f"[get_image_repr] num-tiles={img_len}^2={img_len**2} cells-per-tile={tile_len**2}") images = copy.deepcopy(img) for img_y in range(img_len): for img_x in range(img_len): images[img_y][img_x] = img[img_y][img_x].split("\n") # split each tile line-wise img_rows = [] for img_rowidx in range(img_len): tiles_rows = [] for tile_rowidx in range(tile_len): tiles_row = "" for img_colidx in range(img_len): tiles_row += images[img_rowidx][img_colidx][tile_rowidx] tiles_rows.append(tiles_row) img_rows.append(str.join("\n", tiles_rows)) img_repr = str.join("\n", img_rows) return img_repr def show_image(img): img_len = 
len(img) tile_len = len(img[0][0].split("\n")) log.info(f"[show_image] num-tiles={img_len}^2={img_len**2} cells-per-tile={tile_len**2}") log.info("\n"+get_image_repr(img)) def cut_tile_borders(tile): los = tile.split("\n") tile_len = len(los) new_los = [] for idx, line in enumerate(los): if idx in [0, tile_len-1]: continue new_line = line[1:-1] assert(len(new_line) == tile_len-2) new_los.append( new_line ) assert(len(new_los) == tile_len-2) return str.join("\n", new_los) def cut_image_borders(img): img_len = len(img) for y in range(img_len): for x in range(img_len): tile = img[y][x] tile = cut_tile_borders(tile) img[y][x] = tile return img sea_monster = """ # # ## ## ### # # # # # # """ def tiles_to_sea_npar(sea_los): """Convert original tiles representation to a 'sea' numpy-array of 0s and 1s.""" tiles = parse_tiles(sea_los) img_found, img, imgidx = assemble_image(tiles) #show_image(test_img) img_cut = cut_image_borders(img) #show_image(test_img_cut) img_cut = get_image_repr(img_cut) # from x*x matrix to 1 str image_los = img_cut.replace(".", "0 ").replace("#", "1 ").split("\n") image_ar = np.array([[int(c) for c in seamst_line.strip().split(" ")] for seamst_line in image_los]) return image_ar # Thanks github user JesperDramsch: def variations_of(npar): """Return identity and all rotation and flip-horiz flip-vert variations of np-array.""" varias = [] for i in range(4): tfar = np.rot90(npar, i) varias.append(tfar) varias.append(np.flip(tfar, 0)) varias.append(np.flip(tfar, 1)) return varias # Inspired # Thanks github user JesperDramsch, via reddit aoc 2020 day 20 solutions/discussion: # https://github.com/JesperDramsch/advent-of-code-1 def eliminate_monsters(sea, seamst): """Given 'sea' and 'seamonster' input numpy-arrays, eliminate all variations of seamonster (rots, flips) from the sea, return sea without monsters (np-array).""" seamst_cct = seamst.sum() seamst_varias = variations_of(seamst) monsters_num = 0 while monsters_num == 0: monster = seamst_varias.pop() mst_y, mst_x = monster.shape for y, x in np.ndindex(sea.shape): sub_arr = sea[y : y + mst_y, x : x + mst_x].copy() if not sub_arr.shape == monster.shape: continue sub_arr *= monster # <= sea & monster if np.sum(sub_arr) == seamst_cct: monsters_num += 1 sea[y : y + mst_y, x : x + mst_x] -= monster # => sea - monster return sea sea_monster = sea_monster.strip("\n") #print(f">{sea_monster}<") # Thanks github user JesperDramsch: sea_monster_los = sea_monster.replace(" ", "0 ").replace("#", "1 ").split("\n") #log.info(f"\n{sea_monster_los}") seamst = np.array([[int(c) for c in seamst_line.strip().split(" ")] for seamst_line in sea_monster_los]) seamst_cct = seamst.sum() log.info(f"Seamonster cell-count={seamst_cct}") log.info(f"\n{seamst}") sea_ar = tiles_to_sea_npar(tests) log.info(f"sea-nparray, shape={sea_ar.shape}::\n{sea_ar}") res = eliminate_monsters(sea_ar, seamst).sum() log.info(f"Day 21 b tests: rough-sea-count={res}") assert( 273 == res ) sea_ar = tiles_to_sea_npar(ins) log.info(f"sea-nparray, shape={sea_ar.shape}::\n{sea_ar}") res = eliminate_monsters(sea_ar, seamst).sum() log.info(f"Day 21 b final solution: rough-sea-count={res}") ###Output _____no_output_____ ###Markdown Day 21: Allergen Assessment ###Code tests = """ mxmxvkd kfcds sqjhc nhms (contains dairy, fish) trh fvjkl sbzzf mxmxvkd (contains dairy) sqjhc fvjkl (contains soy) sqjhc mxmxvkd sbzzf (contains fish) """.strip().split("\n") def solve_day21(los, part=1): ingreds_all = set() log.info(f"[solve21a] num-lines={len(los)}") allerg_assoc = {} recips = [] for 
line in los: ingreds, allergs = line.split(' (contains ') ingreds = set(ingreds.strip().split(' ')) allergs = allergs.strip().replace(')','').split(', ') log.debug(f" ingreds={ingreds}; allergs={allergs}") ingreds_all |= ingreds recips.append({'ingreds':ingreds, 'allergs':allergs}) for allerg in allergs: if not allerg in allerg_assoc: allerg_assoc[allerg] = set(ingreds) else: allerg_assoc[allerg] &= set(ingreds) for i in range(len(allerg_assoc.keys())): # loop and weed max n times found_allergs = filterl(lambda it: len(allerg_assoc[it]) == 1, allerg_assoc.keys()) found_ingreds = mapl(lambda it: list(allerg_assoc[it])[0], found_allergs) for allerg in allerg_assoc.keys(): if allerg in found_allergs: continue allerg_assoc[allerg] -= set(found_ingreds) if 1 == max( mapl(lambda it: len(allerg_assoc[it]), allerg_assoc.keys()) ): break allerg_assoc = {k:list(v)[0] for k,v in allerg_assoc.items()} # get rid of wrapping set per values log.info(f"allerg_assoc={allerg_assoc}") ingreds_pure = ingreds_all.copy() for ingred_allergic in allerg_assoc.values(): ingred_allergic = ingred_allergic ingreds_pure.remove(ingred_allergic) log.info(f"ingreds-pure={ingreds_pure}") ct = 0 for ingred_pure in ingreds_pure: for recip in recips: if ingred_pure in recip['ingreds']: ct += 1 log.info(f"day 21 part 1: count of pure ingredients occurences={ct}") if part == 1: return ct vals_ordered = [] for k in sorted(allerg_assoc.keys()): vals_ordered.append(allerg_assoc[k]) vals_str = str.join(',', vals_ordered) log.info(f"vals_str=>{vals_str}<") return vals_str #log.setLevel(aoc.LOGLEVEL_TRACE) log.setLevel(logging.INFO) res = solve_day21(tests, part=1) assert( 5 == res ) ins = aoc.read_file_to_list('./in/day21.in') res = solve_day21(ins, part=1) logging.info(f"Day 21 a solution: {res}") print("Day 21 b") #log.setLevel(aoc.LOGLEVEL_TRACE) #log.setLevel(logging.INFO) res = solve_day21(tests, part=2) assert( "mxmxvkd,sqjhc,fvjkl" == res ) res = solve_day21(ins, part=2) log.info(f"Day 21 b solution:\n>{res}<") ###Output _____no_output_____ ###Markdown Day 22: Crab Combat ###Code def parse_day22(s): players = {} players_str = s.split("\n\n") for player_str in players_str: for line in player_str.split("\n"): if line.startswith('Player'): player_id = int(line.replace('Player ', '').replace(':','')) players[player_id] = [] else: players[player_id].append(int(line)) log.debug(f"[parse_day22] {players}") return players def play_crabcardgame(players): t = 0 player_keys = list(players.keys()) while( min( mapl(lambda it: len(players[it]), player_keys) ) > 0 ): draw = mapl(lambda it: players[it].pop(0), player_keys) winner_idx = draw.index(max(draw)) #players[player_keys[winner_idx]] += sorted(draw, reverse=True) loser_idx = (0 if winner_idx == 1 else 1) players[player_keys[winner_idx]] += [draw[winner_idx], draw[loser_idx]] # winner's card first t += 1 log.debug(f"[play_ccg] t={t} draw={draw} {players}") if t > 1_000: raise Exception("failsafe") players['t'] = t players['winner'] = player_keys[winner_idx] return players def score_crabcardgame(players): cardstack = players[players['winner']] log.debug(f"[score_crabcardgame] cardstack={cardstack}") cardstack = list(reversed(cardstack)) score = 0 for idx in range(len(cardstack)): score += (idx+1) * cardstack[idx] return score tests = """ Player 1: 9 2 6 3 1 Player 2: 5 8 4 7 10 """.strip() players = parse_day22(tests) players = play_crabcardgame(players) res = score_crabcardgame(players) assert( 306 == res) ins = aoc.read_file_to_str('in/day22.in').strip() players = parse_day22(ins) 
players = play_crabcardgame(players) res = score_crabcardgame(players) log.info(f"Day 22 part 1 solution: winning score={res}") print("Day 22 b") def hashrep_of(player): repres = str(player) return hashlib.sha1(repres.encode()).hexdigest() def play_recursivecombat(players):# t = 0 player_keys = list(players.keys()) player_seen_handhashes = set() plcardnums = [len(players[1]), len(players[2])] log.debug(f"[play_recursivecombat] plcard#={plcardnums} t={t} {players}") for t in range(1, 100_000): log.debug(f"t={t} init={players}") # NOTE: The hands-already-seen condition had to be read VERY CAREFULLY !!! player1_hashrep = hashrep_of(players[1]) player2_hashrep = hashrep_of(players[2]) if player1_hashrep in player_seen_handhashes and player2_hashrep in player_seen_handhashes: ### NOTE THE **AND** in above condition !!! log.debug(f" current hands already seen") hand_seen = True else: player_seen_handhashes.add(player1_hashrep) player_seen_handhashes.add(player2_hashrep) hand_seen = False if hand_seen: players['t'] = t players['winner'] = player_keys[0] players['win-cond'] = 'hand_already_seen' log.debug(f"win-cond plcard#={plcardnums} already-played players={players}") return players draw = mapl(lambda it: players[it].pop(0), player_keys) log.debug(f" t={t} draw={draw} keeping {players}") if draw[0] <= len(players[1]) and draw[1] <= len(players[2]): # both players have enough cards left log.debug(f" recursing") recursed_players = copy.deepcopy(players) # the quantity of cards copied is equal to the number on the card they drew to trigger the sub-game if draw[0] < len(players[1]): recursed_players[1] = recursed_players[1][:draw[0]] # cut the stack to size for recursion if draw[1] < len(players[2]): recursed_players[2] = recursed_players[2][:draw[1]] # cut the stack to size for recursion recursed_players = play_recursivecombat(recursed_players) winner = recursed_players['winner'] else: winner = draw.index(max(draw)) + 1 winner_idx = winner - 1 loser_idx = (0 if winner_idx == 1 else 1) players[winner] += [draw[winner_idx], draw[loser_idx]] # winner's card first if min( mapl(lambda it: len(players[it]), player_keys) ) <= 0: players['t'] = t players['winner'] = winner players['win-cond'] = '1player_out_of_cards' log.debug(f"win-cond plcard#={plcardnums} 1-player-run-outof-cards players={players}") return players raise Exception("failsafe") players = play_recursivecombat(parse_day22(tests)) res = score_crabcardgame(players) assert( 291 == res ) tests_loop = """ Player 1: 43 19 Player 2: 2 29 14 """.strip() res = play_recursivecombat(parse_day22(tests_loop)) assert( res['win-cond'] == 'hand_already_seen' ) #log.setLevel(logging.INFO) players = play_recursivecombat(parse_day22(ins)) log.info(f"recursive-combat result for ins: {players}") res = score_crabcardgame(players) log.info(f"Day 22 part 2 solution: recursive-combat winner-score={res}") ###Output _____no_output_____ ###Markdown Day 23: Crab Cups ###Code def play_crabcups_round(l): #orig_lst = l.copy() list_len = len(l) current = l[0] taken = [l.pop(1), l.pop(1), l.pop(1)] # take 3 next_val = current - 1 while(True): if next_val in l: next_idx = l.index(next_val) break else: next_val -= 1 if next_val <= 0: next_val = max(l) log.debug(f"[play_crabcups_round] head={current}, taken={taken}, dest={next_val}") new_list = [next_val] new_list = new_list + taken appending = False for val in itertools.cycle(l): if not appending: if val == next_val: appending = True else: new_list.append(val) if len(new_list) >= list_len: break log.debug(f" 
new_list={new_list}") tgt_idx = (new_list.index(current)+1) % list_len new_list2 = new_list[tgt_idx:] + new_list[:tgt_idx] log.debug(f" new_list2={new_list2}") return new_list2 def play_crabcups_game(l, rounds=1): log.info(f"[play_crabcups_game] started: l={l}, rounds={rounds}") lst = l.copy() for i in range(1, rounds+1): lst = play_crabcups_round(lst) log.debug(f" round={i} l={lst}") return lst def score_crabcups_game(l): tgt_idx = (l.index(1)+1) % len(l) if tgt_idx == 0: outlst = l[tgt_idx, len(l)-1] else: outlst = l[tgt_idx:] + l[:tgt_idx-1] return int( str.join('', mapl(str,outlst)) ) tests = "389125467" test_lst = mapl(int, list(tests)) res = play_crabcups_game(test_lst, rounds=10) log.info(f"test result={res}") score = score_crabcups_game(res) log.info(f"test result 10rds score={score}") assert( 92658374 == score ) res = play_crabcups_game(test_lst, rounds=100) score = score_crabcups_game(res) log.info(f"test result 100rds score={score}") assert( 67384529 == score) ins = aoc.read_file_to_str('in/day23.in').strip() ins_lst = mapl(int, list(ins)) res = play_crabcups_game(ins_lst, rounds=100) log.info(f"Day 23 part 1 result={res}") score = score_crabcups_game(res) log.info(f"Day 23 part 1 solution: result 100rds score={score}") print("Day 23 b") def assemble_crabcups2_list(l, num_cups = 1_000_000): """Get a cups-list according to part 2 requirements (1mio cups).""" out_lst = l.copy() max_val = max(l) num_new_cups = num_cups - len(out_lst) out_lst += list(range(max_val+1, num_cups+1)) assert( num_cups == len(out_lst) ) return out_lst def play_crabcups_round_opt(l, rounds=1): """Optimize play of crabcups for n rounds, using cycling LinkedList instead of list.""" start_tm = int(time.time()) list_len = len(l) lkl = {} #firstval = l[0] #lastval = l[-1] curval = l[0] for idx, val in enumerate(l): next_idx = idx+1 if next_idx == list_len: next_idx = 0 lkl[val] = l[next_idx] for rd in range(rounds): # The crab picks up the three cups that are immediately clockwise of the current cup. # They are removed from the circle; # cup spacing is adjusted as necessary to maintain the circle. n1 = lkl[curval] n2 = lkl[n1] n3 = lkl[n2] lkl[curval] = lkl[n3] #log.trace(f" re-chained from current={curval} to={lkl[n3]}, taken={[n1, n2, n3]}") # The crab selects a destination cup: # the cup with a label equal to the current cup's label minus one. # If this would select one of the cups that was just picked up, # the crab will keep subtracting one until it finds a cup # that wasn't just picked up. # If at any point in this process the value goes below # the lowest value on any cup's label, it wraps around # to the highest value on any cup's label instead. for _ in range(list_len): if _ == 0: nextval = curval nextval -= 1 #log.trace(f" chknextval={nextval}") if nextval in [n1, n2, n3]: #log.trace(f" is in outtakes") continue if nextval <= 0: nextval = max(lkl.keys())+1 continue else: break #log.trace(f" current={curval} picked={[n1, n2, n3]}, dest={nextval}") # The crab places the cups it just picked up # so that they are immediately clockwise of the destination cup. # They keep the same order as when they were picked up. 
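            # (added note, not in the original) lkl is a successor map used as a circular singly
            # linked list: lkl[cup] is the cup immediately clockwise of `cup`. Splicing the three
            # picked-up cups back in after the destination cup is therefore three O(1) pointer
            # updates instead of list slicing, which is what makes 10,000,000 rounds over
            # 1,000,000 cups tractable:
            #   before: nextval -> next_end_val ...        (n1 -> n2 -> n3 detached)
            #   after:  nextval -> n1 -> n2 -> n3 -> next_end_val ...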
next_end_val = lkl[nextval] # store end value lkl[nextval] = n1 # break open the chain # lkl[n1] == n2 # lkl[n2] == n3 lkl[n3] = next_end_val # close the chain again # The crab selects a new current cup: # the cup which is immediately clockwise of the current cup curval = lkl[curval] if rd % 1_000_000 == 0: took_tm = int(time.time()) - start_tm log.info(f"round={rd:,} time_taken sofar {took_tm}s") out_lst = [] for i in range(list_len): if i == 0: #last_val = 1 last_val = curval out_lst.append(last_val) last_val = lkl[last_val] return out_lst def play_crabcups_game_opt(l, rounds=1): log.info(f"[play_crabcups_game] started: l={l}, rounds={rounds}") #lst = l.copy() return play_crabcups_round_opt(l, rounds) def score_crabcups_game_part2(l): lst_len = len(l) tgt_idx = (l.index(1)+1) % len(l) if tgt_idx < lst_len - 2: subl = l[tgt_idx : tgt_idx+2] #log.info(subl) else: tgtidx1 = (tgt_idx+1) % lst_len tgtidx2 = (tgt_idx+2) % lst_len subl = [l[tgtidx1], l[tgtidx2]] assert( 2 == len(subl) ) return subl[0] * subl[1] # check part 1 game results and scores still valid... tests = "389125467" test_lst = mapl(int, list(tests)) res = play_crabcups_game_opt(test_lst, rounds=10) log.info(f"test result={res}") score1 = score_crabcups_game(res) log.info(f"test result 10rds score part 1={score1}") log.info(f"test result 10rds score part 2={score}") assert( 92658374 == score1 ) score = score_crabcups_game_part2(res) # still valid... ins = aoc.read_file_to_str('in/day23.in').strip() ins_lst = mapl(int, list(ins)) res = play_crabcups_game_opt(ins_lst, rounds=100) log.info(f"Day 23 part 1 result={res}") score1 = score_crabcups_game(res) log.info(f"Day 23 part 1 solution: result 100rds score={score1}") score = score_crabcups_game_part2(res) log.info(f"Day 23 part 2 check: result 100rds score2={score}") assert( 74698532 == score1 ) # test with long list for part 2 test2_lst = assemble_crabcups2_list(test_lst, num_cups = 1_000_000) log.info("done") assert( 1_000_000 == len(test2_lst) ) res = play_crabcups_game_opt(test2_lst, rounds=10_000_000) log.info("done2") score2 = score_crabcups_game_part2(res) log.info(f"score2={score2}") assert( 1_000_000 == len(res) ) assert( 149245887792 == score2 ) ins2_lst = assemble_crabcups2_list(ins_lst, num_cups = 1_000_000) res = play_crabcups_game_opt(ins2_lst, rounds=10_000_000) log.info("done2") score2 = score_crabcups_game_part2(res) log.info(f"Day 23 part 2 solution: score2={score2}") ###Output _____no_output_____ ###Markdown Day 24: Lobby LayoutHexagonal geometry and hexagonal 2d-coordinates.See red blob games site [Hexagonal Grids](https://www.redblobgames.com/grids/hexagons/)for thorough explanations.Thanks to colleague P S for the hint! \Last used in Advent of Code 2017, day 11. \Todays aoc hint: [Hexagonal tiling - Wikipedia](https://en.wikipedia.org/wiki/Hexagonal_tiling) ###Code def cl(l): """Return compact list str representation.""" return str(l).replace(', ',',') # Using pointy topped grid/geometry and axial coordinates. 
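# (added note, not in the original) Axial coordinates are the two-axis form of cube coordinates
# (q = x, r = z, with the implicit third axis y = -x - z), so each of the six neighbour steps
# below changes q and r by at most 1, and hex_axial_distance is the cube-coordinate
# Manhattan distance divided by 2.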
# Using axis notation [q,r] here, q is west>east and r is south>north hex2d_axial_pt_translations = {'e':[1,0], 'w':[-1,0], 'se':[0,1], 'sw':[-1,1], 'ne':[+1,-1], 'nw':[0,-1]} def hex_axial_distance(a, b): return int((abs(a[0] - b[0]) + abs(a[0] + a[1] - b[0] - b[1]) + abs(a[1] - b[1])) / 2) # east, southeast, southwest, west, northwest, and northeast # => e, se, sw, w, nw, and ne def parse_day24_line(s): log.debug(f"parse_day24_line in={s}") out_trs = [] while len(s) > 0: log.trace(f"out_trs={out_trs} rest={s}") if len(s)>= 2 and s[:2] in ['se','sw','nw','ne']: out_trs.append(s[:2]) s = s[2:] elif len(s)>= 1 and s[:1] in ['e','w']: out_trs.append(s[:1]) s = s[1:] else: raise Exception(f"unforeseen: {s}") log.debug(f"parse_day24_line returns {cl(out_trs)}") return out_trs def parse_day24(los): return mapl(lambda it: parse_day24_line(it), los) def flip_day24_line(steps): #flips = defaultdict(int) c = (0,0) for step in steps: trans = hex2d_axial_pt_translations[step] c = (c[0]+trans[0], c[1]+trans[1]) #flips[c] += 1 #return flips return c def flip_day24_lines(steps_lol): flips = defaultdict(int) c = (0,0) for steps in steps_lol: c = flip_day24_line(steps) flips[c] += 1 return flips test1 = 'esew' flip_day24_line( parse_day24_line(test1) ) test2 = 'nwwswee' flip_day24_line( parse_day24_line(test2) ) tests = """ sesenwnenenewseeswwswswwnenewsewsw neeenesenwnwwswnenewnwwsewnenwseswesw seswneswswsenwwnwse nwnwneseeswswnenewneswwnewseswneseene swweswneswnenwsewnwneneseenw eesenwseswswnenwswnwnwsewwnwsene sewnenenenesenwsewnenwwwse wenwwweseeeweswwwnwwe wsweesenenewnwwnwsenewsenwwsesesenwne neeswseenwwswnwswswnw nenwswwsewswnenenewsenwsenwnesesenew enewnwewneswsewnwswenweswnenwsenwsw sweneswneswneneenwnewenewwneswswnese swwesenesewenwneswnwwneseswwne enesenwswwswneneswsenwnewswseenwsese wnwnesenesenenwwnenwsewesewsesesew nenewswnwewswnenesenwnesewesw eneswnwswnwsenenwnwnwwseeswneewsenese neswnwewnwnwseenwseesewsenwsweewe wseweeenwnesenwwwswnew """.strip().split("\n") flips = flip_day24_lines( parse_day24(tests) ) tiles_black = filterl(lambda it: flips[it] % 2 == 1, flips.keys()) log.info(f"Day 24 part 1 tests solutions: black tiles#={len(tiles_black)}") #" from {tiles_black}") assert( 10 == len(tiles_black)) ins = aoc.read_file_to_list('in/day24.in') flips = flip_day24_lines( parse_day24(ins) ) tiles_black = filterl(lambda it: flips[it] % 2 == 1, flips.keys()) log.info(f"Day 24 part 1 solution: black tiles#={len(tiles_black)}") #" from {tiles_black}") print("Day 24 b") # cellular automaton on this hexagonal tile geometry space def get_extents(tiles_black): qs = mapl(lambda it: it[0], tiles_black) rs = mapl(lambda it: it[1], tiles_black) return [[min(qs), max(qs)], [min(rs), max(rs)]] def num_neighbors(c, tiles_black): nsum = 0 for tilec in tiles_black: #if c != tilec and hex_axial_distance(c, tilec) == 1: if hex_axial_distance(c, tilec) == 1: log.trace(f"{tilec} is neib of {c}") nsum += 1 assert( nsum <= 6 ) return nsum def cell_automate(tiles_black, rounds = 1): exts = get_extents(tiles_black) log.info(f"[cell_automate] at round 0: num-tiles-black={len(tiles_black)}; extents={exts}") #" from {sorted(tiles_black)}") start_tm = int(time.time()) for rnd in range(1, rounds+1): new_tiles_black = tiles_black.copy() exts = get_extents(tiles_black) log.debug(f"round {rnd}: extents found={exts}") q_min, q_max = exts[0] r_min, r_max = exts[1] for q in range(q_min-1, q_max+1+1): for r in range(r_min-1, r_max+1+1): c = (q, r) nneibs = num_neighbors(c, tiles_black) if c in tiles_black: if nneibs == 0 or 
nneibs > 2: log.debug(f"flip-to-white {c} nneibs={nneibs}") new_tiles_black.remove(c) else: if nneibs == 2: log.debug(f"flip-to-black {c} nneibs={nneibs}") new_tiles_black.append(c) tiles_black = new_tiles_black took_tm = int(time.time()) - start_tm log.info(f" after round {rnd} @{took_tm:>5}s: num-tiles-black={len(tiles_black)}; extents={exts}") #" from {sorted(tiles_black)}") log.info(f"[cell_automate] finished round {rnd}: num-tiles-black={len(tiles_black)}; extents={exts}") #" from {sorted(tiles_black)}") return tiles_black flips = flip_day24_lines( parse_day24(tests) ) tiles_black = filterl(lambda it: flips[it] % 2 == 1, flips.keys()) assert 10 == len(tiles_black) tiles_black2 = cell_automate(tiles_black, rounds=1) assert 15 == len(tiles_black2) tiles_black2 = cell_automate(tiles_black, rounds=2) assert 12 == len(tiles_black2) tiles_black2 = cell_automate(tiles_black, rounds=10) assert 37 == len(tiles_black2) tiles_black2 = cell_automate(tiles_black, rounds=20) assert 132 == len(tiles_black2) if EXEC_RESOURCE_HOGS: tiles_black2 = cell_automate(tiles_black, rounds=100) assert 2208 == len(tiles_black2) if EXEC_RESOURCE_HOGS: flips = flip_day24_lines( parse_day24(ins) ) tiles_black = filterl(lambda it: flips[it] % 2 == 1, flips.keys()) log.info(f"Day 24 part 1 solution: black tiles#={len(tiles_black)}") #" from {tiles_black}") tiles_black2 = cell_automate(tiles_black, rounds=100) log.info(f"Day 24 part 2 solution: black tiles#={len(tiles_black2)}") #" from {tiles_black}") # took 1496 seconds! ###Output _____no_output_____ ###Markdown Day 24: Combo Breaker ###Code def find_loopsize(pubkey, max_iter=100_000): subjectnum = 7 val = 1 for i in range(1, max_iter+1): val = (val * subjectnum) % 20201227 if val == pubkey: break if i == max_iter: raise Exception("failsafe") return i def encrypt_day25(subjectnum=7, loopsize=None): log.info(f"[encrypt_day25] subject#={subjectnum}, loopsize={loopsize}") val = 1 for i in range(loopsize): val = (val * subjectnum) % 20201227 return val tests = """ 5764801 17807724 """.strip() card_pubkey, door_pubkey = mapl(int, tests.split("\n")) log.info("tests card-pubkey={card_pubkey}, door pubkey=(door_pubkey)") card_loopsize = find_loopsize(card_pubkey) door_loopsize = find_loopsize(door_pubkey) log.info(f"tests result: card-loopsize={card_loopsize}, door_loopsize={door_loopsize}") t1 = encrypt_day25(subjectnum=door_pubkey, loopsize=card_loopsize) t2 = encrypt_day25(subjectnum=card_pubkey, loopsize=door_loopsize) log.info(f"tests result: encryption key={t1} : encrypted {t1} =? {t2}") assert( t1 == t2 ) ins = aoc.read_file_to_list('in/day25.in') card_pubkey, door_pubkey = mapl(int, ins) log.info(f"card-pubkey={card_pubkey}, door pubkey={door_pubkey}") card_loopsize = find_loopsize(card_pubkey, max_iter=10_000_000) door_loopsize = find_loopsize(door_pubkey, max_iter=10_000_000) log.info(f"intermed result: card-loopsize={card_loopsize:,}, door_loopsize={door_loopsize:,}") t1 = encrypt_day25(subjectnum=door_pubkey, loopsize=card_loopsize) t2 = encrypt_day25(subjectnum=card_pubkey, loopsize=door_loopsize) log.info(f"Day 25 solution: encryption key={t1} : encrypted {t1} =? {t2}") ###Output _____no_output_____
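###Markdown (Added note) `find_loopsize` and `encrypt_day25` above repeatedly compute `val = (val * subject) % 20201227`, i.e. modular exponentiation, so the same handshake can be written with Python's built-in three-argument `pow`, and recovering the loop size is a discrete-logarithm problem. The cell below is an optional illustration of that reading, not part of the original solution; it assumes Python 3.8+ (`math.isqrt`, modular inverse via `pow(x, -m, mod)`) and uses only the example public keys from the puzzle statement.
###Code
from math import isqrt

MOD = 20201227  # modulus used by the puzzle

def transform_subject(subject, loops):
    """Same result as encrypt_day25, via built-in modular exponentiation."""
    return pow(subject, loops, MOD)

def discrete_log_bsgs(base, target, mod=MOD):
    """Return x with base**x % mod == target, using baby-step giant-step in O(sqrt(mod))."""
    m = isqrt(mod) + 1
    baby = {}                          # base**j -> j for j in [0, m)
    cur = 1
    for j in range(m):
        baby.setdefault(cur, j)
        cur = (cur * base) % mod
    step = pow(base, -m, mod)          # modular inverse of base**m (Python 3.8+)
    gamma = target
    for i in range(m):
        if gamma in baby:
            return i * m + baby[gamma]
        gamma = (gamma * step) % mod
    return None

# sanity check against the example public keys from the puzzle statement
card_pub, door_pub = 5764801, 17807724
card_loops = discrete_log_bsgs(7, card_pub)
assert transform_subject(7, card_loops) == card_pub
assert transform_subject(door_pub, card_loops) == 14897079  # example encryption key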
tfidfvectorizer/Assignment_3_Reference.ipynb
###Markdown Implementing Bag of Words Fit method: With this function, we will find all unique words in the data and we will assign a dimension-number to each unique word. We will create a python dictionary to save all the unique words, such that the key of dictionary represents a unique word and the corresponding value represent it's dimension-number. For example, if you have a review, __'very bad pizza'__ then you can represent each unique word with a dimension_number as, dict = { 'very' : 1, 'bad' : 2, 'pizza' : 3} ###Code import warnings warnings.filterwarnings("ignore") import pandas as pd from tqdm import tqdm import os from tqdm import tqdm # tqdm is a library that helps us to visualize the runtime of for loop. refer this to know more about tqdm #https://tqdm.github.io/ # it accepts only list of sentances def fit(dataset): unique_words = set() # at first we will initialize an empty set # check if its list type or not if isinstance(dataset, (list,)): for row in dataset: # for each review in the dataset for word in row.split(" "): # for each word in the review. #split method converts a string into list of words if len(word) < 2: continue unique_words.add(word) # print('unique words:',unique_words) unique_words = sorted(list(unique_words)) # print('sorted unique words',unique_words) vocab = {j:i for i,j in enumerate(unique_words)} # print('to return dictionary',vocab) return vocab else: print("you need to pass list of sentance") vocab = fit(["abc def aaa prq", "lmn pqr aaaaaaa aaa abbb baaa"]) print(list(vocab.keys())) vocab vocab['lmn'] ###Output _____no_output_____ ###Markdown What is a Sparse Matrix? Before going further into details about Transform method, we will understand what sparse matrix is. Sparse matrix stores only non-zero elements and they occupy less amount of RAM comapre to a dense matrix. You can refer to this link. For example, assume you have a matrix, [[1, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 4, 0, 0]] ###Code from sys import getsizeof import numpy as np # we store every element here a = np.array([[1, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 4, 0, 0]]) print(getsizeof(a)) # here we are storing only non zero elements here (row, col, value) a = [ (0, 0, 1), (1, 3, 1), (2,2,4)] # with this way of storing we are saving alomost 50% memory for this example print(getsizeof(a)) ###Output 172 88 ###Markdown How to write a Sparse Matrix?: You can use csr_matrix() method of scipy.sparse to write a sparse matrix. You need to pass indices of non-zero elements into csr_matrix() for creating a sparse matrix. You also need to pass element value of each pair of indices. You can use lists to save the indices of non-zero elements and their corresponding element values. For example, Assume you have a matrix, [[1, 0, 0], [0, 0, 1], [4, 0, 6]] Then you can save the indices using a list as,list_of_indices = [(0,0), (1,2), (2,0), (2,2)] And you can save the corresponding element values as, element_values = [1, 1, 4, 6] Further you can refer to the documentation here. Transform method: With this function, we will write a feature matrix using sprase matrix. 
###Code from collections import Counter from scipy.sparse import csr_matrix test = 'abc def abc def zzz zzz pqr' a = dict(Counter(test.split())) for i,j in a.items(): print(i, j) test = 'abc def abc def zzz zzz pqr' split_test = test.split() split_test type(Counter) a = dict(Counter(split_test)) for key,value in a.items(): print('key = ',key,' value = ',value) a # https://stackoverflow.com/questions/9919604/efficiently-calculate-word-frequency-in-a-string # https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/scipy.sparse.csr_matrix.html # note that we are we need to send the preprocessing text here, we have not inlcluded the processing def transform(dataset,vocab): rows = [] columns = [] values = [] if isinstance(dataset, (list,)): for idx, row in enumerate(tqdm(dataset)): # for each document in the dataset # it will return a dict type object where key is the word and values is its frequency, {word:frequency} word_freq = dict(Counter(row.split())) # for every unique word in the document for word, freq in word_freq.items(): # for each unique word in the review. if len(word) < 2: continue # we will check if its there in the vocabulary that we build in fit() function # dict.get() function will return the values, if the key doesn't exits it will return -1 col_index = vocab.get(word, -1) # retreving the dimension number of a word # if the word exists if col_index !=-1: # we are storing the index of the document rows.append(idx) # we are storing the dimensions of the word columns.append(col_index) # we are storing the frequency of the word values.append(freq) return csr_matrix((values, (rows,columns)), shape=(len(dataset),len(vocab))) else: print("you need to pass list of strings") strings = ["the method of lagrange multipliers is the economists workhorse for solving optimization problems", "the technique is a centerpiece of economic theory but unfortunately its usually taught poorly"] vocab = fit(strings) print(list(vocab.keys())) print(transform(strings, vocab).toarray()) ###Output ['but', 'centerpiece', 'economic', 'economists', 'for', 'is', 'its', 'lagrange', 'method', 'multipliers', 'of', 'optimization', 'poorly', 'problems', 'solving', 'taught', 'technique', 'the', 'theory', 'unfortunately', 'usually', 'workhorse'] ###Markdown Comparing results with countvectorizer ###Code from sklearn.feature_extraction.text import CountVectorizer vec = CountVectorizer(analyzer='word') vec.fit(strings) print(vec.get_feature_names()) feature_matrix_2 = vec.transform(strings) print(feature_matrix_2.toarray()) ###Output ['but', 'centerpiece', 'economic', 'economists', 'for', 'is', 'its', 'lagrange', 'method', 'multipliers', 'of', 'optimization', 'poorly', 'problems', 'solving', 'taught', 'technique', 'the', 'theory', 'unfortunately', 'usually', 'workhorse'] [[0 0 0 1 1 1 0 1 1 1 1 1 0 1 1 0 0 2 0 0 0 1] [1 1 1 0 0 1 1 0 0 0 1 0 1 0 0 1 1 1 1 1 1 0]]
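###Markdown (Added note) Given the notebook's TF-IDF theme, here is a minimal sketch of how the custom `fit`/`transform` above could be extended to TF-IDF and checked against sklearn's `TfidfVectorizer`. `transform_tfidf` is a hypothetical helper added for illustration only, not part of the original assignment code; it assumes sklearn's default settings (smoothed idf, `idf(t) = ln((1 + n) / (1 + df(t))) + 1`, followed by L2 row normalization), so the two results should agree for the toy sentences used above.
###Code
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer

def transform_tfidf(dataset, vocab):
    """TF-IDF on top of the custom fit()/transform(): raw counts, smoothed idf, L2 row norm."""
    tf = transform(dataset, vocab).toarray().astype(float)  # raw term counts per document
    n_docs = tf.shape[0]
    df = (tf > 0).sum(axis=0)                               # document frequency per term
    idf = np.log((1 + n_docs) / (1 + df)) + 1
    weighted = tf * idf
    norms = np.linalg.norm(weighted, axis=1, keepdims=True)
    return weighted / norms

tfidf_custom = transform_tfidf(strings, vocab)
tfidf_sklearn = TfidfVectorizer().fit_transform(strings).toarray()
print(np.allclose(tfidf_custom, tfidf_sklearn))  # expected: True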
analysis/kushinm/semantic_parts_pilot0_analysis.ipynb
###Markdown Setting up and creating dataframe for analysis ###Code # directory & file hierarchy proj_dir = os.path.abspath('../..') analysis_dir = os.getcwd() results_dir = os.path.join(proj_dir,'results') plot_dir = os.path.join(results_dir,'plots') csv_dir = os.path.join(results_dir,'csv') features_dir= os.path.join(results_dir,'features') exp_dir = os.path.abspath(os.path.join(proj_dir,'experiments')) sketch_dir = os.path.abspath(os.path.join(proj_dir,'sketches')) ## add helpers to python path if os.path.join(proj_dir,'analysis') not in sys.path: sys.path.append(os.path.join(proj_dir,'analysis')) if not os.path.exists(results_dir): os.makedirs(results_dir) if not os.path.exists(plot_dir): os.makedirs(plot_dir) if not os.path.exists(csv_dir): os.makedirs(csv_dir) if not os.path.exists(features_dir): os.makedirs(features_dir) ## add helpers to python path if os.path.join(proj_dir,'analysis') not in sys.path: sys.path.append(os.path.join(proj_dir,'analysis')) # Assign variables within imported analysis helpers import analysis_helpers as h if sys.version_info[0]>=3: from importlib import reload reload(h) ###Output _____no_output_____ ###Markdown setting up connection to mongo ###Code #### set vars auth = pd.read_csv('auth.txt', header = None) # this auth.txt file contains the password for the sketchloop user pswd = auth.values[0][0] key = auth.values[0][0] user = 'sketchloop' host = 'rxdhawkins.me' ## cocolab ip address # have to fix this to be able to analyze from local import pymongo as pm conn = pm.MongoClient('mongodb://sketchloop:' + pswd + '@127.0.0.1') db = conn['semantic_parts'] coll = db['sketchpad_basic'] # which iteration name should we use? iterationName = 'pilot0' num_sketches = coll.find({'iterationName':iterationName}).count() print 'We have {} annotations so far.'.format(num_sketches) jefan = ['A1MMCS8S8CTWKU','A1MMCS8S8CTWKV','A1MMCS8S8CTWKS'] hawkrobe = ['A1BOIDKD33QSDK'] kmukherjee = ['A1WU4IHJNQGVAY'] researchers = jefan + hawkrobe + kmukherjee unique_assignments = coll.find({'iterationName':iterationName}).distinct('aID') print 'We have had {} unique sessions'.format( len(unique_assignments)) ## get list of unique_assignments unique_assignments = coll.find({'iterationName':iterationName}).distinct('aID') ### initialize a bunch of stuff orig_gameID = [] # the gameID from which this sketch was sourced outcome =[] #original outcome for that trial- true/false orig_trial_num = [] # the trialnum in the original game from which this sketch was sourced -- sketch_id = [] # concatenation of orig_gameID and orig_trial_num -- assignmentID = [] # the session in which this annotation was collected -- annotation_id = [] # the unique ID for each annotation trial (different for each session the same sketch appears in) category = [] # e.g., "chair" target = [] # e.g., "inlay" condition = [] # e.g., "closer" vs. "further" or "repeated" vs. 
"control trial_num = [] workerID = [] #mTurk workerId spline_id =[] #unique spline identifier time_submitted = [] # when the participant clicked "next sketch" time_labeled = [] # unique to each spline labeled time_clicked = [] # when this spline was clicked/selected num_strokes_in_sketch = [] # how many strokes in this sketch num_splines_in_sketch = [] # how many spline elements in this sketch stroke_num = [] # which stroke number this labeled spline came from cumulative_spline_num = [] # spline index in the cumulative spline sequence for the entire sketch within_stroke_spline_num = [] # spline index for the current stroke cumulative_bout_num= [] #which bout of annotation the spline belonged to part_bout_num =[] #which part-specific bout of annotation the spline belonged to label = [] # the label provided by the participant spline_svg_string = [] # the svg spline string that earned this label sketch_svg_string = [] # the entire svg string correponding to this sketch annotation_flag = [] # this is True if all splines were labeled as the same thing annotation_spline_id = [] #unique identifier for specific annotation of a spline png=[] #png string for the annotated sketch stroke_id=[] ## loop through all the unique assignments that have submitted things for this_assignment, aID in enumerate(unique_assignments): if this_assignment%100==0: print 'Analyzing sketches from assignment {} of {} ...'.format(this_assignment, len(unique_assignments)) ### get all the sketch recs for this assignment sketch_recs = coll.find({'$and': [{'iterationName':iterationName}, {'aID':aID}]}).sort('time') try: for sketch_ind,sketch in enumerate(sketch_recs): ## get annotations embedded within record sketch_cat = sketch['category'] annotations_string = sketch['annotations'] ## convert to json dictionary _annotations_dict = json.loads(annotations_string) annotations_dict = _annotations_dict[0][sketch_cat] png_string = _annotations_dict[0]['png'] num_splines = len(annotations_dict) for annotation in annotations_dict: assert sketch['numSplines']==num_splines ## get spline-level metadata workerID.append(h.encode(key,sketch['wID'])) label.append(annotation['label']) stroke_num.append(annotation['strokeNum']) spline_svg_string.append(annotation['svgString']) cumulative_spline_num.append(annotation['cumulativeSplineNum']) within_stroke_spline_num.append(annotation['withinStrokeSplineNum']) time_clicked.append(annotation['timeClicked']) time_labeled.append(annotation['timeLabeled']) spline_id.append('{}_{}_{}'.format(sketch['originalGameID'],sketch['originalTrialNum'],annotation['cumulativeSplineNum'])) stroke_id.append('{}_{}_{}'.format(sketch['originalGameID'],sketch['originalTrialNum'],annotation['strokeNum'])) cumulative_bout_num.append(annotation['boutNum']) part_bout_num.append(annotation['partBoutNum']) ## get sketch-level metadata orig_gameID.append(sketch['originalGameID']) outcome.append(sketch['originalOutcome']) orig_trial_num.append(sketch['originalTrialNum']) sketch_id.append('{}_{}'.format(sketch['originalGameID'],sketch['originalTrialNum'])) annotation_id.append('{}_{}_{}'.format(sketch['originalGameID'],sketch['originalTrialNum'],sketch['aID'])) assignmentID.append(sketch['aID']) category.append(sketch['category']) target.append(sketch['target']) png.append(png_string) condition.append(sketch['condition']) time_submitted.append(sketch['time']) trial_num.append(sketch['trialNum']) num_splines_in_sketch.append(sketch['numSplines']) num_strokes_in_sketch.append(sketch['numStrokes']) 
sketch_svg_string.append(sketch['svg']) annotation_flag.append(sketch['sameAnnotflag']) annotation_spline_id.append('{}_{}_{}_{}'.format(sketch['originalGameID'],sketch['originalTrialNum'],sketch['aID'],annotation['cumulativeSplineNum'])) except AssertionError: print 'There were unequal numbers for sketch["numSplines"] vs. num_splines for sketch {} from {}'.\ format(sketch['trialNum'], sketch['aID']) ## make group dataframe D = pd.DataFrame([workerID,orig_gameID, orig_trial_num, outcome, sketch_id, category, assignmentID, target, \ annotation_id, condition, trial_num, time_submitted,\ time_labeled, time_clicked, num_strokes_in_sketch, num_splines_in_sketch,\ stroke_num, cumulative_spline_num, within_stroke_spline_num, cumulative_bout_num,\ part_bout_num, label, spline_svg_string, sketch_svg_string, spline_id, stroke_id,\ annotation_spline_id,png]) D = D.transpose() D.columns = ['workerID','orig_gameID', 'orig_trial_num','outcome', 'sketch_id', 'category', 'assignmentID', 'target',\ 'annotation_id', 'condition', 'trial_num', 'time_submitted',\ 'time_labeled', 'time_clicked', 'num_strokes_in_sketch', 'num_splines_in_sketch',\ 'stroke_num', 'cumulative_spline_num', 'within_stroke_spline_num', 'cumulative_bout_num', 'part_bout_num', 'label',\ 'spline_svg_string', 'sketch_svg_string', 'spline_id','stroke_id','annotation_spline_id','png'] D=D[D['assignmentID']!=''] print 'Annotations dataframe contains {} rows and {} columns.'.format(D.shape[0],D.shape[1]) ##Check to see what dataframe looks like D.head() ###Changing the NAs to "None" strings for ind, row in D.iterrows(): if row['label'] is None: row['label'] = "None" ##Creating a dictionary of dictionaries that maps user input labels to our main labels of interest maplist_dict={} maplist_dict['car'] ={'body':['body','59 decal','Body and hood','Body and windshield','Gas Cap', 'gas tank','Logo','Number','Number Decal','logo','grill',\ 'Grille','Grill','hubcap','seat','grille','ROOF','Roof','roof','number','59 decal','side mirror','Roof Panel',\ 'Undercarriage','numbers','rearview mirror','NUMBER','Top','top','Racing Decal','Side Mirror'], 'bumper':['bumper','Fender','fender','fender well','front bumber','Bumper','Bumper and Hood','step'], 'door':['door','DOOR HANDLE','door handle','handle','window'], 'headlight':['headlight','taillight'], 'hood':['hood','hood release','Hood Ornament','mirror','Mirror'], 'trunk':['trunk','Exhaust'], 'unknown':['Letter R','Letter e','Letter D','letter D','Says the word Drive','unknown','text','Wind','eye','Arrow','Light Beams',\ 'Light beams','Tree','hand','horn','Word',"it's just words, no picture to label",'words','Pavement','Payement'], 'wheel':['rim','Tire','tire','wheel','wheel well','Axle','spokes','Spells the word Red'], 'window':['window'], 'windshield':['windshield','Steering wheel'] } maplist_dict['bird']={'beak':['beak'], 'body':['body','chest','back','speckles','Markings','markings','Coloring','coloring'], 'eye':['eye'], 'feet':['feet'], 'head':['head','neck'], 'leg':['leg'], 'tail':['tail'], 'wing':['wing','feather','feathers'], 'unknown':['unknown','B','I','R','D','This isnt a bird','not sketch','c','h',\ 'i','r','p','Not a bird: The word "orange"','sky']} maplist_dict['dog']={'body':['body','chest','Stomach','back','butt','Butt','fur','fur ','both head and body','Back',\ 'Belly',], 'ear':['ear'], 'eye':['eye'], 'head':['head','neck', 'Nose','nose','Nostrils','snout','NOSE','Snout','face','mask'], 'leg':['leg'], 'mouth':['mout','tongue','muzzle','jaw','Tongue','Muzzle','chin'], 'neck':['neck'], 
'paw':['paw','foot'], 'tail':['tail'], 'unknown':['unknown','Straight line in the letter "D"','Curved part of the letter "D"','left half of the letter "O"',\ 'Right part of the letter "O"','Letter "G"','cheating','Person just wrote words','Non-Animal',\ 'not a vaild pict of a dog','letter','W','o','f','word','letter b','Shadow','SHadow','Text',\ 'spelling of dog','smiley face','O','F','R','K','Words "Woof Bark"','not a drawing of a dog','Word','color']} maplist_dict['chair']={'armrest':['armrest','sides','support slats','support slat','armrest support','decorative wood pane',\ 'side spindles','Chair frame','Side support','Design Elements','Leg and armrest','arm rest',\ 'bars','bar'], 'backrest':['backrest','headrest','Spindle','spindles','spindels'], 'seat':['seat', 'Chair support','cushion'], 'leg':['leg','bottom frame','spindle','Support Bar','wheel','leg rail','leg support','support beam',\ 'Wheel','bottom brace','stretcher','supporting wood','Leg support','top of leg','foot',\ 'Reinforcement for legs','Brace','supports','support for legs','Bottom Support','Leg support',\ 'Stretcher','wood beam connecting legs','Wood beam connecting legs','brace','braces','Struts'], 'unknown':['unknown','frame','Descriptive label','letters','Not a chair','Frame','Decoration','Structure',\ 'name ','Label','Words - Bulky Garage','Part of O','letter r','Part of letter a',\ 'Part of letter n','Part of letter g','Part of letter e','Part of arrow',\ 'The word "sit"','word']} for this_cat in unique_cats: maplist=maplist_dict[this_cat] reversed_dict = {val: key for key in maplist for val in maplist[key]} D.loc[D['category']==this_cat,'label']=D[D['category']==this_cat]['label'].map(reversed_dict).fillna(D['label']) #Get a count of how many unique sketches have been annotated unique_sketches = np.unique(D['sketch_id'].values) print 'We have {} unique sketches.'.format(len(unique_sketches)) ###How many unique annotations do we have in total? len(D['annotation_id'].unique()) ###Removing any annotations that don't have all splines annotated for this_sketch in unique_sketches: DS=D[D['sketch_id']==this_sketch] for this_annot in np.unique(DS['annotation_id']): DSS= DS[DS['annotation_id']==this_annot] if DSS[DSS['label']== 'None'].shape[0]>0: D=D[D['annotation_id']!=this_annot] ##How many annotations after filtering? len(D['annotation_id'].unique()) ###Output _____no_output_____ ###Markdown Visualizations and desriptive statistics Number of annotations per sketch ###Code ## get number of times each sketch has been annotated num_times_annotated = [] for this_sketch_id in unique_sketches: num_times_annotated.append(D[D['sketch_id']==this_sketch_id]['assignmentID'].nunique()) ## make a histogram sns.set_context('talk') plt.figure(figsize=(6,5)) h = plt.hist(num_times_annotated) plt.xticks(np.arange(0, 4, step=1)) plt.title('Times each sketch has been annotated') plt.ylabel('number of sketches') ###Subesttting for sketches that have been annotated 3 times num_annots=3 ##Why are some assignment IDs blank? D=D[D['assignmentID']!=''] for this_sketch_id in unique_sketches: if D[D['sketch_id']==this_sketch_id]['assignmentID'].nunique()!=num_annots: D=D[D['sketch_id']!=this_sketch_id] unique_sketches = np.unique(D['sketch_id'].values) ##How many sketches do we have with 3 annotations? 
len(np.unique(D.sketch_id)) ###Output _____no_output_____ ###Markdown Looking at time taken to annotate ###Code ##Make sure the number of splines for each sketch is consistent across annotations for this_sketch in unique_sketches: assert len(np.unique(D[D['sketch_id']==this_sketch]['num_splines_in_sketch'].values))==1 unique_annotation_trials = np.unique(D['annotation_id'].values) ## get annotation time for each annotation trial annotation_time = [] spline_number_in_sketch = [] for this_annotation_trial in unique_annotation_trials: earliest_click = float(np.min(D[D['annotation_id']==this_annotation_trial]['time_clicked'])) ## all of the splines were submitted at the same time, so time_submitted should be identical for all splines in an annotation trial assert len(np.unique(D[D['annotation_id']==this_annotation_trial]['time_submitted'].values))==1 final_submission = np.unique(D[D['annotation_id']==this_annotation_trial]['time_submitted'].values)[0] annotation_time.append(final_submission-earliest_click) if np.isnan(final_submission) or np.isnan(earliest_click): print 'One of these timestamps is a NaN. Probably means that the participant skipped this trial:' print this_annotation_trial print 'final_submission: {}, earliest_click: {}'.format(final_submission, earliest_click) #### then we will extract how "complex" each sketch i assert len(np.unique(D[D['annotation_id']==this_annotation_trial]['num_splines_in_sketch'].values))==1 spline_number_in_sketch.append(np.unique(D[D['annotation_id']==this_annotation_trial]['num_splines_in_sketch'])[0]) ## convert annotation time to seconds annotation_time_seconds = np.array(annotation_time)/1000 ## make dataframe with annotation time and spline number unique_annotation_trials, spline_number_in_sketch, annotation_time_seconds = map(list, [unique_annotation_trials, spline_number_in_sketch, annotation_time_seconds]) T = pd.DataFrame([unique_annotation_trials,spline_number_in_sketch,annotation_time_seconds]) T = T.transpose() T.columns = ['annotation_trial','spline_number_in_sketch','annotation_time'] ## some preprocessing of T import analysis_helpers as h if sys.version_info[0]>=3: from importlib import reload reload(h) ## make numeric types T = h.convert_numeric(T,'spline_number_in_sketch') T = h.convert_numeric(T,'annotation_time') ## also remove the skipped trial where annotation time is a NaN T = T[~np.isnan(T['annotation_time'])] ## make scatterplot of relationship between annotation time and spline number plt.figure(figsize=(6,6)) sns.scatterplot(x='annotation_time', y='spline_number_in_sketch', data=T) plt.ylabel('number of splines in sketch') plt.xlabel('annotation time (s)') plt.title('Do more complex sketches take longer to annotate?') plt.xlim(0,60*5) ## 5 minute cutoff ... r, p = stats.spearmanr(T['annotation_time'],T['spline_number_in_sketch']) print 'Spearman correlation between annotation time and spline number in sketch' print 'r = {}, p = {}'.format(r,p) ###Output _____no_output_____ ###Markdown Looking at part occurrence ###Code ## get the list of unique labels applied to sketches unique_labels = np.unique(D.label.values) ## Removing Nones and obviously wrong super long lables unique_labels = [i for i in unique_labels if i is not None] unique_labels = [i for i in unique_labels if len(i)<900] print 'we have {} unique labels'.format(len( unique_labels)) ##Get a list of categories unique_cats = np.unique(D['category']) ##Create empty dictionary with categories as keys. 
We will use this to store part occurrence data for our categories label_vect_dict = {unique_cats[0]:None,unique_cats[1]:None,unique_cats[2]:None,unique_cats[3]:None} ##Create vectors that contain the number of part instances in each sketch for category in unique_cats: DS= D[D['category']==category] unique_sketches_in_cat = np.unique(DS['sketch_id']) unique_labels_in_cat = np.unique(DS['label']) ## initialize matrix that has the correct dimensions Label_Vec = np.zeros((len(unique_sketches_in_cat),len(unique_labels_in_cat)), dtype=int) unique_labels_in_cat= np.array(unique_labels_in_cat) for s,this_sketch in enumerate(unique_sketches_in_cat): label_vec = np.zeros(len(unique_labels_in_cat),dtype=int) DSS = DS[DS['sketch_id']==this_sketch] annotation_ids = np.unique(DSS['annotation_id'].values) for this_annotation in annotation_ids: DSA = DSS[DSS['annotation_id']==this_annotation] label_list = DSA.label.values for this_label in label_list: label_ind = unique_labels_in_cat==this_label label_vec[label_ind] += 1 Label_Vec[s,:]=label_vec/num_annots label_vect_dict[category]= Label_Vec #D['label']=D['label'].map(reversed_dict).fillna(D['label']) valid_labels=[] valid_labels_dict={} for category in unique_cats: vect = label_vect_dict[category] thresh = 50 #print 'These are the labels that appear at least {} times:'.format(thresh) #print unique_labels[np.sum(Label_Vec,0)>thresh] unique_labels_in_cat = np.unique(D[D['category']==category]['label']) plot_labels= unique_labels_in_cat[np.sum(vect,0)>thresh] valid_labels_dict[category]=plot_labels valid_labels.append(plot_labels) prop_labels=[] for part in plot_labels: DS=D[D['category']==category] prop_labels.append(DS[DS['label']==part]['annotation_id'].nunique()/DS['annotation_id'].nunique()) sns.set_context('talk') plt.figure(figsize=(12,7)) plt.ylim(0,1) h = plt.bar(plot_labels,prop_labels) plt.title('Proportion of {} annotations with labels'.format(category)) plt.ylabel('proportion of annotations') plt.xlabel('Part') ##flattening valid labels valid_labels = [item for sublist in valid_labels for item in sublist] len(np.unique(valid_labels)) ##Creating a stroke-level dataframe that takes the mode value of annotation for its children splines to set as its ##label value from collections import Counter from collections import OrderedDict stroke_svgs=OrderedDict() for category in unique_cats: DS=D[D['category']==category] for sketch in np.unique(DS['sketch_id']): DSS=DS[DS['sketch_id']==sketch] for stroke in np.unique(DSS['stroke_num']): DSA=DSS[DSS['stroke_num']==stroke] DSA=DSA.reset_index() stroke_svgs[DSA['stroke_id'][0]] = DSA['sketch_svg_string'][0][stroke] stroke_svg_df= pd.DataFrame.from_dict(stroke_svgs, orient='index') stroke_group_data= D.groupby('stroke_id').agg(lambda x: Counter(x).most_common(1)[0][0]) labels= pd.DataFrame(stroke_group_data[['sketch_id','label','stroke_num','condition','target','category','outcome']]) stroke_df=pd.merge(stroke_svg_df,labels,left_index=True, right_index =True) stroke_df.reset_index(level=0, inplace=True) stroke_df=stroke_df.rename(index=str, columns={"index": "stroke_id", 0: "svg"}) ##Adding total arclength information to stroke dataframe from svgpathtools import parse_path import svgpathtools def calculate_arclength(svg): try: arclength= parse_path(svg).length() except ZeroDivisionError: print 'zero div error' arclength = 0 return arclength stroke_df['arc_length'] = stroke_df['svg'].apply(calculate_arclength) ##Adding total arclength information to stroke dataframe # from svgpathtools import parse_path # 
import svgpathtools # stroke_df['arc_length'] = "" # for s,stroke in stroke_df.iterrows(): # try: # stroke_df['arc_length'][s] = parse_path(stroke['svg']).length() # except ZeroDivisionError: # print 'zero div error' # stroke_df['arc_length'][s] = 0 #Saving out the PNG for the sketches run=True if run==True: from matplotlib.pyplot import imshow import base64 #num_diff_annots = [] for this_sketch_id in unique_sketches: DS=D[D['sketch_id']==this_sketch_id] unique_splines = np.unique(DS['cumulative_spline_num']) for i,this_spline in enumerate(unique_splines): DSS =DS[DS['cumulative_spline_num']==this_spline] num_diff_annots= len(np.unique(DSS['label'])) if num_diff_annots>0: ##Will update this conditional once we have more annots for instance in np.unique(DS['annotation_id']): imgdata = base64.b64decode(DS[DS['annotation_id']==instance].iloc[0]['png']) filename = '{}_{}'.format(instance, DS[DS['annotation_id']==instance].iloc[0]['target']) # I assume you have a way of picking unique filenames with open(filename, 'wb') as f: f.write(imgdata) im = Image.open(filename) plt.figure() imshow(im) plt.xticks([]) plt.yticks([]) #plt.savefig(xx) run=False ###Output _____no_output_____ ###Markdown Making part-transition matrices ###Code ####Setting up for building transition matrices for each different category from itertools import product as p run=False if run==True: tm_dict={} for category in unique_cats: stroke_df_s = stroke_df[stroke_df['category']==category] num_uniq_labs = len(np.unique(stroke_df_s['label'])) temp_array = np.zeros([num_uniq_labs*num_uniq_labs,2],dtype='|S50') ind=0 for roll in p(stroke_df_s['label'].unique().tolist(), repeat = 2): temp_array[ind,]= roll ind+=1 for sketch in stroke_df_s.sketch_id.unique(): sketch_df= stroke_df_s[stroke_df_s.sketch_id==sketch] sketch_df['incr_stroke_num'] = sketch_df['stroke_num']+1 tm_df=sketch_df.merge(sketch_df, right_on='stroke_num', left_on='incr_stroke_num', how='inner' ) plot_matrix_x= tm_df.label_x.append(pd.Series(temp_array[:,0])) plot_matrix_y=tm_df.label_y.append(pd.Series(temp_array[:,1])) ct_df= pd.crosstab(plot_matrix_x, plot_matrix_y)-1 #ct_df= pd.crosstab(tm_df.label_x, tm_df.label_y) mat= np.matrix(ct_df).sum() plot_df = ct_df.div(mat, axis=0).round(2) plot_df=plot_df.fillna(0) tm_dict[sketch]= plot_df for category in unique_cats: cat_matrices=[] DS=D[D['category']==category] for sketch in DS['sketch_id'].unique(): cat_matrices.append(tm_dict[sketch]) fig,ax=plt.subplots(1, 1, figsize = (10, 8), dpi=150) agg_matrix= sum(cat_matrices) divider=np.matrix(agg_matrix).sum() agg_matrix = agg_matrix.div(divider, axis=0).round(2) sns.heatmap(agg_matrix,cmap="YlGnBu", annot=True) plt.title("Part transition matrix for {}".format(category)) ax.set_ylabel('') ax.set_xlabel('') run=False ## Getting the number of unique labels assigned to a given spline across annotations num_diff_annots = [] for this_cat in unique_cats: DS=D[D['category']==this_cat] labels = valid_labels_dict[this_cat] unique_sketches_in_cat=np.unique(DS['sketch_id']) for this_sketch_id in unique_sketches_in_cat: DSA=DS[DS['sketch_id']==this_sketch_id] unique_splines = np.unique(DSA['cumulative_spline_num']) for i,this_spline in enumerate(unique_splines): DSB =DSA[DSA['cumulative_spline_num']==this_spline] numannots= 4-len(np.unique(DSB['label'])) if numannots==0: numannots=1 num_diff_annots.append(numannots) #plotting variability in spline annots h= plt.hist(num_diff_annots, bins= range(1,5), align='left', density='True') plt.title('Inter-annotator reliability') 
plt.ylabel('proportion of splines') plt.xlabel('Annotator agreement on label') plt.xticks([1,2,3],['0/3','2/3','3/3']) spline_df= D.groupby('spline_id').agg(lambda x: Counter(x).most_common(1)[0][0]) spline_df.reset_index(level=0, inplace=True) for this_cat in unique_cats: labels = valid_labels_dict[this_cat] DS=spline_df[spline_df['category']==this_cat] spline_annots_per_stroke = [] unique_sketches_in_cat= np.unique(DS['sketch_id']) for this_sketch_id in unique_sketches_in_cat: DSA=DS[DS['sketch_id']==this_sketch_id] unique_strokes = np.unique(DSA['stroke_num']) for i,this_stroke in enumerate(unique_strokes): DSB =DSA[DSA['stroke_num']==this_stroke] numlabels= DSB['label'].nunique() spline_annots_per_stroke.append(numlabels) h= plt.hist(spline_annots_per_stroke, bins =range(1,8), align='left', density="True") plt.title('Within-stroke label agreement') plt.ylabel('proportion of strokes') plt.xlabel('number of different labels within stroke') for this_cat in unique_cats: DS=stroke_df[stroke_df['category']==this_cat] labels= valid_labels_dict[this_cat] strokes_in_part_vect = np.zeros((len(np.unique(DS['sketch_id']))*len(labels),3), dtype='|a1000') ind=0 for this_sketch in np.unique(DS['sketch_id']): DSA= DS[DS['sketch_id']==this_sketch] for this_label in labels: DSB=DSA[DSA['label']==this_label] strokes_in_part_vect[ind,]=[this_sketch, this_label,len(np.unique(DSB['stroke_num']))] ind+=1 strokes_in_part_vect=strokes_in_part_vect[~np.all(strokes_in_part_vect == '', axis=1)] strokes_in_part_df= pd.DataFrame(strokes_in_part_vect, columns=['sketch_id','part','num_strokes']) strokes_in_part_df['num_strokes']=pd.to_numeric(strokes_in_part_df['num_strokes']) plt.figure() b=sns.barplot(x='part',y='num_strokes',data=strokes_in_part_df,ci=95,capsize=0.3, errwidth= 3) for item in b.get_xticklabels(): item.set_rotation(45) ##Creating a dictionary of sketch_id with associated part sequences seq_dict={} for this_sketch in np.unique(stroke_df['sketch_id']): parts_list=[] DS=stroke_df[stroke_df['sketch_id']==this_sketch] for i, row in DS.iterrows(): parts_list.append(stroke_df['label'][i]) seq_dict[this_sketch]=parts_list ##functions for getting 'mean streak_length' from a particular sketch for ground truth and scrambled part orders import random def get_mean_streak(sketch_id): parts = seq_dict[sketch_id] streak_counter=1 list_of_streaks=[] for obj in range(len(parts)-1): if parts[obj]==parts[obj+1]: streak_counter+=1 else: list_of_streaks.append(streak_counter) streak_counter=1 list_of_streaks.append(streak_counter) return np.mean(list_of_streaks) def get_scramble_mean_streak(sketch_id): parts = seq_dict[sketch_id] scram_parts=random.sample(parts,len(parts)) streak_counter=1 list_of_streaks=[] for obj in range(len(scram_parts)-1): if scram_parts[obj]==scram_parts[obj+1]: streak_counter+=1 else: list_of_streaks.append(streak_counter) streak_counter=1 list_of_streaks.append(streak_counter) return np.mean(list_of_streaks) #Iterating over all sketches to get mean streakiness for each sketch_id gt_streak_mean={} for this_cat in unique_cats: DS= stroke_df[stroke_df['category']==this_cat] streak_mean_list=[] for this_sketch in np.unique(DS['sketch_id']): streak_mean_list.append(get_mean_streak(this_sketch)) gt_streak_mean[this_cat]=np.mean(streak_mean_list) gt_streak_mean single_stroke_sketches=[] single_label_sketches=[] strokes_equal_labels_sketches=[] for this_sketch in stroke_df.sketch_id.unique(): stroke_df_s= stroke_df[stroke_df['sketch_id']==this_sketch] if stroke_df_s.stroke_num.nunique()==1: 
single_stroke_sketches.append(this_sketch) if stroke_df_s.label.nunique()==1: single_label_sketches.append(this_sketch) if stroke_df_s.label.nunique()== stroke_df_s.stroke_num.nunique(): strokes_equal_labels_sketches.append(this_sketch) ss_sketches_labels={} sl_sketches_numstrokes={} sel_sketches_labels={} for this_sketch in single_stroke_sketches: ss_sketches_labels[this_sketch] = stroke_df[stroke_df['sketch_id']==this_sketch].label for this_sketch in single_label_sketches: sl_sketches_numstrokes[this_sketch]=stroke_df[stroke_df['sketch_id']==this_sketch].stroke_num.nunique() for this_sketch in strokes_equal_labels_sketches: sel_sketches_labels[this_sketch]=stroke_df[stroke_df['sketch_id']==this_sketch].label.unique() _donotpermute=single_stroke_sketches + single_label_sketches + strokes_equal_labels_sketches donotpermute=np.unique(_donotpermute).tolist() ##z-score of gt gt_streak_zscore={} for this_target in stroke_df.target.unique(): DA=stroke_df[stroke_df['target']==this_target] for this_sketch in DA.sketch_id.unique(): if this_sketch not in donotpermute: permuted_streak_list = [] for i in range(1000): permuted_streak_list.append(get_scramble_mean_streak(this_sketch)) try: assert np.isnan((get_mean_streak(this_sketch)-np.mean(permuted_streak_list))/np.std(permuted_streak_list)) == False gt_streak_zscore[this_sketch]=(get_mean_streak(this_sketch)-np.mean(permuted_streak_list))/np.std(permuted_streak_list) except AssertionError: print stroke_df[stroke_df.sketch_id==this_sketch].stroke_num.nunique(),stroke_df[stroke_df.sketch_id==this_sketch].label.nunique() def calculate_CI(data, confidence=0.95): a = 1.0 * np.array(data) n = len(a) m, se = np.mean(a), stats.sem(a) h = se * stats.t.ppf((1 + confidence) / 2., n-1) return m, m-h, m+h for this_target in stroke_df.target.unique(): DA=stroke_df[stroke_df['target']==this_target] _sketch_ids= DA.sketch_id.unique() _sketch_ids = [x for x in _sketch_ids if x not in donotpermute] z_scores_sub=dict((k, gt_streak_zscore[k]) for k in _sketch_ids) plt.figure() plt.title('True mean streak Z-score Distribution for {}'.format(this_target)) h=sns.distplot(z_scores_sub.values(),kde=False,hist=True,norm_hist=False) print calculate_CI(z_scores_sub.values()) for this_cond in stroke_df.condition.unique(): DA=stroke_df[stroke_df['condition']==this_cond] _sketch_ids= DA.sketch_id.unique() _sketch_ids = [x for x in _sketch_ids if x not in donotpermute] z_scores_sub=dict((k, gt_streak_zscore[k]) for k in _sketch_ids) plt.figure() plt.title('True mean streak Z-score Distribution for {}'.format(this_cond)) h=sns.distplot(z_scores_sub.values(),kde=False,hist=True,norm_hist=False) print 'mean and CI for {} condition'.format(this_cond), calculate_CI(z_scores_sub.values()) np.mean(gt_streak_zscore.values()) streak_diff_dict={} for this_cat in unique_cats: mean_streak_diff_list=[] DS=stroke_df[stroke_df['category']==this_cat] for i in range(1000): scrambled_streaks=[] real_streaks=[] for sketch in np.unique(DS['sketch_id']): scrambled_streaks.append(get_scramble_mean_streak(sketch)) real_streaks.append(get_mean_streak(sketch)) mean_streak_diff_list.append(np.mean(real_streaks)-np.mean(scrambled_streaks)) streak_diff_dict[this_cat]=mean_streak_diff_list len(streak_diff_dict['bird']) def CIPlot(category): stroke_df_lite_ss=stroke_df[stroke_df['category']==category] mean_streak_diff_list=[] for i in range(1000): this_round_scrambled_streak=[] this_round_real_streak=[] for sketch in np.unique(stroke_df_lite_ss['sketch_id']): 
this_round_real_streak.append(get_mean_streak(sketch)) this_round_scrambled_streak.append(get_scramble_mean_streak(sketch)) mean_streak_diff_list.append(np.mean(this_round_real_streak)-np.mean(this_round_scrambled_streak)) perm_observed_mean_streak_diff = np.mean(mean_streak_diff_list) lb=np.percentile(mean_streak_diff_list,2.5) ub=np.percentile(mean_streak_diff_list,97.5) plt.figure(figsize=(10,8)) h=sns.distplot(mean_streak_diff_list,kde=False,hist=True,norm_hist=False) plt.axvline(perm_observed_mean_streak_diff, color='yellow', linestyle='solid', linewidth=2) plt.axvline(lb, color='orange', linestyle='solid', linewidth=2) plt.axvline(ub, color='orange', linestyle='solid', linewidth=2) plt.title(category) plt.ylabel('count') plt.xlabel('streak length difference') plt.legend(['mean','95% CI'], ncol=2, bbox_to_anchor=(1, 1.05)) plt.savefig(os.path.join(plot_dir,'Streakiness Diff'),edgecolor='w',bbox_inches='tight') plt.show() return np.mean(mean_streak_diff_list), np.std(mean_streak_diff_list) def CIPlotCatCond(category,condition): stroke_df_lite_ss=stroke_df[(stroke_df['category']==category)&(stroke_df['condition']==condition)] mean_streak_diff_list=[] for i in range(1000): this_round_scrambled_streak=[] this_round_real_streak=[] for sketch in np.unique(stroke_df_lite_ss['sketch_id']): this_round_real_streak.append(get_mean_streak(sketch)) this_round_scrambled_streak.append(get_scramble_mean_streak(sketch)) mean_streak_diff_list.append(np.mean(this_round_real_streak)-np.mean(this_round_scrambled_streak)) perm_observed_mean_streak_diff = np.mean(mean_streak_diff_list) lb=np.percentile(mean_streak_diff_list,2.5) ub=np.percentile(mean_streak_diff_list,97.5) plt.figure(figsize=(10,8)) h=sns.distplot(mean_streak_diff_list,kde=False,hist=True,norm_hist=False) plt.axvline(perm_observed_mean_streak_diff, color='yellow', linestyle='solid', linewidth=2) plt.axvline(lb, color='orange', linestyle='solid', linewidth=2) plt.axvline(ub, color='orange', linestyle='solid', linewidth=2) plt.title('{}_{}'.format(category,condition)) plt.ylabel('count') plt.xlabel('streak length difference') plt.legend(['mean','95% CI'], ncol=2, bbox_to_anchor=(1, 1.05)) plt.savefig(os.path.join(plot_dir,'mean_streak_difference_{}_{}'.format(category, condition)),edgecolor='w',bbox_inches='tight') plt.show() return perm_observed_mean_streak_diff, lb, ub for this_cat in unique_cats: CIPlot(this_cat) for this_condition in np.unique(stroke_df['condition']): for this_category in np.unique(stroke_df['category']): CIPlotCatCond(this_category, this_condition) D_C= D[D['category']=='dog'] D_C.groupby('label').mean ###Output _____no_output_____ ###Markdown Looking at stroke variation between parts ###Code ###Investigating how stroke length varies between parts given a category of objects for category in unique_cats: DS= D[D['category']==category] num_unique_labs = len(DS['label'].unique()) strokes_in_part_vect = np.zeros((len(np.unique(DS['annotation_id']))*num_unique_labs,3), dtype='|a1000') ind=0 for this_annotation in np.unique(DS['annotation_id']): DSA= DS[DS['annotation_id']==this_annotation] for this_label in np.unique(DS['label']): DSB=DSA[DSA['label']==this_label] strokes_in_part_vect[ind,]=[this_annotation, this_label,len(np.unique(DSB['stroke_num']))] ind+=1 strokes_in_part_vect=strokes_in_part_vect[~np.all(strokes_in_part_vect == '', axis=1)] strokes_in_part_df= pd.DataFrame(strokes_in_part_vect, columns=['annotation_id','part','num_strokes']) 
strokes_in_part_df['num_strokes']=pd.to_numeric(strokes_in_part_df['num_strokes']) fig,ax=plt.subplots(1, 1, figsize = (10, 8), dpi=150) sns.barplot(x='part',y='num_strokes',data=strokes_in_part_df,ci=95,capsize=0.3, errwidth= 3) plt.title('Average number of strokes for {} parts'.format(category)) plt.ylabel('Number of strokes') plt.xlabel('Part') ##Investigating how arc length varies between parts for category in unique_cats: plt.figure(figsize=(10,8)) sns.barplot(x='label', y='arc_length', data=stroke_df[stroke_df['category']=='dog'], ci=68,capsize=0.3, errwidth= 3) plt.title('Average stroke lengths for {} parts'.format(category)) plt.ylabel('arc length') plt.xlabel('part') ###Investigating how arc length varies between parts x conditions for category in unique_cats: plt.figure(figsize=(10,8)) sns.barplot(x='label',y='arc_length',hue='condition', data=stroke_df[stroke_df['category']==category],ci=68,capsize=0.3, errwidth= 3) plt.title('Average stroke lengths for {} parts'.format(category)) plt.ylabel('Arc length') plt.xlabel('Part') # ###Might Delete this - calculating average arc length per part manually#### # part_lengths={} # for category in unique_cats: # stroke_df_lite= stroke_df[stroke_df['category']==category] # arc_length_dict = OrderedDict() # for label in np.unique(stroke_df_lite['label']): # stroke_df_lite0= stroke_df_lite[stroke_df_lite['label']==label] # path_length=0 # for stroke in np.unique(stroke_df_lite0['stroke_id']): # path = parse_path(stroke_df_lite0[stroke_df_lite['stroke_id']==stroke]['svg'].iloc[0]) # path_length= path_length+path.length() # arc_length_dict[label]= path_length/len(stroke_df_lite0) # part_lengths[category]=arc_length_dict ###Output _____no_output_____ ###Markdown Looking at annotation bouts ###Code ### Investigating the number of parts for each part within a category. Does this number correspond to number ###of strokes with that part label? 
D['part_bout_num'] = pd.to_numeric(D['part_bout_num']) for category in unique_cats: DS=D[D['category']==category] unique_labels_in_cat= DS['label'].unique() plot_vect = np.zeros([1,2], dtype='|a20') for label in unique_labels_in_cat: DSA=DS[DS['label']==label] total_bouts=0 for sketch in DSA['sketch_id'].unique(): DSB= DSA[DSA['sketch_id']==sketch] num_bouts = len(DSB['part_bout_num'].unique()) temp= np.array([label, num_bouts], dtype= '|a20') plot_vect=np.vstack((plot_vect, temp)) plot_vect=np.delete(plot_vect, (0), axis=0) plot_df= pd.DataFrame(plot_vect, columns=['label','num_bouts']) plot_df['num_bouts']=pd.to_numeric(plot_df['num_bouts']) plt.figure(figsize=(10,8)) sns.barplot(x='label',y='num_bouts',data= plot_df,capsize=0.3) plt.title('Average bout numbers for {} parts'.format(category)) plt.ylabel('Average Number of Bouts') plt.xlabel('Part') #label_group_data=DS.groupby('label').agg({'part_bout_num':"mean"}) ###Output _____no_output_____ ###Markdown Creating sketch vectors ###Code from scipy import stats ####Creating different vectors for each category, might look at this later label_vect_dict={} for cat in unique_cats: DS= stroke_df[stroke_df['category']==cat] unique_labels_in_cat=valid_labels_dict[cat] unique_sketches_in_cat=DS['sketch_id'].unique() Label_Vec = np.zeros((len(unique_sketches_in_cat),len(unique_labels_in_cat)*2), dtype=int) arc_length_vec = np.zeros((len(unique_sketches_in_cat),len(valid_labels_dict[cat])), dtype=int) for s,sketch in enumerate(unique_sketches_in_cat): label_vec = np.zeros(len(unique_labels_in_cat),dtype=int) arc_vec = np.zeros(len(unique_labels_in_cat),dtype=int) DSA=DS[DS['sketch_id']==sketch] label_list = DSA.label.values for label in label_list: if label in unique_labels_in_cat: label_ind = unique_labels_in_cat==label label_vec[label_ind] += 1 for label in unique_labels_in_cat: DSB=DSA[DSA['label']==label] label_ind = unique_labels_in_cat==label arc_vec[label_ind] = DSB['arc_length'].sum() Label_Vec[s,0:len(unique_labels_in_cat)]=label_vec Label_Vec[s,len(unique_labels_in_cat):len(unique_labels_in_cat)*2]=arc_vec label_vect_dict[cat]= Label_Vec ##z scoring for cat in unique_cats: label_vect_dict[cat] = stats.zscore(label_vect_dict[cat], axis=1, ddof=1) ###This is where we make a num unique labels * 2 X number of sketches vector feature_vec = np.zeros((len(stroke_df.sketch_id.unique()),len(valid_labels)*2), dtype=int) ind=0 start_pos=0 end_pos=0 meta_list=[] cols = ['sketch_id','target','condition','category','outcome'] for cat in unique_cats: DS= stroke_df[stroke_df['category']==cat] unique_labels_in_cat=valid_labels_dict[cat] unique_sketches_in_cat=DS['sketch_id'].unique() start_pos = end_pos end_pos+= len(unique_labels_in_cat) print start_pos, end_pos Label_Vec = np.zeros((len(unique_sketches_in_cat),len(unique_labels_in_cat)*2), dtype=int) arc_length_vec = np.zeros((len(unique_sketches_in_cat),len(valid_labels_dict[cat])), dtype=int) for s,sketch in enumerate(unique_sketches_in_cat): label_vec = np.zeros(len(unique_labels_in_cat),dtype=int) arc_vec = np.zeros(len(unique_labels_in_cat),dtype=int) DSA=DS[DS['sketch_id']==sketch] meta_list.append(pd.Series([DSA['sketch_id'],DSA['target'].unique(),DSA['condition'].unique(),DSA['category'].unique(),DSA['outcome'].unique()], index=cols)) label_list = DSA.label.values for label in label_list: if label in unique_labels_in_cat: label_ind = unique_labels_in_cat==label label_vec[label_ind] += 1 for label in unique_labels_in_cat: DSB=DSA[DSA['label']==label] label_ind = unique_labels_in_cat==label 
arc_vec[label_ind] = DSB['arc_length'].sum() feature_vec[ind,start_pos:end_pos]=label_vec feature_vec[ind,start_pos+len(valid_labels):end_pos+len(valid_labels)]=arc_vec ind+=1 feature_vec_meta = pd.DataFrame(meta_list, columns=cols) ##Changing column values from np arrays to strings/boolean def arr_to_str(arr): return (arr[0]) feature_vec_meta['target']=feature_vec_meta['target'].apply(arr_to_str) feature_vec_meta['condition']=feature_vec_meta['condition'].apply(arr_to_str) feature_vec_meta['category']=feature_vec_meta['category'].apply(arr_to_str) feature_vec_meta['outcome']=feature_vec_meta['outcome'].apply(arr_to_str) feature_df= pd.DataFrame(feature_vec, columns=[s + '_numstrokes' for s in valid_labels]+[s + '_total_arclength' for s in valid_labels]) ##creating a compressed version of the feature df with no duplicates for parts labs_numstrokes=[] labs_total_arclength=[] for lab in np.unique(valid_labels): labs_numstrokes.append(lab +'_numstrokes') labs_total_arclength.append(lab+'_total_arclength') feature_df_labs=labs_numstrokes+labs_total_arclength feature_df_final= pd.DataFrame(columns=feature_df_labs) for this_lab in feature_df_labs: duplicates=[col for col in feature_df if col.startswith(this_lab)] feature_df_final[this_lab]= feature_df[duplicates].sum(axis=1) feature_df = feature_df_final ##Check to make sure the df looks okay assert len(feature_df.columns)==len(np.unique(feature_df.columns)) feature_df.head() #Normalizing within row within measure (numstrokes/arclength) feature_df.iloc[:,0:int(len(feature_df.columns)/2)]=feature_df.iloc[:,0:int(len(feature_df.columns)/2)].div(feature_df.iloc[:,0:int(len(feature_df.columns)/2)].sum(axis=1),axis=0) feature_df.iloc[:,int(len(feature_df.columns)/2):int(len(feature_df.columns))]=feature_df.iloc[:,int(len(feature_df.columns)/2):int(len(feature_df.columns))].div(feature_df.iloc[:,int(len(feature_df.columns)/2):int(len(feature_df.columns))].sum(axis=1),axis=0) ###Execute this if we want to save a non-zscore matrix run=False if run==True: feature_df.to_csv(os.path.join(features_dir,'semantic_parts_sketch_features_compressed_non-whitened.csv')) run=False #z-scoring within columns cols=list(feature_df.columns) for this_col in cols: feature_df[this_col]=(feature_df[this_col] - feature_df[this_col].mean())/feature_df[this_col].std(ddof=0) ###Saving out matrices to csv/npy as needed feature_df.to_csv(os.path.join(features_dir,'semantic_parts_sketch_features_compressed.csv')) np.save(os.path.join(features_dir, 'semantic_parts_sketch_features'),feature_vec) feature_vec_meta.to_csv(os.path.join(features_dir,'semantic_parts_sketch_meta.csv')) #full_feature_df = pd.concat((feature_vec_meta,feature_df),axis=1) ###Output _____no_output_____ ###Markdown Some more vector visualization PCA ###Code from sklearn.decomposition import PCA #loading files we need feature_vec = np.load(os.path.join(features_dir,'semantic_parts_sketch_features.npy')) feature_df = pd.DataFrame.from_csv(os.path.join(features_dir,'semantic_parts_sketch_features_compressed.csv')) meta_df= pd.DataFrame.from_csv(os.path.join(features_dir,'semantic_parts_sketch_meta.csv')) for this_part in np.unique(valid_labels): feature_df.rename(columns={this_part:'{}_numstrokes'.format(this_part)}, inplace=True) feature_df pca = PCA(n_components=3) pca_result = pca.fit_transform(feature_df.values) meta_df['pc_1'] = pca_result[:,0] meta_df['pc_2'] = pca_result[:,1] meta_df['pca_3'] = pca_result[:,2] ###This chunk may not run because of ggplot being outdate. 
Will write up vis in matplotlib if necessary from ggplot import * cat_plot = ggplot( meta_df, aes(x='pc_1', y='pc_2', color='category') ) \ + geom_point(size=75,alpha=0.8) \ + ggtitle("First and Second Principal Components colored by category") cat_plot cond_plot = ggplot( meta_df, aes(x='pc_1', y='pc_2', color='condition') ) \ + geom_point(size=75,alpha=0.8) \ + ggtitle("First and Second Principal Components colored by category") cond_plot tar_plot = ggplot( meta_df, aes(x='pc_1', y='pc_2', color='target') ) \ + geom_point(size=75,alpha=0.8) \ + ggtitle("First and Second Principal Components colored by category") tar_plot ###Output _____no_output_____ ###Markdown t-SNE ###Code import time from sklearn.manifold import TSNE time_start = time.time() tsne = TSNE(n_components=2, verbose=1, perplexity=40, n_iter=300) tsne_results = tsne.fit_transform(feature_df.values) meta_df['tsne_dim_1']=tsne_results[:,0] meta_df['tsne_dim_2']=tsne_results[:,1] ###Visualizing using ggplot. Not as flexible as expected # plt.figure() # tsneplot = ggplot( meta_df, aes(x='tsne_dim_1', y='tsne_dim_2', color='category') ) \ # + geom_point(size=70,alpha=0.1) \ # + ggtitle("tSNE dimensions colored by category") # tsneplot # tsneplot.save(filename = os.path.join(plot_dir,'tSNE dimensions colored by category')) # plt.figure() # tsneplot = ggplot( meta_df, aes(x='tsne_dim_1', y='tsne_dim_2', color='condition') ) \ # + geom_point(size=70,alpha=0.1) \ # + ggtitle("tSNE dimensions colored by condition") # tsneplot # tsneplot.save(filename = os.path.join(plot_dir,'tSNE dimensions colored by condition')) # tsneplot = ggplot( meta_df, aes(x='tsne_dim_1', y='tsne_dim_2', color='target') ) \ # + geom_point(size=70,alpha=0.1) \ # + ggtitle("tSNE dimensions colored by target")\ # + x_label("die") # tsneplot # tsneplot.save(filename = os.path.join(plot_dir,'tSNE dimensions colored by target')) ##Visualizing t-SNE results cat_labels = meta_df['category'].unique() rgb_values = sns.color_palette("Set2", meta_df['category'].nunique()) color_map_cat = dict(zip(cat_labels, rgb_values)) cond_labels = meta_df['condition'].unique() rgb_values = sns.color_palette("Set2", meta_df['condition'].nunique()) color_map_cond = dict(zip(cond_labels, rgb_values)) target_labels= meta_df['target'].unique() rgb_values = sns.color_palette("Set2", meta_df['target'].nunique()) color_map_target = dict(zip(target_labels, rgb_values)) f=plt.figure(figsize(10,10)) ax = plt.subplot(aspect='equal') p= plt.scatter(meta_df['tsne_dim_1'],meta_df['tsne_dim_2'],c=meta_df['category'].map(color_map_cat)) plt.ylabel('TSNE Dim 1') plt.xlabel('TSNE Dim 2') plt.tick_params(labelbottom=False, labelleft= False) plt.title('TSNE dimensions colored by categories') txts = [] for cat in enumerate(cat_labels): cat=cat[1] txt = ax.text(xtext, ytext, str(cat), fontsize=24) xtext = np.median(meta_df[meta_df['category'] == cat]['tsne_dim_1'] ) ytext = np.median(meta_df[meta_df['category'] == cat]['tsne_dim_2'] ) txt.set_path_effects([ PathEffects.Stroke(linewidth=5, foreground="w"), PathEffects.Normal()]) txts.append(txt) f.show plt.savefig(os.path.join(plot_dir,'TSNE dimensions colored by categories')) f=plt.figure(figsize(10,10)) ax = plt.subplot(aspect='equal') plt.scatter(meta_df['tsne_dim_1'],meta_df['tsne_dim_2'],c=meta_df['condition'].map(color_map_cond)) plt.ylabel('TSNE Dim 1') plt.xlabel('TSNE Dim 2') plt.tick_params(labelbottom=False, labelleft= False) plt.title('TSNE dimensions colored by condition') f.show plt.savefig(os.path.join(plot_dir,'TSNE dimensions colored by 
condition')) f=plt.figure(figsize=(10,10)) plt.scatter(meta_df['tsne_dim_1'],meta_df['tsne_dim_2'],c=meta_df['target'].map(color_map_target)) plt.xlabel('TSNE Dim 1') plt.ylabel('TSNE Dim 2') plt.tick_params(labelbottom=False, labelleft= False) plt.title('TSNE dimensions colored by target') f.show() plt.savefig(os.path.join(plot_dir,'TSNE dimensions colored by target')) ###Output _____no_output_____
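###Markdown The ggplot-based PCA scatter plots above are flagged as possibly not running because ggplot is outdated. A small matplotlib/seaborn helper can stand in for them; this is only a sketch — the function name `scatter_by` is ours, and it assumes `meta_df` with the `pc_1`/`pc_2` (or `tsne_dim_1`/`tsne_dim_2`) columns has been built as above. ###Code
import matplotlib.pyplot as plt
import seaborn as sns

def scatter_by(df, xcol, ycol, groupcol, title=None):
    # one color per group level, mirroring the ggplot color aesthetic
    levels = df[groupcol].unique()
    palette = dict(zip(levels, sns.color_palette('Set2', len(levels))))
    fig, ax = plt.subplots(figsize=(8, 8))
    for level in levels:
        sub = df[df[groupcol] == level]
        ax.scatter(sub[xcol], sub[ycol], s=20, alpha=0.6,
                   color=palette[level], label=str(level))
    ax.set_xlabel(xcol)
    ax.set_ylabel(ycol)
    ax.set_title(title or '{} vs {} colored by {}'.format(ycol, xcol, groupcol))
    ax.legend()
    return fig, ax

# e.g. the PCA view colored by category:
# scatter_by(meta_df, 'pc_1', 'pc_2', 'category')
###Output _____no_output_____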
notebooks/SCSF_usage_with_PVDAQ_data.ipynb
###Markdown Example usage of Statistical Clear Sky Fitting (SCSF)This notebook shows the basic usage of the SCSF algorithm and code. The source of the data is [NREL's PVDAQ service](https://developer.nrel.gov/docs/solar/pvdaq-v3/). `solardatatools` is a dependency for Statistical Clear Sky, and additional tools are utilized here to prepaire the data for analysis. Imports ###Code from statistical_clear_sky import IterativeFitting from solardatatools import DataHandler, get_pvdaq_data ###Output _____no_output_____ ###Markdown Grab a data set from PVDAQThe `get_pvdaq_data` function is provided by `solardatatools` as an API wrapper for PVDAQ. ###Code df = get_pvdaq_data(sysid=35, api_key='DEMO_KEY', year=[2011, 2012, 2013]) ###Output [============================================================] 100.0% ...queries complete in 6.1 seconds ###Markdown Process the data into a clean matrix for analysis ###Code dh = DataHandler(df) dh.run_pipeline(use_col='ac_power') dh.report() dh.plot_heatmap(matrix='raw'); dh.plot_heatmap(matrix='filled'); ###Output _____no_output_____ ###Markdown Setup and execute the SCSF algorithm using all default values ###Code scsf = IterativeFitting(data_handler_obj=dh) scsf.execute() ###Output obtaining weights obtaining initial value of component r0 Starting at Objective: 6.457e+09, f1: 2.413e+07, f2: 5.157e+02, f3: 4.873e+09, f4: 1.559e+09 Minimizing left matrix Minimizing right matrix 1 - Objective: 2.287e+07, f1: 2.128e+07, f2: 5.083e+02, f3: 1.593e+06, f4: 8.709e-04 Minimizing left matrix Minimizing right matrix 2 - Objective: 2.000e+07, f1: 1.838e+07, f2: 2.247e+03, f3: 1.618e+06, f4: 4.614e-04 Minimizing left matrix Minimizing right matrix 3 - Objective: 1.968e+07, f1: 1.807e+07, f2: 1.755e+03, f3: 1.608e+06, f4: 3.167e-04 Minimizing left matrix Minimizing right matrix 4 - Objective: 1.962e+07, f1: 1.801e+07, f2: 1.619e+03, f3: 1.601e+06, f4: 2.903e-03 Minimizing left matrix Minimizing right matrix 5 - Objective: 1.960e+07, f1: 1.800e+07, f2: 1.572e+03, f3: 1.598e+06, f4: 3.228e-04 Minimization complete in 0.44 minutes ###Markdown Inspect the results of the SCSF procedure ###Code scsf.plot_measured_clear(show_days=True); scsf.plot_energy(show_clear=True, show_days=True, scale_power=True) plt.title('Daily Energy') plt.xlabel('Day Number') plt.ylabel('Energy (kWh)'); print('Estimated degradation: {:.2f}% per year'.format(scsf.beta_value.item() * 100)) scsf.ts_plot(start_day=50) ###Output _____no_output_____
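###Markdown The same pipeline can be wrapped in a loop to compare degradation rates across several PVDAQ systems. The sketch below reuses only the calls shown above; the second system id is a placeholder (any public PVDAQ site id could be used), each additional system adds download and fit time, and the DEMO_KEY is rate-limited. ###Code
site_ids = [35, 51]  # 51 is a hypothetical second system id
deg_rates = {}
for sysid in site_ids:
    df_sys = get_pvdaq_data(sysid=sysid, api_key='DEMO_KEY', year=[2011, 2012, 2013])
    dh_sys = DataHandler(df_sys)
    dh_sys.run_pipeline(use_col='ac_power')
    scsf_sys = IterativeFitting(data_handler_obj=dh_sys)
    scsf_sys.execute()
    # degradation rate in percent per year, as in the cell above
    deg_rates[sysid] = scsf_sys.beta_value.item() * 100
print(deg_rates)
###Output _____no_output_____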
ipynb/classifier-denoised.ipynb
###Markdown Load ksent vectors of random genome samples of 16kb ###Code #export runs = [ pd.read_pickle("/home/serge/development/fastai-genomic/data/ksent_vectors_baccilium_top3_500_samples_per_fasta.pkl"), pd.read_pickle("/home/serge/development/fastai-genomic/data/ksent_vectors_baccilium_top3_500_samples_per_fasta_run2.pkl"), pd.read_pickle("/home/serge/development/fastai-genomic/data/ksent_vectors_baccilium_1000_samples_per_fasta.pkl"), pd.read_pickle("/home/serge/development/fastai-genomic/data/ksent_vectors_baccilium_1000_samples_per_fasta_run2.pkl") ] df = pd.concat(runs) df = df.sample(frac=1).reset_index() df.head() df.ksent.values.shape from denoiser import denoise ksent = np.vstack(df.ksent.values) v_dn = denoise(ksent) data = [v_dn[i] for i in range(v_dn.shape[0])] d=pd.DataFrame(index=df.index) d["spicies"] = df.spicies.values df["ksent"] = data df.head() ###Output _____no_output_____ ###Markdown Create Dataset ###Code #export valid_idx = random.sample(range(df.shape[0]), int(np.floor(df.shape[0]* 0.2))) db = (ItemList.from_df(df,cols="ksent"). split_by_idx(valid_idx). label_from_df(cols="spicies"). databunch()) ###Output _____no_output_____ ###Markdown Create Model ###Code #export def submodel(dims, bias=False): layer_dims = list(zip(dims[:-1],dims[1:])) fcl = [nn.Linear(*x, bias=bias) for x in layer_dims] [nn.init.xavier_uniform_(m.weight) for m in fcl] if bias: for l in fcl: l.bias.data.normal_(0, 1) relu = [nn.ReLU() for _ in range(len(fcl))] layers = np.asarray(list(zip(fcl, relu))).ravel()[:-1] return nn.Sequential(*layers) #export class Classifier (nn.Module): def __init__(self, encoder_dims, classifier_dims): super().__init__() self.encoder = submodel(encoder_dims,bias=True) self.classifier = submodel(classifier_dims,bias=True) def forward(self, x): x = self.encoder(x) return F.softmax(self.classifier(x), dim=1) def save_encoder(self,file:PathOrStr): torch.save(self.encoder.state_dict(), path) def save_model(self, file:PathOrStr, epoch): torch.save({ 'epoch': epoch, 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), 'loss': loss}, file) model = Classifier([100,50,3,2], [2,20,3]).double() model ###Output _____no_output_____ ###Markdown Learner ###Code learn = Learner(db, model,metrics=[accuracy]) learn.loss_func learn.lr_find() learn.recorder.plot() learn.fit_one_cycle(50,1e-2) learn.recorder.plot_metrics() interpretation = learn.interpret() interpretation.plot_confusion_matrix() ###Output _____no_output_____
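###Markdown Two of the helper methods above do not use their arguments: `save_encoder` saves to whatever global `path` happens to exist instead of the `file` it receives, and `save_model` pulls `model`, `optimizer` and `loss` from the surrounding scope. A corrected sketch, written as standalone functions (the names and argument order are ours): ###Code
import torch

def save_encoder(classifier, file):
    # persist only the encoder weights
    torch.save(classifier.encoder.state_dict(), file)

def save_checkpoint(classifier, optimizer, loss_value, epoch, file):
    # full training checkpoint: weights, optimizer state and bookkeeping
    torch.save({
        'epoch': epoch,
        'model_state_dict': classifier.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'loss': loss_value,
    }, file)

# usage after a training epoch, e.g.:
# save_checkpoint(model, optimizer, loss.item(), epoch, 'classifier.ckpt')
###Output _____no_output_____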
validation_data/Validation_Datasets.ipynb
###Markdown BUILD COUNTRY-SPECIFIC DEATHS VALIDATION DATASETS Source: https://github.com/owid/covid-19-data/tree/master/public/data Script to build a dataset containing the number of reported COVID-19 deaths in a specific country in a given time range. The output is saved as a *Country*_deaths.csv file in the target directory. Two types of validation datasets will be created for each country:- Validation Datasets Experiment: dataset with the validation data from 10th Feb up to the 15th of April- Validation Datasets Policy: dataset with the validation data from 10th Feb up to the 15th of May The start date t_start for retrieving the validation data is shared by both validation types. It can be chosen arbitrarily, but it must be a date _before_ the imposition of the lockdowns in the countries (which usually occurred in early March). The end date for the Policy datasets is later, since they cover a larger time span. TABLE OF CONTENTS [1. Build Validation Datasets Function](validation_fn) [2. Export Validation Datasets](export) [2.1 Validation Datasets for Experiments](validation_ex) [2.2 Validation Datasets for Policy](validation_pol) ###Code # Import libraries import pandas as pd import numpy as np import datetime import os # base directory where the source covid-data-deaths.csv is stored b_dir = './Source' # target directories where the validation datasets are stored target_dir_experiment = './Experiments' # save the validation data for the experiments target_dir_policy = './Policy' # save the validation data for the policies ###Output _____no_output_____ ###Markdown 1. Build Validation Datasets Function ###Code # Import and preprocess the dataset; Source: https://github.com/owid/covid-19-data/tree/master/public/data def build_validation_dataset(country,t_start,t_end, target): data_deaths = pd.read_csv(os.path.join(b_dir,'covid-data-deaths.csv')) # read dataset data_deaths = data_deaths[['location','date','total_deaths']].fillna(0) # retrieve relevant columns and fill n.a. values data_deaths = data_deaths.rename(columns=lambda x: x.capitalize()) data_deaths = data_deaths.rename(columns={'Total_deaths':'Deaths'}) validation_data = data_deaths[data_deaths['Location']==country].reset_index(drop=True)[['Date','Deaths']] t0 = validation_data.loc[validation_data['Date'] == t_start].index[0] t1 = validation_data.loc[validation_data['Date'] == t_end].index[0] validation_data = validation_data.iloc[t0:t1+1] if target == 'exp': validation_data.to_csv(os.path.join(target_dir_experiment,'%s_deaths.csv' %country),index=False) elif target == 'pol': validation_data.to_csv(os.path.join(target_dir_policy,'%s_deaths.csv' %country),index=False) ###Output _____no_output_____ ###Markdown 2. 
Export Validation Datasets 2.1 Validation Datasets for Experiments Export the validation dataset containing the information on the number of deaths from a chosen fixed date (arbitrary) up to the **15th of April** ###Code # Build validation dataset ITALY build_validation_dataset('Italy','2020-02-10','2020-04-15','exp') # Build validation dataset SPAIN build_validation_dataset('Spain','2020-02-10','2020-04-15','exp') # Build validation dataset GERMANY build_validation_dataset('Germany','2020-02-10','2020-04-15','exp') # Build validation dataset FRANCE build_validation_dataset('France','2020-02-10','2020-04-15','exp') ###Output _____no_output_____ ###Markdown 2.2 Validation Datasets for Policy Export the validation dataset containing the information on the number of deaths from a chosen fixed date (arbitrary) up to the **15th of May** ###Code # Build validation dataset ITALY build_validation_dataset('Italy','2020-02-10','2020-05-15','pol') # Build validation dataset SPAIN build_validation_dataset('Spain','2020-02-10','2020-05-15','pol') # Build validation dataset GERMANY build_validation_dataset('Germany','2020-02-10','2020-05-15','pol') # Build validation dataset FRANCE build_validation_dataset('France','2020-02-10','2020-05-15','pol') ###Output _____no_output_____
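###Markdown A quick sanity check is to read one exported file back and confirm its span; a sketch, assuming the Italy experiment dataset was written by the calls above: ###Code
# read back one exported validation file and inspect it
check = pd.read_csv(os.path.join(target_dir_experiment, 'Italy_deaths.csv'))
print(check.head())
print('rows:', len(check))
print('date range:', check['Date'].iloc[0], 'to', check['Date'].iloc[-1])
###Output _____no_output_____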
Courses/course.fast.ai/Notebooks/04_05_example.ipynb
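###Markdown The steps below use `time`, `speed`, `f`, `mse` and `to_np` from earlier cells of the original course notebook that are not included here. A minimal stand-in consistent with how they are used (a quadratic model fitted to noisy synthetic speed measurements; the exact values in the course may differ): ###Code
import torch
import matplotlib.pyplot as plt

time = torch.arange(0, 20).float()
# synthetic, noisy speed measurements (assumed shape of the original data)
speed = torch.randn(20) * 3 + 0.75 * (time - 9.5) ** 2 + 1

def f(t, params):
    # quadratic model a*t^2 + b*t + c
    a, b, c = params
    return a * (t ** 2) + b * t + c

def mse(preds, targets):
    return ((preds - targets) ** 2).mean()

def to_np(x):
    return x.detach().numpy()
###Output _____no_output_____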
###Markdown 1. Initialize the parameters ###Code params = torch.randn(3).requires_grad_() orig_params = params.clone() ###Output _____no_output_____ ###Markdown 2. Calculate the predictions ###Code preds = f(time, params) def show_preds(preds, ax=None): if ax is None: ax = plt.subplots()[1] ax.scatter(time, speed) ax.scatter(time, to_np(preds), color='red') ax.set_ylim(-300, 300) show_preds(preds) ###Output _____no_output_____ ###Markdown 3. Calculate the loss ###Code loss = mse(preds, speed) loss ###Output _____no_output_____ ###Markdown Calculate the gradients ###Code loss.backward() params.grad lr = 1e-4 lr * params.grad params ###Output _____no_output_____ ###Markdown 5. Step the weights Now we need to update the parameters based on the gradients we just calculated ###Code params.data -= lr * params.grad.data params.grad = None preds = f(time, params) mse(preds, speed) show_preds(preds) def apply_step(params, prn=True): preds = f(time, params) loss = mse(preds, speed) loss.backward() params.data -= lr * params.grad.data params.grad = None if prn: print(loss.item()) return preds ###Output _____no_output_____ ###Markdown 6. Repeate the process ###Code for i in range(10): apply_step(params) params = orig_params.detach().requires_grad_() _, axs = plt.subplots(1, 4, figsize=(12, 3)) for ax in axs: show_preds(apply_step(params, False), ax) plt.tight_layout() ###Output _____no_output_____
Py3_Control_Structures.ipynb
###Markdown **Booleans**Another type in Python is the **Boolean** type. There are two boolean values:- **True**- **False**.They can be created by **comparing values**, for instance by using the equal operator **==**.> my_boolean = True> print(my_boolean)> True> print(2==3)> False> print("hello" == "hello")> TrueBe careful not to confuse assignment **"="** (one equals sign) with comparison **"=="** (two equals signs). **Comparison** Another comparison operator, the **not equal** operator **(!=)**, evaluates to **True** if the items being compared **aren't equal**, and False if they are.> print(1 != 1)> False > print("eleven" != "seven")> True> print(2 != 10)> TrueComparison operators are also called **relational operators.** ###Code print(7 != 8) ###Output True ###Markdown **Comparison**Python also has operators that determine whether one number (float or integer) is - greater than **">"**- smaller than **"<"**> print( 7 > 5 )> True> print( 10 < 10 )> FalseDifferent numeric types can also be compared, for example, integer and float. ###Code print( 7 > 7.0) ###Output False ###Markdown **Comparison**- The greater than or equal to **">="**- The smaller than or equal to **"<="**They are the same as the strict greater than and smaller than operators, except that they return **True** when **comparing equal numbers**.> print(7 <= 8)> True> print(9 >= 9.0)> TrueGreater than and smaller than operators can also be used to compare strings **lexicographically** (the alphabetical order of words is based on the alphabetical order of their component letters).**For example:**> print("Annie" > "Andy")> TrueThe first two characters from "Annie" and "Andy" (A and A) are compared. As they are equal, the second two characters are compared. Because they are also equal, the third two characters (n and d) are compared. And because n has a greater alphabetical order value than d, "Annie" is greater than "Andy". ###Code print(8.7 <= 8.70) ###Output True ###Markdown **if Statements**You can use if statements to run code if a certain condition holds. If an expression evaluates to True, some statements are carried out. Otherwise, they aren't carried out.An if statement looks like this:> if expression: >> statementsPython uses indentation (white space at the beginning of a line) to delimit blocks of code. Depending on the program's logic, indentation can be mandatory. As you can see, the statements in the if should be indented. **if Statements**Here is an example if statement:> if 10 > 5:>> print("10 greater than 5")> print("Program ended")The expression determines whether 10 is greater than 5. Since it is, the indented statement runs, and "10 greater than 5" is output. Then, the unindented statement, which is not part of the if statement, is run, and "Program ended" is displayed.Notice the **colon** at the end of the expression in the if statement. ###Code spam = 7 if spam > 5: print("five") if spam > 8: print("eight") ###Output five ###Markdown **if Statements**To perform more complex checks, if statements can be nested, one inside the other. This means that the inner if statement is the statement part of the outer one. This is one way to see whether multiple conditions are satisfied.For example:> num = 12> if num > 5:>> print("Bigger than 5")>> if num <= 47:>>> print("Between 5 and 47")**Indentation** is used to define the level of nesting. 
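###Markdown Written out with explicit indentation, the nested example above reads: ###Code
num = 12
if num > 5:
    print("Bigger than 5")        # runs, because 12 > 5
    if num <= 47:
        print("Between 5 and 47") # runs, because the outer condition held and 12 <= 47
###Output _____no_output_____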
###Code num = 7 if num > 3: print("3") if num < 5: print("5") if num == 7: print("7") ###Output 3 ###Markdown **else Statements**The **if statemen**t allows you to check a condition and run some statements, if the **condition is True**.The **else statement** can be used to run some statements when then condition of the if **statement is False**.As with if statements, the code inside the block should be indented.> x = 4> if x == 5:>> print("Yes")>else:>> print("No")Notice the colon after the else keyword. ###Code if 1 + 1 == 2: if 2*2 == 8: print("if") else: print("else") ###Output else ###Markdown **else Statements**Every if condition block can have only one else statement.In order to make multiple checks, you can chain if and else statement.For example, the following program checks and outputs the num variable's value as text:> num = 3> if num == 1:>> print("One")> else:>> if num == 2:>>> print("Two")>> else:>>> if num == 3:>>>> print("Three")>>> else:>>>> print("Something else")Indentation determines which **if/else** statements the code blocks belong to. ###Code num = 3 if num == 1: print("One") else: if num == 2: print("Two") else: if num == 3: print("Three") else: print("Something else") x = 10 y = 20 if x > y: print("if statement") else: print("else statement") ###Output else statement ###Markdown **elif Statements**Multiple if/else statements make the code long and not very readable.The elif (short for else if) statement is a shortcut ot use when chaining if and else statements, making the code shorter. The same example from the previous part can be rewritten using elif statements:> num = 3> if num == 1:>> print("One")> elif num == 2:>> print("Two)> elif num == 3:>> print("Three")> else:>> print("Something else")As you can see in the example above, a series of if elif statements can have a **final** **else** block, which is called if **none of the if or elif expressions is True**.The **elif** statement is **equivalent** to an else/if statement. It is used to make the code shorter, more readable, and avoid indentation increase. ###Code num = 3 if num == 1: print("One") elif num == 2: print("Two") elif num == 3: print("Three") else: print("Something else") ###Output Three ###Markdown **Boolean Logic****Boolean logic** is used to make more complicated conditions for if statements that rely on more than one condition.Python's Boolean operators are - and - or - notThe **and** operator takes two arguments, and evaluates and **True**, if, and only if, **both of its arguments are True**. Otherwise, it evaluates to False.> print(1 == 1 and 2 == 2)> True> print(1 == 1 and 2 == 3)> False> print(1 != 1 and 2 == 2) > False> print(2 6)> FalseBoolean operator can be used in expression as many times as needed. ###Code if (1==1) and (2+2>3): print("true") else: print("false") ###Output true ###Markdown **Boolean Or**The **or** operator also takes two arguments. It evaluates to **True** if **either** **(or both)** of its arguemnts are **True**, and False if both arguments are False> print(1 == 1 or 2 == 2)> True> print(1 == 1 or 2 == 3)> True> print(1 != 1 or 2 == 2)> True> print(2 6)> FalseBesides values, you can also compare variables. 
###Code age = 15 money = 500 if age > 18 or money > 100: print("Welcome") ###Output Welcome ###Markdown **Boolean Not**Unlike other operators we've seen so far, **not** takes one argument, and **inverts it**.The result of - **not True** is **False** - **not False** goes to **True**> print(not 1 == 1)> False > print(not 1 > 7)> TrueYou can chain multiple conditional statements in an if statement using the Boolearn operators. ###Code if not True: print("1") elif not (1+1 == 3): print("2") else: print("3") ###Output 2 ###Markdown **Operator Precedence**Operator precedence is a very important concept in programming. It is an extension of the methematical idea of order of operations (multiplication being performed before addition, etc.) to include other operators, such as those in Boolean logic.The below code shows that == has a higher precedence than or> print(False == False or True)> True> print(False == (False or True))> False> print((False == False) or True)> True**Python's order** of operations is the same as that of normal mathematics: - parentheses first, - then exponentiation, - then multiplication / division, - and then addition / subtraction. ###Code if 1 + 1*3 == 6: print("Yes") else: print("No") ###Output No ###Markdown **Chaining Multiple Conditions**You can chain multiple conditional statements in an if statement using The Boolean operators.For example, we can if the value of a grade is between 70 and 100:> grade = 88> if (grade >= 70 and grade <= 100):>>print("passed!")You can use multiple and, or, not operators to chain multiple conditions together. ###Code x = 4 y = 2 if not 1 + 1 == y or x == 4 and 7 == 8: # not [(true) or [(true) and (false)]] print("Yes") elif x > y: print("No") ###Output No ###Markdown **Lists****Lists** are used to store items.A list is created using square brackets with commas separating items.> words = ["Hello", "world", "!"]In the example above the words list contains three string items.A certain item in the list can be accessed by using its index in square brackets.For example:> words = ["Hello", "world", "!"]> print(words[0])> print(words[1])> print(words[2])The first list item's index is 0, rather than 1, as might be expected. ###Code nums = [5,4,3,2,1] print(nums[1]) ###Output 4 ###Markdown **Lists**Sometimes you need to create an empty list and populate it later during the program. For example, if you are creating a queue management program, the queue is going to be empty in the beginning and get populated with people data later.An empty list is created with an empty pair of square brackets.> empty_list = []> print(empty_list)In some code samples you might see a comma after the last item in the list. It's not mandatory, but perfectly valid. ###Code nums = [2,] i = 0 for num in nums: i += 1 print(i) ###Output 1 ###Markdown **Lists**Typically, a list will contain items of a single item type, but it is also possible to include several different types.Lists can also be nested within other lists.> number = 3> Things = ["string", 0, [1, 2, number], 4.56]> print(things[1])> print(things[2])> print(things[2][2])Nested lists can be used to represent 2D grids, such as matrices. For example:> m = [> [ 1, 2, 3 ],> [ 4, 5, 6 ],> ] > print(m[1][2])A matrix-like structure can be used in cases where you need to store data in row-column format. For example, when creating a ticketing program, the seat numbers can be stored in a matrix, with their corresponding rows and numbers.The code above outputs the 3rd item of the 2nd row. 
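###Markdown The matrix example above, written out as runnable code: ###Code
m = [
    [1, 2, 3],
    [4, 5, 6],
]
print(m[1][2])  # 3rd item of the 2nd row -> 6
###Output _____no_output_____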
###Code list = [42, 55, 67] print(list[2]) ###Output 67 ###Markdown **Lists**Some types, such as strings, can be indexed like lists. Indexing strings behaves as though you are indexing a list containing each character in the string.For example:> str = "Hello world!"> print(str[6])Space(" ") is also a symbol and has an index.Trying to access a non-existing index will produce an error. ###Code num = [5,4,2,[2],1] print(num[0]) print(num[3][0]) #print(num[5]) error ###Output 5 2 ###Markdown **List Operations**The item at a certian index in a list can be reassigned For example:> nums = [7, 7, 7, 7, 7]> nums[2] = 5> print(nums)You can replace the item with an item of a different type. ###Code nums = [1, 2, 3, 4, 5] nums[3] = nums[1] print(nums[3]) ###Output 2 ###Markdown **List Operations**Lists can be added an multiplied in the same way as strings. For example:> nums = [1, 2, 3]> print(nums + [4, 5, 6])> print(nums *3)Lists and strings are similar in many ways string can be thought of as lists of characters that can't be changedFor example, the string "Hello" can be thought of as a list, where each character is an item in the list. The first item is "H", the second item is "e", and so on. ###Code nums = [33, 42, 56] nums[1] = 22 print(nums) ###Output [33, 22, 56] ###Markdown **List Operations**To check if an item is in a list, the **in** operator can be used. - It returns **True** if the item occurs one or more times in the list, and- **False** if it doesn't> words = ["spam", "egg", "spam", "sausage"]> print("spam" in words)> print("egg" in words)> print("tomato" in words)The **in** operator is also used to determien whether or not a string is a substring of another string. ###Code nums = [10, 9, 8, 7, 6, 5] nums[0] = nums[1] - 5 if 4 in nums: print(nums[3]) else: print(nums[4]) ###Output 7 ###Markdown **List Operations**To check if an item is not in a list, you can use the **not** operator in one of the following ways:> nums = [1, 2, 3]> print(not 4 in nums)> print(4 not in nums)> print(not 3 in nums)> print(3 not in nums)> True > True> False> False ###Code letters = ['a', 'b', 'z'] if "z" in letters: print("Yes") ###Output Yes ###Markdown **List Functions**The **append** method adds an item to the end of an existing list.For example:> nums = [1, 2, 3]> nums.append(4)> print(nums)> [1, 2, 3, 4]The **dot** before append is there because it is a **method** of the list class. ###Code words = ["hello"] words.append("world") print(words[1]) ###Output world ###Markdown **List Functions**To get the number of items in a list, you can use the **len** function.> nums = [1, 3, 5, 2, 4]> print(len(nums))Unlike the index of the items, **len** does not start with 0. So, the list above contains 5 items, meaning **len** will return 5.**len** is written before the list it is being called on, without a dot. ###Code letters = ["a", "b", "c"] letters.append("d") print(len(letters)) ###Output 4 ###Markdown **List Functions**The **insert** method is similar to **append**, except that it allows you to insert a new item at any position in the list, as opposed to just at the end.> words = ["Python", "fun]> index = 1> words.insert(index, "is")> print(words)> Python is funElements, that are after the inserted item, are shifted to the right. 
###Code nums = [9, 8, 7, 6, 5] nums.append(4) nums.insert(2,11) print(nums) print(len(nums)) ###Output [9, 8, 11, 7, 6, 5, 4] 7 ###Markdown **List Functions**The **index** method finds the first occurrence of a list item and returns its index.if the item isn't in the list, it raises a ValueError.> letters = ['p', 'q', 'r', 's', 'p', 'u']> print(letters.index('r))> print(letters.index('p'))> print(letters.index('z'))> 2> 0> ValueError.there are a few more useful functions and methods for lists.- max(list): Returns the list item with the maximum value.- min(list): Returns the list item with the minimum value.- list.count(item): Returns a count of how many times an item occurs in a list.- list.remove(item): Removes an object from a list.- list.reverse(): Reverses items in a list.For example, you can count how many 42s are there in the lsit using: **items.count(42)**where items is the name of our list. ###Code list = ['x', 'y'] list.append('z') print(len(list)) ###Output 3 ###Markdown **while Loops**A **while** loop is used to repeat a block of code multiple times.For example, let's say we need to process multiple user inputs, so that each time the user inputs something, the same block of code needs to execute.Below is a **while** loop containing a variable that counts up for 1 to 5, at which point the loop terminates.> i = 1> while i <= 5:>> print(i)>> i = i + 1>print("Finished!")During each loop iteration, the i variable will get incremented by one, until it reaches 5.So, the loop will execure the print statement 5 times.The code in the body of a **while** loop is executed repeatedly, This is called **iteration.** ###Code i = 3 while i >= 0: print(i) i = i - 1 ###Output 3 2 1 0 ###Markdown **while Loops**You can use multiple statements in the while loop. For example, you can use an **if** statement to make decisions. This can be useful, if you are making a game and need to loop through a number of player actions and add or remove points of the player.The code below uses an if/else statement inside a while loop to separate the even and odd numbers in the range of 1 to 10:> x = 1> while x < 10:>> if x % 2 == 0:>>> print(str(x) + " is even")>> else:>>> print(str(x) + " is odd")>> x += 1**str(x)** is used to convert the number x to a string, so that it can be used for concatenationIn console, you can stop the program's execution by using the Ctrl-c shortcut or by closing the program. ###Code x = 0 while x <= 20: print(x) x += 2 ###Output 0 2 4 6 8 10 12 14 16 18 20 ###Markdown **break** To end a while loop prematurely, the break statement can be used. For example, we can break an infinite loop if some condition is met:> i = 0> while True:>> print(i)>> i = i + 1>> if i >= 5:>>> print("Breaking")>>> break>print("Finished")**while True** is a short and easy way to make an infinite loop.**An example use case of break:**An infinite while loop can be used to continuously take user input. For example, you are making a calculator and need to take numbers from the user to add an stop, when the user enters **"stop"**.In the case, the **break** statement can be used to end the infinite loop when the user input equals "stop".Using the **break** statement outside of a loop causes an error. 
###Code i = 5 while True: print(i) i = i -1 if i <= 2: break ###Output 5 4 3 ###Markdown **continue**Another statement that can be used withing loops is **continue** Unlike **break**, **continue** jumps back to the top of the loop, rather than stopping it.Basically, the **continue** statement stops the current iteration and continues with the next one.**For example:**> i = 1 >while i <= 5:>> print(i)>> i += 1>> if i == 3:>> print("Skipping 3")>> continue**An example use case of continue:**An airline ticketing system needs to calculate the total cost for all tickets purchased. The tickets for childeren under the age of 1 are free. We can use a while loop to iterate through the list of passengers and calculate the total cost of their tickets. Here, the **continue** statement can be used to skip the children.Using the **continue** statement outside of a loop causes and error. **for Loop**The for loop is used to iterate over a given sequence, such as lists or strings. The code below outputs each item in the list and adds an exclamation mark at the end:> words = ["hello", "world", "spam", "eggs"]> for word in words:>> print(word + "!")In the code above, the word variable represents the corresponding item of the list in each iteration of the loop. During the 1st iteration, word is equal to "hello", and during the 2nd iteration it's equal to "world", and so on. ###Code letters = ['a', 'b', 'c'] for l in letters: print(l) ###Output a b c ###Markdown **for Loops**The **for** loop can be used to iterate over strings.**For example:**> str = "testing for loops"> count = 0> for x in str:>> if(x == 't'):>>> count += 1> print(count)The code above defines a count variable, iterates over the string and calculates the count of 't' letters in it. During each iteration, the **x** variable represents the current letter of the string.The count variable is incremented each itme the letter 't' is found, thus, at the end of the loop it represents the number of 't' letters in the string.Similar to **while** loops, the **break** and **continue** statemetns can be used in **for** loops, to stop the loop or jump to the next iteration. ###Code list = [2, 3, 4, 5, 6, 7] for x in list: if (x % 2 == 1 and x > 4): print(x) break ###Output 5 ###Markdown ** for vs while **Both, **for** and **while** loops can be used to execute a block of code for multiple times.It is common to use the **for** loop when the number of iterations is fixed. For example, iterating over a fixed list of items in a shopping list.The **while** loop is used in cases when the number of iterations is not known and depends on some calculations and conditions in the code block of the loop.For example, ending the loop when the user enters a specific input in a calculator program.Both, **for** and **while** loops can be used to achieve the same results, however, the **for** loop has cleaner and shorter syntax, makign it a better choice in most cases. **Range**The **range()** function returns a sequence of numbers.By default, it starts from 0, increments by 1 and stops **before the specified number.**The code below generates a list containing all of the integers, up to 10.> numbers = list(range(10))> print(numbers)In order to output the range as a list, we need to explicitly convert it to a list, using the **list()** function. 
###Code list = range(5) print(list[4]) ###Output 4 ###Markdown **Range**if range is **called with one argument**, it produces an object with values **from 0 to that argument**.if it is **called with two arguemnts**, it produces values **from the first to the second**.For example:> numbers = list(range(3, 8))> print(numbers)> print(range(20) == range(0, 20))Remember, the second argument is not included in the range, so range(3, 8) will not include the number 8. ###Code print(len(range(20)) == len(range(0,20))) nums = range(5, 8) print(len(nums)) ###Output 3 ###Markdown **Range****range** can have a **third argument**, which determines the **interval** of the sequence produced, also called the **step**> numbers = list(range(5, 20, 2))> print(numbers)We can also create list of **decreasign numbers**, using a **negative number** as the third argument, for example **lsit(range(20, 5, -2)).** ###Code nums = range(3, 15, 3) print(nums[2]) ###Output 9 ###Markdown **for Loops**The **for** loop is commonly used to repeat some code a certain number of times. This is done by combining for loop with **range** objects.> for i in range(5):>> print("hello!")You don't need to call **list** on the **range** object when it is used in a **for** loop, because it isn't being indexed, so a list isn't required ###Code for i in range(0, 20, 2): print(i) list = [1, 1, 2, 3, 5, 8, 13] print(list[list[4]]) for i in range (10): if not i % 2 == 0 : print(i + 1) while False: print("Looping...") list = [1, 2, 3, 4] if len(list) % 2 == 0: print(list[0]) letters = ['x', 'y', 'z'] letters.insert(1, 'w') print(letters[2]) list = [1, 2, 3] for var in list: print(var) ###Output 1 2 3
PCz_grad_hb.ipynb
###Markdown CREG12.L75-REF08_mesh_hgr.nc includes all horizontal metrics for each grid point. For instance, for scalars such as temperature and salinity (T-points), the scale factors to use are e1t (zonal) and e2t (meridional). They are invariant over depth. ###Code fsg = s3fs.S3FileSystem(anon=False, client_kwargs={ 'endpoint_url': 'https://karen.uiogeo-apps.sigma2.no' }) fsg.ls('s3://data') data_path = f's3://data/CREG12.L75-REF08_mesh_hgr.nc' remote_files = fsg.glob(data_path) fileset = [fsg.open(file) for file in remote_files] dm = xr.open_mfdataset(fileset, combine='by_coords',compat='override') dm ###Output _____no_output_____
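###Markdown As a quick check of the horizontal scale factors, the area of each T-cell is just the product of the zonal and meridional factors. A sketch, assuming the mesh file opened above exposes `e1t` and `e2t` (possibly with a leading record dimension to squeeze): ###Code
# horizontal T-cell area in m^2 from the zonal and meridional scale factors
cell_area = (dm['e1t'] * dm['e2t']).squeeze()
print(cell_area.shape)
print('total domain area: %.3e m^2' % float(cell_area.sum()))
###Output _____no_output_____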
10_ThoraricSurgery/10_ThoraricSurgery_torch.ipynb
###Markdown 폐암 수술 환자의 생존율 예측 ###Code import os import torch from torch import nn import torch.nn.functional as F from torch.utils.data import TensorDataset from torch.utils.data import DataLoader # 필요한 라이브러리 불러옴 import numpy as np import pandas as pd # 실행할 때마다 같은 결과를 출력하기 위해 설정하는 부분 np.random.seed(3) torch.manual_seed(3) device = torch.device("cuda:0" if torch.cuda.is_available() else 'cpu') #device = 'cpu' print("PyTorch version :", torch.__version__) print("Device :", device) ''' # 준비된 수술 환자 데이터를 불러옴 Data_set = np.loadtxt("../dataset/ThoraricSurgery.csv", delimiter=',') # 환자의 기록과 수술 결과를 X와 Y로 구분하여 저장 X = Data_set[:, 0:17] Y = Data_set[:, 17] ''' # 준비된 수술 환자 데이터를 불러옴 by using pandas #df = pd.read_csv("../dataset/ThoraricSurgery.csv", # names=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "class"]) df = pd.read_csv("../dataset/ThoraricSurgery.csv", header=None) print(df.head()) df.rename(columns={17:"class"}, inplace=True) print(df.head()) # 환자의 기록과 수술 결과를 X와 Y로 구분하여 저장 X = df.drop(['class'], axis=1, inplace=False).values Y = df['class'].values class Model(nn.Module): """ """ def __init__(self): super(Model, self).__init__() self.input_size = 17 self.layers = nn.Sequential( nn.Linear(in_features=self.input_size, out_features=30, bias=True), nn.ReLU(inplace=True), nn.Linear(in_features=30, out_features=1, bias=True), nn.Sigmoid() ) def forward(self, x): out = self.layers(x) return out model = Model() #model = nn.DataParallel(model) model.to(device) path = "./trained_models_pytorch" if not os.path.isdir(path): os.mkdir(path) epochs = 1000 batch_size = 64 x = torch.from_numpy(X).type(torch.FloatTensor) y = torch.from_numpy(Y).type(torch.FloatTensor) #print(x.size()) #print(y.size()) dataset = TensorDataset(x, y) train_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True) #criterion = nn.CrossEntropyLoss() criterion = nn.BCELoss() #criterion = nn.BCEWithLogitsLoss() # Sigmoid + BCELoss # optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), # lr=config.learning_rate, betas=(0.9, 0.98), eps=1e-09) optimizer = torch.optim.Adam(model.parameters(), lr=0.001, betas=(0.5, 0.999)) for epoch in range(1, epochs+1): phase = 'train' for k, [inputs, targets] in enumerate(train_loader): inputs = inputs.to(device) targets = targets.to(device) optimizer.zero_grad() with torch.set_grad_enabled(phase=='train'): output = model(inputs) output = output.squeeze() #targets = targets.type_as(output) #print(output.size()) #print(targets.size()) #print("=======") loss = criterion(output, targets) # print(loss) loss.backward() optimizer.step() if(epoch%100==0): save_path = os.path.join(path, "model-"+ str(epoch)+ ".ckpt") print("Save path :", save_path) torch.save({ "epoch": epoch, "model": model, # model.state_dict(), #"optimizer_state":optimizer.state_dict(), # "scheduler_state":scheduler.state_dict(), #"best_score": best_score, }, save_path) print("The model has been saved as "+save_path) print("Training is over.") path = "./trained_models_pytorch/model-1000.ckpt" checkpoint = torch.load(path) new_model = checkpoint["model"] x_test = x.to(device) prediction = new_model(x_test).to(device) #prediction = torch.sigmoid(prediction) prediction.cpu().detach().numpy() ###Output _____no_output_____
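###Markdown The final cell produces raw sigmoid outputs but never scores them. A minimal evaluation sketch, thresholding at 0.5 and comparing against the labels already loaded as `Y` (note this is training-set accuracy, since no held-out split is made above): ###Code
import numpy as np
import torch

with torch.no_grad():
    probs = new_model(x.to(device)).squeeze().cpu().numpy()
pred_labels = (probs >= 0.5).astype(int)
accuracy = (pred_labels == Y.astype(int)).mean()
print("Training-set accuracy: %.3f" % accuracy)
###Output _____no_output_____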
.ipynb_checkpoints/AGII_lab06_gravity-checkpoint.ipynb
###Markdown AG Dynamics of the Earth Jupyter notebooks Georg Kaufmann Angewandte Geophysik II: Gravity----*Georg Kaufmann,Geophysics Section,Institute of Geological Sciences,Freie Universität Berlin,Germany* 3D sphere$$ g(x) = {{4}\over{3}} \pi G \Delta\rho R^3 {{D}\over{(x^2 + D^2)^{3/2}}}$$ ###Code %matplotlib inline import numpy as np import matplotlib.pyplot as plt # define parameter values G = 6.672e-11 # m^3/kg/s^2 def boug_sphere(x,z=100.,r=50.,drho=500.): # Bouguer gravity of solid sphere boug = 4./3.*np.pi*G*drho * r**3*z/(x**2+z**2)**(3/2) return boug # define geometry xmin = -400. xmax = +400. xstep = 101 x = np.linspace(xmin,xmax,xstep) # run model boug1 = boug_sphere(x) boug2 = boug_sphere(x,z=80) boug3 = boug_sphere(x,z=120) boug4 = boug_sphere(x,r=40) boug5 = boug_sphere(x,r=60) # plot Bouguer anomaly plt.figure(figsize=(12.0, 6.0)) plt.title('Sphere') plt.xlim([-400,400]) plt.xticks([x for x in np.linspace(-300,300,7)]) plt.xlabel('Profile [m]') plt.ylim([0,0.4]) plt.yticks([y for y in np.linspace(0,0.4,5)]) plt.ylabel('Gravity [mGal]') plt.plot(x,1.e5*boug1,linewidth=2.0,linestyle='-',color='red',label='r=50m, z=100m') plt.plot(x,1.e5*boug2,linewidth=2.0,linestyle='--',color='red',label='r=50m, z=80m') plt.plot(x,1.e5*boug3,linewidth=2.0,linestyle=':',color='red',label='r=50m, z=120m') plt.plot(x,1.e5*boug4,linewidth=2.0,linestyle='-',color='green',label='r=40m, z=100m') plt.plot(x,1.e5*boug5,linewidth=2.0,linestyle='-',color='blue',label='r=60m, z=100m') plt.legend() import numpy as np import matplotlib.pyplot as plt # define geometry xmin = -400. xmax = +400. xstep = 11 x = np.linspace(xmin,xmax,xstep) print(x) ###Output [-400. -320. -240. -160. -80. 0. 80. 160. 240. 320. 400.] ###Markdown Horizontal cylinder$$ g(x) = 2 \pi G \Delta\rho R^2 {{D}\over{(x^2 + D^2)}}$$ ###Code %matplotlib inline import numpy as np import matplotlib.pyplot as plt # define parameter values G = 6.672e-11 # m^3/kg/s^2 def boug_sphere(x,z=100.,r=50.,drho=500.): # Bouguer gravity of solid sphere boug = 4./3.*np.pi*G*drho * r**3*z/(x**2+z**2)**(3/2) return boug def boug_cylinder(x,z=100.,r=50.,drho=500.): # Bouguer gravity of solid horizontal cylinder boug = 2.*np.pi*G*drho * r**2*z/(x**2+z**2) return boug # define geometry xmin = -400. xmax = +400. 
xstep = 101 x = np.linspace(xmin,xmax,xstep) # run model bougs = boug_sphere(x) boug1 = boug_cylinder(x) boug2 = boug_cylinder(x,z=80) boug3 = boug_cylinder(x,z=120) boug4 = boug_cylinder(x,r=40) boug5 = boug_cylinder(x,r=60) # plot Bouguer anomaly plt.figure(figsize=(12.0, 6.0)) plt.title('Cylinder') plt.xlim([-400,400]) plt.xticks([x for x in np.linspace(-300,300,7)]) plt.xlabel('Profile [m]') plt.ylim([0,0.8]) plt.yticks([y for y in np.linspace(0,0.8,9)]) plt.ylabel('Gravity [mGal]') plt.plot(x,1.e5*bougs,linewidth=1.0,linestyle=':',color='black',label='sphere') plt.plot(x,1.e5*boug1,linewidth=2.0,linestyle='-',color='red',label='r=50m, z=100m') #plt.plot(x,1.e5*boug2,linewidth=2.0,linestyle='--',color='red',label='r=50m, z=80m') #plt.plot(x,1.e5*boug3,linewidth=2.0,linestyle=':',color='red',label='r=50m, z=120m') #plt.plot(x,1.e5*boug4,linewidth=2.0,linestyle='-',color='green',label='r=40m, z=100m') #plt.plot(x,1.e5*boug5,linewidth=2.0,linestyle='-',color='blue',label='r=60m, z=100m') plt.legend() ###Output _____no_output_____ ###Markdown Inclined rod$$\begin{array}{rcl} g(x) & = & {{G \pi R^2 \Delta\rho}\over{x \sin\alpha}} \left[ {{x + D \cot\alpha}\over{\sqrt{(D \sin^{-1} \alpha)^2 + 2 x D \cot\alpha + x^2}}} - {{x + D\cot\alpha + L \cos\alpha}\over{\sqrt{(L+D \sin^{-1}\alpha)^2 + x^2 + 2x(L\cos\alpha + D\cot\alpha)}}} \right]\end{array}$$ ###Code %matplotlib inline import numpy as np import matplotlib.pyplot as plt # define parameter values G = 6.672e-11 # m^3/kg/s^2 def boug_sphere(x,z=100.,r=50.,drho=500.): # Bouguer gravity of solid sphere boug = 4./3.*np.pi*G*drho * r**3*z/(x**2+z**2)**(3/2) return boug def boug_rod(x,z=50.,r=50.,l=200.,alpha=90.,drho=500.): # Bouguer gravity of inclined rod d2r = np.pi/180. boug = ( G*np.pi*r**2*drho / ((x)*np.sin(alpha*d2r)) *( (x + z/np.tan(alpha*d2r)) / np.sqrt(z**2/np.sin(alpha*d2r)**2 + 2.*x*z/np.tan(alpha*d2r) + x**2) -(x + z/np.tan(alpha*d2r) + l*np.cos(alpha*d2r)) / np.sqrt((l+z/np.sin(alpha*d2r))**2 + x**2 + 2.*x*(l*np.cos(alpha*d2r)+z/np.tan(alpha*d2r))) )) return boug # define geometry xmin = -400. xmax = +400. 
xstep = 101 tiny = 1.e-10 x = np.linspace(xmin,xmax,xstep)+tiny # run model bougs = boug_sphere(x) boug1 = boug_rod(x) boug2 = boug_rod(x,l=100) boug3 = boug_rod(x,l=50) boug4 = boug_rod(x,l=50,alpha=55) boug5 = boug_rod(x,l=50,alpha=155) # plot Bouguer anomaly plt.figure(figsize=(12.0, 6.0)) plt.title('Rod') plt.xlim([-400,400]) plt.xticks([x for x in np.linspace(-300,300,7)]) plt.xlabel('Profile [m]') plt.ylim([0,0.5]) plt.yticks([y for y in np.linspace(0,0.5,6)]) plt.ylabel('Gravity [mGal]') plt.text(-300,0.45,'r=50m') plt.text(-300,0.40,'z=50m') plt.plot(x,1.e5*bougs,linewidth=1.0,linestyle=':',color='black',label='sphere') plt.plot(x,1.e5*boug1,linewidth=2.0,linestyle='-',color='red',label='l=200m,$\\alpha$=90') plt.plot(x,1.e5*boug2,linewidth=2.0,linestyle='--',color='red',label='l=100m,$\\alpha$=90') plt.plot(x,1.e5*boug3,linewidth=2.0,linestyle=':',color='red',label='l=50m,$\\alpha$=90') plt.plot(x,1.e5*boug4,linewidth=2.0,linestyle='-',color='green',label='l=50m,$\\alpha$=45') plt.plot(x,1.e5*boug5,linewidth=2.0,linestyle='-',color='blue',label='l=50m,$\\alpha$=135') plt.legend() ###Output _____no_output_____ ###Markdown 2D inclined plate 2D fault$$g(x) = 2 G \Delta\rho T \left\{ \pi + \arctan\left[ {{x}\over{D_1}} + \cot{\alpha} \right] - \arctan\left[ {{x}\over{D_2}} + \cot{\alpha} \right] \right\}$$ ###Code %matplotlib inline import numpy as np import matplotlib.pyplot as plt # define parameter values G = 6.672e-11 # m^3/kg/s^2 def boug_sphere(x,z=100.,r=50.,drho=500.): # Bouguer gravity of solid sphere boug = 4./3.*np.pi*G*drho * r**3*z/(x**2+z**2)**(3/2) return boug def boug_fault(x,drho=500.,d1=150.,d2=100.,t=150.,alpha=90.): # Bouguer gravity of fault d2r = np.pi/180. boug = (2.*G*drho*t*(np.pi + np.arctan((x/d1)+(1./np.tan(alpha*d2r))) - np.arctan((x/d2)+(1./np.tan(alpha*d2r))))) boug = boug - 2.*G*drho*t*np.pi return boug # define geometry xmin = -400. xmax = +400. xstep = 101 tiny = 1.e-10 x = np.linspace(xmin,xmax,xstep)+tiny # run model bougs = boug_sphere(x) boug1 = boug_fault(x) boug2 = boug_fault(x,alpha=30) boug3 = boug_fault(x,alpha=150) # plot Bouguer anomaly plt.figure(figsize=(12.0, 6.0)) plt.title('Fault') plt.xlim([-400,400]) plt.xticks([x for x in np.linspace(-300,300,7)]) plt.xlabel('Profile [m]') plt.ylim([-1.0,1.0]) plt.yticks([y for y in np.linspace(-1.0,1.0,5)]) plt.ylabel('Gravity [mGal]') plt.text(-300,-0.50,'D$_1$=150m') plt.text(-300,-0.60,'D$_2$=100m') plt.text(-300,-0.70,'T=150m') plt.plot(x,1.e5*bougs,linewidth=1.0,linestyle=':',color='black',label='sphere') plt.plot(x,1.e5*boug1,linewidth=2.0,linestyle='-',color='red',label='$\\alpha$=90') plt.plot(x,1.e5*boug2,linewidth=2.0,linestyle='--',color='red',label='$\\alpha$=30') plt.plot(x,1.e5*boug3,linewidth=2.0,linestyle=':',color='red',label='$\\alpha$=150') plt.legend() ###Output _____no_output_____
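A quick numerical cross-check (added, not part of the original gravity notebook): at the profile centre x = 0 the sphere formula above reduces to $$ g(0) = {{4}\over{3}} \pi G \Delta\rho {{R^3}\over{D^2}} $$ which, with the default values R = 50 m, D = 100 m and Δρ = 500 kg/m³, gives about 1.75e-6 m/s², i.e. roughly 0.175 mGal, matching the peak of the default sphere curve plotted above.
import numpy as np
G = 6.672e-11                            # gravitational constant [m^3/kg/s^2], as in the notebook
g0 = 4./3.*np.pi*G*500.*50.**3/100.**2   # sphere anomaly at x = 0 [m/s^2]
print(1.e5*g0)                           # ~0.175 mGal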
Optic_Disk_Segmentation_UNet.ipynb
###Markdown ###Code ###Output _____no_output_____ ###Markdown Data ###Code import os import numpy as np import cv2 import matplotlib.pyplot as plt %matplotlib inline from glob import glob from tqdm import tqdm !pip install -U albumentations from albumentations import HorizontalFlip, VerticalFlip, ElasticTransform, GridDistortion, OpticalDistortion def create_dir(path): if not os.path.exists(path): os.makedirs(path) def load_data(path): """ X = Images and Y = masks """ train_x = sorted(glob(os.path.join(path, "Training", "Images", "*.png"))) train_y = sorted(glob(os.path.join(path, "Training", "GT_OD", "*.png"))) test_x = sorted(glob(os.path.join(path, "Test", "Images", "*.png"))) test_y = sorted(glob(os.path.join(path, "Test", "Test_GT_OD", "*.png"))) return (train_x, train_y), (test_x, test_y) def augment_data(images, masks, save_path, augment=True): H = 512 W = 512 for idx, (x, y) in tqdm(enumerate(zip(images, masks)), total=len(images)): """ Extracting names """ name = x.split("/")[-1].split(".")[0] """ Reading image and mask """ x = cv2.imread(x, cv2.IMREAD_COLOR) x = cv2.cvtColor(x, cv2.COLOR_BGR2RGB) y = cv2.imread(y, cv2.IMREAD_COLOR) print(x.shape, y.shape) if augment == True: aug = HorizontalFlip(p=1.0) augmented = aug(image=x, mask=y) x1 = augmented["image"] y1 = augmented["mask"] aug = VerticalFlip(p=1.0) augmented = aug(image=x, mask=y) x2 = augmented["image"] y2 = augmented["mask"] aug = ElasticTransform(p=1, alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03) augmented = aug(image=x, mask=y) x3 = augmented['image'] y3 = augmented['mask'] aug = GridDistortion(p=1) augmented = aug(image=x, mask=y) x4 = augmented['image'] y4 = augmented['mask'] aug = OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5) augmented = aug(image=x, mask=y) x5 = augmented['image'] y5 = augmented['mask'] X = [x, x1, x2, x3, x4, x5] Y = [y, y1, y2, y3, y4, y5] else: X = [x] Y = [y] index = 0 for i, m in zip(X, Y): i = cv2.resize(i, (W, H), interpolation = cv2.INTER_CUBIC) #i = cv2.cvtColor(i,cv2.COLOR_BGR2RGB) #Histogram equalisation - start #img_gray = cv2.cvtColor(i, cv2.COLOR_BGR2GRAY) #clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8)) #i = clahe.apply(img_gray) #i = cv2.cvtColor(i,cv2.COLOR_GRAY2RGB) lab = cv2.cvtColor(i, cv2.COLOR_BGR2LAB) lab_planes = cv2.split(lab) clahe = cv2.createCLAHE(clipLimit=3.0,tileGridSize=(10,10)) lab_planes[0] = clahe.apply(lab_planes[0]) lab = cv2.merge(lab_planes) i = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR) i = cv2.cvtColor(i,cv2.COLOR_BGR2RGB) #histogram equalization - end m = cv2.resize(m, (W, H), interpolation = cv2.INTER_CUBIC) if len(X) == 1: tmp_image_name = f"{name}.png" tmp_mask_name = f"{name}.png" else: tmp_image_name = f"{name}_{index}.png" tmp_mask_name = f"{name}_{index}.png" image_path = os.path.join(save_path, "image", tmp_image_name) mask_path = os.path.join(save_path, "mask", tmp_mask_name) cv2.imwrite(image_path, i) cv2.imwrite(mask_path, m) index += 1 if __name__ == "__main__": """ Seeding """ np.random.seed(42) """ Load the data """ data_path = "/content/drive/MyDrive/Drishti/" (train_x, train_y), (test_x, test_y) = load_data(data_path) '''print(f"Train: {len(train_x)} - {len(train_y)}") print(f"Test: {len(test_x)} - {len(test_y)}")''' """ Creating directories """ create_dir("new_data/train/image") create_dir("new_data/train/mask") create_dir("new_data/test/image") create_dir("new_data/test/mask") augment_data(train_x, train_y, "new_data/train/", augment=False) augment_data(test_x, test_y, "new_data/test/", augment=False) ###Output 
Requirement already satisfied: albumentations in /usr/local/lib/python3.7/dist-packages (1.1.0) Requirement already satisfied: numpy>=1.11.1 in /usr/local/lib/python3.7/dist-packages (from albumentations) (1.19.5) Requirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from albumentations) (1.4.1) Requirement already satisfied: PyYAML in /usr/local/lib/python3.7/dist-packages (from albumentations) (3.13) Requirement already satisfied: opencv-python-headless>=4.1.1 in /usr/local/lib/python3.7/dist-packages (from albumentations) (4.5.3.56) Requirement already satisfied: scikit-image>=0.16.1 in /usr/local/lib/python3.7/dist-packages (from albumentations) (0.16.2) Requirement already satisfied: qudida>=0.0.4 in /usr/local/lib/python3.7/dist-packages (from albumentations) (0.0.4) Requirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from qudida>=0.0.4->albumentations) (3.7.4.3) Requirement already satisfied: scikit-learn>=0.19.1 in /usr/local/lib/python3.7/dist-packages (from qudida>=0.0.4->albumentations) (0.22.2.post1) Requirement already satisfied: PyWavelets>=0.4.0 in /usr/local/lib/python3.7/dist-packages (from scikit-image>=0.16.1->albumentations) (1.1.1) Requirement already satisfied: pillow>=4.3.0 in /usr/local/lib/python3.7/dist-packages (from scikit-image>=0.16.1->albumentations) (7.1.2) Requirement already satisfied: networkx>=2.0 in /usr/local/lib/python3.7/dist-packages (from scikit-image>=0.16.1->albumentations) (2.6.3) Requirement already satisfied: matplotlib!=3.0.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from scikit-image>=0.16.1->albumentations) (3.2.2) Requirement already satisfied: imageio>=2.3.0 in /usr/local/lib/python3.7/dist-packages (from scikit-image>=0.16.1->albumentations) (2.4.1) Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image>=0.16.1->albumentations) (2.8.2) Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image>=0.16.1->albumentations) (1.3.2) Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image>=0.16.1->albumentations) (0.10.0) Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image>=0.16.1->albumentations) (2.4.7) Requirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from cycler>=0.10->matplotlib!=3.0.0,>=2.0.0->scikit-image>=0.16.1->albumentations) (1.15.0) Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn>=0.19.1->qudida>=0.0.4->albumentations) (1.0.1) ###Markdown Model ###Code from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Conv2DTranspose, Concatenate, Input from tensorflow.keras.models import Model def conv_block(inputs, num_filters): x = Conv2D(num_filters, 3, padding="same")(inputs) x = BatchNormalization()(x) x = Activation("relu")(x) x = Conv2D(num_filters, 3, padding="same")(x) x = BatchNormalization()(x) x = Activation("relu")(x) return x def encoder_block(inputs, num_filters): x = conv_block(inputs, num_filters) p = MaxPool2D((2, 2))(x) return x, p def decoder_block(inputs, skip_features, num_filters): x = Conv2DTranspose(num_filters, (2, 2), strides=2, padding="same")(inputs) x = Concatenate()([x, 
skip_features]) x = conv_block(x, num_filters) return x def build_unet(input_shape): inputs = Input(input_shape) s1, p1 = encoder_block(inputs, 64) s2, p2 = encoder_block(p1, 128) s3, p3 = encoder_block(p2, 256) s4, p4 = encoder_block(p3, 512) b1 = conv_block(p4, 1024) d1 = decoder_block(b1, s4, 512) d2 = decoder_block(d1, s3, 256) d3 = decoder_block(d2, s2, 128) d4 = decoder_block(d3, s1, 64) outputs = Conv2D(1, 1, padding="same", activation="sigmoid")(d4) model = Model(inputs, outputs, name="UNET") return model if __name__ == "__main__": input_shape = (512, 512, 3) model = build_unet(input_shape) model.summary() ###Output Model: "UNET" __________________________________________________________________________________________________ Layer (type) Output Shape Param # Connected to ================================================================================================== input_1 (InputLayer) [(None, 512, 512, 3) 0 __________________________________________________________________________________________________ conv2d (Conv2D) (None, 512, 512, 64) 1792 input_1[0][0] __________________________________________________________________________________________________ batch_normalization (BatchNorma (None, 512, 512, 64) 256 conv2d[0][0] __________________________________________________________________________________________________ activation (Activation) (None, 512, 512, 64) 0 batch_normalization[0][0] __________________________________________________________________________________________________ conv2d_1 (Conv2D) (None, 512, 512, 64) 36928 activation[0][0] __________________________________________________________________________________________________ batch_normalization_1 (BatchNor (None, 512, 512, 64) 256 conv2d_1[0][0] __________________________________________________________________________________________________ activation_1 (Activation) (None, 512, 512, 64) 0 batch_normalization_1[0][0] __________________________________________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 256, 256, 64) 0 activation_1[0][0] __________________________________________________________________________________________________ conv2d_2 (Conv2D) (None, 256, 256, 128 73856 max_pooling2d[0][0] __________________________________________________________________________________________________ batch_normalization_2 (BatchNor (None, 256, 256, 128 512 conv2d_2[0][0] __________________________________________________________________________________________________ activation_2 (Activation) (None, 256, 256, 128 0 batch_normalization_2[0][0] __________________________________________________________________________________________________ conv2d_3 (Conv2D) (None, 256, 256, 128 147584 activation_2[0][0] __________________________________________________________________________________________________ batch_normalization_3 (BatchNor (None, 256, 256, 128 512 conv2d_3[0][0] __________________________________________________________________________________________________ activation_3 (Activation) (None, 256, 256, 128 0 batch_normalization_3[0][0] __________________________________________________________________________________________________ max_pooling2d_1 (MaxPooling2D) (None, 128, 128, 128 0 activation_3[0][0] __________________________________________________________________________________________________ conv2d_4 (Conv2D) (None, 128, 128, 256 295168 max_pooling2d_1[0][0] 
__________________________________________________________________________________________________ batch_normalization_4 (BatchNor (None, 128, 128, 256 1024 conv2d_4[0][0] __________________________________________________________________________________________________ activation_4 (Activation) (None, 128, 128, 256 0 batch_normalization_4[0][0] __________________________________________________________________________________________________ conv2d_5 (Conv2D) (None, 128, 128, 256 590080 activation_4[0][0] __________________________________________________________________________________________________ batch_normalization_5 (BatchNor (None, 128, 128, 256 1024 conv2d_5[0][0] __________________________________________________________________________________________________ activation_5 (Activation) (None, 128, 128, 256 0 batch_normalization_5[0][0] __________________________________________________________________________________________________ max_pooling2d_2 (MaxPooling2D) (None, 64, 64, 256) 0 activation_5[0][0] __________________________________________________________________________________________________ conv2d_6 (Conv2D) (None, 64, 64, 512) 1180160 max_pooling2d_2[0][0] __________________________________________________________________________________________________ batch_normalization_6 (BatchNor (None, 64, 64, 512) 2048 conv2d_6[0][0] __________________________________________________________________________________________________ activation_6 (Activation) (None, 64, 64, 512) 0 batch_normalization_6[0][0] __________________________________________________________________________________________________ conv2d_7 (Conv2D) (None, 64, 64, 512) 2359808 activation_6[0][0] __________________________________________________________________________________________________ batch_normalization_7 (BatchNor (None, 64, 64, 512) 2048 conv2d_7[0][0] __________________________________________________________________________________________________ activation_7 (Activation) (None, 64, 64, 512) 0 batch_normalization_7[0][0] __________________________________________________________________________________________________ max_pooling2d_3 (MaxPooling2D) (None, 32, 32, 512) 0 activation_7[0][0] __________________________________________________________________________________________________ conv2d_8 (Conv2D) (None, 32, 32, 1024) 4719616 max_pooling2d_3[0][0] __________________________________________________________________________________________________ batch_normalization_8 (BatchNor (None, 32, 32, 1024) 4096 conv2d_8[0][0] __________________________________________________________________________________________________ activation_8 (Activation) (None, 32, 32, 1024) 0 batch_normalization_8[0][0] __________________________________________________________________________________________________ conv2d_9 (Conv2D) (None, 32, 32, 1024) 9438208 activation_8[0][0] __________________________________________________________________________________________________ batch_normalization_9 (BatchNor (None, 32, 32, 1024) 4096 conv2d_9[0][0] __________________________________________________________________________________________________ activation_9 (Activation) (None, 32, 32, 1024) 0 batch_normalization_9[0][0] __________________________________________________________________________________________________ conv2d_transpose (Conv2DTranspo (None, 64, 64, 512) 2097664 activation_9[0][0] __________________________________________________________________________________________________ concatenate 
(Concatenate) (None, 64, 64, 1024) 0 conv2d_transpose[0][0] activation_7[0][0] __________________________________________________________________________________________________ conv2d_10 (Conv2D) (None, 64, 64, 512) 4719104 concatenate[0][0] __________________________________________________________________________________________________ batch_normalization_10 (BatchNo (None, 64, 64, 512) 2048 conv2d_10[0][0] __________________________________________________________________________________________________ activation_10 (Activation) (None, 64, 64, 512) 0 batch_normalization_10[0][0] __________________________________________________________________________________________________ conv2d_11 (Conv2D) (None, 64, 64, 512) 2359808 activation_10[0][0] __________________________________________________________________________________________________ batch_normalization_11 (BatchNo (None, 64, 64, 512) 2048 conv2d_11[0][0] __________________________________________________________________________________________________ activation_11 (Activation) (None, 64, 64, 512) 0 batch_normalization_11[0][0] __________________________________________________________________________________________________ conv2d_transpose_1 (Conv2DTrans (None, 128, 128, 256 524544 activation_11[0][0] __________________________________________________________________________________________________ concatenate_1 (Concatenate) (None, 128, 128, 512 0 conv2d_transpose_1[0][0] activation_5[0][0] __________________________________________________________________________________________________ conv2d_12 (Conv2D) (None, 128, 128, 256 1179904 concatenate_1[0][0] __________________________________________________________________________________________________ batch_normalization_12 (BatchNo (None, 128, 128, 256 1024 conv2d_12[0][0] __________________________________________________________________________________________________ activation_12 (Activation) (None, 128, 128, 256 0 batch_normalization_12[0][0] __________________________________________________________________________________________________ conv2d_13 (Conv2D) (None, 128, 128, 256 590080 activation_12[0][0] __________________________________________________________________________________________________ batch_normalization_13 (BatchNo (None, 128, 128, 256 1024 conv2d_13[0][0] __________________________________________________________________________________________________ activation_13 (Activation) (None, 128, 128, 256 0 batch_normalization_13[0][0] __________________________________________________________________________________________________ conv2d_transpose_2 (Conv2DTrans (None, 256, 256, 128 131200 activation_13[0][0] __________________________________________________________________________________________________ concatenate_2 (Concatenate) (None, 256, 256, 256 0 conv2d_transpose_2[0][0] activation_3[0][0] __________________________________________________________________________________________________ conv2d_14 (Conv2D) (None, 256, 256, 128 295040 concatenate_2[0][0] __________________________________________________________________________________________________ batch_normalization_14 (BatchNo (None, 256, 256, 128 512 conv2d_14[0][0] __________________________________________________________________________________________________ activation_14 (Activation) (None, 256, 256, 128 0 batch_normalization_14[0][0] __________________________________________________________________________________________________ conv2d_15 (Conv2D) (None, 256, 256, 128 147584 
activation_14[0][0] __________________________________________________________________________________________________ batch_normalization_15 (BatchNo (None, 256, 256, 128 512 conv2d_15[0][0] __________________________________________________________________________________________________ activation_15 (Activation) (None, 256, 256, 128 0 batch_normalization_15[0][0] __________________________________________________________________________________________________ conv2d_transpose_3 (Conv2DTrans (None, 512, 512, 64) 32832 activation_15[0][0] __________________________________________________________________________________________________ concatenate_3 (Concatenate) (None, 512, 512, 128 0 conv2d_transpose_3[0][0] activation_1[0][0] __________________________________________________________________________________________________ conv2d_16 (Conv2D) (None, 512, 512, 64) 73792 concatenate_3[0][0] __________________________________________________________________________________________________ batch_normalization_16 (BatchNo (None, 512, 512, 64) 256 conv2d_16[0][0] __________________________________________________________________________________________________ activation_16 (Activation) (None, 512, 512, 64) 0 batch_normalization_16[0][0] __________________________________________________________________________________________________ conv2d_17 (Conv2D) (None, 512, 512, 64) 36928 activation_16[0][0] __________________________________________________________________________________________________ batch_normalization_17 (BatchNo (None, 512, 512, 64) 256 conv2d_17[0][0] __________________________________________________________________________________________________ activation_17 (Activation) (None, 512, 512, 64) 0 batch_normalization_17[0][0] __________________________________________________________________________________________________ conv2d_18 (Conv2D) (None, 512, 512, 1) 65 activation_17[0][0] ================================================================================================== Total params: 31,055,297 Trainable params: 31,043,521 Non-trainable params: 11,776 __________________________________________________________________________________________________ ###Markdown Metrices ###Code import numpy as np import tensorflow as tf from tensorflow.keras import backend as K def iou(y_true, y_pred): def f(y_true, y_pred): intersection = (y_true * y_pred).sum() union = y_true.sum() + y_pred.sum() - intersection x = (intersection + 1e-15) / (union + 1e-15) x = x.astype(np.float32) return x return tf.numpy_function(f, [y_true, y_pred], tf.float32) smooth = 1e-15 def dice_coef(y_true, y_pred): y_true = tf.keras.layers.Flatten()(y_true) y_pred = tf.keras.layers.Flatten()(y_pred) intersection = tf.reduce_sum(y_true * y_pred) return (2. 
* intersection + smooth) / (tf.reduce_sum(y_true) + tf.reduce_sum(y_pred) + smooth) def dice_loss(y_true, y_pred): return 1.0 - dice_coef(y_true, y_pred) ###Output _____no_output_____ ###Markdown Train ###Code import os os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" import numpy as np import cv2 from glob import glob from sklearn.utils import shuffle import tensorflow as tf from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger, ReduceLROnPlateau, EarlyStopping, TensorBoard from tensorflow.keras.optimizers import Adam from tensorflow.keras.metrics import Recall, Precision #from Model_UNet_Simple import build_unet #from Metrices import dice_loss, dice_coef, iou H = 512 W = 512 def create_dir(path): if not os.path.exists(path): os.makedirs(path) def load_data(path): x = sorted(glob(os.path.join(path, "image", "*.png"))) y = sorted(glob(os.path.join(path, "mask", "*.png"))) return x, y def shuffling(x, y): x, y = shuffle(x, y, random_state=42) return x, y def read_image(path): path = path.decode() x = cv2.imread(path, cv2.IMREAD_COLOR) # x = cv2.resize(x, (W, H)) x = x/255.0 x = x.astype(np.float32) return x def read_mask(path): path = path.decode() x = cv2.imread(path, cv2.IMREAD_GRAYSCALE) ## (512, 512) # x = cv2.resize(x, (W, H)) x = x/255.0 x = x.astype(np.float32) x = np.expand_dims(x, axis=-1) ## (512, 512, 1) return x def tf_parse(x, y): def _parse(x, y): x = read_image(x) y = read_mask(y) return x, y x, y = tf.numpy_function(_parse, [x, y], [tf.float32, tf.float32]) x.set_shape([H, W, 3]) y.set_shape([H, W, 1]) return x, y def tf_dataset(X, Y, batch_size=2): dataset = tf.data.Dataset.from_tensor_slices((X, Y)) dataset = dataset.map(tf_parse) dataset = dataset.batch(batch_size) dataset = dataset.prefetch(4) return dataset if __name__ == "__main__": """ Seeding """ np.random.seed(42) tf.random.set_seed(42) """ Directory to save files """ create_dir("files") """ Hyperparameters """ batch_size = 2 lr = 1e-4 num_epochs = 100 model_path = os.path.join("files", "model.h5") csv_path = os.path.join("files", "data.csv") """ Dataset """ dataset_path = "new_data" train_path = os.path.join(dataset_path, "train") valid_path = os.path.join(dataset_path, "test") train_x, train_y = load_data(train_path) train_x, train_y = shuffling(train_x, train_y) valid_x, valid_y = load_data(valid_path) print(f"Train: {len(train_x)} - {len(train_y)}") print(f"Valid: {len(valid_x)} - {len(valid_y)}") train_dataset = tf_dataset(train_x, train_y, batch_size=batch_size) valid_dataset = tf_dataset(valid_x, valid_y, batch_size=batch_size) train_steps = len(train_x)//batch_size valid_setps = len(valid_x)//batch_size if len(train_x) % batch_size != 0: train_steps += 1 if len(valid_x) % batch_size != 0: valid_setps += 1 """ Model """ model = build_unet((H, W, 3)) model.compile(loss=dice_loss, optimizer=Adam(lr), metrics=[dice_coef, iou, Recall(), Precision()]) #model.summary() callbacks = [ ModelCheckpoint(model_path, verbose=1, save_best_only=True), #ReduceLROnPlateau(monitor="val_loss", factor=0.1, patience=5, min_lr=1e-6, verbose=1), CSVLogger(csv_path), TensorBoard(), #EarlyStopping(monitor="val_loss", patience=10, restore_best_weights=False) ] model.fit( train_dataset, epochs=num_epochs, validation_data=valid_dataset, steps_per_epoch=train_steps, validation_steps=valid_setps, callbacks=callbacks ) ###Output Train: 80 - 80 Valid: 16 - 16 Epoch 1/100 40/40 [==============================] - 126s 2s/step - loss: 0.8047 - dice_coef: 0.1953 - iou: 0.1100 - recall: 0.9445 - precision: 0.1824 - val_loss: 0.9331 - 
val_dice_coef: 0.0669 - val_iou: 0.0347 - val_recall: 0.0041 - val_precision: 0.9613 Epoch 00001: val_loss improved from inf to 0.93307, saving model to files/model.h5 Epoch 2/100 40/40 [==============================] - 77s 2s/step - loss: 0.6281 - dice_coef: 0.3719 - iou: 0.2297 - recall: 0.9581 - precision: 0.7422 - val_loss: 0.9172 - val_dice_coef: 0.0828 - val_iou: 0.0432 - val_recall: 0.4655 - val_precision: 0.3763 Epoch 00002: val_loss improved from 0.93307 to 0.91718, saving model to files/model.h5 Epoch 3/100 40/40 [==============================] - 72s 2s/step - loss: 0.5569 - dice_coef: 0.4431 - iou: 0.2856 - recall: 0.9566 - precision: 0.8439 - val_loss: 0.9263 - val_dice_coef: 0.0737 - val_iou: 0.0383 - val_recall: 0.0391 - val_precision: 1.0000 Epoch 00003: val_loss did not improve from 0.91718 Epoch 4/100 40/40 [==============================] - 72s 2s/step - loss: 0.5215 - dice_coef: 0.4785 - iou: 0.3155 - recall: 0.9487 - precision: 0.8454 - val_loss: 0.9016 - val_dice_coef: 0.0984 - val_iou: 0.0519 - val_recall: 0.1252 - val_precision: 0.9763 Epoch 00004: val_loss improved from 0.91718 to 0.90158, saving model to files/model.h5 Epoch 5/100 40/40 [==============================] - 72s 2s/step - loss: 0.4796 - dice_coef: 0.5204 - iou: 0.3528 - recall: 0.9538 - precision: 0.8955 - val_loss: 0.9488 - val_dice_coef: 0.0512 - val_iou: 0.0263 - val_recall: 0.0000e+00 - val_precision: 0.0000e+00 Epoch 00005: val_loss did not improve from 0.90158 Epoch 6/100 40/40 [==============================] - 77s 2s/step - loss: 0.4452 - dice_coef: 0.5548 - iou: 0.3850 - recall: 0.9535 - precision: 0.9246 - val_loss: 0.9543 - val_dice_coef: 0.0457 - val_iou: 0.0234 - val_recall: 0.0000e+00 - val_precision: 0.0000e+00 Epoch 00006: val_loss did not improve from 0.90158 Epoch 7/100 40/40 [==============================] - 72s 2s/step - loss: 0.4188 - dice_coef: 0.5812 - iou: 0.4107 - recall: 0.9474 - precision: 0.9274 - val_loss: 0.9200 - val_dice_coef: 0.0800 - val_iou: 0.0424 - val_recall: 0.0528 - val_precision: 1.0000 Epoch 00007: val_loss did not improve from 0.90158 Epoch 8/100 40/40 [==============================] - 72s 2s/step - loss: 0.3908 - dice_coef: 0.6092 - iou: 0.4392 - recall: 0.9482 - precision: 0.9300 - val_loss: 0.9612 - val_dice_coef: 0.0388 - val_iou: 0.0198 - val_recall: 0.0000e+00 - val_precision: 0.0000e+00 Epoch 00008: val_loss did not improve from 0.90158 Epoch 9/100 40/40 [==============================] - 72s 2s/step - loss: 0.3626 - dice_coef: 0.6374 - iou: 0.4689 - recall: 0.9482 - precision: 0.9477 - val_loss: 0.9436 - val_dice_coef: 0.0564 - val_iou: 0.0291 - val_recall: 0.0184 - val_precision: 1.0000 Epoch 00009: val_loss did not improve from 0.90158 Epoch 10/100 40/40 [==============================] - 77s 2s/step - loss: 0.3318 - dice_coef: 0.6682 - iou: 0.5028 - recall: 0.9491 - precision: 0.9645 - val_loss: 0.9697 - val_dice_coef: 0.0303 - val_iou: 0.0154 - val_recall: 0.0000e+00 - val_precision: 0.0000e+00 Epoch 00010: val_loss did not improve from 0.90158 Epoch 11/100 40/40 [==============================] - 72s 2s/step - loss: 0.3152 - dice_coef: 0.6848 - iou: 0.5216 - recall: 0.9448 - precision: 0.9499 - val_loss: 0.7784 - val_dice_coef: 0.2216 - val_iou: 0.1281 - val_recall: 0.1697 - val_precision: 1.0000 Epoch 00011: val_loss improved from 0.90158 to 0.77840, saving model to files/model.h5 Epoch 12/100 40/40 [==============================] - 77s 2s/step - loss: 0.2888 - dice_coef: 0.7112 - iou: 0.5528 - recall: 0.9458 - precision: 0.9698 - val_loss: 
0.7686 - val_dice_coef: 0.2314 - val_iou: 0.1356 - val_recall: 0.1800 - val_precision: 1.0000 Epoch 00012: val_loss improved from 0.77840 to 0.76865, saving model to files/model.h5 Epoch 13/100 40/40 [==============================] - 77s 2s/step - loss: 0.2670 - dice_coef: 0.7330 - iou: 0.5794 - recall: 0.9460 - precision: 0.9763 - val_loss: 0.7219 - val_dice_coef: 0.2781 - val_iou: 0.1665 - val_recall: 0.2134 - val_precision: 0.9988 Epoch 00013: val_loss improved from 0.76865 to 0.72187, saving model to files/model.h5 Epoch 14/100 40/40 [==============================] - 77s 2s/step - loss: 0.2481 - dice_coef: 0.7519 - iou: 0.6033 - recall: 0.9430 - precision: 0.9805 - val_loss: 0.6446 - val_dice_coef: 0.3554 - val_iou: 0.2262 - val_recall: 0.3006 - val_precision: 0.9991 Epoch 00014: val_loss improved from 0.72187 to 0.64462, saving model to files/model.h5 Epoch 15/100 40/40 [==============================] - 72s 2s/step - loss: 0.2375 - dice_coef: 0.7625 - iou: 0.6170 - recall: 0.9409 - precision: 0.9743 - val_loss: 0.2968 - val_dice_coef: 0.7032 - val_iou: 0.5510 - val_recall: 0.7440 - val_precision: 0.9926 Epoch 00015: val_loss improved from 0.64462 to 0.29683, saving model to files/model.h5 Epoch 16/100 40/40 [==============================] - 71s 2s/step - loss: 0.2177 - dice_coef: 0.7823 - iou: 0.6432 - recall: 0.9429 - precision: 0.9814 - val_loss: 0.2630 - val_dice_coef: 0.7370 - val_iou: 0.5902 - val_recall: 0.7831 - val_precision: 0.9960 Epoch 00016: val_loss improved from 0.29683 to 0.26300, saving model to files/model.h5 Epoch 17/100 40/40 [==============================] - 71s 2s/step - loss: 0.2011 - dice_coef: 0.7989 - iou: 0.6659 - recall: 0.9422 - precision: 0.9854 - val_loss: 0.2728 - val_dice_coef: 0.7272 - val_iou: 0.5804 - val_recall: 0.7607 - val_precision: 0.9979 Epoch 00017: val_loss did not improve from 0.26300 Epoch 18/100 40/40 [==============================] - 71s 2s/step - loss: 0.1873 - dice_coef: 0.8127 - iou: 0.6851 - recall: 0.9415 - precision: 0.9880 - val_loss: 0.1899 - val_dice_coef: 0.8101 - val_iou: 0.6834 - val_recall: 0.8808 - val_precision: 0.9906 Epoch 00018: val_loss improved from 0.26300 to 0.18991, saving model to files/model.h5 Epoch 19/100 40/40 [==============================] - 71s 2s/step - loss: 0.1747 - dice_coef: 0.8253 - iou: 0.7032 - recall: 0.9429 - precision: 0.9900 - val_loss: 0.1972 - val_dice_coef: 0.8028 - val_iou: 0.6741 - val_recall: 0.8334 - val_precision: 0.9968 Epoch 00019: val_loss did not improve from 0.18991 Epoch 20/100 40/40 [==============================] - 71s 2s/step - loss: 0.1632 - dice_coef: 0.8368 - iou: 0.7199 - recall: 0.9415 - precision: 0.9920 - val_loss: 0.1769 - val_dice_coef: 0.8231 - val_iou: 0.7021 - val_recall: 0.8561 - val_precision: 0.9948 Epoch 00020: val_loss improved from 0.18991 to 0.17690, saving model to files/model.h5 Epoch 21/100 40/40 [==============================] - 71s 2s/step - loss: 0.1554 - dice_coef: 0.8446 - iou: 0.7315 - recall: 0.9369 - precision: 0.9905 - val_loss: 0.1615 - val_dice_coef: 0.8385 - val_iou: 0.7237 - val_recall: 0.9134 - val_precision: 0.9718 Epoch 00021: val_loss improved from 0.17690 to 0.16149, saving model to files/model.h5 Epoch 22/100 40/40 [==============================] - 71s 2s/step - loss: 0.1796 - dice_coef: 0.8204 - iou: 0.6975 - recall: 0.9226 - precision: 0.9418 - val_loss: 0.4428 - val_dice_coef: 0.5572 - val_iou: 0.3919 - val_recall: 0.9817 - val_precision: 0.4303 Epoch 00022: val_loss did not improve from 0.16149 Epoch 23/100 40/40 
[==============================] - 71s 2s/step - loss: 0.1623 - dice_coef: 0.8377 - iou: 0.7217 - recall: 0.9234 - precision: 0.9644 - val_loss: 0.3372 - val_dice_coef: 0.6628 - val_iou: 0.4993 - val_recall: 0.9931 - val_precision: 0.5830 Epoch 00023: val_loss did not improve from 0.16149 Epoch 24/100 40/40 [==============================] - 71s 2s/step - loss: 0.1413 - dice_coef: 0.8587 - iou: 0.7529 - recall: 0.9315 - precision: 0.9811 - val_loss: 0.3561 - val_dice_coef: 0.6439 - val_iou: 0.4796 - val_recall: 0.9976 - val_precision: 0.5370 Epoch 00024: val_loss did not improve from 0.16149 Epoch 25/100 40/40 [==============================] - 71s 2s/step - loss: 0.1302 - dice_coef: 0.8698 - iou: 0.7700 - recall: 0.9346 - precision: 0.9869 - val_loss: 0.1568 - val_dice_coef: 0.8432 - val_iou: 0.7309 - val_recall: 0.9605 - val_precision: 0.8995 Epoch 00025: val_loss improved from 0.16149 to 0.15680, saving model to files/model.h5 Epoch 26/100 40/40 [==============================] - 71s 2s/step - loss: 0.1213 - dice_coef: 0.8787 - iou: 0.7840 - recall: 0.9362 - precision: 0.9901 - val_loss: 0.1266 - val_dice_coef: 0.8734 - val_iou: 0.7764 - val_recall: 0.9233 - val_precision: 0.9663 Epoch 00026: val_loss improved from 0.15680 to 0.12655, saving model to files/model.h5 Epoch 27/100 40/40 [==============================] - 71s 2s/step - loss: 0.1145 - dice_coef: 0.8855 - iou: 0.7948 - recall: 0.9360 - precision: 0.9918 - val_loss: 0.1202 - val_dice_coef: 0.8798 - val_iou: 0.7868 - val_recall: 0.8938 - val_precision: 0.9942 Epoch 00027: val_loss improved from 0.12655 to 0.12020, saving model to files/model.h5 Epoch 28/100 40/40 [==============================] - 71s 2s/step - loss: 0.1084 - dice_coef: 0.8916 - iou: 0.8047 - recall: 0.9352 - precision: 0.9929 - val_loss: 0.1281 - val_dice_coef: 0.8719 - val_iou: 0.7757 - val_recall: 0.8912 - val_precision: 0.9635 Epoch 00028: val_loss did not improve from 0.12020 Epoch 29/100 40/40 [==============================] - 71s 2s/step - loss: 0.1072 - dice_coef: 0.8928 - iou: 0.8067 - recall: 0.9349 - precision: 0.9889 - val_loss: 0.1446 - val_dice_coef: 0.8554 - val_iou: 0.7493 - val_recall: 0.9320 - val_precision: 0.9300 Epoch 00029: val_loss did not improve from 0.12020 Epoch 30/100 40/40 [==============================] - 71s 2s/step - loss: 0.1058 - dice_coef: 0.8942 - iou: 0.8090 - recall: 0.9313 - precision: 0.9872 - val_loss: 0.1242 - val_dice_coef: 0.8758 - val_iou: 0.7812 - val_recall: 0.8834 - val_precision: 0.9945 Epoch 00030: val_loss did not improve from 0.12020 Epoch 31/100 40/40 [==============================] - 71s 2s/step - loss: 0.0968 - dice_coef: 0.9032 - iou: 0.8237 - recall: 0.9313 - precision: 0.9921 - val_loss: 0.1026 - val_dice_coef: 0.8974 - val_iou: 0.8146 - val_recall: 0.9191 - val_precision: 0.9806 Epoch 00031: val_loss improved from 0.12020 to 0.10258, saving model to files/model.h5 Epoch 32/100 40/40 [==============================] - 71s 2s/step - loss: 0.0926 - dice_coef: 0.9074 - iou: 0.8308 - recall: 0.9330 - precision: 0.9922 - val_loss: 0.0950 - val_dice_coef: 0.9050 - val_iou: 0.8269 - val_recall: 0.9374 - val_precision: 0.9815 Epoch 00032: val_loss improved from 0.10258 to 0.09503, saving model to files/model.h5 Epoch 33/100 40/40 [==============================] - 71s 2s/step - loss: 0.0869 - dice_coef: 0.9131 - iou: 0.8404 - recall: 0.9343 - precision: 0.9947 - val_loss: 0.0987 - val_dice_coef: 0.9013 - val_iou: 0.8215 - val_recall: 0.8915 - val_precision: 0.9964 Epoch 00033: val_loss did not improve from 
0.09503 Epoch 34/100 40/40 [==============================] - 71s 2s/step - loss: 0.0824 - dice_coef: 0.9176 - iou: 0.8480 - recall: 0.9349 - precision: 0.9959 - val_loss: 0.0925 - val_dice_coef: 0.9075 - val_iou: 0.8316 - val_recall: 0.9030 - val_precision: 0.9939 Epoch 00034: val_loss improved from 0.09503 to 0.09254, saving model to files/model.h5 Epoch 35/100 40/40 [==============================] - 71s 2s/step - loss: 0.0785 - dice_coef: 0.9215 - iou: 0.8546 - recall: 0.9349 - precision: 0.9968 - val_loss: 0.1051 - val_dice_coef: 0.8949 - val_iou: 0.8120 - val_recall: 0.8624 - val_precision: 0.9983 Epoch 00035: val_loss did not improve from 0.09254 Epoch 36/100 40/40 [==============================] - 71s 2s/step - loss: 0.0756 - dice_coef: 0.9244 - iou: 0.8596 - recall: 0.9345 - precision: 0.9973 - val_loss: 0.0989 - val_dice_coef: 0.9011 - val_iou: 0.8220 - val_recall: 0.8738 - val_precision: 0.9962 Epoch 00036: val_loss did not improve from 0.09254 Epoch 37/100 40/40 [==============================] - 71s 2s/step - loss: 0.0729 - dice_coef: 0.9271 - iou: 0.8642 - recall: 0.9341 - precision: 0.9973 - val_loss: 0.0903 - val_dice_coef: 0.9097 - val_iou: 0.8357 - val_recall: 0.8846 - val_precision: 0.9966 Epoch 00037: val_loss improved from 0.09254 to 0.09034, saving model to files/model.h5 Epoch 38/100 40/40 [==============================] - 71s 2s/step - loss: 0.0720 - dice_coef: 0.9280 - iou: 0.8658 - recall: 0.9345 - precision: 0.9960 - val_loss: 0.0797 - val_dice_coef: 0.9203 - val_iou: 0.8529 - val_recall: 0.9112 - val_precision: 0.9903 Epoch 00038: val_loss improved from 0.09034 to 0.07975, saving model to files/model.h5 Epoch 39/100 40/40 [==============================] - 71s 2s/step - loss: 0.0700 - dice_coef: 0.9300 - iou: 0.8693 - recall: 0.9326 - precision: 0.9960 - val_loss: 0.0720 - val_dice_coef: 0.9280 - val_iou: 0.8660 - val_recall: 0.9174 - val_precision: 0.9901 Epoch 00039: val_loss improved from 0.07975 to 0.07201, saving model to files/model.h5 Epoch 40/100 40/40 [==============================] - 71s 2s/step - loss: 0.0674 - dice_coef: 0.9326 - iou: 0.8739 - recall: 0.9344 - precision: 0.9965 - val_loss: 0.0756 - val_dice_coef: 0.9244 - val_iou: 0.8600 - val_recall: 0.9126 - val_precision: 0.9916 Epoch 00040: val_loss did not improve from 0.07201 Epoch 41/100 40/40 [==============================] - 71s 2s/step - loss: 0.0643 - dice_coef: 0.9357 - iou: 0.8794 - recall: 0.9336 - precision: 0.9976 - val_loss: 0.0816 - val_dice_coef: 0.9184 - val_iou: 0.8503 - val_recall: 0.8971 - val_precision: 0.9920 Epoch 00041: val_loss did not improve from 0.07201 Epoch 42/100 40/40 [==============================] - 71s 2s/step - loss: 0.0622 - dice_coef: 0.9378 - iou: 0.8830 - recall: 0.9335 - precision: 0.9978 - val_loss: 0.0759 - val_dice_coef: 0.9241 - val_iou: 0.8596 - val_recall: 0.9044 - val_precision: 0.9919 Epoch 00042: val_loss did not improve from 0.07201 Epoch 43/100 40/40 [==============================] - 71s 2s/step - loss: 0.0610 - dice_coef: 0.9390 - iou: 0.8851 - recall: 0.9340 - precision: 0.9977 - val_loss: 0.0753 - val_dice_coef: 0.9247 - val_iou: 0.8607 - val_recall: 0.8936 - val_precision: 0.9942 Epoch 00043: val_loss did not improve from 0.07201 Epoch 44/100 40/40 [==============================] - 71s 2s/step - loss: 0.0597 - dice_coef: 0.9403 - iou: 0.8875 - recall: 0.9336 - precision: 0.9971 - val_loss: 0.0711 - val_dice_coef: 0.9289 - val_iou: 0.8678 - val_recall: 0.9063 - val_precision: 0.9897 Epoch 00044: val_loss improved from 0.07201 to 
0.07113, saving model to files/model.h5 Epoch 45/100 40/40 [==============================] - 71s 2s/step - loss: 0.0582 - dice_coef: 0.9418 - iou: 0.8901 - recall: 0.9323 - precision: 0.9971 - val_loss: 0.0830 - val_dice_coef: 0.9170 - val_iou: 0.8492 - val_recall: 0.9325 - val_precision: 0.9392 Epoch 00045: val_loss did not improve from 0.07113 Epoch 46/100 40/40 [==============================] - 71s 2s/step - loss: 0.0578 - dice_coef: 0.9422 - iou: 0.8908 - recall: 0.9340 - precision: 0.9963 - val_loss: 0.0690 - val_dice_coef: 0.9310 - val_iou: 0.8714 - val_recall: 0.9008 - val_precision: 0.9942 Epoch 00046: val_loss improved from 0.07113 to 0.06903, saving model to files/model.h5 Epoch 47/100 40/40 [==============================] - 71s 2s/step - loss: 0.0570 - dice_coef: 0.9430 - iou: 0.8923 - recall: 0.9325 - precision: 0.9963 - val_loss: 0.0661 - val_dice_coef: 0.9339 - val_iou: 0.8764 - val_recall: 0.9017 - val_precision: 0.9927 Epoch 00047: val_loss improved from 0.06903 to 0.06612, saving model to files/model.h5 Epoch 48/100 40/40 [==============================] - 71s 2s/step - loss: 0.0561 - dice_coef: 0.9439 - iou: 0.8938 - recall: 0.9325 - precision: 0.9963 - val_loss: 0.0660 - val_dice_coef: 0.9340 - val_iou: 0.8765 - val_recall: 0.9209 - val_precision: 0.9807 Epoch 00048: val_loss improved from 0.06612 to 0.06599, saving model to files/model.h5 Epoch 49/100 40/40 [==============================] - 71s 2s/step - loss: 0.0545 - dice_coef: 0.9455 - iou: 0.8967 - recall: 0.9329 - precision: 0.9966 - val_loss: 0.0694 - val_dice_coef: 0.9306 - val_iou: 0.8708 - val_recall: 0.8991 - val_precision: 0.9894 Epoch 00049: val_loss did not improve from 0.06599 Epoch 50/100 40/40 [==============================] - 71s 2s/step - loss: 0.0523 - dice_coef: 0.9477 - iou: 0.9008 - recall: 0.9318 - precision: 0.9975 - val_loss: 0.0660 - val_dice_coef: 0.9340 - val_iou: 0.8768 - val_recall: 0.9228 - val_precision: 0.9734 Epoch 00050: val_loss did not improve from 0.06599 Epoch 51/100 40/40 [==============================] - 71s 2s/step - loss: 0.0505 - dice_coef: 0.9495 - iou: 0.9039 - recall: 0.9331 - precision: 0.9980 - val_loss: 0.0642 - val_dice_coef: 0.9358 - val_iou: 0.8798 - val_recall: 0.9389 - val_precision: 0.9729 Epoch 00051: val_loss improved from 0.06599 to 0.06417, saving model to files/model.h5 Epoch 52/100 40/40 [==============================] - 71s 2s/step - loss: 0.0496 - dice_coef: 0.9504 - iou: 0.9056 - recall: 0.9317 - precision: 0.9976 - val_loss: 0.0701 - val_dice_coef: 0.9299 - val_iou: 0.8707 - val_recall: 0.9371 - val_precision: 0.9528 Epoch 00052: val_loss did not improve from 0.06417 Epoch 53/100 40/40 [==============================] - 72s 2s/step - loss: 0.0488 - dice_coef: 0.9512 - iou: 0.9072 - recall: 0.9327 - precision: 0.9981 - val_loss: 0.0623 - val_dice_coef: 0.9377 - val_iou: 0.8833 - val_recall: 0.9067 - val_precision: 0.9924 Epoch 00053: val_loss improved from 0.06417 to 0.06230, saving model to files/model.h5 Epoch 54/100 40/40 [==============================] - 72s 2s/step - loss: 0.0473 - dice_coef: 0.9527 - iou: 0.9098 - recall: 0.9333 - precision: 0.9983 - val_loss: 0.0596 - val_dice_coef: 0.9404 - val_iou: 0.8880 - val_recall: 0.9076 - val_precision: 0.9948 Epoch 00054: val_loss improved from 0.06230 to 0.05957, saving model to files/model.h5 Epoch 55/100 40/40 [==============================] - 77s 2s/step - loss: 0.0457 - dice_coef: 0.9543 - iou: 0.9127 - recall: 0.9323 - precision: 0.9989 - val_loss: 0.0604 - val_dice_coef: 0.9396 - val_iou: 
0.8867 - val_recall: 0.9118 - val_precision: 0.9919 Epoch 00055: val_loss did not improve from 0.05957 Epoch 56/100 40/40 [==============================] - 72s 2s/step - loss: 0.0446 - dice_coef: 0.9554 - iou: 0.9147 - recall: 0.9329 - precision: 0.9990 - val_loss: 0.0569 - val_dice_coef: 0.9431 - val_iou: 0.8929 - val_recall: 0.9131 - val_precision: 0.9935 Epoch 00056: val_loss improved from 0.05957 to 0.05691, saving model to files/model.h5 Epoch 57/100 40/40 [==============================] - 72s 2s/step - loss: 0.0441 - dice_coef: 0.9559 - iou: 0.9157 - recall: 0.9335 - precision: 0.9990 - val_loss: 0.0589 - val_dice_coef: 0.9411 - val_iou: 0.8895 - val_recall: 0.9017 - val_precision: 0.9950 Epoch 00057: val_loss did not improve from 0.05691 Epoch 58/100 40/40 [==============================] - 72s 2s/step - loss: 0.0435 - dice_coef: 0.9565 - iou: 0.9168 - recall: 0.9335 - precision: 0.9988 - val_loss: 0.0591 - val_dice_coef: 0.9409 - val_iou: 0.8891 - val_recall: 0.8989 - val_precision: 0.9947 Epoch 00058: val_loss did not improve from 0.05691 Epoch 59/100 40/40 [==============================] - 72s 2s/step - loss: 0.0431 - dice_coef: 0.9569 - iou: 0.9175 - recall: 0.9335 - precision: 0.9985 - val_loss: 0.0783 - val_dice_coef: 0.9217 - val_iou: 0.8582 - val_recall: 0.9299 - val_precision: 0.9326 Epoch 00059: val_loss did not improve from 0.05691 Epoch 60/100 40/40 [==============================] - 71s 2s/step - loss: 0.0430 - dice_coef: 0.9570 - iou: 0.9177 - recall: 0.9321 - precision: 0.9984 - val_loss: 0.0637 - val_dice_coef: 0.9363 - val_iou: 0.8812 - val_recall: 0.9155 - val_precision: 0.9672 Epoch 00060: val_loss did not improve from 0.05691 Epoch 61/100 40/40 [==============================] - 71s 2s/step - loss: 0.0424 - dice_coef: 0.9576 - iou: 0.9188 - recall: 0.9331 - precision: 0.9983 - val_loss: 0.0595 - val_dice_coef: 0.9405 - val_iou: 0.8883 - val_recall: 0.9180 - val_precision: 0.9800 Epoch 00061: val_loss did not improve from 0.05691 Epoch 62/100 40/40 [==============================] - 71s 2s/step - loss: 0.0423 - dice_coef: 0.9577 - iou: 0.9189 - recall: 0.9321 - precision: 0.9977 - val_loss: 0.0804 - val_dice_coef: 0.9196 - val_iou: 0.8543 - val_recall: 0.9067 - val_precision: 0.9458 Epoch 00062: val_loss did not improve from 0.05691 Epoch 63/100 40/40 [==============================] - 71s 2s/step - loss: 0.0519 - dice_coef: 0.9481 - iou: 0.9016 - recall: 0.9245 - precision: 0.9910 - val_loss: 0.1284 - val_dice_coef: 0.8716 - val_iou: 0.7768 - val_recall: 0.9748 - val_precision: 0.8217 Epoch 00063: val_loss did not improve from 0.05691 Epoch 64/100 40/40 [==============================] - 71s 2s/step - loss: 0.0458 - dice_coef: 0.9542 - iou: 0.9127 - recall: 0.9296 - precision: 0.9942 - val_loss: 0.0987 - val_dice_coef: 0.9013 - val_iou: 0.8235 - val_recall: 0.9683 - val_precision: 0.8792 Epoch 00064: val_loss did not improve from 0.05691 Epoch 65/100 40/40 [==============================] - 71s 2s/step - loss: 0.0442 - dice_coef: 0.9558 - iou: 0.9155 - recall: 0.9313 - precision: 0.9939 - val_loss: 0.0643 - val_dice_coef: 0.9357 - val_iou: 0.8806 - val_recall: 0.9193 - val_precision: 0.9762 Epoch 00065: val_loss did not improve from 0.05691 Epoch 66/100 40/40 [==============================] - 71s 2s/step - loss: 0.0415 - dice_coef: 0.9585 - iou: 0.9204 - recall: 0.9305 - precision: 0.9974 - val_loss: 0.0579 - val_dice_coef: 0.9421 - val_iou: 0.8914 - val_recall: 0.8912 - val_precision: 0.9970 Epoch 00066: val_loss did not improve from 0.05691 Epoch 67/100 
40/40 [==============================] - 71s 2s/step - loss: 0.0398 - dice_coef: 0.9602 - iou: 0.9236 - recall: 0.9324 - precision: 0.9982 - val_loss: 0.0561 - val_dice_coef: 0.9439 - val_iou: 0.8944 - val_recall: 0.8964 - val_precision: 0.9958 Epoch 00067: val_loss improved from 0.05691 to 0.05613, saving model to files/model.h5 Epoch 68/100 40/40 [==============================] - 71s 2s/step - loss: 0.0384 - dice_coef: 0.9616 - iou: 0.9261 - recall: 0.9319 - precision: 0.9985 - val_loss: 0.0576 - val_dice_coef: 0.9424 - val_iou: 0.8918 - val_recall: 0.8976 - val_precision: 0.9926 Epoch 00068: val_loss did not improve from 0.05613 Epoch 69/100 40/40 [==============================] - 71s 2s/step - loss: 0.0381 - dice_coef: 0.9619 - iou: 0.9268 - recall: 0.9312 - precision: 0.9986 - val_loss: 0.0549 - val_dice_coef: 0.9451 - val_iou: 0.8966 - val_recall: 0.9042 - val_precision: 0.9925 Epoch 00069: val_loss improved from 0.05613 to 0.05493, saving model to files/model.h5 Epoch 70/100 40/40 [==============================] - 71s 2s/step - loss: 0.0371 - dice_coef: 0.9629 - iou: 0.9286 - recall: 0.9331 - precision: 0.9990 - val_loss: 0.0622 - val_dice_coef: 0.9378 - val_iou: 0.8846 - val_recall: 0.8803 - val_precision: 0.9971 Epoch 00070: val_loss did not improve from 0.05493 Epoch 71/100 40/40 [==============================] - 71s 2s/step - loss: 0.0372 - dice_coef: 0.9628 - iou: 0.9284 - recall: 0.9318 - precision: 0.9986 - val_loss: 0.0581 - val_dice_coef: 0.9419 - val_iou: 0.8913 - val_recall: 0.8984 - val_precision: 0.9933 Epoch 00071: val_loss did not improve from 0.05493 Epoch 72/100 40/40 [==============================] - 72s 2s/step - loss: 0.0362 - dice_coef: 0.9638 - iou: 0.9302 - recall: 0.9324 - precision: 0.9990 - val_loss: 0.0660 - val_dice_coef: 0.9340 - val_iou: 0.8785 - val_recall: 0.9239 - val_precision: 0.9539 Epoch 00072: val_loss did not improve from 0.05493 Epoch 73/100 40/40 [==============================] - 71s 2s/step - loss: 0.0369 - dice_coef: 0.9631 - iou: 0.9290 - recall: 0.9325 - precision: 0.9982 - val_loss: 0.0575 - val_dice_coef: 0.9425 - val_iou: 0.8923 - val_recall: 0.8944 - val_precision: 0.9900 Epoch 00073: val_loss did not improve from 0.05493 Epoch 74/100 40/40 [==============================] - 71s 2s/step - loss: 0.0365 - dice_coef: 0.9635 - iou: 0.9296 - recall: 0.9328 - precision: 0.9985 - val_loss: 0.0742 - val_dice_coef: 0.9258 - val_iou: 0.8642 - val_recall: 0.8585 - val_precision: 0.9956 Epoch 00074: val_loss did not improve from 0.05493 Epoch 75/100 40/40 [==============================] - 71s 2s/step - loss: 0.0361 - dice_coef: 0.9639 - iou: 0.9305 - recall: 0.9321 - precision: 0.9985 - val_loss: 0.0652 - val_dice_coef: 0.9348 - val_iou: 0.8796 - val_recall: 0.8825 - val_precision: 0.9943 Epoch 00075: val_loss did not improve from 0.05493 Epoch 76/100 40/40 [==============================] - 72s 2s/step - loss: 0.0356 - dice_coef: 0.9644 - iou: 0.9314 - recall: 0.9320 - precision: 0.9985 - val_loss: 0.0530 - val_dice_coef: 0.9470 - val_iou: 0.9001 - val_recall: 0.8990 - val_precision: 0.9943 Epoch 00076: val_loss improved from 0.05493 to 0.05302, saving model to files/model.h5 Epoch 77/100 40/40 [==============================] - 71s 2s/step - loss: 0.0354 - dice_coef: 0.9646 - iou: 0.9317 - recall: 0.9324 - precision: 0.9984 - val_loss: 0.0593 - val_dice_coef: 0.9407 - val_iou: 0.8897 - val_recall: 0.8864 - val_precision: 0.9955 Epoch 00077: val_loss did not improve from 0.05302 Epoch 78/100 40/40 [==============================] - 71s 
2s/step - loss: 0.0345 - dice_coef: 0.9655 - iou: 0.9333 - recall: 0.9337 - precision: 0.9988 - val_loss: 0.0577 - val_dice_coef: 0.9423 - val_iou: 0.8920 - val_recall: 0.8967 - val_precision: 0.9851 Epoch 00078: val_loss did not improve from 0.05302 Epoch 79/100 40/40 [==============================] - 71s 2s/step - loss: 0.0343 - dice_coef: 0.9657 - iou: 0.9338 - recall: 0.9335 - precision: 0.9985 - val_loss: 0.0506 - val_dice_coef: 0.9494 - val_iou: 0.9042 - val_recall: 0.9143 - val_precision: 0.9853 Epoch 00079: val_loss improved from 0.05302 to 0.05058, saving model to files/model.h5 Epoch 80/100 40/40 [==============================] - 71s 2s/step - loss: 0.0336 - dice_coef: 0.9664 - iou: 0.9350 - recall: 0.9320 - precision: 0.9990 - val_loss: 0.0554 - val_dice_coef: 0.9446 - val_iou: 0.8959 - val_recall: 0.9287 - val_precision: 0.9660 Epoch 00080: val_loss did not improve from 0.05058 Epoch 81/100 40/40 [==============================] - 71s 2s/step - loss: 0.0337 - dice_coef: 0.9663 - iou: 0.9350 - recall: 0.9312 - precision: 0.9988 - val_loss: 0.0556 - val_dice_coef: 0.9444 - val_iou: 0.8955 - val_recall: 0.9224 - val_precision: 0.9703 Epoch 00081: val_loss did not improve from 0.05058 Epoch 82/100 40/40 [==============================] - 71s 2s/step - loss: 0.0346 - dice_coef: 0.9654 - iou: 0.9331 - recall: 0.9317 - precision: 0.9980 - val_loss: 0.0561 - val_dice_coef: 0.9439 - val_iou: 0.8949 - val_recall: 0.9304 - val_precision: 0.9676 Epoch 00082: val_loss did not improve from 0.05058 Epoch 83/100 40/40 [==============================] - 71s 2s/step - loss: 0.0331 - dice_coef: 0.9669 - iou: 0.9361 - recall: 0.9315 - precision: 0.9989 - val_loss: 0.0561 - val_dice_coef: 0.9439 - val_iou: 0.8950 - val_recall: 0.8909 - val_precision: 0.9966 Epoch 00083: val_loss did not improve from 0.05058 Epoch 84/100 40/40 [==============================] - 71s 2s/step - loss: 0.0325 - dice_coef: 0.9675 - iou: 0.9372 - recall: 0.9330 - precision: 0.9990 - val_loss: 0.0512 - val_dice_coef: 0.9488 - val_iou: 0.9030 - val_recall: 0.9165 - val_precision: 0.9826 Epoch 00084: val_loss did not improve from 0.05058 Epoch 85/100 40/40 [==============================] - 71s 2s/step - loss: 0.0319 - dice_coef: 0.9681 - iou: 0.9383 - recall: 0.9329 - precision: 0.9992 - val_loss: 0.0534 - val_dice_coef: 0.9466 - val_iou: 0.8997 - val_recall: 0.8966 - val_precision: 0.9947 Epoch 00085: val_loss did not improve from 0.05058 Epoch 86/100 40/40 [==============================] - 71s 2s/step - loss: 0.0309 - dice_coef: 0.9691 - iou: 0.9401 - recall: 0.9322 - precision: 0.9996 - val_loss: 0.0491 - val_dice_coef: 0.9509 - val_iou: 0.9072 - val_recall: 0.9104 - val_precision: 0.9892 Epoch 00086: val_loss improved from 0.05058 to 0.04909, saving model to files/model.h5 Epoch 87/100 40/40 [==============================] - 71s 2s/step - loss: 0.0305 - dice_coef: 0.9695 - iou: 0.9410 - recall: 0.9325 - precision: 0.9996 - val_loss: 0.0476 - val_dice_coef: 0.9524 - val_iou: 0.9096 - val_recall: 0.9113 - val_precision: 0.9914 Epoch 00087: val_loss improved from 0.04909 to 0.04763, saving model to files/model.h5 Epoch 88/100 40/40 [==============================] - 72s 2s/step - loss: 0.0302 - dice_coef: 0.9698 - iou: 0.9415 - recall: 0.9332 - precision: 0.9997 - val_loss: 0.0588 - val_dice_coef: 0.9412 - val_iou: 0.8908 - val_recall: 0.8800 - val_precision: 0.9960 Epoch 00088: val_loss did not improve from 0.04763 Epoch 89/100 40/40 [==============================] - 71s 2s/step - loss: 0.0304 - dice_coef: 0.9696 - 
iou: 0.9410 - recall: 0.9335 - precision: 0.9995 - val_loss: 0.0537 - val_dice_coef: 0.9463 - val_iou: 0.8988 - val_recall: 0.9074 - val_precision: 0.9800 Epoch 00089: val_loss did not improve from 0.04763 Epoch 90/100 40/40 [==============================] - 71s 2s/step - loss: 0.0311 - dice_coef: 0.9689 - iou: 0.9397 - recall: 0.9321 - precision: 0.9990 - val_loss: 0.0875 - val_dice_coef: 0.9125 - val_iou: 0.8443 - val_recall: 0.8478 - val_precision: 0.9735 Epoch 00090: val_loss did not improve from 0.04763 Epoch 91/100 40/40 [==============================] - 71s 2s/step - loss: 0.0310 - dice_coef: 0.9690 - iou: 0.9400 - recall: 0.9327 - precision: 0.9993 - val_loss: 0.0675 - val_dice_coef: 0.9325 - val_iou: 0.8765 - val_recall: 0.9021 - val_precision: 0.9598 Epoch 00091: val_loss did not improve from 0.04763 Epoch 92/100 40/40 [==============================] - 71s 2s/step - loss: 0.0299 - dice_coef: 0.9701 - iou: 0.9421 - recall: 0.9320 - precision: 0.9995 - val_loss: 0.0677 - val_dice_coef: 0.9323 - val_iou: 0.8758 - val_recall: 0.8896 - val_precision: 0.9680 Epoch 00092: val_loss did not improve from 0.04763 Epoch 93/100 40/40 [==============================] - 71s 2s/step - loss: 0.0294 - dice_coef: 0.9706 - iou: 0.9430 - recall: 0.9333 - precision: 0.9998 - val_loss: 0.0664 - val_dice_coef: 0.9336 - val_iou: 0.8788 - val_recall: 0.9021 - val_precision: 0.9514 Epoch 00093: val_loss did not improve from 0.04763 Epoch 94/100 40/40 [==============================] - 72s 2s/step - loss: 0.0293 - dice_coef: 0.9707 - iou: 0.9432 - recall: 0.9328 - precision: 0.9997 - val_loss: 0.0668 - val_dice_coef: 0.9332 - val_iou: 0.8787 - val_recall: 0.9041 - val_precision: 0.9476 Epoch 00094: val_loss did not improve from 0.04763 Epoch 95/100 40/40 [==============================] - 71s 2s/step - loss: 0.0294 - dice_coef: 0.9706 - iou: 0.9430 - recall: 0.9324 - precision: 0.9995 - val_loss: 0.0711 - val_dice_coef: 0.9289 - val_iou: 0.8715 - val_recall: 0.8918 - val_precision: 0.9582 Epoch 00095: val_loss did not improve from 0.04763 Epoch 96/100 40/40 [==============================] - 71s 2s/step - loss: 0.0297 - dice_coef: 0.9703 - iou: 0.9425 - recall: 0.9315 - precision: 0.9993 - val_loss: 0.0600 - val_dice_coef: 0.9400 - val_iou: 0.8896 - val_recall: 0.9239 - val_precision: 0.9523 Epoch 00096: val_loss did not improve from 0.04763 Epoch 97/100 40/40 [==============================] - 71s 2s/step - loss: 0.0311 - dice_coef: 0.9689 - iou: 0.9397 - recall: 0.9331 - precision: 0.9982 - val_loss: 0.0493 - val_dice_coef: 0.9507 - val_iou: 0.9069 - val_recall: 0.9058 - val_precision: 0.9933 Epoch 00097: val_loss did not improve from 0.04763 Epoch 98/100 40/40 [==============================] - 72s 2s/step - loss: 0.0310 - dice_coef: 0.9690 - iou: 0.9400 - recall: 0.9310 - precision: 0.9988 - val_loss: 0.0620 - val_dice_coef: 0.9380 - val_iou: 0.8871 - val_recall: 0.9266 - val_precision: 0.9432 Epoch 00098: val_loss did not improve from 0.04763 Epoch 99/100 40/40 [==============================] - 72s 2s/step - loss: 0.0312 - dice_coef: 0.9688 - iou: 0.9396 - recall: 0.9317 - precision: 0.9984 - val_loss: 0.0552 - val_dice_coef: 0.9448 - val_iou: 0.8963 - val_recall: 0.9461 - val_precision: 0.9564 Epoch 00099: val_loss did not improve from 0.04763 Epoch 100/100 40/40 [==============================] - 71s 2s/step - loss: 0.0299 - dice_coef: 0.9701 - iou: 0.9420 - recall: 0.9312 - precision: 0.9990 - val_loss: 0.0547 - val_dice_coef: 0.9453 - val_iou: 0.8979 - val_recall: 0.8980 - val_precision: 0.9908 
Epoch 00100: val_loss did not improve from 0.04763 ###Markdown Eval ###Code import os os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" import numpy as np import pandas as pd import cv2 from glob import glob from tqdm import tqdm import tensorflow as tf from tensorflow.keras.utils import CustomObjectScope from sklearn.metrics import accuracy_score, f1_score, jaccard_score, precision_score, recall_score #from metrics import dice_loss, dice_coef, iou H = 512 W = 512 def create_dir(path): if not os.path.exists(path): os.makedirs(path) def read_image(path): x = cv2.imread(path, cv2.IMREAD_COLOR) # x = cv2.resize(x, (W, H)) ori_x = x x = x/255.0 x = x.astype(np.float32) return ori_x, x def read_mask(path): x = cv2.imread(path, cv2.IMREAD_GRAYSCALE) ## (512, 512) # x = cv2.resize(x, (W, H)) ori_x = x x = x/255.0 x = x.astype(np.int32) return ori_x, x def load_data(path): x = sorted(glob(os.path.join(path, "image", "*.png"))) y = sorted(glob(os.path.join(path, "mask", "*.png"))) return x, y def save_results(ori_x, ori_y, y_pred, save_image_path): line = np.ones((H, 10, 3)) * 255 ori_y = np.expand_dims(ori_y, axis=-1) ori_y = np.concatenate([ori_y, ori_y, ori_y], axis=-1) y_pred = np.expand_dims(y_pred, axis=-1) y_pred = np.concatenate([y_pred, y_pred, y_pred], axis=-1) * 255 cat_images = np.concatenate([ori_x, line, ori_y, line, y_pred], axis=1) cv2.imwrite(save_image_path, cat_images) if __name__ == "__main__": """ Save the results in this folder """ create_dir("results") """ Load the model """ with CustomObjectScope({'iou': iou, 'dice_coef': dice_coef, 'dice_loss': dice_loss}): model = tf.keras.models.load_model("files/model.h5") """ Load the dataset """ dataset_path = os.path.join("new_data", "test") test_x, test_y = load_data(dataset_path) """ Make the prediction and calculate the metrics values """ SCORE = [] for x, y in tqdm(zip(test_x, test_y), total=len(test_x)): """ Extracting name """ name = x.split("/")[-1].split(".")[0] """ Read the image and mask """ ori_x, x = read_image(x) ori_y, y = read_mask(y) """ Prediction """ y_pred = model.predict(np.expand_dims(x, axis=0))[0] y_pred = y_pred > 0.5 y_pred = y_pred.astype(np.int32) y_pred = np.squeeze(y_pred, axis=-1) """ Saving the images """ save_image_path = f"results/{name}.png" save_results(ori_x, ori_y, y_pred, save_image_path) """ Flatten the array """ y = y.flatten() y_pred = y_pred.flatten() """ Calculate the metrics """ acc_value = accuracy_score(y, y_pred) f1_value = f1_score(y, y_pred, labels=[0, 1], average="binary") jac_value = jaccard_score(y, y_pred, labels=[0, 1], average="binary") recall_value = recall_score(y, y_pred, labels=[0, 1], average="binary") precision_value = precision_score(y, y_pred, labels=[0, 1], average="binary") SCORE.append([name, acc_value, f1_value, jac_value, recall_value, precision_value]) score = [s[1:] for s in SCORE] score = np.mean(score, axis=0) print(f"Accuracy: {score[0]:0.5f}") print(f"F1: {score[1]:0.5f}") print(f"Jaccard: {score[2]:0.5f}") print(f"Recall: {score[3]:0.5f}") print(f"Precision: {score[4]:0.5f}") """ Saving """ df = pd.DataFrame(SCORE, columns=["Image", "Acc", "F1", "Jaccard", "Recall", "Precision"]) df.to_csv("files/score.csv") ###Output 100%|██████████| 16/16 [00:14<00:00, 1.08it/s] ###Markdown Downloading Results ###Code !zip -r /content/results.zip /content/results from google.colab import files files.download("/content/results.zip") ###Output _____no_output_____
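###Markdown The evaluation cell above loads the saved model inside a `CustomObjectScope` that references `iou`, `dice_coef` and `dice_loss`, but the `from metrics import ...` line is commented out, so those names must be defined before the model can be loaded. Below is a minimal sketch of how such metrics are commonly written for binary segmentation; the smoothing constant and the exact formulation are assumptions, not taken from this project's `metrics` module. ###Code
import tensorflow as tf

smooth = 1e-15  # assumed smoothing constant to avoid division by zero

def iou(y_true, y_pred):
    # intersection over union on the flattened masks
    y_true = tf.keras.layers.Flatten()(y_true)
    y_pred = tf.keras.layers.Flatten()(y_pred)
    intersection = tf.reduce_sum(y_true * y_pred)
    union = tf.reduce_sum(y_true) + tf.reduce_sum(y_pred) - intersection
    return (intersection + smooth) / (union + smooth)

def dice_coef(y_true, y_pred):
    # Dice coefficient: 2 * intersection / (sum of the two mask areas)
    y_true = tf.keras.layers.Flatten()(y_true)
    y_pred = tf.keras.layers.Flatten()(y_pred)
    intersection = tf.reduce_sum(y_true * y_pred)
    return (2.0 * intersection + smooth) / (tf.reduce_sum(y_true) + tf.reduce_sum(y_pred) + smooth)

def dice_loss(y_true, y_pred):
    # loss used during training is one minus the Dice coefficient
    return 1.0 - dice_coef(y_true, y_pred)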
examples/doctable_connectengine.ipynb
###Markdown Manage SQL Connections with DocTableThis is meant to give a bit more depth describing how doctable works under-the-hood. I won't cover the details of DocTable methods or working with doctable objects, but I will try to give a clearer sense of how connections and tables are managed within a doctable instance.The driving motivator behind doctable is to create an object-oriented interface for working with sql tables by linking schemas described in your code with the structure of the databases you work with. This model is less ideal for the kinds of application-based frameworks where you would define the database schema once and build code around it separately, but works well for data science applications where you will be creating new tables and playing with different schemas regularly as your approach and end-goals change.When you instantiate a DocTable (or inheriting class), the object will convert your provided schema into a set of sqlalchemy objects which are then stored in-memory as part of the doctable instance. If the table does not already exist in the actual database, DocTable can create one that matches the provided schema, and then the schema will be used to work with the underlying database table. I will now discuss the lower-level objects that manage the metadata and connections to the database. ###Code import sys sys.path.append('..') import doctable ###Output _____no_output_____ ###Markdown ConnectEngine ClassEach doctable maintains a `ConnectEngine` object to manage database connections and metadata that make all other database operations possible. I'll demonstrate how to instantiate this class manually to show how it works.The constructor takes arguments for dialect (sqlite, mysql, etc) and database target (filename or database server) to create new sqlalchemy [engine](https://docs.sqlalchemy.org/en/13/core/engines_connections.html) and [metadata](https://docs.sqlalchemy.org/en/13/faq/metadata_schema.html) objects. The engine object stores information about the target and can generate database connections, the metadata object stores schemas for registered tables. To work with a table, the metadata object must have the table schema registered, although it can be constructed from the database object itself.See here that the constructor requires a target (file or server where the database is located) and a dialect (flavor of database engine). This connection sits above individual table connections, and thus maintains no connections of it's own - only the engine that can create connections. We can, however list the tables in the database and perform other operations on the table. ###Code engine = doctable.ConnectEngine(target=':memory:', dialect='sqlite') engine ###Output _____no_output_____ ###Markdown Working with tablesYou can also execute connectionless queries directly from this object, although normally you would create a connection object first and then execute queries from the connection. In this example I use a custom sql query to create a new table.As the ConnectEngine sits above the level of tables, we can list and drop tables from here. ###Code # see there are no tables here yet. engine.list_tables() # run this raw sql query just for example # NOTE: Normally you would NOT create a table this way using doctable. # This is just for example purposes. 
query = 'CREATE TABLE temp (id INTEGER PRIMARY KEY, number INTEGER NOT NULL)' engine.execute(query) # see that the table is now in the database engine.list_tables() # uses inspect to ask the database directly for the schema engine.schema('temp') # or as a dataframe engine.schema_df('temp') ###Output _____no_output_____ ###Markdown All of these methods I've shown so far access the database tables directly, but currently our python objects do not have any idea of what the table schema looks like. You can view the sqlalchemy table objects actually registered with the engine by using the .tables property. See that it is currently empty! Our python code is not able to work with the table using objects because it does not have record of the schema. Now we'll show how to register tables with the engine. Creating and accessing tablesTo create a data structure internally representing the database structure, we can either ask sqlalchemy to read the database and create the schema, or we can provide lists of sqlalchemy column objects. Wee that we can access the registered tables using the .tables property. ###Code # see that currently our engine does not have information about the table we created above. engine.tables # now I ask doctable to read the database schema and register the table in metadata. engine.add_table('temp') # and we can see that the table is registered engine.tables ###Output _____no_output_____ ###Markdown When add_table() is called, a new sqlalchemy.Table object is registered in the engine's metadata and returned. If add_table() is called again, it will return the table already registered in the metadata. Because we usually use doctable to manage tables, we'll just show a short example here. ###Code # while we can use doctable to do most of this work # usually, I'll just show how sqlalchemy core objects # can be used to create a table in ConnectEngine. from sqlalchemy import Column, Integer, String # create a list of columns columns = ( Column('id', Integer, primary_key = True), Column('name', String), ) # we similarly use the add_table() method to store the schema # in the metadata engine.add_table('temp2', columns=columns) # see now that the engine has information about both tables engine.tables # and see that you can get individual table object references like this engine.tables['temp'] ###Output _____no_output_____ ###Markdown Dropping tablesDropping tables is simple enough, but remember that the schema stored in the database and the objects in code mirror each other, so it is best to manipulate them at the same time. Use .drop_table instead of issuing CREATE TABLE query to make sure they stay in sync. The method can also be used on tables that are not in the metadata engine. ###Code # by providing the argument as a string engine.drop_table('temp') engine.list_tables() ###Output _____no_output_____ ###Markdown In cases where an underlying table has been deleted but metadata is retained, the drop_table() method will still work but you may need to call clear_metadata() to flush all metadata and add_all_tables() to re-create the metadata from the actual data. ###Code # see this works although the temp3 table is not registered in engine metadata query = 'CREATE TABLE temp3 (id INTEGER PRIMARY KEY, number INTEGER NOT NULL)' engine.execute(query) engine.drop_table('temp3') # this will delete the underlying table even though the metadata information still exists. 
query = 'CREATE TABLE temp4 (id INTEGER PRIMARY KEY, number INTEGER NOT NULL)' engine.execute(query) engine.execute(f'DROP TABLE IF EXISTS temp4') engine.list_tables() # see that the table is still registered in the metadata engine.tables # in this case, it might be simplest just to clear all metadata # and re-build according to exising tables engine.clear_metadata() engine.reflect() engine.tables ###Output _____no_output_____ ###Markdown Managing connections with ConnectEngineConnectEngine objects are used to create database connections which are maintained by individual doctable objects. Use the get_connection() function to retreive a new connection object which you can use to execute queries. While garbage collecting the connection objects will close the individual connection, sometimes all connections need to be closed simultaneously. This is especially important because garbage-collecting the ConnectEngine object doesn't mean the connections will be garbage-collected if they have references elsewhere in your code. You can close all connections using the close_connections() method. ###Code # make new connection conn = engine.connect() conn # see here we just run a select query on the empty table, returning an empty list list(conn.execute('SELECT * FROM temp2')) ###Output _____no_output_____ ###Markdown An important use-case of this feature is when you have multiple processes accessing the same database. In general, each process should have separate connections to the database, but both the engine and metadata stored with the ConnectEngine can be copied. Here I'll show a basic multiprocessing case using the Distribute class (it works much like multiprocessing.Pool()).In using the map function we open two processes, and in the thread function we call the close_connections() method to delete existing connections which don't exist in this new memory space. ###Code def thread(nums, engine: doctable.ConnectEngine): # close connections that were opened in other thread #engine.close_connections() engine.dispose() # create a new connection for this thread thread_conn = engine.connect() numbers = [1,2] with doctable.Distribute(2) as d: d.map(thread, numbers, engine) engine.list_tables() ###Output _____no_output_____ ###Markdown DocTable and ConnectEngineEvery DocTable object maintains a ConnectEngine to store information about the table they represent, and can be accessed through the engine property. When a target and dialect are provided to doctable, it will automatically initialize a new ConnectEngine and store a new connection object. ###Code # create a new doctable and view it's engine schema = (('idcol', 'id'), ('string', 'name')) db = doctable.DocTable(target=':memory:', schema=schema) str(db.engine) ###Output _____no_output_____ ###Markdown The DocTable constructor can also accept an engine in place of a target and dialect, and thus share ConnectEngines between multiple DocTable objects. In this case, the doctable constructor will use the provided schema to insert the table information into the engine metadata and create the table if doesn't already exist. It will also generate a new connection object from the ConnectEngine. 
###Code # start with clean metadata: clear anything registered by the earlier examples engine.clear_metadata() print(engine.tables.keys()) print(engine.list_tables()) # make a new doctable using the existing engine schema = (('idcol', 'id'), ('string', 'name')) db = doctable.DocTable(engine=engine, schema=schema, tabname='tmp5') db # make another doctable using the existing engine schema2 = (('idcol', 'id'), ('string', 'name')) db2 = doctable.DocTable(engine=engine, schema=schema2, tabname='tmp6') db2 # we can see that both tables have been created in the database engine.list_tables() # and that both are registered in the metadata engine.tables.keys() ###Output _____no_output_____ ###Markdown Some ConnectEngine methods are also accessible through the DocTable instances. ###Code db.list_tables() db.schema_table() # and this is equivalent to calling the engine method reopen(), which clears # metadata and closes the connection pool db.reopen_engine() ###Output _____no_output_____
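###Markdown As described above, a ConnectEngine is essentially a wrapper around a SQLAlchemy engine plus a MetaData collection of Table objects. For readers unfamiliar with those pieces, here is a rough sketch of the engine/metadata/table pattern in plain SQLAlchemy 1.x-style Core; it is an illustration of the underlying concepts, not doctable's actual internals. ###Code
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String

# engine: knows how to create connections to the target database
sa_engine = create_engine('sqlite:///:memory:')

# metadata: in-memory registry of table schemas
meta = MetaData()
tmp = Table('tmp', meta,
            Column('id', Integer, primary_key=True),
            Column('name', String))

# create the registered tables in the actual database
meta.create_all(sa_engine)

# connections are created from the engine and used to run queries
with sa_engine.connect() as conn:
    conn.execute(tmp.insert().values(name='example'))
    print(list(conn.execute(tmp.select())))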
India_Climate_Commodities_LogReg.ipynb
###Markdown ###Code import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns !pip install category_encoders==2.* !pip install pandas-profiling==2.* %%time # Important library for many geopython libraries !apt install gdal-bin python-gdal python3-gdal # Install rtree - Geopandas requirment !apt install python3-rtree # Install Geopandas !pip install git+git://github.com/geopandas/geopandas.git # Install descartes - Geopandas requirment !pip install descartes # Install Folium for Geographic data visualization !pip install folium # Install plotlyExpress !pip install plotly_express import zipfile import shutil from glob import glob import os import pandas as pd import geopandas as gdp import matplotlib.pyplot as plt import numpy as np from sklearn.model_selection import train_test_split !pip install PyShp !pip install shapely import fiona from zipfile import ZipFile from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression from sklearn import metrics import sys if 'google.colab' in sys.modules: !pip install category_encoders==2.* !pip install pandas-profiling==2.* !pip install plotly==4.* !unzip /content/commodity_trade_statistics_data.csv.zip !unzip /content/IndiaTemps.zip !unzip /content/IndiaRain.zip trade = pd.read_csv('/content/commodity_trade_statistics_data.csv', na_values=[' --'], dtype={'Max. CPC': pd.np.float64}) trade.dropna(how='all') trade['trade_usd'].mean() trade['year'].nunique trade.isnull().sum() rain = pd.read_csv('/content/rainfall in india 1901-2015.csv') rain.head() rain = rain.drop(['SUBDIVISION', 'JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC'], axis=1) rain rain['YEAR'].nunique temp = pd.read_csv('/content/temperatures.csv') temp.head() temp = temp.drop(['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC'], axis=1) temp temp['YEAR'].nunique temp_rain = pd.merge(temp, rain, how='inner', on='YEAR', left_on=None, right_on=None, left_index=False, right_index=False, sort=True) temp_rain.head() temp_rain.shape temp_rain.dtypes df_year = temp_rain[temp_rain['YEAR'] < 1995 ] TR = temp_rain.drop(df_year.index, axis=0) TR.shape trade.rename(columns = {'year':'YEAR'}, inplace = True) trade.head() trade = trade.drop(['flow', 'comm_code', 'weight_kg', 'quantity_name', 'category'], axis=1) trade trade['country_or_area'].nunique() trade_sort = trade.sort_values(by='trade_usd') print(trade_sort) trades = trade_sort[trade_sort['YEAR'] < 2000 ] trades = trade_sort.drop(trades.index, axis=0) trades.shape trade1 = trades.groupby('commodity').trade_usd.mean().reset_index(name='trade_usd') trade1 = trades.nlargest(1000,'trade_usd') trade1 trade1['trade_usd'].mean() IndiaFin = pd.merge(TR, trade1, how='inner', on='YEAR', left_on=None, right_on=None, left_index=False, right_index=False, sort=True) IndiaFin.head() IndiaFin.shape IndiaFin = IndiaFin.drop(['quantity'], axis=1) train, test = train_test_split(IndiaFin, train_size=0.80, test_size=0.20, random_state=42) test.shape train, val = train_test_split(train, train_size=0.80, test_size=0.20, random_state=42) val.shape train.shape trained = train.drop(['trade_usd'], axis=1) numeric_features = trained.select_dtypes(include='number').columns.tolist() # Get a series with the cardinality of the nonnumeric features cardinality = trained.select_dtypes(exclude='number').nunique() # Get a list of all categorical features with cardinality <= 10000 categorical_features = cardinality[cardinality <= 
10000].index.tolist() # Combine the lists features = numeric_features + categorical_features print(features) target = train['trade_usd'] x_train = train[features] y_train = train['trade_usd'] x_val = val[features] y_val = val['trade_usd'] x_test = test[features] x_train.shape import category_encoders as ce from sklearn.impute import SimpleImputer from sklearn.linear_model import LogisticRegression from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler pipeline = make_pipeline( ce.OrdinalEncoder(), SimpleImputer(strategy='mean'), StandardScaler(), LogisticRegression(multi_class='auto', solver='lbfgs', n_jobs=-1) ) pipeline.fit(x_train, y_train) y_pred = pipeline.predict(x_test) print('Validation Accuracy', pipeline.score(x_val, y_val)) import xgboost as xgb regr = xgb.XGBRegressor(colsample_bytree=0.2, gamma=0.0, learning_rate=0.05, max_depth=6, min_child_weight=1.5, n_estimators=7200, reg_alpha=0.9, reg_lambda=0.6, subsample=0.2, seed=42, silent=1) regr.fit(train_new, label_df) ###Output _____no_output_____
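###Markdown The last cell above calls `regr.fit(train_new, label_df)`, but `train_new` and `label_df` are never defined in this notebook, so it fails as written. Below is a minimal sketch of how the same XGBoost regressor could instead be fitted on the features already prepared here; the encoder and imputer steps mirror the pipeline above, and the simplified hyperparameters are assumptions rather than tuned values. ###Code
import category_encoders as ce
from sklearn.impute import SimpleImputer
import xgboost as xgb

# encode categoricals and impute missing values, as in the pipeline above
encoder = ce.OrdinalEncoder()
imputer = SimpleImputer(strategy='mean')
x_train_enc = imputer.fit_transform(encoder.fit_transform(x_train))
x_val_enc = imputer.transform(encoder.transform(x_val))

# fit the regressor on the training split and check it on the validation split
xgb_model = xgb.XGBRegressor(n_estimators=200, learning_rate=0.05,
                             max_depth=6, random_state=42)
xgb_model.fit(x_train_enc, y_train)
print('Validation R^2:', xgb_model.score(x_val_enc, y_val))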
0.2 - 2D Kalman.ipynb
###Markdown 2D Kalman Filtering Jack Nelson, April 2016 Introduction This notebook demonstrates basic Kalman filtering for recursively and probabalistically modeling the state vector of some actor over some period of discrete time. In other words, we model how a robot would use its noisy sensors to develop a probabilistic model of where it is in a 2D grid. The concepts behind basic linear Kalman filtering are thoroughly explained as we build actual executable Python code that runs the simulated robot's Kalman filter.**Note** - There's a bug with Google Chrome where Latex-style math equations have a vertical bar to the right of them. If this bothers you, I suggest you open this notebook in another browser. Acknowledgments The notation and theoretical descriptions of the algorithm are taken heavily from Sebastian Thrun and Wolfram Burgard's text *Probabalistic Robotics* (http://www.amazon.com/Probabilistic-Robotics-Intelligent-Autonomous-Agents/dp/0262201623?ie=UTF8&keywords=probabilistic%20robotics&qid=1461459899&ref_=sr_1_1&sr=8-1). ###Code %matplotlib notebook import matplotlib import numpy as np import matplotlib.pyplot as plt ###Output _____no_output_____ ###Markdown Let's setup the initial parameters and the Kalman filter.First we set the number of iteration steps, the initial position of the robot, and enable or disable movement of the robot. Next we setup the state transition matrices and the initial assumptions of the Kalman filter. Initial State The initial state (position) of the robot is modeled as a Gaussian with mean $x_0$ and covariance $\Sigma_0$. State Transition Probability The state transition probability $p(x_t\ |\ u_t,\ x_t-1)$ is a linear Gaussian function of the previous state, the current control input, and Gaussian noise:\begin{equation}x_t = A_tx_{t-1} + B_tu_t + \epsilon_t\end{equation}where $ A_t $ is an $n x n$ state transition matrix where $n$ is the dimension of the state vector $x_t$, which in this case is 2, and $B_t$ is an $n x m$ control matrix where $m$ is the dimension of the control vector $u_t$. $\epsilon_t$ is an n-dimensional Gaussian random vector that models the uncertainty introduced by the state transition. It's mean is zero and its covariance is $R_t$.The state transition equation represents the robot's internal model (belief) of its physical system and its state in that system at each time step. $A_t$ models how the system changes over time and is diagonal (that is it's zero everywhere but the diagonal) if the system is not cross-dependent. For example, in a system where our robot is sitting on a perfectly flat table with no external forces, $A_t$ would be zero everywhere because the robot would not be moving unless a command was sent to its actuators to move it.$B_t$ models how our command inputs map to actual outcomes in our robot's state. Once again, this matrix is diagonal if there are no cross-dependencies of outcomes on control inputs, i.e. if each "axis" of the state vector has its own actuator. Measurement Probability The measurement probability is a linear Gaussian function of the current state $x_t$ and Gaussian noise:$$ z_t = C_tx_t + \delta_t $$where $C_t$ is a matrix of size $kxn$ where $k$ is the dimension of the measurement vector $z_t$. The vector $\delta_t$ is the measurement noise modeled by a multivariate Gaussian with zero mean and covariance $Q_t$.The measurement probability function models our robot's sensors perception of the robot's state and returns what the sensors believe the mean of the robot's state is. 
$C_t$ models the sensors' dependency on the state of the robot. $C_t$ should usually be an $n\ x\ k$ identity matrix. ###Code # setup simulation parameters n_iter = 100 # number of iterations n = 2 # state vector dimension m = 2 # control vector dimension k = 2 # measurement vector dimension # initial state Gaussian mu_0 = np.array([[-0.5],[0.5]]) #Initial state vector Sigma_0 = np.array([[1,1],[1,1]]) #Initial state covariance # covariance matrices R_t = np.array([[0.1, 0.1], [0.1, 0.1]])# state transition noise covariance Q_t = np.array([[0.5, 0.5], [0.5, 0.5]]) # measurement noise covariance # initialize arrays to hold state, control, and measurement values (to plot later) mu = np.zeros((n_iter, n)) # array of actual position values u = np.zeros((n_iter, m)) # array of control inputs z = np.zeros((n_iter, k)) # measurement array # initialize the posterior Gaussian mu_bar = np.zeros((n_iter, n)) # posterior estimates of x Sigma_bar = np.zeros((n_iter,n,n)) # posterior error estimates (covariance) K = np.zeros((n,n)) # Kalman gain or blending factor # initial position mu[0] = mu_0.T ###Output _____no_output_____ ###Markdown Recursive Kalman Filter Algorithm Now that the filter has been set up, we can define a function to actually perform the filter, then recursively apply the filter over the `n_iter` number of time steps. Filter Inputs and Outputs The recursive Kalman filter takes as inputs the previous time-step's state probability in the form of the the mean $\mu_{t-1}$ and the covariance $\Sigma_{t-1}$, as well as the control input at that time step $u_t$ and the measurement at that step $z_t$. The filter returns the state probability at that time step as the mean $\mu_t$ and covariance $\Sigma_t$. The mean and covariance returned at time $t$ then becomes the input to the filter at time $t+1$, the filter is applied again, and so on and so forth. Calculating the A Priori Belief (Prediciton Step) The filter starts by making a predicted belief of the state probability at time $t$, known as the a priori belief (before we incorporate sensor measurements), from the information provided about the state probability at time $t-1$ and the control input to the state transition function from $t-1$ to $t$, or in other words how the robot thinks it moved during the last time step. As a reminder, the state transition function connects the state at $t-1$ to the current state at $t$ by:$$ x_t = A_tx_{t-1} + B_tu_t + \epsilon_t $$where $\epsilon_t$ is a Gaussian random variable representing noise in the transition process.The Kalman filter predicts the mean and covariance of the state at $t$ as follows:$$ \bar{\mu_t} = A_t\mu_{t-1} + B_tu_t $$$$ \bar{\Sigma_t} = A_t\Sigma_{t-1}A_t^T + R_t $$Where the predicted belief of the state is represented by the predicted mean $\bar{\mu_t}$ and the predicted covariance $\bar{\Sigma_t}$, which describes a standard normal Gaussian probability distribution of the current predicted state, which in this case is the x-y location of a robot in a 2D grid.In the first equation, the mean update, the $A_t$ and $B_t$ are the state transition and control matrices, respectively. 
$A_t$ effectively describes how the system changes on its own from time $t-1$ to $t$, while $B_t$ describes how a control input $u_t$ affects the state of the system from $t-1$ to $t$.In the second equation, the covariance update considers the fact that states depend on previous states through the linear matrix $A_t$, which is multiplied twice into the covariance, since the covariance is a quadratic matrix. Calculating the Kalman Gain The next part of the recursive filter algorithm calculates the Kalman gain $K_t$, which specifies the degree to which the measurement $z_t$ is to be incorporated into the new state estimate.$$ K_t = \frac{\bar{\Sigma_t}C_t^T}{ C_t\bar{\Sigma_t}C_t^T + Q_t} $$As a reminder, the measurement $z_t$ is given by:$$z_t = C_tx_t + \delta_t$$where $\delta_t$ is a multivariate Gaussian with zero mean and covariance $Q_t$, so $Q_t$ describes the "spread" of the noise. Calculating the Posterior Belief (Measurement Update Step) With the Kalman gain calculated, we now go about calculating the posterior belief and thus updating our robot's internal belief as to where it actually is. Once again, this "belief" takes the form of a normal Gaussian distribution centered around the robot's "best guess" as to where it is, that is the mean $\mu_t$, with covariance $\Sigma_t$.Both the mean and covariance at time $t$ are updated from the robot's prediction of where it is after the last time step and its sensor observation(s) of its location. The magnitude of the Kalman gain weights how much the new observations are incorporated into the robot's belief. The mean and covariance are updated as follows:$$ \mu_t = \bar{\mu_t} + K_t(z_t - C_t\bar{\mu_t}) $$$$ \Sigma_t = (I - K_tC_t)\bar{\Sigma_t} $$where $I$ is the identity matrix. In the first equation, the calculation of the posterior mean $\mu_t$, the robot adds the difference between its measured location and its a priori predicted location, weighted by the Kalman gain, to its the a priori predicted location. The difference between the measurement $z_t$ and the expected measurement $C_t\bar{\mu_t}$ is termed the *innovation*.In the second equation the posterior covariance $\Sigma_t$ is calculated. **TODO - Add more info about the posterior covariance**.Now let's put it all together. 
First the prior belief update:$$ \bar{\mu_t} = A_t\mu_{t-1} + B_tu_t $$$$ \bar{\Sigma_t} = A_t\Sigma_{t-1}A_t^T + R_t $$Then the Kalman gain:$$ K_t = \frac{\bar{\Sigma_t}C_t^T}{ C_t\bar{\Sigma_t}C_t^T + Q_t} $$And finally the posterior belief update:$$ \mu_t = \bar{\mu_t} + K_t(z_t - C_t\bar{\mu_t}) $$$$ \Sigma_t = (I - K_tC_t)\bar{\Sigma_t} $$This algorithm is applied recursively, meaning the outputs of the filter at time $t-1$ form the inputs (along with a measurement and control input) to the filter at time $t$, and so on and so forth, over and over again. As a Python function, it looks like this: ###Code
def kalman_filter(mu_minus, Sigma_minus, u_t, z_t):
    # a priori belief (prediction)
    mu_bar = np.matmul(A_t, mu_minus) + np.matmul(B_t, u_t)
    Sigma_bar = np.matmul(np.matmul(A_t, Sigma_minus), A_t.T) + R_t

    # Kalman gain: the "division" in the formula above is a matrix inverse.
    # np.linalg.pinv (pseudo-inverse) is used instead of np.linalg.inv so the
    # example still runs even though the R_t and Q_t chosen earlier are
    # singular, which can make this term non-invertible.
    S = np.matmul(np.matmul(C_t, Sigma_bar), C_t.T) + Q_t
    K_t = np.matmul(np.matmul(Sigma_bar, C_t.T), np.linalg.pinv(S))

    # posterior belief update (measurement update)
    mu_t = mu_bar + np.matmul(K_t, z_t - np.matmul(C_t, mu_bar))
    Sigma_t = np.matmul(np.identity(n) - np.matmul(K_t, C_t), Sigma_bar)

    return mu_t, Sigma_t
###Output _____no_output_____ ###Markdown The Kalman filter described above alternates between predicting the robot's movement (the prediction step) and using its sensors to ascertain its actual position and integrating that information into its internal model of its location (the measurement update). The prediction step increases the robot's uncertainty of where it is, while the measurement step decreases its uncertainty. Summary of vectors and matrices Before we go any further, let's list some of the vectors and matrices we've been using.** indices **- n - spatial dimension- m - control dimension- k - measurement dimension**vectors**- $x_t$ - $(n\ x\ 1)$ posterior state vector- $u_t$ - $(m\ x\ 1)$ control vector- $z_t$ - $(k\ x\ 1)$ measurement vector- $\mu_t$ - $(n\ x\ 1)$ posterior state vector mean- $\bar{\mu_t}$ - $(n\ x\ 1)$ a priori (prediction) state vector mean- $\epsilon_t$ - $(n\ x\ 1)$ state transition uncertainty (Gaussian random vector)- $\delta_t$ - $(k\ x\ 1)$ measurement noise vector (Gaussian random vector)** matrices **- $ \Sigma_t$ - $(n\ x\ n)$ posterior state covariance matrix.- $\bar{\Sigma_t}$ - $(n\ x\ n)$ a priori (prediction) state covariance matrix.- $ A_t$ - $(n\ x\ n)$ state transition matrix.- $ B_t$ - $(n\ x\ m)$ control matrix.- $ C_t$ - $(k\ x\ n)$ sensor measurement matrix.- $ K_t$ - $(n\ x\ k)$ Kalman gain matrix.- $ Q_t$ - $(k\ x\ k)$ sensor measurement covariance. A Simple Example - Robot on a Flat Table Now let's apply our Kalman filter theory to a working example. We want to model a robot sitting on a table, at rest initially. The robot can move in the x and y direction, and has a suite of ultrasound sensors that sense its distance from walls that form the x and y axes. Modeling the robot's state We start by setting up our state transition model. Our robot is sitting on a flat table with no external forces acting on it (we assume), so from one time step to another, unless we command it to, it should stay still at $(x_0, y_0)$. Our state transition matrix $A_t$ is then zero, and the state transitions of the robot are completely determined by our command inputs and the random Gaussian noise of our model. We'll assume our control inputs are normalized floats between 0 and 1 for each axis of the table. Our control matrix then maps the magnitude of our inputs to movement outputs that correspond to how the robot actually moves on the table. 
That is, a magnitude 1.0 input maps to the largest magnitude position change the robot can achieve in one time step. Once again, we'll assume our robot's movement has no cross-dependency on control inputs so our control matrix $B_t$ can be diagonal. Our ultrasound "ping" sensors which measure our robot's distance from the x and y axes (walls) return float values that directly correspond to distance and are not cross-dependent, so $C_t$ can be the $(n\ x\ k)$ identity matrix since the measurement values do not need to be scaled. Ping sensors tend to be quite noisy and unreliable, as you probably know if you've ever worked with them. Modeling the full characteristics of ping sensor measurements is beyond the scope of this example. Our model will assume that their measurement values are at least roughly centered about the robot's actual distance from each wall, but have a standard deviation of 0.2. We'll assume our sensor noise is not cross-dependent so $Q_t$, the sensor noise covariance matrix, can be diagonal. In Python, our robot's state transition and sensor measurement models look like this: ###Code
# transition matrices
u_max = 1.0
A_t = np.zeros((n,n))  # state transition matrix
B_t = np.array([[u_max, 0.0], [0.0, u_max]])  # control input transformation
C_t = np.identity(n)  # measurement transformation (identity matrix)
###Output _____no_output_____ ###Markdown We now set up our simulation similarly to the 1-D case. We'll loop from 1 to `n_iter`. During each iteration, we'll generate 2-D control inputs to our robot, move our robot according to our state transition model, generate a measurement of the new state, then pass this information to our Kalman filter. ###Code
T = 0.5  # random control input gain

# initialize the filter's belief with the initial state Gaussian
mu_bar[0] = mu_0.flatten()
Sigma_bar[0] = Sigma_0

for k in range(1, n_iter):
    # generate control inputs
    u[k] = [np.random.randn()*T for i in range(n)]  # random walk movement

    # move the robot
    epsilon_t = [np.random.randn() for i in range(n)]  # Gaussian movement noise
    mu[k] = np.matmul(A_t, mu[k-1]) + np.matmul(B_t, u[k]) + epsilon_t

    # generate a sensor measurement of the new state mu[k]. The measurement
    # noise is zero-mean multivariate Gaussian with covariance Q_t, as
    # described in the measurement model above
    z[k] = np.matmul(C_t, mu[k]) + np.random.multivariate_normal(np.zeros(n), Q_t)

    # apply the kalman filter and print the filtered position estimate
    mu_bar[k], Sigma_bar[k] = kalman_filter(mu_bar[k-1], Sigma_bar[k-1], u[k], z[k])
    print("%1.3f, %1.3f" % (mu_bar[k][0], mu_bar[k][1]))
###Output _____no_output_____
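###Markdown One way to sanity-check the simulation loop is to plot the true trajectory, the noisy measurements, and the filtered estimates together. This is a small sketch (not part of the original notebook) that reuses the `mu`, `z` and `mu_bar` arrays filled in above. ###Code
# compare the true path, the noisy measurements and the Kalman estimates
fig, ax = plt.subplots()
ax.plot(mu[:, 0], mu[:, 1], 'k-', label='true path')
ax.plot(z[:, 0], z[:, 1], 'r.', alpha=0.4, label='noisy measurements')
ax.plot(mu_bar[:, 0], mu_bar[:, 1], 'b-', label='Kalman estimate')
ax.set_xlabel('x position')
ax.set_ylabel('y position')
ax.legend()
plt.show()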
ML real fight/chapter-2/chapter2_code.ipynb
###Markdown 2.3Get the data 2.3.2Download the data ###Code import os import tarfile import urllib DOWNLOAD_ROOT="https://raw.githubusercontent.com/ageron/handson-ml2/master/" HOUSING_PATH=os.path.join("datasets1",'housing') HOUSING_URL=DOWNLOAD_ROOT+'datasets/housing/housing.tgz' def fetch_housing_data(housing_url=HOUSING_URL,housing_path=HOUSING_PATH): os.makedirs(housing_path,exist_ok=True) tgz_path=os.path.join(housing_path,'housing.tgz') urllib.request.urlretrieve(housing_url,tgz_path) housing_tgz=tarfile.open(tgz_path) housing_tgz.extractall(path=housing_path) housing_tgz.close() fetch_housing_data() import pandas as pd def load_housing_data(housing_path=HOUSING_PATH): csv_path=os.path.join(housing_path,'housing.csv') return pd.read_csv(csv_path) ###Output _____no_output_____ ###Markdown 2.3.3 Take a quick look at data stracture ###Code housing=load_housing_data() housing.head() housing.info() housing['ocean_proximity'].value_counts() housing.describe() %matplotlib inline import matplotlib.pyplot as plt housing.hist(bins=50,figsize=(20,15)) plt.show() ###Output _____no_output_____ ###Markdown 2.3.4 create test set ###Code import numpy as np np.random.seed=42 from sklearn.model_selection import train_test_split train_set,test_set=train_test_split(housing,test_size=0.2,random_state=42) test_set.head() housing['median_income'].hist() housing['income_cat']=pd.cut(housing['median_income'],bins=[0.,1.5,3.,4.5,6.,np.inf],labels=[1,2,3,4,5]) housing.head() housing['income_cat'].value_counts() housing['income_cat'].hist() from sklearn.model_selection import StratifiedShuffleSplit split=StratifiedShuffleSplit(n_splits=1,test_size=0.2,random_state=42) for train_index,test_index in split.split(housing,housing['income_cat']): strat_train_set=housing.loc[train_index] strat_test_set=housing.loc[test_index] train_index strat_train_set['income_cat'].value_counts()/len(strat_train_set) ###Output _____no_output_____ ###Markdown 2.4 discover and visualize the data to gain the insights 2.4.1 visualizing geographical data ###Code housing=strat_train_set.copy() housing.plot(kind='scatter',x='longitude',y='latitude') housing.plot(kind='scatter',x='longitude',y='latitude',alpha=0.1) housing.plot(kind='scatter',x='longitude',y='latitude',alpha=0.4, s=housing['population']/100,label='population',figsize=(10,7), c=housing['median_house_value'],cmap=plt.get_cmap('jet'),colorbar=True,) plt.legend() ###Output _____no_output_____ ###Markdown 2.4.2寻找相关性 ###Code corr_matrix=housing.corr() corr_matrix['median_house_value'].sort_values(ascending=False) from pandas.plotting import scatter_matrix attributes=['median_house_value','median_income','total_rooms','housing_median_age'] scatter_matrix(housing[attributes],figsize=(12,8)) housing.plot(kind='scatter',x='median_income',y='median_house_value',alpha=0.1) plt.show() ###Output _____no_output_____ ###Markdown 2.4.3 experimenting with attributes combinations ###Code housing["rooms_per_household"] = housing["total_rooms"]/housing["households"] housing["bedrooms_per_room"] = housing["total_bedrooms"]/housing["total_rooms"] housing["population_per_household"]=housing["population"]/housing["households"] corr_matrix=housing.corr() corr_matrix['median_house_value'].sort_values(ascending=False) housing.plot(kind='scatter',x='rooms_per_household',y='median_house_value',alpha=0.4) plt.axis([0,5,0,500000]) plt.show() housing.describe() ###Output _____no_output_____ ###Markdown 2.5 prepare the data for ml algorithms 2.5.1 data cleaning ###Code 
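# Missing values in total_bedrooms can be handled in three standard ways
# (a quick sketch of the options; only the third one is used below):
#   housing.dropna(subset=['total_bedrooms'])   # option 1: drop the affected rows
#   housing.drop('total_bedrooms', axis=1)      # option 2: drop the whole column
#   fill the missing entries with the median    # option 3: impute (done below)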
median=housing['total_bedrooms'].median() # housing['total_bedrooms'].fillna(median,inplace=True) from sklearn.impute import SimpleImputer imputer=SimpleImputer(strategy='median') housing_num=housing.drop('ocean_proximity',axis=1) imputer.fit(housing_num) imputer.statistics_ X=imputer.transform(housing_num) housing_tr=pd.DataFrame(X,columns=housing_num.columns,index=housing_num.index) housing_tr imputer.strategy ###Output _____no_output_____ ###Markdown 2.5.2 handling text and categorical attributes ###Code housing_cat=housing[['ocean_proximity']] housing_cat from sklearn.preprocessing import OrdinalEncoder ordinal_encoder=OrdinalEncoder() housing_cat_encoded=ordinal_encoder.fit_transform(housing_cat) housing_cat_encoded[:10] ordinal_encoder.categories_ from sklearn.preprocessing import OneHotEncoder cat_encoder=OneHotEncoder() housing_cat_onehot=cat_encoder.fit_transform(housing_cat) housing_cat_onehot.toarray() cat_encoder.categories_ ###Output _____no_output_____ ###Markdown 2.5.3 custom transformers ###Code from sklearn.base import BaseEstimator, TransformerMixin # column index rooms_ix, bedrooms_ix, population_ix, households_ix = 3, 4, 5, 6 class CombinedAttributesAdder(BaseEstimator, TransformerMixin): def __init__(self, add_bedrooms_per_room=True): # no *args or **kargs self.add_bedrooms_per_room = add_bedrooms_per_room def fit(self, X, y=None): return self # nothing else to do def transform(self, X): rooms_per_household = X[:, rooms_ix] / X[:, households_ix] population_per_household = X[:, population_ix] / X[:, households_ix] if self.add_bedrooms_per_room: bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix] return np.c_[X, rooms_per_household, population_per_household, bedrooms_per_room] else: return np.c_[X, rooms_per_household, population_per_household] attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False) housing_extra_attribs = attr_adder.transform(housing.values) col_names = "total_rooms", "total_bedrooms", "population", "households" rooms_ix, bedrooms_ix, population_ix, households_ix = [ housing.columns.get_loc(c) for c in col_names] # get the column indices ###Output _____no_output_____ ###Markdown 2.5.5 transform pipelines ###Code from sklearn.preprocessing import StandardScaler from sklearn.pipeline import Pipeline num_pipeline=Pipeline([ ('imputer',SimpleImputer(strategy='median')), ("attribs_adder",CombinedAttributesAdder()), ("std_scaler",StandardScaler()) ]) housing_num_tr=num_pipeline.fit_transform(housing_num) housing_num_tr from sklearn.compose import ColumnTransformer num_attribs=list(housing_num) cat_attribs=['ocean_proximity'] full_pipeline=ColumnTransformer([ ('num',num_pipeline,num_attribs), ('cat',OneHotEncoder(),cat_attribs) ]) housing_prepared=full_pipeline.fit_transform(housing) housing_prepared housing_prepared.shape ###Output _____no_output_____
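###Markdown A key point of wrapping the preprocessing in a single `full_pipeline` is that the exact same transformations can later be reused on new data with `transform()` (not `fit_transform()`), so the imputer medians, scaling statistics and one-hot categories learned on the training set are applied unchanged. Below is a small sketch of what that would look like for the held-out test split; the manual ratio columns are recreated first because `housing` already contained them when the pipeline was fitted (an assumption based on the cells above). ###Code
test_features = strat_test_set.copy()
test_features['rooms_per_household'] = test_features['total_rooms'] / test_features['households']
test_features['bedrooms_per_room'] = test_features['total_bedrooms'] / test_features['total_rooms']
test_features['population_per_household'] = test_features['population'] / test_features['households']

# reuse the statistics learned during fit_transform() on the training set
test_prepared = full_pipeline.transform(test_features)
print(test_prepared.shape)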
ETL Pipelines/12_dummyvariables_exercise/12_dummyvariables_exercise.ipynb
###Markdown Dummy Variables ExerciseIn this exercise, you'll create dummy variables from the projects data set. The idea is to transform categorical data like this:| Project ID | Project Category ||------------|------------------|| 0 | Energy || 1 | Transportation || 2 | Health || 3 | Employment |into new features that look like this:| Project ID | Energy | Transportation | Health | Employment ||------------|--------|----------------|--------|------------|| 0 | 1 | 0 | 0 | 0 || 1 | 0 | 1 | 0 | 0 || 2 | 0 | 0 | 1 | 0 || 3 | 0 | 0 | 0 | 1 |(Note if you were going to use this data with a model influenced by multicollinearity, you would want to eliminate one of the columns to avoid redundant information.) The reasoning behind these transformations is that machine learning algorithms read in numbers not text. Text needs to be converted into numbers. You could assign a number to each category like 1, 2, 3, and 4. But a categorical variable has no inherent order, so you want to reflect this in your features.Pandas makes it very easy to create dummy variables with the [get_dummies](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.get_dummies.html) method. In this exercise, you'll create dummy variables from the World Bank projects data; however, there's a caveat. The World Bank data is not particularly clean, so you'll need to explore and wrangle the data first.You'll focus on the text values in the sector variables.Run the code cells below to read in the World Bank projects data set and then to filter out the data for text variables. ###Code import pandas as pd import numpy as np # read in the projects data set and do basic wrangling projects = pd.read_csv('../data/projects_data.csv', dtype=str) projects.drop('Unnamed: 56', axis=1, inplace=True) projects['totalamt'] = pd.to_numeric(projects['totalamt'].str.replace(',', '')) projects['countryname'] = projects['countryname'].str.split(';', expand=True)[0] projects['boardapprovaldate'] = pd.to_datetime(projects['boardapprovaldate']) # keep the project name, lending, sector and theme data sector = projects.copy() sector = sector[['project_name', 'lendinginstr', 'sector1', 'sector2', 'sector3', 'sector4', 'sector5', 'sector', 'mjsector1', 'mjsector2', 'mjsector3', 'mjsector4', 'mjsector5', 'mjsector', 'theme1', 'theme2', 'theme3', 'theme4', 'theme5', 'theme ', 'goal', 'financier', 'mjtheme1name', 'mjtheme2name', 'mjtheme3name', 'mjtheme4name', 'mjtheme5name']] ###Output _____no_output_____ ###Markdown Run the code cell below. This cell shows the percentage of each variable that is null. Notice the mjsector1 through mjsector5 variables are all null. The mjtheme1name through mjtheme5name are also all null as well as the theme variable. Because these variables contain so many null values, they're probably not very useful. ###Code # output percentage of values that are missing 100 * sector.isnull().sum() / sector.shape[0] ###Output _____no_output_____ ###Markdown Exercise 1The sector1 variable looks promising; it doesn't contain any null values at all. In the next cell, store the unique sector1 values in a list and output the results. Use the sort_values() and unique() methods. ###Code # TODO: Create a list of the unique values in sector1. Use the sort_values() and unique() pandas methods. 
# And then convert those results into a Python list uniquesectors1 = list(sector['sector1'].sort_values().unique()) uniquesectors1 # run this code cell to see the number of unique values print('Number of unique values in sector1:', len(uniquesectors1)) ###Output Number of unique values in sector1: 3060 ###Markdown 3060 different categories is quite a lot! Remember that with dummy variables, if you have n categorical values, you need n - 1 new variables! That means 3059 extra columns! Exercise 2There are a few issues with this 'sector1' variable. First, there are values labeled '!$!0'. These should be substituted with NaN.Furthermore, each sector1 value ends with a ten or eleven character string like '!$!49!$!EP'. Some sectors show up twice in the list like: 'Other Industry; Trade and Services!$!70!$!YZ', 'Other Industry; Trade and Services!$!63!$!YZ',But it seems like those are actually the same sector. You'll need to remove everything past the exclamation point. Many values in the sector1 variable start with the term '(Historic)'. Try removing that phrase as well. replace() methodWith pandas, you can use the replace() method to search for text and replace parts of a string with another string. If you know the exact string you're looking for, the replace() method is straight forward. For example, say you wanted to remove the string '(Trial)' from this data:| data ||--------------------------|| '(Trial) Banking' || 'Banking' || 'Farming' || '(Trial) Transportation' |You could use `df['data'].replace('(Trial)', '')` to replace (Trial) with an empty string. regular expressionsWhat about this data?| data ||------------------------------------------------|| 'Other Industry; Trade and Services?$ab' || 'Other Industry; Trade and Services?ceg' |This type of data is trickier. In this case, there's a pattern where you want to remove a string that starts with an exclamation point and then has an unknown number of characters after it. When you need to match patterns of character, you can use [regular expressions](https://en.wikipedia.org/wiki/Regular_expression).The replace method can take a regular expression. Sodf['data'].replace('?.+', regex=True) where '?.+' means find a set of characters that starts with a question mark is then followed by one or more characters. You can see a [regular expression cheat sheet](https://medium.com/factory-mind/regex-tutorial-a-simple-cheatsheet-by-examples-649dc1c3f285) here.Fix these issues in the code cell below. ###Code # TODO: In the sector1 variable, replace the string '!$!0' with nan # Put the results back into the sector1 variable # HINT: you can use the pandas replace() method and numpy.nan sector['sector1'] = sector['sector1'].replace('!$!0', np.nan) # TODO: In the sector1 variable, remove the last 10 or 11 characters from the sector1 variable. # HINT: There is more than one way to do this. 
To do it with one line of code, # you can use the replace method with a regex expression '!.+' # That regex expression looks for a string with an exclamation # point followed by one or more characters sector['sector1'] = sector['sector1'].replace('!.+', '', regex=True) # TODO: Remove the string '(Historic)' from the sector1 variable # HINT: You can use the replace method sector['sector1'] = sector['sector1'].replace('^(\(Historic\))', '', regex=True) print('Number of unique sectors after cleaning:', len(list(sector['sector1'].unique()))) print('Percentage of null values after cleaning:', 100 * sector['sector1'].isnull().sum() / sector['sector1'].shape[0]) ###Output Number of unique sectors after cleaning: 156 Percentage of null values after cleaning: 3.49627356423 ###Markdown Now there are 156 unique categorical values. That's better than 3060. If you were going to use this data with a supervised learning machine model, you could try converting these 156 values to dummy variables. You'd still have to train and test a model to see if those are good features.You could try to consolidate similar categories together, which is what the challenge exercise in part 4 is about.There are also still many entries with NaN values. How could you fill these in? You might try to determine an appropriate category from the 'project_name' or 'lendinginstr' variables. If you make dummy variables including NaN values, then you could consider a feature with all zeros to represent NaN. Or you could delete these records from the data set. Pandas will ignore NaN values by default. That means, for a given row, all dummy variables will have a value of 0 if the sector1 value was NaN.Don't forget about the bigger context! This data is being prepared for a machine learning algorithm. Whatever techniques you use to engineer new features, you'll need to use those when running your model on new data. So if your new data does not contain a sector1 value, you'll have to run whatever feature engineering processes you did on your training set. Exercise 3In this next exercise, use the pandas pd.get_dummies() method to create dummy variables. Then use the concat() method to concatenate the dummy variables to a dataframe that contains the project totalamt variable and the project year from the boardapprovaldate. ###Code dummies = pd.get_dummies(sector, columns=['sector1']) dummies.head() # TODO: Create dummy variables from the sector1 data. Put the results into a dataframe called dummies # Hint: Use the get_dummies method dummies = pd.get_dummies(sector['sector1']) # TODO: Create a new dataframe called df by # filtering the projects data for the totalamt and # the year from boardapprovaldate projects['year'] = projects['boardapprovaldate'].dt.year df = projects[['totalamt','year']] # TODO: Concatenate the results of dummies and projects # into a single data frame df_final = pd.concat([df, dummies], axis=1) df_final.head() ###Output _____no_output_____ ###Markdown You could continue to consolidate sector values using other techniques. For example, in the next exercise, you'll find categories with similar terms and then combine them together. Keep in mind that how much to consolidate will depend on your machine learning model performance and your hardware's ability to handle the extra features in memory. If your hardware's memory can handle 3060 new features and your machine learning algorithm performs better, then go for it! 
Exercise 4 (Challenge)But can you do anything else with the sector1 variable?The percentage of null values for 'sector1' is now 3.49%. That turns out to be the same number as the null values for the 'sector' column. You can see this if you scroll back up to where the code calculated the percentage of null values for each variable. Perhaps the 'sector1' and 'sector' variable have the same information. If you look at the 'sector' variable, however, it also needs cleaning. The values look like this:'Urban Transport;Urban Transport;Public Administration - Transportation'It turns out the 'sector' variable combines information from the 'sector1' through 'sector5' variables and the 'mjsector' variable. Run the code cell below to look at the sector variable. ###Code sector['sector'] ###Output _____no_output_____ ###Markdown What else can you do? If you look at all of the diferent sector1 categories, it might be useful to combine a few of them together. For example, there are various categories with the term "Energy" in them. And then there are other categories that seem related to energy but don't have the word energy in them like "Thermal" and "Hydro". Some categories have the term "Renewable Energy", so perhaps you could make a separate "Renewable Energy" category.Similarly, there are categories with the term "Transportation" in them, and then there are related categories like "Highways".In the next cell, find all sector1 values with the term 'Energy' in them. For each of these rows, put the string 'energy' in a new column called 'sector1_aggregates'. Do the same for "Transportation". ###Code import re # Create the sector1_aggregates variable sector.loc[:,'sector1_aggregates'] = sector['sector1'] # TODO: The code above created a new variable called sector1_aggregates. # Currently, sector1_aggregates has all of the same values as sector1 # For this task, find all the rows in sector1_aggregates with the term 'Energy' in them, # For all of these rows, replace whatever is the value is with the term 'Energy'. # The idea is to simplify the category names by combining various categories together. # Then, do the same for the term 'Transportation # HINT: You can use the contains() methods. See the documentation for how to ignore case using the re library # HINT: You might get an error saying "cannot index with vector containing NA / NaN values." # Try converting NaN values to something else like False or a string sector.loc[sector['sector1_aggregates'].str.contains('Energy', re.IGNORECASE).replace(np.nan, False), 'sector1_aggregates'] = 'Energy' sector.loc[sector['sector1_aggregates'].str.contains('Transportation', re.IGNORECASE).replace(np.nan, False), 'sector1_aggregates'] = 'Transportation' print('Number of unique sectors after cleaning:', len(list(sector['sector1_aggregates'].unique()))) ###Output _____no_output_____
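###Markdown With the consolidated `sector1_aggregates` column in place, the same dummy-variable pattern from Exercise 3 can be applied to it. A short sketch, reusing the `projects['year']` column created earlier: ###Code
# dummy variables from the consolidated categories instead of raw sector1
dummies_agg = pd.get_dummies(sector['sector1_aggregates'])
df_agg = pd.concat([projects[['totalamt', 'year']], dummies_agg], axis=1)
print(df_agg.shape)
df_agg.head()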
FetalFraction_with_DL-RL data.ipynb
###Markdown Import Required Library ###Code import pandas as pd import numpy as np from sklearn.model_selection import train_test_split , cross_val_score from sklearn.metrics import r2_score , mean_squared_error , mean_absolute_error,accuracy_score ###Output _____no_output_____ ###Markdown Library for Visualization ###Code #import seaborn as sns #from matplotlib import pyplot as plt #sns.set_style("whitegrid") #%matplotlib inline ###Output _____no_output_____ ###Markdown Filter all the Warnings ###Code import warnings warnings.filterwarnings("ignore") ###Output _____no_output_____ ###Markdown Read X and Y Parameter ###Code #x = pd.read_csv("../tsrl10000.csv") #x = x.iloc[:,1:] # #y = pd.read_csv("../srlbininfo_10000",sep ="\t",header =None ) # #y = pd.DataFrame(y.iloc[:,1]) #y.columns = ["RL"] # #df = pd.concat([y,x],axis =1) # #df = df[df.RL < 25] #Ignore values more than 25 #y = pd.DataFrame(df.iloc[:,0]) #x = df.iloc[:,1:] #df.to_csv("../RL_df_10000.csv",index = False) #When Memory Problem Just Load the data df = pd.read_csv("../RL_df_10000.csv") y = pd.DataFrame(df.iloc[:,0]) x = df.iloc[:,1:] print(x.shape, y.shape) display(x.head(2)) display(y.head(2)) ###Output (9977, 3880) (9977, 1) ###Markdown Split the data to training and Testing set ###Code x_train,x_test,y_train, y_test= train_test_split(x,y,test_size=0.2) x_train.shape,y_train.shape, x_test.shape , y_test.shape ###Output _____no_output_____ ###Markdown Linear Regression Starts Here Classical Linear Regressors We used Following 4 Regression Methods First 1. **LinearRegression**([…])-->Ordinary least squares Linear Regression. 2. **Ridge**([alpha, fit_intercept, …]) -->Linear least squares with l2 regularization.3. **RidgeCV**([alphas, …]) -->Ridge regression with built-in cross-validation.4. 
**SGDRegressor**([loss, penalty, …]) -->Linear model fitted by minimizing a regularized empirical loss with SGD 1.1 LinearRegression()class sklearn.linear_model.LinearRegression(fit_intercept=True, normalize=False, copy_X=True, n_jobs=None) ###Code from sklearn.linear_model import LinearRegression name = "Linear _Regression--> " lr = LinearRegression().fit(x_train,y_train) y_pred = lr.predict(x_test) #print(name +" Coefficient ", lr.coef_) print(name + " Intercept " , lr.intercept_) print("****"*5+"Accuracy Test Model Fitting "+"****"*5) print(name + "Score for train data set :",lr.score(x_train,y_train)) print(name + "Score for test data Set",lr.score(x_test,y_test)) print(name + "Score for Prediction data Set",lr.score(x_test,y_pred)) print("\n"+"****"*5+"R2 Score"+"****"*5) print(name + "R2 Score for test is = ", r2_score(y_test, y_pred)) print("\n"+"****"*5+"Mean Absolute Error"+"****"*5) print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test, y_pred)) print("\n"+"****"*5+" Mean Squared Error"+"****"*5) print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test, y_pred)) display(y_pred[:5]) display(y_test[:5]) ###Output _____no_output_____ ###Markdown This is for Visualizing ###Code #y_pred = lr.predict(x_train) #fig,ax = plt.subplots() #ax.scatter(y_train,y_pred,edgecolor=("r")) #ax.plot([y_train.min(),y_train.max()],[y_train.min(),y_train.max()],"b",lw=2) #ax.set_xlabel("Measured") #ax.set_ylabel("Predicted") #plt.show() # #fit(self,X,y[sample_weight]) #get_params(self[,deep]) #predict(self,X) #score(self,X,Y[,sample_weight]) #set_params(self,\) #y_pred = lr.predict(x_train) #fig,ax = plt.subplots() #ax.scatter(y_train,y_pred,edgecolor=("r")) ##ax.plot([y_train.min(),y_train.max()],[y_train.min(),y_train.max()],"b",lw=2) #ax.set_xlabel("Measured") #ax.set_ylabel("Predicted") #plt.show() ###Output _____no_output_____ ###Markdown Feature Scalling ###Code #from sklearn.preprocessing import StandardScaler #scaler = StandardScaler() # #print(scaler.fit(x_train)) #print("Scaler Mean",scaler.mean_) #print("Scaler Variance",scaler.var_) # #scaled_x_train = scaler.transform(x_train) #scaled_x_test = scaler.transform(x_test) # #slr = linear_model.LinearRegression() #slr.fit(scaled_x_train,y_train) # #print("Coeff_",slr.coef_) #print("intercept_",slr.intercept_) #print("R2 Score for train Set:{:.3f}".format(slr.score(scaled_x_train,y_train))) #print("R2 Score for test set :{:.3f}".format(slr.score(scaled_x_test,y_test))) ###Output _____no_output_____ ###Markdown polynomial Features ###Code #from sklearn.preprocessing import PolynomialFeatures #poly = PolynomialFeatures(2) #x_train_poly = poly.fit_transform(x_train) #x_test_poly = poly.transform(x_test) #print(x_train.shape , x_train_poly.shape) # For this big Data Memory Error Occured ###Output _____no_output_____ ###Markdown Regularization With as many as 306 features in the model it is natural for the model to get quite complex. The model sticks too much to the data and the model has probably learned the background noise which results in high variance while being fit, which leads to **Overfitting**. This results in poor prediction and generalization power when applied o data outside the training set. To overcome this problem **regularization technique** is used. To find the best model, the common method in machine learning is to define a loss or cost function that describes how well the model fits the data. The goal is to find the model that minimzes this loss function. 
The idea is to penalize this loss function by adding a complexity term that would give a bigger loss for more complex models. **Regularization** allows to shrink the coefficients to zero by introducing a tuning parameter **'lambda'** or **'alpha'**. This ensures:- Shrinking of parameters, therefore it is mostly used to prevent multicollinearity.- Reduces the model complexity by coefficient shrinkage. The two popular methods used to regularize parameters are:- Ridge Regression- Lasso Regression**Ridge Regression:** Ridge regression uses L2 penalty to penalize coefficients. L2 penalty is the penalty equivalent to **square of the magnitude of coefficients** **Lasso Regression:** Lasso regression uses L1 penalty which is the **absolute value of the magnitude of coefficients**Let us apply Ridge and Lasso models to our data 1.2 Ridge() Regression `class sklearn.linear_model.Ridge(alpha=1.0, fit_intercept=True, normalize=False, copy_X=True, max_iter=None, tol=0.001, solver='auto', random_state=None)` **Default** ||y - Xw||^2_2 + alpha * ||w||^2_2 Objective Function Methods**fit**(self, X, y[, sample_weight])-->Fit Ridge regression model.**get_params**(self[, deep])--> Get parameters for this estimator.**predict**(self, X)--> Predict using the linear model.**score**(self, X, y[, sample_weight])--> Return the coefficient of determination R^2 of the prediction.**set_params**(self, \*\*params) --> Set the parameters of this estimator. ###Code name = "Ridge -->" from sklearn.linear_model import Ridge clf_ridge = Ridge(alpha=0.1) clf_ridge.fit(x_test,y_test) y_pred_ridge = clf_ridge.predict(x_test) #print(name +" Coefficient ", lr.coef_) print(name + " Intercept " , clf_ridge.intercept_) ## print("****"*5+"Accuracy Test Model Fitting "+"****"*5) print(name + "Score for train data set :",clf_ridge.score(x_train,y_train)) print(name + "Score for test data Set",clf_ridge.score(x_test,y_test)) print(name + "Score for Predictecd data Set",clf_ridge.score(x_test,y_pred_ridge)) ## print("\n"+"****"*5+"R2 Score"+"****"*5) print(name + "R2 Score for test is = ", r2_score(y_test, y_pred_ridge)) ## print("\n"+"****"*5+"Mean Absolute Error"+"****"*5) print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_ridge)) ## print("\n"+"****"*5+" Mean Squared Error"+"****"*5) print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_ridge)) ## ###Output Ridge --> Intercept [-1783.66145115] ********************Accuracy Test Model Fitting ******************** Ridge -->Score for train data set : 0.4504582095812526 Ridge -->Score for test data Set 0.99906169857608 Ridge -->Score for Predictecd data Set 1.0 ********************R2 Score******************** Ridge -->R2 Score for test is = 0.99906169857608 ********************Mean Absolute Error******************** Ridge -->Mean Absolute Error of Test = 0.09226214647739239 ******************** Mean Squared Error******************** Ridge -->Mean Squared Error of Test = 0.014444705813884424 ###Markdown 1.3 RidgeCV`class sklearn.linear_model.RidgeCV(alphas=(0.1, 1.0, 10.0), fit_intercept=True, normalize=False, scoring=None, cv=None, gcv_mode=None, store_cv_values=False)`**fit**(self, X, y[, sample_weight]) --> Fit Ridge regression model with cv. **get_params**(self[, deep])--> Get parameters for this estimator. 
**predict**(self, X)--> Predict using the linear model.**score**(self, X, y[, sample_weight]) --> Return the coefficient of determination R^2 of the prediction.**set_params**(self, \*\*params) --> Set the parameters of this estimator. ###Code # Import Library from sklearn.linear_model import RidgeCV name = "RidgeCV --> " #Object Creation clf_ridgecv = RidgeCV(alphas= [1e-3, 1e-2, 1e-1, 1]).fit(x_train , y_train) y_pred_ridgecv = clf_ridgecv.predict(x_test) #print(name +" Coefficient ", lr.coef_) print(name + " Intercept " , clf_ridgecv.intercept_) ## print("****"*5+"Accuracy Test Model Fitting "+"****"*5) print(name + "Score for train data set :",clf_ridgecv.score(x_train,y_train)) print(name + "Score for test data Set",clf_ridgecv.score(x_test,y_test)) print(name + "Score for Predictecd data Set",clf_ridgecv.score(x_test,y_pred_ridgecv)) ## print("\n"+"****"*5+"R2 Score"+"****"*5) print(name + "R2 Score for test is = ", r2_score(y_test, y_pred_ridgecv)) ## print("\n"+"****"*5+"Mean Absolute Error"+"****"*5) print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_ridgecv)) ## print("\n"+"****"*5+" Mean Squared Error"+"****"*5) print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_ridgecv)) ## ###Output RidgeCV --> Intercept [-2083.96977257] ********************Accuracy Test Model Fitting ******************** RidgeCV --> Score for train data set : 0.877856913743094 RidgeCV --> Score for test data Set 0.7188842941321579 RidgeCV --> Score for Predictecd data Set 1.0 ********************R2 Score******************** RidgeCV --> R2 Score for test is = 0.7188842941321579 ********************Mean Absolute Error******************** RidgeCV --> Mean Absolute Error of Test = 1.6347961466321737 ******************** Mean Squared Error******************** RidgeCV --> Mean Squared Error of Test = 4.327643087184858 ###Markdown 1.4 SGDRegressor() `class sklearn.linear_model.SGDRegressor(loss='squared_loss', penalty='l2', alpha=0.0001, l1_ratio=0.15, fit_intercept=True, max_iter=1000, tol=0.001, shuffle=True, verbose=0, epsilon=0.1, random_state=None, learning_rate='invscaling', eta0=0.01, power_t=0.25, early_stopping=False, validation_fraction=0.1, n_iter_no_change=5, warm_start=False, average=False)` Methods**densify**(self) --> Convert coefficient matrix to dense array format.**fit**(self, X, y[, coef_init, intercept_init, …])--> Fit linear model with Stochastic Gradient Descent.**get_params**(self[, deep])--> Get parameters for this estimator.**partial_fit**(self, X, y[, sample_weight])--> Perform one epoch of stochastic gradient descent on given samples.**predict**(self, X) --> Predict using the linear model**score**(self, X, y[, sample_weight])--> Return the coefficient of determination R^2 of the prediction.**set_params**(self, \*\*kwargs)--> Set and validate the parameters of estimator.**sparsify**(self) --> Convert coefficient matrix to sparse format. 
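###Markdown SGDRegressor is sensitive to the scale of its inputs, so in practice it is usually wrapped in a pipeline with a scaler. The cell below is a minimal sketch of that pattern, shown before the raw-feature run that follows; it assumes the x_train/x_test split defined earlier in this notebook, and the max_iter/tol settings are illustrative rather than tuned. ###Code from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import SGDRegressor
from sklearn.metrics import mean_absolute_error

# Scale the features before the SGD step; unscaled inputs make the gradient updates unstable
sgd_pipeline = make_pipeline(StandardScaler(),
                             SGDRegressor(max_iter=5000, tol=1e-3, random_state=0))
sgd_pipeline.fit(x_train, y_train.values.ravel())

print("R2 on test set :", sgd_pipeline.score(x_test, y_test.values.ravel()))
print("MAE on test set:", mean_absolute_error(y_test, sgd_pipeline.predict(x_test))) ###Output _____no_output_____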
###Code from sklearn.linear_model import SGDRegressor name = "SGDRegressor-->" clf_sgd = SGDRegressor(max_iter = 50000, tol = 1e-3) clf_sgd.fit(x_train, y_train) y_pred_sgd = clf_sgd.predict(x_test) #print(name +" Coefficient ", lr.coef_) print(name + " Intercept " , clf_sgd.intercept_) ## print("****"*5+"Accuracy Test Model Fitting "+"****"*5) print(name + "Score for train data set :",clf_sgd.score(x_train,y_train)) print(name + "Score for test data Set",clf_sgd.score(x_test,y_test)) print(name + "Score for Predictecd data Set",clf_sgd.score(x_test,y_pred_sgd)) ## print("\n"+"****"*5+"R2 Score"+"****"*5) print(name + "R2 Score for test is = ", r2_score(y_test, y_pred_sgd)) ## print("\n"+"****"*5+"Mean Absolute Error"+"****"*5) print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_sgd)) ## print("\n"+"****"*5+" Mean Squared Error"+"****"*5) print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_sgd)) ## ###Output SGDRegressor--> Intercept [-1.17839138e+08] ********************Accuracy Test Model Fitting ******************** SGDRegressor-->Score for train data set : -8.714954416188797e+17 SGDRegressor-->Score for test data Set -1.570102274205259e+18 SGDRegressor-->Score for Predictecd data Set 1.0 ********************R2 Score******************** SGDRegressor-->R2 Score for test is = -1.570102274205259e+18 ********************Mean Absolute Error******************** SGDRegressor-->Mean Absolute Error of Test = 3918184043.4188356 ******************** Mean Squared Error******************** SGDRegressor-->Mean Squared Error of Test = 2.4170980529746706e+19 ###Markdown The Above Method is Use less ********************** Part 2 ************************************** Regressors with variable selection **1.ElasticNet**([alpha, l1_ratio, …]) Linear regression with combined L1 and L2 priors as regularizer.**2.ElasticNetCV**([l1_ratio, eps, …])Elastic Net model with iterative fitting along a regularization path.**3.Lars**([fit_intercept, verbose, …])Least Angle Regression model a.k.a.**4.LarsCV**([fit_intercept, …])Cross-validated Least Angle Regression model.**5.Lasso**([alpha, fit_intercept, …])Linear Model trained with L1 prior as regularizer (aka the Lasso)**6.LassoCV**([eps, n_alphas, …])Lasso linear model with iterative fitting along a regularization path.**7.LassoLars**([alpha, …])Lasso model fit with Least Angle Regression a.k.a.**8.LassoLarsCV**([fit_intercept, …])Cross-validated Lasso, using the LARS algorithm.**9.LassoLarsIC**([criterion, …])Lasso model fit with Lars using BIC or AIC for model selection**10.OrthogonalMatchingPursuit**([…])Orthogonal Matching Pursuit model (OMP) **11.OrthogonalMatchingPursuitCV**([…])Cross-validated Orthogonal Matching Pursuit model (OMP). 2.1 ElasticNet class sklearn.linear_model.**ElasticNet**(alpha=1.0, l1_ratio=0.5, fit_intercept=True, normalize=False, precompute=False, max_iter=1000, copy_X=True, tol=0.0001, warm_start=False, positive=False, random_state=None, selection='cyclic') Methods**fit**(self, X, y[, check_input]) --> Fit model with coordinate descent.**get_params**(self[, deep])--> Get parameters for this estimator.**path**(X, y[, l1_ratio, eps, n_alphas, …])--> Compute elastic net path with coordinate descent.**predict**(self, X)Predict using the linear model.**score**(self, X, y[, sample_weight]) --> Return the coefficient of determination R^2 of the prediction.**set_params**(self, \*\*params)--> Set the parameters of this estimator. 
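###Markdown One detail worth keeping in mind is that the l1_ratio mixing parameter is expected to lie in [0, 1]: values near 0 approach a pure L2 (ridge-type) penalty, while 1 gives a pure L1 (lasso-type) penalty and therefore sparser coefficients. The short sketch below illustrates this on synthetic data generated with make_regression; it is independent of the dataset used in this notebook, and the alpha value is an arbitrary illustrative choice. ###Code import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import ElasticNet

X_demo, y_demo = make_regression(n_samples=200, n_features=20,
                                 n_informative=5, noise=5.0, random_state=0)

for ratio in (0.1, 0.5, 1.0):
    enet = ElasticNet(alpha=1.0, l1_ratio=ratio, max_iter=10000)
    enet.fit(X_demo, y_demo)
    n_zero = np.sum(enet.coef_ == 0)
    # Higher l1_ratio puts more weight on the L1 term, so more coefficients shrink to exactly zero
    print(f"l1_ratio={ratio}: {n_zero} of {enet.coef_.size} coefficients are exactly zero") ###Output _____no_output_____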
###Code from sklearn.linear_model import ElasticNet name = "Elastic net--> " regr_elasticNet = ElasticNet(alpha=0.01,l1_ratio=10,max_iter=50000,random_state=0) regr_elasticNet.fit(x_train , y_train) y_pred_elasticNet = regr_elasticNet.predict(x_test) #print(name +" Coefficient ", lr.coef_) print(name + " Intercept " , clf_ridgecv.intercept_) ## print("****"*5+"Accuracy Test Model Fitting "+"****"*5) print(name + "Score for train data set :",regr_elasticNet.score(x_train,y_train)) print(name + "Score for test data Set",regr_elasticNet.score(x_test,y_test)) print(name + "Score for Predictecd data Set",regr_elasticNet.score(x_test,y_pred_elasticNet)) ## print("\n"+"****"*5+"R2 Score"+"****"*5) print(name + "R2 Score for test is = ", r2_score(y_test, y_pred_elasticNet)) ## print("\n"+"****"*5+"Mean Absolute Error"+"****"*5) print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_elasticNet)) ## print("\n"+"****"*5+" Mean Squared Error"+"****"*5) print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_elasticNet)) ## ###Output Elastic net--> Intercept [-2083.96977257] ********************Accuracy Test Model Fitting ******************** Elastic net--> Score for train data set : 0.0 Elastic net--> Score for test data Set -2.9103782724471117e-05 Elastic net--> Score for Predictecd data Set 1.0 ********************R2 Score******************** Elastic net--> R2 Score for test is = -2.9103782724471117e-05 ********************Mean Absolute Error******************** Elastic net--> Mean Absolute Error of Test = 3.143307544337855 ******************** Mean Squared Error******************** Elastic net--> Mean Squared Error of Test = 15.394974196153752 ###Markdown 2.2 ElasticnetCV() ---------->> **glmnet** in R<<----------- class sklearn.linear_model.**ElasticNetCV**(l1_ratio=0.5, eps=0.001, n_alphas=100, alphas=None, fit_intercept=True, normalize=False, precompute='auto', max_iter=1000, tol=0.0001, cv=None, copy_X=True, verbose=0, n_jobs=None, positive=False, random_state=None, selection='cyclic')`Elastic Net model with iterative fitting along a regularization path` ###Code from sklearn.linear_model import ElasticNetCV name = "ElasticnetCV --> " regr_enetcv = ElasticNetCV(cv=10, random_state=0) regr_enetcv.fit(x_train,y_train) y_pred_elasticnetcv = regr_enetcv.predict(x_test) #print(name +" Coefficient ", lr.coef_) print(name + " Intercept " , regr_enetcv.intercept_) ## print("****"*5+"Accuracy Test Model Fitting "+"****"*5) print(name + "Score for train data set :",regr_enetcv.score(x_train,y_train)) print(name + "Score for test data Set",regr_enetcv.score(x_test,y_test)) print(name + "Score for Predictecd data Set",regr_enetcv.score(x_test,y_pred_elasticnetcv)) ## print("\n"+"****"*5+"R2 Score"+"****"*5) print(name + "R2 Score for test is = ", r2_score(y_test, y_pred_elasticnetcv)) ## print("\n"+"****"*5+"Mean Absolute Error"+"****"*5) print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_elasticnetcv)) ## print("\n"+"****"*5+" Mean Squared Error"+"****"*5) print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_elasticnetcv)) ## ###Output ElasticnetCV --> Intercept -1470.3833602852492 ********************Accuracy Test Model Fitting ******************** ElasticnetCV --> Score for train data set : 0.8403967944981359 ElasticnetCV --> Score for test data Set 0.7280290688454585 ElasticnetCV --> Score for Predictecd data Set 1.0 ********************R2 Score******************** ElasticnetCV --> R2 Score for test is = 
0.7280290688454585 ********************Mean Absolute Error******************** ElasticnetCV --> Mean Absolute Error of Test = 1.582099474097651 ******************** Mean Squared Error******************** ElasticnetCV --> Mean Squared Error of Test = 4.186863613659166 ###Markdown 2.3 Lars() class sklearn.linear_model.Lars(fit_intercept=True, verbose=False, normalize=True, precompute='auto', n_nonzero_coefs=500, eps=2.220446049250313e-16, copy_X=True, fit_path=True)[source]¶ Methods**fit**(self, X, y[, Xy])-->Fit the model using X, y as training data.**get_params**(self[, deep])--Get parameters for this estimator.**predict**(self, X)--Predict using the linear model.**score**(self, X, y[, sample_weight])--Return the coefficient of determination R^2 of the prediction.**set_params**(self, \*\*params)--Set the parameters of this estimator. ###Code from sklearn.linear_model import Lars name = "Lars -->" reg_lars = Lars() reg_lars.fit(x_train, y_train) y_pred_lars = reg_lars.predict(x_test) #print(name +" Coefficient ", lr.coef_) print(name + " Intercept " , reg_lars.intercept_) ## print("****"*5+"Accuracy Test Model Fitting "+"****"*5) print(name + "Score for train data set :",reg_lars.score(x_train,y_train)) print(name + "Score for test data Set",reg_lars.score(x_test,y_test)) print(name + "Score for Predictecd data Set",reg_lars.score(x_test,y_pred_lars)) ## print("\n"+"****"*5+"R2 Score"+"****"*5) print(name + "R2 Score for test is = ", r2_score(y_test, y_pred_lars)) ## print("\n"+"****"*5+"Mean Absolute Error"+"****"*5) print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_lars)) ## print("\n"+"****"*5+" Mean Squared Error"+"****"*5) print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_lars)) ## ###Output Lars --> Intercept [-326.5215762] ********************Accuracy Test Model Fitting ******************** Lars -->Score for train data set : 0.5295642253281516 Lars -->Score for test data Set 0.44097299867759165 Lars -->Score for Predictecd data Set 1.0 ********************R2 Score******************** Lars -->R2 Score for test is = 0.44097299867759165 ********************Mean Absolute Error******************** Lars -->Mean Absolute Error of Test = 2.304480885679381 ******************** Mean Squared Error******************** Lars -->Mean Squared Error of Test = 8.605955794444107 ###Markdown 2.4 LarsCV() class sklearn.linear_model.**LarsCV**(fit_intercept=True, verbose=False, max_iter=500, normalize=True, precompute='auto', cv=None, max_n_alphas=1000, n_jobs=None, eps=2.220446049250313e-16, copy_X=True) **fit(self, X, y)** --> Fit the model using X, y as training data.**get_params(self[, deep])** --> Get parameters for this estimator.**predict(self, X)** --> Predict using the linear model.**score(self, X, y[, sample_weight])** --> Return the coefficient of determination R^2 of the prediction.**set_params(self, \*\*params)** --> Set the parameters of this estimator. 
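###Markdown Since Lars adds one predictor to the model per step, the n_nonzero_coefs parameter caps how many features end up with non-zero coefficients. The sketch below demonstrates this on synthetic data and is separate from the evaluation on this notebook's data that follows; the sizes and the cap of 8 are illustrative assumptions. ###Code import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Lars

X_demo, y_demo = make_regression(n_samples=300, n_features=50,
                                 n_informative=8, noise=1.0, random_state=1)

# Limit the model to at most 8 active features
lars_small = Lars(n_nonzero_coefs=8)
lars_small.fit(X_demo, y_demo)
print("Non-zero coefficients  :", np.sum(lars_small.coef_ != 0))
print("Selected feature indices:", np.flatnonzero(lars_small.coef_)) ###Output _____no_output_____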
###Code #import Library from sklearn.linear_model import LarsCV name = "LarsCV-->" reg_larsCV = LarsCV(cv=10).fit(x_train, y_train) y_predict_lars = reg_larsCV.predict(x_test) reg_larsCV.alpha_ #print(name +" Coefficient ", lr.coef_) print(name + " Intercept " , reg_lars.intercept_) ## print("****"*5+"Accuracy Test Model Fitting "+"****"*5) print(name + "Score for train data set :",reg_lars.score(x_train,y_train)) print(name + "Score for test data Set",reg_lars.score(x_test,y_test)) print(name + "Score for Predictecd data Set",reg_lars.score(x_test,y_pred_lars)) ## print("\n"+"****"*5+"R2 Score"+"****"*5) print(name + "R2 Score for test is = ", r2_score(y_test, y_pred_lars)) ## print("\n"+"****"*5+"Mean Absolute Error"+"****"*5) print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_lars)) ## print("\n"+"****"*5+" Mean Squared Error"+"****"*5) print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_lars)) ## ###Output LarsCV--> Intercept [-326.5215762] ********************Accuracy Test Model Fitting ******************** LarsCV-->Score for train data set : 0.5295642253281516 LarsCV-->Score for test data Set 0.44097299867759165 LarsCV-->Score for Predictecd data Set 1.0 ********************R2 Score******************** LarsCV-->R2 Score for test is = 0.44097299867759165 ********************Mean Absolute Error******************** LarsCV-->Mean Absolute Error of Test = 2.304480885679381 ******************** Mean Squared Error******************** LarsCV-->Mean Squared Error of Test = 8.605955794444107 ###Markdown 2.5 Lasso class sklearn.linear_model.**Lasso**(alpha=1.0, fit_intercept=True, normalize=False, precompute=False, copy_X=True, max_iter=1000, tol=0.0001, warm_start=False, positive=False, random_state=None, selection='cyclic')**(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1** **Linear Model trained with L1 prior as regularizer (aka the Lasso)** ###Code # Import Library from sklearn.linear_model import Lasso name = "Lasso --> " clf_lasso = Lasso(alpha = 0.1).fit(x_train,y_train) y_predict_lasso = clf_lasso.predict(x_test) #print(name +" Coefficient ", lr.coef_) print(name + " Intercept " , clf_lasso.intercept_) ## print("****"*5+"Accuracy Test Model Fitting "+"****"*5) print(name + "Score for train data set :",clf_lasso.score(x_train,y_train)) print(name + "Score for test data Set",clf_lasso.score(x_test,y_test)) print(name + "Score for Predictecd data Set",clf_lasso.score(x_test,y_pred_lars)) ## print("\n"+"****"*5+"R2 Score"+"****"*5) print(name + "R2 Score for test is = ", r2_score(y_test, y_predict_lasso)) ## print("\n"+"****"*5+"Mean Absolute Error"+"****"*5) print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_predict_lasso)) ## print("\n"+"****"*5+" Mean Squared Error"+"****"*5) print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_predict_lasso)) ## ###Output _____no_output_____ ###Markdown 2.6 Lasso CV**Lasso linear model with iterative fitting along a regularization path.****(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1**class sklearn.linear_model.**LassoCV**(eps=0.001, n_alphas=100, alphas=None, fit_intercept=True, normalize=False, precompute='auto', max_iter=1000, tol=0.0001, copy_X=True, cv=None, verbose=False, n_jobs=None, positive=False, random_state=None, selection='cyclic')**fit(self, X, y)** --> Fit linear model with coordinate descent **get_params(self[, deep])**--> Get parameters for this estimator.**path(X, y[, eps, n_alphas, alphas, …])**--> Compute Lasso path 
with coordinate descent**predict(self, X)**--> Predict using the linear model.**score(self, X, y[, sample_weight])** --> Return the coefficient of determination R^2 of the prediction.**set_params(self, \*\*params)** -->Set the parameters of this estimator. ###Code # Import library from sklearn.linear_model import LassoCV name = "LassoCV" #make a object reg_LassoCV = LassoCV(cv =10 , random_state = 0).fit(x_train,y_train) y_predict_reg_lassoCV = reg_LassoCV.predict(x_test) #alpha value can be passed seprately to compare results print(name +" Coefficient ", reg_LassoCV.coef_) print(name + " Intercept " , reg_LassoCV.intercept_) ## print("****"*5+"Accuracy Test Model Fitting "+"****"*5) print(name + "Score for train data set :",reg_LassoCV.score(x_train,y_train)) print(name + "Score for test data Set",reg_LassoCV.score(x_test,y_test)) print(name + "Score for Predictecd data Set",reg_LassoCV.score(x_test,y_predict_reg_lassoCV)) ## print("\n"+"****"*5+"R2 Score"+"****"*5) print(name + "R2 Score for test is = ", r2_score(y_test,y_predict_reg_lassoCV)) ## print("\n"+"****"*5+"Mean Absolute Error"+"****"*5) print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_predict_reg_lassoCV)) ## print("\n"+"****"*5+" Mean Squared Error"+"****"*5) print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_predict_reg_lassoCV)) ## ###Output LassoCV Coefficient [0.26767828 1.46957527 0.87534441 ... 0. 0. 0. ] LassoCV Intercept -1668.8683902826965 ********************Accuracy Test Model Fitting ******************** LassoCVScore for train data set : 0.8578546349284956 LassoCVScore for test data Set 0.7133769655723916 LassoCVScore for Predictecd data Set 1.0 ********************R2 Score******************** LassoCVR2 Score for test is = 0.7133769655723916 ********************Mean Absolute Error******************** LassoCVMean Absolute Error of Test = 1.644344140623386 ******************** Mean Squared Error******************** LassoCVMean Squared Error of Test = 4.412425800754526 ###Markdown 2.7 LassoLarsclass sklearn.linear_model.**LassoLars**(alpha=1.0, fit_intercept=True, verbose=False, normalize=True, precompute='auto', max_iter=500, eps=2.220446049250313e-16, copy_X=True, fit_path=True, positive=False)**Lasso model fit with Least Angle Regression a.k.a. LarsIt is a Linear Model trained with an L1 prior as regularizer.****`(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1`** Methods* **fit(self, X, y[, Xy])**--> Fit the model using X, y as training data.* **get_params(self[, deep])**--> Get parameters for this estimator.* **predict(self, X)**--> Predict using the linear model.* **score(self, X, y[, sample_weight])**-->Return the coefficient of determination R^2 of the prediction.* **set_params(self, \*\*params)**--> Set the parameters of this estimator. 
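###Markdown Because LassoLars follows the LARS path, the whole regularization path can be computed cheaply with the lars_path helper. The cell below sketches this on synthetic data; it is purely illustrative, does not use this notebook's dataset, and the problem sizes are arbitrary assumptions. ###Code import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import lars_path

X_demo, y_demo = make_regression(n_samples=200, n_features=15,
                                 n_informative=4, noise=2.0, random_state=2)

# method='lasso' returns the Lasso path computed by the LARS algorithm
alphas, active, coefs = lars_path(X_demo, y_demo, method='lasso')
print("Number of path steps:", len(alphas))
print("Order in which features become active:", active) ###Output _____no_output_____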
###Code # Import the Library from sklearn.linear_model import LassoLars name = "LassoLars -->" reg_Lasso_lars = LassoLars(alpha = 0.1).fit(x_train, y_train) y_pred_lasso_lars = reg_Lasso_lars.predict(x_test) print(name +" Coefficient ",reg_Lasso_lars.coef_) print(name + " Intercept " , reg_Lasso_lars.intercept_) ## print("****"*5+"Accuracy Test Model Fitting "+"****"*5) print(name + "Score for train data set :",reg_Lasso_lars.score(x_train,y_train)) print(name + "Score for test data Set",reg_Lasso_lars.score(x_test,y_test)) print(name + "Score for Predictecd data Set",reg_Lasso_lars.score(x_test,y_predict_reg_lassoCV)) ## print("\n"+"****"*5+"R2 Score"+"****"*5) print(name + "R2 Score for test is = ", r2_score(y_test,y_pred_lasso_lars)) ## print("\n"+"****"*5+"Mean Absolute Error"+"****"*5) print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_lasso_lars)) ## print("\n"+"****"*5+" Mean Squared Error"+"****"*5) print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_lasso_lars)) ## ###Output LassoLars --> Coefficient [0. 0. 0. ... 0. 0. 0.] LassoLars --> Intercept [10.34523586] ********************Accuracy Test Model Fitting ******************** LassoLars -->Score for train data set : 0.0 LassoLars -->Score for test data Set -2.9103782724471117e-05 LassoLars -->Score for Predictecd data Set -0.0008965013613695394 ********************R2 Score******************** LassoLars -->R2 Score for test is = -2.9103782724471117e-05 ********************Mean Absolute Error******************** LassoLars -->Mean Absolute Error of Test = 3.143307544337855 ******************** Mean Squared Error******************** LassoLars -->Mean Squared Error of Test = 15.394974196153752 ###Markdown 2.8 LassoLarsCV**Lasso linear model with iterative fitting along a regularization path****The best model is selected by cross-validation.****The optimization objective for Lasso is** **`(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1`**class sklearn.linear_model.**LassoCV**(eps=0.001, n_alphas=100, alphas=None, fit_intercept=True, normalize=False, precompute='auto', max_iter=1000, tol=0.0001, copy_X=True, cv=None, verbose=False, n_jobs=None, positive=False, random_state=None, selection='cyclic') ###Code # Import the library from sklearn.linear_model import LassoLarsCV name = "LassoLars-->" reg_lassoLarsCV = LassoLarsCV(cv = 10).fit(x_train, y_train) y_pred_lassoLarsCV = reg_lassoLarsCV.predict(x_test) print(name +" Coefficient ",reg_lassoLarsCV.coef_) print(name + " Intercept " , reg_lassoLarsCV.intercept_) ## print("****"*5+"Accuracy Test Model Fitting "+"****"*5) print(name + "Score for train data set :",reg_lassoLarsCV.score(x_train,y_train)) print(name + "Score for test data Set",reg_lassoLarsCV.score(x_test,y_test)) print(name + "Score for Predictecd data Set",reg_lassoLarsCV.score(x_test,y_pred_lassoLarsCV)) ## print("\n"+"****"*5+"R2 Score"+"****"*5) print(name + "R2 Score for test is = ", r2_score(y_test,y_pred_lassoLarsCV)) ## print("\n"+"****"*5+"Mean Absolute Error"+"****"*5) print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_lassoLarsCV)) ## print("\n"+"****"*5+" Mean Squared Error"+"****"*5) print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_lassoLarsCV)) ## ###Output LassoLars --> Coefficient [0. 0. 0. ... 0. 0. 0.] 
LassoLars --> Intercept -315.9528711455571 ********************Accuracy Test Model Fitting ******************** LassoLars -->Score for train data set : 0.5247441652685488 LassoLars -->Score for test data Set 0.4377139939693927 LassoLars -->Score for Predictecd data Set 1.0 ********************R2 Score******************** LassoLars -->R2 Score for test is = 0.4377139939693927 ********************Mean Absolute Error******************** LassoLars -->Mean Absolute Error of Test = 2.3101491411023534 ******************** Mean Squared Error******************** LassoLars -->Mean Squared Error of Test = 8.656126627670945 ###Markdown 2.9 LassoLarsIC**Lasso model fit with Lars using `BIC or AIC` for model selection** The optimization objective for Lasso is:**```(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1```**``class sklearn.linear_model.**LassoLarsIC**((criterion='aic', fit_intercept=True, verbose=False, normalize=True, precompute='auto', max_iter=500, eps=2.220446049250313e-16, copy_X=True, positive=False`` ###Code #Import the Model from sklearn.linear_model import LassoLarsIC name = "LassoLarsIC-->" #Create a object reg_LassoLarsIC = LassoLarsIC(criterion='bic').fit(x_train,y_train) y_pred_lassoLarsIC = reg_LassoLarsIC.predict(x_test) print(name +" Coefficient ",reg_LassoLarsIC.coef_) print(name + " Intercept " , reg_LassoLarsIC.intercept_) ## print("****"*5+"Accuracy Test Model Fitting "+"****"*5) print(name + "Score for train data set :",reg_LassoLarsIC.score(x_train,y_train)) print(name + "Score for test data Set",reg_LassoLarsIC.score(x_test,y_test)) print(name + "Score for Predictecd data Set",reg_LassoLarsIC.score(x_test,y_pred_lassoLarsIC)) ## print("\n"+"****"*5+"R2 Score"+"****"*5) print(name + "R2 Score for test is = ", r2_score(y_test,y_pred_lassoLarsIC)) ## print("\n"+"****"*5+"Mean Absolute Error"+"****"*5) print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_lassoLarsIC)) ## print("\n"+"****"*5+" Mean Squared Error"+"****"*5) print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_lassoLarsIC)) ## ###Output LassoLarsIC--> Coefficient [0. 0. 0. ... 0. 0. 0.] 
LassoLarsIC--> Intercept -30.39341871504015 ********************Accuracy Test Model Fitting ******************** LassoLarsIC-->Score for train data set : 0.24330522318183745 LassoLarsIC-->Score for test data Set 0.20526710315599284 LassoLarsIC-->Score for Predictecd data Set 1.0 ********************R2 Score******************** LassoLarsIC-->R2 Score for test is = 0.20526710315599284 ********************Mean Absolute Error******************** LassoLarsIC-->Mean Absolute Error of Test = 2.782543815044541 ******************** Mean Squared Error******************** LassoLarsIC-->Mean Squared Error of Test = 12.234536368459809 ###Markdown 2.10 OrthogonalMatchingPursuit **`Orthogonal Matching Pursuit model (OMP)`**class sklearn.linear_model.**OrthogonalMatchingPursuit**(n_nonzero_coefs=None, tol=None, fit_intercept=True, normalize=True, precompute='auto') ###Code # Import the Model from sklearn.linear_model import OrthogonalMatchingPursuit name = "OPM --> " #Create a Object reg_opm = OrthogonalMatchingPursuit().fit(x_train , y_train) y_pred_opm = reg_opm.predict(x_test) print(name +" Coefficient ",reg_opm.coef_) print(name + " Intercept " , reg_opm.intercept_) ## print("****"*5+"Accuracy Test Model Fitting "+"****"*5) print(name + "Score for train data set :",reg_opm.score(x_train,y_train)) print(name + "Score for test data Set",reg_opm.score(x_test,y_test)) print(name + "Score for Predictecd data Set",reg_opm.score(x_test,y_pred_opm)) ## print("\n"+"****"*5+"R2 Score"+"****"*5) print(name + "R2 Score for test is = ", r2_score(y_test,y_pred_opm)) ## print("\n"+"****"*5+"Mean Absolute Error"+"****"*5) print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_opm)) ## print("\n"+"****"*5+" Mean Squared Error"+"****"*5) print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_opm)) ## ###Output OPM --> Coefficient [0. 0. 0. ... 0. 0. 0.] OPM --> Intercept [-867.74139657] ********************Accuracy Test Model Fitting ******************** OPM --> Score for train data set : 0.6895010944790003 OPM --> Score for test data Set 0.4849084512278494 OPM --> Score for Predictecd data Set 1.0 ********************R2 Score******************** OPM --> R2 Score for test is = 0.4849084512278494 ********************Mean Absolute Error******************** OPM --> Mean Absolute Error of Test = 2.2216851683088255 ******************** Mean Squared Error******************** OPM --> Mean Squared Error of Test = 7.929590320930334 ###Markdown 2.11 OrthogonalMatchingPursuitCVclass sklearn.linear_model.**OrthogonalMatchingPursuitCV**(copy=True, fit_intercept=True, normalize=True, max_iter=None, cv=None, n_jobs=None, verbose=False)**Cross-validated Orthogonal Matching Pursuit model (OMP)****fit(self, X, y)**--> Fit the model using X, y as training data.**get_params(self[, deep])**--> Get parameters for this estimator.**predict(self, X)**--> Predict using the linear model.**score(self, X, y[, sample_weight])**--> Return the coefficient of determination R^2 of the prediction.**set_params(self, \*\*params)**--> Set the parameters of this estimator. 
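###Markdown Orthogonal Matching Pursuit is aimed at problems whose true coefficient vector is sparse, and n_nonzero_coefs should roughly match that expected sparsity. The sketch below builds a small synthetic sparse problem and checks which coefficients OMP recovers; the sizes and index choices are arbitrary assumptions, independent of the data used elsewhere in this notebook. ###Code import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit

rng = np.random.RandomState(0)
n_samples, n_features, n_active = 100, 30, 3

X_demo = rng.randn(n_samples, n_features)
true_coef = np.zeros(n_features)
true_coef[[2, 11, 25]] = [5.0, -3.0, 2.0]           # ground-truth sparse signal
y_demo = X_demo @ true_coef + 0.01 * rng.randn(n_samples)

# Ask OMP for exactly n_active non-zero coefficients
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_active)
omp.fit(X_demo, y_demo)
print("True active indices     :", np.flatnonzero(true_coef))
print("Recovered active indices:", np.flatnonzero(omp.coef_)) ###Output _____no_output_____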
###Code # import the Library from sklearn.linear_model import OrthogonalMatchingPursuitCV name = "OPM CV -->" reg_opmCV = OrthogonalMatchingPursuitCV(cv = 10).fit(x_train, y_train) y_pred_opmCV = reg_opmCV.predict(x_test) print(name +" Coefficient ",reg_opmCV.coef_) print(name + " Intercept " , reg_opmCV.intercept_) ## print("****"*5+"Accuracy Test Model Fitting "+"****"*5) print(name + "Score for train data set :",reg_opmCV.score(x_train,y_train)) print(name + "Score for test data Set",reg_opmCV.score(x_test,y_test)) print(name + "Score for Predictecd data Set",reg_opmCV.score(x_test,y_pred_opm)) ## print("\n"+"****"*5+"R2 Score"+"****"*5) print(name + "R2 Score for test is = ", r2_score(y_test,y_pred_opmCV)) ## print("\n"+"****"*5+"Mean Absolute Error"+"****"*5) print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_opmCV)) ## print("\n"+"****"*5+" Mean Squared Error"+"****"*5) print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_opmCV)) ## ###Output OPM CV --> Coefficient [0. 0. 0. ... 0. 0. 0.] OPM CV --> Intercept -867.7413965738857 ********************Accuracy Test Model Fitting ******************** OPM CV -->Score for train data set : 0.6895010944790003 OPM CV -->Score for test data Set 0.4849084512278494 OPM CV -->Score for Predictecd data Set 1.0 ********************R2 Score******************** OPM CV -->R2 Score for test is = 0.4849084512278494 ********************Mean Absolute Error******************** OPM CV -->Mean Absolute Error of Test = 2.2216851683088255 ******************** Mean Squared Error******************** OPM CV -->Mean Squared Error of Test = 7.929590320930334 ###Markdown Part 3 . Bayesian Regressors 3.1 ARDRegression --> bayesian ARD regression```class sklearn.linear_model.ARDRegression(n_iter=300, tol=0.001, alpha_1=1e-06, alpha_2=1e-06, lambda_1=1e-06,lambda_2=1e-06, compute_score=False, threshold_lambda=10000.0, fit_intercept=True, normalize=False, copy_X=True, verbose=False)```http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdfpage=15 ###Code #import library from sklearn.linear_model import ARDRegression name = "ARDRegression" clf_ard = ARDRegression() clf_ard.fit(x_train,y_train) pred_y_ard = clf.predict(x_test) print(name +" Coefficient ",clf_ard.coef_) print(name + " Intercept " , clf_ard.intercept_) ## print("****"*5+"Accuracy Test Model Fitting "+"****"*5) print(name + "Score for train data set :",clf_ard.score(x_train,y_train)) print(name + "Score for test data Set",clf_ard.score(x_test,y_test)) print(name + "Score for Predictecd data Set",clf_ard.score(x_test,pred_y_ard)) ## print("\n"+"****"*5+"R2 Score"+"****"*5) print(name + "R2 Score for test is = ", r2_score(y_test,pred_y_ard)) ## print("\n"+"****"*5+"Mean Absolute Error"+"****"*5) print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,pred_y_ard)) ## print("\n"+"****"*5+" Mean Squared Error"+"****"*5) print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,pred_y_ard)) ## ###Output _____no_output_____ ###Markdown 3.2 BayesianRidge**```class sklearn.linear_model.BayesianRidge(n_iter=300, tol=0.001, alpha_1=1e-06, alpha_2=1e-06, lambda_1=1e-06, lambda_2=1e-06, alpha_init=None, lambda_init=None, compute_score=False, fit_intercept=True, normalize=False, copy_X=True, verbose=False)```** ###Code ## Import the library from sklearn.linear_modle import BayesianRidge name = "BayesianRidge" clf_bay = bayesianRidge().fit(x_train , y_train) y_pred_bay = clf_bay.predict(x_test) print(name +" Coefficient 
",clf_bay.coef_) print(name + " Intercept " , clf_bay.intercept_) ## print("****"*5+"Accuracy Test Model Fitting "+"****"*5) print(name + "Score for train data set :",clf_bay.score(x_train,y_train)) print(name + "Score for test data Set",clf_bay.score(x_test,y_test)) print(name + "Score for Predictecd data Set", clf_bay.score(x_test,y_pred_bay)) ## print("\n"+"****"*5+"R2 Score"+"****"*5) print(name + "R2 Score for test is = ", r2_score(y_test,y_pred_bay)) ## print("\n"+"****"*5+"Mean Absolute Error"+"****"*5) print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_bay)) ## print("\n"+"****"*5+" Mean Squared Error"+"****"*5) print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_bay)) ## ###Output _____no_output_____ ###Markdown +++++++++++++++++++++++++++++++++++++++++ Part 4. MultiTask Linear Regressors with variable selectionThese estimators fit multiple regression problems (or tasks) jointly, while including sparse coefficients.While the inferred coefficients may differ between the tasks, they are constrained to agree on the features that are selected(non-zero coefficients)1. linear_model.**MultiTaskElasticNet([alpha, …])**--> Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer2. linear_model.**MultiTaskElasticNetCV([…])**--> Multi-task L1/L2 ElasticNet with built-in cross-validation.3. linear_model.**MultiTaskLasso([alpha, …])**--> Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer.4. linear_model.**MultiTaskLassoCV([eps, …])**--> Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer. 4.1 MultiTaskElasticNetclass sklearn.linear_model.**MultiTaskElasticNet**(alpha=1.0, l1_ratio=0.5, fit_intercept=True, normalize=False, copy_X=True, max_iter=1000, tol=0.0001, warm_start=False, random_state=None, selection='cyclic')**Multi-task ElasticNet Model trained with L1/L2 mixed-norm as regularizer**The optimization objective for MultiTaskElasticNet is : (1 / (2 * n_samples)) * ||Y - XW||_Fro^2 + alpha * l1_ratio * ||W||_21+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2 where ||W||_21 = sum_i sqrt(sum_j w_ij ^ 2) ###Code # import library from sklearn.linear_model import MultiTaskElasticNet name = "MT_Enet" reg_MT_Enet = MultiTaskElasticNet(alpha = 0.10).fit(x_train, y_train) y_pred_MT_Enet = reg_MT_Enet.predict(X_test) print(name +" Coefficient ",reg_MT_Enet.coef_) print(name + " Intercept " , reg_MT_Enet.intercept_) ## print("****"*5+"Accuracy Test Model Fitting "+"****"*5) print(name + "Score for train data set :",reg_MT_Enet.score(x_train,y_train)) print(name + "Score for test data Set",reg_MT_Enet.score(x_test,y_test)) print(name + "Score for Predictecd data Set", reg_MT_Enet.score(x_test,y_pred_MT_Enet)) ## print("\n"+"****"*5+"R2 Score"+"****"*5) print(name + "R2 Score for test is = ", r2_score(y_test,y_pred_MT_Enet)) ## print("\n"+"****"*5+"Mean Absolute Error"+"****"*5) print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_MT_Enet)) ## print("\n"+"****"*5+" Mean Squared Error"+"****"*5) print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_MT_Enet)) ## ###Output _____no_output_____ ###Markdown 4.2 MultiTaskElasticNetCV class sklearn.linear_model.**MultiTaskElasticNetCV**(l1_ratio=0.5, eps=0.001, n_alphas=100, alphas=None, fit_intercept=True, normalize=False, max_iter=1000, tol=0.0001, cv=None, copy_X=True, verbose=0, n_jobs=None, random_state=None, selection='cyclic')**Multi-task L1/L2 ElasticNet with built-in cross-validation.** ###Code r """(1 
/ (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * l1_ratio * ||W||_21 + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2 """ #import the Model from sklearn.linear_model import MultiTaskElasticNetCV reg_MT_ECV = MultiTaskElasticNetCV(cv =10).fit(x_train , y_train) y_pred_MT_ECV = reg_MT_ECV.predict(x_test) print(name +" Coefficient ",reg_MT_ECV.coef_) print(name + " Intercept " ,reg_MT_ECV.intercept_) ## print("****"*5+"Accuracy Test Model Fitting "+"****"*5) print(name + "Score for train data set :",reg_MT_ECV.score(x_train,y_train)) print(name + "Score for test data Set",reg_MT_ECV.score(x_test,y_test)) print(name + "Score for Predictecd data Set", reg_MT_Enet.score(x_test,y_pred_MT_ECV)) ## print("\n"+"****"*5+"R2 Score"+"****"*5) print(name + "R2 Score for test is = ", r2_score(y_test,y_pred_MT_ECV)) ## print("\n"+"****"*5+"Mean Absolute Error"+"****"*5) print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_MT_ECV)) ## print("\n"+"****"*5+" Mean Squared Error"+"****"*5) print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_MT_ECV)) ## ###Output _____no_output_____ ###Markdown 4.3 MultiTaskLassoclass sklearn.linear_model.MultiTaskLasso(alpha=1.0, fit_intercept=True, normalize=False, copy_X=True, max_iter=1000, tol=0.0001, warm_start=False, random_state=None, selection='cyclic')The optimization objective for Lasso is:**(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21**where**||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}** ###Code #import the Library from sklearn.linear_model import MultiTaskLasso name = "MT_Lasso" #Create a object clf_MT_Lasso = MultiTaskLasso(alpha = 0.1).fit(x_train,y_train) y_pred_MT_Lasso = clf_MT_Lasso.predict(x_test) print(name +" Coefficient ",clf_MT_Lasso.coef_) print(name + " Intercept " ,clf_MT_Lasso.intercept_) ## print("****"*5+"Accuracy Test Model Fitting "+"****"*5) print(name + "Score for train data set :",clf_MT_Lasso.score(x_train,y_train)) print(name + "Score for test data Set",clf_MT_Lasso.score(x_test,y_test)) print(name + "Score for Predictecd data Set", clf_MT_Lasso.score(x_test,y_pred_MT_Lasso)) ## print("\n"+"****"*5+"R2 Score"+"****"*5) print(name + "R2 Score for test is = ", r2_score(y_test,y_pred_MT_Lasso)) ## print("\n"+"****"*5+"Mean Absolute Error"+"****"*5) print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_MT_Lasso)) ## print("\n"+"****"*5+" Mean Squared Error"+"****"*5) print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_MT_Lasso)) ## ###Output _____no_output_____ ###Markdown 4.4 MultiTasklassoCV class sklearn.linear_model.**MultiTaskLassoCV**(eps=0.001, n_alphas=100, alphas=None, fit_intercept=True, normalize=False, max_iter=1000, tol=0.0001, copy_X=True, cv=None, verbose=False, n_jobs=None, random_state=None, selection='cyclic')**Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer**(1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21Where||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2} ###Code #import the library from sklearn.linear_model import MultiTaskLassoCV name = "MT_LassoCV" #Create a object reg_MT_LassoCV = MultiTaskLassoCV(cv =10,noise=4, random_state =0).fit(x_train,y_train) y_pred_MT_LassoCV = reg_MT_LassoCV.predict(x_test) print(name +" Coefficient ",reg_MT_LassoCV.coef_) print(name + " Intercept " ,reg_MT_LassoCV.intercept_) ## print("****"*5+"Accuracy Test Model Fitting "+"****"*5) print(name + "Score for train data set :",reg_MT_LassoCV.score(x_train,y_train)) print(name + "Score for test data 
Set",reg_MT_LassoCV.score(x_test,y_test)) print(name + "Score for Predictecd data Set", reg_MT_LassoCV.score(x_test,y_pred_MT_LassoCV)) ## print("\n"+"****"*5+"R2 Score"+"****"*5) print(name + "R2 Score for test is = ", r2_score(y_test,y_pred_MT_LassoCV)) ## print("\n"+"****"*5+"Mean Absolute Error"+"****"*5) print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_MT_LassoCV)) ## print("\n"+"****"*5+" Mean Squared Error"+"****"*5) print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_MT_LassoCV)) ## ###Output _____no_output_____ ###Markdown 5 Outlier-robust regressors`Any estimator using the Huber loss would also be robust to outliers, e.g. SGDRegressor with loss='huber'.`1. **linear_model.HuberRegressor([epsilon, …])--> Linear regression model that is robust to outliers.**2. **linear_model.RANSACRegressor([…])--> RANSAC (RANdom SAmple Consensus) algorithm.**3. **linear_model.TheilSenRegressor([…])--> Theil-Sen Estimator: robust multivariate regression model.** 5.1 HuberRegressor()`class sklearn.linear_model.HuberRegressor(epsilon=1.35, max_iter=100, alpha=0.0001, warm_start=False, fit_intercept=True, tol=1e-05)`**Methods** **fit(self, X, y[, sample_weight])**--> Fit the model according to the given training data.**get_params(self[, deep])**--> Get parameters for this estimator.**predict(self, X)**--> Predict using the linear model.**score(self, X, y[, sample_weight])**--> Return the coefficient of determination R^2 of the prediction.**set_params(self, \*\*params)**--> Set the parameters of this estimator. ###Code # import Library from sklearn.linear_model import HuberRegressor , LinearRegression name = "HuberRegressor" huber = HuberRegressor().fit(x_train,y_train) y_pred_huber = huber.predict(x_test) print(name +" Coefficient ",huber.coef_) print(name + " Intercept " ,huber.intercept_) ## print("****"*5+"Accuracy Test Model Fitting "+"****"*5) print(name + "Score for train data set :",huber.score(x_train,y_train)) print(name + "Score for test data Set",huber.score(x_test,y_test)) print(name + "Score for Predictecd data Set", huber.score(x_test,y_pred_huber)) ## print("\n"+"****"*5+"R2 Score"+"****"*5) print(name + "R2 Score for test is = ", r2_score(y_test,y_pred_huber)) ## print("\n"+"****"*5+"Mean Absolute Error"+"****"*5) print(name+ "Mean Absolute Error of Test = ", mean_absolute_error(y_test,y_pred_huber)) ## print("\n"+"****"*5+" Mean Squared Error"+"****"*5) print(name + "Mean Squared Error of Test = ",mean_squared_error(y_test,y_pred_huber)) ## ###Output _____no_output_____ ###Markdown Deep Learning ###Code #from tensorflow.python import keras #from tensorflow.python.keras.models import Sequential #from tensorflow.python.keras.layers import Dense, Flatten, Conv2D, Dropout #import scipy #import numpy as np #import matplotlib as plt #import pandas as pd #import sklearn #import pydot #import h5py #import tensorflow #from tensorflow import keras ##import theano #print("SCIPY_Version",scipy.__version__) #print("Numpy_Version",np.__version__) #We imported as np so #print("matplotlib_Version",plt.__version__) #we imported as plt so #print("pandas_Version",pd.__version__) #print("Sk_learn_Version",sklearn.__version__) #print("pydot_Version",pydot.__version__) #print("h5py_Version",h5py.__version__) ##print("theano_Version",theano.__version__) #print("tensorflow_Version",tensorflow.__version__) ###Output _____no_output_____ ###Markdown Checking Weather the basic Deep Learning Model is running or Not Build the Model ###Code #model = 
keras.Sequential([
#     keras.layers.Flatten(input_shape = (28,28)),
#
#    keras.layers.Dense(128, activation = "relu",input_shape = [len(x_train)]),
#    keras.layers.Dense(10,activation= "softmax")
#
#    keras.Dense(1)
#])
#model.summary()
#len(x_train) ###Output _____no_output_____ ###Markdown Compile the Model ###Code #Loss Function --> This measures how accurate the model is during training. We want to minimize this function to "steer"
# the model in the right direction
# Optimizer --> This is how the model is updated based on the data it sees and its loss function
#Metrics --> Used to monitor the training and testing steps
#The following example uses accuracy, the fraction of the images that are correctly classified. ###Output _____no_output_____
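###Markdown To make the compile and fit steps described above concrete, the cell below sketches how they could look for the regression target in this notebook. It assumes tensorflow.keras is available and that the x_train/y_train split from the earlier cells exists; the layer sizes, the 'adam' optimizer, the mean-squared-error loss and the epoch count are illustrative choices, not tuned settings. ###Code from tensorflow import keras

# Small fully connected regressor for the continuous target (illustrative sizes)
model = keras.Sequential([
    keras.layers.Dense(128, activation="relu", input_shape=(x_train.shape[1],)),
    keras.layers.Dense(32, activation="relu"),
    keras.layers.Dense(1)                       # single continuous output
])

# Compile: loss to minimize, optimizer that applies the updates, metrics to monitor
model.compile(optimizer="adam", loss="mse", metrics=["mae"])
model.summary()

# A short fit purely as a smoke test that the model runs end to end
history = model.fit(x_train.values, y_train.values,
                    validation_split=0.1, epochs=5, batch_size=32, verbose=1) ###Output _____no_output_____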
contrib/html_demo/JupyterCode/1_image_similarity_export.ipynb
###Markdown Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Image Similarity ExportIn the Scenario->Image Similarity notebook [12_fast_retrieval.ipynb](12_fast_retrieval.ipynb) we implemented the approximate nearest neighbor search method to find similar images from a group of reference images, given a query input image. This notebook repeats some of those steps with the goal of exporting computed reference image features to text file for use in visualizing the results in an HTML web interface. To be able to test the model in a simple HTML interface, we export: the computed reference image features, a separate text file of reference image file names, and thumbnail versions of the reference images. The first two files are initially exported as text files then compressed into zip files to minimuze file size. The reference images are converted to 150x150 pixel thumbnails and stored in a flat directory. All exports are saved to the UICode folder. Notebook **2_upload_ui** is used to upload the exports to your Azure Blob storage account for easy public access. It is assumed you already completed the steps in notebook [12_fast_retrieval.ipynb](12_fast_retrieval.ipynb) and have deployed your query image processing model to an Azure ML resource (container services, Kubernetes services, ML web app, etc.) with a queryable, CORS-compliant API endpoint. Initialization ###Code # Ensure edits to libraries are loaded and plotting is shown in the notebook. %matplotlib inline %reload_ext autoreload %autoreload 2 # Standard python libraries import sys import os import numpy as np from pathlib import Path import random import scrapbook as sb from sklearn.neighbors import NearestNeighbors from tqdm import tqdm import zipfile from zipfile import ZipFile # Fast.ai import fastai from fastai.vision import ( load_learner, cnn_learner, DatasetType, ImageList, imagenet_stats, models, PIL ) # Computer Vision repository sys.path.extend([".", "../../.."]) # to access the utils_cv library from utils_cv.classification.data import Urls from utils_cv.common.data import unzip_url from utils_cv.common.gpu import which_processor, db_num_workers from utils_cv.similarity.metrics import compute_distances from utils_cv.similarity.model import compute_features_learner from utils_cv.similarity.plot import plot_distances, plot_ranks_distribution print(f"Fast.ai version = {fastai.__version__}") which_processor() ###Output Fast.ai version = 1.0.57 Cuda is not available. Torch is using CPU ###Markdown Data preparationWe start with parameter specifications and data preparation. We use the *Fridge objects* dataset, which is composed of 134 images, divided into 4 classes: can, carton, milk bottle and water bottle. 
###Code # Data location DATA_PATH = unzip_url(Urls.fridge_objects_path, exist_ok=True) # Image reader configuration BATCH_SIZE = 16 IM_SIZE = 300 # Number of comparison of nearest neighbor versus exhaustive search for accuracy computation NUM_RANK_ITER = 100 # Size of thumbnail images in pixels MAX_SIZE = (150, 150) # Load images into fast.ai's ImageDataBunch object random.seed(642) data = ( ImageList.from_folder(DATA_PATH) .split_by_rand_pct(valid_pct=0.8, seed=20) .label_from_folder() .transform(size=IM_SIZE) .databunch(bs=BATCH_SIZE, num_workers = db_num_workers()) .normalize(imagenet_stats) ) print(f"Training set: {len(data.train_ds.x)} images, validation set: {len(data.valid_ds.x)} images") ###Output Training set: 27 images, validation set: 107 images ###Markdown Load modelBelow we load a [ResNet18](https://arxiv.org/pdf/1512.03385.pdf) CNN from fast.ai's library which is pre-trained on ImageNet. ###Code learn = cnn_learner(data, models.resnet18, ps=0) ###Output _____no_output_____ ###Markdown Alternatively, one can load a model which was trained using the [01_training_and_evaluation_introduction.ipynb](01_training_and_evaluation_introduction.ipynb) notebook using these lines of code:```python learn = load_learner(".", 'image_similarity_01_model') learn.data = data``` Feature extractionWe now compute the DNN features for each image in our validation set. We use the output of the penultimate layer as our image representation, which, for the Resnet-18 model has a dimensionality of 512 floating point values. ###Code # Use penultimate layer as image representation embedding_layer = learn.model[1][-2] print(embedding_layer) # Compute DNN features for all validation images valid_features = compute_features_learner(data, DatasetType.Valid, learn, embedding_layer) print(f"Computed DNN features for the {len(list(valid_features))} validation images,\ each consisting of {len(valid_features[list(valid_features)[0]])} floating point values.\n") # Normalize all reference features to be of unit length valid_features_list = np.array(list(valid_features.values())) valid_features_list /= np.linalg.norm(valid_features_list, axis=1)[:,None] ###Output _____no_output_____ ###Markdown Export for HTML InterfaceHere we package all of the data for upload to Blob Storage to interact with the model in a simple HTML interface. First, we export the computed reference features to ZIP file. ###Code f = open("ref_features.txt", 'w') f.write('[') f.writelines('],\n'.join('[' + ','.join(map(str,i)) for i in valid_features_list)) f.write(']]') f.close() ###Output _____no_output_____ ###Markdown Then we export the reference image file names to disk. Exported file names will include the parent directory name as well. ###Code f = open("ref_filenames.txt", 'w') f.write('["') f.writelines('",\n"'.join((i[len(DATA_PATH)+1:]).replace("/","_").replace("\\","_") for i in valid_features.keys())) f.write('"]') f.close() ###Output _____no_output_____ ###Markdown Next we compress the exported text data into Zip files. 
###Code # Writing the text files to zip archives, one by one
with ZipFile('ref_features.zip','w', zipfile.ZIP_DEFLATED) as zf:
    zf.write("ref_features.txt")
with ZipFile('ref_filenames.zip','w', zipfile.ZIP_DEFLATED) as zf:
    zf.write("ref_filenames.txt")

# Remove the txt files
os.remove("ref_features.txt")
os.remove("ref_filenames.txt")

# Make a subfolder to hold all HTML demo files and a subfolder for the zip files
if not os.path.exists('../UICode'):
    os.makedirs('../UICode')
if not os.path.exists('../UICode/data'):
    os.makedirs('../UICode/data')

# Move the zip files to the new directory
os.replace("ref_features.zip", "../UICode/data/ref_features.zip")
os.replace("ref_filenames.zip", "../UICode/data/ref_filenames.zip") ###Output _____no_output_____ ###Markdown Next, we resize the reference images to 150x150 pixel thumbnails in a new directory called 'small-150'. ###Code # Make a subfolder to hold the 150x150 thumbnail images
if not os.path.exists('../UICode/small-150'):
    os.makedirs('../UICode/small-150')
path_mr = '../UICode/small-150'

# Now resize the images to thumbnails
for root, dirs, files in os.walk(DATA_PATH):
    for file in files:
        if file.endswith(".jpg"):
            #fname = path_mr +'/' + root[len(DATA_PATH)+1:] + '_' + file
            fname = os.path.join(path_mr, root[len(DATA_PATH)+1:] + '_' + file)
            im = PIL.Image.open(os.path.join(root, file))
            im.thumbnail(MAX_SIZE)
            im.save(fname, 'JPEG', quality=70) ###Output _____no_output_____
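###Markdown As a quick sanity check before uploading, the two archives written above can be read back to confirm that the number of feature vectors matches the number of file names. The sketch below assumes the zip files are in ../UICode/data and that the exported text is the bracketed list format produced earlier, which parses as JSON. ###Code import json
import zipfile

# Read both exports back out of their archives
with zipfile.ZipFile('../UICode/data/ref_features.zip') as zf:
    features = json.loads(zf.read('ref_features.txt').decode('utf-8'))
with zipfile.ZipFile('../UICode/data/ref_filenames.zip') as zf:
    filenames = json.loads(zf.read('ref_filenames.txt').decode('utf-8'))

print(f"{len(features)} feature vectors of length {len(features[0])}, {len(filenames)} file names")
assert len(features) == len(filenames), "Exported features and file names are out of sync" ###Output _____no_output_____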
Tennis-final.ipynb
###Markdown Collaboration and Competition---In this notebook, you will learn how to use the Unity ML-Agents environment for the third project of the [Deep Reinforcement Learning Nanodegree](https://www.udacity.com/course/deep-reinforcement-learning-nanodegree--nd893) program. 1. Start the EnvironmentWe begin by importing the necessary packages. If the code cell below returns an error, please revisit the project instructions to double-check that you have installed [Unity ML-Agents](https://github.com/Unity-Technologies/ml-agents/blob/master/docs/Installation.md) and [NumPy](http://www.numpy.org/). ###Code from unityagents import UnityEnvironment import numpy as np from collections import deque import matplotlib.pyplot as plt import numpy as np import torch import time ###Output _____no_output_____ ###Markdown Next, we will start the environment! **_Before running the code cell below_**, change the `file_name` parameter to match the location of the Unity environment that you downloaded.- **Mac**: `"path/to/Tennis.app"`- **Windows** (x86): `"path/to/Tennis_Windows_x86/Tennis.exe"`- **Windows** (x86_64): `"path/to/Tennis_Windows_x86_64/Tennis.exe"`- **Linux** (x86): `"path/to/Tennis_Linux/Tennis.x86"`- **Linux** (x86_64): `"path/to/Tennis_Linux/Tennis.x86_64"`- **Linux** (x86, headless): `"path/to/Tennis_Linux_NoVis/Tennis.x86"`- **Linux** (x86_64, headless): `"path/to/Tennis_Linux_NoVis/Tennis.x86_64"`For instance, if you are using a Mac, then you downloaded `Tennis.app`. If this file is in the same folder as the notebook, then the line below should appear as follows:```env = UnityEnvironment(file_name="Tennis.app")``` ###Code env = UnityEnvironment(file_name="Tennis_Linux/Tennis.x86") ###Output INFO:unityagents: 'Academy' started successfully! Unity Academy name: Academy Number of Brains: 1 Number of External Brains : 1 Lesson number : 0 Reset Parameters : Unity brain name: TennisBrain Number of Visual Observations (per agent): 0 Vector Observation space type: continuous Vector Observation space size (per agent): 8 Number of stacked Vector Observation: 3 Vector Action space type: continuous Vector Action space size (per agent): 2 Vector Action descriptions: , ###Markdown Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python. ###Code # get the default brain brain_name = env.brain_names[0] brain = env.brains[brain_name] ###Output _____no_output_____ ###Markdown 2. Examine the State and Action SpacesIn this environment, two agents control rackets to bounce a ball over a net. If an agent hits the ball over the net, it receives a reward of +0.1. If an agent lets a ball hit the ground or hits the ball out of bounds, it receives a reward of -0.01. Thus, the goal of each agent is to keep the ball in play.The observation space consists of 8 variables corresponding to the position and velocity of the ball and racket. Two continuous actions are available, corresponding to movement toward (or away from) the net, and jumping. Run the code cell below to print some information about the environment. 
###Code # reset the environment env_info = env.reset(train_mode=True)[brain_name] # number of agents num_agents = len(env_info.agents) print('Number of agents:', num_agents) # size of each action action_size = brain.vector_action_space_size print('Size of each action:', action_size) # examine the state space states = env_info.vector_observations state_size = states.shape[1] print('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size)) print('The state for the first agent looks like:', states[0]) ###Output Number of agents: 2 Size of each action: 2 There are 2 agents. Each observes a state with length: 24 The state for the first agent looks like: [ 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. -6.65278625 -1.5 -0. 0. 6.83172083 6. -0. 0. ] ###Markdown When finished, you can close the environment. 4. Initialize the Agents ###Code from multi_ddpg_agent import * # initialize agents agent_one = Agent(state_size, action_size, random_seed=42) agent_two = Agent(state_size, action_size, random_seed=42) def multi_ddpg(n_episodes=5000, max_t=2000, threshold=0.5): scores_total = [] scores_deque = deque(maxlen=100) scores_final = [] for i_episode in range(1, n_episodes+1): env_info = env.reset(train_mode=True)[brain_name] # flatten states states = np.reshape(env_info.vector_observations, (1,48)) agent_one.reset() agent_two.reset() scores = np.zeros(num_agents) for t in range(max_t): # each agent takes actions action_alpha = agent_one.act(states) action_beta = agent_two.act(states) # join the actions from both agents and reshape actions = np.concatenate((action_alpha, action_beta), axis=0).flatten() # send actions from both agents to the environment env_info = env.step(actions)[brain_name] # flatten next states next_states = np.reshape(env_info.vector_observations, (1,48)) rewards = env_info.rewards done = env_info.local_done agent_one.step(states, actions, rewards[0], next_states, done, 0) agent_two.step(states, actions, rewards[1], next_states, done, 1) states = next_states scores += np.max(rewards) if np.any(done): break # mean score on 2 agents mean_score = np.mean(scores) scores_total.append(mean_score) # mean score on 2 agents for 100 episodes scores_deque.append(mean_score) # mean score overall for 100 episodes avg_score = np.mean(scores_deque) scores_final.append(avg_score) # printing statistics if i_episode % 100 == 0: print('Episode: {}, Score: {:.4f}, \tAVG Score: {:.4f}'.format(i_episode, mean_score, avg_score)) if avg_score > threshold: print('\nEnvironment succesfully solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode, avg_score)) torch.save(agent_one.actor_local.state_dict(), 'checkpoint_actor_one.pth') torch.save(agent_one.critic_local.state_dict(), 'checkpoint_critic_one.pth') torch.save(agent_two.actor_local.state_dict(), 'checkpoint_actor_two.pth') torch.save(agent_two.critic_local.state_dict(), 'checkpoint_critic_two.pth') break return scores_total, scores_final ###Output _____no_output_____ ###Markdown 5. Training ###Code scores_total, scores_final = multi_ddpg() scores_total = np.load('scores_total.npy') scores_final = np.load('scores_final.npy') ###Output _____no_output_____ ###Markdown 6. Plots ###Code fig = plt.figure(figsize=[12,8]) ax = fig.add_subplot(111) plt.plot(np.arange(1, len(scores_total)+1), scores_total, 'g', label='Score') plt.ylabel('Score') plt.xlabel('Episode') plt.title('Episode V/S Score') plt.legend(loc='upper right'); plt.show() ###Output _____no_output_____
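###Markdown
The raw episode scores are quite noisy, so it also helps to plot the 100-episode moving average (`scores_final`) together with the +0.5 target used as the solving criterion. This is an optional sketch that reuses the `scores_total` and `scores_final` arrays loaded above.
###Code
# Sketch: overlay the 100-episode moving average and the solve threshold (+0.5).
fig = plt.figure(figsize=[12,8])
ax = fig.add_subplot(111)
plt.plot(np.arange(1, len(scores_total)+1), scores_total, 'g', alpha=0.4, label='Score')
plt.plot(np.arange(1, len(scores_final)+1), scores_final, 'b', label='100-episode average')
plt.axhline(y=0.5, color='r', linestyle='--', label='Solve threshold (+0.5)')
plt.ylabel('Score')
plt.xlabel('Episode')
plt.title('Episode V/S Score')
plt.legend(loc='upper left')
plt.show()
###Output
_____no_output_____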
spectrome/notebooks/spectral_correlation.ipynb
###Markdown Reproducing Fig4C with Matlab Code ###Code import sys sys.path.append("../../") # spectrome imports: from spectrome.forward import runforward from spectrome.utils import functions, path, generate from spectrome.brain import Brain # Other modules: import matplotlib.pyplot as plt import numpy as np from scipy.io import loadmat from scipy.stats import pearsonr from tqdm import tqdm_notebook # An external custom function for re-arranging the individual connectomes to match the Desikan-Killiany atlas brain regions indexing. def individual_connectome_order(): """Get individual connectome's brain region ordering (specific for DK86 atlas). Args: Returns: perm (type): Brain region orders for all regions empty (type): Brain regions with no MEG cort (type): Brain cortical regions. """ cort_lh = np.array([0, 1, 2, 3, 4, 6, 7, 8, 10, 11, 12, 13, 14, 15, 17, 16, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 5, 32, 33, 9]) qsubcort_lh = np.array([0, 40, 36, 39, 38, 37, 35, 34, 0]) qsubcort_rh = qsubcort_lh + 34 + 1 cort = np.concatenate([cort_lh, 34 + cort_lh]) cort_rh = cort_lh + 34 + 7 perm = np.concatenate([cort_lh, cort_rh, qsubcort_lh, qsubcort_rh]) empty = np.array([68, 77, 76, 85]) return perm, empty, cort # external function from matlab: def get_mean_C(C): #C = np.mean(C, axis = 2) C = (C + np.transpose(C))/2 ss = np.argsort(-C[:]) C = np.minimum(C, ss[int(np.round(0.01*len(ss)))]) return C # simulate random connectome: def random_WCij_und(V, E): # upper triangle of zeros: zmat = np.triu(np.ones(V)) np.fill_diagonal(zmat, 0) S = zmat.shape # shape for unraveling later linear_indices = np.squeeze(np.asarray(zmat.ravel().nonzero())) current_E = len(linear_indices)*2 while current_E > E: loc = np.unravel_index(np.random.choice(linear_indices, replace = False), S) zmat[loc] = 0 current_E = np.count_nonzero(zmat)*2 zmat = zmat + np.transpose(zmat) #scale by distance metric: #dist_ind = np.asarray(np.nonzero(np.triu(Dist))) #dist_dist = Dist[dist_ind[0,:], dist_ind[1,:]] #z_ind = np.asarray(np.nonzero(np.triu(zmat))) #for i in np.arange(0,z_ind.shape[1]): # zmat[z_ind[0,i], z_ind[1,i]] = np.random.choice(np.squeeze(dist_dist)) rmat = np.random.random([V,V])*100 Cijw = np.multiply(zmat, rmat) #Cijw = zmat + np.transpose(zmat) return Cijw # new distance matrix: # new function to create distance matrix: def exp_neg_dist_Cij(distance_matrix, sparsity = 0.8): """Generate network weighted by exponent of negative distance given sparsity: Args: distance_matrix (array): input distance matrix Returns: negdist [array]: symmetric distance weighted network with 0 diagonal """ negdist = np.exp(-distance_matrix) negdist = negdist - np.diag(np.diag(negdist)) V = len(negdist) # number of vertices #compute current sparsity: current_sparsity = np.count_nonzero(negdist == 0)/(len(negdist)**2) # Remove lowest distances to achieve sparsity: while current_sparsity < sparsity: triu_negdist = np.triu(negdist) triu_nonzeros = np.asarray(np.triu(negdist).ravel().nonzero()) # replace lowest distances with 0's min_loc = np.where(triu_negdist == np.amin(triu_negdist[np.unravel_index(triu_nonzeros, negdist.shape)])) triu_negdist[min_loc[0]] = 0 #update sparsity: negdist = triu_negdist + np.transpose(triu_negdist) current_sparsity = np.count_nonzero(negdist == 0)/(len(negdist)**2) return negdist # define data directory data_dir = path.get_data_path() # cortical areas with MEG collected + source reconstructed rois_with_MEG = np.arange(0,68) # define frequency bins: fmin = 2 fmax = 45 fvec = np.linspace(fmin, 
fmax, 40) # 40 frequency bins between 2Hz-45Hz # filter coefficients for smoothing lpf = np.array([1, 2, 5, 2, 1]) lpf = lpf/np.sum(lpf) #tempalte brain: hcp_brain = Brain.Brain() hcp_brain.add_connectome(data_dir) hcp_brain.reorder_connectome(hcp_brain.connectome, hcp_brain.distance_matrix) hcp_brain.bi_symmetric_c() hcp_brain.reduce_extreme_dir() ## Load MEG: ## individual connectomes, this is a Nregion x Nregion x Nsubjects array: ind_data = loadmat(data_dir + '/individual_subjects.mat') ind_cdk = ind_data['A_all_subjs_final'] perm, empty, cort = individual_connectome_order() ## individual optimization results: ind_optr = loadmat(data_dir + '/SCFC_opparam_individual.mat') # extract the spectral correlation values: nsubs = ind_optr['output']['feval'].shape[1] # extract parameters for use later: ind_params = np.zeros([nsubs, 7]) for i, params in enumerate(np.squeeze(ind_optr['output']['param'])): if ind_optr['output']['param'][0,i].shape[1] == 1: ind_params[i] = np.squeeze(params) ind_params = ind_params[~np.all(ind_params == 0, axis=1)] #remove 3 subjects again print(ind_params.shape) ## individual MEG frequency spectrum: ind_freq = loadmat(data_dir + '/freqMEGdata.mat') ind_psd = np.zeros([np.squeeze(ind_freq['freqMEGdata']['psd'])[0].shape[0], len(fvec), nsubs]) empty_psd = np.zeros(3) e = 0 for i, psd in enumerate(np.squeeze(ind_freq['freqMEGdata']['psd'])): if psd.shape[1] != 0: # smooth for q in np.arange(0,len(psd)): ind_psd[q,:,i] = np.convolve(psd[q,:], lpf, 'same') ind_psd[q,:,i] = np.sqrt(np.abs(ind_psd[q,:,i])) ind_psd[q,:,i] = ind_psd[q,:,i] - np.mean(ind_psd[q,:,i]) else: empty_psd[e] = i e += 1 ind_psd = np.delete(ind_psd, empty_psd, axis = 2) print(ind_psd.shape) # Nbins x Nregions x Nsubjects nsubs = ind_params.shape[0] # Plot the source localized MEG spectra: with plt.style.context('seaborn'): plt.plot(fvec, np.transpose(ind_psd[:,:,0])) plt.xlabel('Frequency (Hz)') plt.ylabel('Magnitude (dB)') plt.autoscale(enable=True, axis='x', tight='True') ###Output _____no_output_____ ###Markdown Compute Model Frequency Profile: ###Code ri_corr = np.zeros(nsubs) for s in tqdm_notebook(np.arange(0, nsubs), desc = 'Optimized Parameters'): C_ind = ind_cdk[:,:,s] # grab current subject's individual connectome F_ind = ind_psd[:,:,s] # grab current subject's MEG # permute to fix ordering: C_ind = C_ind[perm,:][:,perm] C_ind[empty,:] = 0 C_ind[:,empty] = 0 # create spectrome brain: brain = Brain.Brain() brain.add_connectome(data_dir) # grabs distance matrix brain.connectome = C_ind # re-assign connectome to individual connectome # re-ordering for DK atlas and normalizing the connectomes: brain.reorder_connectome(brain.connectome, brain.distance_matrix) brain.bi_symmetric_c() brain.reduce_extreme_dir() brain.distance_matrix = get_mean_C(brain.distance_matrix) # re-assigne optimized parameters: brain.ntf_params['tau_e'] = ind_params[s,0] brain.ntf_params['tau_i'] = ind_params[s,1] brain.ntf_params['alpha'] = ind_params[s,2] brain.ntf_params['speed'] = ind_params[s,3] brain.ntf_params['gei'] = ind_params[s,4] brain.ntf_params['gii'] = ind_params[s,5] brain.ntf_params['tauC'] = ind_params[s,6] # simulate model spectra: freq_mdl, freq_resp, _, _ = runforward.run_local_coupling_forward(brain, brain.ntf_params, fvec) freq_mdl = freq_mdl[rois_with_MEG,:] # smooth out spectra # smooth out spectra freq_out = np.zeros(freq_mdl.shape) for p in np.arange(0,len(freq_mdl)): freq_out[p,:] = np.convolve(np.abs(freq_mdl[p,:]), lpf, 'same') freq_out[p,:] = np.sqrt(np.abs(freq_out[p,:])) freq_out[p,:] = 
freq_out[p,:] - np.mean(freq_out[p,:]) #freq_out[p,:] = functions.mag2db(np.abs(freq_mdl[p,:])) #freq_out[p,:] = functions.mag2db(np.convolve(np.abs(freq_mdl[p,:]), lpf, 'same')) #freq_out[p,:] = np.convolve(np.abs(freq_mdl[p,:]), lpf, 'same') corrs = np.zeros(len(freq_out)) for c in np.arange(0, len(freq_out)): corrs[c] = pearsonr(ind_psd[c,:,s], freq_out[c,:])[0] ri_corr[s] = np.mean(corrs) num_it = 25 # 25 iterations x 36 parameters = 900 simulations total r80_corr = np.zeros([num_it, nsubs]) brain = Brain.Brain() DD = exp_neg_dist_Cij(hcp_brain.distance_matrix, sparsity = 0.8) for r in tqdm_notebook(np.arange(0, num_it), desc = 'Random connectomes'): for s in np.arange(0, nsubs): # create spectrome brain: #np.random.seed(s*r) brain.add_connectome(data_dir) # grabs distance matrix # Create random connectivity matrix: V = 86 # number of nodes E = np.floor((V ** 2)*(1-0.80)) # sparsity on number of edges: 80 brain.reducedConnectome = random_WCij_und(V, E) brain.distance_matrix = generate.distance_matrix(brain.connectome, brain.distance_matrix) brain.distance_matrix = get_mean_C(brain.distance_matrix) # simulate model spectra: brain.ntf_params['tau_e'] = ind_params[s,0] brain.ntf_params['tau_i'] = ind_params[s,1] brain.ntf_params['alpha'] = ind_params[s,2] brain.ntf_params['speed'] = ind_params[s,3] brain.ntf_params['gei'] = ind_params[s,4] brain.ntf_params['gii'] = ind_params[s,5] brain.ntf_params['tauC'] = ind_params[s,6] freq_mdl, freq_resp, _, _ = runforward.run_local_coupling_forward(brain, brain.ntf_params, fvec) freq_mdl = freq_mdl[rois_with_MEG,:] # smooth out spectra freq_out = np.zeros(freq_mdl.shape) for p in np.arange(0,len(freq_mdl)): freq_out[p,:] = np.convolve(np.abs(freq_mdl[p,:]), lpf, 'same') freq_out[p,:] = np.sqrt(np.abs(freq_out[p,:])) freq_out[p,:] = freq_out[p,:] - np.mean(freq_out[p,:]) corrs = np.zeros(len(freq_out)) for c in np.arange(0, len(freq_out)): corrs[c] = pearsonr(ind_psd[c,:,s], freq_out[c,:])[0] r80_corr[r,s] = np.mean(corrs) plt.imshow(brain.reducedConnectome) # Plot the source localized MEG spectra: with plt.style.context('seaborn'): plt.plot(fvec, np.transpose(freq_out)) plt.xlabel('Frequency (Hz)') plt.ylabel('Magnitude (dB)') plt.autoscale(enable=True, axis='x', tight='True') r80_mean = np.mean(r80_corr, axis = 0) with plt.style.context('seaborn-paper'): fig, ax = plt.subplots(1,2, sharey=True) ax[0].violinplot(ri_corr) ax[1].violinplot(r80_mean) from scipy.io import savemat savemat('individualC_optP.mat', {'ind_opt_corr': ri_corr}) print(data_dir) ###Output /media/rajlab/sachin_data_1/userdata/xihe/spectrome/spectrome/data ###Markdown Distance Connectome: ###Code num_it = 1 dist_corr = np.zeros([num_it, nsubs]) # create spectrome brain: brain = Brain.Brain() brain.add_connectome(data_dir) # grabs distance matrix brain.reorder_connectome(brain.connectome, brain.distance_matrix) # Sample distance metrics for random matrix: brain.reducedConnectome = exp_neg_dist_Cij(brain.distance_matrix, sparsity = 0.80) brain.distance_matrix = generate.distance_matrix(brain.reducedConnectome, brain.distance_matrix) for r in tqdm_notebook(np.arange(0, num_it), desc = 'Distance connectomes'): for s in np.arange(0, nsubs): # simulate model spectra: brain.ntf_params['tau_e'] = ind_params[s,0] brain.ntf_params['tau_i'] = ind_params[s,1] brain.ntf_params['alpha'] = ind_params[s,2] brain.ntf_params['speed'] = ind_params[s,3] brain.ntf_params['gei'] = ind_params[s,4] brain.ntf_params['gii'] = ind_params[s,5] brain.ntf_params['tauC'] = ind_params[s,6] freq_d, freq_resp, _, 
_ = runforward.run_local_coupling_forward(brain, brain.ntf_params, fvec) freq_d = freq_d[rois_with_MEG,:] # smooth out spectra freq_dout = np.zeros(freq_d.shape) for p in np.arange(0,len(freq_d)): #freq_dout[p,:] = np.convolve(np.abs(freq_d[p,:]), lpf, 'same') #freq_dout[p,:] = functions.mag2db(freq_dout[p,:]) #freq_out[p,:] = np.abs(freq_mdl[p,:]) #freq_dout[p, np.nonzero(freq_dout[p,:] ==0)] = np.spacing(1000000) #freq_dout[p,:] = functions.mag2db(freq_dout[p,:]) #freq_out[p, np.isinf(freq_out[p,:])] = np.spacing(1) freq_dout[p,:] = np.convolve(np.abs(freq_d[p,:]), lpf, 'same') freq_dout[p,:] = np.sqrt(np.abs(freq_dout[p,:])) freq_dout[p,:] = freq_dout[p,:] - np.mean(freq_dout[p,:]) corrs = np.zeros(len(freq_dout)) for c in np.arange(0, len(freq_dout)): corrs[c] = pearsonr(ind_psd[c,:,s], freq_dout[c,:])[0] dist_corr[r,s] = np.mean(corrs) plt.imshow(brain.reducedConnectome) # Plot the source localized MEG spectra: with plt.style.context('seaborn'): plt.plot(fvec, np.transpose(freq_dout)) plt.xlabel('Frequency (Hz)') plt.ylabel('Magnitude (dB)') plt.autoscale(enable=True, axis='x', tight='True') print('Sparsity is approximately ' + str(np.around(np.count_nonzero(brain.reducedConnectome)/(86**2),3))) dst_mean = np.mean(dist_corr, axis = 0) with plt.style.context('seaborn-paper'): df, da = plt.subplots(1,2, sharey=True) da[0].violinplot(ri_corr) da[1].violinplot(dst_mean) avg_corr = np.zeros(nsubs) # create spectrome brain: brain = Brain.Brain() brain.add_connectome(data_dir) # grabs distance matrix brain.ntf_params['tau_e'] = np.mean(ind_params[:,0]) brain.ntf_params['tau_i'] = np.mean(ind_params[:,1]) brain.ntf_params['alpha'] = np.mean(ind_params[:,2]) brain.ntf_params['speed'] = np.mean(ind_params[:,3]) brain.ntf_params['gei'] = np.mean(ind_params[:,4]) brain.ntf_params['gii'] = np.mean(ind_params[:,5]) brain.ntf_params['tauC'] = np.mean(ind_params[:,6]) for s in tqdm_notebook(np.arange(0, nsubs), desc = 'Average Parameters'): C_ind = ind_cdk[:,:,s] # grab current subject's individual connectome F_ind = ind_psd[:,:,s] # grab current subject's MEG # permute to fix ordering: C_ind = C_ind[perm,:][:,perm] C_ind[empty,:] = 0 C_ind[:,empty] = 0 brain.connectome = C_ind # re-assign connectome to individual connectome # re-ordering for DK atlas and normalizing the connectomes: brain.reorder_connectome(brain.connectome, brain.distance_matrix) brain.bi_symmetric_c() brain.reduce_extreme_dir() brain.distance_matrix = generate.distance_matrix(brain.reducedConnectome, brain.distance_matrix) brain.distance_matrix = get_mean_C(brain.distance_matrix) # simulate model spectra: freq_a, freq_resp, _, _ = runforward.run_local_coupling_forward(brain, brain.ntf_params, fvec) freq_a = freq_a[rois_with_MEG,:] # smooth out spectra # smooth out spectra freq_aout = np.zeros(freq_a.shape) for p in np.arange(0,len(freq_a)): freq_aout[p,:] = np.convolve(np.abs(freq_a[p,:]), lpf, 'same') freq_aout[p,:] = np.sqrt(np.abs(freq_aout[p,:])) #freq_aout[p,:] = freq_aout[p,:] - np.mean(freq_aout[p,:]) corrs = np.zeros(len(freq_aout)) for c in np.arange(0, len(freq_aout)): corrs[c] = pearsonr(ind_psd[c,:,s], freq_aout[c,:])[0] avg_corr[s] = np.mean(corrs) # Plot the source localized MEG spectra: with plt.style.context('seaborn'): plt.plot(fvec, np.transpose(freq_aout)) plt.xlabel('Frequency (Hz)') plt.ylabel('Magnitude (dB)') plt.autoscale(enable=True, axis='x', tight='True') with plt.style.context('seaborn-paper'): af, aa = plt.subplots(1,2, sharey=True) aa[0].violinplot(ri_corr) aa[1].violinplot(avg_corr) ###Output 
_____no_output_____
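###Markdown
The smoothing and region-by-region correlation step is repeated for every connectome variant above (individual, random, distance-weighted and average-parameter). As a small refactoring sketch, the shared logic can be wrapped in a helper that uses only quantities already defined in this notebook (`lpf` and `pearsonr`):
###Code
# Sketch of a shared helper for the repeated post-processing above:
# smooth each region's model spectrum, take the square-root amplitude,
# de-mean it, then correlate region-by-region with the observed MEG spectrum.
def spectral_correlation(freq_mdl, meg_psd, lpf):
    freq_out = np.zeros(freq_mdl.shape)
    for p in np.arange(0, len(freq_mdl)):
        freq_out[p, :] = np.convolve(np.abs(freq_mdl[p, :]), lpf, 'same')
        freq_out[p, :] = np.sqrt(np.abs(freq_out[p, :]))
        freq_out[p, :] = freq_out[p, :] - np.mean(freq_out[p, :])
    corrs = np.zeros(len(freq_out))
    for c in np.arange(0, len(freq_out)):
        corrs[c] = pearsonr(meg_psd[c, :], freq_out[c, :])[0]
    return np.mean(corrs)
###Output
_____no_output_____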
docs/imaging/cube_imaging_example.ipynb
###Markdown Cube Imaging This notebook will demonstrate how to create a cube dirty image with natural weighting using ngCASA. The resulting image will be compared with an image created by CASA. The Dask visualize tool will also be introduced.For this demonstration data from the ALMA First Look at Imaging CASAguide (https://casaguides.nrao.edu/index.php/First_Look_at_Imaging) will be used. The measurement set has been converted to vis.zarr (using convert_ms in cngi.conversion).This walkthrough is designed to be run in a Jupyter notebook on Google Colaboratory. To open the notebook in colab, go [here](https://colab.research.google.com/github/casangi/cngi_prototype/blob/master/docs/prototypes/cube_imaging_example.ipynb). Installation and Dataset Download ###Code import os os.system("pip install --extra-index-url https://test.pypi.org/simple/ cngi-prototype==0.0.64") #https://drive.google.com/file/d/1QAEHs2OwP5h37WZId23nmzXSeQ4qt_LG/view?usp=sharing #https://drive.google.com/file/d/1UeNywNIU-AEwIloWNCpUHZbxuV6RE3lz/view?usp=sharing for id in ['1QAEHs2OwP5h37WZId23nmzXSeQ4qt_LG', '1UeNywNIU-AEwIloWNCpUHZbxuV6RE3lz']: os.system('curl -c ./cookie -s -L "https://drive.google.com/uc?export=download&id=%s"' % id) os.system('curl -Lb ./cookie "https://drive.google.com/uc?export=download&confirm=`awk \'/download/ {print $NF}\' ./cookie`&id=%s" -o ms.tar.gz' % id) os.system('tar -xzf ms.tar.gz') print('complete') ###Output complete ###Markdown Load DatasetTwo datasets are are needed for this notebook: - sis14_twhya_chan_avg_field_5_lsrk_pol_xx.vis.zarr- casa_twhya_standard_gridder_lsrk_cube_natural.img.zarr (for more information about the img.zarr format go [here](https://cngi-prototype.readthedocs.io/en/latest/visibilities.html) and for the vis.zarr format go [here](https://cngi-prototype.readthedocs.io/en/latest/images.html)). The sis14_twhya_chan_avg_field_5_lsrk_pol_xx.vis.zarr dataset is used to create a cube image. The dataset was created by using the ```mstransform``` command in CASA```pythonmstransform('sis14_twhya_calibrated_flagged.ms' outputvis='sis14_twhya_chan_avg_field_5_lsrk_pol_xx.ms', regridms=True, outframe='LSRK', datacolumn='data', correlation='XX', field='5', nchan=7)```and then convert_ms in cngi.conversion```pythoninfile = 'sis14_twhya_chan_avg_field_5_lsrk_pol_xx.ms'outfile = 'sis14_twhya_chan_avg_field_5_lsrk_pol_xx.vis.zarr'chunk_shape=(270, 210, 1, 1)convert_ms(infile, outfile=outfile, chunk_shape=chunk_shape)```The conversion to 'LSRK' is necessary because cngi does not currently have an implementation and tclean does a conversion to 'LSRK' before imaging.To check the ngcasa imaging results the casa_twhya_standard_gridder_lsrk_cube_natural.img.zarr dataset is used. 
This dataset was generated by running ```tclean``` in CASA```pythontclean(vis='sis14_twhya_chan_avg_field_5_lsrk_pol_xx.ms', imagename='twhya_standard_gridder_lsrk_cube_natural', specmode='cube', deconvolver='hogbom', imsize=[200,400], cell=['0.08arcsec'], weighting='natural', threshold='0mJy', niter=0,stokes='XX')```and then ```image_ms``` in cngi.conversion```pythoninfile = 'cube_image/twhya_standard_gridder_lsrk_cube_natural.image'outfile = 'casa_twhya_standard_gridder_lsrk_cube_natural.img.zarr'convert_image(infile=infile,outfile=outfile)``` ###Code import xarray as xr from cngi.dio import read_vis, read_image xr.set_options(display_style="html") vis_dataset = read_vis("sis14_twhya_chan_avg_field_5_lsrk_pol_xx.vis.zarr", 0) vis_dataset casa_image_dataset = read_image("casa_twhya_standard_gridder_lsrk_cube_natural.img.zarr") casa_image_dataset ###Output _____no_output_____ ###Markdown Note that the chunks parameter in cngi and ngcasa functions specifies the size of a chunk and not the number of chunks (in CASA ```tclean``` chanchunks refers to the number of channel chunks).The dimensionality of the sis14_twhya_chan_avg_field_5_lsrk_pol_xx.vis.zarr dataset is (time:270,baseline:210,chan:7,pol:1) and a zarr chunk size of (time:270,baseline:210,chan:1,pol:1) was chosen. The dask chunk size was chosen to be the same as the zarr chunk size. For more information concerning chunking go to [here](https://cngi-prototype.readthedocs.io/en/latest/design.html). Flag Data and Create Imaging Weights The ```applyflags``` cngi.vis function sets all values that should be flagged to nan. The ngcasa.imaging code does not internally apply flags but does ignore nan values. [applyflags documentation](https://cngi-prototype.readthedocs.io/en/latest/_api/api/cngi.vis.applyflags.htmlcngi.vis.applyflags)The ```make_imaging_weight``` cngi.imaging function takes the WEIGHT or WEIGHT_SPECTRUM data variables and creates IMAGING_WEIGHT data variable that has dimensions time x baseline x chan x pol (matches the visibility DATA variable). Weighting schemes that are supported include natural, uniform, briggs, briggs_abs. Using imaging_weights_parms['chan_mode'] = 'cube' is equavalent to perchanweightdensity=True in CASA. [make_imaging_weight documentation](https://cngi-prototype.readthedocs.io/en/latest/_api/api/imaging.make_imaging_weight.htmlngcasa.imaging.make_imaging_weight)When ```storage_parms['to_disk']``` is False no execution will occur only a graph will be generated. 
###Code from cngi.vis import applyflags from ngcasa.imaging import make_imaging_weight vis_dataset_flagged = applyflags(vis_dataset, flags=['FLAG', 'FLAG_ROW']) grid_parms = {} grid_parms['chan_mode'] = 'cube' grid_parms['image_size'] = [200,400] grid_parms['cell_size'] = [0.08,0.08] sel_parms = {} imaging_weights_parms = {} imaging_weights_parms['weighting'] = 'natural' storage_parms = {} storage_parms['to_disk'] = False vis_dataset_flagged = make_imaging_weight(vis_dataset_flagged, imaging_weights_parms, grid_parms, sel_parms, storage_parms) ###Output ######################### Start make_imaging_weights ######################### Setting default uvw to UVW Setting default data to DATA Setting default imaging_weight to IMAGING_WEIGHT Setting default graph_name to make_imaging_weights WEIGHT data variable found, will use WEIGHT to calculate IMAGING_WEIGHT ##################### Created graph for make_imaging_weights ##################### ###Markdown Create Dirty Cube ImageThe ```make_image``` cngi.imaging function grids the data (using the prolate spheroidal function as an anti-aliasing filter), fast Fourier transform the gridded data to an image and normalizes the image. The ```make_pb``` function currently supports rotationally symmetric airy disk primary beams.[make_pb documentation](https://cngi-prototype.readthedocs.io/en/latest/_api/api/imaging.make_pb.htmlngcasa.imaging.make_pb)[make_image documentation](https://cngi-prototype.readthedocs.io/en/latest/_api/api/imaging.make_image.html)To create an image of the execution graph the [dask.visualize](https://docs.dask.org/en/latest/api.htmldask.visualize) method can be used. By keeping ```storage_parms['to_disk'] ``` False the image_dataset returned by ```make_image``` will contain a graph for flagging, ```applyflags```, ```make_imaging_weight```, ```make_image``` and ```make_pb```.Changing the ```storage_parms['to_disk']``` to True will trigger a compute. 
###Code from ngcasa.imaging import make_image from ngcasa.imaging import make_pb from cngi.dio import write_zarr import dask import xarray as xr image_dataset = xr.Dataset() storage_parms = {} #Create Dask execution graph storage_parms['to_disk'] = False image_dataset = make_image(vis_dataset_flagged,image_dataset,grid_parms,sel_parms,storage_parms) make_pb_parms = {} make_pb_parms['function'] = 'alma_airy' make_pb_parms['list_dish_diameters'] = [10.7] make_pb_parms['list_blockage_diameters'] = [0.75] make_pb_parms['pb'] = 'PB' image_dataset = make_pb(image_dataset,make_pb_parms, grid_parms, sel_parms, storage_parms) dask.visualize(image_dataset,filename='cube_image_graph.png') #Trigger compute on graph and store result on disk image_dataset = write_zarr(image_dataset, outfile='twhya_standard_gridder_lsrk_cube_natural.img.zarr', graph_name='make_imaging_weights, make_image and make_pb') image_dataset ###Output ######################### Start make_image ######################### Setting default uvw to UVW Setting default data to DATA Setting default imaging_weight to IMAGING_WEIGHT Setting default sum_weight to SUM_WEIGHT Setting default image to IMAGE Setting default pb to PB Setting default weight_pb to WEIGHT_PB Setting default image_center to [100 200] Setting default fft_padding to 1.2 Setting default graph_name to make_image ##################### Created graph for make_image ##################### Setting default pb to PB Setting default pb_name to PB Setting default graph_name to make_pb Setting default image_center to [100 200] Setting default fft_padding to 1.2 ##################### Created graph for make_pb ##################### Time to store and execute graph make_imaging_weights, make_image and make_pb 2.42010235786438 ###Markdown Dask VisualizationThe Dask execution graph below shows how the images for each channel are computed in parallel. Each image is written to disk independently and Dask along with Zarr handles the virtual concatenation (the resulting img.zarr is chunked by channel). 
This allows for processing cubes that are larger than memory.![title1](https://raw.githubusercontent.com/casangi/cngi_prototype/master/docs/prototypes/cube_image_graph.png) Plot and Compare With CASA ###Code import matplotlib.pylab as plt import numpy as np from ipywidgets import interactive def comparison_plots(chan): print('Frequency',image_dataset.chan[chan].values, 'Hz') dirty_image = image_dataset.IMAGE[:,:,chan,0] casa_dirty_image = casa_image_dataset['RESIDUAL'].values[:, :, chan, 0] fig0, ax0 = plt.subplots(1, 2, sharey=True) im0 = ax0[0].imshow(casa_dirty_image) im1 = ax0[1].imshow(dirty_image) ax0[0].title.set_text('CASA Dirty Image') ax0[1].title.set_text('CNGI Dirty Image') fig0.colorbar(im0, ax=ax0[0], fraction=0.046, pad=0.04) fig0.colorbar(im1, ax=ax0[1], fraction=0.046, pad=0.04) plt.show() plt.figure() plt.imshow(casa_dirty_image - dirty_image) plt.title('Difference Dirty Image') plt.colorbar() plt.show() dirty_image = dirty_image / np.max(np.abs(dirty_image)) casa_dirty_image = casa_dirty_image / np.max(np.abs(casa_dirty_image)) # Calculate max error max_error_dirty_image = np.max(np.abs(dirty_image - casa_dirty_image)).values print('Max Error',max_error_dirty_image) # Calculate root mean square error rms_error_dirty_image = np.linalg.norm(dirty_image - casa_dirty_image, 'fro') print('RMS Error',rms_error_dirty_image) # When running the notebook uncomment the code below to get a slider # interactive_plot = interactive(comparison_plots, chan=(0, 6)) # output = interactive_plot.children[-1] # output.layout.height = '550px' # interactive_plot comparison_plots(3) ###Output Frequency 372521853594.1145 Hz ###Markdown The first channel (channel 0) is flagged by both ngCASA and CASA. Why CASA is flagging the last channel and ngCASA is not needs further investigation. Checking sis14_twhya_chan_avg_field_5_lsrk_pol_xx.ms with browsetable in CASA shows that only the first channel is flagged.The reason for the small difference between ngCASA and CASA, in channels 1 to 5, is due to ngCASA using a different implementation of the Fast Fourier Transform. 
###Code import matplotlib.pylab as plt import numpy as np from ipywidgets import interactive #### Primary Beam Corrected Images #### def comparison_plots(chan): print('Frequency',image_dataset.chan[chan].values, 'Hz') pb_limit = 0.2 primary_beam = image_dataset.PB[100,:,chan,0,0].where(image_dataset.PB[100,:,chan,0,0] > pb_limit,other=0.0) dirty_image_pb_cor = image_dataset.IMAGE[:,:,chan,0]/image_dataset.PB[:,:,chan,0,0] dirty_image_pb_cor = dirty_image_pb_cor.where(image_dataset.PB[:,:,chan,0,0] > pb_limit,other=np.nan) casa_primary_beam = casa_image_dataset['PB'][100, :, chan, 0] #Primary beam created by CASA casa_dirty_image_pb_cor = (casa_image_dataset['IMAGE.PBCOR'][:, :, chan, 0]).where(casa_image_dataset['PB'][:, :, chan, 0] > pb_limit,other=np.nan) #Image created by CASA #Plot Primary Beams fig0, ax0, = plt.subplots(1, 2, sharey=True,figsize=(10, 5)) im0 = ax0[0].plot(casa_primary_beam) im1 = ax0[1].plot(primary_beam) ax0[0].title.set_text('CASA Primary Beam Cross Section') ax0[1].title.set_text('ngCASA Primary Beam Cross Section') plt.show() plt.figure() plt.plot(casa_primary_beam-primary_beam) plt.title('Difference Primary Beam') plt.show() #Plotting Images fig0, ax0 = plt.subplots(1, 2, sharey=True,figsize=(10, 5)) im0 = ax0[0].imshow(casa_dirty_image_pb_cor) im1 = ax0[1].imshow(dirty_image_pb_cor) ax0[0].title.set_text('CASA PB Corrected Dirty Image') ax0[1].title.set_text('ngCASA PB Corrected Dirty Image') fig0.colorbar(im0, ax=ax0[0], fraction=0.046, pad=0.04) fig0.colorbar(im1, ax=ax0[1], fraction=0.046, pad=0.04) plt.show() plt.figure() plt.imshow(casa_dirty_image_pb_cor - dirty_image_pb_cor) plt.title('Difference Dirty Image') plt.colorbar() plt.show() dirty_image_pb_cor = dirty_image_pb_cor / np.nanmax(np.abs(dirty_image_pb_cor)) casa_dirty_image_pb_cor = casa_dirty_image_pb_cor / np.nanmax(np.abs(casa_dirty_image_pb_cor)) norm_diff_image_pb_cor = dirty_image_pb_cor - casa_dirty_image_pb_cor # Calculate max error max_error_dirty_image = np.nanmax(np.abs(norm_diff_image_pb_cor)) print('Max Normalized Error',max_error_dirty_image) # Calculate root mean square error rms_error_dirty_image = np.sqrt(np.nansum(np.square(norm_diff_image_pb_cor))) print('RMS Normalized Error',rms_error_dirty_image) interactive_plot = interactive(comparison_plots, chan=(0, 6)) output = interactive_plot.children[-1] output.layout.height = '1100px' interactive_plot #comparison_plots(3) ###Output _____no_output_____
Irirs_kedro/get-started/notebooks/BERT_extensions_multi_lingual.ipynb
###Markdown Imports ###Code !pip install transformers==3.0.0 !pip install emoji import gc import os import emoji as emoji import re import string import numpy as np import pandas as pd import torch import torch.nn as nn from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, accuracy_score from transformers import AutoModel from transformers import BertModel, BertTokenizer import copy from sklearn.preprocessing import LabelEncoder import warnings warnings.filterwarnings('ignore') !git clone https://github.com/suman101112/hasoc-fire-2020 ###Output fatal: destination path 'hasoc-fire-2020' already exists and is not an empty directory. ###Markdown Read and prepare data ###Code data_en = pd.read_excel("/content/hasoc-fire-2020/2020/hasoc_2020_en_train_new_a.xlsx") data_en_test = pd.read_csv("/content/hasoc-fire-2020/2020/english_test_1509.csv") data_en['language'] = 0 data_en_test['language'] = 0 data_de = pd.read_excel("/content/hasoc-fire-2020/2020/hasoc_2020_de_train_new_a.xlsx") data_de_test = pd.read_csv("/content/hasoc-fire-2020/2020/german_test_1509.csv") data_de['language'] = 1 data_de_test['language'] = 1 data_hi = pd.read_excel("/content/hasoc-fire-2020/2020/hasoc_2020_hi_train_a.xlsx") data_hi_test = pd.read_csv("/content/hasoc-fire-2020/2020/hindi_test_1509.csv") data_hi['language'] = 2 data_hi_test['language'] = 2 data = copy.deepcopy(data_en) data = data.append(data_de, ignore_index=True) data = data.append(data_hi, ignore_index=True) data_test = copy.deepcopy(data_en_test) data_test = data_test.append(data_de_test, ignore_index=True) data_test = data_test.append(data_hi_test, ignore_index=True) labels = data[['task1', 'task2', 'language']] le = LabelEncoder() labels['task1'] = le.fit_transform(labels['task1']) le = LabelEncoder() labels['task2'] = le.fit_transform(labels['task2']) labels_test = data_test[['task1', 'task2', 'language']] le = LabelEncoder() labels_test['task1'] = le.fit_transform(labels_test['task1']) le = LabelEncoder() labels_test['task2'] = le.fit_transform(labels_test['task2']) data.head() data = data.drop(columns=['tweet_id','task1', 'task2','language','ID']) X_train, X_val, y_train, y_val = train_test_split(data, labels, train_size=0.85, shuffle=True, random_state=2045) train_set = X_train['text'].to_list() train_labels1 = y_train['task1'].to_list() train_labels2 = y_train['task2'].to_list() train_langs = y_train['language'].to_list() val_set = X_val['text'].to_list() val_labels1 = y_val['task1'].to_list() val_labels2 = y_val['task2'].to_list() val_langs = y_val['language'].to_list() test_set = data_test['text'].to_list() test_labels1 = labels_test['task1'].to_list() test_labels2 = labels_test['task2'].to_list() test_langs = labels_test['language'].to_list() ###Output _____no_output_____ ###Markdown Utility functions ###Code def pre_process_dataset(values): new_values = list() # Emoticons emoticons = [':-)', ':)', '(:', '(-:', ':))', '((:', ':-D', ':D', 'X-D', 'XD', 'xD', 'xD', '<3', '</3', ':\*', ';-)', ';)', ';-D', ';D', '(;', '(-;', ':-(', ':(', '(:', '(-:', ':,(', ':\'(', ':"(', ':((', ':D', '=D', '=)', '(=', '=(', ')=', '=-O', 'O-=', ':o', 'o:', 'O:', 'O:', ':-o', 'o-:', ':P', ':p', ':S', ':s', ':@', ':>', ':<', '^_^', '^.^', '>.>', 'T_T', 'T-T', '-.-', '*.*', '~.~', ':*', ':-*', 'xP', 'XP', 'XP', 'Xp', ':-|', ':->', ':-<', '$_$', '8-)', ':-P', ':-p', '=P', '=p', ':*)', '*-*', 'B-)', 'O.o', 'X-(', ')-X'] for value in values: # Remove dots text = value.replace(".", "").lower() text = 
re.sub(r"[^a-zA-Z?.!,¿]+", " ", text) users = re.findall("[@]\w+", text) for user in users: text = text.replace(user, "<user>") urls = re.findall(r'(https?://[^\s]+)', text) if len(urls) != 0: for url in urls: text = text.replace(url, "<url >") for emo in text: if emo in emoji.UNICODE_EMOJI: text = text.replace(emo, "<emoticon >") for emo in emoticons: text = text.replace(emo, "<emoticon >") numbers = re.findall('[0-9]+', text) for number in numbers: text = text.replace(number, "<number >") text = text.replace('#', "<hashtag >") text = re.sub(r"([?.!,¿])", r" ", text) text = "".join(l for l in text if l not in string.punctuation) text = re.sub(r'[" "]+', " ", text) new_values.append(text) return new_values def data_process(data, labels): input_ids = [] attention_masks = [] bert_tokenizer = BertTokenizer.from_pretrained("bert-base-multilingual-cased") for sentence in data: bert_inp = bert_tokenizer.__call__(sentence, max_length=64, padding='max_length', pad_to_max_length=True, truncation=True, return_token_type_ids=False) input_ids.append(bert_inp['input_ids']) attention_masks.append(bert_inp['attention_mask']) #del bert_tokenizer #gc.collect() #torch.cuda.empty_cache() input_ids = np.asarray(input_ids) attention_masks = np.array(attention_masks) labels = np.array(labels) return input_ids, attention_masks, labels ###Output _____no_output_____ ###Markdown Model ###Code class BERT_Arch(nn.Module): def __init__(self, bert, n_classes, mode='cnn'): super(BERT_Arch, self).__init__() self.bert = BertModel.from_pretrained('bert-base-multilingual-cased') self.n_classes = n_classes self.mode = mode if mode == 'cnn': # CNN self.conv = nn.Conv2d(in_channels=13, out_channels=13, kernel_size=(3, 768), padding='valid') self.relu = nn.ReLU() # change the kernel size either to (3,1), e.g. 1D max pooling # or remove it altogether self.pool = nn.MaxPool2d(kernel_size=(3, 1), stride=1) self.dropout = nn.Dropout(0.1) # be careful here, this needs to be changed according to your max pooling # without pooling: 443, with 3x1 pooling: 416 # Size after conv = BERT max length - 3 + 1 # Size after pool = Size after conv - 3 + 1 # (BERT max length - 3 + 1) - 3 + 1 == BERT max length - 4 # (kernel_size * (BERT max length - 4), num. 
classes) # IN THIS CASE MAX LENGTH IS SET TO 64 # FC self.fc = nn.Linear(13 * (64 - 4), self.n_classes).to(device) self.flat = nn.Flatten() elif mode == 'rnn': ### RNN self.lstm = nn.LSTM(768, 256, batch_first=True, bidirectional=True) ## FC self.fc = nn.Linear(256*2, self.n_classes) elif mode == 'shallow_fc': self.fc = nn.Linear(768, self.n_classes) elif mode == 'deep_fc': self.leaky_relu = nn.LeakyReLU() self.fc1 = nn.Linear(768, 768) self.fc2 = nn.Linear(768, 768) self.fc3 = nn.Linear(768, self.n_classes) else: raise NotImplementedError("Unsupported extension!") self.softmax = nn.LogSoftmax(dim=1) def forward(self, sent_id, mask): sequence_output, _, all_layers = self.bert(sent_id, attention_mask=mask, output_hidden_states=True) if self.mode == 'cnn': x = torch.transpose(torch.cat(tuple([t.unsqueeze(0) for t in all_layers]), 0), 0, 1) x = self.pool(self.dropout(self.relu(self.conv(self.dropout(x))))) x = self.fc(self.dropout(self.flat(self.dropout(x)))) elif self.mode == 'rnn': lstm_output, (h,c) = self.lstm(sequence_output) hidden = torch.cat((lstm_output[:,-1, :256],lstm_output[:,0, 256:]),dim=-1) x = self.fc(hidden.view(-1,256*2)) elif self.mode == 'shallow_fc': x = self.fc(sequence_output[:,0,:]) elif self.mode == 'deep_fc': x = self.fc1(sequence_output[:,0,:]) x = self.leaky_relu(x) x = self.fc2(x) x = self.leaky_relu(x) x = self.fc3(x) else: raise NotImplementedError("Unsupported extension!") gc.collect() torch.cuda.empty_cache() del all_layers c = self.softmax(x) return c ###Output _____no_output_____ ###Markdown Train ###Code # function to train the model def train(): model.train() total_loss, total_accuracy = 0, 0 # empty list to save model predictions total_preds = [] # iterate over batches total = len(train_dataloader) for i, batch in enumerate(train_dataloader): step = i+1 percent = "{0:.2f}".format(100 * (step / float(total))) lossp = "{0:.2f}".format(total_loss/(total*batch_size)) filledLength = int(100 * step // total) bar = '█' * filledLength + '>' *(filledLength < 100) + '.' * (99 - filledLength) print(f'\rBatch {step}/{total} |{bar}| {percent}% complete, loss={lossp}, accuracy={total_accuracy}', end='') # push the batch to gpu batch = [r.to(device) for r in batch] sent_id, mask, labels = batch del batch gc.collect() torch.cuda.empty_cache() # clear previously calculated gradients model.zero_grad() # get model predictions for the current batch #sent_id = torch.tensor(sent_id).to(device).long() preds = model(sent_id, mask) # compute the loss between actual and predicted values loss = cross_entropy(preds, labels) # add on to the total loss total_loss += float(loss.item()) # backward pass to calculate the gradients loss.backward() # clip the the gradients to 1.0. It helps in preventing the exploding gradient problem torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0) # update parameters optimizer.step() # model predictions are stored on GPU. So, push it to CPU #preds = preds.detach().cpu().numpy() # append the model predictions #total_preds.append(preds) total_preds.append(preds.detach().cpu().numpy()) gc.collect() torch.cuda.empty_cache() # compute the training loss of the epoch avg_loss = total_loss / (len(train_dataloader)*batch_size) # predictions are in the form of (no. of batches, size of batch, no. of classes). # reshape the predictions in form of (number of samples, no. 
of classes) total_preds = np.concatenate(total_preds, axis=0) # returns the loss and predictions return avg_loss, total_preds # function for evaluating the model def evaluate(): print("\n\nEvaluating...") # deactivate dropout layers model.eval() total_loss, total_accuracy = 0, 0 # empty list to save the model predictions total_preds = [] # iterate over batches total = len(val_dataloader) for i, batch in enumerate(val_dataloader): step = i+1 percent = "{0:.2f}".format(100 * (step / float(total))) lossp = "{0:.2f}".format(total_loss/(total*batch_size)) filledLength = int(100 * step // total) bar = '█' * filledLength + '>' * (filledLength < 100) + '.' * (99 - filledLength) print(f'\rBatch {step}/{total} |{bar}| {percent}% complete, loss={lossp}, accuracy={total_accuracy}', end='') # push the batch to gpu batch = [t.to(device) for t in batch] sent_id, mask, labels = batch del batch gc.collect() torch.cuda.empty_cache() # deactivate autograd with torch.no_grad(): # model predictions preds = model(sent_id, mask) # compute the validation loss between actual and predicted values loss = cross_entropy(preds, labels) total_loss += float(loss.item()) #preds = preds.detach().cpu().numpy() #total_preds.append(preds) total_preds.append(preds.detach().cpu().numpy()) gc.collect() torch.cuda.empty_cache() # compute the validation loss of the epoch avg_loss = total_loss / (len(val_dataloader)*batch_size) # reshape the predictions in form of (number of samples, no. of classes) total_preds = np.concatenate(total_preds, axis=0) return avg_loss, total_preds ### Extension mode MODE = 'rnn' pre_pro_train_data = pre_process_dataset(train_set) pre_pro_val_data = pre_process_dataset(val_set) pre_pro_test_data = pre_process_dataset(test_set) train_input_ids, train_attention_masks, train_labels = data_process(pre_pro_train_data,train_labels1) val_input_ids, val_attention_masks, val_labels = data_process(pre_pro_val_data,val_labels1) test_input_ids, test_attention_masks, test_labels = data_process(pre_pro_test_data,test_labels1) # Specify the GPU # Setting up the device for GPU usage device = 'cuda' if torch.cuda.is_available() else 'cpu' print(device) train_count = len(train_labels) test_count = len(test_labels) val_count = len(val_labels) # ~~~~~~~~~~~~~~~~~~~~~ Import BERT Model and BERT Tokenizer ~~~~~~~~~~~~~~~~~~~~~# # import BERT-base pretrained model bert = AutoModel.from_pretrained('bert-base-multilingual-cased') # bert = AutoModel.from_pretrained('bert-base-uncased') # Load the BERT tokenizer #tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased') # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Tokenization ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# # for train set train_seq = torch.tensor(train_input_ids.tolist()) train_mask = torch.tensor(train_attention_masks.tolist()) train_y = torch.tensor(train_labels.tolist()) # for validation set val_seq = torch.tensor(val_input_ids.tolist()) val_mask = torch.tensor(val_attention_masks.tolist()) val_y = torch.tensor(val_labels.tolist()) # for test set test_seq = torch.tensor(test_input_ids.tolist()) test_mask = torch.tensor(test_attention_masks.tolist()) test_y = torch.tensor(test_labels.tolist()) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Create DataLoaders ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler # define a batch size batch_size = 32 # wrap 
tensors train_data = TensorDataset(train_seq, train_mask, train_y) # sampler for sampling the data during training train_sampler = RandomSampler(train_data) # dataLoader for train set train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size) # wrap tensors val_data = TensorDataset(val_seq, val_mask, val_y) # sampler for sampling the data during training val_sampler = SequentialSampler(val_data) # dataLoader for validation set val_dataloader = DataLoader(val_data, sampler=val_sampler, batch_size=batch_size) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Freeze BERT Parameters ~~~~~~~~~~~~~~~~~~~~~~~~~~~~# # freeze all the parameters for param in bert.parameters(): param.requires_grad = False # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# # pass the pre-trained BERT to our define architecture model = BERT_Arch(bert, n_classes=2, mode=MODE) # push the model to GPU model = model.to(device) # optimizer from hugging face transformers from transformers import AdamW # define the optimizer optimizer = AdamW(model.parameters(), lr=2e-5) #from sklearn.utils.class_weight import compute_class_weight # compute the class weights #class_wts = compute_class_weight('balanced', np.unique(train_labels), train_labels) #print(class_wts) # convert class weights to tensor #weights = torch.tensor(class_wts, dtype=torch.float) #weights = weights.to(device) # loss function #cross_entropy = nn.NLLLoss(weight=weights) cross_entropy = nn.NLLLoss() # set initial loss to infinite best_valid_loss = float('inf') # empty lists to store training and validation loss of each epoch #train_losses = [] #valid_losses = [] #if os.path.isfile("/content/drive/MyDrive/saved_weights.pth") == False: #if os.path.isfile("saved_weights.pth") == False: # number of training epochs epochs = 3 current = 1 # for each epoch while current <= epochs: print(f'\nEpoch {current} / {epochs}:') # train model train_loss, _ = train() # evaluate model valid_loss, _ = evaluate() # save the best model if valid_loss < best_valid_loss: best_valid_loss = valid_loss #torch.save(model.state_dict(), 'saved_weights.pth') # append training and validation loss #train_losses.append(train_loss) #valid_losses.append(valid_loss) print(f'\n\nTraining Loss: {train_loss:.3f}') print(f'Validation Loss: {valid_loss:.3f}') current = current + 1 #else: #print("Got weights!") # load weights of best model #model.load_state_dict(torch.load("saved_weights.pth")) #model.load_state_dict(torch.load("/content/drive/MyDrive/saved_weights.pth"), strict=False) ###Output Epoch 1 / 3: Batch 241/241 |████████████████████████████████████████████████████████████████████████████████████████████████████| 100.00% complete, loss=0.02, accuracy=0 Evaluating... Batch 43/43 |████████████████████████████████████████████████████████████████████████████████████████████████████| 100.00% complete, loss=0.01, accuracy=0 Training Loss: 0.015 Validation Loss: 0.013 Epoch 2 / 3: Batch 241/241 |████████████████████████████████████████████████████████████████████████████████████████████████████| 100.00% complete, loss=0.01, accuracy=0 Evaluating... 
Batch 43/43 |████████████████████████████████████████████████████████████████████████████████████████████████████| 100.00% complete, loss=0.01, accuracy=0 Training Loss: 0.013 Validation Loss: 0.013 Epoch 3 / 3: Batch 241/241 |████████████████████████████████████████████████████████████████████████████████████████████████████| 100.00% complete, loss=0.01, accuracy=0 Evaluating... Batch 43/43 |████████████████████████████████████████████████████████████████████████████████████████████████████| 100.00% complete, loss=0.01, accuracy=0 Training Loss: 0.011 Validation Loss: 0.013 ###Markdown Test ###Code # get predictions for test data gc.collect() torch.cuda.empty_cache() with torch.no_grad(): preds = model(test_seq.to(device), test_mask.to(device)) #preds = model(test_seq, test_mask) preds = preds.detach().cpu().numpy() print("Performance:") # model's performance preds = np.argmax(preds, axis=1) print('Classification Report') print(classification_report(test_y, preds)) print("Accuracy: " + str(accuracy_score(test_y, preds))) ###Output Performance: Classification Report precision recall f1-score support 0 0.80 0.65 0.72 754 1 0.81 0.90 0.85 1249 accuracy 0.81 2003 macro avg 0.80 0.78 0.78 2003 weighted avg 0.81 0.81 0.80 2003 Accuracy: 0.8067898152770844 ###Markdown Post-processing ###Code ### Analyze the results by language ###Output _____no_output_____
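###Markdown
One way to carry out the per-language analysis is to split the test predictions by the language codes assigned when the data was loaded (0 = English, 1 = German, 2 = Hindi). This is a sketch that reuses `preds`, `test_y` and `test_langs` from the cells above.
###Code
# Sketch: accuracy of the task1 predictions broken down by language.
lang_names = {0: 'English', 1: 'German', 2: 'Hindi'}
test_langs_arr = np.array(test_langs)
test_y_arr = np.array(test_y)
for lang_code, lang_name in lang_names.items():
    mask = test_langs_arr == lang_code
    if mask.sum() == 0:
        continue
    acc = accuracy_score(test_y_arr[mask], preds[mask])
    print(f'{lang_name}: {mask.sum()} samples, accuracy {acc:.4f}')
###Output
_____no_output_____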
papers/Lavoie_and_Godley_2002/Lavoie and Godley (2002).ipynb
###Markdown Kaleckian models of growth in a coherent stock-flow monetary framework: a Kaldorian view
###Code
from pysolve3.model import Model
from pysolve3.utils import SFCTable, AddGrowth, ShockModel, SolveSFC

import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd

%matplotlib inline
%config InlineBackend.figure_format = 'retina'

def create_model():
    model = Model()

    # Accounting
    model.set_var_default(0)

    ##################################################################
    # Variables
    ##################################################################
    model.var('Yhr', desc='Regular income of households')
    model.var('Ws', desc='Wages')
    model.var('FD', desc='Distributed dividends')
    model.var('Md', desc='Demand for deposits')
    model.var('Ms', desc='Deposits supply')
    model.var('Cd', desc='Consumption demand')
    model.var('V', desc='Household wealth')
    model.var('CG', desc='Capital Gains')
    model.var('q', desc='Tobin’s q ratio')
    model.var('l', desc='debt-to-capital ratio of the firms')
    model.var('Ld', desc='Demand for Loans')
    model.var('rcf', desc='ratio of retained earnings to capital')
    model.var('FU', desc='Undistributed Profits')
    model.var('Yhre', desc='Expected Yhr')
    model.var('Ve', desc='Expected V')
    model.var('CGe', desc='Expected Capital Gains')

    ##################################################################
    # Parameters
    ##################################################################
    model.param('rm', desc='Deposits rate', default=0.05)
    model.param('alpha_1', desc='Marginal propensity to consume out of Yhr', default=0.8)
    model.param('a', desc='', default=1.2)

    ##################################################################
    # Equations
    ##################################################################
    # NOTE: G, K, Ws, FD, Md, Ld and FU appear below but are not yet
    # determined by equations in this cell; the remaining relations of
    # the paper still have to be added before the model can be solved.
    model.add('Yhr = Ws + FD + rm*Md(-1)')  # Eq 1
    model.add('V = Yhr - Cd + G')  # Eq 4
    model.add('q = V/K')  # Eq 18
    model.add('l = Ld/K')  # Eq 19
    model.add('rcf = FU/K(-1)')  # Eq 20
    model.add('Cd = alpha_1*Yhre + (alpha_1/a)*CG(-1)')  # Eq 25
    model.add('Ve = V(-1) + Yhre + CGe - Cd')  # Eq 32

    return model
###Output
_____no_output_____
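###Markdown
Once the remaining variables and behavioural equations of Lavoie and Godley (2002) have been added to `create_model()`, the model can be simulated period by period. The loop below is only a sketch of the usual pysolve-style workflow and assumes that pysolve3 keeps pysolve's `solve()` / `solutions` interface; the plotted variables are names declared above.
###Code
# Sketch only: requires the completed equation set and assumes the
# pysolve-style solve()/solutions API is available in pysolve3.
model = create_model()
for _ in range(100):                       # simulate 100 periods
    model.solve(iterations=200, threshold=1e-5)

results = pd.DataFrame(model.solutions)    # one row per simulated period
results[['V', 'Cd', 'q']].plot(subplots=True, figsize=(10, 8))
plt.show()
###Output
_____no_output_____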
ejercicios/02/04_iteration.ipynb
###Markdown IterationOne of the most basic operations in programming is iterating over a list of elements to perform some kind of operation.In python we use the `for` statement to iterate. It is easier to use than the same statement in C, C++ or FORTRAN because instead of running over a integer index it takes as an input any iterable object and runs over it.Let's see some examples Iterating over listsLists are ordered. Iteration is done in the same order as the input list. ###Code a = [4,5,6,8,10] for i in a: print(i) # A fragment of `One Hundred Years of Solitude` GGM = 'Many years later, as he faced the firing squad, \ Colonel Aureliano Buendía was to remember that dist \ ant afternoon when his father took him to discover ice. \ At that time Macondo was a village of twenty adobe houses,\ built on the bank of a river of clear water that ran along \ a bed of polished stones, which were white and enormous,\ like prehistoric eggs.' print(GGM) dot = GGM.split() # we create a list where each element is a word print(dot) for i in dot: print(i) ###Output _____no_output_____ ###Markdown Iterating over dictionariesDictionaries are not ordered. Iterating over them does not have to produce an order sequence. ###Code a = {} # empty dictionary a[1] = 'one' a[2] = 'two' a[3] = 'three' a[4] = 'four' a[5] = 'five' print(a) for k in a.keys(): # iterate over the keys print(a[k]) for v in a.values(): #iterate over the values print(v) ###Output _____no_output_____ ###Markdown Iterating over a sequenceThe function `range()` is useful to generate a sequence of integers that can be used to iterate. ###Code print(range(10)) # range itself returns an iterable object a = list(range(10)) # this translates that iterable object into a list print(a) # be careful! the lists has 10 objects starting with 0 for i in range(10): # if you given a single argument the iterations starts at 0. print(i) for i in range(4,10): # you can algo give two arguments: range(start, end). print(i) for i in range(0,10,3): # if you give three arguments they are interpreted as range(start, end, step) print(i) ###Output _____no_output_____
CTR Prediction/Exploration+DeepFM.ipynb
###Markdown Imports and Downloads ###Code # kaggle imports """ !pip uninstall -y kaggle !pip install --upgrade pip !pip install kaggle==1.5.12 """ # global libraries import matplotlib.pyplot as plt import time # install deepctr !pip install deepctr==0.8.5 # tensorflow nightly !pip install tf-nightly # relevant libraries import pandas as pd import numpy as np from sklearn.metrics import log_loss, roc_auc_score from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler from tqdm import tqdm import os from deepctr.models import * from deepctr.feature_column import SparseFeat, DenseFeat, get_feature_names import random import tensorflow as tf from tensorflow.keras.callbacks import EarlyStopping from collections.abc import Iterable # num of CPUs import multiprocessing from multiprocessing import Pool multiprocessing.cpu_count() ###Output _____no_output_____ ###Markdown Kaggle Dataset and Google Colab ###Code # upload kaggle json permissions """ from google.colab import files uploaded = files.upload() for fn in uploaded.keys(): print('User uploaded file "{name}" with length {length} bytes'.format( name=fn, length=len(uploaded[fn]))) # Then move kaggle.json into the folder where the API expects to find it. !mkdir -p ~/.kaggle/ && mv kaggle.json ~/.kaggle/ && chmod 600 ~/.kaggle/kaggle.json """ # google colab from google.colab import drive drive.mount('/content/drive') # download kaggle dataset #!kaggle competitions download -c bgu-recsys-ctr-prediction -p drive/MyDrive/RecSysKaggle/ #!unzip bgu-recsys-ctr-prediction.zip # change directory to dataset dir %cd drive/MyDrive/RecSysKaggle/ # all files names INPUT_DATA_DIR = "train_data" files = [] for (dirpath, dirnames, filenames) in os.walk(INPUT_DATA_DIR): if filenames: for filename in filenames: files.append(os.path.join(dirpath, filename)) ###Output _____no_output_____ ###Markdown Memory Usage ###Code # print memory usage of objects def mem_usage(pandas_obj): # if it's Df if isinstance(pandas_obj, pd.DataFrame): usage_b = pandas_obj.memory_usage(deep=True).sum() #if not a df it's a series else: usage_b = pandas_obj.memory_usage(deep=True) usage_mb = usage_b / 1024 ** 2 # convert bytes to megabytes print("{:03.2f} MB".format(usage_mb)) # Optimize memory usage - transform object columns into int, # float and object in order to reduce memory usage def optimize_df(df): optimized_df = df.copy() columns_float = df.select_dtypes(include=['float']) converted_float = columns_float.apply(pd.to_numeric,downcast='float') optimized_df[converted_float.columns] = converted_float columns_int = df.select_dtypes(include=['int']) converted_int = columns_int.apply(pd.to_numeric, downcast='unsigned') optimized_df[converted_int.columns] = converted_int columns_obj = df.select_dtypes(include=['object']).copy() converted_obj = pd.DataFrame() for col in columns_obj.columns: converted_obj.loc[:,col] = columns_obj[col] optimized_df[converted_obj.columns] = converted_obj return optimized_df ###Output _____no_output_____ ###Markdown Preparation ###Code # load and optimize Df def load_optimize_df(csv_path): rca_df = pd.read_csv(csv_path) optimized_df = optimize_df(rca_df) return optimized_df # set sparse, dense and target deatures for the DeepFM model sparse_features = ['target_id_hash', 'syndicator_id_hash', 'campaign_id_hash', 'target_item_taxonomy', 'placement_id_hash', 'publisher_id_hash', 'source_id_hash', 'source_item_type', 'browser_platform', 'country_code', 'region', 'day_of_week','time_of_day', 
"os_family"] dense_features = ['empiric_calibrated_recs', 'empiric_clicks', 'user_recs', 'user_clicks', 'user_target_recs'] target = ['is_click'] ###Output _____no_output_____ ###Markdown Load Data ###Code data = pd.DataFrame(columns=['page_view_start_time', 'user_id_hash', 'target_id_hash', 'syndicator_id_hash', 'campaign_id_hash', 'empiric_calibrated_recs', 'empiric_clicks', 'target_item_taxonomy', 'placement_id_hash', 'user_recs', 'user_clicks', 'user_target_recs', 'publisher_id_hash', 'source_id_hash', 'source_item_type', 'browser_platform', 'os_family', 'country_code', 'region', 'day_of_week', 'time_of_day', 'gmt_offset', 'is_click']) # read random 10 files into memory rand_files = random.choices(files, k=10) for f in rand_files: data = data.append(load_optimize_df(f), ignore_index=True) # Read test data into memory basepath_test = "test_file.csv" rca_df_test = load_optimize_df(basepath_test) ###Output _____no_output_____ ###Markdown Exploration ###Code # find variables type and memory usage data.info(memory_usage='deep') # describe numeric values (dense) data.describe() # describe categorical values (sparse) data.describe(include=object) # Explore num of clicks as function of time of day imp = data.groupby(['time_of_day', 'is_click']).size().unstack() plt.figure() imp.plot(kind='bar', title="Day hours", figsize=(16,8)) plt.ylabel("count of clicks") plt.plot() # Explore num of clicks as function of days imp = data.groupby(['day_of_week', 'is_click']).size().unstack() plt.figure() imp.plot(kind='bar', title="Day", figsize=(16,8)) plt.ylabel("count of clicks") plt.plot() # Explore num of clicks as function of operating system imp = data.groupby(['os_family', 'is_click']).size().unstack() plt.figure() imp.plot(kind='bar', title="Os Family", figsize=(16,8)) plt.ylabel("count of clicks") plt.plot() # Explore num of clicks as function of browser platform imp = data.groupby(['browser_platform', 'is_click']).size().unstack() plt.figure() imp.plot(kind='bar', title="Browser platform", figsize=(16,8)) plt.ylabel("count of clicks") plt.plot() # Intersection users between test and train print(f'Users intersections : {len(set(rca_df_test["user_id_hash"].unique()).intersection(set(data["user_id_hash"].unique())))}') print(f'train : {len(set(data["user_id_hash"].unique()))}') print(f'test: {len(set(rca_df_test["user_id_hash"].unique()))}') ###Output Users intersections : 0 train : 3263525 test: 422613 ###Markdown As we can see, there are 0 intersections of users between train and test sets, So we decided to remove this column from the datasets. 
###Code # Intersection item between test and train print(f'Target intersections : {len(set(rca_df_test["target_id_hash"].unique()).intersection(set(data["target_id_hash"].unique())))}') print(f'train : {len(set(data["target_id_hash"].unique()))}') print(f'test: {len(set(rca_df_test["target_id_hash"].unique()))}') ###Output Target intersections : 12086 train : 57887 test: 15303 ###Markdown Split train validation ###Code # take only records with targets that exist in test set data_filtered = data[data["target_id_hash"].isin(rca_df_test["target_id_hash"].unique())] data_filtered # Remove duplicate records data_filtered.drop_duplicates(keep='last', inplace=True) # split train and test train, val = train_test_split(data_filtered, train_size = 0.8) # Split feature columns from target X_train = train.iloc[:,:-1] X_val = val.iloc[:,:-1] X_test = rca_df_test.iloc[:,:-1] y_train = train["is_click"] y_val = val["is_click"] y_test = rca_df_test["Id"] ###Output _____no_output_____ ###Markdown Pre process ###Code # transform page_view_start_time into date - and take only the day because month and year same for each record (April 2020) def transform_unix_to_date(df): df["page_view_start_time_date"] = pd.to_datetime(df["page_view_start_time"], unit='ms') df["day"] = df["page_view_start_time_date"].dt.day df.drop(["page_view_start_time", "gmt_offset", "page_view_start_time_date"], axis=1,inplace= True) # removed because time of day already contains the gmt offset return df # remove userid because cold start problem def drop_userid(df): return df.drop(columns='user_id_hash') # fill nan values with defaults of Unknwon for sparse and 0 for dense def fill_na(df): df[sparse_features] = df[sparse_features].fillna('Unknown', ) df[dense_features] = df[dense_features].fillna(0, ) return df # run preprocess def run_preprocess(df): df = drop_userid(df) df = transform_unix_to_date(df) df = fill_na(df) return df X_train = run_preprocess(X_train) X_val = run_preprocess(X_val) X_test = run_preprocess(X_test) ###Output _____no_output_____ ###Markdown Encode features ###Code class CategoryEncoder(object): """ Once fit method is called, sklearn.preprocessing.LabelEncoder cannot encode new categories. In this category encoder, fit can be called any number times. It encodes categories which it has not seen before, without changing the encoding of existing categories. The first category has encoded value of one. 
The value of zero belongs to the 'Unknown' key. """ start = 1 def __init__(self, start=1): self.mapping = {} self.start = start def fit(self, l): if not isinstance(l, Iterable): l = [l] for o in l: if o not in self.mapping.keys(): self.mapping[o] = len(self.mapping.keys()) + self.start self.mapping["Unknown"] = 0 return self def transform(self, l): if isinstance(l, Iterable): return [self.mapping.get(o,0) for o in l] else: return self.mapping.get(l,0) def fit_transform(self,l): self.fit(l) return self.transform(l) # Scale dense features with MinMaxScaler - fit on train and transform train, test and validation mms = MinMaxScaler(feature_range=(0,1)) X_train[dense_features] = mms.fit_transform(X_train[dense_features]) X_val[dense_features] = mms.transform(X_val[dense_features]) X_test[dense_features] = mms.transform(X_test[dense_features]) # Encode sparse features with CategoryEncoder Class - fit on train and transform train, test and validation for feat in sparse_features: cae = CategoryEncoder() X_train[feat] = cae.fit_transform(X_train[feat]) X_val[feat] = cae.transform(X_val[feat]) X_test[feat] = cae.transform(X_test[feat]) # Number of unique values in sparse features for feat in sparse_features: print(f"{feat}: {len(X_train[feat].unique())}") # Show data X_train ###Output _____no_output_____ ###Markdown DeepFM Preprocessing ###Code # the first are sparse vectors for sparse features, the second for small categorical features, and dense vectors for dense features fixlen_feature_columns = [SparseFeat(feat, vocabulary_size=X_train[feat].max()+1,embedding_dim=4) for feat in sparse_features] + [DenseFeat(feat, 1, ) for feat in dense_features] # both go to embedding and dnn dnn_feature_columns = fixlen_feature_columns linear_feature_columns = fixlen_feature_columns feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns) # Create train input for DeepFM input_train = {name:X_train[name] for name in feature_names} output_train = y_train.values # Create validation input for DeepFM input_val = {name:X_val[name] for name in feature_names} output_val = y_val.values # Create test input for DeepFM input_test = {name:X_test[name] for name in feature_names} ###Output _____no_output_____ ###Markdown Build Model ###Code # Define loss function. Although some loss functions are built-in and available to use, this is an example of how you can create/define your own loss function. from tensorflow.keras import backend as K
def root_mean_squared_error(y_true, y_pred): return K.sqrt(K.mean(K.square(y_pred - y_true))) # Build Model model = DeepFM(linear_feature_columns, dnn_feature_columns, task='binary', l2_reg_embedding=0.2) # Compile Model model.compile(optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.BinaryCrossentropy(from_logits=False), metrics=[tf.keras.metrics.RootMeanSquaredError(), tf.keras.metrics.AUC()], ) # Train model with early stopping es = EarlyStopping(monitor='val_loss', patience=10) history = model.fit(input_train, output_train, batch_size=16000, epochs=10, verbose=1, validation_data=(input_val, output_val), callbacks=[es]) ###Output Epoch 1/10 145/145 [==============================] - 24s 142ms/step - loss: 4.2571 - root_mean_squared_error: 0.4639 - auc_4: 0.7855 - val_loss: 1.2322 - val_root_mean_squared_error: 0.4408 - val_auc_4: 0.8152 Epoch 2/10 145/145 [==============================] - 20s 140ms/step - loss: 0.7630 - root_mean_squared_error: 0.4290 - auc_4: 0.8217 - val_loss: 0.5655 - val_root_mean_squared_error: 0.4207 - val_auc_4: 0.8266 Epoch 3/10 145/145 [==============================] - 20s 139ms/step - loss: 0.5338 - root_mean_squared_error: 0.4155 - auc_4: 0.8306 - val_loss: 0.5195 - val_root_mean_squared_error: 0.4123 - val_auc_4: 0.8321 Epoch 4/10 145/145 [==============================] - 20s 139ms/step - loss: 0.5124 - root_mean_squared_error: 0.4093 - auc_4: 0.8351 - val_loss: 0.5094 - val_root_mean_squared_error: 0.4082 - val_auc_4: 0.8353 Epoch 5/10 145/145 [==============================] - 20s 140ms/step - loss: 0.5048 - root_mean_squared_error: 0.4060 - auc_4: 0.8380 - val_loss: 0.5040 - val_root_mean_squared_error: 0.4058 - val_auc_4: 0.8375 Epoch 6/10 145/145 [==============================] - 20s 141ms/step - loss: 0.5003 - root_mean_squared_error: 0.4040 - auc_4: 0.8399 - val_loss: 0.5006 - val_root_mean_squared_error: 0.4043 - val_auc_4: 0.8390 Epoch 7/10 145/145 [==============================] - 21s 145ms/step - loss: 0.4974 - root_mean_squared_error: 0.4027 - auc_4: 0.8413 - val_loss: 0.4984 - val_root_mean_squared_error: 0.4033 - val_auc_4: 0.8401 Epoch 8/10 145/145 [==============================] - 21s 142ms/step - loss: 0.4954 - root_mean_squared_error: 0.4018 - auc_4: 0.8424 - val_loss: 0.4968 - val_root_mean_squared_error: 0.4026 - val_auc_4: 0.8409 Epoch 9/10 145/145 [==============================] - 21s 142ms/step - loss: 0.4939 - root_mean_squared_error: 0.4011 - auc_4: 0.8432 - val_loss: 0.4957 - val_root_mean_squared_error: 0.4021 - val_auc_4: 0.8416 Epoch 10/10 145/145 [==============================] - 21s 141ms/step - loss: 0.4928 - root_mean_squared_error: 0.4006 - auc_4: 0.8438 - val_loss: 0.4948 - val_root_mean_squared_error: 0.4016 - val_auc_4: 0.8421 ###Markdown Test ###Code pred = model.predict(input_test) pred_df = pd.DataFrame(pred) pred_df.reset_index(inplace=True) pred_df.columns = ['Id','Predicted'] pred_df.to_csv('my_submission.csv',index=False) ###Output _____no_output_____
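###Markdown Validation check (added note): the cells above go straight from training to writing the Kaggle submission file. As an optional sanity check, one might also score the trained DeepFM model on the held-out validation split with the sklearn metrics already imported at the top of the notebook (`log_loss`, `roc_auc_score`). This is a minimal sketch, assuming `model`, `input_val` and `output_val` from the cells above; it is not part of the original submission flow. ###Code
# Optional sanity check (sketch): score the trained model on the validation split.
# Assumes `model`, `input_val` and `output_val` defined in the cells above.
val_pred = model.predict(input_val, batch_size=16000).ravel()
print(f"Validation log loss: {log_loss(output_val, val_pred):.4f}")
print(f"Validation ROC AUC:  {roc_auc_score(output_val, val_pred):.4f}")
###Output _____no_output_____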
python_data_science_toolbox_part_2/3_bringing_it_all_together.ipynb
###Markdown Dictionaries for data scienceFor this exercise, you'll use what you've learned about the `zip()` function and combine two lists into a dictionary.These lists are actually extracted from a [bigger dataset file of world development indicators from the World Bank](http://data.worldbank.org/data-catalog/world-development-indicators). For pedagogical purposes, we have pre-processed this dataset into the lists that you'll be working with.The first list `feature_names` contains header names of the dataset and the second list `row_vals` contains actual values of a row from the dataset, corresponding to each of the header names.Instructions1. Create a zip object by calling `zip()` and passing to it `feature_names` and `row_vals`. Assign the result to `zipped_lists`.2. Create a dictionary from the `zipped_lists` zip object by calling `dict()` with `zipped_lists`. Assign the resulting dictionary to `rs_dict`. ###Code feature_names = ['CountryName', 'CountryCode', 'IndicatorName', 'IndicatorCode', 'Year', 'Value'] row_vals = ['Arab World', 'ARB', 'Adolescent fertility rate (births per 1,000 women ages 15-19)', 'SP.ADO.TFRT', '1960', '133.56090740552298'] # Zip lists: zipped_lists zipped_lists = zip(feature_names, row_vals) # Create a dictionary: rs_dict rs_dict = dict(zipped_lists) # Print the dictionary print(rs_dict) ###Output {'CountryName': 'Arab World', 'CountryCode': 'ARB', 'IndicatorName': 'Adolescent fertility rate (births per 1,000 women ages 15-19)', 'IndicatorCode': 'SP.ADO.TFRT', 'Year': '1960', 'Value': '133.56090740552298'} ###Markdown Writing a function to help youSuppose you needed to repeat the same process done in the previous exercise to many, many rows of data. Rewriting your code again and again could become very tedious, repetitive, and unmaintainable.In this exercise, you will create a function to house the code you wrote earlier to make things easier and much more concise. Why? This way, you only need to call the function and supply the appropriate lists to create your dictionaries! Again, the lists `feature_names` and `row_vals` are preloaded and these contain the header names of the dataset and actual values of a row from the dataset, respectively.Instructions1. Define the function `lists2dict()` with two parameters: first is `list1` and second is `list2`.2. Return the resulting dictionary `rs_dict` in `lists2dict()`.3. Call the `lists2dict()` function with the arguments `feature_names` and `row_vals`. Assign the result of the function call to `rs_fxn`. ###Code # Define lists2dict() def lists2dict(list1, list2): """Return a dictionary where list1 provides the keys and list2 provides the values.""" # Zip lists: zipped_lists zipped_lists = zip(list1, list2) # Create a dictionary: rs_dict rs_dict = dict(zipped_lists) # Return the dictionary return rs_dict # Call lists2dict: rs_fxn rs_fxn = lists2dict(feature_names, row_vals) # Print rs_fxn print(rs_fxn) ###Output {'CountryName': 'Arab World', 'CountryCode': 'ARB', 'IndicatorName': 'Adolescent fertility rate (births per 1,000 women ages 15-19)', 'IndicatorCode': 'SP.ADO.TFRT', 'Year': '1960', 'Value': '133.56090740552298'} ###Markdown Using a list comprehensionThis time, you're going to use the `lists2dict()` function you defined in the last exercise to turn a bunch of lists into a list of dictionaries with the help of a list comprehension.The `lists2dict()` function has already been preloaded, together with a couple of lists, `feature_names` and `row_lists`. 
`feature_names` contains the header names of the World Bank dataset and `row_lists` is a list of lists, where each sublist is a list of actual values of a row from the dataset.Your goal is to use a list comprehension to generate a list of dicts, where the _keys_ are the header names and the _values_ are the row entries.Instructions1. Inspect the contents of `row_lists` by printing the first two lists in `row_lists`.2. Create a list comprehension that generates a dictionary using `lists2dict()` for each sublist in `row_lists`. The keys are from the `feature_names` list and the values are the row entries in `row_lists`. Use `sublist` as your iterator variable and assign the resulting list of dictionaries to `list_of_dicts`.3. Look at the first two dictionaries in `list_of_dicts` by printing them out. ###Code row_lists = [['Arab World', 'ARB', 'Adolescent fertility rate (births per 1,000 women ages 15-19)', 'SP.ADO.TFRT', '1960', '133.56090740552298'], ['Arab World', 'ARB', 'Age dependency ratio (% of working-age population)', 'SP.POP.DPND', '1960', '87.7976011532547'], ['Arab World', 'ARB', 'Age dependency ratio, old (% of working-age population)', 'SP.POP.DPND.OL', '1960', '6.634579191565161'], ['Arab World', 'ARB', 'Age dependency ratio, young (% of working-age population)', 'SP.POP.DPND.YG', '1960', '81.02332950839141'], ['Arab World', 'ARB', 'Arms exports (SIPRI trend indicator values)', 'MS.MIL.XPRT.KD', '1960', '3000000.0'], ['Arab World', 'ARB', 'Arms imports (SIPRI trend indicator values)', 'MS.MIL.MPRT.KD', '1960', '538000000.0'], ['Arab World', 'ARB', 'Birth rate, crude (per 1,000 people)', 'SP.DYN.CBRT.IN', '1960', '47.697888095096395'], ['Arab World', 'ARB', 'CO2 emissions (kt)', 'EN.ATM.CO2E.KT', '1960', '59563.9892169935'], ['Arab World', 'ARB', 'CO2 emissions (metric tons per capita)', 'EN.ATM.CO2E.PC', '1960', '0.6439635478877049'], ['Arab World', 'ARB', 'CO2 emissions from gaseous fuel consumption (% of total)', 'EN.ATM.CO2E.GF.ZS', '1960', '5.041291753975099'], ['Arab World', 'ARB', 'CO2 emissions from liquid fuel consumption (% of total)', 'EN.ATM.CO2E.LF.ZS', '1960', '84.8514729446567'], ['Arab World', 'ARB', 'CO2 emissions from liquid fuel consumption (kt)', 'EN.ATM.CO2E.LF.KT', '1960', '49541.707291032304'], ['Arab World', 'ARB', 'CO2 emissions from solid fuel consumption (% of total)', 'EN.ATM.CO2E.SF.ZS', '1960', '4.72698138789597'], ['Arab World', 'ARB', 'Death rate, crude (per 1,000 people)', 'SP.DYN.CDRT.IN', '1960', '19.7544519237187'], ['Arab World', 'ARB', 'Fertility rate, total (births per woman)', 'SP.DYN.TFRT.IN', '1960', '6.92402738655897'], ['Arab World', 'ARB', 'Fixed telephone subscriptions', 'IT.MLT.MAIN', '1960', '406833.0'], ['Arab World', 'ARB', 'Fixed telephone subscriptions (per 100 people)', 'IT.MLT.MAIN.P2', '1960', '0.6167005703199'], ['Arab World', 'ARB', 'Hospital beds (per 1,000 people)', 'SH.MED.BEDS.ZS', '1960', '1.9296220724398703'], ['Arab World', 'ARB', 'International migrant stock (% of population)', 'SM.POP.TOTL.ZS', '1960', '2.9906371279862403'], ['Arab World', 'ARB', 'International migrant stock, total', 'SM.POP.TOTL', '1960', '3324685.0']] # Print the first two lists in row_lists print(row_lists[0]) print(row_lists[1]) # Turn list of lists into list of dicts: list_of_dicts list_of_dicts = [lists2dict(feature_names, sublist) for sublist in row_lists] # Print the first two dictionaries in list_of_dicts print(list_of_dicts[0]) print(list_of_dicts[1]) ###Output ['Arab World', 'ARB', 'Adolescent fertility rate (births per 1,000 women ages 
15-19)', 'SP.ADO.TFRT', '1960', '133.56090740552298'] ['Arab World', 'ARB', 'Age dependency ratio (% of working-age population)', 'SP.POP.DPND', '1960', '87.7976011532547'] {'CountryName': 'Arab World', 'CountryCode': 'ARB', 'IndicatorName': 'Adolescent fertility rate (births per 1,000 women ages 15-19)', 'IndicatorCode': 'SP.ADO.TFRT', 'Year': '1960', 'Value': '133.56090740552298'} {'CountryName': 'Arab World', 'CountryCode': 'ARB', 'IndicatorName': 'Age dependency ratio (% of working-age population)', 'IndicatorCode': 'SP.POP.DPND', 'Year': '1960', 'Value': '87.7976011532547'} ###Markdown Turning this all into a DataFrameYou've zipped lists together, created a function to house your code, and even used the function in a list comprehension to generate a list of dictionaries. That was a lot of work and you did a great job!You will now use of all these to convert the list of dictionaries into a pandas DataFrame. You will see how convenient it is to generate a DataFrame from dictionaries with the `DataFrame()` function from the pandas package.The `lists2dict()` function, `feature_names` list, and `row_lists` list have been preloaded for this exercise.Go for it!Instructions1. To use the `DataFrame()` function you need, first import the pandas package with the alias `pd`.2. Create a DataFrame from the list of dictionaries in `list_of_dicts` by calling `pd.DataFrame()`. Assign the resulting DataFrame to `df`.3. Inspect the contents of `df` printing the head of the DataFrame. Head of the DataFrame `df` can be accessed by calling `df.head()`. ###Code # Import the pandas package import pandas as pd # Turn list of lists into list of dicts: list_of_dicts list_of_dicts = [lists2dict(feature_names, sublist) for sublist in row_lists] # Turn list of dicts into a DataFrame: df df = pd.DataFrame(list_of_dicts) # Print the head of the DataFrame df.head() ###Output _____no_output_____ ###Markdown Processing data in chunks (1)Sometimes, data sources can be so large in size that storing the entire dataset in memory becomes too resource-intensive. In this exercise, you will process the first 1000 rows of a file line by line, to create a dictionary of the counts of how many times each country appears in a column in the dataset.The csv file `'world_dev_ind.csv'` is in your current directory for your use. To begin, you need to open a connection to this file using what is known as a context manager. For example, the command `with open('datacamp.csv') as datacamp` binds the csv file `'datacamp.csv'` as `datacamp` in the context manager. Here, the `with` statement is the context manager, and its purpose is to ensure that resources are efficiently allocated when opening a connection to a file.If you'd like to learn more about context managers, refer to the [DataCamp course on Importing Data in Python](https://www.datacamp.com/courses/importing-data-in-python-part-1).Instructions1. Use `open()` to bind the csv file `'world_dev_ind.csv'` as file in the context manager.2. Complete the `for` loop so that it iterates **1000** times to perform the loop body and process only the first 1000 rows of data of the file. 
###Code # Open a connection to the file with open('world_dev_ind.csv') as file: # Skip the column names file.readline() # Initialize an empty dictionary: counts_dict counts_dict = {} # Process only the first 1000 rows for j in range(0, 1000): # Split the current line into a list: line line = file.readline().split(',') # Get the value for the first column: first_col first_col = line[0] # If the column value is in the dict, increment its value if first_col in counts_dict.keys(): counts_dict[first_col] += 1 # Else, add to the dict and set value to 1 else: counts_dict[first_col] = 1 # Print the resulting dictionary counts_dict ###Output _____no_output_____ ###Markdown Writing a generator to load data in chunks (2)In the previous exercise, you processed a file line by line for a given number of lines. What if, however, you want to do this for the entire file?In this case, it would be useful to use **generators**. Generators allow users to [lazily evaluate data](http://www.blog.pythonlibrary.org/2014/01/27/python-201-an-intro-to-generators/). This concept of _lazy evaluation_ is useful when you have to deal with very large datasets because it lets you generate values in an efficient manner by _yielding_ only chunks of data at a time instead of the whole thing at once.In this exercise, you will define a generator function `read_large_file()` that produces a generator object which yields a single line from a file each time `next()` is called on it. The csv file `'world_dev_ind.csv'` is in your current directory for your use.Note that when you open a connection to a file, the resulting file object is already a generator! So out in the wild, you won't have to explicitly create generator objects in cases such as this. However, for pedagogical reasons, we are having you practice how to do this here with the `read_large_file()` function. Go for it!Instructions1. In the function `read_large_file()`, read a line from `file_object` by using the method `readline()`. Assign the result to `data`.2. In the function `read_large_file()`, `yield` the line read from the file `data`.3. In the context manager, create a generator object `gen_file` by calling your generator function `read_large_file()` and passing `file` to it.4. Print the first three lines produced by the generator object `gen_file` using `next()`. ###Code # Define read_large_file() def read_large_file(file_object): """A generator function to read a large file lazily.""" # Loop indefinitely until the end of the file while True: # Read a line from the file: data data = file_object.readline() # Break if this is the end of the file if not data: break # Yield the line of data yield data # Open a connection to the file with open('world_dev_ind.csv') as file: # Create a generator object for the file: gen_file gen_file = read_large_file(file) # Print the first three lines of the file print(next(gen_file)) print(next(gen_file)) print(next(gen_file)) ###Output CountryName,CountryCode,Year,Total Population,Urban population (% of total) Arab World,ARB,1960,92495902.0,31.285384211605397 Caribbean small states,CSS,1960,4190810.0,31.5974898513652 ###Markdown Writing a generator to load data in chunks (3)Great! You've just created a generator function that you can use to help you process large files.Now let's use your generator function to process the World Bank dataset like you did previously. You will process the file line by line, to create a dictionary of the counts of how many times each country appears in a column in the dataset. 
For this exercise, however, you won't process just 1000 rows of data, you'll process the entire dataset!The generator function `read_large_file()` and the csv file `'world_dev_ind.csv'` are preloaded and ready for your use. Go for it!Instructions1. Bind the file `'world_dev_ind.csv'` to file in the context manager with `open()`.2. Complete the `for` loop so that it iterates over the generator from the call to `read_large_file()` to process all the rows of the file. ###Code # Initialize an empty dictionary: counts_dict counts_dict = {} # Open a connection to the file with open('world_dev_ind.csv') as file: # Iterate over the generator from read_large_file() for line in read_large_file(file): row = line.split(',') first_col = row[0] if first_col in counts_dict.keys(): counts_dict[first_col] += 1 else: counts_dict[first_col] = 1 # Print counts_dict ###Output _____no_output_____ ###Markdown Writing an iterator to load data in chunks (1)Another way to read data too large to store in memory in chunks is to read the file in as DataFrames of a certain length, say, 100. For example, with the pandas package (imported as `pd`), you can do `pd.read_csv(filename, chunksize=100)`. This creates an iterable **reader object**, which means that you can use `next()` on it.In this exercise, you will read a file in small DataFrame chunks with `read_csv()`. You're going to use the World Bank Indicators data `'ind_pop.csv'`, available in your current directory, to look at the urban population indicator for numerous countries and years.Instructions1. Use `pd.read_csv()` to read in `'ind_pop.csv'` in chunks of size 10. Assign the result to `df_reader`.2. Print the first two chunks from `df_reader`. ###Code # Import the pandas package import pandas as pd # Initialize reader object: df_reader df_reader = pd.read_csv('ind_pop.csv', chunksize=10) # Print two chunks print(next(df_reader)) print(next(df_reader)) ###Output _____no_output_____ ###Markdown Writing an iterator to load data in chunks (2)In the previous exercise, you used `read_csv()` to read in DataFrame chunks from a large dataset. In this exercise, you will read in a file using a bigger DataFrame chunk size and then process the data from the first chunk.To process the data, you will create another DataFrame composed of only the rows from a specific country. You will then zip together two of the columns from the new DataFrame, `'Total Population'` and `'Urban population (% of total)'`. Finally, you will create a list of tuples from the zip object, where each tuple is composed of a value from each of the two columns mentioned.You're going to use the data from `'ind_pop_data.csv'`, available in your current directory. Pandas has been imported as `pd`.Instructions1. Use `pd.read_csv()` to read in the file in `'ind_pop_data.csv'` in chunks of size `1000`. Assign the result to `urb_pop_reader`.2. Get the **first** DataFrame chunk from the iterable `urb_pop_reader` and assign this to `df_urb_pop`.3. Select only the rows of `df_urb_pop` that have a `'CountryCode'` of `'CEB'`. To do this, compare whether `df_urb_pop['CountryCode']` is equal to `'CEB'` within the square brackets in `df_urb_pop[____]`.4. Using `zip()`, zip together the `'Total Population'` and `'Urban population (% of total)'` columns of `df_pop_ceb`. Assign the resulting zip object to `pops`. 
###Code # Initialize reader object: urb_pop_reader urb_pop_reader = pd.read_csv('ind_pop_data.csv', chunksize=1000) # Get the first DataFrame chunk: df_urb_pop df_urb_pop = next(urb_pop_reader) # Check out the head of the DataFrame print(df_urb_pop.head()) # Check out specific country: df_pop_ceb df_pop_ceb = df_urb_pop[df_urb_pop['CountryCode'] == 'CEB'] # Zip DataFrame columns of interest: pops pops = zip(df_pop_ceb['Total Population'], df_pop_ceb['Urban population (% of total)']) # Turn zip object into list: pops_list pops_list = list(pops) # Print pops_list print(pops_list) ###Output _____no_output_____ ###Markdown Writing an iterator to load data in chunks (3)You're getting used to reading and processing data in chunks by now. Let's push your skills a little further by adding a column to a DataFrame.Starting from the code of the previous exercise, you will be using a _list comprehension_ to create the values for a new column `'Total Urban Population'` from the list of tuples that you generated earlier. Recall from the previous exercise that the first and second elements of each tuple consist of, respectively, values from the columns `'Total Population'` and `'Urban population (% of total)'`. The values in this new column `'Total Urban Population'`, therefore, are the product of the first and second element in each tuple. Furthermore, because the 2nd element is a percentage, you need to divide the entire result by `100`, or alternatively, multiply it by `0.01`.You will also plot the data from this new column to create a visualization of the urban population data.The packages `pandas` and `matplotlib.pyplot` have been imported as `pd` and `plt` respectively for your use.Instructions1. Write a list comprehension to generate a list of values from `pops_list` for the new column `'Total Urban Population'`. The _output expression_ should be the product of the first and second element in each tuple in `pops_list`. Because the 2nd element is a percentage, you also need to either multiply the result by `0.01` or divide it by `100`. In addition, note that the column `'Total Urban Population'` should only be able to take on integer values. To ensure this, make sure you cast the _output expression_ to an integer with `int()`.2. Create a _scatter_ plot where the x-axis are values from the `'Year'` column and the y-axis are values from the `'Total Urban Population'` column. ###Code # Import packages import pandas as pd import matplotlib.pyplot as plt # Code from previous exercise urb_pop_reader = pd.read_csv('ind_pop_data.csv', chunksize=1000) df_urb_pop = next(urb_pop_reader) df_pop_ceb = df_urb_pop[df_urb_pop['CountryCode'] == 'CEB'] pops = zip(df_pop_ceb['Total Population'], df_pop_ceb['Urban population (% of total)']) pops_list = list(pops) # Use list comprehension to create new DataFrame column 'Total Urban Population' df_pop_ceb['Total Urban Population'] = [int(tup[0] * tup[1] * 0.01) for tup in pops_list] # Plot urban population data df_pop_ceb.plot(kind='scatter', x='Year', y='Total Urban Population') plt.show() ###Output _____no_output_____ ###Markdown Writing an iterator to load data in chunks (4)In the previous exercises, you've only processed the data from the first DataFrame chunk. This time, you will aggregate the results over all the DataFrame chunks in the dataset. This basically means you will be processing the entire dataset now. 
This is neat because you're going to be able to process the entire large dataset by just working on smaller pieces of it!You're going to use the data from `'ind_pop_data.csv'`, available in your current directory.Instructions1. Initialize an empty DataFrame `data` using `pd.DataFrame()`.2. In the `for` loop, iterate over `urb_pop_reader` to be able to process all the DataFrame chunks in the dataset.3. Using the method `append()` of the DataFrame `data`, append `df_pop_ceb` to `data`. ###Code # Initialize reader object: urb_pop_reader urb_pop_reader = pd.read_csv('ind_pop_data.csv', chunksize=1000) # Initialize empty DataFrame: data data = pd.DataFrame() # Iterate over each DataFrame chunk for df_urb_pop in urb_pop_reader: # Check out specific country: df_pop_ceb df_pop_ceb = df_urb_pop[df_urb_pop['CountryCode'] == 'CEB'] # Zip DataFrame columns of interest: pops pops = zip(df_pop_ceb['Total Population'], df_pop_ceb['Urban population (% of total)']) # Turn zip object into list: pops_list pops_list = list(pops) # Use list comprehension to create new DataFrame column 'Total Urban Population' df_pop_ceb['Total Urban Population'] = [int(tup[0] * tup[1] * 0.01) for tup in pops_list] # Append DataFrame chunk to data: data data = data.append(df_pop_ceb) # Plot urban population data data.plot(kind='scatter', x='Year', y='Total Urban Population') plt.show() ###Output _____no_output_____ ###Markdown Writing an iterator to load data in chunks (5)This is the last leg. You've learned a lot about processing a large dataset in chunks. In this last exercise, you will put all the code for processing the data into a single function so that you can reuse the code without having to rewrite the same things all over again.You're going to define the function `plot_pop()` which takes two arguments: the filename of the file to be processed, and the country code of the rows you want to process in the dataset.Because all of the previous code you've written in the previous exercises will be housed in `plot_pop()`, calling the function already does the following:- Loading of the file chunk by chunk,- Creating the new column of urban population values, and- Plotting the urban population data.That's a lot of work, but the function now makes it convenient to repeat the same process for whatever file and country code you want to process and visualize!You're going to use the data from `'ind_pop_data.csv'`, available in your current directory.After you are done, take a moment to look at the plots and reflect on the new skills you have acquired. The journey doesn't end here! If you have enjoyed working with this data, you can continue exploring it using the pre-processed version available on [Kaggle](https://www.kaggle.com/worldbank/world-development-indicators).Instructions1. Define the function `plot_pop()` that has two arguments: first is `filename` for the file to process and second is `country_code` for the country to be processed in the dataset.2. Call `plot_pop()` to process the data for country code `'CEB'` in the file `'ind_pop_data.csv'`.3. Call `plot_pop()` to process the data for country code `'ARB'` in the file `'ind_pop_data.csv'`.
###Code # Define plot_pop() def plot_pop(filename, country_code): # Initialize reader object: urb_pop_reader urb_pop_reader = pd.read_csv(filename, chunksize=1000) # Initialize empty DataFrame: data data = pd.DataFrame() # Iterate over each DataFrame chunk for df_urb_pop in urb_pop_reader: # Check out specific country: df_pop_ceb df_pop_ceb = df_urb_pop[df_urb_pop['CountryCode'] == country_code] # Zip DataFrame columns of interest: pops pops = zip(df_pop_ceb['Total Population'], df_pop_ceb['Urban population (% of total)']) # Turn zip object into list: pops_list pops_list = list(pops) # Use list comprehension to create new DataFrame column 'Total Urban Population' df_pop_ceb['Total Urban Population'] = [int(tup[0] * tup[1] * 0.01) for tup in pops_list] # Append DataFrame chunk to data: data data = data.append(df_pop_ceb) # Plot urban population data data.plot(kind='scatter', x='Year', y='Total Urban Population') plt.show() # Set the filename: fn fn = 'ind_pop_data.csv' # Call plot_pop for country code 'CEB' plot_pop(fn, 'CEB') # Call plot_pop for country code 'ARB' plot_pop(fn, 'ARB') ###Output _____no_output_____
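###Markdown A note on `DataFrame.append` (added): the chunk-accumulation pattern above builds the result with `data = data.append(df_pop_ceb)`, which was deprecated in later pandas releases and removed in pandas 2.0. The sketch below shows the same aggregation written with `pd.concat`, collecting the chunks in a list and concatenating once at the end. It assumes the same `'ind_pop_data.csv'` file and column names used above; `plot_pop_concat` is just an illustrative name. ###Code
# Same chunked aggregation as plot_pop(), but without DataFrame.append
# (removed in pandas 2.0): collect chunks in a list, then concatenate once.
def plot_pop_concat(filename, country_code):
    urb_pop_reader = pd.read_csv(filename, chunksize=1000)
    chunks = []
    for df_urb_pop in urb_pop_reader:
        # Keep only the rows for the requested country
        df_pop = df_urb_pop[df_urb_pop['CountryCode'] == country_code].copy()
        # Total urban population = total population * urban share (as a percentage)
        df_pop['Total Urban Population'] = (
            df_pop['Total Population'] * df_pop['Urban population (% of total)'] * 0.01
        ).astype(int)
        chunks.append(df_pop)
    data = pd.concat(chunks, ignore_index=True)
    data.plot(kind='scatter', x='Year', y='Total Urban Population')
    plt.show()

# Usage mirrors the original function: plot_pop_concat('ind_pop_data.csv', 'CEB')
###Output _____no_output_____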
preprocessing/json_to_csv.ipynb
###Markdown Choose needed columns from JSON and save as CSV ###Code # imports needed by this notebook (os, json, SparkSession) import os import json from pyspark.sql import SparkSession def get_spark(name="Recsys", cores=2, local_dir="/tmp/spark-temp") -> SparkSession: # make sure the local_dir exists in your file system # default settings work for linux spark = (SparkSession .builder .appName(name) .master("local[{}]".format(cores)) .config("spark.memory.offHeap.enabled", True) .config("spark.memory.offHeap.size", "16g") .config("spark.local.dir", local_dir) .getOrCreate()) spark.sparkContext.setCheckpointDir(os.path.join(local_dir, "chkpts/")) return spark spark = get_spark(cores=4) sc = spark.sparkContext df = spark.read.json("./yelp_dataset/review.json") df.printSchema() df = df.select("business_id", "cool", "date", "funny", "review_id", "stars", "useful", "user_id") df.coalesce(1).write.option("header", "true").csv("ratings.csv") ###Output _____no_output_____ ###Markdown Get Business category info using Yelp API ###Code import requests from urllib.error import HTTPError from urllib.parse import quote from urllib.parse import urlencode def request(host, path, api_key, url_params=None): """Given your API_KEY, send a GET request to the API. Args: host (str): The domain host of the API. path (str): The path of the API after the domain. API_KEY (str): Your API Key. url_params (dict): An optional set of query parameters in the request. Returns: dict: The JSON response from the request. Raises: HTTPError: An error occurs from the HTTP request. """ url_params = url_params or {} url = '{0}{1}'.format(host, quote(path.encode('utf8'))) headers = { 'Authorization': 'Bearer %s' % api_key, } print(u'Querying {0} ...'.format(url)) response = requests.request('GET', url, headers=headers, params=url_params) return response.json() key="HwkCu8bbu6Lv43rqp__sk9Z36evS2vSApDUzNSPHb9fguOLzX7G8bOMI2GFZOuVee9mlkY1y-0xif98nQgVTRGcTj9VTjG-BzxQDrdhKrZRbP0fsFXwy7zzCVKHZXXYx" cat_dict = request(host="https://api.yelp.com/v3", path="/categories", api_key=key)["categories"] alias_to_title = {cat["alias"]: cat["title"] for cat in cat_dict} import networkx as nx G = nx.DiGraph() for cat in cat_dict: parent = cat["parent_aliases"] parent_title = alias_to_title[parent[0]] if parent else None if parent_title: G.add_edge(parent_title, cat["title"]) zero_in_degree_nodes = filter(lambda tup: tup[1] == 0, G.in_degree(G.node())) top_categories = list(map(lambda tup: tup[0], zero_in_degree_nodes)) top_categories with open("../../yelp_dataset/top_categories.json", "w") as f: f.write(json.dumps(top_categories)) ###Output _____no_output_____
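###Markdown Possible follow-up (added sketch): the directed graph `G` built above encodes parent-to-child edges between category titles, so any category can be rolled up to its top-level parent by walking predecessor edges until a member of `top_categories` is reached. The helper below is an illustration only, assuming `G` and `top_categories` from the cells above; `find_top_category` is a hypothetical name, not part of the Yelp API. ###Code
# Roll a category title up to its top-level parent by walking parent edges in G.
# Assumes G (edges parent_title -> child_title) and top_categories from above.
def find_top_category(title):
    current = title
    while current not in top_categories:
        if current not in G:
            # Title not present in the graph at all: return it unchanged
            break
        parents = list(G.predecessors(current))
        if not parents:
            # No recorded parent: treat the title as a root
            break
        # A category can have multiple parents; this sketch follows the first one
        current = parents[0]
    return current

print(find_top_category("Sushi Bars"))  # expected to roll up to a top-level title such as "Restaurants"
###Output _____no_output_____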
notebooks/community/ml_ops/stage6/get_started_with_matching_engine_twotowers.ipynb
###Markdown Notebook is a revised version of notebook from [Amy Wu and Shen Zhimo](https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/official/matching_engine/two-tower-model-introduction.ipynb) E2E ML on GCP: MLOps stage 6 : serving: get started with Vertex AI Matching Engine and Two Towers builtin algorithm Run in Colab View on GitHub Run in Vertex Workbench OverviewThis tutorial demonstrates how to use the `Vertex AI Two-Tower` built-in algorithm with `Vertex AI Matching Engine`. DatasetThis tutorial uses the `movielens_100k sample dataset` in the public bucket `gs://cloud-samples-data/vertex-ai/matching-engine/two-tower`, which was generated from the [MovieLens movie rating dataset](https://grouplens.org/datasets/movielens/100k/). For this tutorial, the data only includes the user id feature for users, and the movie id and movie title features for movies. In this example, the user is the query object and the movie is the candidate object, and each training example in the dataset contains a user and a movie they rated (we only include positive ratings in the dataset). The two-tower model will embed the user and the movie in the same embedding space, so that given a user, the model will recommend movies it thinks the user will like. ObjectiveIn this notebook, you will learn how to use the `Two-Tower` builtin algorithms for generating embeddings for a dataset, for use with generating an `Matching Engine Index`, with the `Vertex AI Matching Engine` service.This tutorial uses the following Google Cloud ML services: - `Vertex AI Two-Towers` builtin algorithm- `Vertex AI Matching Engine`- `Vertex AI Batch Prediction`The tutorial covers the following steps:1. Train the `Two-Tower` algorithm to generate embeddings (encoder) for the dataset.2. Hyperparameter tune the trained `Two-Tower` encoder.3. Make example predictions (embeddings) from then trained encoder.4. Generate embeddings using the trained `Two-Tower` builtin algorithm.5. Store embeddings to format supported by `Matching Engine`.6. Create a `Matching Engine Index` for the embeddings.7. Deploy the `Matching Engine Index` to a `Index Endpoint`.8. Make a matching engine prediction request. Costs This tutorial uses billable components of Google Cloud:* Vertex AI* Cloud StorageLearn about [Vertex AIpricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storagepricing](https://cloud.google.com/storage/pricing), and use the [PricingCalculator](https://cloud.google.com/products/calculator/)to generate a cost estimate based on your projected usage. Set up your local development environment**If you are using Colab or Google Cloud Notebooks**, your environment already meetsall the requirements to run this notebook. You can skip this step. **Otherwise**, make sure your environment meets this notebook's requirements.You need the following:* The Google Cloud SDK* Git* Python 3* virtualenv* Jupyter notebook running in a virtual environment with Python 3The Google Cloud guide to [Setting up a Python developmentenvironment](https://cloud.google.com/python/setup) and the [Jupyterinstallation guide](https://jupyter.org/install) provide detailed instructionsfor meeting these requirements. The following steps provide a condensed set ofinstructions:1. [Install and initialize the Cloud SDK.](https://cloud.google.com/sdk/docs/)1. [Install Python 3.](https://cloud.google.com/python/setupinstalling_python)1. 
[Install virtualenv](https://cloud.google.com/python/setupinstalling_and_using_virtualenv) and create a virtual environment that uses Python 3. Activate the virtual environment.1. To install Jupyter, run `pip3 install jupyter` on thecommand-line in a terminal shell.1. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell.1. Open this notebook in the Jupyter Notebook Dashboard. InstallationInstall the packages required for executing this notebook. ###Code import os # The Vertex AI Workbench Notebook product has specific requirements IS_WORKBENCH_NOTEBOOK = os.getenv("DL_ANACONDA_HOME") IS_USER_MANAGED_WORKBENCH_NOTEBOOK = os.path.exists( "/opt/deeplearning/metadata/env_version" ) # Vertex AI Notebook requires dependencies to be installed with '--user' USER_FLAG = "" if IS_WORKBENCH_NOTEBOOK: USER_FLAG = "--user" ! pip3 install {USER_FLAG} --upgrade tensorflow -q ! pip3 install {USER_FLAG} --upgrade google-cloud-aiplatform tensorboard-plugin-profile -q ! gcloud components update --quiet ###Output _____no_output_____ ###Markdown Restart the kernelAfter you install the additional packages, you need to restart the notebook kernel so it can find the packages. ###Code # Automatically restart kernel after installs import os if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) ###Output _____no_output_____ ###Markdown Before you begin Set up your Google Cloud project**The following steps are required, regardless of your notebook environment.**1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).1. [Enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).1. If you are running this notebook locally, you will need to install the [Cloud SDK](https://cloud.google.com/sdk).1. Enter your project ID in the cell below. Then run the cell to make sure theCloud SDK uses the right project for all the commands in this notebook.**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands. Set your project ID**If you do not know your project ID**, you may be able to get your project ID using `gcloud`. ###Code PROJECT_ID = "[your-project-id]" # @param {type:"string"} if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]": # Get your GCP project id from gcloud shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID:", PROJECT_ID) ! gcloud config set project $PROJECT_ID ###Output _____no_output_____ ###Markdown Get your project numberNow that the project ID is set, you get your corresponding project number. ###Code shell_output = ! gcloud projects list --filter="PROJECT_ID:'{PROJECT_ID}'" --format='value(PROJECT_NUMBER)' PROJECT_NUMBER = shell_output[0] print("Project Number:", PROJECT_NUMBER) ###Output _____no_output_____ ###Markdown RegionYou can also change the `REGION` variable, which is used for operationsthroughout the rest of this notebook. Below are regions supported for Vertex AI. 
We recommend that you choose the region closest to you.- Americas: `us-central1`- Europe: `europe-west4`- Asia Pacific: `asia-east1`You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.Learn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations). ###Code REGION = "[your-region]" # @param {type: "string"} if REGION == "[your-region]": REGION = "us-central1" ###Output _____no_output_____ ###Markdown TimestampIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial. ###Code from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") ###Output _____no_output_____ ###Markdown Authenticate your Google Cloud account**If you are using Vertex AI Workbench Notebooks**, your environment is alreadyauthenticated. Skip this step. **If you are using Colab**, run the cell below and follow the instructionswhen prompted to authenticate your account via oAuth.**Otherwise**, follow these steps:1. In the Cloud Console, go to the [**Create service account key** page](https://console.cloud.google.com/apis/credentials/serviceaccountkey).2. Click **Create service account**.3. In the **Service account name** field, enter a name, and click **Create**.4. In the **Grant this service account access to project** section, click the **Role** drop-down list. Type "Vertex AI"into the filter box, and select **Vertex AI Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.5. Click *Create*. A JSON file that contains your key downloads to yourlocal environment.6. Enter the path to your service account key as the`GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell. ###Code # If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your GCP account. This provides access to your # Cloud Storage bucket and lets you submit training jobs and prediction # requests. import os import sys # If on Vertex AI Workbench, then don't execute this code IS_COLAB = False if not os.path.exists("/opt/deeplearning/metadata/env_version") and not os.getenv( "DL_ANACONDA_HOME" ): if "google.colab" in sys.modules: IS_COLAB = True from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this notebook locally, replace the string below with the # path to your service account key and run this cell to authenticate your GCP # account. elif not os.getenv("IS_TESTING"): %env GOOGLE_APPLICATION_CREDENTIALS '' ###Output _____no_output_____ ###Markdown Create a Cloud Storage bucket**The following steps are required, regardless of your notebook environment.**Before you submit a training job for the two-tower model, you need to upload your training data and schema to Cloud Storage. Vertex AI trains the model using this input data. In this tutorial, the Two-Tower built-in algorithm also saves the trained model that results from your job in the same bucket. Using this model artifact, you can then create Vertex AI model and endpoint resources in order to serve online predictions.Set the name of your Cloud Storage bucket below. It must be unique across allCloud Storage buckets. 
###Code BUCKET_NAME = "[your-bucket-name]" # @param {type:"string"} BUCKET_URI = f"gs://{BUCKET_NAME}" if BUCKET_URI == "" or BUCKET_URI is None or BUCKET_URI == "gs://[your-bucket-name]": BUCKET_NAME = PROJECT_ID + "aip-" + TIMESTAMP BUCKET_URI = "gs://" + BUCKET_NAME ###Output _____no_output_____ ###Markdown **Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket. ###Code ! gsutil mb -l $REGION $BUCKET_URI ###Output _____no_output_____ ###Markdown Finally, validate access to your Cloud Storage bucket by examining its contents: ###Code ! gsutil ls -al $BUCKET_URI ###Output _____no_output_____ ###Markdown Import libraries and define constants ###Code import os from google.cloud import aiplatform %load_ext tensorboard ###Output _____no_output_____ ###Markdown Initialize Vertex AI SDK for PythonInitialize the Vertex AI SDK for Python for your project and corresponding bucket. ###Code aiplatform.init(project=PROJECT_ID, location=REGION, staging_bucket=BUCKET_URI) ###Output _____no_output_____ ###Markdown Set machine typeNext, set the machine type to use for prediction.- Set the variable `DEPLOY_COMPUTE` to configure the compute resources for the VMs you will use for for prediction. - `machine type` - `n1-standard`: 3.75GB of memory per vCPU. - `n1-highmem`: 6.5GB of memory per vCPU - `n1-highcpu`: 0.9 GB of memory per vCPU - `vCPUs`: number of \[2, 4, 8, 16, 32, 64, 96 \]*Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*. ###Code if os.getenv("IS_TESTING_DEPLOY_MACHINE"): MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE") else: MACHINE_TYPE = "n1-standard" VCPU = "4" DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU print("Deploy machine type", DEPLOY_COMPUTE) ###Output _____no_output_____ ###Markdown Introduction to Two-Tower algorithmTwo-tower models learn to represent two items of various types (such as user profiles, search queries, web documents, answer passages, or images) in the same vector space, so that similar or related items are close to each other. These two items are referred to as the query and candidate object, since when paired with a nearest neighbor search service such as Vertex Matching Engine, the two-tower model can retrieve candidate objects related to an input query object. These objects are encoded by a query and candidate encoder (the two "towers") respectively, which are trained on pairs of relevant items. This built-in algorithm exports trained query and candidate encoders as model artifacts, which can be deployed in Vertex Prediction for usage in a recommendation system. Configure training parameters for the Two-Tower builtin algorithmThe following table shows parameters that are common to all Vertex AI Training jobs created using the `gcloud ai custom-jobs create` command. See the [official documentation](https://cloud.google.com/sdk/gcloud/reference/ai/custom-jobs/create) for all the possible arguments.| Parameter | Data type | Description | Required ||--|--|--|--|| `display-name` | string | Name of the job. | Yes || `worker-pool-spec` | string | Comma-separated list of arguments specifying a worker pool configuration (see below). | Yes || `region` | string | Region to submit the job to. | No |The `worker-pool-spec` flag can be specified multiple times, one for each worker pool. 
The following table shows the arguments used to specify a worker pool.| Parameter | Data type | Description | Required ||--|--|--|--|| `machine-type` | string | Machine type for the pool. See the [official documentation](https://cloud.google.com/vertex-ai/docs/training/configure-compute) for supported machines. | Yes || `replica-count` | int | The number of replicas of the machine in the pool. | No || `container-image-uri` | string | Docker image to run on each worker. | No | The following table shows the parameters for the two-tower model training job:| Parameter | Data type | Description | Required ||--|--|--|--|| `training_data_path` | string | Cloud Storage pattern where training data is stored. | Yes || `input_schema_path` | string | Cloud Storage path where the JSON input schema is stored. | Yes || `input_file_format` | string | The file format of input. Currently supports `jsonl` and `tfrecord`. | No - default is `jsonl`. || `job_dir` | string | Cloud Storage directory where the model output files will be stored. | Yes || `eval_data_path` | string | Cloud Storage pattern where eval data is stored. | No || `candidate_data_path` | string | Cloud Storage pattern where candidate data is stored. Only used for top_k_categorical_accuracy metrics. If not set, it's generated from training/eval data. | No || `train_batch_size` | int | Batch size for training. | No - Default is 100. || `eval_batch_size` | int | Batch size for evaluation. | No - Default is 100. || `eval_split` | float | Split fraction to use for the evaluation dataset, if `eval_data_path` is not provided. | No - Default is 0.2 || `optimizer` | string | Training optimizer. Lowercase string name of any TF2.3 Keras optimizer is supported ('sgd', 'nadam', 'ftrl', etc.). See [TensorFlow documentation](https://www.tensorflow.org/api_docs/python/tf/keras/optimizers). | No - Default is 'adagrad'. || `learning_rate` | float | Learning rate for training. | No - Default is the default learning rate of the specified optimizer. || `momentum` | float | Momentum for optimizer, if specified. | No - Default is the default momentum value for the specified optimizer. || `metrics` | string | Metrics used to evaluate the model. Can be either `auc`, `top_k_categorical_accuracy` or `precision_at_1`. | No - Default is `auc`. || `num_epochs` | int | Number of epochs for training. | No - Default is 10. || `num_hidden_layers` | int | Number of hidden layers. | No || `num_nodes_hidden_layer{index}` | int | Num of nodes in hidden layer {index}. The range of index is 1 to 20. | No || `output_dim` | int | The output embedding dimension for each encoder tower of the two-tower model. | No - Default is 64. || `training_steps_per_epoch` | int | Number of steps per epoch to run the training for. Only needed if you are using more than 1 machine or using a master machine with more than 1 gpu. | No - Default is None. || `eval_steps_per_epoch` | int | Number of steps per epoch to run the evaluation for. Only needed if you are using more than 1 machine or using a master machine with more than 1 gpu. | No - Default is None. || `gpu_memory_alloc` | int | Amount of memory allocated per GPU (in MB). | No - Default is no limit. | ###Code DATASET_NAME = "movielens_100k" # Change to your dataset name. # Change to your data and schema paths. These are paths to the movielens_100k # sample data. 
TRAINING_DATA_PATH = f"gs://cloud-samples-data/vertex-ai/matching-engine/two-tower/{DATASET_NAME}/training_data/*" INPUT_SCHEMA_PATH = f"gs://cloud-samples-data/vertex-ai/matching-engine/two-tower/{DATASET_NAME}/input_schema.json" # URI of the two-tower training Docker image. LEARNER_IMAGE_URI = "us-docker.pkg.dev/vertex-ai-restricted/builtin-algorithm/two-tower" # Change to your output location. OUTPUT_DIR = f"{BUCKET_URI}/experiment/output" TRAIN_BATCH_SIZE = 100 # Batch size for training. NUM_EPOCHS = 3 # Number of epochs for training. print(f"Dataset name: {DATASET_NAME}") print(f"Training data path: {TRAINING_DATA_PATH}") print(f"Input schema path: {INPUT_SCHEMA_PATH}") print(f"Output directory: {OUTPUT_DIR}") print(f"Train batch size: {TRAIN_BATCH_SIZE}") print(f"Number of epochs: {NUM_EPOCHS}") ###Output _____no_output_____ ###Markdown Train on Vertex AI Training with CPUSubmit the Two-Tower training job to `Vertex AI Training`. The following command uses a single CPU machine for training. When using single node training, `training_steps_per_epoch` and `eval_steps_per_epoch` do not need to be set. Prepare your machine specificationNow define the machine specification for your custom hyperparameter tuning job. This tells Vertex what type of machine instance to provision for the hyperparameter tuning. - `machine_type`: The type of GCP instance to provision -- e.g., n1-standard-8. - `accelerator_type`: The type, if any, of hardware accelerator. In this tutorial if you previously set the variable `TRAIN_GPU != None`, you are using a GPU; otherwise you will use a CPU. - `accelerator_count`: The number of accelerators. ###Code TRAIN_COMPUTE = "n1-standard-8" machine_spec = {"machine_type": TRAIN_COMPUTE, "accelerator_count": 0} ###Output _____no_output_____ ###Markdown Prepare your disk specification(optional) Now define the disk specification for your custom hyperparameter tuning job. This tells Vertex what type and size of disk to provision in each machine instance for the hyperparameter tuning. - `boot_disk_type`: Either SSD or Standard. SSD is faster, and Standard is less expensive. Defaults to SSD. - `boot_disk_size_gb`: Size of disk in GB. ###Code DISK_TYPE = "pd-ssd" # [ pd-ssd, pd-standard] DISK_SIZE = 200 # GB disk_spec = {"boot_disk_type": DISK_TYPE, "boot_disk_size_gb": DISK_SIZE} ###Output _____no_output_____ ###Markdown Define the worker pool specificationNext, you define the worker pool specification for your custom hyperparameter tuning job. The worker pool specification will consist of the following:- `replica_count`: The number of instances to provision of this machine type.- `machine_spec`: The hardware specification.- `disk_spec` : (optional) The disk storage specification.- `container_spec`: The training container containing the training package.Let's dive deeper now into the container specification:- `image_uri`: The training image.- `command`: The command to invoke in the training image. Defaults to the command entry point specified for the training image.- `args`: The command line arguments to pass to the corresponding command entry point in training image. 
###Code JOB_NAME = "twotowers_cpu_" + TIMESTAMP MODEL_DIR = "{}/{}".format(BUCKET_URI, JOB_NAME) CMDARGS = [ f"--training_data_path={TRAINING_DATA_PATH}", f"--input_schema_path={INPUT_SCHEMA_PATH}", f"--job-dir={OUTPUT_DIR}", f"--train_batch_size={TRAIN_BATCH_SIZE}", f"--num_epochs={NUM_EPOCHS}", ] worker_pool_spec = [ { "replica_count": 1, "machine_spec": machine_spec, "disk_spec": disk_spec, "container_spec": { "image_uri": LEARNER_IMAGE_URI, "command": [], "args": CMDARGS, }, } ] ###Output _____no_output_____ ###Markdown Create a custom jobUse the class `CustomJob` to create a custom job, such as for hyperparameter tuning, with the following parameters:- `display_name`: A human readable name for the custom job.- `worker_pool_specs`: The specification for the corresponding VM instances. ###Code job = aiplatform.CustomJob( display_name="twotower_cpu_" + TIMESTAMP, worker_pool_specs=worker_pool_spec ) ###Output _____no_output_____ ###Markdown Execute the custom jobNext, execute your custom job using the method `run()`. ###Code job.run() ###Output _____no_output_____ ###Markdown View outputAfter the job finishes successfully, you can view the output directory. ###Code ! gsutil ls {OUTPUT_DIR} ! gsutil rm -rf {OUTPUT_DIR}/* ###Output _____no_output_____ ###Markdown Train on Vertex AI Training with GPUNext, train the Two Tower model using a GPU. ###Code JOB_NAME = "twotowers_gpu_" + TIMESTAMP MODEL_DIR = "{}/{}".format(BUCKET_URI, JOB_NAME) TRAIN_COMPUTE = "n1-highmem-4" TRAIN_GPU = "NVIDIA_TESLA_K80" machine_spec = { "machine_type": TRAIN_COMPUTE, "accelerator_type": TRAIN_GPU, "accelerator_count": 1, } CMDARGS = [ f"--training_data_path={TRAINING_DATA_PATH}", f"--input_schema_path={INPUT_SCHEMA_PATH}", f"--job-dir={OUTPUT_DIR}", "--training_steps_per_epoch=1500", "--eval_steps_per_epoch=1500", ] worker_pool_spec = [ { "replica_count": 1, "machine_spec": machine_spec, "disk_spec": disk_spec, "container_spec": { "image_uri": LEARNER_IMAGE_URI, "command": [], "args": CMDARGS, }, } ] ###Output _____no_output_____ ###Markdown Create and execute the custom jobNext, create and execute the custom job. ###Code job = aiplatform.CustomJob( display_name="twotower_cpu_" + TIMESTAMP, worker_pool_specs=worker_pool_spec ) job.run() ###Output _____no_output_____ ###Markdown View outputAfter the job finishes successfully, you can view the output directory. ###Code ! gsutil ls {OUTPUT_DIR} ! gsutil rm -rf {OUTPUT_DIR}/* ###Output _____no_output_____ ###Markdown Train on Vertex AI Training with TFRecordsNext, train the Two Tower model using TFRecords ###Code TRAINING_DATA_PATH = f"gs://cloud-samples-data/vertex-ai/matching-engine/two-tower/{DATASET_NAME}/tfrecord/*" JOB_NAME = "twotowers_tfrec_" + TIMESTAMP MODEL_DIR = "{}/{}".format(BUCKET_URI, JOB_NAME) TRAIN_COMPUTE = "n1-standard-8" machine_spec = {"machine_type": TRAIN_COMPUTE, "accelerator_count": 0} CMDARGS = [ f"--training_data_path={TRAINING_DATA_PATH}", f"--input_schema_path={INPUT_SCHEMA_PATH}", f"--job-dir={OUTPUT_DIR}", f"--train_batch_size={TRAIN_BATCH_SIZE}", f"--num_epochs={NUM_EPOCHS}", "--input_file_format=tfrecord", ] worker_pool_spec = [ { "replica_count": 1, "machine_spec": machine_spec, "disk_spec": disk_spec, "container_spec": { "image_uri": LEARNER_IMAGE_URI, "command": [], "args": CMDARGS, }, } ] ###Output _____no_output_____ ###Markdown Create and execute the custom jobNext, create and execute the custom job. 
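###Markdown The GPU specification above hard-codes `--training_steps_per_epoch=1500` and `--eval_steps_per_epoch=1500`. If you would rather derive these values, a rough rule of thumb is the record count divided by batch size and replica count; the sketch below uses placeholder counts, since the actual number of training records is not computed in this notebook. ###Code
import math


def steps_per_epoch(num_examples, batch_size, num_replicas=1):
    # Rough heuristic for training_steps_per_epoch / eval_steps_per_epoch
    # when training on multiple GPUs or machines.
    return math.ceil(num_examples / (batch_size * num_replicas))


print(steps_per_epoch(num_examples=90000, batch_size=100, num_replicas=1))  # -> 900
###Output _____no_output_____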
###Code job = aiplatform.CustomJob( display_name="twotower_cpu_" + TIMESTAMP, worker_pool_specs=worker_pool_spec ) job.run() ###Output _____no_output_____ ###Markdown View outputAfter the job finishes successfully, you can view the output directory. ###Code ! gsutil ls {OUTPUT_DIR} ! gsutil rm -rf {OUTPUT_DIR} ###Output _____no_output_____ ###Markdown TensorboardWhen the training starts, you can view the logs in TensorBoard. Colab users can use the TensorBoard widget below: For Workbench AI Notebooks users, the TensorBoard widget above won't work. We recommend you to launch TensorBoard through the Cloud Shell.1. In your Cloud Shell, launch Tensorboard on port 8080: ``` export TENSORBOARD_DIR=gs://xxxxx/tensorboard tensorboard --logdir=${TENSORBOARD_DIR} --port=8080 ```2. Click the "Web Preview" button at the top-right of the Cloud Shell window (looks like an eye in a rectangle). 3. Select "Preview on port 8080". This should launch the TensorBoard webpage in a new tab in your browser.After the job finishes successfully, you can view the output directory: ###Code try: TENSORBOARD_DIR = os.path.join(OUTPUT_DIR, "tensorboard") %tensorboard --logdir {TENSORBOARD_DIR} except Exception as e: print(e) ###Output _____no_output_____ ###Markdown Hyperparameter tuningYou may want to optimize the hyperparameters used during training to improve your model's accuracy and performance. For this example, the following command runs a Vertex AI hyperparameter tuning job with 8 trials that attempts to maximize the validation AUC metric. The hyperparameters it optimizes are the number of hidden layers, the size of the hidden layers, and the learning rate.Learn more about [Hyperparameter tuning overview](https://cloud.google.com/vertex-ai/docs/training/hyperparameter-tuning-overview). ###Code from google.cloud.aiplatform import hyperparameter_tuning as hpt hpt_job = aiplatform.HyperparameterTuningJob( display_name="twotowers_" + TIMESTAMP, custom_job=job, metric_spec={ "val_auc": "maximize", }, parameter_spec={ "learning_rate": hpt.DoubleParameterSpec(min=0.0001, max=0.1, scale="log"), "num_hidden_layers": hpt.IntegerParameterSpec(min=0, max=2, scale="linear"), "num_nodes_hidden_layer1": hpt.IntegerParameterSpec( min=1, max=128, scale="log" ), "num_nodes_hidden_layer2": hpt.IntegerParameterSpec( min=1, max=128, scale="log" ), }, search_algorithm=None, max_trial_count=8, parallel_trial_count=1, ) ###Output _____no_output_____ ###Markdown Run the hyperparameter tuning jobUse the `run()` method to execute the hyperparameter tuning job. ###Code hpt_job.run() ###Output _____no_output_____ ###Markdown Display the hyperparameter tuning job trial resultsAfter the hyperparameter tuning job has completed, the property `trials` will return the results for each trial. ###Code print(hpt_job.trials) ###Output _____no_output_____ ###Markdown Best trialNow look at which trial was the best: ###Code best = (None, None, None, 0.0) for trial in hpt_job.trials: # Keep track of the best outcome if float(trial.final_measurement.metrics[0].value) > best[3]: try: best = ( trial.id, float(trial.parameters[0].value), float(trial.parameters[1].value), float(trial.final_measurement.metrics[0].value), ) except: best = ( trial.id, float(trial.parameters[0].value), None, float(trial.final_measurement.metrics[0].value), ) print(best) ###Output _____no_output_____ ###Markdown Delete the hyperparameter tuning jobThe method 'delete()' will delete the hyperparameter tuning job. 
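###Markdown Before deleting the tuning job, you may want to keep a structured record of its trials. The loop above reads `trial.parameters` by position; the sketch below collects them by name instead. It assumes each parameter exposes `parameter_id` and `value` attributes (as in the Vertex AI Trial message); adjust the attribute names if your SDK version differs. ###Code
def trial_summary(trial):
    # Collect one trial's parameters by name plus its first reported metric.
    # Assumes `parameter_id`/`value` fields on the parameters, which may
    # differ across SDK versions.
    params = {p.parameter_id: float(p.value) for p in trial.parameters}
    metric = float(trial.final_measurement.metrics[0].value)
    return {"trial_id": trial.id, "metric": metric, "params": params}


# Example usage (commented out so this cell has no side effects):
# summaries = [trial_summary(t) for t in hpt_job.trials]
# best_summary = max(summaries, key=lambda s: s["metric"])
###Output _____no_output_____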
###Code hpt_job.delete() ###Output _____no_output_____ ###Markdown View outputAfter the job finishes successfully, you can view the output directory. ###Code BEST_MODEL = OUTPUT_DIR + "/trial_" + best[0] ! gsutil ls {BEST_MODEL} ###Output _____no_output_____ ###Markdown Upload the model to `Vertex AI Model` resourceYour training job will export two TF SavedModels under `gs:///query_model` and `gs:///candidate_model`. These exported models can be used for online or batch prediction in Vertex Prediction. First, import the query (or candidate) model using the `upload()` method, with the following parameters:- `display_name`: A human readable name for the model resource.- `artifact_uri`: The Cloud Storage location of the model artifacts.- `serving_container_image_uri`: The deployment container. In this tutorial, you use the prebuilt Two-Tower deployment container.- `serving_container_health_route`: The URL for the service to periodically ping for a response to verify that the serving binary is running. For Two-Towers, this will be /v1/models/\[model_name\].- `serving_container_predict_route`: The URL to which prediction requests are sent. For Two-Towers, this will be /v1/models/\[model_name\]:predict.- `serving_container_environment_variables`: Preset environment variables to pass into the deployment container.*Note:* The underlying deployment container is built on TensorFlow Serving. ###Code # The following imports the query (user) encoder model. MODEL_TYPE = "query" # Use the following instead to import the candidate (movie) encoder model. # MODEL_TYPE = 'candidate' DISPLAY_NAME = f"{DATASET_NAME}_{MODEL_TYPE}" # The display name of the model. MODEL_NAME = f"{MODEL_TYPE}_model" # Used by the deployment container. model = aiplatform.Model.upload( display_name=DISPLAY_NAME, artifact_uri=BEST_MODEL, serving_container_image_uri="us-central1-docker.pkg.dev/cloud-ml-algos/two-tower/deploy", serving_container_health_route=f"/v1/models/{MODEL_NAME}", serving_container_predict_route=f"/v1/models/{MODEL_NAME}:predict", serving_container_environment_variables={ "MODEL_BASE_PATH": "$(AIP_STORAGE_URI)", "MODEL_NAME": MODEL_NAME, }, ) ###Output _____no_output_____ ###Markdown Deploy the model to `Vertex AI Endpoint`Deploying the `Vertex AI Model` resource to a `Vertex AI Endpoint` for online predictions:1. Create an `Endpoint` resource exposing an external interface to users consuming the model. 2. After the `Endpoint` is ready, deploy one or more instances of a model to the `Endpoint`. The deployed model runs the custom container image running the Two-Tower encoder to serve embeddings.Refer to the Vertex AI Predictions guide [Deploy a model using the Vertex AI API](https://cloud.google.com/vertex-ai/docs/predictions/deploy-model-api) for more information about the APIs used in the following cells. Create a `Vertex AI Endpoint`Next, you create the `Vertex AI Endpoint`, to which you subsequently deploy your `Vertex AI Model` resource. ###Code endpoint = aiplatform.Endpoint.create(display_name=DATASET_NAME) ###Output _____no_output_____ ###Markdown Deploying `Model` resources to an `Endpoint` resource.You can deploy one or more `Vertex AI Model` resource instances to the same endpoint. Each `Vertex AI Model` resource that is deployed will have its own deployment container for the serving binary. In the next example, you deploy the `Vertex AI Model` resource to a `Vertex AI Endpoint` resource.
The `Vertex AI Model` resource already has the deployment container image defined for it. To deploy, you specify the following additional configuration settings:- The machine type.- The type and number of GPUs (if any).- Static, manual or auto-scaling of VM instances.In this example, you deploy the model with a minimal set of parameters, as follows:- `model`: The `Model` resource.- `deployed_model_display_name`: The human readable name for the deployed model instance.- `machine_type`: The machine type for each VM instance.Due to the requirements to provision the resource, this may take up to a few minutes. ###Code response = endpoint.deploy( model=model, deployed_model_display_name=DISPLAY_NAME, machine_type=DEPLOY_COMPUTE, traffic_split={"0": 100}, ) print(endpoint) ###Output _____no_output_____ ###Markdown Creating embeddingsNow that you have deployed the query/candidate encoder model on `Vertex AI Prediction`, you can call the model to generate embeddings for new data. Make an online prediction with SDK[Online prediction](https://cloud.google.com/vertex-ai/docs/predictions/online-predictions-custom-models) is used to synchronously query a model on a small batch of instances with minimal latency. The following function calls the deployed model using the Vertex AI SDK for Python.The input data you want predicted embeddings on should be provided as a stringified JSON in the `data` field. Note that you should also provide a unique `key` field (of type str) for each input instance so that you can associate each output embedding with its corresponding input. ###Code # Input items for the query model: input_items = [ {"data": '{"user_id": ["1"]}', "key": "key1"}, {"data": '{"user_id": ["2"]}', "key": "key2"}, ] # Input items for the candidate model: # input_items = [{ # 'data' : '{"movie_id": ["1"], "movie_title": ["fake title"]}', # 'key': 'key1' # }] encodings = endpoint.predict(input_items) print(f"Number of encodings: {len(encodings.predictions)}") print(encodings.predictions[0]["encoding"]) ###Output _____no_output_____ ###Markdown Make an online prediction with `gcloud`You can also do online prediction using the gcloud CLI. ###Code import json request = json.dumps({"instances": input_items}) with open("request.json", "w") as writer: writer.write(f"{request}\n") ENDPOINT_ID = endpoint.resource_name ! gcloud ai endpoints predict {ENDPOINT_ID} \ --region={REGION} \ --json-request=request.json ###Output _____no_output_____ ###Markdown Make a batch prediction[Batch prediction](https://cloud.google.com/vertex-ai/docs/predictions/batch-predictions) is used to asynchronously make predictions on a batch of input data. This is recommended if you have a large input size and do not need an immediate response, such as getting embeddings for candidate objects in order to create an index for a nearest neighbor search service such as [Vertex Matching Engine](https://cloud.google.com/vertex-ai/docs/matching-engine/overview). Create the batch input fileNext, you generate the batch input file used to produce embeddings for the dataset, which you subsequently use to create an index with `Vertex AI Matching Engine`. In this example, the dataset contains 1000 unique identifiers (0...999). You will use the trained encoder to generate a predicted embedding for each unique identifier.The input data needs to be on Cloud Storage and in JSONL format. You can use the sample query object file provided below.
Like with online prediction, it's recommended to have the `key` field so that you can associate each output embedding with its corresponding input. ###Code QUERY_EMBEDDING_PATH = f"{BUCKET_URI}/embeddings/train.jsonl" import tensorflow as tf with tf.io.gfile.GFile(QUERY_EMBEDDING_PATH, "w") as f: for i in range(0, 1000): query = {"data": '{"user_id": ["' + str(i) + '"]}', "key": f"key{i}"} f.write(json.dumps(query) + "\n") print("\nNumber of embeddings: ") ! gsutil cat {QUERY_EMBEDDING_PATH} | wc -l ###Output _____no_output_____ ###Markdown Send the prediction requestTo make a batch prediction request, call the model object's `batch_predict` method with the following parameters: - `instances_format`: The format of the batch prediction request file: "jsonl", "csv", "bigquery", "tf-record", "tf-record-gzip" or "file-list"- `prediction_format`: The format of the batch prediction response file: "jsonl", "csv", "bigquery", "tf-record", "tf-record-gzip" or "file-list"- `job_display_name`: The human readable name for the prediction job.- `gcs_source`: A list of one or more Cloud Storage paths to your batch prediction requests.- `gcs_destination_prefix`: The Cloud Storage path that the service will write the predictions to.- `model_parameters`: Additional filtering parameters for serving prediction results.- `machine_type`: The type of machine to use for training.- `accelerator_type`: The hardware accelerator type.- `accelerator_count`: The number of accelerators to attach to a worker replica.- `starting_replica_count`: The number of compute instances to initially provision.- `max_replica_count`: The maximum number of compute instances to scale to. In this tutorial, only one instance is provisioned. Compute instance scalingYou can specify a single instance (or node) to process your batch prediction request. This tutorial uses a single node, so the variables `MIN_NODES` and `MAX_NODES` are both set to `1`.If you want to use multiple nodes to process your batch prediction request, set `MAX_NODES` to the maximum number of nodes you want to use. Vertex AI autoscales the number of nodes used to serve your predictions, up to the maximum number you set. Refer to the [pricing page](https://cloud.google.com/vertex-ai/pricingprediction-prices) to understand the costs of autoscaling with multiple nodes. ###Code MIN_NODES = 1 MAX_NODES = 4 batch_predict_job = model.batch_predict( job_display_name=f"batch_predict_{DISPLAY_NAME}", gcs_source=[QUERY_EMBEDDING_PATH], gcs_destination_prefix=f"{BUCKET_URI}/embeddings/output", machine_type=DEPLOY_COMPUTE, starting_replica_count=MIN_NODES, max_replica_count=MAX_NODES, ) ###Output _____no_output_____ ###Markdown Get the predicted embeddingsNext, get the results from the completed batch prediction job.The results are written to the Cloud Storage output bucket you specified in the batch prediction request. You call the method `iter_outputs()` to get a list of each Cloud Storage file generated with the results. Each file contains one or more prediction requests in a JSON format:- `instance`: The prediction request.- `prediction`: The prediction response. 
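###Markdown Each line in those prediction files is a JSON object pairing the original instance with its prediction. The next cell walks the real output files; as a shape reference, here is how a single made-up line would parse. The record below is fabricated for illustration only and is not actual model output. ###Code
import json

# A fabricated example record, shaped like one line of the batch prediction
# output described above (instance + prediction with key and encoding).
example_line = json.dumps(
    {
        "instance": {"data": '{"user_id": ["7"]}', "key": "key7"},
        "prediction": {"key": "key7", "encoding": [0.1, -0.2, 0.3]},
    }
)

record = json.loads(example_line)
print(record["prediction"]["key"], len(record["prediction"]["encoding"]))
###Output _____no_output_____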
###Code bp_iter_outputs = batch_predict_job.iter_outputs() prediction_results = list() for blob in bp_iter_outputs: if blob.name.split("/")[-1].startswith("prediction"): prediction_results.append(blob.name) result_files = [] for prediction_result in prediction_results: result_file = f"gs://{bp_iter_outputs.bucket.name}/{prediction_result}" result_files.append(result_file) print(result_files) ###Output _____no_output_____ ###Markdown Save the embeddings in JSONL formatNext, you store the predicted embeddings as a JSONL formatted file. Each embedding is stored as: { 'id': .., 'embedding': [ ... ] } The format of the embeddings for the index can be in either CSV, JSON, or Avro format.Learn more about [Embedding Formats for Indexing](https://cloud.google.com/vertex-ai/docs/matching-engine/using-matching-enginejson) ###Code embeddings = [] for result_file in result_files: with tf.io.gfile.GFile(result_file, "r") as f: instances = list(f) for instance in instances: instance = instance.replace('\\"', "'") result = json.loads(instance) prediction = result["prediction"] key = prediction["key"][3:] encoding = prediction["encoding"] embedding = {"id": key, "embedding": encoding} embeddings.append(embedding) print("Number of embeddings", len(embeddings)) print("Encoding Dimensions", len(embeddings[0]["embedding"])) print("Example embedding", embeddings[0]) with open("embeddings.json", "w") as f: for i in range(len(embeddings)): f.write(json.dumps(embeddings[i]).replace('"', "'")) f.write("\n") ! head -n 2 embeddings.json ###Output _____no_output_____ ###Markdown Store the JSONL formatted embeddings in Cloud StorageNext, you upload the training data to your Cloud Storage bucket. ###Code EMBEDDINGS_URI = f"{BUCKET_URI}/embeddings/twotower/" ! gsutil cp embeddings.json {EMBEDDINGS_URI} ###Output _____no_output_____ ###Markdown Create Matching Engine IndexNext, you create the index for your embeddings. Currently, two indexing algorithms are supported:- `create_tree_ah_index()`: Shallow tree + Asymmetric hashing.- `create_brute_force_index()`: Linear search.In this tutorial, you use the `create_tree_ah_index()`for production scale. The method is called with the following parameters:- `display_name`: A human readable name for the index.- `contents_delta_uri`: A Cloud Storage location for the embeddings, which are either to be inserted, updated or deleted.- `dimensions`: The number of dimensions of the input vector- `approximate_neighbors_count`: (for Tree AH) The default number of neighbors to find via approximate search before exact reordering is performed. Exact reordering is a procedure where results returned by an approximate search algorithm are reordered via a more expensive distance computation.- `distance_measure_type`: The distance measure used in nearest neighbor search. - `SQUARED_L2_DISTANCE`: Euclidean (L2) Distance - `L1_DISTANCE`: Manhattan (L1) Distance - `COSINE_DISTANCE`: Cosine Distance. Defined as 1 - cosine similarity. - `DOT_PRODUCT_DISTANCE`: Default value. Defined as a negative of the dot product.- `description`: A human readble description of the index.- `labels`: User metadata in the form of a dictionary.- `leaf_node_embedding_count`: Number of embeddings on each leaf node. The default value is 1000 if not set.- `leaf_nodes_to_search_percent`: The default percentage of leaf nodes that any query may be searched. Must be in range 1-100, inclusive. 
The default value is 10 (meaning 10%) if not set. Creating the index may take up to 30 minutes. Learn more about [Configuring Matching Engine Indexes](https://cloud.google.com/vertex-ai/docs/matching-engine/configuring-indexes). ###Code DIMENSIONS = len(embeddings[0]["embedding"]) DISPLAY_NAME = "movies" tree_ah_index = aiplatform.MatchingEngineIndex.create_tree_ah_index( display_name=DISPLAY_NAME, contents_delta_uri=EMBEDDINGS_URI, dimensions=DIMENSIONS, approximate_neighbors_count=50, distance_measure_type="DOT_PRODUCT_DISTANCE", description="Two tower generated embeddings", labels={"label_name": "label_value"}, # TreeAH specific parameters leaf_node_embedding_count=100, leaf_nodes_to_search_percent=7, ) INDEX_RESOURCE_NAME = tree_ah_index.resource_name print(INDEX_RESOURCE_NAME) ###Output _____no_output_____ ###Markdown Setup VPC peering networkTo use a `Matching Engine Index`, you set up a VPC peering network between your project and the `Vertex AI Matching Engine` service project. This eliminates additional hops in network traffic and allows using the efficient gRPC protocol.Learn more about [VPC peering](https://cloud.google.com/vertex-ai/docs/general/vpc-peering).**IMPORTANT: you can only set up one VPC peering to servicenetworking.googleapis.com per project.** Create VPC peering for default networkFor simplicity, we set up VPC peering to the default network. You can create a different network for your project.If you set up VPC peering with any other network, make sure that the network already exists and that your VM is running on that network. ###Code # This is for display only; you can name the range anything. PEERING_RANGE_NAME = "vertex-ai-prediction-peering-range" NETWORK = "default" # NOTE: `prefix-length=16` means a CIDR block with mask /16 will be # reserved for use by Google services, such as Vertex AI. ! gcloud compute addresses create $PEERING_RANGE_NAME \ --global \ --prefix-length=16 \ --description="peering range for Google service" \ --network=$NETWORK \ --purpose=VPC_PEERING ###Output _____no_output_____ ###Markdown Create the VPC connectionNext, create the connection for VPC peering.*Note:* If you get a PERMISSION DENIED error, you may not have the necessary role 'Compute Network Admin' set for your default service account. In the Cloud Console, do the following steps.1. Go to `IAM & Admin` 2. Find your service account. 3. Click the edit icon. 4. Select `Add Another Role`. 5. Enter 'Compute Network Admin'. 6. Select `Save` ###Code ! gcloud services vpc-peerings connect \ --service=servicenetworking.googleapis.com \ --network=$NETWORK \ --ranges=$PEERING_RANGE_NAME \ --project=$PROJECT_ID ###Output _____no_output_____ ###Markdown Check the status of your peering connections. ###Code ! gcloud compute networks peerings list --network $NETWORK ###Output _____no_output_____ ###Markdown Construct the full network nameYou need the full network resource name when you subsequently create a `Matching Engine Index Endpoint` resource for VPC peering.
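###Markdown The next cell builds that full network resource name from the project number and network. As a small sanity check, you can validate the string before using it; the helper and regular expression below are illustrative only and are not part of the Vertex AI SDK. ###Code
import re


def network_resource_name(project_number, network):
    # Compose the full network resource name and check its general shape.
    name = f"projects/{project_number}/global/networks/{network}"
    if not re.match(r"^projects/\d+/global/networks/[a-z]([-a-z0-9]*[a-z0-9])?$", name):
        raise ValueError(f"Unexpected network name format: {name}")
    return name


print(network_resource_name("123456789012", "default"))  # placeholder project number
###Output _____no_output_____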
###Code full_network_name = f"projects/{PROJECT_NUMBER}/global/networks/{NETWORK}" ###Output _____no_output_____ ###Markdown Create an IndexEndpoint with VPC NetworkNext, you create a `Matching Engine Index Endpoint`, similar to the concept of creating a `Private Endpoint` for prediction with a peer-to-peer network.To create the `Index Endpoint` resource, you call the method `create()` with the following parameters:- `display_name`: A human readable name for the `Index Endpoint`.- `description`: A description for the `Index Endpoint`.- `network`: The VPC network resource name. ###Code index_endpoint = aiplatform.MatchingEngineIndexEndpoint.create( display_name="index_endpoint_for_demo", description="index endpoint description", network=full_network_name, ) INDEX_ENDPOINT_NAME = index_endpoint.resource_name print(INDEX_ENDPOINT_NAME) ###Output _____no_output_____ ###Markdown Deploy the `Matching Engine Index` to the `Index Endpoint` resourceNext, deploy your index to the `Index Endpoint` using the method `deploy_index()` with the following parameters:- `display_name`: A human readable name for the deployed index.- `index`: Your index.- `deployed_index_id`: A user assigned identifier for the deployed index.- `machine_type`: (optional) The VM instance type.- `min_replica_count`: (optional) Minimum number of VM instances for auto-scaling.- `max_replica_count`: (optional) Maximum number of VM instances for auto-scaling.Learn more about [Machine resources for Index Endpoint](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/projects.locations.indexEndpointsDeployedIndex) ###Code DEPLOYED_INDEX_ID = "tree_ah_twotower_deployed_" + TIMESTAMP MIN_NODES = 1 MAX_NODES = 2 DEPLOY_COMPUTE = "n1-standard-16" index_endpoint.deploy_index( display_name="deployed_index_for_demo", index=tree_ah_index, deployed_index_id=DEPLOYED_INDEX_ID, machine_type=DEPLOY_COMPUTE, min_replica_count=MIN_NODES, max_replica_count=MAX_NODES, ) print(index_endpoint.deployed_indexes) ###Output _____no_output_____ ###Markdown Create and execute an online queryNow that your index is deployed, you can make queries.First, you construct a vector `query` using synthetic data, to use as the example to return matches for.Next, you make the matching request using the method `match()`, with the following parameters:- `deployed_index_id`: The identifier of the deployed index.- `queries`: A list of queries (instances).- `num_neighbors`: The number of closest matches to return. ###Code # The number of nearest neighbors to be retrieved from database for each query. 
NUM_NEIGHBOURS = 10 # Test query queries = [embeddings[0]["embedding"], embeddings[1]["embedding"]] matches = index_endpoint.match( deployed_index_id=DEPLOYED_INDEX_ID, queries=queries, num_neighbors=NUM_NEIGHBOURS ) for instance in matches: print("INSTANCE") for match in instance: print(match) ###Output _____no_output_____ ###Markdown Cleaning upTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloudproject](https://cloud.google.com/resource-manager/docs/creating-managing-projectsshutting_down_projects) you used for the tutorial.Otherwise, you can delete the individual resources you created in this tutorial: ###Code # Delete endpoint resource endpoint.delete(force=True) # Delete model resource model.delete() # Force undeployment of indexes and delete endpoint try: index_endpoint.delete(force=True) except Exception as e: print(e) # Delete indexes try: tree_ah_index.delete() brute_force_index.delete() except Exception as e: print(e) # Delete Cloud Storage objects that were created delete_bucket = False if delete_bucket or os.getenv("IS_TESTING"): ! gsutil -m rm -r $OUTPUT_DIR ###Output _____no_output_____
evalvacia_modelu_I_b/evalvacia_modelu_I_b.ipynb
###Markdown 0. Imports ###Code import pandas as pd import numpy as np ###Output _____no_output_____ ###Markdown 1. Load CSV ###Code # change to your file location df = pd.read_csv('/content/drive/MyDrive/Škola/DM/evalvacia_modelu_I_b/MLM_vstup.csv', ';', usecols=range(0,13)) df_stats = pd.read_csv('/content/drive/MyDrive/Škola/DM/evalvacia_modelu_I_b/MLM_ZAM_stats.csv', ';', usecols=range(0,10)) # fiter for students df = df[(df['HODINA'] > 6) & (df['HODINA'] <= 22) & (df['ZAM'] == 1) & (df['KATEGORIA'].isin(['uvod', 'studium', 'o_fakulte', 'oznamy']))] # empty dict to save created crosstables dfDict = {} ###Output _____no_output_____ ###Markdown 2. Create crosstables*Crosstable - PO* ###Code df1 = df[(df['PO'] == 1)] crosstable = pd.crosstab(df1['HODINA'], df1['KATEGORIA'], values=df1['PO'], margins=True, dropna=False, aggfunc='count').reset_index().fillna(0) # Add missing line crosstable = crosstable.append({'HODINA': 18, 'o_fakulte': 0, 'oznamy': 0, 'studium': 0, 'uvod': 0, 'All': 0}, ignore_index=True) crosstable = crosstable.append({'HODINA': 19, 'o_fakulte': 0, 'oznamy': 0, 'studium': 0, 'uvod': 0, 'All': 0}, ignore_index=True) crosstable = crosstable.append({'HODINA': 20, 'o_fakulte': 0, 'oznamy': 0, 'studium': 0, 'uvod': 0, 'All': 0}, ignore_index=True) # Add PO crosstable into dict dfDict['PO'] = crosstable ###Output _____no_output_____ ###Markdown *Crosstable - UT* ###Code df1 = df[(df['UT'] == 1)] crosstable = pd.crosstab(df1['HODINA'], df1['KATEGORIA'], values=df1['UT'], margins=True, dropna=False, aggfunc='count').reset_index().fillna(0) # Add missing line crosstable = crosstable.append({'HODINA': 19, 'o_fakulte': 0, 'oznamy': 0, 'studium': 0, 'uvod': 0, 'All': 0}, ignore_index=True) crosstable = crosstable.append({'HODINA': 20, 'o_fakulte': 0, 'oznamy': 0, 'studium': 0, 'uvod': 0, 'All': 0}, ignore_index=True) crosstable = crosstable.append({'HODINA': 21, 'o_fakulte': 0, 'oznamy': 0, 'studium': 0, 'uvod': 0, 'All': 0}, ignore_index=True) crosstable = crosstable.append({'HODINA': 22, 'o_fakulte': 0, 'oznamy': 0, 'studium': 0, 'uvod': 0, 'All': 0}, ignore_index=True) # Add UT crosstable into dict dfDict['UT'] = crosstable ###Output _____no_output_____ ###Markdown *Crosstable - STR* ###Code df1 = df[(df['STR'] == 1)] crosstable = pd.crosstab(df1['HODINA'], df1['KATEGORIA'], values=df1['STR'], margins=True, dropna=False, aggfunc='count').reset_index().fillna(0) # Add missing line crosstable = crosstable.append({'HODINA': 17, 'o_fakulte': 0, 'oznamy': 0, 'studium': 0, 'uvod': 0, 'All': 0}, ignore_index=True) crosstable = crosstable.append({'HODINA': 20, 'o_fakulte': 0, 'oznamy': 0, 'studium': 0, 'uvod': 0, 'All': 0}, ignore_index=True) crosstable = crosstable.append({'HODINA': 21, 'o_fakulte': 0, 'oznamy': 0, 'studium': 0, 'uvod': 0, 'All': 0}, ignore_index=True) crosstable = crosstable.append({'HODINA': 22, 'o_fakulte': 0, 'oznamy': 0, 'studium': 0, 'uvod': 0, 'All': 0}, ignore_index=True) # Add STR crosstable into dict dfDict['STR'] = crosstable ###Output _____no_output_____ ###Markdown *Crosstable - STVR* ###Code df1 = df[(df['STVR'] == 1)] crosstable = pd.crosstab(df1['HODINA'], df1['KATEGORIA'], values=df1['STVR'], margins=True, dropna=False, aggfunc='count').reset_index().fillna(0) # Add missing lines crosstable = crosstable.append({'HODINA': 18, 'o_fakulte': 0, 'oznamy': 0, 'studium': 0, 'uvod': 0, 'All': 0}, ignore_index=True) crosstable = crosstable.append({'HODINA': 19, 'o_fakulte': 0, 'oznamy': 0, 'studium': 0, 'uvod': 0, 'All': 0}, ignore_index=True) crosstable = 
crosstable.append({'HODINA': 20, 'o_fakulte': 0, 'oznamy': 0, 'studium': 0, 'uvod': 0, 'All': 0}, ignore_index=True) crosstable = crosstable.append({'HODINA': 21, 'o_fakulte': 0, 'oznamy': 0, 'studium': 0, 'uvod': 0, 'All': 0}, ignore_index=True) crosstable = crosstable.append({'HODINA': 22, 'o_fakulte': 0, 'oznamy': 0, 'studium': 0, 'uvod': 0, 'All': 0}, ignore_index=True) # Add STVR crosstable into dict dfDict['STVR'] = crosstable ###Output _____no_output_____ ###Markdown *Crosstable - PIA* ###Code df1 = df[(df['PIA'] == 1)] crosstable = pd.crosstab(df1['HODINA'], df1['KATEGORIA'], values=df1['PIA'], margins=True, dropna=False, aggfunc='count').reset_index().fillna(0) # Add missing lines crosstable = crosstable.append({'HODINA': 16, 'o_fakulte': 0, 'oznamy': 0, 'studium': 0, 'uvod': 0, 'All': 0}, ignore_index=True) crosstable = crosstable.append({'HODINA': 17, 'o_fakulte': 0, 'oznamy': 0, 'studium': 0, 'uvod': 0, 'All': 0}, ignore_index=True) crosstable = crosstable.append({'HODINA': 18, 'o_fakulte': 0, 'oznamy': 0, 'studium': 0, 'uvod': 0, 'All': 0}, ignore_index=True) crosstable = crosstable.append({'HODINA': 19, 'o_fakulte': 0, 'oznamy': 0, 'studium': 0, 'uvod': 0, 'All': 0}, ignore_index=True) crosstable = crosstable.append({'HODINA': 20, 'o_fakulte': 0, 'oznamy': 0, 'studium': 0, 'uvod': 0, 'All': 0}, ignore_index=True) crosstable = crosstable.append({'HODINA': 21, 'o_fakulte': 0, 'oznamy': 0, 'studium': 0, 'uvod': 0, 'All': 0}, ignore_index=True) crosstable = crosstable.append({'HODINA': 22, 'o_fakulte': 0, 'oznamy': 0, 'studium': 0, 'uvod': 0, 'All': 0}, ignore_index=True) # Add PIA crosstable into dict dfDict['PIA'] = crosstable ###Output _____no_output_____ ###Markdown 3. Create collection of weekdays ###Code days = ['PO', 'UT', 'STR', 'STVR', 'PIA'] ###Output _____no_output_____ ###Markdown 4. 
Create estimates for web parts ###Code df1 = pd.DataFrame() df2 = pd.DataFrame() df3 = pd.DataFrame() df4 = pd.DataFrame() index = 0 # Cycle through hours from 7 to 23 for x in range (7,23): new_row_uvod = {} new_row_studium = {} new_row_oznamy = {} new_row_fakulte = {} i = 1 # Cycle through weekdays for day in days: # Create logits estimates logit_uvod = df_stats.at[index, 'Intercept'] + df_stats.at[index, 'HODINA']*x+df_stats.at[index, 'HODINA_STV']*(x*x)+df_stats.at[index, day] logit_studium = df_stats.at[index+1, 'Intercept'] + df_stats.at[index+1, 'HODINA']*x+df_stats.at[index+1, 'HODINA_STV']*(x*x)+df_stats.at[index+1, day] logit_oznamy = df_stats.at[index+2, 'Intercept'] + df_stats.at[index+2, 'HODINA']*x+df_stats.at[index+2, 'HODINA_STV']*(x*x)+df_stats.at[index+2, day] reference_web = 1 / (1 + np.exp(logit_uvod) + np.exp(logit_studium) + np.exp(logit_oznamy)) # Create estimates for web parts estimate_uvod = np.exp(logit_uvod) * reference_web estimate_studium = np.exp(logit_studium) * reference_web estimate_oznamy = np.exp(logit_oznamy) * reference_web estimate_fakulte = np.exp(reference_web) * reference_web # Create evaluation crosstable = dfDict[day] crosstable = crosstable[(crosstable['HODINA'] == x)] evaluation_uvod = estimate_uvod * crosstable.iloc[0]['All'] evaluation_studium = estimate_studium * crosstable.iloc[0]['All'] evaluation_oznamy = estimate_oznamy * crosstable.iloc[0]['All'] evaluation_fakulte = estimate_fakulte * crosstable.iloc[0]['All'] den = str(i) + '_' + day # Create new rows and append it to dataframe new_row_uvod.update({den: evaluation_uvod}) new_row_studium.update({den: evaluation_studium}) new_row_oznamy.update({den: evaluation_oznamy}) new_row_fakulte.update({den: evaluation_fakulte}) i = i + 1 # Append time to rows new_row_uvod.update({'0_hod': x}) new_row_studium.update({'0_hod': x}) new_row_oznamy.update({'0_hod': x}) new_row_fakulte.update({'0_hod': x}) # Update dataframes df1 = df1.append(new_row_uvod, sort=False, ignore_index=True) df2 = df2.append(new_row_studium, sort=False, ignore_index=True) df3 = df3.append(new_row_oznamy, sort=False, ignore_index=True) df4 = df4.append(new_row_fakulte, sort=False, ignore_index=True) df1.head(16) ###Output _____no_output_____ ###Markdown 4. Export to excel ###Code # Creating Excel Writer Object from Pandas writer = pd.ExcelWriter('ZAM_evaluation_model.xlsx',engine='xlsxwriter') workbook=writer.book worksheet=workbook.add_worksheet('ZAM') writer.sheets['ZAM'] = worksheet # Úvod worksheet.write(0, 0, "Úvod") df1.to_excel(writer, sheet_name='ZAM',startrow=1 , startcol=0, index=False) # Śtúdium worksheet.write(0, 7, "Štúdium") df2.to_excel(writer, sheet_name='ZAM',startrow=1 , startcol=7, index=False) # Oznamy worksheet.write(0, 14, "Oznamy") df3.to_excel(writer, sheet_name='ZAM',startrow=1 , startcol=14, index=False) # O fakulte worksheet.write(0, 21, "O fakulte") df4.to_excel(writer, sheet_name='ZAM',startrow=1 , startcol=21, index=False) writer.save() ###Output _____no_output_____
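###Markdown For reference, the conversion used inside the estimation loop of section 4 above is the standard multinomial-logit softmax with `o_fakulte` as the reference category: the reference share is 1/(1 + sum of exp(logits)) and every other category's share is its exp(logit) times that reference share, so the four shares sum to one. Note that in this formulation the reference category's share is simply the reference probability itself, without an additional exponential. A compact sketch with placeholder logit values: ###Code
import numpy as np

def mnl_shares(logit_uvod, logit_studium, logit_oznamy):
    # Reference-category formulation: 'o_fakulte' is the baseline, so its
    # share is the reference probability itself.
    ref = 1.0 / (1.0 + np.exp(logit_uvod) + np.exp(logit_studium) + np.exp(logit_oznamy))
    return {
        'uvod': np.exp(logit_uvod) * ref,
        'studium': np.exp(logit_studium) * ref,
        'oznamy': np.exp(logit_oznamy) * ref,
        'o_fakulte': ref,
    }

shares = mnl_shares(0.5, -0.2, 1.1)  # placeholder logits
print(shares, sum(shares.values()))  # the shares sum to 1
###Output _____no_output_____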
10 Selenium/archive/02 Htmls auslesen.ipynb
###Markdown Imports ###Code from bs4 import BeautifulSoup import pandas as pd import time import os import re from tqdm import tqdm ###Output _____no_output_____ ###Markdown List of all HTML files ###Code lst = os.listdir('pages/') ###Output _____no_output_____ ###Markdown Read in the pages, starting with a single page ###Code file = open('pages/' + lst[0], 'r') text = file.read() h = BeautifulSoup(text, 'html.parser') ###Output _____no_output_____ ###Markdown All elements we are interested in ###Code h.find_all('tr', {'class':'ng-scope'}) ###Output _____no_output_____ ###Markdown All elements ###Code h.find_all('tr', {'class':'ng-scope'})[0].find('div')['title'] h.find_all('tr', {'class':'ng-scope'})[0].find_next('div').find_next('div').find('a')['href'] h.find_all('tr', {'class':'ng-scope'})[0].find_next('div') \ .find_next('div').find_next('div').find_next('div').find_next('div').text h.find_all('tr', {'class':'ng-scope'})[0].find_next('div') \ .find_next('div').find_next('div').find_next('div') \ .find_next('div').find_next('div').text ###Output _____no_output_____ ###Markdown And now pack everything into a for loop ###Code soup_lst = h.find_all('tr', {'class':'ng-scope'}) bäck_lst = [] for bäck in soup_lst: name = bäck.find('div')['title'] url = bäck.find_next('div').find_next('div').find('a')['href'] gem = bäck.find_next('div') \ .find_next('div').find_next('div').find_next('div').find_next('div').text kt = bäck.find_next('div') \ .find_next('div').find_next('div').find_next('div') \ .find_next('div').find_next('div').text minidict = {'Bäckerei': name, 'URL': url, 'Gemeinde': gem, 'Kanton': kt} bäck_lst.append(minidict) ###Output _____no_output_____ ###Markdown Now we turn this into a function ###Code def get_bäcks(html_code): soup_lst = html_code.find_all('tr', {'class':'ng-scope'}) bäck_lst = [] for bäck in soup_lst: name = bäck.find('div')['title'] url = bäck.find_next('div').find_next('div').find('a')['href'] gem = bäck.find_next('div') \ .find_next('div').find_next('div').find_next('div').find_next('div').text kt = bäck.find_next('div') \ .find_next('div').find_next('div').find_next('div') \ .find_next('div').find_next('div').text minidict = {'Bäckerei': name, 'URL': url, 'Gemeinde': gem, 'Kanton': kt} bäck_lst.append(minidict) return bäck_lst file = open('pages/' + lst[0], 'r') text = file.read() h = BeautifulSoup(text, 'html.parser') get_bäcks(h) ###Output _____no_output_____ ###Markdown And now we apply it to all HTML files ###Code full_bäck_lst = [] for html_file in tqdm(lst): file = open('pages/' + html_file, 'r') text = file.read() h = BeautifulSoup(text, 'html.parser') full_bäck_lst = full_bäck_lst + get_bäcks(h) ###Output 100%|██████████| 36/36 [00:02<00:00, 12.57it/s] ###Markdown Read the list into a DataFrame and save ###Code df = pd.DataFrame(full_bäck_lst) df.to_csv('bäckereien_list.csv') df['Kanton'].value_counts() ###Output _____no_output_____
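###Markdown The chained `find_next('div')` calls above assume every table row has exactly the same structure. A slightly more defensive variant of the same function (same selectors and fields, only wrapped in a try/except) skips rows that do not parse instead of aborting the whole run; the name `get_baecks_safe` is just an illustrative alias. ###Code
def get_baecks_safe(html_code):
    # Same extraction logic as get_bäcks above, but a malformed row is
    # skipped instead of raising and stopping the loop.
    rows = html_code.find_all('tr', {'class': 'ng-scope'})
    out = []
    for row in rows:
        try:
            name = row.find('div')['title']
            url = row.find_next('div').find_next('div').find('a')['href']
            gem = (row.find_next('div').find_next('div').find_next('div')
                   .find_next('div').find_next('div').text)
            kt = (row.find_next('div').find_next('div').find_next('div')
                  .find_next('div').find_next('div').find_next('div').text)
            out.append({'Bäckerei': name, 'URL': url, 'Gemeinde': gem, 'Kanton': kt})
        except (TypeError, KeyError, AttributeError):
            continue  # row does not match the expected structure
    return out
###Output _____no_output_____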
code/netset8barebone-new-r3.ipynb
###Markdown Sustainable energy transitions data model ###Code import pandas as pd, numpy as np, json, copy, zipfile, random, requests, StringIO import matplotlib.pyplot as plt %matplotlib inline plt.style.use('ggplot') from IPython.core.display import Image Image('favicon.png') ###Output _____no_output_____ ###Markdown Country and region name converters ###Code #country name converters #EIA->pop clist1={'North America':'Northern America', 'United States':'United States of America', 'Central & South America':'Latin America and the Caribbean', 'Bahamas, The':'Bahamas', 'Saint Vincent/Grenadines':'Saint Vincent and the Grenadines', 'Venezuela':'Venezuela (Bolivarian Republic of)', 'Macedonia':'The former Yugoslav Republic of Macedonia', 'Moldova':'Republic of Moldova', 'Russia':'Russian Federation', 'Iran':'Iran (Islamic Republic of)', 'Palestinian Territories':'State of Palestine', 'Syria':'Syrian Arab Republic', 'Yemen':'Yemen ', 'Congo (Brazzaville)':'Congo', 'Congo (Kinshasa)':'Democratic Republic of the Congo', 'Cote dIvoire (IvoryCoast)':"C\xc3\xb4te d'Ivoire", 'Gambia, The':'Gambia', 'Libya':'Libyan Arab Jamahiriya', 'Reunion':'R\xc3\xa9union', 'Somalia':'Somalia ', 'Sudan and South Sudan':'Sudan', 'Tanzania':'United Republic of Tanzania', 'Brunei':'Brunei Darussalam', 'Burma (Myanmar)':'Myanmar', 'Hong Kong':'China, Hong Kong Special Administrative Region', 'Korea, North':"Democratic People's Republic of Korea", 'Korea, South':'Republic of Korea', 'Laos':"Lao People's Democratic Republic", 'Macau':'China, Macao Special Administrative Region', 'Timor-Leste (East Timor)':'Timor-Leste', 'Virgin Islands, U.S.':'United States Virgin Islands', 'Vietnam':'Viet Nam'} #BP->pop clist2={u' European Union #':u'Europe', u'Rep. of Congo (Brazzaville)':u'Congo (Brazzaville)', 'Republic of Ireland':'Ireland', 'China Hong Kong SAR':'China, Hong Kong Special Administrative Region', u'Total Africa':u'Africa', u'Total North America':u'Northern America', u'Total S. & Cent. America':'Latin America and the Caribbean', u'Total World':u'World', u'Total World ':u'World', 'South Korea':'Republic of Korea', u'Trinidad & Tobago':u'Trinidad and Tobago', u'US':u'United States of America'} #WD->pop clist3={u"Cote d'Ivoire":"C\xc3\xb4te d'Ivoire", u'Congo, Rep.':u'Congo (Brazzaville)', u'Caribbean small states':'Carribean', u'East Asia & Pacific (all income levels)':'Eastern Asia', u'Egypt, Arab Rep.':'Egypt', u'European Union':u'Europe', u'Hong Kong SAR, China':u'China, Hong Kong Special Administrative Region', u'Iran, Islamic Rep.':u'Iran (Islamic Republic of)', u'Kyrgyz Republic':u'Kyrgyzstan', u'Korea, Rep.':u'Republic of Korea', u'Latin America & Caribbean (all income levels)':'Latin America and the Caribbean', u'Macedonia, FYR':u'The former Yugoslav Republic of Macedonia', u'Korea, Dem. Rep.':u"Democratic People's Republic of Korea", u'South Asia':u'Southern Asia', u'Sub-Saharan Africa (all income levels)':u'Sub-Saharan Africa', u'Slovak Republic':u'Slovakia', u'Venezuela, RB':u'Venezuela (Bolivarian Republic of)', u'Yemen, Rep.':u'Yemen ', u'Congo, Dem. Rep.':u'Democratic Republic of the Congo'} #COMTRADE->pop clist4={u"Bosnia Herzegovina":"Bosnia and Herzegovina", u'Central African Rep.':u'Central African Republic', u'China, Hong Kong SAR':u'China, Hong Kong Special Administrative Region', u'China, Macao SAR':u'China, Macao Special Administrative Region', u'Czech Rep.':u'Czech Republic', u"Dem. People's Rep. of Korea":"Democratic People's Republic of Korea", u'Dem. Rep. 
of the Congo':"Democratic Republic of the Congo", u'Dominican Rep.':u'Dominican Republic', u'Fmr Arab Rep. of Yemen':u'Yemen ', u'Fmr Ethiopia':u'Ethiopia', u'Fmr Fed. Rep. of Germany':u'Germany', u'Fmr Panama, excl.Canal Zone':u'Panama', u'Fmr Rep. of Vietnam':u'Viet Nam', u"Lao People's Dem. Rep.":u"Lao People's Democratic Republic", u'Occ. Palestinian Terr.':u'State of Palestine', u'Rep. of Korea':u'Republic of Korea', u'Rep. of Moldova':u'Republic of Moldova', u'Serbia and Montenegro':u'Serbia', u'US Virgin Isds':u'United States Virgin Islands', u'Solomon Isds':u'Solomon Islands', u'United Rep. of Tanzania':u'United Republic of Tanzania', u'TFYR of Macedonia':u'The former Yugoslav Republic of Macedonia', u'USA':u'United States of America', u'USA (before 1981)':u'United States of America', } #Jacobson->pop clist5={u"Korea, Democratic People's Republic of":"Democratic People's Republic of Korea", u'All countries':u'World', u"Cote d'Ivoire":"C\xc3\xb4te d'Ivoire", u'Iran, Islamic Republic of':u'Iran (Islamic Republic of)', u'Macedonia, Former Yugoslav Republic of':u'The former Yugoslav Republic of Macedonia', u'Congo, Democratic Republic of':u"Democratic Republic of the Congo", u'Korea, Republic of':u'Republic of Korea', u'Tanzania, United Republic of':u'United Republic of Tanzania', u'Moldova, Republic of':u'Republic of Moldova', u'Hong Kong, China':u'China, Hong Kong Special Administrative Region', u'All countries.1':"World" } #NREL solar->pop clist6={u"Antigua & Barbuda":u'Antigua and Barbuda', u"Bosnia & Herzegovina":u"Bosnia and Herzegovina", u"Brunei":u'Brunei Darussalam', u"Cote d'Ivoire":"C\xc3\xb4te d'Ivoire", u"Iran":u'Iran (Islamic Republic of)', u"Laos":u"Lao People's Democratic Republic", u"Libya":'Libyan Arab Jamahiriya', u"Moldova":u'Republic of Moldova', u"North Korea":"Democratic People's Republic of Korea", u"Reunion":'R\xc3\xa9union', u'Sao Tome & Principe':u'Sao Tome and Principe', u'Solomon Is.':u'Solomon Islands', u'St. Lucia':u'Saint Lucia', u'St. Vincent & the Grenadines':u'Saint Vincent and the Grenadines', u'The Bahamas':u'Bahamas', u'The Gambia':u'Gambia', u'Virgin Is.':u'United States Virgin Islands', u'West Bank':u'State of Palestine' } #NREL wind->pop clist7={u"Antigua & Barbuda":u'Antigua and Barbuda', u"Bosnia & Herzegovina":u"Bosnia and Herzegovina", u'Occupied Palestinian Territory':u'State of Palestine', u'China Macao SAR':u'China, Macao Special Administrative Region', #"C\xc3\xb4te d'Ivoire":"C\xc3\xb4te d'Ivoire", u'East Timor':u'Timor-Leste', u'TFYR Macedonia':u'The former Yugoslav Republic of Macedonia', u'IAM-country Total':u'World' } #country entroids->pop clist8={u'Burma':'Myanmar', u"Cote d'Ivoire":"C\xc3\xb4te d'Ivoire", u'Republic of the Congo':u'Congo (Brazzaville)', u'Reunion':'R\xc3\xa9union' } def cnc(country): if country in clist1: return clist1[country] elif country in clist2: return clist2[country] elif country in clist3: return clist3[country] elif country in clist4: return clist4[country] elif country in clist5: return clist5[country] elif country in clist6: return clist6[country] elif country in clist7: return clist7[country] elif country in clist8: return clist8[country] else: return country ###Output _____no_output_____ ###Markdown Population Consult the notebook entitled *pop.ipynb* for the details of mining the data from the UN statistics division online database. Due to being the reference database for country names cell, the cell below needs to be run first, before any other databases. 
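###Markdown Before that loading cell, a quick illustration of the `cnc()` converter defined above (it only touches the name dictionaries, so it does not depend on any of the data files): known aliases are mapped to the UN population-statistics spelling, anything unrecognized passes through unchanged. The snippet follows this notebook's Python 2 style. ###Code
#quick sanity check of the cnc() country name converter defined above
for name in ['United States', 'Russia', 'South Korea', 'Congo, Rep.', 'France']:
    print name, '->', cnc(name)
###Output _____no_output_____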
###Code try: import zlib compression = zipfile.ZIP_DEFLATED except: compression = zipfile.ZIP_STORED #pop_path='https://dl.dropboxusercontent.com/u/531697/datarepo/Set/db/ pop_path='E:/Dropbox/Public/datarepo/netset/db/' #suppres warnings import warnings warnings.simplefilter(action = "ignore") cc=pd.read_excel(pop_path+'Country Code and Name ISO2 ISO3.xls') #http://unstats.un.org/unsd/tradekb/Attachment321.aspx?AttachmentType=1 ccs=cc['Country Code'].values neighbors=pd.read_csv(pop_path+'contry-geotime.csv') #https://raw.githubusercontent.com/ppKrauss/country-geotime/master/data/contry-geotime.csv #country name converter from iso to comtrade and back iso2c={} isoc2={} for i in cc.T.iteritems(): iso2c[i[1][0]]=i[1][1] isoc2[i[1][1]]=i[1][0] #country name converter from pop to iso pop2iso={} for i in cc.T.iteritems(): pop2iso[cnc(i[1][1])]=int(i[1][0]) #country name converter from alpha 2 to iso c2iso={} for i in neighbors.T.iteritems(): c2iso[str(i[1][0])]=i[1][1] c2iso['NA']=c2iso['nan'] #adjust for namibia c2iso.pop('nan'); #create country neighbor adjacency list based on iso country number codes c2neighbors={} for i in neighbors.T.iteritems(): z=str(i[1][4]).split(' ') if (str(i[1][1])!='nan'): c2neighbors[int(i[1][1])]=[c2iso[k] for k in z if k!='nan'] #extend iso codes not yet encountered iso2c[729]="Sudan" iso2c[531]="Curacao" iso2c[535]="Bonaire, Sint Eustatius and Saba" iso2c[728]="South Sudan" iso2c[534]="Sint Maarten (Dutch part)" iso2c[652]="Saint Barthélemy" #load h2 min h2=json.loads(file(pop_path+'h2.json','r').read()) #load tradealpha d #predata=json.loads(file(pop_path+'/trade/traded.json','r').read()) predata=json.loads(file(pop_path+'/trade/smalltrade.json','r').read()) tradealpha={} for c in predata: tradealpha[c]={} for year in predata[c]: tradealpha[c][int(year)]=predata[c][year] predata={} #load savedata predata=json.loads(file(pop_path+'savedata6.json','r').read()) data={} for c in predata: data[c]={} for year in predata[c]: data[c][int(year)]=predata[c][year] predata={} #load grids grid=json.loads(file(pop_path+'grid.json','r').read()) grid5=json.loads(file(pop_path+'grid5.json','r').read()) gridz=json.loads(file(pop_path+'gridz.json','r').read()) gridz5=json.loads(file(pop_path+'gridz5.json','r').read()) #load ndists ndists=json.loads(file(pop_path+'ndists.json','r').read()) distancenorm=7819.98 #load goodcountries #goodcountries=list(set(data.keys()).intersection(set(tradealpha.keys()))) goodcountries=json.loads(file(pop_path+'GC.json','r').read()) #goodcountries=goodcountries[:20] #dev rgc={} #reverse goodcountries coder for i in range(len(goodcountries)): rgc[goodcountries[i]]=i cid={} #reverse goodcountries coder for i in range(len(goodcountries)): cid[goodcountries[i]]=i def save3(sd,countrylist=[]): #if True: print 'saving... 
',sd, popsave={} countries=[] if countrylist==[]: c=sorted(goodcountries) else: c=countrylist for country in c: popdummy={} tosave=[] for year in data[country]: popdummy[year]=data[country][year]['population'] for fuel in data[country][year]['energy']: #for fuel in allfuels: if fuel not in {'nrg','nrg_sum'}: tosave.append({"t":year,"u":fuel,"g":"f","q1":"pp","q2":999, "s":round(0 if (('navg3' in data[country][year]['energy'][fuel]['prod']) \ and (np.isnan(data[country][year]['energy'][fuel]['prod']['navg3']))) else \ data[country][year]['energy'][fuel]['prod']['navg3'] if \ 'navg3' in data[country][year]['energy'][fuel]['prod'] else 0,3) }) tosave.append({"t":year,"u":fuel,"g":"m","q1":"cc","q2":999, "s":round(0 if (('navg3' in data[country][year]['energy'][fuel]['cons']) \ and (np.isnan(data[country][year]['energy'][fuel]['cons']['navg3']))) else \ data[country][year]['energy'][fuel]['cons']['navg3'] if \ 'navg3' in data[country][year]['energy'][fuel]['cons'] else 0,3) }) #save balances - only for dev #if (year > min(balance.keys())): # if year in balance: # if country in balance[year]: # tosave.append({"t":year,"u":"balance","g":"m","q1":"cc","q2":999, # "s":balance[year][country]}) #no import export flows on global if country not in {"World"}: flowg={"Import":"f","Export":"m","Re-Export":"m","Re-Import":"f"} if country in tradealpha: for year in tradealpha[country]: for fuel in tradealpha[country][year]: for flow in tradealpha[country][year][fuel]: for partner in tradealpha[country][year][fuel][flow]: tosave.append({"t":int(float(year)),"u":fuel,"g":flowg[flow],"q1":flow,"q2":partner, "s":round(tradealpha[country][year][fuel][flow][partner],3) }) popsave[country]=popdummy countries.append(country) file('../json/'+str(sd)+'/data.json','w').write(json.dumps(tosave)) zf = zipfile.ZipFile('../json/'+str(sd)+'/'+str(country.encode('utf-8').replace('/','&&'))+'.zip', mode='w') zf.write('../json/'+str(sd)+'/data.json','data.json',compress_type=compression) zf.close() #save all countries list file('../json/countries.json','w').write(json.dumps(countries)) #save countries populations #file('E:/Dropbox/Public/datarepo/Set/json/pop.json','w').write(json.dumps(popsave)) print ' done' ###Output _____no_output_____ ###Markdown Impex updating ###Code def updatenormimpex(reporter,partner,flow,value,weight=0.1): global nimportmatrix global nexportmatrix global nrimportmatrix global nrexportmatrix i=cid[reporter] j=cid[partner] if flow in {"Export","Re-Export"}: nexportmatrix[i][j]=(nexportmatrix[i][j]*(1-weight))+(value*weight) nrimportmatrix[j][i]=(nrimportmatrix[j][i]*(1-weight))+(value*weight) if flow in {"Import","Re-Import"}: nimportmatrix[i][j]=(nrimportmatrix[i][j]*(1-weight))+(value*weight) nrexportmatrix[j][i]=(nrexportmatrix[j][i]*(1-weight))+(value*weight) return def influence(reporter,partner,selfinfluence=1.0,expfactor=3.0): #country trade influence will tend to have an exponential distribution, therefore we convert to linear #with a strength of expfactor i=cid[reporter] j=cid[partner] if i==j: return selfinfluence else: return (12.0/36*nimportmatrix[i][j]\ +6.0/36*nexportmatrix[j][i]\ +4.0/36*nrimportmatrix[i][j]\ +2.0/36*nrexportmatrix[j][i]\ +6.0/36*nexportmatrix[i][j]\ +3.0/36*nimportmatrix[j][i]\ +2.0/36*nrexportmatrix[i][j]\ +1.0/36*nrimportmatrix[j][i])**(1.0/expfactor) def influenceimp(reporter,partner,selfinfluence=1.0,expfactor=3.0): #country trade influence will tend to have an exponential distribution, therefore we convert to linear #with a strength of expfactor i=cid[reporter] 
j=cid[partner] if i==j: return selfinfluence else: return (18.0/36*nimportmatrix[i][j]\ +9.0/36*nexportmatrix[j][i]\ +6.0/36*nrimportmatrix[i][j]\ +3.0/36*nrexportmatrix[j][i])**(1.0/expfactor) def influenceexp(reporter,partner,selfinfluence=1.0,expfactor=3.0): #country trade influence will tend to have an exponential distribution, therefore we convert to linear #with a strength of expfactor i=cid[reporter] j=cid[partner] if i==j: return selfinfluence else: return (18.0/36*nexportmatrix[i][j]\ +9.0/36*nimportmatrix[j][i]\ +6.0/36*nrexportmatrix[i][j]\ +3.0/36*nrimportmatrix[j][i])**(1.0/expfactor) #load ! careful, need to rebuild index if tradealpha or data changes # NIMPEX OR IMPEX ni='impex' # CAREFUL HERE predata=json.loads(file(pop_path+'trade/'+ni+'.json','r').read()) #predata=json.loads(file(pop_path+'trade/nimpex.json','r').read()) nexportmatrix=predata["nexport"] nimportmatrix=predata["nimport"] nrexportmatrix=predata["nrexport"] nrimportmatrix=predata["nrimport"] predata={} import scipy import pylab import scipy.cluster.hierarchy as sch import matplotlib as mpl import matplotlib.font_manager as font_manager from matplotlib.ticker import NullFormatter path = '../font/Inconsolata-Bold.ttf' prop = font_manager.FontProperties(fname=path) labeler=json.loads(file('../json/labeler.json','r').read()) isoico=json.loads(file('../json/isoico.json','r').read()) risoico=json.loads(file('../json/risoico.json','r').read()) def dendro(sd='00',selfinfluence=1.0,expfactor=3.0,inf='n'): returnmatrix=scipy.zeros([len(goodcountries),len(goodcountries)]) matrix=scipy.zeros([len(goodcountries),len(goodcountries)]) global labs global labsorder global labs2 global labs3 labs=[] labs2=[] labs3=[] for i in range(len(goodcountries)): labs.append(labeler[goodcountries[i]]) labsorder = pd.Series(np.array(labs)) #create labelorder labsorder=labsorder.rank(method='dense').values.astype(int)-1 alphabetvector=[0 for i in range(len(labsorder))] for i in range(len(labsorder)): alphabetvector[labsorder[i]-1]=i labs=[] for i in range(len(goodcountries)): labs.append(labeler[goodcountries[alphabetvector[i]]]) labs2.append(goodcountries[alphabetvector[i]]) labs3.append(isoico[goodcountries[alphabetvector[i]]]) for j in alphabetvector: if inf=='i': matrix[i][j]=influenceimp(goodcountries[alphabetvector[i]],goodcountries[alphabetvector[j]],0,expfactor) returnmatrix[i][j]=influenceimp(goodcountries[i],goodcountries[j],0,expfactor) elif inf=='e': matrix[i][j]=influenceexp(goodcountries[alphabetvector[i]],goodcountries[alphabetvector[j]],0,expfactor) returnmatrix[i][j]=influenceexp(goodcountries[i],goodcountries[j],0,expfactor) else: matrix[i][j]=influence(goodcountries[alphabetvector[i]],goodcountries[alphabetvector[j]],0,expfactor) returnmatrix[i][j]=influence(goodcountries[i],goodcountries[j],0,expfactor) #self-influence for i in range(len(goodcountries)): selfi=max(returnmatrix[i]) returnmatrix[i][i]=selfi*selfinfluence matrix[alphabetvector.index(i)][alphabetvector.index(i)]=selfi*selfinfluence title=u'Partner Importance of COLUMN Country for ROW Country in Energy Trade [self-influence $q='+\ str(selfinfluence)+'$, power factor $p='+str(expfactor)+'$]' #cmap=plt.get_cmap('RdYlGn_r') #for logplot cmap=plt.get_cmap('YlGnBu') labelpad=32 # Generate random features and distance matrix. D = scipy.zeros([len(matrix),len(matrix)]) for i in range(len(matrix)): for j in range(len(matrix)): D[i,j] =matrix[i][j] # Compute and plot first dendrogram. 
fig = pylab.figure(figsize=(17,15)) sch.set_link_color_palette(10*["#ababab"]) # Plot original matrix. axmatrix = fig.add_axes([0.3,0.1,0.6,0.6]) im = axmatrix.matshow(D[::-1], aspect='equal', origin='lower', cmap=cmap) #im = axmatrix.matshow(E[::-1], aspect='auto', origin='lower', cmap=cmap) #for logplot axmatrix.set_xticks([]) axmatrix.set_yticks([]) # Plot colorbar. axcolor = fig.add_axes([0.87,0.1,0.02,0.6]) pylab.colorbar(im, cax=axcolor) # Label up axmatrix.set_xticks(range(len(matrix))) mlabs=list(labs) for i in range(len(labs)): kz='-' for k in range(labelpad-len(labs[i])):kz+='-' if i%2==1: mlabs[i]=kz+u' '+labs[i]+u' '+'-' else: mlabs[i]='-'+u' '+labs[i]+u' '+kz axmatrix.set_xticklabels(mlabs, minor=False,fontsize=7,fontproperties=prop) axmatrix.xaxis.set_label_position('top') axmatrix.xaxis.tick_top() pylab.xticks(rotation=-90, fontsize=8) axmatrix.set_yticks(range(len(matrix))) mlabs=list(labs) for i in range(len(labs)): kz='-' for k in range(labelpad-len(labs[i])):kz+='-' if i%2==0: mlabs[i]=kz+u' '+labs[i]+u' '+'-' else: mlabs[i]='-'+u' '+labs[i]+u' '+kz axmatrix.set_yticklabels(mlabs[::-1], minor=False,fontsize=7,fontproperties=prop) axmatrix.yaxis.set_label_position('left') axmatrix.yaxis.tick_left() xlabels = axmatrix.get_xticklabels() for label in range(len(xlabels)): xlabels[label].set_rotation(90) axmatrix.text(1.1, 0.5, title, horizontalalignment='left', verticalalignment='center',rotation=270, transform=axmatrix.transAxes,size=10) axmatrix.xaxis.grid(False) axmatrix.yaxis.grid(False) plt.savefig('../json/'+str(sd)+'/'+inf+'si'+str(selfinfluence)+'expf'+str(expfactor)+'dendrogram.png',dpi=150,bbox_inches = 'tight', pad_inches = 0.1, ) plt.close() m1='centroid' m2='single' # Compute and plot first dendrogram. fig = pylab.figure(figsize=(17,15)) ax1 = fig.add_axes([0.1245,0.1,0.1,0.6]) Y = sch.linkage(D, method=m1) Z1 = sch.dendrogram(Y,above_threshold_color="#ababab", orientation='left') ax1.set_xticks([]) ax1.set_yticks([]) ax1.set_axis_bgcolor('None') # Compute and plot second dendrogram. ax2 = fig.add_axes([0.335,0.825,0.5295,0.1]) Y = sch.linkage(D, method=m2) Z2 = sch.dendrogram(Y,above_threshold_color="#ababab") ax2.set_xticks([]) ax2.set_yticks([]) ax2.set_axis_bgcolor('None') # Plot distance matrix. axmatrix = fig.add_axes([0.3,0.1,0.6,0.6]) idx1 = Z1['leaves'] idx2 = Z2['leaves'] #D = E[idx1,:] #for logplot D = D[idx1,:] D = D[:,idx2] im = axmatrix.matshow(D, aspect='equal', origin='lower', cmap=cmap) axmatrix.set_xticks([]) axmatrix.set_yticks([]) # Plot colorbar. 
axcolor = fig.add_axes([0.87,0.1,0.02,0.6]) ac=pylab.colorbar(im, cax=axcolor) # Label up axmatrix.set_xticks(np.arange(len(matrix))-0) mlabs=list(np.array(labs)[idx2]) for i in range(len(np.array(labs)[idx2])): kz='-' for k in range(labelpad-len(np.array(labs)[idx2][i])):kz+='-' if i%2==1: mlabs[i]=kz+u' '+np.array(labs)[idx2][i]+u' '+'-' else: mlabs[i]='-'+u' '+np.array(labs)[idx2][i]+u' '+kz axmatrix.set_xticklabels(mlabs, minor=False,fontsize=7,fontproperties=prop) axmatrix.xaxis.set_label_position('top') axmatrix.xaxis.tick_top() pylab.xticks(rotation=-90, fontsize=8) axmatrix.set_yticks(np.arange(len(matrix))+0) mlabs=list(np.array(labs)[idx1]) for i in range(len(np.array(labs)[idx1])): kz='-' for k in range(labelpad-len(np.array(labs)[idx1][i])):kz+='-' if i%2==0: mlabs[i]=kz+u' '+np.array(labs)[idx1][i]+u' '+'-' else: mlabs[i]='-'+u' '+np.array(labs)[idx1][i]+u' '+kz axmatrix.set_yticklabels(mlabs, minor=False,fontsize=7,fontproperties=prop) axmatrix.yaxis.set_label_position('left') axmatrix.yaxis.tick_left() xlabels = axmatrix.get_xticklabels() for label in xlabels: label.set_rotation(90) axmatrix.text(1.11, 0.5, title, horizontalalignment='left', verticalalignment='center',rotation=270, transform=axmatrix.transAxes,size=10) axmatrix.xaxis.grid(False) axmatrix.yaxis.grid(False) plt.savefig('../json/'+str(sd)+'/'+inf+'si'+str(selfinfluence)+'expf'+str(expfactor)+'dendrogram2.png',dpi=150,bbox_inches = 'tight', pad_inches = 0.1, ) plt.close() return [returnmatrix,returnmatrix.T] ###Output _____no_output_____ ###Markdown ###Code #run once #GC=[] #create backup of global country list #for i in goodcountries: GC.append(i) #file(pop_path+'GC.json','w').write(json.dumps(GC)) ###Output _____no_output_____ ###Markdown Normal ###Code sd='r3' inf='n' si=1 basepower=int(sd[1]) print sd,'si',si,'pf',basepower [importancematrix,influencematrix]=dendro(sd,si,basepower) z=[np.mean(i) for i in influencematrix] #sum country influence on columns #if you wanted weighted influence, introduce weights (by trade volume i guess) here in the above mean s = pd.Series(1/np.array(z)) #need to 1/ to create inverse order s=s.rank(method='dense').values.astype(int)-1 #start from 0 not one #s is a ranked array on which country ranks where in country influence #we then composed the ordered vector of country influence influencevector=[0 for i in range(len(s))] for i in range(len(s)): influencevector[s[i]]=i #create mini-world goodcountries2=[goodcountries[i] for i in influencevector[:16]] c=['seaGreen','royalBlue','#dd1c77'] levels=[basepower] toplot=[cid[i] for i in goodcountries2] tolabel=[labeler[i] for i in goodcountries2] fig,ax=plt.subplots(1,2,figsize=(12,5)) for j in range(len(levels)): [importancematrix,influencematrix]=dendro(sd,si,levels[j]) z=[np.mean(i) for i in influencematrix] #sum country influence on columns #if you wanted weighted influence, introduce weights (by trade volume i guess) here in the above mean s = pd.Series(1/np.array(z)) #need to 1/ to create inverse order s=s.rank(method='dense').values.astype(int)-1 #start from 0 not one #s is a ranked array on which country ranks where in country influence #we then composed the ordered vector of country influence influencevector=[0 for i in range(len(s))] for i in range(len(s)): influencevector[s[i]]=i zplot=[] zplot2=[] for i in toplot: zplot.append(s[i]+1) zplot2.append(z[i]) ax[0].scatter(np.array(zplot),np.arange(len(zplot))-0.2+0.2*j,40,color=c[j],label=u'$p='+str(levels[j])+'$') 
ax[1].scatter(np.array(zplot2),np.arange(len(zplot))-0.2+0.2*j,40,color=c[j],label=u'$p='+str(levels[j])+'$') ax[0].set_ylim(-1,len(toplot)) ax[1].set_ylim(-1,len(toplot)) ax[0].set_xlim(0,20) ax[1].set_xscale('log') ax[0].set_yticks(range(len(toplot))) ax[0].set_yticklabels(tolabel) ax[1].set_yticks(range(len(toplot))) ax[1].set_yticklabels([]) ax[0].set_xlabel("Rank in Country Influence Vector") ax[1].set_xlabel("Average Country Influence") if levels[j]==basepower: civector={} for k in range(len(influencevector)): civector[k+1]={"inf":np.round(z[influencevector[k]],2),"country":labeler[goodcountries[influencevector[k]]]} uk=[] for uj in range(len(influencematrix)): for ui in range(len(influencematrix[uj])): if ui!=uj: uk.append({'type':'Country influence explorer', 'country':labeler[goodcountries[uj]], 'partner':labeler[goodcountries[ui]], 'inf':influencematrix[uj][ui], 'imp':importancematrix[uj][ui]}) #pd.DataFrame(civector).T.to_excel('../json/'+sd+'/'+ni+'_'+inf+'_c.xlsx') #file('../json/'+sd+'/'+ni+'_'+inf+'_uk.json','w').write(json.dumps(uk)) ax[1].legend(loc=1,framealpha=0) plt.subplots_adjust(wspace=0.1) plt.suptitle("Power Factor ($p$) Sensitivity of Normalized Country Influence",fontsize=14) #plt.savefig('../json/'+sd+'/'+ni+'_'+inf+'_powerfactor.png',dpi=150,bbox_inches = 'tight', pad_inches = 0.1, ) plt.show() ###Output _____no_output_____ ###Markdown Create energy cost by filling the matrix with the cost of row importing 1TWh from column. neglecting transport energy costs for now, this will be the extraction energy cost. Let us consider only solar for now. Try optimization with all three source, choose one with best objective value. 1TWh tier changes based on granurality. ###Code #weighted resource class calculator def re(dic,total): if dic!={}: i=max(dic.keys()) mi=min(dic.keys()) run=True keys=[] weights=[] counter=0 while run: counter+=1 #safety break if counter>1000: run=False if i in dic: if total<dic[i]: keys.append(i) weights.append(total) run=False else: total-=dic[i] keys.append(i) weights.append(dic[i]) i-=1 if i<mi: run=False if sum(weights)==0: return 0 else: return np.average(keys,weights=weights) else: return 0 region=pd.read_excel(pop_path+'regions.xlsx').set_index('Country') #load aroei=json.loads(file(pop_path+'aroei.json','r').read()) groei=json.loads(file(pop_path+'groei.json','r').read()) ndists=json.loads(file(pop_path+'ndists.json','r').read()) #average resource quality calculator for the globe def update_aroei(): global aroei aroei={} groei={} for c in res: for r in res[c]: if r not in groei: groei[r]={} for cl in res[c][r]['res']: if cl not in groei[r]: groei[r][cl]=0 groei[r][cl]+=res[c][r]['res'][cl] for r in groei: x=[] y=[] for i in range(len(sorted(groei[r].keys()))): x.append(float(sorted(groei[r].keys())[i])) y.append(float(groei[r][sorted(groei[r].keys())[i]])) aroei[r]=np.average(x,weights=y) #https://www.researchgate.net/publication/299824220_First_Insights_on_the_Role_of_solar_PV_in_a_100_Renewable_Energy_Environment_based_on_hourly_Modeling_for_all_Regions_globally cost=pd.read_excel(pop_path+'/maps/storage.xlsx') #1Bdi - grid def normdistance(a,b): return ndists[cid[a]][cid[b]] def gridtestimator(country,partner,forceptl=False): #return normdistance(country,partner) def electricitytrade(country,partner): scaler=1 gridpartners=grid5['electricity'] #existing trade partners if ((partner in gridpartners[country]) or (country in gridpartners[partner])): scaler+=cost.loc[region.loc[country]]['egrid'].values[0]/2.0 #neighbors, but need to build 
elif pop2iso[country] in c2neighbors: if (pop2iso[partner] in c2neighbors[pop2iso[country]]): scaler+=cost.loc[region.loc[country]]['grid'].values[0]/2.0*normdistance(country,partner) #not neighbors or partners but in the same region, need to build elif (region.loc[country][0]==region.loc[partner][0]): scaler+=cost.loc[region.loc[country]]['grid'].values[0]*3.0/2.0*normdistance(country,partner) #need to build supergrid, superlative costs else: scaler+=cost.loc[region.loc[country]]['grid'].values[0]*10.0/2.0*normdistance(country,partner) #need to build supergrid, superlative costs else: scaler+=cost.loc[region.loc[country]]['grid'].values[0]*10.0/2.0*normdistance(country,partner) return scaler def ptltrade(country,partner): #ptg costs scale with distance scaler=1+cost.loc[11]['ptg']*100.0*normdistance(country,partner) return scaler if ptltrade(country,partner)<electricitytrade(country,partner) or forceptl: return {"scaler":ptltrade(country,partner),"tradeway":"ptl"} else: return {"scaler":electricitytrade(country,partner),"tradeway":"grid"} #1Bdii - storage &curtailment def storagestimator(country): return cost.loc[region.loc[country]]['min'].values[0] #curtoversizer def curtestimator(country): return cost.loc[region.loc[country]]['curt'].values[0] #global benchmark eroei, due to state of technology eroei={ #'oil':13, #'coal':27, #'gas':14, #'nuclear':10, #'biofuels':1.5, #'hydro':84, #'geo_other':22, 'pv':13.74,#17.6, 'csp':7.31,#10.2, 'wind':11.17,#20.2 #24 } eroei={ 'pv':14,#17.6, 'csp':9,#10.2, 'wind':14,#20.2 #24 } #without esoei #calibrated from global, from Table S1 in ERL paper ###Output _____no_output_____ ###Markdown ALLINONE ###Code #initialize renewable totals for learning total2014={'csp':0,'solar':0,'wind':0} learning={'csp':0.04,'solar':0.04,'wind':0.02} year=2014 for fuel in total2014: total2014[fuel]=np.nansum([np.nansum(data[partner][year]['energy'][fuel]['cons']['navg3'])\ for partner in goodcountries if fuel in data[partner][year]['energy']]) total2014 #scenario id (folder id) #first is scenario family, then do 4 variations of scenarios (2 selfinluence, 2 power factor) as 01, 02... 
#import resources ################################### ################################### #load resources #predata=json.loads(file(pop_path+'maps/newres.json','r').read()) predata=json.loads(file(pop_path+'maps/res.json','r').read()) res={} for c in predata: res[c]={} for f in predata[c]: res[c][f]={} for r in predata[c][f]: res[c][f][r]={} for year in predata[c][f][r]: res[c][f][r][int(year)]=predata[c][f][r][year] predata={} print 'scenario',sd,'loaded resources', ################################### ################################### #load demand2 predata=json.loads(file(pop_path+'demand2.json','r').read()) demand2={} for c in predata: demand2[c]={} for year in predata[c]: demand2[c][int(year)]=predata[c][year] predata={} print 'demand', ################################### ################################### #load tradealpha d #predata=json.loads(file(pop_path+'/trade/traded.json','r').read()) predata=json.loads(file(pop_path+'/trade/smalltrade.json','r').read()) tradealpha={} for c in predata: tradealpha[c]={} for year in predata[c]: tradealpha[c][int(year)]=predata[c][year] predata={} print 'tradedata', ################################### ################################### #reload impex and normalize predata=json.loads(file(pop_path+'trade/nimpex.json','r').read()) nexportmatrix=predata["nexport"] nimportmatrix=predata["nimport"] nrexportmatrix=predata["nrexport"] nrimportmatrix=predata["nrimport"] predata={} print 'impex', ################################### ################################### #load latest savedata #we dont change the data for now, everything is handled through trade predata=json.loads(file(pop_path+'savedata6.json','r').read()) data={} for c in predata: data[c]={} for year in predata[c]: data[c][int(year)]=predata[c][year] predata={} print 'data' ################################### ################################### #reset balance ybalance={} #recalculate balances for year in range(2015,2101): balance={} if year not in ybalance:ybalance[year]={} for c in goodcountries: balance[c]=0 if c in tradealpha: f1=0 for fuel in tradealpha[c][year]: if 'Import' in tradealpha[c][year][fuel]: f1=np.nansum([f1,sum(tradealpha[c][year][fuel]['Import'].values())]) if 'Re-Import' in tradealpha[c][year][fuel]: f1=np.nansum([f1,sum(tradealpha[c][year][fuel]['Re-Import'].values())]) if 'Export' in tradealpha[c][year][fuel]: f1=np.nansum([f1,-sum(tradealpha[c][year][fuel]['Export'].values())]) if 'Re-Export' in tradealpha[c][year][fuel]: f1=np.nansum([f1,-sum(tradealpha[c][year][fuel]['Re-Export'].values())]) if fuel in data[c][year]['energy']: f1=np.nansum([f1,data[c][year]['energy'][fuel]['prod']['navg3']]) balance[c]-=f1 balance[c]+=demand2[c][year]*8760*1e-12 if 'balance' not in data[c][year]['energy']: data[c][year]['energy']['balance']={'prod':{'navg3':0},'cons':{'navg3':0}} data[c][year]['energy']['balance']['prod']['navg3']=max(0,balance[c])#balance can't be negative data[c][year]['energy']['balance']['cons']['navg3']=max(0,balance[c]) ybalance[year]=balance #save3('0a') #save default def cbalance(year,c): balance=0 if c in tradealpha: f1=0 for fuel in tradealpha[c][year]: if 'Import' in tradealpha[c][year][fuel]: f1=np.nansum([f1,sum(tradealpha[c][year][fuel]['Import'].values())]) if 'Re-Import' in tradealpha[c][year][fuel]: f1=np.nansum([f1,sum(tradealpha[c][year][fuel]['Re-Import'].values())]) if 'Export' in tradealpha[c][year][fuel]: f1=np.nansum([f1,-sum(tradealpha[c][year][fuel]['Export'].values())]) if 'Re-Export' in tradealpha[c][year][fuel]: 
f1=np.nansum([f1,-sum(tradealpha[c][year][fuel]['Re-Export'].values())]) if '_' in fuel: fuel=fuel[fuel.find('_')+1:] #if fuel in data[c][year]['energy']: # f1=np.nansum([f1,data[c][year]['energy'][fuel]['prod']['navg3']]) for fuel in data[c][year]['energy']: if fuel not in {"nrg_sum","nrg"}: f1=np.nansum([f1,data[c][year]['energy'][fuel]['prod']['navg3']]) balance-=f1 balance+=demand2[c][year]*8760*1e-12 return balance def res_adv(country,fuel): #this country's wavg resource compared to global x=[] y=[] if fuel=='solar':fuel='pv' d=groei[fuel] #global wavg resource class for i in range(len(sorted(d.keys()))): if float(d[sorted(d.keys())[i]])>0.1: x.append(float(sorted(d.keys())[i])) y.append(float(d[sorted(d.keys())[i]])) x2=[] y2=[] if country not in res: return 0 d2=res[country][fuel]['res'] #country's wavg resource class for i in range(len(sorted(d2.keys()))): if float(d2[sorted(d2.keys())[i]])>0.1: x2.append(float(sorted(d2.keys())[i])) y2.append(float(d2[sorted(d2.keys())[i]])) if y2!=[]: return np.average(x2,weights=y2)*1.0/np.average(x,weights=y) else: return 0 def costvectorranker(cv): k={} for i in cv: for j in cv[i]: k[(i)+'_'+str(j)]=cv[i][j] return sorted(k.items(), key=lambda value: value[1]) def trade(country,partner,y0,fuel,value,l0): lifetime=l0+int(random.random()*l0) tradeable[partner][fuel]-=value key=tradeway[country][partner]+'_'+fuel for year in range(y0,min(2101,y0+lifetime)): #add production if fuel not in data[partner][year]['energy']: data[partner][year]['energy'][fuel]={'prod':{'navg3':0},'cons':{'navg3':0}} data[partner][year]['energy'][fuel]['prod']['navg3']+=value data[partner][year]['energy']['nrg_sum']['prod']['navg3']+=value #add consumption if fuel not in data[country][year]['energy']: data[country][year]['energy'][fuel]={'prod':{'navg3':0},'cons':{'navg3':0}} data[country][year]['energy'][fuel]['cons']['navg3']+=value data[country][year]['energy']['nrg_sum']['cons']['navg3']+=value #add storage on country side (if not ptl) if tradeway[country][partner]=='grid': if fuel not in {'csp'}: if 'storage' not in data[country][year]['energy']: data[country][year]['energy']['storage']={'prod':{'navg3':0},'cons':{'navg3':0}} data[country][year]['energy']['storage']['prod']['navg3']+=value*storagestimator(country) data[country][year]['energy']['storage']['cons']['navg3']+=value*storagestimator(country) if country!=partner: #add import flow if key not in tradealpha[country][year]:tradealpha[country][year][key]={} if 'Import' not in tradealpha[country][year][key]:tradealpha[country][year][key]["Import"]={} if str(pop2iso[partner]) not in tradealpha[country][year][key]["Import"]: tradealpha[country][year][key]["Import"][str(pop2iso[partner])]=0 tradealpha[country][year][key]["Import"][str(pop2iso[partner])]+=value #add export flow if key not in tradealpha[partner][year]:tradealpha[partner][year][key]={} if 'Export' not in tradealpha[partner][year][key]:tradealpha[partner][year][key]["Export"]={} if str(pop2iso[country]) not in tradealpha[partner][year][key]["Export"]: tradealpha[partner][year][key]["Export"][str(pop2iso[country])]=0 tradealpha[partner][year][key]["Export"][str(pop2iso[country])]+=value #trade diversificatioin necessity def divfill(cv,divfactor,divbalance): scaler=min(1.0,divbalance/\ sum([tradeable[cv[i][0][:cv[i][0].find('_')]]\ [cv[i][0][cv[i][0].find('_')+1:]] for i in range(divfactor)])) #take all or partial for i in range(divfactor): partner=cv[i][0][:cv[i][0].find('_')] fuel=cv[i][0][cv[i][0].find('_')+1:] 
trade(country,partner,year,fuel,max(0,tradeable[partner][fuel])*scaler,lifetime) def tradefill(cv): totrade=[] tradesum=0 # #for i in range(len(cv)): #ASSYMETRIC TRADE PARTNER PICKING OUT OF TOP 5 k=[kk for kk in cv] for ii in range(len(cv)): z=min(len(k)-1,int(np.random.exponential(100)/100.0)) i=k[z] k=k[:z]+k[z+1:] ####SAME FROM HERE partner=i[0][:i[0].find('_')] fuel=i[0][i[0].find('_')+1:] if tradeable[partner][fuel]>balance-tradesum: totrade.append((i[0],balance-tradesum)) tradesum+=balance-tradesum break else: totrade.append((i[0],tradeable[partner][fuel])) tradesum+=tradeable[partner][fuel] for i in totrade: partner=i[0][:i[0].find('_')] fuel=i[0][i[0].find('_')+1:] trade(country,partner,year,fuel,i[1],lifetime) def omegafill(cv): global wasalready totrade=[] tradesum=0 for i in range(len(cv)): partner=cv[i][0][:cv[i][0].find('_')] fuel=cv[i][0][cv[i][0].find('_')+1:] if country==partner: if fuel not in wasalready: wasalready.add(fuel) if tradeable[partner][fuel]>balance-tradesum: totrade.append((cv[i][0],balance-tradesum)) tradesum+=balance-tradesum break else: totrade.append((cv[i][0],tradeable[partner][fuel])) tradesum+=tradeable[partner][fuel] #trade(country,partner,year,fuel,min(cv[i][1],tradeable[partner][fuel]),lifetime) for i in totrade: partner=i[0][:i[0].find('_')] fuel=i[0][i[0].find('_')+1:] trade(country,partner,year,fuel,i[1],lifetime) def nrgsum(country,year): return np.nansum([data[country][year]['energy'][i]['prod']['navg3'] for i in data[country][year]['energy'] if i not in ['nrg_sum','sum','nrg']]) def liquidcheck(year,country): oil=data[country][year]['energy']['oil']['prod']['navg3'] gas=data[country][year]['energy']['gas']['prod']['navg3'] try: ptl=sum([sum(tradealpha[country][year][i]['Import'].values()) for i in tradealpha[country][year] if 'ptl' in i]) except: ptl=0 liquidshare=(oil+gas+ptl)/nrgsum(country,year) return max(0,(h2[country]-liquidshare)*nrgsum(country,year)) #return amount to fill with liquids def liquidfill(country,year): toadjust=0 tofill=liquidcheck(year,country) adjustable={} if tofill>0: for fuel in data[country][year]['energy']: if fuel not in {"nrg","nrg_sum","storage","oil","gas"}: if data[country][year]['energy'][fuel]['prod']['navg3']>0: if not np.isnan(data[country][year]['energy'][fuel]['prod']['navg3']): toadjust+=data[country][year]['energy'][fuel]['prod']['navg3'] for fuel in tradealpha[country][year]: if fuel not in {"coal","oil","gas"}: if 'ptl' not in fuel: if 'Import' in tradealpha[country][year][fuel]: toadjust+=np.nansum(tradealpha[country][year][fuel]["Import"].values()) #scan fuels to adjust, calculate adjust scaler adjustscaler=1.0-tofill*1.0/toadjust #scale down fuels, record what to put back as ptl for fuel in data[country][year]['energy']: if fuel not in {"nrg","nrg_sum","storage","oil","gas"}: if data[country][year]['energy'][fuel]['prod']['navg3']>0: if not np.isnan(data[country][year]['energy'][fuel]['prod']['navg3']): data[country][year]['energy'][fuel]['prod']['navg3']*=adjustscaler if fuel not in adjustable: adjustable[fuel]={} adjustable[fuel][pop2iso[country]]=data[country][year]['energy'][fuel]['prod']['navg3']*(1-adjustscaler) for fuel in tradealpha[country][year]: if fuel not in {"coal","oil","gas"}: if 'ptl' not in fuel: if 'Import' in tradealpha[country][year][fuel]: for p in tradealpha[country][year][fuel]["Import"]: tradealpha[country][year][fuel]["Import"][p]*=adjustscaler if fuel[fuel.find('_')+1:] not in adjustable: adjustable[fuel[fuel.find('_')+1:]]={} 
adjustable[fuel[fuel.find('_')+1:]][p]=tradealpha[country][year][fuel]["Import"][p]*(1-adjustscaler) #put back ptl for fuel in adjustable: for p in adjustable[fuel]: if 'ptl_'+str(fuel) not in tradealpha[country][year]: tradealpha[country][year]['ptl_'+str(fuel)]={} if 'Import' not in tradealpha[country][year]['ptl_'+str(fuel)]: tradealpha[country][year]['ptl_'+str(fuel)]["Import"]={} tradealpha[country][year]['ptl_'+str(fuel)]["Import"][p]=adjustable[fuel][p] #scenario id (folder id) #first is scenario family, then do 4 variations of scenarios (2 selfinluence, 2 power factor) as 01, 02... #import resources ################################### ################################### #load resources #predata=json.loads(file(pop_path+'maps/newres.json','r').read()) predata=json.loads(file(pop_path+'maps/res.json','r').read()) res={} for c in predata: res[c]={} for f in predata[c]: res[c][f]={} for r in predata[c][f]: res[c][f][r]={} for year in predata[c][f][r]: res[c][f][r][int(year)]=predata[c][f][r][year] predata={} print 'scenario',sd,'loaded resources', ################################### ################################### #load demand2 predata=json.loads(file(pop_path+'demand2.json','r').read()) demand2={} for c in predata: demand2[c]={} for year in predata[c]: demand2[c][int(year)]=predata[c][year] predata={} print 'demand', ################################### ################################### #load tradealpha d #predata=json.loads(file(pop_path+'/trade/traded.json','r').read()) predata=json.loads(file(pop_path+'/trade/smalltrade.json','r').read()) tradealpha={} for c in predata: tradealpha[c]={} for year in predata[c]: tradealpha[c][int(year)]=predata[c][year] predata={} print 'tradedata', ################################### ################################### #reload impex and normalize predata=json.loads(file(pop_path+'trade/'+ni+'.json','r').read()) nexportmatrix=predata["nexport"] nimportmatrix=predata["nimport"] nrexportmatrix=predata["nrexport"] nrimportmatrix=predata["nrimport"] predata={} print ni, ################################### ################################### #load latest savedata #we dont change the data for now, everything is handled through trade predata=json.loads(file(pop_path+'savedata6.json','r').read()) data={} for c in predata: data[c]={} for year in predata[c]: data[c][int(year)]=predata[c][year] predata={} print 'data' ################################### ################################### [importancematrix,influencematrix]=dendro(sd,si,basepower) #2,5, or 4,3 z=[np.mean(i) for i in influencematrix] #sum country influence on columns #if you wanted weighted influence, introduce weights (by trade volume i guess) here in the above mean s = pd.Series(1/np.array(z)) #need to 1/ to create inverse order s=s.rank(method='dense').values.astype(int)-1 #start from 0 not one #s is a ranked array on which country ranks where in country influence #we then composed the ordered vector of country influence influencevector=[0 for i in range(len(s))] for i in range(len(s)): influencevector[s[i]]=i CV={} CV2={} TB={} fc={"solar":'pv',"csp":'csp',"wind":'wind'} divfactor=10 #min trade partners in trade diversification divshare=0.2 #min share of the trade diversification, total tradeway={} lifetime=20 #base lifetime maxrut=0.01 #for each type #max rampup total, if zero 5% of 1% 0.05 / 0.001 maxrur=1.5 #growth rate for each techno #max rampup rate 0.5 omegamin=0.1 #min share of the in-country diversification, per fuel random.seed(2) cs=set() for year in range(2025,2101): 
tradeable={} if year not in TB:TB[year]={} for i in range(len(goodcountries)): country=goodcountries[i] if country not in tradeable:tradeable[country]={'solar':0,'csp':0,'wind':0} for fuel in {"solar","csp","wind"}: if fuel not in data[country][year-1]['energy']: tradeable[country][fuel]=nrgsum(country,year-1)*maxrut elif data[country][year-1]['energy'][fuel]['prod']['navg3']==0: tradeable[country][fuel]=nrgsum(country,year-1)*maxrut else: tradeable[country][fuel]=max(nrgsum(country,year-1)*maxrut, data[country][year-1]['energy'][fuel]['prod']['navg3']*maxrur) for i in range(len(influencevector))[:]:#4344 country=goodcountries[influencevector[i]] cs.add(country) #if year==2015: if True: costvector={} for j in range(len(goodcountries)): partner=goodcountries[j] if partner not in costvector:costvector[partner]={} transactioncost=gridtestimator(country,partner) if country not in tradeway:tradeway[country]={} if partner not in tradeway[country]:tradeway[country][partner]=transactioncost["tradeway"] for fuel in {"solar","csp","wind"}: ru0=0 if fuel not in data[partner][year]['energy']: ru = ru0 elif partner not in res: ru = ru0 elif sum(res[partner][fc[fuel]]['res'].values())==0: ru=1 elif data[partner][year]['energy'][fuel]['prod']['navg3']==0: ru=ru0 else: ru=data[partner][year]['energy'][fuel]['prod']['navg3']*1.0/\ sum(res[partner][fc[fuel]]['res'].values()) ru=max(ru,0) ru=max(1,0.3+ru**0.1) #or 0.3 costvector[partner][fuel]=1.0/influencematrix[influencevector[i]][j]*\ transactioncost['scaler']*\ ru*\ 1.0/(eroei[fc[fuel]]*1.0/np.mean(eroei.values())*\ res_adv(partner,fuel)*\ aroei[fc[fuel]]*1.0/np.mean(aroei.values())) cv=costvectorranker(costvector) #fulfill trade diversification criterion balance=divshare*cbalance(year,country) if balance>0: divfill(cv,divfactor,balance) #fulfill in-country diversification criterion wasalready=set() balance=cbalance(year,country)*omegamin if balance>0: omegafill(cv) #fill first best source to min share omegafill(cv) #fill second best source to min share #fill up rest of trade balance=cbalance(year,country) if balance>0: tradefill(cv) #fill liquids up to min liquid level liquidfill(country,year) print i, #CV2[country]=cv print year save3(sd,cs) file('E:/Dropbox/Public/datarepo/netset/savedata/'+sd+'data.json','w').write(json.dumps(data)) file('E:/Dropbox/Public/datarepo/netset/savedata/'+sd+'trade.json','w').write(json.dumps(tradealpha)) ###Output _____no_output_____
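###Markdown A quick illustration of the cost-vector ranking used in the allocation loop above. This is a minimal, self-contained sketch with made-up country names and costs (they are not model outputs); it only restates how `costvectorranker` flattens the nested `{partner: {fuel: cost}}` dictionary into `partner_fuel` keys and sorts them cheapest-first, which is the order in which `divfill` and `tradefill` pick trade partners.

```python
# toy re-statement of the ranking helper; the numbers below are illustrative only
def costvectorranker(cv):
    k = {}
    for partner in cv:
        for fuel in cv[partner]:
            # flatten {partner: {fuel: cost}} into "partner_fuel" keys
            k[partner + '_' + str(fuel)] = cv[partner][fuel]
    # sort ascending by cost, so the cheapest partner/fuel pair comes first
    return sorted(k.items(), key=lambda item: item[1])

toy_costvector = {
    'CountryA': {'solar': 0.8, 'wind': 0.5, 'csp': 1.2},
    'CountryB': {'solar': 0.4, 'wind': 0.9, 'csp': 0.6},
}

for key, cost in costvectorranker(toy_costvector):
    print(key, cost)
# expected order: CountryB_solar, CountryA_wind, CountryB_csp, CountryA_solar, CountryB_wind, CountryA_csp
```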
Bonus_resources/deployment/2.Docker/4.docker_volumes.ipynb
###Markdown Add volume to your life!

![volumes](./assets/volume.gif)

Let's discover Docker volumes!

Goals

In this module, you will learn to:

* Understand what a Docker volume is
* Take advantage of it
* Improve your workflow

The problem

In the previous module, we discovered that changing the files of a running container can be a pain. Well, it shouldn't be! Let's see how we can fix that.

The solution

And the solution is... using a volume! Did you guess it? Wow, I'm impressed!

What is a volume?

In Docker, a volume is a bit like a hard disk. You give it a path on your computer (or on a server, a USB stick, ...) and ask Docker to mount that path at a specific place inside your container. In our case, we will mount our current directory in the `/app` folder of our container. The two locations are then linked: if we modify our code, the change is reflected in the container.

How to do it?

In Docker, there are multiple ways of using volumes: you can create named volumes, which can be reused by multiple containers, or anonymous volumes. For our exercise, we will use volumes in their simplest form, anonymous volumes. We add the `-v` flag to our run command to link a path on our machine to a path in the container. Here, we want everything in the code directory (which is in our current working directory) to be available under `/app` in the container. We could write out the complete path of our current directory, but that would be long to write and annoying to retype each time it changes. To avoid this, we use the environment variable `$PWD`, whose value is always the current working directory. Let's check that it works! If you want more information, you can read about volumes in the [Docker documentation](https://docs.docker.com/storage/volumes/).
###Code
!echo $PWD
###Output
/home/maxim/code/becode/Python-Upskilling/I-O/03.Docker
###Markdown Perfect! The `-v` parameter is parsed into 3 parts:

* The first part is the directory we want to link **on our local machine**. That's where we use the `$PWD` variable: `$PWD/code`
* The second part is the separator `:` between the first and third parts.
* The third part is the path where our local folder will be mounted **in the container**: `/app/code`

So the complete command is:

```bash
docker run -v $PWD/code:/app/code -t hello
```

The code folder also contains a `code_folder.txt` file. As we didn't copy it in our Dockerfile, it won't be in the container by default. But because we asked our volume to mount the entire code folder, the file is now there! Let's check it out. We first run the container without a volume and add `ls code` at the end of the command: it will start the container, run `ls code` in it, print the output and shut itself down.
###Code
!docker run -t hello ls code
###Output
hello_world.py
###Markdown Now we do the same **with** our volume
###Code
!docker run -v $PWD/code:/app/code -t hello ls code
###Output
code_folder.txt hello_world.py
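###Markdown Going a bit further (optional): the lesson above mentions that volumes can also be created with a name and reused by several containers, but only demonstrates the anonymous `-v path:path` form. Below is a small sketch of what the named-volume workflow could look like with the Docker CLI. The volume name `my-data` is arbitrary, and the example assumes the `hello` image built earlier in this module contains the usual shell utilities (`touch`, `ls`).

```bash
# create a named volume managed by Docker
docker volume create my-data

# mount the same volume into two containers:
# a file written by the first run is visible to the second one
docker run -v my-data:/app/data -t hello touch /app/data/shared.txt
docker run -v my-data:/app/data -t hello ls /app/data

# list and remove named volumes
docker volume ls
docker volume rm my-data
```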
airbnb-open-data/seattle/seattle-airbnb.ipynb
###Markdown Reading- [Introduction to Feature Selection methods with an example (or how to select the right variables?)](https://www.analyticsvidhya.com/blog/2016/12/introduction-to-feature-selection-methods-with-an-example-or-how-to-select-the-right-variables/)- [Medium Article](https://towardsdatascience.com/solving-regression-problems-by-combining-statistical-learning-with-machine-learning-82949f7ac18a) 0: Imports and data reading ###Code # Data pre-processing import numpy as np import pandas as pd # Handling warnings import warnings warnings.filterwarnings("ignore", category=FutureWarning) warnings.filterwarnings("ignore", category=DeprecationWarning) # Viz. import matplotlib import missingno import seaborn as sns import matplotlib.pyplot as plt plt.style.use('fivethirtyeight') matplotlib.rcParams['font.family'] = "Arial" import plotly as py import plotly.graph_objs as go from plotly.subplots import make_subplots from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot init_notebook_mode(connected=True) # Utils import collections import itertools # Scientific computing import scipy.stats as stats from scipy.stats import norm from scipy.special import boxcox1p import statsmodels import statsmodels.api as sm # ML Sklearn from sklearn.svm import SVR from sklearn.utils import resample from sklearn.pipeline import Pipeline from sklearn.impute import SimpleImputer from sklearn.kernel_ridge import KernelRidge from sklearn.compose import ColumnTransformer from sklearn.metrics import mean_squared_error, r2_score from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor from sklearn.preprocessing import scale, StandardScaler, RobustScaler, OneHotEncoder from sklearn.linear_model import Ridge, RidgeCV, Lasso, LassoCV, LinearRegression, ElasticNet, HuberRegressor from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV, KFold, StratifiedKFold, RandomizedSearchCV # Other ML from xgboost import XGBRegressor #Model interpretation modules import eli5 import lime import lime.lime_tabular import shap shap.initjs() # Load JS visualization code to notebook # Reading the data seattle_listing = pd.read_csv("data/listings.csv") len(seattle_listing.columns) ###Output _____no_output_____ ###Markdown Due to high count of columns, EDA on an individual level get challenging. Hence separating all the different types of data ###Code seattle_listing.dtypes.value_counts() # Fetching all the object dtypes seattle_listing.columns[seattle_listing.dtypes == object] # Fetching all the columns that aren't object seattle_listing.columns[seattle_listing.dtypes != object] ###Output _____no_output_____
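###Markdown As a sketch of the "separating all the different types of data" idea above (assuming the `seattle_listing` frame loaded earlier in this notebook), `pandas.DataFrame.select_dtypes` gives the same column split as the boolean indexing on `dtypes`, but returns sub-frames that can then be explored independently:

```python
import numpy as np

# categorical (object) and numeric subsets of the listing table
categorical_part = seattle_listing.select_dtypes(include='object')
numeric_part = seattle_listing.select_dtypes(include=np.number)

print(categorical_part.shape, numeric_part.shape)

# quick overview of each group
print(numeric_part.describe().T.head())
print(categorical_part.nunique().sort_values(ascending=False).head())
```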
02.Zoning_problem/Agent_behaviours/Schematic_design_multiagent_behaviours_oop.ipynb
###Markdown Agent Behaviours (ABM)In this Notebook all the behaviours that the spatial agents can perform are defined.The number of agents taken is considered to be one arbitarily one for the purpose of studying the rowth pattern definitively The following Agent Behaviours are planned: 1. Agent initial Position(Birth):* Search(2D-3D)Space according to Stencil and the Space program and assigning the initial position* Attraction / Repulsion movement of initial poostions of the agents based on the closeness relationships 2. Occupy:* (2D) Squareness growth: neighbours of neighbours get higher preference* (2D) Rectillinear Polyomino growth: growth pattern is in a combination of squares [https://en.wikipedia.org/wiki/Polyomino:~:text=A%20polyomino%20is%20a%20plane,of%20the%20regular%20square%20tiling.]* (2D) Organic Polyomino growth : Growth is pure merit based only (holes are avoided)* (3D) Cuboidal growth: neighbours of neighbours get higher preference in a cuboid* (3D) Rectillinear Polyomino growth: growth pattern is in a combination of cubes* (3D) Organic Polyomino growth : Growth is pure merit based only (holes are avoided) 3. Growth Lattice modification:* (2D),(3D) Attraction: The base lattice gets modified with additional values based on graph distances between the selected agents making them grow towards each other* (2D),(3D) Repulsion: The base lattice gets modified with additional values based on graph distances between the selected agents making them grow away from each other 4. Negotiation:?? 5. Unoccupy:?? InitializationLoading the necessary Libraries for the notebook ###Code import os import sys sys.path.append("D:/TU_Delft/Msc_Building_Technology/Semester_3/Graduation/Aditya_Graduation_Project_BT/06_Libraries") import topogenesis as tg import pyvista as pv import trimesh as tm import numpy as np import Widget_for_display as wid import Stencils as stn np.random.seed(0) np.set_printoptions(threshold=sys.maxsize) import networkx as nx stencil = stn.stencil_von_neumann print(stencil) ###Output <function stencil_von_neumann at 0x0000023310BC65E0> ###Markdown Loading the Base Lattice ( Availability Lattice)Since the stencil does not work for corner voxels the sides and the top and bottom voxels are made unavailable ###Code unit = 3 #Dummy Lattice for testing Dim = 15 index = Dim -1 bounds = np.ones((Dim,Dim), dtype=int) bounds[0,:] =0 bounds[:,0] =0 bounds[:,index] =0 bounds[index,:] =0 print (bounds) init = np.tile(bounds,(Dim,1,1)) print(init.shape) avail_lattice = tg.to_lattice(np.copy(init), init.shape) init_avail_lattice = tg.to_lattice(np.copy(init), init.shape) #print (init_avail_lattice) ###Output [[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] [0 1 1 1 1 1 1 1 1 1 1 1 1 1 0] [0 1 1 1 1 1 1 1 1 1 1 1 1 1 0] [0 1 1 1 1 1 1 1 1 1 1 1 1 1 0] [0 1 1 1 1 1 1 1 1 1 1 1 1 1 0] [0 1 1 1 1 1 1 1 1 1 1 1 1 1 0] [0 1 1 1 1 1 1 1 1 1 1 1 1 1 0] [0 1 1 1 1 1 1 1 1 1 1 1 1 1 0] [0 1 1 1 1 1 1 1 1 1 1 1 1 1 0] [0 1 1 1 1 1 1 1 1 1 1 1 1 1 0] [0 1 1 1 1 1 1 1 1 1 1 1 1 1 0] [0 1 1 1 1 1 1 1 1 1 1 1 1 1 0] [0 1 1 1 1 1 1 1 1 1 1 1 1 1 0] [0 1 1 1 1 1 1 1 1 1 1 1 1 1 0] [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]] (15, 15, 15) ###Markdown Loading the Enviornment Lattices ###Code #Dummy Lattice for testing enviornment_lattice = np.random.randint(1, 9, (np.shape(avail_lattice)), dtype='l')/10 #print(enviornment_lattice) one_neighbour_factor = 1 two_neighbour_factor = 2 three_neighbour_factor = 3 four_neighbour_factor = 4 ###Output _____no_output_____ ###Markdown Define the Neighborhoods for the behaviours (Stencils) ###Code 
# creating neighborhood definition stencil = tg.create_stencil("von_neumann", 1, 1) #print(stencil) # creating neighborhood definition stencil_squareness = tg.create_stencil("moore", 1, 1) # Reshaping the moore neighbourhood stencil_squareness[0,:,:] = 0 stencil_squareness[2,:,:] = 0 stencil_squareness.set_index([0,0,0], 0) stencil_squareness_t = np.transpose(stencil_squareness) #print(stencil_squareness_t) # creating neighborhood definition stencil_squareness_von = tg.create_stencil("von_neumann", 1, 1) # Reshaping the moore neighbourhood stencil_squareness_von[0,:,:] = 0 stencil_squareness_von[2,:,:] = 0 stencil_squareness_von.set_index([0,0,0], 0) stencil_squareness_von_t = np.transpose(stencil_squareness_von) #print(stencil_squareness_von) stencil_full_floor = tg.create_stencil("moore",int(Dim/2) )*0 stencil_full_floor[:,:,int(Dim/2)] = 1 stencil_full_floor.set_index([0,0,0], 0) #print(stencil_full_floor) stencil_cuboid = tg.create_stencil("moore", 1, 1) stencil_cuboid.set_index([0,0,0], 0) #print(stencil_cuboid) stencil_full_lattice = tg.create_stencil("moore",int(Dim/2) ) stencil_full_lattice.set_index([0,0,0], 0) #print(stencil_full_lattice) ###Output _____no_output_____ ###Markdown Vizualising the stencils ###Code Stencil_to_viz = stencil_squareness_t # initiating the plotter p = pv.Plotter(notebook=True) # Create the spatial reference grid = pv.UniformGrid() # Set the grid dimensions: shape because we want to inject our values grid.dimensions = np.array(Stencil_to_viz.shape) + 1 # The bottom left corner of the data set grid.origin = [0,0,0] # These are the cell sizes along each axis grid.spacing = [1,1,1] # Add the data values to the cell data grid.cell_arrays["values"] = Stencil_to_viz.flatten(order="F") # Flatten the stencil threshed = grid.threshold([0.9, 1.1]) # adding the voxels: light red p.add_mesh(threshed, show_edges=True, color="#ff8fa3", opacity=0.3) # plotting p.show(use_ipyvtk=True) ###Output _____no_output_____ ###Markdown Distance Field Construction ###Code # find the number of all voxels vox_count = avail_lattice.size # initialize the adjacency matrix adj_mtrx = np.zeros((vox_count,vox_count)) # extract the neighbourhood of all voxels # all_vox_neighs = avail_lattice.find_neighbours(stencil) # Finding the index of the available voxels in avail_lattice avail_index = np.array(np.where(avail_lattice == 1)).T # fill the adjacency matrix using the list of all neighbours for vox_loc in avail_index: # find the 1D id vox_id = np.ravel_multi_index(vox_loc, avail_lattice.shape) # retrieve the list of neighbours of the voxel based on the stencil vox_neighs = avail_lattice.find_neighbours_masked(stencil, loc = vox_loc) # iterating over the neighbours for neigh in vox_neighs: # setting the entry to one adj_mtrx[vox_id, neigh] = 1.0 # construct the graph g = nx.from_numpy_array(adj_mtrx) # compute the distance of all voxels to all voxels using floyd warshal algorithm #dist_mtrx = nx.floyd_warshall_numpy(g) #csv_flyod_warshall = np.savetxt('data.csv', dist_mtrx, delimiter=',') flyod_warshall_matrix =np.genfromtxt('data.csv',delimiter=',') dist_mtrx = flyod_warshall_matrix ###Output _____no_output_____ ###Markdown Constructing Distance matrix to the agent origin ###Code # select the corresponding row in the matrix myagent_attractor_one_dist = flyod_warshall_matrix[682] # find the maximum valid value max_valid = np.ma.masked_invalid(myagent_attractor_one_dist).max() print(max_valid) # set the infinities to one more than the maximum valid values 
myagent_attractor_one_dist[myagent_attractor_one_dist == np.inf] = max_valid + 1

# mapping the values from (0, max) to (1, 0)
dist_flat_one = 1 - myagent_attractor_one_dist / np.max(myagent_attractor_one_dist)
print(dist_flat_one)

# constructing the lattice
ent_acc_lattice_1 = tg.to_lattice(dist_flat_one.reshape(avail_lattice.shape), avail_lattice)

# select the corresponding row in the matrix
myagent_attractor_two_dist = dist_mtrx[1200]

# find the maximum valid value
max_valid = np.ma.masked_invalid(myagent_attractor_two_dist).max()
print(max_valid)

# set the infinities to one more than the maximum valid values
myagent_attractor_two_dist[myagent_attractor_two_dist == np.inf] = max_valid + 1

# mapping the values from (0, max) to (1, 0)
dist_flat_two = 1 - myagent_attractor_two_dist / np.max(myagent_attractor_two_dist)
print(dist_flat_two)

# constructing the lattice
ent_acc_lattice_2 = tg.to_lattice(dist_flat_two.reshape(avail_lattice.shape), avail_lattice)
###Output
32.0
[... printed normalized distance-field array values truncated for readability ...]
[... printed lattice of gradient values in [0, 1], truncated ...] ###Markdown Agents In the agent-based system, when an agent's stencil reaches its limit (none of the voxels it covers are free), the full floor (in the 2D case) or the full lattice (in the 3D case) is used as the stencil for that step of the simulation. 
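Before the full agent class below, here is a minimal, self-contained sketch of that fallback selection step, using plain NumPy arrays as stand-ins for the availability lattice and the two neighbour index lists (all names and values in this sketch are hypothetical illustrations, not part of the original notebook):

```python
import numpy as np

# Toy flattened availability lattice: 1 = free voxel, 0 = occupied (made-up values).
availability = np.array([0, 0, 1, 0, 1, 1, 0, 0])
values = np.linspace(0.0, 1.0, availability.size)   # toy environment value field

# Flat indices that a local stencil and the full-lattice stencil would return
# (stand-ins for env.availibility.find_neighbours_masked(...)).
local_neighs = np.array([0, 1, 3])           # every local neighbour is occupied here
full_neighs = np.arange(availability.size)   # fallback: consider the whole lattice

def select_neighbour(neighs, fallback_neighs, availability, values):
    """Pick the free neighbour with the highest value; fall back to the full stencil."""
    free = neighs[availability[neighs] == 1]
    if len(free) == 0:                       # the stencil has reached its limit
        free = fallback_neighs[availability[fallback_neighs] == 1]
    return free[np.argmax(values[free])]

print(select_neighbour(local_neighs, full_neighs, availability, values))  # -> 5
```

The agent methods below implement the same pattern on the actual lattice with `find_neighbours_masked`, selecting either the free neighbour with the maximum value (`np.argmax`) or a random free one (`np.random.choice`).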
Define the Agents Class ###Code # agent class class agent(): def __init__(self, origin, stencil, id): # define the origin attribute of the agent and making sure that it is an intiger self.origin = np.array(origin).astype(int) # define old origin attribute and assigning the origin to it as the initial state self.old_origin = self.origin # define stencil of the agent self.stencil = stencil #define agent id self.id = id # definition of random/argmax occupancy on a 2d squarish stencil def random_occupy_squareness(self, env): # retrieve the list of neighbours of the agent based on the stencil neighs = env.availibility.find_neighbours_masked(self.stencil, loc = self.origin) neighs_full_floor = env.availibility.find_neighbours_masked(stencil_full_floor, loc = self.origin) # find availability of neighbours neighs_availibility = env.availibility.flatten()[neighs] neighs_availibility_full_floor = env.availibility.flatten()[neighs_full_floor] # separate available neighbours free_neighs = neighs[neighs_availibility==1] free_neighs_full_floor = neighs_full_floor[neighs_availibility_full_floor==1] #print(free_neighs) if len(free_neighs)== 0 : free_neighs = free_neighs_full_floor else: free_neighs= free_neighs # retrieve the value of each neighbour free_neighs_value = env.value.flatten()[free_neighs] # find the neighbour with maximum my value # selected_neigh = free_neighs[np.argmax(free_neighs_value)] selected_neigh = np.random.choice(free_neighs,1) #print(selected_neigh) # update information #################### # set the current origin as the ol origin self.old_origin = self.origin # update the current origin with the new selected neighbour self.origin = np.array(np.unravel_index(selected_neigh, env.availibility.shape)).flatten() #print(self.origin) # definition of random/argmax occupancy on a 3d cubish stencil def random_occupy_cubish(self, env): # retrieve the list of neighbours of the agent based on the stencil neighs = env.availibility.find_neighbours_masked(self.stencil, loc = self.origin) neighs_full_lattice = env.availibility.find_neighbours_masked(stencil_full_lattice, loc = self.origin) # find availability of neighbours neighs_availibility = env.availibility.flatten()[neighs] neighs_availibility_full_lattice = env.availibility.flatten()[neighs_full_lattice] # separate available neighbours free_neighs = neighs[neighs_availibility==1] free_neighs_full_lattice = neighs_full_lattice[neighs_availibility_full_lattice==1] #print(free_neighs) if len(free_neighs)== 0 : free_neighs = free_neighs_full_lattice else: free_neighs= free_neighs # retrieve the value of each neighbour free_neighs_value = env.value.flatten()[free_neighs] # find the neighbour with maximum my value selected_neigh = free_neighs[np.argmax(free_neighs_value)] #selected_neigh = np.random.choice(free_neighs,1) #print(selected_neigh) # update information #################### # set the current origin as the ol origin self.old_origin = self.origin # update the current origin with the new selected neighbour self.origin = np.array(np.unravel_index(selected_neigh, env.availibility.shape)).flatten() #print(self.origin) def random_occupy_cubish_von_neumann(self, env): # retrieve the list of neighbours of the agent based on the stencil neighs = env.availibility.find_neighbours_masked(self.stencil, loc = self.origin) neighs_full_lattice = env.availibility.find_neighbours_masked(stencil_cuboid, loc = self.origin) # find availability of neighbours neighs_availibility = env.availibility.flatten()[neighs] neighs_availibility_full_lattice = 
env.availibility.flatten()[neighs_full_lattice] # separate available neighbours free_neighs = neighs[neighs_availibility==1] free_neighs_full_lattice = neighs_full_lattice[neighs_availibility_full_lattice==1] #print(free_neighs) if len(free_neighs)== 0 : free_neighs = free_neighs_full_lattice else: free_neighs= free_neighs # retrieve the value of each neighbour free_neighs_value = env.value.flatten()[free_neighs] # find the neighbour with maximum my value selected_neigh = free_neighs[np.argmax(free_neighs_value)] #selected_neigh = np.random.choice(free_neighs,1) #print(selected_neigh) # update information #################### # set the current origin as the ol origin self.old_origin = self.origin # update the current origin with the new selected neighbour self.origin = np.array(np.unravel_index(selected_neigh, env.availibility.shape)).flatten() #print(self.origin) # definition of 2d occupying method for agents def one_neighbour_occupy_squareness_moore(self, env): # retrieve the list of neighbours of the agent based on the stencil neighs = env.availibility.find_neighbours_masked(self.stencil, loc = self.origin) #print(neighs) neighs_full_floor = env.availibility.find_neighbours_masked(stencil_full_floor, loc = self.origin) # find availability of neighbours neighs_availibility = env.availibility.flatten()[neighs] neighs_availibility_full_floor = env.availibility.flatten()[neighs_full_floor] #print(neighs_availibility) # find env values of all neighbours all_neighs_value = env.value.flatten()[neighs] all_neighs_value_mod = np.copy(all_neighs_value) #finding number of neighbours and bumping the values based on adjacency for a 9 neighbourhood #print(neighbourhood_details) one = neighs_availibility[1] + neighs_availibility[2] two = neighs_availibility[0] + neighs_availibility[2] three = neighs_availibility[1] + neighs_availibility[4] four = neighs_availibility[0] + neighs_availibility[6] five = neighs_availibility[2] + neighs_availibility[7] six = neighs_availibility[3] + neighs_availibility[6] seven = neighs_availibility[5] + neighs_availibility[7] eight = neighs_availibility[6] + neighs_availibility[4] neighbourhood_details = [one,two,three,four,five,six,seven,eight] #print(neighbourhood_details) for detail in range(len(neighs_availibility)-1): neighbourhood_condition = neighbourhood_details[detail] #print(neighbourhood_condition) if neighbourhood_condition == 3: all_neighs_value_mod[detail]= all_neighs_value_mod[detail] + one_neighbour_factor elif neighbourhood_condition == 4: all_neighs_value_mod[detail]= all_neighs_value_mod[detail] + two_neighbour_factor else: all_neighs_value_mod[detail] = all_neighs_value_mod[detail] #print(all_neighs_value_mod) neighs_value_flattened = env.value.flatten() for val_mod in all_neighs_value_mod: for neigh in neighs : neighs_value_flattened[neigh]=val_mod # separate available neighbours free_neighs = neighs[neighs_availibility==1] free_neighs_full_floor = neighs_full_floor[neighs_availibility_full_floor==1] #print(free_neighs) if len(free_neighs)== 0 : free_neighs = free_neighs_full_floor else: free_neighs= free_neighs # retrieve the value of each neighbour free_neighs_value = neighs_value_flattened[free_neighs] #print(free_neighs_value) # find the neighbour with maximum my value selected_neigh = free_neighs[np.argmax(free_neighs_value)] #print(selected_neigh) # update information #################### # set the current origin as the ol origin self.old_origin = self.origin # update the current origin with the new selected neighbour self.origin = 
np.array(np.unravel_index(selected_neigh, env.availibility.shape)).flatten() #print(self.origin) # definition of 2d occupying method for agents def one_neighbour_occupy_squareness_von_neumann(self, env): # retrieve the list of neighbours of the agent based on the stencil neighs = env.availibility.find_neighbours_masked(self.stencil, loc = self.origin) neighs_full_floor = env.availibility.find_neighbours_masked(stencil_full_lattice, loc = self.origin) # find availability of neighbours neighs_availibility = env.availibility.flatten()[neighs] neighs_availibility_full_floor = env.availibility.flatten()[neighs_full_floor] # separate available neighbours free_neighs = neighs[neighs_availibility==1] free_neighs_full_floor = neighs_full_floor[neighs_availibility_full_floor==1] #print(free_neighs) if len(free_neighs)== 0 : free_neighs = free_neighs_full_floor else: free_neighs= free_neighs # retrieve the value of each neighbour free_neighs_value = env.value.flatten()[free_neighs] # find the neighbour with maximum my value # selected_neigh = free_neighs[np.argmax(free_neighs_value)] selected_neigh = np.random.choice(free_neighs,1) #print(selected_neigh) # update information #################### # set the current origin as the ol origin self.old_origin = self.origin # update the current origin with the new selected neighbour self.origin = np.array(np.unravel_index(selected_neigh, env.availibility.shape)).flatten() #print(self.origin) # definition of 3d occupying method for agents def one_neighbour_occupy_cubish(self, env): # retrieve the list of neighbours of the agent based on the stencil neighs = env.availibility.find_neighbours_masked(self.stencil, loc = self.origin) #print(neighs) neighs_full_lattice = env.availibility.find_neighbours_masked(stencil_full_lattice, loc = self.origin) # find availability of neighbours neighs_availibility = env.availibility.flatten()[neighs] neighs_availibility_full_lattice = env.availibility.flatten()[neighs_full_lattice] #print(neighs_availibility) # find env values of all neighbours all_neighs_value = env.value.flatten()[neighs] all_neighs_value_mod = np.copy(all_neighs_value) #finding number of neighbours and bumping the values based on adjacency for a 25 neighbourhood #print(neighbourhood_details) one = neighs_availibility[1] + neighs_availibility[3] + neighs_availibility[9]+1 two = neighs_availibility[0] + neighs_availibility[2] + neighs_availibility[4] + neighs_availibility[10] three = neighs_availibility[1] + neighs_availibility[5] + neighs_availibility[11] +1 four = neighs_availibility[0] + neighs_availibility[4] + neighs_availibility[6] + neighs_availibility[12] five = neighs_availibility[1] + neighs_availibility[3] + neighs_availibility[5] + neighs_availibility[7] six = neighs_availibility[2] + neighs_availibility[4] + neighs_availibility[8] + neighs_availibility[13] seven = neighs_availibility[3] + neighs_availibility[7] + neighs_availibility[14] +1 eight = neighs_availibility[4] + neighs_availibility[6] + neighs_availibility[8] + neighs_availibility[15] nine = neighs_availibility[5] + neighs_availibility[7] + neighs_availibility[16] +1 ten = neighs_availibility[0] + neighs_availibility[10] + neighs_availibility[12] + neighs_availibility[17] eleven = neighs_availibility[1] + neighs_availibility[9] + neighs_availibility[11] + neighs_availibility[18] twelve = neighs_availibility[2] + neighs_availibility[10] + neighs_availibility[13] + neighs_availibility[19] thirteen = neighs_availibility[3] + neighs_availibility[9] + neighs_availibility[14] + 
neighs_availibility[20] fourteen = neighs_availibility[5] + neighs_availibility[11] + neighs_availibility[16] + neighs_availibility[22] fifteen = neighs_availibility[6] + neighs_availibility[12] + neighs_availibility[15] + neighs_availibility[23] sixteen = neighs_availibility[7] + neighs_availibility[14] + neighs_availibility[16] + neighs_availibility[24] seventeen = neighs_availibility[8] + neighs_availibility[13] + neighs_availibility[15] + neighs_availibility[25] eighteen = neighs_availibility[9] + neighs_availibility[18] + neighs_availibility[20] +1 nineteen = neighs_availibility[10] + neighs_availibility[17] + neighs_availibility[19] + neighs_availibility[21] twenty = neighs_availibility[11] + neighs_availibility[18] + neighs_availibility[22] +1 twentyone = neighs_availibility[12] + neighs_availibility[17] + neighs_availibility[21] + neighs_availibility[23] twentytwo = neighs_availibility[18] + neighs_availibility[20] + neighs_availibility[22] + neighs_availibility[24] twentythree = neighs_availibility[13] + neighs_availibility[19] + neighs_availibility[21] + neighs_availibility[25] twentyfour = neighs_availibility[14] + neighs_availibility[20] + neighs_availibility[24] +1 twentyfive = neighs_availibility[15] + neighs_availibility[21] + neighs_availibility[23] + neighs_availibility[20] twentysix = neighs_availibility[16] + neighs_availibility[22] + neighs_availibility[24] +1 neighbourhood_details = [one,two,three,four,five,six,seven,eight,nine,ten,eleven,twelve,thirteen,fourteen,fifteen,sixteen, seventeen,eighteen,nineteen,twenty,twentyone,twentytwo,twentythree,twentyfour,twentyfive] #print(neighbourhood_details) for detail in range(len(neighs_availibility)-1): neighbourhood_condition = neighbourhood_details[detail] #print(neighbourhood_condition) if neighbourhood_condition == 5: all_neighs_value_mod[detail]= all_neighs_value_mod[detail] + one_neighbour_factor elif neighbourhood_condition == 6: all_neighs_value_mod[detail]= all_neighs_value_mod[detail] + two_neighbour_factor elif neighbourhood_condition == 7: all_neighs_value_mod[detail]= all_neighs_value_mod[detail] + three_neighbour_factor elif neighbourhood_condition == 8: all_neighs_value_mod[detail]= all_neighs_value_mod[detail] + four_neighbour_factor else: all_neighs_value_mod[detail] = all_neighs_value_mod[detail] #print(all_neighs_value_mod) neighs_value_flattened = env.value.flatten() for val_mod in all_neighs_value_mod: for neigh in neighs : neighs_value_flattened[neigh]=val_mod # separate available neighbours free_neighs = neighs[neighs_availibility==1] free_neighs_full_lattice = neighs_full_lattice[neighs_availibility_full_lattice==1] #print(free_neighs) if len(free_neighs)== 0 : free_neighs = free_neighs_full_lattice else: free_neighs= free_neighs # retrieve the value of each neighbour free_neighs_value = neighs_value_flattened[free_neighs] #print(free_neighs_value) # find the neighbour with maximum my value selected_neigh = free_neighs[np.argmax(free_neighs_value)] #print(selected_neigh) # update information #################### self.old_origin = self.origin # update the current origin with the new selected neighbour self.origin = np.array(np.unravel_index(selected_neigh, env.availibility.shape)).flatten() #print(self.origin) ###Output _____no_output_____ ###Markdown Initializing Agents ###Code # occ_lattice intilization occ_lattice = avail_lattice*0 -1 #print(occ_lattice) # Finding the index of the available voxels in avail_lattice avail_flat = avail_lattice.flatten() avail_index = np.array(np.where(avail_lattice == 
1)).T # Randomly choosing one available voxels agn_num = 1 select_id = np.random.choice(len(avail_index), agn_num) agn_origins = tuple(avail_index[select_id].flatten()) #stencil_squareness_t select_id_2 = np.random.choice(len(avail_index), agn_num) agn_origins_2 = tuple(avail_index[select_id_2].flatten()) select_id_3 = np.random.choice(len(avail_index), agn_num) agn_origins_3 = tuple(avail_index[select_id_2].flatten()) print(select_id) print(select_id_2) #print(select_id_2) myagent = agent(agn_origins, stencil_squareness_t, select_id) myagent_2 = agent(agn_origins_2, stencil_cuboid, select_id_2) myagent_3 = agent(agn_origins_3, stencil_squareness_t, select_id_3) #stencil_squareness_t,stencil_cuboid #print(select_id_2) # Agent init class def initialize_agents_random_origin (stencil,avail_lattice): #finding origin occ_lattice = avail_lattice*0 -1 avail_flat = avail_lattice.flatten() avail_index = np.array(np.where(avail_lattice == 1)).T select_id = np.random.choice(len(avail_index), agn_num) agn_origins = tuple(avail_index[select_id].flatten()) # Defining agents myagent = agent(agn_origins, stencil, select_id) return myagent #Agent_2D = Agent_initilization Agent_one=initialize_agents_random_origin (stencil_squareness_von_t,avail_lattice ) Agent_two=initialize_agents_random_origin (stencil,avail_lattice ) print(Agent_one) select_id_attractor_one = 682 agn_origins_attractor_one = tuple(avail_index[select_id_attractor_one].flatten()) myagent_attractor_one = agent(agn_origins_attractor_one, stencil_cuboid, select_id_attractor_one) print(agn_origins_attractor_one) select_id_attractor_two = 1200 agn_origins_attractor_two = tuple(avail_index[select_id_attractor_two].flatten()) myagent_attractor_two = agent(agn_origins_attractor_two, stencil_cuboid, select_id_attractor_two) print(agn_origins_attractor_two) ###Output (7, 2, 5) ###Markdown Define Environment Class ###Code # environment class class environment(): def __init__(self, lattices, agents,number_of_iterations,method_name): self.availibility = lattices["availibility"] self.value = lattices["enviornment"] self.agent_origin = self.availibility * 0 self.agents = agents self.update_agents() self.number_of_iterations = number_of_iterations self.method_name = method_name def update_agents(self): # making previous position available # self.availibility[tuple(self.agents.old_origin)] = self.availibility[tuple(self.agents.old_origin)] * 0 + 1 # removing agent from previous position self.agent_origin[tuple(self.agents.old_origin)] *= 0+1 # making the current position unavailable self.availibility[tuple(self.agents.origin)] = self.agents.id # adding agent to the new position self.agent_origin[tuple(self.agents.origin)] = self.agents.id def random_occupy_squareness_agents(self): # iterate over egents and perform the walk self.agents.random_occupy_squareness(self) # update the agent states in environment self.update_agents() def random_occupy_cubish_agents(self): # iterate over egents and perform the walk self.agents.random_occupy_cubish(self) # update the agent states in environment self.update_agents() def random_occupy_cubish_von_neumann_agents(self): # iterate over egents and perform the walk self.agents.random_occupy_cubish_von_neumann(self) # update the agent states in environment self.update_agents() def one_neighbour_occupy_squareness_moore(self): # iterate over egents and perform the walk self.agents.one_neighbour_occupy_squareness_moore(self) # update the agent states in environment self.update_agents() def 
one_neighbour_occupy_squareness_von_neumann(self): # iterate over egents and perform the walk self.agents.one_neighbour_occupy_squareness_von_neumann(self) # update the agent states in environment self.update_agents() def one_neighbour_occupy_cubish_agents(self): # iterate over egents and perform the walk self.agents.one_neighbour_occupy_cubish(self) # update the agent states in environment self.update_agents() ###Output _____no_output_____ ###Markdown Creating the Enviornment ###Code # name the lattices myagent_attractor_one env_lattices = {"availibility": avail_lattice,"enviornment": enviornment_lattice} env_lattices_attractors_one = {"availibility": avail_lattice,"enviornment": ent_acc_lattice_1} env_lattices_attractors_two = {"availibility": avail_lattice,"enviornment": ent_acc_lattice_2} # initiate the environment env = environment(env_lattices, myagent,50,"one_neighbour_occupy_squareness_moore") env_2 = environment(env_lattices, myagent_2,100,"one_neighbour_occupy_cubish_agents") env_3 = environment(env_lattices, myagent_3,50,"one_neighbour_occupy_squareness_moore") env_4 = environment(env_lattices, Agent_one,50,"one_neighbour_occupy_squareness_von_neumann") env_5 = environment(env_lattices, Agent_two,100,"random_occupy_cubish_von_neumann_agents") env_6 = environment(env_lattices_attractors_one, myagent_attractor_one,100,"one_neighbour_occupy_cubish_agents") env_7 = environment(env_lattices_attractors_two, myagent_attractor_two,100,"one_neighbour_occupy_cubish_agents") print(env_2.agents) ###Output <__main__.agent object at 0x000002335BBB2700> ###Markdown Run the Simulation ###Code env_availability_viz = [] env_list =[env_6,env_7] number_steps = max(map(lambda e:e.number_of_iterations,env_list)) for a in range(number_steps): # print(env.availibility) # print(env.agent_origin) for e in env_list: if a < e.number_of_iterations : #print(a) #print(e.number_of_iterations) if e.method_name == "one_neighbour_occupy_squareness_moore": e.one_neighbour_occupy_squareness_moore() elif e.method_name == "one_neighbour_occupy_cubish_agents" : e.one_neighbour_occupy_cubish_agents() elif e.method_name == "random_occupy_squareness_agents" : e.random_occupy_squareness_agents() elif e.method_name == "random_occupy_cubish_agents" : e.random_occupy_cubish_agents() elif e.method_name == "random_occupy_cubish_von_neumann_agents" : e.random_occupy_cubish_von_neumann_agents() elif e.method_name == "one_neighbour_occupy_squareness_von_neumann" : e.one_neighbour_occupy_squareness_von_neumann() env_availability_viz.append(e.availibility-1) ###Output _____no_output_____ ###Markdown Vizualize the Simulation ###Code p = pv.Plotter(notebook=True) base_lattice = env_availability_viz[0] print(base_lattice.unit) # Set the grid dimensions: shape + 1 because we want to inject our values on the CELL data grid = pv.UniformGrid() grid.dimensions = np.array(base_lattice.shape) + 1 # The bottom left corner of the data set grid.origin = base_lattice.minbound - base_lattice.unit * 0.5 # These are the cell sizes along each axis grid.spacing = base_lattice.unit # adding the boundingbox wireframe p.add_mesh(grid.outline(), color="grey", label="Domain") # adding the avilability lattice init_avail_lattice.fast_vis(p) # adding axes p.add_axes() p.show_bounds(grid="back", location="back", color="#aaaaaa") def create_mesh(value): f = int(value) lattice = env_availability_viz[f] # Add the data values to the cell data grid.cell_arrays["Agents"] = lattice.flatten(order="F").astype(int) # Flatten the array! 
# filtering the voxels threshed = grid.threshold([1.0, avail_lattice.size]) # adding the voxels p.add_mesh(threshed, name='sphere', show_edges=True, opacity=1.0, show_scalar_bar=False) return number_steps_2 = sum(map(lambda e:e.number_of_iterations,env_list)) p.add_slider_widget(create_mesh, [0, number_steps_2], title='Time', value=0, event_type="always", style="classic") p.show(use_ipyvtk=True) ###Output [1 1 1]
Databases/SPARQL/1.0 Getting Started/1.2 Thorough - Wikidata-Mayors.ipynb
###Markdown ---layout: postcategory: blogtitle: "Where do Mayors Come From: Querying Wikidata with Python and SPARQL"tags: [Wikidata, SPARQL, Python, Pandas, Data Science]image: /assets/wikidata_mayors_screen.png---In this article, we will be going through building queries for Wikidata with Python and SPARQL by taking a look where mayors in Europe are born. This tutorial is building up the knowledge to collect the data responsible for this [interactive visualization](https://janakiev.com/wikidata-mayors/) from the header image which was done with [deck.gl](http://deck.gl//).[Wikidata](https://www.wikidata.org/) is a free and collaborative [Linked Open Data (LOD)](https://en.wikipedia.org/wiki/Linked_data) knowledge base which can be edited by humans and machines. The project started 2012 by the [Wikimedia Foundation](https://foundation.wikimedia.org/wiki/Home) as an effort to centralize interwiki links, infoboxes and enable rich queries. Its ambitious goal is to structure the whole human knowledge in a way that is machine readable and it speaks well to the vision of Tim Berners-Lee in his [TED talk](https://www.youtube.com/watch?v=OM6XIICm_qo) of 2009. Surprisingly, the idea of the [Semantic Web](https://en.wikipedia.org/wiki/Semantic_Web) existed already in 2001 which is comprised of Linked Data. There have been many projects preceding Wikidata. There is [DBpedia](https://en.wikipedia.org/wiki/DBpedia) which is based on the infoboxes in Wikipedia, [Friend of a Friend (FOAF)](https://en.wikipedia.org/wiki/FOAF_(ontology)) which is an ontology to describe relationships in social networks, [GeoNames](https://en.wikipedia.org/wiki/GeoNames) which provides a database with geographical names, [Upper Mapping and Binding Exchange Layer (UMBEL)](https://en.wikipedia.org/wiki/UMBEL) which is a knowledge graph of concepts and entities and a whole set of others, but Wikidata seems to be the most ambitious project between them.All of the data there is free (under the [CC0 1.0](https://creativecommons.org/publicdomain/zero/1.0/) aka public domain), while anyone can edit and contribute to it. So it works in a similar way to Wikipedia. On most (if not all) Wikipedia pages, there is a _Wikidata Item_ link to its corresponding item in Wikidata, where you can find the linked information listed. Note that you can still find holes, but as it is a community effort, this is becoming better and growing over time by every contribution. To access the structured data you can query Wikidata by using its [SPARQL endpoint](https://query.wikidata.org/) which enables you to run advanced queries, or by using its [REST API](https://www.wikidata.org/w/api.php).In this diagram, you can see the structure of a Wikidata item. Each item has a list of [statements](https://www.wikidata.org/wiki/Help:Statements), which are triples in the form `SUBJECT` - `PREDICATE` - `OBJECT` (e.g. Douglas Adams is educated at the St John's College). In Wikidata the subject is referred to as [item](https://www.wikidata.org/wiki/Help:Items) and the predicate is referred to as [property](https://www.wikidata.org/wiki/Help:Properties). Each property has a value, which can be again an item, text, number, date, or GPS coordinates among others. Each value can have additional [qualifiers](https://www.wikidata.org/wiki/Help:Qualifiers) which have additional information with other property-value pairs such as start time. 
This structure will be important when we start to express queries with SPARQL.![Wikidata Data Model](assets/wikidata_data_model.png)image from [SPARQL/WIKIDATA Qualifiers, References and Ranks](https://en.wikibooks.org/wiki/SPARQL/WIKIDATA_Qualifiers,_References_and_Ranks).Also, all the code for this article and the interactive visualization can be found in this [repository](https://github.com/njanakiev/wikidata-mayors). Introducing SPARQLBefore getting to Python we will dissect [SPARQL](https://en.wikipedia.org/wiki/SPARQL) to get comfortable doing some queries. SPARQL is a query language used to retrieve data stored as [RDF](https://en.wikipedia.org/wiki/Resource_Description_Framework) (Resource Description Framework) and it is standardized by the W3C. It is a powerful language to query Linked data and we can also use it to query Wikidata. The syntax is similar to SQL, but it has some differences for people trained in SQL. One key difference is that in SQL you tend to avoid `JOIN` clauses as they can slow down queries, but in SPARQL the queries mostly consist of joins. But hang in there and let's take a look at such a query. In this example, we want to list all countries in the European Union.```sparqlSELECT ?country ?countryLabel WHERE { ?country wdt:P463 wd:Q458. SERVICE wikibase:label { bd:serviceParam wikibase:language "en". }}```You can try this query yourself [here](https://query.wikidata.org/SELECT%20%3Fcountry%20%3FcountryLabel%20WHERE%20%7B%0A%20%20%3Fcountry%20wdt%3AP463%20wd%3AQ458.%0A%20%20SERVICE%20wikibase%3Alabel%20%7B%20%0A%20%20%20%20bd%3AserviceParam%20wikibase%3Alanguage%20%22%5BAUTO_LANGUAGE%5D%2Cen%22.%20%0A%20%20%7D%0A%7D). Note that you can test and play with each query at [https://query.wikidata.org/](https://query.wikidata.org/). The editor there offers a handful of useful features. If you hover over the properties and items in the editor you will get information about them and the editor additionally offers autocompletion. You will also find a list of examples which are quite handy when starting fresh.Starting with the `SELECT` clause, we define the variables we want to get (variables are prefixed with a question mark). Inside the `WHERE` clause, we set restrictions which mostly take the form of the triples we have covered previously. The statement `?country wdt:P463 wd:Q458.` collects all items which have the property [member of (P463)](https://www.wikidata.org/wiki/Property:P463) with object [European Union (Q458)](https://www.wikidata.org/wiki/Q458) into the variable `country`. As you can see, the statements read like a sentence (i.e. country is a member of the European Union). You also notice that there are the prefixes `wd:` and `wdt:`. These denote items with `wd:` and properties with `wdt:`. We will cover more complicated prefixes later on in this tutorial when we will get into the SPARQL data representation. Finally, you will see a confusing part `SERVICE wikibase:label { bd:serviceParam wikibase:language "en". }` within the query. This snippet is responsible for retrieving labels for the collected items into an additional variable with `Label` postfix in the specified language (in this case English). In this query, this would be the `countryLabel` variable storing the label for the `country` variable. 
Note that the label is only retrieved for items that have a label in the particular language selected (in this case `"en"` for English), as there might be items that are not translated into this particular language.Interesting sidenote: When running the query you will notice [Kingdom of the Netherlands](https://en.wikipedia.org/wiki/Kingdom_of_the_Netherlands) with Wikidata item [Q29999](https://www.wikidata.org/wiki/Q29999) in the list of European countries. Surprisingly, [Netherlands](https://en.wikipedia.org/wiki/Netherlands) ([Q55](https://www.wikidata.org/wiki/Q55)) is a constituent country of the Kingdom of the Netherlands, but it is not a country. It is similar to how England is part of the United Kingdom. This [video](https://www.youtube.com/watch?v=WBcn-lzTJbk) does a great job explaining the situation if you were puzzled. Advanced QueriesLet's now explore other properties of the countries we have selected. If you take a look at [Germany (Q183)](https://www.wikidata.org/wiki/Q183), then you can see a whole host of properties like [population (P1082)](https://www.wikidata.org/wiki/Property:P1082), [median income (P3529)](https://www.wikidata.org/wiki/Property:P3529) or even images with the [image (P18)](https://www.wikidata.org/wiki/Property:P18) property. SPARQL enables us to retrieve those too which leads us to the next query.```sparqlSELECT ?country ?countryLabel ?population ?area ?medianIncomeWHERE { ?country wdt:P463 wd:Q458. ?country wdt:P1082 ?population. ?country wdt:P2046 ?area. ?country wdt:P3529 ?medianIncome. SERVICE wikibase:label { bd:serviceParam wikibase:language "en". }}```You can try this query [here](https://query.wikidata.org/SELECT%20%0A%20%20%3Fcountry%20%3FcountryLabel%20%0A%20%20%3Fcapital%20%3FcapitalLabel%0A%20%20%3Fpopulation%20%3Farea%20%3FmedianIncome%0AWHERE%20%7B%0A%20%20%3Fcountry%20wdt%3AP463%20wd%3AQ458.%0A%20%20%3Fcountry%20wdt%3AP36%20%3Fcapital.%0A%20%20%3Fcountry%20wdt%3AP1082%20%3Fpopulation.%0A%20%20%3Fcountry%20wdt%3AP2046%20%3Farea.%0A%20%20%3Fcountry%20wdt%3AP3529%20%3FmedianIncome.%0A%20%20%0A%20%20SERVICE%20wikibase%3Alabel%20%7B%20bd%3AserviceParam%20wikibase%3Alanguage%20%22en%22.%20%7D%0A%7D).After trying this query you will notice that the list of countries became shorter. The reason for this is that each country item that has no population, area or median income as a property is ignored by the query. You can imagine those triples also as a filter constraining the triples that only match this query. We can add the `OPTIONAL` clause which will leave those variables empty if the query cannot find triples within this clause.```sparqlSELECT ?country ?countryLabel ?population ?area ?medianIncomeWHERE { ?country wdt:P463 wd:Q458. OPTIONAL { ?country wdt:P1082 ?population } OPTIONAL { ?country wdt:P2046 ?area } OPTIONAL { ?country wdt:P3529 ?medianIncome } SERVICE wikibase:label { bd:serviceParam wikibase:language "en". 
}}```You can try this query [here](https://query.wikidata.org/SELECT%20%0A%20%20%3Fcountry%20%3FcountryLabel%20%0A%20%20%3Fcapital%20%3FcapitalLabel%0A%20%20%3Fpopulation%20%3Farea%20%3FmedianIncome%0AWHERE%20%7B%0A%20%20%3Fcountry%20wdt%3AP463%20wd%3AQ458.%0A%20%20%3Fcountry%20wdt%3AP36%20%3Fcapital.%0A%20%20%20%20%0A%20%20OPTIONAL%20%7B%20%3Fcountry%20wdt%3AP1082%20%3Fpopulation%20%7D%0A%20%20OPTIONAL%20%7B%20%3Fcountry%20wdt%3AP2046%20%3Farea%20%7D%0A%20%20OPTIONAL%20%7B%20%3Fcountry%20wdt%3AP3529%20%3FmedianIncome%20%7D%0A%20%20%0A%20%20SERVICE%20wikibase%3Alabel%20%7B%20bd%3AserviceParam%20wikibase%3Alanguage%20%22en%22.%20%7D%0A%7D). Now we see in the table that we will find all countries again.![Query Result](assets/query_result.png) SPARQL Data RepresentationWe continue our journey with a complicated query which we will unpack step by step. Our goal is now to get for all countries, the capital, the population, the mayor, his birthday and finally his birthplace. The query looks like this.```sparqlSELECT DISTINCT ?country ?countryLabel ?capital ?capitalLabel ?population ?mayor ?mayorLabel ?birth_place ?birth_placeLabel ?birth_date ?ageWHERE { Get all european countries, their capitals and the population of the capital ?country wdt:P463 wd:Q458. ?country wdt:P36 ?capital. OPTIONAL { ?capital wdt:P1082 ?population. } Get all mayors without an end date ?capital p:P6 ?statement. ?statement ps:P6 ?mayor. FILTER NOT EXISTS { ?statement pq:P582 ?end_date } Get birth place, birth date and age of mayor ?mayor wdt:P19 ?birth_place. ?mayor wdt:P569 ?birth_date. BIND(year(now()) - year(?birth_date) AS ?age) SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }}```You can try this query [here](https://query.wikidata.org/SELECT%20DISTINCT%20%0A%20%20%3Fcountry%20%3FcountryLabel%20%3Fcapital%20%3FcapitalLabel%20%3Fpopulation%20%0A%20%20%3Fmayor%20%3FmayorLabel%20%3Fbirth_place%20%3Fbirth_placeLabel%20%3Fbirth_date%20%3Fage%0AWHERE%20%7B%0A%20%20%3Fcountry%20wdt%3AP463%20wd%3AQ458.%0A%20%20%3Fcountry%20wdt%3AP36%20%3Fcapital.%0A%20%20OPTIONAL%20%7B%20%3Fcapital%20wdt%3AP1082%20%3Fpopulation.%20%7D%0A%20%20%0A%20%20%3Fcapital%20p%3AP6%20%3Fstatement.%0A%20%20%3Fstatement%20ps%3AP6%20%3Fmayor.%0A%20%20FILTER%20NOT%20EXISTS%20%7B%20%3Fstatement%20pq%3AP582%20%3Fend_date%20%7D%0A%20%20%20%20%0A%20%20%3Fmayor%20wdt%3AP19%20%3Fbirth_place.%0A%20%20%3Fmayor%20wdt%3AP569%20%3Fbirth_date.%0A%20%20BIND%28year%28now%28%29%29%20-%20year%28%3Fbirth_date%29%20AS%20%3Fage%29%0A%20%20%0A%20%20SERVICE%20wikibase%3Alabel%20%7B%20%0A%20%20%20%20bd%3AserviceParam%20wikibase%3Alanguage%20%22%5BAUTO_LANGUAGE%5D%2Cen%22.%20%0A%20%20%7D%0A%7D).Let's unpack what is happening here. First, we start by getting the capital of the country which we simply get via the [capital (P36)](https://www.wikidata.org/wiki/Property:P36) property. Next, we get to a more complicated part. To understand how to get to the mayor we have to look at the SPARQL Data Representation in this diagram.![SPARQL data representation](assets/SPARQL_data_representation.png)image from [SPARQL/WIKIDATA Qualifiers, References and Ranks](https://en.wikibooks.org/wiki/SPARQL/WIKIDATA_Qualifiers,_References_and_Ranks).This graph of the data representation that you see here shows the ways you can traverse it to get to various pieces of information with SPARQL starting from an item (in the graph shown as `wd:Q1234`). 
You can see on the left the classical path we took in our previous triples by using the `wdt:` prefix which leads to the value which can be another item, a numeric value (e.g. the population as in one of the previous queries) or various other data types.If you take a look at an item like [Rome (Q220)](https://www.wikidata.org/wiki/Q220), you will notice that there are various statements for the [head of government (P6)](https://www.wikidata.org/wiki/Property:P6). We want to get the one which has no end date. We can do this by traversing to the statement node with the `p:` prefix and storing it in the `statement` variable. From this variable, we can get the mayor with the `ps:` prefix. We could have done that with `wdt:` as we already have learned but we want to go one step further. We want to get to [end time (P582)](https://www.wikidata.org/wiki/Property:P582) which is stored as a qualifier in the statement. We can traverse to the qualifier with the `pq:` prefix which would give us the end date, but we want mayors without an end date. This can be done by using the `FILTER NOT EXISTS` clause which excludes all triples with statement node that have an end date.In the final part, we collect the birthplace, the birth date and the age of the mayor. In order to calculate his age, we use the `BIND` expression. This expression can be used to bind some expression to a variable (in our case the age variable). For this expression, we subtract the year of the birth date with the current year. This concludes this query. You can dig deeper in [SPARQL/WIKIDATA Qualifiers, References and Ranks](https://en.wikibooks.org/wiki/SPARQL/WIKIDATA_Qualifiers,_References_and_Ranks) which describes the data representation in further detail. Retrieving SPARQL Queries with PythonWe have seen how to work with SPARQL and we can also download the resulting tables in the editor, but how do we automate the whole process? We can access the Wikidata SPARQL endpoint also with Python, which enables us to directly load and analyze the data we have queried. To do this, we will employ the [request](http://docs.python-requests.org/en/master/) module which does a great job at doing HTTP requests with all its necessary tooling. We can create the request by adding the query as a parameter as follows. ###Code import requests url = 'https://query.wikidata.org/sparql' query = """ SELECT ?countryLabel ?population ?area ?medianIncome ?age WHERE { ?country wdt:P463 wd:Q458. OPTIONAL { ?country wdt:P1082 ?population } OPTIONAL { ?country wdt:P2046 ?area } OPTIONAL { ?country wdt:P3529 ?medianIncome } OPTIONAL { ?country wdt:P571 ?inception. BIND(year(now()) - year(?inception) AS ?age) } SERVICE wikibase:label { bd:serviceParam wikibase:language "en". } } """ r = requests.get(url, params = {'format': 'json', 'query': query}) data = r.json() ###Output _____no_output_____ ###Markdown We have packed the query in the `query` variable and we need to additionally supply request with the SPARQL endpoint URL which is [https://query.wikidata.org/sparql](https://query.wikidata.org/sparql). We want to use JSON as an output file, so we add this also to our request. The API returns XML as default but supports besides JSON also TSV, CSV and Binary RDF. This request returns a JSON with all the rows collected from the query, which we can use collect the rows into a [Pandas](https://pandas.pydata.org/) DataFrame as follows. 
###Code import pandas as pd from collections import OrderedDict countries = [] for item in data['results']['bindings']: countries.append(OrderedDict({ 'country': item['countryLabel']['value'], 'population': item['population']['value'], 'area': item['area']['value'] if 'area' in item else None, 'medianIncome': item['medianIncome']['value'] if 'medianIncome' in item else None, 'age': item['age']['value'] if 'age' in item else None})) df = pd.DataFrame(countries) df.set_index('country', inplace=True) df = df.astype({'population': float, 'area': float, 'medianIncome': float, 'age': float}) df.head() ###Output _____no_output_____ ###Markdown Let's explore the collected data visually and compare the various properties for each country. ###Code %matplotlib inline import matplotlib.pyplot as plt plt.style.use('ggplot') plt.figure(figsize=(16, 12)) for i, label in enumerate(['population', 'medianIncome', 'area', 'age']): plt.subplot(2, 2, i + 1) df_plot = df[label].sort_values().dropna() df_plot.plot(kind='barh', color='C0', ax=plt.gca()); plt.ylabel('') plt.xticks(rotation=30) plt.title(label.capitalize()) plt.ticklabel_format(style='plain', axis='x') plt.tight_layout() ###Output _____no_output_____ ###Markdown Mayors of all European CapitalsIn our final query, we will take a look at where mayors are born by adding the coordinates to the query. In order to get the latitude and longitude coordinates as variables, we need to add the following snippet.```sparql?capital p:P625/psv:P625 ?capital_node.?capital_node wikibase:geoLatitude ?capital_lat.?capital_node wikibase:geoLongitude ?capital_lon.```In the first line, we traverse the graph of the previously shown data representation. The slash in `p:P625/psv:P625` means that we continue to the _Value node_ of the [coordinate location (P625)](https://www.wikidata.org/wiki/Property:P625) without using a separate variable for the _Statement node_. Then, `wikibase:geoLatitude` and `wikibase:geoLongitude` are responsible for retrieving the latitude and longitude from the _Value node_ respectively. For more information, take a look at [Precision, Units and Coordinates](https://en.wikibooks.org/wiki/SPARQL/WIKIDATA_Precision,_Units_and_Coordinates). ###Code url = 'https://query.wikidata.org/sparql' query=""" SELECT DISTINCT ?countryLabel ?capitalLabel ?population ?capital_lon ?capital_lat ?mayorLabel ?birth_date ?age ?birth_place ?birth_placeLabel ?birth_place_lon ?birth_place_lat WHERE { ?country wdt:P463 wd:Q458. ?country wdt:P36 ?capital. OPTIONAL { ?capital wdt:P1082 ?population. } # Get latitude longitude coordinates of capital ?capital p:P625/psv:P625 ?capital_node. ?capital_node wikibase:geoLatitude ?capital_lat. ?capital_node wikibase:geoLongitude ?capital_lon. ?capital p:P6 ?statement. ?statement ps:P6 ?mayor. FILTER NOT EXISTS { ?statement pq:P582 ?end_date } ?mayor wdt:P569 ?birth_date. BIND(year(now()) - year(?birth_date) AS ?age) ?mayor wdt:P19 ?birth_place. ?birth_place wdt:P625 ?birth_place_coordinates. # Get latitude longitude coordinates of birth place ?birth_place p:P625/psv:P625 ?birth_place_node. ?birth_place_node wikibase:geoLatitude ?birth_place_lat. ?birth_place_node wikibase:geoLongitude ?birth_place_lon. SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". 
} } """ r = requests.get(url, params = {'format': 'json', 'query': query}) data = r.json() countries = [] for item in data['results']['bindings']: countries.append(OrderedDict({ label : item[label]['value'] if label in item else None for label in ['countryLabel', 'capitalLabel', 'capital_lon', 'capital_lat', 'population', 'mayorLabel', 'birth_date', 'age', 'birth_placeLabel', 'birth_place_lon', 'birth_place_lat']})) df = pd.DataFrame(countries) df.set_index('capitalLabel', inplace=True) df = df.astype({'population': float, 'age': float, 'capital_lon': float, 'capital_lat': float, 'birth_place_lon': float, 'birth_place_lat': float}) df.head() ###Output _____no_output_____ ###Markdown Taking this data set we can explore the age of the mayors and the population of the capital they are serving. ###Code plt.figure(figsize=(12, 5)) plt.subplot(1, 2, 1) df['age'].sort_values().plot(kind='barh', color='C0', title='Mayors Age') plt.ylabel('') plt.subplot(1, 2, 2) df['population'].sort_values().plot(kind='barh', color='C0', title='Population') plt.ylabel('') plt.ticklabel_format(style='plain', axis='x') plt.tight_layout() ###Output _____no_output_____ ###Markdown Next, let's take a look at how far mayors are born from the capital. For this, we will use the [geopy](https://geopy.readthedocs.io/en/stable/) package to calculate the distance between the coordinates. This [tutorial](https://janakiev.com/blog/gps-points-distance-python/) covers this topic if you are curious why we can't just use euclidean distance on GPS coordinates. ###Code from geopy.distance import distance coordinates = df[['capital_lon', 'capital_lat', 'birth_place_lon', 'birth_place_lat']] df['distance'] = [distance((lat0, lon0), (lat1, lon1)).m for lon0, lat0, lon1, lat1 in coordinates.values] df['distance'].sort_values().plot(kind='barh', color='C0', logx=True, figsize=(12, 7)) plt.xlabel('Distance (m)') plt.ylabel(''); ###Output _____no_output_____
chapters/8.ipynb
###Markdown ~~JAGS~~ PyMC3 * [8.2. A COMPLETE EXAMPLE](8.2.-A-COMPLETE-EXAMPLE)* [8.4. EXAMPLE: DIFFERENCE OF BIASES](8.4.-EXAMPLE:-DIFFERENCE-OF-BIASES)* [8.6.1. Defning new likelihood functions](8.6.1.-Defning-new-likelihood-functions)* [Exercise 8.1](Exercise-8.1)* [Exercise 8.2](Exercise-8.2)* [Exercise 8.4](Exercise-8.4) 8.2. A COMPLETE EXAMPLEPython version of `Jags-ExampleScript.R` ###Code # Load the data: import pandas as pd data_df = pd.read_csv('../datasets/z15N50.csv') data_df.head(5) data = data_df['y'].values # Define the model: # set matplotlib inline so pymc3 plays nicely with jupyter %matplotlib inline import pymc3 as pm with pm.Model() as model: # Priors for unknown model parameters theta = pm.Beta('theta', alpha=1, beta=1, transform=None) # Likelihood (sampling distribution) of observations y = pm.Bernoulli('y', p=theta, observed=data) # define sampling method step = pm.Slice() # similar to Gibbs sampling # step = pm.Metropolis(S=0.2, tune=False) # burn in chains theta_init = sum(data)/len(data) start = {'theta': theta_init} trace = pm.sample(500, step=step, start=start, njobs=3, progressbar=False) # sample the posterior trace = pm.sample(10000, step=step, start=trace[-1], njobs=3) %matplotlib inline import matplotlib.pyplot as plt plt.style.use('ggplot') _ = pm.traceplot(trace, figsize=(10,4)) _ = pm.plot_posterior(trace, figsize=(10,4), color='cornflowerblue') _ = pm.plots.autocorrplot(trace, max_lag=35, figsize=(15, 5), symmetric_plot=True) ###Output _____no_output_____ ###Markdown 8.4. EXAMPLE: DIFFERENCE OF BIASESPython version of `Jags-Ydich-XnomSsubj-Mbernbeta-Example.R` ###Code import numpy as np from dbda2e_utils import plotPost data_df = pd.read_csv('../datasets/z6N8z2N7.csv') data_df.head(5) data_df.groupby('s').count() # Define the model: with pm.Model() as model: start = {} for name, group in data_df.groupby('s'): data = group['y'].values # Priors for unknown model parameters theta = pm.Beta('theta_'+name, alpha=2, beta=2, transform=None) # Likelihood (sampling distribution) of observations y = pm.Bernoulli('y_'+name, p=theta, observed=data) start['theta_'+name] = data.mean() # define sampling method step = pm.Slice() # similar to Gibbs sampling # burn in chains trace = pm.sample(500, step=step, start=start, njobs=4, progressbar=False) # sample the posterior trace = pm.sample(10000, step=step, start=trace[-1], njobs=4) _ = pm.traceplot(trace) for name in data_df['s'].unique(): name = 'theta_'+name ax = pm.plot_posterior(trace[name], figsize=(10,4), color='cornflowerblue') ax.vlines(start[name], ax.get_ylim()[0], ax.get_ylim()[1], color='r') ax.set_title(name) ax.set_xlim([0,1]) trace_df = pm.trace_to_dataframe(trace) axs = pd.scatter_matrix(trace_df, figsize=(10,10), diagonal='hist', hist_kwds={'bins':30, 'normed':True, 'color':'cornflowerblue'}); # change default limits to better display results axs[0,0].set_xlim(0,1) axs[0,1].set_xlim(0,1) axs[0,1].set_ylim(0,1) axs[1,0].set_xlim(0,1) axs[1,0].set_ylim(0,1) axs[1,1].set_xlim(0,1) plt.show() f, ax = plt.subplots(1,1,figsize=(5,5)) delta = trace_df['theta_Reginald'] - trace_df['theta_Tony'] plotPost(delta, ax, r'$\theta_Reginald - \theta_Tony$') ax.vlines(start['theta_Reginald'] - start['theta_Tony'], ax.get_ylim()[0], ax.get_ylim()[1], color='r') plt.show() ###Output _____no_output_____ ###Markdown 8.5. 
SAMPLING FROM THE PRIOR DISTRIBUTION ###Code with pm.Model() as model: theta1 = pm.Beta('theta1', alpha=2, beta=2, transform=None) theta2 = pm.Beta('theta2', alpha=2, beta=2, transform=None) pm.Deterministic('delta', theta1 - theta2) step = pm.Slice() # burn in chains trace = pm.sample(500, step=step, njobs=4, progressbar=False) # sample the posterior trace = pm.sample(10000, step=step, start=trace[-1], njobs=4) _ = pm.plot_posterior(trace, figsize=(10,8), color='cornflowerblue') ###Output _____no_output_____ ###Markdown 8.6.1. Defning new likelihood functions ###Code import theano.tensor as T # generate test data y = np.random.randn(100) ###Output _____no_output_____ ###Markdown Part ADefine likelihood using native `Normal` function ###Code with pm.Model() as model: mu = pm.Uniform('mu', transform=None) sigma = pm.Uniform('sigma', transform=None) tau = 1 / sigma**2 y_hat = pm.Normal('y_hat', mu=mu, tau=tau, observed=y) step = pm.Metropolis() # burn in chains trace = pm.sample(500, step=step, njobs=4, progressbar=False) # sample the posterior trace = pm.sample(10000, step=step, start=trace[-1], njobs=4) _ = pm.plot_posterior(trace, figsize=(10,5), color='cornflowerblue') ###Output [-----------------100%-----------------] 10000 of 10000 complete in 2.0 sec ###Markdown Part BDefine likelihood by manually implementing the formula for normal distribution ###Code with pm.Model() as model: mu = pm.Uniform('mu', transform=None) sigma = pm.Uniform('sigma', transform=None) C = 1000 e = T.exp( -0.5*((y - mu)/sigma)**2) / (sigma*(2*np.pi)**0.5) likelihood = e / C y_hat = pm.Bernoulli('y_hat', likelihood, observed=np.ones_like(y)) step = pm.Metropolis() # burn in chains trace = pm.sample(500, step=step, njobs=4, progressbar=False) # sample the posterior trace = pm.sample(10000, step=step, start=trace[-1], njobs=4) _ = pm.plot_posterior(trace, figsize=(10,5), color='cornflowerblue') ###Output [-----------------100%-----------------] 10000 of 10000 complete in 2.1 sec ###Markdown Part CDefine likelihood by using `DensityDist` ###Code with pm.Model() as model: mu = pm.Uniform('mu', transform=None) sigma = pm.Uniform('sigma', transform=None) def likelihood(y): return T.exp( -0.5*((y - mu)/sigma)**2) / (sigma*(2*np.pi)**0.5) # DensityDist requires log probability so transform to log likelihood def log_likelihood(y): return T.log(likelihood(y)) y_hat = pm.DensityDist('y_hat', log_likelihood, observed=y) step = pm.Metropolis() # burn in chains trace = pm.sample(500, step=step, njobs=4, progressbar=False) # sample the posterior trace = pm.sample(10000, step=step, start=trace[-1], njobs=4) _ = pm.plot_posterior(trace, figsize=(10,5), color='cornflowerblue') ###Output [-----------------100%-----------------] 10000 of 10000 complete in 2.0 sec ###Markdown Exercise 8.1**Purpose**: Run the high level scripts with other data to see how easy they are. 
###Code # load data and append new fictional entries data_df = pd.read_csv('../datasets/z6N8z2N7.csv') new_samples = np.random.rand(50) # sample uniform distribution new_samples = (new_samples > 0.9).astype(int) # convert to 0s and 1s using threshold new_category = ['John'] * len(new_samples) new_entries = list(zip(new_samples, new_category)) data_df = pd.concat((data_df, pd.DataFrame(new_entries, columns=['y', 's'])), ignore_index=True) data_df.tail(5) # Define the model: with pm.Model() as model: start = {} for name, group in data_df.groupby('s'): data = group['y'].values # Priors for unknown model parameters theta = pm.Beta('theta_'+name, alpha=2, beta=2, transform=None) # Likelihood (sampling distribution) of observations y = pm.Bernoulli('y_'+name, p=theta, observed=data) start['theta_'+name] = data.mean() # define sampling method step = pm.Slice() # similar to Gibbs sampling # burn in chains trace = pm.sample(500, step=step, start=start, njobs=4, progressbar=False) # sample the posterior trace = pm.sample(10000, step=step, start=trace[-1], njobs=4) for name in data_df['s'].unique(): name = 'theta_'+name ax = pm.plot_posterior(trace[name], figsize=(10,4), color='cornflowerblue') ax.vlines(start[name], ax.get_ylim()[0], ax.get_ylim()[1], color='r') ax.set_title(name) ax.set_xlim([0,1]) ###Output _____no_output_____ ###Markdown Exercise 8.2**Purpose**: Pay attention to the output of ~~`smryMCMC`~~ `summary` ###Code pm.summary(trace) # ESS (Effective Sample Size) pm.effective_n(trace) ###Output _____no_output_____ ###Markdown Exercise 8.4**Purpose**: Explore the prior on a difference of parameters implied from the priors on the individual parameters. ###Code with pm.Model() as model: # beta_params = {'alpha': 1, 'beta': 1} beta_params = {'alpha': 0.5, 'beta': 0.5} theta1 = pm.Beta('theta1', transform=None, **beta_params) theta2 = pm.Beta('theta2', transform=None, **beta_params) pm.Deterministic('delta', theta1 - theta2) step = pm.Slice() # burn in chains trace = pm.sample(500, step=step, njobs=4, progressbar=False) # sample the posterior trace = pm.sample(10000, step=step, start=trace[-1], njobs=4) _ = pm.plot_posterior(trace, figsize=(10,8), color='cornflowerblue') ###Output _____no_output_____
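As a quick cross-check for Exercise 8.4 (an aside added here, not part of the book's code), the prior on the difference implied by two independent Beta(0.5, 0.5) priors can also be drawn directly with NumPy, without MCMC, and compared with the `plot_posterior` output above:

```python
import numpy as np

# Draw directly from the two independent Beta(0.5, 0.5) priors and form the difference.
rng = np.random.default_rng(0)
n = 100000
theta1 = rng.beta(0.5, 0.5, size=n)
theta2 = rng.beta(0.5, 0.5, size=n)
delta = theta1 - theta2

# By symmetry the implied prior on delta is centred on 0, but because Beta(0.5, 0.5)
# piles its mass near 0 and 1, delta concentrates near -1, 0 and +1.
print(round(delta.mean(), 3), round(delta.std(), 3))
```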
ParteI_Estatistica/Integral_MC_pi_and_Gaussian.ipynb
###Markdown Computing $\pi$ with Monte Carlo integration. In this example the value of $\pi$ is computed with the Monte Carlo method. If we randomly generate points in a square of side 2$r$, which has area $4 r^2$, and assign the value 1 to the points that fall inside the circle and the value 0 to those that fall outside, the ratio between the number of points inside the circle and the total number of points generated is: $$ p= \dfrac{N_{in}}{N_{tot}} \approx \dfrac{\pi r^2}{4 r^2} = \dfrac{\pi}{4}$$ Therefore, the estimate for the value of $\pi$ is: $$ \pi \approx 4 p = 4 \dfrac{N_{in}}{N_{tot}}$$ ###Code from random import uniform import numpy as np import matplotlib.pyplot as plt from matplotlib import cm def MCPi(Radius = 10.0 , Nb_Data = 1000): '''Function that estimates the value of pi with the Monte Carlo method. Returns two lists of coordinates, x and y, the list with the test value (0 for points outside the circle, 1 inside) and finally the estimated value of pi and its uncertainty sigma.''' x_list = [] y_list = [] test = [] Nb_Data_In = 0 for i in range(Nb_Data): x = uniform(-Radius,Radius) y = uniform(-Radius,Radius) x_list.append(x) y_list.append(y) if x**2+y**2 > Radius**2: test.append(0) else: test.append(1) Nb_Data_In = Nb_Data_In + 1 pi = 4.0 * Nb_Data_In / Nb_Data print('pi: {:1.5f}'.format(pi)) p=pi/4.0 sigma = 4.0*np.sqrt(p*(1-p)/Nb_Data) print('sigma: {:1.5f}'.format(sigma)) return x_list,y_list,test,pi,sigma r=10. x_M,y_M,test,pi, sigma =MCPi(r,1000) x_np = np.array(x_M) x_np=np.append(x_np,[-r,r]) x_np.sort() y_circle = (np.sqrt(10**2 -x_np**2)) colormap = plt.get_cmap("cool") plt.figure(figsize=(5,5)) plt.plot(x_np, y_circle ,linewidth=2, color="blue" ) plt.plot(x_np, -y_circle , linewidth=2, color="blue") plt.scatter(x_M,y_M,c=test, cmap=colormap,marker=",",s=1) plt.title("Pi with 1K points = {:1.2f} ± {:1.2f}".format(pi,sigma)) plt.show() ###Output pi: 3.15600 sigma: 0.05161 ###Markdown As we increase the number of generated points, the precision of the estimate improves. ###Code x_DM,y_DM,test,pi,sigma=MCPi(10.,10000) plt.figure(figsize=(5,5)) plt.plot(x_np, y_circle ,linewidth=2, color="blue" ) plt.plot(x_np, -y_circle , linewidth=2, color="blue") plt.scatter(x_DM,y_DM,c=test, cmap=colormap,marker=",",s=1) plt.title("Pi with 10K points = {:1.3f} ± {:1.3f}".format(pi,sigma)) plt.show() x_G,y_G,test,pi,sigma =MCPi(10.,1000000) plt.figure(figsize=(5,5)) plt.plot(x_np, y_circle ,linewidth=1, color="blue" ) plt.plot(x_np, -y_circle , linewidth=1, color="blue") plt.scatter(x_G,y_G,c=test, cmap=colormap,marker=",",s=1) plt.title("Pi with 1M points ={:1.4f} ± {:1.4f}".format(pi,sigma)) plt.show() from scipy.stats import norm from random import uniform import numpy as np N_in=0 Ntot=100000 x_list=[] y_list=[] test=[] for i in range(Ntot): x=uniform(1,9) y=uniform(0,0.2) x_list.append(x) y_list.append(y) if y < norm.pdf(x,5,2): N_in=N_in+1 if x<7 and x>3: test.append(1) else: test.append(2) else: test.append(0) p=N_in/Ntot sigma = np.sqrt(p*(1-p)/Ntot) Area = N_in/Ntot * 8.0*0.2 print(Area) sigmaA=sigma*8.0*0.2 print(sigmaA) x_p = np.linspace(0,10,1000) #x_p.sort() y_p=norm.pdf(x_p,5,2) import matplotlib.pyplot as plt from matplotlib import cm plt.plot(x_p,y_p) colormap = plt.get_cmap("cool") plt.scatter(x_list,y_list,c=test,cmap=colormap,marker=",",s=1) plt.title("Area with {:d} points = {:2.4f} ± {:2.4f} %".format(Ntot,Area*100,sigmaA*100)) ###Output 0.9572480000000001 0.0024804698476216156
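For reference (a short derivation and sketch added by the editor, not part of the original notebook), the uncertainty printed by `MCPi` follows from treating the hit count as a binomial variable: $$ N_{in} \sim \mathrm{Binomial}(N_{tot},\, p), \qquad \mathrm{Var}(\hat{p}) = \frac{p(1-p)}{N_{tot}}, \qquad \hat{\pi} = 4\hat{p} \;\Rightarrow\; \sigma_{\hat{\pi}} = 4\sqrt{\frac{p(1-p)}{N_{tot}}} $$ The same argument, scaled by the bounding-box area $8 \times 0.2$ instead of by 4, gives the `sigmaA` used for the Gaussian integral above. A vectorised NumPy version of the same hit-or-miss estimate (unit circle, arbitrary seed) looks like this:

```python
import numpy as np

# Vectorised hit-or-miss estimate of pi on the unit circle (a sketch, not from the notebook).
rng = np.random.default_rng(123)
n = 1000000
x = rng.uniform(-1.0, 1.0, n)
y = rng.uniform(-1.0, 1.0, n)
p_hat = np.mean(x**2 + y**2 <= 1.0)        # fraction of points inside the circle
pi_hat = 4.0 * p_hat
sigma = 4.0 * np.sqrt(p_hat * (1.0 - p_hat) / n)
print(pi_hat, sigma)
```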