What do you mean by odd loops in C language?

In the C programming language, loop statements are used to repeat a set of statements.
They are as follows −
for loop
while loop
do-while loop
In a for loop and a while loop, the condition determines how many times the loop body is executed.
for (k = 1; k<=5; k++)
Here, the loop executes as long as k<=5; whenever k>5, control comes out of the loop.
So, here the for-loop condition specifies the number of times the loop executes, i.e., 5 times.
Following is the C program for the for loop −
#include <stdio.h>

int main(){
   int k;
   for (k = 1; k<=5; k++){
      printf("%d ", k);
   }
   return 0;
}
When the above program is executed, it produces the following output −
1 2 3 4 5
while (k <= 5)
Here, the loop executes as long as k<=5; whenever k>5, control comes out of the loop.
So, here also, the while-loop condition specifies the number of times the loop executes, i.e., 5 times.
Following is the C program for the while loop −
#include <stdio.h>

int main(){
   int k;
   k = 1;
   while (k<=5){
      printf("%d ", k);
      k++;
   }
   return 0;
}
When the above program is executed, it produces the following output −
1 2 3 4 5
Sometimes a user may not know in advance how many times a loop is to be executed. If we want to execute a loop an unknown number of times, the concept of odd loops is used. This can be done using a for loop, a while loop or a do-while loop.
Following is the C program for the odd loop −
#include<stdio.h>
int main(){
   int number;
   number = 1;
   while(number == 1){ // odd loop: we don't know how many times the loop executes
      printf("enter a number:\n");
      scanf("%d", &number);
      if((number % 2) == 0)
         printf("number is even\n");
      else
         printf("number is odd\n");
      printf("do you want to test any number\n");
      printf("if yes then press '1'\n"); // if 1 is pressed, the loop executes again
      printf("else press '0'\n");        // if 0 is pressed, exit from the loop
      scanf("%d", &number);
   }
   return 0;
}
When the above program is executed, it produces the following output −
enter a number:
3
number is odd
do you want to test any number
if yes then press '1'
else press '0'
1
enter a number:
4
number is even
do you want to test any number
if yes then press '1'
else press '0'
1
enter a number:
9
number is odd
do you want to test any number
if yes then press '1'
else press '0'
0
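The same odd loop can also be written with a do-while loop, which runs the body at least once before testing the condition. The following is a minimal sketch of that variant; the separate again variable is introduced here only for illustration and is not part of the original program.

#include <stdio.h>

int main(){
   int number, again;
   do {
      printf("enter a number:\n");
      scanf("%d", &number);
      if((number % 2) == 0)
         printf("number is even\n");
      else
         printf("number is odd\n");
      printf("do you want to test any number\n");
      printf("if yes then press '1'\n");
      printf("else press '0'\n");
      scanf("%d", &again); // if 1 is entered, the body repeats
   } while(again == 1);
   return 0;
}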
java.lang.reflect - Field Class

The java.lang.reflect.Field class provides information about, and dynamic access to, a single field of a class or an interface. The reflected field may be a class (static) field or an instance field. A Field permits widening conversions to occur during a get or set access operation, but throws an IllegalArgumentException if a narrowing conversion would occur.
Following is the declaration for java.lang.reflect.Field class −
public final class Field
extends AccessibleObject
implements Member
This class has the following methods −
equals(Object obj) − Compares this Field against the specified object.
get(Object obj) − Returns the value of the field represented by this Field, on the specified object.
getAnnotation(Class<T> annotationClass) − Returns this element's annotation for the specified type if such an annotation is present, else null.
getBoolean(Object obj) − Gets the value of a static or instance boolean field.
getByte(Object obj) − Gets the value of a static or instance byte field.
getChar(Object obj) − Gets the value of a static or instance field of type char or of another primitive type convertible to type char via a widening conversion.
getDeclaredAnnotations() − Returns all annotations that are directly present on this element.
getDeclaringClass() − Returns the Class object representing the class or interface that declares the field represented by this Field object.
getDouble(Object obj) − Gets the value of a static or instance field of type double or of another primitive type convertible to type double via a widening conversion.
getFloat(Object obj) − Gets the value of a static or instance field of type float or of another primitive type convertible to type float via a widening conversion.
getGenericType() − Returns a Type object that represents the declared type for the field represented by this Field object.
getInt(Object obj) − Gets the value of a static or instance field of type int or of another primitive type convertible to type int via a widening conversion.
getLong(Object obj) − Gets the value of a static or instance field of type long or of another primitive type convertible to type long via a widening conversion.
getModifiers() − Returns the Java language modifiers for the field represented by this Field object, as an integer.
getName() − Returns the name of the field represented by this Field object.
getShort(Object obj) − Gets the value of a static or instance field of type short or of another primitive type convertible to type short via a widening conversion.
getType() − Returns a Class object that identifies the declared type for the field represented by this Field object.
hashCode() − Returns a hashcode for this Field.
isEnumConstant() − Returns true if this field represents an element of an enumerated type; returns false otherwise.
isSynthetic() − Returns true if the field represented by the current object is synthetic, else returns false.
setBoolean(Object obj, boolean z) − Sets the value of a field as a boolean on the specified object.
setByte(Object obj, byte b) − Sets the value of a field as a byte on the specified object.
setChar(Object obj, char c) − Sets the value of a field as a char on the specified object.
setDouble(Object obj, double d) − Sets the value of a field as a double on the specified object.
setFloat(Object obj, float f) − Sets the value of a field as a float on the specified object.
setInt(Object obj, int i) − Sets the value of a field as an int on the specified object.
setLong(Object obj, long l) − Sets the value of a field as a long on the specified object.
setShort(Object obj, short s) − Sets the value of a field as a short on the specified object.
toGenericString() − Returns a string describing this Field, including its generic type.
toString() − Returns a string describing this Field.
This class inherits methods from the following classes −
java.lang.reflect.AccessibleObject
java.lang.Object
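As a quick illustration of how these methods are typically combined, the sketch below obtains a Field, inspects it, and reads and writes its value. The Person class and its name field are hypothetical and exist only for this example.

import java.lang.reflect.Field;

public class FieldDemo {

   // A hypothetical class used only for this illustration
   static class Person {
      public String name = "Alice";
   }

   public static void main(String[] args) throws Exception {
      Person p = new Person();

      // Obtain the Field object for the public field "name"
      Field f = Person.class.getField("name");

      System.out.println(f.getName() + " : " + f.getType()); // name : class java.lang.String
      System.out.println(f.get(p));  // reads the current value: Alice
      f.set(p, "Bob");               // writes a new value
      System.out.println(f.get(p));  // Bob
   }
}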
Style Pandas Dataframe Like a Master | by Eyal Trabelsi | Towards Data Science

The basic idea behind styling is to leverage visual aids like color and format, in order to communicate insight more efficiently.
One of the most common ways of visualizing a dataset is using a table. Tables allow your data consumers to gather insight by reading the underlying data. For example, you may find yourself in scenarios where you want to provide your consumers access to the underlying data using a table.
In this article, you’ll learn how to add visualization to a pandas dataframe by using pandas styling and options/settings. The pandas documentation itself is pretty comprehensive, but if you’re looking for a slightly friendlier introduction, I think you came to the right place. I will use Kaggle’s “San Francisco Salaries dataset” as an example; as always, we start by loading the dataset using pandas.
Pandas code to load the dataset and some basic data munging:
df = pd.read_csv('Salaries.csv')\
    .replace('Not Provided', np.nan)\
    .astype({"BasePay": float, "OtherPay": float})
Pandas has an options system that lets you customize some aspects of its behavior; here we will focus on display-related options. You may have experienced the following issues when you rendered the dataframe:
There are too many columns/rows in the dataframe and some columns/rows in the middle are omitted on display. For example, if one would like to show at most 7 rows and at most 7 columns, one would set the display options as shown below.
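The snippet for this appears to have been dropped during extraction; a minimal equivalent using the standard pandas display options would be:

import pandas as pd

# show at most 7 rows and 7 columns when a dataframe is displayed
pd.set_option('display.max_rows', 7)
pd.set_option('display.max_columns', 7)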
Columns containing long text get truncated, and columns containing floats display too many or too few digits, only on display; the underlying data is untouched.
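These, too, can be controlled through display options; for example (the values here are arbitrary and can be adjusted to taste):

# widen the column width used before long text gets truncated
pd.set_option('display.max_colwidth', 100)

# display floats with two decimal places
pd.set_option('display.float_format', '{:.2f}'.format)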
And a lot more use cases.
As we mentioned, pandas also has a styling system that lets you customize some aspects of the rendered dataframe, using CSS. You write “style functions” that take scalars, DataFrames or Series, and return like-indexed DataFrames or Series with CSS "attribute: value" pairs for the values.
The most straightforward styling example is using currency symbols when working with currency values. For instance, in our data some of the columns (BasePay, OtherPay, TotalPay, and TotalPayBenefits) are currency values, so we would like to add dollar signs and commas. This can be done using the style.format function:
Pandas code to render the dataframe with formatting of currency columns
df.head(10).style.format({"BasePay": "${:20,.0f}", "OtherPay": "${:20,.0f}", "TotalPay": "${:20,.0f}", "TotalPayBenefits":"${:20,.0f}"})
These styling functions can be incrementally passed to the Styler, which collects the styles before rendering. Thus, if we want to add a function that formats the EmployeeName and JobTitle as well, this can be done using another style.format function:
Pandas code to render dataframe that also formats some columns to lower case
df.head(10).style.format({"BasePay": "${:20,.0f}", "OtherPay": "${:20,.0f}", "TotalPay": "${:20,.0f}", "TotalPayBenefits": "${:20,.0f}"})\
    .format({"JobTitle": lambda x: x.lower(), "EmployeeName": lambda x: x.lower()})
Seems a lot better now, but let's take it a step forward: the index here doesn't add any real information, so we can use the hide_index function to suppress the display of the index, using the following code snippet:
Pandas code to render the formatted dataframe without the index
df.head(10).style.format({"BasePay": "${:20,.0f}", "OtherPay": "${:20,.0f}", "TotalPay": "${:20,.0f}", "TotalPayBenefits": "${:20,.0f}"})\
    .format({"JobTitle": lambda x: x.lower(), "EmployeeName": lambda x: x.lower()})\
    .hide_index()
Having this type of flexibility when it comes to rendering our dataset is pretty powerful and useful, but that is, simply put, NOT ENOUGH. You can apply conditional formatting, that is, visual styling of a DataFrame that depends on the actual data within it. The simplest examples are the built-in functions in the style API; for instance, one can highlight the highest number in green and the lowest number in red:
Pandas code that also highlights minimum/maximum values
df.head(10).style.format({"BasePay": "${:20,.0f}", "OtherPay": "${:20,.0f}", "TotalPay": "${:20,.0f}", "TotalPayBenefits": "${:20,.0f}"})\
    .format({"JobTitle": lambda x: x.lower(), "EmployeeName": lambda x: x.lower()})\
    .hide_index()\
    .highlight_max(color='lightgreen')\
    .highlight_min(color='#cd4f39')
Another useful function is background_gradient, which can highlight the range of values in a column. In addition, the cmap argument allows us to choose a color palette for the gradient. The matplotlib documentation lists all the available options (seaborn has some options as well).
Pandas code that also adds a background gradient
df.head(10).style.format({"BasePay": "${:20,.0f}", "OtherPay": "${:20,.0f}", "TotalPay": "${:20,.0f}", "TotalPayBenefits": "${:20,.0f}"})\
    .format({"JobTitle": lambda x: x.lower(), "EmployeeName": lambda x: x.lower()})\
    .hide_index()\
    .background_gradient(cmap='Blues')
One can even use styler.set_properties when the style doesn’t actually depend on the values. In this example, we will render our dataset with a black background and with green color for the text itself.
Pandas code to render the formatted dataframe in the same way for each cell.
df.head(10).style.set_properties(**{'background-color': 'black', 'color': 'lawngreen', 'border-color': 'white'})
But if we are honest, most of the time we would like to change the visualization attributes depending on the values and on what we want to emphasize. We can use one of the following to help reach our goal:
Styler.applymap(func) for element-wise styles.
Styler.apply(func, axis=0) for column-wise styles.
Styler.apply(func, axis=1) for row-wise styles.
Styler.apply(func, axis=None) for tablewise styles.
The first example is highlighting all negative values in a dataframe, sketched below.
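A minimal sketch of that idea, applied only to the numeric pay columns; the color_negative_red helper is introduced here purely for illustration:

def color_negative_red(val):
    # style function: returns a CSS string for each cell
    color = 'red' if isinstance(val, (int, float)) and val < 0 else 'black'
    return f'color: {color}'

df.head(10).style.applymap(color_negative_red,
                           subset=["BasePay", "OtherPay", "TotalPay", "TotalPayBenefits"])

The article's own variant below instead changes the font color depending on whether the value is a string: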
Pandas code to render the formatted dataframe with changed font color if the value is a string
df.head(10).style.format({"BasePay": "${:20,.0f}", "OtherPay": "${:20,.0f}", "TotalPay": "${:20,.0f}", "TotalPayBenefits": "${:20,.0f}"})\
    .format({"JobTitle": lambda x: x.lower(), "EmployeeName": lambda x: x.lower()})\
    .hide_index()\
    .applymap(lambda x: f"color: {'red' if isinstance(x, str) else 'black'}")
At last, the pandas styling API also supports more advanced styling, like drawing bar charts within the columns. We will introduce here the bar function and some of the parameters that configure the way it is displayed in the table:
df.head(10).style.format({"BasePay": "${:20,.0f}", "OtherPay": "${:20,.0f}", "TotalPay": "${:20,.0f}", "TotalPayBenefits": "${:20,.0f}"})\
    .format({"JobTitle": lambda x: x.lower(), "EmployeeName": lambda x: x.lower()})\
    .hide_index()\
    .bar(subset=["OtherPay"], color='lightgreen')\
    .bar(subset=["BasePay"], color='#ee1f5f')\
    .bar(subset=["TotalPay"], color='#FFA07A')
The pandas style API and the options API are really useful when you get towards the end of your data analysis and need to present the results to others. There are a few tricky components to string formatting, so hopefully the items highlighted here are useful to you.
time.Tick() Function in Golang With Examples - GeeksforGeeks

21 Apr, 2020
In the Go language, the time package supplies functionality for measuring and displaying time. The Tick() function is a utility wrapper for the NewTicker function; it only provides access to the ticking channel. Tick is convenient for clients who don't need to shut down the Ticker. The Tick method returns nil if the stated duration is less than or equal to 0. This function is defined under the time package, so you need to import the "time" package in order to use it.
Syntax:
func Tick(d Duration) <-chan Time
Here, d is the duration of time for the ticker to tick, and chan is the ticking channel.
Note: Tickers are used to do something frequently at regular intervals of the stated time.
Return Value: It returns a channel on which the current date and time are delivered at regular intervals, and nil if d <= 0.
Example 1:
// Golang program to illustrate the usage of
// Tick() function

// Including main package
package main

// Importing fmt and time
import (
    "fmt"
    "time"
)

// Defining UTCtime
func UTCtime() string {
    return ""
}

// Main function
func main() {

    // Calling Tick method
    // using range keyword
    for tick := range time.Tick(3 * time.Second) {

        // Prints UTC time and date
        fmt.Println(tick, UTCtime())
    }
}
Output:
2020-04-02 03:16:22.27131713 +0000 UTC m=+3.000249815
2020-04-02 03:16:25.271334601 +0000 UTC m=+6.000267330
2020-04-02 03:16:28.271312516 +0000 UTC m=+9.000245191
2020-04-02 03:16:31.271369788 +0000 UTC m=+12.000302595
2020-04-02 03:16:34.271309254 +0000 UTC m=+15.000241952
2020-04-02 03:16:37.271324182 +0000 UTC m=+18.000256858
2020-04-02 03:16:40.271322789 +0000 UTC m=+21.000255504
2020-04-02 03:16:43.271295568 +0000 UTC m=+24.000228305 ... and so on
Here, we have used the range keyword to iterate over the channel. The current date and time are returned after a regular interval of time, so the output differs between runs. The loop doesn't stop until you terminate the program.
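Tick is handy precisely because you never have to stop it. If you do need to stop ticking at some point, time.NewTicker is the usual alternative, because it exposes a Stop method. A minimal sketch, where the one-second duration and three iterations are chosen only for illustration:

package main

import (
    "fmt"
    "time"
)

func main() {
    ticker := time.NewTicker(1 * time.Second)
    defer ticker.Stop() // release the ticker's resources when done

    for i := 0; i < 3; i++ {
        tick := <-ticker.C // receive from the ticker's channel
        fmt.Println(tick)
    }
}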
Example 2:
// Golang program to illustrate the usage of
// Tick() function

// Including main package
package main

// Importing fmt and time
import (
    "fmt"
    "time"
)

// Defining UTCtime
func UTCtime() string {
    return ""
}

// Main function
func main() {

    // Calling Tick method using range
    // keyword
    for tick := range time.Tick(-1 * time.Second) {

        // Prints UTC time and date
        fmt.Println(tick, UTCtime())
    }
}
Output:
fatal error: all goroutines are asleep - deadlock!
goroutine 1 [chan receive (nil chan)]:
main.main()
/home/runner/SociableInsubstantialCommunication/main.go:23 +0x149
Here, in the above code, the stated duration is negative, so nil is returned; ranging over a nil channel blocks forever, which is why the deadlock error occurs.
Convert given Array of Strings to a Camel Case format sentence - GeeksforGeeks

12 Dec, 2021
Given an array arr[] of N strings, each containing words with upper or lower case English alphabets, the task is to create a sentence in a Camel Case format using them.
Example:
Input: arr[] = {“AnNiruddHA Routh”, “LOVES”, “to”, “COdE everyDAY”}
Output: Anniruddha Routh Loves To Code Everyday
Explanation: The above sentence is the merged sentence of all the words in the given order in Camel Case.
Input: arr[] = {“I”, “GOT”, “iNtErN”, “at geekSfoRgeekS”}
Output: I Got Intern At Geeksforgeeks
Approach: The given problem can be solved by traversing each word one by one and inserting every character in the Camel case format into a resultant string, as shown in steps below:
Create an empty string to store the resultant string
Traverse the array of strings word by word, and for each word:
If the character is at first index, insert the current character in upper case format
Else insert all other characters in lower case format
Whenever a word ends, add a space to the string, except after the last word.
Return the resultant string at the end.
Below is the implementation of the above approach:
// C++ program of the above approach
#include <bits/stdc++.h>
using namespace std;

// Function to convert the given array
// of strings into a sentence in the
// Camel Case formatting
string convertCase(vector<string> arr, int N)
{
    // Stores the final sentence
    string ans = "";

    // Loop to iterate over the array
    for (int i = 0; i < N; i++) {

        // If the current word is not the 1st
        // word, insert space
        if (ans.size() > 0) {
            ans += ' ';
        }

        // Insert the first character of arr[i]
        ans += toupper(arr[i][0]);

        // Loop to iterate over current array element
        for (int j = 1; j < arr[i].size(); j++) {

            // If a space is found,
            // the next character
            // should be in upper case
            if (arr[i][j] == ' ') {
                ans += ' ';
                ans += toupper(arr[i][j + 1]);
                j++;
            }

            // Otherwise the characters
            // must be in the lower case
            else {
                ans += tolower(arr[i][j]);
            }
        }
    }

    // Return Answer
    return ans;
}

// Driver program
int main()
{
    vector<string> arr{ "AnNiruddHA Routh", "LOVES", "to", "COdE everyDAY" };
    int N = arr.size();

    cout << convertCase(arr, N);

    return 0;
}
// Java code for the above approach
import java.util.*;

class GFG {

    // Function to convert the given array
    // of strings into a sentence in the
    // Camel Case formatting
    static String convertCase(String[] arr, int N)
    {
        // Stores the final sentence
        String ans = "";

        // Loop to iterate over the array
        for (int i = 0; i < N; i++) {

            // If the current word is not the 1st
            // word, insert space
            if (ans.length() > 0) {
                ans += ' ';
            }

            // Insert the first character of arr[i]
            ans += Character.toUpperCase(arr[i].charAt(0));

            // Loop to iterate over current array element
            for (int j = 1; j < arr[i].length(); j++) {

                // If a space is found,
                // the next character
                // should be in upper case
                if (arr[i].charAt(j) == ' ') {
                    ans += ' ';
                    char t = Character.toUpperCase(arr[i].charAt(j + 1));
                    ans += t;
                    j++;
                }

                // Otherwise the characters
                // must be in the lower case
                else {
                    ans += Character.toLowerCase(arr[i].charAt(j));
                }
            }
        }

        // Return Answer
        return ans;
    }

    // Driver code
    public static void main(String[] args)
    {
        String[] arr = { "AnNiruddHA Routh", "LOVES", "to", "COdE everyDAY" };
        int N = arr.length;

        System.out.println(convertCase(arr, N));
    }
}

// This code is contributed by Potta Lokesh
# Python3 program of the above approach

# Function to convert the given array
# of strings into a sentence in the
# Camel Case formatting
def convertCase(arr, N):

    # Stores the final sentence
    ans = ""

    # Loop to iterate over the array
    for i in range(N):

        # If the current word is not the 1st
        # word, insert space
        if (len(ans) > 0):
            ans += ' '

        # Insert the first character of arr[i]
        ans += arr[i][0].upper()

        j = 1

        # Loop to iterate over current array element
        while j < len(arr[i]):

            # If a space is found,
            # the next character
            # should be in upper case
            if (arr[i][j] == ' '):
                ans += ' '
                ans += arr[i][j + 1].upper()
                j += 1

            # Otherwise the characters
            # must be in the lower case
            else:
                ans += arr[i][j].lower()

            j += 1

    # Return Answer
    return ans

# Driver program
if __name__ == "__main__":

    arr = ["AnNiruddHA Routh", "LOVES", "to", "COdE everyDAY"]
    N = len(arr)

    print(convertCase(arr, N))

# This code is contributed by AnkThon
// C# code for the above approach
using System;

class GFG {

    // Function to convert the given array
    // of strings into a sentence in the
    // Camel Case formatting
    static String convertCase(String[] arr, int N)
    {
        // Stores the final sentence
        String ans = "";

        // Loop to iterate over the array
        for (int i = 0; i < N; i++) {

            // If the current word is not the 1st
            // word, insert space
            if (ans.Length > 0) {
                ans += ' ';
            }

            // Insert the first character of arr[i]
            ans += char.ToUpper(arr[i][0]);

            // Loop to iterate over current array element
            for (int j = 1; j < arr[i].Length; j++) {

                // If a space is found,
                // the next character
                // should be in upper case
                if (arr[i][j] == ' ') {
                    ans += ' ';
                    char t = char.ToUpper(arr[i][j + 1]);
                    ans += t;
                    j++;
                }

                // Otherwise the characters
                // must be in the lower case
                else {
                    ans += char.ToLower(arr[i][j]);
                }
            }
        }

        // Return Answer
        return ans;
    }

    // Driver code
    public static void Main()
    {
        String[] arr = { "AnNiruddHA Routh", "LOVES", "to", "COdE everyDAY" };
        int N = arr.Length;

        Console.Write(convertCase(arr, N));
    }
}

// This code is contributed by gfgking
<script>

// JavaScript program of the above approach

// Function to convert the given array
// of strings into a sentence in the
// Camel Case formatting
const convertCase = (arr, N) => {

    // Stores the final sentence
    let ans = "";

    // Loop to iterate over the array
    for (let i = 0; i < N; i++) {

        // If the current word is not the 1st
        // word, insert space
        if (ans.length > 0) {
            ans += ' ';
        }

        // Insert the first character of arr[i]
        ans += (arr[i][0]).toUpperCase();

        // Loop to iterate over current array element
        for (let j = 1; j < arr[i].length; j++) {

            // If a space is found,
            // the next character
            // should be in upper case
            if (arr[i][j] == ' ') {
                ans += ' ';
                ans += (arr[i][j + 1]).toUpperCase();
                j++;
            }

            // Otherwise the characters
            // must be in the lower case
            else {
                ans += (arr[i][j]).toLowerCase();
            }
        }
    }

    // Return Answer
    return ans;
}

// Driver program
let arr = [ "AnNiruddHA Routh", "LOVES", "to", "COdE everyDAY" ];
let N = arr.length;

document.write(convertCase(arr, N));

// This code is contributed by rakeshsahni

</script>
Anniruddha Routh Loves To Code Everyday
Time Complexity: O(N*M), where M is the average length of a string over all the given strings
Auxiliary Space: O(N*M)
Program to convert IP address to hexadecimal in C++

Given an IP address as input, the task is to represent the given IP address as its hexadecimal equivalent.
An IP address, or Internet Protocol address, is a unique number that identifies a device connected to a network. "Internet" here means over the network, and "protocol" defines the set of rules and regulations that must be followed for a connection. It is only because of the IP address that a system can communicate with another system over a network. There are two versions of IP −
IPv4(Internet Protocol Version 4)
IPv6(Internet Protocol Version 6)
An IP address is represented as a sequence of numbers of the form −
151.101.65.121
For this conversion, the program below uses the header file "arpa/inet.h", which provides declarations for internet address operations.
Input-: 127.0.0.1
Ouput-: 0x7f000001
Input-: 172.31.0.2
Output-: 0xac1f0002
Start
Step 1-> Declare function to reverse
void reverse(char* str)
set int len = 2
set int r = strlen(str) - 2
Loop While (len < r)
call swap(str[len++], str[r++])
Call swap(str[len++], str[r])
Set r = r - 3
End
End
Step 2-> Declare function to convert IP address to hexadecimal
void convert(int ip_add)
declare char str[15]
call sprintf(str, "0x%08x", ip_add)
call reverse(str)
print str
step 3-> In main()
declare int ip_add = inet_addr("127.0.0.1")
call convert(ip_add)
Stop
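The reverse() step in the program below is needed only because inet_addr() returns the address in network byte order, which a little-endian machine would otherwise print back to front. As an aside (an addition, not part of the original program), a shorter sketch can let ntohl() perform that byte-order conversion instead −
#include <arpa/inet.h>
#include <cstdio>
int main() {
   // ntohl() converts the network-byte-order value returned by inet_addr()
   // to host byte order, so no string reversal is required
   unsigned int ip_add = ntohl(inet_addr("127.0.0.1"));
   printf("0x%08x\n", ip_add); //prints 0x7f000001
   return 0;
}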
Live Demo
#include <arpa/inet.h>
#include <iostream>
#include <string.h>
using namespace std;
//reverse hexadecimal number
void reverse(char* str) {
int len = 2;
int r = strlen(str) - 2;
while (len < r) {
swap(str[len++], str[r++]);
swap(str[len++], str[r]);
r = r - 3;
}
}
//Convert IP address to hexadecimal
void convert(int ip_add) {
char str[15];
sprintf(str, "0x%08x", ip_add);
reverse(str);
cout << str << "\n";
}
int main() {
int ip_add = inet_addr("127.0.0.1");
convert(ip_add);
return 0;
}
If we run the above code it will generate the following output −
0x7f000001 | [
{
"code": null,
"e": 1187,
"s": 1062,
"text": "Given with the input as an IP address value and the task is to represent the given IP address as its hexadecimal equivalent."
},
{
"code": null,
"e": 1573,
"s": 1187,
"text": "IP address or Internet protocol is a unique number to that uniquely describes your hardware connected to a network. Internet means over the network and protocol defines the set of rules and regulations that must be followed for connection. Because of IP address only it is possible for a system to communicate with another system over a network. There are two versions of IP that are −"
},
{
"code": null,
"e": 1607,
"s": 1573,
"text": "IPv4(Internet Protocol Version 4)"
},
{
"code": null,
"e": 1641,
"s": 1607,
"text": "IPv6(Internet Protocol Version 6)"
},
{
"code": null,
"e": 1718,
"s": 1641,
"text": "IP address is represented as the sequence of numbers which are in the form −"
},
{
"code": null,
"e": 1733,
"s": 1718,
"text": "151.101.65.121"
},
{
"code": null,
"e": 1852,
"s": 1733,
"text": "For this conversion, below program is using the header file “arpa/inet.h” which is created for the internet operations"
},
{
"code": null,
"e": 1928,
"s": 1852,
"text": "Input-: 127.0.0.1\nOuput-: 0x7f000001\nInput-: 172.31.0.2\nOutput-: 0xac1f0002"
},
{
"code": null,
"e": 2495,
"s": 1928,
"text": "Start\nStep1-> Declare function to reverse\n void reverse(char* str)\n set int len = 2\n set int r = strlen(str) – 2\n Loop While (len < r)\n call swap(str[len++], str[r++])\n Call swap(str[len++], str[r])\n Set r = r – 3\n End\n End\nStep 2-> Declare function to convert IP address to hexadecimal\n void convert(int ip_add)\n declare char str[15]\n call sprintf(str, \"0x%08x\", ip_add)\n call reverse(str)\n print str\nstep 3-> In main()\n declare int ip_add = inet_addr(\"127.0.0.1\")\n call convert(ip_add)\nStop"
},
{
"code": null,
"e": 2506,
"s": 2495,
"text": " Live Demo"
},
{
"code": null,
"e": 3046,
"s": 2506,
"text": "#include <arpa/inet.h>\n#include <iostream>\n#include <string.h>\nusing namespace std;\n//reverse hexadecimal number\nvoid reverse(char* str) {\n int len = 2;\n int r = strlen(str) - 2;\n while (len < r) {\n swap(str[len++], str[r++]);\n swap(str[len++], str[r]);\n r = r - 3;\n }\n}\n//Convert IP address to heaxdecimal\nvoid convert(int ip_add) {\n char str[15];\n sprintf(str, \"0x%08x\", ip_add);\n reverse(str);\n cout << str << \"\\n\";\n}\nint main() {\n int ip_add = inet_addr(\"127.0.0.1\");\n convert(ip_add);\n return 0;\n}"
},
{
"code": null,
"e": 3105,
"s": 3046,
"text": "IF WE RUN THE ABOVE CODE IT WILL GENERATE FOLLOWING OUTPUT"
},
{
"code": null,
"e": 3116,
"s": 3105,
"text": "0x7f000001"
}
]
|
PyTorch - Recurrent Neural Network | Recurrent neural networks are a type of deep learning-oriented algorithm that follows a sequential approach. In neural networks, we always assume that each input and output is independent of all other layers. These types of neural networks are called recurrent because they perform mathematical computations in a sequential manner, completing one task after another.
The diagram below specifies the complete approach and working of recurrent neural networks −
In the above figure, c1, c2, c3 and x1 are the inputs, h1, h2 and h3 are the hidden values, and o1 is the corresponding output. We will now focus on using PyTorch to create a sine wave with the help of recurrent neural networks.
During training, we will feed the model one data point at a time. The input sequence x consists of 20 data points, and the target sequence is the input sequence shifted forward by one time step.
Import the necessary packages for implementing recurrent neural networks using the below code −
import torch
from torch.autograd import Variable
import numpy as np
import pylab as pl
import torch.nn.init as init
We will set the model hyperparameters, with the size of the input layer set to 7: 1 input neuron plus 6 context neurons, since the input and the context state are concatenated before being fed forward. The output layer has a single neuron that produces the predicted value of the target sequence.
dtype = torch.FloatTensor
input_size, hidden_size, output_size = 7, 6, 1
epochs = 300
seq_length = 20
lr = 0.1
data_time_steps = np.linspace(2, 10, seq_length + 1)
data = np.sin(data_time_steps)
data.resize((seq_length + 1, 1))
x = Variable(torch.Tensor(data[:-1]).type(dtype), requires_grad=False)
y = Variable(torch.Tensor(data[1:]).type(dtype), requires_grad=False)
We will generate training data, where x is the input data sequence and y is the required target sequence.
Weights are initialized in the recurrent neural network using a normal distribution with zero mean. w1 maps the concatenated input and context state to the hidden layer, and w2 maps the hidden layer to the output, as shown below −
w1 = torch.FloatTensor(input_size,
hidden_size).type(dtype)
init.normal(w1, 0.0, 0.4)
w1 = Variable(w1, requires_grad = True)
w2 = torch.FloatTensor(hidden_size, output_size).type(dtype)
init.normal(w2, 0.0, 0.3)
w2 = Variable(w2, requires_grad = True)
Now, it is important to create the feed-forward function, which defines the neural network.
def forward(input, context_state, w1, w2):
xh = torch.cat((input, context_state), 1)
context_state = torch.tanh(xh.mm(w1))
out = context_state.mm(w2)
return (out, context_state)
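As a quick sanity check (an added illustration, not part of the original tutorial), the forward step can be called on a single data point; the one input value concatenated with the six context values gives the seven columns expected by w1 −
context_state = Variable(torch.zeros((1, hidden_size)).type(dtype))
out, context_state = forward(x[0:1], context_state, w1, w2)
print(out.size()) # torch.Size([1, 1])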
The next step is to start the training procedure of the recurrent neural network's sine wave implementation. The outer loop iterates over the epochs and the inner loop iterates through the elements of the sequence. Here, we will also compute a Mean Square Error (MSE) style loss, which helps in the prediction of continuous variables.
for i in range(epochs):
total_loss = 0
context_state = Variable(torch.zeros((1, hidden_size)).type(dtype), requires_grad = True)
for j in range(x.size(0)):
input = x[j:(j+1)]
target = y[j:(j+1)]
(pred, context_state) = forward(input, context_state, w1, w2)
loss = (pred - target).pow(2).sum()/2
total_loss += loss
loss.backward()
w1.data -= lr * w1.grad.data
w2.data -= lr * w2.grad.data
w1.grad.data.zero_()
w2.grad.data.zero_()
context_state = Variable(context_state.data)
if i % 10 == 0:
print("Epoch: {} loss {}".format(i, total_loss.data[0]))
context_state = Variable(torch.zeros((1, hidden_size)).type(dtype), requires_grad = False)
predictions = []
for i in range(x.size(0)):
input = x[i:i+1]
(pred, context_state) = forward(input, context_state, w1, w2)
context_state = context_state
predictions.append(pred.data.numpy().ravel()[0])
Now, it is time to plot the sine wave as needed.
pl.scatter(data_time_steps[:-1], x.data.numpy(), s = 90, label = "Actual")
pl.scatter(data_time_steps[1:], predictions, label = "Predicted")
pl.legend()
pl.show()
The output for the above process is as follows −
| [
{
"code": null,
"e": 2626,
"s": 2259,
"text": "Recurrent neural networks is one type of deep learning-oriented algorithm which follows a sequential approach. In neural networks, we always assume that each input and output is independent of all other layers. These type of neural networks are called recurrent because they perform mathematical computations in a sequential manner completing one task after another."
},
{
"code": null,
"e": 2719,
"s": 2626,
"text": "The diagram below specifies the complete approach and working of recurrent neural networks −"
},
{
"code": null,
"e": 2992,
"s": 2719,
"text": "In the above figure, c1, c2, c3 and x1 are considered as inputs which includes some hidden input values namely h1, h2 and h3 delivering the respective output of o1. We will now focus on implementing PyTorch to create a sine wave with the help of recurrent neural networks."
},
{
"code": null,
"e": 3209,
"s": 2992,
"text": "During training, we will follow a training approach to our model with one data point at a time. The input sequence x consists of 20 data points, and the target sequence is considered to be same as the input sequence."
},
{
"code": null,
"e": 3305,
"s": 3209,
"text": "Import the necessary packages for implementing recurrent neural networks using the below code −"
},
{
"code": null,
"e": 3422,
"s": 3305,
"text": "import torch\nfrom torch.autograd import Variable\nimport numpy as np\nimport pylab as pl\nimport torch.nn.init as init\n"
},
{
"code": null,
"e": 3581,
"s": 3422,
"text": "We will set the model hyper parameters with the size of input layer set to 7. There will be 6 context neurons and 1 input neuron for creating target sequence."
},
{
"code": null,
"e": 3951,
"s": 3581,
"text": "dtype = torch.FloatTensor\ninput_size, hidden_size, output_size = 7, 6, 1\nepochs = 300\nseq_length = 20\nlr = 0.1\ndata_time_steps = np.linspace(2, 10, seq_length + 1)\ndata = np.sin(data_time_steps)\ndata.resize((seq_length + 1, 1))\n\nx = Variable(torch.Tensor(data[:-1]).type(dtype), requires_grad=False)\ny = Variable(torch.Tensor(data[1:]).type(dtype), requires_grad=False)"
},
{
"code": null,
"e": 4053,
"s": 3951,
"text": "We will generate training data, where x is the input data sequence and y is required target sequence."
},
{
"code": null,
"e": 4268,
"s": 4053,
"text": "Weights are initialized in the recurrent neural network using normal distribution with zero mean. W1 will represent acceptance of input variables and w2 will represent the output which is generated as shown below −"
},
{
"code": null,
"e": 4522,
"s": 4268,
"text": "w1 = torch.FloatTensor(input_size, \nhidden_size).type(dtype)\ninit.normal(w1, 0.0, 0.4)\nw1 = Variable(w1, requires_grad = True)\nw2 = torch.FloatTensor(hidden_size, output_size).type(dtype)\ninit.normal(w2, 0.0, 0.3)\nw2 = Variable(w2, requires_grad = True)"
},
{
"code": null,
"e": 4624,
"s": 4522,
"text": "Now, it is important to create a function for feed forward which uniquely defines the neural network."
},
{
"code": null,
"e": 4814,
"s": 4624,
"text": "def forward(input, context_state, w1, w2):\n xh = torch.cat((input, context_state), 1)\n context_state = torch.tanh(xh.mm(w1))\n out = context_state.mm(w2)\n return (out, context_state)"
},
{
"code": null,
"e": 5121,
"s": 4814,
"text": "The next step is to start training procedure of recurrent neural network’s sine wave implementation. The outer loop iterates over each loop and the inner loop iterates through the element of sequence. Here, we will also compute Mean Square Error (MSE) which helps in the prediction of continuous variables."
},
{
"code": null,
"e": 6060,
"s": 5121,
"text": "for i in range(epochs):\n total_loss = 0\n context_state = Variable(torch.zeros((1, hidden_size)).type(dtype), requires_grad = True)\n for j in range(x.size(0)):\n input = x[j:(j+1)]\n target = y[j:(j+1)]\n (pred, context_state) = forward(input, context_state, w1, w2)\n loss = (pred - target).pow(2).sum()/2\n total_loss += loss\n loss.backward()\n w1.data -= lr * w1.grad.data\n w2.data -= lr * w2.grad.data\n w1.grad.data.zero_()\n w2.grad.data.zero_()\n context_state = Variable(context_state.data)\n if i % 10 == 0:\n print(\"Epoch: {} loss {}\".format(i, total_loss.data[0]))\n\ncontext_state = Variable(torch.zeros((1, hidden_size)).type(dtype), requires_grad = False)\npredictions = []\n\nfor i in range(x.size(0)):\n input = x[i:i+1]\n (pred, context_state) = forward(input, context_state, w1, w2)\n context_state = context_state\n predictions.append(pred.data.numpy().ravel()[0])"
},
{
"code": null,
"e": 6123,
"s": 6060,
"text": "Now, it is time to plot the sine wave as the way it is needed."
},
{
"code": null,
"e": 6286,
"s": 6123,
"text": "pl.scatter(data_time_steps[:-1], x.data.numpy(), s = 90, label = \"Actual\")\npl.scatter(data_time_steps[1:], predictions, label = \"Predicted\")\npl.legend()\npl.show()"
},
{
"code": null,
"e": 6335,
"s": 6286,
"text": "The output for the above process is as follows −"
},
{
"code": null,
"e": 6342,
"s": 6335,
"text": " Print"
},
{
"code": null,
"e": 6353,
"s": 6342,
"text": " Add Notes"
}
]
|
Hibernate - Bag Mappings | A Bag is a Java collection that stores elements without caring about sequencing, but allows duplicate elements in the list. A bag is a random grouping of the objects in the list.
A Collection is mapped with a <bag> element in the mapping file and initialized with java.util.ArrayList.
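For orientation, the bare shape of such a mapping is shown below; this is a minimal excerpt of the full mapping file that appears later in this chapter −
<bag name = "certificates" cascade="all">
   <key column = "employee_id"/>
   <one-to-many class="Certificate"/>
</bag>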
Consider a situation where we need to store our employee records in EMPLOYEE table, which will have the following structure −
create table EMPLOYEE (
id INT NOT NULL auto_increment,
first_name VARCHAR(20) default NULL,
last_name VARCHAR(20) default NULL,
salary INT default NULL,
PRIMARY KEY (id)
);
Further, assume each employee can have one or more certificate associated with him/her. We will store certificate related information in a separate table having the following structure −
create table CERTIFICATE (
id INT NOT NULL auto_increment,
certificate_name VARCHAR(30) default NULL,
employee_id INT default NULL,
PRIMARY KEY (id)
);
There will be one-to-many relationship between EMPLOYEE and CERTIFICATE objects.
Let us implement a POJO class Employee, which will be used to persist the objects related to the EMPLOYEE table and which holds a collection of certificates in a Collection variable.
import java.util.*;
public class Employee {
private int id;
private String firstName;
private String lastName;
private int salary;
private Collection certificates;
public Employee() {}
public Employee(String fname, String lname, int salary) {
this.firstName = fname;
this.lastName = lname;
this.salary = salary;
}
public int getId() {
return id;
}
public void setId( int id ) {
this.id = id;
}
public String getFirstName() {
return firstName;
}
public void setFirstName( String first_name ) {
this.firstName = first_name;
}
public String getLastName() {
return lastName;
}
public void setLastName( String last_name ) {
this.lastName = last_name;
}
public int getSalary() {
return salary;
}
public void setSalary( int salary ) {
this.salary = salary;
}
public Collection getCertificates() {
return certificates;
}
public void setCertificates( Collection certificates ) {
this.certificates = certificates;
}
}
We need to define another POJO class corresponding to CERTIFICATE table so that certificate objects can be stored and retrieved into the CERTIFICATE table.
public class Certificate{
private int id;
private String name;
public Certificate() {}
public Certificate(String name) {
this.name = name;
}
public int getId() {
return id;
}
public void setId( int id ) {
this.id = id;
}
public String getName() {
return name;
}
public void setName( String name ) {
this.name = name;
}
}
Let us develop our mapping file, which instructs Hibernate how to map the defined classes to the database tables. The <bag> element will be used to define the rule for the Collection used.
<?xml version = "1.0" encoding = "utf-8"?>
<!DOCTYPE hibernate-mapping PUBLIC
"-//Hibernate/Hibernate Mapping DTD//EN"
"http://www.hibernate.org/dtd/hibernate-mapping-3.0.dtd">
<hibernate-mapping>
<class name = "Employee" table = "EMPLOYEE">
<meta attribute = "class-description">
This class contains the employee detail.
</meta>
<id name = "id" type = "int" column = "id">
<generator class="native"/>
</id>
<bag name = "certificates" cascade="all">
<key column = "employee_id"/>
<one-to-many class="Certificate"/>
</bag>
<property name = "firstName" column = "first_name" type = "string"/>
<property name = "lastName" column = "last_name" type = "string"/>
<property name = "salary" column = "salary" type = "int"/>
</class>
<class name = "Certificate" table = "CERTIFICATE">
<meta attribute = "class-description">
This class contains the certificate records.
</meta>
<id name = "id" type = "int" column = "id">
<generator class="native"/>
</id>
<property name = "name" column = "certificate_name" type = "string"/>
</class>
</hibernate-mapping>
You should save the mapping document in a file with the format <classname>.hbm.xml. We saved our mapping document in the file Employee.hbm.xml. You are already familiar with most of the mapping detail, but let us see all the elements of mapping file once again −
The mapping document is an XML document having <hibernate-mapping> as the root element, which contains two <class> elements corresponding to each class.
The <class> elements are used to define specific mappings from Java classes to the database tables. The Java class name is specified using the name attribute of the class element and the database table name is specified using the table attribute.
The <meta> element is an optional element and can be used to create the class description.
The <id> element maps the unique ID attribute in the class to the primary key of the database table. The name attribute of the id element refers to the property in the class and the column attribute refers to the column in the database table. The type attribute holds the hibernate mapping type; these mapping types will convert from Java to SQL data type.
The <generator> element within the id element is used to generate the primary key values automatically. The class attribute of the generator element is set to native to let hibernate pick up either the identity, sequence or hilo algorithm to create the primary key, depending upon the capabilities of the underlying database.
The <property> element is used to map a Java class property to a column in the database table. The name attribute of the element refers to the property in the class and the column attribute refers to the column in the database table. The type attribute holds the hibernate mapping type; these mapping types will convert from Java to SQL data type.
The <bag> element is used to set the relationship between Certificate and Employee classes. We used the cascade attribute in the <bag> element to tell Hibernate to persist the Certificate objects at the same time as the Employee objects. The name attribute is set to the defined Collection variable in the parent class, in our case it is certificates.
The <key> element is the column in the CERTIFICATE table that holds the foreign key to the parent object i.e. table EMPLOYEE.
The <one-to-many> element indicates that one Employee object relates to many Certificate objects and, as such, the Certificate object must have an Employee parent associated with it. You can use either <one-to-one>, <many-to-one> or <many-to-many> elements based on your requirement.
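For comparison only (this chapter itself uses the XML mapping shown above), the same bag-style relationship could also be declared with JPA annotations. The sketch below is an added outline based on standard JPA annotations, not part of this chapter −
import java.util.Collection;
import javax.persistence.*;

@Entity
@Table(name = "EMPLOYEE")
public class Employee {
   @Id
   @GeneratedValue(strategy = GenerationType.IDENTITY)
   private int id;

   // A plain Collection behaves like a bag: no ordering, duplicates allowed
   @OneToMany(cascade = CascadeType.ALL)
   @JoinColumn(name = "employee_id")
   private Collection<Certificate> certificates;

   // remaining fields and accessors as in the POJO shown earlier
   // (Certificate would similarly be annotated as an @Entity)
}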
Finally, we will create our application class with the main() method to run the application. We will use this application to save a few Employee records along with their certificates, and then we will apply CRUD operations on those records.
import java.util.*;
import org.hibernate.HibernateException;
import org.hibernate.Session;
import org.hibernate.Transaction;
import org.hibernate.SessionFactory;
import org.hibernate.cfg.Configuration;
public class ManageEmployee {
private static SessionFactory factory;
public static void main(String[] args) {
try {
factory = new Configuration().configure().buildSessionFactory();
} catch (Throwable ex) {
System.err.println("Failed to create sessionFactory object." + ex);
throw new ExceptionInInitializerError(ex);
}
ManageEmployee ME = new ManageEmployee();
/* Let us have a set of certificates for the first employee */
ArrayList set1 = new ArrayList();
set1.add(new Certificate("MCA"));
set1.add(new Certificate("MBA"));
set1.add(new Certificate("PMP"));
/* Add employee records in the database */
Integer empID1 = ME.addEmployee("Manoj", "Kumar", 4000, set1);
/* Another set of certificates for the second employee */
ArrayList set2 = new ArrayList();
set2.add(new Certificate("BCA"));
set2.add(new Certificate("BA"));
/* Add another employee record in the database */
Integer empID2 = ME.addEmployee("Dilip", "Kumar", 3000, set2);
/* List down all the employees */
ME.listEmployees();
/* Update employee's salary records */
ME.updateEmployee(empID1, 5000);
/* Delete an employee from the database */
ME.deleteEmployee(empID2);
/* List down all the employees */
ME.listEmployees();
}
/* Method to add an employee record in the database */
public Integer addEmployee(String fname, String lname, int salary, ArrayList cert){
Session session = factory.openSession();
Transaction tx = null;
Integer employeeID = null;
try {
tx = session.beginTransaction();
Employee employee = new Employee(fname, lname, salary);
employee.setCertificates(cert);
employeeID = (Integer) session.save(employee);
tx.commit();
} catch (HibernateException e) {
if (tx!=null) tx.rollback();
e.printStackTrace();
} finally {
session.close();
}
return employeeID;
}
/* Method to list all the employees detail */
public void listEmployees( ){
Session session = factory.openSession();
Transaction tx = null;
try {
tx = session.beginTransaction();
List employees = session.createQuery("FROM Employee").list();
for (Iterator iterator1 = employees.iterator(); iterator1.hasNext();){
Employee employee = (Employee) iterator1.next();
System.out.print("First Name: " + employee.getFirstName());
System.out.print(" Last Name: " + employee.getLastName());
System.out.println(" Salary: " + employee.getSalary());
Collection certificates = employee.getCertificates();
for (Iterator iterator2 = certificates.iterator(); iterator2.hasNext();){
Certificate certName = (Certificate) iterator2.next();
System.out.println("Certificate: " + certName.getName());
}
}
tx.commit();
} catch (HibernateException e) {
if (tx!=null) tx.rollback();
e.printStackTrace();
} finally {
session.close();
}
}
/* Method to update salary for an employee */
public void updateEmployee(Integer EmployeeID, int salary ){
Session session = factory.openSession();
Transaction tx = null;
try {
tx = session.beginTransaction();
Employee employee = (Employee)session.get(Employee.class, EmployeeID);
employee.setSalary( salary );
session.update(employee);
tx.commit();
} catch (HibernateException e) {
if (tx!=null) tx.rollback();
e.printStackTrace();
} finally {
session.close();
}
}
/* Method to delete an employee from the records */
public void deleteEmployee(Integer EmployeeID){
Session session = factory.openSession();
Transaction tx = null;
try {
tx = session.beginTransaction();
Employee employee = (Employee)session.get(Employee.class, EmployeeID);
session.delete(employee);
tx.commit();
} catch (HibernateException e) {
if (tx!=null) tx.rollback();
e.printStackTrace();
} finally {
session.close();
}
}
}
Here are the steps to compile and run the above mentioned application. Make sure, you have set PATH and CLASSPATH appropriately before proceeding for the compilation and execution.
Create hibernate.cfg.xml configuration file as explained in configuration chapter.
Create Employee.hbm.xml mapping file as shown above.
Create Employee.java source file as shown above and compile it.
Create Certificate.java source file as shown above and compile it.
Create ManageEmployee.java source file as shown above and compile it.
Execute ManageEmployee binary to run the program.
You would get the following result on the screen, and at the same time records would be created in the EMPLOYEE and CERTIFICATE tables. Note that a bag does not maintain any particular order of its elements; the certificates simply appear in the order in which they were added to the ArrayList.
$java ManageEmployee
.......VARIOUS LOG MESSAGES WILL DISPLAY HERE........
First Name: Manoj Last Name: Kumar Salary: 4000
Certificate: MCA
Certificate: MBA
Certificate: PMP
First Name: Dilip Last Name: Kumar Salary: 3000
Certificate: BCA
Certificate: BA
First Name: Manoj Last Name: Kumar Salary: 5000
Certificate: MCA
Certificate: MBA
Certificate: PMP
If you check your EMPLOYEE and CERTIFICATE tables, they should have the following records −
mysql> select * from EMPLOYEE;
+----+------------+-----------+--------+
| id | first_name | last_name | salary |
+----+------------+-----------+--------+
| 53 | Manoj | Kumar | 5000 |
+----+------------+-----------+--------+
1 row in set (0.00 sec)
mysql> select * from CERTIFICATE;
+----+------------------+-------------+
| id | certificate_name | employee_id |
+----+------------------+-------------+
| 11 | MCA | 53 |
| 12 | MBA | 53 |
| 13 | PMP | 53 |
+----+------------------+-------------+
3 rows in set (0.00 sec)
mysql>
| [
{
"code": null,
"e": 2245,
"s": 2063,
"text": "A Bag is a java collection that stores elements without caring about the sequencing, but allow duplicate elements in the list. A bag is a random grouping of the objects in the list."
},
{
"code": null,
"e": 2352,
"s": 2245,
"text": "A Collection is mapped with a <bag> element in the mapping table and initialized with java.util.ArrayList."
},
{
"code": null,
"e": 2478,
"s": 2352,
"text": "Consider a situation where we need to store our employee records in EMPLOYEE table, which will have the following structure −"
},
{
"code": null,
"e": 2673,
"s": 2478,
"text": "create table EMPLOYEE (\n id INT NOT NULL auto_increment,\n first_name VARCHAR(20) default NULL,\n last_name VARCHAR(20) default NULL,\n salary INT default NULL,\n PRIMARY KEY (id)\n);"
},
{
"code": null,
"e": 2860,
"s": 2673,
"text": "Further, assume each employee can have one or more certificate associated with him/her. We will store certificate related information in a separate table having the following structure −"
},
{
"code": null,
"e": 3024,
"s": 2860,
"text": "create table CERTIFICATE (\n id INT NOT NULL auto_increment,\n certificate_name VARCHAR(30) default NULL,\n employee_id INT default NULL,\n PRIMARY KEY (id)\n);"
},
{
"code": null,
"e": 3105,
"s": 3024,
"text": "There will be one-to-many relationship between EMPLOYEE and CERTIFICATE objects."
},
{
"code": null,
"e": 3271,
"s": 3105,
"text": "Let us implement a POJO class Employee, which will be used to persist the objects related to EMPLOYEE table and having a collection of certificates in List variable."
},
{
"code": null,
"e": 4388,
"s": 3271,
"text": "import java.util.*;\n\npublic class Employee {\n private int id;\n private String firstName; \n private String lastName; \n private int salary;\n private Collection certificates;\n\n public Employee() {}\n \n public Employee(String fname, String lname, int salary) {\n this.firstName = fname;\n this.lastName = lname;\n this.salary = salary;\n }\n \n public int getId() {\n return id;\n }\n \n public void setId( int id ) {\n this.id = id;\n }\n \n public String getFirstName() {\n return firstName;\n }\n \n public void setFirstName( String first_name ) {\n this.firstName = first_name;\n }\n \n public String getLastName() {\n return lastName;\n }\n \n public void setLastName( String last_name ) {\n this.lastName = last_name;\n }\n \n public int getSalary() {\n return salary;\n }\n \n public void setSalary( int salary ) {\n this.salary = salary;\n }\n\n public Collection getCertificates() {\n return certificates;\n }\n \n public void setCertificates( Collection certificates ) {\n this.certificates = certificates;\n }\n}"
},
{
"code": null,
"e": 4544,
"s": 4388,
"text": "We need to define another POJO class corresponding to CERTIFICATE table so that certificate objects can be stored and retrieved into the CERTIFICATE table."
},
{
"code": null,
"e": 4956,
"s": 4544,
"text": "public class Certificate{\n private int id;\n private String name; \n\n public Certificate() {}\n \n public Certificate(String name) {\n this.name = name;\n }\n \n public int getId() {\n return id;\n }\n \n public void setId( int id ) {\n this.id = id;\n }\n \n public String getName() {\n return name;\n }\n \n public void setName( String name ) {\n this.name = name;\n }\n}"
},
{
"code": null,
"e": 5145,
"s": 4956,
"text": "Let us develop our mapping file, which instructs Hibernate how to map the defined classes to the database tables. The <bag> element will be used to define the rule for the Collection used."
},
{
"code": null,
"e": 6412,
"s": 5145,
"text": "<?xml version = \"1.0\" encoding = \"utf-8\"?>\n<!DOCTYPE hibernate-mapping PUBLIC \n\"-//Hibernate/Hibernate Mapping DTD//EN\"\n\"http://www.hibernate.org/dtd/hibernate-mapping-3.0.dtd\"> \n\n<hibernate-mapping>\n <class name = \"Employee\" table = \"EMPLOYEE\">\n \n <meta attribute = \"class-description\">\n This class contains the employee detail. \n </meta>\n \n <id name = \"id\" type = \"int\" column = \"id\">\n <generator class=\"native\"/>\n </id>\n \n <bag name = \"certificates\" cascade=\"all\">\n <key column = \"employee_id\"/>\n <one-to-many class=\"Certificate\"/>\n </bag>\n \n <property name = \"firstName\" column = \"first_name\" type = \"string\"/>\n <property name = \"lastName\" column = \"last_name\" type = \"string\"/>\n <property name = \"salary\" column = \"salary\" type = \"int\"/>\n \n </class>\n\n <class name = \"Certificate\" table = \"CERTIFICATE\">\n \n <meta attribute = \"class-description\">\n This class contains the certificate records. \n </meta>\n \n <id name = \"id\" type = \"int\" column = \"id\">\n <generator class=\"native\"/>\n </id>\n \n <property name = \"name\" column = \"certificate_name\" type = \"string\"/>\n \n </class>\n\n</hibernate-mapping>"
},
{
"code": null,
"e": 6675,
"s": 6412,
"text": "You should save the mapping document in a file with the format <classname>.hbm.xml. We saved our mapping document in the file Employee.hbm.xml. You are already familiar with most of the mapping detail, but let us see all the elements of mapping file once again −"
},
{
"code": null,
"e": 6828,
"s": 6675,
"text": "The mapping document is an XML document having <hibernate-mapping> as the root element, which contains two <class> elements corresponding to each class."
},
{
"code": null,
"e": 6981,
"s": 6828,
"text": "The mapping document is an XML document having <hibernate-mapping> as the root element, which contains two <class> elements corresponding to each class."
},
{
"code": null,
"e": 7230,
"s": 6981,
"text": "The <class> elements are used to define specific mappings from a Java classes to the database tables. The Java class name is specified using the name attribute of the class element and the database table name is specified using the table attribute."
},
{
"code": null,
"e": 7479,
"s": 7230,
"text": "The <class> elements are used to define specific mappings from a Java classes to the database tables. The Java class name is specified using the name attribute of the class element and the database table name is specified using the table attribute."
},
{
"code": null,
"e": 7567,
"s": 7479,
"text": "The <meta> element is optional element and can be used to create the class description."
},
{
"code": null,
"e": 7655,
"s": 7567,
"text": "The <meta> element is optional element and can be used to create the class description."
},
{
"code": null,
"e": 8007,
"s": 7655,
"text": "The <id> element maps the unique ID attribute in class to the primary key of the database table. The name attribute of the id element refers to the property in the class and the column attribute refers to the column in the database table. The type attribute holds the hibernate mapping type, this mapping types will convert from Java to SQL data type."
},
{
"code": null,
"e": 8359,
"s": 8007,
"text": "The <id> element maps the unique ID attribute in class to the primary key of the database table. The name attribute of the id element refers to the property in the class and the column attribute refers to the column in the database table. The type attribute holds the hibernate mapping type, this mapping types will convert from Java to SQL data type."
},
{
"code": null,
"e": 8715,
"s": 8359,
"text": "The <generator> element within the id element is used to generate the primary key values automatically. The class attribute of the generator element is set to native to let hibernate pick up either native to let hibernate pick up either identity, sequence or hilo algorithm to create primary key depending upon the capabilities of the underlying database."
},
{
"code": null,
"e": 9071,
"s": 8715,
"text": "The <generator> element within the id element is used to generate the primary key values automatically. The class attribute of the generator element is set to native to let hibernate pick up either native to let hibernate pick up either identity, sequence or hilo algorithm to create primary key depending upon the capabilities of the underlying database."
},
{
"code": null,
"e": 9418,
"s": 9071,
"text": "The <property> element is used to map a Java class property to a column in the database table. The name attribute of the element refers to the property in the class and the column attribute refers to the column in the database table. The type attribute holds the hibernate mapping type, this mapping types will convert from Java to SQL data type."
},
{
"code": null,
"e": 9765,
"s": 9418,
"text": "The <property> element is used to map a Java class property to a column in the database table. The name attribute of the element refers to the property in the class and the column attribute refers to the column in the database table. The type attribute holds the hibernate mapping type, this mapping types will convert from Java to SQL data type."
},
{
"code": null,
"e": 10117,
"s": 9765,
"text": "The <bag> element is used to set the relationship between Certificate and Employee classes. We used the cascade attribute in the <bag> element to tell Hibernate to persist the Certificate objects at the same time as the Employee objects. The name attribute is set to the defined Collection variable in the parent class, in our case it is certificates."
},
{
"code": null,
"e": 10469,
"s": 10117,
"text": "The <bag> element is used to set the relationship between Certificate and Employee classes. We used the cascade attribute in the <bag> element to tell Hibernate to persist the Certificate objects at the same time as the Employee objects. The name attribute is set to the defined Collection variable in the parent class, in our case it is certificates."
},
{
"code": null,
"e": 10595,
"s": 10469,
"text": "The <key> element is the column in the CERTIFICATE table that holds the foreign key to the parent object i.e. table EMPLOYEE."
},
{
"code": null,
"e": 10721,
"s": 10595,
"text": "The <key> element is the column in the CERTIFICATE table that holds the foreign key to the parent object i.e. table EMPLOYEE."
},
{
"code": null,
"e": 11005,
"s": 10721,
"text": "The <one-to-many> element indicates that one Employee object relates to many Certificate objects and, as such, the Certificate object must have an Employee parent associated with it. You can use either <one-to-one>, <many-to-one> or <many-to-many> elements based on your requirement."
},
{
"code": null,
"e": 11289,
"s": 11005,
"text": "The <one-to-many> element indicates that one Employee object relates to many Certificate objects and, as such, the Certificate object must have an Employee parent associated with it. You can use either <one-to-one>, <many-to-one> or <many-to-many> elements based on your requirement."
},
{
"code": null,
"e": 11529,
"s": 11289,
"text": "Finally, we will create our application class with the main() method to run the application. We will use this application to save few Employees’ records along with their certificates and then we will apply CRUD operations on those records."
},
{
"code": null,
"e": 16146,
"s": 11529,
"text": "import java.util.*;\n \nimport org.hibernate.HibernateException; \nimport org.hibernate.Session; \nimport org.hibernate.Transaction;\nimport org.hibernate.SessionFactory;\nimport org.hibernate.cfg.Configuration;\n\npublic class ManageEmployee {\n private static SessionFactory factory; \n public static void main(String[] args) {\n \n try {\n factory = new Configuration().configure().buildSessionFactory();\n } catch (Throwable ex) { \n System.err.println(\"Failed to create sessionFactory object.\" + ex);\n throw new ExceptionInInitializerError(ex); \n }\n \n ManageEmployee ME = new ManageEmployee();\n /* Let us have a set of certificates for the first employee */\n ArrayList set1 = new ArrayList();\n set1.add(new Certificate(\"MCA\"));\n set1.add(new Certificate(\"MBA\"));\n set1.add(new Certificate(\"PMP\"));\n \n /* Add employee records in the database */\n Integer empID1 = ME.addEmployee(\"Manoj\", \"Kumar\", 4000, set1);\n\n /* Another set of certificates for the second employee */\n ArrayList set2 = new ArrayList();\n set2.add(new Certificate(\"BCA\"));\n set2.add(new Certificate(\"BA\"));\n\n /* Add another employee record in the database */\n Integer empID2 = ME.addEmployee(\"Dilip\", \"Kumar\", 3000, set2);\n\n /* List down all the employees */\n ME.listEmployees();\n\n /* Update employee's salary records */\n ME.updateEmployee(empID1, 5000);\n\n /* Delete an employee from the database */\n ME.deleteEmployee(empID2);\n\n /* List down all the employees */\n ME.listEmployees();\n\n }\n\n /* Method to add an employee record in the database */\n public Integer addEmployee(String fname, String lname, int salary, ArrayList cert){\n Session session = factory.openSession();\n Transaction tx = null;\n Integer employeeID = null;\n \n try {\n tx = session.beginTransaction();\n Employee employee = new Employee(fname, lname, salary);\n employee.setCertificates(cert);\n employeeID = (Integer) session.save(employee); \n tx.commit();\n } catch (HibernateException e) {\n if (tx!=null) tx.rollback();\n e.printStackTrace(); \n } finally {\n session.close(); \n }\n return employeeID;\n }\n\n /* Method to list all the employees detail */\n public void listEmployees( ){\n Session session = factory.openSession();\n Transaction tx = null;\n \n try {\n tx = session.beginTransaction();\n List employees = session.createQuery(\"FROM Employee\").list(); \n for (Iterator iterator1 = employees.iterator(); iterator1.hasNext();){\n Employee employee = (Employee) iterator1.next(); \n System.out.print(\"First Name: \" + employee.getFirstName()); \n System.out.print(\" Last Name: \" + employee.getLastName()); \n System.out.println(\" Salary: \" + employee.getSalary());\n Collection certificates = employee.getCertificates();\n for (Iterator iterator2 = certificates.iterator(); iterator2.hasNext();){\n Certificate certName = (Certificate) iterator2.next(); \n System.out.println(\"Certificate: \" + certName.getName()); \n }\n }\n tx.commit();\n } catch (HibernateException e) {\n if (tx!=null) tx.rollback();\n e.printStackTrace(); \n } finally {\n session.close(); \n }\n }\n \n /* Method to update salary for an employee */\n public void updateEmployee(Integer EmployeeID, int salary ){\n Session session = factory.openSession();\n Transaction tx = null;\n \n try {\n tx = session.beginTransaction();\n Employee employee = (Employee)session.get(Employee.class, EmployeeID); \n employee.setSalary( salary );\n session.update(employee);\n tx.commit();\n } catch (HibernateException e) {\n if (tx!=null) tx.rollback();\n 
e.printStackTrace(); \n } finally {\n session.close(); \n }\n }\n \n /* Method to delete an employee from the records */\n public void deleteEmployee(Integer EmployeeID){\n Session session = factory.openSession();\n Transaction tx = null;\n \n try {\n tx = session.beginTransaction();\n Employee employee = (Employee)session.get(Employee.class, EmployeeID); \n session.delete(employee); \n tx.commit();\n } catch (HibernateException e) {\n if (tx!=null) tx.rollback();\n e.printStackTrace(); \n } finally {\n session.close(); \n }\n }\n}"
},
{
"code": null,
"e": 16327,
"s": 16146,
"text": "Here are the steps to compile and run the above mentioned application. Make sure, you have set PATH and CLASSPATH appropriately before proceeding for the compilation and execution."
},
{
"code": null,
"e": 16410,
"s": 16327,
"text": "Create hibernate.cfg.xml configuration file as explained in configuration chapter."
},
{
"code": null,
"e": 16493,
"s": 16410,
"text": "Create hibernate.cfg.xml configuration file as explained in configuration chapter."
},
{
"code": null,
"e": 16546,
"s": 16493,
"text": "Create Employee.hbm.xml mapping file as shown above."
},
{
"code": null,
"e": 16599,
"s": 16546,
"text": "Create Employee.hbm.xml mapping file as shown above."
},
{
"code": null,
"e": 16663,
"s": 16599,
"text": "Create Employee.java source file as shown above and compile it."
},
{
"code": null,
"e": 16727,
"s": 16663,
"text": "Create Employee.java source file as shown above and compile it."
},
{
"code": null,
"e": 16794,
"s": 16727,
"text": "Create Certificate.java source file as shown above and compile it."
},
{
"code": null,
"e": 16861,
"s": 16794,
"text": "Create Certificate.java source file as shown above and compile it."
},
{
"code": null,
"e": 16931,
"s": 16861,
"text": "Create ManageEmployee.java source file as shown above and compile it."
},
{
"code": null,
"e": 17001,
"s": 16931,
"text": "Create ManageEmployee.java source file as shown above and compile it."
},
{
"code": null,
"e": 17051,
"s": 17001,
"text": "Execute ManageEmployee binary to run the program."
},
{
"code": null,
"e": 17101,
"s": 17051,
"text": "Execute ManageEmployee binary to run the program."
},
{
"code": null,
"e": 17405,
"s": 17101,
"text": "You would get the following result on the screen, and same time records would be created in EMPLOYEE and CERTIFICATE tables. You can see, certificates has been sorted in reverse order. You can try by changing your mapping file, simply set sort=\"natural\" and execute your program and compare the results."
},
{
"code": null,
"e": 17766,
"s": 17405,
"text": "$java ManageEmployee\n.......VARIOUS LOG MESSAGES WILL DISPLAY HERE........\n\nFirst Name: Manoj Last Name: Kumar Salary: 4000\nCertificate: MCA\nCertificate: MBA\nCertificate: PMP\nFirst Name: Dilip Last Name: Kumar Salary: 3000\nCertificate: BCA\nCertificate: BA\nFirst Name: Manoj Last Name: Kumar Salary: 5000\nCertificate: MCA\nCertificate: MBA\nCertificate: PMP"
},
{
"code": null,
"e": 17858,
"s": 17766,
"text": "If you check your EMPLOYEE and CERTIFICATE tables, they should have the following records −"
},
{
"code": null,
"e": 18466,
"s": 17858,
"text": "mysql> select * from EMPLOYEE;\n+----+------------+-----------+--------+\n| id | first_name | last_name | salary |\n+----+------------+-----------+--------+\n| 53 | Manoj | Kumar | 5000 |\n+----+------------+-----------+--------+\n1 row in set (0.00 sec)\n\nmysql> select * from CERTIFICATE;\n+----+------------------+-------------+\n| id | certificate_name | employee_id |\n+----+------------------+-------------+\n| 11 | MCA | 53 |\n| 12 | MBA | 53 |\n| 13 | PMP | 53 |\n+----+------------------+-------------+\n3 rows in set (0.00 sec)\n\nmysql>"
},
{
"code": null,
"e": 18501,
"s": 18466,
"text": "\n 108 Lectures \n 11 hours \n"
},
{
"code": null,
"e": 18516,
"s": 18501,
"text": " Chaand Sheikh"
},
{
"code": null,
"e": 18549,
"s": 18516,
"text": "\n 65 Lectures \n 5 hours \n"
},
{
"code": null,
"e": 18563,
"s": 18549,
"text": " Karthikeya T"
},
{
"code": null,
"e": 18598,
"s": 18563,
"text": "\n 39 Lectures \n 4.5 hours \n"
},
{
"code": null,
"e": 18614,
"s": 18598,
"text": " TELCOMA Global"
},
{
"code": null,
"e": 18621,
"s": 18614,
"text": " Print"
},
{
"code": null,
"e": 18632,
"s": 18621,
"text": " Add Notes"
}
]
|
Recursively list nested object keys JavaScript | Let's say we have an object whose property values are themselves objects, nested to 2-3 levels or even more.
Here is the sample object −
const people = {
Ram: {
fullName: 'Ram Kumar',
details: {
age: 31,
isEmployed: true
}
},
Sourav: {
fullName: 'Sourav Singh',
details: {
age: 22,
isEmployed: false
}
},
Jay: {
fullName: 'Jay Grewal',
details: {
age: 26,
isEmployed: true
}
}
}
Our job is to write a function that accepts this object and a string, searches the whole object for
that string as a key, and returns an array that contains the values of all the keys that matched the
string.
Let's call the function recursiveSearch(); recursion would be the most appropriate way of
tackling this situation, given the nesting.
So, the full code for this function recursiveSearch() will be −
const people = {
Ram: {
fullName: 'Ram Kumar',
details: {
age: 31,
isEmployed: true
}
},
Sourav: {
fullName: 'Sourav Singh',
details: {
age: 22,
isEmployed: false
}
},
Jay: {
fullName: 'Jay Grewal',
details: {
age: 26,
isEmployed: true
}
}
}
const recursiveSearch = (obj, searchKey, results = []) => {
const r = results;
Object.keys(obj).forEach(key => {
const value = obj[key];
if(key === searchKey && typeof value !== 'object'){
r.push(value);
}else if(typeof value === 'object'){
recursiveSearch(value, searchKey, r);
}
});
return r;
};
console.log(recursiveSearch(people, 'age'));
The output in the console will be −
[ 31, 22, 26 ]
In the above function, we first iterate over the main object; whenever we encounter a nested object,
we recursively iterate over it, searching for the desired key. If we find the desired key, we
immediately record its value in the results array, and when we finish iterating we return the
results array containing the desired values.
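As an added illustration (not part of the original article), the same function can be pointed at any other key −
// Collect every "fullName" value from the nested object
console.log(recursiveSearch(people, 'fullName'));
// [ 'Ram Kumar', 'Sourav Singh', 'Jay Grewal' ]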
The time complexity of this function is O(mn), where n is the number of child objects inside the
main object and m is the deepest level of nesting. | [
{
"code": null,
"e": 1177,
"s": 1062,
"text": "Let’s say, we have an object with other objects being its property value, it is nested to 2-3 levels\nor even more."
},
{
"code": null,
"e": 1205,
"s": 1177,
"text": "Here is the sample object −"
},
{
"code": null,
"e": 1573,
"s": 1205,
"text": "const people = {\n Ram: {\n fullName: 'Ram Kumar',\n details: {\n age: 31,\n isEmployed: true\n }\n },\n Sourav: {\n fullName: 'Sourav Singh',\n details: {\n age: 22,\n isEmployed: false\n }\n },\n Jay: {\n fullName: 'Jay Grewal',\n details: {\n age: 26,\n isEmployed: true\n }\n }\n}"
},
{
"code": null,
"e": 1778,
"s": 1573,
"text": "Our job is to write a function that accepts this object and a string, searches the whole object for\nthat string as key and returns an array that contains value of all the keys that matched with the\nstring"
},
{
"code": null,
"e": 1912,
"s": 1778,
"text": "Let’s call the function recursiveSearch(), recursion would be the most appropriate way of\ntackling this situation, given the nesting."
},
{
"code": null,
"e": 1976,
"s": 1912,
"text": "So, the full code for this function recursiveSearch() will be −"
},
{
"code": null,
"e": 2741,
"s": 1976,
"text": "const people = {\n Ram: {\n fullName: 'Ram Kumar',\n details: {\n age: 31,\n isEmployed: true\n }\n },\n Sourav: {\n fullName: 'Sourav Singh',\n details: {\n age: 22,\n isEmployed: false\n }\n },\n Jay: {\n fullName: 'Jay Grewal',\n details: {\n age: 26,\n isEmployed: true\n }\n }\n}\nconst recursiveSearch = (obj, searchKey, results = []) => {\n const r = results;\n Object.keys(obj).forEach(key => {\n const value = obj[key];\n if(key === searchKey && typeof value !== 'object'){\n r.push(value);\n }else if(typeof value === 'object'){\n recursiveSearch(value, searchKey, r);\n }\n });\n return r;\n};\nconsole.log(recursiveSearch(people, 'age'));"
},
{
"code": null,
"e": 2777,
"s": 2741,
"text": "The output in the console will be −"
},
{
"code": null,
"e": 2792,
"s": 2777,
"text": "[ 31, 22, 26 ]"
},
{
"code": null,
"e": 3152,
"s": 2792,
"text": "In the above function, first of all we iterate over the main object and whenever we encounter a\nnesting we recursively iterate over the sub object search for the desired key, if we find the\ndesired key, we immediately record its value in the results array and at the last when we finish\niterating, we return the results array that contains the desired values."
},
{
"code": null,
"e": 3297,
"s": 3152,
"text": "The time complexity of this function is O(mn) where is the number of child objects inside the\nmain object and m is the deepest level of nesting."
}
]
|
How to convert integer set to int array using Java? | A collection object in Java is one which stores references to other objects in it. The java.util package provides the classes and interfaces for collections. There are four main collection interfaces, namely Sets, Lists, Queues and Maps.
Set − The Set object is a collection which stores a group of elements; it grows dynamically and does not allow duplicate elements.
HashSet and LinkedHashSet are the classes that implement the Set interface. You can create a Set object by instantiating either of these classes.
import java.util.HashSet;
public class SetExample {
public static void main(String args[]) {
//Instantiating the HashSet
HashSet<String> hashSet = new HashSet<String>();
//Populating the HashSet
hashSet.add("Mango");
hashSet.add("Apple");
hashSet.add("Cherries");
hashSet.add("Banana");
System.out.println(hashSet);
}
}
[Apple, Mango, Cherries, Banana]
You can convert a set object into an array in several ways −
Add each element − You can add each element of the Set object to the array using the foreach loop.
import java.util.HashSet;
import java.util.Set;
public class SetExample {
public static void main(String args[]) {
//Instantiating the HashSet
Set<Integer> hashSet = new HashSet<Integer>();
//Populating the HashSet
hashSet.add(1124);
hashSet.add(3654);
hashSet.add(7854);
hashSet.add(9945);
System.out.println(hashSet);
//Creating an empty integer array
Integer[] array = new Integer[hashSet.size()];
//Converting Set object to integer array
int j = 0;
for (Integer i: hashSet) {
array[j++] = i;
}
}
}
[1124, 3654, 9945, 7854]
Using the toArray() method − The toArray() method of the Set interface accepts an array, populates it with all the elements in the current set object and returns it. Using this method, you can convert a Set object to an array.
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
public class SetExample {
public static void main(String args[]) {
//Instantiating the HashSet
Set<Integer> hashSet = new HashSet<Integer>();
//Populating the HashSet
hashSet.add(1124);
hashSet.add(3654);
hashSet.add(7854);
hashSet.add(9945);
//Creating an empty integer array
Integer[] array = new Integer[hashSet.size()];
//Converting Set object to integer array
hashSet.toArray(array);
System.out.println(Arrays.toString(array));
}
}
[1124, 3654, 9945, 7854]
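Note that both approaches above fill an Integer[] wrapper array. If a primitive int[] is required, as the question's title suggests, the following sketch (an addition to the original examples, with an illustrative class name, assuming Java 8 or later) unboxes the elements while copying −
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
public class SetToIntArrayExample {
   public static void main(String args[]) {
      //Instantiating the HashSet
      Set<Integer> hashSet = new HashSet<Integer>();
      //Populating the HashSet
      hashSet.add(1124);
      hashSet.add(3654);
      hashSet.add(7854);
      hashSet.add(9945);
      //Converting the Set object to a primitive int array
      int[] array = hashSet.stream().mapToInt(Integer::intValue).toArray();
      System.out.println(Arrays.toString(array));
   }
}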
Using Java 8 Streams − Since Java 8, Streams are available and these provide a method to convert collection objects to an array.
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
public class SetExample {
public static void main(String args[]) {
//Instantiating the HashSet
Set<Integer> hashSet = new HashSet<Integer>();
//Populating the HashSet
hashSet.add(1124);
hashSet.add(3654);
hashSet.add(7854);
hashSet.add(9945);
System.out.println(hashSet);
//Creating an empty integer array
Integer[] array = hashSet.stream().toArray(Integer[]::new);
System.out.println(Arrays.toString(array));
}
}
[1124, 3654, 9945, 7854] | [
{
"code": null,
"e": 1298,
"s": 1062,
"text": "A collection object in Java is the one which stores references of other objects in it. The java.util package provides the classes and interfaces for collections. There are four main collection interfaces namely Set Lists, Queues, Maps."
},
{
"code": null,
"e": 1430,
"s": 1298,
"text": "Set − The set object is a collection which stores group of elements, it grows dynamically and it does not allow duplicate elements."
},
{
"code": null,
"e": 1572,
"s": 1430,
"text": "HashSet and LinkedHashSet are the classes that implements Set interface. You can create a Set object by implementing either of these classes."
},
{
"code": null,
"e": 1946,
"s": 1572,
"text": "import java.util.HashSet;\npublic class SetExample {\n public static void main(String args[]) {\n //Instantiating the HashSet\n HashSet<String> hashSet = new HashSet<String>();\n //Populating the HashSet\n hashSet.add(\"Mango\");\n hashSet.add(\"Apple\");\n hashSet.add(\"Cherries\");\n hashSet.add(\"Banana\");\n System.out.println(hashSet);\n }\n}"
},
{
"code": null,
"e": 1979,
"s": 1946,
"text": "[Apple, Mango, Cherries, Banana]"
},
{
"code": null,
"e": 2040,
"s": 1979,
"text": "You can convert a set object into an array in several ways −"
},
{
"code": null,
"e": 2139,
"s": 2040,
"text": "Add each element − You can add each element of the Set object to the array using the foreach loop."
},
{
"code": null,
"e": 2740,
"s": 2139,
"text": "import java.util.HashSet;\nimport java.util.Set;\npublic class SetExample {\n public static void main(String args[]) {\n //Instantiating the HashSet\n Set<Integer> hashSet = new HashSet<Integer>();\n //Populating the HashSet\n hashSet.add(1124);\n hashSet.add(3654);\n hashSet.add(7854);\n hashSet.add(9945);\n System.out.println(hashSet);\n //Creating an empty integer array\n Integer[] array = new Integer[hashSet.size()];\n //Converting Set object to integer array\n int j = 0;\n for (Integer i: hashSet) {\n array[j++] = i;\n }\n }\n}"
},
{
"code": null,
"e": 2765,
"s": 2740,
"text": "[1124, 3654, 9945, 7854]"
},
{
"code": null,
"e": 2993,
"s": 2765,
"text": "Using the toArray() method − The toArray() method of the Set interface accepts an array, populates it with all the elements in the current set object and, returns it. using this method, you can convert a Set object to an array."
},
{
"code": null,
"e": 3556,
"s": 2993,
"text": "import java.util.HashSet;\nimport java.util.Set;\npublic class SetExample {\n public static void main(String args[]) {\n //Instantiating the HashSet\n Set<Integer> hashSet = new HashSet<Integer>();\n //Populating the HashSet\n hashSet.add(1124);\n hashSet.add(3654);\n hashSet.add(7854);\n hashSet.add(9945);\n //Creating an empty integer array\n Integer[] array = new Integer[hashSet.size()];\n //Converting Set object to integer array\n hashSet.toArray(array);\n System.out.println(Arrays.toString(array));\n }\n}"
},
{
"code": null,
"e": 3581,
"s": 3556,
"text": "[1124, 3654, 9945, 7854]"
},
{
"code": null,
"e": 3696,
"s": 3581,
"text": "Using Java8: Since Java8 Streams are introduced and these provide a method to convert collection objects to array."
},
{
"code": null,
"e": 4255,
"s": 3696,
"text": "import java.util.Arrays;\nimport java.util.HashSet;\nimport java.util.Set;\npublic class SetExample {\n public static void main(String args[]) {\n //Instantiating the HashSet\n Set<Integer> hashSet = new HashSet<Integer>();\n //Populating the HashSet\n hashSet.add(1124);\n hashSet.add(3654);\n hashSet.add(7854);\n hashSet.add(9945);\n System.out.println(hashSet);\n //Creating an empty integer array\n Integer[] array = hashSet.stream().toArray(Integer[]::new);\n System.out.println(Arrays.toString(array));\n }\n}"
},
{
"code": null,
"e": 4280,
"s": 4255,
"text": "[1124, 3654, 9945, 7854]"
}
]
|
How to print without newline in Python? | In Python, the print function adds a newline character by default. So when we have multiple print statements, the output from each of them is printed on a separate line, as you can see in the example below. Our goal is to print them on a single line, using special parameters of the print function to achieve that.
The below example prints multiple statements with new lines in each of them.
print("Apple")
print("Mango")
print("Banana")
Running the above code gives us the following result −
Apple
Mango
Banana
We can use the end parameter to specify the character placed at the end of the output of each print statement. In the example below we use "," as the special character with the end parameter, so it appears at the end of the output of each print statement and the result is no longer printed on multiple lines.
print("Apple" , end ="," )
print("Mango", end =",")
print("Banana")
Running the above code gives us the following result −
Apple,Mango,Banana | [
{
"code": null,
"e": 1381,
"s": 1062,
"text": "In python the print statement adds a new line character by default. So when we have multiple print statements the output from each of them is printed in multiple lines as you can see in the example below. Our goal is to print them in a single line and use some special parameters to the print function to achieve that."
},
{
"code": null,
"e": 1458,
"s": 1381,
"text": "The below example prints multiple statements with new lines in each of them."
},
{
"code": null,
"e": 1504,
"s": 1458,
"text": "print(\"Apple\")\nprint(\"Mango\")\nprint(\"Banana\")"
},
{
"code": null,
"e": 1559,
"s": 1504,
"text": "Running the above code gives us the following result −"
},
{
"code": null,
"e": 1578,
"s": 1559,
"text": "Apple\nMango\nBanana"
},
{
"code": null,
"e": 1882,
"s": 1578,
"text": "We can use the end parameter to use a specific character at the end of the output from each print statement. In the below example we use \",\" as special character with the end parameter, which will appear at the end of the output of each print statement. The result will not be printed in multiple lines."
},
{
"code": null,
"e": 1950,
"s": 1882,
"text": "print(\"Apple\" , end =\",\" )\nprint(\"Mango\", end =\",\")\nprint(\"Banana\")"
},
{
"code": null,
"e": 2005,
"s": 1950,
"text": "Running the above code gives us the following result −"
},
{
"code": null,
"e": 2024,
"s": 2005,
"text": "Apple,Mango,Banana"
}
]
|
How to Make Grouped Boxplot with Jittered Data Points in ggplot2 in R | 05 Nov, 2021
In this article, we will see how to make use of ggplot2 package in R Programming Language to plot grouped boxplots with jittered data points.
Grouped Boxplots help us visualize two or more features/variables in a single plot using the grouping variable in ggplot2. The jittered points are data points that belong to another variable and are plotted on top of the grouped boxplot for a better comparison of the spread of data points of the considered variables.
Here we can use any in-built datasets in R (the list of datasets can be viewed using data( ) command) or even create one. Let’s create a data frame and visualize it.
R
# Creating a data frame # variables for columnsgame = rep(c('Footsal', 'Slamdunk', 'VolleyBall'), each=50)level = rep(c('low', 'high'), each=25)score = runif(150, min=1, max=500)year = rep(c(1992,1993,1994),each=50) data = data.frame(game, level, score,year)head(data)
Output:
Here we are going to plot boxplots using geom_boxplot() methods.
Syntax of geom_boxplot(): geom_boxplot(mapping = NULL, data = NULL, outlier.shape = 19, na.rm = FALSE ... )
Arguments:
mapping: Set of aesthetic mappings
data: data to be displayed in this layer
outlier.shape: Displaying the outliers, NA – hides the outliers
na.rm: dealing with missing values, TRUE – removes any missing values in data
Using ggplot2 let’s plot a grouped box plot using the data frame created. Let’s visualize a boxplot between score and game, filled by level. Here we can compare each level with respect to games.
R
# plotting a grouped boxplotlibrary(ggplot2) ggplot(data, aes(x = game, y = score, fill = level)) + geom_boxplot()
Output:
Let’s visualize another boxplot between game and score, filled by level. This is the same as the above plot but with a changed axis which gives a clear comparison between each game with respect to the levels.
R
# another grouped boxplot ggplot(data, aes(x = level, y = score, fill = game)) + geom_boxplot()
Output:
Here we are adding jitter data points into the grouped boxplot using geom_point() method.
Syntax of geom_point(): geom_point(mapping = NULL, data = NULL, position = “identity”, ... )
Arguments:
mapping – Set of aesthetic mappings
data = dataset
position – Position adjustment of points
Let’s add jittered data points on the boxplot to visualize the spread of the data with respect to each game.
R
# Adding jittered data points on the boxplot ggplot(data, aes(x = game, y = score, fill = level)) + geom_boxplot(outlier.shape = NA)+geom_point(position = position_jitterdodge(), alpha=0.3)
Output:
Let’s add jittered data points on the boxplot using the geom_jitter() and facet_wrap() functions to compare the spread of scores in each game per year in different facets.
R
# jittered points on boxplots in 3 facets by year ggplot(data, aes(x = level, y = score, fill = game)) + geom_boxplot()+geom_jitter(width = 0.1,alpha = 0.3) +facet_wrap(~year)
Output:
Picked
R-ggplot
R Language
| [
{
"code": null,
"e": 28,
"s": 0,
"text": "\n05 Nov, 2021"
},
{
"code": null,
"e": 170,
"s": 28,
"text": "In this article, we will see how to make use of ggplot2 package in R Programming Language to plot grouped boxplots with jittered data points."
},
{
"code": null,
"e": 490,
"s": 170,
"text": "Grouped Boxplots help us visualize two or more features/variables in a single plot using the grouping variable in ggplot2. The jittered points are data points that belong to another variable and are plotted on top of the grouped boxplot for a better comparison of the spread of data points of the considered variables. "
},
{
"code": null,
"e": 656,
"s": 490,
"text": "Here we can use any in-built datasets in R (the list of datasets can be viewed using data( ) command) or even create one. Let’s create a data frame and visualize it."
},
{
"code": null,
"e": 658,
"s": 656,
"text": "R"
},
{
"code": "# Creating a data frame # variables for columnsgame = rep(c('Footsal', 'Slamdunk', 'VolleyBall'), each=50)level = rep(c('low', 'high'), each=25)score = runif(150, min=1, max=500)year = rep(c(1992,1993,1994),each=50) data = data.frame(game, level, score,year)head(data)",
"e": 929,
"s": 658,
"text": null
},
{
"code": null,
"e": 937,
"s": 929,
"text": "Output:"
},
{
"code": null,
"e": 1002,
"s": 937,
"text": "Here we are going to plot boxplots using geom_boxplot() methods."
},
{
"code": null,
"e": 1110,
"s": 1002,
"text": "Syntax of geom_boxplot(): geom_boxplot(mapping = NULL, data = NULL, outlier.shape = 19, na.rm = FALSE ... )"
},
{
"code": null,
"e": 1121,
"s": 1110,
"text": "Arguments:"
},
{
"code": null,
"e": 1156,
"s": 1121,
"text": "mapping: Set of aesthetic mappings"
},
{
"code": null,
"e": 1197,
"s": 1156,
"text": "data: data to be displayed in this layer"
},
{
"code": null,
"e": 1261,
"s": 1197,
"text": "outlier.shape: Displaying the outliers, NA – hides the outliers"
},
{
"code": null,
"e": 1339,
"s": 1261,
"text": "na.rm: dealing with missing values, TRUE – removes any missing values in data"
},
{
"code": null,
"e": 1534,
"s": 1339,
"text": "Using ggplot2 let’s plot a grouped box plot using the data frame created. Let’s visualize a boxplot between score and game, filled by level. Here we can compare each level with respect to games."
},
{
"code": null,
"e": 1536,
"s": 1534,
"text": "R"
},
{
"code": "# plotting a grouped boxplotlibrary(ggplot2) ggplot(data, aes(x = game, y = score, fill = level)) + geom_boxplot()",
"e": 1652,
"s": 1536,
"text": null
},
{
"code": null,
"e": 1660,
"s": 1652,
"text": "Output:"
},
{
"code": null,
"e": 1869,
"s": 1660,
"text": "Let’s visualize another boxplot between game and score, filled by level. This is the same as the above plot but with a changed axis which gives a clear comparison between each game with respect to the levels."
},
{
"code": null,
"e": 1871,
"s": 1869,
"text": "R"
},
{
"code": "# another grouped boxplot ggplot(data, aes(x = level, y = score, fill = game)) + geom_boxplot()",
"e": 1968,
"s": 1871,
"text": null
},
{
"code": null,
"e": 1976,
"s": 1968,
"text": "Output:"
},
{
"code": null,
"e": 2066,
"s": 1976,
"text": "Here we are adding jitter data points into the grouped boxplot using geom_point() method."
},
{
"code": null,
"e": 2160,
"s": 2066,
"text": "Syntax of geom_point(): geom_point(mapping = NULL, data = NULL, position = “identity”, ... )"
},
{
"code": null,
"e": 2171,
"s": 2160,
"text": "Arguments:"
},
{
"code": null,
"e": 2207,
"s": 2171,
"text": "mapping – Set of aesthetic mappings"
},
{
"code": null,
"e": 2222,
"s": 2207,
"text": "data = dataset"
},
{
"code": null,
"e": 2263,
"s": 2222,
"text": "position – Position adjustment of points"
},
{
"code": null,
"e": 2372,
"s": 2263,
"text": "Let’s add jittered data points on the boxplot to visualize the spread of the data with respect to each game."
},
{
"code": null,
"e": 2374,
"s": 2372,
"text": "R"
},
{
"code": "# Adding jittered data points on the boxplot ggplot(data, aes(x = game, y = score, fill = level)) + geom_boxplot(outlier.shape = NA)+geom_point(position = position_jitterdodge(), alpha=0.3)",
"e": 2565,
"s": 2374,
"text": null
},
{
"code": null,
"e": 2573,
"s": 2565,
"text": "Output:"
},
{
"code": null,
"e": 2747,
"s": 2573,
"text": "Let’s add jittered data points on the boxplot using geom_point( ) and facet_wrap( ) functions to compare the performance of scores in each game per year in different facets."
},
{
"code": null,
"e": 2749,
"s": 2747,
"text": "R"
},
{
"code": "# jittered points on boxplots in 3 facets by year ggplot(data, aes(x = level, y = score, fill = game)) + geom_boxplot()+geom_jitter(width = 0.1,alpha = 0.3) +facet_wrap(~year)",
"e": 2926,
"s": 2749,
"text": null
},
{
"code": null,
"e": 2934,
"s": 2926,
"text": "Output:"
},
{
"code": null,
"e": 2941,
"s": 2934,
"text": "Picked"
},
{
"code": null,
"e": 2950,
"s": 2941,
"text": "R-ggplot"
},
{
"code": null,
"e": 2961,
"s": 2950,
"text": "R Language"
}
]
|
CSS | Attribute Selector | 03 Dec, 2018
The CSS Attribute Selector is used to select an element with some specific attribute or attribute value. It is an excellent way to style the HTML elements by grouping them based on some specific attributes and the attribute selector will select those elements with similar attributes. There are several types of attribute selectors which are discussed below:
[attribute] Selector: This type of attribute selector is used to select all the elements that have the specified attribute and applies the CSS property to those elements. For example, the selector [class] will select all the elements with the class attribute.
Example:
<!DOCTYPE html><html> <head> <title>Attributes selector</title> <style> [class] { text-align:center; Color:green; } .gfg { font-size:40px; font-weight:bold; margin-bottom:-20px; } </style> </head> <body> <div class = "gfg">GeeksforGeeks</div> <p class = "geeks">A computer science portal for geeks</p> </body></html>
Output:
To restrict this selector to particular elements, the element name needs to be specified before the attribute selector. Example:
<!DOCTYPE html><html> <head> <title>Attribute selector</title> <style> div[style] { text-align:center; color:green; font-size:40px; font-weight:bold; margin-bottom:-20px; } p { text-align:center; font-size:17px; } </style> </head> <body> <div style = "color:green">GeeksforGeeks</div> <p>A computer science portal for geeks</p> </body></html>
Output:
Multiple elements can be selected using comma operator
h2, p[style] {
background-color: #00b93e;
}
[attribute = “value”] Selector: This selector is used to select all the elements whose attribute has the value exactly same as the specified value.
Example:
<!DOCTYPE html><html> <head> <title>Attribute selector</title> <style> [title = "gfg"] { color:green; font-size:40px; font-weight:bold; text-align:center; } [title = "geeks"] { font-size:17px; text-align:center; margin-top:0px; } </style> </head> <body> <div title = "gfg">GeeksforGeeks</div> <p title = "geeks">A computer science portal for geeks</p> </body></html>
Output:
[attribute~=”value”] Selector: This selector is used to select all the elements whose attribute value is a list of space-separated values, one of which is exactly equal to the specified value.
Example:
<!DOCTYPE html><html> <head> <title>Attribute selector</title> <style> [class~="gfg"] { color:green; font-size:40px; font-weight:bold; text-align:center; } [class~="geeks"] { font-size:17px; text-align:center; margin-top:0px; } </style> </head> <body> <div class = "gfg">GeeksforGeeks</div> <div Class = "geeks">A computer science portal for geeks </div> <div class = "geeks ide">GeeksforGeeks is coding platform </div> </body></html>
Output:
[attribute|=”value”] Selector: This selector is used to select all the elements whose attribute has a hyphen-separated list of values beginning with the specified value. The value has to be a whole word, either alone or followed by a hyphen.
Example:
<!DOCTYPE html><html> <head> <title>Attribute selector</title> <style> [class|="gfg"] { color:green; font-size:40px; font-weight:bold; text-align:center; } [class|="geeks"] { font-size:17px; text-align:center; margin-top:0px; } </style> </head> <body> <div class = "gfg">GeeksforGeeks</div> <div Class = "geeks-ide">A computer science portal for geeks </div> <div class = "geeks-ide1">GeeksforGeeks is coding platform </div> </body></html>
Output:
[attribute^=”value”] Selector: This selector is used to select all the elements whose attribute value begins with the specified value. The value doesn’t need to be a whole word.
Example:
<!DOCTYPE html><html> <head> <title>Attribute selector</title> <style> [class^="gfg"] { color:green; font-size:40px; font-weight:bold; text-align:center; } [class^="geeks"] { font-size:17px; text-align:center; margin-top:0px; } </style> </head> <body> <div class = "gfg">GeeksforGeeks</div> <div Class = "geeks">A computer science portal for geeks </div> <div class = "geekside">GeeksforGeeks is coding platform </div> </body></html>
Output:
[attribute$=”value”] Selector: This selector is used to select all the elements whose attribute value ends with the specified value. The value doesn’t need to be a whole word.
Example:
<!DOCTYPE html><html> <head> <title>Attribute selector</title> <style> [class$="gfg"] { color:green; font-size:40px; font-weight:bold; text-align:center; } [class$="geeks"] { font-size:17px; text-align:center; margin-top:0px; } </style> </head> <body> <div class = "gfg">GeeksforGeeks</div> <div Class = "geeksforgeeks">A computer science portal for geeks </div> <div class = "geeks">GeeksforGeeks is coding platform </div> </body></html>
Output:
[attribute*=”value”] Selector: This selector selects all the elements whose attribute value contains the specified value present anywhere. The value doesn’t need to be a whole word.
Example:
<!DOCTYPE html><html> <head> <title>Attribute selector</title> <style> [class*="gfg"] { color:green; font-size:40px; font-weight:bold; text-align:center; } [class*="for"] { font-size:17px; text-align:center; margin-top:0px; } </style> </head> <body> <div class = "gfg">GeeksforGeeks</div> <div Class = "geeksforgeeks">A computer science portal for geeks </div> <div class = "geeks for">GeeksforGeeks is coding platform </div> </body></html>
Output:
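As a side note (not part of the original list), CSS Selectors Level 4 also defines an i flag for case-insensitive attribute matching, for example [class*="GFG" i] matches class values containing "gfg" in any letter case in browsers that support it.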
CSS-Basics
Picked
CSS
HTML
Web technologies Questions
HTML
| [
{
"code": null,
"e": 53,
"s": 25,
"text": "\n03 Dec, 2018"
},
{
"code": null,
"e": 411,
"s": 53,
"text": "The CSS Attribute Selector is used to select an element with some specific attribute or attribute value. It is an excellent way to style the HTML elements by grouping them based on some specific attributes and the attribute selector will select those elements with similar attributes.There are several types of attribute selectors which are discussed below:"
},
{
"code": null,
"e": 1989,
"s": 411,
"text": "[attribute] Selector: This type of attribute selector is used to select all the elements that have the specified attribute and applies the CSS property to that attribute. For example the selector [class] will select all the elements with the style attribute.Example:<!DOCTYPE html><html> <head> <title>Attributes selector</title> <style> [class] { text-align:center; Color:green; } .gfg { font-size:40px; font-weight:bold; margin-bottom:-20px; } </style> </head> <body> <div class = \"gfg\">GeeksforGeeks</div> <p class = \"geeks\">A computer science portal for geeks</p> </body></html> Output:This selector is used to restrict some particular elements, then it needs to specify that element before the attribute selector.Example:<!DOCTYPE html><html> <head> <title>Attribute selector</title> <style> div[style] { text-align:center; color:green; font-size:40px; font-weight:bold; margin-bottom:-20px; } p { text-align:center; font-size:17px; } </style> </head> <body> <div style = \"color:green\">GeeksforGeeks</div> <p>A computer science portal for geeks</p> </body></html> Output:Multiple elements can be selected using comma operatorh2, p[style] {\n background-color: #00b93e;\n}"
},
{
"code": null,
"e": 1998,
"s": 1989,
"text": "Example:"
},
{
"code": "<!DOCTYPE html><html> <head> <title>Attributes selector</title> <style> [class] { text-align:center; Color:green; } .gfg { font-size:40px; font-weight:bold; margin-bottom:-20px; } </style> </head> <body> <div class = \"gfg\">GeeksforGeeks</div> <p class = \"geeks\">A computer science portal for geeks</p> </body></html> ",
"e": 2501,
"s": 1998,
"text": null
},
{
"code": null,
"e": 2509,
"s": 2501,
"text": "Output:"
},
{
"code": null,
"e": 2646,
"s": 2509,
"text": "This selector is used to restrict some particular elements, then it needs to specify that element before the attribute selector.Example:"
},
{
"code": "<!DOCTYPE html><html> <head> <title>Attribute selector</title> <style> div[style] { text-align:center; color:green; font-size:40px; font-weight:bold; margin-bottom:-20px; } p { text-align:center; font-size:17px; } </style> </head> <body> <div style = \"color:green\">GeeksforGeeks</div> <p>A computer science portal for geeks</p> </body></html> ",
"e": 3205,
"s": 2646,
"text": null
},
{
"code": null,
"e": 3213,
"s": 3205,
"text": "Output:"
},
{
"code": null,
"e": 3268,
"s": 3213,
"text": "Multiple elements can be selected using comma operator"
},
{
"code": null,
"e": 3316,
"s": 3268,
"text": "h2, p[style] {\n background-color: #00b93e;\n}"
},
{
"code": null,
"e": 4061,
"s": 3316,
"text": "[attribute = “value”] Selector: This selector is used to select all the elements whose attribute has the value exactly same as the specified value.Example:<!DOCTYPE html><html> <head> <title>Attribute selector</title> <style> [title = \"gfg\"] { color:green; font-size:40px; font-weight:bold; text-align:center; } [title = \"geeks\"] { font-size:17px; text-align:center; margin-top:0px; } </style> </head> <body> <div title = \"gfg\">GeeksforGeeks</div> <p title = \"geeks\">A computer science portal for geeks</p> </body></html> Output:"
},
{
"code": null,
"e": 4070,
"s": 4061,
"text": "Example:"
},
{
"code": "<!DOCTYPE html><html> <head> <title>Attribute selector</title> <style> [title = \"gfg\"] { color:green; font-size:40px; font-weight:bold; text-align:center; } [title = \"geeks\"] { font-size:17px; text-align:center; margin-top:0px; } </style> </head> <body> <div title = \"gfg\">GeeksforGeeks</div> <p title = \"geeks\">A computer science portal for geeks</p> </body></html> ",
"e": 4653,
"s": 4070,
"text": null
},
{
"code": null,
"e": 4661,
"s": 4653,
"text": "Output:"
},
{
"code": null,
"e": 5540,
"s": 4661,
"text": "[attribute~=”value”] Selector: This selector is used to select all the elements whose attribute value is a list of space-separated values, one of which is exactly equal to the specified value.Example:<!DOCTYPE html><html> <head> <title>Attribute selector</title> <style> [class~=\"gfg\"] { color:green; font-size:40px; font-weight:bold; text-align:center; } [class~=\"geeks\"] { font-size:17px; text-align:center; margin-top:0px; } </style> </head> <body> <div class = \"gfg\">GeeksforGeeks</div> <div Class = \"geeks\">A computer science portal for geeks </div> <div class = \"geeks ide\">GeeksforGeeks is coding platform </div> </body></html> Output:"
},
{
"code": null,
"e": 5549,
"s": 5540,
"text": "Example:"
},
{
"code": "<!DOCTYPE html><html> <head> <title>Attribute selector</title> <style> [class~=\"gfg\"] { color:green; font-size:40px; font-weight:bold; text-align:center; } [class~=\"geeks\"] { font-size:17px; text-align:center; margin-top:0px; } </style> </head> <body> <div class = \"gfg\">GeeksforGeeks</div> <div Class = \"geeks\">A computer science portal for geeks </div> <div class = \"geeks ide\">GeeksforGeeks is coding platform </div> </body></html> ",
"e": 6221,
"s": 5549,
"text": null
},
{
"code": null,
"e": 6229,
"s": 6221,
"text": "Output:"
},
{
"code": null,
"e": 7177,
"s": 6229,
"text": "[attribute|=”value”] Selector: This selector is used to select all the elements whose attribute has a hyphen-separated list of values beginning with the specified value. The value has to be a whole word either alone or followed by a hyphen.Example:<!DOCTYPE html><html> <head> <title>Attribute selector</title> <style> [class|=\"gfg\"] { color:green; font-size:40px; font-weight:bold; text-align:center; } [class|=\"geeks\"] { font-size:17px; text-align:center; margin-top:0px; } </style> </head> <body> <div class = \"gfg\">GeeksforGeeks</div> <div Class = \"geeks-ide\">A computer science portal for geeks </div> <div class = \"geeks-ide1\">GeeksforGeeks is coding platform </div> </body></html> Output:"
},
{
"code": null,
"e": 7186,
"s": 7177,
"text": "Example:"
},
{
"code": "<!DOCTYPE html><html> <head> <title>Attribute selector</title> <style> [class|=\"gfg\"] { color:green; font-size:40px; font-weight:bold; text-align:center; } [class|=\"geeks\"] { font-size:17px; text-align:center; margin-top:0px; } </style> </head> <body> <div class = \"gfg\">GeeksforGeeks</div> <div Class = \"geeks-ide\">A computer science portal for geeks </div> <div class = \"geeks-ide1\">GeeksforGeeks is coding platform </div> </body></html> ",
"e": 7879,
"s": 7186,
"text": null
},
{
"code": null,
"e": 7887,
"s": 7879,
"text": "Output:"
},
{
"code": null,
"e": 8766,
"s": 7887,
"text": "[attribute^=”value”] Selector: This selector is used to select all the elements whose attribute value begins with the specified value. The value doesn’t need to be a whole word.Example:<!DOCTYPE html><html> <head> <title>Attribute selector</title> <style> [class^=\"gfg\"] { color:green; font-size:40px; font-weight:bold; text-align:center; } [class^=\"geeks\"] { font-size:17px; text-align:center; margin-top:0px; } </style> </head> <body> <div class = \"gfg\">GeeksforGeeks</div> <div Class = \"geeks\">A computer science portal for geeks </div> <div class = \"geekside\">GeeksforGeeks is coding platform </div> </body></html> Output:"
},
{
"code": null,
"e": 8775,
"s": 8766,
"text": "Example:"
},
{
"code": "<!DOCTYPE html><html> <head> <title>Attribute selector</title> <style> [class^=\"gfg\"] { color:green; font-size:40px; font-weight:bold; text-align:center; } [class^=\"geeks\"] { font-size:17px; text-align:center; margin-top:0px; } </style> </head> <body> <div class = \"gfg\">GeeksforGeeks</div> <div Class = \"geeks\">A computer science portal for geeks </div> <div class = \"geekside\">GeeksforGeeks is coding platform </div> </body></html> ",
"e": 9462,
"s": 8775,
"text": null
},
{
"code": null,
"e": 9470,
"s": 9462,
"text": "Output:"
},
{
"code": null,
"e": 10352,
"s": 9470,
"text": "[attribute$=”value”] Selector: This selector is used to select all the elements whose attribute value ends with the specified value. The value doesn’t need to be a whole word.Example:<!DOCTYPE html><html> <head> <title>Attribute selector</title> <style> [class$=\"gfg\"] { color:green; font-size:40px; font-weight:bold; text-align:center; } [class$=\"geeks\"] { font-size:17px; text-align:center; margin-top:0px; } </style> </head> <body> <div class = \"gfg\">GeeksforGeeks</div> <div Class = \"geeksforgeeks\">A computer science portal for geeks </div> <div class = \"geeks\">GeeksforGeeks is coding platform </div> </body></html> Output:"
},
{
"code": null,
"e": 10361,
"s": 10352,
"text": "Example:"
},
{
"code": "<!DOCTYPE html><html> <head> <title>Attribute selector</title> <style> [class$=\"gfg\"] { color:green; font-size:40px; font-weight:bold; text-align:center; } [class$=\"geeks\"] { font-size:17px; text-align:center; margin-top:0px; } </style> </head> <body> <div class = \"gfg\">GeeksforGeeks</div> <div Class = \"geeksforgeeks\">A computer science portal for geeks </div> <div class = \"geeks\">GeeksforGeeks is coding platform </div> </body></html> ",
"e": 11053,
"s": 10361,
"text": null
},
{
"code": null,
"e": 11061,
"s": 11053,
"text": "Output:"
},
{
"code": null,
"e": 11951,
"s": 11061,
"text": "[attribute*=”value”] Selector: This selector selects all the elements whose attribute value contains the specified value present anywhere. The value doesn’t need to be a whole word.Example:<!DOCTYPE html><html> <head> <title>Attribute selector</title> <style> [class*=\"gfg\"] { color:green; font-size:40px; font-weight:bold; text-align:center; } [class*=\"for\"] { font-size:17px; text-align:center; margin-top:0px; } </style> </head> <body> <div class = \"gfg\">GeeksforGeeks</div> <div Class = \"geeksforgeeks\">A computer science portal for geeks </div> <div class = \"geeks for\">GeeksforGeeks is coding platform </div> </body></html> Output:"
},
{
"code": null,
"e": 11960,
"s": 11951,
"text": "Example:"
},
{
"code": "<!DOCTYPE html><html> <head> <title>Attribute selector</title> <style> [class*=\"gfg\"] { color:green; font-size:40px; font-weight:bold; text-align:center; } [class*=\"for\"] { font-size:17px; text-align:center; margin-top:0px; } </style> </head> <body> <div class = \"gfg\">GeeksforGeeks</div> <div Class = \"geeksforgeeks\">A computer science portal for geeks </div> <div class = \"geeks for\">GeeksforGeeks is coding platform </div> </body></html> ",
"e": 12654,
"s": 11960,
"text": null
},
{
"code": null,
"e": 12662,
"s": 12654,
"text": "Output:"
},
{
"code": null,
"e": 12673,
"s": 12662,
"text": "CSS-Basics"
},
{
"code": null,
"e": 12680,
"s": 12673,
"text": "Picked"
},
{
"code": null,
"e": 12684,
"s": 12680,
"text": "CSS"
},
{
"code": null,
"e": 12689,
"s": 12684,
"text": "HTML"
},
{
"code": null,
"e": 12716,
"s": 12689,
"text": "Web technologies Questions"
},
{
"code": null,
"e": 12721,
"s": 12716,
"text": "HTML"
}
]
|
Ruby | Numeric floor() function | 06 Jan, 2020
The floor() is an inbuilt method in Ruby that returns a number less than or equal to the given number, with a precision of the given number of digits after the decimal point. In case the number of digits is not given, the default value is taken to be zero.
Syntax: num.floor(ndigits)
Parameters: The function needs a number and ndigits, which specifies the number of digits after the decimal point to floor to. If ndigits is not given, the default value is taken to be zero.
Return Value: It returns the floored number (an Integer when ndigits is omitted, zero or negative, otherwise a Float).
Example 1:
# Ruby program for floor()# method in Numeric # Initialize a number num1 = -16.7834num2 = -16.78324num3 = 16.873 # Prints floorputs num1.floor(1)puts num2.floor()puts num3.floor()
Output:
-16.8
-17
16
Example 2:
# Ruby program for floor()# method in Numeric # Initialize a number num1 = 12.32num2 = -1321.998321num3 = -12.2321 # Prints floorputs num1.floor(1)puts num2.floor(2)puts num3.floor(3)
Output:
12.3
-1322.0
-12.233
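As a side note (not covered by the original examples), ndigits may also be negative, in which case the number is floored to a power of ten, for example 1234.567.floor(-2) returns 1200.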
Ruby Numeric-class
Ruby-Methods
Ruby
| [
{
"code": null,
"e": 28,
"s": 0,
"text": "\n06 Jan, 2020"
},
{
"code": null,
"e": 279,
"s": 28,
"text": "The floor() is an inbuilt method in Ruby returns a number less than or equal to the given number with a precision of the given number of digits after the decimal point. In case the number of digits is not given, the default value is taken to be zero."
},
{
"code": null,
"e": 306,
"s": 279,
"text": "Syntax: num.floor(ndigits)"
},
{
"code": null,
"e": 477,
"s": 306,
"text": "Parameters: The function needs a number and ndigits which specifies the number of digits to be round off. If ndigits is not given then, default value is taken to be zero."
},
{
"code": null,
"e": 527,
"s": 477,
"text": "Return Value: It returns returns a boolean value."
},
{
"code": null,
"e": 538,
"s": 527,
"text": "Example 1:"
},
{
"code": "# Ruby program for floor()# method in Numeric # Initialize a number num1 = -16.7834num2 = -16.78324num3 = 16.873 # Prints floorputs num1.floor(1)puts num2.floor()puts num3.floor()",
"e": 720,
"s": 538,
"text": null
},
{
"code": null,
"e": 728,
"s": 720,
"text": "Output:"
},
{
"code": null,
"e": 742,
"s": 728,
"text": "-16.8\n-17\n16\n"
},
{
"code": null,
"e": 753,
"s": 742,
"text": "Example 2:"
},
{
"code": "# Ruby program for floor()# method in Numeric # Initialize a number num1 = 12.32num2 = -1321.998321num3 = -12.2321 # Prints floorputs num1.floor(1)puts num2.floor(2)puts num3.floor(3)",
"e": 939,
"s": 753,
"text": null
},
{
"code": null,
"e": 947,
"s": 939,
"text": "Output:"
},
{
"code": null,
"e": 968,
"s": 947,
"text": "12.3\n-1322.0\n-12.233"
},
{
"code": null,
"e": 987,
"s": 968,
"text": "Ruby Numeric-class"
},
{
"code": null,
"e": 1000,
"s": 987,
"text": "Ruby-Methods"
},
{
"code": null,
"e": 1005,
"s": 1000,
"text": "Ruby"
}
]
|
turtle.ontimer() function in Python | 26 Jul, 2020
The turtle module provides turtle graphics primitives, in both object-oriented and procedure-oriented ways. Because it uses Tkinter for the underlying graphics, it needs a version of Python installed with Tk support.
This function is used to install a timer, which calls fun after t milliseconds.
Syntax :
turtle.ontimer(fun, t=0)
Parameters:
fun: a function with no arguments, to be called after the delay.
t: a number >= 0, the delay in milliseconds.
Below is the implementation of the above method with an example :
Python3
# import packagesimport turtleimport random # global colorscol = ['red', 'yellow', 'green', 'blue', 'white', 'black', 'orange', 'pink'] # method to call on timerdef fxn(): global col ind = random.randint(0, 7) # set background color of the # turtle screen randomly sc.bgcolor(col[ind]) # set screensc = turtle.Screen()sc.setup(400, 300) # loop for timerfor i in range(10): turtle.ontimer(fxn, t=400*(i+1))
Output :
Here we can find that after some time (set by timer) it automatically changes the background color of the turtle graphics window randomly.
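A common follow-up pattern (a minimal sketch, not from the original example; the tick() name and the 500 ms delay are arbitrary) is to have the callback re-register itself with ontimer() so the timer keeps firing:

Python3

# import package
import turtle

# set screen
sc = turtle.Screen()
sc.setup(400, 300)

count = 0

# callback that re-registers itself
def tick():
    global count
    count += 1

    # show how many times the timer has fired so far
    sc.title("timer fired " + str(count) + " times")

    # keep the timer running until 10 calls have happened
    if count < 10:
        turtle.ontimer(tick, t=500)

# start the timer chain
turtle.ontimer(tick, t=500)
sc.mainloop()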
Python-turtle
Python
| [
{
"code": null,
"e": 28,
"s": 0,
"text": "\n26 Jul, 2020"
},
{
"code": null,
"e": 245,
"s": 28,
"text": "The turtle module provides turtle graphics primitives, in both object-oriented and procedure-oriented ways. Because it uses Tkinter for the underlying graphics, it needs a version of Python installed with Tk support."
},
{
"code": null,
"e": 325,
"s": 245,
"text": "This function is used to install a timer, which calls fun after t milliseconds."
},
{
"code": null,
"e": 334,
"s": 325,
"text": "Syntax :"
},
{
"code": null,
"e": 360,
"s": 334,
"text": "turtle.ontimer(fun, t=0)\n"
},
{
"code": null,
"e": 372,
"s": 360,
"text": "Parameters:"
},
{
"code": null,
"e": 443,
"s": 377,
"text": "Below is the implementation of the above method with an example :"
},
{
"code": null,
"e": 451,
"s": 443,
"text": "Python3"
},
{
"code": "# import packagesimport turtleimport random # global colorscol = ['red', 'yellow', 'green', 'blue', 'white', 'black', 'orange', 'pink'] # method to call on timerdef fxn(): global col ind = random.randint(0, 7) # set background color of the # turtle screen randomly sc.bgcolor(col[ind]) # set screensc = turtle.Screen()sc.setup(400, 300) # loop for timerfor i in range(10): turtle.ontimer(fxn, t=400*(i+1))",
"e": 889,
"s": 451,
"text": null
},
{
"code": null,
"e": 898,
"s": 889,
"text": "Output :"
},
{
"code": null,
"e": 1037,
"s": 898,
"text": "Here we can find that after some time (set by timer) it automatically changes the background color of the turtle graphics window randomly."
},
{
"code": null,
"e": 1051,
"s": 1037,
"text": "Python-turtle"
},
{
"code": null,
"e": 1058,
"s": 1051,
"text": "Python"
}
]
|
Difference between LL and LR parser | 29 Jul, 2019
LL Parser includes both the recursive descent parser and the non-recursive descent parser. One of its types uses backtracking while the other uses a parsing table. These are top-down parsers. Example: Given grammar is
S -> Ac
A -> ab
where S is start symbol, A is non-terminal and a, b, c are terminals.
Input string: abc
Parse tree generated by LL parser:
LR Parser is one of the bottom-up parsers which uses a parsing table (dynamic programming) to obtain the parse tree for a given string using the grammar productions.
Example: In the above example, parse tree generated by LR parser:
Difference between LL and LR parser:
LL parser builds the parse tree top-down, while LR parser builds it bottom-up.
LL parser follows the leftmost derivation, while LR parser follows the rightmost derivation in reverse.
LL parser starts from the start symbol, while LR parser starts from the input symbols (the leaves of the parse tree).
LL grammars cannot contain left recursion, while LR parsers can handle left-recursive grammars.
LL parsers are easier to write by hand, while LR parsers are usually generated with tools (e.g. yacc/bison) and accept a larger class of grammars.
Compiler Design
| [
{
"code": null,
"e": 28,
"s": 0,
"text": "\n29 Jul, 2019"
},
{
"code": null,
"e": 237,
"s": 28,
"text": "LL Parser includes both the recursive descent parser and non-recursive descent parser. Its one type uses backtracking while another one uses parsing table. Theses are top down parser.Example: Given grammar is"
},
{
"code": null,
"e": 254,
"s": 237,
"text": "S -> Ac\nA -> ab "
},
{
"code": null,
"e": 324,
"s": 254,
"text": "where S is start symbol, A is non-terminal and a, b, c are terminals."
},
{
"code": null,
"e": 376,
"s": 324,
"text": "Input string: abcParse tree generated by LL parser:"
},
{
"code": null,
"e": 534,
"s": 376,
"text": "LR Parser is one of the bottom up parser which uses parsing table (dynamic programming) to obtain the parse tree form given string using grammar productions."
},
{
"code": null,
"e": 600,
"s": 534,
"text": "Example: In the above example, parse tree generated by LR parser:"
},
{
"code": null,
"e": 637,
"s": 600,
"text": "Difference between LL and LR parser:"
},
{
"code": null,
"e": 653,
"s": 637,
"text": "Compiler Design"
}
]
|
JQuery | Multiple ID selectors | 03 Aug, 2021
Given an HTML document, the task is to select the elements with different IDs at the same time using jQuery.
Approach:
Select the IDs of the different elements and then use the each() method to apply the CSS property to every selected element.
Then use the css() method to set the background color of all selected elements to pink.
Display a text message which indicates that multiple ID selectors were applied.
Example 1: In this example, the elements of different ID’s are selected and background color of these elements are changed.
<!DOCTYPE HTML> <html> <head> <title> JQuery | Multiple ID selectors </title> <style> #GFG_DIV { background: green; height: 100px; width: 200px; margin: 0 auto; color: white; } </style> <script src ="https://ajax.googleapis.com/ajax/libs/jquery/3.4.0/jquery.min.js"> </script> </head> <body style = "text-align:center;"> <h1 style = "color:green;" > GeeksForGeeks </h1> <p id = "GFG_UP" style = "font-size: 19px; font-weight: bold;"> </p> <div id = "GFG_DIV"> This is Div box. </div> <br> <button onClick = "GFG_Fun()"> click here </button> <p id = "GFG_DOWN" style = "color: green; font-size: 24px; font-weight: bold;"> </p> <script> $('#GFG_UP').text("Click on button to select multiple" + " ID's and change their background-color"); function GFG_Fun() { $("#GFG_UP, #GFG_DIV, #GFG_DOWN").each(function(){ $(this).css("background-color", "pink"); }); $('#GFG_DOWN').text("Background-color of all " + "elements is changed."); } </script> </body> </html>
Output:
Before clicking on the button:
After clicking on the button:
Example 2: In this example, the elements of different ID’s are selected and text color of these elements are changed.
<!DOCTYPE HTML> <html> <head> <title> JQuery | Multiple ID selectors </title> <style> #GFG_DIV { background: green; height: 100px; width: 200px; margin: 0 auto; color: white; } </style> <script src ="https://ajax.googleapis.com/ajax/libs/jquery/3.4.0/jquery.min.js"> </script> </head> <body style = "text-align:center;"> <h1 style = "color:green;" > GeeksForGeeks </h1> <p id = "GFG_UP" style = "font-size: 19px; font-weight: bold;"> </p> <div id = "GFG_DIV"> This is Div box. </div> <br> <button onClick = "GFG_Fun()"> click here </button> <p id = "GFG_DOWN" style = "color: green; font-size: 24px; font-weight: bold;"> </p> <script> $('#GFG_UP').text("Click on button to select multiple" + "ID's and change their Text color"); function GFG_Fun() { $("#GFG_UP, #GFG_DIV, #GFG_DOWN").each(function(){ $(this).css("color", "blue"); }); $('#GFG_DOWN').text("Text color of all elements is " + "changed."); } </script> </body> </html>
Output:
Before clicking on the button:
After clicking on the button:
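As a side note (not part of the original approach), the each() loop is optional here: jQuery setter methods such as css() apply to every element in the matched set, so $("#GFG_UP, #GFG_DIV, #GFG_DOWN").css("color", "blue"); produces the same result in a single call.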
jQuery is an open source JavaScript library that simplifies the interactions between an HTML/CSS document, It is widely famous with it’s philosophy of “Write less, do more”.You can learn jQuery from the ground up by following this jQuery Tutorial and jQuery Examples.
jQuery-Selectors
JQuery
Web Technologies
Web technologies Questions
| [
{
"code": null,
"e": 28,
"s": 0,
"text": "\n03 Aug, 2021"
},
{
"code": null,
"e": 141,
"s": 28,
"text": "Given an HTML document and the task is to select the elements with different ID’s at the same time using JQuery."
},
{
"code": null,
"e": 151,
"s": 141,
"text": "Approach:"
},
{
"code": null,
"e": 271,
"s": 151,
"text": "Select the ID’s of different element and then use each() method to apply the CSS property on all selected ID’s element."
},
{
"code": null,
"e": 355,
"s": 271,
"text": "Then use css() method to set the background color to pink to all selected elements."
},
{
"code": null,
"e": 415,
"s": 355,
"text": "Display the text which indicates the multiple ID selectors."
},
{
"code": null,
"e": 539,
"s": 415,
"text": "Example 1: In this example, the elements of different ID’s are selected and background color of these elements are changed."
},
{
"code": "<!DOCTYPE HTML> <html> <head> <title> JQuery | Multiple ID selectors </title> <style> #GFG_DIV { background: green; height: 100px; width: 200px; margin: 0 auto; color: white; } </style> <script src =\"https://ajax.googleapis.com/ajax/libs/jquery/3.4.0/jquery.min.js\"> </script> </head> <body style = \"text-align:center;\"> <h1 style = \"color:green;\" > GeeksForGeeks </h1> <p id = \"GFG_UP\" style = \"font-size: 19px; font-weight: bold;\"> </p> <div id = \"GFG_DIV\"> This is Div box. </div> <br> <button onClick = \"GFG_Fun()\"> click here </button> <p id = \"GFG_DOWN\" style = \"color: green; font-size: 24px; font-weight: bold;\"> </p> <script> $('#GFG_UP').text(\"Click on button to select multiple\" + \" ID's and change their background-color\"); function GFG_Fun() { $(\"#GFG_UP, #GFG_DIV, #GFG_DOWN\").each(function(){ $(this).css(\"background-color\", \"pink\"); }); $('#GFG_DOWN').text(\"Background-color of all \" + \"elements is changed.\"); } </script> </body> </html> ",
"e": 2080,
"s": 539,
"text": null
},
{
"code": null,
"e": 2088,
"s": 2080,
"text": "Output:"
},
{
"code": null,
"e": 2119,
"s": 2088,
"text": "Before clicking on the button:"
},
{
"code": null,
"e": 2149,
"s": 2119,
"text": "After clicking on the button:"
},
{
"code": null,
"e": 2267,
"s": 2149,
"text": "Example 2: In this example, the elements of different ID’s are selected and text color of these elements are changed."
},
{
"code": "<!DOCTYPE HTML> <html> <head> <title> JQuery | Multiple ID selectors </title> <style> #GFG_DIV { background: green; height: 100px; width: 200px; margin: 0 auto; color: white; } </style> <script src =\"https://ajax.googleapis.com/ajax/libs/jquery/3.4.0/jquery.min.js\"> </script> </head> <body style = \"text-align:center;\"> <h1 style = \"color:green;\" > GeeksForGeeks </h1> <p id = \"GFG_UP\" style = \"font-size: 19px; font-weight: bold;\"> </p> <div id = \"GFG_DIV\"> This is Div box. </div> <br> <button onClick = \"GFG_Fun()\"> click here </button> <p id = \"GFG_DOWN\" style = \"color: green; font-size: 24px; font-weight: bold;\"> </p> <script> $('#GFG_UP').text(\"Click on button to select multiple\" + \"ID's and change their Text color\"); function GFG_Fun() { $(\"#GFG_UP, #GFG_DIV, #GFG_DOWN\").each(function(){ $(this).css(\"color\", \"blue\"); }); $('#GFG_DOWN').text(\"Text color of all elements is \" + \"changed.\"); } </script> </body> </html> ",
"e": 3748,
"s": 2267,
"text": null
},
{
"code": null,
"e": 3756,
"s": 3748,
"text": "Output:"
},
{
"code": null,
"e": 3787,
"s": 3756,
"text": "Before clicking on the button:"
},
{
"code": null,
"e": 3817,
"s": 3787,
"text": "After clicking on the button:"
},
{
"code": null,
"e": 4085,
"s": 3817,
"text": "jQuery is an open source JavaScript library that simplifies the interactions between an HTML/CSS document, It is widely famous with it’s philosophy of “Write less, do more”.You can learn jQuery from the ground up by following this jQuery Tutorial and jQuery Examples."
},
{
"code": null,
"e": 4102,
"s": 4085,
"text": "jQuery-Selectors"
},
{
"code": null,
"e": 4109,
"s": 4102,
"text": "JQuery"
},
{
"code": null,
"e": 4126,
"s": 4109,
"text": "Web Technologies"
},
{
"code": null,
"e": 4153,
"s": 4126,
"text": "Web technologies Questions"
}
]
|
Python | Numpy matrix.transpose() | 29 May, 2019
With the help of the Numpy matrix.transpose() method, we can find the transpose of a given matrix.
Syntax : matrix.transpose()
Return : Returns the transposed matrix
Example #1 : In this example, we can see that by using the matrix.transpose() method we are able to find the transpose of the given matrix.
# import the important module in pythonimport numpy as np # make matrix with numpygfg = np.matrix('[4, 1; 12, 3]') # applying matrix.transpose() methodgeek = gfg.transpose() print(geek)
[[ 4 12]
[ 1 3]]
Example #2 :
# import the important module in pythonimport numpy as np # make matrix with numpygfg = np.matrix('[4, 1, 9; 12, 3, 1; 4, 5, 6]') # applying matrix.transpose() methodgeek = gfg.transpose() print(geek)
[[ 4 12 4]
[ 1 3 5]
[ 9 1 6]]
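As a side note (not part of the original examples), the same operation is available through the .T attribute and the numpy.transpose() function, which also accepts an explicit axes order; a minimal sketch:

# import the important module in python
import numpy as np

# make a plain 2 x 3 array
arr = np.arange(6).reshape(2, 3)

# .T is shorthand for transpose()
print(arr.T)

# numpy.transpose accepts an explicit axes order
print(np.transpose(arr, axes=(1, 0)))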
Python numpy-Matrix Function
Python-numpy
Python
| [
{
"code": null,
"e": 28,
"s": 0,
"text": "\n29 May, 2019"
},
{
"code": null,
"e": 158,
"s": 28,
"text": "With the help of Numpy matrix.transpose() method, we can find the transpose of the matrix by using the matrix.transpose() method."
},
{
"code": null,
"e": 219,
"s": 158,
"text": "Syntax : matrix.transpose()Return : Return transposed matrix"
},
{
"code": null,
"e": 353,
"s": 219,
"text": "Example #1 :In this example we can see that by using matrix.transpose() method we are able to find the transpose of the given matrix."
},
{
"code": "# import the important module in pythonimport numpy as np # make matrix with numpygfg = np.matrix('[4, 1; 12, 3]') # applying matrix.transpose() methodgeek = gfg.transpose() print(geek)",
"e": 568,
"s": 353,
"text": null
},
{
"code": null,
"e": 588,
"s": 568,
"text": "[[ 4 12]\n [ 1 3]]\n"
},
{
"code": null,
"e": 601,
"s": 588,
"text": "Example #2 :"
},
{
"code": "# import the important module in pythonimport numpy as np # make matrix with numpygfg = np.matrix('[4, 1, 9; 12, 3, 1; 4, 5, 6]') # applying matrix.transpose() methodgeek = gfg.transpose() print(geek)",
"e": 831,
"s": 601,
"text": null
},
{
"code": null,
"e": 869,
"s": 831,
"text": "[[ 4 12 4]\n [ 1 3 5]\n [ 9 1 6]]\n"
},
{
"code": null,
"e": 898,
"s": 869,
"text": "Python numpy-Matrix Function"
},
{
"code": null,
"e": 911,
"s": 898,
"text": "Python-numpy"
},
{
"code": null,
"e": 918,
"s": 911,
"text": "Python"
}
]
|
Remove the first and last character of each word in a string | 06 May, 2021
Given a string, the task is to remove the first and last character of each word in the string. Examples:
Input: Geeks for geeks
Output: eek o eek
Input: Geeksforgeeks is best
Output: eeksforgeek es
Approach
Split the String based on the space
Run a loop from the first letter to the last letter.
Check if the character is the starting or end of the word
Remove this character from the String.
Below is the implementation of the above approach.
C++
Java
Python3
C#
PHP
Javascript
// C++ program to remove the first// and last character of each word in a string.#include<bits/stdc++.h>using namespace std; string FirstAndLast(string str){ // add a space to the end of the string str+=" "; string res="",w=""; // traverse the string and extract words for(int i=0;i<str.length();i++) { if(str[i]==' ') { // excluding the first and // last character res +=w.substr(1,w.length()-2)+" "; // clear the word w=""; } else { // else add the character to word w+=str[i]; } } return res;} // Driver codeint main(){ string str = "Geeks for Geeks"; cout << (str) << endl; cout << FirstAndLast(str) << endl; return 0;} // This code is contributed by Arnab Kundu
// Java program to remove the first// and last character of each word in a string. import java.util.*; class GFG { static String FirstAndLast(String str) { // Split the String based on the space String[] arrOfStr = str.split(" "); // String to store the resultant String String res = ""; // Traverse the words and // remove the first and last letter for (String a : arrOfStr) { res += a.substring(1, a.length() - 1) + " "; } return res; } // Driver code public static void main(String args[]) { String str = "Geeks for Geeks"; System.out.println(str); System.out.println(FirstAndLast(str)); }}
# Python3 program to remove the first# and last character of each word in a string. def FirstAndLast(string) : # Split the String based on the space arrOfStr = string.split(); # String to store the resultant String res = ""; # Traverse the words and # remove the first and last letter for a in arrOfStr : res += a[1:len(a) - 1] + " "; return res; # Driver codeif __name__ == "__main__" : string = "Geeks for Geeks"; print(string); print(FirstAndLast(string)); # This code is contributed by Ryuga
// C# program to remove the first// and last character of each word in a string.using System; class GFG{ static String FirstAndLast(String str) { // Split the String based on the space String[] arrOfStr = str.Split(' '); // String to store the resultant String String res = ""; // Traverse the words and // remove the first and last letter foreach (String a in arrOfStr) { res += a.Substring(1, a.Length-2) + " "; } return res; } // Driver code public static void Main(String []args) { String str = "Geeks for Geeks"; Console.WriteLine(str); Console.WriteLine(FirstAndLast(str)); }} /* This code contributed by PrinciRaj1992 */
<?php// PHP program to remove the first// and last character of each word in a string. function FirstAndLast($str){ // add a space to the end of the string $str .=" "; $res = (string) NULL; $w = (string) NULL; // traverse the string and extract words for($i=0; $i< strlen($str); $i++) { if($str[$i]== ' ') { // excluding the first and // last character $res .=substr($w, 1 ,strlen($w)-2) ; $res .= " "; // clear the word $w= (string) NULL; } else { // else add the character to word $w .=$str[$i]; } } return $res;} // Driver code $str = "Geeks for Geeks";echo $str , "\n";echo FirstAndLast($str); // This code is contributed by ihritik ?>
<script> // JavaScript program to remove the first // and last character of each word in a string. function FirstAndLast(str) { // add a space to the end of the string str += " "; var res = "", w = ""; // traverse the string and extract words for (var i = 0; i < str.length; i++) { if (str[i] === " ") { // excluding the first and // last character res += w.substring(1, w.length - 1) + " "; // clear the word w = ""; } else { // else add the character to word w += str[i]; } } return res; } // Driver code var str = "Geeks for Geeks"; document.write(str + "<br>"); document.write(FirstAndLast(str) + "<br>"); </script>
Geeks for Geeks
eek o eek
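As a side note (not among the original solutions), the same transformation can be written as a short Python one-liner using slicing, since w[1:-1] drops the first and last character of each word:

# Python one-liner using slicing
string = "Geeks for Geeks"
print(" ".join(w[1:-1] for w in string.split(" ")))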
princiraj1992
ankthon
andrew1234
ihritik
rdtank
school-programming
Strings
Strings
| [
{
"code": null,
"e": 52,
"s": 24,
"text": "\n06 May, 2021"
},
{
"code": null,
"e": 157,
"s": 52,
"text": "Given the string the task is to remove the first and last character of each word in a string.Examples: "
},
{
"code": null,
"e": 252,
"s": 157,
"text": "Input: Geeks for geeks\nOutput: eek o eek\n\nInput: Geeksforgeeks is best\nOutput: eeksforgeek es"
},
{
"code": null,
"e": 263,
"s": 252,
"text": "Approach "
},
{
"code": null,
"e": 299,
"s": 263,
"text": "Split the String based on the space"
},
{
"code": null,
"e": 352,
"s": 299,
"text": "Run a loop from the first letter to the last letter."
},
{
"code": null,
"e": 410,
"s": 352,
"text": "Check if the character is the starting or end of the word"
},
{
"code": null,
"e": 449,
"s": 410,
"text": "Remove this character from the String."
},
{
"code": null,
"e": 502,
"s": 449,
"text": "Below is the implementation of the above approach. "
},
{
"code": null,
"e": 506,
"s": 502,
"text": "C++"
},
{
"code": null,
"e": 511,
"s": 506,
"text": "Java"
},
{
"code": null,
"e": 519,
"s": 511,
"text": "Python3"
},
{
"code": null,
"e": 522,
"s": 519,
"text": "C#"
},
{
"code": null,
"e": 526,
"s": 522,
"text": "PHP"
},
{
"code": null,
"e": 537,
"s": 526,
"text": "Javascript"
},
{
"code": "// C++ program to remove the first// and last character of each word in a string.#include<bits/stdc++.h>using namespace std; string FirstAndLast(string str){ // add a space to the end of the string str+=\" \"; string res=\"\",w=\"\"; // traverse the string and extract words for(int i=0;i<str.length();i++) { if(str[i]==' ') { // excluding the first and // last character res +=w.substr(1,w.length()-2)+\" \"; // clear the word w=\"\"; } else { // else add the character to word w+=str[i]; } } return res;} // Driver codeint main(){ string str = \"Geeks for Geeks\"; cout << (str) << endl; cout << FirstAndLast(str) << endl; return 0;} // This code is contributed by Arnab Kundu",
"e": 1399,
"s": 537,
"text": null
},
{
"code": "// Java program to remove the first// and last character of each word in a string. import java.util.*; class GFG { static String FirstAndLast(String str) { // Split the String based on the space String[] arrOfStr = str.split(\" \"); // String to store the resultant String String res = \"\"; // Traverse the words and // remove the first and last letter for (String a : arrOfStr) { res += a.substring(1, a.length() - 1) + \" \"; } return res; } // Driver code public static void main(String args[]) { String str = \"Geeks for Geeks\"; System.out.println(str); System.out.println(FirstAndLast(str)); }}",
"e": 2114,
"s": 1399,
"text": null
},
{
"code": "# Python3 program to remove the first# and last character of each word in a string. def FirstAndLast(string) : # Split the String based on the space arrOfStr = string.split(); # String to store the resultant String res = \"\"; # Traverse the words and # remove the first and last letter for a in arrOfStr : res += a[1:len(a) - 1] + \" \"; return res; # Driver codeif __name__ == \"__main__\" : string = \"Geeks for Geeks\"; print(string); print(FirstAndLast(string)); # This code is contributed by Ryuga",
"e": 2676,
"s": 2114,
"text": null
},
{
"code": "// C# program to remove the first// and last character of each word in a string.using System; class GFG{ static String FirstAndLast(String str) { // Split the String based on the space String[] arrOfStr = str.Split(' '); // String to store the resultant String String res = \"\"; // Traverse the words and // remove the first and last letter foreach (String a in arrOfStr) { res += a.Substring(1, a.Length-2) + \" \"; } return res; } // Driver code public static void Main(String []args) { String str = \"Geeks for Geeks\"; Console.WriteLine(str); Console.WriteLine(FirstAndLast(str)); }} /* This code contributed by PrinciRaj1992 */",
"e": 3432,
"s": 2676,
"text": null
},
{
"code": "<?php// PHP program to remove the first// and last character of each word in a string. function FirstAndLast($str){ // add a space to the end of the string $str .=\" \"; $res = (string) NULL; $w = (string) NULL; // traverse the string and extract words for($i=0; $i< strlen($str); $i++) { if($str[$i]== ' ') { // excluding the first and // last character $res .=substr($w, 1 ,strlen($w)-2) ; $res .= \" \"; // clear the word $w= (string) NULL; } else { // else add the character to word $w .=$str[$i]; } } return $res;} // Driver code $str = \"Geeks for Geeks\";echo $str , \"\\n\";echo FirstAndLast($str); // This code is contributed by ihritik ?>",
"e": 4257,
"s": 3432,
"text": null
},
{
"code": "<script> // JavaScript program to remove the first // and last character of each word in a string. function FirstAndLast(str) { // add a space to the end of the string str += \" \"; var res = \"\", w = \"\"; // traverse the string and extract words for (var i = 0; i < str.length; i++) { if (str[i] === \" \") { // excluding the first and // last character res += w.substring(1, w.length - 1) + \" \"; // clear the word w = \"\"; } else { // else add the character to word w += str[i]; } } return res; } // Driver code var str = \"Geeks for Geeks\"; document.write(str + \"<br>\"); document.write(FirstAndLast(str) + \"<br>\"); </script>",
"e": 5099,
"s": 4257,
"text": null
},
{
"code": null,
"e": 5125,
"s": 5099,
"text": "Geeks for Geeks\neek o eek"
},
{
"code": null,
"e": 5141,
"s": 5127,
"text": "princiraj1992"
},
{
"code": null,
"e": 5149,
"s": 5141,
"text": "ankthon"
},
{
"code": null,
"e": 5160,
"s": 5149,
"text": "andrew1234"
},
{
"code": null,
"e": 5168,
"s": 5160,
"text": "ihritik"
},
{
"code": null,
"e": 5175,
"s": 5168,
"text": "rdtank"
},
{
"code": null,
"e": 5194,
"s": 5175,
"text": "school-programming"
},
{
"code": null,
"e": 5202,
"s": 5194,
"text": "Strings"
},
{
"code": null,
"e": 5210,
"s": 5202,
"text": "Strings"
}
]
|
Queries on subsequence of string | 07 Jul, 2022
Given a string S and Q queries, each query contains a string T. The task is to print “Yes” if T is a subsequence of S, else print “No”.
Examples:
Input : S = "geeksforgeeks"
Query 1: "gg"
Query 2: "gro"
Query 3: "gfg"
Query 4: "orf"
Output :
Yes
No
Yes
No
For each query, using brute force, start iterating over S looking for the first character of T. As soon as the first character is found, continue iterating over S, now looking for the second character of T, and so on. If all the characters of T are found, print “Yes”, else print “No”. The time complexity is O(Q*N), where N is the length of S.
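A minimal sketch of this brute-force check for a single query is shown below (the helper name isSubsequence is ours, not part of the original code; it is only meant to illustrate the single O(N) scan per query):
C++
// Brute-force check: is T a subsequence of S?
// One linear scan of S per query, so O(Q*N) overall.
#include <bits/stdc++.h>
using namespace std;

bool isSubsequence(const string& S, const string& T)
{
    size_t j = 0; // index of the next character of T to match

    // Scan S once, advancing j whenever the current characters match
    for (size_t i = 0; i < S.size() && j < T.size(); ++i)
        if (S[i] == T[j])
            ++j;

    // T is a subsequence of S only if every character of T was matched
    return j == T.size();
}

int main()
{
    string S = "geeksforgeeks";
    cout << (isSubsequence(S, "gg") ? "Yes" : "No") << "\n";
    cout << (isSubsequence(S, "gro") ? "Yes" : "No") << "\n";
    return 0;
}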
The efficient approach is possible if, from every position of S, we already know the position of the next occurrence of each character. Then we can simply skip all the characters between the current position and the position of the next required character and jump straight to it. This can be done by building a |S| x 26 matrix that stores, for every position of S, the next position of each character.
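As a small worked trace (using a hypothetical short string, not the one in the code below): for S = "abca", the row for position 0 stores next['a'] = 0, next['b'] = 1, next['c'] = 2, and the row for position 1 stores next['a'] = 3, next['b'] = 1, next['c'] = 2. To answer the query T = "ac", start at position 0: 'a' is next found at index 0, so move to position 1; from position 1, 'c' is next found at index 2, so move to position 3. Every character of T was matched before running past the end of S, so the answer is "Yes".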
Below is the implementation of the above idea :
C++
Java
Python3
C#
Javascript
// C++ program to answer subsequence queries for a// given string.#include <bits/stdc++.h>#define MAX 10000#define CHAR_SIZE 26using namespace std; // Precompute the position of each character from// each position of String Svoid precompute(int mat[MAX][CHAR_SIZE], char str[], int len){ for (int i = 0; i < CHAR_SIZE; ++i) mat[len][i] = len; // Computing position of each character from // each position of String S for (int i = len-1; i >= 0; --i) { for (int j = 0; j < CHAR_SIZE; ++j) mat[i][j] = mat[i+1][j]; mat[i][str[i]-'a'] = i; }} // Print "Yes" if T is subsequence of S, else "No"bool query(int mat[MAX][CHAR_SIZE], const char *str, int len){ int pos = 0; // Traversing the string T for (int i = 0; i < strlen(str); ++i) { // If next position is greater than // length of S set flag to false. if (mat[pos][str[i] - 'a'] >= len) return false; // Setting position of next character else pos = mat[pos][str[i] - 'a'] + 1; } return true;} // Driven Programint main(){ char S[]= "geeksforgeeks"; int len = strlen(S); int mat[MAX][CHAR_SIZE]; precompute(mat, S, len); query(mat, "gg", len)? cout << "Yes\n" : cout << "No\n"; query(mat, "gro", len)? cout << "Yes\n" : cout << "No\n"; query(mat, "gfg", len)? cout << "Yes\n" : cout << "No\n"; query(mat, "orf", len)? cout << "Yes\n" : cout << "No\n"; return 0;}
// Java program to answer subsequence queries for// a given string.public class Query_Subsequence { static final int MAX = 10000; static final int CHAR_SIZE = 26; // Precompute the position of each character from // each position of String S static void precompute(int mat[][], String str, int len) { for (int i = 0; i < CHAR_SIZE; ++i) mat[len][i] = len; // Computing position of each character from // each position of String S for (int i = len-1; i >= 0; --i) { for (int j = 0; j < CHAR_SIZE; ++j) mat[i][j] = mat[i+1][j]; mat[i][str.charAt(i)-'a'] = i; } } // Print "Yes" if T is subsequence of S, else "No" static boolean query(int mat[][], String str, int len) { int pos = 0; // Traversing the string T for (int i = 0; i < str.length(); ++i) { // If next position is greater than // length of S set flag to false. if (mat[pos][str.charAt(i) - 'a'] >= len) return false; // Setting position of next character else pos = mat[pos][str.charAt(i) - 'a'] + 1; } return true; } // Driven Program public static void main(String args[]) { String S= "geeksforgeeks"; int len = S.length(); int[][] mat = new int[MAX][CHAR_SIZE]; precompute(mat, S, len); String get = query(mat, "gg", len)? "Yes" :"No"; System.out.println(get); get = query(mat, "gro", len)? "Yes" :"No"; System.out.println(get); get = query(mat, "gfg", len)? "Yes" :"No"; System.out.println(get); get = query(mat, "orf", len)? "Yes" :"No"; System.out.println(get); }}// This code is contributed by Sumit Ghosh
# Python3 program to answer# subsequence queries for# a given string.MAX = 10000CHAR_SIZE = 26 # Precompute the position of# each character from# each position of String Sdef precompute(mat, str, Len): for i in range(CHAR_SIZE): mat[Len][i] = Len # Computing position of each # character from each position # of String S for i in range(Len - 1, -1, -1): for j in range(CHAR_SIZE): mat[i][j] = mat[i + 1][j] mat[i][ord(str[i]) - ord('a')] = i # Print "Yes" if T is# subsequence of S, else "No"def query(mat, str, Len): pos = 0 # Traversing the string T for i in range(len(str)): # If next position is greater than # length of S set flag to false. if(mat[pos][ord(str[i]) - ord('a')] >= Len): return False # Setting position of next character else: pos = mat[pos][ord(str[i]) - ord('a')] + 1 return True # Driven codeS = "geeksforgeeks"Len = len(S)mat = [[0 for i in range(CHAR_SIZE)] for j in range(MAX)]precompute(mat, S, Len) get = "No"if(query(mat, "gg", Len)): get = "Yes"print(get) get = "No"if(query(mat, "gro", Len)): get = "Yes"print(get) get = "No"if(query(mat, "gfg", Len)): get = "Yes"print(get) get = "No"if(query(mat, "orf", Len)): get = "Yes"print(get) # This code is contributed by avanitrachhadiya2155
// C# program to answer subsequence// queries for a given stringusing System;public class Query_Subsequence{ static int MAX = 10000; static int CHAR_SIZE = 26; // Precompute the position of each // character from each position // of String S static void precompute(int [,]mat, string str, int len) { for (int i = 0; i < CHAR_SIZE; ++i) mat[len, i] = len; // Computing position of each // character from each position // of String S for (int i = len - 1; i >= 0; --i) { for (int j = 0; j < CHAR_SIZE; ++j) mat[i, j] = mat[i + 1, j]; mat[i, str[i] - 'a'] = i; } } // Print "Yes" if T is subsequence // of S, else "No" static bool query(int [,]mat, string str, int len) { int pos = 0; // Traversing the string T for (int i = 0; i < str.Length; ++i) { // If next position is greater than // length of S set flag to false. if (mat[pos,str[i] - 'a'] >= len) return false; // Setting position of next character else pos = mat[pos,str[i] - 'a'] + 1; } return true; } // Driver Code public static void Main() { string S= "geeksforgeeks"; int len = S.Length; int[,] mat = new int[MAX,CHAR_SIZE]; precompute(mat, S, len); string get = query(mat, "gg", len)? "Yes" :"No"; Console.WriteLine(get); get = query(mat, "gro", len)? "Yes" :"No"; Console.WriteLine(get); get = query(mat, "gfg", len)? "Yes" :"No"; Console.WriteLine(get); get = query(mat, "orf", len)? "Yes" :"No"; Console.WriteLine(get); }} // This code is contributed by vt_m.
<script> // Javascript program to answer subsequence queries for // a given string. let MAX = 10000; let CHAR_SIZE = 26; // Precompute the position of each character from // each position of String S function precompute(mat, str, len) { for (let i = 0; i < CHAR_SIZE; ++i) mat[len][i] = len; // Computing position of each character from // each position of String S for (let i = len-1; i >= 0; --i) { for (let j = 0; j < CHAR_SIZE; ++j) mat[i][j] = mat[i+1][j]; mat[i][str[i].charCodeAt()-'a'.charCodeAt()] = i; } } // Print "Yes" if T is subsequence of S, else "No" function query(mat, str, len) { let pos = 0; // Traversing the string T for (let i = 0; i < str.length; ++i) { // If next position is greater than // length of S set flag to false. if (mat[pos][str[i].charCodeAt() - 'a'.charCodeAt()] >= len) return false; // Setting position of next character else pos = mat[pos][str[i].charCodeAt() - 'a'.charCodeAt()] + 1; } return true; } let S= "geeksforgeeks"; let len = S.length; let mat = new Array(MAX); for(let i = 0; i < MAX; i++) { mat[i] = new Array(CHAR_SIZE); for(let j = 0; j < CHAR_SIZE; j++) { mat[i][j] = 0; } } precompute(mat, S, len); let get = query(mat, "gg", len)? "Yes" :"No"; document.write(get + "</br>"); get = query(mat, "gro", len)? "Yes" :"No"; document.write(get + "</br>"); get = query(mat, "gfg", len)? "Yes" :"No"; document.write(get + "</br>"); get = query(mat, "orf", len)? "Yes" :"No"; document.write(get + "</br>"); </script>
Yes
No
Yes
No
This article is contributed by Anuj Chauhan. If you like GeeksforGeeks and would like to contribute, you can also write an article using write.geeksforgeeks.org or mail your article to [email protected]. See your article appearing on the GeeksforGeeks main page and help other Geeks.
vt_m
avanitrachhadiya2155
mukesh07
hardikkoriintern
subsequence
Strings
Strings
| [
{
"code": null,
"e": 54,
"s": 26,
"text": "\n07 Jul, 2022"
},
{
"code": null,
"e": 190,
"s": 54,
"text": "Given a string S and Q queries, each query contains a string T. The task is to print “Yes” if T is a subsequence of S, else print “No”."
},
{
"code": null,
"e": 201,
"s": 190,
"text": "Examples: "
},
{
"code": null,
"e": 312,
"s": 201,
"text": "Input : S = \"geeksforgeeks\"\nQuery 1: \"gg\"\nQuery 2: \"gro\"\nQuery 3: \"gfg\"\nQuery 4: \"orf\"\n\nOutput :\nYes\nNo\nYes\nNo"
},
{
"code": null,
"e": 669,
"s": 312,
"text": "For each query, using the brute force, start iterating over S looking for the first character of T. As soon as, the first character is found, continue to iterate S now looking for the second character of T and so on (Refer this for details). If manage to find all the character of T, print “Yes”, else “No”. Time complexity is O(Q*N), N is the length of S."
},
{
"code": null,
"e": 991,
"s": 669,
"text": "The efficient approach can be if we know the position of the next character of T in S. Then simply skip all the character between current and position of the next character and jump to that position. This can be done by making |S| x 26 size matrix and storing the next position of each character from every position of S."
},
{
"code": null,
"e": 1040,
"s": 991,
"text": "Below is the implementation of the above idea : "
},
{
"code": null,
"e": 1044,
"s": 1040,
"text": "C++"
},
{
"code": null,
"e": 1049,
"s": 1044,
"text": "Java"
},
{
"code": null,
"e": 1057,
"s": 1049,
"text": "Python3"
},
{
"code": null,
"e": 1060,
"s": 1057,
"text": "C#"
},
{
"code": null,
"e": 1071,
"s": 1060,
"text": "Javascript"
},
{
"code": "// C++ program to answer subsequence queries for a// given string.#include <bits/stdc++.h>#define MAX 10000#define CHAR_SIZE 26using namespace std; // Precompute the position of each character from// each position of String Svoid precompute(int mat[MAX][CHAR_SIZE], char str[], int len){ for (int i = 0; i < CHAR_SIZE; ++i) mat[len][i] = len; // Computing position of each character from // each position of String S for (int i = len-1; i >= 0; --i) { for (int j = 0; j < CHAR_SIZE; ++j) mat[i][j] = mat[i+1][j]; mat[i][str[i]-'a'] = i; }} // Print \"Yes\" if T is subsequence of S, else \"No\"bool query(int mat[MAX][CHAR_SIZE], const char *str, int len){ int pos = 0; // Traversing the string T for (int i = 0; i < strlen(str); ++i) { // If next position is greater than // length of S set flag to false. if (mat[pos][str[i] - 'a'] >= len) return false; // Setting position of next character else pos = mat[pos][str[i] - 'a'] + 1; } return true;} // Driven Programint main(){ char S[]= \"geeksforgeeks\"; int len = strlen(S); int mat[MAX][CHAR_SIZE]; precompute(mat, S, len); query(mat, \"gg\", len)? cout << \"Yes\\n\" : cout << \"No\\n\"; query(mat, \"gro\", len)? cout << \"Yes\\n\" : cout << \"No\\n\"; query(mat, \"gfg\", len)? cout << \"Yes\\n\" : cout << \"No\\n\"; query(mat, \"orf\", len)? cout << \"Yes\\n\" : cout << \"No\\n\"; return 0;}",
"e": 2736,
"s": 1071,
"text": null
},
{
"code": "// Java program to answer subsequence queries for// a given string.public class Query_Subsequence { static final int MAX = 10000; static final int CHAR_SIZE = 26; // Precompute the position of each character from // each position of String S static void precompute(int mat[][], String str, int len) { for (int i = 0; i < CHAR_SIZE; ++i) mat[len][i] = len; // Computing position of each character from // each position of String S for (int i = len-1; i >= 0; --i) { for (int j = 0; j < CHAR_SIZE; ++j) mat[i][j] = mat[i+1][j]; mat[i][str.charAt(i)-'a'] = i; } } // Print \"Yes\" if T is subsequence of S, else \"No\" static boolean query(int mat[][], String str, int len) { int pos = 0; // Traversing the string T for (int i = 0; i < str.length(); ++i) { // If next position is greater than // length of S set flag to false. if (mat[pos][str.charAt(i) - 'a'] >= len) return false; // Setting position of next character else pos = mat[pos][str.charAt(i) - 'a'] + 1; } return true; } // Driven Program public static void main(String args[]) { String S= \"geeksforgeeks\"; int len = S.length(); int[][] mat = new int[MAX][CHAR_SIZE]; precompute(mat, S, len); String get = query(mat, \"gg\", len)? \"Yes\" :\"No\"; System.out.println(get); get = query(mat, \"gro\", len)? \"Yes\" :\"No\"; System.out.println(get); get = query(mat, \"gfg\", len)? \"Yes\" :\"No\"; System.out.println(get); get = query(mat, \"orf\", len)? \"Yes\" :\"No\"; System.out.println(get); }}// This code is contributed by Sumit Ghosh",
"e": 4623,
"s": 2736,
"text": null
},
{
"code": "# Python3 program to answer# subsequence queries for# a given string.MAX = 10000CHAR_SIZE = 26 # Precompute the position of# each character from# each position of String Sdef precompute(mat, str, Len): for i in range(CHAR_SIZE): mat[Len][i] = Len # Computing position of each # character from each position # of String S for i in range(Len - 1, -1, -1): for j in range(CHAR_SIZE): mat[i][j] = mat[i + 1][j] mat[i][ord(str[i]) - ord('a')] = i # Print \"Yes\" if T is# subsequence of S, else \"No\"def query(mat, str, Len): pos = 0 # Traversing the string T for i in range(len(str)): # If next position is greater than # length of S set flag to false. if(mat[pos][ord(str[i]) - ord('a')] >= Len): return False # Setting position of next character else: pos = mat[pos][ord(str[i]) - ord('a')] + 1 return True # Driven codeS = \"geeksforgeeks\"Len = len(S)mat = [[0 for i in range(CHAR_SIZE)] for j in range(MAX)]precompute(mat, S, Len) get = \"No\"if(query(mat, \"gg\", Len)): get = \"Yes\"print(get) get = \"No\"if(query(mat, \"gro\", Len)): get = \"Yes\"print(get) get = \"No\"if(query(mat, \"gfg\", Len)): get = \"Yes\"print(get) get = \"No\"if(query(mat, \"orf\", Len)): get = \"Yes\"print(get) # This code is contributed by avanitrachhadiya2155",
"e": 6042,
"s": 4623,
"text": null
},
{
"code": "// C# program to answer subsequence// queries for a given stringusing System;public class Query_Subsequence{ static int MAX = 10000; static int CHAR_SIZE = 26; // Precompute the position of each // character from each position // of String S static void precompute(int [,]mat, string str, int len) { for (int i = 0; i < CHAR_SIZE; ++i) mat[len, i] = len; // Computing position of each // character from each position // of String S for (int i = len - 1; i >= 0; --i) { for (int j = 0; j < CHAR_SIZE; ++j) mat[i, j] = mat[i + 1, j]; mat[i, str[i] - 'a'] = i; } } // Print \"Yes\" if T is subsequence // of S, else \"No\" static bool query(int [,]mat, string str, int len) { int pos = 0; // Traversing the string T for (int i = 0; i < str.Length; ++i) { // If next position is greater than // length of S set flag to false. if (mat[pos,str[i] - 'a'] >= len) return false; // Setting position of next character else pos = mat[pos,str[i] - 'a'] + 1; } return true; } // Driver Code public static void Main() { string S= \"geeksforgeeks\"; int len = S.Length; int[,] mat = new int[MAX,CHAR_SIZE]; precompute(mat, S, len); string get = query(mat, \"gg\", len)? \"Yes\" :\"No\"; Console.WriteLine(get); get = query(mat, \"gro\", len)? \"Yes\" :\"No\"; Console.WriteLine(get); get = query(mat, \"gfg\", len)? \"Yes\" :\"No\"; Console.WriteLine(get); get = query(mat, \"orf\", len)? \"Yes\" :\"No\"; Console.WriteLine(get); }} // This code is contributed by vt_m.",
"e": 8091,
"s": 6042,
"text": null
},
{
"code": "<script> // Javascript program to answer subsequence queries for // a given string. let MAX = 10000; let CHAR_SIZE = 26; // Precompute the position of each character from // each position of String S function precompute(mat, str, len) { for (let i = 0; i < CHAR_SIZE; ++i) mat[len][i] = len; // Computing position of each character from // each position of String S for (let i = len-1; i >= 0; --i) { for (let j = 0; j < CHAR_SIZE; ++j) mat[i][j] = mat[i+1][j]; mat[i][str[i].charCodeAt()-'a'.charCodeAt()] = i; } } // Print \"Yes\" if T is subsequence of S, else \"No\" function query(mat, str, len) { let pos = 0; // Traversing the string T for (let i = 0; i < str.length; ++i) { // If next position is greater than // length of S set flag to false. if (mat[pos][str[i].charCodeAt() - 'a'.charCodeAt()] >= len) return false; // Setting position of next character else pos = mat[pos][str[i].charCodeAt() - 'a'.charCodeAt()] + 1; } return true; } let S= \"geeksforgeeks\"; let len = S.length; let mat = new Array(MAX); for(let i = 0; i < MAX; i++) { mat[i] = new Array(CHAR_SIZE); for(let j = 0; j < CHAR_SIZE; j++) { mat[i][j] = 0; } } precompute(mat, S, len); let get = query(mat, \"gg\", len)? \"Yes\" :\"No\"; document.write(get + \"</br>\"); get = query(mat, \"gro\", len)? \"Yes\" :\"No\"; document.write(get + \"</br>\"); get = query(mat, \"gfg\", len)? \"Yes\" :\"No\"; document.write(get + \"</br>\"); get = query(mat, \"orf\", len)? \"Yes\" :\"No\"; document.write(get + \"</br>\"); </script>",
"e": 9954,
"s": 8091,
"text": null
},
{
"code": null,
"e": 9968,
"s": 9954,
"text": "Yes\nNo\nYes\nNo"
},
{
"code": null,
"e": 10265,
"s": 9968,
"text": "This article is contributed by Anuj Chauhan. If you like GeeksforGeeks and would like to contribute, you can also write an article using write.geeksforgeeks.org or mail your article to [email protected]. See your article appearing on the GeeksforGeeks main page and help other Geeks. "
},
{
"code": null,
"e": 10270,
"s": 10265,
"text": "vt_m"
},
{
"code": null,
"e": 10291,
"s": 10270,
"text": "avanitrachhadiya2155"
},
{
"code": null,
"e": 10300,
"s": 10291,
"text": "mukesh07"
},
{
"code": null,
"e": 10317,
"s": 10300,
"text": "hardikkoriintern"
},
{
"code": null,
"e": 10329,
"s": 10317,
"text": "subsequence"
},
{
"code": null,
"e": 10337,
"s": 10329,
"text": "Strings"
},
{
"code": null,
"e": 10345,
"s": 10337,
"text": "Strings"
}
]
|
CSS flex Property | 10 Nov, 2021
The flex CSS shorthand property is the combination of the flex-grow, flex-shrink, and flex-basis properties. It is used to set the length of flexible items. The flex property is responsive and mobile-friendly: it makes it easy to position child elements inside the main container, margins do not collapse with the content margins, and the order of any element can easily be changed without editing the HTML section.
Syntax:
flex: flex-grow flex-shrink flex-basis|auto|initial|inherit;
Property Values:
flex-grow Property: A number that specifies how much the item will grow relative to the rest of the flexible items.
flex-shrink Property: A number that specifies how much the item will shrink relative to the rest of the flexible items.
flex-basis Property: It sets the length of items. Legal values of flex-basis are: auto, inherit, or a number followed by %, em, px, or any other length unit.
flex-wrap Property: The CSS flex-wrap property is used to specify whether flex items are forced into a single line or wrapped onto multiple lines.
The flex property can be specified with the help of 1, 2 or 3 values:
One-value syntax: the value should contain one of the following:
number: flex: <number> is interpreted as flex: <number> 1 0, i.e. flex-shrink and flex-basis are assumed to be 1 and 0 respectively.
It can also be specified by one of the keywords auto, none or initial.
Two-value syntax: It must contain the following values:
The first value should be a number, which represents flex-grow.
The second value should contain one of the following:
number: If it is a number, it represents flex-shrink.
a width with a valid value, which represents flex-basis.
Three-value syntax: The values should be in the following order:
the first number represents flex-grow.
the second number represents flex-shrink.
a width with a valid value represents flex-basis.
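As a quick illustration of the three forms described above (a minimal sketch; the class names are placeholders and not part of any specification):
CSS
/* One-value syntax: flex: 2 is equivalent to flex: 2 1 0% */
.item-a { flex: 2; }

/* Two-value syntax: flex-grow 1 and flex-shrink 2 (flex-basis defaults to 0%) */
.item-b { flex: 1 2; }

/* Two-value syntax: flex-grow 1 and flex-basis 30px (flex-shrink defaults to 1) */
.item-c { flex: 1 30px; }

/* Three-value syntax: flex-grow 2, flex-shrink 2, flex-basis 10% */
.item-d { flex: 2 2 10%; }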
Example 1: This example describes the flex property using a single value.
HTML
<!DOCTYPE html><html> <head> <title> CSS flex Property </title> <style> #Geeks { width: 300px; height: 200px; border: 1px solid black; display: flex; } #Geeks div { flex: 1; } .GFG1 { background-color: green; } .GFG2 { background-color: lightgreen; } .GFG3 { background-color: darkgreen; } </style></head> <body> <h2>CSS flex Property</h2> <div id="Geeks"> <div class="GFG1"> GeeksforGeeks </div> <div class="GFG2"> Lite Content </div> <div class="GFG3"> Special Content </div> </div></body> </html>
Output:
Example 2: This example describes the flex property using the 3 values that represent the flex-grow, flex-shrink & flex-basis properties.
HTML
<!DOCTYPE html><html> <head> <title> CSS flex Property </title> <style> #Geeks { width: 300px; height: 200px; border: 1px solid black; display: flex; } #Geeks div { flex: 1 0 auto; } .GFG1 { background-color: green; } .GFG2 { background-color: lightgreen; } .GFG3 { background-color: darkgreen; } </style></head> <body> <h2>CSS flex Property</h2> <div id="Geeks"> <div class="GFG1"> GeeksforGeeks </div> <div class="GFG2"> Lite Content </div> <div class="GFG3"> Special Content </div> </div></body> </html>
Output:
Supported Browsers:
Google Chrome 29.0, 21.0 -webkit-
Microsoft Edge 12.0, 12.0 -webkit-
Internet Explorer 11.0, 10.0 -ms-
Firefox 28.0, 18.0 -moz-
Safari 9.0, 6.1 -webkit-
Opera 17.0
bhaskargeeksforgeeks
surinderdawra388
CSS-Properties
Picked
Technical Scripter 2018
CSS
Technical Scripter
Web Technologies
| [
{
"code": null,
"e": 28,
"s": 0,
"text": "\n10 Nov, 2021"
},
{
"code": null,
"e": 434,
"s": 28,
"text": "The flex CSS shorthand property is the combination of flex-grow, flex-shrink, and flex-basis property. It is used to set the length of flexible items. The flex property is much responsive and mobile-friendly. It is easy to position child elements and the main container. The margin doesn’t collapse with the content margins. The order of any element can be easily changed without editing the HTML section."
},
{
"code": null,
"e": 442,
"s": 434,
"text": "Syntax:"
},
{
"code": null,
"e": 503,
"s": 442,
"text": "flex: flex-grow flex-shrink flex-basis|auto|initial|inherit;"
},
{
"code": null,
"e": 520,
"s": 503,
"text": "Property Values:"
},
{
"code": null,
"e": 634,
"s": 520,
"text": "flex-grow Property: A number that specifies, how much items will grow relative to the rest of the flexible items."
},
{
"code": null,
"e": 752,
"s": 634,
"text": "flex-shrink Property: A number that specifies, how much items will shrink relative to the rest of the flexible items."
},
{
"code": null,
"e": 1056,
"s": 752,
"text": "flex-basis Property: It sets the length of items. Legal values of flex-basis are: auto, inherit, or a number followed by %, em, px, or any other length unit.flex-wrap Property: The CSS flex-wrap property is used to specify whether flex items are forced into a single line or wrapped onto multiple lines."
},
{
"code": null,
"e": 1203,
"s": 1056,
"text": "flex-wrap Property: The CSS flex-wrap property is used to specify whether flex items are forced into a single line or wrapped onto multiple lines."
},
{
"code": null,
"e": 1273,
"s": 1203,
"text": "The flex property can be specified with the help of 1, 2 or 3 values:"
},
{
"code": null,
"e": 1534,
"s": 1273,
"text": "One-value syntax: the value should contain one of following:number: If it is represented as flex: <number> 1 0; then the value of flex-shrink, flex-basis will supposed to be 1 & 0 respectively.It can be specified by one of the keyword as auto, none or initial."
},
{
"code": null,
"e": 1668,
"s": 1534,
"text": "number: If it is represented as flex: <number> 1 0; then the value of flex-shrink, flex-basis will supposed to be 1 & 0 respectively."
},
{
"code": null,
"e": 1736,
"s": 1668,
"text": "It can be specified by one of the keyword as auto, none or initial."
},
{
"code": null,
"e": 2041,
"s": 1736,
"text": "Two-value syntax: It must contains the following values:The first value should be the number that will represent the flex-grow.The second value should contain one of the following:number: If it is number then it will represented as flex-shrink.a width with the valid value will represents the flex-basis."
},
{
"code": null,
"e": 2113,
"s": 2041,
"text": "The first value should be the number that will represent the flex-grow."
},
{
"code": null,
"e": 2291,
"s": 2113,
"text": "The second value should contain one of the following:number: If it is number then it will represented as flex-shrink.a width with the valid value will represents the flex-basis."
},
{
"code": null,
"e": 2356,
"s": 2291,
"text": "number: If it is number then it will represented as flex-shrink."
},
{
"code": null,
"e": 2417,
"s": 2356,
"text": "a width with the valid value will represents the flex-basis."
},
{
"code": null,
"e": 2616,
"s": 2417,
"text": "Three-value syntax: The values should be in the same order:first number represents the flex-grow.second number represents the flex-shrink.a width with the valid value will represents the flex-basis."
},
{
"code": null,
"e": 2655,
"s": 2616,
"text": "first number represents the flex-grow."
},
{
"code": null,
"e": 2697,
"s": 2655,
"text": "second number represents the flex-shrink."
},
{
"code": null,
"e": 2758,
"s": 2697,
"text": "a width with the valid value will represents the flex-basis."
},
{
"code": null,
"e": 2865,
"s": 2758,
"text": "Example: This example describes the flex property with the help of the single value to represent the flex."
},
{
"code": null,
"e": 2870,
"s": 2865,
"text": "HTML"
},
{
"code": "<!DOCTYPE html><html> <head> <title> CSS flex Property </title> <style> #Geeks { width: 300px; height: 200px; border: 1px solid black; display: flex; } #Geeks div { flex: 1; } .GFG1 { background-color: green; } .GFG2 { background-color: lightgreen; } .GFG3 { background-color: darkgreen; } </style></head> <body> <h2>CSS flex Property</h2> <div id=\"Geeks\"> <div class=\"GFG1\"> GeeksforGeeks </div> <div class=\"GFG2\"> Lite Content </div> <div class=\"GFG3\"> Special Content </div> </div></body> </html>",
"e": 3515,
"s": 2870,
"text": null
},
{
"code": null,
"e": 3523,
"s": 3515,
"text": "Output:"
},
{
"code": null,
"e": 3673,
"s": 3523,
"text": "Example 2: This example describes the flex property with the help of the 3 values that represents the flex-grow, flex-shrink & flex-basis properties."
},
{
"code": null,
"e": 3678,
"s": 3673,
"text": "HTML"
},
{
"code": "<!DOCTYPE html><html> <head> <title> CSS flex Property </title> <style> #Geeks { width: 300px; height: 200px; border: 1px solid black; display: flex; } #Geeks div { flex: 1 0 auto; } .GFG1 { background-color: green; } .GFG2 { background-color: lightgreen; } .GFG3 { background-color: darkgreen; } </style></head> <body> <h2>CSS flex Property</h2> <div id=\"Geeks\"> <div class=\"GFG1\"> GeeksforGeeks </div> <div class=\"GFG2\"> Lite Content </div> <div class=\"GFG3\"> Special Content </div> </div></body> </html>",
"e": 4330,
"s": 3678,
"text": null
},
{
"code": null,
"e": 4338,
"s": 4330,
"text": "Output:"
},
{
"code": null,
"e": 4358,
"s": 4338,
"text": "Supported Browsers:"
},
{
"code": null,
"e": 4392,
"s": 4358,
"text": "Google Chrome 29.0, 21.0 -webkit-"
},
{
"code": null,
"e": 4427,
"s": 4392,
"text": "Microsoft Edge 12.0, 12.0 -webkit-"
},
{
"code": null,
"e": 4461,
"s": 4427,
"text": "Internet Explorer 11.0, 10.0 -ms-"
},
{
"code": null,
"e": 4486,
"s": 4461,
"text": "Firefox 28.0, 18.0 -moz-"
},
{
"code": null,
"e": 4511,
"s": 4486,
"text": "Safari 9.0, 6.1 -webkit-"
},
{
"code": null,
"e": 4522,
"s": 4511,
"text": "Opera 17.0"
},
{
"code": null,
"e": 4543,
"s": 4522,
"text": "bhaskargeeksforgeeks"
},
{
"code": null,
"e": 4560,
"s": 4543,
"text": "surinderdawra388"
},
{
"code": null,
"e": 4575,
"s": 4560,
"text": "CSS-Properties"
},
{
"code": null,
"e": 4582,
"s": 4575,
"text": "Picked"
},
{
"code": null,
"e": 4606,
"s": 4582,
"text": "Technical Scripter 2018"
},
{
"code": null,
"e": 4610,
"s": 4606,
"text": "CSS"
},
{
"code": null,
"e": 4629,
"s": 4610,
"text": "Technical Scripter"
},
{
"code": null,
"e": 4646,
"s": 4629,
"text": "Web Technologies"
}
]
|
Matlab | Dilation of an Image | 23 Mar, 2022
Morphology is a broad set of image processing operations that process images based on shapes. It is also a tool used for extracting image components that are useful in the representation and description of region shape.
The basic morphological operations are:
Erosion
Dilation
Dilation:
Dilation expands the image pixels, i.e., it is used for expanding an element A by using a structuring element B.
Dilation adds pixels to object boundaries.
The value of the output pixel is the maximum value of all the pixels in the neighborhood. A pixel is set to 1 if any of the neighboring pixels have the value 1.
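As a small worked illustration (a hypothetical 3x3 neighborhood, not taken from the code below): if the pixels covered by the structuring element around an output position are [0 0 0; 0 0 1; 0 0 0], the maximum over that neighborhood is 1, so the output pixel is set to 1; only an all-zero neighborhood leaves the output pixel at 0. This is why dilation grows the white (foreground) regions of a binary image.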
Approach:
Read the RGB image.
Using function im2bw(), convert the RGB image to a binary image.
Create a structuring element, or use any predefined mask, e.g. fspecial('sobel').
Store the number of rows and columns in an array and loop through it.
Create a zero matrix of the size same as the size of our image.
Leaving the boundary pixels start moving the structuring element on the image and start comparing the pixel with the pixels present in the neighborhood.
If the value of the neighborhood pixel is 1, then change the value of that pixel to 1.
Example:
MATLAB
% MATLAB code for Dilation% read image I=imread('lenna.png'); % convert to binaryI=im2bw(I); % create structuring element se=ones(5, 5); % store number of rows in P and% number of columns in Q. [P, Q]=size(se); % create a zero matrix of size I. In=zeros(size(I, 1), size(I, 2)); for i=ceil(P/2):size(I, 1)-floor(P/2) for j=ceil(Q/2):size(I, 2)-floor(Q/2) % take all the neighbourhoods. on=I(i-floor(P/2):i+floor(P/2), j-floor(Q/2):j+floor(Q/2)); % take logical se nh=on(logical(se)); % compare and take minimum value of the neighbor % and set the pixel value to that minimum value. In(i, j)=max(nh(:)); endend imshow(In);
Output:
Figure: Input image
Figure: Output image
Let’s take another example for dilation.
Syntax:
imread() function is used to read the image.
strel() function is used to define the structuring element. We have chosen a disk-shaped SE, of radius 5.
imdilate() function is used to perform the dilation operation.
imtool() function is used to display the image.
Example:
Matlab
% MATLAB code for Dilation% read the image.k=imread("dilation.png"); % define the structuring element.SE=strel('disk',5); % apply the dilation operation.d=imdilate(k,SE); %display all the images.imtool(k,[]);imtool(d,[]); %see the effective expansion% in original imageimtool(d-k,[]);
Output:
Figure: Left: Original image, Right: Dilated image
Figure: Expansion in the original image
Code Explanation:
k=imread("dilation.png"); this line reads the image.
SE=strel(‘disk’,5); this line defines the structuring element.
d=imdilate(k,SE); this line applies the dilation operation.
imtool(k,[]); this line displays the original image.
imtool(d,[]); this line displays the dilated image.
imtool(d-k,[]); this line shows the effective expansion in original image.
The last image shows the extent to which the original image got dilated. We have used a disk-shaped structuring element, and the image we used is also circular in shape. This gives us the desired output to understand dilation.
pintusaini
sagartomar9927
Image-Processing
MATLAB
Advanced Computer Subject
| [
{
"code": null,
"e": 28,
"s": 0,
"text": "\n23 Mar, 2022"
},
{
"code": null,
"e": 269,
"s": 28,
"text": "Morphology is known as the broad set of image processing operations that process images based on shapes. It is also known as a tool used for extracting image components that are useful in the representation and description of region shape. "
},
{
"code": null,
"e": 310,
"s": 269,
"text": "The basic morphological operations are: "
},
{
"code": null,
"e": 319,
"s": 310,
"text": "Erosion "
},
{
"code": null,
"e": 329,
"s": 319,
"text": "Dilation "
},
{
"code": null,
"e": 339,
"s": 329,
"text": "Dilation:"
},
{
"code": null,
"e": 448,
"s": 339,
"text": "Dilation expands the image pixels i.e. it is used for expanding an element A by using structuring element B."
},
{
"code": null,
"e": 491,
"s": 448,
"text": "Dilation adds pixels to object boundaries."
},
{
"code": null,
"e": 652,
"s": 491,
"text": "The value of the output pixel is the maximum value of all the pixels in the neighborhood. A pixel is set to 1 if any of the neighboring pixels have the value 1."
},
{
"code": null,
"e": 663,
"s": 652,
"text": "Approach: "
},
{
"code": null,
"e": 683,
"s": 663,
"text": "Read the RGB image."
},
{
"code": null,
"e": 748,
"s": 683,
"text": "Using function im2bw(), convert the RGB image to a binary image."
},
{
"code": null,
"e": 834,
"s": 748,
"text": "Create a structuring element or you can use any predefined mask eg. special(‘sobel’)."
},
{
"code": null,
"e": 904,
"s": 834,
"text": "Store the number of rows and columns in an array and loop through it."
},
{
"code": null,
"e": 968,
"s": 904,
"text": "Create a zero matrix of the size same as the size of our image."
},
{
"code": null,
"e": 1121,
"s": 968,
"text": "Leaving the boundary pixels start moving the structuring element on the image and start comparing the pixel with the pixels present in the neighborhood."
},
{
"code": null,
"e": 1208,
"s": 1121,
"text": "If the value of the neighborhood pixel is 1, then change the value of that pixel to 1."
},
{
"code": null,
"e": 1217,
"s": 1208,
"text": "Example:"
},
{
"code": null,
"e": 1224,
"s": 1217,
"text": "MATLAB"
},
{
"code": "% MATLAB code for Dilation% read image I=imread('lenna.png'); % convert to binaryI=im2bw(I); % create structuring element se=ones(5, 5); % store number of rows in P and% number of columns in Q. [P, Q]=size(se); % create a zero matrix of size I. In=zeros(size(I, 1), size(I, 2)); for i=ceil(P/2):size(I, 1)-floor(P/2) for j=ceil(Q/2):size(I, 2)-floor(Q/2) % take all the neighbourhoods. on=I(i-floor(P/2):i+floor(P/2), j-floor(Q/2):j+floor(Q/2)); % take logical se nh=on(logical(se)); % compare and take minimum value of the neighbor % and set the pixel value to that minimum value. In(i, j)=max(nh(:)); endend imshow(In);",
"e": 1954,
"s": 1224,
"text": null
},
{
"code": null,
"e": 1963,
"s": 1954,
"text": "Output: "
},
{
"code": null,
"e": 1983,
"s": 1963,
"text": "Figure: Input image"
},
{
"code": null,
"e": 2004,
"s": 1983,
"text": "Figure: Output image"
},
{
"code": null,
"e": 2045,
"s": 2004,
"text": "Let’s take another example for dilation."
},
{
"code": null,
"e": 2053,
"s": 2045,
"text": "Syntax:"
},
{
"code": null,
"e": 2098,
"s": 2053,
"text": "imread() function is used to read the image."
},
{
"code": null,
"e": 2204,
"s": 2098,
"text": "strel() function is used to define the structuring element. We have chosen a disk-shaped SE, of radius 5."
},
{
"code": null,
"e": 2268,
"s": 2204,
"text": "imdialate() function is used to perform the dilation operation."
},
{
"code": null,
"e": 2316,
"s": 2268,
"text": "imtool() function is used to display the image."
},
{
"code": null,
"e": 2325,
"s": 2316,
"text": "Example:"
},
{
"code": null,
"e": 2332,
"s": 2325,
"text": "Matlab"
},
{
"code": "% MATLAB code for Dilation% read the image.k=imread(\"dilation.png\"); % define the structuring element.SE=strel('disk',5); % apply the dilation operation.d=imdilate(k,SE); %display all the images.imtool(k,[]);imtool(d,[]); %see the effective expansion% in original imageimtool(d-k,[]);",
"e": 2618,
"s": 2332,
"text": null
},
{
"code": null,
"e": 2626,
"s": 2618,
"text": "Output:"
},
{
"code": null,
"e": 2677,
"s": 2626,
"text": "Figure: Left: Original image, Right: Dilated image"
},
{
"code": null,
"e": 2717,
"s": 2677,
"text": "Figure: Expansion in the original image"
},
{
"code": null,
"e": 2736,
"s": 2717,
"text": "Code Explanation: "
},
{
"code": null,
"e": 2794,
"s": 2736,
"text": "k=imread(“dilation_exmp.png”); this line reads the image."
},
{
"code": null,
"e": 2857,
"s": 2794,
"text": "SE=strel(‘disk’,5); this line defines the structuring element."
},
{
"code": null,
"e": 2917,
"s": 2857,
"text": "d=imdilate(k,SE); this line applies the dilation operation."
},
{
"code": null,
"e": 2970,
"s": 2917,
"text": "imtool(k,[]); this line displays the original image."
},
{
"code": null,
"e": 3023,
"s": 2970,
"text": "imtool(e,[]); this line displays the dilated image."
},
{
"code": null,
"e": 3098,
"s": 3023,
"text": "imtool(d-k,[]); this line shows the effective expansion in original image."
},
{
"code": null,
"e": 3333,
"s": 3098,
"text": "The last image shows the extent to which the original image got dilated. We have used the Structuring element of disk-shaped and the image we used is also circular in shape. This gives us the very desired output to understand erosion."
},
{
"code": null,
"e": 3344,
"s": 3333,
"text": "pintusaini"
},
{
"code": null,
"e": 3359,
"s": 3344,
"text": "sagartomar9927"
},
{
"code": null,
"e": 3376,
"s": 3359,
"text": "Image-Processing"
},
{
"code": null,
"e": 3383,
"s": 3376,
"text": "MATLAB"
},
{
"code": null,
"e": 3409,
"s": 3383,
"text": "Advanced Computer Subject"
}
]
|
Maximum and Minimum value of a quadratic function | 31 May, 2022
Given a quadratic function ax2 + bx + c. Find the maximum and minimum value of the function possible when x is varied for all real values possible.
Examples:
Input: a = 1, b = -4, c = 4
Output:
Maxvalue = Infinity
Minvalue = 0
Quadratic function given is x2 -4x + 4
At x = 2, value of the function is equal to zero.
Input: a = -1, b = 3, c = -2
Output:
Maxvalue = 0.25
Minvalue = -Infinity
Approach:
Q(x) = ax2 + bx + c
     = a(x + b/(2a))2 + (c - b2/(4a))
       [first part]      [second part]
The function is broken into two parts. The first part is a perfect square function. There can be two cases:
Case 1: If the value of a is positive:
The maximum value would be equal to Infinity.
The minimum value of the function is attained when the first part is equal to zero, because the minimum value of a square function is zero.
Case 2: If the value of a is negative:
The minimum value would be equal to -Infinity.
Since a is negative, the task is to maximize the negative square function. Again, the maximum value of a negative square function is equal to zero, as it would be negative for any other value of x.
The second part is a constant for a given quadratic function and hence cannot change for any value of x, so it is added in both cases. Hence, the answer to the problem is:
If a > 0,
Maxvalue = Infinity
Minvalue = c - b2 / (4a)
If a < 0,
Maxvalue = c - b2 / (4a)
Minvalue = -Infinity
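As a quick worked check with the second sample above (a = -1, b = 3, c = -2): c - b2/(4a) = -2 - 9/(-4) = -2 + 2.25 = 0.25, so the maximum value is 0.25 and the minimum value is -Infinity, which matches the expected output.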
Below is the implementation of the above approach:
C++
Java
Python3
C#
Javascript
// C++ implementation of the above approach#include <bits/stdc++.h>using namespace std; // Function to print the Maximum and Minimum// values of the quadratic functionvoid PrintMaxMinValue(double a, double b, double c){ // Calculate the value of second part double secondPart = c * 1.0 - (b * b / (4.0 * a)); // Print the values if (a > 0) { // Open upward parabola function cout << "Maxvalue = " << "Infinity\n"; cout << "Minvalue = " << secondPart; } else if (a < 0) { // Open downward parabola function cout << "Maxvalue = " << secondPart << "\n"; cout << "Minvalue = " << "-Infinity"; } else { // If a=0 then it is not a quadratic function cout << "Not a quadratic function\n"; }} // Driver codeint main(){ double a = -1, b = 3, c = -2; PrintMaxMinValue(a, b, c); return 0;}
// Java implementation of the above approachimport java.util.*; class GFG{ // Function to print the Maximum and Minimum // values of the quadratic function static void PrintMaxMinValue(double a, double b, double c) { // Calculate the value of second part double secondPart = c * 1.0 - (b * b / (4.0 * a)); // Print the values if (a > 0) { // Open upward parabola function System.out.print("Maxvalue = " + "Infinity\n"); System.out.print("Minvalue = " + secondPart); } else if (a < 0) { // Open downward parabola function System.out.print("Maxvalue = " + secondPart + "\n"); System.out.print("Minvalue = " + "-Infinity"); } else { // If a=0 then it is not a quadratic function System.out.print("Not a quadratic function\n"); } } // Driver code public static void main(String[] args) { double a = -1, b = 3, c = -2; PrintMaxMinValue(a, b, c); }} // This code is contributed by Rajput-Ji
# Python3 implementation of the above approach # Function to print the Maximum and Minimum# values of the quadratic functiondef PrintMaxMinValue(a, b, c) : # Calculate the value of second part secondPart = c * 1.0 - (b * b / (4.0 * a)); # Print the values if (a > 0) : # Open upward parabola function print("Maxvalue =", "Infinity"); print("Minvalue = ", secondPart); elif (a < 0) : # Open downward parabola function print("Maxvalue = ", secondPart); print("Minvalue =", "-Infinity"); else : # If a=0 then it is not a quadratic function print("Not a quadratic function"); # Driver codeif __name__ == "__main__" : a = -1; b = 3; c = -2; PrintMaxMinValue(a, b, c); # This code is contributed by AnkitRai01
// C# implementation of the above approachusing System; class GFG{ // Function to print the Maximum and Minimum // values of the quadratic function static void PrintMaxMinValue(double a, double b, double c) { // Calculate the value of second part double secondPart = c * 1.0 - (b * b / (4.0 * a)); // Print the values if (a > 0) { // Open upward parabola function Console.Write("Maxvalue = " + "Infinity\n"); Console.Write("Minvalue = " + secondPart); } else if (a < 0) { // Open downward parabola function Console.Write("Maxvalue = " + secondPart + "\n"); Console.Write("Minvalue = " + "-Infinity"); } else { // If a=0 then it is not a quadratic function Console.Write("Not a quadratic function\n"); } } // Driver code static public void Main () { double a = -1, b = 3, c = -2; PrintMaxMinValue(a, b, c); }} // This code is contributed by ajit.
<script> // Javascript implementation of the above approach // Function to print the Maximum and Minimum// values of the quadratic functionfunction PrintMaxMinValue(a, b, c){ // Calculate the value of second part var secondPart = c * 1.0 - (b * b / (4.0 * a)); // Print the values if (a > 0) { // Open upward parabola function document.write("Maxvalue = " + "Infinity" + "<br>"); document.write("Minvalue = " + secondPart); } else if (a < 0) { // Open downward parabola function document.write("Maxvalue = " + secondPart + "<br>"); document.write("Minvalue = " + "-Infinity"); } else { // If a=0 then it is not a quadratic function document.write("Not a quadratic function\n"); }} // Driver Codevar a = -1, b = 3, c = -2; PrintMaxMinValue(a, b, c); // This code is contributed by Ankita saini </script>
Maxvalue = 0.25
Minvalue = -Infinity
Time Complexity: O(1)
Auxiliary Space: O(1)
Rajput-Ji
jit_t
ankthon
Akanksha_Rai
ankita_saini
subhamkumarm348
Geometric
Mathematical
Mathematical
Geometric
| [
{
"code": null,
"e": 28,
"s": 0,
"text": "\n31 May, 2022"
},
{
"code": null,
"e": 176,
"s": 28,
"text": "Given a quadratic function ax2 + bx + c. Find the maximum and minimum value of the function possible when x is varied for all real values possible."
},
{
"code": null,
"e": 186,
"s": 176,
"text": "Examples:"
},
{
"code": null,
"e": 419,
"s": 186,
"text": "Input: a = 1, b = -4, c = 4\nOutput:\nMaxvalue = Infinity\nMinvalue = 0\nQuadratic function given is x2 -4x + 4\nAt x = 2, value of the function is equal to zero.\n\nInput: a = -1, b = 3, c = -2\nOutput:\nMaxvalue = 0.25\nMinvalue = -Infinity"
},
{
"code": null,
"e": 430,
"s": 419,
"text": "Approach: "
},
{
"code": null,
"e": 535,
"s": 430,
"text": " Q(x)=ax2 + bx + c.\n =a(x + b/(2a))2 + c-b2/(4a).\n first part second part"
},
{
"code": null,
"e": 644,
"s": 535,
"text": "The function is broken into two parts. The first part is a perfect square function. There can be two cases: "
},
{
"code": null,
"e": 1144,
"s": 644,
"text": "Case 1: If value of a is positive. The maximum value would be equal to Infinity.The minimum value of the function will come when the first part is equal to zero because the minimum value of a square function is zero.Case 2: If value of a is negative. The minimum value would be equal to -Infinity.Since a is negative, the task to maximize the negative square function. Again maximum value of a negative square function would be equal to zero as it would be a negative value for any other value of x."
},
{
"code": null,
"e": 1361,
"s": 1144,
"text": "Case 1: If value of a is positive. The maximum value would be equal to Infinity.The minimum value of the function will come when the first part is equal to zero because the minimum value of a square function is zero."
},
{
"code": null,
"e": 1407,
"s": 1361,
"text": "The maximum value would be equal to Infinity."
},
{
"code": null,
"e": 1544,
"s": 1407,
"text": "The minimum value of the function will come when the first part is equal to zero because the minimum value of a square function is zero."
},
{
"code": null,
"e": 1828,
"s": 1544,
"text": "Case 2: If value of a is negative. The minimum value would be equal to -Infinity.Since a is negative, the task to maximize the negative square function. Again maximum value of a negative square function would be equal to zero as it would be a negative value for any other value of x."
},
{
"code": null,
"e": 1875,
"s": 1828,
"text": "The minimum value would be equal to -Infinity."
},
{
"code": null,
"e": 2078,
"s": 1875,
"text": "Since a is negative, the task to maximize the negative square function. Again maximum value of a negative square function would be equal to zero as it would be a negative value for any other value of x."
},
{
"code": null,
"e": 2265,
"s": 2078,
"text": "The second part is a constant value for a given quadratic function and hence cannot change for any value of x. Hence, it will be added in both cases. Hence, the answer to the problem is:"
},
{
"code": null,
"e": 2392,
"s": 2265,
"text": "If a > 0,\n Maxvalue = Infinity\n Minvalue = c - b2 / (4a)\nIf a < 0,\n Maxvalue = c - b2 / (4a)\n Minvalue = -Infinity"
},
{
"code": null,
"e": 2444,
"s": 2392,
"text": "Below is the implementation of the above approach: "
},
{
"code": null,
"e": 2448,
"s": 2444,
"text": "C++"
},
{
"code": null,
"e": 2453,
"s": 2448,
"text": "Java"
},
{
"code": null,
"e": 2461,
"s": 2453,
"text": "Python3"
},
{
"code": null,
"e": 2464,
"s": 2461,
"text": "C#"
},
{
"code": null,
"e": 2475,
"s": 2464,
"text": "Javascript"
},
{
"code": "// C++ implementation of the above approach#include <bits/stdc++.h>using namespace std; // Function to print the Maximum and Minimum// values of the quadratic functionvoid PrintMaxMinValue(double a, double b, double c){ // Calculate the value of second part double secondPart = c * 1.0 - (b * b / (4.0 * a)); // Print the values if (a > 0) { // Open upward parabola function cout << \"Maxvalue = \" << \"Infinity\\n\"; cout << \"Minvalue = \" << secondPart; } else if (a < 0) { // Open downward parabola function cout << \"Maxvalue = \" << secondPart << \"\\n\"; cout << \"Minvalue = \" << \"-Infinity\"; } else { // If a=0 then it is not a quadratic function cout << \"Not a quadratic function\\n\"; }} // Driver codeint main(){ double a = -1, b = 3, c = -2; PrintMaxMinValue(a, b, c); return 0;}",
"e": 3377,
"s": 2475,
"text": null
},
{
"code": "// Java implementation of the above approachimport java.util.*; class GFG{ // Function to print the Maximum and Minimum // values of the quadratic function static void PrintMaxMinValue(double a, double b, double c) { // Calculate the value of second part double secondPart = c * 1.0 - (b * b / (4.0 * a)); // Print the values if (a > 0) { // Open upward parabola function System.out.print(\"Maxvalue = \" + \"Infinity\\n\"); System.out.print(\"Minvalue = \" + secondPart); } else if (a < 0) { // Open downward parabola function System.out.print(\"Maxvalue = \" + secondPart + \"\\n\"); System.out.print(\"Minvalue = \" + \"-Infinity\"); } else { // If a=0 then it is not a quadratic function System.out.print(\"Not a quadratic function\\n\"); } } // Driver code public static void main(String[] args) { double a = -1, b = 3, c = -2; PrintMaxMinValue(a, b, c); }} // This code is contributed by Rajput-Ji",
"e": 4521,
"s": 3377,
"text": null
},
{
"code": "# Python3 implementation of the above approach # Function to print the Maximum and Minimum# values of the quadratic functiondef PrintMaxMinValue(a, b, c) : # Calculate the value of second part secondPart = c * 1.0 - (b * b / (4.0 * a)); # Print the values if (a > 0) : # Open upward parabola function print(\"Maxvalue =\", \"Infinity\"); print(\"Minvalue = \", secondPart); elif (a < 0) : # Open downward parabola function print(\"Maxvalue = \", secondPart); print(\"Minvalue =\", \"-Infinity\"); else : # If a=0 then it is not a quadratic function print(\"Not a quadratic function\"); # Driver codeif __name__ == \"__main__\" : a = -1; b = 3; c = -2; PrintMaxMinValue(a, b, c); # This code is contributed by AnkitRai01",
"e": 5357,
"s": 4521,
"text": null
},
{
"code": "// C# implementation of the above approachusing System; class GFG{ // Function to print the Maximum and Minimum // values of the quadratic function static void PrintMaxMinValue(double a, double b, double c) { // Calculate the value of second part double secondPart = c * 1.0 - (b * b / (4.0 * a)); // Print the values if (a > 0) { // Open upward parabola function Console.Write(\"Maxvalue = \" + \"Infinity\\n\"); Console.Write(\"Minvalue = \" + secondPart); } else if (a < 0) { // Open downward parabola function Console.Write(\"Maxvalue = \" + secondPart + \"\\n\"); Console.Write(\"Minvalue = \" + \"-Infinity\"); } else { // If a=0 then it is not a quadratic function Console.Write(\"Not a quadratic function\\n\"); } } // Driver code static public void Main () { double a = -1, b = 3, c = -2; PrintMaxMinValue(a, b, c); }} // This code is contributed by ajit.",
"e": 6465,
"s": 5357,
"text": null
},
{
"code": "<script> // Javascript implementation of the above approach // Function to print the Maximum and Minimum// values of the quadratic functionfunction PrintMaxMinValue(a, b, c){ // Calculate the value of second part var secondPart = c * 1.0 - (b * b / (4.0 * a)); // Print the values if (a > 0) { // Open upward parabola function document.write(\"Maxvalue = \" + \"Infinity\" + \"<br>\"); document.write(\"Minvalue = \" + secondPart); } else if (a < 0) { // Open downward parabola function document.write(\"Maxvalue = \" + secondPart + \"<br>\"); document.write(\"Minvalue = \" + \"-Infinity\"); } else { // If a=0 then it is not a quadratic function document.write(\"Not a quadratic function\\n\"); }} // Driver Codevar a = -1, b = 3, c = -2; PrintMaxMinValue(a, b, c); // This code is contributed by Ankita saini </script>",
"e": 7484,
"s": 6465,
"text": null
},
{
"code": null,
"e": 7521,
"s": 7484,
"text": "Maxvalue = 0.25\nMinvalue = -Infinity"
},
{
"code": null,
"e": 7545,
"s": 7523,
"text": "Time Complexity: O(1)"
},
{
"code": null,
"e": 7567,
"s": 7545,
"text": "Auxiliary Space: O(1)"
},
{
"code": null,
"e": 7577,
"s": 7567,
"text": "Rajput-Ji"
},
{
"code": null,
"e": 7583,
"s": 7577,
"text": "jit_t"
},
{
"code": null,
"e": 7591,
"s": 7583,
"text": "ankthon"
},
{
"code": null,
"e": 7604,
"s": 7591,
"text": "Akanksha_Rai"
},
{
"code": null,
"e": 7617,
"s": 7604,
"text": "ankita_saini"
},
{
"code": null,
"e": 7633,
"s": 7617,
"text": "subhamkumarm348"
},
{
"code": null,
"e": 7643,
"s": 7633,
"text": "Geometric"
},
{
"code": null,
"e": 7656,
"s": 7643,
"text": "Mathematical"
},
{
"code": null,
"e": 7669,
"s": 7656,
"text": "Mathematical"
},
{
"code": null,
"e": 7679,
"s": 7669,
"text": "Geometric"
},
{
"code": null,
"e": 7777,
"s": 7679,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 7847,
"s": 7777,
"text": "Maximum Manhattan distance between a distinct pair from N coordinates"
},
{
"code": null,
"e": 7909,
"s": 7847,
"text": "Area of the largest rectangle possible from given coordinates"
},
{
"code": null,
"e": 7950,
"s": 7909,
"text": "Total area of two overlapping rectangles"
},
{
"code": null,
"e": 8011,
"s": 7950,
"text": "Equation of circle when three points on the circle are given"
},
{
"code": null,
"e": 8099,
"s": 8011,
"text": "Maximum distance between two points in coordinate plane using Rotating Caliper's Method"
},
{
"code": null,
"e": 8129,
"s": 8099,
"text": "Program for Fibonacci numbers"
},
{
"code": null,
"e": 8172,
"s": 8129,
"text": "Set in C++ Standard Template Library (STL)"
},
{
"code": null,
"e": 8232,
"s": 8172,
"text": "Write a program to print all permutations of a given string"
},
{
"code": null,
"e": 8247,
"s": 8232,
"text": "C++ Data Types"
}
]
|
TimeSpan.Compare() Method in C# | 25 Jan, 2022
This method is used to compare two TimeSpan values and returns an integer value which indicates whether the first value is shorter than, equal to, or longer than the second value.
Syntax: public static int Compare (TimeSpan t1, TimeSpan t2);
Parameters:
t1: Specifies the first time interval that will be compared.
t2: Specifies the second time interval that will be compared.
Return Value:
-1: If t1 is shorter than t2.
0: If t1 is equal to t2.
1: If t1 is longer than t2.
Below programs illustrate the use of the TimeSpan.Compare(TimeSpan, TimeSpan) method.
Example 1:
csharp
// C# program to demonstrate the
// TimeSpan.Compare(TimeSpan,
// TimeSpan) Method
using System;

class GFG {

    // Main Method
    public static void Main()
    {
        // creating the TimeSpans
        TimeSpan t1 = new TimeSpan(3, 22, 35, 33);
        TimeSpan t2 = new TimeSpan(1, 11, 15, 16);

        if (TimeSpan.Compare(t1, t2) == 1)
            Console.Write("t1 is greater than t2");
        else if (TimeSpan.Compare(t1, t2) == 0)
            Console.Write("t1 is equal to t2");
        else
            Console.Write("t2 is greater than t1");
    }
}
t1 is greater than t2
Example 2:
csharp
// C# program to demonstrate the
// TimeSpan.Compare(TimeSpan,
// TimeSpan) Method
using System;

class GFG {

    // Main Method
    public static void Main()
    {
        // creating the TimeSpans
        TimeSpan t1 = new TimeSpan(3, 22, 35, 33);
        TimeSpan t2 = new TimeSpan(4, 31, 15, 10);

        if (TimeSpan.Compare(t1, t2) == 1)
            Console.Write("t1 is greater than t2");
        else if (TimeSpan.Compare(t1, t2) == 0)
            Console.Write("t1 is equal to t2");
        else
            Console.Write("t2 is greater than t1");
    }
}
t2 is greater than t1
Example 3:
csharp
// C# program to demonstrate the
// TimeSpan.Compare(TimeSpan,
// TimeSpan) Method
using System;

class GFG {

    // Main Method
    public static void Main()
    {
        // creating the TimeSpans
        TimeSpan t1 = new TimeSpan(3, 22, 35, 33);
        TimeSpan t2 = new TimeSpan(3, 22, 35, 33);

        if (TimeSpan.Compare(t1, t2) == 1)
            Console.Write("t1 is greater than t2");
        else if (TimeSpan.Compare(t1, t2) == 0)
            Console.Write("t1 is equal to t2");
        else
            Console.Write("t2 is greater than t1");
    }
}
t1 is equal to t2
Reference:
https://docs.microsoft.com/en-us/dotnet/api/system.timespan.compare?view=netframework-4.7.2
saurabh1990aror
CSharp-method
CSharp-TimeSpan-Struct
C#
C# Dictionary with examples
Introduction to .NET Framework
C# | Delegates
C# | Multiple inheritance using interfaces
Differences Between .NET Core and .NET Framework
C# | Method Overriding
C# | Data Types
C# | String.IndexOf( ) Method | Set - 1
C# | Constructors
C# | Class and Object | [
{
"code": null,
"e": 28,
"s": 0,
"text": "\n25 Jan, 2022"
},
{
"code": null,
"e": 209,
"s": 28,
"text": "This method is used to compare two TimeSpan values and returns an integer value which indicates whether the first value is shorter than, equal to, or longer than the second value. "
},
{
"code": null,
"e": 503,
"s": 209,
"text": "Syntax: public static int Compare (TimeSpan t1, TimeSpan t2);Parameters: t1: Specifies the first time interval that will be compared. t2: Specifies the second time interval that will be compared.Return Value: -1: If t1 is shorter than t2. 0: If t1 is equal to t2. 1: If t1 is longer than t2. "
},
{
"code": null,
"e": 596,
"s": 503,
"text": "Below programs illustrate the use of TimeSpan.Compare(TimeSpan, TimeSpan) Method:Example 1: "
},
{
"code": null,
"e": 603,
"s": 596,
"text": "csharp"
},
{
"code": "// C# program to demonstrate the// TimeSpan.Compare(TimeSpan,// TimeSpan) Methodusing System; class GFG { // Main Method public static void Main() { // creating the TimeSpans TimeSpan t1 = new TimeSpan(3, 22, 35, 33); TimeSpan t2 = new TimeSpan(1, 11, 15, 16); if (TimeSpan.Compare(t1, t2) == 1) Console.Write(\"t1 is greater than t2\"); else if (TimeSpan.Compare(t1, t2) == 0) Console.Write(\"t1 is equal to t2\"); else Console.Write(\"t2 is greater than t1\"); }}",
"e": 1155,
"s": 603,
"text": null
},
{
"code": null,
"e": 1177,
"s": 1155,
"text": "t1 is greater than t2"
},
{
"code": null,
"e": 1192,
"s": 1179,
"text": "Example 2: "
},
{
"code": null,
"e": 1199,
"s": 1192,
"text": "csharp"
},
{
"code": "// C# program to demonstrate the// TimeSpan.Compare(TimeSpan,// TimeSpan) Methodusing System; class GFG { // Main Method public static void Main() { // creating the TimeSpans TimeSpan t1 = new TimeSpan(3, 22, 35, 33); TimeSpan t2 = new TimeSpan(4, 31, 15, 10); if (TimeSpan.Compare(t1, t2) == 1) Console.Write(\"t1 is greater than t2\"); else if (TimeSpan.Compare(t1, t2) == 0) Console.Write(\"t1 is equal to t2\"); else Console.Write(\"t2 is greater than t1\"); }}",
"e": 1751,
"s": 1199,
"text": null
},
{
"code": null,
"e": 1773,
"s": 1751,
"text": "t2 is greater than t1"
},
{
"code": null,
"e": 1787,
"s": 1775,
"text": "Example 3: "
},
{
"code": null,
"e": 1794,
"s": 1787,
"text": "csharp"
},
{
"code": "// C# program to demonstrate the// TimeSpan.Compare(TimeSpan,// TimeSpan) Methodusing System; class GFG { // Main Method public static void Main() { // creating the TimeSpans TimeSpan t1 = new TimeSpan(3, 22, 35, 33); TimeSpan t2 = new TimeSpan(3, 22, 35, 33); if (TimeSpan.Compare(t1, t2) == 1) Console.Write(\"t1 is greater than t2\"); else if (TimeSpan.Compare(t1, t2) == 0) Console.Write(\"t1 is equal to t2\"); else Console.Write(\"t2 is greater than t1\"); }}",
"e": 2346,
"s": 1794,
"text": null
},
{
"code": null,
"e": 2364,
"s": 2346,
"text": "t1 is equal to t2"
},
{
"code": null,
"e": 2379,
"s": 2366,
"text": "Reference: "
},
{
"code": null,
"e": 2471,
"s": 2379,
"text": "https://docs.microsoft.com/en-us/dotnet/api/system.timespan.compare?view=netframework-4.7.2"
},
{
"code": null,
"e": 2489,
"s": 2473,
"text": "saurabh1990aror"
},
{
"code": null,
"e": 2503,
"s": 2489,
"text": "CSharp-method"
},
{
"code": null,
"e": 2526,
"s": 2503,
"text": "CSharp-TimeSpan-Struct"
},
{
"code": null,
"e": 2529,
"s": 2526,
"text": "C#"
},
{
"code": null,
"e": 2627,
"s": 2529,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 2655,
"s": 2627,
"text": "C# Dictionary with examples"
},
{
"code": null,
"e": 2686,
"s": 2655,
"text": "Introduction to .NET Framework"
},
{
"code": null,
"e": 2701,
"s": 2686,
"text": "C# | Delegates"
},
{
"code": null,
"e": 2744,
"s": 2701,
"text": "C# | Multiple inheritance using interfaces"
},
{
"code": null,
"e": 2793,
"s": 2744,
"text": "Differences Between .NET Core and .NET Framework"
},
{
"code": null,
"e": 2816,
"s": 2793,
"text": "C# | Method Overriding"
},
{
"code": null,
"e": 2832,
"s": 2816,
"text": "C# | Data Types"
},
{
"code": null,
"e": 2872,
"s": 2832,
"text": "C# | String.IndexOf( ) Method | Set - 1"
},
{
"code": null,
"e": 2890,
"s": 2872,
"text": "C# | Constructors"
}
]
|
How to validate an Email using PHP? | 13 Feb, 2019
This article covers different methods to validate an email address in PHP, using regular expressions and the built-in email validation filter. The input string is taken from the user and matched against a predefined regular expression; if the input matches the expression, the check returns true and the script proceeds further.
Method 1: Email validation using regular expression.
<?php
// PHP program to validate email

// Function to validate email using regular expression
function email_validation($str) {
    return (!preg_match("^[_a-z0-9-]+(\.[_a-z0-9-]+)*@[a-z0-9-]+(\.[a-z0-9-]+)*(\.[a-z]{2,3})$^", $str)) ? FALSE : TRUE;
}

// Function call
if(!email_validation("[email protected]")) {
    echo "Invalid email address.";
}
else {
    echo "Valid email address.";
}
?>
Valid email address.
Explanation: In the above example, the email is passed to the user-defined function email_validation( $email ), which matches it against the regular expression using the predefined function preg_match(). This function matches the whole input with the regular expression and returns True if a match is found, otherwise it returns False.
Method 2: Email validation using filter_var() method.
<?php

// Declare variable and initialize
// it to email
$email = "[email protected]";

// Validate email
if (filter_var($email, FILTER_VALIDATE_EMAIL)) {
    echo("$email is a valid email address");
} else {
    echo("$email is not a valid email address");
}
?>
[email protected] is a valid email address
Explanation: In the above example, the input email address is passed to the predefined function filter_var(), which takes two parameters: the email to check and the type of filter to apply. The function filters the email and returns the filtered email on success, or false if the email is not valid.
Method 3: Email validation using FILTER_SANITIZE_EMAIL filter.
<?php

// Declare variable and store it to email
$email = "[email protected]";

// Remove all illegal characters from email
$email = filter_var($email, FILTER_SANITIZE_EMAIL);

// Validate Email
if (filter_var($email, FILTER_VALIDATE_EMAIL)) {
    echo("$email is a valid email address");
} else {
    echo("$email is not a valid email address");
}
?>
[email protected] is a valid email address
Explanation: In the above example, the FILTER_SANITIZE_EMAIL filter is used first to remove all characters that are not allowed in an email address, and the FILTER_VALIDATE_EMAIL filter is then used to validate the sanitized email.
Picked
Web-Programs
PHP
PHP Programs
Web Technologies
PHP
How to Insert Form Data into Database using PHP ?
How to convert array to string in PHP ?
PHP | Converting string to Date and DateTime
Comparing two dates in PHP
How to receive JSON POST with PHP ?
How to Insert Form Data into Database using PHP ?
How to convert array to string in PHP ?
How to call PHP function on the click of a Button ?
Comparing two dates in PHP
How to get parameters from a URL string in PHP? | [
{
"code": null,
"e": 54,
"s": 26,
"text": "\n13 Feb, 2019"
},
{
"code": null,
"e": 402,
"s": 54,
"text": "This article contains different methods to validate an email address in PHP. It uses regular expressions and inbuilt email validation function. The input string is taken from the user and matches it with the predefined regular expressions and if the regular expression and input string found to be matched than it returns true and proceed further."
},
{
"code": null,
"e": 455,
"s": 402,
"text": "Method 1: Email validation using regular expression."
},
{
"code": "<?php// PHP program to validate email // Function to validate email using regular expressionfunction email_validation($str) { return (!preg_match(\"^[_a-z0-9-]+(\\.[_a-z0-9-]+)*@[a-z0-9-]+(\\.[a-z0-9-]+)*(\\.[a-z]{2,3})$^\", $str)) ? FALSE : TRUE;} // Function callif(!email_validation(\"[email protected]\")) { echo \"Invalid email address.\";}else { echo \"Valid email address.\";} ?>",
"e": 857,
"s": 455,
"text": null
},
{
"code": null,
"e": 879,
"s": 857,
"text": "Valid email address.\n"
},
{
"code": null,
"e": 1227,
"s": 879,
"text": "Explanation: In the above example, passing the email to the user defined function email_validation( $email ), which use this example and matches with the regular expression by using the predefined function preg_match(). This predefined function match the whole input with regular expression and returns True if match found otherwise returns False."
},
{
"code": null,
"e": 1281,
"s": 1227,
"text": "Method 2: Email validation using filter_var() method."
},
{
"code": "<?php // Declare variable and initialize// it to email$email = \"[email protected]\"; // Validate emailif (filter_var($email, FILTER_VALIDATE_EMAIL)) { echo(\"$email is a valid email address\");} else { echo(\"$email is not a valid email address\");} ?>",
"e": 1545,
"s": 1281,
"text": null
},
{
"code": null,
"e": 1596,
"s": 1545,
"text": "[email protected] is a valid email address\n"
},
{
"code": null,
"e": 1841,
"s": 1596,
"text": "Explanation: In the above example, passing the input email address to the predefined function filter_var(), which takes two parameters as input email and second is type of email filter. This function filters the email and returns true or false."
},
{
"code": null,
"e": 1904,
"s": 1841,
"text": "Method 3: Email validation using FILTER_SANITIZE_EMAIL filter."
},
{
"code": "<?php // Declare variable and store it to email$email = \"[email protected]\"; // Remove all illegal characters from email$email = filter_var($email, FILTER_SANITIZE_EMAIL); // Validate Emailif (filter_var($email, FILTER_VALIDATE_EMAIL)) { echo(\"$email is a valid email address\");} else { echo(\"$email is not a valid email address\");} ?> ",
"e": 2262,
"s": 1904,
"text": null
},
{
"code": null,
"e": 2317,
"s": 2262,
"text": "[email protected] is a valid email address\n"
},
{
"code": null,
"e": 2483,
"s": 2317,
"text": "Explanation: In the above example, use FILTER_SANITIZE_EMAIL filter to remove all unsupported characters and then use FILTER_VALIDATE_EMAIL filter to validate email."
},
{
"code": null,
"e": 2490,
"s": 2483,
"text": "Picked"
},
{
"code": null,
"e": 2503,
"s": 2490,
"text": "Web-Programs"
},
{
"code": null,
"e": 2507,
"s": 2503,
"text": "PHP"
},
{
"code": null,
"e": 2520,
"s": 2507,
"text": "PHP Programs"
},
{
"code": null,
"e": 2537,
"s": 2520,
"text": "Web Technologies"
},
{
"code": null,
"e": 2541,
"s": 2537,
"text": "PHP"
},
{
"code": null,
"e": 2639,
"s": 2541,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 2689,
"s": 2639,
"text": "How to Insert Form Data into Database using PHP ?"
},
{
"code": null,
"e": 2729,
"s": 2689,
"text": "How to convert array to string in PHP ?"
},
{
"code": null,
"e": 2774,
"s": 2729,
"text": "PHP | Converting string to Date and DateTime"
},
{
"code": null,
"e": 2801,
"s": 2774,
"text": "Comparing two dates in PHP"
},
{
"code": null,
"e": 2837,
"s": 2801,
"text": "How to receive JSON POST with PHP ?"
},
{
"code": null,
"e": 2887,
"s": 2837,
"text": "How to Insert Form Data into Database using PHP ?"
},
{
"code": null,
"e": 2927,
"s": 2887,
"text": "How to convert array to string in PHP ?"
},
{
"code": null,
"e": 2979,
"s": 2927,
"text": "How to call PHP function on the click of a Button ?"
},
{
"code": null,
"e": 3006,
"s": 2979,
"text": "Comparing two dates in PHP"
}
]
|
Find common values between two NumPy arrays | 29 Aug, 2020
In this article, we are going to discuss how to find out the common values between 2 arrays. To find the common values, we can use the numpy.intersect1d(), which will do the intersection operation and return the common values between the 2 arrays in sorted order.
Syntax: numpy.intersect1d(arr1, arr2, assume_unique = False, return_indices = False)
Parameters :
arr1, arr2 : [array_like] Input arrays.
assume_unique : [bool] If True, the input arrays are both assumed to be unique, which can speed up the calculation. Default is False.
return_indices : [bool] If True, the indices which correspond to the intersection of the two arrays are returned. The first instance of a value is used if there are multiple. Default is False.
Return : [ndarray] Sorted 1D array of common and unique elements.
Example #1: Finding common values between 1d arrays
Python3
import numpy as np

# create 2 arrays
a = np.array([2, 4, 7, 1, 4])
b = np.array([7, 2, 9, 0, 5])

# Display the arrays
print("Original arrays", a, ' ', b)

# use the np.intersect1d method
c = np.intersect1d(a, b)

# Display result
print("Common values", c)
Output:
Original arrays [2 4 7 1 4] [7 2 9 0 5]
Common values [2 7]
Example #2: Finding common values between n-dimensional arrays
Python3
import numpy as np

# create 2 arrays
a = np.array([2,4,7,1,4,9]).reshape(3,2)
b = np.array([7,2,9,0,5,3]).reshape(2,3)

# Display the arrays
print("Original arrays")
print(a)
print(b)

# use the np.intersect1d method
c = np.intersect1d(a,b)

# Display result
print("Common values",c)
Output:
Original arrays
[[2 4]
[7 1]
[4 9]]
[[7 2 9]
[0 5 3]]
Common values [2 7 9]
Note: No matter what dimension arrays are passed, the common values will be returned in a 1d flattened manner
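The assume_unique and return_indices parameters listed above can also be exercised in practice. The following short sketch (added here for illustration, reusing the arrays from Example #1) shows how return_indices=True additionally reports where each common value first occurs in the two inputs; the expected values are given as comments.
Python3
import numpy as np

a = np.array([2, 4, 7, 1, 4])
b = np.array([7, 2, 9, 0, 5])

# return_indices=True also returns the index of the first
# occurrence of each common value in a and in b
common, idx_a, idx_b = np.intersect1d(a, b, return_indices=True)

print(common)   # [2 7]
print(idx_a)    # [0 2]
print(idx_b)    # [1 0]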
Python numpy-arrayManipulation
Python-numpy
Python
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here. | [
{
"code": null,
"e": 28,
"s": 0,
"text": "\n29 Aug, 2020"
},
{
"code": null,
"e": 292,
"s": 28,
"text": "In this article, we are going to discuss how to find out the common values between 2 arrays. To find the common values, we can use the numpy.intersect1d(), which will do the intersection operation and return the common values between the 2 arrays in sorted order."
},
{
"code": null,
"e": 377,
"s": 292,
"text": "Syntax: numpy.intersect1d(arr1, arr2, assume_unique = False, return_indices = False)"
},
{
"code": null,
"e": 754,
"s": 377,
"text": "Parameters :arr1, arr2 : [array_like] Input arrays.assume_unique : [bool] If True, the input arrays are both assumed to be unique, which can speed up the calculation. Default is False.return_indices : [bool] If True, the indices which correspond to the intersection of the two arrays are returned. The first instance of a value is used if there are multiple. Default is False."
},
{
"code": null,
"e": 820,
"s": 754,
"text": "Return : [ndarray] Sorted 1D array of common and unique elements."
},
{
"code": null,
"e": 872,
"s": 820,
"text": "Example #1: Finding common values between 1d arrays"
},
{
"code": null,
"e": 880,
"s": 872,
"text": "Python3"
},
{
"code": "import numpy as np # create 2 arraysa = np.array([2, 4, 7, 1, 4])b = np.array([7, 2, 9, 0, 5]) # Display the arraysprint(\"Original arrays\", a, ' ', b) # use the np.intersect1d methodc = np.intersect1d(a, b) # Display resultprint(\"Common values\", c)",
"e": 1135,
"s": 880,
"text": null
},
{
"code": null,
"e": 1143,
"s": 1135,
"text": "Output:"
},
{
"code": null,
"e": 1206,
"s": 1143,
"text": "Original arrays [2 4 7 1 4] [7 2 9 0 5]\nCommon values [2 7]\n"
},
{
"code": null,
"e": 1269,
"s": 1206,
"text": "Example #2: Finding common values between n-dimensional arrays"
},
{
"code": null,
"e": 1277,
"s": 1269,
"text": "Python3"
},
{
"code": "import numpy as np # create 2 arraysa = np.array([2,4,7,1,4,9]).reshape(3,2)b = np.array([7,2,9,0,5,3]).reshape(2,3) # Display the arraysprint(\"Original arrays\")print(a)print(b) # use the np.intersect1d methodc = np.intersect1d(a,b) # Display resultprint(\"Common values\",c)",
"e": 1557,
"s": 1277,
"text": null
},
{
"code": null,
"e": 1565,
"s": 1557,
"text": "Output:"
},
{
"code": null,
"e": 1645,
"s": 1565,
"text": "Original arrays\n[[2 4]\n [7 1]\n [4 9]]\n[[7 2 9]\n [0 5 3]]\nCommon values [2 7 9]\n"
},
{
"code": null,
"e": 1755,
"s": 1645,
"text": "Note: No matter what dimension arrays are passed, the common values will be returned in a 1d flattened manner"
},
{
"code": null,
"e": 1786,
"s": 1755,
"text": "Python numpy-arrayManipulation"
},
{
"code": null,
"e": 1799,
"s": 1786,
"text": "Python-numpy"
},
{
"code": null,
"e": 1806,
"s": 1799,
"text": "Python"
}
]
|
Optional ofNullable() method in Java with examples | 30 Jul, 2019
The ofNullable() method of java.util.Optional class in Java is used to get an instance of this Optional class with the specified value of the specified type. If the specified value is null, then this method returns an empty instance of the Optional class.
Syntax:
public static <T>
Optional<T> ofNullable(T value)
Parameters: This method accepts value as parameter of type T to create an Optional instance with this value. It can be null.
Return value: This method returns an instance of this Optional class with the specified value of the specified type. If the specified value is null, then this method returns an empty instance of the Optional class.
Below programs illustrate the ofNullable() method.
Program 1:
// Java program to demonstrate
// Optional.ofNullable() method

import java.util.*;

public class GFG {

    public static void main(String[] args)
    {
        // create a Optional
        Optional<Integer> op1
            = Optional.ofNullable(9455);

        // print value
        System.out.println("Optional 1: " + op1);
    }
}
Optional 1: Optional[9455]
Program 2:
// Java program to demonstrate
// Optional.ofNullable() method

import java.util.*;

public class GFG {

    public static void main(String[] args)
    {
        // create a Optional
        Optional<String> op2
            = Optional.ofNullable(null);

        // print value
        System.out.println("Optional 2: " + op2);
    }
}
Optional 2: Optional.empty
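As a further illustration (not part of the original examples), the sketch below contrasts ofNullable() with Optional.of(), which would throw a NullPointerException for the same null input; this difference is the main reason to choose ofNullable() when a value may be absent. The variable names are arbitrary.
// Java sketch: ofNullable() vs. of() for a possibly-null value

import java.util.*;

public class GFG {

    public static void main(String[] args)
    {
        String value = null;

        // ofNullable() tolerates null and yields an empty Optional
        Optional<String> op = Optional.ofNullable(value);

        System.out.println(op.isPresent());        // false
        System.out.println(op.orElse("fallback")); // fallback

        // Optional.of(value) would instead throw a
        // NullPointerException for this null input
    }
}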
Reference: https://docs.oracle.com/javase/9/docs/api/java/util/Optional.html#ofNullable-T-
Java - util package
Java-Functions
Java-Optional
Java
Java
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here. | [
{
"code": null,
"e": 28,
"s": 0,
"text": "\n30 Jul, 2019"
},
{
"code": null,
"e": 284,
"s": 28,
"text": "The ofNullable() method of java.util.Optional class in Java is used to get an instance of this Optional class with the specified value of the specified type. If the specified value is null, then this method returns an empty instance of the Optional class."
},
{
"code": null,
"e": 292,
"s": 284,
"text": "Syntax:"
},
{
"code": null,
"e": 345,
"s": 292,
"text": "public static <T>\n Optional<T> ofNullable(T value)\n"
},
{
"code": null,
"e": 470,
"s": 345,
"text": "Parameters: This method accepts value as parameter of type T to create an Optional instance with this value. It can be null."
},
{
"code": null,
"e": 685,
"s": 470,
"text": "Return value: This method returns an instance of this Optional class with the specified value of the specified type. If the specified value is null, then this method returns an empty instance of the Optional class."
},
{
"code": null,
"e": 742,
"s": 685,
"text": "Below programs illustrate ofNullable() method:Program 1:"
},
{
"code": "// Java program to demonstrate// Optional.ofNullable() method import java.util.*; public class GFG { public static void main(String[] args) { // create a Optional Optional<Integer> op1 = Optional.ofNullable(9455); // print value System.out.println(\"Optional 1: \" + op1); }}",
"e": 1098,
"s": 742,
"text": null
},
{
"code": null,
"e": 1126,
"s": 1098,
"text": "Optional 1: Optional[9455]\n"
},
{
"code": null,
"e": 1137,
"s": 1126,
"text": "Program 2:"
},
{
"code": "// Java program to demonstrate// Optional.ofNullable() method import java.util.*; public class GFG { public static void main(String[] args) { // create a Optional Optional<String> op2 = Optional.ofNullable(null); // print value System.out.println(\"Optional 2: \" + op2); }}",
"e": 1490,
"s": 1137,
"text": null
},
{
"code": null,
"e": 1518,
"s": 1490,
"text": "Optional 2: Optional.empty\n"
},
{
"code": null,
"e": 1609,
"s": 1518,
"text": "Reference: https://docs.oracle.com/javase/9/docs/api/java/util/Optional.html#ofNullable-T-"
},
{
"code": null,
"e": 1629,
"s": 1609,
"text": "Java - util package"
},
{
"code": null,
"e": 1644,
"s": 1629,
"text": "Java-Functions"
},
{
"code": null,
"e": 1658,
"s": 1644,
"text": "Java-Optional"
},
{
"code": null,
"e": 1663,
"s": 1658,
"text": "Java"
},
{
"code": null,
"e": 1668,
"s": 1663,
"text": "Java"
}
]
|
Python Program to convert complex numbers to Polar coordinates | 01 Oct, 2020
Before starting with the program, let’s see the basics of Polar Coordinates and then use Python’s cmath module and the built-in abs() function to do the conversion. Polar coordinates are just a different way of representing Cartesian coordinates or Complex Numbers. A complex number z is defined as z = x + yj.
It is completely determined by its real part x and imaginary part y. Here, j is the imaginary unit.
The polar coordinates (r, φ) are completely determined by the modulus r and the phase angle φ.
Where,
r: Distance from z to the origin, i.e., r = √(x² + y²)
φ: Counterclockwise angle measured from the positive x-axis to the line segment that joins z to the origin.
The conversion of complex numbers to polar co-ordinates is explained below with examples.
Python’s cmath module provides access to the mathematical functions for complex numbers. It contains several functions that are used for converting coordinates from one domain to another.
Out of them, some are explained as-
1. cmath.polar(x):
Return the representation of x in polar coordinates. cmath.polar() function is used to convert a complex number to polar coordinates.
Python3
# Python code to implement
# the polar() function

# importing "cmath"
# for mathematical operations
import cmath

# using cmath.polar() method
num = cmath.polar(1)
print(num)
(1.0, 0.0)
2. cmath.phase (z): This method returns the phase of complex number z(also known as the argument of z).
Python3
import cmath

x = -1.0
y = 0.0
z = complex(x, y)

# printing phase of a complex number using phase()
print("The phase of complex number is : ", end="")
print(cmath.phase(z))
The phase of complex number is : 3.141592653589793
abs(): This method returns the modulus (absolute value) of complex number z.
Python3
num1 = 3 + 4j
print('Absolute value of 3 + 4j is:', abs(num1))
Absolute value of 3 + 4j is: 5.0
You are given a complex number z and your task is to convert it to polar coordinates.
Let us consider a complex number as 1+5j, and we need to convert it to Polar coordinates.
Python3
import cmath

c = complex(1 + 5j)
print(abs(c))
5.0990195135927845
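Note that the abs() call above only yields the modulus r. As an addition to the article, the sketch below (using the same number 1+5j) uses cmath.polar() to obtain the full (r, φ) pair in one call and cmath.rect() to convert back; the phase value shown in the comment is approximate.
Python3
import cmath

c = complex(1 + 5j)

# modulus and phase in a single call
r, phi = cmath.polar(c)

print(r)                   # 5.0990195135927845
print(phi)                 # about 1.3734 radians
print(cmath.rect(r, phi))  # back to approximately (1+5j)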
Python Cmath-library
Python
Python Programs
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here. | [
{
"code": null,
"e": 54,
"s": 26,
"text": "\n01 Oct, 2020"
},
{
"code": null,
"e": 325,
"s": 54,
"text": "Before starting with the program, let’s see the basics of Polar Coordinates and then use Python’s cmath and abs module to convert it. Polar coordinates are just a different way of representing Cartesian coordinates or Complex Numbers. A complex number z is defined as :"
},
{
"code": null,
"e": 425,
"s": 325,
"text": "It is completely determined by its real part x and imaginary part y. Here, j is the imaginary unit."
},
{
"code": null,
"e": 512,
"s": 425,
"text": "The polar coordinates (r , φ) is completely determined by modulus r and phase angle φ."
},
{
"code": null,
"e": 519,
"s": 512,
"text": "Where,"
},
{
"code": null,
"e": 556,
"s": 519,
"text": "r: Distance from z to origin, i.e., "
},
{
"code": null,
"e": 666,
"s": 556,
"text": " φ: Counterclockwise angle measured from the positive x-axis to the line segment that joins z to the origin."
},
{
"code": null,
"e": 757,
"s": 666,
"text": "The conversion of complex numbers to polar co-ordinates are explained below with examples."
},
{
"code": null,
"e": 946,
"s": 757,
"text": "Python’s cmath module provides access to the mathematical functions for complex numbers. It contains several functions that are used for converting coordinates from one domain to another. "
},
{
"code": null,
"e": 982,
"s": 946,
"text": "Out of them, some are explained as-"
},
{
"code": null,
"e": 1001,
"s": 982,
"text": "1. cmath.polar(x):"
},
{
"code": null,
"e": 1136,
"s": 1001,
"text": "Return the representation of x in polar coordinates. cmath.polar() function is used to convert a complex number to polar coordinates. "
},
{
"code": null,
"e": 1144,
"s": 1136,
"text": "Python3"
},
{
"code": "# Python code to implement # the polar()function # importing \"cmath\" # for mathematical operations import cmath # using cmath.polar() method num = cmath.polar(1) print(num)",
"e": 1333,
"s": 1144,
"text": null
},
{
"code": null,
"e": 1345,
"s": 1333,
"text": "(1.0, 0.0)\n"
},
{
"code": null,
"e": 1449,
"s": 1345,
"text": "2. cmath.phase (z): This method returns the phase of complex number z(also known as the argument of z)."
},
{
"code": null,
"e": 1457,
"s": 1449,
"text": "Python3"
},
{
"code": "import cmath x = -1.0y = 0.0z = complex(x,y); # printing phase of a complex number using phase() print (\"The phase of complex number is : \",end=\"\") print (cmath.phase(z)) ",
"e": 1634,
"s": 1457,
"text": null
},
{
"code": null,
"e": 1686,
"s": 1634,
"text": "The phase of complex number is : 3.141592653589793\n"
},
{
"code": null,
"e": 1763,
"s": 1686,
"text": "abs(): This method returns the modulus (absolute value) of complex number z."
},
{
"code": null,
"e": 1771,
"s": 1763,
"text": "Python3"
},
{
"code": "num1 = 3 + 4jprint('Absolute value of 3 + 4j is:', abs(num1))",
"e": 1833,
"s": 1771,
"text": null
},
{
"code": null,
"e": 1867,
"s": 1833,
"text": "Absolute value of 3 + 4j is: 5.0\n"
},
{
"code": null,
"e": 1953,
"s": 1867,
"text": "You are given a complex number z and your task is to convert it to polar coordinates."
},
{
"code": null,
"e": 2044,
"s": 1953,
"text": "Let us consider a complex number as 1+5j, and we need to convert it to Polar coordinates. "
},
{
"code": null,
"e": 2052,
"s": 2044,
"text": "Python3"
},
{
"code": "import cmath c = complex(1+5j)print(abs(c))",
"e": 2099,
"s": 2052,
"text": null
},
{
"code": null,
"e": 2119,
"s": 2099,
"text": "5.0990195135927845\n"
},
{
"code": null,
"e": 2140,
"s": 2119,
"text": "Python Cmath-library"
},
{
"code": null,
"e": 2147,
"s": 2140,
"text": "Python"
},
{
"code": null,
"e": 2163,
"s": 2147,
"text": "Python Programs"
}
]
|
Replace Green Screen using OpenCV- Python | 29 Aug, 2021
Prerequisites: OpenCV Python Tutorial
OpenCV (Open Source Computer Vision) is a computer vision library that contains various functions to perform operations on pictures or videos. The library is cross-platform, that is, it is available for multiple programming languages such as Python, C++, etc. Green Screen removal is used in the VFX industry for changing scenes. Here, we will use OpenCV-Python to do the same.
Import all necessary libraries
Load the images or videos
Resize the images and the videos to the same size
Load the upper and lower BGR values of the green color
Apply the mask and then use bitwise_and
Subtract bitwise_and from the original green screen image
Check for matrix value 0 after subtraction and replace it by the second image
You get the desired results.
Below is the implementation.
Python3
import cv2
import numpy as np

video = cv2.VideoCapture("green.mp4")
image = cv2.imread("bg.jpeg")

while True:
    ret, frame = video.read()
    frame = cv2.resize(frame, (640, 480))
    image = cv2.resize(image, (640, 480))

    u_green = np.array([104, 153, 70])
    l_green = np.array([30, 30, 0])

    mask = cv2.inRange(frame, l_green, u_green)
    res = cv2.bitwise_and(frame, frame, mask = mask)

    f = frame - res
    f = np.where(f == 0, image, f)

    cv2.imshow("video", frame)
    cv2.imshow("mask", f)

    if cv2.waitKey(25) == 27:
        break

video.release()
cv2.destroyAllWindows()
Output:
sweetyty
Python-OpenCV
Python
How to Install PIP on Windows ?
Python Classes and Objects
Python OOPs Concepts
Python | os.path.join() method
How to drop one or multiple columns in Pandas Dataframe
Introduction To PYTHON
How To Convert Python Dictionary To JSON?
Check if element exists in list in Python
Python | datetime.timedelta() function
Python | Get unique values from a list | [
{
"code": null,
"e": 28,
"s": 0,
"text": "\n29 Aug, 2021"
},
{
"code": null,
"e": 448,
"s": 28,
"text": "Prerequisites: OpenCV Python TutorialOpenCV (Open Source Computer Vision) is a computer vision library that contains various functions to perform operations on pictures or videos. This library is cross-platform that is it is available on multiple programming languages such as Python, C++, etc.Green Screen removal is used in the VFX industry for changing the scenes. Here, we will use OpenCV – Python to do the same. "
},
{
"code": null,
"e": 808,
"s": 448,
"text": "Import all necessary librariesLoad the images or videosResize the images and the videos to the same sizeLoad the upper and lower BGR values of the green colorApply the mask and then use bitwise_andSubtract bitwise_and from the original green screen imageCheck for matrix value 0 after subtraction and replace it by the second imageYou get the desired results."
},
{
"code": null,
"e": 839,
"s": 808,
"text": "Import all necessary libraries"
},
{
"code": null,
"e": 865,
"s": 839,
"text": "Load the images or videos"
},
{
"code": null,
"e": 915,
"s": 865,
"text": "Resize the images and the videos to the same size"
},
{
"code": null,
"e": 970,
"s": 915,
"text": "Load the upper and lower BGR values of the green color"
},
{
"code": null,
"e": 1010,
"s": 970,
"text": "Apply the mask and then use bitwise_and"
},
{
"code": null,
"e": 1068,
"s": 1010,
"text": "Subtract bitwise_and from the original green screen image"
},
{
"code": null,
"e": 1146,
"s": 1068,
"text": "Check for matrix value 0 after subtraction and replace it by the second image"
},
{
"code": null,
"e": 1175,
"s": 1146,
"text": "You get the desired results."
},
{
"code": null,
"e": 1205,
"s": 1175,
"text": "Below is the implementation. "
},
{
"code": null,
"e": 1213,
"s": 1205,
"text": "Python3"
},
{
"code": "import cv2import numpy as np video = cv2.VideoCapture(\"green.mp4\")image = cv2.imread(\"bg.jpeg\") while True: ret, frame = video.read() frame = cv2.resize(frame, (640, 480)) image = cv2.resize(image, (640, 480)) u_green = np.array([104, 153, 70]) l_green = np.array([30, 30, 0]) mask = cv2.inRange(frame, l_green, u_green) res = cv2.bitwise_and(frame, frame, mask = mask) f = frame - res f = np.where(f == 0, image, f) cv2.imshow(\"video\", frame) cv2.imshow(\"mask\", f) if cv2.waitKey(25) == 27: break video.release()cv2.destroyAllWindows()",
"e": 1801,
"s": 1213,
"text": null
},
{
"code": null,
"e": 1811,
"s": 1801,
"text": "Output: "
},
{
"code": null,
"e": 1822,
"s": 1813,
"text": "sweetyty"
},
{
"code": null,
"e": 1836,
"s": 1822,
"text": "Python-OpenCV"
},
{
"code": null,
"e": 1843,
"s": 1836,
"text": "Python"
},
{
"code": null,
"e": 1941,
"s": 1843,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 1973,
"s": 1941,
"text": "How to Install PIP on Windows ?"
},
{
"code": null,
"e": 2000,
"s": 1973,
"text": "Python Classes and Objects"
},
{
"code": null,
"e": 2021,
"s": 2000,
"text": "Python OOPs Concepts"
},
{
"code": null,
"e": 2052,
"s": 2021,
"text": "Python | os.path.join() method"
},
{
"code": null,
"e": 2108,
"s": 2052,
"text": "How to drop one or multiple columns in Pandas Dataframe"
},
{
"code": null,
"e": 2131,
"s": 2108,
"text": "Introduction To PYTHON"
},
{
"code": null,
"e": 2173,
"s": 2131,
"text": "How To Convert Python Dictionary To JSON?"
},
{
"code": null,
"e": 2215,
"s": 2173,
"text": "Check if element exists in list in Python"
},
{
"code": null,
"e": 2254,
"s": 2215,
"text": "Python | datetime.timedelta() function"
}
]
|
How do you give a C# Auto-Property a default value? | In C# 5.0 and before, to give a value to an Auto-Property, we have to do it in the constructor
The constructor will be called automatically when the class is instantiated, and the value will be set
After C# 5.0, a new way to give a value to an auto-property was introduced (auto-property initializers, added in C# 6.0), which is similar to assigning a value to a variable
Set Value in Constructor
class Demo{
public Demo(){
FirstName = "DemoName";
}
public string FirstName { get; set; }
}
class Program{
static void Main(){
Demo obj = new Demo();
System.Console.WriteLine(obj.FirstName);
Console.ReadLine();
}
}
DemoName
class Demo{
public string FirstName { get; set; } = "DemoName";
}
class Program{
static void Main(){
Demo obj = new Demo();
System.Console.WriteLine(obj.FirstName);
Console.ReadLine();
}
}
DemoName | [
{
"code": null,
"e": 1154,
"s": 1062,
"text": "In C# 5.0 and before to give an value to the Auto Property we have to do in the constructor"
},
{
"code": null,
"e": 1252,
"s": 1154,
"text": "The constructor will be automatically called when class is instantiated and the value will be set"
},
{
"code": null,
"e": 1370,
"s": 1252,
"text": "After C#5.0 a new way to give a value to auto property has come which is similar in assigning a value to the variable"
},
{
"code": null,
"e": 1650,
"s": 1370,
"text": "Set Value in Constructor\nclass Demo{\n public Demo(){\n FirstName = \"DemoName\";\n }\n public string FirstName { get; set; }\n}\nclass Program{\n static void Main(){\n Demo obj = new Demo();\n System.Console.WriteLine(obj.FirstName);\n Console.ReadLine();\n }\n}"
},
{
"code": null,
"e": 1659,
"s": 1650,
"text": "DemoName"
},
{
"code": null,
"e": 1875,
"s": 1659,
"text": "class Demo{\n public string FirstName { get; set; } = \"DemoName\";\n}\nclass Program{\n static void Main(){\n Demo obj = new Demo();\n System.Console.WriteLine(obj.FirstName);\n Console.ReadLine();\n }\n}"
},
{
"code": null,
"e": 1884,
"s": 1875,
"text": "DemoName"
}
]
|
Different Operations on Matrices - GeeksforGeeks | 06 May, 2022
For an introduction to matrices, you can refer to the following article: Matrix Introduction. In this article, we will discuss various operations on matrices and their properties:
Matrices Addition – The addition of two matrices Am*n and Bm*n gives a matrix Cm*n. The elements of C are the sums of the corresponding elements in A and B.
The algorithm for addition of matrices can be written as:
for i in 1 to m
for j in 1 to n
cij = aij + bij
C++
Javascript
// C++ Program for matrix addition

#include <iostream>
using namespace std;

int main()
{
    int n = 2, m = 2;
    int a[n][m] = { { 2, 5 }, { 1, 7 } };
    int b[n][m] = { { 3, 7 }, { 2, 9 } };
    int c[n][m];

    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++) {
            c[i][j] = a[i][j] + b[i][j];
        }

    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++)
            cout << c[i][j] << " ";
        cout << endl;
    }
}
<script>

// Javascript Program for matrix addition
var n = 2, m = 2;
var a = [[ 2, 5 ], [ 1, 7 ]];
var b = [[ 3, 7 ], [ 2, 9 ]];
var c = Array.from(Array(n), () => Array(m).fill(0));

for (var i = 0; i < n; i++)
    for (var j = 0; j < n; j++) {
        c[i][j] = a[i][j] + b[i][j];
    }

for (var i = 0; i < n; i++) {
    for (var j = 0; j < n; j++)
        document.write(c[i][j] + " ");
    document.write("<br>");
}

// This code is contributed by noob2000.
</script>
Output:
5 12
3 16
Time Complexity: O(n * m)
Auxiliary Space: O(n * m)
Key points:
Addition of matrices is commutative, which means A+B = B+A
Addition of matrices is associative, which means A+(B+C) = (A+B)+C
The order of matrices A, B, and A+B is always same
If order of A and B is different, A+B can’t be computed
The complexity of the addition operation is O(m*n) where m*n is order of matrices
Matrices Subtraction – The subtraction of two matrices Am*n and Bm*n gives a matrix Cm*n. The elements of C are the differences of the corresponding elements in A and B.
The algorithm for the subtraction of matrices can be written as:
for i in 1 to m
for j in 1 to n
cij = aij-bij
C++
Javascript
// C++ Program for matrix subtraction

#include <iostream>
using namespace std;

int main()
{
    int n = 2, m = 2;
    int a[n][m] = { { 2, 5 }, { 1, 7 } };
    int b[n][m] = { { 3, 7 }, { 2, 9 } };
    int c[n][m];

    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++) {
            c[i][j] = a[i][j] - b[i][j];
        }

    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++)
            cout << c[i][j] << " ";
        cout << endl;
    }
}
<script>

// Javascript Program for matrix subtraction
var n = 2, m = 2;
var a = [[ 2, 5 ], [ 1, 7 ]];
var b = [[ 3, 7 ], [ 2, 9 ]];
var c = Array.from(Array(n), () => Array(m).fill(0));

for (var i = 0; i < n; i++)
    for (var j = 0; j < n; j++) {
        c[i][j] = a[i][j] - b[i][j];
    }

for (var i = 0; i < n; i++) {
    for (var j = 0; j < n; j++)
        document.write(c[i][j] + " ");
    document.write("<br>");
}

// This code is contributed by akshitsaxena09
</script>
Output:
-1 -2
-1 -2
Key points:
Subtraction of matrices is non-commutative which means A-B ≠ B-A
Subtraction of matrices is non-associative which means A-(B-C) ≠ (A-B)-C
The order of matrices A, B and A-B is always same
If order of A and B is different, A-B can’t be computed
The complexity of subtraction operation is O(m*n) where m*n is order of matrices
Matrices Multiplication – The multiplication of two matrices Am*n and Bn*p gives a matrix Cm*p. It means the number of columns in A must be equal to the number of rows in B to calculate C = A*B. To calculate element c11, multiply the elements of the 1st row of A with the corresponding elements of the 1st column of B and add them up.
The algorithm for multiplication of matrices A with order m*n and B with order n*p can be written as:
for i in 1 to m
for j in 1 to p
cij = 0
for k in 1 to n
cij += aik*bkj
C++
// C++ Program for matrix Multiplication

#include <iostream>
using namespace std;

int main()
{
    int n = 2, m = 2;
    int a[n][m] = { { 2, 5 }, { 1, 7 } };
    int b[n][m] = { { 3, 7 }, { 2, 9 } };
    int c[n][m];
    int i, j, k;

    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            c[i][j] = 0;
            for (k = 0; k < n; k++)
                c[i][j] += a[i][k] * b[k][j];
        }
    }

    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++)
            cout << c[i][j] << " ";
        cout << endl;
    }
}
Output:
16 59
17 70
Key points:
Multiplication of matrices is non-commutative which means A*B ≠ B*A
Multiplication of matrices is associative which means A*(B*C) = (A*B)*C
For computing A*B, the number of columns in A must be equal to number of rows in B
Existence of A*B does not imply existence of B*A
The complexity of multiplication operation (A*B) is O(m*n*p) where m*n and n*p are order of A and B respectively
The order of matrix C computed as A*B is m*p where m*n and n*p are order of A and B respectively
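For instance, with the matrices used in the program above, computing the product in the opposite order gives B*A = [[3*2+7*1, 3*5+7*7], [2*2+9*1, 2*5+9*7]] = [[13, 64], [13, 73]], which differs from A*B = [[16, 59], [17, 70]]; this is a concrete check of the non-commutativity stated above.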
Read next – Determinant of a Matrix, Adjoint and Inverse of a Matrix
RahulWankhede
Shoaibahmed
Ankk98
samrat2825
subham348
noob2000
anikakapoor
akshitsaxenaa09
Engineering Mathematics
GATE CS
Matrix
Matrix
Inequalities in LaTeX
Relationship between number of nodes and height of binary tree
Activation Functions
Mathematics | Walks, Trails, Paths, Cycles and Circuits in Graph
Arrow Symbols in LaTeX
Layers of OSI Model
ACID Properties in DBMS
TCP/IP Model
Types of Operating Systems
Normal Forms in DBMS | [
{
"code": null,
"e": 42629,
"s": 42601,
"text": "\n06 May, 2022"
},
{
"code": null,
"e": 42803,
"s": 42629,
"text": "For introduction on matrices, you can refer the following article: Matrix Introduction In this article, we will discuss various operations on matrices and their properties: "
},
{
"code": null,
"e": 42975,
"s": 42803,
"text": "Matrices Addition – The addition of two matrices A m*n and Bm*n gives a matrix Cm*n. The elements of C are sum of corresponding elements in A and B which can be shown as: "
},
{
"code": null,
"e": 43034,
"s": 42975,
"text": "The algorithm for addition of matrices can be written as: "
},
{
"code": null,
"e": 43091,
"s": 43034,
"text": "for i in 1 to m\n for j in 1 to n\n cij = aij + bij"
},
{
"code": null,
"e": 43095,
"s": 43091,
"text": "C++"
},
{
"code": null,
"e": 43106,
"s": 43095,
"text": "Javascript"
},
{
"code": "// C++ Program for matrix addition #include <iostream>using namespace std; int main(){ int n = 2, m = 2; int a[n][m] = { { 2, 5 }, { 1, 7 } }; int b[n][m] = { { 3, 7 }, { 2, 9 } }; int c[n][m]; for (int i = 0; i < n; i++) for (int j = 0; j < n; j++) { c[i][j] = a[i][j] + b[i][j]; } for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) cout << c[i][j] << \" \"; cout << endl; }}",
"e": 43562,
"s": 43106,
"text": null
},
{
"code": "<script> // Javascript Program for matrix additionvar n = 2, m = 2;var a = [[ 2, 5 ], [ 1, 7 ]];var b = [[ 3, 7 ], [ 2, 9 ]];var c = Array.from(Array(n), ()=>Array(m).fill(0));for (var i = 0; i < n; i++) for (var j = 0; j < n; j++) { c[i][j] = a[i][j] + b[i][j]; }for (var i = 0; i < n; i++) { for (var j = 0; j < n; j++) document.write( c[i][j] + \" \"); document.write(\"<br>\");} // This code is contributed by noob2000.</script>",
"e": 44017,
"s": 43562,
"text": null
},
{
"code": null,
"e": 44025,
"s": 44017,
"text": "Output:"
},
{
"code": null,
"e": 44037,
"s": 44025,
"text": "5 12 \n3 16 "
},
{
"code": null,
"e": 44063,
"s": 44037,
"text": "Time Complexity: O(n * m)"
},
{
"code": null,
"e": 44100,
"s": 44063,
"text": "Auxiliary Space: O(n * m)Key points:"
},
{
"code": null,
"e": 44159,
"s": 44100,
"text": "Addition of matrices is commutative, which means A+B = B+A"
},
{
"code": null,
"e": 44226,
"s": 44159,
"text": "Addition of matrices is associative, which means A+(B+C) = (A+B)+C"
},
{
"code": null,
"e": 44277,
"s": 44226,
"text": "The order of matrices A, B, and A+B is always same"
},
{
"code": null,
"e": 44333,
"s": 44277,
"text": "If order of A and B is different, A+B can’t be computed"
},
{
"code": null,
"e": 44415,
"s": 44333,
"text": "The complexity of the addition operation is O(m*n) where m*n is order of matrices"
},
{
"code": null,
"e": 44605,
"s": 44415,
"text": "Matrices Subtraction – The subtraction of two matrices Am*n and Bm*n gives a matrix Cm*n. The elements of C are difference of corresponding elements in A and B which can be represented as: "
},
{
"code": null,
"e": 44672,
"s": 44605,
"text": "The algorithm for the subtraction of matrices can be written as: "
},
{
"code": null,
"e": 44727,
"s": 44672,
"text": "for i in 1 to m\n for j in 1 to n\n cij = aij-bij"
},
{
"code": null,
"e": 44731,
"s": 44727,
"text": "C++"
},
{
"code": null,
"e": 44742,
"s": 44731,
"text": "Javascript"
},
{
"code": "// C++ Program for matrix subtraction #include <iostream>using namespace std; int main(){ int n = 2, m = 2; int a[n][m] = { { 2, 5 }, { 1, 7 } }; int b[n][m] = { { 3, 7 }, { 2, 9 } }; int c[n][m]; for (int i = 0; i < n; i++) for (int j = 0; j < n; j++) { c[i][j] = a[i][j] - b[i][j]; } for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) cout << c[i][j] << \" \"; cout << endl; }}",
"e": 45201,
"s": 44742,
"text": null
},
{
"code": "<script> // Javascript Program for matrix subtractionvar n = 2, m = 2;var a = [[ 2, 5 ], [ 1, 7 ]];var b = [[ 3, 7 ], [ 2, 9 ]];var c = Array.from(Array(n), ()=>Array(m).fill(0));for (var i = 0; i < n; i++) for (var j = 0; j < n; j++) { c[i][j] = a[i][j] - b[i][j]; }for (var i = 0; i < n; i++) { for (var j = 0; j < n; j++) document.write( c[i][j] + \" \"); document.write(\"<br>\");} // This code is contributed by akshitsaxena09</script>",
"e": 45664,
"s": 45201,
"text": null
},
{
"code": null,
"e": 45672,
"s": 45664,
"text": "Output:"
},
{
"code": null,
"e": 45686,
"s": 45672,
"text": "-1 -2 \n-1 -2 "
},
{
"code": null,
"e": 45698,
"s": 45686,
"text": "Key points:"
},
{
"code": null,
"e": 45764,
"s": 45698,
"text": "Subtraction of matrices is non-commutative which means A-B ≠ B-A"
},
{
"code": null,
"e": 45838,
"s": 45764,
"text": "Subtraction of matrices is non-associative which means A-(B-C) ≠ (A-B)-C"
},
{
"code": null,
"e": 45888,
"s": 45838,
"text": "The order of matrices A, B and A-B is always same"
},
{
"code": null,
"e": 45944,
"s": 45888,
"text": "If order of A and B is different, A-B can’t be computed"
},
{
"code": null,
"e": 46025,
"s": 45944,
"text": "The complexity of subtraction operation is O(m*n) where m*n is order of matrices"
},
{
"code": null,
"e": 46338,
"s": 46025,
"text": "Matrices Multiplication – The multiplication of two matrices Am*n and Bn*p gives a matrix Cm*p. It means number of columns in A must be equal to number of rows in B to calculate C=A*B. To calculate element c11, multiply elements of 1st row of A with 1st column of B and add them (5*1+6*4) which can be shown as: "
},
{
"code": null,
"e": 46441,
"s": 46338,
"text": "The algorithm for multiplication of matrices A with order m*n and B with order n*p can be written as: "
},
{
"code": null,
"e": 46536,
"s": 46441,
"text": "for i in 1 to m\n for j in 1 to p\n cij = 0\n for k in 1 to n\n cij += aik*bkj"
},
{
"code": null,
"e": 46540,
"s": 46536,
"text": "C++"
},
{
"code": "// C++ Program for matrix Multiplication #include <iostream>using namespace std; int main(){ int n = 2, m = 2; int a[n][m] = { { 2, 5 }, { 1, 7 } }; int b[n][m] = { { 3, 7 }, { 2, 9 } }; int c[n][m]; int i, j, k; for (i = 0; i < n; i++) { for (j = 0; j < n; j++) { c[i][j] = 0; for (k = 0; k < n; k++) c[i][j] += a[i][k] * b[k][j]; } } for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) cout << c[i][j] << \" \"; cout << endl; }}",
"e": 47094,
"s": 46540,
"text": null
},
{
"code": null,
"e": 47102,
"s": 47094,
"text": "Output:"
},
{
"code": null,
"e": 47115,
"s": 47102,
"text": "16 59 \n17 70"
},
{
"code": null,
"e": 47128,
"s": 47115,
"text": "Key points: "
},
{
"code": null,
"e": 47197,
"s": 47128,
"text": "Multiplication of matrices is non-commutative which means A*B ≠ B*A"
},
{
"code": null,
"e": 47269,
"s": 47197,
"text": "Multiplication of matrices is associative which means A*(B*C) = (A*B)*C"
},
{
"code": null,
"e": 47352,
"s": 47269,
"text": "For computing A*B, the number of columns in A must be equal to number of rows in B"
},
{
"code": null,
"e": 47401,
"s": 47352,
"text": "Existence of A*B does not imply existence of B*A"
},
{
"code": null,
"e": 47514,
"s": 47401,
"text": "The complexity of multiplication operation (A*B) is O(m*n*p) where m*n and n*p are order of A and B respectively"
},
{
"code": null,
"e": 47611,
"s": 47514,
"text": "The order of matrix C computed as A*B is m*p where m*n and n*p are order of A and B respectively"
},
{
"code": null,
"e": 47680,
"s": 47611,
"text": "Read next – Determinant of a Matrix, Adjoint and Inverse of a Matrix"
},
{
"code": null,
"e": 47694,
"s": 47680,
"text": "RahulWankhede"
},
{
"code": null,
"e": 47706,
"s": 47694,
"text": "Shoaibahmed"
},
{
"code": null,
"e": 47713,
"s": 47706,
"text": "Ankk98"
},
{
"code": null,
"e": 47724,
"s": 47713,
"text": "samrat2825"
},
{
"code": null,
"e": 47734,
"s": 47724,
"text": "subham348"
},
{
"code": null,
"e": 47743,
"s": 47734,
"text": "noob2000"
},
{
"code": null,
"e": 47755,
"s": 47743,
"text": "anikakapoor"
},
{
"code": null,
"e": 47771,
"s": 47755,
"text": "akshitsaxenaa09"
},
{
"code": null,
"e": 47795,
"s": 47771,
"text": "Engineering Mathematics"
},
{
"code": null,
"e": 47803,
"s": 47795,
"text": "GATE CS"
},
{
"code": null,
"e": 47810,
"s": 47803,
"text": "Matrix"
},
{
"code": null,
"e": 47817,
"s": 47810,
"text": "Matrix"
},
{
"code": null,
"e": 47915,
"s": 47817,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 47937,
"s": 47915,
"text": "Inequalities in LaTeX"
},
{
"code": null,
"e": 48000,
"s": 47937,
"text": "Relationship between number of nodes and height of binary tree"
},
{
"code": null,
"e": 48021,
"s": 48000,
"text": "Activation Functions"
},
{
"code": null,
"e": 48086,
"s": 48021,
"text": "Mathematics | Walks, Trails, Paths, Cycles and Circuits in Graph"
},
{
"code": null,
"e": 48109,
"s": 48086,
"text": "Arrow Symbols in LaTeX"
},
{
"code": null,
"e": 48129,
"s": 48109,
"text": "Layers of OSI Model"
},
{
"code": null,
"e": 48153,
"s": 48129,
"text": "ACID Properties in DBMS"
},
{
"code": null,
"e": 48166,
"s": 48153,
"text": "TCP/IP Model"
},
{
"code": null,
"e": 48193,
"s": 48166,
"text": "Types of Operating Systems"
}
]
|
How can we maintain session between Web Client and Web Server? | Following are the few options to maintain the session between the Web Client and the Web Server −
A webserver can assign a unique session ID as a cookie to each web client and for subsequent requests from the client they can be recognized using the received cookie.
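As a rough sketch of how this cookie-based approach can look on the server side (this example is illustrative and not part of the original answer; it assumes the Java Servlet API, and the cookie name "sessionid" and the class name are arbitrary):

import java.io.IOException;
import java.util.UUID;
import javax.servlet.http.*;

public class SessionCookieServlet extends HttpServlet {
   protected void doGet(HttpServletRequest request, HttpServletResponse response)
         throws IOException {
      String sessionId = null;
      Cookie[] cookies = request.getCookies();
      if (cookies != null) {
         for (Cookie c : cookies) {
            if ("sessionid".equals(c.getName()))
               sessionId = c.getValue();   // returning client: reuse its ID
         }
      }
      if (sessionId == null) {
         sessionId = UUID.randomUUID().toString();   // new client: assign an ID
         response.addCookie(new Cookie("sessionid", sessionId));
      }
      // look up or create server-side state keyed by sessionId ...
   }
}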
This may not be an effective way, as browsers at times do not support cookies, so it is not recommended to use this procedure to maintain sessions.
A web server can send a hidden HTML form field along with a unique session ID as follows −
<input type = "hidden" name = "sessionid" value = "12345">
This entry means that, when the form is submitted, the specified name and value are automatically included in the GET or the POST data. Each time the web browser sends the request back, the session_id value can be used to keep the track of different web browsers.
This can be an effective way of keeping track of the session but clicking on a regular (<A HREF...>) hypertext link does not result in a form submission, so hidden form fields also cannot support general session tracking.
You can append some extra data at the end of each URL. This data identifies the session; the server can associate that session identifier with the data it has stored about that session.
For example, with http://tutorialspoint.com/file.htm;sessionid=12345, the session identifier is attached as sessionid = 12345 which can be accessed at the web server to identify the client.
URL rewriting is a better way to maintain sessions, and it works for browsers that don't support cookies. The drawback here is that you will have to generate every URL dynamically to append the session ID, even when the page is a simple static HTML page. | [
{
"code": null,
"e": 1160,
"s": 1062,
"text": "Following are the few options to maintain the session between the Web Client and the Web Server −"
},
{
"code": null,
"e": 1328,
"s": 1160,
"text": "A webserver can assign a unique session ID as a cookie to each web client and for subsequent requests from the client they can be recognized using the received cookie."
},
{
"code": null,
"e": 1482,
"s": 1328,
"text": "This may not be an effective way as the browser at times does not support a cookie. It is not recommended to use this procedure to maintain the sessions."
},
{
"code": null,
"e": 1573,
"s": 1482,
"text": "A web server can send a hidden HTML form field along with a unique session ID as follows −"
},
{
"code": null,
"e": 1632,
"s": 1573,
"text": "<input type = \"hidden\" name = \"sessionid\" value = \"12345\">"
},
{
"code": null,
"e": 1896,
"s": 1632,
"text": "This entry means that, when the form is submitted, the specified name and value are automatically included in the GET or the POST data. Each time the web browser sends the request back, the session_id value can be used to keep the track of different web browsers."
},
{
"code": null,
"e": 2118,
"s": 1896,
"text": "This can be an effective way of keeping track of the session but clicking on a regular (<A HREF...>) hypertext link does not result in a form submission, so hidden form fields also cannot support general session tracking."
},
{
"code": null,
"e": 2304,
"s": 2118,
"text": "You can append some extra data at the end of each URL. This data identifies the session; the server can associate that session identifier with the data it has stored about that session."
},
{
"code": null,
"e": 2494,
"s": 2304,
"text": "For example, with http://tutorialspoint.com/file.htm;sessionid=12345, the session identifier is attached as sessionid = 12345 which can be accessed at the web server to identify the client."
},
{
"code": null,
"e": 2744,
"s": 2494,
"text": "URL rewriting is a better way to maintain sessions and works for the browsers when they don't support cookies. The drawback here is that you will have to generate every URL dynamically to assign a session ID though page is a simple static HTML page."
}
]
|
How can I cycle through line styles in Matplotlib? | To cycle properties across multiple lines in a diagram, we can use a cycler, which picks the next value from a given list each time a new line is plotted. In the example below, the list holds colors (here, ‘r’ => ‘red’, ‘g’ => ‘green’, ‘y’ => ‘yellow’, ‘b’ => ‘blue’), but the same mechanism also works for line styles.
Use a cycler to set the color for the group of lines. The color list consists of ‘r’ for red, ‘g’ for green, ‘b’ for blue, and ‘y’ for yellow.
The cycler class helps to create a new Cycler object from a single positional argument, a pair of positional arguments, or the combination of keyword arguments.
Plot the number of lines with different colors.
Use plt.show() to show the figure.
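Since the question asks about line styles specifically, note (as an addition to this answer) that prop_cycle accepts a 'linestyle' key as well. The sketch below is a hypothetical variation that reuses the same dummy data as the color example that follows it.

import matplotlib.pyplot as plt
from cycler import cycler

plt.rc('axes', prop_cycle=(cycler('linestyle', ['-', '--', ':', '-.'])))

plt.plot([0, 5])
plt.plot([2, 6])
plt.plot([3, 8])
plt.plot([4, 9])
plt.show()

The original color-based example follows.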
import matplotlib.pyplot as plt
from cycler import cycler
plt.rc('axes', prop_cycle=(cycler('color', ['r', 'g', 'b', 'y'])))
plt.plot([0, 5])
plt.plot([2, 6])
plt.plot([3, 8])
plt.plot([4, 9])
plt.show() | [
{
"code": null,
"e": 1257,
"s": 1062,
"text": "To plot multiple lines in a diagram, we can use the cycler that could help to set a new color from the given list of colors. (Here, ‘r’ => ‘red’, ‘g’ => ‘green’, ‘y’ => ‘yellow’, ‘b’ => ‘blue’)."
},
{
"code": null,
"e": 1400,
"s": 1257,
"text": "Use a cycler to set the color for the group of lines. The color list consists of ‘r’ for red, ‘g’ for green, ‘b’ for blue, and ‘y’ for yellow."
},
{
"code": null,
"e": 1543,
"s": 1400,
"text": "Use a cycler to set the color for the group of lines. The color list consists of ‘r’ for red, ‘g’ for green, ‘b’ for blue, and ‘y’ for yellow."
},
{
"code": null,
"e": 1704,
"s": 1543,
"text": "The cycler class helps to create a new Cycler object from a single positional argument, a pair of positional arguments, or the combination of keyword arguments."
},
{
"code": null,
"e": 1865,
"s": 1704,
"text": "The cycler class helps to create a new Cycler object from a single positional argument, a pair of positional arguments, or the combination of keyword arguments."
},
{
"code": null,
"e": 1913,
"s": 1865,
"text": "Plot the number of lines with different colors."
},
{
"code": null,
"e": 1961,
"s": 1913,
"text": "Plot the number of lines with different colors."
},
{
"code": null,
"e": 1996,
"s": 1961,
"text": "Use plt.show() to show the figure."
},
{
"code": null,
"e": 2031,
"s": 1996,
"text": "Use plt.show() to show the figure."
},
{
"code": null,
"e": 2237,
"s": 2031,
"text": "import matplotlib.pyplot as plt\nfrom cycler import cycler\n\nplt.rc('axes', prop_cycle=(cycler('color', ['r', 'g', 'b', 'y'])))\n\nplt.plot([0, 5])\nplt.plot([2, 6])\nplt.plot([3, 8])\nplt.plot([4, 9])\nplt.show()"
}
]
|
What is string constant pool in Java? | When you store a String as
String str1 = "Hello";
directly, then the JVM creates a String object with the given value in a separate block of memory known as the String constant pool.
And whenever we try to create another String as
String str2 = "Hello";
The JVM verifies whether a String object with the same value already exists in the String constant pool; if so, instead of creating a new object, the JVM assigns the reference of the existing object to the new variable.
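For example, here is a quick sketch (not part of the original explanation) that makes the pooling visible through a reference comparison:
String str1 = "Hello";
String str2 = "Hello";
System.out.println(str1 == str2); // prints true: both variables refer to the same pooled object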
And when we store String as
String str = new String("Hello");
using the new keyword, a new object with the given value is created irrespective of the contents of the String constant pool. | [
{
"code": null,
"e": 1089,
"s": 1062,
"text": "When you store a String as"
},
{
"code": null,
"e": 1112,
"s": 1089,
"text": "String str1 = \"Hello\";"
},
{
"code": null,
"e": 1237,
"s": 1112,
"text": "directly, then JVM creates a String object with the given value in a separate block of memory known as String constant pool."
},
{
"code": null,
"e": 1285,
"s": 1237,
"text": "And whenever we try to create another String as"
},
{
"code": null,
"e": 1308,
"s": 1285,
"text": "String str2 = \"Hello\";"
},
{
"code": null,
"e": 1513,
"s": 1308,
"text": "JVM verifies whether any String object with the same value exists in the String constant pool, if so, instead of creating a new object JVM assigns the reference of the existing object to the new variable."
},
{
"code": null,
"e": 1541,
"s": 1513,
"text": "And when we store String as"
},
{
"code": null,
"e": 1575,
"s": 1541,
"text": "String str = new String(\"Hello\");"
},
{
"code": null,
"e": 1701,
"s": 1575,
"text": "using the new keyword, a new object with the given value is created irrespective of the contents of the String constant pool."
}
]
|
Declare dynamically in SAP ABAP | I think for your implementation, you can declare an internal table in a dynamic manner.
DATA: tbl_TEST TYPE REF TO data.
FIELD-SYMBOLS: <tbl_TEST> TYPE STANDARD TABLE.
" Received_Type holds the name of the table type to create at runtime
CREATE DATA tbl_TEST TYPE (Received_Type).
ASSIGN tbl_TEST->* TO <tbl_TEST>. | [
{
"code": null,
"e": 1150,
"s": 1062,
"text": "I think for your implementation, you can declare an internal table in a dynamic manner."
},
{
"code": null,
"e": 1320,
"s": 1150,
"text": "DATA: tbl_TEST TYPE REF TO DATA.\nFIELD-SYMBOLS: < tbl_TEST > TYPE STANDARD TABLE\nCREATE DATA tbl_TEST TYPE (Received_Type)\nASSIGN tbl_TEST TYPE ->* to < tbl_TEST TYPE >"
}
]
|
TensorBoard: Hyperparameter Optimization | by Renu Khandelwal | Towards Data Science | Prerequisites:
TensorBoard- A Visualization suite for Tensorflow models
In this article, you will learn hyperparameter optimization and then display the results of the hyperparameter optimization using TensorBoard.
What is a hyperparameter in the context of a deep neural network?
Your objective in a deep learning neural network is to find the weights of the nodes that will help you understand data patterns in an image, text, or speech.
You can do this by designing your neural network parameters with the values that provide the best accuracy and precision for the model.
So, what are these parameters that are referred to as hyperparameters?
The different parameters used for training a neural network model are called hyperparameters. These hyperparameters are tuned like knobs to improve the performance of a neural network, resulting in an optimized model.
Some of the hyperparameters in a neural network are
No. of hidden layers
No. of units or nodes in a hidden layer
Learning rate
Dropout rate
Epochs or iterations
Optimizers like SGD, Adam, AdaGrad, Rmsprop, etc.
Activation functions like ReLU, sigmoid, leaky ReLU etc.
Batch size
How to implement hyperparameter optimization?
Hyperparameter optimization is the process of finding the values for hyperparameters such as optimizers, learning rate, dropout rates, etc. of a deep learning algorithm that will give the best model performance.
You can perform a hyperparameter optimization using the following techniques.
Manual search
Grid search: An exhaustive search of all possible combinations of the specified hyperparameters resulting in a cartesian product.
Random search: Hyperparameters are randomly selected; not every combination of hyperparameters is tried. As the number of hyperparameters increases, random search becomes a better option, as it arrives at a good combination of hyperparameters faster.
Bayesian optimization: Incorporates prior data about hyperparameters, including the accuracy or loss of the model. Prior information helps determine a better approximation of hyperparameter selection for the model.
For visualizing the hyperparameter tuning of the model on TensorBoard, we will use a Grid Search technique over a few hyperparameters (the number of nodes, different optimizers, learning rates, and different dropout rates) and look at the accuracy and loss of the model.
Why use TensorBoard for hyperparameter optimization?
A picture is worth a thousand words, and this also applies to sophisticated deep learning models. Deep learning models were considered a black box where you send some input data, the model does some complex computation, and voila, you now have your results!!!
TensorBoard is a visualization toolkit from Tensorflow to display different metrics, parameters, and other visualizations that help debug, track, fine-tune, optimize, and share your deep learning experiment results
With TensorBoard, you can track the accuracy and loss of the model at every epoch, and also across different hyperparameter values. Tracking accuracy for different hyperparameter values will help you fine-tune the model faster.
Finally, here is the code implementation in Python...
We will visualize the scalars, graphs, and distribution using TensorBoard using cats and dogs dataset.
Import TensorFlow and the TensorBoard HParams plugin along with Keras libraries for preprocessing the image and creating the model.
import tensorflow as tf
from tensorboard.plugins.hparams import api as hp
import datetime
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
import numpy as np
I have used TensorFlow 2.0.0 version.
# Load the TensorBoard notebook extension
%load_ext tensorboard
Setting critical parameters for the training
BASE_PATH = 'Data\\dogs-vs-cats\\train\\'
TRAIN_PATH = 'Data\\dogs-vs-cats\\train_data\\'
VAL_PATH = 'Data\\dogs-vs-cats\\validation_data\\'
batch_size = 32
epochs = 5
IMG_HEIGHT = 150
IMG_WIDTH = 150
Rescale and Apply different Augmentation to the training image
train_image_generator = ImageDataGenerator( rescale=1./255, rotation_range=45, width_shift_range=.15, height_shift_range=.15, horizontal_flip=True, zoom_range=0.3)
Rescale Validation data
validation_image_generator = ImageDataGenerator(rescale=1./255)
Generate batches of normalized data for train and validation data set
train_data_gen = train_image_generator.flow_from_directory(
    batch_size=batch_size,
    directory=TRAIN_PATH,
    shuffle=True,
    target_size=(IMG_HEIGHT, IMG_WIDTH),
    class_mode='categorical')

val_data_gen = validation_image_generator.flow_from_directory(
    batch_size=batch_size,
    directory=VAL_PATH,
    target_size=(IMG_HEIGHT, IMG_WIDTH),
    class_mode='categorical')
We are using four hyperparameters for running our experiment by listing the different values or ranges of value for the Hyperparameter.
For discrete hyperparameters, all possible combinations of parameters will be tried, and for real-valued parameters, only the lower and upper bound will be used.
Num of units in the first Dense layer: 256 and 512
Drop out rate: the range is between 0.1 and 0.2. So a dropout rate of 0.1 and 0.2 will be used.
Optimizers: adam, SGD, and rmsprop
Learning rate for the optimizers: 0.001, 0.0001 and 0.0005
We also set the metrics as accuracy to be displayed on the TensorBoard
## Create hyperparameters
HP_NUM_UNITS = hp.HParam('num_units', hp.Discrete([256, 512]))
HP_DROPOUT = hp.HParam('dropout', hp.RealInterval(0.1, 0.2))
HP_LEARNING_RATE = hp.HParam('learning_rate', hp.Discrete([0.001, 0.0005, 0.0001]))
HP_OPTIMIZER = hp.HParam('optimizer', hp.Discrete(['adam', 'sgd', 'rmsprop']))
METRIC_ACCURACY = 'accuracy'
Creating and Configuring the log files
log_dir = '\\logs\\fit\\' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
with tf.summary.create_file_writer(log_dir).as_default():
    hp.hparams_config(
        hparams=[HP_NUM_UNITS, HP_DROPOUT, HP_OPTIMIZER, HP_LEARNING_RATE],
        metrics=[hp.Metric(METRIC_ACCURACY, display_name='Accuracy')],
    )
Hyperparameters are not hardcoded but are taken from hparams dictionary for different parameters: HP_DROPOUTfor dropout, HP_NUM_UNITS for the number of units in the first Dense layer, HP_OPTIMIZER sets the different optimizers. We take the optimizer that gets used and set the learning rate based on HP_LEARNING_RATE.
The function returns the validation accuracy of the last epoch.
def create_model(hparams):
    model = Sequential([
        Conv2D(64, 3, padding='same', activation='relu',
               input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
        MaxPooling2D(),
        # setting the dropout value based on HParams
        Dropout(hparams[HP_DROPOUT]),
        Conv2D(128, 3, padding='same', activation='relu'),
        MaxPooling2D(),
        Dropout(hparams[HP_DROPOUT]),
        Flatten(),
        Dense(hparams[HP_NUM_UNITS], activation='relu'),
        Dense(2, activation='softmax')])

    # setting the optimizer and learning rate
    optimizer = hparams[HP_OPTIMIZER]
    learning_rate = hparams[HP_LEARNING_RATE]
    if optimizer == "adam":
        optimizer = tf.optimizers.Adam(learning_rate=learning_rate)
    elif optimizer == "sgd":
        optimizer = tf.optimizers.SGD(learning_rate=learning_rate)
    elif optimizer == 'rmsprop':
        optimizer = tf.optimizers.RMSprop(learning_rate=learning_rate)
    else:
        raise ValueError("unexpected optimizer name: %r" % (optimizer,))

    # Compile the model with the optimizer and learning rate specified in hparams
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # Fit the model
    history = model.fit_generator(
        train_data_gen,
        steps_per_epoch=1000,
        epochs=epochs,
        validation_data=val_data_gen,
        validation_steps=1000,
        callbacks=[
            tf.keras.callbacks.TensorBoard(log_dir),  # log metrics
            hp.KerasCallback(log_dir, hparams),  # log hparams
        ])

    return history.history['val_accuracy'][-1]
For each run of the model, log the hparams summary with the hyperparameters and the final epoch's accuracy. We need to convert the validation accuracy of the last epoch to a scalar value.
def run(run_dir, hparams):
    with tf.summary.create_file_writer(run_dir).as_default():
        hp.hparams(hparams)  # record the values used in this trial
        accuracy = create_model(hparams)
        # converting to a tf scalar
        accuracy = tf.reshape(tf.convert_to_tensor(accuracy), []).numpy()
        tf.summary.scalar(METRIC_ACCURACY, accuracy, step=1)
The experimentation here uses Grid Search and tests all possible combinations of hyperparameters: the number of units in the first layer, dropout rate, optimizers, and their learning rates, with accuracy used as the metric.
session_num = 0
for num_units in HP_NUM_UNITS.domain.values:
    for dropout_rate in (HP_DROPOUT.domain.min_value, HP_DROPOUT.domain.max_value):
        for optimizer in HP_OPTIMIZER.domain.values:
            for learning_rate in HP_LEARNING_RATE.domain.values:
                hparams = {
                    HP_NUM_UNITS: num_units,
                    HP_DROPOUT: dropout_rate,
                    HP_OPTIMIZER: optimizer,
                    HP_LEARNING_RATE: learning_rate,
                }
                run_name = "run-%d" % session_num
                print('--- Starting trial: %s' % run_name)
                print({h.name: hparams[h] for h in hparams})
                run('logs/hparam_tuning/' + run_name, hparams)
                session_num += 1
You can view the HParams TensorBoard dashboard in two ways: either in a Jupyter notebook or from the command line (cmd).
Using cmd
You can display the HParams dashboard by providing the directory path where the different run logs were stored, using the following command
python -m tensorboard.main --logdir="logs/hparam_tuning"
When sorting the accuracy in descending order, you can see that the most optimized model is with 256 units with a dropout rate of 0.2 and rmsprop optimizer with a learning rate of 0.0005.
Using Jupyter notebook
%tensorboard --logdir='\logs\hparam_tuning'
You can also view the Parallel Coordinates View, which displays the individual runs for each hyperparameter along with the accuracy
The TensorBoard HParams dashboard helps find the most optimized hyperparameters for the best model accuracy
TensorBoard hyperparameter tuning provides a visual way to understand which hyperparameters can be used for fine-tuning the deep learning model for the best accuracy | [
{
"code": null,
"e": 187,
"s": 172,
"text": "Prerequisites:"
},
{
"code": null,
"e": 244,
"s": 187,
"text": "TensorBoard- A Visualization suite for Tensorflow models"
},
{
"code": null,
"e": 387,
"s": 244,
"text": "In this article, you will learn hyperparameter optimization and then display the results of the hyperparameter optimization using TensorBoard."
},
{
"code": null,
"e": 451,
"s": 387,
"text": "What is Hyperparameter in the context of a deep neural network?"
},
{
"code": null,
"e": 612,
"s": 451,
"text": "Your objective in a deep learning neural network is to find the weights of the nodes that will help us understand data patterns in an image, any text or speech."
},
{
"code": null,
"e": 748,
"s": 612,
"text": "You can do this by designing your neural network parameters with the values that provide the best accuracy and precision for the model."
},
{
"code": null,
"e": 819,
"s": 748,
"text": "So, what are these parameters that are referred to as hyperparameters?"
},
{
"code": null,
"e": 1033,
"s": 819,
"text": "Different parameters used for training the neural network model is called Hyperparameters. These hyperparameters are tuned like knobs to improve the performance of a neural network resulting in an optimized model."
},
{
"code": null,
"e": 1084,
"s": 1033,
"text": "Some of the Hyperparameter in a neural network are"
},
{
"code": null,
"e": 1105,
"s": 1084,
"text": "No. of hidden layers"
},
{
"code": null,
"e": 1145,
"s": 1105,
"text": "No. of units or nodes in a hidden layer"
},
{
"code": null,
"e": 1159,
"s": 1145,
"text": "Learning rate"
},
{
"code": null,
"e": 1172,
"s": 1159,
"text": "Dropout rate"
},
{
"code": null,
"e": 1193,
"s": 1172,
"text": "Epochs or iterations"
},
{
"code": null,
"e": 1243,
"s": 1193,
"text": "Optimizers like SGD, Adam, AdaGrad, Rmsprop, etc."
},
{
"code": null,
"e": 1300,
"s": 1243,
"text": "Activation functions like ReLU, sigmoid, leaky ReLU etc."
},
{
"code": null,
"e": 1311,
"s": 1300,
"text": "Batch size"
},
{
"code": null,
"e": 1357,
"s": 1311,
"text": "How to implement hyperparameter optimization?"
},
{
"code": null,
"e": 1561,
"s": 1357,
"text": "Hyperparameter optimization is the process to find the value for hyperparameter like optimizers, learning rate, dropout rates, etc. of a deep learning algorithm that will give the best model performance."
},
{
"code": null,
"e": 1639,
"s": 1561,
"text": "You can perform a hyperparameter optimization using the following techniques."
},
{
"code": null,
"e": 1653,
"s": 1639,
"text": "Manual search"
},
{
"code": null,
"e": 1783,
"s": 1653,
"text": "Grid search: An exhaustive search of all possible combinations of the specified hyperparameters resulting in a cartesian product."
},
{
"code": null,
"e": 2031,
"s": 1783,
"text": "Random search: Hyperparameters are randomly selected, not every combination of Hyperparameter is tried. As the number of hyperparameters increases, the random search is a better option as it arrives at a good combination of hyperparameters faster."
},
{
"code": null,
"e": 2243,
"s": 2031,
"text": "Bayesian optimization: Incorporates prior data about hyperparameters, including accuracy or loss of the model. Prior information help determine the better approximation of hyperparameter selection for the model."
},
{
"code": null,
"e": 2530,
"s": 2243,
"text": "For visualizing the hyperparameter tuning for the model on the TensorBoard, we will use a Grid Search technique where we will use a few hyperparameters like no. of nodes, different optimizers, or learning rate, and different dropout rates and look at the accuracy and loss of the model."
},
{
"code": null,
"e": 2583,
"s": 2530,
"text": "Why use TensorBoard for hyperparameter optimization?"
},
{
"code": null,
"e": 2843,
"s": 2583,
"text": "A picture is a thousand words, and this is also applied to sophisticated deep learning models. Deep learning models were considered as a black box where you send some input data, the model does some complex computation, and voila, you now have your results!!!"
},
{
"code": null,
"e": 3058,
"s": 2843,
"text": "TensorBoard is a visualization toolkit from Tensorflow to display different metrics, parameters, and other visualizations that help debug, track, fine-tune, optimize, and share your deep learning experiment results"
},
{
"code": null,
"e": 3291,
"s": 3058,
"text": "With TensorBoard, you can track the accuracy and loss of the model at every epoch; and also with different hyperparameters values. Tracking accuracy for different values of Hyperparameter will help you to fine-tune the model faster."
},
{
"code": null,
"e": 3345,
"s": 3291,
"text": "Finally, here is the code implementation in Python..."
},
{
"code": null,
"e": 3448,
"s": 3345,
"text": "We will visualize the scalars, graphs, and distribution using TensorBoard using cats and dogs dataset."
},
{
"code": null,
"e": 3580,
"s": 3448,
"text": "Import TensorFlow and the TensorBoard HParams plugin along with Keras libraries for preprocessing the image and creating the model."
},
{
"code": null,
"e": 3904,
"s": 3580,
"text": "import tensorflow as tffrom tensorboard.plugins.hparams import api as hpimport datetimefrom tensorflow.keras.models import Sequentialfrom tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2Dfrom tensorflow.keras.preprocessing.image import ImageDataGenerator, img_to_array, load_imgimport numpy as np"
},
{
"code": null,
"e": 3942,
"s": 3904,
"text": "I have used TensorFlow 2.0.0 version."
},
{
"code": null,
"e": 4005,
"s": 3942,
"text": "# Load the TensorBoard notebook extension%load_ext tensorboard"
},
{
"code": null,
"e": 4050,
"s": 4005,
"text": "Setting critical parameters for the training"
},
{
"code": null,
"e": 4242,
"s": 4050,
"text": "BASE_PATH = 'Data\\\\dogs-vs-cats\\\\train\\\\'TRAIN_PATH='Data\\\\dogs-vs-cats\\\\train_data\\\\'VAL_PATH='Data\\\\dogs-vs-cats\\\\validation_data\\\\'batch_size = 32 epochs = 5IMG_HEIGHT = 150IMG_WIDTH = 150"
},
{
"code": null,
"e": 4305,
"s": 4242,
"text": "Rescale and Apply different Augmentation to the training image"
},
{
"code": null,
"e": 4749,
"s": 4305,
"text": "train_image_generator = ImageDataGenerator( rescale=1./255, rotation_range=45, width_shift_range=.15, height_shift_range=.15, horizontal_flip=True, zoom_range=0.3)"
},
{
"code": null,
"e": 4773,
"s": 4749,
"text": "Rescale Validation data"
},
{
"code": null,
"e": 4837,
"s": 4773,
"text": "validation_image_generator = ImageDataGenerator(rescale=1./255)"
},
{
"code": null,
"e": 4907,
"s": 4837,
"text": "Generate batches of normalized data for train and validation data set"
},
{
"code": null,
"e": 5650,
"s": 4907,
"text": "train_data_gen = train_image_generator.flow_from_directory(batch_size = batch_size, directory=TRAIN_PATH, shuffle=True, target_size=(IMG_HEIGHT, IMG_WIDTH), class_mode='categorical')val_data_gen = validation_image_generator.flow_from_directory(batch_size = batch_size, directory=VAL_PATH, target_size=(IMG_HEIGHT, IMG_WIDTH), class_mode='categorical')"
},
{
"code": null,
"e": 5786,
"s": 5650,
"text": "We are using four hyperparameters for running our experiment by listing the different values or ranges of value for the Hyperparameter."
},
{
"code": null,
"e": 5948,
"s": 5786,
"text": "For discrete hyperparameters, all possible combinations of parameters will be tried, and for real-valued parameters, only the lower and upper bound will be used."
},
{
"code": null,
"e": 6186,
"s": 5948,
"text": "Num of units in the first Dense layer: 256 and 512Drop out rate: the range is between 0.1 and 0.2. So a dropout rate of 0.1 and 0.2 will be used.Optimizers: adam, SGD, and rmspropLearning rate for the optimizers:0.001, 0.0001 and 0.0005,"
},
{
"code": null,
"e": 6237,
"s": 6186,
"text": "Num of units in the first Dense layer: 256 and 512"
},
{
"code": null,
"e": 6333,
"s": 6237,
"text": "Drop out rate: the range is between 0.1 and 0.2. So a dropout rate of 0.1 and 0.2 will be used."
},
{
"code": null,
"e": 6368,
"s": 6333,
"text": "Optimizers: adam, SGD, and rmsprop"
},
{
"code": null,
"e": 6427,
"s": 6368,
"text": "Learning rate for the optimizers:0.001, 0.0001 and 0.0005,"
},
{
"code": null,
"e": 6498,
"s": 6427,
"text": "We also set the metrics as accuracy to be displayed on the TensorBoard"
},
{
"code": null,
"e": 6827,
"s": 6498,
"text": "## Create hyperparametersHP_NUM_UNITS=hp.HParam('num_units', hp.Discrete([ 256, 512]))HP_DROPOUT=hp.HParam('dropout', hp.RealInterval(0.1, 0.2))HP_LEARNING_RATE= hp.HParam('learning_rate', hp.Discrete([0.001, 0.0005, 0.0001]))HP_OPTIMIZER=hp.HParam('optimizer', hp.Discrete(['adam', 'sgd', 'rmsprop']))METRIC_ACCURACY='accuracy'"
},
{
"code": null,
"e": 6866,
"s": 6827,
"text": "Creating and Configuring the log files"
},
{
"code": null,
"e": 7169,
"s": 6866,
"text": "log_dir ='\\\\logs\\\\fit\\\\' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')with tf.summary.create_file_writer(log_dir).as_default(): hp.hparams_config( hparams= [HP_NUM_UNITS, HP_DROPOUT, HP_OPTIMIZER, HP_LEARNING_RATE], metrics=[hp.Metric(METRIC_ACCURACY, display_name='Accuracy')], )"
},
{
"code": null,
"e": 7487,
"s": 7169,
"text": "Hyperparameters are not hardcoded but are taken from hparams dictionary for different parameters: HP_DROPOUTfor dropout, HP_NUM_UNITS for the number of units in the first Dense layer, HP_OPTIMIZER sets the different optimizers. We take the optimizer that gets used and set the learning rate based on HP_LEARNING_RATE."
},
{
"code": null,
"e": 7551,
"s": 7487,
"text": "The function returns the validation accuracy of the last epoch."
},
{
"code": null,
"e": 9090,
"s": 7551,
"text": "def create_model(hparams): model = Sequential([ Conv2D(64, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)), MaxPooling2D(), #setting the Drop out value based on HParam Dropout(hparams[HP_DROPOUT]), Conv2D(128, 3, padding='same', activation='relu'), MaxPooling2D(), Dropout(hparams[HP_DROPOUT]), Flatten(), Dense(hparams[HP_NUM_UNITS], activation='relu'), Dense(2, activation='softmax')]) #setting the optimizer and learning rate optimizer = hparams[HP_OPTIMIZER] learning_rate = hparams[HP_LEARNING_RATE] if optimizer == \"adam\": optimizer = tf.optimizers.Adam(learning_rate=learning_rate) elif optimizer == \"sgd\": optimizer = tf.optimizers.SGD(learning_rate=learning_rate) elif optimizer=='rmsprop': optimizer = tf.optimizers.RMSprop(learning_rate=learning_rate) else: raise ValueError(\"unexpected optimizer name: %r\" % (optimizer_name,)) # Comiple the mode with the optimizer and learninf rate specified in hparams model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy']) #Fit the model history=model.fit_generator( train_data_gen, steps_per_epoch=1000, epochs=epochs, validation_data=val_data_gen, validation_steps=1000, callbacks=[ tf.keras.callbacks.TensorBoard(log_dir), # log metrics hp.KerasCallback(log_dir, hparams),# log hparams ]) return history.history['val_accuracy'][-1]"
},
{
"code": null,
"e": 9272,
"s": 9090,
"text": "For each run of the model, log the hparams summary with Hyperparameter and the final epochs accuracy. We need to convert the validation accuracy of the last epoch to a scalar value."
},
{
"code": null,
"e": 9609,
"s": 9272,
"text": "def run(run_dir, hparams): with tf.summary.create_file_writer(run_dir).as_default(): hp.hparams(hparams) # record the values used in this trial accuracy = create_model(hparams) #converting to tf scalar accuracy= tf.reshape(tf.convert_to_tensor(accuracy), []).numpy() tf.summary.scalar(METRIC_ACCURACY, accuracy, step=1)"
},
{
"code": null,
"e": 9834,
"s": 9609,
"text": "Experimentation here uses Grid Search and tests all possible combinations of hyperparameters for the number of units for the first layer, dropout rate, optimizers, and their learning rates, and accuracy is used for accuracy."
},
{
"code": null,
"e": 10507,
"s": 9834,
"text": "session_num = 0for num_units in HP_NUM_UNITS.domain.values: for dropout_rate in (HP_DROPOUT.domain.min_value, HP_DROPOUT.domain.max_value): for optimizer in HP_OPTIMIZER.domain.values: for learning_rate in HP_LEARNING_RATE.domain.values: hparams = { HP_NUM_UNITS: num_units, HP_DROPOUT: dropout_rate, HP_OPTIMIZER: optimizer, HP_LEARNING_RATE: learning_rate, } run_name = \"run-%d\" % session_num print('--- Starting trial: %s' % run_name) print({h.name: hparams[h] for h in hparams}) run('logs/hparam_tuning/' + run_name, hparams) session_num += 1"
},
{
"code": null,
"e": 10620,
"s": 10507,
"text": "You can view the HParams TensorBoard dashboard using different commands: either in Jupyter notebook or using cmd"
},
{
"code": null,
"e": 10630,
"s": 10620,
"text": "Using cmd"
},
{
"code": null,
"e": 10769,
"s": 10630,
"text": "you will display the Hparam dashboard by providing the directory path where the different run logs were stored using the following command"
},
{
"code": null,
"e": 10826,
"s": 10769,
"text": "python -m tensorboard.main --logdir=\"logs/hparam_tuning\""
},
{
"code": null,
"e": 11014,
"s": 10826,
"text": "When sorting the accuracy in descending order, you can see that the most optimized model is with 256 units with a dropout rate of 0.2 and rmsprop optimizer with a learning rate of 0.0005."
},
{
"code": null,
"e": 11037,
"s": 11014,
"text": "Using Jupyter notebook"
},
{
"code": null,
"e": 11081,
"s": 11037,
"text": "%tensorboard --logdir='\\logs\\hparam_tuning'"
},
{
"code": null,
"e": 11214,
"s": 11081,
"text": "you can also view the Parallel Coordinates View, displaying the individual runs for each hyperparameter and also displaying accuracy"
},
{
"code": null,
"e": 11317,
"s": 11214,
"text": "Tensorboard Hparams dashboard help find the most optimized hyperparameters for the best model accuracy"
}
]
|
Bagged Trees: A Machine Learning Algorithm Every Data Scientist Needs | by Robert Wood | Towards Data Science | Without diving into the specifics just yet, it’s important that you have some foundation understanding of decision trees.
From the evaluation approach of each algorithm to the algorithms themselves, there are many similarities.
If you aren’t already familiar with decision trees I’d recommend a quick refresher here.
With that said, get ready to become a bagged tree expert! Bagged trees are famous for improving the predictive capability of a single decision tree and an incredibly useful algorithm for your machine learning tool belt.
The main idea behind bagged trees is that rather than depending on a single decision tree, you are depending on many decision trees, which allows you to leverage the insight of many models.
When considering the performance of a model, we often consider what’s known as the bias-variance trade-off of our output. Variance has to do with how our model handles small errors and how much those can potentially throw off our model, while bias results in under-fitting: the model effectively makes incorrect assumptions about the relationships between variables.
You could say the issue with high variance is that while your model may be directionally correct, it’s not very accurate; whereas if your model is very biased, there could be low variance, but the model could be directionally incorrect entirely.
The biggest issue with a decision tree, in general, is that they have high variance. The issue this presents is that any minor change to the data can result in major changes to the model and future predictions.
The reason this comes into play here is that one of the benefits of bagged trees, is it helps minimize variation while holding bias consistent.
One of the main issues with bagged trees is that they are incredibly difficult to interpret. In the decision trees lesson, we learned that a major benefit of decision trees is that they were considerably easier to interpret. Bagged trees prove the opposite in this regard, as their process lends itself to complexity. I’ll explain that more in-depth shortly.
Bagging stands for Bootstrap Aggregation; it is what is known as an ensemble method — which is effectively an approach to layering different models, data, algorithms, and so forth.
So now you might be thinking... ok cool, so what is bootstrap aggregation...
What happens is that the model will sample a subset of the data and will train a decision tree; no different from a decision tree so far... but what then happens is that additional samples are taken (with replacement — meaning that the same data can be included multiple times), new models are trained, and then the predictions are averaged. A bagged tree could include 5 trees, 50 trees, 100 trees and so on. Each tree in your ensemble may have different features, terminal node counts, data, etc.
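To make the mechanism concrete, here is a rough sketch of bagging by hand (not from the original post); it assumes the rpart package is available and simply averages the class-probability predictions of trees grown on bootstrap samples of the training data built later in this post.
library(rpart)

n_trees <- 25
trees <- lapply(1:n_trees, function(i) {
  # Bootstrap sample: draw row indices with replacement
  boot_rows <- sample(nrow(train), replace = TRUE)
  rpart(Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked,
        data = train[boot_rows, ], method = "class")
})

# Average the predicted class probabilities across all trees
probs <- sapply(trees, function(tree) predict(tree, newdata = test)[, 2])
bagged_prob <- rowMeans(probs)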
As you can imagine, a bagged tree is very difficult to interpret.
To start off, we’ll break out our training and test sets. I’m not going to talk much about the train test split here. We’ll be doing this with the Titanic dataset from the titanic package
n <- nrow(titanic_train)
n_train <- round(0.8 * n)
set.seed(123)
train_indices <- sample(1:n, n_train)
train <- titanic_train[train_indices, ]
test <- titanic_train[-train_indices, ]
Now that we have our train & test sets broken out, let’s load up the ipred package. This will allow us to run the bagging function.
A couple of things to keep in mind: the formula indicates that we want to understand Survived by (~) Pclass + Sex + Age + SibSp + Parch + Fare + Embarked
From there you can see that we’re using the train dataset to train this model. And finally, you can see this parameter coob. This is confirming whether we'd like to test performance on an out-of-bag sample.
Remember how I said that each tree re-samples the data? Well, that process leaves a handful of records that will never be used to train with and that make up an excellent dataset for testing the model’s performance. This process happens within the bagging function, as you'll see when we print the model.
library(ipred)
set.seed(123)
model <- bagging(formula = Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked,
                 data = train,
                 coob = TRUE)
print(model)
As you can see we trained the default of 25 trees in our bagged tree model.
We use the same process to predict for our test set as we use for decision trees.
pred <- predict(object = model,
                newdata = test,
                type = "class")
print(pred)
Now, we’ve trained our model, predicted for our test set, now it’s time to break down different methods of performance evaluation.
ROC Curve or Receiver Operating Characteristic Curve is a method for visualizing the capability of a binary classification model to diagnose or predict correctly. The ROC Curve plots the true positive rate against the false positive rate at various thresholds.
Our target for the ROC Curve is that the true positive rate is 100% and the false positive rate is 0%. That curve would fall in the top left corner of the plot.
AUC is intended to determine the degree of separability, or the ability to correctly predict the class. The higher the AUC the better: 1 would be perfect, and .5 would be random.
We’ll be using the Metrics package to calculate the AUC for our dataset.
library(Metrics)
pred <- predict(object = model,
                newdata = test,
                type = "prob")
auc(actual = test$Survived, predicted = pred[, "yes"])
Here you can see that I change the type to "prob" to return a percentage likelihood rather than the classification. This is needed to calculate AUC.
This returned an AUC of .89 which is not bad at all.
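If you would also like to plot the ROC curve itself (not shown in the original post), here is a minimal sketch assuming the pROC package is installed; it reuses the probability predictions from above.
library(pROC)

# Build the ROC object from the actual labels and the predicted probabilities
roc_obj <- roc(response = test$Survived, predictor = pred[, "yes"])

# Plot the curve and report the area under it
plot(roc_obj)
auc(roc_obj)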
In classification, the idea of a cutoff threshold means that given a certain percent likelihood for a given outcome, you would classify it accordingly. Wow, was that a mouthful. In other words, if you predict survival at 99%, then you’d probably classify it as survival. Now let’s say you look at another passenger that you predict to survive with a 60% likelihood. Well, they’re still more likely to survive than not, so you probably classify them as survived. When working with type = "prob" you have the flexibility to specify your own cutoff threshold.
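For example, here is a small sketch (again an addition to the original post) that applies a custom 0.6 cutoff to the predicted probabilities from above:
# Classify a passenger as "yes" (survived) only when the predicted probability is at least 0.6
custom_cutoff <- 0.6
pred_custom <- ifelse(pred[, "yes"] >= custom_cutoff, "yes", "no")
head(pred_custom)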
This metric is very simple: what percentage of your predictions were correct. The confusion matrix function from caret includes this.
The confusionMatrix function from the caret package is incredibly useful for assessing classification model performance. Load up the package, and pass it your predictions and the actuals.
library(caret)
confusionMatrix(data = test$pred,
                reference = test$Survived)
The first thing this function shows you is what’s called a confusion matrix. This shows you a table of how predictions and actuals lined up. So the diagonal cells where the prediction and reference are the same represents what we got correct. Counting those up 149 (106 + 43) and dividing it by the total number of records, 178; we arrive at our accuracy number of 83.4%.
True positive: The cell in the quadrant where both the reference and the prediction are 1. This indicates that you predicted survival and they did in fact survive.
False positive: Here you predicted positive, but you were wrong.
True negative: When you predict negative, and you are correct.
False negative: When you predict negative, and you are incorrect.
A couple more key metrics to keep in mind are sensitivity and specificity. Sensitivity is the percentage of true records that you predicted correctly.
Specificity, on the other hand, is to measure what portion of the actual false records you predicted correctly.
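In terms of confusion-matrix counts, both metrics follow the standard definitions; the variable names below are placeholders for the counts described above:
# Sensitivity: the share of actual positives predicted correctly
sensitivity <- true_positives / (true_positives + false_negatives)
# Specificity: the share of actual negatives predicted correctly
specificity <- true_negatives / (true_negatives + false_positives)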
Specificity is one to keep in mind when predicting on an imbalanced dataset. A very common example of this is for classifying email spam. 99% of the time it’s not spam, so if you predicted nothing was ever spam you’d have 99% accuracy, but your specificity would be 0, leading to all spam being accepted.
In summary, we’ve learned about the right times to use bagged trees, as well as the wrong times to use them.
We defined what bagging is and how it changes the model.
We built and tested our own model while defining & assessing a variety of performance measures.
I hope you enjoyed this quick lesson on bagged trees. Let me know if there was something you wanted more info on or if there’s something you’d like me to cover in a different post.
Happy Data Science-ing! | [
{
"code": null,
"e": 294,
"s": 172,
"text": "Without diving into the specifics just yet, it’s important that you have some foundation understanding of decision trees."
},
{
"code": null,
"e": 400,
"s": 294,
"text": "From the evaluation approach of each algorithm to the algorithms themselves, there are many similarities."
},
{
"code": null,
"e": 489,
"s": 400,
"text": "If you aren’t already familiar with decision trees I’d recommend a quick refresher here."
},
{
"code": null,
"e": 709,
"s": 489,
"text": "With that said, get ready to become a bagged tree expert! Bagged trees are famous for improving the predictive capability of a single decision tree and an incredibly useful algorithm for your machine learning tool belt."
},
{
"code": null,
"e": 905,
"s": 709,
"text": "The main idea between bagged trees is that rather than depending on a single decision tree, you are depending on many many decision trees, which allows you to leverage the insight of many models."
},
{
"code": null,
"e": 1266,
"s": 905,
"text": "When considering the performance of a model, we often consider what’s known as the bias-variance trade-off of our output. Variance has to do with how our model handles small errors and how much that potentially throws off our model and bias results in under-fitting. The model effectively makes incorrect assumptions around the relationships between variables."
},
{
"code": null,
"e": 1497,
"s": 1266,
"text": "You could say the issue with variation is while your model may be directionally correct, it’s not very accurate, while if your model is very biased, while there could be low variation; it could be directionally incorrect entirely."
},
{
"code": null,
"e": 1708,
"s": 1497,
"text": "The biggest issue with a decision tree, in general, is that they have high variance. The issue this presents is that any minor change to the data can result in major changes to the model and future predictions."
},
{
"code": null,
"e": 1852,
"s": 1708,
"text": "The reason this comes into play here is that one of the benefits of bagged trees, is it helps minimize variation while holding bias consistent."
},
{
"code": null,
"e": 2201,
"s": 1852,
"text": "One of the main issues with bagged trees is that they are incredibly difficult to interpret. In the decision trees lesson, we learned that a major benefit of decision trees is that they were considerably easier to interpret. Bagged trees prove the opposite in this regard as its process lends to complexity. I’ll explain that more in-depth shortly."
},
{
"code": null,
"e": 2382,
"s": 2201,
"text": "Bagging stands for Bootstrap Aggregation; it is what is known as an ensemble method — which is effectively an approach to layering different models, data, algorithms, and so forth."
},
{
"code": null,
"e": 2459,
"s": 2382,
"text": "So now you might be thinking... ok cool, so what is bootstrap aggregation..."
},
{
"code": null,
"e": 2958,
"s": 2459,
"text": "What happens is that the model will sample a subset of the data and will train a decision tree; no different from a decision tree so far... but what then happens is that additional samples are taken (with replacement — meaning that the same data can be included multiple times), new models are trained, and then the predictions are averaged. A bagged tree could include 5 trees, 50 trees, 100 trees and so on. Each tree in your ensemble may have different features, terminal node counts, data, etc."
},
{
"code": null,
"e": 3024,
"s": 2958,
"text": "As you can imagine, a bagged tree is very difficult to interpret."
},
{
"code": null,
"e": 3212,
"s": 3024,
"text": "To start off, we’ll break out our training and test sets. I’m not going to talk much about the train test split here. We’ll be doing this with the Titanic dataset from the titanic package"
},
{
"code": null,
"e": 3393,
"s": 3212,
"text": "n <- nrow(titanic_train)n_train <- round(0.8 * n)set.seed(123) train_indices <- sample(1:n, n_train) train <- titanic_train[train_indices, ] test <- titanic_train[-train_indices, ]"
},
{
"code": null,
"e": 3525,
"s": 3393,
"text": "Now that we have our train & test sets broken out, let’s load up the ipred package. This will allow us to run the bagging function."
},
{
"code": null,
"e": 3684,
"s": 3525,
"text": "A couple things to keep in mind is that the formula indicates that we want to understand Survived by (~ ) Pclass + Sex + Age + SibSp + Parch + Fare + Embarked"
},
{
"code": null,
"e": 3889,
"s": 3684,
"text": "From there you can see that we’re using the train dataset to train this model. & finally, you can see this parameter coob. This is confirming whether we'd like to test performance on an out of bag sample."
},
{
"code": null,
"e": 4187,
"s": 3889,
"text": "Remember how I said that each tree re-samples the data? Well, that process leaves a handful of records that will never be used to train with & make up an excellent dataset for testing the model’s performance. This process happens within the bagging function, as you'll see when we print the model."
},
{
"code": null,
"e": 4348,
"s": 4187,
"text": "library(ipred) set.seed(123) model <- bagging(formula = Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked, data = train, coob = TRUE) print(model)"
},
{
"code": null,
"e": 4424,
"s": 4348,
"text": "As you can see we trained the default of 25 trees in our bagged tree model."
},
{
"code": null,
"e": 4506,
"s": 4424,
"text": "We use the same process to predict for our test set as we use for decision trees."
},
{
"code": null,
"e": 4582,
"s": 4506,
"text": "pred <- predict(object = model, newdata = test, type = \"class\") print(pred)"
},
{
"code": null,
"e": 4713,
"s": 4582,
"text": "Now, we’ve trained our model, predicted for our test set, now it’s time to break down different methods of performance evaluation."
},
{
"code": null,
"e": 4974,
"s": 4713,
"text": "ROC Curve or Receiver Operating Characteristic Curve is a method for visualizing the capability of a binary classification model to diagnose or predict correctly. The ROC Curve plots the true positive rate against the false positive rate at various thresholds."
},
{
"code": null,
"e": 5135,
"s": 4974,
"text": "Our target for the ROC Curve is that the true positive rate is 100% and the false positive rate is 0%. That curve would fall in the top left corner of the plot."
},
{
"code": null,
"e": 5308,
"s": 5135,
"text": "AUC is intended to determine the degree of separability, or the ability to correct predict class. The higher the AUC the better. 1 would be perfect, and .5 would be random."
},
{
"code": null,
"e": 5381,
"s": 5308,
"text": "We’ll be using the metrics package to calculate the AUC for our dataset."
},
{
"code": null,
"e": 5515,
"s": 5381,
"text": "library(Metrics) pred <- predict(object = model, newdata = test, type = \"prob\") auc(actual = test$Survived, predicted = pred[,\"yes\"])"
},
{
"code": null,
"e": 5664,
"s": 5515,
"text": "Here you can see that I change the type to \"prob\" to return a percentage likelihood rather than the classification. This is needed to calculate AUC."
},
{
"code": null,
"e": 5717,
"s": 5664,
"text": "This returned an AUC of .89 which is not bad at all."
},
{
"code": null,
"e": 6270,
"s": 5717,
"text": "In classification, the idea of a cutoff threshold means that given a certain percent likelihood for a given outcome you would classify it accordingly. Wow was that a mouthful. In other words, if you predict survival at 99%, then you’d probably classify it as survival. Well, let’s say you look at another passenger that you predict to survive with a 60% likelihood. Well, they’re still more likely to survive than not, so you probably classify them as survive. When selecting type = \"pred\" you have the flexibility to specify your own cutoff threshold."
},
{
"code": null,
"e": 6404,
"s": 6270,
"text": "This metric is very simple, what percentage of your predictions were correct. The confusion matrix function from caret includes this."
},
{
"code": null,
"e": 6591,
"s": 6404,
"text": "The confusionMatrix function from the caret package is incredibly useful. For assessing classification model performance. Load up the package, and pass it your predictions & the actuals."
},
{
"code": null,
"e": 6667,
"s": 6591,
"text": "library(caret) confusionMatrix(data = test$pred, reference = test$Survived)"
},
{
"code": null,
"e": 7039,
"s": 6667,
"text": "The first thing this function shows you is what’s called a confusion matrix. This shows you a table of how predictions and actuals lined up. So the diagonal cells where the prediction and reference are the same represents what we got correct. Counting those up 149 (106 + 43) and dividing it by the total number of records, 178; we arrive at our accuracy number of 83.4%."
},
{
"code": null,
"e": 7203,
"s": 7039,
"text": "True positive: The cell in the quadrant where both the reference and the prediction are 1. This indicates that you predicted survival and they did in fact survive."
},
{
"code": null,
"e": 7268,
"s": 7203,
"text": "False positive: Here you predicted positive, but you were wrong."
},
{
"code": null,
"e": 7331,
"s": 7268,
"text": "True negative: When you predict negative, and you are correct."
},
{
"code": null,
"e": 7397,
"s": 7331,
"text": "False negative: When you predict negative, and you are incorrect."
},
{
"code": null,
"e": 7548,
"s": 7397,
"text": "A couple more key metrics to keep in mind are sensitivity and specificity. Sensitivity is the percentage of true records that you predicted correctly."
},
{
"code": null,
"e": 7660,
"s": 7548,
"text": "Specificity, on the other hand, is to measure what portion of the actual false records you predicted correctly."
},
{
"code": null,
"e": 7965,
"s": 7660,
"text": "Specificity is one to keep in mind when predicting on an imbalanced dataset. A very common example of this is for classifying email spam. 99% of the time it’s not spam, so if you predicted nothing was ever spam you’d have 99% accuracy, but your specificity would be 0, leading to all spam being accepted."
},
{
"code": null,
"e": 8074,
"s": 7965,
"text": "In summary, we’ve learned about the right times to use bagged trees, as well as the wrong times to use them."
},
{
"code": null,
"e": 8131,
"s": 8074,
"text": "We defined what bagging is and how it changes the model."
},
{
"code": null,
"e": 8227,
"s": 8131,
"text": "We built and tested our own model while defining & assessing a variety of performance measures."
},
{
"code": null,
"e": 8408,
"s": 8227,
"text": "I hope you enjoyed this quick lesson on bagged trees. Let me know if there was something you wanted more info on or if there’s something you’d like me to cover in a different post."
}
]
|
Overfitting and Conceptual Soundness | by Klas Leino | Towards Data Science | Overfitting is a central problem in machine learning that is strongly tied to the reliability of a learned model when it is deployed on unseen data. Overfitting is often measured — or even defined — by the difference in accuracy obtained by a model on its training data, compared to on previously unseen validation data. While this is a useful metric that broadly captures the extent to which a model will make mistakes on new points (one of the key problematic implications of overfitting), we will instead take a more general and nuanced take on overfitting. In particular, this article will use the TruLens explanatory framework to examine a key mechanism underlying overfitting: the encoding and use of unsound features.
At a high level, deep networks work by learning to extract high-level features that enable them to make predictions on new inputs. While some of these features may truly be generalizable predictors, others may coincidentally aid classification on the training set only. In the former case, we say the learned features are conceptually sound. The latter type of learned features are not conceptually sound, and thus may lead to anomalous or incorrect behavior on unseen points, i.e., overfitting.
In the remainder of this article, we will present evidence supporting this perspective of overfitting, and show how TruLens can be used to assess the features that are learned and used by neural networks. For a more general introduction to TruLens, see this article.
Our hypothesis is that overfitting manifests itself in a model through idiosyncratic feature use. To illustrate this point, we will consider an example from the “labeled faces in the wild” (LFW) dataset. The LFW dataset contains images of many celebrities and prominent public figures circa the early 2000s, and the task is to identify the person in each picture. We have selected a subset containing five of the most frequently appearing identities. The full dataset can be obtained via scikit-learn.
In the training set, we find that a few images of Tony Blair have a unique and distinctive pink background. Our hypothesis suggests that a model can overfit by learning to use the pink background as a feature for Tony Blair, as the feature is indeed predictive of Tony Blair on the training set. Of course, despite its coincidental usefulness on the training set, the background is clearly not conceptually sound, and is unlikely to be useful on new data.
If the model overfits in this way, it will be evident from an inspection of the features that are encoded and used by the model on instances with pink backgrounds. This can be done using internal influence [1] with TruLens. A notebook reproducing the images and experiments in this article can be found here.
We will begin by training a simple convolutional neural network (CNN) on our LFW training set. For example, using TensorFlow:
from trulens.nn.models import get_model_wrapper

# Define our model.
x = Input((64,64,3))
z = Conv2D(20, 5, padding='same')(x)
z = Activation('relu')(z)
z = MaxPooling2D()(z)
z = Conv2D(50, 5, padding='same')(z)
z = Activation('relu')(z)
z = MaxPooling2D()(z)
z = Flatten()(z)
z = Dense(500)(z)
z = Activation('relu')(z)
y = Dense(5)(z)
keras_model = Model(x, y)

# Compile and train the model.
keras_model.compile(
    loss=SparseCategoricalCrossentropy(from_logits=True),
    optimizer='rmsprop',
    metrics=['sparse_categorical_accuracy'])
keras_model.fit(
    x_train, y_train,
    epochs=50,
    batch_size=64,
    validation_data=(x_test, y_test))

# Wrap the model as a TruLens model.
model = get_model_wrapper(keras_model)
Next we will use internal influence to examine the primary learned features the model uses to make its decisions. According to our hypothesis, an overfit model will have encoded the pink background as a feature used for classification. If this hypothesis holds, we will need to find where this feature is encoded in the model, and whether it is used in classification.
The convolutional layers of a CNN consist of many channels, or feature maps, which are in turn made of a grid of individual neurons. Each channel represents a single type of feature, while the neurons inside each channel represent that type of feature at a specific location in the image. It is possible that a high-level feature (e.g., a pink background) is encoded by a network, but that it doesn’t correspond to a single channel. For example, it may be formed by a linear combination of multiple channels. In our example, for simplicity, we will limit our search to considering single channels, which happens to work for us.
The network we have trained is not particularly deep, so we don’t have too many choices for the layer to search in. Typically, deeper layers encode progressively higher level features; to find our pink background feature, we will begin with searching the second convolutional layer.
We use the following procedure: first we find the most influential channel in the second convolutional layer (layer 4 in the implementation of our model). There are a number of ways we could do this; in our case we will assign influence to each channel according to the maximum influence among each neuron in the channel. Once we have determined the most influential channel, we will visualize it by finding the input pixels that contribute most to that channel. Altogether, this procedure tells us which feature (at our chosen layer) is most influential on the model’s prediction, and which parts of the image are part of that feature.
from trulens.nn.attribution import InternalInfluence
from trulens.visualizations import HeatmapVisualizer

layer = 4

# Define the influence measure.
internal_infl_attributer = InternalInfluence(
    model, layer, qoi='max', doi='point')

internal_attributions = internal_infl_attributer.attributions(
    instance)

# Take the max over the width and height to get an attribution for
# each channel.
channel_attributions = internal_attributions.max(
    axis=(1,2)).mean(axis=0)

target_channel = int(channel_attributions.argmax())

# Calculate the input pixels that are most influential on the
# target channel.
input_attributions = InternalInfluence(
    model, (0, layer), qoi=target_channel, doi='point').attributions(instance)

# Visualize the influential input pixels.
_ = HeatmapVisualizer(blur=3)(input_attributions, instance)
The most important pixels are highlighted in red. We see that indeed, the background is being heavily used by our model. Using an alternative visualization technique, we can again confirm that the explanation focuses on the background on these distinctive training points:
from trulens.visualizations import ChannelMaskVisualizer
from trulens.visualizations import Tiler

visualizer = ChannelMaskVisualizer(
    model, layer, target_channel,
    blur=3, threshold=0.9)

visualization = visualizer(instance)
plt.imshow(Tiler().tile(visualization))
For the sake of comparison, we can follow the same procedure on a different model that did not see any pink backgrounds during training. This model has no reason to encode a pink background feature, let alone use it to identify Tony Blair. As expected, we see that the result is quite different:
Explanations can help increase our trust in conceptually sound models, or help us anticipate future mistakes that may arise from the use of unsound features.
Consider again our running example. The model learned that a pink background is a feature of Tony Blair. As it happens, there are no images in our test set — of Tony Blair or any other person — with a pink background. Our test set will thus not be useful in identifying this case of conceptual unsoundness. But should the model be trusted? Presumably pink backgrounds could easily arise in deployment, even if they are not found in the test set.
Both the model trained with the pink background and the model trained without the pink background achieved roughly the same validation accuracy (between 83 and 84%). From the perspective of the validation metrics, we should be just as happy with either of them. But again, the explanations generated in the previous sections should make it clear that one model has a weakness that the other does not.
In fact, we can directly demonstrate the implications of unsound feature use, which can be foreseen upon examining explanations. Though we have no examples in the test set that display a pink background, this can be easily fixed with some basic photo-editing. Here we have edited an image of a non-Tony-Blair person from LFW, Gerhard Schroeder, to have a pink background. Pictures like the edited image could, of course, easily be realized in real life.
We see that on the original image, the model makes the correct prediction of class 3, corresponding to Gerhard Schroeder. However, on the edited image, the model predicts class 4, corresponding to Tony Blair.
>>> keras_model.predict(original).argmax(axis=1)
array([3])
>>>
>>> keras_model.predict(edited).argmax(axis=1)
array([4])
And, predictably, if we ask the model why it has predicted Tony Blair on the edited image, we see that the pink background is again highlighted.
Finally, if we turn to our alternative model trained without the pink background, we observe that our edited image does not cause the same erroneous behavior. After all, the alternative model has no reason to associate a pink background with Tony Blair (or any other person), and did not appear to do so.
>>> keras_model_no_pink.predict(original).argmax(axis=1)
array([3])
>>>
>>> keras_model_no_pink.predict(edited).argmax(axis=1)
array([3])
We have seen how overfitting can lead a model to make peculiar mistakes on unseen data. In addition to causing misclassifications, overfitting presents a privacy risk. Intuitively, by learning features that are overly-specific to the training set, models will inadvertently leak information about their training data. My research [2] on this topic with Matt Fredrikson (appearing in USENIX 2020) uses insights similar to those presented here to show how an attacker can make inferences about the data used to train a model.
In particular, the attack we designed works even on models with essentially no gap in accuracy between their training and validation sets. This underscores the point that overfitting does not necessarily need to show up as mistakes on the validation data to cause problems. By examining the way in which our models use features, we can gain more trust in their efficacy than we should infer from simple performance metrics; and conversely, we can identify potential problems that might otherwise go unnoticed.
Machine learning models are prone to learning unsound features that can lead to prediction errors, privacy vulnerabilities, etc. Explanations can help identify cases of unsound feature usage that might otherwise go undetected even on the validation data. On the other hand, we should strive for models on which explanations indicate sound feature usage, increasing our trust in the model’s performance on future unseen data. TruLens is a powerful and easy-to-use tool that can help us apply this valuable form of model analysis.
[1] Leino et al. “Influence-directed Explanations for Deep Convolutional Networks.” ITC 2018. ArXiv
[2] Leino & Fredrikson. “Stolen Memories: Leveraging Model Memorization for Calibrated White-Box Membership Inference.” USENIX 2020. ArXiv
{
"code": null,
"e": 897,
"s": 172,
"text": "Overfitting is a central problem in machine learning that is strongly tied to the reliability of a learned model when it is deployed on unseen data. Overfitting is often measured — or even defined — by the difference in accuracy obtained by a model on its training data, compared to on previously unseen validation data. While this is a useful metric that broadly captures the extent to which a model will make mistakes on new points (one of the key problematic implications of overfitting), we will instead take a more general and nuanced take on overfitting. In particular, this article will use the TruLens explanatory framework to examine a key mechanism underlying overfitting: the encoding and use of unsound features."
},
{
"code": null,
"e": 1393,
"s": 897,
"text": "At a high level, deep networks work by learning to extract high-level features that enable them to make predictions on new inputs. While some of these features may truly be generalizable predictors, others may coincidentally aid classification on the training set only. In the former case, we say the learned features are conceptually sound. The latter type of learned features are not conceptually sound, and thus may lead to anomalous or incorrect behavior on unseen points, i.e., overfitting."
},
{
"code": null,
"e": 1660,
"s": 1393,
"text": "In the remainder of this article, we will present evidence supporting this perspective of overfitting, and show how TruLens can be used to assess the features that are learned and used by neural networks. For a more general introduction to TruLens, see this article."
},
{
"code": null,
"e": 2162,
"s": 1660,
"text": "Our hypothesis is that overfitting manifests itself in a model through idiosyncratic feature use. To illustrate this point, we will consider an example from the “labeled faces in the wild” (LFW) dataset. The LFW dataset contains images of many celebrities and prominent public figures circa the early 2000s, and the task is to identify the person in each picture. We have selected a subset containing five of the most frequently appearing identities. The full dataset can be obtained via scikit-learn."
},
{
"code": null,
"e": 2618,
"s": 2162,
"text": "In the training set, we find that a few images of Tony Blair have a unique and distinctive pink background. Our hypothesis suggests that a model can overfit by learning to use the pink background as a feature for Tony Blair, as the feature is indeed predictive of Tony Blair on the training set. Of course, despite its coincidental usefulness on the training set, the background is clearly not conceptually sound, and is unlikely to be useful on new data."
},
{
"code": null,
"e": 2927,
"s": 2618,
"text": "If the model overfits in this way, it will be evident from an inspection of the features that are encoded and used by the model on instances with pink backgrounds. This can be done using internal influence [1] with TruLens. A notebook reproducing the images and experiments in this article can be found here."
},
{
"code": null,
"e": 3053,
"s": 2927,
"text": "We will begin by training a simple convolutional neural network (CNN) on our LFW training set. For example, using TensorFlow:"
},
{
"code": null,
"e": 3764,
"s": 3053,
"text": "from trulens.nn.models import get_model_wrapper# Define our model.x = Input((64,64,3))z = Conv2D(20, 5, padding='same')(x)z = Activation('relu')(z)z = MaxPooling2D()(z)z = Conv2D(50, 5, padding='same')(z)z = Activation('relu')(z)z = MaxPooling2D()(z)z = Flatten()(z)z = Dense(500)(z)z = Activation('relu')(z)y = Dense(5)(z)keras_model = Model(x, y)# Compile and train the model.keras_model.compile( loss=SparseCategoricalCrossentropy(from_logits=True), optimizer='rmsprop', metrics=['sparse_categorical_accuracy'])keras_model.fit( x_train, y_train, epochs=50, batch_size=64, validation_data=(x_test, y_test))# Wrap the model as a TruLens model.model = get_model_wrapper(keras_model)"
},
{
"code": null,
"e": 4133,
"s": 3764,
"text": "Next we will use internal influence to examine the primary learned features the model uses to make its decisions. According to our hypothesis, an overfit model will have encoded the pink background as a feature used for classification. If this hypothesis holds, we will need to find where this feature is encoded in the model, and whether it is used in classification."
},
{
"code": null,
"e": 4761,
"s": 4133,
"text": "The convolutional layers of a CNN consist of many channels, or feature maps, which are in turn made of a grid of individual neurons. Each channel represents a single type of feature, while the neurons inside each channel represent that type of feature at a specific location in the image. It is possible that a high-level feature (e.g., a pink background) is encoded by a network, but that it doesn’t correspond to a single channel. For example, it may be formed by a linear combination of multiple channels. In our example, for simplicity, we will limit our search to considering single channels, which happens to work for us."
},
{
"code": null,
"e": 5044,
"s": 4761,
"text": "The network we have trained is not particularly deep, so we don’t have too many choices for the layer to search in. Typically, deeper layers encode progressively higher level features; to find our pink background feature, we will begin with searching the second convolutional layer."
},
{
"code": null,
"e": 5681,
"s": 5044,
"text": "We use the following procedure: first we find the most influential channel in the second convolutional layer (layer 4 in the implementation of our model). There are a number of ways we could do this; in our case we will assign influence to each channel according to the maximum influence among each neuron in the channel. Once we have determined the most influential channel, we will visualize it by finding the input pixels that contribute most to that channel. Altogether, this procedure tells us which feature (at our chosen layer) is most influential on the model’s prediction, and which parts of the image are part of that feature."
},
{
"code": null,
"e": 6491,
"s": 5681,
"text": "from trulens.nn.attribution import InternalInfluencefrom trulens.visualizations import HeatmapVisualizerlayer = 4# Define the influence measure.internal_infl_attributer = InternalInfluence( model, layer, qoi='max', doi='point')internal_attributions = internal_infl_attributer.attributions( instance)# Take the max over the width and height to get an attribution for# each channel.channel_attributions = internal_attributions.max( axis=(1,2)).mean(axis=0)target_channel = int(channel_attributions.argmax())# Calculate the input pixels that are most influential on the# target channel.input_attributions = InternalInfluence( model, (0, layer), qoi=target_channel, doi='point').attributions(instance)# Visualize the influential input pixels._ = HeatmapVisualizer(blur=3)(input_attributions, instance)"
},
{
"code": null,
"e": 6764,
"s": 6491,
"text": "The most important pixels are highlighted in red. We see that indeed, the background is being heavily used by our model. Using an alternative visualization technique, we can again confirm that the explanation focuses on the background on these distinctive training points:"
},
{
"code": null,
"e": 7040,
"s": 6764,
"text": "from trulens.visualizations import ChannelMaskVisualizerfrom trulens.visualizations import Tilervisualizer = ChannelMaskVisualizer( model, layer, target_channel, blur=3, threshold=0.9)visualization = visualizer(instance)plt.imshow(Tiler().tile(visualization))"
},
{
"code": null,
"e": 7336,
"s": 7040,
"text": "For the sake of comparison, we can follow the same procedure on a different model that did not see any pink backgrounds during training. This model has no reason to encode a pink background feature, let alone use it to identify Tony Blair. As expected, we see that the result is quite different:"
},
{
"code": null,
"e": 7494,
"s": 7336,
"text": "Explanations can help increase our trust in conceptually sound models, or help us anticipate future mistakes that may arise from the use of unsound features."
},
{
"code": null,
"e": 7940,
"s": 7494,
"text": "Consider again our running example. The model learned that a pink background is a feature of Tony Blair. As it happens, there are no images in our test set — of Tony Blair or any other person — with a pink background. Our test set will thus not be useful in identifying this case of conceptual unsoundness. But should the model be trusted? Presumably pink backgrounds could easily arise in deployment, even if they are not found in the test set."
},
{
"code": null,
"e": 8341,
"s": 7940,
"text": "Both the model trained with the pink background and the model trained without the pink background achieved roughly the same validation accuracy (between 83 and 84%). From the perspective of the validation metrics, we should be just as happy with either of them. But again, the explanations generated in the previous sections should make it clear that one model has a weakness that the other does not."
},
{
"code": null,
"e": 8795,
"s": 8341,
"text": "In fact, we can directly demonstrate the implications of unsound feature use, which can be foreseen upon examining explanations. Though we have no examples in the test set that display a pink background, this can be easily fixed with some basic photo-editing. Here we have edited an image of a non-Tony-Blair person from LFW, Gerhard Schroeder, to have a pink background. Pictures like the edited image could, of course, easily be realized in real life."
},
{
"code": null,
"e": 9004,
"s": 8795,
"text": "We see that on the original image, the model makes the correct prediction of class 3, corresponding to Gerhard Schroeder. However, on the edited image, the model predicts class 4, corresponding to Tony Blair."
},
{
"code": null,
"e": 9122,
"s": 9004,
"text": ">>> keras_model.predict(original).argmax(axis=1)array([3])>>>>>> keras_model.predict(edited).argmax(axis=1)array([4])"
},
{
"code": null,
"e": 9267,
"s": 9122,
"text": "And, predictably, if we ask the model why it has predicted Tony Blair on the edited image, we see that the pink background is again highlighted."
},
{
"code": null,
"e": 9572,
"s": 9267,
"text": "Finally, if we turn to our alternative model trained without the pink background, we observe that our edited image does not cause the same erroneous behavior. After all, the alternative model has no reason to associate a pink background with Tony Blair (or any other person), and did not appear to do so."
},
{
"code": null,
"e": 9706,
"s": 9572,
"text": ">>> keras_model_no_pink.predict(original).argmax(axis=1)array([3])>>>>>> keras_model_no_pink.predict(edited).argmax(axis=1)array([3])"
},
{
"code": null,
"e": 10230,
"s": 9706,
"text": "We have seen how overfitting can lead a model to make peculiar mistakes on unseen data. In addition to causing misclassifications, overfitting presents a privacy risk. Intuitively, by learning features that are overly-specific to the training set, models will inadvertently leak information about their training data. My research [2] on this topic with Matt Fredrikson (appearing in USENIX 2020) uses insights similar to those presented here to show how an attacker can make inferences about the data used to train a model."
},
{
"code": null,
"e": 10740,
"s": 10230,
"text": "In particular, the attack we designed works even on models with essentially no gap in accuracy between their training and validation sets. This underscores the point that overfitting does not necessarily need to show up as mistakes on the validation data to cause problems. By examining the way in which our models use features, we can gain more trust in their efficacy than we should infer from simple performance metrics; and conversely, we can identify potential problems that might otherwise go unnoticed."
},
{
"code": null,
"e": 11269,
"s": 10740,
"text": "Machine learning models are prone to learning unsound features that can lead to prediction errors, privacy vulnerabilities, etc. Explanations can help identify cases of unsound feature usage that might otherwise go undetected even on the validation data. On the other hand, we should strive for models on which explanations indicate sound feature usage, increasing our trust in the model’s performance on future unseen data. TruLens is a powerful and easy-to-use tool that can help us apply this valuable form of model analysis."
},
{
"code": null,
"e": 11499,
"s": 11269,
"text": "Leino et al. “Influence-directed Explanations for Deep Convolutional Networks.” ITC 2018. ArXivLeino & Fredrikson. “Stolen Memories: Leveraging Model Memorization for Calibrated White-Box Membership Inference.” USENIX 2020. ArXiv"
},
{
"code": null,
"e": 11595,
"s": 11499,
"text": "Leino et al. “Influence-directed Explanations for Deep Convolutional Networks.” ITC 2018. ArXiv"
}
]
|
Change Thread Priority in Java | A thread can be created by extending the Thread class (as in the example below) or by implementing the Runnable interface, and overriding the run() method. A Thread object can then be created and its start() method called.
The thread priority is a hint that influences how the scheduler allocates processor time and other resources among runnable threads. Priorities range from 1 (Thread.MIN_PRIORITY) to 10 (Thread.MAX_PRIORITY), with 5 (Thread.NORM_PRIORITY) as the default. A thread's priority can be changed using the setPriority() method of class Thread.
A program that demonstrates changing the thread priorities using the method setPriority() in Java is given as follows:
Live Demo
public class ThreadDemo extends Thread {
public void run() {
System.out.println("Running...");
}
public static void main(String[] args) {
ThreadDemo thread1 = new ThreadDemo();
ThreadDemo thread2 = new ThreadDemo();
ThreadDemo thread3 = new ThreadDemo();
ThreadDemo thread4 = new ThreadDemo();
ThreadDemo thread5 = new ThreadDemo();
System.out.println("Default thread priority of Thread 1: " + thread1.getPriority());
System.out.println("Default thread priority of Thread 2: " + thread2.getPriority());
System.out.println("Default thread priority of Thread 3: " + thread3.getPriority());
System.out.println("Default thread priority of Thread 4: " + thread4.getPriority());
System.out.println("Default thread priority of Thread 5: " + thread5.getPriority());
System.out.println("\nChanging the default thread priority using the setPriority() method");
thread1.setPriority(7);
thread2.setPriority(3);
thread3.setPriority(9);
thread4.setPriority(2);
thread5.setPriority(8);
System.out.println("\nChanged thread priority of Thread 1: " + thread1.getPriority());
System.out.println("Changed thread priority of Thread 2: " + thread2.getPriority());
System.out.println("Changed thread priority of Thread 3: " + thread3.getPriority());
System.out.println("Changed thread priority of Thread 4: " + thread4.getPriority());
System.out.println("Changed thread priority of Thread 5: " + thread5.getPriority());
System.out.println("\n" + Thread.currentThread().getName());
System.out.println("\nDefault thread priority of Main Thread: " + Thread.currentThread().getPriority());
Thread.currentThread().setPriority(10);
System.out.println("Changed thread priority of Main Thread: " + Thread.currentThread().getPriority());
}
}
Default thread priority of Thread 1: 5
Default thread priority of Thread 2: 5
Default thread priority of Thread 3: 5
Default thread priority of Thread 4: 5
Default thread priority of Thread 5: 5
Changing the default thread priority using the setPriority() method
Changed thread priority of Thread 1: 7
Changed thread priority of Thread 2: 3
Changed thread priority of Thread 3: 9
Changed thread priority of Thread 4: 2
Changed thread priority of Thread 5: 8
main
Default thread priority of Main Thread: 5
Changed thread priority of Main Thread: 10 | [
{
"code": null,
"e": 1225,
"s": 1062,
"text": "A thread can be created by implementing the Runnable interface and overriding the run() method. Then a Thread object can be created and the start() method called."
},
{
"code": null,
"e": 1395,
"s": 1225,
"text": "The thread priority determines when the processor is provided to the thread as well as other resources. It can be changed using the method setPriority() of class Thread."
},
{
"code": null,
"e": 1514,
"s": 1395,
"text": "A program that demonstrates changing the thread priorities using the method setPriority() in Java is given as follows:"
},
{
"code": null,
"e": 1525,
"s": 1514,
"text": " Live Demo"
},
{
"code": null,
"e": 3404,
"s": 1525,
"text": "public class ThreadDemo extends Thread {\n public void run() {\n System.out.println(\"Running...\");\n }\n public static void main(String[] args) {\n ThreadDemo thread1 = new ThreadDemo();\n ThreadDemo thread2 = new ThreadDemo();\n ThreadDemo thread3 = new ThreadDemo();\n ThreadDemo thread4 = new ThreadDemo();\n ThreadDemo thread5 = new ThreadDemo();\n System.out.println(\"Default thread priority of Thread 1: \" + thread1.getPriority());\n System.out.println(\"Default thread priority of Thread 2: \" + thread2.getPriority());\n System.out.println(\"Default thread priority of Thread 3: \" + thread3.getPriority());\n System.out.println(\"Default thread priority of Thread 4: \" + thread4.getPriority());\n System.out.println(\"Default thread priority of Thread 5: \" + thread5.getPriority());\n System.out.println(\"\\nChanging the default thread priority using the setPriority() method\");\n thread1.setPriority(7);\n thread2.setPriority(3);\n thread3.setPriority(9);\n thread4.setPriority(2);\n thread5.setPriority(8);\n System.out.println(\"\\nChanged thread priority of Thread 1: \" + thread1.getPriority());\n System.out.println(\"Changed thread priority of Thread 2: \" + thread2.getPriority());\n System.out.println(\"Changed thread priority of Thread 3: \" + thread3.getPriority());\n System.out.println(\"Changed thread priority of Thread 4: \" + thread4.getPriority());\n System.out.println(\"Changed thread priority of Thread 5: \" + thread5.getPriority());\n System.out.println(\"\\n\" + Thread.currentThread().getName());\n System.out.println(\"\\nDefault thread priority of Main Thread: \" + Thread.currentThread().getPriority());\n Thread.currentThread().setPriority(10);\n System.out.println(\"Changed thread priority of Main Thread: \" + Thread.currentThread().getPriority());\n }\n}"
},
{
"code": null,
"e": 3952,
"s": 3404,
"text": "Default thread priority of Thread 1: 5\nDefault thread priority of Thread 2: 5\nDefault thread priority of Thread 3: 5\nDefault thread priority of Thread 4: 5\nDefault thread priority of Thread 5: 5\nChanging the default thread priority using the setPriority() method\nChanged thread priority of Thread 1: 7\nChanged thread priority of Thread 2: 3\nChanged thread priority of Thread 3: 9\nChanged thread priority of Thread 4: 2\nChanged thread priority of Thread 5: 8\nmain\nDefault thread priority of Main Thread: 5\nChanged thread priority of Main Thread: 10"
}
]
|
Java & MySQL - Viewing a ResultSet | The ResultSet interface contains dozens of methods for getting the data of the current row.
There is a get method for each of the possible data types, and each get method has two versions −
One that takes in a column name.
One that takes in a column index.
For example, if the column you are interested in viewing contains an int, you need to use one of the getInt() methods of ResultSet −
public int getInt(String columnName) throws SQLException − Returns the int in the current row in the column named columnName.
public int getInt(int columnIndex) throws SQLException − Returns the int in the current row in the specified column index. The column index starts at 1, meaning the first column of a row is 1, the second column of a row is 2, and so on.
Similarly, there are get methods in the ResultSet interface for each of the eight Java primitive types, as well as common types such as java.lang.String, java.lang.Object, and java.net.URL.
There are also methods for getting SQL data types java.sql.Date, java.sql.Time, java.sql.TimeStamp, java.sql.Clob, and java.sql.Blob. Check the documentation for more information about using these SQL data types.
Following is an example which makes use of a few of the viewing methods described above.
This sample code has been written based on the environment and database setup done in the previous chapters.
Copy and paste the following example in TestApplication.java, compile and run as follows −
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
public class TestApplication {
static final String DB_URL = "jdbc:mysql://localhost/TUTORIALSPOINT";
static final String USER = "guest";
static final String PASS = "guest123";
static final String QUERY = "SELECT id, first, last, age FROM Employees";
public static void main(String[] args) {
// Open a connection
try(Connection conn = DriverManager.getConnection(DB_URL, USER, PASS);
Statement stmt = conn.createStatement(
ResultSet.TYPE_SCROLL_INSENSITIVE,
ResultSet.CONCUR_READ_ONLY);
ResultSet rs = stmt.executeQuery(QUERY);
) {
// Move cursor to the last row.
System.out.println("Moving cursor to the last...");
rs.last();
// Extract data from result set
System.out.println("Displaying record...");
//Retrieve by column name
int id = rs.getInt("id");
int age = rs.getInt("age");
String first = rs.getString("first");
String last = rs.getString("last");
// Display values
System.out.print("ID: " + id);
System.out.print(", Age: " + age);
System.out.print(", First: " + first);
System.out.println(", Last: " + last);
// Move cursor to the first row.
System.out.println("Moving cursor to the first row...");
rs.first();
// Extract data from result set
System.out.println("Displaying record...");
// Retrieve by column name
id = rs.getInt("id");
age = rs.getInt("age");
first = rs.getString("first");
last = rs.getString("last");
// Display values
System.out.print("ID: " + id);
System.out.print(", Age: " + age);
System.out.print(", First: " + first);
System.out.println(", Last: " + last);
         // Move cursor to the next row.
System.out.println("Moving cursor to the next row...");
rs.next();
// Extract data from result set
System.out.println("Displaying record...");
id = rs.getInt("id");
age = rs.getInt("age");
first = rs.getString("first");
last = rs.getString("last");
// Display values
System.out.print("ID: " + id);
System.out.print(", Age: " + age);
System.out.print(", First: " + first);
System.out.println(", Last: " + last);
} catch (SQLException e) {
e.printStackTrace();
}
}
}
Now let us compile the above example as follows −
C:\>javac TestApplication.java
C:\>
When you run TestApplication, it produces the following result −
C:\>java TestApplication
Moving cursor to the last...
Displaying record...
ID: 103, Age: 30, First: Sumit, Last: Mittal
Moving cursor to the first row...
Displaying record...
ID: 100, Age: 18, First: Zara, Last: Ali
Moving cursor to the next row...
Displaying record...
ID: 101, Age: 25, First: Mehnaz, Last: Fatma
C:\>
| [
{
"code": null,
"e": 2778,
"s": 2686,
"text": "The ResultSet interface contains dozens of methods for getting the data of the current row."
},
{
"code": null,
"e": 2876,
"s": 2778,
"text": "There is a get method for each of the possible data types, and each get method has two versions −"
},
{
"code": null,
"e": 2909,
"s": 2876,
"text": "One that takes in a column name."
},
{
"code": null,
"e": 2942,
"s": 2909,
"text": "One that takes in a column name."
},
{
"code": null,
"e": 2976,
"s": 2942,
"text": "One that takes in a column index."
},
{
"code": null,
"e": 3010,
"s": 2976,
"text": "One that takes in a column index."
},
{
"code": null,
"e": 3143,
"s": 3010,
"text": "For example, if the column you are interested in viewing contains an int, you need to use one of the getInt() methods of ResultSet −"
},
{
"code": null,
"e": 3210,
"s": 3143,
"text": "Returns the int in the current row in the column named columnName."
},
{
"code": null,
"e": 3391,
"s": 3210,
"text": " Returns the int in the current row in the specified column index. The column index\nstarts at 1, meaning the first column of a row is 1, the second column of a row is 2, and so on."
},
{
"code": null,
"e": 3581,
"s": 3391,
"text": "Similarly, there are get methods in the ResultSet interface for each of the eight Java primitive types, as well as common types such as java.lang.String, java.lang.Object, and java.net.URL."
},
{
"code": null,
"e": 3794,
"s": 3581,
"text": "There are also methods for getting SQL data types java.sql.Date, java.sql.Time, java.sql.TimeStamp, java.sql.Clob, and java.sql.Blob. Check the documentation for more information about using these SQL data types."
},
{
"code": null,
"e": 3869,
"s": 3794,
"text": "Following is the example which makes use of few viewing methods described."
},
{
"code": null,
"e": 3978,
"s": 3869,
"text": "This sample code has been written based on the environment and database setup done in the previous chapters."
},
{
"code": null,
"e": 4069,
"s": 3978,
"text": "Copy and paste the following example in TestApplication.java, compile and run as follows −"
},
{
"code": null,
"e": 6709,
"s": 4069,
"text": "import java.sql.Connection;\nimport java.sql.DriverManager;\nimport java.sql.ResultSet;\nimport java.sql.SQLException;\nimport java.sql.Statement;\n\npublic class TestApplication {\n static final String DB_URL = \"jdbc:mysql://localhost/TUTORIALSPOINT\";\n static final String USER = \"guest\";\n static final String PASS = \"guest123\";\n static final String QUERY = \"SELECT id, first, last, age FROM Employees\";\n\n public static void main(String[] args) {\n // Open a connection\n try(Connection conn = DriverManager.getConnection(DB_URL, USER, PASS);\n Statement stmt = conn.createStatement(\n ResultSet.TYPE_SCROLL_INSENSITIVE,\n ResultSet.CONCUR_READ_ONLY);\n ResultSet rs = stmt.executeQuery(QUERY);\n ) {\t\t\n // Move cursor to the last row.\n System.out.println(\"Moving cursor to the last...\");\n rs.last();\n\n // Extract data from result set\n System.out.println(\"Displaying record...\");\n //Retrieve by column name\n int id = rs.getInt(\"id\");\n int age = rs.getInt(\"age\");\n String first = rs.getString(\"first\");\n String last = rs.getString(\"last\");\n\n // Display values\n System.out.print(\"ID: \" + id);\n System.out.print(\", Age: \" + age);\n System.out.print(\", First: \" + first);\n System.out.println(\", Last: \" + last);\n\n // Move cursor to the first row.\n System.out.println(\"Moving cursor to the first row...\");\n rs.first();\n\n // Extract data from result set\n System.out.println(\"Displaying record...\");\n // Retrieve by column name\n id = rs.getInt(\"id\");\n age = rs.getInt(\"age\");\n first = rs.getString(\"first\");\n last = rs.getString(\"last\");\n\n // Display values\n System.out.print(\"ID: \" + id);\n System.out.print(\", Age: \" + age);\n System.out.print(\", First: \" + first);\n System.out.println(\", Last: \" + last);\n // Move cursor to the first row.\n\n System.out.println(\"Moving cursor to the next row...\");\n rs.next();\n\n // Extract data from result set\n System.out.println(\"Displaying record...\");\n id = rs.getInt(\"id\");\n age = rs.getInt(\"age\");\n first = rs.getString(\"first\");\n last = rs.getString(\"last\");\n\n // Display values\n System.out.print(\"ID: \" + id);\n System.out.print(\", Age: \" + age);\n System.out.print(\", First: \" + first);\n System.out.println(\", Last: \" + last);\t\t\n\n } catch (SQLException e) {\n e.printStackTrace();\n } \n }\n}"
},
{
"code": null,
"e": 6759,
"s": 6709,
"text": "Now let us compile the above example as follows −"
},
{
"code": null,
"e": 6796,
"s": 6759,
"text": "C:\\>javac TestApplication.java\nC:\\>\n"
},
{
"code": null,
"e": 6861,
"s": 6796,
"text": "When you run TestApplication, it produces the following result −"
},
{
"code": null,
"e": 7182,
"s": 6861,
"text": "C:\\>java TestApplication\nMoving cursor to the last...\nDisplaying record...\nID: 103, Age: 30, First: Sumit, Last: Mittal\nMoving cursor to the first row...\nDisplaying record...\nID: 100, Age: 18, First: Zara, Last: Ali\nMoving cursor to the next row...\nDisplaying record...\nID: 101, Age: 25, First: Mehnaz, Last: Fatma\nC:\\>\n"
},
{
"code": null,
"e": 7215,
"s": 7182,
"text": "\n 16 Lectures \n 2 hours \n"
},
{
"code": null,
"e": 7231,
"s": 7215,
"text": " Malhar Lathkar"
},
{
"code": null,
"e": 7264,
"s": 7231,
"text": "\n 19 Lectures \n 5 hours \n"
},
{
"code": null,
"e": 7280,
"s": 7264,
"text": " Malhar Lathkar"
},
{
"code": null,
"e": 7315,
"s": 7280,
"text": "\n 25 Lectures \n 2.5 hours \n"
},
{
"code": null,
"e": 7329,
"s": 7315,
"text": " Anadi Sharma"
},
{
"code": null,
"e": 7363,
"s": 7329,
"text": "\n 126 Lectures \n 7 hours \n"
},
{
"code": null,
"e": 7377,
"s": 7363,
"text": " Tushar Kale"
},
{
"code": null,
"e": 7414,
"s": 7377,
"text": "\n 119 Lectures \n 17.5 hours \n"
},
{
"code": null,
"e": 7429,
"s": 7414,
"text": " Monica Mittal"
},
{
"code": null,
"e": 7462,
"s": 7429,
"text": "\n 76 Lectures \n 7 hours \n"
},
{
"code": null,
"e": 7481,
"s": 7462,
"text": " Arnab Chakraborty"
},
{
"code": null,
"e": 7488,
"s": 7481,
"text": " Print"
},
{
"code": null,
"e": 7499,
"s": 7488,
"text": " Add Notes"
}
]
|
The Binomial Regression Model: Everything You Need to Know | Towards Data Science | This article is divided into two sections:
SECTION 1: Introduction to the Binomial Regression model: We’ll get introduced to the Binomial Regression model, see how it fits into the family of Generalized Linear Models, and why it can be used to predict the odds of seeing a random event.
SECTION 2: Using the Binomial regression model: We’ll train a Binomial Regression model on the real world Titanic data set using Python and the statsmodels library. We’ll see why the Binomial regression model is the right model for predicting the odds of survival on the Titanic. We’ll also learn how to interpret the fitted model’s regression coefficients, a necessary skill to learn, which in case of the Titanic data set produces astonishing results.
The Binomial Regression model can be used for predicting the odds of seeing an event, given a vector of regression variables. For e.g. one could use the Binomial Regression model to predict the odds of its starting to rain in the next 2 hours, given the current temperature, humidity, barometric pressure, time of year, geo-location, altitude etc.
In a Binomial Regression model, the dependent variable y is a discrete random variable that takes on values such as 0, 1, 5, 67 etc. Each value represents the number of ‘successes’ observed in m trials. Thus y follows the binomial distribution.
The following equation gives the probability of observing k successes in m independent Bernoulli trials. Each Bernoulli trial has a probability of success=π and probability of failure=(1-π). A coin toss is the simplest example of a Bernoulli trial in which π = (1-π) = 0.5.
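For reference, the equation described here is the standard binomial probability mass function, written below using the article's own symbols (m trials, k successes, success probability π):

\Pr(y = k) = \binom{m}{k} \, \pi^{k} \, (1 - \pi)^{m - k}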
The vertically bracketed term (m k) is the notation for a ‘Combination’ and is read as ‘m choose k’. It gives you the number of different ways to choose k outcomes from a set of m possible outcomes.
In a regression model, we will assume that the dependent variable y depends on an (n X p) size matrix of regression variables X. The ith row in X can be denoted as x_i which is a vector of size (1 X p ). It corresponds to the ith outcome y_i.
One usually expresses the probability of y_i taking a certain value k as conditional upon the regression variables X taking the value x_i. In notation form, it can be written as Pr(y_i=k|X=x_i) and can be read as probability of y_i being k given that X is x_i.
We can now state the probability distribution of the Binomially distributed y in the context of a regression of y over X as follows:
On the L.H.S. of the above mentioned PMF equation, we will replace the unconditional probability Pr(y=k) with the conditional probability Pr(y_i=k|X=x_i).
In the R.H.S, we will replace the unconditional probability π of observing a success with the conditional probability π_i, where π_i is the probability of observing a success for the ith row of the data set, i.e. when the regression vector X=x_i.
With these two substitutions, the PMF of the binomially distributed y becomes as follows:
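Carrying out the two substitutions described above, the PMF takes the following form (π_i being the success probability when X = x_i):

\Pr(y_i = k \mid X = x_i) = \binom{m}{k} \, \pi_i^{\,k} \, (1 - \pi_i)^{m - k}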
In the above equation, the probability of observing a success π_i for some X=x_i, is usually expressed as some function g(.) of x_i. In symbolic terms:
The above set of concepts can be neatly visualized using a single illustration as follows:
In the above graph, y_1, y_2, y_3,...y_10 are ten binomially distributed random variables. They also happen to be the constituents of the dependent variable y which is a (10 X 1) matrix as follows: y=[[y_1], [y_2],[y_3],...,[y_10]]. The corresponding regression variables matrix X in this case also happens to be a 10 X 1 matrix since there is only one regression variable involved: X=[[1], [2],[3],[4], ...,[10]].
Because, y is a random variable with spread m, the plot shows how for each value of X=x_i, y can take any binomially distributed value around its expected value μ_i where μ_i = m*π_i and π_i as we saw earlier is some function g(.) of x_i. And thus, the expected value of y_i which is μ_i, can be expressed as some function of x_i.
The Binomial Regression model is part of the family of Generalized Linear Models. GLMs are used to model the relationship between the expected value of a response variable y and a linear combination of the explanatory variables vector X.
The relationship between E(y|X) and X is expressed by means of a suitable link function, as follows:
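In the symbols used throughout this article (g(.) for the link function, x_i for the ith row of X, and β for the vector of p regression coefficients), this relationship reads:

g\big(E(y \mid X = x_i)\big) = \sum_{j=1}^{p} \beta_j \, x_{ij}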
In the above equation, g(.) is the link function that connects the conditional expectation of y on X with a linear combination of the regression variables x_i. X being the matrix of regression variables of size (n X p) where n=rows and p=regression variables in each row, and X=x_i being the ith row in this matrix of size (1 X p) and β being a (p X 1) vector of regression coefficients.
When y is binomially distributed, we are interested in fixing the relation between the conditional expectation of the probability π of a single Bernoulli trial on a particular value of X=x_i, i.e. E(π=π_i|X=x_i), or concisely, π_i|x_i. So the GLM equation for the Binomial regression model can be written as follows:
In case of the Binomial Regression model, the link function g(.) takes one of the following four forms (we’ll stop mentioning the conditional notation |X=x_i in each for simplicity, but just assume that it is there):
The Logistic (logit) link function, also known as the log-odds function:
The logistic is known as the log-odds function because it is expressed as the ratio of the probability of success to probability of failure, i.e. the log of the odds of success. We will be using this link function later on in this article.
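Written out in the same notation, the logit link equates the log of the odds of success with the linear combination of the regression variables:

g(\pi_i) = \ln\!\left(\frac{\pi_i}{1 - \pi_i}\right) = \sum_{j=1}^{p} \beta_j \, x_{ij}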
The probit link function:
The probit (short for probability unit) link function is used to model the occurrence of an event that has a binary Yes/No outcome. This link function is expressed as the inverse of the Cumulative Distribution Function Φ(.) of the standard normal distribution N(0,1).
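In the same notation, the probit link takes the standard form below, with Φ being the CDF of the standard normal distribution N(0,1):

g(\pi_i) = \Phi^{-1}(\pi_i) = \sum_{j=1}^{p} \beta_j \, x_{ij}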
The book Regression analysis of count data by Colin Cameron and Pravin K. Trivedi provides an excellent introduction to the Probit link function in section 3.6: Ordered and Other Discrete-Choice Models. In there, you will also find a very lucid derivation of why the Probit model’s link function happens to be the Inverse of the CDF Φ(.) of the normal distribution.
The log-log function:
The log-log function is useful for modeling ‘Poisson-like counting processes’ in which the parameter of the probability distribution (which often contains the mean) lies in the exponent of the probability distribution’s formula, and the parameter is also expressed as an exponent of a linear combination of the regression variables. Thus, it has the double exponent format: exp(exp(−β.x_i)), and therefore two consecutive logarithm operations are needed to bring the β.x_i term down to ‘ground level’.
The complementary log-log link function:
The complementary log-log is called so because it operates on (1-π_i) i.e. the probability of failure, instead of π_i.
In this article, we’ll use the logistic a.k.a. the logit a.k.a. the log-odds link function to build our Binomial Regression model. Here it is once again, this time expressed in a slightly different way. On the R.H.S, I have replaced summation with the bolded vector notation:
We’ll use the Titanic data set as an example to understand the kinds of use-cases that are appropriate for the Binomial regression model.
Here is a link to the original data set.
The Titanic data set contains information about 887 of the 2229 souls aboard the ill-fated ocean liner Titanic. Each passenger’s record contains the following attributes:
Name
Passenger class (1/2/3)
Sex
Age
Whether the passenger was accompanied by siblings, parents or children
The Fare they paid, and most importantly,
Whether they survived (1=Survived, 0=Died)
Using Python and the Pandas data analysis library, let’s load the data set into a Pandas data frame, and print out the first few rows:
import pandas as pd

df = pd.read_csv('titanic_dataset.csv', header=0)
df.head(10)
This prints out the following output:
We’ll focus attention on four key attributes:
Pclass (the passenger’s class)
Age
Sex
Survived (Whether they survived)
Let’s drop the rest of the columns from the Data Frame:
#Drop the columns that our model will not use
df = df.drop(['Name','Siblings/Spouses Aboard', 'Parents/Children Aboard', 'Fare'], axis=1)

#print the top 10 rows
df.head(10)
This prints out the following:
We’ll postulate that while the Titanic was going down, the combination of [Pclass, Age, Sex] greatly influenced the odds of a passenger’s survival.
Note that the ‘Survived’ column contains a [0, 1] Bernoulli random variable.
So can we say the following?
Regression variables X = [Pclass, Age, Sex], and,
Dependent variable is the BOOLEAN VARIABLE y = [Survived]
No, we cannot. Let’s see why...
Since y is a boolean variable, it may seem like a straight-forward case for using a Probit Regression model. But notice that if one is unfortunate enough to be on a ship such as the Titanic, what one wants to know is not the answer to the binary question: will I survive with 100% certainty or will I die with 100% certainty? Instead, what is more useful to know are the odds of survival.
For example, if you are a 22 years old woman in the second class cabin of the ship, you’ll want to know if your odds of survival are 1 out of 10, 1 out of 4, 1 out of 50 etc.
But the manner in which the Titanic data set is organized, the response variable survived has a yes/no i.e. 1/0 format. In other words, survived has a Bernoulli distribution, i.e. :
Pr(survived=0) = π,
Pr(survived=1) = (1-π)
Where π is some probability between 0 and 1.
What we want is for y to express the odds, i.e. the ratio of successes (survivals) to failures (deaths), in m independent, identical trials.
In other words, what we want is for y to have a Log-Odds distribution.
And therefore, instead of using a True or False, 1 or 0 type Probit regression model, what we want to do here is build a Binomial regression model where the response variable is Binomially distributed, and the link function is the Logit i.e. log-odds function.
The link function will allow us to link the odds of survival to a linear combination of the regression variables X=[Pclass, Age & Sex] plus the intercept, as follows:
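Concretely, if π_i denotes the probability of survival for the ith combination of regression variables, the model we want to fit has the form (β_0 being the intercept):

\ln\!\left(\frac{\pi_i}{1 - \pi_i}\right) = \beta_0 + \beta_1 \, Pclass_i + \beta_2 \, Age_i + \beta_3 \, Sex_i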
To transform the response variable y from Bernoulli to Binomial, we must group together the data set rows that share the same combination of X values [Pclass, Age and Sex]. Before we go about doing that, there is one little thing we need to take care of, and that is bucketing of the Age attribute. You see, Age, the way it is expressed in the data set, is a continuous variable that ranges from 0.42 to 80.
It hardly seems plausible that babies that were 0.42 years and 0.67 years old respectively would have had different odds of survival. Ditto logic holds true for youths with ages 26, 27, 28, 29 etc. years old, and so on for other cases.
We need to make the age data more granular so as to limit the number of groups. Let’s do this by bucketing the overall age range into bins of size 5 years and label each bin like so:
(0, 5] → 5
(5, 10] → 10
(10, 15] → 15
and so on. The pandas.cut() method does the bucketing very neatly:
#define the bins
age_range_bins=[0,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80]

#define the label for each bin. Num labels = Num bins - 1
age_range_labels=[5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80]

#Cut up the age range into multiple bins and stuff them into a new Age_Range column
df['Age_Range']=pd.cut(df['Age'],age_range_bins,labels=age_range_labels)

#Print the output
df.head(10)
We get the following output. Note the new Age_Range column we have added:
Let’s drop the Age column as we’ll use Age_Range instead:
#Drop the age column
df = df.drop(['Age'],axis=1)
Now that we have our explanatory variables set up the way we want, let’s get to work on grouping together the samples by the combination [Pclass, Sex, Age_Range]. We’ll use the Pandas groupby() method.
#Group by ['Pclass', 'Sex', 'Age_Range']
groups = df.groupby(['Pclass', 'Sex', 'Age_Range'])

#Get the size (number of passengers) in each group
df_grouped_counts = groups.count()

#Get the number of passengers in each group who survived
df_grouped_survived = groups.sum()
Merge the number of survivors and number of passengers for each group into each grouped data frame. (We’ll see how this helps us in a minute):
df_grouped_counts.to_csv('df_grouped_counts.csv')
df_grouped_counts_1 = pd.read_csv('df_grouped_counts.csv', header=0)

df_grouped_survived.to_csv('df_grouped_survived.csv')
df_grouped_survived_1 = pd.read_csv('df_grouped_survived.csv', header=0)
Finally, let’s construct a new Data Frame which contains:
The grouped columns Pclass, Sex, Age_Range,
The corresponding number of survivors in each group,
The total number of passengers in each group i.e. the group size, and,
The number of passengers in each group who died.
#Create a new Data Frame
df_grouped = pd.DataFrame()

#Copy over the Pclass, Sex and Age Range columns
df_grouped['Pclass'] = df_grouped_counts_1['Pclass']
df_grouped['Sex'] = df_grouped_counts_1['Sex']
df_grouped['Age_Range'] = df_grouped_counts_1['Age_Range']

#Copy over the num passengers from the counts grouped Data Frame
df_grouped['Total'] = df_grouped_counts_1['Survived']

#Copy over the num survivors from the summation grouped Data Frame
df_grouped['Survived'] = df_grouped_survived_1['Survived']

#Add a column containing the number who died
df_grouped['Died'] = df_grouped['Total'] - df_grouped['Survived']
Let’s print out the first 20 rows of the grouped data set:
df_grouped.head(20)
Let’s see what the grouped data set is telling us. I have highlighted row numbers 9, 14 and 19 for illustration:
In row #9, we find that there were 10 women in the age range (45, 50] with a first class ticket of which 9 survived. Thus the odds of survival for a woman in this group were pretty good (9 to 1), especially if she occupied a first class cabin.
In row #19, we see there were 4 male passengers aged (15- 20] of which only one survived.
In row #14, we see that there weren’t any women passengers aged (70–75] who occupied a first class cabin. This is why we are seeing NaNs in the aggregate columns for the group: [1, female, 75].
Let’s remove all such NaN rows from the Data Frame:
df_grouped = df_grouped.dropna()
Build the Binomial Regression Model using Python and statsmodels
Before we build the Binomial model, let’s take care of one final data preparation task, namely, let’s replace the ‘female’ and ‘male’ strings with integers 1 and 2:
df_grouped=df_grouped.replace(to_replace={'female': 1, 'male': 2})
We’ll use the excellent support offered by the statsmodels library for building and training the Binomial Regression model.
Let’s carve out the training and testing data sets:
import numpy as np

#Separate out the training and test sets
mask = np.random.rand(len(df_grouped)) < 0.85
df_train = df_grouped[mask]
df_test = df_grouped[~mask]
Let’s set up the regression model’s formula using the patsy syntax. What we are saying in the formula below is that the dependent variable is a matrix composed of the Survived and Died columns of the dataframe, while the regression variables are Pclass, Age_Range and Sex.
#Construct the Binomial model's regression formula in Patsy syntax.
formula = 'Survived + Died ~ Pclass + Age_Range + Sex'
Using this formula, let’s carve out the X and y design matrices from the training and testing data frames which we created a minute ago:
from patsy import dmatrices

#Carve out the training matrices from the training data frame using the regression formula
y_train, X_train = dmatrices(formula, df_train, return_type='dataframe')

#Carve out the testing matrices from the testing data frame using the regression formula
y_test, X_test = dmatrices(formula, df_test, return_type='dataframe')
Next, we feed X_train and y_train into an instance of the Binomial Regression model class and train the model:
import statsmodels.api as sm

binom_model = sm.GLM(y_train, X_train, family=sm.families.Binomial())
binom_model_results = binom_model.fit()
Let’s print out the fitted model summary:
print(binom_model_results.summary())
This prints out the following:
In the above output, statsmodels is telling us that it has trained a Generalized Linear Model of type Binomial (because, well, we asked it to), that it used the log-odds link function, and that it used the Iteratively Reweighted Least Squares (IRLS) algorithm for training our model.
Statsmodels is reporting that our model has 3 degrees of freedom: Sex, Pclass and Age_Range, which seems about right:
For Binomial models, statsmodels calculates three goodness-of-fit measures for you: Maximum Log-likelihood, Deviance and Pearson Chi-squared. We won’t inspect them any further as all three measures are useful only when you are comparing the goodness-of-fit of two or more Binomial regression models which in this case, we aren’t:
Let’s look at the fitted coefficients:
All regression coefficients are statistically significant at the 0.1% margin of error as indicated by the p-values which are all < 0.001:
Let’s see what each coefficient is telling us.
For the logit link function, the fitted coefficients can be interpreted as follows:
Age_Range: Its coefficient is -0.0446. Note the negative value. The way to interpret this coefficient’s value is that, keeping all other variables constant, for each unit increase in the passenger’s age, the odds of their survival decreased by a factor = exp(-0.0446) = 0.9564. i.e. for each unit increase in the passenger’s age, one needs to multiply their survival odds by 0.9564, thereby reducing the odds of survival by a certain amount each time. For example, if a 12 year old male occupant of a 2nd class cabin had a known survival odds of 8:9 during the disaster, then a 22 year old male occupant of a 2nd class cabin had an odds of survival of (8/9) * 0.9564^10 = approximately 6:10.
Pclass: The coefficient for Pclass is -1.2008. Again note the negative sign. A downgrade of the cabin class of a passenger had an even more dramatic effect on the passenger’s odds of survival aboard the Titanic. The Pclass variable is coded as First class cabin=1, Second class cabin=2 and Third class cabin=3. So for every unit increase in the cabin class i.e. as one goes down from 1st class to 2nd class to 3rd class, the odds of survival, keeping age and sex constant, reduce by a factor of exp(-1.2008) = 0.30! i.e. for each unit downgrade, your odds of survival get multiplied by 0.30. For example, if a 30 year old male occupant of a 1st class cabin had a 7 : 9 odds of survival on the Titanic, just dropping him down one class to class 2 reduced his odds of survival to (7/9)*0.3 = approximately 1:4. Bumping down the class further to the 3rd class reduced the odds to (7/9)*0.3*0.3 = 7 : 100.
Sex: Finally, notice the very heavy negative coefficient of -2.6526 for the Sex variable. Aboard the sinking Titanic, male passengers had quite miserable chances of survival as compared to female passengers. Keeping Pclass and Age constant, the odds of survival of a male passenger were only exp(-2.6526) = 7% of those of a female passenger.
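As a quick sanity check on the arithmetic above, the odds multipliers can be reproduced directly from the fitted coefficients. This is a minimal sketch; the hard-coded values are the coefficients printed in the model summary (they could equally be read from binom_model_results.params):
import numpy as np

#Fitted coefficients reported in the model summary (approximate values)
coefficients = {'Pclass': -1.2008, 'Age_Range': -0.0446, 'Sex': -2.6526}

#Multiplicative effect on the odds of survival of a one-unit increase
#in each regression variable, holding the others constant
for name, beta in coefficients.items():
    print(name, round(np.exp(beta), 4))

#Example from the text: 8:9 odds for a 12 year old male in 2nd class,
#re-evaluated for a 22 year old (10 extra years of age)
print((8/9) * np.exp(coefficients['Age_Range'])**10)   #roughly 0.57, i.e. about 6:10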
Recollect that we had put aside the test data set in the Data Frame df_test. It’s time to test our model’s performance on this data set. To do that, we’ll first add a Percentage Survived column to the test data frame whose value we’ll ask our model to predict:
df_test['Pcnt_Survived'] = df_test['Survived']/df_test['Total']
We’ll use the .predict() method on the results object and pass the test data set get the predicted survival rate:
predicted_survival_rate = binom_model_results.predict(X_test)
Let’s plot the actual versus predicted survival rate:
import matplotlib.pyplot as plt

plt.xlabel('Actual Survival Rate')
plt.ylabel('Predicted Survival Rate')
plt.scatter(df_test['Pcnt_Survived'], predicted_survival_rate, color='blue')
plt.show()
As you can see, the fit becomes unacceptable when the survival rates are toward the top of the range i.e. 1.0. To a large extent the accuracy of the prediction is determined by the sample size i.e. the size of each group of passengers, grouped by the tuple [Pclass, Sex, Age Range]. For some groups in the training set, the group size is too small for the model to train in a meaningful way. For such combinations in the test data set, the accuracy will be understandably low.
Here is the link to the complete source code:
Here is the link to the Titanic data set used in this article.
A Binomial Regression model can be used to predict the odds of an event.
The Binomial Regression model is a member of the family of Generalized Linear Models which use a suitable link function to establish a relationship between the conditional expectation of the response variable y with a linear combination of explanatory variables X.
In the Binomial Regression model, we usually use the log-odds function as the link function.
The Logistic Regression model is a special case of the Binomial Regression model in the situation where the size of each group of explanatory variables in the data set is one.
McCullagh, P., Nelder, J. A. (FRS). Generalized Linear Models. 2nd Ed. (1989) Chapman & Hall
Thanks for reading! I write about topics in data science. If you liked this article, please follow me at Sachin Date to get info, insights and programming advice about how to do data science using Python.
| [
{
"code": null,
"e": 90,
"s": 47,
"text": "This article is divided into two sections:"
},
{
"code": null,
"e": 787,
"s": 90,
"text": "SECTION 1: Introduction to the Binomial Regression model: We’ll get introduced to the Binomial Regression model, see how it fits into the family of Generalized Linear Models, and why it can be used to predict the odds of seeing a random event.SECTION 2: Using the Binomial regression model: We’ll train a Binomial Regression model on the real world Titanic data set using Python and the statsmodels library. We’ll see why the Binomial regression model is the right model for predicting the odds of survival on the Titanic. We’ll also learn how to interpret the fitted model’s regression coefficients, a necessary skill to learn, which in case of the Titanic data set produces astonishing results."
},
{
"code": null,
"e": 1031,
"s": 787,
"text": "SECTION 1: Introduction to the Binomial Regression model: We’ll get introduced to the Binomial Regression model, see how it fits into the family of Generalized Linear Models, and why it can be used to predict the odds of seeing a random event."
},
{
"code": null,
"e": 1485,
"s": 1031,
"text": "SECTION 2: Using the Binomial regression model: We’ll train a Binomial Regression model on the real world Titanic data set using Python and the statsmodels library. We’ll see why the Binomial regression model is the right model for predicting the odds of survival on the Titanic. We’ll also learn how to interpret the fitted model’s regression coefficients, a necessary skill to learn, which in case of the Titanic data set produces astonishing results."
},
{
"code": null,
"e": 1833,
"s": 1485,
"text": "The Binomial Regression model can be used for predicting the odds of seeing an event, given a vector of regression variables. For e.g. one could use the Binomial Regression model to predict the odds of its starting to rain in the next 2 hours, given the current temperature, humidity, barometric pressure, time of year, geo-location, altitude etc."
},
{
"code": null,
"e": 2078,
"s": 1833,
"text": "In a Binomial Regression model, the dependent variable y is a discrete random variable that takes on values such as 0, 1, 5, 67 etc. Each value represents the number of ‘successes’ observed in m trials. Thus y follows the binomial distribution."
},
{
"code": null,
"e": 2352,
"s": 2078,
"text": "The following equation gives the probability of observing k successes in m independent Bernoulli trials. Each Bernoulli trial has a probability of success=π and probability of failure=(1-π). A coin toss is the simplest example of a Bernoulli trial in which π = (1-π) = 0.5."
},
{
"code": null,
"e": 2551,
"s": 2352,
"text": "The vertically bracketed term (m k) is the notation for a ‘Combination’ and is read as ‘m choose k’. It gives you the number of different ways to choose k outcomes from a set of m possible outcomes."
},
{
"code": null,
"e": 2794,
"s": 2551,
"text": "In a regression model, we will assume that the dependent variable y depends on an (n X p) size matrix of regression variables X. The ith row in X can be denoted as x_i which is a vector of size (1 X p ). It corresponds to the ith outcome y_i."
},
{
"code": null,
"e": 3055,
"s": 2794,
"text": "One usually expresses the probability of y_i taking a certain value k as conditional upon the regression variables X taking the value x_i. In notation form, it can be written as Pr(y_i=k|X=x_i) and can be read as probability of y_i being k given that X is x_i."
},
{
"code": null,
"e": 3188,
"s": 3055,
"text": "We can now state the probability distribution of the Binomially distributed y in the context of a regression of y over X as follows:"
},
{
"code": null,
"e": 3343,
"s": 3188,
"text": "On the L.H.S. of the above mentioned PMF equation, we will replace the unconditional probability Pr(y=k) with the conditional probability Pr(y_i=k|X=x_i)."
},
{
"code": null,
"e": 3590,
"s": 3343,
"text": "In the R.H.S, we will replace the unconditional probability π of observing a success with the conditional probability π_i, where π_i is the probability of observing a success for the ith row of the data set, i.e. when the regression vector X=x_i."
},
{
"code": null,
"e": 3680,
"s": 3590,
"text": "With these two substitutions, the PMF of the binomially distributed y becomes as follows:"
},
{
"code": null,
"e": 3832,
"s": 3680,
"text": "In the above equation, the probability of observing a success π_i for some X=x_i, is usually expressed as some function g(.) of x_i. In symbolic terms:"
},
{
"code": null,
"e": 3923,
"s": 3832,
"text": "The above set of concepts can be neatly visualized using a single illustration as follows:"
},
{
"code": null,
"e": 4338,
"s": 3923,
"text": "In the above graph, y_1, y_2, y_3,...y_10 are ten binomially distributed random variables. They also happen to the the constituents of the dependent variable y which is a (10 X 1) matrix as follows: y=[[y_1], [y_2],[y_3],...,[y_10]]. The corresponding regression variables matrix X in this case also happens to be a 10 X 1 matrix since there is only one regression variable involved:X=[[1], [2],[3],[4], ...,[10]]."
},
{
"code": null,
"e": 4669,
"s": 4338,
"text": "Because, y is a random variable with spread m, the plot shows how for each value of X=x_i, y can take any binomially distributed value around its expected value μ_i where μ_i = m*π_i and π_i as we saw earlier is some function g(.) of x_i. And thus, the expected value of y_i which is μ_i, can be expressed as some function of x_i."
},
{
"code": null,
"e": 4907,
"s": 4669,
"text": "The Binomial Regression model is part of the family of Generalized Linear Models. GLMs are used to model the relationship between the expected value of a response variable y and a linear combination of the explanatory variables vector X."
},
{
"code": null,
"e": 5008,
"s": 4907,
"text": "The relationship between E(y|X) and X is expressed by means of a suitable link function, as follows:"
},
{
"code": null,
"e": 5396,
"s": 5008,
"text": "In the above equation, g(.) is the link function that connects the conditional expectation of y on X with a linear combination of the regression variables x_i. X being the matrix of regression variables of size (n X p) where n=rows and p=regression variables in each row, and X=x_i being the ith row in this matrix of size (1 X p) and β being a (p X 1) vector of regression coefficients."
},
{
"code": null,
"e": 5713,
"s": 5396,
"text": "When y is binomially distributed, we are interested in fixing the relation between the conditional expectation of the probability π of a single Bernoulli trial on a particular value of X=x_i, i.e. E(π=π_i|X=x_i), or concisely, π_i|x_i. So the GLM equation for the Binomial regression model can be written as follows:"
},
{
"code": null,
"e": 5930,
"s": 5713,
"text": "In case of the Binomial Regression model, the link function g(.) takes one of the following four forms (we’ll stop mentioning the conditional notation |X=x_i in each for simplicity, but just assume that it is there):"
},
{
"code": null,
"e": 6003,
"s": 5930,
"text": "The Logistic (logit) link function, also known as the log-odds function:"
},
{
"code": null,
"e": 6243,
"s": 6003,
"text": "The logistic is known as the log-odds function because it is expressed as the ratio of the probability of success to probability of failure, i.e. the log of the odds of success. We will be using this link function later on in this article."
},
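{
"code": null,
"e": null,
"s": null,
"text": "(Added illustration, not part of the original article.) A quick numerical sketch of the log-odds link and its inverse, assuming an example success probability of π=0.8:\nimport numpy as np\n\npi = 0.8 #assumed example probability of success\nlog_odds = np.log(pi / (1 - pi)) #logit(0.8) ≈ 1.386\npi_back = 1 / (1 + np.exp(-log_odds)) #the inverse (sigmoid) recovers 0.8\nprint(log_odds, pi_back)"
},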
{
"code": null,
"e": 6269,
"s": 6243,
"text": "The probit link function:"
},
{
"code": null,
"e": 6537,
"s": 6269,
"text": "The probit (short for probability unit) link function is used to model the occurrence of an event that has a binary Yes/No outcome. This link function is expressed as the inverse of the Cumulative Distribution Function Φ(.) of the standard normal distribution N(0,1)."
},
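{
"code": null,
"e": null,
"s": null,
"text": "(Added illustration, not part of the original article.) For example, the probit of an assumed probability of 0.8 can be computed with scipy's inverse normal CDF:\nfrom scipy.stats import norm\n\npi = 0.8 #assumed example probability\nprobit = norm.ppf(pi) #inverse of the standard normal CDF, ≈ 0.8416\nprint(probit, norm.cdf(probit)) #cdf(probit) recovers 0.8"
},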
{
"code": null,
"e": 6903,
"s": 6537,
"text": "The book Regression analysis of count data by Colin Cameron and Pravin K. Trivedi provides an excellent introduction to the Probit link function in section 3.6: Ordered and Other Discrete-Choice Models. In there, you will also find a very lucid derivation of why the Probit model’s link function happens to be the Inverse of the CDF Φ(.) of the normal distribution."
},
{
"code": null,
"e": 6925,
"s": 6903,
"text": "The log-log function:"
},
{
"code": null,
"e": 7427,
"s": 6925,
"text": "The log-log function is useful for modeling ‘Poisson-like counting processes’ in which the parameter of the probability distribution (which often contains the mean) lies in the exponent of the probability distribution’s formula, and the parameter is also expressed as an exponent of a linear combination of the regression variables. Thus, it has the double exponent format: exp(exp( — β.x_i) and therefore two consecutive logarithm operations are needed to bring the β.x_i term down to ‘ground level’."
},
{
"code": null,
"e": 7468,
"s": 7427,
"text": "The complementary log-log link function:"
},
{
"code": null,
"e": 7587,
"s": 7468,
"text": "The complementary log-log is called so because it operates on (1-π_i) i.e. the probability of failure, instead of π_i."
},
{
"code": null,
"e": 7863,
"s": 7587,
"text": "In this article, we’ll use the logistic a.k.a. the logit a.k.a. the log-odds link function to build our Binomial Regression model. Here it is once again, this time expressed in a slightly different way. On the R.H.S, I have replaced summation with the bolded vector notation:"
},
{
"code": null,
"e": 8001,
"s": 7863,
"text": "We’ll use the Titanic data set as an example to understand the kinds of use-cases that are appropriate for the Binomial regression model."
},
{
"code": null,
"e": 8042,
"s": 8001,
"text": "Here is a link to the original data set."
},
{
"code": null,
"e": 8213,
"s": 8042,
"text": "The Titanic data set contains information about 887 of the 2229 souls aboard the ill-fated ocean liner Titanic. Each passenger’s record contains the following attributes:"
},
{
"code": null,
"e": 8218,
"s": 8213,
"text": "Name"
},
{
"code": null,
"e": 8242,
"s": 8218,
"text": "Passenger class (1/2/3)"
},
{
"code": null,
"e": 8246,
"s": 8242,
"text": "Sex"
},
{
"code": null,
"e": 8250,
"s": 8246,
"text": "Age"
},
{
"code": null,
"e": 8321,
"s": 8250,
"text": "Whether the passenger was accompanied by siblings, parents or children"
},
{
"code": null,
"e": 8363,
"s": 8321,
"text": "The Fare they paid, and most importantly,"
},
{
"code": null,
"e": 8406,
"s": 8363,
"text": "Whether they survived (1=Survived, 0=Died)"
},
{
"code": null,
"e": 8541,
"s": 8406,
"text": "Using Python and the Pandas data analysis library, let’s load the data set into a Pandas data frame, and print out the first few rows:"
},
{
"code": null,
"e": 8621,
"s": 8541,
"text": "import pandas as pddf = pd.read_csv('titanic_dataset.csv', header=0)df.head(10)"
},
{
"code": null,
"e": 8659,
"s": 8621,
"text": "This prints out the following output:"
},
{
"code": null,
"e": 8705,
"s": 8659,
"text": "We’ll focus attention on four key attributes:"
},
{
"code": null,
"e": 8736,
"s": 8705,
"text": "Pclass (the passenger’s class)"
},
{
"code": null,
"e": 8740,
"s": 8736,
"text": "Age"
},
{
"code": null,
"e": 8744,
"s": 8740,
"text": "Sex"
},
{
"code": null,
"e": 8777,
"s": 8744,
"text": "Survived (Whether they survived)"
},
{
"code": null,
"e": 8833,
"s": 8777,
"text": "Let’s drop the rest of the columns from the Data Frame:"
},
{
"code": null,
"e": 9003,
"s": 8833,
"text": "#Drop the columns that our model will not usedf = df.drop(['Name','Siblings/Spouses Aboard', 'Parents/Children Aboard', 'Fare'], axis=1)#print the top 10 rowsdf.head(10)"
},
{
"code": null,
"e": 9034,
"s": 9003,
"text": "This prints out the following:"
},
{
"code": null,
"e": 9187,
"s": 9034,
"text": "We’ll postulate that while the Titanic was going down, the combination of [Pclass, Age, Sex] have greatly influenced the odds of a passenger’s survival."
},
{
"code": null,
"e": 9264,
"s": 9187,
"text": "Note that the ‘Survived’ column contains a [0, 1] Bernoulli random variable."
},
{
"code": null,
"e": 9293,
"s": 9264,
"text": "So can we say the following?"
},
{
"code": null,
"e": 9343,
"s": 9293,
"text": "Regression variables X = [Pclass, Age, Sex], and,"
},
{
"code": null,
"e": 9401,
"s": 9343,
"text": "Dependent variable is the BOOLEAN VARIABLE y = [Survived]"
},
{
"code": null,
"e": 9433,
"s": 9401,
"text": "No, we cannot. Let’s see why..."
},
{
"code": null,
"e": 9822,
"s": 9433,
"text": "Since y is a boolean variable, it may seem like a straight-forward case for using a Probit Regression model. But notice that if one is unfortunate enough to be on a ship such as the Titanic, what one wants to know is not the answer to the binary question: will I survive with 100% certainty or will I die with 100% certainty? Instead, what is more useful to know are the odds of survival."
},
{
"code": null,
"e": 9997,
"s": 9822,
"text": "For example, if you are a 22 years old woman in the second class cabin of the ship, you’ll want to know if your odds of survival are 1 out of 10, 1 out of 4, 1 out of 50 etc."
},
{
"code": null,
"e": 10179,
"s": 9997,
"text": "But the manner in which the Titanic data set is organized, the response variable survived has a yes/no i.e. 1/0 format. In other words, survived has a Bernoulli distribution, i.e. :"
},
{
"code": null,
"e": 10265,
"s": 10179,
"text": "Pr(survived=0) = π,Pr(survived=1) = (1-π)Where π is some probability between 0 and 1."
},
{
"code": null,
"e": 10406,
"s": 10265,
"text": "What we want is for y to express the odds, i.e. the ratio of successes (survivals) to failures (deaths), in m independent, identical trials."
},
{
"code": null,
"e": 10472,
"s": 10406,
"text": "In other words, we want is for y to have a Log-Odds distribution."
},
{
"code": null,
"e": 10733,
"s": 10472,
"text": "And therefore, instead of using a True or False, 1 or 0 type Probit regression model, what we want to do here is build a Binomial regression model where the response variable is Binomially distributed, and the link function is the Logit i.e. log-odds function."
},
{
"code": null,
"e": 10900,
"s": 10733,
"text": "The link function will allow us to link the odds of survival to a linear combination of the regression variables X=[Pclass, Age & Sex] plus the intercept, as follows:"
},
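{
"code": null,
"e": null,
"s": null,
"text": "(Added illustration, not part of the original article; the equation itself appears as an image in the original.) To see what this relationship means numerically, here is a small sketch in which the coefficient values are made up purely for illustration; the actual fitted values appear later in the article:\nimport numpy as np\n\n#hypothetical coefficients for [intercept, Pclass, Age, Sex]\nbeta = np.array([3.0, -1.2, -0.04, -2.6])\nx_i = np.array([1.0, 2.0, 25.0, 1.0]) #a passenger: 2nd class, age 25, Sex coded as 1\n\nlog_odds = beta @ x_i #the linear combination gives the log-odds of survival\nodds = np.exp(log_odds) #odds of survival\nprob = odds / (1 + odds) #probability of survival\nprint(log_odds, odds, prob)"
},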
{
"code": null,
"e": 11308,
"s": 10900,
"text": "To transform the response variable y from Bernoulli to Binomial, we must group together the data set rows that share the same combination of X values [Pclass, Age and Sex]. Before we go about doing that, there is one little thing we need to take care of, and that is bucketing of the Age attribute. You see, Age, the way it is expressed in the data set, is a continuous variable that ranges from 0.42 to 80."
},
{
"code": null,
"e": 11544,
"s": 11308,
"text": "It hardly seems plausible that babies that were 0.42 years and 0.67 years old respectively would have had different odds of survival. Ditto logic holds true for youths with ages 26, 27, 28, 29 etc. years old, and so on for other cases."
},
{
"code": null,
"e": 11828,
"s": 11544,
"text": "We need to make the age data more granular so as to limit the number of groups. Let’s do this by bucketing the overall age range into bins of size 5 years and label each bin like so:(0, 5] → 5(5, 10] → 10(10, 15] → 15 and so on.The pandas.cut() method does the bucketing very neatly:"
},
{
"code": null,
"e": 12215,
"s": 11828,
"text": "#define the binsage_range_bins=[0,5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80]#define the label for each bin. Num labels = Num bins - 1age_range_labels=[5,10,15,20,25,30,35,40,45,50,55,60,65,70,75,80]#Cut up the age range into multiple bins and stuff them into a new Age_Range columndf['Age_Range']=pd.cut(df['Age'],age_range_bins,labels=age_range_labels)#Print the outputdf.head(10)"
},
{
"code": null,
"e": 12289,
"s": 12215,
"text": "We get the following output. Note the new Age_Range column we have added:"
},
{
"code": null,
"e": 12347,
"s": 12289,
"text": "Let’s drop the Age column as we’ll use Age_Range instead:"
},
{
"code": null,
"e": 12396,
"s": 12347,
"text": "#Drop the age columndf = df.drop(['Age'],axis=1)"
},
{
"code": null,
"e": 12598,
"s": 12396,
"text": "Now that we have our explanatory variables set up the way we want, let’s get to work on grouping together the samples by the combination [Pclass, Sex, Age_Range]. We’ll use the Pandas groupby() method."
},
{
"code": null,
"e": 12905,
"s": 12598,
"text": "#Group by ['Pclass', 'Sex', 'Age_Range'] groups = df.groupby(['Pclass', 'Sex', 'Age_Range'])#Get the counts for each group. This is the number of passengers in each group who have surviveddf_grouped_counts = groups.count()#Get the size (number of passengers) in each groupdf_grouped_survived = groups.sum()"
},
{
"code": null,
"e": 13048,
"s": 12905,
"text": "Merge the number of survivors and number of passengers for each group into each grouped data frame. (We’ll see how this helps us in a minute):"
},
{
"code": null,
"e": 13291,
"s": 13048,
"text": "df_grouped_counts.to_csv('df_grouped_counts.csv')df_grouped_counts_1 = pd.read_csv('df_grouped_counts.csv', header=0)df_grouped_survived.to_csv('df_grouped_survived.csv')df_grouped_survived_1 = pd.read_csv('df_grouped_survived.csv', header=0)"
},
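{
"code": null,
"e": null,
"s": null,
"text": "(Added side note, not part of the original article.) The CSV round-trip above is just one way of flattening the grouped, multi-level index back into ordinary columns. If you prefer to stay in memory, pandas' reset_index() achieves the same thing, assuming the same groups object:\ndf_grouped_counts_1 = groups.count().reset_index()\ndf_grouped_survived_1 = groups.sum().reset_index()"
},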
{
"code": null,
"e": 13349,
"s": 13291,
"text": "Finally, let’s construct a new Data Frame which contains:"
},
{
"code": null,
"e": 13393,
"s": 13349,
"text": "The grouped columns Pclass, Sex, Age_Range,"
},
{
"code": null,
"e": 13446,
"s": 13393,
"text": "The corresponding number of survivors in each group,"
},
{
"code": null,
"e": 13517,
"s": 13446,
"text": "The total number of passengers in each group i.e. the group size, and,"
},
{
"code": null,
"e": 13566,
"s": 13517,
"text": "The number of passengers in each group who died."
},
{
"code": null,
"e": 14172,
"s": 13566,
"text": "#Create a new Data Framedf_grouped = pd.DataFrame()#Copy over the Pclass, Sex and Age Range columnsdf_grouped['Pclass'] = df_grouped_counts_1['Pclass']df_grouped['Sex'] = df_grouped_counts_1['Sex']df_grouped['Age_Range'] = df_grouped_counts_1['Age_Range']#Copy over the num passengers from the counts grouped Data Framedf_grouped['Total'] = df_grouped_counts_1['Survived']#Copy over the num survivors from the summation grouped Data Framedf_grouped['Survived'] = df_grouped_survived_1['Survived']#Add a column containing the number who dieddf_grouped[’Died’] = df_grouped[’Total’] - df_grouped[’Survived’]"
},
{
"code": null,
"e": 14231,
"s": 14172,
"text": "Let’s print out the first 20 rows of the grouped data set:"
},
{
"code": null,
"e": 14251,
"s": 14231,
"text": "df_grouped.head(20)"
},
{
"code": null,
"e": 14364,
"s": 14251,
"text": "Let’s see what the grouped data set is telling us. I have highlighted row numbers 9, 14 and 19 for illustration:"
},
{
"code": null,
"e": 14608,
"s": 14364,
"text": "In row #9, we find that there were 10 women in the age range (45, 50] with a first class ticket of which 9 survived. Thus the odds of survival for a woman in this group were pretty good (9 to 1), especially if she occupied a first class cabin."
},
{
"code": null,
"e": 14698,
"s": 14608,
"text": "In row #19, we see there were 4 male passengers aged (15- 20] of which only one survived."
},
{
"code": null,
"e": 14892,
"s": 14698,
"text": "In row #14, we see that there weren’t any women passengers aged (70–75] who occupied a first class cabin. This is why we are seeing NaNs in the aggregate columns for the group: [1, female, 75]."
},
{
"code": null,
"e": 14944,
"s": 14892,
"text": "Let’s remove all such NaN rows from the Data Frame:"
},
{
"code": null,
"e": 14977,
"s": 14944,
"text": "df_grouped = df_grouped.dropna()"
},
{
"code": null,
"e": 15042,
"s": 14977,
"text": "Build the Binomial Regression Model using Python and statsmodels"
},
{
"code": null,
"e": 15207,
"s": 15042,
"text": "Before we build the Binomial model, let’s take care of one final data preparation task, namely, let’s replace the ‘female’ and ‘male’ strings with integers 1 and 2:"
},
{
"code": null,
"e": 15274,
"s": 15207,
"text": "df_grouped=df_grouped.replace(to_replace={'female': 1, 'male': 2})"
},
{
"code": null,
"e": 15398,
"s": 15274,
"text": "We’ll use the excellent support offered by the statsmodels library for building and training the Binomial Regression model."
},
{
"code": null,
"e": 15450,
"s": 15398,
"text": "Let’s carve out the training and testing data sets:"
},
{
"code": null,
"e": 15608,
"s": 15450,
"text": "import numpy as np#Separate out the training and test setsmask = np.random.rand(len(df_grouped)) < 0.85df_train = df_grouped[mask]df_test = df_grouped[~mask]"
},
{
"code": null,
"e": 15887,
"s": 15608,
"text": "Let’s set up the regression model’s formula using the patsy syntax. What we are saying in below mentioned formula is that the dependent variable is a matrix composed of the Survived and Died columns of the dataframe, while the regression variables are Pclass, Age_Range and Sex."
},
{
"code": null,
"e": 16009,
"s": 15887,
"text": "#Construct the Binomial model's regression formula in Patsy syntax.formula = 'Survived + Died ~ Pclass + Age_Range + Sex'"
},
{
"code": null,
"e": 16146,
"s": 16009,
"text": "Using this formula, let’s carve out the X and y design matrices from the training and testing data frames which we created a minute ago:"
},
{
"code": null,
"e": 16493,
"s": 16146,
"text": "from patsy import dmatrices#Carve out the training matrices from the training data frame using the regression formulay_train, X_train = dmatrices(formula, df_train, return_type='dataframe')#Carve out the testing matrices from the testing data frame using the regression formulay_test, X_test = dmatrices(formula, df_test, return_type='dataframe')"
},
{
"code": null,
"e": 16604,
"s": 16493,
"text": "Next, we feed X_train and y_train into an instance of the Binomial Regression model class and train the model:"
},
{
"code": null,
"e": 16741,
"s": 16604,
"text": "import statsmodels.api as smbinom_model = sm.GLM(y_train, X_train, family=sm.families.Binomial())binom_model_results = binom_model.fit()"
},
{
"code": null,
"e": 16783,
"s": 16741,
"text": "Let’s print out the fitted model summary:"
},
{
"code": null,
"e": 16820,
"s": 16783,
"text": "print(binom_model_results.summary())"
},
{
"code": null,
"e": 16851,
"s": 16820,
"text": "This prints out the following:"
},
{
"code": null,
"e": 17130,
"s": 16851,
"text": "In the above output, statsmodels is telling us that it has trained a Generalized Linear Model of type Binomial because, well, we asked it to, that it used the log-odds link function and it has used the Iterative Re-weighted Least Squares (IRLS) algorithm for training our model."
},
{
"code": null,
"e": 17248,
"s": 17130,
"text": "Statsmodels is reporting that our model has 3 degrees of freedom: Sex, Pclass and Age_Range, which seems about right:"
},
{
"code": null,
"e": 17578,
"s": 17248,
"text": "For Binomial models, statsmodels calculates three goodness-of-fit measures for you: Maximum Log-likelihood, Deviance and Pearson Chi-squared. We won’t inspect them any further as all three measures are useful only when you are comparing the goodness-of-fit of two or more Binomial regression models which in this case, we aren’t:"
},
{
"code": null,
"e": 17617,
"s": 17578,
"text": "Let’s look at the fitted coefficients:"
},
{
"code": null,
"e": 17755,
"s": 17617,
"text": "All regression coefficients are statistically significant at the 0.1% margin of error as indicated by the p-values which are all < 0.001:"
},
{
"code": null,
"e": 17802,
"s": 17755,
"text": "Let’s see what each coefficient is telling us."
},
{
"code": null,
"e": 17886,
"s": 17802,
"text": "For the logit link function, the fitted coefficients can be interpreted as follows:"
},
{
"code": null,
"e": 18579,
"s": 17886,
"text": "Age_Range: It’s coefficient is -0.0446. Note the negative value. The way to interpret this coefficient’s value is that, keeping all other variables constant, for each unit increase in the passenger’s age, the odds of their survival decreased by a factor = exp(-0.0446) = 0.9564. i.e. for each unit increase in the passenger’s age, one needs to multiple their survival odds by 0.9564, thereby reducing the odds of survival by a certain amount each time. For example, if a 12 year old male occupant of a 2nd class cabin had a known survival odds of 8:9 during the disaster, then a 22 years old male occupant of a 2nd class cabin had an odds of survival of (8/9) * 0.956410 = approximately 6:10."
},
{
"code": null,
"e": 19480,
"s": 18579,
"text": "Pclass: The coefficient for Pclass is -1.2008. Again note the negative sign. A downgrade of the cabin class of a passenger had an even more dramatic effect on the passenger’s odds of survival aboard the Titanic. The Pclass variable is coded as First class cabin=1, Second class cabin=2 and Third class cabin=3. So for every unit increase in the cabin class i.e. as one goes down from 1st class to 2nd class to 3rd class, the odds of survival, keeping age and sex constant, reduce by a factor of exp(-1.2008) = 0.30! i.e. for each unit downgrade, your odds of survival get multiplied by 0.30. For e.g., if a 30 years old male occupant of a 1st class cabin had a 7 : 9 odds of survival on the Titanic, just dropping him down one class to class 2, reduced his odds of survival to (7/9)*0.3 = approximately 1:4. Bumping down the class further to the 3rd class reduced the odds to (7/9)*0.3*0.3 = 7 : 100."
},
{
"code": null,
"e": 19822,
"s": 19480,
"text": "Sex: Finally, notice the very heavy negative coefficient of -2.6526 for the Sex variable. Aboard the sinking Titanic, male passengers had quite miserable chances of survival as compared to female passengers. Keeping Pclass and Age constant, the odds of survival of a male passenger was only exp(- 2.6526) = 7% of those of a female passenger."
},
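{
"code": null,
"e": null,
"s": null,
"text": "(Added illustration, not part of the original article.) The multiplicative effects quoted above can be reproduced directly from the fitted results object by exponentiating the coefficients, assuming the binom_model_results variable from the training step:\nimport numpy as np\n\nodds_ratios = np.exp(binom_model_results.params)\nprint(odds_ratios) #e.g. exp(-0.0446) ≈ 0.956, exp(-1.2008) ≈ 0.301, exp(-2.6526) ≈ 0.070"
},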
{
"code": null,
"e": 20083,
"s": 19822,
"text": "Recollect that we had put aside the test data set in the Data Frame df_test. It’s time to test our model’s performance on this data set. To do that, we’ll first add a Percentage Survived column to the test data frame whose value we’ll ask our model to predict:"
},
{
"code": null,
"e": 20147,
"s": 20083,
"text": "df_test['Pcnt_Survived'] = df_test['Survived']/df_test['Total']"
},
{
"code": null,
"e": 20261,
"s": 20147,
"text": "We’ll use the .predict() method on the results object and pass the test data set get the predicted survival rate:"
},
{
"code": null,
"e": 20323,
"s": 20261,
"text": "predicted_survival_rate = binom_model_results.predict(X_test)"
},
{
"code": null,
"e": 20377,
"s": 20323,
"text": "Let’s plot the actual versus predicted survival rate:"
},
{
"code": null,
"e": 20568,
"s": 20377,
"text": "import matplotlib.pyplot as pltplt.xlabel('Actual Survival Rate')plt.ylabel('Predicted Survival Rate')plt.scatter(df_test['Pcnt_Survived'], predicted_survival_rate, color = 'blue')plt.show()"
},
{
"code": null,
"e": 21045,
"s": 20568,
"text": "As you can see, the fit becomes unacceptable when the survival rates are toward the top of the range i.e. 1.0. To a large extent the accuracy of the prediction is determined by the sample size i.e. the size of each group of passengers, grouped by the tuple [Pclass, Sex, Age Range]. For some groups in the training set, the group size is too small for the model to train in a meaningful way. For such combinations in the test data set, the accuracy will be understandably low."
},
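{
"code": null,
"e": null,
"s": null,
"text": "(Added illustration, not part of the original article.) If you want a single number summarizing how far off the predictions are, the root mean squared error between the actual and predicted survival rates is easy to compute:\nimport numpy as np\n\nrmse = np.sqrt(np.mean((df_test['Pcnt_Survived'].values - predicted_survival_rate.values) ** 2))\nprint('RMSE of predicted survival rate:', rmse)"
},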
{
"code": null,
"e": 21091,
"s": 21045,
"text": "Here is the link to the complete source code:"
},
{
"code": null,
"e": 21154,
"s": 21091,
"text": "Here is the link to the Titanic data set used in this article."
},
{
"code": null,
"e": 21227,
"s": 21154,
"text": "A Binomial Regression model can be used to predict the odds of an event."
},
{
"code": null,
"e": 21492,
"s": 21227,
"text": "The Binomial Regression model is a member of the family of Generalized Linear Models which use a suitable link function to establish a relationship between the conditional expectation of the response variable y with a linear combination of explanatory variables X."
},
{
"code": null,
"e": 21585,
"s": 21492,
"text": "In the Binomial Regression model, we usually use the log-odds function as the link function."
},
{
"code": null,
"e": 21761,
"s": 21585,
"text": "The Logistic Regression model is a special case of the Binomial Regression model in the situation where the size of each group of explanatory variables in the data set is one."
},
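{
"code": null,
"e": null,
"s": null,
"text": "(Added illustration, not part of the original article.) In code terms, when each row holds a single 0/1 Bernoulli outcome (as in the ungrouped Titanic Data Frame with columns Survived, Pclass, Age and Sex), the same statsmodels Binomial family reduces to ordinary logistic regression; a minimal sketch under that assumption:\nimport statsmodels.api as sm\nimport statsmodels.formula.api as smf\n\n#one Bernoulli outcome per row, so the Binomial GLM is just logistic regression\nlogit_model = smf.glm('Survived ~ Pclass + Age + Sex', data=df, family=sm.families.Binomial())\nlogit_results = logit_model.fit()\nprint(logit_results.summary())"
},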
{
"code": null,
"e": 21784,
"s": 21761,
"text": "towardsdatascience.com"
},
{
"code": null,
"e": 21878,
"s": 21784,
"text": "McCullough, P., Nelder, J. A. (FRS). Generalized Linear Models. 2nd Ed. (1989) Chapman & Hall"
},
{
"code": null,
"e": 22083,
"s": 21878,
"text": "Thanks for reading! I write about topics in data science. If you liked this article, please follow me at Sachin Date to get info, insights and programming advice about how to do data science using Python."
}
]
|
AWT KeyEvent Class | On entering a character, the key event is generated. There are three types of key events, which are represented by integer constants. These key events are the following:
KEY_PRESSED
KEY_PRESSED
KEY_RELEASED
KEY_RELEASED
KEY_TYPED
KEY_TYPED
Following is the declaration for java.awt.event.KeyEvent class:
public class KeyEvent
extends InputEvent
Following are the fields for the java.awt.event.KeyEvent class:
static char CHAR_UNDEFINED --KEY_PRESSED and KEY_RELEASED events which do not map to a valid Unicode character use this for the keyChar value.
static char CHAR_UNDEFINED --KEY_PRESSED and KEY_RELEASED events which do not map to a valid Unicode character use this for the keyChar value.
static int KEY_FIRST --The first number in the range of ids used for key events.
static int KEY_FIRST --The first number in the range of ids used for key events.
static int KEY_LAST --The last number in the range of ids used for key events.
static int KEY_LAST --The last number in the range of ids used for key events.
static int KEY_LOCATION_LEFT --A constant indicating that the key pressed or released is in the left key location (there is more than one possible location for this key).
static int KEY_LOCATION_LEFT --A constant indicating that the key pressed or released is in the left key location (there is more than one possible location for this key).
static int KEY_LOCATION_NUMPAD --A constant indicating that the key event originated on the numeric keypad or with a virtual key corresponding to the numeric keypad.
static int KEY_LOCATION_NUMPAD --A constant indicating that the key event originated on the numeric keypad or with a virtual key corresponding to the numeric keypad.
static int KEY_LOCATION_RIGHT -- A constant indicating that the key pressed or released is in the right key location (there is more than one possible location for this key).
static int KEY_LOCATION_RIGHT -- A constant indicating that the key pressed or released is in the right key location (there is more than one possible location for this key).
static int KEY_LOCATION_STANDARD --A constant indicating that the key pressed or released is not distinguished as the left or right version of a key, and did not originate on the numeric keypad (or did not originate with a virtual key corresponding to the numeric keypad).
static int KEY_LOCATION_STANDARD --A constant indicating that the key pressed or released is not distinguished as the left or right version of a key, and did not originate on the numeric keypad (or did not originate with a virtual key corresponding to the numeric keypad).
static int KEY_LOCATION_UNKNOWN -- A constant indicating that the keyLocation is indeterminate or not relevant.
static int KEY_LOCATION_UNKNOWN -- A constant indicating that the keyLocation is indeterminate or not relevant.
static int KEY_PRESSED --The "key pressed" event.
static int KEY_PRESSED --The "key pressed" event.
static int KEY_RELEASED --The "key released" event.
static int KEY_RELEASED --The "key released" event.
static int KEY_TYPED --The "key typed" event.
static int KEY_TYPED --The "key typed" event.
static int VK_1
static int VK_1
static int VK_2
static int VK_2
static int VK_3
static int VK_3
static int VK_4
static int VK_4
static int VK_5
static int VK_5
static int VK_6
static int VK_6
static int VK_7
static int VK_7
static int VK_8
static int VK_8
static int VK_9
static int VK_9
static int VK_A --VK_A thru VK_Z are the same as ASCII 'A' thru 'Z' (0x41 - 0x5A)
static int VK_A --VK_A thru VK_Z are the same as ASCII 'A' thru 'Z' (0x41 - 0x5A)
static int VK_ACCEPT --Constant for the Accept or Commit function key.
static int VK_ACCEPT --Constant for the Accept or Commit function key.
static int VK_ADD
static int VK_ADD
static int VK_AGAIN
static int VK_AGAIN
static int VK_ALL_CANDIDATES --Constant for the All Candidates function key.
static int VK_ALL_CANDIDATES --Constant for the All Candidates function key.
static int VK_ALPHANUMERIC --Constant for the Alphanumeric function key.
static int VK_ALPHANUMERIC --Constant for the Alphanumeric function key.
static int VK_ALT
static int VK_ALT
static int VK_ALT_GRAPH --Constant for the AltGraph function key.
static int VK_ALT_GRAPH --Constant for the AltGraph function key.
static int VK_AMPERSAND
static int VK_AMPERSAND
static int VK_ASTERISK
static int VK_ASTERISK
static int VK_AT --constant for the "@" key.
static int VK_AT --constant for the "@" key.
static int VK_B
static int VK_B
static int VK_BACK_QUOTE
static int VK_BACK_QUOTE
static int VK_BACK_SLASH --Constant for the back slash key, "\"
static int VK_BACK_SLASH --Constant for the back slash key, "\"
static int VK_BACK_SPACE
static int VK_BACK_SPACE
static int VK_BEGIN --Constant for the Begin key.
static int VK_BEGIN --Constant for the Begin key.
static int VK_BRACELEFT
static int VK_BRACELEFT
static int VK_BRACERIGHT
static int VK_BRACERIGHT
static int VK_C
static int VK_C
static int VK_CANCEL
static int VK_CANCEL
static int VK_CAPS_LOCK
static int VK_CAPS_LOCK
static int VK_CIRCUMFLEX --Constant for the "^" key.
static int VK_CIRCUMFLEX --Constant for the "^" key.
static int VK_CLEAR
static int VK_CLEAR
static int VK_CLOSE_BRACKET --Constant for the close bracket key, "]"
static int VK_CLOSE_BRACKET --Constant for the close bracket key, "]"
static int VK_CODE_INPUT --Constant for the Code Input function key.
static int VK_CODE_INPUT --Constant for the Code Input function key.
static int VK_COLON --Constant for the ":" key.
static int VK_COLON --Constant for the ":" key.
static int VK_COMMA --Constant for the comma key, ","
static int VK_COMMA --Constant for the comma key, ","
static int VK_COMPOSE --Constant for the Compose function key.
static int VK_COMPOSE --Constant for the Compose function key.
static int VK_CONTEXT_MENU --Constant for the Microsoft Windows Context Menu key.
static int VK_CONTEXT_MENU --Constant for the Microsoft Windows Context Menu key.
static int VK_CONTROL
static int VK_CONTROL
static int VK_CONVERT -- Constant for the Convert function key.
static int VK_CONVERT -- Constant for the Convert function key.
static int VK_COPY
static int VK_COPY
static int VK_CUT
static int VK_CUT
static int VK_D
static int VK_D
static int VK_DEAD_ABOVEDOT
static int VK_DEAD_ABOVEDOT
static int VK_DEAD_ABOVERING
static int VK_DEAD_ABOVERING
static int VK_DEAD_ACUTE
static int VK_DEAD_ACUTE
static int VK_DEAD_BREVE
static int VK_DEAD_BREVE
static int VK_DEAD_CARON
static int VK_DEAD_CARON
static int VK_DEAD_CEDILLA
static int VK_DEAD_CEDILLA
static int VK_DEAD_CIRCUMFLEX
static int VK_DEAD_CIRCUMFLEX
static int VK_DEAD_DIAERESIS
static int VK_DEAD_DIAERESIS
static int VK_DEAD_DOUBLEACUTE
static int VK_DEAD_DOUBLEACUTE
static int VK_DEAD_GRAVE
static int VK_DEAD_GRAVE
static int VK_DEAD_IOTA
static int VK_DEAD_IOTA
static int VK_DEAD_MACRON
static int VK_DEAD_MACRON
static int VK_DEAD_OGONEK
static int VK_DEAD_OGONEK
static int VK_DEAD_SEMIVOICED_SOUND
static int VK_DEAD_SEMIVOICED_SOUND
static int VK_DEAD_TILDE
static int VK_DEAD_TILDE
static int VK_DEAD_VOICED_SOUND
static int VK_DEAD_VOICED_SOUND
static int VK_DECIMAL
static int VK_DECIMAL
static int VK_DELETE
static int VK_DELETE
static int VK_DIVIDE
static int VK_DIVIDE
static int VK_DOLLAR --Constant for the "$" key.
static int VK_DOLLAR --Constant for the "$" key.
static int VK_DOWN -- Constant for the non-numpad down arrow key.
static int VK_DOWN -- Constant for the non-numpad down arrow key.
static int VK_E
static int VK_E
static int VK_END
static int VK_END
static int VK_ENTER
static int VK_ENTER
static int VK_EQUALS --Constant for the equals key, "="
static int VK_EQUALS --Constant for the equals key, "="
static int VK_ESCAPE
static int VK_ESCAPE
static int VK_EURO_SIGN --Constant for the Euro currency sign key.
static int VK_EURO_SIGN --Constant for the Euro currency sign key.
static int VK_EXCLAMATION_MARK --Constant for the "!" key.
static int VK_EXCLAMATION_MARK --Constant for the "!" key.
static int VK_F
static int VK_F
static int VK_F1 --Constant for the F1 function key.
static int VK_F1 --Constant for the F1 function key.
static int VK_F10 --Constant for the F10 function key.
static int VK_F10 --Constant for the F10 function key.
static int VK_F11 --Constant for the F11 function key.
static int VK_F11 --Constant for the F11 function key.
static int VK_F12 --Constant for the F12 function key.
static int VK_F12 --Constant for the F12 function key.
static int VK_F13 --Constant for the F13 function key.
static int VK_F13 --Constant for the F13 function key.
static int VK_F14 --Constant for the F14 function key.
static int VK_F14 --Constant for the F14 function key.
static int VK_F15 --Constant for the F15 function key.
static int VK_F15 --Constant for the F15 function key.
static int VK_F16 --Constant for the F16 function key.
static int VK_F16 --Constant for the F16 function key.
static int VK_F17 --Constant for the F17 function key.
static int VK_F17 --Constant for the F17 function key.
static int VK_F18 --Constant for the F18 function key.
static int VK_F18 --Constant for the F18 function key.
static int VK_F19 --Constant for the F19 function key.
static int VK_F19 --Constant for the F19 function key.
static int VK_F2 --Constant for the F2 function key.
static int VK_F2 --Constant for the F2 function key.
static int VK_F20 --Constant for the F20 function key.
static int VK_F20 --Constant for the F20 function key.
static int VK_F21 -- Constant for the F21 function key.
static int VK_F21 -- Constant for the F21 function key.
static int VK_F22 --Constant for the F22 function key.
static int VK_F22 --Constant for the F22 function key.
static int VK_F23 --Constant for the F23 function key.
static int VK_F23 --Constant for the F23 function key.
static int VK_F24 --Constant for the F24 function key.
static int VK_F24 --Constant for the F24 function key.
static int VK_F3 --Constant for the F3 function key.
static int VK_F3 --Constant for the F3 function key.
static int VK_F4 --Constant for the F4 function key.
static int VK_F4 --Constant for the F4 function key.
static int VK_F5 -- Constant for the F5 function key.
static int VK_F5 -- Constant for the F5 function key.
static int VK_F6 --Constant for the F6 function key.
static int VK_F6 --Constant for the F6 function key.
static int VK_F7 --Constant for the F7 function key.
static int VK_F7 --Constant for the F7 function key.
static int VK_F8 --Constant for the F8 function key.
static int VK_F8 --Constant for the F8 function key.
static int VK_F9 --Constant for the F9 function key.
static int VK_F9 --Constant for the F9 function key.
static int VK_FINAL
static int VK_FINAL
static int VK_FIND
static int VK_FIND
static int VK_FULL_WIDTH --Constant for the Full-Width Characters function key.
static int VK_FULL_WIDTH --Constant for the Full-Width Characters function key.
static int VK_G
static int VK_G
static int VK_GREATER
static int VK_GREATER
static int VK_H
static int VK_H
static int VK_HALF_WIDTH --Constant for the Half-Width Characters function key.
static int VK_HALF_WIDTH --Constant for the Half-Width Characters function key.
static int VK_HELP
static int VK_HELP
static int VK_HIRAGANA --Constant for the Hiragana function key.
static int VK_HIRAGANA --Constant for the Hiragana function key.
static int VK_HOME
static int VK_HOME
static int VK_I
static int VK_I
static int VK_INPUT_METHOD_ON_OFF -- Constant for the input method on/off key.
static int VK_INPUT_METHOD_ON_OFF -- Constant for the input method on/off key.
static int VK_INSERT
static int VK_INSERT
static int VK_INVERTED_EXCLAMATION_MARK --Constant for the inverted exclamation mark key.
static int VK_INVERTED_EXCLAMATION_MARK --Constant for the inverted exclamation mark key.
static int VK_J
static int VK_J
static int VK_JAPANESE_HIRAGANA --Constant for the Japanese-Hiragana function key.
static int VK_JAPANESE_HIRAGANA --Constant for the Japanese-Hiragana function key.
static int VK_JAPANESE_KATAKANA --Constant for the Japanese-Katakana function key.
static int VK_JAPANESE_KATAKANA --Constant for the Japanese-Katakana function key.
static int VK_JAPANESE_ROMAN --Constant for the Japanese-Roman function key.
static int VK_JAPANESE_ROMAN --Constant for the Japanese-Roman function key.
static int VK_K
static int VK_K
static int VK_KANA
static int VK_KANA
static int VK_KANA_LOCK -- Constant for the locking Kana function key.
static int VK_KANA_LOCK -- Constant for the locking Kana function key.
static int VK_KANJI
static int VK_KANJI
static int VK_KATAKANA --Constant for the Katakana function key.
static int VK_KATAKANA --Constant for the Katakana function key.
static int VK_KP_DOWN -- Constant for the numeric keypad down arrow key.
static int VK_KP_DOWN -- Constant for the numeric keypad down arrow key.
static int VK_KP_LEFT --Constant for the numeric keypad left arrow key.
static int VK_KP_LEFT --Constant for the numeric keypad left arrow key.
static int VK_KP_RIGHT --Constant for the numeric keypad right arrow key.
static int VK_KP_RIGHT --Constant for the numeric keypad right arrow key.
static int VK_KP_UP --Constant for the numeric keypad up arrow key.
static int VK_KP_UP --Constant for the numeric keypad up arrow key.
static int VK_L
static int VK_L
static int VK_LEFT --Constant for the non-numpad left arrow key.
static int VK_LEFT --Constant for the non-numpad left arrow key.
static int VK_LEFT_PARENTHESIS --Constant for the "(" key.
static int VK_LEFT_PARENTHESIS --Constant for the "(" key.
static int VK_LESS
static int VK_LESS
static int VK_M
static int VK_M
static int VK_META
static int VK_META
static int VK_MINUS -- Constant for the minus key, "-"
static int VK_MINUS -- Constant for the minus key, "-"
static int VK_MODECHANGE
static int VK_MODECHANGE
static int VK_MULTIPLY
static int VK_MULTIPLY
static int VK_N
static int VK_N
static int VK_NONCONVERT --Constant for the Don't Convert function key.
static int VK_NONCONVERT --Constant for the Don't Convert function key.
static int VK_NUM_LOCK
static int VK_NUM_LOCK
static int VK_NUMBER_SIGN --Constant for the "#" key.
static int VK_NUMBER_SIGN --Constant for the "#" key.
static int VK_NUMPAD0
static int VK_NUMPAD0
static int VK_NUMPAD1
static int VK_NUMPAD1
static int VK_NUMPAD2
static int VK_NUMPAD2
static int VK_NUMPAD3
static int VK_NUMPAD3
static int VK_NUMPAD4
static int VK_NUMPAD4
static int VK_NUMPAD5
static int VK_NUMPAD5
static int VK_NUMPAD6
static int VK_NUMPAD6
static int VK_NUMPAD7
static int VK_NUMPAD7
static int VK_NUMPAD8
static int VK_NUMPAD8
static int VK_NUMPAD9
static int VK_NUMPAD9
static int VK_O
static int VK_O
static int VK_OPEN_BRACKET --Constant for the open bracket key, "["
static int VK_OPEN_BRACKET --Constant for the open bracket key, "["
static int VK_P
static int VK_P
static int VK_PAGE_DOWN
static int VK_PAGE_DOWN
static int VK_PAGE_UP
static int VK_PAGE_UP
static int VK_PASTE
static int VK_PASTE
static int VK_PAUSE
static int VK_PAUSE
static int VK_PERIOD --Constant for the period key, "."
static int VK_PERIOD --Constant for the period key, "."
static int VK_PLUS -- Constant for the "+" key.
static int VK_PLUS -- Constant for the "+" key.
static int VK_PREVIOUS_CANDIDATE -- Constant for the Previous Candidate function key.
static int VK_PREVIOUS_CANDIDATE -- Constant for the Previous Candidate function key.
static int VK_PRINTSCREEN
static int VK_PRINTSCREEN
static int VK_PROPS
static int VK_PROPS
static int VK_Q
static int VK_Q
static int VK_QUOTE
static int VK_QUOTE
static int VK_QUOTEDBL
static int VK_QUOTEDBL
static int VK_R
static int VK_R
static int VK_RIGHT -- Constant for the non-numpad right arrow key.
static int VK_RIGHT -- Constant for the non-numpad right arrow key.
static int VK_RIGHT_PARENTHESIS --Constant for the ")" key.
static int VK_RIGHT_PARENTHESIS --Constant for the ")" key.
static int VK_ROMAN_CHARACTERS --Constant for the Roman Characters function key.
static int VK_ROMAN_CHARACTERS --Constant for the Roman Characters function key.
static int VK_S
static int VK_S
static int VK_SCROLL_LOCK
static int VK_SCROLL_LOCK
static int VK_SEMICOLON -- Constant for the semicolon key, ";"
static int VK_SEMICOLON -- Constant for the semicolon key, ";"
static int VK_SEPARATER --This constant is obsolete, and is included only for backwards compatibility.
static int VK_SEPARATER --This constant is obsolete, and is included only for backwards compatibility.
static int VK_SEPARATOR --Constant for the Numpad Separator key.
static int VK_SEPARATOR --Constant for the Numpad Separator key.
static int VK_SHIFT
static int VK_SHIFT
static int VK_SLASH -- Constant for the forward slash key, "/"
static int VK_SLASH -- Constant for the forward slash key, "/"
static int VK_SPACE
static int VK_SPACE
static int VK_STOP
static int VK_STOP
static int VK_SUBTRACT
static int VK_SUBTRACT
static int VK_T
static int VK_T
static int VK_TAB
static int VK_TAB
static int VK_U
static int VK_U
static int VK_UNDEFINED -- This value is used to indicate that the keyCode is unknown.
static int VK_UNDEFINED -- This value is used to indicate that the keyCode is unknown.
static int VK_UNDERSCORE --Constant for the "_" key.
static int VK_UNDERSCORE --Constant for the "_" key.
static int VK_UNDO
static int VK_UNDO
static int VK_UP --Constant for the non-numpad up arrow key.
static int VK_UP --Constant for the non-numpad up arrow key.
static int VK_V
static int VK_V
static int VK_W
static int VK_W
static int VK_WINDOWS --Constant for the Microsoft Windows "Windows" key.
static int VK_WINDOWS --Constant for the Microsoft Windows "Windows" key.
static int VK_X
static int VK_X
static int VK_Y
static int VK_Y
static int VK_Z
static int VK_Z
KeyEvent(Component source, int id, long when, int modifiers, int keyCode)
Deprecated. as of JDK1.1
KeyEvent(Component source, int id, long when, int modifiers, int keyCode, char keyChar)
Constructs a KeyEvent object.
KeyEvent(Component source, int id, long when, int modifiers, int keyCode, char keyChar, int keyLocation)
char getKeyChar()
Returns the character associated with the key in this event.
int getKeyCode()
Returns the integer keyCode associated with the key in this event.
int getKeyLocation()
Returns the location of the key that originated this key event.
static String getKeyModifiersText(int modifiers)
Returns a String describing the modifier key(s), such as "Shift", or "Ctrl+Shift".
static String getKeyText(int keyCode)
Returns a String describing the keyCode, such as "HOME", "F1" or "A".
boolean isActionKey()
Returns whether the key in this event is an "action" key.
String paramString()
Returns a parameter string identifying this event.
void setKeyChar(char keyChar)
Set the keyChar value to indicate a logical character.
void setKeyCode(int keyCode)
Set the keyCode value to indicate a physical key.
void setModifiers(int modifiers)
Deprecated. as of JDK1.1.4
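The following is a minimal usage sketch (added for illustration, not part of the reference tables above); it shows how a KeyEvent is typically received through a KeyListener, and the frame and class names are arbitrary:
import java.awt.Frame;
import java.awt.event.KeyAdapter;
import java.awt.event.KeyEvent;

public class KeyEventDemo {
   public static void main(String[] args) {
      Frame frame = new Frame("KeyEvent Demo");
      frame.addKeyListener(new KeyAdapter() {
         @Override
         public void keyPressed(KeyEvent e) {
            // getKeyCode() returns the VK_ constant; getKeyText() describes it
            System.out.println("Pressed: " + KeyEvent.getKeyText(e.getKeyCode()));
         }
      });
      frame.setSize(300, 200);
      frame.setVisible(true);
   }
}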
This class inherits methods from the following classes:
java.awt.event.InputEvent
java.awt.event.InputEvent
java.awt.event.ComponentEvent
java.awt.event.ComponentEvent
java.awt.AWTEvent
java.awt.AWTEvent
java.util.EventObject
java.util.EventObject
java.lang.Object
java.lang.Object
Bookmark this page | [
{
"code": null,
"e": 1915,
"s": 1747,
"text": "On entering the character the Key event is generated.There are three types of key events which are represented by the integer constants. These key events are following"
},
{
"code": null,
"e": 1927,
"s": 1915,
"text": "KEY_PRESSED"
},
{
"code": null,
"e": 1939,
"s": 1927,
"text": "KEY_PRESSED"
},
{
"code": null,
"e": 1951,
"s": 1939,
"text": "KEY_RELASED"
},
{
"code": null,
"e": 1963,
"s": 1951,
"text": "KEY_RELASED"
},
{
"code": null,
"e": 1973,
"s": 1963,
"text": "KEY_TYPED"
},
{
"code": null,
"e": 1983,
"s": 1973,
"text": "KEY_TYPED"
},
{
"code": null,
"e": 2047,
"s": 1983,
"text": "Following is the declaration for java.awt.event.KeyEvent class:"
},
{
"code": null,
"e": 2091,
"s": 2047,
"text": "public class KeyEvent\n extends InputEvent"
},
{
"code": null,
"e": 2147,
"s": 2091,
"text": "Following are the fields for java.awt.InputEvent class:"
},
{
"code": null,
"e": 2291,
"s": 2147,
"text": "static char CHAR_UNDEFINED --KEY_PRESSED and KEY_RELEASED events which do not map to a valid Unicode character use this for the keyChar value."
},
{
"code": null,
"e": 2435,
"s": 2291,
"text": "static char CHAR_UNDEFINED --KEY_PRESSED and KEY_RELEASED events which do not map to a valid Unicode character use this for the keyChar value."
},
{
"code": null,
"e": 2517,
"s": 2435,
"text": "static int KEY_FIRST --The first number in the range of ids used for key events."
},
{
"code": null,
"e": 2599,
"s": 2517,
"text": "static int KEY_FIRST --The first number in the range of ids used for key events."
},
{
"code": null,
"e": 2679,
"s": 2599,
"text": "static int KEY_LAST --The last number in the range of ids used for key events."
},
{
"code": null,
"e": 2759,
"s": 2679,
"text": "static int KEY_LAST --The last number in the range of ids used for key events."
},
{
"code": null,
"e": 2931,
"s": 2759,
"text": "static int KEY_LOCATION_LEFT --A constant indicating that the key pressed or released is in the left key location (there is more than one possible location for this key)."
},
{
"code": null,
"e": 3103,
"s": 2931,
"text": "static int KEY_LOCATION_LEFT --A constant indicating that the key pressed or released is in the left key location (there is more than one possible location for this key)."
},
{
"code": null,
"e": 3269,
"s": 3103,
"text": "static int KEY_LOCATION_NUMPAD --A constant indicating that the key event originated on the numeric keypad or with a virtual key corresponding to the numeric keypad."
},
{
"code": null,
"e": 3435,
"s": 3269,
"text": "static int KEY_LOCATION_NUMPAD --A constant indicating that the key event originated on the numeric keypad or with a virtual key corresponding to the numeric keypad."
},
{
"code": null,
"e": 3610,
"s": 3435,
"text": "static int KEY_LOCATION_RIGHT -- A constant indicating that the key pressed or released is in the right key location (there is more than one possible location for this key)."
},
{
"code": null,
"e": 3785,
"s": 3610,
"text": "static int KEY_LOCATION_RIGHT -- A constant indicating that the key pressed or released is in the right key location (there is more than one possible location for this key)."
},
{
"code": null,
"e": 4059,
"s": 3785,
"text": "static int KEY_LOCATION_STANDARD --A constant indicating that the key pressed or released is not distinguished as the left or right version of a key, and did not originate on the numeric keypad (or did not originate with a virtual key corresponding to the numeric keypad)."
},
{
"code": null,
"e": 4333,
"s": 4059,
"text": "static int KEY_LOCATION_STANDARD --A constant indicating that the key pressed or released is not distinguished as the left or right version of a key, and did not originate on the numeric keypad (or did not originate with a virtual key corresponding to the numeric keypad)."
},
{
"code": null,
"e": 4445,
"s": 4333,
"text": "static int KEY_LOCATION_UNKNOWN -- A constant indicating that the keyLocation is indeterminate or not relevant."
},
{
"code": null,
"e": 4557,
"s": 4445,
"text": "static int KEY_LOCATION_UNKNOWN -- A constant indicating that the keyLocation is indeterminate or not relevant."
},
{
"code": null,
"e": 4607,
"s": 4557,
"text": "static int KEY_PRESSED --The \"key pressed\" event."
},
{
"code": null,
"e": 4657,
"s": 4607,
"text": "static int KEY_PRESSED --The \"key pressed\" event."
},
{
"code": null,
"e": 4710,
"s": 4657,
"text": "static int KEY_RELEASED --The \"key released\" event."
},
{
"code": null,
"e": 4763,
"s": 4710,
"text": "static int KEY_RELEASED --The \"key released\" event."
},
{
"code": null,
"e": 4810,
"s": 4763,
"text": "static int KEY_TYPED --The \"key typed\" event."
},
{
"code": null,
"e": 4857,
"s": 4810,
"text": "static int KEY_TYPED --The \"key typed\" event."
},
{
"code": null,
"e": 4874,
"s": 4857,
"text": "static int VK_1 "
},
{
"code": null,
"e": 4891,
"s": 4874,
"text": "static int VK_1 "
},
{
"code": null,
"e": 4908,
"s": 4891,
"text": "static int VK_2 "
},
{
"code": null,
"e": 4925,
"s": 4908,
"text": "static int VK_2 "
},
{
"code": null,
"e": 4943,
"s": 4925,
"text": "static int VK_3 "
},
{
"code": null,
"e": 4961,
"s": 4943,
"text": "static int VK_3 "
},
{
"code": null,
"e": 4979,
"s": 4961,
"text": "static int VK_4 "
},
{
"code": null,
"e": 4997,
"s": 4979,
"text": "static int VK_4 "
},
{
"code": null,
"e": 5015,
"s": 4997,
"text": "static int VK_5 "
},
{
"code": null,
"e": 5033,
"s": 5015,
"text": "static int VK_5 "
},
{
"code": null,
"e": 5051,
"s": 5033,
"text": "static int VK_6 "
},
{
"code": null,
"e": 5069,
"s": 5051,
"text": "static int VK_6 "
},
{
"code": null,
"e": 5087,
"s": 5069,
"text": "static int VK_7 "
},
{
"code": null,
"e": 5105,
"s": 5087,
"text": "static int VK_7 "
},
{
"code": null,
"e": 5123,
"s": 5105,
"text": "static int VK_8 "
},
{
"code": null,
"e": 5141,
"s": 5123,
"text": "static int VK_8 "
},
{
"code": null,
"e": 5160,
"s": 5141,
"text": "static int VK_9 "
},
{
"code": null,
"e": 5179,
"s": 5160,
"text": "static int VK_9 "
},
{
"code": null,
"e": 5262,
"s": 5179,
"text": "static int VK_A --VK_A thru VK_Z are the same as ASCII 'A' thru 'Z' (0x41 - 0x5A)"
},
{
"code": null,
"e": 5345,
"s": 5262,
"text": "static int VK_A --VK_A thru VK_Z are the same as ASCII 'A' thru 'Z' (0x41 - 0x5A)"
},
{
"code": null,
"e": 5417,
"s": 5345,
"text": "static int VK_ACCEPT --Constant for the Accept or Commit function key."
},
{
"code": null,
"e": 5489,
"s": 5417,
"text": "static int VK_ACCEPT --Constant for the Accept or Commit function key."
},
{
"code": null,
"e": 5514,
"s": 5489,
"text": "static int VK_ADD "
},
{
"code": null,
"e": 5539,
"s": 5514,
"text": "static int VK_ADD "
},
{
"code": null,
"e": 5567,
"s": 5539,
"text": "static int VK_AGAIN "
},
{
"code": null,
"e": 5595,
"s": 5567,
"text": "static int VK_AGAIN "
},
{
"code": null,
"e": 5673,
"s": 5595,
"text": "static int VK_ALL_CANDIDATES --Constant for the All Candidates function key."
},
{
"code": null,
"e": 5751,
"s": 5673,
"text": "static int VK_ALL_CANDIDATES --Constant for the All Candidates function key."
},
{
"code": null,
"e": 5825,
"s": 5751,
"text": "static int VK_ALPHANUMERIC --Constant for the Alphanumeric function key."
},
{
"code": null,
"e": 5899,
"s": 5825,
"text": "static int VK_ALPHANUMERIC --Constant for the Alphanumeric function key."
},
{
"code": null,
"e": 5926,
"s": 5899,
"text": "static int VK_ALT "
},
{
"code": null,
"e": 5953,
"s": 5926,
"text": "static int VK_ALT "
},
{
"code": null,
"e": 6020,
"s": 5953,
"text": "static int VK_ALT_GRAPH --Constant for the AltGraph function key."
},
{
"code": null,
"e": 6087,
"s": 6020,
"text": "static int VK_ALT_GRAPH --Constant for the AltGraph function key."
},
{
"code": null,
"e": 6115,
"s": 6087,
"text": "static int VK_AMPERSAND "
},
{
"code": null,
"e": 6143,
"s": 6115,
"text": "static int VK_AMPERSAND "
},
{
"code": null,
"e": 6171,
"s": 6143,
"text": "static int VK_ASTERISK "
},
{
"code": null,
"e": 6199,
"s": 6171,
"text": "static int VK_ASTERISK "
},
{
"code": null,
"e": 6245,
"s": 6199,
"text": "static int VK_AT --constant for the \"@\" key."
},
{
"code": null,
"e": 6291,
"s": 6245,
"text": "static int VK_AT --constant for the \"@\" key."
},
{
"code": null,
"e": 6316,
"s": 6291,
"text": "static int VK_B "
},
{
"code": null,
"e": 6341,
"s": 6316,
"text": "static int VK_B "
},
{
"code": null,
"e": 6370,
"s": 6341,
"text": "static int VK_BACK_QUOTE "
},
{
"code": null,
"e": 6399,
"s": 6370,
"text": "static int VK_BACK_QUOTE "
},
{
"code": null,
"e": 6465,
"s": 6399,
"text": "static int VK_BACK_SLASH --Constant for the back slash key, \"\\\"\n"
},
{
"code": null,
"e": 6531,
"s": 6465,
"text": "static int VK_BACK_SLASH --Constant for the back slash key, \"\\\"\n"
},
{
"code": null,
"e": 6561,
"s": 6531,
"text": "static int VK_BACK_SPACE "
},
{
"code": null,
"e": 6591,
"s": 6561,
"text": "static int VK_BACK_SPACE "
},
{
"code": null,
"e": 6642,
"s": 6591,
"text": "static int VK_BEGIN --Constant for the Begin key."
},
{
"code": null,
"e": 6693,
"s": 6642,
"text": "static int VK_BEGIN --Constant for the Begin key."
},
{
"code": null,
"e": 6723,
"s": 6693,
"text": "static int VK_BRACELEFT "
},
{
"code": null,
"e": 6753,
"s": 6723,
"text": "static int VK_BRACELEFT "
},
{
"code": null,
"e": 6784,
"s": 6753,
"text": "static int VK_BRACERIGHT "
},
{
"code": null,
"e": 6815,
"s": 6784,
"text": "static int VK_BRACERIGHT "
},
{
"code": null,
"e": 6843,
"s": 6815,
"text": "static int VK_C "
},
{
"code": null,
"e": 6871,
"s": 6843,
"text": "static int VK_C "
},
{
"code": null,
"e": 6903,
"s": 6871,
"text": "static int VK_CANCEL "
},
{
"code": null,
"e": 6935,
"s": 6903,
"text": "static int VK_CANCEL "
},
{
"code": null,
"e": 6970,
"s": 6935,
"text": "static int VK_CAPS_LOCK "
},
{
"code": null,
"e": 7005,
"s": 6970,
"text": "static int VK_CAPS_LOCK "
},
{
"code": null,
"e": 7059,
"s": 7005,
"text": "static int VK_CIRCUMFLEX --Constant for the \"^\" key."
},
{
"code": null,
"e": 7113,
"s": 7059,
"text": "static int VK_CIRCUMFLEX --Constant for the \"^\" key."
},
{
"code": null,
"e": 7143,
"s": 7113,
"text": "static int VK_CLEAR "
},
{
"code": null,
"e": 7173,
"s": 7143,
"text": "static int VK_CLEAR "
},
{
"code": null,
"e": 7245,
"s": 7173,
"text": "static int VK_CLOSE_BRACKET --Constant for the close bracket key, \"]\"\n"
},
{
"code": null,
"e": 7317,
"s": 7245,
"text": "static int VK_CLOSE_BRACKET --Constant for the close bracket key, \"]\"\n"
},
{
"code": null,
"e": 7387,
"s": 7317,
"text": "static int VK_CODE_INPUT --Constant for the Code Input function key."
},
{
"code": null,
"e": 7457,
"s": 7387,
"text": "static int VK_CODE_INPUT --Constant for the Code Input function key."
},
{
"code": null,
"e": 7506,
"s": 7457,
"text": "static int VK_COLON --Constant for the \":\" key."
},
{
"code": null,
"e": 7555,
"s": 7506,
"text": "static int VK_COLON --Constant for the \":\" key."
},
{
"code": null,
"e": 7611,
"s": 7555,
"text": "static int VK_COMMA --Constant for the comma key, \",\"\n"
},
{
"code": null,
"e": 7667,
"s": 7611,
"text": "static int VK_COMMA --Constant for the comma key, \",\"\n"
},
{
"code": null,
"e": 7731,
"s": 7667,
"text": "static int VK_COMPOSE --Constant for the Compose function key."
},
{
"code": null,
"e": 7795,
"s": 7731,
"text": "static int VK_COMPOSE --Constant for the Compose function key."
},
{
"code": null,
"e": 7878,
"s": 7795,
"text": "static int VK_CONTEXT_MENU --Constant for the Microsoft Windows Context Menu key."
},
{
"code": null,
"e": 7961,
"s": 7878,
"text": "static int VK_CONTEXT_MENU --Constant for the Microsoft Windows Context Menu key."
},
{
"code": null,
"e": 7991,
"s": 7961,
"text": "static int VK_CONTROL "
},
{
"code": null,
"e": 8021,
"s": 7991,
"text": "static int VK_CONTROL "
},
{
"code": null,
"e": 8086,
"s": 8021,
"text": "static int VK_CONVERT -- Constant for the Convert function key."
},
{
"code": null,
"e": 8151,
"s": 8086,
"text": "static int VK_CONVERT -- Constant for the Convert function key."
},
{
"code": null,
"e": 8179,
"s": 8151,
"text": "static int VK_COPY "
},
{
"code": null,
"e": 8207,
"s": 8179,
"text": "static int VK_COPY "
},
{
"code": null,
"e": 8232,
"s": 8207,
"text": "static int VK_CUT "
},
{
"code": null,
"e": 8257,
"s": 8232,
"text": "static int VK_CUT "
},
{
"code": null,
"e": 8282,
"s": 8257,
"text": "static int VK_D "
},
{
"code": null,
"e": 8307,
"s": 8282,
"text": "static int VK_D "
},
{
"code": null,
"e": 8339,
"s": 8307,
"text": "static int VK_DEAD_ABOVEDOT "
},
{
"code": null,
"e": 8371,
"s": 8339,
"text": "static int VK_DEAD_ABOVEDOT "
},
{
"code": null,
"e": 8404,
"s": 8371,
"text": "static int VK_DEAD_ABOVERING "
},
{
"code": null,
"e": 8437,
"s": 8404,
"text": "static int VK_DEAD_ABOVERING "
},
{
"code": null,
"e": 8468,
"s": 8437,
"text": "static int VK_DEAD_ACUTE "
},
{
"code": null,
"e": 8499,
"s": 8468,
"text": "static int VK_DEAD_ACUTE "
},
{
"code": null,
"e": 8533,
"s": 8499,
"text": "static int VK_DEAD_BREVE "
},
{
"code": null,
"e": 8567,
"s": 8533,
"text": "static int VK_DEAD_BREVE "
},
{
"code": null,
"e": 8599,
"s": 8567,
"text": "static int VK_DEAD_CARON "
},
{
"code": null,
"e": 8631,
"s": 8599,
"text": "static int VK_DEAD_CARON "
},
{
"code": null,
"e": 8663,
"s": 8631,
"text": "static int VK_DEAD_CEDILLA "
},
{
"code": null,
"e": 8695,
"s": 8663,
"text": "static int VK_DEAD_CEDILLA "
},
{
"code": null,
"e": 8729,
"s": 8695,
"text": "static int VK_DEAD_CIRCUMFLEX "
},
{
"code": null,
"e": 8763,
"s": 8729,
"text": "static int VK_DEAD_CIRCUMFLEX "
},
{
"code": null,
"e": 8797,
"s": 8763,
"text": "static int VK_DEAD_DIAERESIS "
},
{
"code": null,
"e": 8831,
"s": 8797,
"text": "static int VK_DEAD_DIAERESIS "
},
{
"code": null,
"e": 8865,
"s": 8831,
"text": "static int VK_DEAD_DOUBLEACUTE "
},
{
"code": null,
"e": 8899,
"s": 8865,
"text": "static int VK_DEAD_DOUBLEACUTE "
},
{
"code": null,
"e": 8931,
"s": 8899,
"text": "static int VK_DEAD_GRAVE "
},
{
"code": null,
"e": 8963,
"s": 8931,
"text": "static int VK_DEAD_GRAVE "
},
{
"code": null,
"e": 8995,
"s": 8963,
"text": "static int VK_DEAD_IOTA "
},
{
"code": null,
"e": 9027,
"s": 8995,
"text": "static int VK_DEAD_IOTA "
},
{
"code": null,
"e": 9060,
"s": 9027,
"text": "static int VK_DEAD_MACRON "
},
{
"code": null,
"e": 9093,
"s": 9060,
"text": "static int VK_DEAD_MACRON "
},
{
"code": null,
"e": 9122,
"s": 9093,
"text": "static int VK_DEAD_OGONEK "
},
{
"code": null,
"e": 9151,
"s": 9122,
"text": "static int VK_DEAD_OGONEK "
},
{
"code": null,
"e": 9188,
"s": 9151,
"text": "static int VK_DEAD_SEMIVOICED_SOUND "
},
{
"code": null,
"e": 9225,
"s": 9188,
"text": "static int VK_DEAD_SEMIVOICED_SOUND "
},
{
"code": null,
"e": 9256,
"s": 9225,
"text": "static int VK_DEAD_TILDE "
},
{
"code": null,
"e": 9287,
"s": 9256,
"text": "static int VK_DEAD_TILDE "
},
{
"code": null,
"e": 9322,
"s": 9287,
"text": "static int VK_DEAD_VOICED_SOUND "
},
{
"code": null,
"e": 9357,
"s": 9322,
"text": "static int VK_DEAD_VOICED_SOUND "
},
{
"code": null,
"e": 9386,
"s": 9357,
"text": "static int VK_DECIMAL "
},
{
"code": null,
"e": 9415,
"s": 9386,
"text": "static int VK_DECIMAL "
},
{
"code": null,
"e": 9447,
"s": 9415,
"text": "static int VK_DELETE "
},
{
"code": null,
"e": 9479,
"s": 9447,
"text": "static int VK_DELETE "
},
{
"code": null,
"e": 9509,
"s": 9479,
"text": "static int VK_DIVIDE "
},
{
"code": null,
"e": 9539,
"s": 9509,
"text": "static int VK_DIVIDE "
},
{
"code": null,
"e": 9589,
"s": 9539,
"text": "static int VK_DOLLAR --Constant for the \"$\" key."
},
{
"code": null,
"e": 9639,
"s": 9589,
"text": "static int VK_DOLLAR --Constant for the \"$\" key."
},
{
"code": null,
"e": 9706,
"s": 9639,
"text": "static int VK_DOWN -- Constant for the non-numpad down arrow key."
},
{
"code": null,
"e": 9773,
"s": 9706,
"text": "static int VK_DOWN -- Constant for the non-numpad down arrow key."
},
{
"code": null,
"e": 9799,
"s": 9773,
"text": "static int VK_E "
},
{
"code": null,
"e": 9825,
"s": 9799,
"text": "static int VK_E "
},
{
"code": null,
"e": 9853,
"s": 9825,
"text": "static int VK_END "
},
{
"code": null,
"e": 9881,
"s": 9853,
"text": "static int VK_END "
},
{
"code": null,
"e": 9910,
"s": 9881,
"text": "static int VK_ENTER "
},
{
"code": null,
"e": 9939,
"s": 9910,
"text": "static int VK_ENTER "
},
{
"code": null,
"e": 9997,
"s": 9939,
"text": "static int VK_EQUALS --Constant for the equals key, \"=\"\n"
},
{
"code": null,
"e": 10055,
"s": 9997,
"text": "static int VK_EQUALS --Constant for the equals key, \"=\"\n"
},
{
"code": null,
"e": 10084,
"s": 10055,
"text": "static int VK_ESCAPE "
},
{
"code": null,
"e": 10113,
"s": 10084,
"text": "static int VK_ESCAPE "
},
{
"code": null,
"e": 10181,
"s": 10113,
"text": "static int VK_EURO_SIGN --Constant for the Euro currency sign key."
},
{
"code": null,
"e": 10249,
"s": 10181,
"text": "static int VK_EURO_SIGN --Constant for the Euro currency sign key."
},
{
"code": null,
"e": 10309,
"s": 10249,
"text": "static int VK_EXCLAMATION_MARK --Constant for the \"!\" key."
},
{
"code": null,
"e": 10369,
"s": 10309,
"text": "static int VK_EXCLAMATION_MARK --Constant for the \"!\" key."
},
{
"code": null,
"e": 10390,
"s": 10369,
"text": "static int VK_F "
},
{
"code": null,
"e": 10411,
"s": 10390,
"text": "static int VK_F "
},
{
"code": null,
"e": 10465,
"s": 10411,
"text": "static int VK_F1 --Constant for the F1 function key."
},
{
"code": null,
"e": 10519,
"s": 10465,
"text": "static int VK_F1 --Constant for the F1 function key."
},
{
"code": null,
"e": 10575,
"s": 10519,
"text": "static int VK_F10 --Constant for the F10 function key."
},
{
"code": null,
"e": 10631,
"s": 10575,
"text": "static int VK_F10 --Constant for the F10 function key."
},
{
"code": null,
"e": 10687,
"s": 10631,
"text": "static int VK_F11 --Constant for the F11 function key."
},
{
"code": null,
"e": 10743,
"s": 10687,
"text": "static int VK_F11 --Constant for the F11 function key."
},
{
"code": null,
"e": 10799,
"s": 10743,
"text": "static int VK_F12 --Constant for the F12 function key."
},
{
"code": null,
"e": 10855,
"s": 10799,
"text": "static int VK_F12 --Constant for the F12 function key."
},
{
"code": null,
"e": 10911,
"s": 10855,
"text": "static int VK_F13 --Constant for the F13 function key."
},
{
"code": null,
"e": 10967,
"s": 10911,
"text": "static int VK_F13 --Constant for the F13 function key."
},
{
"code": null,
"e": 11023,
"s": 10967,
"text": "static int VK_F14 --Constant for the F14 function key."
},
{
"code": null,
"e": 11079,
"s": 11023,
"text": "static int VK_F14 --Constant for the F14 function key."
},
{
"code": null,
"e": 11135,
"s": 11079,
"text": "static int VK_F15 --Constant for the F15 function key."
},
{
"code": null,
"e": 11191,
"s": 11135,
"text": "static int VK_F15 --Constant for the F15 function key."
},
{
"code": null,
"e": 11247,
"s": 11191,
"text": "static int VK_F16 --Constant for the F16 function key."
},
{
"code": null,
"e": 11303,
"s": 11247,
"text": "static int VK_F16 --Constant for the F16 function key."
},
{
"code": null,
"e": 11359,
"s": 11303,
"text": "static int VK_F17 --Constant for the F17 function key."
},
{
"code": null,
"e": 11415,
"s": 11359,
"text": "static int VK_F17 --Constant for the F17 function key."
},
{
"code": null,
"e": 11471,
"s": 11415,
"text": "static int VK_F18 --Constant for the F18 function key."
},
{
"code": null,
"e": 11527,
"s": 11471,
"text": "static int VK_F18 --Constant for the F18 function key."
},
{
"code": null,
"e": 11583,
"s": 11527,
"text": "static int VK_F19 --Constant for the F19 function key."
},
{
"code": null,
"e": 11639,
"s": 11583,
"text": "static int VK_F19 --Constant for the F19 function key."
},
{
"code": null,
"e": 11693,
"s": 11639,
"text": "static int VK_F2 --Constant for the F2 function key."
},
{
"code": null,
"e": 11747,
"s": 11693,
"text": "static int VK_F2 --Constant for the F2 function key."
},
{
"code": null,
"e": 11803,
"s": 11747,
"text": "static int VK_F20 --Constant for the F20 function key."
},
{
"code": null,
"e": 11859,
"s": 11803,
"text": "static int VK_F20 --Constant for the F20 function key."
},
{
"code": null,
"e": 11916,
"s": 11859,
"text": "static int VK_F21 -- Constant for the F21 function key."
},
{
"code": null,
"e": 11973,
"s": 11916,
"text": "static int VK_F21 -- Constant for the F21 function key."
},
{
"code": null,
"e": 12029,
"s": 11973,
"text": "static int VK_F22 --Constant for the F22 function key."
},
{
"code": null,
"e": 12085,
"s": 12029,
"text": "static int VK_F22 --Constant for the F22 function key."
},
{
"code": null,
"e": 12141,
"s": 12085,
"text": "static int VK_F23 --Constant for the F23 function key."
},
{
"code": null,
"e": 12197,
"s": 12141,
"text": "static int VK_F23 --Constant for the F23 function key."
},
{
"code": null,
"e": 12253,
"s": 12197,
"text": "static int VK_F24 --Constant for the F24 function key."
},
{
"code": null,
"e": 12309,
"s": 12253,
"text": "static int VK_F24 --Constant for the F24 function key."
},
{
"code": null,
"e": 12363,
"s": 12309,
"text": "static int VK_F3 --Constant for the F3 function key."
},
{
"code": null,
"e": 12417,
"s": 12363,
"text": "static int VK_F3 --Constant for the F3 function key."
},
{
"code": null,
"e": 12471,
"s": 12417,
"text": "static int VK_F4 --Constant for the F4 function key."
},
{
"code": null,
"e": 12525,
"s": 12471,
"text": "static int VK_F4 --Constant for the F4 function key."
},
{
"code": null,
"e": 12580,
"s": 12525,
"text": "static int VK_F5 -- Constant for the F5 function key."
},
{
"code": null,
"e": 12635,
"s": 12580,
"text": "static int VK_F5 -- Constant for the F5 function key."
},
{
"code": null,
"e": 12689,
"s": 12635,
"text": "static int VK_F6 --Constant for the F6 function key."
},
{
"code": null,
"e": 12743,
"s": 12689,
"text": "static int VK_F6 --Constant for the F6 function key."
},
{
"code": null,
"e": 12797,
"s": 12743,
"text": "static int VK_F7 --Constant for the F7 function key."
},
{
"code": null,
"e": 12851,
"s": 12797,
"text": "static int VK_F7 --Constant for the F7 function key."
},
{
"code": null,
"e": 12905,
"s": 12851,
"text": "static int VK_F8 --Constant for the F8 function key."
},
{
"code": null,
"e": 12959,
"s": 12905,
"text": "static int VK_F8 --Constant for the F8 function key."
},
{
"code": null,
"e": 13013,
"s": 12959,
"text": "static int VK_F9 --Constant for the F9 function key."
},
{
"code": null,
"e": 13067,
"s": 13013,
"text": "static int VK_F9 --Constant for the F9 function key."
},
{
"code": null,
"e": 13091,
"s": 13067,
"text": "static int VK_FINAL "
},
{
"code": null,
"e": 13115,
"s": 13091,
"text": "static int VK_FINAL "
},
{
"code": null,
"e": 13140,
"s": 13115,
"text": "static int VK_FIND "
},
{
"code": null,
"e": 13165,
"s": 13140,
"text": "static int VK_FIND "
},
{
"code": null,
"e": 13246,
"s": 13165,
"text": "static int VK_FULL_WIDTH --Constant for the Full-Width Characters function key."
},
{
"code": null,
"e": 13327,
"s": 13246,
"text": "static int VK_FULL_WIDTH --Constant for the Full-Width Characters function key."
},
{
"code": null,
"e": 13351,
"s": 13327,
"text": "static int VK_G "
},
{
"code": null,
"e": 13375,
"s": 13351,
"text": "static int VK_G "
},
{
"code": null,
"e": 13403,
"s": 13375,
"text": "static int VK_GREATER "
},
{
"code": null,
"e": 13431,
"s": 13403,
"text": "static int VK_GREATER "
},
{
"code": null,
"e": 13456,
"s": 13431,
"text": "static int VK_H "
},
{
"code": null,
"e": 13481,
"s": 13456,
"text": "static int VK_H "
},
{
"code": null,
"e": 13562,
"s": 13481,
"text": "static int VK_HALF_WIDTH --Constant for the Half-Width Characters function key."
},
{
"code": null,
"e": 13643,
"s": 13562,
"text": "static int VK_HALF_WIDTH --Constant for the Half-Width Characters function key."
},
{
"code": null,
"e": 13673,
"s": 13643,
"text": "static int VK_HELP "
},
{
"code": null,
"e": 13703,
"s": 13673,
"text": "static int VK_HELP "
},
{
"code": null,
"e": 13769,
"s": 13703,
"text": "static int VK_HIRAGANA --Constant for the Hiragana function key."
},
{
"code": null,
"e": 13835,
"s": 13769,
"text": "static int VK_HIRAGANA --Constant for the Hiragana function key."
},
{
"code": null,
"e": 13862,
"s": 13835,
"text": "static int VK_HOME "
},
{
"code": null,
"e": 13889,
"s": 13862,
"text": "static int VK_HOME "
},
{
"code": null,
"e": 13914,
"s": 13889,
"text": "static int VK_I "
},
{
"code": null,
"e": 13939,
"s": 13914,
"text": "static int VK_I "
},
{
"code": null,
"e": 14019,
"s": 13939,
"text": "static int VK_INPUT_METHOD_ON_OFF -- Constant for the input method on/off key."
},
{
"code": null,
"e": 14099,
"s": 14019,
"text": "static int VK_INPUT_METHOD_ON_OFF -- Constant for the input method on/off key."
},
{
"code": null,
"e": 14129,
"s": 14099,
"text": "static int VK_INSERT "
},
{
"code": null,
"e": 14159,
"s": 14129,
"text": "static int VK_INSERT "
},
{
"code": null,
"e": 14250,
"s": 14159,
"text": "static int VK_INVERTED_EXCLAMATION_MARK --Constant for the inverted exclamation mark key."
},
{
"code": null,
"e": 14341,
"s": 14250,
"text": "static int VK_INVERTED_EXCLAMATION_MARK --Constant for the inverted exclamation mark key."
},
{
"code": null,
"e": 14367,
"s": 14341,
"text": "static int VK_J "
},
{
"code": null,
"e": 14393,
"s": 14367,
"text": "static int VK_J "
},
{
"code": null,
"e": 14477,
"s": 14393,
"text": "static int VK_JAPANESE_HIRAGANA --Constant for the Japanese-Hiragana function key."
},
{
"code": null,
"e": 14561,
"s": 14477,
"text": "static int VK_JAPANESE_HIRAGANA --Constant for the Japanese-Hiragana function key."
},
{
"code": null,
"e": 14645,
"s": 14561,
"text": "static int VK_JAPANESE_KATAKANA --Constant for the Japanese-Katakana function key."
},
{
"code": null,
"e": 14729,
"s": 14645,
"text": "static int VK_JAPANESE_KATAKANA --Constant for the Japanese-Katakana function key."
},
{
"code": null,
"e": 14807,
"s": 14729,
"text": "static int VK_JAPANESE_ROMAN --Constant for the Japanese-Roman function key."
},
{
"code": null,
"e": 14885,
"s": 14807,
"text": "static int VK_JAPANESE_ROMAN --Constant for the Japanese-Roman function key."
},
{
"code": null,
"e": 14909,
"s": 14885,
"text": "static int VK_K "
},
{
"code": null,
"e": 14933,
"s": 14909,
"text": "static int VK_K "
},
{
"code": null,
"e": 14962,
"s": 14933,
"text": "static int VK_KANA "
},
{
"code": null,
"e": 14991,
"s": 14962,
"text": "static int VK_KANA "
},
{
"code": null,
"e": 15063,
"s": 14991,
"text": "static int VK_KANA_LOCK -- Constant for the locking Kana function key."
},
{
"code": null,
"e": 15135,
"s": 15063,
"text": "static int VK_KANA_LOCK -- Constant for the locking Kana function key."
},
{
"code": null,
"e": 15164,
"s": 15135,
"text": "static int VK_KANJI "
},
{
"code": null,
"e": 15193,
"s": 15164,
"text": "static int VK_KANJI "
},
{
"code": null,
"e": 15259,
"s": 15193,
"text": "static int VK_KATAKANA --Constant for the Katakana function key."
},
{
"code": null,
"e": 15325,
"s": 15259,
"text": "static int VK_KATAKANA --Constant for the Katakana function key."
},
{
"code": null,
"e": 15399,
"s": 15325,
"text": "static int VK_KP_DOWN -- Constant for the numeric keypad down arrow key."
},
{
"code": null,
"e": 15473,
"s": 15399,
"text": "static int VK_KP_DOWN -- Constant for the numeric keypad down arrow key."
},
{
"code": null,
"e": 15546,
"s": 15473,
"text": "static int VK_KP_LEFT --Constant for the numeric keypad left arrow key."
},
{
"code": null,
"e": 15619,
"s": 15546,
"text": "static int VK_KP_LEFT --Constant for the numeric keypad left arrow key."
},
{
"code": null,
"e": 15694,
"s": 15619,
"text": "static int VK_KP_RIGHT --Constant for the numeric keypad right arrow key."
},
{
"code": null,
"e": 15769,
"s": 15694,
"text": "static int VK_KP_RIGHT --Constant for the numeric keypad right arrow key."
},
{
"code": null,
"e": 15838,
"s": 15769,
"text": "static int VK_KP_UP --Constant for the numeric keypad up arrow key."
},
{
"code": null,
"e": 15907,
"s": 15838,
"text": "static int VK_KP_UP --Constant for the numeric keypad up arrow key."
},
{
"code": null,
"e": 15933,
"s": 15907,
"text": "static int VK_L "
},
{
"code": null,
"e": 15959,
"s": 15933,
"text": "static int VK_L "
},
{
"code": null,
"e": 16025,
"s": 15959,
"text": "static int VK_LEFT --Constant for the non-numpad left arrow key."
},
{
"code": null,
"e": 16091,
"s": 16025,
"text": "static int VK_LEFT --Constant for the non-numpad left arrow key."
},
{
"code": null,
"e": 16151,
"s": 16091,
"text": "static int VK_LEFT_PARENTHESIS --Constant for the \"(\" key."
},
{
"code": null,
"e": 16211,
"s": 16151,
"text": "static int VK_LEFT_PARENTHESIS --Constant for the \"(\" key."
},
{
"code": null,
"e": 16237,
"s": 16211,
"text": "static int VK_LESS "
},
{
"code": null,
"e": 16263,
"s": 16237,
"text": "static int VK_LESS "
},
{
"code": null,
"e": 16288,
"s": 16263,
"text": "static int VK_M "
},
{
"code": null,
"e": 16313,
"s": 16288,
"text": "static int VK_M "
},
{
"code": null,
"e": 16341,
"s": 16313,
"text": "static int VK_META "
},
{
"code": null,
"e": 16369,
"s": 16341,
"text": "static int VK_META "
},
{
"code": null,
"e": 16425,
"s": 16369,
"text": "static int VK_MINUS -- Constant for the minus key, \"-\""
},
{
"code": null,
"e": 16481,
"s": 16425,
"text": "static int VK_MINUS -- Constant for the minus key, \"-\""
},
{
"code": null,
"e": 16511,
"s": 16481,
"text": "static int VK_MODECHANGE "
},
{
"code": null,
"e": 16541,
"s": 16511,
"text": "static int VK_MODECHANGE "
},
{
"code": null,
"e": 16570,
"s": 16541,
"text": "static int VK_MULTIPLY "
},
{
"code": null,
"e": 16599,
"s": 16570,
"text": "static int VK_MULTIPLY "
},
{
"code": null,
"e": 16617,
"s": 16599,
"text": "static int VK_N "
},
{
"code": null,
"e": 16635,
"s": 16617,
"text": "static int VK_N "
},
{
"code": null,
"e": 16708,
"s": 16635,
"text": "static int VK_NONCONVERT --Constant for the Don't Convert function key."
},
{
"code": null,
"e": 16781,
"s": 16708,
"text": "static int VK_NONCONVERT --Constant for the Don't Convert function key."
},
{
"code": null,
"e": 16809,
"s": 16781,
"text": "static int VK_NUM_LOCK "
},
{
"code": null,
"e": 16837,
"s": 16809,
"text": "static int VK_NUM_LOCK "
},
{
"code": null,
"e": 16892,
"s": 16837,
"text": "static int VK_NUMBER_SIGN --Constant for the \"#\" key."
},
{
"code": null,
"e": 16947,
"s": 16892,
"text": "static int VK_NUMBER_SIGN --Constant for the \"#\" key."
},
{
"code": null,
"e": 16976,
"s": 16947,
"text": "static int VK_NUMPAD0 "
},
{
"code": null,
"e": 17005,
"s": 16976,
"text": "static int VK_NUMPAD0 "
},
{
"code": null,
"e": 17033,
"s": 17005,
"text": "static int VK_NUMPAD1 "
},
{
"code": null,
"e": 17061,
"s": 17033,
"text": "static int VK_NUMPAD1 "
},
{
"code": null,
"e": 17089,
"s": 17061,
"text": "static int VK_NUMPAD2 "
},
{
"code": null,
"e": 17117,
"s": 17089,
"text": "static int VK_NUMPAD2 "
},
{
"code": null,
"e": 17144,
"s": 17117,
"text": "static int VK_NUMPAD3 "
},
{
"code": null,
"e": 17171,
"s": 17144,
"text": "static int VK_NUMPAD3 "
},
{
"code": null,
"e": 17201,
"s": 17171,
"text": "static int VK_NUMPAD4 "
},
{
"code": null,
"e": 17231,
"s": 17201,
"text": "static int VK_NUMPAD4 "
},
{
"code": null,
"e": 17255,
"s": 17231,
"text": "static int VK_NUMPAD5 "
},
{
"code": null,
"e": 17279,
"s": 17255,
"text": "static int VK_NUMPAD5 "
},
{
"code": null,
"e": 17305,
"s": 17279,
"text": "static int VK_NUMPAD6 "
},
{
"code": null,
"e": 17331,
"s": 17305,
"text": "static int VK_NUMPAD6 "
},
{
"code": null,
"e": 17355,
"s": 17331,
"text": "static int VK_NUMPAD7 "
},
{
"code": null,
"e": 17379,
"s": 17355,
"text": "static int VK_NUMPAD7 "
},
{
"code": null,
"e": 17403,
"s": 17379,
"text": "static int VK_NUMPAD8 "
},
{
"code": null,
"e": 17427,
"s": 17403,
"text": "static int VK_NUMPAD8 "
},
{
"code": null,
"e": 17452,
"s": 17427,
"text": "static int VK_NUMPAD9 "
},
{
"code": null,
"e": 17477,
"s": 17452,
"text": "static int VK_NUMPAD9 "
},
{
"code": null,
"e": 17499,
"s": 17477,
"text": "static int VK_O "
},
{
"code": null,
"e": 17521,
"s": 17499,
"text": "static int VK_O "
},
{
"code": null,
"e": 17590,
"s": 17521,
"text": "static int VK_OPEN_BRACKET --Constant for the open bracket key, \"[\""
},
{
"code": null,
"e": 17659,
"s": 17590,
"text": "static int VK_OPEN_BRACKET --Constant for the open bracket key, \"[\""
},
{
"code": null,
"e": 17681,
"s": 17659,
"text": "static int VK_P "
},
{
"code": null,
"e": 17703,
"s": 17681,
"text": "static int VK_P "
},
{
"code": null,
"e": 17728,
"s": 17703,
"text": "static int VK_PAGE_DOWN "
},
{
"code": null,
"e": 17753,
"s": 17728,
"text": "static int VK_PAGE_DOWN "
},
{
"code": null,
"e": 17778,
"s": 17753,
"text": "static int VK_PAGE_UP "
},
{
"code": null,
"e": 17803,
"s": 17778,
"text": "static int VK_PAGE_UP "
},
{
"code": null,
"e": 17825,
"s": 17803,
"text": "static int VK_PASTE "
},
{
"code": null,
"e": 17847,
"s": 17825,
"text": "static int VK_PASTE "
},
{
"code": null,
"e": 17871,
"s": 17847,
"text": "static int VK_PAUSE "
},
{
"code": null,
"e": 17895,
"s": 17871,
"text": "static int VK_PAUSE "
},
{
"code": null,
"e": 17952,
"s": 17895,
"text": "static int VK_PERIOD --Constant for the period key, \".\""
},
{
"code": null,
"e": 18009,
"s": 17952,
"text": "static int VK_PERIOD --Constant for the period key, \".\""
},
{
"code": null,
"e": 18058,
"s": 18009,
"text": "static int VK_PLUS -- Constant for the \"+\" key."
},
{
"code": null,
"e": 18107,
"s": 18058,
"text": "static int VK_PLUS -- Constant for the \"+\" key."
},
{
"code": null,
"e": 18194,
"s": 18107,
"text": "static int VK_PREVIOUS_CANDIDATE -- Constant for the Previous Candidate function key."
},
{
"code": null,
"e": 18281,
"s": 18194,
"text": "static int VK_PREVIOUS_CANDIDATE -- Constant for the Previous Candidate function key."
},
{
"code": null,
"e": 18309,
"s": 18281,
"text": "static int VK_PRINTSCREEN "
},
{
"code": null,
"e": 18337,
"s": 18309,
"text": "static int VK_PRINTSCREEN "
},
{
"code": null,
"e": 18359,
"s": 18337,
"text": "static int VK_PROPS "
},
{
"code": null,
"e": 18381,
"s": 18359,
"text": "static int VK_PROPS "
},
{
"code": null,
"e": 18405,
"s": 18381,
"text": "static int VK_Q "
},
{
"code": null,
"e": 18429,
"s": 18405,
"text": "static int VK_Q "
},
{
"code": null,
"e": 18454,
"s": 18429,
"text": "static int VK_QUOTE "
},
{
"code": null,
"e": 18479,
"s": 18454,
"text": "static int VK_QUOTE "
},
{
"code": null,
"e": 18503,
"s": 18479,
"text": "static int VK_QUOTEDBL "
},
{
"code": null,
"e": 18527,
"s": 18503,
"text": "static int VK_QUOTEDBL "
},
{
"code": null,
"e": 18552,
"s": 18527,
"text": "static int VK_R "
},
{
"code": null,
"e": 18577,
"s": 18552,
"text": "static int VK_R "
},
{
"code": null,
"e": 18646,
"s": 18577,
"text": "static int VK_RIGHT -- Constant for the non-numpad right arrow key."
},
{
"code": null,
"e": 18715,
"s": 18646,
"text": "static int VK_RIGHT -- Constant for the non-numpad right arrow key."
},
{
"code": null,
"e": 18776,
"s": 18715,
"text": "static int VK_RIGHT_PARENTHESIS --Constant for the \")\" key."
},
{
"code": null,
"e": 18837,
"s": 18776,
"text": "static int VK_RIGHT_PARENTHESIS --Constant for the \")\" key."
},
{
"code": null,
"e": 18919,
"s": 18837,
"text": "static int VK_ROMAN_CHARACTERS --Constant for the Roman Characters function key."
},
{
"code": null,
"e": 19001,
"s": 18919,
"text": "static int VK_ROMAN_CHARACTERS --Constant for the Roman Characters function key."
},
{
"code": null,
"e": 19027,
"s": 19001,
"text": "static int VK_S "
},
{
"code": null,
"e": 19053,
"s": 19027,
"text": "static int VK_S "
},
{
"code": null,
"e": 19082,
"s": 19053,
"text": "static int VK_SCROLL_LOCK "
},
{
"code": null,
"e": 19111,
"s": 19082,
"text": "static int VK_SCROLL_LOCK "
},
{
"code": null,
"e": 19176,
"s": 19111,
"text": "static int VK_SEMICOLON -- Constant for the semicolon key, \";\"\n"
},
{
"code": null,
"e": 19241,
"s": 19176,
"text": "static int VK_SEMICOLON -- Constant for the semicolon key, \";\"\n"
},
{
"code": null,
"e": 19345,
"s": 19241,
"text": "static int VK_SEPARATER --This constant is obsolete, and is included only for backwards compatibility."
},
{
"code": null,
"e": 19449,
"s": 19345,
"text": "static int VK_SEPARATER --This constant is obsolete, and is included only for backwards compatibility."
},
{
"code": null,
"e": 19515,
"s": 19449,
"text": "static int VK_SEPARATOR --Constant for the Numpad Separator key."
},
{
"code": null,
"e": 19581,
"s": 19515,
"text": "static int VK_SEPARATOR --Constant for the Numpad Separator key."
},
{
"code": null,
"e": 19607,
"s": 19581,
"text": "static int VK_SHIFT "
},
{
"code": null,
"e": 19633,
"s": 19607,
"text": "static int VK_SHIFT "
},
{
"code": null,
"e": 19699,
"s": 19633,
"text": "static int VK_SLASH -- Constant for the forward slash key, \"/\"\n"
},
{
"code": null,
"e": 19765,
"s": 19699,
"text": "static int VK_SLASH -- Constant for the forward slash key, \"/\"\n"
},
{
"code": null,
"e": 19789,
"s": 19765,
"text": "static int VK_SPACE "
},
{
"code": null,
"e": 19813,
"s": 19789,
"text": "static int VK_SPACE "
},
{
"code": null,
"e": 19836,
"s": 19813,
"text": "static int VK_STOP "
},
{
"code": null,
"e": 19859,
"s": 19836,
"text": "static int VK_STOP "
},
{
"code": null,
"e": 19883,
"s": 19859,
"text": "static int VK_SUBTRACT "
},
{
"code": null,
"e": 19907,
"s": 19883,
"text": "static int VK_SUBTRACT "
},
{
"code": null,
"e": 19929,
"s": 19907,
"text": "static int VK_T "
},
{
"code": null,
"e": 19951,
"s": 19929,
"text": "static int VK_T "
},
{
"code": null,
"e": 19972,
"s": 19951,
"text": "static int VK_TAB "
},
{
"code": null,
"e": 19993,
"s": 19972,
"text": "static int VK_TAB "
},
{
"code": null,
"e": 20012,
"s": 19993,
"text": "static int VK_U "
},
{
"code": null,
"e": 20031,
"s": 20012,
"text": "static int VK_U "
},
{
"code": null,
"e": 20119,
"s": 20031,
"text": "static int VK_UNDEFINED -- This value is used to indicate that the keyCode is unknown."
},
{
"code": null,
"e": 20207,
"s": 20119,
"text": "static int VK_UNDEFINED -- This value is used to indicate that the keyCode is unknown."
},
{
"code": null,
"e": 20261,
"s": 20207,
"text": "static int VK_UNDERSCORE --Constant for the \"_\" key."
},
{
"code": null,
"e": 20315,
"s": 20261,
"text": "static int VK_UNDERSCORE --Constant for the \"_\" key."
},
{
"code": null,
"e": 20335,
"s": 20315,
"text": "static int VK_UNDO "
},
{
"code": null,
"e": 20355,
"s": 20335,
"text": "static int VK_UNDO "
},
{
"code": null,
"e": 20417,
"s": 20355,
"text": "static int VK_UP --Constant for the non-numpad up arrow key."
},
{
"code": null,
"e": 20479,
"s": 20417,
"text": "static int VK_UP --Constant for the non-numpad up arrow key."
},
{
"code": null,
"e": 20497,
"s": 20479,
"text": "static int VK_V "
},
{
"code": null,
"e": 20515,
"s": 20497,
"text": "static int VK_V "
},
{
"code": null,
"e": 20533,
"s": 20515,
"text": "static int VK_W "
},
{
"code": null,
"e": 20551,
"s": 20533,
"text": "static int VK_W "
},
{
"code": null,
"e": 20626,
"s": 20551,
"text": "static int VK_WINDOWS --Constant for the Microsoft Windows \"Windows\" key."
},
{
"code": null,
"e": 20701,
"s": 20626,
"text": "static int VK_WINDOWS --Constant for the Microsoft Windows \"Windows\" key."
},
{
"code": null,
"e": 20719,
"s": 20701,
"text": "static int VK_X "
},
{
"code": null,
"e": 20737,
"s": 20719,
"text": "static int VK_X "
},
{
"code": null,
"e": 20756,
"s": 20737,
"text": "static int VK_Y "
},
{
"code": null,
"e": 20775,
"s": 20756,
"text": "static int VK_Y "
},
{
"code": null,
"e": 20793,
"s": 20775,
"text": "static int VK_Z "
},
{
"code": null,
"e": 20811,
"s": 20793,
"text": "static int VK_Z "
},
{
"code": null,
"e": 20886,
"s": 20811,
"text": "KeyEvent(Component source, int id, long when, int modifiers, int keyCode) "
},
{
"code": null,
"e": 20911,
"s": 20886,
"text": "Deprecated. as of JDK1.1"
},
{
"code": null,
"e": 20999,
"s": 20911,
"text": "KeyEvent(Component source, int id, long when, int modifiers, int keyCode, char keyChar)"
},
{
"code": null,
"e": 21030,
"s": 20999,
"text": " Constructs a KeyEvent object."
},
{
"code": null,
"e": 21135,
"s": 21030,
"text": "KeyEvent(Component source, int id, long when, int modifiers, int keyCode, char keyChar, int keyLocation)"
},
{
"code": null,
"e": 21154,
"s": 21135,
"text": "char getKeyChar() "
},
{
"code": null,
"e": 21215,
"s": 21154,
"text": "Returns the character associated with the key in this event."
},
{
"code": null,
"e": 21233,
"s": 21215,
"text": "int\tgetKeyCode() "
},
{
"code": null,
"e": 21300,
"s": 21233,
"text": "Returns the integer keyCode associated with the key in this event."
},
{
"code": null,
"e": 21322,
"s": 21300,
"text": "int\tgetKeyLocation() "
},
{
"code": null,
"e": 21386,
"s": 21322,
"text": "Returns the location of the key that originated this key event."
},
{
"code": null,
"e": 21436,
"s": 21386,
"text": "static String getKeyModifiersText(int modifiers) "
},
{
"code": null,
"e": 21519,
"s": 21436,
"text": "Returns a String describing the modifier key(s), such as \"Shift\", or \"Ctrl+Shift\"."
},
{
"code": null,
"e": 21558,
"s": 21519,
"text": "static String getKeyText(int keyCode) "
},
{
"code": null,
"e": 21628,
"s": 21558,
"text": "Returns a String describing the keyCode, such as \"HOME\", \"F1\" or \"A\"."
},
{
"code": null,
"e": 21651,
"s": 21628,
"text": "boolean\tisActionKey() "
},
{
"code": null,
"e": 21709,
"s": 21651,
"text": "Returns whether the key in this event is an \"action\" key."
},
{
"code": null,
"e": 21731,
"s": 21709,
"text": "String paramString() "
},
{
"code": null,
"e": 21782,
"s": 21731,
"text": "Returns a parameter string identifying this event."
},
{
"code": null,
"e": 21813,
"s": 21782,
"text": "void setKeyChar(char keyChar) "
},
{
"code": null,
"e": 21868,
"s": 21813,
"text": "Set the keyChar value to indicate a logical character."
},
{
"code": null,
"e": 21898,
"s": 21868,
"text": "void setKeyCode(int keyCode) "
},
{
"code": null,
"e": 21948,
"s": 21898,
"text": "Set the keyCode value to indicate a physical key."
},
{
"code": null,
"e": 21982,
"s": 21948,
"text": "void setModifiers(int modifiers) "
},
{
"code": null,
"e": 22009,
"s": 21982,
"text": "Deprecated. as of JDK1.1.4"
},
{
"code": null,
"e": 22065,
"s": 22009,
"text": "This class inherits methods from the following classes:"
},
{
"code": null,
"e": 22091,
"s": 22065,
"text": "java.awt.event.InputEvent"
},
{
"code": null,
"e": 22117,
"s": 22091,
"text": "java.awt.event.InputEvent"
},
{
"code": null,
"e": 22147,
"s": 22117,
"text": "java.awt.event.ComponentEvent"
},
{
"code": null,
"e": 22177,
"s": 22147,
"text": "java.awt.event.ComponentEvent"
},
{
"code": null,
"e": 22195,
"s": 22177,
"text": "java.awt.AWTEvent"
},
{
"code": null,
"e": 22213,
"s": 22195,
"text": "java.awt.AWTEvent"
},
{
"code": null,
"e": 22235,
"s": 22213,
"text": "java.util.EventObject"
},
{
"code": null,
"e": 22257,
"s": 22235,
"text": "java.util.EventObject"
},
{
"code": null,
"e": 22274,
"s": 22257,
"text": "java.lang.Object"
},
{
"code": null,
"e": 22291,
"s": 22274,
"text": "java.lang.Object"
   }
]
|
Construct Binary Tree from String in C++ | Suppose we have a string consisting of parentheses and integers. We have to construct a binary tree from that string. The whole input represents a binary tree: it holds an integer followed by zero, one or two pairs of parentheses. The integer represents the root's value, and each pair of parentheses contains a child binary tree with the same structure.
So, if the input is like "4(2(3)(1))(6(5))", then the output will be [3,2,1,4,5,6] (inorder traversal)
To solve this, we will follow these steps −
Define a function solve(), this will take s, idx
   if idx >= size of s, then −
      return null
   num := empty string
   while (idx < size of s and s[idx] is not equal to '(' and s[idx] is not equal to ')'), do −
      num := num + s[idx]
      (increase idx by 1)
   node := new node with value num
   if idx < size of s and s[idx] is same as '(', then −
      (increase idx by 1)
      left of node := solve(s, idx)
      (increase idx by 1)
      if idx < size of s and s[idx] is same as '(', then −
         (increase idx by 1)
         right of node := solve(s, idx)
         (increase idx by 1)
   return node
From the main method do the following −
   idx := 0
   temp := new node with value -1
   return solve(s, idx)
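For example, on the input "4(2(3)(1))(6(5))" the recursion unfolds roughly like this (a hand trace of the steps above, added for illustration):

solve reads "4"                              -> node 4
   '(' seen, recurse: solve reads "2"        -> node 2 becomes left child of 4
      '(' seen, recurse: solve reads "3"     -> node 3 becomes left child of 2
      next character is '(' again, recurse: solve reads "1" -> node 1 becomes right child of 2
   next character is '(' again, recurse: solve reads "6"    -> node 6 becomes right child of 4
      '(' seen, recurse: solve reads "5"     -> node 5 becomes left child of 6

An inorder traversal of the resulting tree gives 3 2 1 4 5 6.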
Let us see the following implementation to get a better understanding −
Live Demo
#include <bits/stdc++.h>
using namespace std;
class TreeNode{
public:
int val;
TreeNode *left, *right;
TreeNode(int data){
val = data;
left = NULL;
right = NULL;
}
};
void inord(TreeNode *root){
if(root != NULL){
inord(root->left);
cout << root->val << " ";
inord(root->right);
}
}
class Solution {
public:
TreeNode* solve(string s, int& idx){
if (idx >= s.size())
return NULL;
string num = "";
while (idx < s.size() && s[idx] != '(' && s[idx] != ')') {
num += s[idx];
idx++;
}
TreeNode* node = new TreeNode(stoi(num));
if (idx < s.size() && s[idx] == '(') {
idx++;
node->left = solve(s, idx);
idx++;
if (idx < s.size() && s[idx] == '(') {
idx++;
node->right = solve(s, idx);
idx++;
}
}
return node;
}
TreeNode* str2tree(string s) {
int idx = 0;
TreeNode* temp = new TreeNode(-1);
return solve(s, idx);
}
};
main(){
Solution ob;
TreeNode *root = ob.str2tree("4(2(3)(1))(6(5))");
inord(root);
}
"4(2(3)(1))(6(5))"
3 2 1 4 5 6 | [
{
"code": null,
"e": 1418,
"s": 1062,
"text": "Suppose we have a string consisting of parenthesis and integers. We have to construct a binary tree from that string. The whole input represents a binary tree. It holds an integer that followed by zero, one or two pairs of parentheses. The integer represents the root's value and a pair of parenthesis contains a child binary tree with the same structure."
},
{
"code": null,
"e": 1521,
"s": 1418,
"text": "So, if the input is like \"4(2(3)(1))(6(5))\", then the output will be [3,2,1,4,5,6] (inorder traversal)"
},
{
"code": null,
"e": 1565,
"s": 1521,
"text": "To solve this, we will follow these steps −"
},
{
"code": null,
"e": 1615,
"s": 1565,
"text": "Define a function solve(), this will take s, idx,"
},
{
"code": null,
"e": 1665,
"s": 1615,
"text": "Define a function solve(), this will take s, idx,"
},
{
"code": null,
"e": 1704,
"s": 1665,
"text": "if idx >= size of s, then −return null"
},
{
"code": null,
"e": 1732,
"s": 1704,
"text": "if idx >= size of s, then −"
},
{
"code": null,
"e": 1744,
"s": 1732,
"text": "return null"
},
{
"code": null,
"e": 1756,
"s": 1744,
"text": "return null"
},
{
"code": null,
"e": 1776,
"s": 1756,
"text": "num := empty string"
},
{
"code": null,
"e": 1796,
"s": 1776,
"text": "num := empty string"
},
{
"code": null,
"e": 1926,
"s": 1796,
"text": "while (idx < size of s and s[idx] is not equal to '(' and s[idx] is not equal to ')'), do −num := num + s[idx](increase idx by 1)"
},
{
"code": null,
"e": 2018,
"s": 1926,
"text": "while (idx < size of s and s[idx] is not equal to '(' and s[idx] is not equal to ')'), do −"
},
{
"code": null,
"e": 2038,
"s": 2018,
"text": "num := num + s[idx]"
},
{
"code": null,
"e": 2058,
"s": 2038,
"text": "num := num + s[idx]"
},
{
"code": null,
"e": 2078,
"s": 2058,
"text": "(increase idx by 1)"
},
{
"code": null,
"e": 2098,
"s": 2078,
"text": "(increase idx by 1)"
},
{
"code": null,
"e": 2129,
"s": 2098,
"text": "node = new node with value num"
},
{
"code": null,
"e": 2160,
"s": 2129,
"text": "node = new node with value num"
},
{
"code": null,
"e": 2400,
"s": 2160,
"text": "if idx < size of s and s[idx] is same as '(', then −(increase idx by 1)left of node := solve(s, idx)(increase idx by 1)if idx < size of s and s[idx] is same as '(', then −(increase idx by 1)right of node := solve(s, idx)(increase idx by 1)"
},
{
"code": null,
"e": 2453,
"s": 2400,
"text": "if idx < size of s and s[idx] is same as '(', then −"
},
{
"code": null,
"e": 2473,
"s": 2453,
"text": "(increase idx by 1)"
},
{
"code": null,
"e": 2493,
"s": 2473,
"text": "(increase idx by 1)"
},
{
"code": null,
"e": 2523,
"s": 2493,
"text": "left of node := solve(s, idx)"
},
{
"code": null,
"e": 2553,
"s": 2523,
"text": "left of node := solve(s, idx)"
},
{
"code": null,
"e": 2573,
"s": 2553,
"text": "(increase idx by 1)"
},
{
"code": null,
"e": 2593,
"s": 2573,
"text": "(increase idx by 1)"
},
{
"code": null,
"e": 2714,
"s": 2593,
"text": "if idx < size of s and s[idx] is same as '(', then −(increase idx by 1)right of node := solve(s, idx)(increase idx by 1)"
},
{
"code": null,
"e": 2767,
"s": 2714,
"text": "if idx < size of s and s[idx] is same as '(', then −"
},
{
"code": null,
"e": 2787,
"s": 2767,
"text": "(increase idx by 1)"
},
{
"code": null,
"e": 2807,
"s": 2787,
"text": "(increase idx by 1)"
},
{
"code": null,
"e": 2838,
"s": 2807,
"text": "right of node := solve(s, idx)"
},
{
"code": null,
"e": 2869,
"s": 2838,
"text": "right of node := solve(s, idx)"
},
{
"code": null,
"e": 2889,
"s": 2869,
"text": "(increase idx by 1)"
},
{
"code": null,
"e": 2909,
"s": 2889,
"text": "(increase idx by 1)"
},
{
"code": null,
"e": 2921,
"s": 2909,
"text": "return node"
},
{
"code": null,
"e": 2933,
"s": 2921,
"text": "return node"
},
{
"code": null,
"e": 2973,
"s": 2933,
"text": "From the main method do the following −"
},
{
"code": null,
"e": 3013,
"s": 2973,
"text": "From the main method do the following −"
},
{
"code": null,
"e": 3022,
"s": 3013,
"text": "idx := 0"
},
{
"code": null,
"e": 3031,
"s": 3022,
"text": "idx := 0"
},
{
"code": null,
"e": 3061,
"s": 3031,
"text": "temp = new node with value -1"
},
{
"code": null,
"e": 3091,
"s": 3061,
"text": "temp = new node with value -1"
},
{
"code": null,
"e": 3112,
"s": 3091,
"text": "return solve(s, idx)"
},
{
"code": null,
"e": 3133,
"s": 3112,
"text": "return solve(s, idx)"
},
{
"code": null,
"e": 3203,
"s": 3133,
"text": "Let us see the following implementation to get better understanding −"
},
{
"code": null,
"e": 3214,
"s": 3203,
"text": " Live Demo"
},
{
"code": null,
"e": 4381,
"s": 3214,
"text": "#include <bits/stdc++.h>\nusing namespace std;\nclass TreeNode{\n public:\n int val;\n TreeNode *left, *right;\n TreeNode(int data){\n val = data;\n left = NULL;\n right = NULL;\n }\n};\nvoid inord(TreeNode *root){\n if(root != NULL){\n inord(root->left);\n cout << root->val << \" \";\n inord(root->right);\n }\n}\nclass Solution {\npublic:\n TreeNode* solve(string s, int& idx){\n if (idx >= s.size())\n return NULL;\n string num = \"\";\n while (idx < s.size() && s[idx] != '(' && s[idx] != ')') {\n num += s[idx];\n idx++;\n }\n TreeNode* node = new TreeNode(stoi(num));\n if (idx < s.size() && s[idx] == '(') {\n idx++;\n node->left = solve(s, idx);\n idx++;\n if (idx < s.size() && s[idx] == '(') {\n idx++;\n node->right = solve(s, idx);\n idx++;\n }\n }\n return node;\n }\n TreeNode* str2tree(string s) {\n int idx = 0;\n TreeNode* temp = new TreeNode(-1);\n return solve(s, idx);\n }\n};\nmain(){\n Solution ob;\n TreeNode *root = ob.str2tree(\"4(2(3)(1))(6(5))\");\n inord(root);\n}"
},
{
"code": null,
"e": 4400,
"s": 4381,
"text": "\"4(2(3)(1))(6(5))\""
},
{
"code": null,
"e": 4412,
"s": 4400,
"text": "3 2 1 4 5 6"
}
]
|
Java private Keyword | ❮ Java Keywords
A class with private attributes:
public class Main {
private String fname = "John";
private String lname = "Doe";
private String email = "[email protected]";
private int age = 24;
public static void main(String[] args) {
Main myObj = new Main();
System.out.println("Name: " + myObj.fname + " " + myObj.lname);
System.out.println("Email: " + myObj.email);
System.out.println("Age: " + myObj.age);
}
}
Try it Yourself »
The private keyword is an access modifier used for attributes, methods and constructors, making them only accessible within the declared class.
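As a brief illustration of that rule, here is a hypothetical pair of classes (not part of the example above): direct access to the private field from another class does not compile, while access through a public method works.

// Person.java
public class Person {
  private String fname = "John";
  // public accessor that exposes the private attribute in a controlled way
  public String getFname() {
    return fname;
  }
}

// App.java
public class App {
  public static void main(String[] args) {
    Person p = new Person();
    // System.out.println(p.fname);   // compile error: fname has private access in Person
    System.out.println(p.getFname()); // works: prints John
  }
}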
Read more about modifiers in our Java Modifiers Tutorial. | [
{
"code": null,
"e": 18,
"s": 0,
"text": "\n❮ Java Keywords\n"
},
{
"code": null,
"e": 51,
"s": 18,
"text": "A class with private attributes:"
},
{
"code": null,
"e": 443,
"s": 51,
"text": "public class Main {\n private String fname = \"John\";\n private String lname = \"Doe\";\n private String email = \"[email protected]\";\n private int age = 24;\n\n public static void main(String[] args) {\n Main myObj = new Main();\n System.out.println(\"Name: \" + myObj.fname + \" \" + myObj.lname);\n System.out.println(\"Email: \" + myObj.email);\n System.out.println(\"Age: \" + myObj.age);\n }\n}\n"
},
{
"code": null,
"e": 463,
"s": 443,
"text": "\nTry it Yourself »\n"
},
{
"code": null,
"e": 607,
"s": 463,
"text": "The private keyword is an access modifier used for attributes, methods and constructors, making them only accessible within the declared class."
},
{
"code": null,
"e": 665,
"s": 607,
"text": "Read more about modifiers in our Java Modifiers Tutorial."
 }
]
|
How to use Android Picasso library to download images using Kotlin? | This example demonstrates how to use the Android Picasso library to download images using Kotlin.
Step 1 − Create a new project in Android Studio: go to File ⇒ New Project and fill in all the required details.
Add the following dependency to the build gradle (Module: app)
implementation 'com.squareup.picasso:picasso:2.4.0'
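For clarity, the dependency line above belongs inside the dependencies block of the module-level build.gradle file. A minimal sketch of where it goes:

dependencies {
   // ...keep the dependencies your module already declares...
   // Picasso image-loading library used by this example
   implementation 'com.squareup.picasso:picasso:2.4.0'
}

After editing the file, sync the project with the Gradle files so that the library is downloaded.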
Step 2 − Add the following code to res/layout/activity_main.xml.
<?xml version="1.0" encoding="utf-8"?>
<RelativeLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
android:paddingBottom="4dp"
tools:context=".MainActivity">
<ImageView
android:id="@+id/imageView"
android:layout_width="match_parent"
android:layout_height="match_parent"
android:layout_above="@id/btnDownload" />
<Button
android:id="@+id/btnDownload"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:layout_alignParentBottom="true"
android:text="Download Image"
android:textAlignment="center" />
</RelativeLayout>
Step 3 − Add the following code to src/MainActivity.kt
import android.os.Bundle
import android.widget.Button
import android.widget.ImageView
import androidx.appcompat.app.AppCompatActivity
import com.squareup.picasso.Picasso
class MainActivity : AppCompatActivity() {
lateinit var imageView: ImageView
lateinit var btnDownload: Button
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
setContentView(R.layout.activity_main)
title = "KotlinApp"
imageView = findViewById(R.id.imageView)
btnDownload = findViewById(R.id.btnDownload)
btnDownload.setOnClickListener {
Picasso.with(this)
.load("https://images.unsplash.com/photo-1555083892-97490c72c90c?ixlib=rb-1.2.1&w=1000&q=80")
.into(imageView);
}
}
}
Step 4 − Add the following code to androidManifest.xml
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android" package="com.example.q11">
<uses-permission android:name="android.permission.INTERNET" />
<application
android:allowBackup="true"
android:icon="@mipmap/ic_launcher"
android:label="@string/app_name"
android:roundIcon="@mipmap/ic_launcher_round"
android:supportsRtl="true"
android:theme="@style/AppTheme">
<activity android:name=".MainActivity">
<intent-filter>
<action android:name="android.intent.action.MAIN" />
<category android:name="android.intent.category.LAUNCHER" />
</intent-filter>
</activity>
</application>
</manifest>
Let's try to run the application. I assume you have connected an actual Android mobile device to your computer. To run the app from Android Studio, open one of your project's activity files and click the Run icon in the toolbar. Select your mobile device as the deployment target; the app will launch and display its default screen on the device.
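As an optional refinement, Picasso can show a placeholder image while the download is in progress and a fallback image if the download fails. A minimal sketch, assuming two drawables named placeholder_image and error_image exist in res/drawable (they are not part of the project above):

Picasso.with(this)
   .load("https://images.unsplash.com/photo-1555083892-97490c72c90c?ixlib=rb-1.2.1&w=1000&q=80")
   .placeholder(R.drawable.placeholder_image) // shown while the image downloads
   .error(R.drawable.error_image)             // shown if the download fails
   .into(imageView)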
Click here to download the project code. | [
{
"code": null,
"e": 1160,
"s": 1062,
"text": "This example demonstrates how to use the Android Picasso library to download images using Kotlin."
},
{
"code": null,
"e": 1289,
"s": 1160,
"text": "Step 1 − Create a new project in Android Studio, go to File ⇒ New Project and fill all required details to create a new project."
},
{
"code": null,
"e": 1352,
"s": 1289,
"text": "Add the following dependency to the build gradle (Module: app)"
},
{
"code": null,
"e": 1404,
"s": 1352,
"text": "implementation 'com.squareup.picasso:picasso:2.4.0'"
},
{
"code": null,
"e": 1469,
"s": 1404,
"text": "Step 2 − Add the following code to res/layout/activity_main.xml."
},
{
"code": null,
"e": 2230,
"s": 1469,
"text": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<RelativeLayout xmlns:android=\"http://schemas.android.com/apk/res/android\"\n xmlns:tools=\"http://schemas.android.com/tools\"\n android:layout_width=\"match_parent\"\n android:layout_height=\"match_parent\"\n android:paddingBottom=\"4dp\"\n tools:context=\".MainActivity\">\n <ImageView\n android:id=\"@+id/imageView\"\n android:layout_width=\"match_parent\"\n android:layout_height=\"match_parent\"\n android:layout_above=\"@id/btnDownload\" />\n <Button\n android:id=\"@+id/btnDownload\"\n android:layout_width=\"match_parent\"\n android:layout_height=\"wrap_content\"\n android:layout_alignParentBottom=\"true\"\n android:text=\"Download Image\"\n android:textAlignment=\"center\" />\n</RelativeLayout>"
},
{
"code": null,
"e": 2285,
"s": 2230,
"text": "Step 3 − Add the following code to src/MainActivity.kt"
},
{
"code": null,
"e": 3049,
"s": 2285,
"text": "import android.os.Bundle\nimport android.widget.Button\nimport android.widget.ImageView\nimport androidx.appcompat.app.AppCompatActivity\nimport com.squareup.picasso.Picasso\nclass MainActivity : AppCompatActivity() {\n lateinit var imageView: ImageView\n lateinit var btnDownload: Button\n override fun onCreate(savedInstanceState: Bundle?) {\n super.onCreate(savedInstanceState)\n setContentView(R.layout.activity_main)\n title = \"KotlinApp\"\n imageView = findViewById(R.id.imageView)\n btnDownload = findViewById(R.id.btnDownload)\n btnDownload.setOnClickListener {\n Picasso.with(this)\n .load(\"https://images.unsplash.com/photo-1555083892-97490c72c90c?ixlib=rb-1.2.1&w=1000&q=80\")\n .into(imageView);\n }\n }\n}"
},
{
"code": null,
"e": 3104,
"s": 3049,
"text": "Step 4 − Add the following code to androidManifest.xml"
},
{
"code": null,
"e": 3838,
"s": 3104,
"text": "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<manifest xmlns:android=\"http://schemas.android.com/apk/res/android\" package=\"com.example.q11\">\n<uses-permission android:name=\"android.permission.INTERNET\" />\n <application\n android:allowBackup=\"true\"\n android:icon=\"@mipmap/ic_launcher\"\n android:label=\"@string/app_name\"\n android:roundIcon=\"@mipmap/ic_launcher_round\"\n android:supportsRtl=\"true\"\n android:theme=\"@style/AppTheme\">\n <activity android:name=\".MainActivity\">\n <intent-filter>\n <action android:name=\"android.intent.action.MAIN\" />\n <category android:name=\"android.intent.category.LAUNCHER\" />\n </intent-filter>\n </activity>\n </application>\n</manifest>"
},
{
"code": null,
"e": 4188,
"s": 3838,
"text": "Let's try to run your application. I assume you have connected your actual Android Mobile device with your computer. To run the app from android studio, open one of your project's activity files and click the Run icon from the toolbar. Select your mobile device as an option and then check your mobile device which will display your default screen."
},
{
"code": null,
"e": 4229,
"s": 4188,
"text": "Click here to download the project code."
}
]
|
How to find the sixth cell (second row and third column ) of a 3x3 table in jQuery ? - GeeksforGeeks | 18 Apr, 2021
In this article, we will see how to get the sixth cell of a 3×3 table in jQuery. To find the nth-child of an element, we can use the nth-child selector of jQuery.
Approach: The sixth cell of a 3×3 table can be found using the following jQuery call:
$('#table1 tr:nth-child(2) td:nth-child(3)').text();
If the table has column headers, the sixth cell can be found using the following call.
$('#table1 tr:nth-child(3) td:nth-child(3)').text();
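An equivalent way to reach the same cell is jQuery's zero-based .eq() method; the following sketch assumes the same table with a header row as in the HTML below:

$('#table1 tr').eq(2).find('td').eq(2).text(); // third row, third cell => "Cell 6"

Note that .eq() counts from 0 while :nth-child() counts from 1, which is why the two indexes look different.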
HTML code: Please note that the nth-child selector’s index is 1-based.
HTML
<!DOCTYPE html>
<html>
<head>
    <script type="text/javascript" src="https://code.jquery.com/jquery-3.6.0.min.js">
    </script>
    <script type="text/javascript">
        $(document).ready(function () {
            $('#btnGetValue').click(function () {
                $('#value').text($('#table1 tr:nth-child(3) td:nth-child(3)').text());
            });
        });
    </script>
</head>
<body style="text-align:center">
    <table id="table1" style="width:100%" border="1">
        <tr>
            <th> Header1 </th>
            <th> Header2 </th>
            <th> Header3 </th>
        </tr>
        <tr>
            <td>Cell 1</td>
            <td>Cell 2</td>
            <td>Cell 3</td>
        </tr>
        <tr>
            <td>Cell 4</td>
            <td>Cell 5</td>
            <td>Cell 6</td>
        </tr>
    </table>
    <br>
    <input type="button" value="Get 6th Cell's value" id="btnGetValue" />
    <span id="value"></span>
</body>
</html>
Output: We see the following web page.
Before Click:
After Click:
You see the sixth cell’s value next to the button (highlighted in red rectangle)
After click output | [
{
"code": null,
"e": 25755,
"s": 25727,
"text": "\n18 Apr, 2021"
},
{
"code": null,
"e": 25919,
"s": 25755,
"text": "In this article, we will see how to get the sixth cell of a 3×3 table in jQuery. To find the nth-child of an element, we can use the nth-child selector of jQuery. "
},
{
"code": null,
"e": 26001,
"s": 25919,
"text": "Approach: Sixth cell of a 3×3 table can be found using the following jQuery call:"
},
{
"code": null,
"e": 26054,
"s": 26001,
"text": "$('#table1 tr:nth-child(2) td:nth-child(3)').text();"
},
{
"code": null,
"e": 26141,
"s": 26054,
"text": "If the table has column headers, the sixth cell can be found using the following call."
},
{
"code": null,
"e": 26194,
"s": 26141,
"text": "$('#table1 tr:nth-child(3) td:nth-child(3)').text();"
},
{
"code": null,
"e": 26265,
"s": 26194,
"text": "HTML code: Please note that the nth-child selector’s index is 1 based."
},
{
"code": null,
"e": 26270,
"s": 26265,
"text": "HTML"
},
{
"code": "<!DOCTYPE html><html> <head> <script type=\"text/javascript\" src=\"https://code.jquery.com/jquery-3.6.0.min.js\"> </script> <script type=\"text/javascript\"> $(document).ready(function () { $('#btnGetValue').click(function () { $('#value').text($('#table1 tr:nth-child(3) td:nth-child(3)').text()); }); }); </script></head> <body style=\"text-align:center\"> <table id=\"table1\" style= \"width:100%\" border=\"1\"> <tr> <th> Header1 </th> <th> Header2 </th> <th> Header3 </th> </tr> <tr> <td>Cell 1</td> <td>Cell 2</td> <td>Cell 3</td> </tr> <tr> <td>Cell 4</td> <td>Cell 5</td> <td>Cell 6</td> </tr> </table> <br> <input type=\"button\" value=\"Get 6th Cell's value\" id=\"btnGetValue\" /> <span id=\"value\"></span></body> </html>",
"e": 27320,
"s": 26270,
"text": null
},
{
"code": null,
"e": 27359,
"s": 27320,
"text": "Output: We see the following web page."
},
{
"code": null,
"e": 27373,
"s": 27359,
"text": "Before Click:"
},
{
"code": null,
"e": 27386,
"s": 27373,
"text": "After Click:"
},
{
"code": null,
"e": 27467,
"s": 27386,
"text": "You see the sixth cell’s value next to the button (highlighted in red rectangle)"
},
{
"code": null,
"e": 27486,
"s": 27467,
"text": "After click output"
 }
]
|
How to do time series forecasting in BigQuery | by Lak Lakshmanan | Towards Data Science | Let’s carry out 2-week forecasts of the number of bicycle rentals that will commence at one of the bicycle stations in Hyde Park based on the past six weeks.
The first step, as with any machine learning problem, is to gather the training data and explore it. Assume that we have the data on rentals until mid-June of 2015 and we'd like to predict for the rest of the month. We can gather the past 6 weeks of data using:
SELECT
   CAST(EXTRACT(date from start_date) AS TIMESTAMP) AS date
   , COUNT(*) AS numrentals
FROM `bigquery-public-data`.london_bicycles.cycle_hire
WHERE start_station_name LIKE '%Hyde%' -- all stations in Hyde Park
GROUP BY date
HAVING date BETWEEN '2015-05-01' AND '2015-06-15'
ORDER BY date
Plotting this, we see a sort of weekly trend with higher rentals on weekends:
We can use this data to train an ARIMA model, telling BigQuery which column is the data column and which one the timestamp column:
CREATE OR REPLACE MODEL ch09eu.numrentals_forecast
OPTIONS(model_type='ARIMA',
        time_series_data_col='numrentals',
        time_series_timestamp_col='date') AS
SELECT
   CAST(EXTRACT(date from start_date) AS TIMESTAMP) AS date
   , COUNT(*) AS numrentals
FROM `bigquery-public-data`.london_bicycles.cycle_hire
WHERE start_station_name LIKE '%Hyde%' -- all stations in Hyde Park
GROUP BY date
HAVING date BETWEEN '2015-05-01' AND '2015-06-15'
Once the model is trained, we can evaluate it using ML.EVALUATE() and view the ARIMA coefficients using ML.ARIMA_COEFFICIENTS().
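For reference, those two calls look like this for the model created above:

SELECT * FROM ML.EVALUATE(MODEL ch09eu.numrentals_forecast)

SELECT * FROM ML.ARIMA_COEFFICIENTS(MODEL ch09eu.numrentals_forecast)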
We can forecast the number of rentals for each of the next 14 days and also obtain 90% confidence bounds using:
SELECT * FROM ML.FORECAST(MODEL ch09eu.numrentals_forecast, STRUCT(14 AS horizon, 0.9 AS confidence_level))
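In the plotting code that follows, df and fcst are pandas DataFrames holding, respectively, the historical query from the start of this post and the ML.FORECAST output above. A minimal sketch of how they could be loaded with the BigQuery client library (in a notebook, the %%bigquery cell magic works equally well):

from google.cloud import bigquery

client = bigquery.Client()  # assumes application-default credentials are set up

history_sql = """
SELECT CAST(EXTRACT(date from start_date) AS TIMESTAMP) AS date, COUNT(*) AS numrentals
FROM `bigquery-public-data`.london_bicycles.cycle_hire
WHERE start_station_name LIKE '%Hyde%'
GROUP BY date
HAVING date BETWEEN '2015-05-01' AND '2015-06-15'
ORDER BY date
"""

forecast_sql = """
SELECT * FROM ML.FORECAST(MODEL ch09eu.numrentals_forecast,
                          STRUCT(14 AS horizon, 0.9 AS confidence_level))
"""

df = client.query(history_sql).to_dataframe()    # historical rentals
fcst = client.query(forecast_sql).to_dataframe() # 14-day forecast with confidence bounds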
The history, forecast, and confidence bounds can be plotted using:
import matplotlib.pyplot as plt
import pandas as pd

def plot_historical_and_forecast(input_timeseries, forecast_output, timestamp_col_name, data_col_name):
    plt.figure(figsize=(20,6))
    plt.plot(input_timeseries[timestamp_col_name], input_timeseries[data_col_name], label = 'Historical')
    plt.xlabel(timestamp_col_name)
    plt.ylabel(data_col_name)

    forecast_output['forecast_timestamp'] = pd.to_datetime(forecast_output['forecast_timestamp'])
    x_data = forecast_output['forecast_timestamp']
    y_data = forecast_output['forecast_value']
    confidence_level = forecast_output['confidence_level'].iloc[0] * 100
    low_CI = forecast_output['confidence_interval_lower_bound']
    upper_CI = forecast_output['confidence_interval_upper_bound']

    # Plot the data, set the linewidth, color and transparency of the
    # line, provide a label for the legend
    plt.plot(x_data, y_data, alpha = 1, label = 'Forecast', linestyle='--')
    # Shade the confidence interval
    plt.fill_between(x_data, low_CI, upper_CI, color = '#539caf', alpha = 0.4, label = str(confidence_level) + '% confidence interval')
    # Display legend
    plt.legend(loc = 'upper center', prop={'size': 16})

plot_historical_and_forecast(df, fcst, 'date', 'numrentals')
This yields:
But how well does this compare with what actually happened in the latter part of June? We can pull out the data for those days and compare with the forecast time series:
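The actuals for the forecast window can be pulled with the same aggregation query used for training, just over the later date range (for example June 16 to June 29, the 14 forecast days):

SELECT
   CAST(EXTRACT(date from start_date) AS TIMESTAMP) AS date
   , COUNT(*) AS numrentals
FROM `bigquery-public-data`.london_bicycles.cycle_hire
WHERE start_station_name LIKE '%Hyde%' -- all stations in Hyde Park
GROUP BY date
HAVING date BETWEEN '2015-06-16' AND '2015-06-29'
ORDER BY date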
Pretty cool, eh?
So far, I have been forecasting the overall rental volume for all the bicycle stations in Hyde Park. How do we predict the rental volume for each individual station? Use the time_series_id_col:
CREATE OR REPLACE MODEL ch09eu.numrentals_forecast
OPTIONS(model_type='ARIMA',
        time_series_data_col='numrentals',
        time_series_timestamp_col='date',
        time_series_id_col='start_station_name') AS
SELECT
   start_station_name
   , CAST(EXTRACT(date from start_date) AS TIMESTAMP) AS date
   , COUNT(*) AS numrentals
FROM `bigquery-public-data`.london_bicycles.cycle_hire
WHERE start_station_name LIKE '%Hyde%' -- all stations in Hyde Park
GROUP BY start_station_name, date
HAVING date BETWEEN '2015-01-01' AND '2015-06-15'
Note that instead of training the series on 45 days (May 1 to June 15), I’m now training on a longer time period. That’s because aggregate time series will tend to be smoother and much easier to predict than the time series for individual stations. So, we have to show the model a longer trend-line.
Now, the model is not one ARIMA model, but a separate ARIMA model for each station name. Indeed, doing:
SELECT * FROM ML.ARIMA_COEFFICIENTS(MODEL ch09eu.numrentals_forecast)
ORDER BY start_station_name
gives us a separate set of coefficients for each start_station_name:
Note something interesting — each of the stations has ARIMA models of different complexity! Under the hood, BigQuery ML does automatic hyper-parameter tuning. Although the model is called “ARIMA”, the underlying algorithm actually includes quite a few bells-and-whistles including anomaly detection, holiday effect modeling (user needs to specify the holiday region), seasonality detection/modeling, and trend modeling. Plus, the different time series are trained in parallel.
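As an illustration of the holiday option (our own example, not from the original post; check the current BigQuery ML documentation for the exact option name and supported regions), the holiday region can be supplied when creating the model:

CREATE OR REPLACE MODEL ch09eu.numrentals_forecast
OPTIONS(model_type='ARIMA',
        time_series_data_col='numrentals',
        time_series_timestamp_col='date',
        holiday_region='GB') AS
SELECT ... -- same training query as above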
When we do a prediction, we will get a prediction for each station and timestamp:
SELECT start_station_name, forecast_timestamp, forecast_value
FROM ML.FORECAST(MODEL ch09eu.numrentals_forecast,
                 STRUCT(3 AS horizon, 0.9 AS confidence_level))
ORDER BY start_station_name, forecast_timestamp
yields one time-series forecast per station:
The full notebook on GitHub has plots of the forecasts by station.
The model evaluation might help you anticipate which stations the model will do better in (the lower the seasonality-corrected variance, the easier the series is to predict, and so, the better the model should be — you cannot use AIC here because the AIC of different time series is not comparable):
SELECT * FROM ML.EVALUATE(MODEL ch09eu.numrentals_forecast)
ORDER BY variance DESC
Based on the results, we expect predictions at Hyde Park Corner to be the worst, and Knightsbridge to be the best:
Enjoy!
Run through the full notebook on GitHub .
To learn more about BigQuery ML, read Chapter 9 of BigQuery: The Definitive Guide. The book is periodically updated with these blog posts so that it remains, well, definitive.
Thanks to Xi Cheng and Amir Hormati for helpful suggestions | [
{
"code": null,
"e": 330,
"s": 172,
"text": "Let’s carry out 2-week forecasts of the number of bicycle rentals that will commence at one of the bicycle stations in Hyde Park based on the past six weeks."
},
{
"code": null,
"e": 591,
"s": 330,
"text": "The first step, as with any machine learning problem is to gather the training data and explore it. Assume that we have the data on rentals until mid-June of 2015 and we’d like to predict for the rest of the month. We can gather the past 6 weeks of data using:"
},
{
"code": null,
"e": 882,
"s": 591,
"text": "SELECT CAST(EXTRACT(date from start_date) AS TIMESTAMP) AS date , COUNT(*) AS numrentalsFROM `bigquery-public-data`.london_bicycles.cycle_hireWHERE start_station_name LIKE '%Hyde%' -- all stations in Hyde ParkGROUP BY dateHAVING date BETWEEN '2015-05-01' AND '2015-06-15'ORDER BY date"
},
{
"code": null,
"e": 960,
"s": 882,
"text": "Plotting this, we see a sort of weekly trend with higher rentals on weekends:"
},
{
"code": null,
"e": 1091,
"s": 960,
"text": "We can use this data to train an ARIMA model, telling BigQuery which column is the data column and which one the timestamp column:"
},
{
"code": null,
"e": 1532,
"s": 1091,
"text": "CREATE OR REPLACE MODEL ch09eu.numrentals_forecastOPTIONS(model_type='ARIMA', time_series_data_col='numrentals', time_series_timestamp_col='date') ASSELECT CAST(EXTRACT(date from start_date) AS TIMESTAMP) AS date , COUNT(*) AS numrentalsFROM `bigquery-public-data`.london_bicycles.cycle_hireWHERE start_station_name LIKE '%Hyde%' -- all stations in Hyde ParkGROUP BY dateHAVING date BETWEEN '2015-05-01' AND '2015-06-15'"
},
{
"code": null,
"e": 1661,
"s": 1532,
"text": "Once the model is trained, we can evaluate it using ML.EVALUATE() and view the ARIMA coefficients using ML.ARIMA_COEFFICIENTS()."
},
{
"code": null,
"e": 1785,
"s": 1661,
"text": "We can forecast the number of rentals for each of the next 14 days and also obtain 90th percentile confidence bounds using:"
},
{
"code": null,
"e": 1911,
"s": 1785,
"text": "SELECT * FROM ML.FORECAST(MODEL ch09eu.numrentals_forecast, STRUCT(14 AS horizon, 0.9 AS confidence_level))"
},
{
"code": null,
"e": 1978,
"s": 1911,
"text": "The history, forecast, and confidence bounds can be plotted using:"
},
{
"code": null,
"e": 3176,
"s": 1978,
"text": "import matplotlib.pyplot as pltimport pandas as pddef plot_historical_and_forecast(input_timeseries, forecast_output, timestamp_col_name, data_col_name): plt.figure(figsize=(20,6)) plt.plot(input_timeseries[timestamp_col_name], input_timeseries[data_col_name], label = 'Historical') plt.xlabel(timestamp_col_name) plt.ylabel(data_col_name)forecast_output['forecast_timestamp'] = pd.to_datetime(forecast_output['forecast_timestamp']) x_data = forecast_output['forecast_timestamp'] y_data = forecast_output['forecast_value'] confidence_level = forecast_output['confidence_level'].iloc[0] * 100 low_CI = forecast_output['confidence_interval_lower_bound'] upper_CI = forecast_output['confidence_interval_upper_bound']# Plot the data, set the linewidth, color and transparency of the # line, provide a label for the legend plt.plot(x_data, y_data, alpha = 1, label = 'Forecast', linestyle='--') # Shade the confidence interval plt.fill_between(x_data, low_CI, upper_CI, color = '#539caf', alpha = 0.4, label = str(confidence_level) + '% confidence interval') # Display legend plt.legend(loc = 'upper center', prop={'size': 16})plot_historical_and_forecast(df, fcst, 'date', 'numrentals')"
},
{
"code": null,
"e": 3189,
"s": 3176,
"text": "This yields:"
},
{
"code": null,
"e": 3354,
"s": 3189,
"text": "But how well does this compare with actually happened in the latter part of June? We can pull out the data for those days and compare with the forecast time series:"
},
{
"code": null,
"e": 3371,
"s": 3354,
"text": "Pretty cool, eh?"
},
{
"code": null,
"e": 3565,
"s": 3371,
"text": "So far, I have been forecasting the overall rental volume for all the bicycle stations in Hyde Park. How do we predict the rental volume for each individual station? Use the time_series_id_col:"
},
{
"code": null,
"e": 4097,
"s": 3565,
"text": "CREATE OR REPLACE MODEL ch09eu.numrentals_forecastOPTIONS(model_type='ARIMA', time_series_data_col='numrentals', time_series_timestamp_col='date', time_series_id_col='start_station_name') ASSELECT start_station_name , CAST(EXTRACT(date from start_date) AS TIMESTAMP) AS date , COUNT(*) AS numrentalsFROM `bigquery-public-data`.london_bicycles.cycle_hireWHERE start_station_name LIKE '%Hyde%' -- all stations in Hyde ParkGROUP BY start_station_name, dateHAVING date BETWEEN '2015-01-01' AND '2015-06-15'"
},
{
"code": null,
"e": 4397,
"s": 4097,
"text": "Note that instead of training the series on 45 days (May 1 to June 15), I’m now training on a longer time period. That’s because aggregate time series will tend to be smoother and much easier to predict than the time series for individual stations. So, we have to show the model a longer trend-line."
},
{
"code": null,
"e": 4501,
"s": 4397,
"text": "Now, the model is not one ARIMA model, but a separate ARIMA model for each station name. Indeed, doing:"
},
{
"code": null,
"e": 4598,
"s": 4501,
"text": "SELECT * FROM ML.ARIMA_COEFFICIENTS(MODEL ch09eu.numrentals_forecast)ORDER BY start_station_name"
},
{
"code": null,
"e": 4667,
"s": 4598,
"text": "gives us a separate set of coefficients for each start_station_name:"
},
{
"code": null,
"e": 5144,
"s": 4667,
"text": "Note something interesting — each of the stations has ARIMA models of different complexity! Under the hood, BigQuery ML does automatic hyper-parameter tuning. Although the model is called “ARIMA”, the underlying algorithm actually includes quite a few bells-and-whistles including anomaly detection, holiday effect modeling (user needs to specify the holiday region), seasonality detection/modeling, and trend modeling. Plus, the different time series are trained in parallel."
},
{
"code": null,
"e": 5226,
"s": 5144,
"text": "When we do a prediction, we will get a prediction for each station and timestamp:"
},
{
"code": null,
"e": 5453,
"s": 5226,
"text": "SELECT start_station_name, forecast_timestamp, forecast_valueFROM ML.FORECAST(MODEL ch09eu.numrentals_forecast, STRUCT(3 AS horizon, 0.9 AS confidence_level))ORDER By start_station_name, forecast_timestamp"
},
{
"code": null,
"e": 5498,
"s": 5453,
"text": "yields one time-series forecast per station:"
},
{
"code": null,
"e": 5565,
"s": 5498,
"text": "The full notebook on GitHub has plots of the forecasts by station."
},
{
"code": null,
"e": 5855,
"s": 5565,
"text": "The model evaluation might help you anticipate which stations the model will better in (the lower the seasonality-corrected variance, the easier it is to predict, and so, the better the model should be — you can not use AIC here because the AIC of different time series is not comparable):"
},
{
"code": null,
"e": 5937,
"s": 5855,
"text": "SELECT * FROM ML.EVALUATE(MODEL ch09eu.numrentals_forecast)ORDER BY variance DESC"
},
{
"code": null,
"e": 6052,
"s": 5937,
"text": "Based on the results, we expect predictions at Hyde Park Corner to be the worst, and Knightsbridge to be the best:"
},
{
"code": null,
"e": 6059,
"s": 6052,
"text": "Enjoy!"
},
{
"code": null,
"e": 6276,
"s": 6059,
"text": "Run through the full notebook on GitHub .To learn more about BigQuery ML, read Chapter 9 of BigQuery: The Definitive Guide. The book is periodically updated with these blog posts so that it remains, well, definitive."
},
{
"code": null,
"e": 6318,
"s": 6276,
"text": "Run through the full notebook on GitHub ."
},
{
"code": null,
"e": 6494,
"s": 6318,
"text": "To learn more about BigQuery ML, read Chapter 9 of BigQuery: The Definitive Guide. The book is periodically updated with these blog posts so that it remains, well, definitive."
}
]
|
Find length of loop in linked list - GeeksforGeeks | 20 Oct, 2021
Write a function detectAndCountLoop() that checks whether a given Linked List contains loop and if loop is present then returns count of nodes in loop. For example, the loop is present in below-linked list and length of the loop is 4. If the loop is not present, then the function should return 0.
Approach: It is known that Floyd’s cycle detection algorithm terminates when the fast and slow pointers meet at a common point, and that this common point is one of the loop nodes. Store the address of this common point in a pointer variable, say ptr. Then initialize a counter with 1, start from the common point, and keep visiting the next node while increasing the counter until the common point is reached again. At that point, the value of the counter equals the length of the loop.
Algorithm:
Find the common point in the loop by using the Floyd’s Cycle detection algorithm
Store the pointer in a temporary variable and keep a count = 0
Traverse the linked list until the same node is reached again and increase the count while moving to next node.
Print the count as length of loop
C++
C
Java
Python3
C#
Javascript
// C++ program to count number of nodes
// in loop in a linked list if loop is
// present
#include<bits/stdc++.h>
using namespace std;

/* Link list node */
struct Node
{
    int data;
    struct Node* next;
};

// Returns count of nodes present in loop.
int countNodes(struct Node *n)
{
    int res = 1;
    struct Node *temp = n;
    while (temp->next != n)
    {
        res++;
        temp = temp->next;
    }
    return res;
}

/* This function detects and counts loop
   nodes in the list. If loop is not there
   then returns 0 */
int countNodesinLoop(struct Node *list)
{
    struct Node *slow_p = list, *fast_p = list;

    while (slow_p && fast_p && fast_p->next)
    {
        slow_p = slow_p->next;
        fast_p = fast_p->next->next;

        /* If slow_p and fast_p meet at some point
           then there is a loop */
        if (slow_p == fast_p)
            return countNodes(slow_p);
    }

    /* Return 0 to indicate that there is no loop */
    return 0;
}

struct Node *newNode(int key)
{
    struct Node *temp = (struct Node*)malloc(sizeof(struct Node));
    temp->data = key;
    temp->next = NULL;
    return temp;
}

// Driver Code
int main()
{
    struct Node *head = newNode(1);
    head->next = newNode(2);
    head->next->next = newNode(3);
    head->next->next->next = newNode(4);
    head->next->next->next->next = newNode(5);

    /* Create a loop for testing */
    head->next->next->next->next->next = head->next;

    cout << countNodesinLoop(head) << endl;
    return 0;
}

// This code is contributed by SHUBHAMSINGH10
// C program to count number of nodes// in loop in a linked list if loop is// present#include<stdio.h>#include<stdlib.h> /* Link list node */struct Node{ int data; struct Node* next;}; // Returns count of nodes present in loop.int countNodes(struct Node *n){ int res = 1; struct Node *temp = n; while (temp->next != n) { res++; temp = temp->next; } return res;} /* This function detects and counts loop nodes in the list. If loop is not there in then returns 0 */int countNodesinLoop(struct Node *list){ struct Node *slow_p = list, *fast_p = list; while (slow_p && fast_p && fast_p->next) { slow_p = slow_p->next; fast_p = fast_p->next->next; /* If slow_p and fast_p meet at some point then there is a loop */ if (slow_p == fast_p) return countNodes(slow_p); } /* Return 0 to indicate that there is no loop*/ return 0;} struct Node *newNode(int key){ struct Node *temp = (struct Node*)malloc(sizeof(struct Node)); temp->data = key; temp->next = NULL; return temp;} /* Driver program to test above function*/int main(){ struct Node *head = newNode(1); head->next = newNode(2); head->next->next = newNode(3); head->next->next->next = newNode(4); head->next->next->next->next = newNode(5); /* Create a loop for testing */ head->next->next->next->next->next = head->next; printf("%d \n", countNodesinLoop(head)); return 0;}
// Java program to count number of nodes// in loop in a linked list if loop is// presentimport java.io.*; class GFG { /* Link list node */static class Node{ int data; Node next; Node(int data) { this.data =data; next =null; }} // Returns count of nodes present in loop.static int countNodes( Node n){int res = 1;Node temp = n;while (temp.next != n){ res++; temp = temp.next;}return res;} /* This function detects and counts loopnodes in the list. If loop is not therein then returns 0 */static int countNodesinLoop( Node list){ Node slow_p = list, fast_p = list; while (slow_p !=null && fast_p!=null && fast_p.next!=null) { slow_p = slow_p.next; fast_p = fast_p.next.next; /* If slow_p and fast_p meet at some point then there is a loop */ if (slow_p == fast_p) return countNodes(slow_p); } /* Return 0 to indicate that there is no loop*/ return 0;} static Node newNode(int key){ Node temp = new Node(key); return temp;} /* Driver program to test above function*/ public static void main (String[] args) { Node head = newNode(1); head.next = newNode(2); head.next.next = newNode(3); head.next.next.next = newNode(4); head.next.next.next.next = newNode(5); /* Create a loop for testing */ head.next.next.next.next.next = head.next; System.out.println( countNodesinLoop(head)); }}// This code is contributed by inder_verma.
# Python 3 program to find the number# of nodes in loop in a linked list# if loop is present # Python Code to detect a loop and# find the length of the loop# Node defining classclass Node: # Function to make a node def __init__(self, val): self.val = val self.next = None # Linked List defining and loop# length finding classclass LinkedList: # Function to initialize the # head of the linked list def __init__(self): self.head = None # Function to insert a new # node at the end def AddNode(self, val): if self.head is None: self.head = Node(val) else: curr = self.head while(curr.next): curr = curr.next curr.next = Node(val) # Function to create a loop in the # Linked List. This function creates # a loop by connecting the last node # to n^th node of the linked list, # (counting first node as 1) def CreateLoop(self, n): # LoopNode is the connecting node to # the last node of linked list LoopNode = self.head for _ in range(1, n): LoopNode = LoopNode.next # end is the last node of the Linked List end = self.head while(end.next): end = end.next # Creating the loop end.next = LoopNode # Function to detect the loop and return # the length of the loop if the returned # value is zero, that means that either # the linked list is empty or the linked # list doesn't have any loop def detectLoop(self): # if linked list is empty then there # is no loop, so return 0 if self.head is None: return 0 # Using Floyd’s Cycle-Finding # Algorithm/ Slow-Fast Pointer Method slow = self.head fast = self.head flag = 0 # to show that both slow and fast # are at start of the Linked List while(slow and slow.next and fast and fast.next and fast.next.next): if slow == fast and flag != 0: # Means loop is confirmed in the # Linked List. Now slow and fast # are both at the same node which # is part of the loop count = 1 slow = slow.next while(slow != fast): slow = slow.next count += 1 return count slow = slow.next fast = fast.next.next flag = 1 return 0 # No loop # Setting up the code# Making a Linked List and adding the nodesmyLL = LinkedList()myLL.AddNode(1)myLL.AddNode(2)myLL.AddNode(3)myLL.AddNode(4)myLL.AddNode(5) # Creating a loop in the linked List# Loop is created by connecting the# last node of linked list to n^th node# 1<= n <= len(LinkedList)myLL.CreateLoop(2) # Checking for Loop in the Linked List# and printing the length of the looploopLength = myLL.detectLoop()if myLL.head is None: print("Linked list is empty")else: print(str(loopLength)) # This code is contributed by _Ashutosh
// C# program to count number of nodes// in loop in a linked list if loop is// presentusing System; class GFG{ /* Link list node */ class Node { public int data; public Node next; public Node(int data) { this.data = data; next = null; } } // Returns count of nodes present in loop. static int countNodes( Node n) { int res = 1; Node temp = n; while (temp.next != n) { res++; temp = temp.next; } return res; } /* This function detects and counts loop nodes in the list. If loop is not there in then returns 0 */ static int countNodesinLoop( Node list) { Node slow_p = list, fast_p = list; while (slow_p != null && fast_p != null && fast_p.next != null) { slow_p = slow_p.next; fast_p = fast_p.next.next; /* If slow_p and fast_p meet at some point then there is a loop */ if (slow_p == fast_p) return countNodes(slow_p); } /* Return 0 to indicate that there is no loop*/ return 0; } static Node newNode(int key) { Node temp = new Node(key); return temp; } /* Driver code*/ public static void Main (String[] args) { Node head = newNode(1); head.next = newNode(2); head.next.next = newNode(3); head.next.next.next = newNode(4); head.next.next.next.next = newNode(5); /* Create a loop for testing */ head.next.next.next.next.next = head.next; Console.WriteLine( countNodesinLoop(head)); }} // This code is contributed by Rajput-Ji
<script>// javascript program to count number of nodes// in loop in a linked list if loop is// present /* Link list node */class Node { constructor(data) { this.data = data; this.next = null; }} // Returns count of nodes present in loop. function countNodes( n) { var res = 1; temp = n; while (temp.next != n) { res++; temp = temp.next; } return res; } /* * This function detects and counts loop nodes in the list. If loop is not there * in then returns 0 */ function countNodesinLoop( list) { var slow_p = list, fast_p = list; while (slow_p != null && fast_p != null && fast_p.next != null) { slow_p = slow_p.next; fast_p = fast_p.next.next; /* * If slow_p and fast_p meet at some point then there is a loop */ if (slow_p == fast_p) return countNodes(slow_p); } /* Return 0 to indicate that there is no loop */ return 0; } function newNode(key) { temp = new Node(key); return temp; } /* Driver program to test above function */ head = newNode(1); head.next = newNode(2); head.next.next = newNode(3); head.next.next.next = newNode(4); head.next.next.next.next = newNode(5); /* Create a loop for testing */ head.next.next.next.next.next = head.next; document.write(countNodesinLoop(head)); // This code contributed by gauravrajput1</script>
Output :
4
Complexity Analysis:
Time complexity:O(n). Only one traversal of the linked list is needed.
Auxiliary Space:O(1). As no extra space is required.
Related Articles:
Detect loop in a linked list
Detect and Remove Loop in a Linked List
This article is contributed by Shubham Gupta. | [
{
"code": null,
"e": 25088,
"s": 25060,
"text": "\n20 Oct, 2021"
},
{
"code": null,
"e": 25386,
"s": 25088,
"text": "Write a function detectAndCountLoop() that checks whether a given Linked List contains loop and if loop is present then returns count of nodes in loop. For example, the loop is present in below-linked list and length of the loop is 4. If the loop is not present, then the function should return 0."
},
{
"code": null,
"e": 25899,
"s": 25386,
"text": "Approach: It is known that Floyd’s Cycle detection algorithm terminates when fast and slow pointers meet at a common point. It is also known that this common point is one of the loop nodes. Store the address of this common point in a pointer variable say (ptr). Then initialize a counter with 1 and start from the common point and keeps on visiting the next node and increasing the counter till the common pointer is reached again. At that point, the value of the counter will be equal to the length of the loop."
},
{
"code": null,
"e": 25912,
"s": 25899,
"text": "Algorithm: "
},
{
"code": null,
"e": 26199,
"s": 25912,
"text": "Find the common point in the loop by using the Floyd’s Cycle detection algorithmStore the pointer in a temporary variable and keep a count = 0Traverse the linked list until the same node is reached again and increase the count while moving to next node.Print the count as length of loop"
},
{
"code": null,
"e": 26280,
"s": 26199,
"text": "Find the common point in the loop by using the Floyd’s Cycle detection algorithm"
},
{
"code": null,
"e": 26343,
"s": 26280,
"text": "Store the pointer in a temporary variable and keep a count = 0"
},
{
"code": null,
"e": 26455,
"s": 26343,
"text": "Traverse the linked list until the same node is reached again and increase the count while moving to next node."
},
{
"code": null,
"e": 26489,
"s": 26455,
"text": "Print the count as length of loop"
},
{
"code": null,
"e": 26493,
"s": 26489,
"text": "C++"
},
{
"code": null,
"e": 26495,
"s": 26493,
"text": "C"
},
{
"code": null,
"e": 26500,
"s": 26495,
"text": "Java"
},
{
"code": null,
"e": 26508,
"s": 26500,
"text": "Python3"
},
{
"code": null,
"e": 26511,
"s": 26508,
"text": "C#"
},
{
"code": null,
"e": 26522,
"s": 26511,
"text": "Javascript"
},
{
"code": "// C++ program to count number of nodes// in loop in a linked list if loop is// present#include<bits/stdc++.h>using namespace std; /* Link list node */struct Node{ int data; struct Node* next;}; // Returns count of nodes present in loop.int countNodes(struct Node *n){ int res = 1; struct Node *temp = n; while (temp->next != n) { res++; temp = temp->next; } return res;} /* This function detects and counts loopnodes in the list. If loop is not therein then returns 0 */int countNodesinLoop(struct Node *list){ struct Node *slow_p = list, *fast_p = list; while (slow_p && fast_p && fast_p->next) { slow_p = slow_p->next; fast_p = fast_p->next->next; /* If slow_p and fast_p meet at some point then there is a loop */ if (slow_p == fast_p) return countNodes(slow_p); } /* Return 0 to indicate that there is no loop*/ return 0;} struct Node *newNode(int key){ struct Node *temp = (struct Node*)malloc(sizeof(struct Node)); temp->data = key; temp->next = NULL; return temp;} // Driver Codeint main(){ struct Node *head = newNode(1); head->next = newNode(2); head->next->next = newNode(3); head->next->next->next = newNode(4); head->next->next->next->next = newNode(5); /* Create a loop for testing */ head->next->next->next->next->next = head->next; cout << countNodesinLoop(head) << endl; return 0;} // This code is contributed by SHUBHAMSINGH10",
"e": 28042,
"s": 26522,
"text": null
},
{
"code": "// C program to count number of nodes// in loop in a linked list if loop is// present#include<stdio.h>#include<stdlib.h> /* Link list node */struct Node{ int data; struct Node* next;}; // Returns count of nodes present in loop.int countNodes(struct Node *n){ int res = 1; struct Node *temp = n; while (temp->next != n) { res++; temp = temp->next; } return res;} /* This function detects and counts loop nodes in the list. If loop is not there in then returns 0 */int countNodesinLoop(struct Node *list){ struct Node *slow_p = list, *fast_p = list; while (slow_p && fast_p && fast_p->next) { slow_p = slow_p->next; fast_p = fast_p->next->next; /* If slow_p and fast_p meet at some point then there is a loop */ if (slow_p == fast_p) return countNodes(slow_p); } /* Return 0 to indicate that there is no loop*/ return 0;} struct Node *newNode(int key){ struct Node *temp = (struct Node*)malloc(sizeof(struct Node)); temp->data = key; temp->next = NULL; return temp;} /* Driver program to test above function*/int main(){ struct Node *head = newNode(1); head->next = newNode(2); head->next->next = newNode(3); head->next->next->next = newNode(4); head->next->next->next->next = newNode(5); /* Create a loop for testing */ head->next->next->next->next->next = head->next; printf(\"%d \\n\", countNodesinLoop(head)); return 0;}",
"e": 29515,
"s": 28042,
"text": null
},
{
"code": "// Java program to count number of nodes// in loop in a linked list if loop is// presentimport java.io.*; class GFG { /* Link list node */static class Node{ int data; Node next; Node(int data) { this.data =data; next =null; }} // Returns count of nodes present in loop.static int countNodes( Node n){int res = 1;Node temp = n;while (temp.next != n){ res++; temp = temp.next;}return res;} /* This function detects and counts loopnodes in the list. If loop is not therein then returns 0 */static int countNodesinLoop( Node list){ Node slow_p = list, fast_p = list; while (slow_p !=null && fast_p!=null && fast_p.next!=null) { slow_p = slow_p.next; fast_p = fast_p.next.next; /* If slow_p and fast_p meet at some point then there is a loop */ if (slow_p == fast_p) return countNodes(slow_p); } /* Return 0 to indicate that there is no loop*/ return 0;} static Node newNode(int key){ Node temp = new Node(key); return temp;} /* Driver program to test above function*/ public static void main (String[] args) { Node head = newNode(1); head.next = newNode(2); head.next.next = newNode(3); head.next.next.next = newNode(4); head.next.next.next.next = newNode(5); /* Create a loop for testing */ head.next.next.next.next.next = head.next; System.out.println( countNodesinLoop(head)); }}// This code is contributed by inder_verma.",
"e": 30991,
"s": 29515,
"text": null
},
{
"code": "# Python 3 program to find the number# of nodes in loop in a linked list# if loop is present # Python Code to detect a loop and# find the length of the loop# Node defining classclass Node: # Function to make a node def __init__(self, val): self.val = val self.next = None # Linked List defining and loop# length finding classclass LinkedList: # Function to initialize the # head of the linked list def __init__(self): self.head = None # Function to insert a new # node at the end def AddNode(self, val): if self.head is None: self.head = Node(val) else: curr = self.head while(curr.next): curr = curr.next curr.next = Node(val) # Function to create a loop in the # Linked List. This function creates # a loop by connecting the last node # to n^th node of the linked list, # (counting first node as 1) def CreateLoop(self, n): # LoopNode is the connecting node to # the last node of linked list LoopNode = self.head for _ in range(1, n): LoopNode = LoopNode.next # end is the last node of the Linked List end = self.head while(end.next): end = end.next # Creating the loop end.next = LoopNode # Function to detect the loop and return # the length of the loop if the returned # value is zero, that means that either # the linked list is empty or the linked # list doesn't have any loop def detectLoop(self): # if linked list is empty then there # is no loop, so return 0 if self.head is None: return 0 # Using Floyd’s Cycle-Finding # Algorithm/ Slow-Fast Pointer Method slow = self.head fast = self.head flag = 0 # to show that both slow and fast # are at start of the Linked List while(slow and slow.next and fast and fast.next and fast.next.next): if slow == fast and flag != 0: # Means loop is confirmed in the # Linked List. Now slow and fast # are both at the same node which # is part of the loop count = 1 slow = slow.next while(slow != fast): slow = slow.next count += 1 return count slow = slow.next fast = fast.next.next flag = 1 return 0 # No loop # Setting up the code# Making a Linked List and adding the nodesmyLL = LinkedList()myLL.AddNode(1)myLL.AddNode(2)myLL.AddNode(3)myLL.AddNode(4)myLL.AddNode(5) # Creating a loop in the linked List# Loop is created by connecting the# last node of linked list to n^th node# 1<= n <= len(LinkedList)myLL.CreateLoop(2) # Checking for Loop in the Linked List# and printing the length of the looploopLength = myLL.detectLoop()if myLL.head is None: print(\"Linked list is empty\")else: print(str(loopLength)) # This code is contributed by _Ashutosh",
"e": 34175,
"s": 30991,
"text": null
},
{
"code": "// C# program to count number of nodes// in loop in a linked list if loop is// presentusing System; class GFG{ /* Link list node */ class Node { public int data; public Node next; public Node(int data) { this.data = data; next = null; } } // Returns count of nodes present in loop. static int countNodes( Node n) { int res = 1; Node temp = n; while (temp.next != n) { res++; temp = temp.next; } return res; } /* This function detects and counts loop nodes in the list. If loop is not there in then returns 0 */ static int countNodesinLoop( Node list) { Node slow_p = list, fast_p = list; while (slow_p != null && fast_p != null && fast_p.next != null) { slow_p = slow_p.next; fast_p = fast_p.next.next; /* If slow_p and fast_p meet at some point then there is a loop */ if (slow_p == fast_p) return countNodes(slow_p); } /* Return 0 to indicate that there is no loop*/ return 0; } static Node newNode(int key) { Node temp = new Node(key); return temp; } /* Driver code*/ public static void Main (String[] args) { Node head = newNode(1); head.next = newNode(2); head.next.next = newNode(3); head.next.next.next = newNode(4); head.next.next.next.next = newNode(5); /* Create a loop for testing */ head.next.next.next.next.next = head.next; Console.WriteLine( countNodesinLoop(head)); }} // This code is contributed by Rajput-Ji",
"e": 35905,
"s": 34175,
"text": null
},
{
"code": "<script>// javascript program to count number of nodes// in loop in a linked list if loop is// present /* Link list node */class Node { constructor(data) { this.data = data; this.next = null; }} // Returns count of nodes present in loop. function countNodes( n) { var res = 1; temp = n; while (temp.next != n) { res++; temp = temp.next; } return res; } /* * This function detects and counts loop nodes in the list. If loop is not there * in then returns 0 */ function countNodesinLoop( list) { var slow_p = list, fast_p = list; while (slow_p != null && fast_p != null && fast_p.next != null) { slow_p = slow_p.next; fast_p = fast_p.next.next; /* * If slow_p and fast_p meet at some point then there is a loop */ if (slow_p == fast_p) return countNodes(slow_p); } /* Return 0 to indicate that there is no loop */ return 0; } function newNode(key) { temp = new Node(key); return temp; } /* Driver program to test above function */ head = newNode(1); head.next = newNode(2); head.next.next = newNode(3); head.next.next.next = newNode(4); head.next.next.next.next = newNode(5); /* Create a loop for testing */ head.next.next.next.next.next = head.next; document.write(countNodesinLoop(head)); // This code contributed by gauravrajput1</script>",
"e": 37471,
"s": 35905,
"text": null
},
{
"code": null,
"e": 37481,
"s": 37471,
"text": "Output : "
},
{
"code": null,
"e": 37483,
"s": 37481,
"text": "4"
},
{
"code": null,
"e": 37506,
"s": 37483,
"text": "Complexity Analysis: "
},
{
"code": null,
"e": 37577,
"s": 37506,
"text": "Time complexity:O(n). Only one traversal of the linked list is needed."
},
{
"code": null,
"e": 37630,
"s": 37577,
"text": "Auxiliary Space:O(1). As no extra space is required."
},
{
"code": null,
"e": 37650,
"s": 37630,
"text": "Related Articles: "
},
{
"code": null,
"e": 37679,
"s": 37650,
"text": "Detect loop in a linked list"
},
{
"code": null,
"e": 37719,
"s": 37679,
"text": "Detect and Remove Loop in a Linked List"
},
{
"code": null,
"e": 38556,
"s": 37719,
"text": "YouTubeGeeksforGeeks502K subscribersFind the length of loop in linked list | GeeksforGeeksWatch laterShareCopy linkInfoShoppingTap to unmuteIf playback doesn't begin shortly, try restarting your device.You're signed outVideos you watch may be added to the TV's watch history and influence TV recommendations. To avoid this, cancel and sign in to YouTube on your computer.CancelConfirmMore videosMore videosSwitch cameraShareInclude playlistAn error occurred while retrieving sharing information. Please try again later.Watch on0:000:000:00 / 4:04•Live•<div class=\"player-unavailable\"><h1 class=\"message\">An error occurred.</h1><div class=\"submessage\"><a href=\"https://www.youtube.com/watch?v=n6AMo1qFPlA\" target=\"_blank\">Try watching this video on www.youtube.com</a>, or enable JavaScript if it is disabled in your browser.</div></div>"
},
{
"code": null,
"e": 38978,
"s": 38556,
"text": "This article is contributed by Shubham Gupta. If you like GeeksforGeeks and would like to contribute, you can also write an article using write.geeksforgeeks.org or mail your article to [email protected]. See your article appearing on the GeeksforGeeks main page and help other Geeks.Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above. "
},
{
"code": null,
"e": 38991,
"s": 38978,
"text": "subtleseeker"
},
{
"code": null,
"e": 39002,
"s": 38991,
"text": "inderDuMCA"
},
{
"code": null,
"e": 39018,
"s": 39002,
"text": "PriyaBadchariya"
},
{
"code": null,
"e": 39036,
"s": 39018,
"text": "Leonardo_Da_Vinci"
},
{
"code": null,
"e": 39046,
"s": 39036,
"text": "Rajput-Ji"
},
{
"code": null,
"e": 39061,
"s": 39046,
"text": "SHUBHAMSINGH10"
},
{
"code": null,
"e": 39072,
"s": 39061,
"text": "andrew1234"
},
{
"code": null,
"e": 39086,
"s": 39072,
"text": "GauravRajput1"
},
{
"code": null,
"e": 39095,
"s": 39086,
"text": "sweetyty"
},
{
"code": null,
"e": 39111,
"s": 39095,
"text": "simranarora5sos"
},
{
"code": null,
"e": 39117,
"s": 39111,
"text": "Adobe"
},
{
"code": null,
"e": 39126,
"s": 39117,
"text": "Qualcomm"
},
{
"code": null,
"e": 39138,
"s": 39126,
"text": "Linked List"
},
{
"code": null,
"e": 39144,
"s": 39138,
"text": "Adobe"
},
{
"code": null,
"e": 39153,
"s": 39144,
"text": "Qualcomm"
},
{
"code": null,
"e": 39165,
"s": 39153,
"text": "Linked List"
},
{
"code": null,
"e": 39263,
"s": 39165,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 39282,
"s": 39263,
"text": "LinkedList in Java"
},
{
"code": null,
"e": 39338,
"s": 39282,
"text": "Doubly Linked List | Set 1 (Introduction and Insertion)"
},
{
"code": null,
"e": 39359,
"s": 39338,
"text": "Linked List vs Array"
},
{
"code": null,
"e": 39405,
"s": 39359,
"text": "Delete a Linked List node at a given position"
},
{
"code": null,
"e": 39435,
"s": 39405,
"text": "Merge two sorted linked lists"
},
{
"code": null,
"e": 39482,
"s": 39435,
"text": "Implementing a Linked List in Java using Class"
},
{
"code": null,
"e": 39517,
"s": 39482,
"text": "Queue - Linked List Implementation"
},
{
"code": null,
"e": 39560,
"s": 39517,
"text": "Implement a stack using singly linked list"
},
{
"code": null,
"e": 39621,
"s": 39560,
"text": "Circular Linked List | Set 1 (Introduction and Applications)"
}
]
|
POSIX Thread Libraries | Pthreads refers to the POSIX standard (IEEE 1003.1c) defining an API for thread creation and synchronization. This defines specification for thread behavior, not an implementation. The specification can be implemented by OS designers in any way they wish. So many systems implement the Pthreads specification; most are UNIX-type systems, including Linux, Mac OS X, and Solaris. Although Windows doesn’t support Pthreads natively, some third-party implementations for Windows are available. The C program shown in Figure 4.9 demonstrates the basic Pthreads API for constructing a multithreaded program that calculates the summation of a nonnegative integer in a separate thread. Separate threads begin execution in a specified function in a Pthreads program. In below program, this is the runner() function. At the time when this program begins, a single thread of control begins in main().Then main() creates a second thread that begins control in the runner() function, after some initialization,. Both threads share the global data sum.
#include<pthread.h>
#include<stdio.h>
int sum;
/* this sum data is shared by the thread(s) */
/* threads call this function */
void *runner(void *param);
int main(int argc, char *argv[]){
   pthread_t tid; /* the thread identifier */
/* set of thread attributes */
   pthread_attr_t attr;
if (argc != 2){
      fprintf(stderr,"usage: a.out <integer value>\n");
return -1;
}
if (atoi(argv[1]) < 0){
fprintf(stderr,"%d must be >= 0\n",atoi(argv[1]));
return -1;
}
/* get the default attributes */
   pthread_attr_init(&attr); /* create the thread */
   pthread_create(&tid,&attr,runner,argv[1]);
/* wait for the thread to exit */
   pthread_join(tid,NULL);
printf("sum = %d\n",sum);
}
/* The thread will now begin control in this function */
void *runner(void *param){
int i, upper = atoi(param);
sum = 0;
for (i = 1; i <= upper; i++)
sum += i;
   pthread_exit(0);
}
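A practical note we are adding (not part of the original figure): on Linux the program has to be linked against the Pthreads library, so assuming the source file is saved as sum.c, it can be built and run with something like

gcc -pthread sum.c -o sum
./sum 10    /* prints: sum = 55 */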
Multithreaded C program using the Pthreads API. | [
{
"code": null,
"e": 2101,
"s": 1062,
"text": "Pthreads refers to the POSIX standard (IEEE 1003.1c) defining an API for thread creation and synchronization. This defines specification for thread behavior, not an implementation. The specification can be implemented by OS designers in any way they wish. So many systems implement the Pthreads specification; most are UNIX-type systems, including Linux, Mac OS X, and Solaris. Although Windows doesn’t support Pthreads natively, some third-party implementations for Windows are available. The C program shown in Figure 4.9 demonstrates the basic Pthreads API for constructing a multithreaded program that calculates the summation of a nonnegative integer in a separate thread. Separate threads begin execution in a specified function in a Pthreads program. In below program, this is the runner() function. At the time when this program begins, a single thread of control begins in main().Then main() creates a second thread that begins control in the runner() function, after some initialization,. Both threads share the global data sum."
},
{
"code": null,
"e": 3008,
"s": 2101,
"text": "#include<pthread.h>\n#include<stdio.h>\nint sum;\n/* this sum data is shared by the thread(s) */\n/* threads call this function */\nvoid *runner(void *param);\nint main(int argc, char *argv[]){\n pthread t tid; /* the thread identifier */\n /* set of thread attributes */\n pthread attr t attr;\n if (argc != 2){\n fprintf(stderr,\"usage: a.out \\n\");\n return -1;\n }\n if (atoi(argv[1]) < 0){\n fprintf(stderr,\"%d must be >= 0\\n\",atoi(argv[1]));\n return -1;\n }\n /* get the default attributes */\n pthread attr init(&attr); /* create the thread */\n pthread create(&tid,&attr,runner,argv[1]);\n /* wait for the thread to exit */\n pthread join(tid,NULL);\n printf(\"sum = %d\\n\",sum);\n}\n/* The thread will now begin control in this function */\nvoid *runner(void *param){\n int i, upper = atoi(param);\n sum = 0;\n for (i = 1; i <= upper; i++)\n sum += i;\n pthread exit(0);\n}"
},
{
"code": null,
"e": 3056,
"s": 3008,
"text": "Multithreaded C program using the Pthreads API."
}
]
|
Andrew Ng’s Machine Learning Course in Python (Kmeans-Clustering, PCA) | by Benjamin Lau | Towards Data Science | Second last part of the series, we look at unsupervised learning algorithms which consist of data without labels. Let’s get right into the assignment as we are going through two algorithms today.
K-means clustering is a cluster analysis technique that groups data into groups called clusters. As labels are not provided for the training data, the clusters are determined by how similar the data points are to each other.
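For reference (this formula is our addition, not part of the original write-up): the quantity K-means tries to minimize is the average squared distance between each example and the centroid of the cluster it is assigned to, J = (1/m) * sum_i ||x(i) - mu_c(i)||^2. Each assign-then-recompute iteration described below can only decrease this value, which is why the algorithm converges, although possibly to a local optimum.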
We will start by implementing the K-means algorithm. Since K-means is an iterative process that assigns training examples to their closest centroids and then recomputes the centroids, we need two main functions that do just that.
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat

mat = loadmat("ex7data2.mat")
X = mat["X"]
findClosestCentroids finds the closest centroid for every training example by evaluating the distance from the example to each centroid and assigning the centroid with the smallest distance.
def findClosestCentroids(X, centroids):
    """
    Returns the closest centroids in idx for a dataset X
    where each row is a single example.
    """
    K = centroids.shape[0]
    idx = np.zeros((X.shape[0],1))
    temp = np.zeros((centroids.shape[0],1))

    for i in range(X.shape[0]):
        for j in range(K):
            dist = X[i,:] - centroids[j,:]
            length = np.sum(dist**2)
            temp[j] = length
        idx[i] = np.argmin(temp)+1

    return idx

# Select an initial set of centroids
K = 3
initial_centroids = np.array([[3,3],[6,2],[8,5]])
idx = findClosestCentroids(X, initial_centroids)
print("Closest centroids for the first 3 examples:\n",idx[0:3])
np.argmin finds the index of the smallest distance, which is then assigned to the training example. The +1 is used here to number the centroids from 1 instead of 0.
The print statement will print:
Closest centroids for the first 3 examples: [[1.] [3.] [2.]]
To compute the centroid means after the assignment, we sum up the training examples assigned to a particular centroid and divide by the number of examples in each centroid.
def computeCentroids(X, idx, K):
    """
    Returns the new centroids by computing the means of the
    data points assigned to each centroid.
    """
    m, n = X.shape[0], X.shape[1]
    centroids = np.zeros((K,n))
    count = np.zeros((K,1))

    for i in range(m):
        index = int((idx[i]-1)[0])
        centroids[index,:] += X[i,:]
        count[index] += 1

    return centroids/count

centroids = computeCentroids(X, idx, K)
print("Centroids computed after initial finding of closest centroids:\n", centroids)
The print statement will print:
Centroids computed after initial finding of closest centroids: [[2.42830111 3.15792418] [5.81350331 2.63365645] [7.11938687 3.6166844 ]]
Now to visualize the whole process, I created a subplot for each iteration of the algorithms to monitor the movement of the centroids and assignment of the training examples.
def plotKmeans(X, centroids, idx, K, num_iters):
    """
    Plots the data points with colors assigned to each centroid
    """
    m, n = X.shape[0], X.shape[1]
    fig, ax = plt.subplots(nrows=num_iters, ncols=1, figsize=(6,36))

    for i in range(num_iters):
        # Visualisation of data
        color = "rgb"
        for k in range(1,K+1):
            grp = (idx==k).reshape(m,1)
            ax[i].scatter(X[grp[:,0],0], X[grp[:,0],1], c=color[k-1], s=15)

        # visualize the new centroids
        ax[i].scatter(centroids[:,0], centroids[:,1], s=120, marker="x", c="black", linewidth=3)
        title = "Iteration Number " + str(i)
        ax[i].set_title(title)

        # Compute the centroids mean
        centroids = computeCentroids(X, idx, K)
        # assign each training example to the nearest centroid
        idx = findClosestCentroids(X, centroids)

    plt.tight_layout()

m, n = X.shape[0], X.shape[1]
plotKmeans(X, initial_centroids, idx, K, 10)
Since K-means algorithms do not always give the optimal solution, random initialization is important.
def kMeansInitCentroids(X, K):
    """
    This function initializes K centroids that are to be
    used in K-Means on the dataset X
    """
    m, n = X.shape[0], X.shape[1]
    centroids = np.zeros((K,n))
    for i in range(K):
        # pick a random training example; upper bound m is exclusive so the index stays in range
        centroids[i] = X[np.random.randint(0,m),:]
    return centroids

centroids = kMeansInitCentroids(X, K)
idx = findClosestCentroids(X, centroids)
plotKmeans(X, centroids, idx, K, 10)
The code above will run the visualization again but with random initialization. You can run the code multiple times to see the impact of random initial centroids.
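A natural extension (our sketch, not part of the original notebook): since different initializations can converge to different local optima, a common practice is to run K-means several times from different random starting points, score each run by its distortion, and keep the best one. A helper like the following could be used together with the runKmeans function defined a little further below:

def computeCost(X, idx, centroids):
    # distortion: average squared distance from each example to its assigned centroid
    m = X.shape[0]
    cost = 0
    for i in range(m):
        cost += np.sum((X[i,:] - centroids[int((idx[i]-1)[0]),:])**2)
    return cost/m

Running K-means from, say, 10 random initializations and keeping the (centroids, idx) pair with the smallest computeCost value typically gives a noticeably better clustering than a single run.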
Now that we finish coding the algorithm, we can start implementing it with other datasets. In this exercise, we will use the algorithm to select 16 clusters to represent the image (down from thousands of colors) to compress the image.
mat2 = loadmat("bird_small.mat")
A = mat2["A"]

# preprocess and reshape the image
X2 = (A/255).reshape(128*128,3)

def runKmeans(X, initial_centroids, num_iters, K):
    idx = findClosestCentroids(X, initial_centroids)

    for i in range(num_iters):
        # Compute the centroids mean
        centroids = computeCentroids(X, idx, K)
        # assign each training example to the nearest updated centroid
        idx = findClosestCentroids(X, centroids)

    return centroids, idx
Now to run k-means algorithm on the dataset
K2 = 16
num_iters = 10
initial_centroids2 = kMeansInitCentroids(X2, K2)
centroids2, idx2 = runKmeans(X2, initial_centroids2, num_iters, K2)

m2, n2 = X.shape[0], X.shape[1]
X2_recovered = X2.copy()
for i in range(1,K2+1):
    X2_recovered[(idx2==i).ravel(),:] = centroids2[i-1]

# Reshape the recovered image into proper dimensions
X2_recovered = X2_recovered.reshape(128,128,3)

# Display the image
import matplotlib.image as mpimg
fig, ax = plt.subplots(1,2)
ax[0].imshow(X2.reshape(128,128,3))
ax[1].imshow(X2_recovered)
This is a side-by-side comparison of the original image and the compressed image with only 16 colors.
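A quick back-of-the-envelope calculation of what was gained (our numbers, assuming 24-bit color): the original image needs 128 x 128 x 24 = 393,216 bits, while the compressed version needs 24 bits for each of the 16 palette colors plus 4 bits per pixel to store the cluster index, i.e. 16 x 24 + 128 x 128 x 4 = 65,920 bits, roughly a factor of six smaller.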
The next part of the assignment makes use of a 2D dataset to get intuition on the process of Principal Component Analysis (PCA) and then conduct PCA on a face image dataset to perform dimensionality reduction.
Load and visualize the 2D dataset
mat3 = loadmat("ex7data1.mat")
X3 = mat3["X"]

plt.scatter(X3[:,0],X3[:,1],marker="o",facecolors="none",edgecolors="b")
To implement the PCA algorithm, PCA also consists of two computational steps where we will code one of it to compute the covariance matrix and utilize numpy library for the other to obtain the eigenvectors.
Before that, feature normalization is needed to ensure that data are in the same range.
def featureNormalize(X):
    """
    Returns a normalized version of X where the mean value of
    each feature is 0 and the standard deviation is 1.
    """
    mu = np.mean(X, axis=0)
    sigma = np.std(X, axis=0)
    X_norm = (X - mu)/sigma
    return X_norm, mu, sigma

def pca(X):
    """
    Computes eigenvectors of the covariance matrix of X
    """
    m, n = X.shape[0], X.shape[1]
    sigma = 1/m * X.T @ X
    U, S, V = svd(sigma)
    return U, S, V
np.linalg.svd is similar to the svd function in MATLAB and returns the same U, S, V matrices. The official documentation can be found in the NumPy reference.
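One detail worth spelling out (our note): because the outputs of np.linalg.svd come back sorted, the top-K directions can simply be sliced off the front of U:

U, S, V = svd(sigma)  # U: n x n, columns are the principal directions
                      # S: length-n singular values in descending order
U_reduced = U[:,:K]   # top-K principal components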
from numpy.linalg import svd

X_norm, mu, std = featureNormalize(X3)
U, S = pca(X_norm)[:2]

plt.scatter(X3[:,0],X3[:,1],marker="o",facecolors="none",edgecolors="b")
plt.plot([mu[0],(mu+1.5*S[0]*U[:,0].T)[0]],[mu[1],(mu+1.5*S[0]*U[:,0].T)[1]],color="black",linewidth=3)
plt.plot([mu[0],(mu+1.5*S[1]*U[:,1].T)[0]],[mu[1],(mu+1.5*S[1]*U[:,1].T)[1]],color="black",linewidth=3)
plt.xlim(-1,7)
plt.ylim(2,8)
The code block above implement PCA on the dataset and visualize the eigenvectors on the data. I found that Wikipedia has a good source of information for most of the learning algorithms and definitely worth looking if you wish to delve deeper into the algorithms.
print("Top eigenvector U(:,1) =:",U[:,0])
The print statement prints: Top eigenvector U(:,1) =: [-0.70710678 -0.70710678]
To reduce the dimension of the dataset, we project the data onto the principal components (eigenvectors) found.
def projectData(X, U, K):
    """
    Computes the reduced data representation when
    projecting only on to the top K eigenvectors
    """
    m = X.shape[0]
    U_reduced = U[:,:K]
    Z = np.zeros((m,K))

    for i in range(m):
        for j in range(K):
            Z[i,j] = X[i,:] @ U_reduced[:,j]

    return Z

# Project the data onto K=1 dimension
K = 1
Z = projectData(X_norm, U, K)
print("Projection of the first example:",Z[0][0])
The print statement will print: Projection of the first example: 1.4963126084578515
Data also can be approximately reconstructed by projecting them back onto the original dimension space.
def recoverData(Z, U, K):
    """
    Recovers an approximation of the original data
    when using the projected data
    """
    m, n = Z.shape[0], U.shape[0]
    X_rec = np.zeros((m,n))
    U_reduced = U[:,:K]

    for i in range(m):
        X_rec[i,:] = Z[i,:] @ U_reduced.T

    return X_rec

X_rec = recoverData(Z, U, K)
print("Approximation of the first example:",X_rec[0,:])
The print statement will print: Approximation of the first example: [-1.05805279 -1.05805279]
To visualize the whole process,
plt.scatter(X_norm[:,0],X_norm[:,1],marker="o",label="Original",facecolors="none",edgecolors="b",s=15)
plt.scatter(X_rec[:,0],X_rec[:,1],marker="o",label="Approximation",facecolors="none",edgecolors="r",s=15)
plt.title("The Normalized and Projected Data after PCA")
plt.legend()
Finally, we move on to a more complex dataset — Face image dataset. To load and visualize the data,
mat4 = loadmat("ex7faces.mat")
X4 = mat4["X"]

fig, ax = plt.subplots(nrows=10, ncols=10, figsize=(8,8))
for i in range(0,100,10):
    for j in range(10):
        ax[int(i/10),j].imshow(X4[i+j,:].reshape(32,32,order="F"), cmap="gray")
        ax[int(i/10),j].axis("off")
These images contain 32 X 32 pixels in grayscale, resulting in a dimension of 1,024 features and our task is to reduce the dimension to around 100 principal components that best describe our data.
X_norm2 = featureNormalize(X4)[0]

# Run PCA
U2 = pca(X_norm2)[0]

# Visualize the top 36 eigenvectors found
U_reduced = U2[:,:36].T
fig2, ax2 = plt.subplots(6,6,figsize=(8,8))
for i in range(0,36,6):
    for j in range(6):
        ax2[int(i/6),j].imshow(U_reduced[i+j,:].reshape(32,32,order="F"), cmap="gray")
        ax2[int(i/6),j].axis("off")
Above is the visualization of 36 principal components that describe the largest variations in the dataset.
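Before settling on 100 components it is worth checking how much variance they retain; a short check we are adding (not in the original post), using the singular values returned by pca:

S2 = pca(X_norm2)[1]
variance_retained = np.sum(S2[:100]) / np.sum(S2)
print("Variance retained by the first 100 components:", variance_retained)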
Next, we project the data onto the first 100 principal components, effectively reduce the dimension down to a hundred, recover the data and try to understand what is lost in the dimension reduction process.
K2 = 100
Z2 = projectData(X_norm2, U2, K2)
print("The projected data Z has a size of:", Z2.shape)

# Data reconstruction
X_rec2 = recoverData(Z2, U2, K2)

# Visualize the reconstructed data
fig3, ax3 = plt.subplots(10,10,figsize=(8,8))
for i in range(0,100,10):
    for j in range(10):
        ax3[int(i/10),j].imshow(X_rec2[i+j,:].reshape(32,32,order="F"), cmap="gray")
        ax3[int(i/10),j].axis("off")
This is the end of unsupervised learning. Please look forward to the last article in the series. The Jupyter notebook will be uploaded to my GitHub at (https://github.com/Benlau93/Machine-Learning-by-Andrew-Ng-in-Python).
For other python implementation in the series,
Linear Regression
Logistic Regression
Regularized Logistic Regression
Neural Networks
Support Vector Machines
Anomaly Detection
Thank you for reading. | [
{
"code": null,
"e": 367,
"s": 171,
"text": "Second last part of the series, we look at unsupervised learning algorithms which consist of data without labels. Let’s get right into the assignment as we are going through two algorithms today."
},
{
"code": null,
"e": 605,
"s": 367,
"text": "K-means Clustering is one of a cluster analysis technique that allows grouping of data into groups called clusters. As labels are not provided for each training data, clusters are determined by the similarity of the data from each other."
},
{
"code": null,
"e": 837,
"s": 605,
"text": "We will start by implementing the K-means algorithms. Since K-means is an iterative process that assigns training examples to their closest centroids and then recomputing the centroids, we need two main functions that do just that."
},
{
"code": null,
"e": 956,
"s": 837,
"text": "import numpy as npimport matplotlib.pyplot as pltfrom scipy.io import loadmatmat = loadmat(\"ex7data2.mat\")X = mat[\"X\"]"
},
{
"code": null,
"e": 1155,
"s": 956,
"text": "findClosestCentroids find the closest centroids by evaluating the distance between the training example from each centroid and assign the centroid to the training example with the smallest distance."
},
{
"code": null,
"e": 1820,
"s": 1155,
"text": "def findClosestCentroids(X, centroids): \"\"\" Returns the closest centroids in idx for a dataset X where each row is a single example. \"\"\" K = centroids.shape[0] idx = np.zeros((X.shape[0],1)) temp = np.zeros((centroids.shape[0],1)) for i in range(X.shape[0]): for j in range(K): dist = X[i,:] - centroids[j,:] length = np.sum(dist**2) temp[j] = length idx[i] = np.argmin(temp)+1 return idx# Select an initial set of centroidsK = 3initial_centroids = np.array([[3,3],[6,2],[8,5]])idx = findClosestCentroids(X, initial_centroids)print(\"Closest centroids for the first 3 examples:\\n\",idx[0:3])"
},
{
"code": null,
"e": 1971,
"s": 1820,
"text": "np.argmin find the index with the smallest distance and assign it to the training example. +1 is used here to number the centroid from 1 instead of 0."
},
{
"code": null,
"e": 2003,
"s": 1971,
"text": "The print statement will print:"
},
{
"code": null,
"e": 2064,
"s": 2003,
"text": "Closest centroids for the first 3 examples: [[1.] [3.] [2.]]"
},
{
"code": null,
"e": 2237,
"s": 2064,
"text": "To compute the centroid means after the assignment, we sum up the training examples assigned to a particular centroid and divide by the number of examples in each centroid."
},
{
"code": null,
"e": 2742,
"s": 2237,
"text": "def computeCentroids(X, idx, K): \"\"\" returns the new centroids by computing the means of the data points assigned to each centroid. \"\"\" m, n = X.shape[0],X.shape[1] centroids = np.zeros((K,n)) count = np.zeros((K,1)) for i in range(m): index = int((idx[i]-1)[0]) centroids[index,:]+=X[i,:] count[index]+=1 return centroids/countcentroids = computeCentroids(X, idx, K)print(\"Centroids computed after initial finding of closest centroids:\\n\", centroids)"
},
{
"code": null,
"e": 2774,
"s": 2742,
"text": "The print statement will print:"
},
{
"code": null,
"e": 2911,
"s": 2774,
"text": "Centroids computed after initial finding of closest centroids: [[2.42830111 3.15792418] [5.81350331 2.63365645] [7.11938687 3.6166844 ]]"
},
{
"code": null,
"e": 3086,
"s": 2911,
"text": "Now to visualize the whole process, I created a subplot for each iteration of the algorithms to monitor the movement of the centroids and assignment of the training examples."
},
{
"code": null,
"e": 4042,
"s": 3086,
"text": "def plotKmeans(X, centroids, idx, K, num_iters): \"\"\" plots the data points with colors assigned to each centroid \"\"\" m,n = X.shape[0],X.shape[1] fig, ax = plt.subplots(nrows=num_iters,ncols=1,figsize=(6,36)) for i in range(num_iters): # Visualisation of data color = \"rgb\" for k in range(1,K+1): grp = (idx==k).reshape(m,1) ax[i].scatter(X[grp[:,0],0],X[grp[:,0],1],c=color[k-1],s=15)# visualize the new centroids ax[i].scatter(centroids[:,0],centroids[:,1],s=120,marker=\"x\",c=\"black\",linewidth=3) title = \"Iteration Number \" + str(i) ax[i].set_title(title) # Compute the centroids mean centroids = computeCentroids(X, idx, K) # assign each training example to the nearest centroid idx = findClosestCentroids(X, centroids) plt.tight_layout()m,n = X.shape[0],X.shape[1]plotKmeans(X, initial_centroids,idx, K,10)"
},
{
"code": null,
"e": 4144,
"s": 4042,
"text": "Since K-means algorithms do not always give the optimal solution, random initialization is important."
},
{
"code": null,
"e": 4557,
"s": 4144,
"text": "def kMeansInitCentroids(X, K): \"\"\" This function initializes K centroids that are to beused in K-Means on the dataset X \"\"\" m,n = X.shape[0], X.shape[1] centroids = np.zeros((K,n)) for i in range(K): centroids[i] = X[np.random.randint(0,m+1),:] return centroidscentroids = kMeansInitCentroids(X, K)idx = findClosestCentroids(X, centroids)plotKmeans(X, centroids,idx, K,10)"
},
{
"code": null,
"e": 4720,
"s": 4557,
"text": "The code above will run the visualization again but with random initialization. You can run the code multiple times to see the impact of random initial centroids."
},
{
"code": null,
"e": 4955,
"s": 4720,
"text": "Now that we finish coding the algorithm, we can start implementing it with other datasets. In this exercise, we will use the algorithm to select 16 clusters to represent the image (down from thousands of colors) to compress the image."
},
{
"code": null,
"e": 5426,
"s": 4955,
"text": "mat2 = loadmat(\"bird_small.mat\")A = mat2[\"A\"]# preprocess and reshape the imageX2 = (A/255).reshape(128*128,3)def runKmeans(X, initial_centroids,num_iters,K): idx = findClosestCentroids(X, initial_centroids) for i in range(num_iters): # Compute the centroids mean centroids = computeCentroids(X, idx, K)# assign each training example to the nearest centroid idx = findClosestCentroids(X, initial_centroids)return centroids, idx"
},
{
"code": null,
"e": 5470,
"s": 5426,
"text": "Now to run k-means algorithm on the dataset"
},
{
"code": null,
"e": 5974,
"s": 5470,
"text": "K2 = 16num_iters = 10initial_centroids2 = kMeansInitCentroids(X2, K2)centroids2, idx2 = runKmeans(X2, initial_centroids2, num_iters,K2)m2,n2 = X.shape[0],X.shape[1]X2_recovered = X2.copy()for i in range(1,K2+1): X2_recovered[(idx2==i).ravel(),:] = centroids2[i-1]# Reshape the recovered image into proper dimensionsX2_recovered = X2_recovered.reshape(128,128,3)# Display the imageimport matplotlib.image as mpimgfig, ax = plt.subplots(1,2)ax[0].imshow(X2.reshape(128,128,3))ax[1].imshow(X2_recovered)"
},
{
"code": null,
"e": 6076,
"s": 5974,
"text": "This is a side-by-side comparison of the original image and the compressed image with only 16 colors."
},
{
"code": null,
"e": 6286,
"s": 6076,
"text": "The next part of the assignment makes use of a 2D dataset to get intuition on the process of Principal Component Analysis (PCA) and then conduct PCA on a face image dataset to perform dimensionality reduction."
},
{
"code": null,
"e": 6320,
"s": 6286,
"text": "Load and visualize the 2D dataset"
},
{
"code": null,
"e": 6437,
"s": 6320,
"text": "mat3 = loadmat(\"ex7data1.mat\")X3 = mat3[\"X\"]plt.scatter(X3[:,0],X3[:,1],marker=\"o\",facecolors=\"none\",edgecolors=\"b\")"
},
{
"code": null,
"e": 6644,
"s": 6437,
"text": "To implement the PCA algorithm, PCA also consists of two computational steps where we will code one of it to compute the covariance matrix and utilize numpy library for the other to obtain the eigenvectors."
},
{
"code": null,
"e": 6732,
"s": 6644,
"text": "Before that, feature normalization is needed to ensure that data are in the same range."
},
{
"code": null,
"e": 7188,
"s": 6732,
"text": "def featureNormalize(X): \"\"\" Returns a normalized version of X where the mean value of each feature is 0 and the standard deviation is 1. \"\"\" mu = np.mean(X,axis=0) sigma = np.std(X,axis=0) X_norm = (X - mu)/sigma return X_norm, mu , sigmadef pca(X): \"\"\" Computes eigenvectors of the covariance matrix of X \"\"\" m,n = X.shape[0], X.shape[1] sigma = 1/m * X.T @ X U,S,V = svd(sigma) return U,S,V"
},
{
"code": null,
"e": 7321,
"s": 7188,
"text": "np.linalg.svd is similar to svd function in matlab and return the same U,S,V matrices. The official documentation can be found here."
},
{
"code": null,
"e": 7712,
"s": 7321,
"text": "from numpy.linalg import svdX_norm,mu,std = featureNormalize(X3)U,S = pca(X_norm)[:2]plt.scatter(X3[:,0],X3[:,1],marker=\"o\",facecolors=\"none\",edgecolors=\"b\")plt.plot([mu[0],(mu+1.5*S[0]*U[:,0].T)[0]],[mu[1],(mu+1.5*S[0]*U[:,0].T)[1]],color=\"black\",linewidth=3)plt.plot([mu[0],(mu+1.5*S[1]*U[:,1].T)[0]],[mu[1],(mu+1.5*S[1]*U[:,1].T)[1]],color=\"black\",linewidth=3)plt.xlim(-1,7)plt.ylim(2,8)"
},
{
"code": null,
"e": 7976,
"s": 7712,
"text": "The code block above implement PCA on the dataset and visualize the eigenvectors on the data. I found that Wikipedia has a good source of information for most of the learning algorithms and definitely worth looking if you wish to delve deeper into the algorithms."
},
{
"code": null,
"e": 8019,
"s": 7976,
"text": "print(\"Top eigenvector U(:,1) =:\",U[:,0])`"
},
{
"code": null,
"e": 8098,
"s": 8019,
"text": "The print statement print: Top eigenvector U(:,1) =: [-0.70710678 -0.70710678]"
},
{
"code": null,
"e": 8210,
"s": 8098,
"text": "To reduce the dimension of the dataset, we project the data onto the principal components (eigenvectors) found."
},
{
"code": null,
"e": 8638,
"s": 8210,
"text": "def projectData(X, U, K): \"\"\" Computes the reduced data representation when projecting only on to the top k eigenvectors \"\"\" m = X.shape[0] U_reduced = U[:,:K] Z = np.zeros((m,K)) for i in range(m): for j in range(K): Z[i,j] = X[i,:] @ U_reduced[:,j] return Z# Project the data onto K=1 dimensionK=1Z = projectData(X_norm, U, K)print(\"Projection of the first example:\",Z[0][0])"
},
{
"code": null,
"e": 8722,
"s": 8638,
"text": "The print statement will print: Projection of the first example: 1.4963126084578515"
},
{
"code": null,
"e": 8826,
"s": 8722,
"text": "Data also can be approximately reconstructed by projecting them back onto the original dimension space."
},
{
"code": null,
"e": 9198,
"s": 8826,
"text": "def recoverData(Z, U, K): \"\"\" Recovers an approximation of the original data when using the projected data \"\"\" m,n = Z.shape[0],U.shape[0] X_rec = np.zeros((m,n)) U_reduced = U[:,:K] for i in range(m): X_rec[i,:] = Z[i,:] @ U_reduced.T return X_recX_rec = recoverData(Z, U, K)print(\"Approximation of the first example:\",X_rec[0,:])"
},
{
"code": null,
"e": 9292,
"s": 9198,
"text": "The print statement will print: Approximation of the first example: [-1.05805279 -1.05805279]"
},
{
"code": null,
"e": 9324,
"s": 9292,
"text": "To visualize the whole process,"
},
{
"code": null,
"e": 9600,
"s": 9324,
"text": "plt.scatter(X_norm[:,0],X_norm[:,1],marker=\"o\",label=\"Original\",facecolors=\"none\",edgecolors=\"b\",s=15)plt.scatter(X_rec[:,0],X_rec[:,1],marker=\"o\",label=\"Approximation\",facecolors=\"none\",edgecolors=\"r\",s=15)plt.title(\"The Normalized and Projected Data after PCA\")plt.legend()"
},
{
"code": null,
"e": 9700,
"s": 9600,
"text": "Finally, we move on to a more complex dataset — Face image dataset. To load and visualize the data,"
},
{
"code": null,
"e": 9961,
"s": 9700,
"text": "mat4 = loadmat(\"ex7faces.mat\")X4 = mat4[\"X\"]fig, ax = plt.subplots(nrows=10,ncols=10,figsize=(8,8))for i in range(0,100,10): for j in range(10): ax[int(i/10),j].imshow(X4[i+j,:].reshape(32,32,order=\"F\"),cmap=\"gray\") ax[int(i/10),j].axis(\"off\")"
},
{
"code": null,
"e": 10158,
"s": 9961,
"text": "These images contain 32 X 32 pixels in grayscale, resulting in a dimension of 1,024 features and our task is to reduce the dimension to around 100 principal components that best describe our data."
},
{
"code": null,
"e": 10491,
"s": 10158,
"text": "X_norm2 = featureNormalize(X4)[0]# Run PCAU2 =pca(X_norm2)[0]#Visualize the top 36 eigenvectors foundU_reduced = U2[:,:36].Tfig2, ax2 = plt.subplots(6,6,figsize=(8,8))for i in range(0,36,6): for j in range(6): ax2[int(i/6),j].imshow(U_reduced[i+j,:].reshape(32,32,order=\"F\"),cmap=\"gray\") ax2[int(i/6),j].axis(\"off\")"
},
{
"code": null,
"e": 10598,
"s": 10491,
"text": "Above is the visualization of 36 principal components that describe the largest variations in the dataset."
},
{
"code": null,
"e": 10805,
"s": 10598,
"text": "Next, we project the data onto the first 100 principal components, effectively reduce the dimension down to a hundred, recover the data and try to understand what is lost in the dimension reduction process."
},
{
"code": null,
"e": 11200,
"s": 10805,
"text": "K2 = 100Z2 = projectData(X_norm2, U2, K2)print(\"The projected data Z has a size of:\",Z2.shape)# Data reconstructionX_rec2 = recoverData(Z2, U2, K2)# Visualize the reconstructed datafig3, ax3 = plt.subplots(10,10,figsize=(8,8))for i in range(0,100,10): for j in range(10): ax3[int(i/10),j].imshow(X_rec2[i+j,:].reshape(32,32,order=\"F\"),cmap=\"gray\") ax3[int(i/10),j].axis(\"off\")"
},
{
"code": null,
"e": 11422,
"s": 11200,
"text": "This is the end of unsupervised learning. Please look forward to the last article in the series. The Jupyter notebook will be uploaded to my GitHub at (https://github.com/Benlau93/Machine-Learning-by-Andrew-Ng-in-Python)."
},
{
"code": null,
"e": 11469,
"s": 11422,
"text": "For other python implementation in the series,"
},
{
"code": null,
"e": 11487,
"s": 11469,
"text": "Linear Regression"
},
{
"code": null,
"e": 11507,
"s": 11487,
"text": "Logistic Regression"
},
{
"code": null,
"e": 11539,
"s": 11507,
"text": "Regularized Logistic Regression"
},
{
"code": null,
"e": 11555,
"s": 11539,
"text": "Neural Networks"
},
{
"code": null,
"e": 11579,
"s": 11555,
"text": "Support Vector Machines"
},
{
"code": null,
"e": 11597,
"s": 11579,
"text": "Anomaly Detection"
}
]
|
Flutter - Expansion Tile Card - GeeksforGeeks | 15 Feb, 2021
The Expansion Tile Card works similarly to the Flutter SDK’s standard ExpansionTile, but it uses the raised-tile style that Google itself uses in its products. It can be called a better version of Flutter’s built-in ExpansionTile.
In this article, we will look into the process of implementing the Expansion Tile Card in a Flutter application by building a simple flutter app. To build the app follow the below steps:
Add the expansion_tile_card dependency to the pubspec.yaml file
Import the dependency to the main.dart file
Add the asset that is to be used in the src/assets/ directory
Create a basic app structure
Create a Homepage to the app
Call the ExpansionTileCard in the body of the app
Assign content to the Expansion Card
Now, let’s look into the steps in detail.
Add the expansion_tile_card dependency to the dependencies section of the pubspec.yaml file as shown below:
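A rough sketch of how the dependencies section could look (the version number is only an assumption — check pub.dev for the latest release):

dependencies:
  flutter:
    sdk: flutter
  expansion_tile_card: ^1.1.0  # assumed version, use the latest from pub.dev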
Use the below line of code to add the expansion_tile_card to the main.dart file:
import 'package:expansion_tile_card/expansion_tile_card.dart';
At this stage, we will create an assets directory inside the root directory of the project that will hold the image which will be shown when the card expands. it would somewhat look like below:
Note: Activate the assets in the assets section of the pubspec.yaml file to use it in the application.
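A minimal sketch of that assets section, assuming the image is saved as assets/gfg.png (the path used later in the code):

flutter:
  uses-material-design: true
  assets:
    - assets/gfg.png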
Create a class MyApp, extend it through a StatelessWidget, and build the root of the application as shown below:
Dart
class MyApp extends StatelessWidget {
  @override
  Widget build(BuildContext context) {
    return MaterialApp(
      title: 'ExpansionTileCard',
      theme: ThemeData(
        primarySwatch: Colors.green,
      ),
      home: MyHomePage(title: 'GeeksForGeeks'),
    );
  }
}
To create a homepage for the app, create a class MyHomePage that extends a StatefulWidget, and add an AppBar and a body to the application as shown below:
Dart
class MyHomePage extends StatefulWidget {
  MyHomePage({Key key, this.title}) : super(key: key);

  final String title;

  @override
  _MyHomePageState createState() => _MyHomePageState();
}

class _MyHomePageState extends State<MyHomePage> {
  final GlobalKey<ExpansionTileCardState> cardA = new GlobalKey();
  final GlobalKey<ExpansionTileCardState> cardB = new GlobalKey();

  @override
  Widget build(BuildContext context) {
    return Scaffold(
      appBar: AppBar(
        title: Text(widget.title),
      ),
      body: ListView(), // the ExpansionTileCard is added here in the next step
    );
  }
}
The dependency can be used within the body of the homepage by making a call to ExpansionTileCard. A tap action is also added to the tile, which leads to the expansion of the tile, as shown below:
Dart
ExpansionTileCard(
  key: cardB,
  leading: CircleAvatar(child: Text('A')),
  title: Text('Tap to Expand!'),
  subtitle: Text('It has the GFG Logo.'),
  children: <Widget>[
    Divider(
      thickness: 1.0,
      height: 1.0,
    ),
    Align(
      alignment: Alignment.centerLeft,
      child: Padding(
        padding: const EdgeInsets.symmetric(
          horizontal: 16.0,
          vertical: 8.0,
        ),
        child: Image.asset('assets/gfg.png'),
      ),
    ),
  ],
),
Complete Source Code:
Dart
import 'package:flutter/material.dart';
import 'package:expansion_tile_card/expansion_tile_card.dart';

void main() => runApp(MyApp());

class MyApp extends StatelessWidget {
  @override
  Widget build(BuildContext context) {
    return MaterialApp(
      title: 'ExpansionTileCard',
      theme: ThemeData(
        primarySwatch: Colors.green,
      ),
      home: MyHomePage(title: 'GeeksForGeeks'),
    );
  }
}

class MyHomePage extends StatefulWidget {
  MyHomePage({Key key, this.title}) : super(key: key);

  final String title;

  @override
  _MyHomePageState createState() => _MyHomePageState();
}

class _MyHomePageState extends State<MyHomePage> {
  final GlobalKey<ExpansionTileCardState> cardA = new GlobalKey();
  final GlobalKey<ExpansionTileCardState> cardB = new GlobalKey();

  @override
  Widget build(BuildContext context) {
    return Scaffold(
      appBar: AppBar(
        title: Text(widget.title),
      ),
      body: ListView(
        children: <Widget>[
          Padding(
            padding: const EdgeInsets.symmetric(horizontal: 12.0),
            child: ExpansionTileCard(
              key: cardB,
              leading: CircleAvatar(child: Text('A')),
              title: Text('Tap to Expand!'),
              subtitle: Text('It has the GFG Logo.'),
              children: <Widget>[
                Divider(
                  thickness: 1.0,
                  height: 1.0,
                ),
                Align(
                  alignment: Alignment.centerLeft,
                  child: Padding(
                    padding: const EdgeInsets.symmetric(
                      horizontal: 16.0,
                      vertical: 8.0,
                    ),
                    child: Image.asset('assets/gfg.png'),
                  ),
                ),
              ],
            ),
          ),
        ],
      ),
    );
  }
}
Output:
android
Flutter
Flutter UI-components
Flutter-widgets
Dart
Flutter
{
"code": null,
"e": 25549,
"s": 25521,
"text": "\n15 Feb, 2021"
},
{
"code": null,
"e": 25790,
"s": 25549,
"text": "The Expansion Tile Card works similarly to that of the Flutter SDK’s standard expansion tile. But it uses the style used by Google itself in its products to raise a tile. It can be called a better version of the Flutter’s ExpansionTileCard."
},
{
"code": null,
"e": 25977,
"s": 25790,
"text": "In this article, we will look into the process of implementing the Expansion Tile Card in a Flutter application by building a simple flutter app. To build the app follow the below steps:"
},
{
"code": null,
"e": 26041,
"s": 25977,
"text": "Add the expansion_tile_card dependency to the pubspec.yaml file"
},
{
"code": null,
"e": 26085,
"s": 26041,
"text": "Import the dependency to the main.dart file"
},
{
"code": null,
"e": 26147,
"s": 26085,
"text": "Add the asset that is to be used in the src/assets/ directory"
},
{
"code": null,
"e": 26176,
"s": 26147,
"text": "Create a basic app structure"
},
{
"code": null,
"e": 26205,
"s": 26176,
"text": "Create a Homepage to the app"
},
{
"code": null,
"e": 26255,
"s": 26205,
"text": "Call the ExpansionTileCard in the body of the app"
},
{
"code": null,
"e": 26292,
"s": 26255,
"text": "Assign content to the Expansion Card"
},
{
"code": null,
"e": 26334,
"s": 26292,
"text": "Now, let’s look into the steps in detail."
},
{
"code": null,
"e": 26442,
"s": 26334,
"text": "Add the expansion_tile_card dependency to the dependencies section of the pubspec.yaml file as shown below:"
},
{
"code": null,
"e": 26523,
"s": 26442,
"text": "Use the below line of code to add the expansion_tile_card to the main.dart file:"
},
{
"code": null,
"e": 26587,
"s": 26523,
"text": "import 'package:expansion_tile_card/expansion_tile_card.dart';\n"
},
{
"code": null,
"e": 26781,
"s": 26587,
"text": "At this stage, we will create an assets directory inside the root directory of the project that will hold the image which will be shown when the card expands. it would somewhat look like below:"
},
{
"code": null,
"e": 26884,
"s": 26781,
"text": "Note: Activate the assets in the assets section of the pubspec.yaml file to use it in the application."
},
{
"code": null,
"e": 26999,
"s": 26884,
"text": "Create a Class Myapp and extend it through a StatelessWidget and build the root of the application as shown below:"
},
{
"code": null,
"e": 27004,
"s": 26999,
"text": "Dart"
},
{
"code": "class MyApp extends StatelessWidget { @override Widget build(BuildContext context) { return MaterialApp( title: 'ExpansionTileCard', theme: ThemeData( primarySwatch: Colors.green, ), home: MyHomePage(title: 'GeeksForGeeks'), ); }}",
"e": 27271,
"s": 27004,
"text": null
},
{
"code": null,
"e": 27434,
"s": 27271,
"text": "To create a Homepage to the app create a Class Homepage and extend it through a StatelessWidget and create an Appbar and a body to the application as shown below:"
},
{
"code": null,
"e": 27439,
"s": 27434,
"text": "Dart"
},
{
"code": "class MyHomePage extends StatefulWidget { MyHomePage({Key key, this.title}) : super(key: key); final String title; @override _MyHomePageState createState() => _MyHomePageState();} class _MyHomePageState extends State<MyHomePage> { final GlobalKey<ExpansionTileCardState> cardA = new GlobalKey(); final GlobalKey<ExpansionTileCardState> cardB = new GlobalKey(); @override Widget build(BuildContext context) { return Scaffold( appBar: AppBar( title: Text(widget.title), ), body: ), ); }",
"e": 27973,
"s": 27439,
"text": null
},
{
"code": null,
"e": 28183,
"s": 27973,
"text": "The dependency can be called within the body of the Homepage by making a call to the ExpansionTileCard. Action will also be added on Tap to the tile that would lead to the expansion of the tile as shown below:"
},
{
"code": null,
"e": 28188,
"s": 28183,
"text": "Dart"
},
{
"code": "ExpansionTileCard( key: cardB, leading: CircleAvatar(child: Text('A')), title: Text('Tap to Expand!'), subtitle: Text('It has the GFG Logo.'), children: <Widget>[ Divider( thickness: 1.0, height: 1.0, ), Align( alignment: Alignment.centerLeft, child: Padding( padding: const EdgeInsets.symmetric( horizontal: 16.0, vertical: 8.0, ), child: Image.asset('assets/gfg.png') ), ),",
"e": 28854,
"s": 28188,
"text": null
},
{
"code": null,
"e": 28876,
"s": 28854,
"text": "Complete Source Code:"
},
{
"code": null,
"e": 28881,
"s": 28876,
"text": "Dart"
},
{
"code": "import 'package:flutter/material.dart';import 'package:expansion_tile_card/expansion_tile_card.dart'; void main() => runApp(MyApp()); class MyApp extends StatelessWidget { @override Widget build(BuildContext context) { return MaterialApp( title: 'ExpansionTileCard', theme: ThemeData( primarySwatch: Colors.green, ), home: MyHomePage(title: 'GeeksForGeeks'), ); }} class MyHomePage extends StatefulWidget { MyHomePage({Key key, this.title}) : super(key: key); final String title; @override _MyHomePageState createState() => _MyHomePageState();} class _MyHomePageState extends State<MyHomePage> { final GlobalKey<ExpansionTileCardState> cardA = new GlobalKey(); final GlobalKey<ExpansionTileCardState> cardB = new GlobalKey(); @override Widget build(BuildContext context) { return Scaffold( appBar: AppBar( title: Text(widget.title), ), body: ListView( children: <Widget>[ Padding( padding: const EdgeInsets.symmetric(horizontal: 12.0), child: ExpansionTileCard( key: cardB, leading: CircleAvatar(child: Text('A')), title: Text('Tap to Expand!'), subtitle: Text('It has the GFG Logo.'), children: <Widget>[ Divider( thickness: 1.0, height: 1.0, ), Align( alignment: Alignment.centerLeft, child: Padding( padding: const EdgeInsets.symmetric( horizontal: 16.0, vertical: 8.0, ), child: Image.asset('assets/gfg.png') ), ), ], ), ), ] ), ); }}",
"e": 30691,
"s": 28881,
"text": null
},
{
"code": null,
"e": 30699,
"s": 30691,
"text": "Output:"
},
{
"code": null,
"e": 30707,
"s": 30699,
"text": "android"
},
{
"code": null,
"e": 30715,
"s": 30707,
"text": "Flutter"
},
{
"code": null,
"e": 30737,
"s": 30715,
"text": "Flutter UI-components"
},
{
"code": null,
"e": 30753,
"s": 30737,
"text": "Flutter-widgets"
},
{
"code": null,
"e": 30758,
"s": 30753,
"text": "Dart"
},
{
"code": null,
"e": 30766,
"s": 30758,
"text": "Flutter"
},
{
"code": null,
"e": 30864,
"s": 30766,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 30873,
"s": 30864,
"text": "Comments"
},
{
"code": null,
"e": 30886,
"s": 30873,
"text": "Old Comments"
},
{
"code": null,
"e": 30918,
"s": 30886,
"text": "Flutter - DropDownButton Widget"
},
{
"code": null,
"e": 30946,
"s": 30918,
"text": "Dart - Null Aware Operators"
},
{
"code": null,
"e": 30990,
"s": 30946,
"text": "Flutter - Creating Bottomsheet GetX Library"
},
{
"code": null,
"e": 31015,
"s": 30990,
"text": "Data Enumeration in Dart"
},
{
"code": null,
"e": 31074,
"s": 31015,
"text": "How to Check String is Empty or Not in Dart (Null Safety)?"
},
{
"code": null,
"e": 31106,
"s": 31074,
"text": "Flutter - DropDownButton Widget"
},
{
"code": null,
"e": 31145,
"s": 31106,
"text": "Flutter - Custom Bottom Navigation Bar"
},
{
"code": null,
"e": 31171,
"s": 31145,
"text": "Flutter - Checkbox Widget"
},
{
"code": null,
"e": 31197,
"s": 31171,
"text": "Flutter - Flexible Widget"
}
]
|
Is it possible to predict stock prices with a neural network? | by Ivan | Towards Data Science | When it comes to time series prediction the reader (the listener, the viewer...) starts thinking about predicting stock prices. This is expected to help to determine when to sell and when to buy more. Sometimes we see papers that describe how one can do this. Paper [1] provides an example here, the authors even provide some results. However, the “Deep Learning with Python” book by Chollet emphasizes that one should not try to use time series prediction techniques to predict stock prices. Chollet explains it in a way, that in the case of a stock market, the data about the previous state is not a good basis to estimate the future state. In paper [3] the authors even conclude that stock price is a martingale and, therefore, the best estimate of the future price (in terms of estimation error) is the current price.
So, is it possible to use a neural network to predict stock prices?
Disclaimer: this theoretical overview reflects my own knowledge of the subject, so it may use incorrect terms, be entirely incorrect and so on. So if you know more than me, you may die laughing. I have warned you.
What is a share? A share is a document that testifies the holder’s right to claim a part of the company’s profits. This implies that the share’s price should depend on the company’s profits. Moreover, the share’s price depends not on the exact company’s profits, but expected profits. This means that the share’s price represents the market traders’ opinion about future profits. And opinions may be wrong. We all remember the stories of startups that cost much but eventually appeared to provide nothing revolutionary and then lost their market price almost entirely. Therefore we can conclude that stock prices depend on the subjective opinion of the market traders.
Consider the figure below. The figure depicts the plot of share’s price of Maersk company. As one can see, one share cost 7718 DKK on the 2nd of April, 2019. The next day the price was 7750 DKK per share. What was the reason? We can see a small capital D letter at the bottom of the plot. This letter means that the company pays dividends this day, and, apparently, the dividends were large enough to surge the demand. So, the upcoming event can cause price growth.
Now consider another plot. This plot shows share prices for Yandex. These are the days when we have heard rumours that one of the banks was going to take over Yandex. Usually, amid such rumours share prices grow, since this means the buyer is going to buy shares from the market, thus increasing demand. This time investors decided that these are not good news.
We can make a simple conclusion here: share price depends mostly on the opinion of traders about the company’s future, and not on the previous price itself. Therefore there is no sense in predicting future stock prices using previous values.
We should predict something using values the target depends on, or, at least, correlates with. In the case of stock prices, one has to take into account events that are external to the market. Probably, it would not be possible to predict such events using a neural network. The fact that more traders went bankrupt than became billionaires tells us that a human is not often able to tell the future. To know more about predicting the unpredictable, read “The Black Swan” book by Nassim Nicholas Taleb.
In theory, the theory and the practice are highly interconnected, but in practice, they are not. Here we are going to try predicting something and see what happens.
We are going to train a neural network that will predict (n+1)-th price using n known values (previous prices). We assume that the time between two subsequent price measurements is constant. First of all, we need the dataset. We can take stock prices at Yahoo Finance.
We will predict daily prices, which means that a day is represented in the dataset with a single value. We will predict the close price using close prices for several previous days. We will use Maersk as the test company. We will get the data using the yfinance Python package. We should take into account that Yahoo may change their API, so the package may stop working unexpectedly. This has already happened at least once, so we have to be prepared for other changes. So, let’s install the package:
pip install yfinance
For more info on how to use the package see here. Now, let’s contact the market:
import yfinance as yf

# create the object that represents Maersk stock data
# here MAERSK-B.CO is Maersk's ticker
maersk = yf.Ticker('MAERSK-B.CO')
We haven’t downloaded any data yet, we have only created the object that we can use to request the data. Yahoo Finance provides dividend information for Maersk and, as we have already seen, dividends affect stock prices. Therefore we want the neural network to take dividends into account when it predicts the prices. This means that when we tell the network to predict the close price for a particular day using a set of prices for the previous days we also need to provide it with a marker that tells whether dividends are paid that day. To get the dates when the dividends are paid, check the maersk.dividends property. To get the share prices we call the history method. The method takes several arguments, and we are especially interested in period and interval.
The period argument defines the period for which we request the data. The argument supports some predefined string values, and we will use one of them. We pass string ’max’ which tells it to give us all data available: starting from the first day shares became available at the market until today. Using start and end argument one can define the exact period. However, since we will use all data available, we will be using the period argument and pass ’max’.
The interval parameter tells the method the interval between two subsequent values. It takes one of the predefined values and we will pass ’1d’ there since we are going to use daily prices.
You can read more on the history method and its arguments here.
So, it’s time to get some data!
history = maersk.history(period='max', interval='1d')
Now the history variable holds a pandas’ DataFrame with the prices. Let’s look at them:
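The original post showed a screenshot of the DataFrame here; a minimal way to peek at it yourself (the column set — Open, High, Low, Close, Volume, Dividends, Stock Splits — is what yfinance returned at the time of writing):

# Show the most recent rows of the downloaded history
print(history.tail())
print(history.columns.tolist())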
It’s time to prepare the data. When designing a neural network to predict time series, one should decide how many inputs the network will have. In our case, we have to choose the number of prices fed into the network to predict the next one. Since we do not know this number now, it is better to be able to generate datasets with different numbers of inputs. Fortunately, Keras developers have already thought about that, and Keras provides a generator for time series that can generate datasets with different numbers of inputs.

In the case of time series prediction, both input and target values are drawn from the same series. This means that we use a sliding window of size j, where j is the number of values we use to predict the (j+1)-th value. In other words, we take j subsequent elements ({x1, x2, ... xj}) of the time series, then we take the (j+1)-th element (x(j+1)) and set it as the target value. The pair (j values, (j+1)-th value) makes a single training example. To make another training example, we move the sliding window by one, and use {x2, x3, ... x(j+1)} as inputs and x(j+2) as the target value.
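To make the sliding window concrete, here is a tiny illustration (not part of the original post) of how windows are cut from a toy series with j = 3:

# Toy series: windows of 3 values predict the 4th
series = [10, 11, 12, 13, 14, 15]
j = 3
for start in range(len(series) - j):
    window = series[start:start + j]
    target = series[start + j]
    print(window, '->', target)
# [10, 11, 12] -> 13
# [11, 12, 13] -> 14
# [12, 13, 14] -> 15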
Keras provides us with TimeseriesGenerator class, and we will use this class to generate the training set. The only difficulty here is that we also want the network to take dividends into account. Therefore we have to write a function that uses TimeseriesGenerator class to generate the training set and then enriches the generator’s output with the information about the dividends.
def generate_series(data, value_num):
    close = data['Close']
    dividends = data['Dividends']
    tsg = TimeseriesGenerator(close, close,
                              length=value_num,
                              batch_size=len(close))
    global_index = value_num
    i, t = tsg[0]
    has_dividends = np.zeros(len(i))
    for b_row in range(len(t)):
        assert(abs(t[b_row] - close[global_index]) <= 0.001)
        has_dividends[b_row] = dividends[global_index] > 0
        global_index += 1
    return np.concatenate((i, np.transpose([has_dividends])), axis=1), t
The function takes two arguments: the dataset we want it to process (the data argument) and the number of input values the series should have (the value_num argument).
As you know, neural networks are trained using Gradient Descent, which employs the gradient of the cost function. The easiest approach assumes that we compute the cost function gradient using the entire dataset. However, there are downsides here. Firstly, the dataset may be extremely large, which will make it very time consuming to compute the gradient. Secondly, if the dataset is extremely large, then the gradient value can also be extremely large, so large that it simply does not fit into the machine precision. The second issue is, of course, usually important in extreme cases (slight pun intended).

Some smart people have pointed out that we do not actually need the exact gradient value[4]. We only need its estimate that determines which direction we should move to minimize the cost function. Therefore we can estimate the gradient using a small subset of the training examples. Of course, we will eventually walk through the entire dataset, but there is no need to compute the gradient for the entire dataset at once. We can divide the dataset into several subsets called batches and process only a single batch at a time. We update the network’s weights using the gradient computed for a single batch. Once we have processed all batches, we can say we have run a single training epoch. Within a single training session, there might be more than one epoch; the exact number of epochs depends on the task. The same smart people emphasize that the training examples must be shuffled[4]. That means that a pair of subsequent training examples must not belong to the same batch.

Let’s test the function and generate a dataset that uses four input values.
inputs, targets = generate_series(history, 4)
Let’s look at a single example.
# print(inputs[3818])
array([1.246046e+04, 1.232848e+04, 1.244496e+04, 1.274000e+04, 1.000000e+00])
As we can see, a training example is a vector with four prices and an additional fifth value that indicates whether dividends are paid that day. Note that the values are relatively large. Indeed, the close price ranges from 767.7 to 12740.0. Neural networks do not work well with such ranges, so we have to normalize the data. We will use the simplest normalization strategy, MinMax normalization.
h_min = history.min()
normalized_h = (history - h_min) / (history.max() - h_min)
Since we have modified the initial data, we have to re-generate the dataset.
inputs, targets = generate_series(normalized_h, 4)
Let’s look at the normalized data.
# print(inputs[3818])
array([0.9766511 , 0.96562732, 0.97535645, 1. , 1. ])
As we can see, the values now range from 0 to 1. That makes the task easier. However, we now have to keep the minimum and maximum values (h_min and history.max()) so that we can normalize the network inputs when we predict the prices and denormalize its output to get the exact value.
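As a minimal sketch of how this could look at prediction time (the helper names are hypothetical and not part of the original code):

# Hypothetical helpers built around the saved minimum and maximum of the Close price
close_min = history['Close'].min()
close_max = history['Close'].max()

def normalize_close(price):
    return (price - close_min) / (close_max - close_min)

def denormalize_close(value):
    return value * (close_max - close_min) + close_min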
Finally, it’s time for neural networks. The network will have (n+1) inputs, n for prices and one for dividend indicator, and one output. We still need to determine n. For this, we will write a function that creates a neural network with a specified number of inputs. We use input_shape=(n+1,) expression to include the dividend indicator.
def create_model(n):
    m = models.Sequential()
    m.add(layers.Dense(64, activation='relu', input_shape=(n+1,)))
    m.add(layers.Dense(64, activation='relu'))
    m.add(layers.Dense(1))
    return m
Before training a network, we divide the dataset into two parts: train and test sets. We are going to use the training set to train the network and the test set to test the network performance on the unknown data. We will never use examples of the test set while training the network.
train_inputs = inputs[:-1000]
val_inputs = inputs[-1000:]
train_targets = targets[:-1000]
val_targets = targets[-1000:]
Let’s write one more function. This function will help us decide how many inputs the network should have. It takes the range of input counts to check and the number of epochs to train for. For each input count, the function creates a network, prepares data for it, then trains the network and evaluates its performance on the test set.
def select_inputs(data, start, end, epochs):
    models = {}
    for inputs in range(start, end+1):
        print('Using {} inputs'.format(inputs))
        model_inputs, targets = generate_series(data, inputs)
        train_inputs = model_inputs[:-1000]
        val_inputs = model_inputs[-1000:]
        train_targets = targets[:-1000]
        val_targets = targets[-1000:]
        m = create_model(inputs)
        print('Training')
        m.compile(optimizer='adam', loss='mse')
        h = m.fit(train_inputs, train_targets,
                  epochs=epochs, batch_size=32,
                  validation_data=(val_inputs, val_targets))
        model_info = {'model': m, 'history': h.history}
        models[inputs] = model_info
    return models
Now, let’s train networks with 2 to 10 inputs for 20 epochs:
trained_models = select_inputs(normalized_h, 2, 10, 20)
When the training is done, we can get a short summary with the following code:
model_stats = {}
for k, v in trained_models.items():
    train_history = v['history']
    loss = train_history['loss'][-1]
    val_loss = train_history['val_loss'][-1]
    model_stats[k] = {'inputs': k, 'loss': loss, 'val_loss': val_loss}
Printing the model_stats values we are able to see the summary:
{2: {'inputs': 2, 'loss': 6.159038594863468e-05, 'val_loss': 0.0006709674960002303},
 3: {'inputs': 3, 'loss': 7.425233190960614e-05, 'val_loss': 0.00021176348975859583},
 4: {'inputs': 4, 'loss': 7.471898652647588e-05, 'val_loss': 0.00022580388654023408},
 5: {'inputs': 5, 'loss': 8.866131339595126e-05, 'val_loss': 0.00027424713294021784},
 6: {'inputs': 6, 'loss': 7.322355930846842e-05, 'val_loss': 0.0003323734663426876},
 7: {'inputs': 7, 'loss': 8.709070955596233e-05, 'val_loss': 0.0004295352199114859},
 8: {'inputs': 8, 'loss': 8.170129280188121e-05, 'val_loss': 0.00024587249546311797},
 9: {'inputs': 9, 'loss': 7.327485314296024e-05, 'val_loss': 0.0003118165017804131},
 10: {'inputs': 10, 'loss': 8.064566193526276e-05, 'val_loss': 0.0003668071269057691}}
As we can see, the error computed with the test set is always slightly greater than the value computed for the train set. This means that the network handles known data (training examples) slightly better than unknown (test examples).
We can now plot the test error depending on the network’s input number.
import matplotlib.pyplot as plt

val_loss = []
indices = []
for k, v in model_stats.items():
    indices.append(k)
    val_loss.append(v['val_loss'])
plt.plot(indices, val_loss)
With the plot, we can see which network has shown the lowermost test error. The exact result may change with time depending on the amount of historical data available through Yahoo Finance.
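Instead of eyeballing the plot, the winner can also be picked programmatically from the model_stats dictionary built above (a small sketch, not in the original post):

# Pick the input count with the smallest validation loss
best = min(model_stats.values(), key=lambda s: s['val_loss'])
print('Best number of inputs:', best['inputs'], 'val_loss:', best['val_loss'])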
There is one interesting observation. If one runs this script twice, then they are expected to receive different results. In other words, the lowermost test error is produced by different networks. Since the only difference between the networks is the number of inputs, then we can conclude that the test error does not depend on the number of inputs that much. This, in turn, supports the initial speculation that we won’t be able to predict stock prices with a neural network. Apparently, the network trains to ignore some of the inputs, concluding that the output does not depend on them.
Remember that we have normalized the data. Now let’s compute the exact error for the networks.
close_min = history['Close'].min()
close_max = history['Close'].max()
for k in model_stats:
    e = ((close_max - close_min) * model_stats[k]['val_loss'] + close_min)
    print(k, e)
Output:
2 771.0400773414451
3 770.341964375037
4 771.6538168560887
5 771.9637314503287
6 770.3164239349957
7 771.5147973106168
8 778.0784490537151
9 779.7546236891968
10 770.8432766947052
Wow! The errors are very large. Even for the network that has shown the lowermost test error, the exact error is very large. Honestly speaking, I would not trust a network with such errors when deciding which shares to buy. Neither would I recommend others to trust it.
Now we can draw a plot that compares the exact prices and the predicted.
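The plotting code for this comparison is not shown in the original post; a sketch of how it could be produced for one of the trained networks (the 4-input model is an arbitrary choice here) follows:

# Sketch: denormalize predictions of the 4-input model and plot them against the actual prices
m4 = trained_models[4]['model']
inputs4, targets4 = generate_series(normalized_h, 4)
pred4 = m4.predict(inputs4[-1000:]).flatten()

actual = targets4[-1000:] * (close_max - close_min) + close_min
predicted = pred4 * (close_max - close_min) + close_min

plt.plot(actual, label='Actual close price')
plt.plot(predicted, label='Predicted close price')
plt.legend()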
As one can see the graphs do not match that often.
We got large error values for the unknown data. This means that the network has failed to predict the close price. Perhaps we can improve the result by changing the network architecture or training it for more epochs. I don’t think we can get more data since we have already used all of the data available. It is possible to train a network using data for different companies, but since these companies may have different properties, their stock prices may change according to different laws, which will only confuse the network.
We should note that the network does not predict the stock price itself. Instead, it tries to guess what would be the next value of a series using the given values. That is because the price does not encode how it would change next time. If the price grows for some time, then there is no guarantee that it won’t decrease the next minute. The price is mostly affected by external events that the network is not aware of.
Possibly, we will be able to use a neural network for short-term predictions, to determine price changes within the next few minutes. This might be so because the smaller the time period we predict for, the smaller the chance that an external event happens. However, in this case, it might be better to use a linear regression model to predict the direction of price changes.
If we look closer at the stock price plots, we can see that the changes look random. Possibly that is true if we have only prices and have no idea about the external events. Therefore, stock prices indeed look like a martingale, a process we cannot predict. I agree with Chollet: it is better that you don’t predict stock prices with a neural network.
Update
I have come across another post here on Towards Data Science dedicated to stock prediction. The author tried using Technical Analysis to feed a neural network with more values it could use for prediction. However, the author did not succeed; he concluded that the stock price is mostly a random process that could not be predicted based on its own values. This conclusion matches the findings of this post: you can’t predict stock prices with a neural network even using Technical Analysis to gain more statistics for the data.
References
Bugorskij, V. Ispol’zovanie nejronnyh setej dlya modelirovaniya prognoza kotirovok cennyh bumag / V. Bugorskij, A. Sergienko // Prikladnaya informatika. — 2008. — T. No 3(15). (in Russian)
Chollet, F. Deep learning with python — 2017. — Manning Publications.
Elliot, A. Time Series Prediction : Predicting Stock Price / A. Elliot, C. H. Hsu // ArXiv e-prints. — 2017.
Ian Goodfellow. Deep Learning / Ian Goodfellow, Yoshua Bengio, Aaron Courville — MIT Press, 2016. | [
{
"code": null,
"e": 868,
"s": 46,
"text": "When it comes to time series prediction the reader (the listener, the viewer...) starts thinking about predicting stock prices. This is expected to help to determine when to sell and when to buy more. Sometimes we see papers that describe how one can do this. Paper [1] provides an example here, the authors even provide some results. However, the “Deep Learning with Python” book by Chollet emphasizes that one should not try to use time series prediction techniques to predict stock prices. Chollet explains it in a way, that in the case of a stock market, the data about the previous state is not a good basis to estimate the future state. In paper [3] the authors even conclude that stock price is a martingale and, therefore, the best estimate of the future price (in terms of estimation error) is the current price."
},
{
"code": null,
"e": 936,
"s": 868,
"text": "So, is it possible to use a neural network to predict stock prices?"
},
{
"code": null,
"e": 1150,
"s": 936,
"text": "Disclaimer: this theoretical overview reflects my own knowledge of the subject, so it may use incorrect terms, be entirely incorrect and so on. So if you know more than me, you may die laughing. I have warned you."
},
{
"code": null,
"e": 1819,
"s": 1150,
"text": "What is a share? A share is a document that testifies the holder’s right to claim a part of the company’s profits. This implies that the share’s price should depend on the company’s profits. Moreover, the share’s price depends not on the exact company’s profits, but expected profits. This means that the share’s price represents the market traders’ opinion about future profits. And opinions may be wrong. We all remember the stories of startups that cost much but eventually appeared to provide nothing revolutionary and then lost their market price almost entirely. Therefore we can conclude that stock prices depend on the subjective opinion of the market traders."
},
{
"code": null,
"e": 2285,
"s": 1819,
"text": "Consider the figure below. The figure depicts the plot of share’s price of Maersk company. As one can see, one share cost 7718 DKK on the 2nd of April, 2019. The next day the price was 7750 DKK per share. What was the reason? We can see a small capital D letter at the bottom of the plot. This letter means that the company pays dividends this day, and, apparently, the dividends were large enough to surge the demand. So, the upcoming event can cause price growth."
},
{
"code": null,
"e": 2647,
"s": 2285,
"text": "Now consider another plot. This plot shows share prices for Yandex. These are the days when we have heard rumours that one of the banks was going to take over Yandex. Usually, amid such rumours share prices grow, since this means the buyer is going to buy shares from the market, thus increasing demand. This time investors decided that these are not good news."
},
{
"code": null,
"e": 2889,
"s": 2647,
"text": "We can make a simple conclusion here: share price depends mostly on the opinion of traders about the company’s future, and not on the previous price itself. Therefore there is no sense in predicting future stock prices using previous values."
},
{
"code": null,
"e": 3387,
"s": 2889,
"text": "We should predict something using values the target depends on, or, at least, correlates with. In the case of stock prices, one has to take into account events that are external to the market. Probably, it would not be possible to predict such events using a neural network. The fact that more traders went bankrupt than became billionaire tells us that a human is not often able to tell the future. To know more about predicting unpredictable, read “The Black Swan” book by Nassim Nicholas Taleb."
},
{
"code": null,
"e": 3552,
"s": 3387,
"text": "In theory, the theory and the practice are highly interconnected, but in practice, they are not. Here we are going to try predicting something and see what happens."
},
{
"code": null,
"e": 3821,
"s": 3552,
"text": "We are going to train a neural network that will predict (n+1)-th price using n known values (previous prices). We assume that the time between two subsequent price measurements is constant. First of all, we need the dataset. We can take stock prices at Yahoo Finance."
},
{
"code": null,
"e": 4322,
"s": 3821,
"text": "We will predict daily prices, which means that a day is represented in the dataset with a single value. We will predict the close price using close prices for several previous days. We will use Maersk as the test company.We will get the data using the yfinance Python package. We should take into account that Yahoo may change their API, so the package may stop working unexpectedly. This has already happened at least once, so we have to be prepared for other changes. So, let’s install the package:"
},
{
"code": null,
"e": 4343,
"s": 4322,
"text": "pip install yfinance"
},
{
"code": null,
"e": 4424,
"s": 4343,
"text": "For more info on how to use the package see here. Now, let’s contact the market:"
},
{
"code": null,
"e": 4576,
"s": 4424,
"text": "import yfinance as yf# create the object that represents Maersk stock data# here MAERSK-B.CO -- is the Maerks's tickermaersk = yf.Ticker('MAERSK-B.CO')"
},
{
"code": null,
"e": 5344,
"s": 4576,
"text": "We haven’t downloaded any data yet, we have only created the object that we can use to request the data. Yahoo Finance provides dividend information for Maersk and, as we have already seen, dividends affect stock prices. Therefore we want the neural network to take dividends into account when it predicts the prices. This means that when we tell the network to predict the close price for a particular day using a set of prices for the previous days we also need to provide it with a marker that tells whether dividends are paid that day. To get the dates when the dividends are paid, check the maersk.dividends property. To get the share prices we call the history method. The method takes several arguments, and we are especially interested in period and interval."
},
{
"code": null,
"e": 5804,
"s": 5344,
"text": "The period argument defines the period for which we request the data. The argument supports some predefined string values, and we will use one of them. We pass string ’max’ which tells it to give us all data available: starting from the first day shares became available at the market until today. Using start and end argument one can define the exact period. However, since we will use all data available, we will be using the period argument and pass ’max’."
},
{
"code": null,
"e": 5994,
"s": 5804,
"text": "The interval parameter tells the method the interval between two subsequent values. It takes one of the predefined values and we will pass ’1d’ there since we are going to use daily prices."
},
{
"code": null,
"e": 6058,
"s": 5994,
"text": "You can read more on the history method and its arguments here."
},
{
"code": null,
"e": 6090,
"s": 6058,
"text": "So, it’s time to get some data!"
},
{
"code": null,
"e": 6144,
"s": 6090,
"text": "history = maersk.history(period='max', interval='1d')"
},
{
"code": null,
"e": 6232,
"s": 6144,
"text": "Now the history variable holds a pandas’ DataFrame with the prices. Let’s look at them:"
},
{
"code": null,
"e": 7352,
"s": 6232,
"text": "It’s time to prepare the data. When designing a neural network to predict time series, one should decide how many inputs the network will have. In our case, we have to choose the number of prices fed into the network to predict the next one. Since we do not know this number now, it is better to be able to generate datasets with different amount of inputs. Fortunately, Keras developers have already thought about that and now Keras provides a generator for time series that can generate datasets with different amount of inputs. In the case of time series prediction, both input and target values are drawn from the same series. Which means that we use the sliding window of size j, where j is the number of values we use to predict (j+1)-th value. In other words, we take j subsequent elements ({x1, x2, ... xj}) of the time series, then we take the (j+1)-th element (x(j+1)) and set it as the target value. The pair (j values, (j+1)-th value) makes a single training example. To make another training example, we move the sliding window by one, and use {x2, x3, ... x(j+1)} as inputs and x(j+2) as the target value."
},
{
"code": null,
"e": 7735,
"s": 7352,
"text": "Keras provides us with TimeseriesGenerator class, and we will use this class to generate the training set. The only difficulty here is that we also want the network to take dividends into account. Therefore we have to write a function that uses TimeseriesGenerator class to generate the training set and then enriches the generator’s output with the information about the dividends."
},
{
"code": null,
"e": 8338,
"s": 7735,
"text": "def generate_series(data, value_num): close = data['Close'] dividends = data['Dividends'] tsg = TimeseriesGenerator(close, close, length=value_num, batch_size=len(close)) global_index = value_num i, t = tsg[0] has_dividends = np.zeros(len(i)) for b_row in range(len(t)): assert(abs(t[b_row] - close[global_index]) <= 0.001) has_dividends[b_row] = dividends[global_index] > 0 global_index += 1 return np.concatenate((i, np.transpose([has_dividends])), axis=1), t"
},
{
"code": null,
"e": 8506,
"s": 8338,
"text": "The function takes two arguments: the dataset we want it to process (the data argument) and the number of input values the series should have (the value_num argument)."
},
{
"code": null,
"e": 10168,
"s": 8506,
"text": "As you know, neural networks are trained using Gradient Descent that employs the gradient of the cost function. The easiest approach assumes that we compute the cost function gradient using the entire dataset. However, there are downsides here. Firstly, the dataset maybe extremely large, which will make it very time consuming to compute the gradient. Secondly, if the dataset is extremely large, then the gradient value can also be extremely large, so large, that it simply does not fit into the machine precision. The second issue is, of course, usually important in extreme cases (slight pun intended). Some smart people have pointed out that we do not actually need the exact gradient value[4]. We only need its estimate that determines which direction we should move to minimize the cost function. Therefore we can estimate the gradient using a small subset of the training examples. Of course, we will eventually walk through the entire dataset, but there is no need to compute the gradient for the entire dataset at once. We can divide the dataset into several subsets called batches and process only a single batch at a time. We update the network’s weights using the gradient computed for a single batch. Once we have processed all batches, we can say we have run a single training epoch. Within a single training session, there might be more than one epoch, the exact number of epochs depends on the task. The same smart people emphasize that the training examples must be shuffled[4]. That means that a pair of subsequent training examples must not belong to the same batch.Let’s test the function and generate a dataset that uses four input values."
},
{
"code": null,
"e": 10214,
"s": 10168,
"text": "inputs, targets = generate_series(history, 4)"
},
{
"code": null,
"e": 10246,
"s": 10214,
"text": "Let’s look at a single example."
},
{
"code": null,
"e": 10351,
"s": 10246,
"text": "# print(inputs[3818])array([1.246046e+04, 1.232848e+04, 1.244496e+04, 1.274000e+04, 1.000000e+00])"
},
{
"code": null,
"e": 10745,
"s": 10351,
"text": "As we can see, a training example is a vector with four prices and an additional fifths value that indicates whether dividends are paid that day. Note that values are relatively large. Indeed, the close price ranges from 767.7 to 12740.0 Neural networks do not work well with such ranges, so we have to normalize the data. We will use the simplest normalization strategy, MinMax normalization."
},
{
"code": null,
"e": 10825,
"s": 10745,
"text": "h_min = history.min()normalized_h = (history - h_min) / (history.max() - h_min)"
},
{
"code": null,
"e": 10902,
"s": 10825,
"text": "Since we have modified the initial data, we have to re-generate the dataset."
},
{
"code": null,
"e": 10953,
"s": 10902,
"text": "inputs, targets = generate_series(normalized_h, 4)"
},
{
"code": null,
"e": 10988,
"s": 10953,
"text": "Let’s look at the normalized data."
},
{
"code": null,
"e": 11077,
"s": 10988,
"text": "# print(inputs[3818])array([0.9766511 , 0.96562732, 0.97535645, 1. , 1. ])"
},
{
"code": null,
"e": 11324,
"s": 11077,
"text": "As we can see, the values now range from 0 to 1. That makes the task easier. However, we now have to keep h.min() and h.max() so that we can normalize network inputs when we predict the prices and to denormalize its output to get the exact value."
},
{
"code": null,
"e": 11663,
"s": 11324,
"text": "Finally, it’s time for neural networks. The network will have (n+1) inputs, n for prices and one for dividend indicator, and one output. We still need to determine n. For this, we will write a function that creates a neural network with a specified number of inputs. We use input_shape=(n+1,) expression to include the dividend indicator."
},
{
"code": null,
"e": 11861,
"s": 11663,
"text": "def create_model(n): m = models.Sequential() m.add(layers.Dense(64, activation='relu', input_shape=(n+1,))) m.add(layers.Dense(64, activation='relu')) m.add(layers.Dense(1)) return m"
},
{
"code": null,
"e": 12146,
"s": 11861,
"text": "Before training a network, we divide the dataset into two parts: train and test sets. We are going to use the training set to train the network and the test set to test the network performance on the unknown data. We will never use examples of the test set while training the network."
},
{
"code": null,
"e": 12263,
"s": 12146,
"text": "train_inputs = inputs[:-1000]val_inputs = inputs[-1000:]train_targets = targets[:-1000]val_targets = targets[-1000:]"
},
{
"code": null,
"e": 12580,
"s": 12263,
"text": "Let’s write one more function. This function will help us decide how many inputs the network should have. This function takes the number of inputs to check the number of epochs to train for. The function will create a network, prepare data for it, then train the network and evaluate its performance on the test set."
},
{
"code": null,
"e": 13344,
"s": 12580,
"text": "def select_inputs(data, start, end, epochs): models = {} for inputs in range(start, end+1): print('Using {} inputs'.format(inputs)) model_inputs, targets = generate_series(data, inputs) train_inputs = model_inputs[:-1000] val_inputs = model_inputs[-1000:] train_targets = targets[:-1000] val_targets = targets[-1000:] m = create_model(inputs) print('Training') m.compile(optimizer='adam', loss='mse') h = m.fit(train_inputs, train_targets, epochs=epochs, batch_size=32, validation_data=(val_inputs, val_targets)) model_info = {'model': m, 'history': h.history} models[inputs] = model_info return models"
},
{
"code": null,
"e": 13405,
"s": 13344,
"text": "Now, let’s train networks with 2 to 10 inputs for 20 epochs:"
},
{
"code": null,
"e": 13461,
"s": 13405,
"text": "trained_models = select_inputs(normalized_h, 2, 10, 20)"
},
{
"code": null,
"e": 13540,
"s": 13461,
"text": "When the training is done, we can get a short summary with the following code:"
},
{
"code": null,
"e": 13774,
"s": 13540,
"text": "model_stats = {}for k, v in trained_models.items(): train_history = v['history'] loss = train_history['loss'][-1] val_loss = train_history['val_loss'][-1] model_stats[k] = {'inputs': k, 'loss': loss, 'val_loss': val_loss}"
},
{
"code": null,
"e": 13838,
"s": 13774,
"text": "Printing the model_stats values we are able to see the summary:"
},
{
"code": null,
"e": 14619,
"s": 13838,
"text": "{2: {'inputs': 2, 'loss': 6.159038594863468e-05, 'val_loss': 0.0006709674960002303}, 3: {'inputs': 3, 'loss': 7.425233190960614e-05, 'val_loss': 0.00021176348975859583}, 4: {'inputs': 4, 'loss': 7.471898652647588e-05, 'val_loss': 0.00022580388654023408}, 5: {'inputs': 5, 'loss': 8.866131339595126e-05, 'val_loss': 0.00027424713294021784}, 6: {'inputs': 6, 'loss': 7.322355930846842e-05, 'val_loss': 0.0003323734663426876}, 7: {'inputs': 7, 'loss': 8.709070955596233e-05, 'val_loss': 0.0004295352199114859}, 8: {'inputs': 8, 'loss': 8.170129280188121e-05, 'val_loss': 0.00024587249546311797}, 9: {'inputs': 9, 'loss': 7.327485314296024e-05, 'val_loss': 0.0003118165017804131}, 10: {'inputs': 10, 'loss': 8.064566193526276e-05, 'val_loss': 0.0003668071269057691}}"
},
{
"code": null,
"e": 14854,
"s": 14619,
"text": "As we can see, the error computed with the test set is always slightly greater than the value computed for the train set. This means that the network handles known data (training examples) slightly better than unknown (test examples)."
},
{
"code": null,
"e": 14926,
"s": 14854,
"text": "We can now plot the test error depending on the network’s input number."
},
{
"code": null,
"e": 15097,
"s": 14926,
"text": "import matplotlib.pyplot as pltval_loss = []indices = []for k, v in model_stats.items(): indices.append(k) val_loss.append(v['val_loss'])plt.plot(indices, val_loss)"
},
{
"code": null,
"e": 15287,
"s": 15097,
"text": "With the plot, we can see which network has shown the lowermost test error. The exact result may change with time depending on the amount of historical data available through Yahoo Finance."
},
{
"code": null,
"e": 15879,
"s": 15287,
"text": "There is one interesting observation. If one runs this script twice, then they are expected to receive different results. In other words, the lowermost test error is produced by different networks. Since the only difference between the networks is the number of inputs, then we can conclude that the test error does not depend on the number of inputs that much. This, in turn, supports the initial speculation that we won’t be able to predict stock prices with a neural network. Apparently, the network trains to ignore some of the inputs, concluding that the output does not depend on them."
},
{
"code": null,
"e": 15975,
"s": 15879,
"text": "Remember, that we have normalized the data. Now let’s compute the exact error for the networks."
},
{
"code": null,
"e": 16154,
"s": 15975,
"text": "close_min = history['Close'].min()close_max = history['Close'].max()for k in model_stats: e = ((close_max - close_min) * model_stats[k]['val_loss'] + close_min) print(k, e)"
},
{
"code": null,
"e": 16162,
"s": 16154,
"text": "Output:"
},
{
"code": null,
"e": 16334,
"s": 16162,
"text": "2 771.04007734144513 770.3419643750374 771.65381685608875 771.96373145032876 770.31642393499577 771.51479731061688 778.07844905371519 779.754623689196810 770.8432766947052"
},
{
"code": null,
"e": 16604,
"s": 16334,
"text": "Wow! The errors are very large. Even for the network that has shown the lowermost test error, the exact error is very large. Honestly speaking, I would not trust a network with such errors when deciding which shares to buy. Neither would I recommend others to trust it."
},
{
"code": null,
"e": 16677,
"s": 16604,
"text": "Now we can draw a plot that compares the exact prices and the predicted."
},
{
"code": null,
"e": 16728,
"s": 16677,
"text": "As one can see the graphs do not match that often."
},
{
"code": null,
"e": 17256,
"s": 16728,
"text": "We got large error values for the unknown data. This means that the network has failed to predict the close price. Perhaps, we can improve the result by changing the network architecture on train it for more epochs. I don’t think we can get more data since we have already used all of the data available. It is possible to train a network using data for different companies, but since these companies may have different properties, their stock prices may change according to different laws, which will only confuse the network."
},
{
"code": null,
"e": 17681,
"s": 17256,
"text": "We should note that the network does not predict the stock price itself. Instead, it tries to guess what would be the next value of a series using the given values. That is because the price does not encode how would it change next time. If the price grows for some time, then there is no guarantee that it won’t decrease the next minute. The price is mostly affected by the external event, that the network is not aware of."
},
{
"code": null,
"e": 18056,
"s": 17681,
"text": "Possibly, we will be able to use a neural network for short-term predictions, to determine price changes within the next few minutes. This might be so because the smaller the time period we predict for, the lesser the change that an external event happens. However, in this case, it might be better to use a linear regression model to predict the direction of price changes."
},
{
"code": null,
"e": 18407,
"s": 18056,
"text": "If we look closer at the stock prices plots we can see that changes look random. Possibly that is true if we have only prices and have no idea about the external events. Therefore, stock prices indeed look like a martingale, a process we could not predict. I agree with Chollet, it is better that you don’t predict stock prices with a neural network."
},
{
"code": null,
"e": 18414,
"s": 18407,
"text": "Update"
},
{
"code": null,
"e": 18940,
"s": 18414,
"text": "I have came across another post here on Towards Data Science dedicated to stock prediction. The author tried using Technical Analysis to feed a neural network with more values it can use for prediction. However, the author did not succeed, he concluded that the stock price is mostly a random process that could not be predicted based on its own values. This conclusion matches the findings of this post: you can’t predict stock prices with a neural network even using Technical Analysis to gain more statistics for the data."
},
{
"code": null,
"e": 18951,
"s": 18940,
"text": "References"
},
{
"code": null,
"e": 19414,
"s": 18951,
"text": "Bugorskij, V. Ispol’zovanie nejronnyh setej dlya modelirovaniya prognoza kotirovok cennyh bumag / V. Bugorskij, A. Sergienko // Prikladnaya informatika. — 2008. — T. No 3(15). (in Russian)Chollet, F. Deep learning with python — 2017. — Manning Publications.Elliot, A. Time Series Prediction : Predicting Stock Price / A. Elliot, C. H. Hsu // ArXiv e-prints. — 2017.Ian Goodfellow. Deep Learning / Ian Goodfellow, Yoshua Bengio, Aaron Courville — MIT Press, 2016."
},
{
"code": null,
"e": 19603,
"s": 19414,
"text": "Bugorskij, V. Ispol’zovanie nejronnyh setej dlya modelirovaniya prognoza kotirovok cennyh bumag / V. Bugorskij, A. Sergienko // Prikladnaya informatika. — 2008. — T. No 3(15). (in Russian)"
},
{
"code": null,
"e": 19673,
"s": 19603,
"text": "Chollet, F. Deep learning with python — 2017. — Manning Publications."
},
{
"code": null,
"e": 19782,
"s": 19673,
"text": "Elliot, A. Time Series Prediction : Predicting Stock Price / A. Elliot, C. H. Hsu // ArXiv e-prints. — 2017."
}
]
|
Flutter and Blockchain – Hello World Dapp | 11 May, 2022
Flutter and Blockchain
This tutorial will take you through the process of building your first mobile dapp – Hello World Dapp!
This tutorial is meant for those with a basic knowledge of Ethereum and smart contracts, who have some knowledge of the Flutter framework but are new to mobile dapps.
In this tutorial we will be covering:
Setting up the development environmentCreating a Truffle ProjectWriting your first Smart ContractCompiling and Migrating the Smart ContractTesting the Smart ContractContract linking with FlutterCreating a UI to interact with the smart contractInteracting with the complete Dapp
Setting up the development environment
Creating a Truffle Project
Writing your first Smart Contract
Compiling and Migrating the Smart Contract
Testing the Smart Contract
Contract linking with Flutter
Creating a UI to interact with the smart contract
Interacting with the complete Dapp
Truffle is the most popular development framework for Ethereum, with a mission to make your life a whole lot easier. But before we install Truffle, make sure to install Node.js.
Once we have node installed, we only need one command to install Truffle:
npm install -g truffle
We will also be using Ganache, a personal blockchain for Ethereum development you can use to deploy smart contracts, develop applications, and run tests. You can download Ganache by navigating to http://truffleframework.com/ganache and clicking the “Download” button.
Create a basic Flutter project in your favorite IDEInitialize Truffle in the flutter project directory by running
Create a basic Flutter project in your favorite IDE
Initialize Truffle in the flutter project directory by running
truffle init
contracts/ : Contains the Solidity contract files.
migrations/: Contains migration script files (Truffle uses a migration system to handle contract deployment).
test/ : Contains test script files.
truffle-config.js: Contains truffle deployment configurations information.
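If the command succeeds, the Truffle scaffold sits alongside the existing Flutter files; a typical layout (exact contents may vary slightly between Truffle versions) looks roughly like this:
contracts/Migrations.sol
migrations/1_initial_migration.js
test/
truffle-config.js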
The Smart Contract actually acts as the back-end logic and storage for our Dapp.
Create a new file named HelloWorld.sol in the contracts/ directory.Add the following content to the file:
Create a new file named HelloWorld.sol in the contracts/ directory.
Add the following content to the file:
Solidity
pragma solidity ^0.5.9; contract HelloWorld { }
The minimum version of Solidity required is noted at the top of the contract: pragma solidity ^0.5.9;.
Statements are terminated with semicolons.
Add the following variable on the next line after contract HelloWorld {
Add the following variable on the next line after contract HelloWorld {
Solidity
string public yourName ;
We just defined a single variable yourName of type string; yourName is declared with the public modifier, which means that we can access it from outside the smart contract.
Add the following constructor on the next line after string public yourName ;
Add the following constructor on the next line after string public yourName ;
Solidity
constructor() public { yourName = "Unknown" ;}
A constructor in Solidity is executed only once, when a contract is created, and it is used to initialize the contract state. Here we’re just setting the initial value of the variable yourName to “Unknown”.
Add the following function to the smart contract after the constructor declaration we set up above.
Add the following function to the smart contract after the constructor declaration we set up above.
Solidity
function setName(string memory nm) public{ yourName = nm ;}
In the above function, we’ll be taking in nm (a string) and setting the yourName variable to it.
Here, memory is the data location: it tells the compiler that nm lives only for the duration of the function call, unlike state variables such as yourName, which live in storage on the blockchain.
In a terminal, make sure you are in the root of the directory that contains the flutter and truffle project, Run the following command:
In a terminal, make sure you are in the root of the directory that contains the flutter and truffle project, Run the following command:
truffle compile
You should see output similar to the following:
truffle compile
You’ll see one JavaScript file already in the migrations/ directory: 1_initial_migration.js. This handles deploying the Migrations.sol contract to observe subsequent smart contract migrations, and ensures we don’t double-migrate unchanged contracts in the future.
Let’s create our own migration script :
Create a new file named 2_deploy_contracts.js in the migrations/ directory.Add the following content to the 2_deploy_contracts.js file:
Create a new file named 2_deploy_contracts.js in the migrations/ directory.
Add the following content to the 2_deploy_contracts.js file:
Javascript
const HelloWorld = artifacts.require("HelloWorld"); module.exports = function (deployer) { deployer.deploy(HelloWorld);};
Before we can migrate our contract to the blockchain, we need to have a blockchain running. For this article, we’re going to use Ganache, a personal blockchain for Ethereum development you can use to deploy contracts, develop applications, and run tests. If you haven’t already, download Ganache and double-click the icon to launch the application. This will generate a blockchain running locally on port 7545.
Ganache
Add the following content to the truffle-config.js file:
Javascript
module.exports = { networks: { development: { host: "127.0.0.1", // Localhost (default: none) port: 7545, // Standard Ethereum port (default: none) network_id: "*", // Any network (default: none) }, }, contracts_build_directory: "./src/artifacts/", // Configure your compilers compilers: { solc: { // See the solidity docs for advice // about optimization and evmVersion optimizer: { enabled: false, runs: 200 }, evmVersion: "byzantium" } }};
Migrating the contract to the blockchain, run:
truffle migrate
You should see output similar to the following:
truffle migrate
Take a look at Ganache: the first account originally had 100 ether; it is now lower due to the transaction costs of migration.
In Truffle, we can write tests either in JavaScript or Solidity. In this article, we’ll be writing our tests in JavaScript using the Chai and Mocha libraries.
Create a new file named helloWorld.js in the test/ directory.Add the following content to the helloWorld.js file:
Create a new file named helloWorld.js in the test/ directory.
Add the following content to the helloWorld.js file:
Javascript
const HelloWorld = artifacts.require("HelloWorld") ; contract("HelloWorld" , () => { it("Hello World Testing" , async () => { const helloWorld = await HelloWorld.deployed() ; await helloWorld.setName("User Name") ; const result = await helloWorld.yourName() ; assert(result === "User Name") ; });});
HelloWorld: The smart contract we want to test. We begin our test by importing the HelloWorld contract using artifacts.require.
To test the setName function, recall that it accepts a name(string) as an argument.
Also, the yourName variable in our contract uses the public modifier, so Solidity generates a getter for it that we can call from outside the contract.
Truffle imports Chai, so we can use the assert function. We pass the actual value and the expected value; to check that the name is set properly, we use assert(result === “User Name”) ;
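As an optional extra check (not part of the original tutorial), we could also assert the constructor’s default value. Tests that share the same deployed instance run in order, so an it block like the sketch below would have to be placed inside the same contract() callback before the setName test:
Javascript
it("Starts with the default name", async () => {
    const helloWorld = await HelloWorld.deployed();
    // the constructor initializes yourName to "Unknown"
    const result = await helloWorld.yourName();
    assert(result === "Unknown");
});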
Running the test as:
truffle test
If all the test pass, you’ll see the console output similar to this:
truffle test
In the pubspec.yaml file, add the following packages (a minimal sketch of the resulting dependencies block follows the list):
provider: ^4.3.3
web3dart: ^1.2.3
http: ^0.12.2
web_socket_channel: ^1.2.0
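A minimal sketch of how these entries sit in pubspec.yaml (the flutter entry is the one already generated for the project; only the four packages above are additions):
dependencies:
  flutter:
    sdk: flutter
  provider: ^4.3.3
  web3dart: ^1.2.3
  http: ^0.12.2
  web_socket_channel: ^1.2.0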
Also, add the asset src/artifacts/HelloWorld.json to the pubspec.yaml file; this JSON artifact is generated during migration at the location set by contracts_build_directory in truffle-config.js.
assets:
- src/artifacts/HelloWorld.json
Create a new file named contract_linking.dart in the lib/ directory.Add the following content to the file:
Create a new file named contract_linking.dart in the lib/ directory.
Add the following content to the file:
Dart
import 'package:flutter/material.dart'; class ContractLinking extends ChangeNotifier { }
Just a simple class with ChangeNotifier for state management.
Add the following variable on the next line after class ContractLinking extends ChangeNotifier {.
Dart
final String _rpcUrl = "http://10.0.2.2:7545";final String _wsUrl = "ws://10.0.2.2:7545/";final String _privateKey = "Enter Private Key";
The library web3dart won’t send signed transactions to miners itself. Instead, it relies on an RPC client to do that. For the WebSocket URL, just modify the RPC URL. You can get the RPC URL from Ganache:
Ganache – RPC url
Get the Private Key from ganache:
Ganache – Private Key
Declare the following variables below :
Dart
Web3Client _client;bool isLoading = true; String _abiCode;EthereumAddress _contractAddress; Credentials _credentials; DeployedContract _contract;ContractFunction _yourName;ContractFunction _setName; String deployedName;
_client variable will be used to establish a connection to the ethereum rpc node with the help of WebSocket.isLoading variable will be used to check the state of the contract._abiCode variable will be used to, read the contract abi._contractAddress variable will be used to store the contract address of the deployed smart contract._credentials variable will store the credentials of the smart contract deployer._contract variable will be used to tell Web3dart where our contract is declared._yourName and _setName variable will be used to store the functions declared in our HelloWorld.sol smart contract.deployedName will hold the name from the smart contract.
_client variable will be used to establish a connection to the ethereum rpc node with the help of WebSocket.
isLoading variable will be used to check the state of the contract.
_abiCode variable will be used to, read the contract abi.
_contractAddress variable will be used to store the contract address of the deployed smart contract.
_credentials variable will store the credentials of the smart contract deployer.
_contract variable will be used to tell Web3dart where our contract is declared.
_yourName and _setName variable will be used to store the functions declared in our HelloWorld.sol smart contract.
deployedName will hold the name from the smart contract.
After declaring the above variable’s, Declare the following functions below it :
Dart
ContractLinking() { initialSetup(); } initialSetup() async { // establish a connection to the ethereum rpc node. The socketConnector // property allows more efficient event streams over websocket instead of // http-polls. However, the socketConnector property is experimental. _client = Web3Client(_rpcUrl, Client(), socketConnector: () { return IOWebSocketChannel.connect(_wsUrl).cast<String>(); }); await getAbi(); await getCredentials(); await getDeployedContract(); } Future<void> getAbi() async { // Reading the contract abi String abiStringFile = await rootBundle.loadString("src/artifacts/HelloWorld.json"); var jsonAbi = jsonDecode(abiStringFile); _abiCode = jsonEncode(jsonAbi["abi"]); _contractAddress = EthereumAddress.fromHex(jsonAbi["networks"]["5777"]["address"]); } Future<void> getCredentials() async { _credentials = await _client.credentialsFromPrivateKey(_privateKey); } Future<void> getDeployedContract() async { // Telling Web3dart where our contract is declared. _contract = DeployedContract( ContractAbi.fromJson(_abiCode, "HelloWorld"), _contractAddress); // Extracting the functions, declared in contract. _yourName = _contract.function("yourName"); _setName = _contract.function("setName"); getName(); } getName() async { // Getting the current name declared in the smart contract. var currentName = await _client .call(contract: _contract, function: _yourName, params: []); deployedName = currentName[0]; isLoading = false; notifyListeners(); } setName(String nameToSet) async { // Setting the name to nameToSet(name defined by user) isLoading = true; notifyListeners(); await _client.sendTransaction( _credentials, Transaction.callContract( contract: _contract, function: _setName, parameters: [nameToSet])); getName(); }
Create a new file named helloUI.dart in the lib/ directory.Add the following content to the file:
Create a new file named helloUI.dart in the lib/ directory.
Add the following content to the file:
Dart
import 'package:flutter/material.dart';import 'package:hello_world/contract_linking.dart';import 'package:provider/provider.dart'; class HelloUI extends StatelessWidget { @override Widget build(BuildContext context) { // Getting the value and object or contract_linking var contractLink = Provider.of<ContractLinking>(context); TextEditingController yourNameController = TextEditingController(); return Scaffold( appBar: AppBar( title: Text("Hello World !"), centerTitle: true, ), body: Container( padding: EdgeInsets.symmetric(horizontal: 20), child: Center( child: contractLink.isLoading ? CircularProgressIndicator() : SingleChildScrollView( child: Form( child: Column( children: [ Row( mainAxisAlignment: MainAxisAlignment.center, children: [ Text( "Hello ", style: TextStyle( fontWeight: FontWeight.bold, fontSize: 52), ), Text( contractLink.deployedName, style: TextStyle( fontWeight: FontWeight.bold, fontSize: 52, color: Colors.tealAccent), ), ], ), Padding( padding: EdgeInsets.only(top: 29), child: TextFormField( controller: yourNameController, decoration: InputDecoration( border: OutlineInputBorder(), labelText: "Your Name", hintText: "What is your name ?", icon: Icon(Icons.drive_file_rename_outline)), ), ), Padding( padding: EdgeInsets.only(top: 30), child: ElevatedButton( child: Text( 'Set Name', style: TextStyle(fontSize: 30), ), style: ElevatedButton.styleFrom( primary: Colors.green, ), onPressed: () { contractLink.setName(yourNameController.text); yourNameController.clear(); }, ), ) ], ), ), ), ), ), ); }}
Update the main.dart as :
Dart
import 'package:flutter/material.dart';import 'package:hello_world/contract_linking.dart';import 'package:hello_world/helloUI.dart';import 'package:provider/provider.dart'; void main() { runApp(MyApp());} class MyApp extends StatelessWidget { @override Widget build(BuildContext context) { // Inserting Provider as a parent of HelloUI() return ChangeNotifierProvider<ContractLinking>( create: (_) => ContractLinking(), child: MaterialApp( title: "Hello World", theme: ThemeData( brightness: Brightness.dark, primaryColor: Colors.cyan[400], accentColor: Colors.deepOrange[200]), home: HelloUI(), ), ); }}
Now we’re ready to use our dapp!
Just RUN the Flutter Project.
Hello World Dapp
As you can see, the “Hello Unknown” in the UI is actually coming from the smart contract variable yourName.
When you type your name into the TextFormField and press the `Set Name` ElevatedButton, it invokes the setName function from contract_linking.dart, which in turn invokes the setName function of our smart contract (HelloWorld.sol).
Hello World Dapp
Hello World Dapp
Hello World Dapp
Congratulations! You have taken a huge step toward becoming a full-fledged mobile dapp developer. For developing locally, you have all the tools you need to start making more advanced dapps.
If you get stuck somewhere, do check out the GitHub repository for the complete code.
Blockchain
Dart
Flutter
Solidity | [
{
"code": null,
"e": 52,
"s": 24,
"text": "\n11 May, 2022"
},
{
"code": null,
"e": 75,
"s": 52,
"text": "Flutter and Blockchain"
},
{
"code": null,
"e": 178,
"s": 75,
"text": "This tutorial will take you through the process of building your first mobile dapp – Hello World Dapp!"
},
{
"code": null,
"e": 345,
"s": 178,
"text": "This tutorial is meant for those with a basic knowledge of Ethereum and smart contracts, who have some knowledge of the Flutter framework but are new to mobile dapps."
},
{
"code": null,
"e": 383,
"s": 345,
"text": "In this tutorial we will be covering:"
},
{
"code": null,
"e": 661,
"s": 383,
"text": "Setting up the development environmentCreating a Truffle ProjectWriting your first Smart ContractCompiling and Migrating the Smart ContractTesting the Smart ContractContract linking with FlutterCreating a UI to interact with the smart contractInteracting with the complete Dapp"
},
{
"code": null,
"e": 700,
"s": 661,
"text": "Setting up the development environment"
},
{
"code": null,
"e": 727,
"s": 700,
"text": "Creating a Truffle Project"
},
{
"code": null,
"e": 761,
"s": 727,
"text": "Writing your first Smart Contract"
},
{
"code": null,
"e": 804,
"s": 761,
"text": "Compiling and Migrating the Smart Contract"
},
{
"code": null,
"e": 831,
"s": 804,
"text": "Testing the Smart Contract"
},
{
"code": null,
"e": 861,
"s": 831,
"text": "Contract linking with Flutter"
},
{
"code": null,
"e": 911,
"s": 861,
"text": "Creating a UI to interact with the smart contract"
},
{
"code": null,
"e": 946,
"s": 911,
"text": "Interacting with the complete Dapp"
},
{
"code": null,
"e": 1120,
"s": 946,
"text": "Truffle is the most popular development framework for Ethereum with a mission to make your life a whole lot easier. But before we install truffle make sure to install node ."
},
{
"code": null,
"e": 1194,
"s": 1120,
"text": "Once we have node installed, we only need one command to install Truffle:"
},
{
"code": null,
"e": 1217,
"s": 1194,
"text": "npm install -g truffle"
},
{
"code": null,
"e": 1485,
"s": 1217,
"text": "We will also be using Ganache, a personal blockchain for Ethereum development you can use to deploy smart contracts, develop applications, and run tests. You can download Ganache by navigating to http://truffleframework.com/ganache and clicking the “Download” button."
},
{
"code": null,
"e": 1599,
"s": 1485,
"text": "Create a basic Flutter project in your favorite IDEInitialize Truffle in the flutter project directory by running"
},
{
"code": null,
"e": 1651,
"s": 1599,
"text": "Create a basic Flutter project in your favorite IDE"
},
{
"code": null,
"e": 1714,
"s": 1651,
"text": "Initialize Truffle in the flutter project directory by running"
},
{
"code": null,
"e": 1727,
"s": 1714,
"text": "truffle init"
},
{
"code": null,
"e": 1773,
"s": 1727,
"text": "contracts/ : Contains solidity contract file."
},
{
"code": null,
"e": 1883,
"s": 1773,
"text": "migrations/: Contains migration script files (Truffle uses a migration system to handle contract deployment)."
},
{
"code": null,
"e": 1919,
"s": 1883,
"text": "test/ : Contains test script files."
},
{
"code": null,
"e": 1994,
"s": 1919,
"text": "truffle-config.js: Contains truffle deployment configurations information."
},
{
"code": null,
"e": 2075,
"s": 1994,
"text": "The Smart Contract actually acts as the back-end logic and storage for our Dapp."
},
{
"code": null,
"e": 2181,
"s": 2075,
"text": "Create a new file named HelloWorld.sol in the contracts/ directory.Add the following content to the file:"
},
{
"code": null,
"e": 2249,
"s": 2181,
"text": "Create a new file named HelloWorld.sol in the contracts/ directory."
},
{
"code": null,
"e": 2288,
"s": 2249,
"text": "Add the following content to the file:"
},
{
"code": null,
"e": 2297,
"s": 2288,
"text": "Solidity"
},
{
"code": "pragma solidity ^0.5.9; contract HelloWorld { }",
"e": 2347,
"s": 2297,
"text": null
},
{
"code": null,
"e": 2450,
"s": 2347,
"text": "The minimum version of Solidity required is noted at the top of the contract: pragma solidity ^0.5.9;."
},
{
"code": null,
"e": 2493,
"s": 2450,
"text": "Statements are terminated with semicolons."
},
{
"code": null,
"e": 2566,
"s": 2493,
"text": "Add the following variable on the next line after contract HelloWorld { "
},
{
"code": null,
"e": 2639,
"s": 2566,
"text": "Add the following variable on the next line after contract HelloWorld { "
},
{
"code": null,
"e": 2648,
"s": 2639,
"text": "Solidity"
},
{
"code": "string public yourName ;",
"e": 2673,
"s": 2648,
"text": null
},
{
"code": null,
"e": 2834,
"s": 2673,
"text": "We just defined a single variable yourName of type string, also yourName is a public modifier which means that we can access it from outside the smart contract."
},
{
"code": null,
"e": 2912,
"s": 2834,
"text": "Add the following constructor on the next line after string public yourName ;"
},
{
"code": null,
"e": 2990,
"s": 2912,
"text": "Add the following constructor on the next line after string public yourName ;"
},
{
"code": null,
"e": 2999,
"s": 2990,
"text": "Solidity"
},
{
"code": "constructor() public { yourName = \"Unknown\" ;}",
"e": 3053,
"s": 2999,
"text": null
},
{
"code": null,
"e": 3246,
"s": 3053,
"text": "Constructor in solidity executed only once, when a contract is created and it is used to initialize contract state. Here we’re just setting the initial value of variable yourName to “Unknown”."
},
{
"code": null,
"e": 3346,
"s": 3246,
"text": "Add the following function to the smart contract after the constructor declaration we set up above."
},
{
"code": null,
"e": 3446,
"s": 3346,
"text": "Add the following function to the smart contract after the constructor declaration we set up above."
},
{
"code": null,
"e": 3455,
"s": 3446,
"text": "Solidity"
},
{
"code": "function setName(string memory nm) public{ yourName = nm ;}",
"e": 3522,
"s": 3455,
"text": null
},
{
"code": null,
"e": 3619,
"s": 3522,
"text": "In the above function, we’ll be taking in an nm(string) and setting the yourName variable to it."
},
{
"code": null,
"e": 3648,
"s": 3619,
"text": "Memory is the data location."
},
{
"code": null,
"e": 3784,
"s": 3648,
"text": "In a terminal, make sure you are in the root of the directory that contains the flutter and truffle project, Run the following command:"
},
{
"code": null,
"e": 3920,
"s": 3784,
"text": "In a terminal, make sure you are in the root of the directory that contains the flutter and truffle project, Run the following command:"
},
{
"code": null,
"e": 3936,
"s": 3920,
"text": "truffle compile"
},
{
"code": null,
"e": 3984,
"s": 3936,
"text": "You should see output similar to the following:"
},
{
"code": null,
"e": 4000,
"s": 3984,
"text": "truffle compile"
},
{
"code": null,
"e": 4264,
"s": 4000,
"text": "You’ll see one JavaScript file already in the migrations/ directory: 1_initial_migration.js. This handles deploying the Migrations.sol contract to observe subsequent smart contract migrations, and ensures we don’t double-migrate unchanged contracts in the future."
},
{
"code": null,
"e": 4304,
"s": 4264,
"text": "Let’s create our own migration script :"
},
{
"code": null,
"e": 4440,
"s": 4304,
"text": "Create a new file named 2_deploy_contracts.js in the migrations/ directory.Add the following content to the 2_deploy_contracts.js file:"
},
{
"code": null,
"e": 4516,
"s": 4440,
"text": "Create a new file named 2_deploy_contracts.js in the migrations/ directory."
},
{
"code": null,
"e": 4577,
"s": 4516,
"text": "Add the following content to the 2_deploy_contracts.js file:"
},
{
"code": null,
"e": 4588,
"s": 4577,
"text": "Javascript"
},
{
"code": "const HelloWorld = artifacts.require(\"HelloWorld\"); module.exports = function (deployer) { deployer.deploy(HelloWorld);};",
"e": 4712,
"s": 4588,
"text": null
},
{
"code": null,
"e": 5123,
"s": 4712,
"text": "Before we can migrate our contract to the blockchain, we need to have a blockchain running. For this article, we’re going to use Ganache, a personal blockchain for Ethereum development you can use to deploy contracts, develop applications, and run tests. If you haven’t already, download Ganache and double-click the icon to launch the application. This will generate a blockchain running locally on port 7545."
},
{
"code": null,
"e": 5131,
"s": 5123,
"text": "Ganache"
},
{
"code": null,
"e": 5188,
"s": 5131,
"text": "Add the following content to the truffle-config.js file:"
},
{
"code": null,
"e": 5199,
"s": 5188,
"text": "Javascript"
},
{
"code": "module.exports = { networks: { development: { host: \"127.0.0.1\", // Localhost (default: none) port: 7545, // Standard Ethereum port (default: none) network_id: \"*\", // Any network (default: none) }, }, contracts_build_directory: \"./src/artifacts/\", // Configure your compilers compilers: { solc: { // See the solidity docs for advice // about optimization and evmVersion optimizer: { enabled: false, runs: 200 }, evmVersion: \"byzantium\" } }};",
"e": 5767,
"s": 5199,
"text": null
},
{
"code": null,
"e": 5814,
"s": 5767,
"text": "Migrating the contract to the blockchain, run:"
},
{
"code": null,
"e": 5830,
"s": 5814,
"text": "truffle migrate"
},
{
"code": null,
"e": 5878,
"s": 5830,
"text": "You should see output similar to the following:"
},
{
"code": null,
"e": 5894,
"s": 5878,
"text": "truffle migrate"
},
{
"code": null,
"e": 6028,
"s": 5894,
"text": "Take a look into the Ganache, the first account originally had 100 ether, it is now lower, due to the transaction costs of migration."
},
{
"code": null,
"e": 6187,
"s": 6028,
"text": "In Truffle, we can write tests either in JavaScript or Solidity, In this article, we’ll be writing our tests in Javascript using the Chai and Mocha libraries."
},
{
"code": null,
"e": 6301,
"s": 6187,
"text": "Create a new file named helloWorld.js in the test/ directory.Add the following content to the helloWorld.js file:"
},
{
"code": null,
"e": 6363,
"s": 6301,
"text": "Create a new file named helloWorld.js in the test/ directory."
},
{
"code": null,
"e": 6416,
"s": 6363,
"text": "Add the following content to the helloWorld.js file:"
},
{
"code": null,
"e": 6427,
"s": 6416,
"text": "Javascript"
},
{
"code": "const HelloWorld = artifacts.require(\"HelloWorld\") ; contract(\"HelloWorld\" , () => { it(\"Hello World Testing\" , async () => { const helloWorld = await HelloWorld.deployed() ; await helloWorld.setName(\"User Name\") ; const result = await helloWorld.yourName() ; assert(result === \"User Name\") ; });});",
"e": 6758,
"s": 6427,
"text": null
},
{
"code": null,
"e": 6886,
"s": 6758,
"text": "HelloWorld: The smart contract we want to test, We begin our test by importing our HelloWorld contract using artifacts.require."
},
{
"code": null,
"e": 6970,
"s": 6886,
"text": "To test the setName function, recall that it accepts a name(string) as an argument."
},
{
"code": null,
"e": 7096,
"s": 6970,
"text": "Also, the yourName variable in our contract is using public modifier, which we can use as a getter from the outside function."
},
{
"code": null,
"e": 7282,
"s": 7096,
"text": "Truffle imports Chai so we can use the assert function. We pass the actual value and the expected value, To check that the name is set properly or not, assert(result === “User Name”) ;."
},
{
"code": null,
"e": 7303,
"s": 7282,
"text": "Running the test as:"
},
{
"code": null,
"e": 7316,
"s": 7303,
"text": "truffle test"
},
{
"code": null,
"e": 7385,
"s": 7316,
"text": "If all the test pass, you’ll see the console output similar to this:"
},
{
"code": null,
"e": 7398,
"s": 7385,
"text": "truffle test"
},
{
"code": null,
"e": 7455,
"s": 7398,
"text": "In the pubspec.yaml file import the following packages :"
},
{
"code": null,
"e": 7530,
"s": 7455,
"text": "provider: ^4.3.3\nweb3dart: ^1.2.3\nhttp: ^0.12.2\nweb_socket_channel: ^1.2.0"
},
{
"code": null,
"e": 7672,
"s": 7530,
"text": "Also, add the asset src/artifacts/HelloWorld.json to pubspec.yaml file which is generated by truffle-config.js while we migrate our contract."
},
{
"code": null,
"e": 7716,
"s": 7672,
"text": "assets:\n - src/artifacts/HelloWorld.json"
},
{
"code": null,
"e": 7823,
"s": 7716,
"text": "Create a new file named contract_linking.dart in the lib/ directory.Add the following content to the file:"
},
{
"code": null,
"e": 7892,
"s": 7823,
"text": "Create a new file named contract_linking.dart in the lib/ directory."
},
{
"code": null,
"e": 7931,
"s": 7892,
"text": "Add the following content to the file:"
},
{
"code": null,
"e": 7936,
"s": 7931,
"text": "Dart"
},
{
"code": "import 'package:flutter/material.dart'; class ContractLinking extends ChangeNotifier { }",
"e": 8029,
"s": 7936,
"text": null
},
{
"code": null,
"e": 8091,
"s": 8029,
"text": "Just a simple class with ChangeNotifier for state management."
},
{
"code": null,
"e": 8189,
"s": 8091,
"text": "Add the following variable on the next line after class ContractLinking extends ChangeNotifier {."
},
{
"code": null,
"e": 8194,
"s": 8189,
"text": "Dart"
},
{
"code": "final String _rpcUrl = \"http://10.0.2.2:7545\";final String _wsUrl = \"ws://10.0.2.2:7545/\";final String _privateKey = \"Enter Private Key\";",
"e": 8332,
"s": 8194,
"text": null
},
{
"code": null,
"e": 8540,
"s": 8332,
"text": "The library web3dart won’t send signed transactions to miners itself. Instead, it relies on an RPC client to do that. For the WebSocket URL just modify the RPC URL. You can get the RPC URL from the ganache :"
},
{
"code": null,
"e": 8558,
"s": 8540,
"text": "Ganache – RPC url"
},
{
"code": null,
"e": 8592,
"s": 8558,
"text": "Get the Private Key from ganache:"
},
{
"code": null,
"e": 8614,
"s": 8592,
"text": "Ganache – Private Key"
},
{
"code": null,
"e": 8654,
"s": 8614,
"text": "Declare the following variables below :"
},
{
"code": null,
"e": 8659,
"s": 8654,
"text": "Dart"
},
{
"code": "Web3Client _client;bool isLoading = true; String _abiCode;EthereumAddress _contractAddress; Credentials _credentials; DeployedContract _contract;ContractFunction _yourName;ContractFunction _setName; String deployedName;",
"e": 8883,
"s": 8659,
"text": null
},
{
"code": null,
"e": 9546,
"s": 8883,
"text": "_client variable will be used to establish a connection to the ethereum rpc node with the help of WebSocket.isLoading variable will be used to check the state of the contract._abiCode variable will be used to, read the contract abi._contractAddress variable will be used to store the contract address of the deployed smart contract._credentials variable will store the credentials of the smart contract deployer._contract variable will be used to tell Web3dart where our contract is declared._yourName and _setName variable will be used to store the functions declared in our HelloWorld.sol smart contract.deployedName will hold the name from the smart contract."
},
{
"code": null,
"e": 9655,
"s": 9546,
"text": "_client variable will be used to establish a connection to the ethereum rpc node with the help of WebSocket."
},
{
"code": null,
"e": 9723,
"s": 9655,
"text": "isLoading variable will be used to check the state of the contract."
},
{
"code": null,
"e": 9781,
"s": 9723,
"text": "_abiCode variable will be used to, read the contract abi."
},
{
"code": null,
"e": 9882,
"s": 9781,
"text": "_contractAddress variable will be used to store the contract address of the deployed smart contract."
},
{
"code": null,
"e": 9963,
"s": 9882,
"text": "_credentials variable will store the credentials of the smart contract deployer."
},
{
"code": null,
"e": 10044,
"s": 9963,
"text": "_contract variable will be used to tell Web3dart where our contract is declared."
},
{
"code": null,
"e": 10159,
"s": 10044,
"text": "_yourName and _setName variable will be used to store the functions declared in our HelloWorld.sol smart contract."
},
{
"code": null,
"e": 10216,
"s": 10159,
"text": "deployedName will hold the name from the smart contract."
},
{
"code": null,
"e": 10297,
"s": 10216,
"text": "After declaring the above variable’s, Declare the following functions below it :"
},
{
"code": null,
"e": 10302,
"s": 10297,
"text": "Dart"
},
{
"code": "ContractLinking() { initialSetup(); } initialSetup() async { // establish a connection to the ethereum rpc node. The socketConnector // property allows more efficient event streams over websocket instead of // http-polls. However, the socketConnector property is experimental. _client = Web3Client(_rpcUrl, Client(), socketConnector: () { return IOWebSocketChannel.connect(_wsUrl).cast<String>(); }); await getAbi(); await getCredentials(); await getDeployedContract(); } Future<void> getAbi() async { // Reading the contract abi String abiStringFile = await rootBundle.loadString(\"src/artifacts/HelloWorld.json\"); var jsonAbi = jsonDecode(abiStringFile); _abiCode = jsonEncode(jsonAbi[\"abi\"]); _contractAddress = EthereumAddress.fromHex(jsonAbi[\"networks\"][\"5777\"][\"address\"]); } Future<void> getCredentials() async { _credentials = await _client.credentialsFromPrivateKey(_privateKey); } Future<void> getDeployedContract() async { // Telling Web3dart where our contract is declared. _contract = DeployedContract( ContractAbi.fromJson(_abiCode, \"HelloWorld\"), _contractAddress); // Extracting the functions, declared in contract. _yourName = _contract.function(\"yourName\"); _setName = _contract.function(\"setName\"); getName(); } getName() async { // Getting the current name declared in the smart contract. var currentName = await _client .call(contract: _contract, function: _yourName, params: []); deployedName = currentName[0]; isLoading = false; notifyListeners(); } setName(String nameToSet) async { // Setting the name to nameToSet(name defined by user) isLoading = true; notifyListeners(); await _client.sendTransaction( _credentials, Transaction.callContract( contract: _contract, function: _setName, parameters: [nameToSet])); getName(); }",
"e": 12264,
"s": 10302,
"text": null
},
{
"code": null,
"e": 12362,
"s": 12264,
"text": "Create a new file named helloUI.dart in the lib/ directory.Add the following content to the file:"
},
{
"code": null,
"e": 12422,
"s": 12362,
"text": "Create a new file named helloUI.dart in the lib/ directory."
},
{
"code": null,
"e": 12461,
"s": 12422,
"text": "Add the following content to the file:"
},
{
"code": null,
"e": 12466,
"s": 12461,
"text": "Dart"
},
{
"code": "import 'package:flutter/material.dart';import 'package:hello_world/contract_linking.dart';import 'package:provider/provider.dart'; class HelloUI extends StatelessWidget { @override Widget build(BuildContext context) { // Getting the value and object or contract_linking var contractLink = Provider.of<ContractLinking>(context); TextEditingController yourNameController = TextEditingController(); return Scaffold( appBar: AppBar( title: Text(\"Hello World !\"), centerTitle: true, ), body: Container( padding: EdgeInsets.symmetric(horizontal: 20), child: Center( child: contractLink.isLoading ? CircularProgressIndicator() : SingleChildScrollView( child: Form( child: Column( children: [ Row( mainAxisAlignment: MainAxisAlignment.center, children: [ Text( \"Hello \", style: TextStyle( fontWeight: FontWeight.bold, fontSize: 52), ), Text( contractLink.deployedName, style: TextStyle( fontWeight: FontWeight.bold, fontSize: 52, color: Colors.tealAccent), ), ], ), Padding( padding: EdgeInsets.only(top: 29), child: TextFormField( controller: yourNameController, decoration: InputDecoration( border: OutlineInputBorder(), labelText: \"Your Name\", hintText: \"What is your name ?\", icon: Icon(Icons.drive_file_rename_outline)), ), ), Padding( padding: EdgeInsets.only(top: 30), child: ElevatedButton( child: Text( 'Set Name', style: TextStyle(fontSize: 30), ), style: ElevatedButton.styleFrom( primary: Colors.green, ), onPressed: () { contractLink.setName(yourNameController.text); yourNameController.clear(); }, ), ) ], ), ), ), ), ), ); }}",
"e": 15145,
"s": 12466,
"text": null
},
{
"code": null,
"e": 15171,
"s": 15145,
"text": "Update the main.dart as :"
},
{
"code": null,
"e": 15176,
"s": 15171,
"text": "Dart"
},
{
"code": "import 'package:flutter/material.dart';import 'package:hello_world/contract_linking.dart';import 'package:hello_world/helloUI.dart';import 'package:provider/provider.dart'; void main() { runApp(MyApp());} class MyApp extends StatelessWidget { @override Widget build(BuildContext context) { // Inserting Provider as a parent of HelloUI() return ChangeNotifierProvider<ContractLinking>( create: (_) => ContractLinking(), child: MaterialApp( title: \"Hello World\", theme: ThemeData( brightness: Brightness.dark, primaryColor: Colors.cyan[400], accentColor: Colors.deepOrange[200]), home: HelloUI(), ), ); }}",
"e": 15870,
"s": 15176,
"text": null
},
{
"code": null,
"e": 15903,
"s": 15870,
"text": "Now we’re ready to use our dapp!"
},
{
"code": null,
"e": 15933,
"s": 15903,
"text": "Just RUN the Flutter Project."
},
{
"code": null,
"e": 15950,
"s": 15933,
"text": "Hello World Dapp"
},
{
"code": null,
"e": 16058,
"s": 15950,
"text": "As you can see the Hello Unknown, in the UI is actually coming from the smart contract, variable yourName ."
},
{
"code": null,
"e": 16293,
"s": 16058,
"text": "When you type your name into the TextFormField and Press `Set Name` ElevatedButton, it will invoke the setName function from contract_linking.dart which will directly invoke the setName function of our smart contract (HelloWorld.sol)."
},
{
"code": null,
"e": 16310,
"s": 16293,
"text": "Hello World Dapp"
},
{
"code": null,
"e": 16327,
"s": 16310,
"text": "Hello World Dapp"
},
{
"code": null,
"e": 16344,
"s": 16327,
"text": "Hello World Dapp"
},
{
"code": null,
"e": 16529,
"s": 16344,
"text": "Congratulations! You have taken a huge step to become a full-fledged mobile dapp developer. For developing locally, you have all the tools you need to start making more advanced dapps."
},
{
"code": null,
"e": 16607,
"s": 16529,
"text": "If you stuck somewhere, do check out the GitHub repository for complete code."
},
{
"code": null,
"e": 16618,
"s": 16607,
"text": "Blockchain"
},
{
"code": null,
"e": 16623,
"s": 16618,
"text": "Dart"
},
{
"code": null,
"e": 16631,
"s": 16623,
"text": "Flutter"
},
{
"code": null,
"e": 16640,
"s": 16631,
"text": "Solidity"
}
]
|
How to align Image in HTML? | 21 Jul, 2021
Image alignment is used to position the image at different locations (top, bottom, right, left, middle) in our web pages. We use the <img> align attribute to align the image. The <img> tag is an inline element.
Syntax:
<img align=”left|right|middle|top|bottom”>
Attribute Values:
left: It is used for the alignment of image to the left.
right: It is used for the alignment of image to the right.
middle: It is used for the alignment of image to the middle.
top: It is used for the alignment of image to the top.
bottom: It is used for the alignment of image to the bottom.
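Note that the align attribute of <img> is deprecated in HTML5; browsers still honour it, but the same effect is normally achieved with CSS. As an illustration only (not part of the attribute reference above), the “left” case can be written with a CSS float:
HTML
<img style="float: left;" src="https://media.geeksforgeeks.org/wp-content/uploads/20190506164011/logo3.png" alt="">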
To align the image to the left, set the align attribute value to “left”.
Syntax:
<img align=”left”>
Example :
HTML
<!DOCTYPE html><html lang="en"><head> <meta charset="UTF-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <title>Left Alignment of Image</title></head><body> <h1>GeeksforGeeks</h1> <h3>Welcome to GeeksforGeeks</h3> <h4>Left Alignment of Image</h4> <!-- Keep align attribute value as left --> <img align="left" src="https://media.geeksforgeeks.org/wp- content/uploads/20190506164011/logo3.png" alt=""></body></html>
Output:
To align the image to the right, set the align attribute value to “right”.
Syntax:
<img align=”right”>
Example:
HTML
<!DOCTYPE html><html lang="en"><head> <meta charset="UTF-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <title>Right Alignment of Image</title></head><body> <h1>GeeksforGeeks</h1> <h3>Welcome to GeeksforGeeks</h3> <h4>Right Alignment of Image</h4> <!-- Keep align attribute value as right --> <img align="right" src="https://media.geeksforgeeks.org/wp- content/uploads/20190506164011/logo3.png" alt=""></body></html>
Output:
To align the image to the middle, set the align attribute value to “middle”.
Syntax:
<img align=”middle”>
Example:
HTML
<!DOCTYPE html><html lang="en"> <head> <meta charset="UTF-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <title>Image Alignment</title> </head> <body> <h1>GeeksforGeeks</h1> <h3>Welcome to GeeksforGeeks</h3> <h4>Middle Alignment of Image</h4> <!-- Keep align attribute value as "middle" --> <h4>GeeksforGeeks <img align="middle" src="https://media.geeksforgeeks.org/wp- content/uploads/20190506164011/logo3.png" alt="">GeeksforGeeks</h4></body> </html>
Output:
To align the image to the top, set the align attribute value to “top”.
Syntax:
<img align=”top”>
Example:
HTML
<!DOCTYPE html><html lang="en"> <head> <meta charset="UTF-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <title>Image Alignment</title> </head> <body> <h1>GeeksforGeeks</h1> <h3>Welcome to GeeksforGeeks</h3> <h4>Top Alignment of Image</h4> <!-- Keep align attribute value as "top" --> <h4>GeeksforGeeks <img align="top" src="https://media.geeksforgeeks.org/wp- content/uploads/20190506164011/logo3.png" alt="">GeeksforGeeks</h4></body> </html>
Output:
To align the image to the bottom, set the align attribute value to “bottom”.
Syntax:
<img align=”bottom”>
Example:
HTML
<!DOCTYPE html><html lang="en"> <head> <meta charset="UTF-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <title>Image Alignment</title> </head> <body> <h1>GeeksforGeeks</h1> <h3>Welcome to GeeksforGeeks</h3> <h4>Bottom Alignment of Image</h4> <!-- Keep align attribute value as "bottom" --> <h4>GeeksforGeeks <img align="bottom" src="https://media.geeksforgeeks.org/wp- content/uploads/20190506164011/logo3.png" alt="">GeeksforGeeks</h4> </body> </html>
Output:
Browser Support:
anikaseth98
Picked
class 7
School Learning
School Programming | [
{
"code": null,
"e": 28,
"s": 0,
"text": "\n21 Jul, 2021"
},
{
"code": null,
"e": 220,
"s": 28,
"text": "Image alignment is used to move the image at different locations (top, bottom, right, left, middle) in our web pages. We use <img> align attribute to align the image. It is an inline element."
},
{
"code": null,
"e": 229,
"s": 220,
"text": "Syntax: "
},
{
"code": null,
"e": 272,
"s": 229,
"text": "<img align=”left|right|middle|top|bottom”>"
},
{
"code": null,
"e": 290,
"s": 272,
"text": "Attribute Values:"
},
{
"code": null,
"e": 347,
"s": 290,
"text": "left: It is used for the alignment of image to the left."
},
{
"code": null,
"e": 406,
"s": 347,
"text": "right: It is used for the alignment of image to the right."
},
{
"code": null,
"e": 467,
"s": 406,
"text": "middle: It is used for the alignment of image to the middle."
},
{
"code": null,
"e": 522,
"s": 467,
"text": "top: It is used for the alignment of image to the top."
},
{
"code": null,
"e": 583,
"s": 522,
"text": "bottom: It is used for the alignment of image to the bottom."
},
{
"code": null,
"e": 645,
"s": 583,
"text": "To align the image to the left use attribute value as “left”."
},
{
"code": null,
"e": 653,
"s": 645,
"text": "Syntax:"
},
{
"code": null,
"e": 672,
"s": 653,
"text": "<img align=”left”>"
},
{
"code": null,
"e": 683,
"s": 672,
"text": "Example : "
},
{
"code": null,
"e": 688,
"s": 683,
"text": "HTML"
},
{
"code": "<!DOCTYPE html><html lang=\"en\"><head> <meta charset=\"UTF-8\"> <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\"> <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\"> <title>Left Alignment of Image</title></head><body> <h1>GeeksforGeeks</h1> <h3>Welcome to GeeksforGeeks</h3> <h4>Left Alignment of Image</h4> <!-- Keep align attribute value as left --> <img align=\"left\" src=\"https://media.geeksforgeeks.org/wp- content/uploads/20190506164011/logo3.png\" alt=\"\"></body></html>",
"e": 1236,
"s": 688,
"text": null
},
{
"code": null,
"e": 1247,
"s": 1236,
"text": " Output: "
},
{
"code": null,
"e": 1311,
"s": 1247,
"text": "To align the image to the right use attribute value as “right”."
},
{
"code": null,
"e": 1321,
"s": 1311,
"text": " Syntax: "
},
{
"code": null,
"e": 1342,
"s": 1321,
"text": "<img align=”right”> "
},
{
"code": null,
"e": 1353,
"s": 1342,
"text": "Example: "
},
{
"code": null,
"e": 1358,
"s": 1353,
"text": "HTML"
},
{
"code": "<!DOCTYPE html><html lang=\"en\"><head> <meta charset=\"UTF-8\"> <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\"> <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\"> <title>Right Alignment of Image</title></head><body> <h1>GeeksforGeeks</h1> <h3>Welcome to GeeksforGeeks</h3> <h4>Right Alignment of Image</h4> <!-- Keep align attribute value as right --> <img align=\"right\" src=\"https://media.geeksforgeeks.org/wp- content/uploads/20190506164011/logo3.png\" alt=\"\"></body></html>",
"e": 1911,
"s": 1358,
"text": null
},
{
"code": null,
"e": 1921,
"s": 1911,
"text": " Output:"
},
{
"code": null,
"e": 1986,
"s": 1921,
"text": "To align the image to the right use attribute value as “middle”."
},
{
"code": null,
"e": 1996,
"s": 1986,
"text": " Syntax: "
},
{
"code": null,
"e": 2017,
"s": 1996,
"text": "<img align=”middle”>"
},
{
"code": null,
"e": 2028,
"s": 2017,
"text": " Example: "
},
{
"code": null,
"e": 2033,
"s": 2028,
"text": "HTML"
},
{
"code": "<!DOCTYPE html><html lang=\"en\"> <head> <meta charset=\"UTF-8\"> <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\"> <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\"> <title>Image Alignment</title> </head> <body> <h1>GeeksforGeeks</h1> <h3>Welcome to GeeksforGeeks</h3> <h4>Middle Alignment of Image</h4> <!-- Keep align attribute value as \"middle\" --> <h4>GeeksforGeeks <img align=\"middle\" src=\"https://media.geeksforgeeks.org/wp- content/uploads/20190506164011/logo3.png\" alt=\"\">GeeksforGeeks</h4></body> </html>",
"e": 2598,
"s": 2033,
"text": null
},
{
"code": null,
"e": 2608,
"s": 2598,
"text": " Output:"
},
{
"code": null,
"e": 2670,
"s": 2608,
"text": "To align the image to the right use attribute value as “top”."
},
{
"code": null,
"e": 2680,
"s": 2670,
"text": " Syntax: "
},
{
"code": null,
"e": 2698,
"s": 2680,
"text": "<img align=”top”>"
},
{
"code": null,
"e": 2708,
"s": 2698,
"text": "Example: "
},
{
"code": null,
"e": 2713,
"s": 2708,
"text": "HTML"
},
{
"code": "<!DOCTYPE html><html lang=\"en\"> <head> <meta charset=\"UTF-8\"> <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\"> <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\"> <title>Image Alignment</title> </head> <body> <h1>GeeksforGeeks</h1> <h3>Welcome to GeeksforGeeks</h3> <h4>Top Alignment of Image</h4> <!-- Keep align attribute value as \"top\" --> <h4>GeeksforGeeks <img align=\"top\" src=\"https://media.geeksforgeeks.org/wp- content/uploads/20190506164011/logo3.png\" alt=\"\">GeeksforGeeks</h4></body> </html>",
"e": 3269,
"s": 2713,
"text": null
},
{
"code": null,
"e": 3280,
"s": 3269,
"text": " Output: "
},
{
"code": null,
"e": 3346,
"s": 3280,
"text": " To align the image to the right use attribute value as “bottom”."
},
{
"code": null,
"e": 3356,
"s": 3346,
"text": " Syntax: "
},
{
"code": null,
"e": 3377,
"s": 3356,
"text": "<img align=”bottom”>"
},
{
"code": null,
"e": 3388,
"s": 3377,
"text": " Example: "
},
{
"code": null,
"e": 3393,
"s": 3388,
"text": "HTML"
},
{
"code": "<!DOCTYPE html><html lang=\"en\"> <head> <meta charset=\"UTF-8\"> <meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\"> <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\"> <title>Image Alignment</title> </head> <body> <h1>GeeksforGeeks</h1> <h3>Welcome to GeeksforGeeks</h3> <h4>Bottom Alignment of Image</h4> <!-- Keep align attribute value as \"bottom\" --> <h4>GeeksforGeeks <img align=\"bottom\" src=\"https://media.geeksforgeeks.org/wp- content/uploads/20190506164011/logo3.png\" alt=\"\">GeeksforGeeks</h4> </body> </html>",
"e": 3959,
"s": 3393,
"text": null
},
{
"code": null,
"e": 3969,
"s": 3959,
"text": " Output:"
},
{
"code": null,
"e": 3987,
"s": 3969,
"text": " Browser Support:"
},
{
"code": null,
"e": 4001,
"s": 3989,
"text": "anikaseth98"
},
{
"code": null,
"e": 4008,
"s": 4001,
"text": "Picked"
},
{
"code": null,
"e": 4016,
"s": 4008,
"text": "class 7"
},
{
"code": null,
"e": 4032,
"s": 4016,
"text": "School Learning"
},
{
"code": null,
"e": 4051,
"s": 4032,
"text": "School Programming"
},
{
"code": null,
"e": 4149,
"s": 4051,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 4178,
"s": 4149,
"text": "Computer Security - Overview"
},
{
"code": null,
"e": 4197,
"s": 4178,
"text": "Backup and Restore"
},
{
"code": null,
"e": 4229,
"s": 4197,
"text": "How to Insert an Image in HTML?"
},
{
"code": null,
"e": 4248,
"s": 4229,
"text": "What is Photoshop?"
},
{
"code": null,
"e": 4307,
"s": 4248,
"text": "What is Hacking? Definition, Types, Identification, Safety"
},
{
"code": null,
"e": 4327,
"s": 4307,
"text": "Libraries in Python"
},
{
"code": null,
"e": 4362,
"s": 4327,
"text": "What are Different Output Devices?"
},
{
"code": null,
"e": 4411,
"s": 4362,
"text": "Generations of Computers - Computer Fundamentals"
},
{
"code": null,
"e": 4432,
"s": 4411,
"text": "GeeksforGeeks School"
}
]
|
Python – Merge two Pandas DataFrame | To merge two Pandas DataFrames, use the merge() function. Just pass both DataFrames as parameters to the merge() function.
At first, let us import the required library with alias “pd” −
import pandas as pd
Create the 1st DataFrame −
# Create DataFrame1
dataFrame1 = pd.DataFrame(
{
"Car": ['BMW', 'Lexus', 'Audi', 'Mustang', 'Bentley', 'Jaguar'],"Units": [100, 150, 110, 80, 110, 90]
}
)
Next, create the 2nd DataFrame −
# Create DataFrame2
dataFrame2 = pd.DataFrame(
{
"Car": ['BMW', 'Lexus', 'Audi', 'Mustang', 'Mercedes', 'Jaguar'],"Reg_Price": [7000, 1500, 5000, 8000, 9000, 6000]
}
)
Now, merge both the DataFrames using the merge() function −
mergedRes = pd.merge(dataFrame1, dataFrame2)
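By default, merge() joins on every column name the two DataFrames have in common (here, "Car") and performs an inner join, so only cars present in both DataFrames survive — which is why Bentley and Mercedes are missing from the merged output further below. As a minimal sketch (using the same dataFrame1 and dataFrame2 defined above), the key and join type can also be spelled out explicitly:
# Explicit join column and join type; how="outer" would keep
# Bentley and Mercedes as well, with NaN in the missing columns
mergedRes = pd.merge(dataFrame1, dataFrame2, on="Car", how="inner")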
Following is the code −
import pandas as pd
# Create DataFrame1
dataFrame1 = pd.DataFrame(
{
"Car": ['BMW', 'Lexus', 'Audi', 'Mustang', 'Bentley', 'Jaguar'],"Units": [100, 150, 110, 80, 110, 90]
}
)
print"DataFrame1 ...\n",dataFrame1
# Create DataFrame2
dataFrame2 = pd.DataFrame(
{
"Car": ['BMW', 'Lexus', 'Audi', 'Mustang', 'Mercedes', 'Jaguar'],"Reg_Price": [7000, 1500, 5000, 8000, 9000, 6000]
}
)
print"\nDataFrame2 ...\n",dataFrame2
# merge DataFrames
mergedRes = pd.merge(dataFrame1, dataFrame2)
print"\nMerged data frame...\n", mergedRes
This will produce the following output −
DataFrame1 ...
Car Units
0 BMW 100
1 Lexus 150
2 Audi 110
3 Mustang 80
4 Bentley 110
5 Jaguar 90
DataFrame2 ...
Car Reg_Price
0 BMW 7000
1 Lexus 1500
2 Audi 5000
3 Mustang 8000
4 Mercedes 9000
5 Jaguar 6000
Merged data frame...
Car Units Reg_Price
0 BMW 100 7000
1 Lexus 150 1500
2 Audi 110 5000
3 Mustang 80 8000
4 Jaguar 90 6000 | [
{
"code": null,
"e": 1313,
"s": 1187,
"text": "To merge two Pandas DataFrame, use the merge() function. Just set both the DataFrames as a parameter of the merge() function."
},
{
"code": null,
"e": 1376,
"s": 1313,
"text": "At first, let us import the required library with alias “pd” −"
},
{
"code": null,
"e": 1396,
"s": 1376,
"text": "import pandas as pd"
},
{
"code": null,
"e": 1423,
"s": 1396,
"text": "Create the 1st DataFrame −"
},
{
"code": null,
"e": 1590,
"s": 1423,
"text": "# Create DataFrame1\ndataFrame1 = pd.DataFrame(\n {\n \"Car\": ['BMW', 'Lexus', 'Audi', 'Mustang', 'Bentley', 'Jaguar'],\"Units\": [100, 150, 110, 80, 110, 90]\n }\n)"
},
{
"code": null,
"e": 1623,
"s": 1590,
"text": "Next, create the 2nd DataFrame −"
},
{
"code": null,
"e": 1804,
"s": 1623,
"text": "# Create DataFrame2\ndataFrame2 = pd.DataFrame(\n {\n \"Car\": ['BMW', 'Lexus', 'Audi', 'Mustang', 'Mercedes', 'Jaguar'],\"Reg_Price\": [7000, 1500, 5000, 8000, 9000, 6000]\n\n }\n)"
},
{
"code": null,
"e": 1864,
"s": 1804,
"text": "Now, merge both the DataFrames using the merge() function −"
},
{
"code": null,
"e": 1910,
"s": 1864,
"text": "mergedRes = pd.merge(dataFrame1, dataFrame2)\n"
},
{
"code": null,
"e": 1934,
"s": 1910,
"text": "Following is the code −"
},
{
"code": null,
"e": 2486,
"s": 1934,
"text": "import pandas as pd\n\n# Create DataFrame1\ndataFrame1 = pd.DataFrame(\n {\n \"Car\": ['BMW', 'Lexus', 'Audi', 'Mustang', 'Bentley', 'Jaguar'],\"Units\": [100, 150, 110, 80, 110, 90]\n }\n)\n\nprint\"DataFrame1 ...\\n\",dataFrame1\n\n# Create DataFrame2\ndataFrame2 = pd.DataFrame(\n {\n \"Car\": ['BMW', 'Lexus', 'Audi', 'Mustang', 'Mercedes', 'Jaguar'],\"Reg_Price\": [7000, 1500, 5000, 8000, 9000, 6000]\n\n }\n)\n\nprint\"\\nDataFrame2 ...\\n\",dataFrame2\n\n# merge DataFrames\nmergedRes = pd.merge(dataFrame1, dataFrame2)\nprint\"\\nMerged data frame...\\n\", mergedRes"
},
{
"code": null,
"e": 2527,
"s": 2486,
"text": "This will produce the following output −"
},
{
"code": null,
"e": 3067,
"s": 2527,
"text": "DataFrame1 ...\n Car Units\n0 BMW 100\n1 Lexus 150\n2 Audi 110\n3 Mustang 80\n4 Bentley 110\n5 Jaguar 90\n\nDataFrame2 ...\n Car Reg_Price\n0 BMW 7000\n1 Lexus 1500\n2 Audi 5000\n3 Mustang 8000\n4 Mercedes 9000\n5 Jaguar 6000\n\nMerged data frame...\n Car Units Reg_Price\n0 BMW 100 7000\n1 Lexus 150 1500\n2 Audi 110 5000\n3 Mustang 80 8000\n4 Jaguar 90 6000"
}
]
|
Unified Modeling Language (UML) | Class Diagrams | 17 Feb, 2021
What is UML? It is a general-purpose modeling language used to visualize a system. It is a graphical language that is standard in the software industry for specifying, visualizing, constructing, and documenting the artifacts of software systems, as well as for business modeling. Benefits of UML:
Simplifies complex software design, can also implement OOPs like a concept that is widely used.
It reduces thousands of words of explanation in a few graphical diagrams that may reduce time consumption to understand.
It makes communication more clear and more real.
It helps to acquire the entire system in a view.
It becomes very much easy for the software programmer to implement the actual demand once they have a clear picture of the problem.
Types of UML: The UML diagrams are divided into two parts: Structural UML diagrams and Behavioral UML diagrams which are listed below:
Structural UML diagrams:
Class diagram
Package diagram
Object diagram
Component diagram
Composite structure diagram
Deployment diagram
Behavioral UML diagrams:
Activity diagram
Sequence diagram
Use case diagram
State diagram
Communication diagram
Interaction overview diagram
Timing diagram
UML class diagrams: Class diagrams are the main building blocks of every object-oriented method. A class diagram can be used to show classes, relationships, interfaces, associations, and collaborations, and its notation is standardized in UML. Since classes are the building blocks of an application based on OOPs, the class diagram has an appropriate structure to represent classes, inheritance, relationships, and everything else that OOPs have in their context. It describes various kinds of objects and the static relationships between them. The main purposes of using class diagrams are:
This is the only UML that can appropriately depict various aspects of the OOPs concept.
Proper design and analysis of applications can be faster and efficient.
It is the base for deployment and component diagram.
There are several software available that can be used online and offline to draw these diagrams Like Edraw max, lucid chart, etc. There are several points to be kept in focus while drawing the class diagram. These can be said as its syntax:
Each class is represented by a rectangle having a subdivision of three compartments: name, attributes, and operations.
There are three types of modifiers that are used to decide the visibility of attributes and operations:
+ is used for public visibility (for everyone)
# is used for protected visibility (for friend and derived)
– is used for private visibility (for only me)
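Put together — a rough illustrative sketch of the notation, not a figure from the original article — a class box for the Animal class used below might look like this:
-----------------------------
|           Animal          |
-----------------------------
| - name : String           |
| - colour : String         |
-----------------------------
| + run() : void            |
-----------------------------
The top compartment holds the class name, the middle one the attributes with their visibility markers, and the bottom one the operations.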
Below is an example of an Animal class (parent) with two child classes, dog and cat, whose objects d1 and c1 inherit the properties of the parent class.
Where to Use Class Diagrams?
The class diagram is also considered as the foundation for component and deployment diagrams. Class diagrams are not only used to visualize the static view of the system but are also used to construct the executable code for forward and reverse engineering of any system.
The class diagram clearly shows the mapping with object-oriented languages such as Java, C++, etc. From practical experience, a class diagram is generally used for construction purpose.
In a nutshell, it can be said, class diagrams are used for −
Describing the static view of the system.
Showing the collaboration among the elements of the static view.
Describing the functionalities performed by the system.
Construction of software applications using object-oriented languages.
Java
import java.io.*;

class GFG {
    public static void main(String[] args)
    {
        dog d1 = new dog();
        d1.bark();
        d1.run();
        cat c1 = new cat();
        c1.meww();
    }
}

class Animal {
    public void run()
    {
        String name;
        String colour;
        System.out.println("animal is running");
    }
}

class dog extends Animal {
    public void bark()
    {
        System.out.println("wooh!wooh! dog is barking");
    }
    public void run()
    {
        System.out.println("dog is running");
    }
}

class cat extends Animal {
    public void meww()
    {
        System.out.println("meww! meww!");
    }
}
The process to design class diagram: In Edraw max (or any other platform where class diagrams can be drawn) follow the steps:
Open a blank document in the class diagram section.
From the library select the class diagram and click on create option.
Prepare the model of the class on the opened template page.
After editing according to requirement save it.
There are several diagram components that can be efficiently used while making/editing the model. These are as follows:
Class { name, attribute, method}
Objects
Interface
Relationships {inheritance, association, generalization}
Associations {bidirectional, unidirectional}
Class diagrams are one of the most widely used diagrams in the fields of software engineering as well as business modeling. The exact meaning of each arrow type is shown in the accompanying figure.
kushagrabansal8755
harshittiwari2020
Picked
UML
Design Pattern
UML
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Factory method design pattern in Java
Builder Design Pattern
Java Singleton Design Pattern Practices with Examples
Introduction of Programming Paradigms
Adapter Pattern
MVC Design Pattern
How to prevent Singleton Pattern from Reflection, Serialization and Cloning?
Abstract Factory Pattern
Monolithic vs Microservices architecture
Observer Pattern | Set 1 (Introduction) | [
{
"code": null,
"e": 52,
"s": 24,
"text": "\n17 Feb, 2021"
},
{
"code": null,
"e": 357,
"s": 52,
"text": "What is UML? It is the general-purpose modeling language used to visualize the system. It is a graphical language that is standard to the software industry for specifying, visualizing, constructing, and documenting the artifacts of the software systems, as well as for business modeling.Benefits of UML: "
},
{
"code": null,
"e": 453,
"s": 357,
"text": "Simplifies complex software design, can also implement OOPs like a concept that is widely used."
},
{
"code": null,
"e": 574,
"s": 453,
"text": "It reduces thousands of words of explanation in a few graphical diagrams that may reduce time consumption to understand."
},
{
"code": null,
"e": 623,
"s": 574,
"text": "It makes communication more clear and more real."
},
{
"code": null,
"e": 672,
"s": 623,
"text": "It helps to acquire the entire system in a view."
},
{
"code": null,
"e": 804,
"s": 672,
"text": "It becomes very much easy for the software programmer to implement the actual demand once they have a clear picture of the problem."
},
{
"code": null,
"e": 941,
"s": 804,
"text": "Types of UML: The UML diagrams are divided into two parts: Structural UML diagrams and Behavioral UML diagrams which are listed below: "
},
{
"code": null,
"e": 1218,
"s": 941,
"text": "Structural UML diagrams Class diagramPackage diagramObject diagramComponent diagramComposite structure diagramDeployment diagramBehavioral UML diagrams Activity diagramSequence diagramUse case diagramState diagramCommunication diagramInteraction overview diagramTiming diagram"
},
{
"code": null,
"e": 1347,
"s": 1218,
"text": "Structural UML diagrams Class diagramPackage diagramObject diagramComponent diagramComposite structure diagramDeployment diagram"
},
{
"code": null,
"e": 1361,
"s": 1347,
"text": "Class diagram"
},
{
"code": null,
"e": 1377,
"s": 1361,
"text": "Package diagram"
},
{
"code": null,
"e": 1392,
"s": 1377,
"text": "Object diagram"
},
{
"code": null,
"e": 1410,
"s": 1392,
"text": "Component diagram"
},
{
"code": null,
"e": 1438,
"s": 1410,
"text": "Composite structure diagram"
},
{
"code": null,
"e": 1457,
"s": 1438,
"text": "Deployment diagram"
},
{
"code": null,
"e": 1606,
"s": 1457,
"text": "Behavioral UML diagrams Activity diagramSequence diagramUse case diagramState diagramCommunication diagramInteraction overview diagramTiming diagram"
},
{
"code": null,
"e": 1623,
"s": 1606,
"text": "Activity diagram"
},
{
"code": null,
"e": 1640,
"s": 1623,
"text": "Sequence diagram"
},
{
"code": null,
"e": 1657,
"s": 1640,
"text": "Use case diagram"
},
{
"code": null,
"e": 1671,
"s": 1657,
"text": "State diagram"
},
{
"code": null,
"e": 1693,
"s": 1671,
"text": "Communication diagram"
},
{
"code": null,
"e": 1722,
"s": 1693,
"text": "Interaction overview diagram"
},
{
"code": null,
"e": 1737,
"s": 1722,
"text": "Timing diagram"
},
{
"code": null,
"e": 2341,
"s": 1737,
"text": "UML class diagrams: Class diagrams are the main building blocks of every object-oriented method. The class diagram can be used to show the classes, relationships, interface, association, and collaboration. UML is standardized in class diagrams. Since classes are the building block of an application that is based on OOPs, so as the class diagram has an appropriate structure to represent the classes, inheritance, relationships, and everything that OOPs have in their context. It describes various kinds of objects and the static relationship between them. The main purpose to use class diagrams are: "
},
{
"code": null,
"e": 2429,
"s": 2341,
"text": "This is the only UML that can appropriately depict various aspects of the OOPs concept."
},
{
"code": null,
"e": 2501,
"s": 2429,
"text": "Proper design and analysis of applications can be faster and efficient."
},
{
"code": null,
"e": 2554,
"s": 2501,
"text": "It is the base for deployment and component diagram."
},
{
"code": null,
"e": 2797,
"s": 2554,
"text": "There are several software available that can be used online and offline to draw these diagrams Like Edraw max, lucid chart, etc. There are several points to be kept in focus while drawing the class diagram. These can be said as its syntax: "
},
{
"code": null,
"e": 2914,
"s": 2797,
"text": "Each class is represented by a rectangle having a subdivision of three compartments name, attributes, and operation."
},
{
"code": null,
"e": 3169,
"s": 2914,
"text": "There are three types of modifiers that are used to decide the visibility of attributes and operations. + is used for public visibility(for everyone)# is used for protected visibility (for friend and derived)– is used for private visibility (for only me)"
},
{
"code": null,
"e": 3215,
"s": 3169,
"text": "+ is used for public visibility(for everyone)"
},
{
"code": null,
"e": 3275,
"s": 3215,
"text": "# is used for protected visibility (for friend and derived)"
},
{
"code": null,
"e": 3322,
"s": 3275,
"text": "– is used for private visibility (for only me)"
},
{
"code": null,
"e": 3475,
"s": 3322,
"text": "Below is the example of Animal class (parent) having two child class as dog and cat both have object d1, c1 inheriting properties of the parent class. "
},
{
"code": null,
"e": 3504,
"s": 3475,
"text": "Where to Use Class Diagrams?"
},
{
"code": null,
"e": 3776,
"s": 3504,
"text": "The class diagram is also considered as the foundation for component and deployment diagrams. Class diagrams are not only used to visualize the static view of the system but are also used to construct the executable code for forward and reverse engineering of any system."
},
{
"code": null,
"e": 3962,
"s": 3776,
"text": "The class diagram clearly shows the mapping with object-oriented languages such as Java, C++, etc. From practical experience, a class diagram is generally used for construction purpose."
},
{
"code": null,
"e": 4023,
"s": 3962,
"text": "In a nutshell, it can be said, class diagrams are used for −"
},
{
"code": null,
"e": 4255,
"s": 4023,
"text": "Describing the static view of the system.Showing the collaboration among the elements of the static view.Describing the functionalities performed by the system.Construction of software applications using object-oriented languages. "
},
{
"code": null,
"e": 4297,
"s": 4255,
"text": "Describing the static view of the system."
},
{
"code": null,
"e": 4362,
"s": 4297,
"text": "Showing the collaboration among the elements of the static view."
},
{
"code": null,
"e": 4418,
"s": 4362,
"text": "Describing the functionalities performed by the system."
},
{
"code": null,
"e": 4490,
"s": 4418,
"text": "Construction of software applications using object-oriented languages. "
},
{
"code": null,
"e": 4495,
"s": 4490,
"text": "Java"
},
{
"code": "import java.io.*; class GFG { public static void main(String[] args) { dog d1 = new dog(); d1.bark(); d1.run(); cat c1 = new cat(); c1.meww(); }} class Animal { public void run() { String name; String colour; System.out.println(\"animal is running\"); }} class dog extends Animal { public void bark() { System.out.println(\"wooh!wooh! dog is barking\"); } public void run() { System.out.println(\"dog is running\"); }} class cat extends Animal { public void meww() { System.out.println(\"meww! meww!\"); }}",
"e": 5117,
"s": 4495,
"text": null
},
{
"code": null,
"e": 5245,
"s": 5117,
"text": "The process to design class diagram: In Edraw max (or any other platform where class diagrams can be drawn) follow the steps: "
},
{
"code": null,
"e": 5297,
"s": 5245,
"text": "Open a blank document in the class diagram section."
},
{
"code": null,
"e": 5367,
"s": 5297,
"text": "From the library select the class diagram and click on create option."
},
{
"code": null,
"e": 5427,
"s": 5367,
"text": "Prepare the model of the class on the opened template page."
},
{
"code": null,
"e": 5475,
"s": 5427,
"text": "After editing according to requirement save it."
},
{
"code": null,
"e": 5597,
"s": 5475,
"text": "There are several diagram components that can be efficiently used while making/editing the model. These are as follows: "
},
{
"code": null,
"e": 5630,
"s": 5597,
"text": "Class { name, attribute, method}"
},
{
"code": null,
"e": 5638,
"s": 5630,
"text": "Objects"
},
{
"code": null,
"e": 5648,
"s": 5638,
"text": "Interface"
},
{
"code": null,
"e": 5705,
"s": 5648,
"text": "Relationships {inheritance, association, generalization}"
},
{
"code": null,
"e": 5750,
"s": 5705,
"text": "Associations {bidirectional, unidirectional}"
},
{
"code": null,
"e": 5909,
"s": 5750,
"text": "Class diagrams are one of the most widely used diagrams in the fields of software engineering as well as business modeling.The exact meaning of the arrows : "
},
{
"code": null,
"e": 5930,
"s": 5911,
"text": "kushagrabansal8755"
},
{
"code": null,
"e": 5948,
"s": 5930,
"text": "harshittiwari2020"
},
{
"code": null,
"e": 5955,
"s": 5948,
"text": "Picked"
},
{
"code": null,
"e": 5959,
"s": 5955,
"text": "UML"
},
{
"code": null,
"e": 5974,
"s": 5959,
"text": "Design Pattern"
},
{
"code": null,
"e": 5978,
"s": 5974,
"text": "UML"
},
{
"code": null,
"e": 6076,
"s": 5978,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 6114,
"s": 6076,
"text": "Factory method design pattern in Java"
},
{
"code": null,
"e": 6137,
"s": 6114,
"text": "Builder Design Pattern"
},
{
"code": null,
"e": 6191,
"s": 6137,
"text": "Java Singleton Design Pattern Practices with Examples"
},
{
"code": null,
"e": 6229,
"s": 6191,
"text": "Introduction of Programming Paradigms"
},
{
"code": null,
"e": 6245,
"s": 6229,
"text": "Adapter Pattern"
},
{
"code": null,
"e": 6264,
"s": 6245,
"text": "MVC Design Pattern"
},
{
"code": null,
"e": 6341,
"s": 6264,
"text": "How to prevent Singleton Pattern from Reflection, Serialization and Cloning?"
},
{
"code": null,
"e": 6366,
"s": 6341,
"text": "Abstract Factory Pattern"
},
{
"code": null,
"e": 6407,
"s": 6366,
"text": "Monolithic vs Microservices architecture"
}
]
|
Node.js Buffer.readInt32BE() Method | 13 Oct, 2021
The Buffer.readInt32BE() method is used to read a signed 32-bit integer from a buffer object at a given offset, reading the bytes in big-endian order.
Syntax:
buffer.readInt32BE( offset )
Parameters: This method accepts a single parameter offset which specifies the position of buffer object. It represents the number of bytes to skip before starting to read. The value of offset lies within the range 0 to buffer.length – 4. The default value is 0.
Return value: This method reads a signed 32-bit integer at the specified offset in big endian.
Example 1:
// Node.js program to demonstrate the
// buffer.readInt32BE() method

const buff = Buffer.from([1, 2, 3, 4, 5]);

console.log(buff.readInt32BE(0));

console.log(buff.readInt32BE(1));
Output:
16909060
33752069
Example 2:
// Node.js program to demonstrate the
// buffer.readInt32BE() method

const buff = Buffer.from([1, 2, 3, 4, 5]);

console.log(buff.readInt32BE(4))
Output:
buffer.js:831
throw new RangeError('Index out of range');
^
RangeError: Index out of range
at checkOffset (buffer.js:831:11)
at Buffer.readInt32BE (buffer.js:996:5)
at Object. (/home/cg/root/8545664/main.js:8:18)
at Module._compile (module.js:570:32)
at Object.Module._extensions..js (module.js:579:10)
at Module.load (module.js:487:32)
at tryModuleLoad (module.js:446:12)
at Function.Module._load (module.js:438:3)
at Module.runMain (module.js:604:10)
at run (bootstrap_node.js:389:7)
The above example throws an error because the offset is not in the valid range.
Note:
To get the values in Little endian format you can use Buffer.readInt32LE() method.
The above program will compile and run by using the node index.js command.
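For a quick comparison — a minimal sketch, assuming the same 5-byte buffer as in the examples above — readInt32BE() and readInt32LE() interpret the same bytes in opposite orders:

// Big endian reads the bytes as 0x01020304,
// little endian reads the same bytes as 0x04030201
const buff = Buffer.from([1, 2, 3, 4, 5]);

console.log(buff.readInt32BE(0)); // 16909060
console.log(buff.readInt32LE(0)); // 67305985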
Reference: https://nodejs.org/api/buffer.html#buffer_buf_readint32be_offset
Node.js-Buffer-module
Picked
Node.js
Technical Scripter
Web Technologies
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Node.js fs.writeFile() Method
How to install the previous version of node.js and npm ?
Difference between promise and async await in Node.js
Mongoose | findByIdAndUpdate() Function
Installation of Node.js on Windows
Top 10 Projects For Beginners To Practice HTML and CSS Skills
Difference between var, let and const keywords in JavaScript
How to insert spaces/tabs in text using HTML/CSS?
How to fetch data from an API in ReactJS ?
Differences between Functional Components and Class Components in React | [
{
"code": null,
"e": 28,
"s": 0,
"text": "\n13 Oct, 2021"
},
{
"code": null,
"e": 168,
"s": 28,
"text": "The Buffer.readInt32BE() method is used to read 32 bit integer from a buffer object at a given offset and returns the result in Big endian."
},
{
"code": null,
"e": 176,
"s": 168,
"text": "Syntax:"
},
{
"code": null,
"e": 205,
"s": 176,
"text": "buffer.readInt32BE( offset )"
},
{
"code": null,
"e": 467,
"s": 205,
"text": "Parameters: This method accepts a single parameter offset which specifies the position of buffer object. It represents the number of bytes to skip before starting to read. The value of offset lies within the range 0 to buffer.length – 4. The default value is 0."
},
{
"code": null,
"e": 563,
"s": 467,
"text": "Return value: This method reads an signed 32-bit integer at the specified offset in Big endian."
},
{
"code": null,
"e": 574,
"s": 563,
"text": "Example 1:"
},
{
"code": "// Node.js program to demonstrate the// buffer.readInt32BE() method const buff = Buffer.from([1, 2, 3, 4, 5]); console.log(buff.readInt32BE(0)); console.log(buff.readInt32BE(1));",
"e": 755,
"s": 574,
"text": null
},
{
"code": null,
"e": 763,
"s": 755,
"text": "Output:"
},
{
"code": null,
"e": 782,
"s": 763,
"text": "16909060\n33752069\n"
},
{
"code": null,
"e": 793,
"s": 782,
"text": "Example 2:"
},
{
"code": "// Node.js program to demonstrate the// buffer.readInt32BE() method const buff = Buffer.from([1, 2, 3, 4, 5]); console.log(buff.readInt32BE(4))",
"e": 938,
"s": 793,
"text": null
},
{
"code": null,
"e": 946,
"s": 938,
"text": "Output:"
},
{
"code": null,
"e": 1482,
"s": 946,
"text": "buffer.js:831\n throw new RangeError('Index out of range');\n ^\n\nRangeError: Index out of range\n at checkOffset (buffer.js:831:11)\n at Buffer.readInt32BE (buffer.js:996:5)\n at Object. (/home/cg/root/8545664/main.js:8:18)\n at Module._compile (module.js:570:32)\n at Object.Module._extensions..js (module.js:579:10)\n at Module.load (module.js:487:32)\n at tryModuleLoad (module.js:446:12)\n at Function.Module._load (module.js:438:3)\n at Module.runMain (module.js:604:10)\n at run (bootstrap_node.js:389:7)\n"
},
{
"code": null,
"e": 1567,
"s": 1482,
"text": "The above example shows the error because its parameters are not in the valid range."
},
{
"code": null,
"e": 1573,
"s": 1567,
"text": "Note:"
},
{
"code": null,
"e": 1656,
"s": 1573,
"text": "To get the values in Little endian format you can use Buffer.readInt32LE() method."
},
{
"code": null,
"e": 1731,
"s": 1656,
"text": "The above program will compile and run by using the node index.js command."
},
{
"code": null,
"e": 1807,
"s": 1731,
"text": "Reference: https://nodejs.org/api/buffer.html#buffer_buf_readint32be_offset"
},
{
"code": null,
"e": 1829,
"s": 1807,
"text": "Node.js-Buffer-module"
},
{
"code": null,
"e": 1836,
"s": 1829,
"text": "Picked"
},
{
"code": null,
"e": 1844,
"s": 1836,
"text": "Node.js"
},
{
"code": null,
"e": 1863,
"s": 1844,
"text": "Technical Scripter"
},
{
"code": null,
"e": 1880,
"s": 1863,
"text": "Web Technologies"
},
{
"code": null,
"e": 1978,
"s": 1880,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 2008,
"s": 1978,
"text": "Node.js fs.writeFile() Method"
},
{
"code": null,
"e": 2065,
"s": 2008,
"text": "How to install the previous version of node.js and npm ?"
},
{
"code": null,
"e": 2119,
"s": 2065,
"text": "Difference between promise and async await in Node.js"
},
{
"code": null,
"e": 2159,
"s": 2119,
"text": "Mongoose | findByIdAndUpdate() Function"
},
{
"code": null,
"e": 2194,
"s": 2159,
"text": "Installation of Node.js on Windows"
},
{
"code": null,
"e": 2256,
"s": 2194,
"text": "Top 10 Projects For Beginners To Practice HTML and CSS Skills"
},
{
"code": null,
"e": 2317,
"s": 2256,
"text": "Difference between var, let and const keywords in JavaScript"
},
{
"code": null,
"e": 2367,
"s": 2317,
"text": "How to insert spaces/tabs in text using HTML/CSS?"
},
{
"code": null,
"e": 2410,
"s": 2367,
"text": "How to fetch data from an API in ReactJS ?"
}
]
|
Database Operations in HIVE Using CLOUDERA – VMWARE Work Station | 17 Jan, 2021
We are going to create a database and create a table in that database, covering basic database operations in Hive using the Cloudera VMware workstation. Let's discuss them one by one.
Introduction:
Hive is an ETL tool that provides an SQL-like interface between the user and the Hadoop Distributed File System (HDFS), through which it integrates with Hadoop.
It is built on top of Hadoop.
It facilitates reading, writing, and handling large datasets that are stored in distributed storage and queried using Structured Query Language (SQL) syntax.
Requirements:
You need to install Cloudera in VMware Workstation.
Link to download for Windows – https://www.cloudera.com/downloads/cdh.html
Cloudera:
Cloudera enables you to deploy and manage Apache Hadoop, manipulate and analyze your data, and keep that data secure and protected.
Step 1: On your desktop VMware workstation is available. Open that.
Step 2: Now you will get an interface. Click on open a virtual device.
Step 3: Select path – In this step, you have to select the path and file where you have downloaded the file.
Step 4: Now your virtual environment is creating.
Step 5: You can view your virtual machine details in this path.
Step 6: Now open the terminal to get started with hive commands.
Step 7: Now type hive in the terminal. It will give output as follows.
[cloudera@quickstart ~]$ hive
2020-12-09 20:59:24,314 WARN [main] mapreduce.TableMapReduceUtil:
The hbase-prefix-tree module jar containing PrefixTreeCodec is not present. Continuing without it.
Logging initialized using configuration in file:/etc/hive/conf.dist/hive-log4j.properties
WARNING: Hive CLI is deprecated and migration to Beeline is recommended.
hive>
Step 8: Now, you are all set and ready to start typing your hive commands.
1. Create a database
Syntax:
create database database_name;
Example:
create database geeksportal;
Output:
2. Creating a table
Syntax:
create table database_name.table_name(columns);
Example:
create table geeksportal.geekdata(id int,name string);
Here id (int) and name (string) are the two columns.
Output :
3. Display Database
Syntax:
show databases;
Output: Display the databases created.
4. Describe Database
Syntax:
describe database database_name;
Example:
describe database geeksportal;
Output: Display the HDFS path of a particular database.
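As a natural next step — a minimal sketch, assuming the geeksportal database and geekdata table created above — you can switch into the database and inspect the table with standard HiveQL commands:

-- select the database created earlier
use geeksportal;

-- list the tables inside it
show tables;

-- show the columns of the table
describe geeksportal.geekdata;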
Apache-Hive
Hadoop
Hadoop
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
How to Create Table in Hive?
What is Schema On Read and Schema On Write in Hadoop?
What is Hadoop Streaming?
MapReduce - Understanding With Real-Life Example
Apache Hive
Hadoop - HDFS (Hadoop Distributed File System)
Hive - Alter Table
Import and Export Data using SQOOP
Difference Between Hadoop and Apache Spark
Difference Between Hadoop 2.x vs Hadoop 3.x | [
{
"code": null,
"e": 28,
"s": 0,
"text": "\n17 Jan, 2021"
},
{
"code": null,
"e": 205,
"s": 28,
"text": "We are going to create a database and create a table in our database. And will cover Database operations in HIVE Using CLOUDERA – VMWARE Work Station. Let’s discuss one by one."
},
{
"code": null,
"e": 219,
"s": 205,
"text": "Introduction:"
},
{
"code": null,
"e": 356,
"s": 219,
"text": "Hive is an ETL tool that provides an SQL-like interface between the user and the Hadoop distributed file system which integrates Hadoop."
},
{
"code": null,
"e": 386,
"s": 356,
"text": "It is built on top of Hadoop."
},
{
"code": null,
"e": 535,
"s": 386,
"text": "It facilitates reading, writing, and handling wide datasets that stored in distributed storage and queried by Structure Query Language (SQL) syntax."
},
{
"code": null,
"e": 549,
"s": 535,
"text": "Requirements:"
},
{
"code": null,
"e": 596,
"s": 549,
"text": "Need to install Cloudera – vmware workstation."
},
{
"code": null,
"e": 670,
"s": 596,
"text": "Link to download for windows –https://www.cloudera.com/downloads/cdh.html"
},
{
"code": null,
"e": 680,
"s": 670,
"text": "Cloudera:"
},
{
"code": null,
"e": 812,
"s": 680,
"text": "Cloudera enables you to deploy and manage Apache Hadoop, manipulate and analyze your data, and keep that data secure and protected."
},
{
"code": null,
"e": 880,
"s": 812,
"text": "Step 1: On your desktop VMware workstation is available. Open that."
},
{
"code": null,
"e": 951,
"s": 880,
"text": "Step 2: Now you will get an interface. Click on open a virtual device."
},
{
"code": null,
"e": 1060,
"s": 951,
"text": "Step 3: Select path – In this step, you have to select the path and file where you have downloaded the file."
},
{
"code": null,
"e": 1110,
"s": 1060,
"text": "Step 4: Now your virtual environment is creating."
},
{
"code": null,
"e": 1174,
"s": 1110,
"text": "Step 5: You can view your virtual machine details in this path."
},
{
"code": null,
"e": 1239,
"s": 1174,
"text": "Step 6: Now open the terminal to get started with hive commands."
},
{
"code": null,
"e": 1310,
"s": 1239,
"text": "Step 7: Now type hive in the terminal. It will give output as follows."
},
{
"code": null,
"e": 1679,
"s": 1310,
"text": "[cloudera@quickstart ~]$ hive\n2020-12-09 20:59:24,314 WARN [main] mapreduce.TableMapReduceUtil: \nThe hbase-prefix-tree module jar containing PrefixTreeCodec is not present. Continuing without it.\nLogging initialized using configuration in file:/etc/hive/conf.dist/hive-log4j.properties\nWARNING: Hive CLI is deprecated and migration to Beeline is recommended.\nhive> "
},
{
"code": null,
"e": 1754,
"s": 1679,
"text": "Step 8: Now, you are all set and ready to start typing your hive commands."
},
{
"code": null,
"e": 1775,
"s": 1754,
"text": "1. Create a database"
},
{
"code": null,
"e": 1783,
"s": 1775,
"text": "Syntax:"
},
{
"code": null,
"e": 1814,
"s": 1783,
"text": "create database database_name;"
},
{
"code": null,
"e": 1823,
"s": 1814,
"text": "Example:"
},
{
"code": null,
"e": 1852,
"s": 1823,
"text": "create database geeksportal;"
},
{
"code": null,
"e": 1860,
"s": 1852,
"text": "Output:"
},
{
"code": null,
"e": 1880,
"s": 1860,
"text": "2. Creating a table"
},
{
"code": null,
"e": 1888,
"s": 1880,
"text": "Syntax:"
},
{
"code": null,
"e": 1924,
"s": 1888,
"text": "create database.tablename(columns);"
},
{
"code": null,
"e": 1933,
"s": 1924,
"text": "Example:"
},
{
"code": null,
"e": 1988,
"s": 1933,
"text": "create table geeksportal.geekdata(id int,name string);"
},
{
"code": null,
"e": 2028,
"s": 1988,
"text": "Here id and string are the two columns."
},
{
"code": null,
"e": 2037,
"s": 2028,
"text": "Output :"
},
{
"code": null,
"e": 2057,
"s": 2037,
"text": "3. Display Database"
},
{
"code": null,
"e": 2065,
"s": 2057,
"text": "Syntax:"
},
{
"code": null,
"e": 2081,
"s": 2065,
"text": "show databases;"
},
{
"code": null,
"e": 2120,
"s": 2081,
"text": "Output: Display the databases created."
},
{
"code": null,
"e": 2141,
"s": 2120,
"text": "4. Describe Database"
},
{
"code": null,
"e": 2149,
"s": 2141,
"text": "Syntax:"
},
{
"code": null,
"e": 2182,
"s": 2149,
"text": "describe database database_name;"
},
{
"code": null,
"e": 2191,
"s": 2182,
"text": "Example:"
},
{
"code": null,
"e": 2223,
"s": 2191,
"text": "describe database geeksportal;"
},
{
"code": null,
"e": 2279,
"s": 2223,
"text": "Output: Display the HDFS path of a particular database."
},
{
"code": null,
"e": 2291,
"s": 2279,
"text": "Apache-Hive"
},
{
"code": null,
"e": 2298,
"s": 2291,
"text": "Hadoop"
},
{
"code": null,
"e": 2305,
"s": 2298,
"text": "Hadoop"
},
{
"code": null,
"e": 2403,
"s": 2305,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 2432,
"s": 2403,
"text": "How to Create Table in Hive?"
},
{
"code": null,
"e": 2486,
"s": 2432,
"text": "What is Schema On Read and Schema On Write in Hadoop?"
},
{
"code": null,
"e": 2512,
"s": 2486,
"text": "What is Hadoop Streaming?"
},
{
"code": null,
"e": 2561,
"s": 2512,
"text": "MapReduce - Understanding With Real-Life Example"
},
{
"code": null,
"e": 2573,
"s": 2561,
"text": "Apache Hive"
},
{
"code": null,
"e": 2620,
"s": 2573,
"text": "Hadoop - HDFS (Hadoop Distributed File System)"
},
{
"code": null,
"e": 2639,
"s": 2620,
"text": "Hive - Alter Table"
},
{
"code": null,
"e": 2674,
"s": 2639,
"text": "Import and Export Data using SQOOP"
},
{
"code": null,
"e": 2717,
"s": 2674,
"text": "Difference Between Hadoop and Apache Spark"
}
]
|
Extracting email addresses using regular expressions in Python | 29 Dec, 2020
Let us suppose a situation in which you have to read some specific data like phone numbers, email addresses, dates, a collection of words, etc. How can you do this in a very efficient manner? The best way to do this is with a regular expression.
Let's take an example in which we have to find only the email addresses in the given input using a regular expression. Examples:
Input : Hello [email protected] Rohit [email protected]
Output : [email protected] [email protected]
Here we have only selected email from the given input string.
Input : My 2 favourite numbers are 7 and 10
Output :2 7 10
Here we have selected only digits.
Regular Expression – A regular expression is a sequence of characters mainly used to find and replace patterns in a string or file. So we can say that the task of searching and extracting is so common that Python has a very powerful library for regular expressions that handles many of these tasks quite elegantly.
# Python program to extract numeric digits
# from a string by regular expression...

# Importing module required for regular
# expressions
import re

# Example String
s = 'My 2 favourite numbers are 7 and 10'

# findall function to select all digits from 0
# to 9 [0-9] for numeric letters in the String
# + repeats a character one or more times
lst = re.findall('[0-9]+', s)

# Printing of List
print(lst)
['2', '7', '10']
# Python program to extract emails From
# the String By Regular Expression.

# Importing module required for regular
# expressions
import re

# Example string
s = """Hello from [email protected] to [email protected] about the meeting @2PM"""

# \S matches any non-whitespace character
# @ for as in the Email
# + for Repeats a character one or more times
lst = re.findall('\S+@\S+', s)

# Printing of List
print(lst)
['[email protected]', '[email protected]']
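The \S+@\S+ pattern grabs every non-whitespace character around the @, so punctuation that directly follows an address (a comma or a full stop, for example) would be captured too. A slightly stricter sketch — my own variation with made-up sample text, not part of the original article — limits the allowed characters on both sides of the @:

# Allow word characters, dots and dashes around the @, and require
# a dot followed by word characters at the end, so trailing
# punctuation such as ',' or '.' is left out of the match
import re

s = 'Contact [email protected], or [email protected].'
lst = re.findall(r'[\w.-]+@[\w.-]+\.\w+', s)
print(lst)

This should print ['[email protected]', '[email protected]'].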
For more details:
Regular Expression in Python with Examples | Set 1
Regular Expressions in Python | Set 2 (Search, Match and Find All)
Python Docs for Regular Expression
Python Regex-programs
python-regex
Python
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Iterate over a list in Python
How to iterate through Excel rows in Python?
Enumerate() in Python
Python Dictionary
Deque in Python
Defaultdict in Python
Stack in Python
Queue in Python
sum() function in Python
Different ways to create Pandas Dataframe | [
{
"code": null,
"e": 52,
"s": 24,
"text": "\n29 Dec, 2020"
},
{
"code": null,
"e": 286,
"s": 52,
"text": "Let suppose a situation in which you have to read some specific data like phone numbers, email addresses, dates, a collection of words etc. How can you do this in a very efficient manner?The Best way to do this by Regular Expression."
},
{
"code": null,
"e": 399,
"s": 286,
"text": "Let take an example in which we have to find out only email from the given input by Regular Expression.Examples:"
},
{
"code": null,
"e": 671,
"s": 399,
"text": "Input : Hello [email protected] Rohit [email protected]\nOutput : [email protected] [email protected]\nHere we have only selected email from the given input string.\n\nInput : My 2 favourite numbers are 7 and 10\nOutput :2 7 10\nHere we have selected only digits.\n"
},
{
"code": null,
"e": 986,
"s": 671,
"text": "Regular Expression–Regular expression is a sequence of character(s) mainly used to find and replace patterns in a string or file.So we can say that the task of searching and extracting is so common that Python has a very powerful library called regular expressions that handles many of these tasks quite elegantly."
},
{
"code": "# Python program to extract numeric digit # from A string by regular expression... # Importing module required for regular# expressionsimport re # Example String s = 'My 2 favourite numbers are 7 and 10' # find all function to select all digit from 0 # to 9 [0-9] for numeric Letter in the String# + for repeats a character one or more timeslst = re.findall('[0-9]+', s) # Printing of Listprint(lst)",
"e": 1398,
"s": 986,
"text": null
},
{
"code": null,
"e": 1416,
"s": 1398,
"text": "['2', '7', '10']\n"
},
{
"code": "# Python program to extract emails From # the String By Regular Expression. # Importing module required for regular # expressions import re # Example string s = \"\"\"Hello from [email protected] to [email protected] about the meeting @2PM\"\"\" # \\S matches any non-whitespace character # @ for as in the Email # + for Repeats a character one or more times lst = re.findall('\\S+@\\S+', s) # Printing of List print(lst) ",
"e": 1853,
"s": 1416,
"text": null
},
{
"code": null,
"e": 1902,
"s": 1853,
"text": "['[email protected]', '[email protected]']\n"
},
{
"code": null,
"e": 1920,
"s": 1902,
"text": "For more details:"
},
{
"code": null,
"e": 1971,
"s": 1920,
"text": "Regular Expression in Python with Examples | Set 1"
},
{
"code": null,
"e": 2038,
"s": 1971,
"text": "Regular Expressions in Python | Set 2 (Search, Match and Find All)"
},
{
"code": null,
"e": 2073,
"s": 2038,
"text": "Python Docs for Regular Expression"
},
{
"code": null,
"e": 2095,
"s": 2073,
"text": "Python Regex-programs"
},
{
"code": null,
"e": 2108,
"s": 2095,
"text": "python-regex"
},
{
"code": null,
"e": 2115,
"s": 2108,
"text": "Python"
},
{
"code": null,
"e": 2213,
"s": 2115,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 2243,
"s": 2213,
"text": "Iterate over a list in Python"
},
{
"code": null,
"e": 2288,
"s": 2243,
"text": "How to iterate through Excel rows in Python?"
},
{
"code": null,
"e": 2310,
"s": 2288,
"text": "Enumerate() in Python"
},
{
"code": null,
"e": 2328,
"s": 2310,
"text": "Python Dictionary"
},
{
"code": null,
"e": 2344,
"s": 2328,
"text": "Deque in Python"
},
{
"code": null,
"e": 2366,
"s": 2344,
"text": "Defaultdict in Python"
},
{
"code": null,
"e": 2382,
"s": 2366,
"text": "Stack in Python"
},
{
"code": null,
"e": 2398,
"s": 2382,
"text": "Queue in Python"
},
{
"code": null,
"e": 2423,
"s": 2398,
"text": "sum() function in Python"
}
]
|
Line Plot using ggplot2 in R | 06 Jun, 2021
In a line graph, we have the horizontal axis value through which the line will be ordered and connected using the vertical axis values. We are going to use the R package ggplot2 which has several layers in it.
First, you need to install the ggplot2 package if it is not previously installed in R Studio.
geom_line connects the observations in the order of the variable on the horizontal (x) axis.
Syntax:
geom_line(mapping=NULL, data=NULL, stat=”identity”, position=”identity”,...)
geom_path connects the observations in the same order as they appear in the data.
Syntax:
geom_path(mapping=NULL, data=NULL, stat=”identity”, position=”identity”,...)
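To see the difference between the two — a minimal sketch using a small made-up data frame, assuming ggplot2 is loaded — note how the connection order changes when the rows are not sorted by x:

# Rows deliberately out of order along x
df <- data.frame(x = c(3, 1, 2), y = c(30, 10, 20))

# geom_line() orders the points by x before connecting them
ggplot(df, aes(x, y)) + geom_line() + geom_point()

# geom_path() connects the points in row order: 3 -> 1 -> 2
ggplot(df, aes(x, y)) + geom_path() + geom_point()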
In this section, we will be dealing with a single line chart and will also discuss various attributes that help its appearance.
Data set in Use:
R
# Create data for chart
val <- data.frame(course=c('DSA','C++','R','Python'),
                  num=c(77,55,80,60))
head(val)
Output:
Basic Line Plot
For a simple line chart data is roughly passed to the function with some required attributes.
Example:
R
library(ggplot2)

# Create data for chart
val <- data.frame(course=c('DSA','C++','R','Python'),
                  num=c(77,55,80,60))

# Basic Line
ggplot(data=val, aes(x=course, y=num, group=1)) +
  geom_line() +
  geom_point()
Output:
Line Type
For this, the linetype argument is used. ggplot2 provides various line types, for example: dotted, twodash, dashed, etc. This attribute is passed with the required value.
Example:
R
library(ggplot2)

# Create data for chart
val <- data.frame(course=c('DSA','C++','R','Python'),
                  num=c(77,55,80,60))

# Format the line type
ggplot(data=val, aes(x=course, y=num, group=1)) +
  geom_line(linetype = "dotted") +
  geom_point()
Output:
Line Color
The command color is used and the desired color is written in double quotes [” “] inside geom_line( ).
Example:
R
library(ggplot2)

# Create data for chart
val <- data.frame(course=c('DSA','C++','R','Python'),
                  num=c(77,55,80,60))

# Format the line color
ggplot(data=val, aes(x=course, y=num, group=1)) +
  geom_line(color="green") +
  geom_point()
Output:
Line Size
The line size can be changed using the command size and providing the value of the size inside geom_line( ).
Example:
R
library(ggplot2)

# Create data for chart
val <- data.frame(course=c('DSA','C++','R','Python'),
                  num=c(77,55,80,60))

# Format the line size
ggplot(data=val, aes(x=course, y=num, group=1)) +
  geom_line(color="green", size=1.5) +
  geom_point()
Output:
ggtitle() with an appropriate title can be used to add the chart title, and labs() with appropriate input can be used to add the axis titles.
Example:
R
library(ggplot2)

# Create data for chart
val <- data.frame(course=c('DSA','C++','R','Python'),
                  num=c(77,55,80,60))

# Adding titles
line <- ggplot(data=val, aes(x=course, y=num, group=1)) +
  geom_line(color="green", size=1.5) +
  geom_point()

line + ggtitle("Courses vs Students Enrolled in GeeksforGeeks") +
  labs(x="Courses", y="Number of Students")
Output:
Use the theme_<name>() functions to add a theme. There are a lot of themes available in the ggplot2 library, for example: theme_dark(), theme_classic(), etc. Values can be provided as desired.
Example:
R
library(ggplot2)

# Create data for chart
val <- data.frame(course=c('DSA','C++','R','Python'),
                  num=c(77,55,80,60))

# Adding titles
line <- ggplot(data=val, aes(x=course, y=num, group=1)) +
  geom_line(color="green", size=1.5) +
  geom_point()

line + ggtitle("Courses vs Students Enrolled in GeeksforGeeks") +
  labs(x="Courses", y="Number of Students") +
  theme_dark()
Output:
To add an arrow to the line, the grid library is used. Then use arrow() inside geom_line() to add an arrow. It is also possible to change the parameters of the arrow, such as angle, type, and ends.
Example:
R
library(ggplot2)
library(grid)

# Create data for chart
val <- data.frame(course=c('DSA','C++','R','Python'),
                  num=c(77,55,80,60))

# Adding an arrow
ggplot(data=val, aes(x=course, y=num, group=1)) +
  geom_line(arrow=arrow()) +
  geom_point()

# Adding closed arrow on both ends of the line
arr = arrow(angle = 20, ends = "both", type = "closed")
ggplot(data=val, aes(x=course, y=num, group=1)) +
  geom_line(arrow=arr) +
  geom_point()
Output:
Use the label aesthetic to take the values from the y-axis and nudge_y to position the data labels.
Example:
R
library(ggplot2)

# Create data for chart
val <- data.frame(course=c('DSA','C++','R','Python'),
                  num=c(77,55,80,60))

# Adding data label
ggplot(data=val, aes(x=course, y=num, group=1, label=num)) +
  geom_line() +
  geom_point() +
  geom_text(nudge_y = 2)
Output:
Use xlim( ) to change the x-axis scale and ylim( ) to change the y-axis scale and pass appropriate values to these.
Syntax:
xlim(min,max)
ylim(min,max)
Example:
R
library(ggplot2)

# Create data for chart
val <- data.frame(course=c('DSA','C++','R','Python'),
                  num=c(77,55,80,60))

# Storing the line plot
ln <- ggplot(data=val, aes(x=course, y=num, group=1)) +
  geom_line(color="green", size=2) +
  geom_point()

# y-axis limits
ln + ylim(0,100) + theme_dark()
Output:
For plotting multiple lines in one chart, nothing changes except that the group attribute has to be set to the name of the column on the basis of which the different lines will be drawn.
Example:
R
library(ggplot2)

# Inserting data
vacc <- data.frame(type=rep(c("Covishield", "Covaxin"), each=2),
                   dose=rep(c("D1", "D2"),2),
                   slots=c(33, 45, 66, 50))

# Plotting line with multiple groups
ggplot(data=vacc, aes(x=dose, y=slots, group=type)) +
  geom_line(linetype="longdash", color="green", size=1.5) +
  geom_point(color="red", size=5) +
  theme_dark()
Output:
You can also add title, axes title, data labels in the above line plot as discussed in the previous section.
Using separate line types based on groups
To differentiate the lines by line type, provide the linetype aesthetic in geom_line() and the shape aesthetic for the legend points in geom_point().
Example:
R
library(ggplot2)

# Inserting data
vacc <- data.frame(type=rep(c("Covishield", "Covaxin"), each=2),
                   dose=rep(c("D1", "D2"),2),
                   slots=c(33, 45, 66, 50))

# Changing the line type on the basis of groups
ggplot(vacc, aes(x=dose, y=slots, group=type)) +
  geom_line(aes(linetype=type)) +
  geom_point() +
  theme_classic()

# Changing the line type on the basis of groups and also the shape of points
ggplot(vacc, aes(x=dose, y=slots, group=type)) +
  geom_line(aes(linetype=type)) +
  geom_point(aes(shape=type)) +
  theme_classic()
Output:
Assigning different line colors on the basis of groups
The following code automatically controls color using the level of the variable “type”. It will assign separate colors to each line.
Example:
R
library(ggplot2)

# Inserting data
vacc <- data.frame(type=rep(c("Covishield", "Covaxin"), each=2),
                   dose=rep(c("D1", "D2"),2),
                   slots=c(33, 45, 66, 50))

# Change line color by group type of vaccine
ln <- ggplot(vacc, aes(x=dose, y=slots, group=type)) +
  geom_line(aes(color=type)) +
  geom_point(aes(color=type)) +
  theme_classic()
ln
Output:
To enter color manually you can use :
scale_color_brewer( ) : It uses different color palettes from the RColorBrewer package. It has various color palettes.
scale_color_manual( ) : It is used to manually add discrete colors.
Example:
R
library(ggplot2)

# Inserting data
vacc <- data.frame(type=rep(c("Covishield", "Covaxin"), each=2),
                   dose=rep(c("D1", "D2"),2),
                   slots=c(33, 45, 66, 50))

# Change line color by group type of vaccine
ln <- ggplot(vacc, aes(x=dose, y=slots, group=type)) +
  geom_line(aes(color=type)) +
  geom_point(aes(color=type)) +
  theme_classic()

# Adding line colors using brewer color palette
ln + scale_color_brewer(palette="Set2")

# Adding line colors using color manual
ln + scale_color_manual(values=c("green", "blue"))
Output:
Changing the position of legends
For changing the legend position, the legend.position argument of the theme() function is passed with the required value.
Syntax:
theme(legend.position=”pos”)
pos: It can be top, right, bottom, left, or none
Example:
R
library(ggplot2)

# Inserting data
vacc <- data.frame(type=rep(c("Covishield", "Covaxin"), each=2),
                   dose=rep(c("D1", "D2"),2),
                   slots=c(33, 45, 66, 50))

# Change line color by group type of vaccine
ln <- ggplot(vacc, aes(x=dose, y=slots, group=type)) +
  geom_line(aes(color=type)) +
  geom_point(aes(color=type)) +
  theme_classic()

ln <- ln + scale_color_brewer(palette="Dark2") +
  theme_classic()

# Legend at top
ln + theme(legend.position="top")

# Legend at left
ln + theme(legend.position="left")

# Remove legend
ln + theme(legend.position="none")
Output:
Picked
R-ggplot
R Language
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Change Color of Bars in Barchart using ggplot2 in R
How to Split Column Into Multiple Columns in R DataFrame?
Group by function in R using Dplyr
How to Change Axis Scales in R Plots?
How to filter R DataFrame by values in a column?
R - if statement
Logistic Regression in R Programming
Replace Specific Characters in String in R
How to import an Excel File into R ?
Joining of Dataframes in R Programming | [
{
"code": null,
"e": 54,
"s": 26,
"text": "\n06 Jun, 2021"
},
{
"code": null,
"e": 265,
"s": 54,
"text": "In a line graph, we have the horizontal axis value through which the line will be ordered and connected using the vertical axis values. We are going to use the R package ggplot2 which has several layers in it. "
},
{
"code": null,
"e": 360,
"s": 265,
"text": "First, you need to install the ggplot2 package if it is not previously installed in R Studio. "
},
{
"code": null,
"e": 441,
"s": 360,
"text": "geom_line connects them in the order of the variable on the horizontal (x) axis."
},
{
"code": null,
"e": 449,
"s": 441,
"text": "Syntax:"
},
{
"code": null,
"e": 526,
"s": 449,
"text": "geom_line(mapping=NULL, data=NULL, stat=”identity”, position=”identity”,...)"
},
{
"code": null,
"e": 590,
"s": 526,
"text": "geom_path connects the observation in the same order as in data"
},
{
"code": null,
"e": 598,
"s": 590,
"text": "Syntax:"
},
{
"code": null,
"e": 675,
"s": 598,
"text": "geom_path(mapping=NULL, data=NULL, stat=”identity”, position=”identity”,...)"
},
{
"code": null,
"e": 803,
"s": 675,
"text": "In this section, we will be dealing with a single line chart and will also discuss various attributes that help its appearance."
},
{
"code": null,
"e": 820,
"s": 803,
"text": "Data set in Use:"
},
{
"code": null,
"e": 822,
"s": 820,
"text": "R"
},
{
"code": "# Create data for chartval <-data.frame(course=c('DSA','C++','R','Python'), num=c(77,55,80,60))head(val)",
"e": 942,
"s": 822,
"text": null
},
{
"code": null,
"e": 950,
"s": 942,
"text": "Output:"
},
{
"code": null,
"e": 966,
"s": 950,
"text": "Basic Line Plot"
},
{
"code": null,
"e": 1061,
"s": 966,
"text": "For a simple line chart data is roughly passed to the function with some required attributes. "
},
{
"code": null,
"e": 1070,
"s": 1061,
"text": "Example:"
},
{
"code": null,
"e": 1072,
"s": 1070,
"text": "R"
},
{
"code": "library(ggplot2) # Create data for chartval <-data.frame(course=c('DSA','C++','R','Python'), num=c(77,55,80,60)) # Basic Lineggplot(data=val, aes(x=course, y=num, group=1)) + geom_line()+ geom_point()",
"e": 1292,
"s": 1072,
"text": null
},
{
"code": null,
"e": 1300,
"s": 1292,
"text": "Output:"
},
{
"code": null,
"e": 1310,
"s": 1300,
"text": "Line Type"
},
{
"code": null,
"e": 1480,
"s": 1310,
"text": "For this, the command linetype is used. ggplot2 provides various line types. For example : dotted, two dash, dashed, etc. This attribute is passed with a required value."
},
{
"code": null,
"e": 1489,
"s": 1480,
"text": "Example:"
},
{
"code": null,
"e": 1491,
"s": 1489,
"text": "R"
},
{
"code": "library(ggplot2) # Create data for chartval <-data.frame(course=c('DSA','C++','R','Python'), num=c(77,55,80,60)) # Format the line typeggplot(data=val, aes(x=course, y=num, group=1)) + geom_line(linetype = \"dotted\")+ geom_point()",
"e": 1740,
"s": 1491,
"text": null
},
{
"code": null,
"e": 1748,
"s": 1740,
"text": "Output:"
},
{
"code": null,
"e": 1759,
"s": 1748,
"text": "Line Color"
},
{
"code": null,
"e": 1862,
"s": 1759,
"text": "The command color is used and the desired color is written in double quotes [” “] inside geom_line( )."
},
{
"code": null,
"e": 1871,
"s": 1862,
"text": "Example:"
},
{
"code": null,
"e": 1873,
"s": 1871,
"text": "R"
},
{
"code": "library(ggplot2) # Create data for chartval <-data.frame(course=c('DSA','C++','R','Python'), num=c(77,55,80,60)) # Format the line colorggplot(data=val, aes(x=course, y=num, group=1)) + geom_line(color=\"green\")+ geom_point()",
"e": 2117,
"s": 1873,
"text": null
},
{
"code": null,
"e": 2125,
"s": 2117,
"text": "Output:"
},
{
"code": null,
"e": 2135,
"s": 2125,
"text": "Line Size"
},
{
"code": null,
"e": 2244,
"s": 2135,
"text": "The line size can be changed using the command size and providing the value of the size inside geom_line( )."
},
{
"code": null,
"e": 2253,
"s": 2244,
"text": "Example:"
},
{
"code": null,
"e": 2255,
"s": 2253,
"text": "R"
},
{
"code": "library(ggplot2) # Create data for chartval <-data.frame(course=c('DSA','C++','R','Python'), num=c(77,55,80,60)) # Format the line sizeggplot(data=val, aes(x=course, y=num, group=1)) + geom_line(color=\"green\",size=1.5)+ geom_point()",
"e": 2507,
"s": 2255,
"text": null
},
{
"code": null,
"e": 2515,
"s": 2507,
"text": "Output:"
},
{
"code": null,
"e": 2652,
"s": 2515,
"text": "ggtitle() with the appropriate title can be used to add chart title and labs again with appropriate input can be used to add axes title."
},
{
"code": null,
"e": 2661,
"s": 2652,
"text": "Example:"
},
{
"code": null,
"e": 2663,
"s": 2661,
"text": "R"
},
{
"code": "library(ggplot2) # Create data for chartval <-data.frame(course=c('DSA','C++','R','Python'), num=c(77,55,80,60)) # Adding titlesline<-ggplot(data=val, aes(x=course, y=num, group=1)) + geom_line(color=\"green\",size=1.5)+ geom_point() line+ggtitle(\"Courses vs Students Enrolled in GeeksforGeeks\")+ labs(x=\"Courses\",y=\"Number of Students\")",
"e": 3020,
"s": 2663,
"text": null
},
{
"code": null,
"e": 3028,
"s": 3020,
"text": "Output:"
},
{
"code": null,
"e": 3187,
"s": 3028,
"text": "Use theme_theme_name() to add the theme. There are a lot of themes available in R library. For example: dark, classic, etc. Values can be provided as desired."
},
{
"code": null,
"e": 3196,
"s": 3187,
"text": "Example:"
},
{
"code": null,
"e": 3198,
"s": 3196,
"text": "R"
},
{
"code": "library(ggplot2) # Create data for chartval <-data.frame(course=c('DSA','C++','R','Python'), num=c(77,55,80,60)) # Adding titlesline<-ggplot(data=val, aes(x=course, y=num, group=1)) + geom_line(color=\"green\",size=1.5)+ geom_point() line+ggtitle(\"Courses vs Students Enrolled in GeeksforGeeks\")+ labs(x=\"Courses\",y=\"Number of Students\")+ theme_dark()",
"e": 3570,
"s": 3198,
"text": null
},
{
"code": null,
"e": 3578,
"s": 3570,
"text": "Output:"
},
{
"code": null,
"e": 3767,
"s": 3578,
"text": "To add an arrow in line use the grid library is used. Then to add arrows use the arrow( ) to add an arrow. It is also possible to change the parameters in an arrow like angle, type, ends. "
},
{
"code": null,
"e": 3776,
"s": 3767,
"text": "Example:"
},
{
"code": null,
"e": 3778,
"s": 3776,
"text": "R"
},
{
"code": "library(ggplot2)library(grid) # Create data for chartval <-data.frame(course=c('DSA','C++','R','Python'), num=c(77,55,80,60)) # Adding an arrowggplot(data=val, aes(x=course, y=num, group=1)) + geom_line(arrow=arrow())+ geom_point() # Adding closed arrow on both ends of the linearr=arrow(angle = 20, ends = \"both\", type = \"closed\")ggplot(data=val, aes(x=course, y=num, group=1)) + geom_line(arrow=arr)+ geom_point()",
"e": 4217,
"s": 3778,
"text": null
},
{
"code": null,
"e": 4225,
"s": 4217,
"text": "Output:"
},
{
"code": null,
"e": 4300,
"s": 4225,
"text": "Use label to get the values in y-axis and nudge_y to place the data label."
},
{
"code": null,
"e": 4309,
"s": 4300,
"text": "Example:"
},
{
"code": null,
"e": 4311,
"s": 4309,
"text": "R"
},
{
"code": "library(ggplot2) # Create data for chartval <-data.frame(course=c('DSA','C++','R','Python'), num=c(77,55,80,60)) # Adding data labelggplot(data=val, aes(x=course, y=num, group=1, label=num)) + geom_line()+ geom_point()+ geom_text(nudge_y = 2)",
"e": 4574,
"s": 4311,
"text": null
},
{
"code": null,
"e": 4582,
"s": 4574,
"text": "Output:"
},
{
"code": null,
"e": 4699,
"s": 4582,
"text": "Use xlim( ) to change the x-axis scale and ylim( ) to change the y-axis scale and pass appropriate values to these. "
},
{
"code": null,
"e": 4707,
"s": 4699,
"text": "Syntax:"
},
{
"code": null,
"e": 4721,
"s": 4707,
"text": "xlim(min,max)"
},
{
"code": null,
"e": 4735,
"s": 4721,
"text": "ylim(min,max)"
},
{
"code": null,
"e": 4744,
"s": 4735,
"text": "Example:"
},
{
"code": null,
"e": 4746,
"s": 4744,
"text": "R"
},
{
"code": "library(ggplot2) # Create data for chartval <-data.frame(course=c('DSA','C++','R','Python'), num=c(77,55,80,60)) # Storing the line plotln <-ggplot(data=val, aes(x=course, y=num, group=1)) + geom_line(color=\"green\",size=2)+ geom_point() # y-axis limitsln+ylim(0,100)+theme_dark()",
"e": 5046,
"s": 4746,
"text": null
},
{
"code": null,
"e": 5054,
"s": 5046,
"text": "Output:"
},
{
"code": null,
"e": 5226,
"s": 5054,
"text": "For plotting multiple plots into one, nothing changes except that group attribute has to set to the name of the column on the basis of which different lines will be drawn."
},
{
"code": null,
"e": 5235,
"s": 5226,
"text": "Example:"
},
{
"code": null,
"e": 5237,
"s": 5235,
"text": "R"
},
{
"code": "library(ggplot2) # Inserting datavacc <- data.frame(type=rep(c(\"Covishield\", \"Covaxin\"), each=2), dose=rep(c(\"D1\", \"D2\"),2), slots=c(33, 45, 66, 50)) # Plotting line with multiple groupsggplot(data=vacc, aes(x=dose, y=slots, group=type)) + geom_line(linetype=\"longdash\", color=\"green\", size=1.5)+ geom_point(color=\"red\", size=5)+ theme_dark()",
"e": 5619,
"s": 5237,
"text": null
},
{
"code": null,
"e": 5627,
"s": 5619,
"text": "Output:"
},
{
"code": null,
"e": 5736,
"s": 5627,
"text": "You can also add title, axes title, data labels in the above line plot as discussed in the previous section."
},
{
"code": null,
"e": 5779,
"s": 5736,
"text": "Using separate line types based on groups "
},
{
"code": null,
"e": 5914,
"s": 5779,
"text": "To differentiate the lines by changing the type of line provide the line type in geom_line() and shape for the legend in geom_point()."
},
{
"code": null,
"e": 5923,
"s": 5914,
"text": "Example:"
},
{
"code": null,
"e": 5925,
"s": 5923,
"text": "R"
},
{
"code": "library(ggplot) # Inserting datavacc <- data.frame(type=rep(c(\"Covishield\", \"Covaxin\"), each=2), dose=rep(c(\"D1\", \"D2\"),2), slots=c(33, 45, 66, 50)) # Changing the line type on the basis of groupsggplot(vacc, aes(x=dose, y=slots, group=type)) + geom_line(aes(linetype=type))+ geom_point()+ theme_classic() # Changing the line type on the basis of groups and also the shape of pointsggplot(vacc, aes(x=dose, y=slots, group=type)) + geom_line(aes(linetype=type))+ geom_point(aes(shape=type))+ theme_classic()",
"e": 6475,
"s": 5925,
"text": null
},
{
"code": null,
"e": 6483,
"s": 6475,
"text": "Output:"
},
{
"code": null,
"e": 6539,
"s": 6483,
"text": "Assigning different line colors on the basis of groups "
},
{
"code": null,
"e": 6672,
"s": 6539,
"text": "The following code automatically controls color using the level of the variable “type”. It will assign separate colors to each line."
},
{
"code": null,
"e": 6681,
"s": 6672,
"text": "Example:"
},
{
"code": null,
"e": 6683,
"s": 6681,
"text": "R"
},
{
"code": "library(ggplot2) # Inserting datavacc <- data.frame(type=rep(c(\"Covishield\", \"Covaxin\"), each=2), dose=rep(c(\"D1\", \"D2\"),2), slots=c(33, 45, 66, 50)) # Change line color by group type of vaccineln <-ggplot(vacc, aes(x=dose, y=slots, group=type)) + geom_line(aes(color=type))+ geom_point(aes(color=type))+ theme_classic()ln",
"e": 7045,
"s": 6683,
"text": null
},
{
"code": null,
"e": 7053,
"s": 7045,
"text": "Output:"
},
{
"code": null,
"e": 7091,
"s": 7053,
"text": "To enter color manually you can use :"
},
{
"code": null,
"e": 7210,
"s": 7091,
"text": "scale_color_brewer( ) : It uses different color palettes from the RColorBrewer package. It has various color palettes."
},
{
"code": null,
"e": 7278,
"s": 7210,
"text": "scale_color_manual( ) : It is used to manually add discrete colors."
},
{
"code": null,
"e": 7287,
"s": 7278,
"text": "Example:"
},
{
"code": null,
"e": 7289,
"s": 7287,
"text": "R"
},
{
"code": "library(ggplot2) # Inserting datavacc <- data.frame(type=rep(c(\"Covishield\", \"Covaxin\"), each=2), dose=rep(c(\"D1\", \"D2\"),2), slots=c(33, 45, 66, 50)) # Change line color by group type of vaccineln <-ggplot(vacc, aes(x=dose, y=slots, group=type)) + geom_line(aes(color=type))+ geom_point(aes(color=type))+ theme_classic() # Adding line colors using brewer color paletteln+scale_color_brewer(palette=\"Set2\") # Adding line colors using color manualln+scale_color_manual(values=c(\"green\", \"blue\"))",
"e": 7824,
"s": 7289,
"text": null
},
{
"code": null,
"e": 7832,
"s": 7824,
"text": "Output:"
},
{
"code": null,
"e": 7866,
"s": 7832,
"text": "Changing the position of legends "
},
{
"code": null,
"e": 7981,
"s": 7866,
"text": "For changing the legend position legen.position attribute of the theme function is passed with the required value."
},
{
"code": null,
"e": 7989,
"s": 7981,
"text": "Syntax:"
},
{
"code": null,
"e": 8018,
"s": 7989,
"text": "theme(legend.position=”pos”)"
},
{
"code": null,
"e": 8065,
"s": 8018,
"text": "pos It can be top, right, bottom, left or none"
},
{
"code": null,
"e": 8074,
"s": 8065,
"text": "Example:"
},
{
"code": null,
"e": 8076,
"s": 8074,
"text": "R"
},
{
"code": "library(ggplot2) # Inserting datavacc <- data.frame(type=rep(c(\"Covishield\", \"Covaxin\"), each=2), dose=rep(c(\"D1\", \"D2\"),2), slots=c(33, 45, 66, 50)) # Change line color by group type of vaccineln <-ggplot(vacc, aes(x=dose, y=slots, group=type)) + geom_line(aes(color=type))+ geom_point(aes(color=type))+ theme_classic() ln <- ln + scale_color_brewer(palette=\"Dark2\")+ theme_classic() # Legend at topln + theme(legend.position=\"top\") # Legend at leftln + theme(legend.position=\"left\") # Remove legendln + theme(legend.position=\"none\")",
"e": 8655,
"s": 8076,
"text": null
},
{
"code": null,
"e": 8663,
"s": 8655,
"text": "Output:"
},
{
"code": null,
"e": 8670,
"s": 8663,
"text": "Picked"
},
{
"code": null,
"e": 8679,
"s": 8670,
"text": "R-ggplot"
},
{
"code": null,
"e": 8690,
"s": 8679,
"text": "R Language"
},
{
"code": null,
"e": 8788,
"s": 8690,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 8840,
"s": 8788,
"text": "Change Color of Bars in Barchart using ggplot2 in R"
},
{
"code": null,
"e": 8898,
"s": 8840,
"text": "How to Split Column Into Multiple Columns in R DataFrame?"
},
{
"code": null,
"e": 8933,
"s": 8898,
"text": "Group by function in R using Dplyr"
},
{
"code": null,
"e": 8971,
"s": 8933,
"text": "How to Change Axis Scales in R Plots?"
},
{
"code": null,
"e": 9020,
"s": 8971,
"text": "How to filter R DataFrame by values in a column?"
},
{
"code": null,
"e": 9037,
"s": 9020,
"text": "R - if statement"
},
{
"code": null,
"e": 9074,
"s": 9037,
"text": "Logistic Regression in R Programming"
},
{
"code": null,
"e": 9117,
"s": 9074,
"text": "Replace Specific Characters in String in R"
},
{
"code": null,
"e": 9154,
"s": 9117,
"text": "How to import an Excel File into R ?"
}
]
|
Program to check if a given number is Lucky (all digits are different) | 08 Jun, 2022
A number is lucky if all digits of the number are different. The task is to check whether a given number is lucky or not. Examples:
Input: n = 983
Output: true
All digits are different
Input: n = 9838
Output: false
8 appears twice
We strongly recommend that you minimize your browser and try this yourself first. The idea is to traverse through every digit of the given number and mark each traversed digit as visited. Since there are only 10 possible digits, a boolean array of size 10 is enough to mark the visited digits. Below is the implementation of the above idea.
C++
Java
Python3
C#
PHP
Javascript
// C++ program to check if a given number is lucky#include<iostream>using namespace std; // This function returns true if n is luckybool isLucky(int n){ // Create an array of size 10 and initialize all // elements as false. This array is used to check // if a digit is already seen or not. bool arr[10]; for (int i=0; i<10; i++) arr[i] = false; // Traverse through all digits of given number while (n > 0) { // Find the last digit int digit = n%10; // If digit is already seen, return false if (arr[digit]) return false; // Mark this digit as seen arr[digit] = true; // REmove the last digit from number n = n/10; } return true;} // Driver program to test above function.int main(){ int arr[] = {1291, 897, 4566, 1232, 80, 700}; int n = sizeof(arr)/sizeof(arr[0]); for (int i=0; i<n; i++) isLucky(arr[i])? cout << arr[i] << " is Lucky \n": cout << arr[i] << " is not Lucky \n"; return 0;}
// Java program to check if// a given number is lucky class GFG{ // This function returns true if n is lucky static boolean isLucky(int n) { // Create an array of size 10 and initialize all // elements as false. This array is used to check // if a digit is already seen or not. boolean arr[]=new boolean[10]; for (int i = 0; i < 10; i++) arr[i] = false; // Traverse through all digits // of given number while (n > 0) { // Find the last digit int digit = n % 10; // If digit is already seen, // return false if (arr[digit]) return false; // Mark this digit as seen arr[digit] = true; // Remove the last digit from number n = n / 10; } return true; } // Driver code public static void main (String[] args) { int arr[] = {1291, 897, 4566, 1232, 80, 700}; int n = arr.length; for (int i = 0; i < n; i++) if(isLucky(arr[i])) System.out.print(arr[i] + " is Lucky \n"); else System.out.print(arr[i] + " is not Lucky \n"); }} // This code is contributed by Anant Agarwal.
# python program to check if a# given number is lucky import math # This function returns true# if n is luckydef isLucky(n): # Create an array of size 10 # and initialize all elements # as false. This array is # used to check if a digit # is already seen or not. ar = [0] * 10 # Traverse through all digits # of given number while (n > 0): #Find the last digit digit = math.floor(n % 10) # If digit is already seen, # return false if (ar[digit]): return 0 # Mark this digit as seen ar[digit] = 1 # REmove the last digit # from number n = n / 10 return 1 # Driver program to test above function.arr = [1291, 897, 4566, 1232, 80, 700]n = len(arr) for i in range(0, n): k = arr[i] if(isLucky(k)): print(k, " is Lucky ") else: print(k, " is not Lucky ") # This code is contributed by Sam007.
// C# program to check if// a given number is luckyusing System; class GFG { // This function returns true if // n is lucky static bool isLucky(int n) { // Create an array of size 10 // and initialize all elements // as false. This array is used // to check if a digit is // already seen or not. bool []arr = new bool[10]; for (int i = 0; i < 10; i++) arr[i] = false; // Traverse through all digits // of given number while (n > 0) { // Find the last digit int digit = n % 10; // If digit is already seen, // return false if (arr[digit]) return false; // Mark this digit as seen arr[digit] = true; // Remove the last digit // from number n = n / 10; } return true; } // Driver code public static void Main () { int []arr = {1291, 897, 4566, 1232, 80, 700}; int n = arr.Length; for (int i = 0; i < n; i++) if(isLucky(arr[i])) Console.Write(arr[i] + " is Lucky \n"); else Console.Write(arr[i] + " is not Lucky \n"); }} // This code is contributed by sam007.
<?php// PHP program to check if a given// number is lucky // This function returns true// if n is luckyfunction isLucky($n){ // Create an array of size 10 and // initialize all elements as false. // This array is used to check if a // digit is already seen or not. $arr = array(); for ($i = 0; $i < 10; $i++) $arr[$i] = false; // Traverse through all digits // of given number while ($n > 0) { // Find the last digit $digit = $n % 10; // If digit is already seen, // return false if ($arr[$digit]) return false; // Mark this digit as seen $arr[$digit] = true; // Remove the last digit // from number $n = (int)($n / 10); } return true;} // Driver Code$arr = array(1291, 897, 4566, 1232, 80, 700);$n = sizeof($arr); for ($i = 0; $i < $n; $i++) if(isLucky($arr[$i])) echo $arr[$i] , " is Lucky \n"; else echo $arr[$i] , " is not Lucky \n"; // This code is contributed by jit_t?>
<script> // Javascript program to check if a given number is lucky // This function returns true if n is luckyfunction isLucky(n){ // Create an array of size 10 and initialize all // elements as false. This array is used to check // if a digit is already seen or not. var arr=Array(10).fill(0); for (var i=0; i<10; i++) arr[i] = false; // Traverse through all digits of given number while (n > 0) { // Find the last digit var digit = n%10; // If digit is already seen, return false if (arr[digit]) return false; // Mark this digit as seen arr[digit] = true; // REmove the last digit from number n = parseInt(n/10); } return true;} // Driver program to test above function.var arr = [1291, 897, 4566, 1232, 80, 700]var n = arr.length;for (var i=0; i<n; i++) isLucky(arr[i])? document.write( arr[i] + " is Lucky<br>"): document.write(arr[i] + " is not Lucky<br>"); </script>
Output:
1291 is not Lucky
897 is Lucky
4566 is not Lucky
1232 is not Lucky
80 is Lucky
700 is not Lucky
Time Complexity: O(d), where d is the number of digits in the input number. Auxiliary Space: O(1). This article is contributed by Himanshu. Please write comments if you find anything incorrect, or if you want to share more information about the topic discussed above.
Method 2: (using STL and sorting)
In this method, we first convert the number into a string and sort it. Then we compare every character with the next one; if any two adjacent characters are equal, we stop at that position and print that the given number is not lucky.
If no such pair of equal adjacent characters is found, we print that the given number is lucky.
Since sorting dominates, this whole task is completed in O(K log K) time, where K is the total number of digits of N.
Below is the implementation of the above approach:
C++
// C++ code to check is the number// lucky or not.#include <bits/stdc++.h>using namespace std;void checklucky(string s,int n){ bool x = false; // traversing the whole string for (int i = 0; i < s.length() - 1; i++) { // checking next element whether // it is equal or not if (s[i] == s[i + 1]) { cout << n << " is not lucky number"<<endl; x = true; break; } } if (!x) { cout << n << " is lucky number"<<endl; }}int main(){ int n1 = 1234,n2=5868; // converting the number from // integer to string using // C++ STL function. string s1 = to_string(n1); string s2 = to_string(n2); // sorting the string sort(s1.begin(), s1.end()); sort(s2.begin(), s2.end()); //function calling checklucky(s1,n1); checklucky(s2,n2); return 0;} //this code is contribut by Machhaliya Muhammad
1234 is lucky number
5868 is not lucky number
Time Complexity: O(n1*logn1 + n2*logn2), where n1 and n2 represent the lengths of the given strings. Auxiliary Space: O(1), as no extra space is required.
Sam007
jit_t
rutvik_56
Shubham Pandey 1
shaheeneallamaiqbal
samim2000
number-digits
Mathematical
Mathematical
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Operators in C / C++
Find minimum number of coins that make a given value
Minimum number of jumps to reach end
Algorithm to solve Rubik's Cube
The Knight's tour problem | Backtracking-1
Modulo 10^9+7 (1000000007)
Modulo Operator (%) in C/C++ with Examples
Program for factorial of a number
Program to find sum of elements in a given array
Merge two sorted arrays with O(1) extra space | [
{
"code": null,
"e": 53,
"s": 25,
"text": "\n08 Jun, 2022"
},
{
"code": null,
"e": 173,
"s": 53,
"text": "A number is lucky if all digits of the number are different. How to check if a given number is lucky or not.Examples: "
},
{
"code": null,
"e": 273,
"s": 173,
"text": "Input: n = 983\nOutput: true\nAll digits are different\n\nInput: n = 9838\nOutput: false\n8 appears twice"
},
{
"code": null,
"e": 601,
"s": 273,
"text": "We strongly recommend you to minimize your browser and try this yourself first.The idea is to traverse through every digit of given number and mark the traversed digit as visited. Since the total number of digits is 10, we need a boolean array of size only 10 to mark visited digits.Below is the implementation of above idea. "
},
{
"code": null,
"e": 605,
"s": 601,
"text": "C++"
},
{
"code": null,
"e": 610,
"s": 605,
"text": "Java"
},
{
"code": null,
"e": 618,
"s": 610,
"text": "Python3"
},
{
"code": null,
"e": 621,
"s": 618,
"text": "C#"
},
{
"code": null,
"e": 625,
"s": 621,
"text": "PHP"
},
{
"code": null,
"e": 636,
"s": 625,
"text": "Javascript"
},
{
"code": "// C++ program to check if a given number is lucky#include<iostream>using namespace std; // This function returns true if n is luckybool isLucky(int n){ // Create an array of size 10 and initialize all // elements as false. This array is used to check // if a digit is already seen or not. bool arr[10]; for (int i=0; i<10; i++) arr[i] = false; // Traverse through all digits of given number while (n > 0) { // Find the last digit int digit = n%10; // If digit is already seen, return false if (arr[digit]) return false; // Mark this digit as seen arr[digit] = true; // REmove the last digit from number n = n/10; } return true;} // Driver program to test above function.int main(){ int arr[] = {1291, 897, 4566, 1232, 80, 700}; int n = sizeof(arr)/sizeof(arr[0]); for (int i=0; i<n; i++) isLucky(arr[i])? cout << arr[i] << \" is Lucky \\n\": cout << arr[i] << \" is not Lucky \\n\"; return 0;}",
"e": 1676,
"s": 636,
"text": null
},
{
"code": "// Java program to check if// a given number is lucky class GFG{ // This function returns true if n is lucky static boolean isLucky(int n) { // Create an array of size 10 and initialize all // elements as false. This array is used to check // if a digit is already seen or not. boolean arr[]=new boolean[10]; for (int i = 0; i < 10; i++) arr[i] = false; // Traverse through all digits // of given number while (n > 0) { // Find the last digit int digit = n % 10; // If digit is already seen, // return false if (arr[digit]) return false; // Mark this digit as seen arr[digit] = true; // Remove the last digit from number n = n / 10; } return true; } // Driver code public static void main (String[] args) { int arr[] = {1291, 897, 4566, 1232, 80, 700}; int n = arr.length; for (int i = 0; i < n; i++) if(isLucky(arr[i])) System.out.print(arr[i] + \" is Lucky \\n\"); else System.out.print(arr[i] + \" is not Lucky \\n\"); }} // This code is contributed by Anant Agarwal.",
"e": 2958,
"s": 1676,
"text": null
},
{
"code": "# python program to check if a# given number is lucky import math # This function returns true# if n is luckydef isLucky(n): # Create an array of size 10 # and initialize all elements # as false. This array is # used to check if a digit # is already seen or not. ar = [0] * 10 # Traverse through all digits # of given number while (n > 0): #Find the last digit digit = math.floor(n % 10) # If digit is already seen, # return false if (ar[digit]): return 0 # Mark this digit as seen ar[digit] = 1 # REmove the last digit # from number n = n / 10 return 1 # Driver program to test above function.arr = [1291, 897, 4566, 1232, 80, 700]n = len(arr) for i in range(0, n): k = arr[i] if(isLucky(k)): print(k, \" is Lucky \") else: print(k, \" is not Lucky \") # This code is contributed by Sam007.",
"e": 3911,
"s": 2958,
"text": null
},
{
"code": "// C# program to check if// a given number is luckyusing System; class GFG { // This function returns true if // n is lucky static bool isLucky(int n) { // Create an array of size 10 // and initialize all elements // as false. This array is used // to check if a digit is // already seen or not. bool []arr = new bool[10]; for (int i = 0; i < 10; i++) arr[i] = false; // Traverse through all digits // of given number while (n > 0) { // Find the last digit int digit = n % 10; // If digit is already seen, // return false if (arr[digit]) return false; // Mark this digit as seen arr[digit] = true; // Remove the last digit // from number n = n / 10; } return true; } // Driver code public static void Main () { int []arr = {1291, 897, 4566, 1232, 80, 700}; int n = arr.Length; for (int i = 0; i < n; i++) if(isLucky(arr[i])) Console.Write(arr[i] + \" is Lucky \\n\"); else Console.Write(arr[i] + \" is not Lucky \\n\"); }} // This code is contributed by sam007.",
"e": 5320,
"s": 3911,
"text": null
},
{
"code": "<?php// PHP program to check if a given// number is lucky // This function returns true// if n is luckyfunction isLucky($n){ // Create an array of size 10 and // initialize all elements as false. // This array is used to check if a // digit is already seen or not. $arr = array(); for ($i = 0; $i < 10; $i++) $arr[$i] = false; // Traverse through all digits // of given number while ($n > 0) { // Find the last digit $digit = $n % 10; // If digit is already seen, // return false if ($arr[$digit]) return false; // Mark this digit as seen $arr[$digit] = true; // Remove the last digit // from number $n = (int)($n / 10); } return true;} // Driver Code$arr = array(1291, 897, 4566, 1232, 80, 700);$n = sizeof($arr); for ($i = 0; $i < $n; $i++) if(isLucky($arr[$i])) echo $arr[$i] , \" is Lucky \\n\"; else echo $arr[$i] , \" is not Lucky \\n\"; // This code is contributed by jit_t?>",
"e": 6353,
"s": 5320,
"text": null
},
{
"code": "<script> // Javascript program to check if a given number is lucky // This function returns true if n is luckyfunction isLucky(n){ // Create an array of size 10 and initialize all // elements as false. This array is used to check // if a digit is already seen or not. var arr=Array(10).fill(0); for (var i=0; i<10; i++) arr[i] = false; // Traverse through all digits of given number while (n > 0) { // Find the last digit var digit = n%10; // If digit is already seen, return false if (arr[digit]) return false; // Mark this digit as seen arr[digit] = true; // REmove the last digit from number n = parseInt(n/10); } return true;} // Driver program to test above function.var arr = [1291, 897, 4566, 1232, 80, 700]var n = arr.length;for (var i=0; i<n; i++) isLucky(arr[i])? document.write( arr[i] + \" is Lucky<br>\"): document.write(arr[i] + \" is not Lucky<br>\"); </script>",
"e": 7352,
"s": 6353,
"text": null
},
{
"code": null,
"e": 7362,
"s": 7352,
"text": "Output: "
},
{
"code": null,
"e": 7458,
"s": 7362,
"text": "1291 is not Lucky\n897 is Lucky\n4566 is not Lucky\n1232 is not Lucky\n80 is Lucky\n700 is not Lucky"
},
{
"code": null,
"e": 7718,
"s": 7458,
"text": "Time Complexity: O(d) where d is a number of digits in the input number. Auxiliary Space: O(1)This article is contributed by Himanshu. Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above "
},
{
"code": null,
"e": 7751,
"s": 7718,
"text": "Method 2:(using STL and sorting)"
},
{
"code": null,
"e": 8024,
"s": 7751,
"text": " In this method we will firstly convert the number into a string. Then we will sort the whole string. Then we will compare every index element with the next index element. If both are equal then we will stop that position and will print that the given number is not lucky."
},
{
"code": null,
"e": 8121,
"s": 8024,
"text": "And if we do not get any index as discussed above then we will print that given number is lucky."
},
{
"code": null,
"e": 8212,
"s": 8121,
"text": "This whole task will be completed in O(K) time where K is the total number of digits of N."
},
{
"code": null,
"e": 8263,
"s": 8212,
"text": "Below is the implementation of the above approach:"
},
{
"code": null,
"e": 8267,
"s": 8263,
"text": "C++"
},
{
"code": "// C++ code to check is the number// lucky or not.#include <bits/stdc++.h>using namespace std;void checklucky(string s,int n){ bool x = false; // traversing the whole string for (int i = 0; i < s.length() - 1; i++) { // checking next element whether // it is equal or not if (s[i] == s[i + 1]) { cout << n << \" is not lucky number\"<<endl; x = true; break; } } if (!x) { cout << n << \" is lucky number\"<<endl; }}int main(){ int n1 = 1234,n2=5868; // converting the number from // integer to string using // C++ STL function. string s1 = to_string(n1); string s2 = to_string(n2); // sorting the string sort(s1.begin(), s1.end()); sort(s2.begin(), s2.end()); //function calling checklucky(s1,n1); checklucky(s2,n2); return 0;} //this code is contribut by Machhaliya Muhammad",
"e": 9183,
"s": 8267,
"text": null
},
{
"code": null,
"e": 9229,
"s": 9183,
"text": "1234 is lucky number\n5868 is not lucky number"
},
{
"code": null,
"e": 9402,
"s": 9229,
"text": "Time Complexity: O(n1*logn1 + n2*logn2), where n1 and n2 represents the lengths of the given strings.Auxiliary Space: O(1), no extra space is required, so it is a constant."
},
{
"code": null,
"e": 9409,
"s": 9402,
"text": "Sam007"
},
{
"code": null,
"e": 9415,
"s": 9409,
"text": "jit_t"
},
{
"code": null,
"e": 9425,
"s": 9415,
"text": "rutvik_56"
},
{
"code": null,
"e": 9442,
"s": 9425,
"text": "Shubham Pandey 1"
},
{
"code": null,
"e": 9462,
"s": 9442,
"text": "shaheeneallamaiqbal"
},
{
"code": null,
"e": 9472,
"s": 9462,
"text": "samim2000"
},
{
"code": null,
"e": 9486,
"s": 9472,
"text": "number-digits"
},
{
"code": null,
"e": 9499,
"s": 9486,
"text": "Mathematical"
},
{
"code": null,
"e": 9512,
"s": 9499,
"text": "Mathematical"
},
{
"code": null,
"e": 9610,
"s": 9512,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 9631,
"s": 9610,
"text": "Operators in C / C++"
},
{
"code": null,
"e": 9684,
"s": 9631,
"text": "Find minimum number of coins that make a given value"
},
{
"code": null,
"e": 9721,
"s": 9684,
"text": "Minimum number of jumps to reach end"
},
{
"code": null,
"e": 9753,
"s": 9721,
"text": "Algorithm to solve Rubik's Cube"
},
{
"code": null,
"e": 9796,
"s": 9753,
"text": "The Knight's tour problem | Backtracking-1"
},
{
"code": null,
"e": 9823,
"s": 9796,
"text": "Modulo 10^9+7 (1000000007)"
},
{
"code": null,
"e": 9866,
"s": 9823,
"text": "Modulo Operator (%) in C/C++ with Examples"
},
{
"code": null,
"e": 9900,
"s": 9866,
"text": "Program for factorial of a number"
},
{
"code": null,
"e": 9949,
"s": 9900,
"text": "Program to find sum of elements in a given array"
}
]
|
Python | Sort alternate numeric and alphabet list | 11 May, 2020
Sometimes, while sorting a list, we have a problem in which we need a particular type of sort where numeric and alphabetic elements alternate, each kind sorted in its own order. Let's discuss certain ways in which this task can be performed.
Method #1 : Using isalpha() + isnumeric() + zip_longest(). The combination of the above methods can be used to perform this task. In this, we separate the numerics and the alphabets, sort them separately, and then interleave them using zip_longest().
# Python3 code to demonstrate # Sort alternate numeric and alphabet list# using isalpha() + isnumeric() + zip_longest()from itertools import zip_longest # Initializing listtest_list = ['3', 'B', '2', 'A', 'C', '1'] # printing original listprint("The original list is : " + str(test_list)) # Sort alternate numeric and alphabet list# using isalpha() + isnumeric() + zip_longest()num_list = sorted(filter(str.isnumeric, test_list), key = lambda sub: int(sub)) chr_list = sorted(filter(str.isalpha, test_list))res = [ele for sub in zip_longest(num_list, chr_list) for ele in sub if ele] # printing result print ("List after performing sorting : " + str(res))
The original list is : ['3', 'B', '2', 'A', 'C', '1']
List after performing sorting : ['1', 'A', '2', 'B', '3', 'C']
Method #2 : Using sorted() + key + lambda + isnumeric(). The combination of the above methods can be used to perform this task. In this, we perform the sorting in an alternate manner using ord() and a lambda function, testing for digits with isnumeric().
# Python3 code to demonstrate # Sort alternate numeric and alphabet list# using sorted() + key + lambda + isnumeric()from itertools import zip_longest # Initializing listtest_list = ['3', 'B', '2', 'A', 'C', '1'] # printing original listprint("The original list is : " + str(test_list)) # Sort alternate numeric and alphabet list# using sorted() + key + lambda + isnumeric()res = sorted(test_list, key = lambda ele : (int(ele), 0) if ele.isnumeric() else ((ord(ele) - 64) % 26, 1)) # printing result print ("List after performing sorting : " + str(res))
The original list is : ['3', 'B', '2', 'A', 'C', '1']
List after performing sorting : ['1', 'A', '2', 'B', '3', 'C']
Python list-programs
Python-sort
Python
Python Programs
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Python Dictionary
Different ways to create Pandas Dataframe
Enumerate() in Python
Read a file line by line in Python
Python String | replace()
Python program to convert a list to string
Defaultdict in Python
Python | Get dictionary keys as a list
Python | Convert a list to dictionary
Python Program for Fibonacci numbers | [
{
"code": null,
"e": 28,
"s": 0,
"text": "\n11 May, 2020"
},
{
"code": null,
"e": 309,
"s": 28,
"text": "Sometimes, while performing sorting in list, we have a problem in which we need to perform particular type of sorting in which we need to sort in alternate ways in which we have numerics and alphabets sorted in order. Lets discuss certain ways in which this task can be performed."
},
{
"code": null,
"e": 553,
"s": 309,
"text": "Method #1 : Using isalpha() + isnumeric() + zip_longest()The combination of above methods can be used to perform this task. In this, we separate the numeric and alphabets and then perform a sort on them separately and join using zip_longest()."
},
{
"code": "# Python3 code to demonstrate # Sort alternate numeric and alphabet list# using isalpha() + isnumeric() + zip_longest()from itertools import zip_longest # Initializing listtest_list = ['3', 'B', '2', 'A', 'C', '1'] # printing original listprint(\"The original list is : \" + str(test_list)) # Sort alternate numeric and alphabet list# using isalpha() + isnumeric() + zip_longest()num_list = sorted(filter(str.isnumeric, test_list), key = lambda sub: int(sub)) chr_list = sorted(filter(str.isalpha, test_list))res = [ele for sub in zip_longest(num_list, chr_list) for ele in sub if ele] # printing result print (\"List after performing sorting : \" + str(res))",
"e": 1270,
"s": 553,
"text": null
},
{
"code": null,
"e": 1388,
"s": 1270,
"text": "The original list is : ['3', 'B', '2', 'A', 'C', '1']\nList after performing sorting : ['1', 'A', '2', 'B', '3', 'C']\n"
},
{
"code": null,
"e": 1624,
"s": 1390,
"text": "Method #2 : Using sorted() + key + lambda + isnumeric()The combination of above methods can be used to perform this task. In this, we perform the sorting in alternate manner using ord() and lambda function, testing using isnumeric()."
},
{
"code": "# Python3 code to demonstrate # Sort alternate numeric and alphabet list# using sorted() + key + lambda + isnumeric()from itertools import zip_longest # Initializing listtest_list = ['3', 'B', '2', 'A', 'C', '1'] # printing original listprint(\"The original list is : \" + str(test_list)) # Sort alternate numeric and alphabet list# using sorted() + key + lambda + isnumeric()res = sorted(test_list, key = lambda ele : (int(ele), 0) if ele.isnumeric() else ((ord(ele) - 64) % 26, 1)) # printing result print (\"List after performing sorting : \" + str(res))",
"e": 2192,
"s": 1624,
"text": null
},
{
"code": null,
"e": 2310,
"s": 2192,
"text": "The original list is : ['3', 'B', '2', 'A', 'C', '1']\nList after performing sorting : ['1', 'A', '2', 'B', '3', 'C']\n"
},
{
"code": null,
"e": 2331,
"s": 2310,
"text": "Python list-programs"
},
{
"code": null,
"e": 2343,
"s": 2331,
"text": "Python-sort"
},
{
"code": null,
"e": 2350,
"s": 2343,
"text": "Python"
},
{
"code": null,
"e": 2366,
"s": 2350,
"text": "Python Programs"
},
{
"code": null,
"e": 2464,
"s": 2366,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 2482,
"s": 2464,
"text": "Python Dictionary"
},
{
"code": null,
"e": 2524,
"s": 2482,
"text": "Different ways to create Pandas Dataframe"
},
{
"code": null,
"e": 2546,
"s": 2524,
"text": "Enumerate() in Python"
},
{
"code": null,
"e": 2581,
"s": 2546,
"text": "Read a file line by line in Python"
},
{
"code": null,
"e": 2607,
"s": 2581,
"text": "Python String | replace()"
},
{
"code": null,
"e": 2650,
"s": 2607,
"text": "Python program to convert a list to string"
},
{
"code": null,
"e": 2672,
"s": 2650,
"text": "Defaultdict in Python"
},
{
"code": null,
"e": 2711,
"s": 2672,
"text": "Python | Get dictionary keys as a list"
},
{
"code": null,
"e": 2749,
"s": 2711,
"text": "Python | Convert a list to dictionary"
}
]
|
Scrape content from dynamic websites | 05 Sep, 2020
To scrape content from a static page, we use BeautifulSoup as our package for scraping, and it works flawlessly for static pages. We use requests to load the page into our Python script. Now, if the page we are trying to load is dynamic in nature and we request it with the requests library, the response contains the JS code that was meant to be executed locally (in a browser). The requests package does not execute this JS code and simply returns it as part of the page source.
BeautifulSoup does not capture interactions with the DOM via JavaScript. Suppose, for example, you have a table that is generated by JS: BeautifulSoup will not be able to capture it, while Selenium can.
If there were just a need to scrape static websites, we would've used just bs4. But for dynamically generated webpages, we use Selenium.
Selenium
Selenium is a free (open-source) automated testing framework used to validate web applications across different browsers and platforms. You can use multiple programming languages like Java, C#, Python etc to create Selenium Test Scripts. Here, we use Python as our main language.
First up, the installation :
1) Selenium bindings in python
pip install selenium
2) Web drivers
Selenium requires a web driver to interface with the chosen browser. A web driver is a package that lets us interact with a web browser. It communicates with the web browser or a remote web server through a wire protocol that is common to all of them. You can check out and install the web driver of your browser choice.
Chrome: https://sites.google.com/a/chromium.org/chromedriver/downloads
Firefox: https://github.com/mozilla/geckodriver/releases
Safari: https://webkit.org/blog/6900/webdriver-support-in-safari-10/
Beautifulsoup
Beautifulsoup is a Python library for pulling data out of HTML and XML files. It works with your favorite parser to provide idiomatic ways of navigating, searching, and modifying the parse tree. It commonly saves programmers hours or days of work.
To use Beautiful Soup, we have this wonderful binding of it in Python:
1) BS4 bindings in Python
pip install bs4
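As a quick illustration of how bs4 pulls data out of ordinary static HTML, here is a minimal sketch — the HTML string and tag contents below are made up purely for demonstration:

from bs4 import BeautifulSoup

# a small, fully static HTML snippet -- no JavaScript involved
html_doc = "<html><body><h1>Jobs</h1><ul><li>Developer</li><li>Analyst</li></ul></body></html>"

soup = BeautifulSoup(html_doc, "html.parser")

# navigating and searching the parse tree
print(soup.h1.text)            # prints: Jobs
for li in soup.find_all('li'):
    print(li.text)             # prints: Developer, then Analyst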
Let’s suppose the site is dynamic and simple scraping returns a NoneType object.
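For instance, here is a minimal sketch of what the plain requests + bs4 attempt looks like on such a page (the page and the 'nameSearch' id are the same ones used in the full program below; this snippet only illustrates the failure, it is not the recommended approach):

import requests
from bs4 import BeautifulSoup

# same naukri.com page as in the Selenium program below
url = "https://www.naukri.com/top-jobs-by-designations"

# requests only fetches the raw HTML; the page's JavaScript is never executed
page = requests.get(url)
soup = BeautifulSoup(page.text, "html.parser")

# this div is generated by JavaScript, so it is missing from the static HTML
all_divs = soup.find('div', {'id': 'nameSearch'})
print(all_divs)   # None -- calling find_all() on it would raise an AttributeError

The Selenium-based program below avoids this by letting a real browser execute the JS first and only then handing the rendered HTML to bs4.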
#### This program scrapes naukri.com's page and gives our result as a #### list of all the job_profiles which are currently present there. import requestsfrom bs4 import BeautifulSoupfrom selenium import webdriverfrom selenium.webdriver.common.keys import Keysimport time #url of the page we want to scrapeurl = "https://www.naukri.com/top-jobs-by-designations# desigtop600" # initiating the webdriver. Parameter includes the path of the webdriver.driver = webdriver.Chrome('./chromedriver') driver.get(url) # this is just to ensure that the page is loadedtime.sleep(5) html = driver.page_source # this renders the JS code and stores all# of the information in static HTML code. # Now, we could simply apply bs4 to html variablesoup = BeautifulSoup(html, "html.parser")all_divs = soup.find('div', {'id' : 'nameSearch'})job_profiles = all_divs.find_all('a') # printing top ten job profilescount = 0for job_profile in job_profiles : print(job_profile.text) count = count + 1 if(count == 10) : break driver.close() # closing the webdriver
Here’s the video of the scraper in action : Working_scraper_video
Output of the code :
Project
Python
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here. | [
{
"code": null,
"e": 52,
"s": 24,
"text": "\n05 Sep, 2020"
},
{
"code": null,
"e": 476,
"s": 52,
"text": "To scrape content from a static page, we use BeautifulSoup as our package for scraping, and it works flawlessly for static pages. We use requests to load page into our python script. Now, if the page we are trying to load is dynamic in nature and we request this page by requests library, it would send the JS code to be executed locally. Requests package does not execute this JS code and just gives it as the page source."
},
{
"code": null,
"e": 674,
"s": 476,
"text": "BeautifulSoup does not catch the interactions with DOM via Java Script. Let’s suppose, if you have a table that is generated by JS. BeautifulSoup will not be able to capture it, while Selenium can."
},
{
"code": null,
"e": 819,
"s": 674,
"text": "If there was just a need to scrape static websites, we would’ve used just bs4. But, for dynamically generated webpages, we use selenium.Selenium"
},
{
"code": null,
"e": 1099,
"s": 819,
"text": "Selenium is a free (open-source) automated testing framework used to validate web applications across different browsers and platforms. You can use multiple programming languages like Java, C#, Python etc to create Selenium Test Scripts. Here, we use Python as our main language."
},
{
"code": null,
"e": 1128,
"s": 1099,
"text": "First up, the installation :"
},
{
"code": null,
"e": 1159,
"s": 1128,
"text": "1) Selenium bindings in python"
},
{
"code": null,
"e": 1181,
"s": 1159,
"text": "pip install selenium\n"
},
{
"code": null,
"e": 1493,
"s": 1181,
"text": "2) Web driversSelenium requires a web driver to interface with the chosen browser.Web drivers is a package to interact with web browser. It interacts with the web browser or a remote web server through a wire protocol which is common to all. You can check out and install the web drivers of your browser choice."
},
{
"code": null,
"e": 1695,
"s": 1493,
"text": "Chrome: https://sites.google.com/a/chromium.org/chromedriver/downloads\nFirefox: https://github.com/mozilla/geckodriver/releases\nSafari: https://webkit.org/blog/6900/webdriver-support-in-safari-10/ \n"
},
{
"code": null,
"e": 1709,
"s": 1695,
"text": "Beautifulsoup"
},
{
"code": null,
"e": 1957,
"s": 1709,
"text": "Beautifulsoup is a Python library for pulling data out of HTML and XML files. It works with your favorite parser to provide idiomatic ways of navigating, searching, and modifying the parse tree. It commonly saves programmers hours or days of work."
},
{
"code": null,
"e": 2054,
"s": 1957,
"text": "To use beautiful soup, we have this wonderful binding of it in python :1) BS4 bindings in python"
},
{
"code": null,
"e": 2071,
"s": 2054,
"text": "pip install bs4\n"
},
{
"code": null,
"e": 2163,
"s": 2071,
"text": "Let’s suppose the site is dynamic and simple scraping leads to returning a Nonetype object."
},
{
"code": "#### This program scrapes naukri.com's page and gives our result as a #### list of all the job_profiles which are currently present there. import requestsfrom bs4 import BeautifulSoupfrom selenium import webdriverfrom selenium.webdriver.common.keys import Keysimport time #url of the page we want to scrapeurl = \"https://www.naukri.com/top-jobs-by-designations# desigtop600\" # initiating the webdriver. Parameter includes the path of the webdriver.driver = webdriver.Chrome('./chromedriver') driver.get(url) # this is just to ensure that the page is loadedtime.sleep(5) html = driver.page_source # this renders the JS code and stores all# of the information in static HTML code. # Now, we could simply apply bs4 to html variablesoup = BeautifulSoup(html, \"html.parser\")all_divs = soup.find('div', {'id' : 'nameSearch'})job_profiles = all_divs.find_all('a') # printing top ten job profilescount = 0for job_profile in job_profiles : print(job_profile.text) count = count + 1 if(count == 10) : break driver.close() # closing the webdriver",
"e": 3227,
"s": 2163,
"text": null
},
{
"code": null,
"e": 3293,
"s": 3227,
"text": "Here’s the video of the scraper in action : Working_scraper_video"
},
{
"code": null,
"e": 3314,
"s": 3293,
"text": "Output of the code :"
},
{
"code": null,
"e": 3322,
"s": 3314,
"text": "Project"
},
{
"code": null,
"e": 3329,
"s": 3322,
"text": "Python"
}
]
|
Introduction to MS-Excel | 01 Jun, 2021
MS-EXCEL is a part of the Microsoft Office suite of software. It is an electronic spreadsheet with numerous rows and columns, used for organizing data, representing data graphically, and performing different calculations. It consists of 1048576 rows and 16384 columns; a row and a column together make a cell. Each cell has an address defined by the column name and row number, for example A1, D2, etc.; this is also known as a cell reference.
Cell references: The address or name of a cell or a range of cells is known as Cell reference. It helps the software to identify the cell from where the data/value is to be used in the formula. We can reference the cell of other worksheets and also of other programs.
Referencing the cell of other worksheets is known as External referencing.
Referencing the cell of other programs is known as Remote referencing.
There are three types of cell references in Excel:
Relative reference.
Absolute reference.
Mixed reference.
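For example (standard Excel notation): in the formula =A1+B1, both A1 and B1 are relative references, so they shift when the formula is copied to another cell; =$A$1 is an absolute reference that always points to cell A1; and =$A1 or =A$1 are mixed references, where only the column (or only the row) is locked with the $ sign.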
The Ribbon in MS-Excel is the topmost row of tabs that provide the user with different facilities/functionalities. These tabs are:
Home Tab: It provides the basic facilities like changing the font, size of text, editing the cells in the spreadsheet, autosum, etc.
Insert Tab: It provides the facilities like inserting tables, pivot tables, images, clip art, charts, links, etc.
Page layout: It provides all the facilities related to the spreadsheet, like margins, orientation, height, width, background, etc. The worksheet appearance will be the same in the hard copy as well.
Formulas: It is a package of different in-built formulas/functions which can be used by user just by selecting the cell or range of cells for values.
Data: The Data Tab helps to perform different operations on a vast set of data like analysis through what-if analysis tools and many other data analysis tools, removing duplicate data, transpose the row and column, etc. It also helps to access data(s) from different sources as well, such as from Ms-Access, from web, etc.
Review: This tab provides the facility of thesaurus, checking spellings, translating the text, and helps to protect and share the worksheet and workbook.
View: It contains the commands to manage the view of the workbook, show/hide ruler, gridlines, etc, freezing panes, and adding macros.
In Excel, 3 sheets are already open by default. Now, to add a new sheet:
In the lowermost pane in Excel, you can find a button.
Click on that button to add a new sheet.
We can also achieve the same by Right-clicking on the sheet number before which you want to insert the sheet.
Click on Insert.
Select Worksheet.
Click OK.
On the lowermost pane in Excel, you can find the name of the current sheet you have opened.
On the left side of this sheet, the names of the previous sheets are also available; for example, Sheet 2 and Sheet 3 will be available to the left of Sheet 4. Click on the number/name of the sheet you want to open and it will open in the same workbook.
For example, we are on Sheet 4, and we want to open Sheet 2 then simply just click on Sheet2 to open it.
You can easily manage the spreadsheets in Excel simply by :
Simply navigating between the sheets.
Right-clicking on the sheet name or number on the pane.
Choose among the various options available, like move, copy, rename, add, delete, etc.
You can move/copy your sheet to other workbooks as well just by selecting the workbook in the To workbook and the sheet before you want to insert the sheet in Before sheet.
Click on the Office Button or the File tab.
Click on Save As option.
Write the desired name of your file.
Click OK.
Click on the Review tab on the Ribbon.
Click on the share workbook (under Changes group).
If you want to protect your workbook and then make it available for another user then click on Protect and Share Workbook option.
Now check the option “Allow changes by more than one user at the same time. This also allows workbook merging” in the Share Workbook dialog box.
Many other options are also available in the Advanced like track, update changes.
Click OK.
Ctrl+N: To open a new workbook.
Ctrl+O: To open a saved workbook.
Ctrl+S: To save a workbook.
Ctrl+C: To copy the selected cells.
Ctrl+V: To paste the copied cells.
Ctrl+X: To cut the selected cells.
Ctrl+W: To close the workbook.
Delete: To remove all the contents from the cell.
Ctrl+P: To print the workbook.
Ctrl+Z: To undo.
Picked
Excel
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here. | [
{
"code": null,
"e": 53,
"s": 25,
"text": "\n01 Jun, 2021"
},
{
"code": null,
"e": 479,
"s": 53,
"text": "MS-EXCEL is a part of Microsoft Office suite software. It is an electronic spreadsheet with numerous rows and columns, used for organizing data, graphically represent data(s), and performing different calculations. It consists of 1048576 rows and 16383 columns, a row and column together make a cell. Each cell has an address defined by column name and row number example A1, D2, etc. this is also known as a cell reference."
},
{
"code": null,
"e": 747,
"s": 479,
"text": "Cell references: The address or name of a cell or a range of cells is known as Cell reference. It helps the software to identify the cell from where the data/value is to be used in the formula. We can reference the cell of other worksheets and also of other programs."
},
{
"code": null,
"e": 822,
"s": 747,
"text": "Referencing the cell of other worksheets is known as External referencing."
},
{
"code": null,
"e": 893,
"s": 822,
"text": "Referencing the cell of other programs is known as Remote referencing."
},
{
"code": null,
"e": 946,
"s": 893,
"text": "There are three types of cell references in Excel: "
},
{
"code": null,
"e": 1001,
"s": 946,
"text": "Relative reference.Absolute reference.Mixed reference."
},
{
"code": null,
"e": 1021,
"s": 1001,
"text": "Relative reference."
},
{
"code": null,
"e": 1041,
"s": 1021,
"text": "Absolute reference."
},
{
"code": null,
"e": 1058,
"s": 1041,
"text": "Mixed reference."
},
{
"code": null,
"e": 1189,
"s": 1058,
"text": "The Ribbon in MS-Excel is the topmost row of tabs that provide the user with different facilities/functionalities. These tabs are:"
},
{
"code": null,
"e": 2389,
"s": 1189,
"text": "Home Tab: It provides the basic facilities like changing the font, size of text, editing the cells in the spreadsheet, autosum, etc.Insert Tab: It provides the facilities like inserting tables, pivot tables, images, clip art, charts, links, etc.Page layout: It provides all the facilities related to the spreadsheet-like margins, orientation, height, width, background etc. The worksheet appearance will be the same in the hard copy as well.Formulas: It is a package of different in-built formulas/functions which can be used by user just by selecting the cell or range of cells for values.Data: The Data Tab helps to perform different operations on a vast set of data like analysis through what-if analysis tools and many other data analysis tools, removing duplicate data, transpose the row and column, etc. It also helps to access data(s) from different sources as well, such as from Ms-Access, from web, etc.Review: This tab provides the facility of thesaurus, checking spellings, translating the text, and helps to protect and share the worksheet and workbook.View: It contains the commands to manage the view of the workbook, show/hide ruler, gridlines, etc, freezing panes, and adding macros."
},
{
"code": null,
"e": 2522,
"s": 2389,
"text": "Home Tab: It provides the basic facilities like changing the font, size of text, editing the cells in the spreadsheet, autosum, etc."
},
{
"code": null,
"e": 2636,
"s": 2522,
"text": "Insert Tab: It provides the facilities like inserting tables, pivot tables, images, clip art, charts, links, etc."
},
{
"code": null,
"e": 2833,
"s": 2636,
"text": "Page layout: It provides all the facilities related to the spreadsheet-like margins, orientation, height, width, background etc. The worksheet appearance will be the same in the hard copy as well."
},
{
"code": null,
"e": 2983,
"s": 2833,
"text": "Formulas: It is a package of different in-built formulas/functions which can be used by user just by selecting the cell or range of cells for values."
},
{
"code": null,
"e": 3306,
"s": 2983,
"text": "Data: The Data Tab helps to perform different operations on a vast set of data like analysis through what-if analysis tools and many other data analysis tools, removing duplicate data, transpose the row and column, etc. It also helps to access data(s) from different sources as well, such as from Ms-Access, from web, etc."
},
{
"code": null,
"e": 3460,
"s": 3306,
"text": "Review: This tab provides the facility of thesaurus, checking spellings, translating the text, and helps to protect and share the worksheet and workbook."
},
{
"code": null,
"e": 3595,
"s": 3460,
"text": "View: It contains the commands to manage the view of the workbook, show/hide ruler, gridlines, etc, freezing panes, and adding macros."
},
{
"code": null,
"e": 3669,
"s": 3595,
"text": "In Excel 3 sheets are already opened by default, now to add a new sheet :"
},
{
"code": null,
"e": 3724,
"s": 3669,
"text": "In the lowermost pane in Excel, you can find a button."
},
{
"code": null,
"e": 3765,
"s": 3724,
"text": "Click on that button to add a new sheet."
},
{
"code": null,
"e": 3875,
"s": 3765,
"text": "We can also achieve the same by Right-clicking on the sheet number before which you want to insert the sheet."
},
{
"code": null,
"e": 3892,
"s": 3875,
"text": "Click on Insert."
},
{
"code": null,
"e": 3910,
"s": 3892,
"text": "Select Worksheet."
},
{
"code": null,
"e": 3920,
"s": 3910,
"text": "Click OK."
},
{
"code": null,
"e": 4012,
"s": 3920,
"text": "On the lowermost pane in Excel, you can find the name of the current sheet you have opened."
},
{
"code": null,
"e": 4255,
"s": 4012,
"text": "On the left side of this sheet, the name of previous sheets are also available like Sheet 2, Sheet 3 will be available at the left of sheet4, click on the number/name of the sheet you want to open and the sheet will open in the same workbook."
},
{
"code": null,
"e": 4360,
"s": 4255,
"text": "For example, we are on Sheet 4, and we want to open Sheet 2 then simply just click on Sheet2 to open it."
},
{
"code": null,
"e": 4420,
"s": 4360,
"text": "You can easily manage the spreadsheets in Excel simply by :"
},
{
"code": null,
"e": 4458,
"s": 4420,
"text": "Simply navigating between the sheets."
},
{
"code": null,
"e": 4514,
"s": 4458,
"text": "Right-clicking on the sheet name or number on the pane."
},
{
"code": null,
"e": 4600,
"s": 4514,
"text": "Choose among the various options available like, move, copy, rename, add, delete etc."
},
{
"code": null,
"e": 4773,
"s": 4600,
"text": "You can move/copy your sheet to other workbooks as well just by selecting the workbook in the To workbook and the sheet before you want to insert the sheet in Before sheet."
},
{
"code": null,
"e": 4886,
"s": 4773,
"text": "Click on the Office Button or the File tab.Click on Save As option.Write the desired name of your file.Click OK."
},
{
"code": null,
"e": 4930,
"s": 4886,
"text": "Click on the Office Button or the File tab."
},
{
"code": null,
"e": 4955,
"s": 4930,
"text": "Click on Save As option."
},
{
"code": null,
"e": 4992,
"s": 4955,
"text": "Write the desired name of your file."
},
{
"code": null,
"e": 5002,
"s": 4992,
"text": "Click OK."
},
{
"code": null,
"e": 5454,
"s": 5002,
"text": "Click on the Review tab on the Ribbon.Click on the share workbook (under Changes group).If you want to protect your workbook and then make it available for another user then click on Protect and Share Workbook option.Now check the option “Allow changes by more than one user at the same time. This also allows workbook merging” in the Share Workbook dialog box.Many other options are also available in the Advanced like track, update changes.Click OK."
},
{
"code": null,
"e": 5493,
"s": 5454,
"text": "Click on the Review tab on the Ribbon."
},
{
"code": null,
"e": 5544,
"s": 5493,
"text": "Click on the share workbook (under Changes group)."
},
{
"code": null,
"e": 5674,
"s": 5544,
"text": "If you want to protect your workbook and then make it available for another user then click on Protect and Share Workbook option."
},
{
"code": null,
"e": 5819,
"s": 5674,
"text": "Now check the option “Allow changes by more than one user at the same time. This also allows workbook merging” in the Share Workbook dialog box."
},
{
"code": null,
"e": 5901,
"s": 5819,
"text": "Many other options are also available in the Advanced like track, update changes."
},
{
"code": null,
"e": 5911,
"s": 5901,
"text": "Click OK."
},
{
"code": null,
"e": 6231,
"s": 5911,
"text": "Ctrl+N: To open a new workbook.Ctrl+O: To open a saved workbook.Ctrl+S: To save a workbook.Ctrl+C: To copy the selected cells.Ctrl+V: To paste the copied cells.Ctrl+X: To cut the selected cells.Ctrl+W: To close the workbook.Delete: To remove all the contents from the cell.Ctrl+P: To print the workbook.Ctrl+Z: To undo."
},
{
"code": null,
"e": 6263,
"s": 6231,
"text": "Ctrl+N: To open a new workbook."
},
{
"code": null,
"e": 6297,
"s": 6263,
"text": "Ctrl+O: To open a saved workbook."
},
{
"code": null,
"e": 6325,
"s": 6297,
"text": "Ctrl+S: To save a workbook."
},
{
"code": null,
"e": 6361,
"s": 6325,
"text": "Ctrl+C: To copy the selected cells."
},
{
"code": null,
"e": 6396,
"s": 6361,
"text": "Ctrl+V: To paste the copied cells."
},
{
"code": null,
"e": 6431,
"s": 6396,
"text": "Ctrl+X: To cut the selected cells."
},
{
"code": null,
"e": 6462,
"s": 6431,
"text": "Ctrl+W: To close the workbook."
},
{
"code": null,
"e": 6512,
"s": 6462,
"text": "Delete: To remove all the contents from the cell."
},
{
"code": null,
"e": 6543,
"s": 6512,
"text": "Ctrl+P: To print the workbook."
},
{
"code": null,
"e": 6560,
"s": 6543,
"text": "Ctrl+Z: To undo."
},
{
"code": null,
"e": 6567,
"s": 6560,
"text": "Picked"
},
{
"code": null,
"e": 6573,
"s": 6567,
"text": "Excel"
}
]
|
PYGLET – Sprite Position | 19 Sep, 2021
In this article we will see how we can access the position of a sprite in the PYGLET module in Python. Pyglet is an easy-to-use but powerful library for developing visually rich GUI applications like games and multimedia. A window is a “heavyweight” object occupying operating system resources. Windows may appear as floating regions or can be set to fill an entire screen (fullscreen). A sprite is an instance of an image displayed on-screen. Multiple sprites can display the same image at different positions on the screen. Sprites can also be scaled larger or smaller, rotated at any angle and drawn at a fractional opacity. The image is loaded with the help of the image module of pyglet. The position property gives the sprite's position inside the window as a distance in pixels from the x and y axes. We can create a window and sprite object with the help of the commands given below
# creating a window
window = pyglet.window.Window(width, height, title)
# creating a sprite object
sprite = pyglet.sprite.Sprite(img, x, y)
In order to access the position of the sprite, we use the position attribute of the sprite object.
Syntax : sprite.position
Argument : It takes no argument
Return : It returns a tuple
Below is the implementation
Python3
# importing pyglet module
import pyglet
import pyglet.window.key as key

# width of window
width = 500

# height of window
height = 500

# caption i.e title of the window
title = "Geeksforgeeks"

# creating a window
window = pyglet.window.Window(width, height, title)

# text
text = "Welcome to GeeksforGeeks"

# creating label with following properties
# font = cooper
# position = 250, 150
# anchor position = center
label = pyglet.text.Label(text,
                          font_name ='Cooper',
                          font_size = 16,
                          x = 250, y = 150,
                          anchor_x ='center',
                          anchor_y ='center')

# creating a batch
batch = pyglet.graphics.Batch()

# loading geeksforgeeks image
image = pyglet.image.load('gfg.png')

# creating sprite object
# it is instance of an image displayed on-screen
sprite = pyglet.sprite.Sprite(image, x = 200, y = 230)

# on draw event
@window.event
def on_draw():

    # clear the window
    window.clear()

    # draw the label
    label.draw()

    # draw the image on screen
    sprite.draw()

# key press event
@window.event
def on_key_press(symbol, modifier):

    # key "C" get press
    if symbol == key.C:

        # printing the message
        print("Key : C is pressed")

# image for icon
img = image = pyglet.resource.image("gfg.png")

# setting image as icon
window.set_icon(img)

# accessing position of the sprite
value = sprite.position

# creating text from sprite
text = "Position : " + str(value)

# setting text to the label
label.text = text

# start running the application
pyglet.app.run()
Output :
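The position attribute can also be written to move a sprite. Below is a minimal sketch that reuses the same window and gfg.png image as above (the exact tuple returned may differ slightly between pyglet versions, so treat it as an illustration rather than the article's implementation):
import pyglet

window = pyglet.window.Window(500, 500, "Geeksforgeeks")
image = pyglet.image.load('gfg.png')
sprite = pyglet.sprite.Sprite(image, x = 200, y = 230)

# reading the position returns a tuple
print(sprite.position)

# the coordinates can also be read or updated individually
sprite.x += 50
sprite.y -= 30
print(sprite.position)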
singghakshay
anikakapoor
Python-gui
Python-Pyglet
Python
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here. | [
{
"code": null,
"e": 28,
"s": 0,
"text": "\n19 Sep, 2021"
},
{
"code": null,
"e": 890,
"s": 28,
"text": "In this article we will see how we can access position of the sprite in PYGLET module in python. Pyglet is easy to use but powerful library for developing visually rich GUI applications like games, multimedia etc. A window is a “heavyweight” object occupying operating system resources. Windows may appear as floating regions or can be set to fill an entire screen (fullscreen). A sprite is an instance of an image displayed on-screen. Multiple sprites can display the same image at different positions on the screen. Sprites can also be scaled larger or smaller, rotated at any angle and drawn at a fractional opacity. Image is loaded with the help of image module of pyglet. Potion property tells the sprite position inside the window in form of pixels distance from x and y axis.We can create a window and sprite object with the help of commands given below "
},
{
"code": null,
"e": 1031,
"s": 890,
"text": "# creating a window\nwindow = pyglet.window.Window(width, height, title)\n\n# creating a sprite object\nsprite = pyglet.sprite.Sprite(img, x, y)"
},
{
"code": null,
"e": 1184,
"s": 1031,
"text": "In order to create window we use position attribute with sprite objectSyntax : sprite.positionArgument : It takes no argumentReturn : It returns tuple "
},
{
"code": null,
"e": 1213,
"s": 1184,
"text": "Below is the implementation "
},
{
"code": null,
"e": 1221,
"s": 1213,
"text": "Python3"
},
{
"code": "# importing pyglet moduleimport pygletimport pyglet.window.key as key # width of windowwidth = 500 # height of windowheight = 500 # caption i.e title of the windowtitle = \"Geeksforgeeks\" # creating a windowwindow = pyglet.window.Window(width, height, title) # text text = \"Welcome to GeeksforGeeks\" # creating label with following properties# font = cooper# position = 250, 150# anchor position = centerlabel = pyglet.text.Label(text, font_name ='Cooper', font_size = 16, x = 250, y = 150, anchor_x ='center', anchor_y ='center') # creating a batchbatch = pyglet.graphics.Batch() # loading geeksforgeeks imageimage = pyglet.image.load('gfg.png') # creating sprite object# it is instance of an image displayed on-screensprite = pyglet.sprite.Sprite(image, x = 200, y = 230) # on draw [email protected] on_draw(): # clear the window window.clear() # draw the label label.draw() # draw the image on screen sprite.draw() # key press event @window.eventdef on_key_press(symbol, modifier): # key \"C\" get press if symbol == key.C: # printing the message print(\"Key : C is pressed\") # image for iconimg = image = pyglet.resource.image(\"gfg.png\") # setting image as iconwindow.set_icon(img) # accessing position of the spritevalue = sprite.position # creating text from spritetext = \"Position : \" + str(value) # setting text to the labellabel.text = text # start running the applicationpyglet.app.run()",
"e": 2876,
"s": 1221,
"text": null
},
{
"code": null,
"e": 2886,
"s": 2876,
"text": "Output : "
},
{
"code": null,
"e": 2899,
"s": 2886,
"text": "singghakshay"
},
{
"code": null,
"e": 2911,
"s": 2899,
"text": "anikakapoor"
},
{
"code": null,
"e": 2922,
"s": 2911,
"text": "Python-gui"
},
{
"code": null,
"e": 2936,
"s": 2922,
"text": "Python-Pyglet"
},
{
"code": null,
"e": 2943,
"s": 2936,
"text": "Python"
}
]
|
Operators in Julia | 09 Mar, 2020
Operators in Julia are the mathematical symbols that are used to perform operations on variables and values. These symbols are used to carry out arithmetic and logical computations. Variables on which the operators perform operations are termed as Operands. In other words, we can say that an operator operates on the operands. For example, consider the below statement:
c = a + b;
Here, ‘+’ is the operator known as the addition operator and ‘a’ and ‘b’ are operands. The addition operator tells the compiler to add both of the operands ‘a’ and ‘b’.
Operators in Julia are of Six types:
Arithmetic Operators
Bitwise Operators
Logical Operators
Assignment Operators
Vectorized ‘dot’ Operators
Relational Operators
Arithmetic operators are used to perform arithmetic/mathematical operations on operands. These operators include the process of addition, subtraction, multiplication, division, etc. Examples: (+, -, *, /, %, +x, -x). Arithmetic operators are of two types:
Unary Operators: Operators that operate or work with a single operand are unary operators. For example: (+x, -x) i.e. unary plus and unary minus.
Binary Operators: Operators that operate or work with two operands are binary operators. For example: (+, –, *, /)
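A minimal sketch of the unary forms mentioned above (+x, -x), with a value chosen only for illustration; the full example that follows covers the binary forms:
# Examples of Unary Operators
a = 9

# Unary plus (identity operation)
println(+a)

# Unary minus (negation)
println(-a)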
# Examples of Arithmetic Operator
a = 9
b = 4

println("a = ", a)
println("b = ", b)

# Addition of numbers
add = a + b
println("Binary Addition: ", add)

# Subtraction of numbers
sub = a - b
println("Binary Subtraction: ", sub)

# Multiplication of number
mul = a * b
println("Binary Multiplication: ", mul)

# Division(float) of number
div1 = a / b
println("Binary Division: ", div1)

# Division(Integer) of number
div2 = a ÷ b
println("Integer Division: ", div2)

# Division(floor) of number
div3 = a \ b
println("Inverse Division: ", div3)

# Power of number
pow = a ^ b
println("Power Operation: ", pow)

# Modulo of both number
mod = a % b
println("Modular Division: ", mod)
Output:
a = 9
b = 4
Binary Addition: 13
Binary Subtraction: 5
Binary Multiplication: 36
Binary Division: 2.25
Integer Division: 2
Inverse Division: 0.4444444444444444
Power Operation: 6561
Modular Division: 1
Bitwise operators are used to perform manipulation of individual bits of a number. They can be used with any of the integer types. Examples: (~, &, |, >>, <<, etc.)
# Examples of Bitwise operators
a = 48
b = 67

# Bitwise NOT operation
println(~a)

# Bitwise AND operation
println(a & b)

# Bitwise OR operation
println(a | b)

# Bitwise XOR operation
println(a ⊻ b)

# Logical right shift operation
println(a >>> 2)

# Bitwise right shift operation
println(a >> 2)

# Bitwise left shift operation
println(a << 2)
Output:
-49
0
115
115
12
12
192
Logical Operators are used to combine two or more conditions/constraints or to complement the evaluation of the original condition under consideration. The result of the operation of a logical operator is a boolean value, either true or false. For example, the logical AND, represented by the ‘&&’ operator in Julia, returns true when both the conditions under consideration are satisfied. Otherwise, it returns false. Therefore, a && b returns true when both a and b are true (i.e. non-zero).
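In practice these operators usually combine the results of comparisons rather than literal Boolean values — a small illustrative sketch (the value of x is arbitrary):
x = 7

# true only when both comparisons hold
println(x > 0 && x < 10)

# true when at least one comparison holds
println(x < 0 || x == 7)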
# Examples of Logical Operator
a = true
b = false

# Print if a and b both are False
println(a && b)

# Print if a or b is True
println(a || b)

# Print if not a is False
println(! a)
Output:
false
true
false
Assignment operators are used to assign value to a variable. The left side operand of the assignment operator is a variable and the right side operand of the assignment operator is a value. The value on the right side must be of the same data-type of the variable on the left side otherwise the compiler will raise an error.
# Examples of Assignment Operator
a = 9
b = 4

println("a = ", a)
println("b = ", b)

# Addition of numbers
a += b
println("Binary Addition: ", a)

# Subtraction of numbers
a -= b
println("Binary Subtraction: ", a)

# Multiplication of number
a *= b
println("Binary Multiplication: ", a)

# Division(float) of number
a /= b
println("Binary Division: ", a)

# Division(Integer) of number
a ÷= b
println("Integer Division: ", a)

# Division(floor) of number
a \= b
println("Inverse Division: ", a)

# Power of number
a ^= b
println("Power Operation: ", a)

# Modulo of both number
a %= b
println("Modular Division: ", a)
Output
a = 9
b = 4
Binary Addition: 13
Binary Subtraction: 9
Binary Multiplication: 36
Binary Division: 9.0
Integer Division: 2.0
Inverse Division: 2.0
Power Operation: 16.0
Modular Division: 0.0
A ‘dot’ operator (.) applies the binary operation it is attached to over an entire array, element by element. For example, the power (^) operator applied to an array, as in [4, 5, 6, 7] ^ 2, will result in an error, because it is not possible to ‘square’ an array directly. Hence, the ‘dot’ operator comes into use. When combined with a binary operator, as in .^, it performs the operation on each element of the array. For example, [4, 5, 6, 7] .^ 2 results in [4^2, 5^2, 6^2, 7^2]. Similarly, the dot can be combined with other binary operators like .=, .+, .-, etc.
Example:
# Julia program to illustrate
# use of 'dot' operator

# Creating array
A = [4, 5, 6, 7]

# Performing exponent binary operation
A = A ^ 2

# Performing exponent using 'dot' operation
A = A .^ 2

println(A)
Above code will generate an error when the exponent is performed without a ‘dot’ operator. This is because exponent can’t be performed on the array of elements.
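The same dotted form works with the other binary operators mentioned above — a small sketch with arrays chosen arbitrarily for illustration:
A = [4, 5, 6, 7]
B = [1, 2, 3, 4]

# element-wise addition, prints [5, 7, 9, 11]
println(A .+ B)

# element-wise subtraction, prints [3, 3, 3, 3]
println(A .- B)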
These operators are used to check for relations like equality, greater than, less than. They return boolean result after the comparison and are extensively used in looping statements as well as conditional if-else statements.
Example:
# Examples of Relational Operators
a = 13
b = 33

# a > b is False
println(a > b)

# a < b is True
println(a < b)

# a == b is False
println(a == b)

# a != b is True
println(a != b)

# a >= b is False
println(a >= b)

# a <= b is True
println(a <= b)
Output:
false
true
false
true
false
true
Julia-Basics
Julia
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Vectors in Julia
Getting rounded value of a number in Julia - round() Method
Storing Output on a File in Julia
Reshaping array dimensions in Julia | Array reshape() Method
Manipulating matrices in Julia
Exception handling in Julia
Tuples in Julia
while loop in Julia
Comments in Julia
Creating array with repeated elements in Julia - repeat() Method | [
{
"code": null,
"e": 28,
"s": 0,
"text": "\n09 Mar, 2020"
},
{
"code": null,
"e": 395,
"s": 28,
"text": "Operators in Julia are the mathematical symbols that are used to perform operations on variables and values. These symbols are used to carry out arithmetic and logical computations. Variables on which the operators perform operations are termed as Operands. In other words, we can say that an operator operates the operands.For example, consider the below statement:"
},
{
"code": null,
"e": 406,
"s": 395,
"text": "c = a + b;"
},
{
"code": null,
"e": 575,
"s": 406,
"text": "Here, ‘+’ is the operator known as the addition operator and ‘a’ and ‘b’ are operands. The addition operator tells the compiler to add both of the operands ‘a’ and ‘b’."
},
{
"code": null,
"e": 612,
"s": 575,
"text": "Operators in Julia are of Six types:"
},
{
"code": null,
"e": 633,
"s": 612,
"text": "Arithmetic Operators"
},
{
"code": null,
"e": 651,
"s": 633,
"text": "Bitwise Operators"
},
{
"code": null,
"e": 669,
"s": 651,
"text": "Logical Operators"
},
{
"code": null,
"e": 690,
"s": 669,
"text": "Assignment Operators"
},
{
"code": null,
"e": 717,
"s": 690,
"text": "Vectorized ‘dot’ Operators"
},
{
"code": null,
"e": 738,
"s": 717,
"text": "Relational Operators"
},
{
"code": null,
"e": 993,
"s": 738,
"text": "Arithmetic operators are used to perform arithmetic/mathematical operations on operands. These operators include the process of addition, subtraction, multiplication, division, etc. Examples: (+, -, *, /, %, +x, -x).Arithmetic operators are of two types:"
},
{
"code": null,
"e": 1139,
"s": 993,
"text": "Unary Operators: Operators that operate or work with a single operand are unary operators. For example: (+x, -x) i.e. unary plus and unary minus."
},
{
"code": null,
"e": 1254,
"s": 1139,
"text": "Binary Operators: Operators that operate or work with two operands are binary operators. For example: (+, –, *, /)"
},
{
"code": "# Examples of Arithmetic Operator a = 9b = 4 println(\"a = \", a)println(\"b = \", b) # Addition of numbers add = a + b println(\"Binary Addition: \", add) # Subtraction of numbers sub = a - b println(\"Binary Subtraction: \", sub) # Multiplication of number mul = a * b println(\"Binary Multiplication: \", mul) # Division(float) of number div1 = a / b println(\"Binary Division: \", div1) # Division(Integer) of number div2 = a ÷ b println(\"Integer Division: \", div2) # Division(floor) of number div3 = a \\ b println(\"Inverse Division: \", div3) # Power of number pow = a ^ b println(\"Power Operation: \", pow) # Modulo of both number mod = a % b println(\"Modular Division: \", mod) ",
"e": 1946,
"s": 1254,
"text": null
},
{
"code": null,
"e": 1954,
"s": 1946,
"text": "Output:"
},
{
"code": null,
"e": 2155,
"s": 1954,
"text": "a = 9\nb = 4\nBinary Addition: 13\nBinary Subtraction: 5\nBinary Multiplication: 36\nBinary Division: 2.25\nInteger Division: 2\nInverse Division: 0.4444444444444444\nPower Operation: 6561\nModular Division: 1"
},
{
"code": null,
"e": 2320,
"s": 2155,
"text": "Bitwise operators are used to perform manipulation of individual bits of a number. They can be used with any of the integer types. Examples: (~, &, |, >>, <<, etc.)"
},
{
"code": "# Examples of Bitwise operatorsa = 48b = 67 # Bitwise NOT operation println(~a) # Bitwise AND operation println(a & b) # Bitwise OR operationprintln(a | b) # Bitwise XOR operation println(a ? b) # Logical right shift operation println(a >>> 2) # Bitwise right shift operation println(a >> 2) # Bitwise left shift operation println(a << 2)",
"e": 2667,
"s": 2320,
"text": null
},
{
"code": null,
"e": 2675,
"s": 2667,
"text": "Output:"
},
{
"code": null,
"e": 2700,
"s": 2675,
"text": "-49\n0\n115\n115\n12\n12\n192\n"
},
{
"code": null,
"e": 3186,
"s": 2700,
"text": "Logical Operators are used to combine two or more conditions/constraints or to complement the evaluation of the original condition into consideration. The result of the operation of a logical operator is a boolean value either true or false. For example, the logical AND represented as ‘&&’ operator in Julia returns true when both the conditions under consideration are satisfied. Otherwise, it returns false. Therefore, a && b returns true when both a and b are true (i.e. non-zero)."
},
{
"code": "# Examples of Logical Operatora = trueb = false # Print if a and b both are Falseprintln(a && b) # Print if a or b is Trueprintln(a || b) # Print if not a is Falseprintln(! a)",
"e": 3365,
"s": 3186,
"text": null
},
{
"code": null,
"e": 3373,
"s": 3365,
"text": "Output:"
},
{
"code": null,
"e": 3391,
"s": 3373,
"text": "false\ntrue\nfalse\n"
},
{
"code": null,
"e": 3716,
"s": 3391,
"text": "Assignment operators are used to assign value to a variable. The left side operand of the assignment operator is a variable and the right side operand of the assignment operator is a value. The value on the right side must be of the same data-type of the variable on the left side otherwise the compiler will raise an error."
},
{
"code": "# Examples of Assignment Operator a = 9b = 4 println(\"a = \", a)println(\"b = \", b) # Addition of numbers a += b println(\"Binary Addition: \", a) # Subtraction of numbers a -= b println(\"Binary Subtraction: \", a) # Multiplication of number a *= b println(\"Binary Multiplication: \", a) # Division(float) of number a /= b println(\"Binary Division: \", a) # Division(Integer) of number a ÷= b println(\"Integer Division: \", a) # Division(floor) of number a \\= b println(\"Inverse Division: \", a) # Power of number a ^= b println(\"Power Operation: \", a) # Modulo of both number a %= b println(\"Modular Division: \", a) ",
"e": 4346,
"s": 3716,
"text": null
},
{
"code": null,
"e": 4353,
"s": 4346,
"text": "Output"
},
{
"code": null,
"e": 4543,
"s": 4353,
"text": "a = 9\nb = 4\nBinary Addition: 13\nBinary Subtraction: 9\nBinary Multiplication: 36\nBinary Division: 9.0\nInteger Division: 2.0\nInverse Division: 2.0\nPower Operation: 16.0\nModular Division: 0.0\n"
},
{
"code": null,
"e": 5133,
"s": 4543,
"text": "A ‘dot’ operator(.) is used to perform a binary operation with which it is used on the entire array, element by element, one by one. For ex- A power(^) operator if applied on an array like [4, 5, 6, 7] ^ 2, will result in an error, because it is not possible to perform ‘square’ of an array. Hence, the ‘dot’ operator comes into use. When used with the binary operation like .^ it will perform the operation on each element of the array. For ex- [4, 5, 6, 7] ^ 2 will result in [4^2, 5^2, 6^2, 7^2].Similarly, this dot operator can be used with other binary operators like .=, .+, .-, etc."
},
{
"code": null,
"e": 5142,
"s": 5133,
"text": "Example:"
},
{
"code": "# Julia program to illustrate# use of 'dot' operator # Creating arrayA = [4, 5, 6, 7] # Performing exponent binary operationA = A ^ 2 # Performing exponent using 'dot' operationA = A .^ 2 println(A)",
"e": 5344,
"s": 5142,
"text": null
},
{
"code": null,
"e": 5505,
"s": 5344,
"text": "Above code will generate an error when the exponent is performed without a ‘dot’ operator. This is because exponent can’t be performed on the array of elements."
},
{
"code": null,
"e": 5731,
"s": 5505,
"text": "These operators are used to check for relations like equality, greater than, less than. They return boolean result after the comparison and are extensively used in looping statements as well as conditional if-else statements."
},
{
"code": null,
"e": 5740,
"s": 5731,
"text": "Example:"
},
{
"code": "# Examples of Relational Operators a = 13b = 33 # a > b is False println(a > b) # a < b is True println(a < b) # a == b is False println(a == b) # a != b is True println(a != b) # a >= b is False println(a >= b) # a <= b is True println(a <= b) ",
"e": 6009,
"s": 5740,
"text": null
},
{
"code": null,
"e": 6017,
"s": 6009,
"text": "Output:"
},
{
"code": null,
"e": 6051,
"s": 6017,
"text": "false\ntrue\nfalse\ntrue\nfalse\ntrue\n"
},
{
"code": null,
"e": 6064,
"s": 6051,
"text": "Julia-Basics"
},
{
"code": null,
"e": 6070,
"s": 6064,
"text": "Julia"
},
{
"code": null,
"e": 6168,
"s": 6070,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 6185,
"s": 6168,
"text": "Vectors in Julia"
},
{
"code": null,
"e": 6245,
"s": 6185,
"text": "Getting rounded value of a number in Julia - round() Method"
},
{
"code": null,
"e": 6279,
"s": 6245,
"text": "Storing Output on a File in Julia"
},
{
"code": null,
"e": 6340,
"s": 6279,
"text": "Reshaping array dimensions in Julia | Array reshape() Method"
},
{
"code": null,
"e": 6371,
"s": 6340,
"text": "Manipulating matrices in Julia"
},
{
"code": null,
"e": 6399,
"s": 6371,
"text": "Exception handling in Julia"
},
{
"code": null,
"e": 6415,
"s": 6399,
"text": "Tuples in Julia"
},
{
"code": null,
"e": 6435,
"s": 6415,
"text": "while loop in Julia"
},
{
"code": null,
"e": 6453,
"s": 6435,
"text": "Comments in Julia"
}
]
|
SAS | Date Formats and Informats | 30 Jul, 2019
Informats are used to tell SAS how to read a variable, whereas formats are used to tell SAS how to display or write the values of a variable.
Informats are mainly used when you read in sample data created with the CARDS/DATALINES statement, or when you read or import data from an external file (Text/Excel/CSV).
Formats can be used in both DATA steps and PROC steps, whereas informats can be used only in DATA steps.
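For instance, a format can be attached temporarily inside a PROC step — a minimal sketch, assuming a dataset and variable like the sampledata/date1 created in the examples below:
PROC PRINT DATA = sampledata NOOBS;
   FORMAT date1 date9.;
RUN;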
Example: Read Dates in SAS
In the example below, we have used the informats ddmmyy8. and ddmmyy10. to read dates in SAS. It creates a dataset called sampledata which is stored in the WORK library.
DATA sampledata;
   INPUT @6 date1 ddmmyy8. @15 date2 ddmmyy10.;
   CARDS;
     20-07-19 20-07-2019
;
RUN;
The informat ddmmyy8. is used to read the date 20-07-19 and ddmmyy10. to read the date 20-07-2019. In the syntax defined above, 8 and 10 refer to the width of the date.
The created dataset looks like below –
It returns 21750 as it is in the SAS date value form. It is not meaningful if you look at the value. You cannot tell which date it is.
To display the date in the usual date format, use the FORMAT statement.
DATA sampledata;
   INPUT @6 date1 ddmmyy8. @15 date2 ddmmyy10.;
   FORMAT date1 ddmmyy8. date2 ddmmyy10.;
   CARDS;
     20-07-19 20-07-2019
;
RUN;
Output:
How to read DD-MMM-YY format
You can use date11. format for both DD-MMM-YY and DD-MMM-YYYY format.
DATA temp;
   INPUT @6 dt date11.;
   FORMAT dt date11.;
   CARDS;
     20-jul-19
;
PROC PRINT noobs;
RUN;
Output:
SAS Programming
Programming Language
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Decorators with parameters in Python
C# | Data Types
Difference between Shallow and Deep copy of a class
Shallow Copy and Deep Copy in C++
Top 10 Programming Languages to Learn in 2022
Kotlin Array
Advantages and Disadvantages of OOP
Top 10 Fastest Programming Languages
Difference between while and do-while loop in C, C++, Java
Difference between for and while loop in C, C++, Java | [
{
"code": null,
"e": 28,
"s": 0,
"text": "\n30 Jul, 2019"
},
{
"code": null,
"e": 163,
"s": 28,
"text": "Informats is used to tell SAS how to read a variable whereas Formats is used to tell SAS how to display or write values of a variable."
},
{
"code": null,
"e": 345,
"s": 163,
"text": "Informats is basically used when you read in sample data which is being created using CARDS/DATALINES statement or read or import data from either an external file (Text/Excel/CSV)."
},
{
"code": null,
"e": 448,
"s": 345,
"text": "Formats can be used in both Data Steps and PROC Steps whereas Informat can be used only in Data Steps."
},
{
"code": null,
"e": 475,
"s": 448,
"text": "Example: Read Dates in SAS"
},
{
"code": null,
"e": 1063,
"s": 475,
"text": "In the example below, we have used INFORMATS ddmmyy8. and ddymmyy10. to read dates in SAS. It creates a dataset called sampledata which is stored in WORK library.DATA sampledata; INPUT @6 date1 ddmmyy8. @15 date2 ddmmyy10.; CARDS;20-07-19 20-07-2019 ;RUN;The INFORMATS ddmmyy8. is used to read 20-07-19 date and ddmmyy10. to read 20-07-2019 date. In defined syntax above, 8 and 10 refers to width of the date.The created dataset looks like below –It returns 21750 as it is in the SAS date value form. It is not meaningful if you look at the value. You cannot tell which date it is."
},
{
"code": "DATA sampledata; INPUT @6 date1 ddmmyy8. @15 date2 ddmmyy10.; CARDS;20-07-19 20-07-2019 ;RUN;",
"e": 1163,
"s": 1063,
"text": null
},
{
"code": null,
"e": 1318,
"s": 1163,
"text": "The INFORMATS ddmmyy8. is used to read 20-07-19 date and ddmmyy10. to read 20-07-2019 date. In defined syntax above, 8 and 10 refers to width of the date."
},
{
"code": null,
"e": 1357,
"s": 1318,
"text": "The created dataset looks like below –"
},
{
"code": null,
"e": 1492,
"s": 1357,
"text": "It returns 21750 as it is in the SAS date value form. It is not meaningful if you look at the value. You cannot tell which date it is."
},
{
"code": null,
"e": 1704,
"s": 1492,
"text": "To display the date in usual date format, use FORMAT statement.DATA sampledata; INPUT @6 date1 ddmmyy8. @15 date2 ddmmyy10.; FORMAT date1 ddmmyy8. date2 ddmmyy10.; CARDS;20-07-19 20-07-2019 ;RUN;Output:"
},
{
"code": "DATA sampledata; INPUT @6 date1 ddmmyy8. @15 date2 ddmmyy10.; FORMAT date1 ddmmyy8. date2 ddmmyy10.; CARDS;20-07-19 20-07-2019 ;RUN;",
"e": 1846,
"s": 1704,
"text": null
},
{
"code": null,
"e": 1854,
"s": 1846,
"text": "Output:"
},
{
"code": null,
"e": 2061,
"s": 1854,
"text": "How to read DD-MMM-YY formatYou can use date11. format for both DD-MMM-YY and DD-MMM-YYYY format.DATA temp; INPUT @6 dt date11.; FORMAT dt date11.; CARDS;20-jul-19 ; PROC PRINT noobs;RUN;Output:"
},
{
"code": null,
"e": 2131,
"s": 2061,
"text": "You can use date11. format for both DD-MMM-YY and DD-MMM-YYYY format."
},
{
"code": "DATA temp; INPUT @6 dt date11.; FORMAT dt date11.; CARDS;20-jul-19 ; PROC PRINT noobs;RUN;",
"e": 2234,
"s": 2131,
"text": null
},
{
"code": null,
"e": 2242,
"s": 2234,
"text": "Output:"
},
{
"code": null,
"e": 2258,
"s": 2242,
"text": "SAS Programming"
},
{
"code": null,
"e": 2279,
"s": 2258,
"text": "Programming Language"
},
{
"code": null,
"e": 2377,
"s": 2279,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 2414,
"s": 2377,
"text": "Decorators with parameters in Python"
},
{
"code": null,
"e": 2430,
"s": 2414,
"text": "C# | Data Types"
},
{
"code": null,
"e": 2482,
"s": 2430,
"text": "Difference between Shallow and Deep copy of a class"
},
{
"code": null,
"e": 2516,
"s": 2482,
"text": "Shallow Copy and Deep Copy in C++"
},
{
"code": null,
"e": 2562,
"s": 2516,
"text": "Top 10 Programming Languages to Learn in 2022"
},
{
"code": null,
"e": 2575,
"s": 2562,
"text": "Kotlin Array"
},
{
"code": null,
"e": 2611,
"s": 2575,
"text": "Advantages and Disadvantages of OOP"
},
{
"code": null,
"e": 2648,
"s": 2611,
"text": "Top 10 Fastest Programming Languages"
},
{
"code": null,
"e": 2707,
"s": 2648,
"text": "Difference between while and do-while loop in C, C++, Java"
}
]
|
Python – Remove Dictionary Key Words | 10 May, 2020
Sometimes, while working with Python strings, we can have a problem in which we need to remove from a string all the words that appear as keys of a dictionary. This problem can have applications in domains such as web development and day-to-day programming. Let’s discuss certain ways in which this task can be performed.
Method #1 : Using split() + loop + replace()
The combination of the above functions can be used to solve this problem. In this, we convert the string to a list of words using split(), and then replace each word that occurs as a dictionary key with an empty string using replace().
# Python3 code to demonstrate working of
# Remove Dictionary Key Words
# Using split() + loop + replace()

# initializing string
test_str = 'gfg is best for geeks'

# printing original string
print("The original string is : " + str(test_str))

# initializing Dictionary
test_dict = {'geeks' : 1, 'best': 6}

# Remove Dictionary Key Words
# Using split() + loop + replace()
for key in test_dict:
    if key in test_str.split(' '):
        test_str = test_str.replace(key, "")

# printing result
print("The string after replace : " + str(test_str))
The original string is : gfg is best for geeks
The string after replace : gfg is for
Method #2 : Using join() + split()
This is yet another way in which this task can be performed. In this, we split the string into words, keep only the words that are not dictionary keys, and reconstruct the new string using join().
# Python3 code to demonstrate working of
# Remove Dictionary Key Words
# Using join() + split()

# initializing string
test_str = 'gfg is best for geeks'

# printing original string
print("The original string is : " + str(test_str))

# initializing Dictionary
test_dict = {'geeks' : 1, 'best': 6}

# Remove Dictionary Key Words
# Using join() + split()
temp = test_str.split(' ')
temp1 = [word for word in temp if word.lower() not in test_dict]
res = ' '.join(temp1)

# printing result
print("The string after replace : " + str(res))
The original string is : gfg is best for geeks
The string after replace : gfg is for
Python dictionary-programs
Python string-programs
Python
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Different ways to create Pandas Dataframe
Enumerate() in Python
Read a file line by line in Python
Python String | replace()
How to Install PIP on Windows ?
*args and **kwargs in Python
Python Classes and Objects
Python OOPs Concepts
Iterate over a list in Python
Introduction To PYTHON | [
{
"code": null,
"e": 28,
"s": 0,
"text": "\n10 May, 2020"
},
{
"code": null,
"e": 348,
"s": 28,
"text": "Sometimes, while working with Python strings, we can have a problem in which we need to remove all the words from a string which are a part of key of dictionary. This problem can have application in domains such as web development and day-day programming. Let’s discuss certain ways in which this task can be performed."
},
{
"code": null,
"e": 631,
"s": 348,
"text": "Method #1 : Using split() + loop + replace()The combination of above functions can be used to solve this problem. In this, we perform the task of converting string to list of words using split(). Then we perform a replace of word present in string with empty string using replace()."
},
{
"code": "# Python3 code to demonstrate working of # Remove Dictionary Key Words# Using split() + loop + replace() # initializing stringtest_str = 'gfg is best for geeks' # printing original stringprint(\"The original string is : \" + str(test_str)) # initializing Dictionarytest_dict = {'geeks' : 1, 'best': 6} # Remove Dictionary Key Words# Using split() + loop + replace()for key in test_dict: if key in test_str.split(' '): test_str = test_str.replace(key, \"\") # printing result print(\"The string after replace : \" + str(test_str)) ",
"e": 1171,
"s": 631,
"text": null
},
{
"code": null,
"e": 1259,
"s": 1171,
"text": "The original string is : gfg is best for geeks\nThe string after replace : gfg is for \n"
},
{
"code": null,
"e": 1454,
"s": 1261,
"text": "Method #2 : Using join() + split()This is yet another way in which this task can be performed. In this, we reconstruct new string using join(), performing join by the empty string after split."
},
{
"code": "# Python3 code to demonstrate working of # Remove Dictionary Key Words# Using join() + split() # initializing stringtest_str = 'gfg is best for geeks' # printing original stringprint(\"The original string is : \" + str(test_str)) # initializing Dictionarytest_dict = {'geeks' : 1, 'best': 6} # Remove Dictionary Key Words# Using join() + split()temp = test_str.split(' ')temp1 = [word for word in temp if word.lower() not in test_dict]res = ' '.join(temp1) # printing result print(\"The string after replace : \" + str(res)) ",
"e": 1981,
"s": 1454,
"text": null
},
{
"code": null,
"e": 2069,
"s": 1981,
"text": "The original string is : gfg is best for geeks\nThe string after replace : gfg is for \n"
},
{
"code": null,
"e": 2096,
"s": 2069,
"text": "Python dictionary-programs"
},
{
"code": null,
"e": 2119,
"s": 2096,
"text": "Python string-programs"
},
{
"code": null,
"e": 2126,
"s": 2119,
"text": "Python"
},
{
"code": null,
"e": 2224,
"s": 2126,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 2266,
"s": 2224,
"text": "Different ways to create Pandas Dataframe"
},
{
"code": null,
"e": 2288,
"s": 2266,
"text": "Enumerate() in Python"
},
{
"code": null,
"e": 2323,
"s": 2288,
"text": "Read a file line by line in Python"
},
{
"code": null,
"e": 2349,
"s": 2323,
"text": "Python String | replace()"
},
{
"code": null,
"e": 2381,
"s": 2349,
"text": "How to Install PIP on Windows ?"
},
{
"code": null,
"e": 2410,
"s": 2381,
"text": "*args and **kwargs in Python"
},
{
"code": null,
"e": 2437,
"s": 2410,
"text": "Python Classes and Objects"
},
{
"code": null,
"e": 2458,
"s": 2437,
"text": "Python OOPs Concepts"
},
{
"code": null,
"e": 2488,
"s": 2458,
"text": "Iterate over a list in Python"
}
]
|
Sum of XOR of all pairs in an array | Difficulty Level :
Medium
Given an array of n integers, find the sum of xor of all pairs of numbers in the array.
Examples :
Input : arr[] = {7, 3, 5}
Output : 12
7 ^ 3 = 4
3 ^ 5 = 6
7 ^ 5 = 2
Sum = 4 + 6 + 2
= 12
Input : arr[] = {5, 9, 7, 6}
Output : 47
5 ^ 9 = 12
9 ^ 7 = 14
7 ^ 6 = 1
5 ^ 7 = 2
5 ^ 6 = 3
9 ^ 6 = 15
Sum = 12 + 14 + 1 + 2 + 3 + 15
= 47
Naive Solution
A brute force approach is to run two nested loops; the time complexity is O(n^2).
C++
Java
Python3
C#
PHP
JavaScript
// A Simple C++ program to compute// sum of bitwise OR of all pairs#include <bits/stdc++.h>using namespace std; // Returns sum of bitwise OR// of all pairsint pairORSum(int arr[], int n){ int ans = 0; // Initialize result // Consider all pairs (arr[i], arr[j) such that // i < j for (int i = 0; i < n; i++) for (int j = i + 1; j < n; j++) ans += arr[i] ^ arr[j]; return ans;} // Driver program to test above functionint main(){ int arr[] = { 5, 9, 7, 6 }; int n = sizeof(arr) / sizeof(arr[0]); cout << pairORSum(arr, n) << endl; return 0;}
// A Simple Java program to compute// sum of bitwise OR of all pairsimport java.io.*; class GFG { // Returns sum of bitwise OR // of all pairs static int pairORSum(int arr[], int n) { // Initialize result int ans = 0; // Consider all pairs (arr[i], arr[j) // such that i < j for (int i = 0; i < n; i++) for (int j = i + 1; j < n; j++) ans += arr[i] ^ arr[j]; return ans; } // Driver program to test above function public static void main (String[] args) { int arr[] = { 5, 9, 7, 6 }; int n = arr.length; System.out.println(pairORSum(arr, arr.length)); }} // This code is contributed by vt_m
# A Simple Python 3 program to compute# sum of bitwise OR of all pairs # Returns sum of bitwise OR# of all pairsdef pairORSum(arr, n) : ans = 0 # Initialize result # Consider all pairs (arr[i], arr[j) # such that i < j for i in range(0, n) : for j in range(i + 1, n) : ans = ans + (arr[i] ^ arr[j]) return ans # Driver Codearr = [ 5, 9, 7, 6 ]n = len(arr) print(pairORSum(arr, n)) # This code is contributed by Nikita Tiwari.
// A Simple C# program to compute// sum of bitwise OR of all pairsusing System; class GFG { // Returns sum of bitwise OR // of all pairs static int pairORSum(int []arr, int n) { // Initialize result int ans = 0; // Consider all pairs (arr[i], arr[j) // such that i < j for (int i = 0; i < n; i++) for (int j = i + 1; j < n; j++) ans += arr[i] ^ arr[j]; return ans; } // Driver program to test above function public static void Main () { int []arr = { 5, 9, 7, 6 }; int n = arr.Length; Console.WriteLine(pairORSum(arr, arr.Length)); }} // This code is contributed by vt_m
<?php// A Simple PHP program to compute// sum of bitwise OR of all pairs // Returns sum of bitwise OR// of all pairsfunction pairORSum($arr, $n){ // Initialize result $ans = 0; // Consider all pairs // (arr[i], arr[j) such that // i < j for ( $i = 0; $i < $n; $i++) for ( $j = $i + 1; $j < $n; $j++) $ans += $arr[$i] ^ $arr[$j]; return $ans;} // Driver Code $arr = array( 5, 9, 7, 6 ); $n = count($arr); echo pairORSum($arr, $n) ; // This code is contributed by anuj_67.
<script> // A Simple Javascript program to compute // sum of bitwise OR of all pairs // Returns sum of bitwise OR // of all pairs const pairORSum = (arr, n) => { let ans = 0; // Initialize result // Consider all pairs (arr[i], arr[j) such that // i < j for (let i = 0; i < n; i++) for (let j = i + 1; j < n; j++) ans += arr[i] ^ arr[j]; return ans; } // Driver program to test above function let arr = [5, 9, 7, 6]; let n = arr.length; document.write(pairORSum(arr, n)); // This code is contributed by rakeshsahni </script>?>
Output :
47
Efficient Solution
An efficient solution can solve this problem in O(n) time, assuming integers are represented using 32 bits. The optimized solution uses bit manipulation. To implement it, for each bit position we count how many numbers have that bit set to 1 and how many have it set to 0, storing the counts in two variables. Next, multiply those counts together with the power of 2 raised to that bit position. Do this for all the bit positions of the numbers; the sum of these contributions is the answer.
How this actually works?
For example, look at the rightmost bit of all the numbers in the array. Suppose that a numbers have a rightmost 0-bit, and b numbers have a 1-bit. Then out of the pairs, a*b of them will have 1 in the rightmost bit of the XOR. This is because there are a*b ways to choose one number that has a 0-bit and one that has a 1-bit. These bits will therefore contribute a*b towards the total of all the XORs.
In general, when looking at the nth bit (where the rightmost bit is the 0th), count how many numbers have 0 (call this an) and how many have 1 (call this bn). The contribution towards the final sum will be an*bn*pow(2,n). You need to do this for each bit and sum all these contributions together.
This can be done in O(kn) time, where k is the number of bits in the given values.
Explanation : arr[] = { 7, 3, 5 }
7 = 1 1 1
3 = 0 1 1
5 = 1 0 1
For bit position 0 :
Bits with zero = 0
Bits with one = 3
Answer = 0 * 3 * 2 ^ 0 = 0
Similarly, for bit position 1 :
Bits with zero = 1
Bits with one = 2
Answer = 1 * 2 * 2 ^ 1 = 4
Similarly, for bit position 2 :
Bits with zero = 1
Bits with one = 2
Answer = 1 * 2 * 2 ^ 2 = 8
Final answer = 0 + 4 + 8 = 12
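As a quick sanity check (not part of the implementations below), the bit-counting formula can be compared against the brute force sum in a few lines of Python:

def xor_pair_sum_bruteforce(arr):
    # sum XOR over every unordered pair
    return sum(arr[i] ^ arr[j] for i in range(len(arr)) for j in range(i + 1, len(arr)))

def xor_pair_sum_bits(arr, bits = 32):
    # count set / unset bits at every position and add ones * zeros * 2^k
    total = 0
    for k in range(bits):
        ones = sum((x >> k) & 1 for x in arr)
        zeros = len(arr) - ones
        total += ones * zeros * (1 << k)
    return total

print(xor_pair_sum_bruteforce([7, 3, 5]), xor_pair_sum_bits([7, 3, 5]))        # 12 12
print(xor_pair_sum_bruteforce([5, 9, 7, 6]), xor_pair_sum_bits([5, 9, 7, 6]))  # 47 47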
CPP
Java
Python3
C#
PHP
JavaScript
// An efficient C++ program to compute// sum of bitwise OR of all pairs#include <bits/stdc++.h>using namespace std; // Returns sum of bitwise OR// of all pairslong long int sumXOR(int arr[], int n){ long long int sum = 0; for (int i = 0; i < 32; i++) { // Count of zeros and ones int zc = 0, oc = 0; // Individual sum at each bit position long long int idsum = 0; for (int j = 0; j < n; j++) { if (arr[j] % 2 == 0) zc++; else oc++; arr[j] /= 2; } // calculating individual bit sum idsum = oc * zc * (1 << i); // final sum sum += idsum; } return sum;} int main(){ long long int sum = 0; int arr[] = { 5, 9, 7, 6 }; int n = sizeof(arr) / sizeof(arr[0]); sum = sumXOR(arr, n); cout << sum; return 0;}
// An efficient Java program to compute// sum of bitwise OR of all pairsimport java.io.*; class GFG { // Returns sum of bitwise OR // of all pairs static long sumXOR(int arr[], int n) { long sum = 0; for (int i = 0; i < 32; i++) { // Count of zeros and ones int zc = 0, oc = 0; // Individual sum at each bit position long idsum = 0; for (int j = 0; j < n; j++) { if (arr[j] % 2 == 0) zc++; else oc++; arr[j] /= 2; } // calculating individual bit sum idsum = oc * zc * (1 << i); // final sum sum += idsum; } return sum; } // Driver Code public static void main(String args[]) { long sum = 0; int arr[] = { 5, 9, 7, 6 }; int n = arr.length; sum = sumXOR(arr, n); System.out.println(sum); }} // This code is contributed by Nikita Tiwari.
# An efficient Python3 program to compute# sum of bitwise OR of all pair # Returns sum of bitwise OR# of all pairsdef sumXOR( arr, n): sum = 0 for i in range(0, 32): # Count of zeros and ones zc = 0 oc = 0 # Individual sum at each bit position idsum = 0 for j in range(0, n): if (arr[j] % 2 == 0): zc = zc + 1 else: oc = oc + 1 arr[j] = int(arr[j] / 2) # calculating individual bit sum idsum = oc * zc * (1 << i) # final sum sum = sum + idsum; return sum # driver functionsum = 0arr = [ 5, 9, 7, 6 ]n = len(arr)sum = sumXOR(arr, n);print (sum) # This code is contributed by saloni1297
// An efficient C# program to compute// sum of bitwise OR of all pairsusing System; class GFG { // Returns sum of bitwise OR // of all pairs static long sumXOR(int []arr, int n) { long sum = 0; for (int i = 0; i < 32; i++) { // Count of zeros and ones int zc = 0, oc = 0; // Individual sum at each bit position long idsum = 0; for (int j = 0; j < n; j++) { if (arr[j] % 2 == 0) zc++; else oc++; arr[j] /= 2; } // calculating individual bit sum idsum = oc * zc * (1 << i); // final sum sum += idsum; } return sum; } // Driver Code public static void Main() { long sum = 0; int []arr = { 5, 9, 7, 6 }; int n = arr.Length; sum = sumXOR(arr, n); Console.WriteLine(sum); }} // This code is contributed by vt_m.
<?php// An efficient PHP program to compute// sum of bitwise OR of all pairs // Returns sum of bitwise OR// of all pairsfunction sumXOR($arr, $n){ $sum = 0; for ($i = 0; $i < 32; $i++) { // Count of zeros and ones $zc = 0; $oc = 0; // Individual sum at each // bit position $idsum = 0; for ($j = 0; $j < $n; $j++) { if ($arr[$j] % 2 == 0) $zc++; else $oc++; $arr[$j] /= 2; } // calculating individual bit sum $idsum = $oc * $zc * (1 << $i); // final sum $sum += $idsum; } return $sum;} // Driver code $sum = 0; $arr = array( 5, 9, 7, 6 ); $n = count($arr); $sum = sumXOR($arr, $n); echo $sum; // This code is contributed by anuj_67
<script> // An efficient JavaScript program to compute // sum of bitwise OR of all pairs // Returns sum of bitwise OR // of all pairs const sumXOR = (arr, n) => { let sum = 0; for (let i = 0; i < 32; i++) { // Count of zeros and ones let zc = 0, oc = 0; // Individual sum at each bit position let idsum = 0; for (let j = 0; j < n; j++) { if (arr[j] % 2 == 0) zc++; else oc++; arr[j] = parseInt(arr[j] / 2); } // calculating individual bit sum idsum = oc * zc * (1 << i); // final sum sum += idsum; } return sum; } let sum = 0; let arr = [5, 9, 7, 6]; let n = arr.length; sum = sumXOR(arr, n); document.write(sum); // This code is contributed by rakeshsahni </script> ?>
47
vt_m
Ganeshchowdharysadanala
rakeshsahni
Bitwise-XOR
Arrays
Bit Magic
Arrays
Bit Magic
Writing code in comment?
Please use ide.geeksforgeeks.org,
generate link and share the link here.
Maximum and minimum of an array using minimum number of comparisons
Top 50 Array Coding Problems for Interviews
Multidimensional Arrays in Java
Stack Data Structure (Introduction and Program)
Linear Search
Left Shift and Right Shift Operators in C/C++
Travelling Salesman Problem | Set 1 (Naive and Dynamic Programming)
How to swap two numbers without using a temporary variable?
Program to find whether a given number is power of 2
Little and Big Endian Mystery | [
{
"code": null,
"e": 26,
"s": 0,
"text": "Difficulty Level :\nMedium"
},
{
"code": null,
"e": 114,
"s": 26,
"text": "Given an array of n integers, find the sum of xor of all pairs of numbers in the array."
},
{
"code": null,
"e": 125,
"s": 114,
"text": "Examples :"
},
{
"code": null,
"e": 365,
"s": 125,
"text": "Input : arr[] = {7, 3, 5}\nOutput : 12\n7 ^ 3 = 4\n3 ^ 5 = 6\n7 ^ 5 = 2\nSum = 4 + 6 + 2 \n = 12\n\nInput : arr[] = {5, 9, 7, 6}\nOutput : 47\n5 ^ 9 = 12\n9 ^ 7 = 14\n7 ^ 6 = 1\n5 ^ 7 = 2\n5 ^ 6 = 3\n9 ^ 6 = 15\nSum = 12 + 14 + 1 + 2 + 3 + 15\n = 47\n"
},
{
"code": null,
"e": 452,
"s": 365,
"text": "Naive SolutionA Brute Force approach is to run two loops and time complexity is O(n2)."
},
{
"code": null,
"e": 456,
"s": 452,
"text": "C++"
},
{
"code": null,
"e": 461,
"s": 456,
"text": "Java"
},
{
"code": null,
"e": 469,
"s": 461,
"text": "Python3"
},
{
"code": null,
"e": 472,
"s": 469,
"text": "C#"
},
{
"code": null,
"e": 476,
"s": 472,
"text": "PHP"
},
{
"code": null,
"e": 487,
"s": 476,
"text": "JavaScript"
},
{
"code": "// A Simple C++ program to compute// sum of bitwise OR of all pairs#include <bits/stdc++.h>using namespace std; // Returns sum of bitwise OR// of all pairsint pairORSum(int arr[], int n){ int ans = 0; // Initialize result // Consider all pairs (arr[i], arr[j) such that // i < j for (int i = 0; i < n; i++) for (int j = i + 1; j < n; j++) ans += arr[i] ^ arr[j]; return ans;} // Driver program to test above functionint main(){ int arr[] = { 5, 9, 7, 6 }; int n = sizeof(arr) / sizeof(arr[0]); cout << pairORSum(arr, n) << endl; return 0;}",
"e": 1074,
"s": 487,
"text": null
},
{
"code": "// A Simple Java program to compute// sum of bitwise OR of all pairsimport java.io.*; class GFG { // Returns sum of bitwise OR // of all pairs static int pairORSum(int arr[], int n) { // Initialize result int ans = 0; // Consider all pairs (arr[i], arr[j) // such that i < j for (int i = 0; i < n; i++) for (int j = i + 1; j < n; j++) ans += arr[i] ^ arr[j]; return ans; } // Driver program to test above function public static void main (String[] args) { int arr[] = { 5, 9, 7, 6 }; int n = arr.length; System.out.println(pairORSum(arr, arr.length)); }} // This code is contributed by vt_m",
"e": 1852,
"s": 1074,
"text": null
},
{
"code": "# A Simple Python 3 program to compute# sum of bitwise OR of all pairs # Returns sum of bitwise OR# of all pairsdef pairORSum(arr, n) : ans = 0 # Initialize result # Consider all pairs (arr[i], arr[j) # such that i < j for i in range(0, n) : for j in range(i + 1, n) : ans = ans + (arr[i] ^ arr[j]) return ans # Driver Codearr = [ 5, 9, 7, 6 ]n = len(arr) print(pairORSum(arr, n)) # This code is contributed by Nikita Tiwari.",
"e": 2358,
"s": 1852,
"text": null
},
{
"code": "// A Simple C# program to compute// sum of bitwise OR of all pairsusing System; class GFG { // Returns sum of bitwise OR // of all pairs static int pairORSum(int []arr, int n) { // Initialize result int ans = 0; // Consider all pairs (arr[i], arr[j) // such that i < j for (int i = 0; i < n; i++) for (int j = i + 1; j < n; j++) ans += arr[i] ^ arr[j]; return ans; } // Driver program to test above function public static void Main () { int []arr = { 5, 9, 7, 6 }; int n = arr.Length; Console.WriteLine(pairORSum(arr, arr.Length)); }} // This code is contributed by vt_m",
"e": 3117,
"s": 2358,
"text": null
},
{
"code": "<?php// A Simple PHP program to compute// sum of bitwise OR of all pairs // Returns sum of bitwise OR// of all pairsfunction pairORSum($arr, $n){ // Initialize result $ans = 0; // Consider all pairs // (arr[i], arr[j) such that // i < j for ( $i = 0; $i < $n; $i++) for ( $j = $i + 1; $j < $n; $j++) $ans += $arr[$i] ^ $arr[$j]; return $ans;} // Driver Code $arr = array( 5, 9, 7, 6 ); $n = count($arr); echo pairORSum($arr, $n) ; // This code is contributed by anuj_67.",
"e": 3647,
"s": 3117,
"text": null
},
{
"code": "<script> // A Simple Javascript program to compute // sum of bitwise OR of all pairs // Returns sum of bitwise OR // of all pairs const pairORSum = (arr, n) => { let ans = 0; // Initialize result // Consider all pairs (arr[i], arr[j) such that // i < j for (let i = 0; i < n; i++) for (let j = i + 1; j < n; j++) ans += arr[i] ^ arr[j]; return ans; } // Driver program to test above function let arr = [5, 9, 7, 6]; let n = arr.length; document.write(pairORSum(arr, n)); // This code is contributed by rakeshsahni </script>?>",
"e": 4272,
"s": 3647,
"text": null
},
{
"code": null,
"e": 4281,
"s": 4272,
"text": "Output :"
},
{
"code": null,
"e": 4284,
"s": 4281,
"text": "47"
},
{
"code": null,
"e": 4793,
"s": 4284,
"text": "Efficient SolutionAn Efficient Solution can solve this problem in O(n) time. The assumption here is that integers are represented using 32 bits.Optimized solution will be to try bit manipulation. To implement the solution, we consider all bits which are 1 and which are 0 and store their count in two different variables. Next multiple those counts along with the power of 2 raised to that bit position. Do this for all the bit positions of the numbers. Their sum would be our answer.How this actually works?"
},
{
"code": null,
"e": 5195,
"s": 4793,
"text": "For example, look at the rightmost bit of all the numbers in the array. Suppose that a numbers have a rightmost 0-bit, and b numbers have a 1-bit. Then out of the pairs, a*b of them will have 1 in the rightmost bit of the XOR. This is because there are a*b ways to choose one number that has a 0-bit and one that has a 1-bit. These bits will therefore contribute a*b towards the total of all the XORs."
},
{
"code": null,
"e": 5492,
"s": 5195,
"text": "In general, when looking at the nth bit (where the rightmost bit is the 0th), count how many numbers have 0 (call this an) and how many have 1 (call this bn). The contribution towards the final sum will be an*bn*pow(2,n). You need to do this for each bit and sum all these contributions together."
},
{
"code": null,
"e": 5575,
"s": 5492,
"text": "This can be done in O(kn) time, where k is the number of bits in the given values."
},
{
"code": null,
"e": 5952,
"s": 5575,
"text": "Explanation : arr[] = { 7, 3, 5 }\n7 = 1 1 1\n3 = 0 1 1\n5 = 1 0 1\nFor bit position 0 : \nBits with zero = 0\nBits with one = 3\nAnswer = 0 * 3 * 2 ^ 0 = 0\nSimilarly, for bit position 1 :\nBits with zero = 1\nBits with one = 2\nAnswer = 1 * 2 * 2 ^ 1 = 4\nSimilarly, for bit position 2 :\nBits with zero = 1\nBits with one = 2\nAnswer = 1 * 2 * 2 ^ 2 = 8\n Final answer = 0 + 4 + 8 = 12 \n\n"
},
{
"code": null,
"e": 5956,
"s": 5952,
"text": "CPP"
},
{
"code": null,
"e": 5961,
"s": 5956,
"text": "Java"
},
{
"code": null,
"e": 5969,
"s": 5961,
"text": "Python3"
},
{
"code": null,
"e": 5972,
"s": 5969,
"text": "C#"
},
{
"code": null,
"e": 5976,
"s": 5972,
"text": "PHP"
},
{
"code": null,
"e": 5987,
"s": 5976,
"text": "JavaScript"
},
{
"code": "// An efficient C++ program to compute// sum of bitwise OR of all pairs#include <bits/stdc++.h>using namespace std; // Returns sum of bitwise OR// of all pairslong long int sumXOR(int arr[], int n){ long long int sum = 0; for (int i = 0; i < 32; i++) { // Count of zeros and ones int zc = 0, oc = 0; // Individual sum at each bit position long long int idsum = 0; for (int j = 0; j < n; j++) { if (arr[j] % 2 == 0) zc++; else oc++; arr[j] /= 2; } // calculating individual bit sum idsum = oc * zc * (1 << i); // final sum sum += idsum; } return sum;} int main(){ long long int sum = 0; int arr[] = { 5, 9, 7, 6 }; int n = sizeof(arr) / sizeof(arr[0]); sum = sumXOR(arr, n); cout << sum; return 0;}",
"e": 6881,
"s": 5987,
"text": null
},
{
"code": "// An efficient Java program to compute// sum of bitwise OR of all pairsimport java.io.*; class GFG { // Returns sum of bitwise OR // of all pairs static long sumXOR(int arr[], int n) { long sum = 0; for (int i = 0; i < 32; i++) { // Count of zeros and ones int zc = 0, oc = 0; // Individual sum at each bit position long idsum = 0; for (int j = 0; j < n; j++) { if (arr[j] % 2 == 0) zc++; else oc++; arr[j] /= 2; } // calculating individual bit sum idsum = oc * zc * (1 << i); // final sum sum += idsum; } return sum; } // Driver Code public static void main(String args[]) { long sum = 0; int arr[] = { 5, 9, 7, 6 }; int n = arr.length; sum = sumXOR(arr, n); System.out.println(sum); }} // This code is contributed by Nikita Tiwari.",
"e": 8001,
"s": 6881,
"text": null
},
{
"code": "# An efficient Python3 program to compute# sum of bitwise OR of all pair # Returns sum of bitwise OR# of all pairsdef sumXOR( arr, n): sum = 0 for i in range(0, 32): # Count of zeros and ones zc = 0 oc = 0 # Individual sum at each bit position idsum = 0 for j in range(0, n): if (arr[j] % 2 == 0): zc = zc + 1 else: oc = oc + 1 arr[j] = int(arr[j] / 2) # calculating individual bit sum idsum = oc * zc * (1 << i) # final sum sum = sum + idsum; return sum # driver functionsum = 0arr = [ 5, 9, 7, 6 ]n = len(arr)sum = sumXOR(arr, n);print (sum) # This code is contributed by saloni1297",
"e": 8795,
"s": 8001,
"text": null
},
{
"code": "// An efficient C# program to compute// sum of bitwise OR of all pairsusing System; class GFG { // Returns sum of bitwise OR // of all pairs static long sumXOR(int []arr, int n) { long sum = 0; for (int i = 0; i < 32; i++) { // Count of zeros and ones int zc = 0, oc = 0; // Individual sum at each bit position long idsum = 0; for (int j = 0; j < n; j++) { if (arr[j] % 2 == 0) zc++; else oc++; arr[j] /= 2; } // calculating individual bit sum idsum = oc * zc * (1 << i); // final sum sum += idsum; } return sum; } // Driver Code public static void Main() { long sum = 0; int []arr = { 5, 9, 7, 6 }; int n = arr.Length; sum = sumXOR(arr, n); Console.WriteLine(sum); }} // This code is contributed by vt_m.",
"e": 9886,
"s": 8795,
"text": null
},
{
"code": "<?php// An efficient PHP program to compute// sum of bitwise OR of all pairs // Returns sum of bitwise OR// of all pairsfunction sumXOR($arr, $n){ $sum = 0; for ($i = 0; $i < 32; $i++) { // Count of zeros and ones $zc = 0; $oc = 0; // Individual sum at each // bit position $idsum = 0; for ($j = 0; $j < $n; $j++) { if ($arr[$j] % 2 == 0) $zc++; else $oc++; $arr[$j] /= 2; } // calculating individual bit sum $idsum = $oc * $zc * (1 << $i); // final sum $sum += $idsum; } return $sum;} // Driver code $sum = 0; $arr = array( 5, 9, 7, 6 ); $n = count($arr); $sum = sumXOR($arr, $n); echo $sum; // This code is contributed by anuj_67",
"e": 10736,
"s": 9886,
"text": null
},
{
"code": "<script> // An efficient JavaScript program to compute // sum of bitwise OR of all pairs // Returns sum of bitwise OR // of all pairs const sumXOR = (arr, n) => { let sum = 0; for (let i = 0; i < 32; i++) { // Count of zeros and ones let zc = 0, oc = 0; // Individual sum at each bit position let idsum = 0; for (let j = 0; j < n; j++) { if (arr[j] % 2 == 0) zc++; else oc++; arr[j] = parseInt(arr[j] / 2); } // calculating individual bit sum idsum = oc * zc * (1 << i); // final sum sum += idsum; } return sum; } let sum = 0; let arr = [5, 9, 7, 6]; let n = arr.length; sum = sumXOR(arr, n); document.write(sum); // This code is contributed by rakeshsahni </script> ?>",
"e": 11673,
"s": 10736,
"text": null
},
{
"code": null,
"e": 11676,
"s": 11673,
"text": "47"
},
{
"code": null,
"e": 11681,
"s": 11676,
"text": "vt_m"
},
{
"code": null,
"e": 11705,
"s": 11681,
"text": "Ganeshchowdharysadanala"
},
{
"code": null,
"e": 11717,
"s": 11705,
"text": "rakeshsahni"
},
{
"code": null,
"e": 11729,
"s": 11717,
"text": "Bitwise-XOR"
},
{
"code": null,
"e": 11736,
"s": 11729,
"text": "Arrays"
},
{
"code": null,
"e": 11746,
"s": 11736,
"text": "Bit Magic"
},
{
"code": null,
"e": 11753,
"s": 11746,
"text": "Arrays"
},
{
"code": null,
"e": 11763,
"s": 11753,
"text": "Bit Magic"
},
{
"code": null,
"e": 11861,
"s": 11763,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 11929,
"s": 11861,
"text": "Maximum and minimum of an array using minimum number of comparisons"
},
{
"code": null,
"e": 11973,
"s": 11929,
"text": "Top 50 Array Coding Problems for Interviews"
},
{
"code": null,
"e": 12005,
"s": 11973,
"text": "Multidimensional Arrays in Java"
},
{
"code": null,
"e": 12053,
"s": 12005,
"text": "Stack Data Structure (Introduction and Program)"
},
{
"code": null,
"e": 12067,
"s": 12053,
"text": "Linear Search"
},
{
"code": null,
"e": 12113,
"s": 12067,
"text": "Left Shift and Right Shift Operators in C/C++"
},
{
"code": null,
"e": 12181,
"s": 12113,
"text": "Travelling Salesman Problem | Set 1 (Naive and Dynamic Programming)"
},
{
"code": null,
"e": 12241,
"s": 12181,
"text": "How to swap two numbers without using a temporary variable?"
},
{
"code": null,
"e": 12294,
"s": 12241,
"text": "Program to find whether a given number is power of 2"
}
]
|
Global keyword in Python program | Sometimes we declare a variable, but we may need to modify or access its value outside the current scope of its declaration, which may be anywhere in the current program. In such a scenario, we use the global keyword with the variable name inside the function where we want to modify the variable. If the variable is not inside a function, then it is automatically global in scope.
In the below example, we define a value outside a function but are able to access it from inside a function, because such a variable is already global in scope.
x = 56
def func():
    y = x * 2
    return y
print(func())
Running the above code gives us the following result:
112
In the next example we attempt to change the value of a variable inside a function and get an error.
x = 56
def func():
y = x *2
x = x+2
return y
print(func())
Running the above code gives us the following result:
UnboundLocalError: local variable 'x' referenced before assignment
The only way we can modify the value of a variable with global scope inside a function is by declaring it as a global variable inside a function.
x = 56
def func():
    global x
    x = x + 2
    y = x * 2
    return y
print(func())
Running the above code gives us the following result
116
In case of nested function, we create the global variable in the innermost function as shown in the below example.
def func_out():
    x = 56
    def func_in():
        global x
        x = 20
        print("x is: ", x)
        x = x + 3
        y = x * 2
        print("y is: ", y)
    func_in()
func_out()
print("x is: ", x)
Running the above code gives us the following result:
x is: 20
y is: 46
x is: 23 | [
{
"code": null,
"e": 1560,
"s": 1187,
"text": "Sometimes we declare a variable but we may need to modify or access the values outside the current scope of its declaration which may be anywhere in the current program. In such scenario, we use the Global keyword with the variable name inside the function where the variable is declared. If the variable is not inside a function, then it is automatically global in scope."
},
{
"code": null,
"e": 1721,
"s": 1560,
"text": "In the below example we see a value outside a function, but we are able to access it from inside a function. Because such a variable is already global in scope."
},
{
"code": null,
"e": 1775,
"s": 1721,
"text": "x = 56\ndef func():\ny = x *2\n return y\nprint(func())"
},
{
"code": null,
"e": 1829,
"s": 1775,
"text": "Running the above code gives us the following result:"
},
{
"code": null,
"e": 1833,
"s": 1829,
"text": "112"
},
{
"code": null,
"e": 1934,
"s": 1833,
"text": "In the next example we attempt to change the value of a variable inside a function and get an error."
},
{
"code": null,
"e": 2005,
"s": 1934,
"text": "x = 56\ndef func():\n y = x *2\n x = x+2\n return y\nprint(func())"
},
{
"code": null,
"e": 2059,
"s": 2005,
"text": "Running the above code gives us the following result:"
},
{
"code": null,
"e": 2125,
"s": 2059,
"text": "nboundLocalError: local variable 'x' referenced before assignment"
},
{
"code": null,
"e": 2271,
"s": 2125,
"text": "The only way we can modify the value of a variable with global scope inside a function is by declaring it as a global variable inside a function."
},
{
"code": null,
"e": 2353,
"s": 2271,
"text": "x = 56\ndef func():\nglobal x\n x = x + 2\n y = x *2\n return y\nprint(func())"
},
{
"code": null,
"e": 2406,
"s": 2353,
"text": "Running the above code gives us the following result"
},
{
"code": null,
"e": 2410,
"s": 2406,
"text": "116"
},
{
"code": null,
"e": 2525,
"s": 2410,
"text": "In case of nested function, we create the global variable in the innermost function as shown in the below example."
},
{
"code": null,
"e": 2536,
"s": 2525,
"text": " Live Demo"
},
{
"code": null,
"e": 2730,
"s": 2536,
"text": "def func_out():\n x = 56\n def func_in():\n global x\n x = 20\n print(\"x is: \",x)\n x = x + 3\n y = x *2\n print (\"y is: \",y)\n func_in()\nfunc_out()\nprint(\"x is: \",x)"
},
{
"code": null,
"e": 2784,
"s": 2730,
"text": "Running the above code gives us the following result:"
},
{
"code": null,
"e": 2811,
"s": 2784,
"text": "x is: 20\ny is: 46\nx is: 23"
}
]
|
Topic Model Visualization using pyLDAvis | by Himanshu Sharma | Towards Data Science | Creating Interactive Topic Model Visualizations
Topic Modelling is a part of Machine Learning where an automated model analyzes the text data and creates clusters of the words from that dataset or a combination of documents. It works by finding the topics in the text and uncovering the hidden patterns between the words related to those topics.
By using topic modeling, we can create clusters of documents that are related. For example, it can be used in the recruitment industry to create clusters of jobs and job seekers that have similar skill sets. There are several ways of obtaining the topics from the model, but in this article, we will talk about LDA (Latent Dirichlet Allocation).
LDA works on the matrix factorization technique, in which it assumes each document is a mixture of topics, and it backtracks to figure out what topics would have created these documents. The important part is that in this article we will create visualizations where we can analyze the clusters created by LDA.
pyLDAvis is an open-source python library that helps in analyzing and creating highly interactive visualization of the clusters created by LDA. In this article, we will see how to use LDA and pyLDAvis to create Topic Modelling Clusters visualizations.
Let’s get started...
This article will mainly focus on pyLDAvis for visualization, in order to install it we will use pip installation and the command given below will perform the installation.
pip install pyldavis
In this article, we will start by creating the model by using a predefined dataset from sklearn. In order to do all these steps, we need to import all the required libraries.
from __future__ import print_function
import pyLDAvis
import pyLDAvis.sklearn
pyLDAvis.enable_notebook()
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.decomposition import LatentDirichletAllocation
As the main focus of this article is to create visualizations you can check this link on getting a better understanding of how to create a topic model.
Now we will load the dataset that we have already imported. The newsgroup is a textual dataset so it will be helpful for this article and understanding the cluster formation using LDA.
newsgroups = fetch_20newsgroups(remove=('headers', 'footers', 'quotes'))
docs_raw = newsgroups.data
print(len(docs_raw))
Here we will see that the dataset contains 11314 rows of data. Next, we will apply CountVectorizer, TFID, etc., and create the model which we will visualize.
tf_vectorizer = CountVectorizer(strip_accents='unicode',
                                stop_words='english',
                                lowercase=True,
                                token_pattern=r'\b[a-zA-Z]{3,}\b',
                                max_df=0.5, min_df=10)
dtm_tf = tf_vectorizer.fit_transform(docs_raw)

tfidf_vectorizer = TfidfVectorizer(**tf_vectorizer.get_params())
dtm_tfidf = tfidf_vectorizer.fit_transform(docs_raw)
In this step, we will create the Topic Model of the current dataset so that we can visualize it using the pyLDAvis.
# for TF DTM
lda_tf = LatentDirichletAllocation(n_components=20, random_state=0)
lda_tf.fit(dtm_tf)

# for TFIDF DTM
lda_tfidf = LatentDirichletAllocation(n_components=20, random_state=0)
lda_tfidf.fit(dtm_tfidf)
This is the final step where we will create the visualizations of the topic clusters. The best thing about pyLDAvis is that it is easy to use and creates visualization in a single line of code.
pyLDAvis.sklearn.prepare(lda_tf, dtm_tf, tf_vectorizer)
Check out the video below showing how interactive and visually appealing visualization is created by pyLDAvis.
Similarly, you can also create visualizations for TF-IDF vectorizer, etc. Go ahead try this and let me know your comments or any difficulty that you face in the comments section.
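For example, the TF-IDF version is the same one-liner, just pointed at the TF-IDF objects built earlier (a minimal sketch, assuming the variables created in the steps above):
# Visualize the topics learned from the TF-IDF document-term matrix
pyLDAvis.sklearn.prepare(lda_tfidf, dtm_tfidf, tfidf_vectorizer)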
This post is in collaboration with Piyush Ingale.
Thanks for reading! If you want to get in touch with me, feel free to reach me at [email protected] or my LinkedIn Profile. You can view my Github profile for different data science projects and packages tutorials. Also, feel free to explore my profile and read different articles I have written related to Data Science. | [
{
"code": null,
"e": 219,
"s": 171,
"text": "Creating Interactive Topic Model Visualizations"
},
{
"code": null,
"e": 520,
"s": 219,
"text": "Topic Modelling is a part of Machine Learning where the automated model analyzes the text data and creates the clusters of the words from that dataset or a combination of documents. It works on finding out the topics in the text and find out the hidden patterns between words relates to those topics."
},
{
"code": null,
"e": 863,
"s": 520,
"text": "By using topic modeling we can create clusters of documents that are relevant, for example, It can be used in the recruitment industry to create clusters of jobs and job seekers that have similar skill sets. There are several ways of obtaining the topics from the model but in this article, we will talk about LDA-Latent Dirichlet Allocation."
},
{
"code": null,
"e": 1155,
"s": 863,
"text": "LDA works on the matrix factorization technique in which it assumes a is a mixture of topics and it backtracks to figure what topics would have created these documents. The important part is that in this article we will create visualizations where we can analyze the clusters created by LDA."
},
{
"code": null,
"e": 1407,
"s": 1155,
"text": "pyLDAvis is an open-source python library that helps in analyzing and creating highly interactive visualization of the clusters created by LDA. In this article, we will see how to use LDA and pyLDAvis to create Topic Modelling Clusters visualizations."
},
{
"code": null,
"e": 1428,
"s": 1407,
"text": "Let’s get started..."
},
{
"code": null,
"e": 1601,
"s": 1428,
"text": "This article will mainly focus on pyLDAvis for visualization, in order to install it we will use pip installation and the command given below will perform the installation."
},
{
"code": null,
"e": 1622,
"s": 1601,
"text": "pip install pyldavis"
},
{
"code": null,
"e": 1797,
"s": 1622,
"text": "In this article, we will start by creating the model by using a predefined dataset from sklearn. In order to do all these steps, we need to import all the required libraries."
},
{
"code": null,
"e": 2081,
"s": 1797,
"text": "from __future__ import print_functionimport pyLDAvisimport pyLDAvis.sklearnpyLDAvis.enable_notebook()from sklearn.datasets import fetch_20newsgroupsfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizerfrom sklearn.decomposition import LatentDirichletAllocation"
},
{
"code": null,
"e": 2233,
"s": 2081,
"text": "As the main focus of this article is to create visualizations you can check this link on getting a better understanding of how to create a topic model."
},
{
"code": null,
"e": 2418,
"s": 2233,
"text": "Now we will load the dataset that we have already imported. The newsgroup is a textual dataset so it will be helpful for this article and understanding the cluster formation using LDA."
},
{
"code": null,
"e": 2537,
"s": 2418,
"text": "newsgroups = fetch_20newsgroups(remove=('headers', 'footers', 'quotes'))docs_raw = newsgroups.dataprint(len(docs_raw))"
},
{
"code": null,
"e": 2695,
"s": 2537,
"text": "Here we will see that the dataset contains 11314 rows of data. Next, we will apply CountVectorizer, TFID, etc., and create the model which we will visualize."
},
{
"code": null,
"e": 3178,
"s": 2695,
"text": "tf_vectorizer = CountVectorizer(strip_accents = 'unicode', stop_words = 'english', lowercase = True, token_pattern = r'\\b[a-zA-Z]{3,}\\b', max_df = 0.5, min_df = 10)dtm_tf = tf_vectorizer.fit_transform(docs_raw)tfidf_vectorizer = TfidfVectorizer(**tf_vectorizer.get_params())dtm_tfidf = tfidf_vectorizer.fit_transform(docs_raw)"
},
{
"code": null,
"e": 3294,
"s": 3178,
"text": "In this step, we will create the Topic Model of the current dataset so that we can visualize it using the pyLDAvis."
},
{
"code": null,
"e": 3501,
"s": 3294,
"text": "# for TF DTMlda_tf = LatentDirichletAllocation(n_components=20, random_state=0)lda_tf.fit(dtm_tf)# for TFIDF DTMlda_tfidf = LatentDirichletAllocation(n_components=20, random_state=0)lda_tfidf.fit(dtm_tfidf)"
},
{
"code": null,
"e": 3695,
"s": 3501,
"text": "This is the final step where we will create the visualizations of the topic clusters. The best thing about pyLDAvis is that it is easy to use and creates visualization in a single line of code."
},
{
"code": null,
"e": 3751,
"s": 3695,
"text": "pyLDAvis.sklearn.prepare(lda_tf, dtm_tf, tf_vectorizer)"
},
{
"code": null,
"e": 3862,
"s": 3751,
"text": "Check out the video below showing how interactive and visually appealing visualization is created by pyLDAvis."
},
{
"code": null,
"e": 4041,
"s": 3862,
"text": "Similarly, you can also create visualizations for TF-IDF vectorizer, etc. Go ahead try this and let me know your comments or any difficulty that you face in the comments section."
},
{
"code": null,
"e": 4091,
"s": 4041,
"text": "This post is in collaboration with Piyush Ingale."
}
]
|
How do I execute a string containing Python code in Python? | If you want to execute Python statements, you can use exec(string). For example,
>>> my_code = 'print "Hello World!"'
>>> exec(my_code)
Hello World!
But if you just want to evaluate the value of an expression, you can use eval(). For example,
>>> my_expression = "5 + 3"
>>> eval(my_expression)
8
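Both eval() and exec() also accept optional globals and locals dictionaries, which restrict what names the evaluated code can see. For example,
>>> eval("x + 2", {"__builtins__": {}}, {"x": 5})
7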
Note: Be very cautious while using both eval and exec as they are very powerful functions
and can cause very subtle bugs/ security loopholes in your code. | [
{
"code": null,
"e": 1143,
"s": 1062,
"text": "If you want to execute Python statements, you can use exec(string). For example,"
},
{
"code": null,
"e": 1211,
"s": 1143,
"text": ">>> my_code = 'print \"Hello World!\"'\n>>> exec(my_code)\nHello World!"
},
{
"code": null,
"e": 1305,
"s": 1211,
"text": "But if you just want to evaluate the value of an expression, you can use eval(). For example,"
},
{
"code": null,
"e": 1359,
"s": 1305,
"text": ">>> my_expression = \"5 + 3\"\n>>> eval(my_expression)\n8"
},
{
"code": null,
"e": 1450,
"s": 1359,
"text": " Note: Be very cautious while using both eval and exec as they are very powerful functions"
},
{
"code": null,
"e": 1515,
"s": 1450,
"text": "and can cause very subtle bugs/ security loopholes in your code."
}
]
|
JSF - f:validateRegex | The f:validateRegex tag is used to validate a string value against a required format.
<f:validateRegex pattern = "((?=.*[a-z]).{6,})" />
pattern
Formatting pattern
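For instance, a hypothetical pattern that accepts exactly four digits (not the one used in the example below) could be declared as follows.
<f:validateRegex pattern = "\d{4}" />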
Let us create a test JSF application to test the above tag.
package com.tutorialspoint.test;
import java.io.Serializable;
import javax.faces.bean.ManagedBean;
import javax.faces.bean.SessionScoped;
@ManagedBean(name = "userData", eager = true)
@SessionScoped
public class UserData implements Serializable {
private static final long serialVersionUID = 1L;
private String password;
public String getPassword() {
return password;
}
public void setPassword(String password) {
this.password = password;
}
}
<?xml version = "1.0" encoding = "UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns = "http://www.w3.org/1999/xhtml"
xmlns:h = "http://java.sun.com/jsf/html"
xmlns:f = "http://java.sun.com/jsf/core">
<h:head>
<title>JSF tutorial</title>
</h:head>
<h:body>
<h2>h:validateRegex Example</h2>
      <!-- password must contain at least one lower case letter and
      its length should be at least 6. -->
<h:form>
<h:inputSecret id = "passwordInput" value = "#{userData.password}"
label = "password" >
            <f:validateRegex pattern = "((?=.*[a-z]).{6,})" />
</h:inputSecret>
<h:commandButton value = "submit" action = "result"/>
<h:message for = "passwordInput" style = "color:red" />
</h:form>
</h:body>
</html>
<?xml version = "1.0" encoding = "UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns = "http://www.w3.org/1999/xhtml"
xmlns:f = "http://java.sun.com/jsf/core"
xmlns:h = "http://java.sun.com/jsf/html">
<h:head>
<title>JSF Tutorial!</title>
</h:head>
<h:body>
<h2>Result</h2>
<hr />
Password: #{userData.password}
</h:body>
</html>
Once you are ready with all the changes done, let us compile and run the application as we did in JSF - First Application chapter. If everything is fine with your application, this will produce the following result.
Enter an invalid value. Following will be the output.
Enter a valid value. Following will be the output. | [
{
"code": null,
"e": 2029,
"s": 1952,
"text": "f:validateRegex tag is used to validate a string value to a required format."
},
{
"code": null,
"e": 2081,
"s": 2029,
"text": "<f:validateRegex pattern = \"((?=.*[a-z]).{6,})\" />\n"
},
{
"code": null,
"e": 2089,
"s": 2081,
"text": "pattern"
},
{
"code": null,
"e": 2108,
"s": 2089,
"text": "Formatting pattern"
},
{
"code": null,
"e": 2168,
"s": 2108,
"text": "Let us create a test JSF application to test the above tag."
},
{
"code": null,
"e": 2655,
"s": 2168,
"text": "package com.tutorialspoint.test;\n\nimport java.io.Serializable;\n\nimport javax.faces.bean.ManagedBean;\nimport javax.faces.bean.SessionScoped;\n\n@ManagedBean(name = \"userData\", eager = true)\n@SessionScoped\npublic class UserData implements Serializable {\n private static final long serialVersionUID = 1L;\n private String password;\n \n public String getPassword() {\n return password;\n }\n \n public void setPassword(String password) {\n this.password = password;\n } \n}"
},
{
"code": null,
"e": 3596,
"s": 2655,
"text": "<?xml version = \"1.0\" encoding = \"UTF-8\"?>\n<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \n\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n\n<html xmlns = \"http://www.w3.org/1999/xhtml\" \n xmlns:h = \"http://java.sun.com/jsf/html\"\n xmlns:f = \"http://java.sun.com/jsf/core\">\n \n <h:head>\n <title>JSF tutorial</title>\t\t\t\n </h:head>\n \n <h:body> \n <h2>h:validateRegex Example</h2>\n <!-- password contains lower case letters only and.\n length of the password should be greater than 6. -->\n \n <h:form>\n <h:inputSecret id = \"passwordInput\" value = \"#{userData.password}\" \n label = \"password\" >\n <f:validateRegex pattern = \"((? = .*[a-z]).{6,})\" />\n </h:inputSecret>\t\t\t\n <h:commandButton value = \"submit\" action = \"result\"/>\n <h:message for = \"passwordInput\" style = \"color:red\" />\n </h:form>\n \n </h:body>\n</html>"
},
{
"code": null,
"e": 4089,
"s": 3596,
"text": "<?xml version = \"1.0\" encoding = \"UTF-8\"?>\n<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" \n\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n\n<html xmlns = \"http://www.w3.org/1999/xhtml\"\n xmlns:f = \"http://java.sun.com/jsf/core\" \n xmlns:h = \"http://java.sun.com/jsf/html\">\n \n <h:head>\n <title>JSF Tutorial!</title> \n </h:head>\n \n <h:body>\n <h2>Result</h2>\n <hr /> \n Password: #{userData.password} \n </h:body>\n</html> "
},
{
"code": null,
"e": 4305,
"s": 4089,
"text": "Once you are ready with all the changes done, let us compile and run the application as we did in JSF - First Application chapter. If everything is fine with your application, this will produce the following result."
},
{
"code": null,
"e": 4359,
"s": 4305,
"text": "Enter an invalid value. Following will be the output."
},
{
"code": null,
"e": 4410,
"s": 4359,
"text": "Enter a valid value. Following will be the output."
},
{
"code": null,
"e": 4445,
"s": 4410,
"text": "\n 37 Lectures \n 3.5 hours \n"
},
{
"code": null,
"e": 4460,
"s": 4445,
"text": " Chaand Sheikh"
},
{
"code": null,
"e": 4467,
"s": 4460,
"text": " Print"
},
{
"code": null,
"e": 4478,
"s": 4467,
"text": " Add Notes"
}
]
|
The Complete Guide to Neural Network multi-class Classification from scratch | by Shaun Enslin | Towards Data Science | Neural networks reflect the behavior of the human brain. They allow programs to recognise patterns and solve common problems in machine learning. This is another option to either perform classification instead of logistics regression. At Rapidtrade, we use neural networks to classify data and run regression scenarios. The source code for this article is available on GitHub.
We will be working with a dataset from Kaggle and you can download it here. So to visualise the data we will be working with in this article, see below. We will use this to train the network to categorise our customers according to column J. We will also use the 3 features highlighted to classify our customers. I needed 3 features to fit my neural network and these were the best 3 available.
Just keep in mind, we will convert all the alpha string values to numerics. After all, we can’t plug strings into equations ;-)
This is quite a long article and is broken up into 2 sections:
Introduction
Putting it all together
Good luck ;-)
Neural networks are always made up of layers, as seen in figure 2. It all looks complicated, but let’s unpack this to make it more understandable.
A neural network has 6 important concepts, which I will explain briefly here, but cover in detail in this series of articles.
- Weights — These are like the thetas we would use in other algorithms
- Layers — Our network will have 3 layers
- Forward propagation — Use the features/weights to get Z and A
- Back propagation — Use the results of forward propagation/weights to get S
- Calculating the cost/gradient of each weight
- Gradient descent — find the best weight/hypothesis
In this series, we will be building a neural network with 3 layers. Let's discuss these layers quickly before we get into the thick of it.
Refer to figure 2 above and we will refer to the result of this layer as A1. The size (# units) of this layer depends on the number of features in our dataset.
Building our input layer is not difficult: you simply copy X into A1, but add what is called a biased layer, which defaults to “1”.
Col 1: Biased layer, defaults to '1'
Col 2: "Ever married", our 1st feature, re-labeled to 1/2
Col 3: "Graduated", our 2nd feature, re-labeled to 1/2
Col 4: "Family size", our 3rd feature
Refer to figure 2 above and we only have 1 hidden layer, but you could have a hidden layer per feature. If you had more hidden layers than the logic I mention below, you would replicate the calculations for each hidden layer.
The size (#units) is up to you, we have chosen #features * 2 ie. 6 units.
This layer is calculated during forward and backward propagation. After running both these steps, we calculate Z2, A2 and S2 for each unit. See below for the outputs once each of these steps is run.
Forward propagation
Refer to figure 1 as in this step, we calculate Z2 and then A2.
Z2 contains the results of our hypothesis calculation for each of the 6 units in our hidden layer.
While A2 also includes the biased layer (col 1) and has the sigmoid function applied to each of the cell’s from Z2.
Hence Z2 has 6 columns and A2 has 7 columns as per figure 4.
Back propagation
So, after forward propagation has run through all the layers, we then perform the back propagation step to calculate S2. S2 is referred to as the delta of each units hypothesis calculation. This is used to then figure out the gradient for that theta and later on, combining this with the cost of this unit, helps gradient descent figure out what is the best theta/weight.
Our output layer gives us the result of our hypothesis. ie. if these thetas were applied, what would our best guess be in classifying these customers. The size (#units) is derived from the number labels for Y. As can become seen in figure 1, there are 7 labels, thus the size of the output layer is 7.
As with the hidden layer, this result is calculated during the 2 steps of forward and backward propagation. After running both these steps, here is the results:
Forward propagation
During forward prop, we will calculate Z3 and A3 for the output layer, as we did for the hidden layer. Refer to figure 1 above to see there is no bias column needed and you can see the results of Z3 and A3 below.
Back propagation
Now that (referring to figure 1) we have Z3 and A3, lets calculate S3. As it turns out S3 is simply a basic cost calculation, subtracting A3 from Y, so we will explore the equations in the upcoming articles, but we can nonetheless see the result below
So, the above is a little awkward as it visualises the outputs in each layer. Our main focus in neural networks, is a function to compute the cost of our neural network. The coding for this function will take the following steps.
Prepare the data
Setup neural network
Initialise a set of weights/thetas
Create our cost function which will:
4.1 Perform forward propagation
4.2 Calculate the cost of forward propagation
4.3 Perform backward propagation
4.4 Calculate the deltas and then gradients from backward prop.
Perform cost optimisation:
5.1 Validate our cost function
5.2 Perform gradient descent on steps (4.1) to (4.4) until it finds the best weight/theta to use for predictions
Predict results to check accuracy
To begin this exploratory analysis, first import libraries and define functions for plotting the data using matplotlib. Depending on the data, not all plots will be made.
from mpl_toolkits.mplot3d import Axes3D
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
import seaborn as sns
import matplotlib.pyplot as plt  # plotting
import numpy as np  # linear algebra
import os  # accessing directory structure
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from scipy import optimize as opt
Now, let's read our data and have a quick look.
df = pd.read_csv('customertrain.csv')
df.head()
Doing an info, we can see we have some work to do with null values as well as some object fields to convert to numerics.
df.info()
So, let's transform our object fields to numerics and drop the columns we do not need.
columns = ["Gender", "Ever_Married", "Graduated", "Profession", "Spending_Score"]
for feature in columns:
    le = LabelEncoder()
    df[feature] = le.fit_transform(df[feature])

df = df.drop(["ID", "Gender", "Age", "Profession", "Work_Experience", "Spending_Score"], axis=1)
df.dropna(subset=['Var_1'], inplace=True)
df.head()
Use fit_transform to encode our multinomial categories into numbers we can work with.
yle = LabelEncoder()
df["Var_1"] = yle.fit_transform(df["Var_1"])
df.head()
Fill in missing features
An important part of regression is understanding which features are missing. We can choose to ignore all rows with missing values, or fill them in with either mode, median or mean.
Mode = most common value
Median = middle value
Mean = average
Here is a handy function you can call which will fill in the missing features by your desired method. We will choose to fill in values with the average.
After funning below, you should see 7992 with no null values.
def fillmissing(df, feature, method):
    if method == "mode":
        df[feature] = df[feature].fillna(df[feature].mode()[0])
    elif method == "median":
        df[feature] = df[feature].fillna(df[feature].median())
    else:
        df[feature] = df[feature].fillna(df[feature].mean())

features_missing = df.columns[df.isna().any()]
for feature in features_missing:
    fillmissing(df, feature=feature, method="mean")
df.info()
Extract Y
Let's extract our Y column into a separate array and remove it from the dataframe.
Y = df["Var_1"]
df = df.drop(["Var_1"], axis=1)
Now copy out our X and y columns into matrices for easier matrix manipulation later.
X = df.to_numpy()              # np.matrix(df.to_numpy())
y = Y.to_numpy().transpose()   # np.matrix(Y.to_numpy()).transpose()
m, n = X.shape
Normalize features
Now, let's normalise X so the values lie between -1 and 1. We do this so we can get all features into a similar range. We use the following equation
The goal to perform standardization is to bring down all the features to a common scale without distorting the differences in the range of the values. This process of rescaling the features is so that they have mean as 0 and variance as 1.
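A minimal sketch of that step, assuming the StandardScaler imported earlier (xn is the name used later when training the network):
# Standardize each feature to mean 0 and variance 1
scaler = StandardScaler()
xn = scaler.fit_transform(X)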
Now, we can setup the sizes of our neural network, first, below is the neural network we want to put together.
Below initialisations, ensure above network is achieved. So, now you are asking “What are reasonable numbers to set these to?”
Input layer = set to the size of the dimensions
Hidden layers = set to input_layer * 2
Output layer = set to the size of the labels of Y. In our case, this is 7 categories
input_layer_size = n                      # Dimension of features
hidden_layer_size = input_layer_size*2    # of units in hidden layer
output_layer_size = len(yle.classes_)     # number of labels
As it turns out, this is quite an important topic for gradient descent. If you have not dealt with gradient descent, then check this article first. We can see above that we need 2 sets of weights. (signified by ø).
We often still call these weights thetas, and they mean the same thing.
We need one set of thetas for level 2 and a 2nd set for level 3. Each theta is a matrix and is size(L) * size(L-1). Thus for above:
Theta1 = 6x4 matrix
Theta2 = 7x7 matrix
We now have to guess at which initial thetas should be our starting point. Here, epsilon comes to the rescue and below is the Python code to easily generate some random small numbers for our initial weights.
def initializeWeights(L_in, L_out):
    epsilon_init = 0.12
    W = np.random.rand(L_out, 1 + L_in) * 2 * \
        epsilon_init - epsilon_init
    return W
After running the above function with our sizes for each theta as mentioned above, we will get some good small random initial values as in figure 7. For figure 1 above, the weights we mention would refer to row 1 in the below matrices.
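A minimal sketch (the variable names here are assumed, not taken from the original code) of how these initial thetas can be created and unrolled into the single nn_params vector the optimizer uses later:
# Randomly initialize both weight matrices and unroll them into one vector
initial_Theta1 = initializeWeights(input_layer_size, hidden_layer_size)
initial_Theta2 = initializeWeights(hidden_layer_size, output_layer_size)
nn_params = np.concatenate((initial_Theta1.flatten(),
                            initial_Theta2.flatten()), axis=None)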
We need a function which can implement the neural network cost function for a two layer neural network which performs classification.
In the GitHub code, checknn.py our costfunction called nnCostFunction will return:
gradient should be a “unrolled” vector of the partial derivatives of the neural network
the final J which is the cost of this weight.
Our cost function will need to perform the following:
Reshape nn_params back into the parameters Theta1 and Theta2, the weight matrices for our 2 layer neural network
Perform forward propagation to calculate (a) and (z)
Perform backward propagation to use (a) calculate (s)
So, our cost function first up, needs to reshape our thetas back into a theta for the hidden and output layers.
# Reshape nn_params back into the parameters Theta1 and Theta2,
# the weight matrices for our 2 layer neural network
Theta1 = nn_params[:hidden_layer_size * \
    (input_layer_size + 1)].reshape( \
    (hidden_layer_size, input_layer_size + 1))
Theta2 = nn_params[hidden_layer_size * \
    (input_layer_size + 1):].reshape( \
    (num_labels, hidden_layer_size + 1))

# Setup some useful variables
m = X.shape[0]
Forward propagation is an important part of neural networks. Its not as hard as it sounds.
In figure 7, we can see our network diagram with much of the details removed. We will focus on one unit in level 2 and one unit in level 3. This understanding can then be copied to all units. Take note of the matrix multiplication we can do (in blue in figure 7) to perform forward propagation.
I am showing the details for one unit in each layer, but you can repeat the logic for all layers.
Before we show the forward prop code, lets talk a little on the 2 concepts we need during forward prop.
4.1.1 Sigmoid functions
Since we are doing classification, we will use sigmoid to evaluate our predictions. A sigmoid function is a mathematical function having a characteristic “S”-shaped curve or sigmoid curve. A common example of a sigmoid function is the logistic function shown in the first figure and defined by the formula
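For reference, the logistic function referred to here is:
g(z) = \frac{1}{1 + e^{-z}}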
In github, in checknn.py the following handy functions are created:
sigmoid is a handy function to compute sigmoid of input parameter Z
sigmoidGradient computes the gradient of the sigmoid function evaluated at z. This should work regardless if z is a matrix or a vector.
def sigmoid(z):
    g = np.frompyfunc(lambda x: 1 / (1 + np.exp(-x)), 1, 1)
    return g(z).astype(z.dtype)

def sigmoidGradient(z):
    return sigmoid(z) * (1 - sigmoid(z))
4.1.2 Regularization
We will implement regularization because one of the most common problems data science professionals face is overfitting. Overfitting gives you a situation where your model performed exceptionally well on train data but was not able to predict test data. Neural networks are complex, which makes them more prone to overfitting. Regularization is a technique which makes slight modifications to the learning algorithm such that the model generalizes better. This in turn improves the model’s performance on the unseen data as well.
If you have studied the concept of regularization in machine learning, you will have a fair idea that regularization penalizes the coefficients. In deep learning, it actually penalizes the weight matrices of the nodes.
We implement regularization in nnCostFunction by passing in a lambda which is used to penalise both the gradients and costs that are calculated.
4.1.3 Implementing forward prop
As per figure 1, let's calculate A1. You can see that it's pretty much my X features, and we add the bias column hard coded to "1" in front. Here is the Python code to do this:
# Add ones to the X data matrix
a1 = np.insert(X, 0, 1, axis=1)
The result will now give you the results in A1 in figure 4. Take special note of the bias column “1” added on the front.
Great, thats A1 done, lets move onto A2. Before we get A2, we will first run a hypothesis to calculate Z2. Once you have the hypotheses, you can run it through the sigmoid function to get A2. Again, as per figure 1, add the bias column to the front.
# Perform forward propagation for layer 2
z2 = np.matmul(a1, Theta1.transpose())
a2 = sigmoid(z2)
a2 = np.insert(a2, 0, 1, axis=1)
Ok, so we're almost there... Now onto A3. Let's do the same as with A2, but this time we don't add the bias column.
z3 = np.matmul(a2, Theta2.transpose())
a3 = sigmoid(z3)
You may be asking, “why do we keep Z2 & Z3”. Well, we will need those in back propagation. So we may as well keep them handy ;-).
Before we continue, if you understand our Y column (figure 9) which contains the labels used to categorise our customers. Then to calculate the cost we need to reformat Y into a matrix which corresponds to the number of labels. In our case we have 7 categories for our customers.
Figure 8, shows how Y is converted to a matrix y_one_hot and labels are now indicated as a binary in the appropriate column.
# turn Y into a matrix with a new column for each category and marked with 1
y_one_hot = np.zeros_like(a3)
for i in range(m):
    y_one_hot[i, y[i] - 1] = 1
Now that we have Y in a matrix format, let's have a look at the equation to calculate the cost.
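For reference, the regularized cost being computed (summed over the m examples and the K output labels, with the bias columns excluded from the regularization term, just as in the code below) is:
J(\Theta) = -\frac{1}{m}\sum_{i=1}^{m}\sum_{k=1}^{K}\Big[y_k^{(i)}\log\big(h_\Theta(x^{(i)})_k\big) + (1 - y_k^{(i)})\log\big(1 - h_\Theta(x^{(i)})_k\big)\Big] + \frac{\lambda}{2m}\sum_{l}\sum_{i}\sum_{j}\big(\Theta_{j,i}^{(l)}\big)^2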
Well, that's all very complicated, but good news is that with some matrix manipulation, we can do it in a few lines of python code as below.
# Calculate the cost of our forward prop
ones = np.ones_like(a3)
A = np.matmul(y_one_hot.transpose(), np.log(a3)) + \
    np.matmul((ones - y_one_hot).transpose(), np.log(ones - a3))
J = -1 / m * A.trace()
J += lambda_ / (2 * m) * \
    (np.sum(Theta1[:, 1:] ** 2) + np.sum(Theta2[:, 1:] ** 2))
So, we have simplified our neural network in figure 1 to only show the details to firstly:
Subtract A(3) from Y to calculate S3
Thereafter, perform a linear equation using the thetas mentioned below multiplied by S3 to calculate S2.
Since a picture paints 1000 words, figure 9 should explain what we use to calculate S3 and thereafter S2 (marked in red).
From (3) we understand how our weights (thetas) were initialised, so just to visualise the weights (ø) that figure 9 is referring see figure 10 below.
So again, with matrix manipulation to the rescue, forward propagation is not a difficult task in python
# Perform backward propagation to calculate deltas
s3 = a3 - y_one_hot   # y_one_hot is the matrix version of Y built above
s2 = np.matmul(s3, Theta2) * \
    sigmoidGradient(np.insert(z2, 0, 1, axis=1))
# remove z2 bias column
s2 = s2[:, 1:]
We need to return the gradient’s as part of our cost function, these are needed as gradient descent is a process that occurs in backward prop where the goal is to continuously resample the gradient of the model’s parameter in the opposite direction based on the weight w, updating consistently until we reach the global minimum of function J(w).
To put it simply, we use gradient descent to minimize the cost function, J(w).
And again, matrix manipulation to the rescue makes it just a few lines of code.
Our first step is to calculate a penalty which can be used to regularise our cost. If you want an explanation on regularisation, then have a look at this article.
# calculate regularized penalty, replace 1st column with zeros
p1 = (lambda_/m) * np.insert(Theta1[:, 1:], 0, 0, axis=1)
p2 = (lambda_/m) * np.insert(Theta2[:, 1:], 0, 0, axis=1)
For cost optimisation, we need to feed back the gradient of this particular set of weights. Figure 2 indicates what a gradient is once its been plotted. For the set of weights, being fed to our cost function, this will be the gradient of the plotted line.
# delta_1 and delta_2 are the gradient accumulators built from the
# back propagation deltas (s2, s3) and the activations (see the GitHub source)
# gradients / partial derivatives
Theta1_grad = delta_1 / m + p1
Theta2_grad = delta_2 / m + p2
However, the cost optimisation functions don't know how to work with 2 thetas, so let's unroll these into a vector, with results shown in figure 5.
grad = np.concatenate((Theta1_grad.flatten(), Theta2_grad.flatten()), axis=None)
Ok WOW, that's been a lot of info, but our cost function is done, so let's move on to running gradient descent and cost optimization.
One difficult thing to understand is if our cost function is performing well. A good method to check this is to run a function called checknn.
Creates a small neural network to check the backpropagation gradients, it will output the analytical gradients produced by your backprop code and the numerical gradients (computed using computeNumericalGradient). These two gradient computations should result in very similar values.
If you want to delve more into the theory behind this technique, it is taught in Andrew Ng's machine learning course, week 4.
You do not need to run this every time, just when you have setup your cost function for the first time.
I won't put the code here, but check the github project in checknn.py for the following functions:
checkNNGradients
debugInitializeWeights
computeNumericalGradient
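The full code is in the GitHub project, but as a rough sketch, the core idea behind computeNumericalGradient is a simple central difference around each parameter:
# Sketch only: approximate each partial derivative numerically so it can be
# compared against the gradient returned by backpropagation
def computeNumericalGradient(J, theta, e=1e-4):
    numgrad = np.zeros_like(theta)
    perturb = np.zeros_like(theta)
    for p in range(theta.size):
        perturb[p] = e
        loss1 = J(theta - perturb)
        loss2 = J(theta + perturb)
        numgrad[p] = (loss2 - loss1) / (2 * e)
        perturb[p] = 0
    return numgrad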
After running checknn, you should get the following results
Gradient descent is an optimization algorithm which is mainly used to find the minimum of a function. In machine learning, gradient descent is used to update parameters in a model. Parameters can vary according to the algorithms, such as coefficients in Linear Regression and weights in Neural Networks. We will use SciPy optimize modules to run our gradient descent.
from scipy import optimize as opt

print('Training Neural Network... ')

# Change the MaxIter to a larger value to see how more
# training helps.
options = {'maxiter': 50, 'disp': True}

# You should also try different values of lambda
lambda_ = 1

# Create cost function shortcuts to be minimized
fun = lambda nn_params: nnCostFunction2(nn_params, input_layer_size,
                                        hidden_layer_size, output_layer_size,
                                        xn, y, lambda_)[0]
jac = lambda nn_params: nnCostFunction2(nn_params, input_layer_size,
                                        hidden_layer_size, output_layer_size,
                                        xn, y, lambda_)[1]

# Now, costFunction is a function that takes in only one
# argument (the neural network parameters)
res = opt.minimize(fun, nn_params, method='CG', jac=jac, options=options)
nn_params = res.x
cost = res.fun
print(res.message)
print(cost)
Get our thetas back for each layer by using a reshape
# Obtain Theta1 and Theta2 back from nn_params
Theta1 = nn_params[:hidden_layer_size * (input_layer_size + 1)].reshape(
    (hidden_layer_size, input_layer_size + 1))
Theta2 = nn_params[hidden_layer_size * (input_layer_size + 1):].reshape(
    (output_layer_size, hidden_layer_size + 1))
Now that we have our best weights (thetas), let's use them to make a prediction to check for accuracy.
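predict is defined in the GitHub project; it simply runs forward propagation with the learned thetas and picks the most likely label. A minimal sketch of what it does:
# Sketch of predict: forward propagate and take the most likely class
def predict(Theta1, Theta2, X):
    a1 = np.insert(X, 0, 1, axis=1)                    # add bias column
    a2 = sigmoid(np.matmul(a1, Theta1.transpose()))
    a2 = np.insert(a2, 0, 1, axis=1)                   # add bias column
    a3 = sigmoid(np.matmul(a2, Theta2.transpose()))
    # undo the y[i] - 1 shift used when building y_one_hot
    return np.argmax(a3, axis=1) + 1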
pred = predict(Theta1, Theta2, X)
print(f'Training Set Accuracy: {(pred == y).mean() * 100:f}')
You should get an accuracy of 65.427928%. Yes, it's a little low, but that's the dataset we are working with. I have tried this dataset with logistic regression and SVM and get the same results.
I hope this article gives you a deep level of understanding of neural networks and how you can use it to classify data. Let me know how you go... | [
{
"code": null,
"e": 549,
"s": 172,
"text": "Neural networks reflect the behavior of the human brain. They allow programs to recognise patterns and solve common problems in machine learning. This is another option to either perform classification instead of logistics regression. At Rapidtrade, we use neural networks to classify data and run regression scenarios. The source code for this article is available on GitHub."
},
{
"code": null,
"e": 945,
"s": 549,
"text": "We will be working with a dataset from Kaggle and you can download it here. So to visualise the data we will be working with in this article, see below. We will use this to train the network to categorise our customers according to column J. We will also use the 3 features highlighted to classify our customers. I needed 3 features to fit my neural network and these were the best 3 available."
},
{
"code": null,
"e": 1073,
"s": 945,
"text": "Just keep in mind, we will convert all the alpha string values to numerics. After all, we can’t plug strings into equations ;-)"
},
{
"code": null,
"e": 1136,
"s": 1073,
"text": "This is quite a long article and is broken up into 2 sections:"
},
{
"code": null,
"e": 1149,
"s": 1136,
"text": "Introduction"
},
{
"code": null,
"e": 1173,
"s": 1149,
"text": "Putting it all together"
},
{
"code": null,
"e": 1187,
"s": 1173,
"text": "Good luck ;-)"
},
{
"code": null,
"e": 1334,
"s": 1187,
"text": "Neural networks are always made up of layers, as seen in figure 2. It all looks complicated, but let’s unpack this to make it more understandable."
},
{
"code": null,
"e": 1460,
"s": 1334,
"text": "A neural network has 6 important concepts, which I will explain briefly here, but cover in detail in this series of articles."
},
{
"code": null,
"e": 1810,
"s": 1460,
"text": "- Weights — These are like the theta’s we would use in other algorithms- Layers — Our network will have 3 layers- Forward propagation — Use the features/weights to get Z and A- Back propagation — Use the results of forward propogation/weights to get S- Calculating the cost/gradient of each weight- Gradient descent — find the best weight/hypothesis"
},
{
"code": null,
"e": 1948,
"s": 1810,
"text": "In this series, we will be building a neural network with 3 layers. Let's discuss these layers quickly before we get into the tick of it."
},
{
"code": null,
"e": 2108,
"s": 1948,
"text": "Refer to figure 2 above and we will refer to the result of this layer as A1. The size (# units) of this layer depends on the number of features in our dataset."
},
{
"code": null,
"e": 2239,
"s": 2108,
"text": "Building our input layer is not difficult you simply copy X into A1, but add what is called a biased layer, which defaults to “1”."
},
{
"code": null,
"e": 2435,
"s": 2239,
"text": "Col 1: Biased layer defaults to ‘1’Col 2: “Ever married” our 1st feature and has been re-labeled to 1/2Col 3: “Graduated” our 2nd feature and re-labeled to 1/2Col 4: “Family size” our 3rd feature"
},
{
"code": null,
"e": 2661,
"s": 2435,
"text": "Refer to figure 2 above and we only have 1 hidden layer, but you could have a hidden layer per feature. If you had more hidden layers than the logic I mention below, you would replicate the calculations for each hidden layer."
},
{
"code": null,
"e": 2735,
"s": 2661,
"text": "The size (#units) is up to you, we have chosen #features * 2 ie. 6 units."
},
{
"code": null,
"e": 2934,
"s": 2735,
"text": "This layer is calculated during forward and backward propagation. After running both these steps, we calculate Z2, A2 and S2 for each unit. See below for the outputs once each of these steps is run."
},
{
"code": null,
"e": 2954,
"s": 2934,
"text": "Forward propagation"
},
{
"code": null,
"e": 3018,
"s": 2954,
"text": "Refer to figure 1 as in this step, we calculate Z2 and then A2."
},
{
"code": null,
"e": 3117,
"s": 3018,
"text": "Z2 contains the results of our hypothesis calculation for each of the 6 units in our hidden layer."
},
{
"code": null,
"e": 3233,
"s": 3117,
"text": "While A2 also includes the biased layer (col 1) and has the sigmoid function applied to each of the cell’s from Z2."
},
{
"code": null,
"e": 3294,
"s": 3233,
"text": "Hence Z2 has 6 columns and A2 has 7 columns as per figure 4."
},
{
"code": null,
"e": 3311,
"s": 3294,
"text": "Back propagation"
},
{
"code": null,
"e": 3683,
"s": 3311,
"text": "So, after forward propagation has run through all the layers, we then perform the back propagation step to calculate S2. S2 is referred to as the delta of each units hypothesis calculation. This is used to then figure out the gradient for that theta and later on, combining this with the cost of this unit, helps gradient descent figure out what is the best theta/weight."
},
{
"code": null,
"e": 3985,
"s": 3683,
"text": "Our output layer gives us the result of our hypothesis. ie. if these thetas were applied, what would our best guess be in classifying these customers. The size (#units) is derived from the number labels for Y. As can become seen in figure 1, there are 7 labels, thus the size of the output layer is 7."
},
{
"code": null,
"e": 4146,
"s": 3985,
"text": "As with the hidden layer, this result is calculated during the 2 steps of forward and backward propagation. After running both these steps, here is the results:"
},
{
"code": null,
"e": 4166,
"s": 4146,
"text": "Forward propagation"
},
{
"code": null,
"e": 4379,
"s": 4166,
"text": "During forward prop, we will calculate Z3 and A3 for the output layer, as we did for the hidden layer. Refer to figure 1 above to see there is no bias column needed and you can see the results of Z3 and A3 below."
},
{
"code": null,
"e": 4396,
"s": 4379,
"text": "Back propagation"
},
{
"code": null,
"e": 4648,
"s": 4396,
"text": "Now that (referring to figure 1) we have Z3 and A3, lets calculate S3. As it turns out S3 is simply a basic cost calculation, subtracting A3 from Y, so we will explore the equations in the upcoming articles, but we can nonetheless see the result below"
},
{
"code": null,
"e": 4878,
"s": 4648,
"text": "So, the above is a little awkward as it visualises the outputs in each layer. Our main focus in neural networks, is a function to compute the cost of our neural network. The coding for this function will take the following steps."
},
{
"code": null,
"e": 5363,
"s": 4878,
"text": "Prepare the dataSetup neural networkInitialise a set of weights/thetasCreate our cost function which will4.1 Perform forward propagation4.2 Calculate the cost of forward propagation4.3 Perform backward propagation4.4 Calculate the deltas and then gradients from backward prop.Perform cost optimisation 5.1 Validates our cost function5.2 That performs gradient descent on steps (4.1) to (4.4) until it finds the best weight/theta to use for predictionsPredict results to check accuracy"
},
{
"code": null,
"e": 5380,
"s": 5363,
"text": "Prepare the data"
},
{
"code": null,
"e": 5401,
"s": 5380,
"text": "Setup neural network"
},
{
"code": null,
"e": 5436,
"s": 5401,
"text": "Initialise a set of weights/thetas"
},
{
"code": null,
"e": 5643,
"s": 5436,
"text": "Create our cost function which will4.1 Perform forward propagation4.2 Calculate the cost of forward propagation4.3 Perform backward propagation4.4 Calculate the deltas and then gradients from backward prop."
},
{
"code": null,
"e": 5819,
"s": 5643,
"text": "Perform cost optimisation 5.1 Validates our cost function5.2 That performs gradient descent on steps (4.1) to (4.4) until it finds the best weight/theta to use for predictions"
},
{
"code": null,
"e": 5853,
"s": 5819,
"text": "Predict results to check accuracy"
},
{
"code": null,
"e": 6024,
"s": 5853,
"text": "To begin this exploratory analysis, first import libraries and define functions for plotting the data using matplotlib. Depending on the data, not all plots will be made."
},
{
"code": null,
"e": 6101,
"s": 6024,
"text": "Hey, I’m just a simple kerneling bot, not a Kaggle Competitions Grandmaster!"
},
{
"code": null,
"e": 6508,
"s": 6101,
"text": "from mpl_toolkits.mplot3d import Axes3Dfrom sklearn.preprocessing import StandardScalerfrom sklearn.preprocessing import LabelEncoderimport seaborn as snsimport matplotlib.pyplot as plt # plottingimport numpy as np # linear algebraimport os # accessing directory structureimport pandas as pd # data processing, CSV file I/O (e.g. from scipy import optimize as optpd.read_csv)import matplotlib.pyplot as plt"
},
{
"code": null,
"e": 6556,
"s": 6508,
"text": "Now, let's read our data and have a quick look."
},
{
"code": null,
"e": 6603,
"s": 6556,
"text": "df = pd.read_csv(‘customertrain.csv’)df.head()"
},
{
"code": null,
"e": 6724,
"s": 6603,
"text": "Doing an info, we can see we have some work to do with null values as well as some object fields to convert to numerics."
},
{
"code": null,
"e": 6734,
"s": 6724,
"text": "df.info()"
},
{
"code": null,
"e": 6821,
"s": 6734,
"text": "So, let's transform our object fields to numerics and drop the columns we do not need."
},
{
"code": null,
"e": 7129,
"s": 6821,
"text": "columns = [“Gender”,”Ever_Married”,”Graduated”,”Profession”,”Spending_Score”]for feature in columns: le = LabelEncoder() df[feature] = le.fit_transform(df[feature])df = df.drop([“ID”,”Gender”,”Age”,”Profession”,”Work_Experience”,”Spending_Score”], axis=1)df.dropna(subset=['Var_1'], inplace=True)df.head()"
},
{
"code": null,
"e": 7215,
"s": 7129,
"text": "Use fit_transform to encode our multinomial categories into numbers we can work with."
},
{
"code": null,
"e": 7289,
"s": 7215,
"text": "yle = LabelEncoder()df[“Var_1”] = yle.fit_transform(df[“Var_1”])df.head()"
},
{
"code": null,
"e": 7314,
"s": 7289,
"text": "Fill in missing features"
},
{
"code": null,
"e": 7495,
"s": 7314,
"text": "An important part of regression is understanding which features are missing. We can choose to ignore all rows with missing values, or fill them in with either mode, median or mode."
},
{
"code": null,
"e": 7520,
"s": 7495,
"text": "Mode = most common value"
},
{
"code": null,
"e": 7542,
"s": 7520,
"text": "Median = middle value"
},
{
"code": null,
"e": 7557,
"s": 7542,
"text": "Mean = average"
},
{
"code": null,
"e": 7710,
"s": 7557,
"text": "Here is a handy function you can call which will fill in the missing features by your desired method. We will choose to fill in values with the average."
},
{
"code": null,
"e": 7772,
"s": 7710,
"text": "After funning below, you should see 7992 with no null values."
},
{
"code": null,
"e": 8174,
"s": 7772,
"text": "def fillmissing(df, feature, method): if method == “mode”: df[feature] = df[feature].fillna(df[feature].mode()[0]) elif method == “median”: df[feature] = df[feature].fillna(df[feature].median()) else: df[feature] = df[feature].fillna(df[feature].mean())features_missing= df.columns[df.isna().any()]for feature in features_missing fillmissing(df, feature= feature, method= “mean”)df.info()"
},
{
"code": null,
"e": 8184,
"s": 8174,
"text": "Extract Y"
},
{
"code": null,
"e": 8267,
"s": 8184,
"text": "Let's extract our Y column into a separate array and remove it from the dataframe."
},
{
"code": null,
"e": 8313,
"s": 8267,
"text": "Y = df[“Var_1”]df = df.drop([“Var_1”], axis=1"
},
{
"code": null,
"e": 8398,
"s": 8313,
"text": "Now copy out our X and y columns into matrices for easier matrix manipulation later."
},
{
"code": null,
"e": 8522,
"s": 8398,
"text": "X = df.to_numpy() # np.matrix(df.to_numpy())y = Y.to_numpy().transpose() # np.matrix(Y.to_numpy()).transpose()m,n = X.shape"
},
{
"code": null,
"e": 8541,
"s": 8522,
"text": "Normalize features"
},
{
"code": null,
"e": 8690,
"s": 8541,
"text": "Now, let's normalise X so the values lie between -1 and 1. We do this so we can get all features into a similar range. We use the following equation"
},
{
"code": null,
"e": 8930,
"s": 8690,
"text": "The goal to perform standardization is to bring down all the features to a common scale without distorting the differences in the range of the values. This process of rescaling the features is so that they have mean as 0 and variance as 1."
},
{
"code": null,
"e": 9041,
"s": 8930,
"text": "Now, we can setup the sizes of our neural network, first, below is the neural network we want to put together."
},
{
"code": null,
"e": 9168,
"s": 9041,
"text": "Below initialisations, ensure above network is achieved. So, now you are asking “What are reasonable numbers to set these to?”"
},
{
"code": null,
"e": 9216,
"s": 9168,
"text": "Input layer = set to the size of the dimensions"
},
{
"code": null,
"e": 9255,
"s": 9216,
"text": "Hidden layers = set to input_layer * 2"
},
{
"code": null,
"e": 9340,
"s": 9255,
"text": "Output layer = set to the size of the labels of Y. In our case, this is 7 categories"
},
{
"code": null,
"e": 9534,
"s": 9340,
"text": "input_layer_size = n # Dimension of featureshidden_layer_size = input_layer_size*2 # of units in hidden layeroutput_layer_size = len(yle.classes_) # number of labels"
},
{
"code": null,
"e": 9749,
"s": 9534,
"text": "As it turns out, this is quite an important topic for gradient descent. If you have not dealt with gradient descent, then check this article first. We can see above that we need 2 sets of weights. (signified by ø)."
},
{
"code": null,
"e": 9820,
"s": 9749,
"text": "We often still calls these weights theta and they mean the same thing."
},
{
"code": null,
"e": 9952,
"s": 9820,
"text": "We need one set of thetas for level 2 and a 2nd set for level 3. Each theta is a matrix and is size(L) * size(L-1). Thus for above:"
},
{
"code": null,
"e": 9972,
"s": 9952,
"text": "Theta1 = 6x4 matrix"
},
{
"code": null,
"e": 9992,
"s": 9972,
"text": "Theta2 = 7x7 matrix"
},
{
"code": null,
"e": 10200,
"s": 9992,
"text": "We have to now guess at which initial thetas should be our starting point. Here, epsilon comes to the rescue and below is the matlab code to easily generate some random small numbers for our initial weights."
},
{
"code": null,
"e": 10344,
"s": 10200,
"text": "def initializeWeights(L_in, L_out): epsilon_init = 0.12 W = np.random.rand(L_out, 1 + L_in) * 2 * \\ epsilon_init - epsilon_init return W"
},
{
"code": null,
"e": 10573,
"s": 10344,
"text": "After running above function with our sizes for each theta as mentioned above, we will get some good small random initial values as in figure 7. For figure 1 above, the weights we mention would refer to rows 1 in below matrix’s."
},
{
"code": null,
"e": 10707,
"s": 10573,
"text": "We need a function which can implement the neural network cost function for a two layer neural network which performs classification."
},
{
"code": null,
"e": 10790,
"s": 10707,
"text": "In the GitHub code, checknn.py our costfunction called nnCostFunction will return:"
},
{
"code": null,
"e": 10878,
"s": 10790,
"text": "gradient should be a “unrolled” vector of the partial derivatives of the neural network"
},
{
"code": null,
"e": 10924,
"s": 10878,
"text": "the final J which is the cost of this weight."
},
{
"code": null,
"e": 10978,
"s": 10924,
"text": "Our cost function will need to perform the following:"
},
{
"code": null,
"e": 11091,
"s": 10978,
"text": "Reshape nn_params back into the parameters Theta1 and Theta2, the weight matrices for our 2 layer neural network"
},
{
"code": null,
"e": 11144,
"s": 11091,
"text": "Perform forward propagation to calculate (a) and (z)"
},
{
"code": null,
"e": 11198,
"s": 11144,
"text": "Perform backward propagation to use (a) calculate (s)"
},
{
"code": null,
"e": 11310,
"s": 11198,
"text": "So, our cost function first up, needs to reshape our thetas back into a theta for the hidden and output layers."
},
{
"code": null,
"e": 11710,
"s": 11310,
"text": "# Reshape nn_params back into the parameters Theta1 and Theta2, # the weight matrices for our 2 layer neural networkTheta1 = nn_params[:hidden_layer_size * \\ (input_layer_size + 1)].reshape( \\ (hidden_layer_size, input_layer_size + 1))Theta2 = nn_params[hidden_layer_size * \\ (input_layer_size + 1):].reshape( \\ (num_labels, hidden_layer_size + 1))# Setup some useful variablesm = X.shape[0]"
},
{
"code": null,
"e": 11801,
"s": 11710,
"text": "Forward propagation is an important part of neural networks. Its not as hard as it sounds."
},
{
"code": null,
"e": 12096,
"s": 11801,
"text": "In figure 7, we can see our network diagram with much of the details removed. We will focus on one unit in level 2 and one unit in level 3. This understanding can then be copied to all units. Take note of the matrix multiplication we can do (in blue in figure 7) to perform forward propagation."
},
{
"code": null,
"e": 12194,
"s": 12096,
"text": "I am showing the details for one unit in each layer, but you can repeat the logic for all layers."
},
{
"code": null,
"e": 12298,
"s": 12194,
"text": "Before we show the forward prop code, lets talk a little on the 2 concepts we need during forward prop."
},
{
"code": null,
"e": 12322,
"s": 12298,
"text": "4.1.1 Sigmoid functions"
},
{
"code": null,
"e": 12628,
"s": 12322,
"text": "Since we are doing classification, we will use sigmoid to evaluate our predictions. A sigmoid function is a mathematical function having a characteristic “S”-shaped curve or sigmoid curve. A common example of a sigmoid function is the logistic function shown in the first figure and defined by the formula"
},
{
"code": null,
"e": 12696,
"s": 12628,
"text": "In github, in checknn.py the following handy functions are created:"
},
{
"code": null,
"e": 12764,
"s": 12696,
"text": "sigmoid is a handy function to compute sigmoid of input parameter Z"
},
{
"code": null,
"e": 12900,
"s": 12764,
"text": "sigmoidGradient computes the gradient of the sigmoid function evaluated at z. This should work regardless if z is a matrix or a vector."
},
{
"code": null,
"e": 13062,
"s": 12900,
"text": "def sigmoid(z): g = np.frompyfunc(lambda x: 1 / (1 + np.exp(-x)), 1, 1) return g(z).astype(z.dtype)def sigmoidGradient(z) return sigmoid(z) * (1 - sigmoid(z))"
},
{
"code": null,
"e": 13083,
"s": 13062,
"text": "4.1.2 Regularization"
},
{
"code": null,
"e": 13613,
"s": 13083,
"text": "We will implement regularization as one of the most common problems data science professionals face is to avoid overfitting. Overfitting gives you a situation where your model performed exceptionally well on train data but was not able to predict test data. Neural network are complex and makes them more prone to overfitting. Regularization is a technique which makes slight modifications to the learning algorithm such that the model generalizes better. This in turn improves the model’s performance on the unseen data as well."
},
{
"code": null,
"e": 13832,
"s": 13613,
"text": "If you have studied the concept of regularization in machine learning, you will have a fair idea that regularization penalizes the coefficients. In deep learning, it actually penalizes the weight matrices of the nodes."
},
{
"code": null,
"e": 13977,
"s": 13832,
"text": "We implement regularization in nnCostFunction by passing in a lambda which us used to penalise both the gradients and costs that are calculated."
},
{
"code": null,
"e": 14009,
"s": 13977,
"text": "4.1.3 Implementing forward prop"
},
{
"code": null,
"e": 14182,
"s": 14009,
"text": "As per figure 1, lets calculate A1. You can see that its pretty much my X features an we add the bias column hard coded to “1” in front. Here is the python code to do this:"
},
{
"code": null,
"e": 14245,
"s": 14182,
"text": "# Add ones to the X data matrixa1 = np.insert(X, 0, 1, axis=1)"
},
{
"code": null,
"e": 14366,
"s": 14245,
"text": "The result will now give you the results in A1 in figure 4. Take special note of the bias column “1” added on the front."
},
{
"code": null,
"e": 14616,
"s": 14366,
"text": "Great, thats A1 done, lets move onto A2. Before we get A2, we will first run a hypothesis to calculate Z2. Once you have the hypotheses, you can run it through the sigmoid function to get A2. Again, as per figure 1, add the bias column to the front."
},
{
"code": null,
"e": 14744,
"s": 14616,
"text": "# Perform forward propagation for layer 2z2 = np.matmul(a1, Theta1.transpose())a2 = sigmoid(z2)a2 = np.insert(a2, 0, 1, axis=1)"
},
{
"code": null,
"e": 14866,
"s": 14744,
"text": "Ok, so we almost there.... Now onto A3, lets do the same as with A2, but this time, we dont worry to add the bias column."
},
{
"code": null,
"e": 14921,
"s": 14866,
"text": "z3 = np.matmul(a2, Theta2.transpose())a3 = sigmoid(z3)"
},
{
"code": null,
"e": 15051,
"s": 14921,
"text": "You may be asking, “why do we keep Z2 & Z3”. Well, we will need those in back propagation. So we may as well keep them handy ;-)."
},
{
"code": null,
"e": 15331,
"s": 15051,
"text": "Before we continue, if you understand our Y column (figure 9) which contains the labels used to categorise our customers. Then to calculate the cost we need to reformat Y into a matrix which corresponds to the number of labels. In our case we have 7 categories for our customers."
},
{
"code": null,
"e": 15456,
"s": 15331,
"text": "Figure 8, shows how Y is converted to a matrix y_one_hot and labels are now indicated as a binary in the appropriate column."
},
{
"code": null,
"e": 15608,
"s": 15456,
"text": "# turn Y into a matrix with a new column for each category and marked with 1y_one_hot = np.zeros_like(a3)for i in range(m): y_one_hot[i, y[i] - 1] = 1"
},
{
"code": null,
"e": 15703,
"s": 15608,
"text": "Now that we have Y in a matrix format, lets have a look at the equation to calculate the cost."
},
{
"code": null,
"e": 15844,
"s": 15703,
"text": "Well, that's all very complicated, but good news is that with some matrix manipulation, we can do it in a few lines of python code as below."
},
{
"code": null,
"e": 16128,
"s": 15844,
"text": "# Calculate the cost of our forward propones = np.ones_like(a3A = np.matmul(y_one_hot.transpose(), np.log(a3)) + \\ np.matmul((ones - y_one_hot).transpose(), np.log(ones - a3))J = -1 / m * A.trace()J += lambda_ / (2 * m) * \\ (np.sum(Theta1[:, 1:] ** 2) + np.sum(Theta2[:, 1:] ** 2))"
},
{
"code": null,
"e": 16219,
"s": 16128,
"text": "So, we have simplified our neural network in figure 1 to only show the details to firstly:"
},
{
"code": null,
"e": 16254,
"s": 16219,
"text": "Subtract A1(3) from Y calculate S3"
},
{
"code": null,
"e": 16359,
"s": 16254,
"text": "Thereafter perform a linear equation using the thetas mentioned below multiplied by S3 to calculate. S2."
},
{
"code": null,
"e": 16481,
"s": 16359,
"text": "Since a picture paints 1000 words, figure 9 should explain what we use to calculate S3 and thereafter S2 (marked in red)."
},
{
"code": null,
"e": 16632,
"s": 16481,
"text": "From (3) we understand how our weights (thetas) were initialised, so just to visualise the weights (ø) that figure 9 is referring see figure 10 below."
},
{
"code": null,
"e": 16736,
"s": 16632,
"text": "So again, with matrix manipulation to the rescue, forward propagation is not a difficult task in python"
},
{
"code": null,
"e": 16912,
"s": 16736,
"text": "# Perform backward propagation to calculate deltass3 = a3 - yvs2 = np.matmul(s3, Theta2) * \\ sigmoidGradient(np.insert(z2, 0, 1, axis=1))# remove z2 bias columns2 = s2[:, 1:]"
},
{
"code": null,
"e": 17258,
"s": 16912,
"text": "We need to return the gradient’s as part of our cost function, these are needed as gradient descent is a process that occurs in backward prop where the goal is to continuously resample the gradient of the model’s parameter in the opposite direction based on the weight w, updating consistently until we reach the global minimum of function J(w)."
},
{
"code": null,
"e": 17337,
"s": 17258,
"text": "To put it simply, we use gradient descent to minimize the cost function, J(w)."
},
{
"code": null,
"e": 17417,
"s": 17337,
"text": "And again, matrix manipulation to the rescue makes it just a few lines of code."
},
{
"code": null,
"e": 17580,
"s": 17417,
"text": "Our first step is to calculate a penalty which can be used to regularise our cost. If you want an explanation on regularisation, then have a look at this article."
},
{
"code": null,
"e": 17757,
"s": 17580,
"text": "# calculate regularized penalty, replace 1st column with zerosp1 = (lambda_/m) * np.insert(Theta1[:, 1:], 0, 0, axis=1)p2 = (lambda_/m) * np.insert(Theta2[:, 1:], 0, 0, axis=1)"
},
{
"code": null,
"e": 18013,
"s": 17757,
"text": "For cost optimisation, we need to feed back the gradient of this particular set of weights. Figure 2 indicates what a gradient is once its been plotted. For the set of weights, being fed to our cost function, this will be the gradient of the plotted line."
},
{
"code": null,
"e": 18189,
"s": 18013,
"text": "# gradients / partial derivitivesTheta1_grad = delta_1 / m + p1Theta2_grad = delta_2 / m + p2grad = np.concatenate((Theta1_grad.flatten(), Theta2_grad.flatten()), axis=None)"
},
{
"code": null,
"e": 18336,
"s": 18189,
"text": "However, the cost optimisation functions dont know how to work with 2 theta’s, so lets unroll these into a vector, with results shown in figure 5."
},
{
"code": null,
"e": 18421,
"s": 18336,
"text": "grad = np.concatenate((Theta1_grad.flatten(), Theta2_grad.flatten()), axis=None)"
},
{
"code": null,
"e": 18549,
"s": 18421,
"text": "Ok WOW, thats been a lot of info, but our cost function is done, lets move onto running gradient descent and cost optimization."
},
{
"code": null,
"e": 18692,
"s": 18549,
"text": "One difficult thing to understand is if our cost function is performing well. A good method to check this is to run a function called checknn."
},
{
"code": null,
"e": 18975,
"s": 18692,
"text": "Creates a small neural network to check the backpropagation gradients, it will output the analytical gradients produced by your backprop code and the numerical gradients (computed using computeNumericalGradient). These two gradient computations should result in very similar values."
},
{
"code": null,
"e": 19101,
"s": 18975,
"text": "If you want to delve more into the theory behind this technique, it is tought in Andrew Ng’s machine learning course, week 4."
},
{
"code": null,
"e": 19205,
"s": 19101,
"text": "You do not need to run this every time, just when you have setup your cost function for the first time."
},
{
"code": null,
"e": 19304,
"s": 19205,
"text": "I won't put the code here, but check the github project in checknn.py for the following functions:"
},
{
"code": null,
"e": 19321,
"s": 19304,
"text": "checkNNGradients"
},
{
"code": null,
"e": 19344,
"s": 19321,
"text": "debugInitializeWeights"
},
{
"code": null,
"e": 19369,
"s": 19344,
"text": "computeNumericalGradient"
},
{
"code": null,
"e": 19430,
"s": 19369,
"text": "After running cheecknn, you should get the following results"
},
{
"code": null,
"e": 19798,
"s": 19430,
"text": "Gradient descent is an optimization algorithm which is mainly used to find the minimum of a function. In machine learning, gradient descent is used to update parameters in a model. Parameters can vary according to the algorithms, such as coefficients in Linear Regression and weights in Neural Networks. We will use SciPy optimize modules to run our gradient descent."
},
{
"code": null,
"e": 20571,
"s": 19798,
"text": "from scipy import optimize as optprint('Training Neural Network... ')# Change the MaxIter to a larger value to see how more # training helps.options = {'maxiter': 50, 'disp': True}# You should also try different values of lambdalambda_ = 1;# Create cost function shortcuts to be minimizedfun = lambda nn_params: nnCostFunction2(nn_params, input_layer_size, hidden_layer_size, output_layer_size, xn, y, lambda_)[0]jac = lambda nn_params: nnCostFunction2(nn_params, input_layer_size, hidden_layer_size, output_layer_size, xn, y, lambda_)[1]# Now, costFunction is a function that takes in only one # argument (the neural network parameters)res = opt.minimize(fun, nn_params, method='CG', jac=jac, options=options)nn_params = res.xcost = res.funprint(res.message)print(cost)"
},
{
"code": null,
"e": 20625,
"s": 20571,
"text": "Get our thetas back for each layer by using a reshape"
},
{
"code": null,
"e": 20907,
"s": 20625,
"text": "# Obtain Theta1 and Theta2 back from nn_paramsTheta1 = nn_params[:hidden_layer_size * (input_layer_size + 1)].reshape((hidden_layer_size, input_layer_size + 1))Theta2 = nn_params[hidden_layer_size * (input_layer_size + 1):].reshape((output_layer_size, hidden_layer_size + 1))"
},
{
"code": null,
"e": 21010,
"s": 20907,
"text": "Now that we have our best weights (thetas), let's use them to make a prediction to check for accuracy."
},
{
"code": null,
"e": 21105,
"s": 21010,
"text": "pred = predict(Theta1, Theta2, X)print(f’Training Set Accuracy: {(pred == y).mean() * 100:f}’)"
},
{
"code": null,
"e": 21297,
"s": 21105,
"text": "You should get an accuracy of 65.427928%Yes, it's a little low, but that's the dataset we are working with. I have tried this dataset with logistics regression & SVM and get the same results."
}
]
|
CLAHE and Thresholding in Python. Contrast Limited Adaptive Histogram... | by Ravindu Senaratne | Towards Data Science | In this article, let’s talk about histogram equalization and image thresholding. Histogram equalization is one of the tools we have for image pre-processing and it makes image thresholding or segmentation tasks easier.
The reason we need histogram equalization is that when we collect images that are washed out or images with low contrast, we can stretch the histogram to span the entire range.
Let’s look at an example, Image is collected using an electron microscope.
In the above image, we can see the separation but it is not as clear as it could be. So let's look at the histogram and use equalization to stretch it before thresholding.
import cv2
import numpy as np
from matplotlib import pyplot as plt

img = cv2.imread("test.jpg", 0)
equ = cv2.equalizeHist(img)
First I’ve read my image as grayscale and assigned it to the variable img. To perform histogram equalization we can run cv2.equalizeHist(img).
Let’s look at our test image’s histogram. And you can see it is skewed to the right side.
plt.hist(img.flat, bins=100, range=(0, 255))
Let’s look at the equalized image’s histogram. And you can see the histogram is stretched all the way to 255.
plt.hist(equ.flat, bins=100, range=(0, 255))
Below is the result of the histogram equalized image,
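If you want to produce a comparison image like this yourself, one simple option (not part of the original snippet, just an illustrative addition) is to stack the original and equalized arrays side by side and save the result:

res = np.hstack((img, equ))  # original and equalized image side by side
cv2.imwrite("equalized_comparison.jpg", res)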
As you can see there is a lot of noise in the above image, because it considers the global contrast of the image, not just the local contrast. Therefore performing global equalization might not work very well on your image. In those cases, we can use Adaptive Histogram Equalization, also known as CLAHE (Contrast Limited Adaptive Histogram Equalization).
Contrast Limited AHE (CLAHE) is a variant of adaptive histogram equalization in which the contrast amplification is limited, so as to reduce this problem of noise amplification. In simple words, CLAHE does histogram equalization in small patches or in small tiles with high accuracy and contrast limiting.
Now we know what CLAHE is, let’s see how to set it up.
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
cl_img = clahe.apply(img)
As you can see from the image above, CLAHE gives a much better result compared to the normal equalized image. But it still has a lot of noise. Let's see how thresholding works out to get better results.
To get a better result on images use .tiff file format rather than .jpeg file format
Before getting started on thresholding we need to look at the histogram of the CLAHE image.
plt.hist(cl_img.flat, bins=100, range=(100, 255))
As you can see from the above histogram there is a dip between 160–200 and we can decide on a close number to separate those two peaks. After we decide on a close number we can use it to do the thresholding (I’ve selected 190)
ret, thresh1 = cv2.threshold(cl_img, 190, 150, cv2.THRESH_BINARY)
ret, thresh2 = cv2.threshold(cl_img, 190, 255, cv2.THRESH_BINARY_INV)
Ignore the first argument ret; we get the thresholded images in the variables thresh1 and thresh2. In the above code, the first argument is the image, the second is the threshold value we selected, the third is the value given to all pixels that pass the threshold, and the last is the thresholding method. I have assigned the THRESH_BINARY and THRESH_BINARY_INV results to separate variables.
The first threshold image (thresh1) has a gray level of 150 and the second threshold image (thresh2) has a value of 255. This is nothing but histogram-based thresholding.
In the above example, we found the value 190 is the optimal value by referring to the histogram. But there is an easier way to find the optimal value using OTSU.
ret, thresh3 = cv2.threshold(cl_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
Using OTSU we can automatically segment it.
OTSU is the best way to find the optimal value if you are using binary threshold. You can even use K-means if necessary.
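If you do want to try the K-means route, here is a minimal sketch that clusters the CLAHE image into two intensity groups with cv2.kmeans; the cluster count of 2 and the termination criteria are my own illustrative choices, not settings from this article.

# cluster the CLAHE image pixels into 2 intensity groups
pixels = cl_img.reshape(-1, 1).astype(np.float32)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
_, labels, centers = cv2.kmeans(pixels, 2, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
segmented = centers[labels.flatten()].reshape(cl_img.shape).astype(np.uint8)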
That is it for the article. If your image is noisy, do not forget to denoise it first. Then you can do all of these exercises.
The summary of this is that CLAHE works great in most situations. Then you can use the histogram to segment your image into these two phases, but if you do not want to plot the histogram to identify the separation, you can use OTSU. | [
{
"code": null,
"e": 390,
"s": 171,
"text": "In this article, let’s talk about histogram equalization and image thresholding. Histogram equalization is one of the tools we have for image pre-processing and it makes image thresholding or segmentation tasks easier."
},
{
"code": null,
"e": 567,
"s": 390,
"text": "The reason we need histogram equalization is that when we collect images that are washed out or images with low contrast, we can stretch the histogram to span the entire range."
},
{
"code": null,
"e": 642,
"s": 567,
"text": "Let’s look at an example, Image is collected using an electron microscope."
},
{
"code": null,
"e": 822,
"s": 642,
"text": "In the above image, we can see the separation but it is not clear as it could be. So let's look at the histogram and use the equalization to stretch the histogram to threshold it."
},
{
"code": null,
"e": 946,
"s": 822,
"text": "import cv2import numpy as npfrom matplotlib import pyplot as plt img = cv2.imread(\"test.jpg\", 0)equ = cv2.equalizeHist(img)"
},
{
"code": null,
"e": 1089,
"s": 946,
"text": "First I’ve read my image as grayscale and assigned it to the variable img. To perform histogram equalization we can run cv2.equalizeHist(img)."
},
{
"code": null,
"e": 1179,
"s": 1089,
"text": "Let’s look at our test image’s histogram. And you can see it is skewed to the right side."
},
{
"code": null,
"e": 1224,
"s": 1179,
"text": "plt.hist(img.flat, bins=100, range=(0, 255))"
},
{
"code": null,
"e": 1334,
"s": 1224,
"text": "Let’s look at the equalized image’s histogram. And you can see the histogram is stretched all the way to 255."
},
{
"code": null,
"e": 1379,
"s": 1334,
"text": "plt.hist(equ.flat, bins=100, range=(0, 255))"
},
{
"code": null,
"e": 1433,
"s": 1379,
"text": "Below is the result of the histogram equalized image,"
},
{
"code": null,
"e": 1795,
"s": 1433,
"text": "As you can see there is a lot of noise in the above image, because it considers the global contrast of the image, not just the local contrast. Therefore performing the global equalization might not work very well on your image, In those cases, we can use Adaptive Histogram Equalization or also know as CLAHE (Contrast Limiting Adaptive Histogram Equalization)."
},
{
"code": null,
"e": 2101,
"s": 1795,
"text": "Contrast Limited AHE (CLAHE) is a variant of adaptive histogram equalization in which the contrast amplification is limited, so as to reduce this problem of noise amplification. In simple words, CLAHE does histogram equalization in small patches or in small tiles with high accuracy and contrast limiting."
},
{
"code": null,
"e": 2156,
"s": 2101,
"text": "Now we know what CLAHE is, let’s see how to set it up."
},
{
"code": null,
"e": 2241,
"s": 2156,
"text": "clahe = cv2.createCLAHE(clipLimit =2.0, tileGridSize=(8,8))cl_img = clahe.apply(img)"
},
{
"code": null,
"e": 2442,
"s": 2241,
"text": "As you can see from the image above CLAHE gives a much better result compare to the normal equalized image. But it still has a lot of noise, Let’s see how thresholding works out to get better results."
},
{
"code": null,
"e": 2527,
"s": 2442,
"text": "To get a better result on images use .tiff file format rather than .jpeg file format"
},
{
"code": null,
"e": 2619,
"s": 2527,
"text": "Before getting started on thresholding we need to look at the histogram of the CLAHE image."
},
{
"code": null,
"e": 2669,
"s": 2619,
"text": "plt.hist(cl_img.flat, bins=100, range=(100, 255))"
},
{
"code": null,
"e": 2896,
"s": 2669,
"text": "As you can see from the above histogram there is a dip between 160–200 and we can decide on a close number to separate those two peaks. After we decide on a close number we can use it to do the thresholding (I’ve selected 190)"
},
{
"code": null,
"e": 3031,
"s": 2896,
"text": "ret, thresh1 = cv2.threshold(cl_img, 190, 150, cv2.THRESH_BINARY)ret, thresh2 = cv2.threshold(cl_img, 190, 255, cv2.THRESH_BINARY_INV)"
},
{
"code": null,
"e": 3417,
"s": 3031,
"text": "Ignore the first argument ret, We can get the thresholded image to the variablethresh1 and thresh2. In the above code part, the first argument is the image, secondly the threshold value we selected, thirdly, we need to give a value to all the pixels that are thresholded, and lastly, we need to give a method. I have given the THRESH_BINARY and THRESH_BINARY_INV to separate variables."
},
{
"code": null,
"e": 3599,
"s": 3417,
"text": "In the first threshold image (thresh1) has the gray level of value 150 and the second threshold image (thresh2) has a value of 255. This is nothing but histogram-based thresholding."
},
{
"code": null,
"e": 3761,
"s": 3599,
"text": "In the above example, we found the value 190 is the optimal value by referring to the histogram. But there is an easier way to find the optimal value using OTSU."
},
{
"code": null,
"e": 3843,
"s": 3761,
"text": "ret, thresh3 = cv2.threshold(cl_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)"
},
{
"code": null,
"e": 3887,
"s": 3843,
"text": "Using OTSU we can automatically segment it."
},
{
"code": null,
"e": 4008,
"s": 3887,
"text": "OTSU is the best way to find the optimal value if you are using binary threshold. You can even use K-means if necessary."
},
{
"code": null,
"e": 4135,
"s": 4008,
"text": "That is it for the article. If your image is noisy, do not forget to denoise it first. Then you can do all of these exercises."
}
]
|
Logistic Regression in Python— A Helpful Guide to How It Works | by Saul Dobilas | Towards Data Science | Just so you know what you are getting into, this is a long article that contains a visual and a mathematical explanation of logistic regression with 4 different Python examples. Please take a look at the list of topics below and feel free to jump to the sections that you are most interested in.
Machine Learning is making huge leaps forward, with an increasing number of algorithms enabling us to solve complex real-world problems.
This story is part of a deep dive series explaining the mechanics of Machine Learning algorithms. In addition to giving you an understanding of how ML algorithms work, it also provides you with Python examples to build your own ML models.
The category of algorithms logistic regression belongs to
An explanation of how logistic regression works
Python examples of how to build logistic regression models, including:- Binary target with 1 independent variable- Binary target with 2 independent variables- Multinomial with 3 class labels and 2 independent variables- Multinomial with 3 class labels and 2 independent variables + oversampling
Looking at the below chart's supervised learning branch, we can see that we have two main categories of problems: regression and classification.
Regression: we use regression algorithms when we have a continuous (numerical) target variable. For example, predicting the price of a house based on its proximity to major amenities.
Classification: used when the target variable is categorical. For example, predicting a win/loss of a game or customer defaulting/not-defaulting on a loan payment. Note, it does not necessarily have to be a binary outcome.
While logistic regression has a “regression” in its name, it actually belongs to the classification algorithms. However, there are some similarities between linear regression and logistic regression, which we will touch upon in the next section.
The below graph is interactive, so make sure to click on different categories to enlarge and reveal more👇.
If you enjoy Data Science and Machine Learning, please subscribe to get an email whenever I publish a new story.
Let’s begin the explanation by looking at the following example.
Assume we have a class of 10 pupils where each of them had to take an exam. Their preparation time, final score, and outcome (pass/fail) are displayed below. Note, the passing score is 40.
Exam Outcome Based on Study Time
Now, let’s see how we would approach this problem using linear regression vs. logistic regression.
If we were to build a simple linear regression model, we could use ‘hours of study’ as our independent variable and ‘final score’ as the dependent (target) variable. This is because ‘final score’ is a continuous variable as required by regression. This would lead us to a result summarized by a best-fit line taking the following form:
where β(0) is an intercept, β(1) is a slope, and x(1) is the sole independent variable.
Note, adding more independent variables would result in having more elements in your equation:
Let’s now assume that we do not have a ‘final score.’ All we have is an outcome( pass/fail flag). We want to build a logistic regression model where we use ‘hours of study’ to predict a student's likelihood of passing the exam.
As you can see from the table above, there is a strong correlation between ‘hours of study’ and ‘exam outcome,’ although we cannot perfectly separate the two classes. Hence, we want to have a model that gives us a probability of passing the exam given the study hours.
This is done by using a logistic function, also known as a sigmoid function:
If we were to plot a logistic function on a chart, it would look like this:
Probability
To understand how the data is mapped to the logistic function, we first need to learn about the relationship between probability, odds, and log-odds.
Odds — this is simply a ratio between the number of events (in this case, exam passes) and non-events (exam failures). Say, if you had 5 pupils that spent 7 hours each studying for an exam with 3 pupils passing and 2 failing it, the odds of passing would be 3:2, which is 1.5 in decimal notation.
Log-odds — is just a natural logarithm of odds. So if, the odds are 3:2 = 1.5, then log(odds) = log(1.5) = 0.405...
Probability vs. odds — you can easily convert between probability and odds. So if, the odds are 3:2, then the probability is 3/5=0.6.You can use the following equations to convert between probability and odds:
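As a quick numeric check of these conversions (an illustrative snippet of my own, using the 3:2 example from above):

import numpy as np

odds = 3 / 2                        # 3 passes to 2 fails
p = odds / (1 + odds)               # probability = 0.6
log_odds = np.log(odds)             # about 0.405
print(p, log_odds, p / (1 - p))     # converting the probability back gives the odds again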
The last thing to note is that S(t) in the logistic function is the probability p. Hence, using the above equations, we can derive that t=log(odds). Which makes our logistic function:
Obviously, we could simplify it further, which would lead us back to the original equation of probability expressed through odds. However, we are happy with this form because now we can go one step further to find the log-odds equation.
Let’s use another example to plot the data onto a graph to understand how the log-odds equation is created.
Exam Outcome for 119 Pupils
We can plot this data onto a chart with ‘study hours’ on the x-axis and log-odds on the y-axis:
Now, this looks familiar. The relationship between our independent variable x (hours of study) and log-odds is linear! This means that we can draw the best fit-line through the points using the same type of line equation:
This makes our Logistic function:
A general form with multiple independent variables becomes:
When you build logistic regression models, the algorithm's goal is to find the coefficients β(0), β(1), etc. Unlike linear regression, though, it is not done by minimizing squared residuals but finding the maximum likelihood instead.
Maximum likelihood is most often expressed through a log-likelihood formula:
where p is the probability for points with an actual outcome of event ("pass") and 1-p is the probability for points with an actual outcome of non-event ("fail").
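Purely as an illustration (the arrays below are made up, not the article's data), the log-likelihood for a set of predicted probabilities and binary outcomes can be computed directly:

import numpy as np

y_true = np.array([1, 0, 1, 1, 0])            # actual outcomes (pass/fail)
p_hat = np.array([0.9, 0.2, 0.7, 0.6, 0.4])   # predicted probabilities of passing
log_likelihood = np.sum(y_true * np.log(p_hat) + (1 - y_true) * np.log(1 - p_hat))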
There are multiple methods available to maximize the log-likelihood. Some of the most commonly used ones would be gradient descent and Newton–Raphson.
In general, methods used to find the coefficients for the logistic function go through an iterative process of selecting a candidate line and calculating the log-likelihood. This is continued until the convergence is achieved and the maximum likelihood is found.
Note, I will not go into the mechanics of these algorithms. Instead, let’s build some logistic regression models in Python.
Now is the time to build some models using the knowledge that we acquired.
We will use the following libraries and data:
Chess games data from Kaggle
Scikit-learn library for splitting the data into train-test samples, building logistic regression models, and model evaluation
Imbalanced-learn library for oversampling minority class
Plotly for data visualizations
Pandas and Numpy for data manipulation
Let’s import all the libraries:
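The import cell itself did not survive this copy of the article, so here is a plausible reconstruction based on the libraries listed above (the exact module choices are my assumption):

import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from imblearn.over_sampling import RandomOverSampler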
We will use data on chess games from Kaggle, which you can download following this link: https://www.kaggle.com/datasnaek/chess.
Once you have saved the data on your machine, we ingest it with the following code:
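A minimal version of that ingestion step, assuming the Kaggle file has been saved as games.csv in the working directory:

df = pd.read_csv("games.csv")
print(df.shape)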
As we will want to use the ‘winner’ field for our dependent (target) variable, let’s check the distribution of it:
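One way to check that distribution (my own one-liner; the article's chart code is not shown here):

df["winner"].value_counts(normalize=True)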
It is good to see that the wins between white and black are quite balanced. However, a small minority of matches ended up in a draw. Having an underrepresented class will make it harder to predict it, which we will see the multinomial examples later.
For the binary outcome model, we will try to predict whether the white pieces will win using the player rating difference. Meanwhile, for the multinomial case, we will attempt to predict all three classes (white win, draw, black win).
First, let’s derive a few new fields for usage in model predictions.
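The derivation cell is not reproduced in this copy; a sketch consistent with the fields used later would look like this (the column names rating_difference and white_win come from the narrative, the exact expressions are my assumption):

df["rating_difference"] = df["white_rating"] - df["black_rating"]
df["white_win"] = (df["winner"] == "white").astype(int)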
Let’s start building! We will use the difference between white and black ratings as the independent variable and the ‘white_win’ flag as the target.
After splitting the data into train and test samples, we fit the model. We chose sag (stochastic average gradient) solver for finding beta parameters of the log-odds equation this time. As listed in the comments below, there are other solvers, which we will try in the next few examples.
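A minimal sketch of that split-and-fit step (every setting other than the sag solver is an assumption on my part):

X_train, X_test, y_train, y_test = train_test_split(
    df[["rating_difference"]], df["white_win"], test_size=0.25, random_state=0)

# other available solvers include 'lbfgs', 'liblinear', 'newton-cg' and 'saga'
model = LogisticRegression(solver="sag")
model.fit(X_train, y_train)
print(model.intercept_, model.coef_)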
This gives us the following log-odds and logistic equations:
Let’s check our model performance metrics on the test sample:
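The metrics shown here can be produced with scikit-learn's classification_report; a sketch, assuming the split and model from the previous step:

y_pred = model.predict(X_test)
print(classification_report(y_test, y_pred))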
A quick recap on the performance metrics:
Accuracy = Correct predictions / Total predictions
Precision = True Positives / (True Positives + False Positives); lower precision means higher number of False Positives
Recall = True Positives / (True Positives + False Negatives); low recall means that the model contains many False Negatives, i.e., it could not correctly identify a large proportion of the class members.
F1-score = Average between Precision and Recall (weights can be applied if one metric is more important than the other for a specific use case)
Support = Number of actual observations in that class
We can see that while the model is not great, it still helps us to identify the white win in 64% of the cases, which is better than a random guess (a 50% chance of getting it right).
Next, let’s plot a Logistic function with each class mapped onto it. We will do some data preparation first:
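The preparation essentially amounts to getting a predicted probability for every test point so it can be mapped onto the curve; something along these lines (my sketch, the author's exact code is not shown):

probs = model.predict_proba(X_test)[:, 1]   # probability of class 1 (white wins)
mask = (y_test == 1).to_numpy()             # boolean mask used to split the two traces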
We will use masking in the graph to create two separate traces, one with events (white won) and the other with non-events (white did not win). As you can see, it is simply a boolean array containing True for 1 and False for 0.
Let’s take a look at what is displayed here.
The black dots at the top are the test dataset observations with the actual class of 1 (white won). In comparison, the black dots at the bottom are observations with the actual class of 0 (white did not win).
The black line is the logistic function which is based on the equation we derived with our model giving us the following parameters:intercept = -0.00289864 and slope = 0.00361573.
Green dots are black dots with class=1 mapped onto the logistic function using the probabilities from the model.
Red dots are black dots with class=0 mapped onto the logistic function using the probabilities from the model.
Quick note, I had to offset green and red dots by a small amount (0.01) to avoid overlapping for easier reading.
In summary, while the model can correctly predict a white win in 64% of the cases {p(white win)>0.5}, there are also lots of cases (36%) where it did not predict the outcome successfully. This suggests that having a higher rating in chess does not guarantee success in a match.
Let’s add an additional independent variable to the next model. We will use a field called ‘turns,’ which tells us the total number of moves made in a match.
Note that we are somewhat cheating here as the number of total moves would only be known after the match. Hence, this data point would not be available to us if we were to make a prediction before the match starts. Nevertheless, this is for illustration purposes only, so we will go ahead and use it anyway.
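Fitting the two-variable model follows the same pattern as before (a sketch; only the choice of the two columns is stated in the article):

X_train, X_test, y_train, y_test = train_test_split(
    df[["rating_difference", "turns"]], df["white_win"], test_size=0.25, random_state=0)

model2 = LogisticRegression(solver="sag")
model2.fit(X_train, y_train)
print(model2.intercept_, model2.coef_)   # one intercept, two slopes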
Note that we have two slope parameters this time, one for each independent variable. β(2) is slightly negative, suggesting that a higher number of ‘turns’ indicates a lower chance of white winning. This makes sense as the white not winning also includes ‘draws,’ and they are more likely to occur after a long match (after many moves).
Let’s take a look at model performance metrics on a test sample:
We can see that all classification metrics have improved for this model with 66% correct predictions. Not a surprise, given we used the ‘turns’ field, which gives us information about how the match has evolved.
Let’s now do some data prep and plot a logistic function again, although this time, it will be a surface on a 3D graph instead of a line. It is because we used 2 independent variables in our model.
Plot the graph:
This graph shows how the black dots at the top (class=1) and the bottom (class=0) have been mapped onto the logistic function prediction surface. In this case, green dots show probabilities for class=1 and blue ones for class=0.
Let’s now build a model that has 3 class labels:
-1: black wins
0: draw
1: white wins
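A sketch of the three-class fit described above (the -1/0/1 encoding and the solver choice are my assumptions; scikit-learn handles the multinomial case automatically):

df["result"] = df["winner"].map({"black": -1, "draw": 0, "white": 1})

X_train, X_test, y_train, y_test = train_test_split(
    df[["rating_difference", "turns"]], df["result"], test_size=0.25, random_state=0)

model3 = LogisticRegression(solver="lbfgs", multi_class="multinomial")
model3.fit(X_train, y_train)
print(model3.intercept_)   # three intercepts
print(model3.coef_)        # three pairs of slopes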
Note that for a multinomial case, we have three intercepts and 3 pairs of slopes. This is because the model creates a separate equation for predicting each class.
Let’s look at the model performance:
As expected, the model had some difficulty predicting class=0 (draw) due to the unbalanced data. You can see a lot fewer draw outcomes (175 in the test sample) than wins by either white or black.
Based on precision, we can see that the model got 43% of its ‘draw’ predictions right. However, the recall is only 0.02, meaning that there were very few cases where the model predicted a ‘draw’ with most of the ‘draw’ outcomes being unidentified.
There are multiple ways of dealing with unbalanced data, with one approach being to oversample the minority class (in this case, class=0).
We will use the “random oversampler” from the imbalanced-learn package to help with our quest.
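A minimal sketch of that oversampling step applied to the training sample only (parameter choices are assumptions):

oversampler = RandomOverSampler(random_state=0)
X_train_over, y_train_over = oversampler.fit_resample(X_train, y_train)

model4 = LogisticRegression(solver="lbfgs", multi_class="multinomial")
model4.fit(X_train_over, y_train_over)
print(classification_report(y_test, model4.predict(X_test)))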
These are the final results. We can see that the model accuracy has gone down due to a reduction in precision for class=0. This is expected with oversampling as the model expects the class to be much more common than it actually is, leading to more frequent predictions of a ‘draw.’
While this harmed precision, it has helped with recall as the model was able to identify more of the ‘draw’ outcomes.
Clearly, this model is far from ideal and more work is needed to improve it. This can be done by adding more independent variables and employing additional techniques such as undersampling majority classes.
However, the purpose of these examples was to show you how you can build different types of logistic regression models rather than finding the best model for this specific set of data. I believe I have given you plenty of examples to work with. Hence, I will stop the story here.
This has been one of the longer stories I have written. If you managed to get all the way to the end, then kudos to you! 👏
I hope you now have a good understanding of what logistic regression is and that I have inspired you to open your notebook and to start building logistic regression models yourself.
Cheers! 👏Saul Dobilas
If you have already spent your learning budget for this month, please remember me next time. My personalized link to join Medium is:
solclover.com
Related stories you may like: | [
{
"code": null,
"e": 467,
"s": 171,
"text": "Just so you know what you are getting into, this is a long article that contains a visual and a mathematical explanation of logistic regression with 4 different Python examples. Please take a look at the list of topics below and feel free to jump to the sections that you are most interested in."
},
{
"code": null,
"e": 604,
"s": 467,
"text": "Machine Learning is making huge leaps forward, with an increasing number of algorithms enabling us to solve complex real-world problems."
},
{
"code": null,
"e": 843,
"s": 604,
"text": "This story is part of a deep dive series explaining the mechanics of Machine Learning algorithms. In addition to giving you an understanding of how ML algorithms work, it also provides you with Python examples to build your own ML models."
},
{
"code": null,
"e": 901,
"s": 843,
"text": "The category of algorithms logistic regression belongs to"
},
{
"code": null,
"e": 949,
"s": 901,
"text": "An explanation of how logistic regression works"
},
{
"code": null,
"e": 1244,
"s": 949,
"text": "Python examples of how to build logistic regression models, including:- Binary target with 1 independent variable- Binary target with 2 independent variables- Multinomial with 3 class labels and 2 independent variables- Multinomial with 3 class labels and 2 independent variables + oversampling"
},
{
"code": null,
"e": 1389,
"s": 1244,
"text": "Looking at the below chart's supervised learning branch, we can see that we have two main categories of problems: regression and classification."
},
{
"code": null,
"e": 1573,
"s": 1389,
"text": "Regression: we use regression algorithms when we have a continuous (numerical) target variable. For example, predicting the price of a house based on its proximity to major amenities."
},
{
"code": null,
"e": 1796,
"s": 1573,
"text": "Classification: used when the target variable is categorical. For example, predicting a win/loss of a game or customer defaulting/not-defaulting on a loan payment. Note, it does not necessarily have to be a binary outcome."
},
{
"code": null,
"e": 2042,
"s": 1796,
"text": "While logistic regression has a “regression” in its name, it actually belongs to the classification algorithms. However, there are some similarities between linear regression and logistic regression, which we will touch upon in the next section."
},
{
"code": null,
"e": 2149,
"s": 2042,
"text": "The below graph is interactive, so make sure to click on different categories to enlarge and reveal more👇."
},
{
"code": null,
"e": 2262,
"s": 2149,
"text": "If you enjoy Data Science and Machine Learning, please subscribe to get an email whenever I publish a new story."
},
{
"code": null,
"e": 2327,
"s": 2262,
"text": "Let’s begin the explanation by looking at the following example."
},
{
"code": null,
"e": 2516,
"s": 2327,
"text": "Assume we have a class of 10 pupils where each of them had to take an exam. Their preparation time, final score, and outcome (pass/fail) are displayed below. Note, the passing score is 40."
},
{
"code": null,
"e": 2552,
"s": 2516,
"text": " Exam Outcome Based on Study Time "
},
{
"code": null,
"e": 2651,
"s": 2552,
"text": "Now, let’s see how we would approach this problem using linear regression vs. logistic regression."
},
{
"code": null,
"e": 2987,
"s": 2651,
"text": "If we were to build a simple linear regression model, we could use ‘hours of study’ as our independent variable and ‘final score’ as the dependent (target) variable. This is because ‘final score’ is a continuous variable as required by regression. This would lead us to a result summarized by a best-fit line taking the following form:"
},
{
"code": null,
"e": 3076,
"s": 2987,
"text": "where β(0) is an intercept, β(1) is a slope, and x(1) is the sole independent variable. "
},
{
"code": null,
"e": 3171,
"s": 3076,
"text": "Note, adding more independent variables would result in having more elements in your equation:"
},
{
"code": null,
"e": 3399,
"s": 3171,
"text": "Let’s now assume that we do not have a ‘final score.’ All we have is an outcome( pass/fail flag). We want to build a logistic regression model where we use ‘hours of study’ to predict a student's likelihood of passing the exam."
},
{
"code": null,
"e": 3668,
"s": 3399,
"text": "As you can see from the table above, there is a strong correlation between ‘hours of study’ and ‘exam outcome,’ although we cannot perfectly separate the two classes. Hence, we want to have a model that gives us a probability of passing the exam given the study hours."
},
{
"code": null,
"e": 3745,
"s": 3668,
"text": "This is done by using a logistic function, also known as a sigmoid function:"
},
{
"code": null,
"e": 3821,
"s": 3745,
"text": "If we were to plot a logistic function on a chart, it would look like this:"
},
{
"code": null,
"e": 3836,
"s": 3821,
"text": " Probability "
},
{
"code": null,
"e": 3986,
"s": 3836,
"text": "To understand how the data is mapped to the logistic function, we first need to learn about the relationship between probability, odds, and log-odds."
},
{
"code": null,
"e": 4283,
"s": 3986,
"text": "Odds — this is simply a ratio between the number of events (in this case, exam passes) and non-events (exam failures). Say, if you had 5 pupils that spent 7 hours each studying for an exam with 3 pupils passing and 2 failing it, the odds of passing would be 3:2, which is 1.5 in decimal notation."
},
{
"code": null,
"e": 4399,
"s": 4283,
"text": "Log-odds — is just a natural logarithm of odds. So if, the odds are 3:2 = 1.5, then log(odds) = log(1.5) = 0.405..."
},
{
"code": null,
"e": 4609,
"s": 4399,
"text": "Probability vs. odds — you can easily convert between probability and odds. So if, the odds are 3:2, then the probability is 3/5=0.6.You can use the following equations to convert between probability and odds:"
},
{
"code": null,
"e": 4793,
"s": 4609,
"text": "The last thing to note is that S(t) in the logistic function is the probability p. Hence, using the above equations, we can derive that t=log(odds). Which makes our logistic function:"
},
{
"code": null,
"e": 5030,
"s": 4793,
"text": "Obviously, we could simplify it further, which would lead us back to the original equation of probability expressed through odds. However, we are happy with this form because now we can go one step further to find the log-odds equation."
},
{
"code": null,
"e": 5138,
"s": 5030,
"text": "Let’s use another example to plot the data onto a graph to understand how the log-odds equation is created."
},
{
"code": null,
"e": 5169,
"s": 5138,
"text": " Exam Outcome for 119 Pupils "
},
{
"code": null,
"e": 5265,
"s": 5169,
"text": "We can plot this data onto a chart with ‘study hours’ on the x-axis and log-odds on the y-axis:"
},
{
"code": null,
"e": 5487,
"s": 5265,
"text": "Now, this looks familiar. The relationship between our independent variable x (hours of study) and log-odds is linear! This means that we can draw the best fit-line through the points using the same type of line equation:"
},
{
"code": null,
"e": 5521,
"s": 5487,
"text": "This makes our Logistic function:"
},
{
"code": null,
"e": 5581,
"s": 5521,
"text": "A general form with multiple independent variables becomes:"
},
{
"code": null,
"e": 5815,
"s": 5581,
"text": "When you build logistic regression models, the algorithm's goal is to find the coefficients β(0), β(1), etc. Unlike linear regression, though, it is not done by minimizing squared residuals but finding the maximum likelihood instead."
},
{
"code": null,
"e": 5892,
"s": 5815,
"text": "Maximum likelihood is most often expressed through a log-likelihood formula:"
},
{
"code": null,
"e": 6055,
"s": 5892,
"text": "where p is the probability for points with an actual outcome of event (\"pass\") and 1-p is the probability for points with an actual outcome of non-event (\"fail\")."
},
{
"code": null,
"e": 6206,
"s": 6055,
"text": "There are multiple methods available to maximize the log-likelihood. Some of the most commonly used ones would be gradient descent and Newton–Raphson."
},
{
"code": null,
"e": 6469,
"s": 6206,
"text": "In general, methods used to find the coefficients for the logistic function go through an iterative process of selecting a candidate line and calculating the log-likelihood. This is continued until the convergence is achieved and the maximum likelihood is found."
},
{
"code": null,
"e": 6593,
"s": 6469,
"text": "Note, I will not go into the mechanics of these algorithms. Instead, let’s build some logistic regression models in Python."
},
{
"code": null,
"e": 6668,
"s": 6593,
"text": "Now is the time to build some models using the knowledge that we acquired."
},
{
"code": null,
"e": 6714,
"s": 6668,
"text": "We will use the following libraries and data:"
},
{
"code": null,
"e": 6743,
"s": 6714,
"text": "Chess games data from Kaggle"
},
{
"code": null,
"e": 6870,
"s": 6743,
"text": "Scikit-learn library for splitting the data into train-test samples, building logistic regression models, and model evaluation"
},
{
"code": null,
"e": 6927,
"s": 6870,
"text": "Imbalanced-learn library for oversampling minority class"
},
{
"code": null,
"e": 6958,
"s": 6927,
"text": "Plotly for data visualizations"
},
{
"code": null,
"e": 6997,
"s": 6958,
"text": "Pandas and Numpy for data manipulation"
},
{
"code": null,
"e": 7029,
"s": 6997,
"text": "Let’s import all the libraries:"
},
{
"code": null,
"e": 7158,
"s": 7029,
"text": "We will use data on chess games from Kaggle, which you can download following this link: https://www.kaggle.com/datasnaek/chess."
},
{
"code": null,
"e": 7242,
"s": 7158,
"text": "Once you have saved the data on your machine, we ingest it with the following code:"
},
{
"code": null,
"e": 7357,
"s": 7242,
"text": "As we will want to use the ‘winner’ field for our dependent (target) variable, let’s check the distribution of it:"
},
{
"code": null,
"e": 7608,
"s": 7357,
"text": "It is good to see that the wins between white and black are quite balanced. However, a small minority of matches ended up in a draw. Having an underrepresented class will make it harder to predict it, which we will see the multinomial examples later."
},
{
"code": null,
"e": 7843,
"s": 7608,
"text": "For the binary outcome model, we will try to predict whether the white pieces will win using the player rating difference. Meanwhile, for the multinomial case, we will attempt to predict all three classes (white win, draw, black win)."
},
{
"code": null,
"e": 7912,
"s": 7843,
"text": "First, let’s derive a few new fields for usage in model predictions."
},
{
"code": null,
"e": 8061,
"s": 7912,
"text": "Let’s start building! We will use the difference between white and black ratings as the independent variable and the ‘white_win’ flag as the target."
},
{
"code": null,
"e": 8349,
"s": 8061,
"text": "After splitting the data into train and test samples, we fit the model. We chose sag (stochastic average gradient) solver for finding beta parameters of the log-odds equation this time. As listed in the comments below, there are other solvers, which we will try in the next few examples."
},
{
"code": null,
"e": 8410,
"s": 8349,
"text": "This gives us the following log-odds and logistic equations:"
},
{
"code": null,
"e": 8472,
"s": 8410,
"text": "Let’s check our model performance metrics on the test sample:"
},
{
"code": null,
"e": 8514,
"s": 8472,
"text": "A quick recap on the performance metrics:"
},
{
"code": null,
"e": 8565,
"s": 8514,
"text": "Accuracy = Correct predictions / Total predictions"
},
{
"code": null,
"e": 8685,
"s": 8565,
"text": "Precision = True Positives / (True Positives + False Positives); lower precision means higher number of False Positives"
},
{
"code": null,
"e": 8889,
"s": 8685,
"text": "Recall = True Positives / (True Positives + False Negatives); low recall means that the model contains many False Negatives, i.e., it could not correctly identify a large proportion of the class members."
},
{
"code": null,
"e": 9033,
"s": 8889,
"text": "F1-score = Average between Precision and Recall (weights can be applied if one metric is more important than the other for a specific use case)"
},
{
"code": null,
"e": 9087,
"s": 9033,
"text": "Support = Number of actual observations in that class"
},
{
"code": null,
"e": 9270,
"s": 9087,
"text": "We can see that while the model is not great, it still helps us to identify the white win in 64% of the cases, which is better than a random guess (a 50% chance of getting it right)."
},
{
"code": null,
"e": 9379,
"s": 9270,
"text": "Next, let’s plot a Logistic function with each class mapped onto it. We will do some data preparation first:"
},
{
"code": null,
"e": 9603,
"s": 9379,
"text": "We will use masking in the graph to create two separate traces, one with events (white won) and the other with non-events (white did not win). As you can see, it is simply a boolean array contain True for 1 and False for 0."
},
{
"code": null,
"e": 9648,
"s": 9603,
"text": "Let’s take a look at what is displayed here."
},
{
"code": null,
"e": 9857,
"s": 9648,
"text": "The black dots at the top are the test dataset observations with the actual class of 1 (white won). In comparison, the black dots at the bottom are observations with the actual class of 0 (white did not win)."
},
{
"code": null,
"e": 10037,
"s": 9857,
"text": "The black line is the logistic function which is based on the equation we derived with our model giving us the following parameters:intercept = -0.00289864 and slope = 0.00361573."
},
{
"code": null,
"e": 10150,
"s": 10037,
"text": "Green dots are black dots with class=1 mapped onto the logistic function using the probabilities from the model."
},
{
"code": null,
"e": 10261,
"s": 10150,
"text": "Red dots are black dots with class=0 mapped onto the logistic function using the probabilities from the model."
},
{
"code": null,
"e": 10374,
"s": 10261,
"text": "Quick note, I had to offset green and red dots by a small amount (0.01) to avoid overlapping for easier reading."
},
{
"code": null,
"e": 10652,
"s": 10374,
"text": "In summary, while the model can correctly predict a white win in 64% of the cases {p(white win)>0.5}, there are also lots of cases (36%) where it did not predict the outcome successfully. This suggests that having a higher rating in chess does not guarantee success in a match."
},
{
"code": null,
"e": 10810,
"s": 10652,
"text": "Let’s add an additional independent variable to the next model. We will use a field called ‘turns,’ which tells us the total number of moves made in a match."
},
{
"code": null,
"e": 11118,
"s": 10810,
"text": "Note that we are somewhat cheating here as the number of total moves would only be known after the match. Hence, this data point would not be available to us if we were to make a prediction before the match starts. Nevertheless, this is for illustration purposes only, so we will go ahead and use it anyway."
},
{
"code": null,
"e": 11454,
"s": 11118,
"text": "Note that we have two slope parameters this time, one for each independent variable. β(2) is slightly negative, suggesting that a higher number of ‘turns’ indicates a lower chance of white winning. This makes sense as the white not winning also includes ‘draws,’ and they are more likely to occur after a long match (after many moves)."
},
{
"code": null,
"e": 11519,
"s": 11454,
"text": "Let’s take a look at model performance metrics on a test sample:"
},
{
"code": null,
"e": 11730,
"s": 11519,
"text": "We can see that all classification metrics have improved for this model with 66% correct predictions. Not a surprise, given we used the ‘turns’ field, which gives us information about how the match has evolved."
},
{
"code": null,
"e": 11928,
"s": 11730,
"text": "Let’s now do some data prep and plot a logistic function again, although this time, it will be a surface on a 3D graph instead of a line. It is because we used 2 independent variables in our model."
},
{
"code": null,
"e": 11944,
"s": 11928,
"text": "Plot the graph:"
},
{
"code": null,
"e": 12173,
"s": 11944,
"text": "This graph shows how the black dots at the top (class=1) and the bottom (class=0) have been mapped onto the logistic function prediction surface. In this case, green dots show probabilities for class=1 and blue ones for class=0."
},
{
"code": null,
"e": 12222,
"s": 12173,
"text": "Let’s now build a model that has 3 class labels:"
},
{
"code": null,
"e": 12237,
"s": 12222,
"text": "-1: black wins"
},
{
"code": null,
"e": 12245,
"s": 12237,
"text": "0: draw"
},
{
"code": null,
"e": 12259,
"s": 12245,
"text": "1: white wins"
},
{
"code": null,
"e": 12422,
"s": 12259,
"text": "Note that for a multinomial case, we have three intercepts and 3 pairs of slopes. This is because the model creates a separate equation for predicting each class."
},
{
"code": null,
"e": 12459,
"s": 12422,
"text": "Let’s look at the model performance:"
},
{
"code": null,
"e": 12655,
"s": 12459,
"text": "As expected, the model had some difficulty predicting class=0 (draw) due to the unbalanced data. You can see a lot fewer draw outcomes (175 in the test sample) than wins by either white or black."
},
{
"code": null,
"e": 12903,
"s": 12655,
"text": "Based on precision, we can see that the model got 43% of its ‘draw’ predictions right. However, the recall is only 0.02, meaning that there were very few cases where the model predicted a ‘draw’ with most of the ‘draw’ outcomes being unidentified."
},
{
"code": null,
"e": 13042,
"s": 12903,
"text": "There are multiple ways of dealing with unbalanced data, with one approach being to oversample the minority class (in this case, class=0)."
},
{
"code": null,
"e": 13137,
"s": 13042,
"text": "We will use the “random oversampler” from the imbalanced-learn package to help with our quest."
},
{
"code": null,
"e": 13420,
"s": 13137,
"text": "These are the final results. We can see that the model accuracy has gone down due to a reduction in precision for class=0. This is expected with oversampling as the model expects the class to be much more common than it actually is, leading to more frequent predictions of a ‘draw.’"
},
{
"code": null,
"e": 13538,
"s": 13420,
"text": "While this harmed precision, it has helped with recall as the model was able to identify more of the ‘draw’ outcomes."
},
{
"code": null,
"e": 13745,
"s": 13538,
"text": "Clearly, this model is far from ideal and more work is needed to improve it. This can be done by adding more independent variables and employing additional techniques such as undersampling majority classes."
},
{
"code": null,
"e": 14025,
"s": 13745,
"text": "However, the purpose of these examples was to show you how you can build different types of logistic regression models rather than finding the best model for this specific set of data. I believe I have given you plenty of examples to work with. Hence, I will stop the story here."
},
{
"code": null,
"e": 14148,
"s": 14025,
"text": "This has been one of the longer stories I have written. If you managed to get all the way to the end, then kudos to you! 👏"
},
{
"code": null,
"e": 14330,
"s": 14148,
"text": "I hope you now have a good understanding of what logistic regression is and that I have inspired you to open your notebook and to start building logistic regression models yourself."
},
{
"code": null,
"e": 14352,
"s": 14330,
"text": "Cheers! 👏Saul Dobilas"
},
{
"code": null,
"e": 14485,
"s": 14352,
"text": "If you have already spent your learning budget for this month, please remember me next time. My personalized link to join Medium is:"
},
{
"code": null,
"e": 14499,
"s": 14485,
"text": "solclover.com"
}
]
|
CSS - Fade In UP Big Effect | The image gradually comes into or goes out of view, or merges into another shot.
@keyframes fadeInUpBig {
0% {
opacity: 0;
transform: translateY(2000px);
}
100% {
opacity: 1;
transform: translateY(0);
}
}
Transform − Transform applies 2d and 3d transformations to an element.
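In the fadeInUpBig keyframes shown above, translateY(2000px) places the element 2000px below its final position and translateY(0) brings it back while the opacity animates from 0 to 1. As a minimal usage sketch (assuming the animate.css-style helper classes that the full example below defines):
<!-- The element slides up from far below while fading in -->
<div class = "animated fadeInUpBig">Hello, fadeInUpBig!</div>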
<html>
<head>
<style>
.animated {
background-image: url(/css/images/logo.png);
background-repeat: no-repeat;
background-position: left top;
padding-top:95px;
margin-bottom:60px;
-webkit-animation-duration: 10s;
animation-duration: 10s;
-webkit-animation-fill-mode: both;
animation-fill-mode: both;
}
@-webkit-keyframes fadeInUpBig {
0% {
opacity: 0;
-webkit-transform: translateY(2000px);
}
100% {
opacity: 1;
-webkit-transform: translateY(0);
}
}
@keyframes fadeInUpBig {
0% {
opacity: 0;
transform: translateY(2000px);
}
100% {
opacity: 1;
transform: translateY(0);
}
}
.fadeInUpBig {
-webkit-animation-name: fadeInUpBig;
animation-name: fadeInUpBig;
}
</style>
</head>
<body>
<div id = "animated-example" class = "animated fadeInUpBig"></div>
<button onclick = "myFunction()">Reload page</button>
<script>
function myFunction() {
location.reload();
}
</script>
</body>
</html>
It will produce the following result − | [
{
"code": null,
"e": 2720,
"s": 2626,
"text": "The image come or cause to come gradually into or out of view, or to merge into another shot."
},
{
"code": null,
"e": 2881,
"s": 2720,
"text": "@keyframes fadeInUpBig {\n 0% {\n opacity: 0;\n transform: translateY(2000px);\n }\n 100% {\n opacity: 1;\n transform: translateY(0);\n }\n} "
},
{
"code": null,
"e": 2954,
"s": 2881,
"text": "Transform − Transform applies to 2d and 3d transformation to an element."
},
{
"code": null,
"e": 3027,
"s": 2954,
"text": "Transform − Transform applies to 2d and 3d transformation to an element."
},
{
"code": null,
"e": 4454,
"s": 3027,
"text": "<html>\n <head>\n <style>\n .animated {\n background-image: url(/css/images/logo.png);\n background-repeat: no-repeat;\n background-position: left top;\n padding-top:95px;\n margin-bottom:60px;\n -webkit-animation-duration: 10s;\n animation-duration: 10s;\n -webkit-animation-fill-mode: both;\n animation-fill-mode: both;\n }\n \n @-webkit-keyframes fadeInUpBig {\n 0% {\n opacity: 0;\n -webkit-transform: translateY(2000px);\n }\n 100% {\n opacity: 1;\n -webkit-transform: translateY(0);\n }\n }\n \n @keyframes fadeInUpBig {\n 0% {\n opacity: 0;\n transform: translateY(2000px);\n }\n 100% {\n opacity: 1;\n transform: translateY(0);\n }\n }\n \n .fadeInUpBig {\n -webkit-animation-name: fadeInUpBig;\n animation-name: fadeInUpBig;\n }\n </style>\n </head>\n\n <body>\n \n <div id = \"animated-example\" class = \"animated fadeInUpBig\"></div>\n <button onclick = \"myFunction()\">Reload page</button>\n \n <script>\n function myFunction() {\n location.reload();\n }\n </script>\n \n </body>\n</html>"
},
{
"code": null,
"e": 4493,
"s": 4454,
"text": "It will produce the following result −"
},
{
"code": null,
"e": 5140,
"s": 4493,
"text": "\n\n Academic Tutorials\n Big Data & Analytics \n Computer Programming \n Computer Science \n Databases \n DevOps \n Digital Marketing \n Engineering Tutorials \n Exams Syllabus \n Famous Monuments \n GATE Exams Tutorials\n Latest Technologies \n Machine Learning \n Mainframe Development \n Management Tutorials \n Mathematics Tutorials\n Microsoft Technologies \n Misc tutorials \n Mobile Development \n Java Technologies \n Python Technologies \n SAP Tutorials \nProgramming Scripts \n Selected Reading \n Software Quality \n Soft Skills \n Telecom Tutorials \n UPSC IAS Exams \n Web Development \n Sports Tutorials \n XML Technologies \n Multi-Language\n Interview Questions\n\n"
},
{
"code": null,
"e": 5160,
"s": 5140,
"text": " Academic Tutorials"
},
{
"code": null,
"e": 5183,
"s": 5160,
"text": " Big Data & Analytics "
},
{
"code": null,
"e": 5206,
"s": 5183,
"text": " Computer Programming "
},
{
"code": null,
"e": 5225,
"s": 5206,
"text": " Computer Science "
},
{
"code": null,
"e": 5237,
"s": 5225,
"text": " Databases "
},
{
"code": null,
"e": 5246,
"s": 5237,
"text": " DevOps "
},
{
"code": null,
"e": 5266,
"s": 5246,
"text": " Digital Marketing "
},
{
"code": null,
"e": 5290,
"s": 5266,
"text": " Engineering Tutorials "
},
{
"code": null,
"e": 5307,
"s": 5290,
"text": " Exams Syllabus "
},
{
"code": null,
"e": 5326,
"s": 5307,
"text": " Famous Monuments "
},
{
"code": null,
"e": 5348,
"s": 5326,
"text": " GATE Exams Tutorials"
},
{
"code": null,
"e": 5370,
"s": 5348,
"text": " Latest Technologies "
},
{
"code": null,
"e": 5389,
"s": 5370,
"text": " Machine Learning "
},
{
"code": null,
"e": 5413,
"s": 5389,
"text": " Mainframe Development "
},
{
"code": null,
"e": 5436,
"s": 5413,
"text": " Management Tutorials "
},
{
"code": null,
"e": 5459,
"s": 5436,
"text": " Mathematics Tutorials"
},
{
"code": null,
"e": 5484,
"s": 5459,
"text": " Microsoft Technologies "
},
{
"code": null,
"e": 5501,
"s": 5484,
"text": " Misc tutorials "
},
{
"code": null,
"e": 5522,
"s": 5501,
"text": " Mobile Development "
},
{
"code": null,
"e": 5542,
"s": 5522,
"text": " Java Technologies "
},
{
"code": null,
"e": 5564,
"s": 5542,
"text": " Python Technologies "
},
{
"code": null,
"e": 5580,
"s": 5564,
"text": " SAP Tutorials "
},
{
"code": null,
"e": 5601,
"s": 5580,
"text": "Programming Scripts "
},
{
"code": null,
"e": 5620,
"s": 5601,
"text": " Selected Reading "
},
{
"code": null,
"e": 5639,
"s": 5620,
"text": " Software Quality "
},
{
"code": null,
"e": 5653,
"s": 5639,
"text": " Soft Skills "
},
{
"code": null,
"e": 5673,
"s": 5653,
"text": " Telecom Tutorials "
},
{
"code": null,
"e": 5690,
"s": 5673,
"text": " UPSC IAS Exams "
},
{
"code": null,
"e": 5708,
"s": 5690,
"text": " Web Development "
},
{
"code": null,
"e": 5727,
"s": 5708,
"text": " Sports Tutorials "
},
{
"code": null,
"e": 5746,
"s": 5727,
"text": " XML Technologies "
},
{
"code": null,
"e": 5762,
"s": 5746,
"text": " Multi-Language"
},
{
"code": null,
"e": 5783,
"s": 5762,
"text": " Interview Questions"
},
{
"code": null,
"e": 5800,
"s": 5783,
"text": "Selected Reading"
},
{
"code": null,
"e": 5821,
"s": 5800,
"text": "UPSC IAS Exams Notes"
},
{
"code": null,
"e": 5848,
"s": 5821,
"text": "Developer's Best Practices"
},
{
"code": null,
"e": 5870,
"s": 5848,
"text": "Questions and Answers"
},
{
"code": null,
"e": 5895,
"s": 5870,
"text": "Effective Resume Writing"
},
{
"code": null,
"e": 5918,
"s": 5895,
"text": "HR Interview Questions"
},
{
"code": null,
"e": 5936,
"s": 5918,
"text": "Computer Glossary"
},
{
"code": null,
"e": 5947,
"s": 5936,
"text": "Who is Who"
},
{
"code": null,
"e": 5954,
"s": 5947,
"text": " Print"
},
{
"code": null,
"e": 5965,
"s": 5954,
"text": " Add Notes"
}
]
|
IFNULL in MySQL - GeeksforGeeks | 03 Mar, 2018
Given a table, the task is to print an entry from it. If the requested entry does not exist in the table, the query should print NULL instead.
Examples:
QUESTION: Given an employee table, print the name from the table whose id equals 2. Output: Geek2
QUESTION: Given the same Employee table, print the name from the table whose id equals 5. Output: NULL
Approach: In this case, we use IFNULL. IFNULL returns its second argument (the string 'NULL' here) whenever the first expression is NULL, i.e. whenever the subquery finds no matching row; otherwise it returns the subquery's result.
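The queries below assume an employee table along the following lines (a hypothetical sketch; the article does not show the actual table definition or data):

CREATE TABLE employee (
  id INT PRIMARY KEY,
  name VARCHAR(50)
);

INSERT INTO employee (id, name) VALUES
  (1, 'Geek1'),
  (2, 'Geek2'),
  (3, 'Geek3'),
  (4, 'Geek4');

-- No row has id = 5, so a lookup for id = 5 returns no row.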
Query:-
SELECT
IFNULL(
(SELECT NAME
from employee
where id = 2),
'NULL') as NAME;
Output:-
Geek2
Query:-
SELECT
IFNULL(
(SELECT NAME
from employee
where id = 5),
'NULL') as NAME;
Output:-
NULL
mysql
SQL
SQL | [
{
"code": null,
"e": 23701,
"s": 23673,
"text": "\n03 Mar, 2018"
},
{
"code": null,
"e": 23799,
"s": 23701,
"text": "Given a TABLE, in this TABLE, it prints entry of the table. If table is empty then it gives NULL."
},
{
"code": null,
"e": 23809,
"s": 23799,
"text": "Examples:"
},
{
"code": null,
"e": 23913,
"s": 23809,
"text": "QUESTION : Given an employee table, print name from the given table which id equals to 2.Output : Geek2"
},
{
"code": null,
"e": 24018,
"s": 23913,
"text": "QUESTION : Given same Employee table, print name from the given table which id equals to 5.Output : NULL"
},
{
"code": null,
"e": 24129,
"s": 24018,
"text": "Approach: In this case, we use here IFNULL. IFNULL print the null if the table is an empty or other condition."
},
{
"code": null,
"e": 24137,
"s": 24129,
"text": "Query:-"
},
{
"code": null,
"e": 24222,
"s": 24137,
"text": "SELECT \nIFNULL(\n (SELECT NAME \n from employee\n where id = 2),\n 'NULL') as NAME;\n"
},
{
"code": null,
"e": 24238,
"s": 24222,
"text": "Output:-\nGeek2\n"
},
{
"code": null,
"e": 24246,
"s": 24238,
"text": "Query:-"
},
{
"code": null,
"e": 24331,
"s": 24246,
"text": "SELECT \nIFNULL(\n (SELECT NAME \n from employee\n where id = 5),\n 'NULL') as NAME;\n"
},
{
"code": null,
"e": 24346,
"s": 24331,
"text": "Output:-\nNULL\n"
},
{
"code": null,
"e": 24352,
"s": 24346,
"text": "mysql"
},
{
"code": null,
"e": 24356,
"s": 24352,
"text": "SQL"
},
{
"code": null,
"e": 24360,
"s": 24356,
"text": "SQL"
},
{
"code": null,
"e": 24458,
"s": 24360,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 24467,
"s": 24458,
"text": "Comments"
},
{
"code": null,
"e": 24480,
"s": 24467,
"text": "Old Comments"
},
{
"code": null,
"e": 24511,
"s": 24480,
"text": "SQL Trigger | Student Database"
},
{
"code": null,
"e": 24523,
"s": 24511,
"text": "SQL | Views"
},
{
"code": null,
"e": 24534,
"s": 24523,
"text": "CTE in SQL"
},
{
"code": null,
"e": 24579,
"s": 24534,
"text": "Difference between DELETE, DROP and TRUNCATE"
},
{
"code": null,
"e": 24645,
"s": 24579,
"text": "How to Update Multiple Columns in Single Update Statement in SQL?"
},
{
"code": null,
"e": 24678,
"s": 24645,
"text": "Difference between SQL and NoSQL"
},
{
"code": null,
"e": 24717,
"s": 24678,
"text": "Difference between DDL and DML in DBMS"
},
{
"code": null,
"e": 24741,
"s": 24717,
"text": "SQL Interview Questions"
},
{
"code": null,
"e": 24773,
"s": 24741,
"text": "What is Temporary Table in SQL?"
}
]
|
C++ Program for Best Fit algorithm in Memory Management | Given two arrays containing block size and process size; the task is to print the results according to Best Fit algorithm in memory management.
What is Best Fit Algorithm?
Best Fit is a memory management algorithm; it allocates the smallest free partition that meets the requirement of the requesting process. The algorithm scans the whole list of memory blocks and picks the smallest block that is still large enough for the process, leaving the other blocks available for later requests.
So we will take the block sizes and process sizes as input and report, for each process, which block (if any) is allocated to it.
Input: bsize[] = {100, 500, 200, 300, 400}
psize[] = {112, 518, 110, 526}
Output:
Process No. Process Size Block no.
1 112 3
2 518 Not Allocated
3 110 4
4 526 Not Allocated
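To see how this output arises, trace the allocation by hand: process 1 (size 112) fits in the blocks of size 500, 200, 300 and 400; the smallest of these is 200, i.e. block 3, which shrinks to 88 after allocation. Process 2 (size 518) fits in no remaining block, so it is not allocated. Process 3 (size 110) fits in the blocks of size 500, 300 and 400; the smallest is 300, i.e. block 4, which shrinks to 190. Process 4 (size 526) again fits nowhere and stays unallocated.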
The following approach will be used to solve the above problem −
Take the input of process and block sizes.
Initially set all the memory blocks as free.
Take each process and find the smallest block that can be allotted to it, i.e. the minimum over all blocks whose size is greater than or equal to the process size.
If such a block is found, allot it to the current process; otherwise leave that process unallocated and check the further processes.
Start
Step 1-> In function void bestfit(int bsize[], int m, int psize[], int n)
Declare int alloc[n]
Call function memset(alloc, -1, sizeof(alloc))
Loop For i=0 and i<n and i++
Declare and Initialize bestIdx = -1
Loop For j=0 and j<m and j++
If bsize[j] >= psize[i] then,
If bestIdx == -1 then,
Set bestIdx = j
Else If bsize[bestIdx] > bsize[j] then,
Set bestIdx = j
If bestIdx != -1 then,
Set alloc[i] = bestIdx
Set bsize[bestIdx] -= psize[i]
Loop For i = 0 and i < n and i++
Print i+1, psize[i]
If alloc[i] != -1
Print alloc[i] + 1
Else
Print "Not Allocated"
Print newline
Step 2->In function int main()
Declare and initialize bsize[] = {100, 500, 200, 300, 400}
Declare and initialize psize[] = {112, 518, 110, 526}
Set m = sizeof(bsize)/sizeof(bsize[0])
Set n = sizeof(psize)/sizeof(psize[0])
Call function bestfit(bsize, m, psize, n)
Stop
#include <iostream>
#include <cstring>
using namespace std;
// To allocate the memory to blocks as per Best fit
// algorithm
void bestfit(int bsize[], int m, int psize[], int n) {
// To store block id of the block allocated to a
// process
int alloc[n];
// Initially no block is assigned to any process
memset(alloc, -1, sizeof(alloc));
// pick each process and find suitable blocks
// according to its size and assign to it
for (int i=0; i<n; i++) {
// Find the best fit block for current process
int bestIdx = -1;
for (int j=0; j<m; j++) {
if (bsize[j] >= psize[i]) {
if (bestIdx == -1)
bestIdx = j;
else if (bsize[bestIdx] > bsize[j])
bestIdx = j;
}
}
// If we could find a block for current process
if (bestIdx != -1) {
// allocate block j to p[i] process
alloc[i] = bestIdx;
// Reduce available memory in this block.
bsize[bestIdx] -= psize[i];
}
}
cout << "\nProcess No.\tProcess Size\tBlock no.\n";
for (int i = 0; i < n; i++) {
cout << " " << i+1 << "\t\t\t\t" << psize[i] << "\t\t\t\t";
if (alloc[i] != -1)
cout << alloc[i] + 1;
else
cout << "Not Allocated";
cout << endl;
}
}
// Driver code
int main() {
int bsize[] = {100, 500, 200, 300, 400};
int psize[] = {112, 518, 110, 526};
int m = sizeof(bsize)/sizeof(bsize[0]);
int n = sizeof(psize)/sizeof(psize[0]);
bestfit(bsize, m, psize, n);
return 0 ;
}
Process No. Process Size Block no.
1 112 3
2 518 Not Allocated
3 110 4
4 526 Not Allocated | [
{
"code": null,
"e": 1206,
"s": 1062,
"text": "Given two arrays containing block size and process size; the task is to print the results according to Best Fit algorithm in memory management."
},
{
"code": null,
"e": 1234,
"s": 1206,
"text": "What is Best Fit Algorithm?"
},
{
"code": null,
"e": 1594,
"s": 1234,
"text": "Best Fit is a memory management algorithm; it deals with allocating smallest free partition which meets the requirement of the requesting process. In this algorithm we look for the whole memory block and check the smallest and most appropriate block for the process and then look for the immediate near block which can be used to fulfill the adequate process."
},
{
"code": null,
"e": 1728,
"s": 1594,
"text": "So we will take the block size and process size and return the output of the process and which block is to be allocated to a process."
},
{
"code": null,
"e": 1983,
"s": 1728,
"text": "Input: bsize[] = {100, 500, 200, 300, 400}\n psize[] = {112, 518, 110, 526}\nOutput:\nProcess No. Process Size Block no.\n1 112 3\n2 518 Not Allocated\n3 110 4\n4 526 Not Allocated"
},
{
"code": null,
"e": 2035,
"s": 1983,
"text": "Approach will be using to solve the above problem −"
},
{
"code": null,
"e": 2076,
"s": 2035,
"text": "Take the input of process ad block size."
},
{
"code": null,
"e": 2121,
"s": 2076,
"text": "Initially set all the memory blocks as free."
},
{
"code": null,
"e": 2280,
"s": 2121,
"text": "Take each process and find the minimum block size which can be allotted to a block means the minimum of the entire block which is greater than a process size."
},
{
"code": null,
"e": 2381,
"s": 2280,
"text": "If found then allot it to the current process else leave that process and check the further process."
},
{
"code": null,
"e": 3435,
"s": 2381,
"text": "Start\nStep 1-> In function void bestfit(int bsize[], int m, int psize[], int n)\n Declare int alloc[n]\n Call function memset(alloc, -1, sizeof(alloc))\n Loop For i=0 and i<n and i++\n Declare and Initialize bestIdx = -1\n Loop For j=0 and j<m and j++\n If bsize[j] >= psize[i] then,\n If bestIdx == -1 then,\n Set bestIdx = j\n Else If bsize[bestIdx] > bsize[j] then,\n Set bestIdx = j\n If bestIdx != -1 then,\n Set alloc[i] = bestIdx\n Set bsize[bestIdx] -= psize[i]\n Loop For i = 0 and i < n and i++\n Print i+1, psize[i]\n If alloc[i] != -1\n Print alloc[i] + 1\n Else\n Print \"Not Allocated\"\n Print newline\nStep 2->In function int main()\n Declare and initialize bsize[] = {100, 500, 200, 300, 400}\n Declare and initialize psize[] = {112, 518, 110, 526}\n Set m = sizeof(bsize)/sizeof(bsize[0])\n Set n = sizeof(psize)/sizeof(psize[0])\n Call function bestfit(bsize, m, psize, n)\nStop"
},
{
"code": null,
"e": 4996,
"s": 3435,
"text": "#include <iostream>\n#include <memory>\nusing namespace std;\n// To allocate the memory to blocks as per Best fit\n// algorithm\nvoid bestfit(int bsize[], int m, int psize[], int n) {\n // To store block id of the block allocated to a\n // process\n int alloc[n];\n // Initially no block is assigned to any process\n memset(alloc, -1, sizeof(alloc));\n // pick each process and find suitable blocks\n // according to its size ad assign to it\n for (int i=0; i<n; i++) {\n // Find the best fit block for current process\n int bestIdx = -1;\n for (int j=0; j<m; j++) {\n if (bsize[j] >= psize[i]) {\n if (bestIdx == -1)\n bestIdx = j;\n else if (bsize[bestIdx] > bsize[j])\n bestIdx = j;\n }\n }\n // If we could find a block for current process\n if (bestIdx != -1) {\n // allocate block j to p[i] process\n alloc[i] = bestIdx;\n // Reduce available memory in this block.\n bsize[bestIdx] -= psize[i];\n }\n }\n cout << \"\\nProcess No.\\tProcess Size\\tBlock no.\\n\";\n for (int i = 0; i < n; i++) {\n cout << \" \" << i+1 << \"\\t\\t\\t\\t\" << psize[i] << \"\\t\\t\\t\\t\";\n if (alloc[i] != -1)\n cout << alloc[i] + 1;\n else\n cout << \"Not Allocated\";\n cout << endl;\n }\n}\n// Driver code\nint main() {\n int bsize[] = {100, 500, 200, 300, 400};\n int psize[] = {112, 518, 110, 526};\n int m = sizeof(bsize)/sizeof(bsize[0]);\n int n = sizeof(psize)/sizeof(psize[0]);\n bestfit(bsize, m, psize, n);\n return 0 ;\n}"
},
{
"code": null,
"e": 5188,
"s": 4996,
"text": "Process No. Process Size Block no.\n 1 112 3\n 2 518 Not Allocated\n 3 110 4\n 4 526 Not Allocated"
}
]
|
C library function - atan() | The C library function double atan(double x) returns the arc tangent of x in radians.
Following is the declaration for atan() function.
double atan(double x)
x − This is the floating point value.
x − This is the floating point value.
This function returns the principal arc tangent of x, in the interval [-pi/2,+pi/2] radians.
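For instance, atan(1.0) is π/4 ≈ 0.785398 radians and atan(-1.0) is −π/4 ≈ −0.785398 radians; multiplying by 180/π converts these to +45 and −45 degrees respectively, which is exactly the conversion performed in the example below.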
The following example shows the usage of atan() function.
#include <stdio.h>
#include <math.h>
#define PI 3.14159265
int main () {
double x, ret, val;
x = 1.0;
val = 180.0 / PI;
ret = atan (x) * val;
printf("The arc tangent of %lf is %lf degrees", x, ret);
return(0);
}
Let us compile and run the above program that will produce the following result −
The arc tangent of 1.000000 is 45.000000 degrees | [
{
"code": null,
"e": 2093,
"s": 2007,
"text": "The C library function double atan(double x) returns the arc tangent of x in radians."
},
{
"code": null,
"e": 2143,
"s": 2093,
"text": "Following is the declaration for atan() function."
},
{
"code": null,
"e": 2165,
"s": 2143,
"text": "double atan(double x)"
},
{
"code": null,
"e": 2203,
"s": 2165,
"text": "x − This is the floating point value."
},
{
"code": null,
"e": 2241,
"s": 2203,
"text": "x − This is the floating point value."
},
{
"code": null,
"e": 2334,
"s": 2241,
"text": "This function returns the principal arc tangent of x, in the interval [-pi/2,+pi/2] radians."
},
{
"code": null,
"e": 2392,
"s": 2334,
"text": "The following example shows the usage of atan() function."
},
{
"code": null,
"e": 2629,
"s": 2392,
"text": "#include <stdio.h>\n#include <math.h>\n\n#define PI 3.14159265\n\nint main () {\n double x, ret, val;\n x = 1.0;\n val = 180.0 / PI;\n\n ret = atan (x) * val;\n printf(\"The arc tangent of %lf is %lf degrees\", x, ret);\n \n return(0);\n}"
},
{
"code": null,
"e": 2711,
"s": 2629,
"text": "Let us compile and run the above program that will produce the following result −"
},
{
"code": null,
"e": 2761,
"s": 2711,
"text": "The arc tangent of 1.000000 is 45.000000 degrees\n"
},
{
"code": null,
"e": 2794,
"s": 2761,
"text": "\n 12 Lectures \n 2 hours \n"
},
{
"code": null,
"e": 2809,
"s": 2794,
"text": " Nishant Malik"
},
{
"code": null,
"e": 2844,
"s": 2809,
"text": "\n 12 Lectures \n 2.5 hours \n"
},
{
"code": null,
"e": 2859,
"s": 2844,
"text": " Nishant Malik"
},
{
"code": null,
"e": 2894,
"s": 2859,
"text": "\n 48 Lectures \n 6.5 hours \n"
},
{
"code": null,
"e": 2908,
"s": 2894,
"text": " Asif Hussain"
},
{
"code": null,
"e": 2941,
"s": 2908,
"text": "\n 12 Lectures \n 2 hours \n"
},
{
"code": null,
"e": 2959,
"s": 2941,
"text": " Richa Maheshwari"
},
{
"code": null,
"e": 2994,
"s": 2959,
"text": "\n 20 Lectures \n 3.5 hours \n"
},
{
"code": null,
"e": 3013,
"s": 2994,
"text": " Vandana Annavaram"
},
{
"code": null,
"e": 3046,
"s": 3013,
"text": "\n 44 Lectures \n 1 hours \n"
},
{
"code": null,
"e": 3058,
"s": 3046,
"text": " Amit Diwan"
},
{
"code": null,
"e": 3065,
"s": 3058,
"text": " Print"
},
{
"code": null,
"e": 3076,
"s": 3065,
"text": " Add Notes"
}
]
|
Calculate determinant of a Matrix using Pivotal Condensation Method - GeeksforGeeks | 17 Aug, 2021
Given a square matrix mat[][] of dimension N, the task is to find the determinant of the matrix using the pivot condensation method.
Examples:
Input: mat[][] = {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}} Output: 0 Explanation: Performing R3 = R3 – R2 modifies the matrix mat[][] to {{1, 2, 3}, {4, 5, 6}, {3, 3, 3}}. Performing R2 = R2 – R1 modifies the matrix mat[][] to {{1, 2, 3}, {3, 3, 3}, {3, 3, 3}}. Now, the rows R2 and R3 are equal. Therefore, the determinant of the matrix becomes equal to zero (using the property of matrices).
Input: mat[][] = {{1, 0, 2, -1}, {3, 0, 0, 5}, {2, 1, 4, -3}, {1, 0, 5, 0}} Output: 30
Approach: The idea is to use the Pivotal Condensation method to calculate the determinant of the matrix mat[][]. Below is the detailed explanation of the proposed method:
In this method of calculating the determinant of an N × N square matrix:
First the matrix A[][] of dimension N*N is reduced to a matrix B[][] of dimension (N – 1)*(N – 1) such that B[i – 1][j – 1] = A[0][0] * A[i][j] – A[i][0] * A[0][j] for 1 ≤ i, j ≤ N – 1.
Then the determinant value of A[][] can be found out from matrix B[][] using the formula det(A) = det(B) / (A[0][0])^(N – 2).
Now further reduce the matrix to (N – 2)*(N – 2) and calculate the determinant of matrix B[][].
And repeat the above process until the matrix becomes of dimension 2*2.
Then the determinant of the matrix of dimension 2×2 is calculated using formula det(A) = ad-bc for a matrix say A[][] as {{a, b}, {c, d}}.
Follow the steps below to solve the problem:
Initialize a variable, say D, to store the determinant of the matrix.
Iterate while N is greater than 2 and perform the following steps:
Check if mat[0][0] is 0, then swap the current row with the next row such that mat[i][0] > 0 using the property of matrix.
Otherwise, if no row is found such that mat[i][0] > 0, then print zero.
Now, multiply D by pow(1 / mat[0][0], N – 2).
Calculate the next matrix, say B[][], of dimension (N – 1) x (N – 1) using the formula b[i – 1][j – 1] = mat[0][0] * mat[i][j] – mat[0][j] * mat[i][0].
Assign mat = B.
Multiply D by the determinant of the matrix mat[][] of dimension 2×2, i.e. mat[0][0] * mat[1][1] – mat[0][1] * mat[1][0].
Finally, print the value stored in D.
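As an illustration, here is one condensation step applied by hand to the second example above (mat[0][0] = 1, so mul stays 1):
B = {{1*0 – 3*0, 1*0 – 3*2, 1*5 – 3*(-1)}, {1*1 – 2*0, 1*4 – 2*2, 1*(-3) – 2*(-1)}, {1*0 – 1*0, 1*5 – 1*2, 1*0 – 1*(-1)}} = {{0, -6, 8}, {1, 0, -1}, {0, 3, 1}}
Expanding this 3×3 determinant along the first row gives 0*(0*1 – (-1)*3) – (-6)*(1*1 – (-1)*0) + 8*(1*3 – 0*0) = 0 + 6 + 24 = 30, which matches the expected output.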
Below is the implementation of the above approach:
C++
Java
Python3
C#
Javascript
// C++ program for the above approach#include <bits/stdc++.h>using namespace std; // Function to swap valuesvoid swap(float& i, float& j){ float temp = i; i = j; j = temp;} // Function to find the determinant// of matrix M[][]float determinantOfMatrix( vector<vector<float> > mat, int N){ float mul = 1; // Iterate over N while N > 2 while (N > 2) { // Store the reduced matrix // of dimension (N-1)x(N-1) float M[N - 1][N - 1]; int next_index = 1; // Check if first element // of first row is zero while (mat[0][0] == 0) { if (mat[next_index][0] > 0) { // For swapping for (int k = 0; k < N; k++) { swap(mat[0][k], mat[next_index][k]); } // Update mul mul = mul * pow((-1), (next_index)); } else if (next_index == (N - 1)) return 0; next_index++; } // Store the first element // of the matrix float p = mat[0][0]; // Multiply the mul by // (1/p) to the power n-2 mul = mul * pow(1 / p, N - 2); // Calculate the next matrix // of dimension (N-1) x (N-1) for (int i = 1; i < N; i++) { for (int j = 1; j < N; j++) { // Calculate each element of // the matrix from previous // matrix M[i - 1][j - 1] = mat[0][0] * mat[i][j] - mat[i][0] * mat[0][j]; } } // Copy elements of the matrix // M into mat to use it in // next iteration for (int i = 0; i < (N - 1); i++) { for (int j = 0; j < (N - 1); j++) { mat[i][j] = M[i][j]; } } // Decrement N by one N--; } // Calculate the determinant // of reduced 2x2 matrix and // multiply it with factor mul float D = mul * (mat[0][0] * mat[1][1] - mat[0][1] * mat[1][0]); // Print the determinant cout << D;} // Driver Codeint main(){ // Given matrix vector<vector<float> > mat = { { 1, 0, 2, -1 }, { 3, 0, 0, 5 }, { 2, 1, 4, -3 }, { 1, 0, 5, 0 } }; // Size of the matrix int N = mat.size(); // Function Call determinantOfMatrix(mat, N); return 0;}
// Java program for the above approachimport java.util.*;class GFG{ // Function to find the determinant// of matrix M[][]static void determinantOfMatrix(int[][] mat, int N){ int mul = 1; // Iterate over N while N > 2 while (N > 2) { // Store the reduced matrix // of dimension (N-1)x(N-1) int [][]M = new int[N - 1][N - 1]; int next_index = 1; // Check if first element // of first row is zero while (mat[0][0] == 0) { if (mat[next_index][0] > 0) { // For swapping for (int k = 0; k < N; k++) { int temp = mat[0][k]; mat[0][k] = mat[next_index][k]; mat[next_index][k] = temp; } // Update mul mul = (int) (mul * Math.pow((-1), (next_index))); } else if (next_index == (N - 1)) return; next_index++; } // Store the first element // of the matrix int p = mat[0][0]; // Multiply the mul by // (1/p) to the power n-2 mul = (int) (mul * Math.pow(1 / p, N - 2)); // Calculate the next matrix // of dimension (N-1) x (N-1) for (int i = 1; i < N; i++) { for (int j = 1; j < N; j++) { // Calculate each element of // the matrix from previous // matrix M[i - 1][j - 1] = mat[0][0] * mat[i][j] - mat[i][0] * mat[0][j]; } } // Copy elements of the matrix // M into mat to use it in // next iteration for (int i = 0; i < (N - 1); i++) { for (int j = 0; j < (N - 1); j++) { mat[i][j] = M[i][j]; } } // Decrement N by one N--; } // Calculate the determinant // of reduced 2x2 matrix and // multiply it with factor mul int D = mul * (mat[0][0] * mat[1][1] - mat[0][1] * mat[1][0]); // Print the determinant System.out.print(D);} // Driver Codepublic static void main(String[] args){ // Given matrix int[][] mat = { { 1, 0, 2, -1 }, { 3, 0, 0, 5 }, { 2, 1, 4, -3 }, { 1, 0, 5, 0 } }; // Size of the matrix int N = mat.length; // Function Call determinantOfMatrix(mat, N);}} // This code is contributed by 29AjayKumar
# Pyhton 3 program for the above approach # Function to find the determinant# of matrix M[][]def determinantOfMatrix(mat, N): mul = 1 # Iterate over N while N > 2 while (N > 2): # Store the reduced matrix # of dimension (N-1)x(N-1) M = [[0 for i in range(N-1)] for j in range(N-1)] next_index = 1 # Check if first element # of first row is zero while (mat[0][0] == 0): if (mat[next_index][0] > 0): # For swapping for k in range(N): temp = mat[0][k] mat[0][k] = mat[next_index][k] mat[next_index][k] = temp # Update mul mul = mul * pow((-1),(next_index)) elif (next_index == (N - 1)): return 0; next_index += 1 # Store the first element # of the matrix p = mat[0][0] # Multiply the mul by # (1/p) to the power n-2 mul = mul * pow(1 / p, N - 2) # Calculate the next matrix # of dimension (N-1) x (N-1) for i in range(1,N): for j in range(1,N,1): # Calculate each element of # the matrix from previous # matrix M[i - 1][j - 1] = mat[0][0] * mat[i][j] - mat[i][0] * mat[0][j] # Copy elements of the matrix # M into mat to use it in # next iteration for i in range(N - 1): for j in range(N - 1): mat[i][j] = M[i][j] # Decrement N by one N -= 1 # Calculate the determinant # of reduced 2x2 matrix and # multiply it with factor mul D = mul * (mat[0][0] * mat[1][1] - mat[0][1] * mat[1][0]) # Print the determinant print(int(D)) # Driver Codeif __name__ == '__main__': # Given matrix mat = [[1, 0, 2, -1],[3, 0, 0, 5], [2, 1, 4, -3], [1, 0, 5, 0]] # Size of the matrix N = len(mat) # Function Call determinantOfMatrix(mat, N) # This code is contributed by bgangwar59.
// C# program for the above approachusing System; public class GFG{ // Function to find the determinant// of matrix [,]Mstatic void determinantOfMatrix(int[,] mat, int N){ int mul = 1; // Iterate over N while N > 2 while (N > 2) { // Store the reduced matrix // of dimension (N-1)x(N-1) int [,]M = new int[N - 1,N - 1]; int next_index = 1; // Check if first element // of first row is zero while (mat[0,0] == 0) { if (mat[next_index,0] > 0) { // For swapping for (int k = 0; k < N; k++) { int temp = mat[0,k]; mat[0,k] = mat[next_index,k]; mat[next_index,k] = temp; } // Update mul mul = (int) (mul * Math.Pow((-1), (next_index))); } else if (next_index == (N - 1)) return; next_index++; } // Store the first element // of the matrix int p = mat[0,0]; // Multiply the mul by // (1/p) to the power n-2 mul = (int) (mul * Math.Pow(1 / p, N - 2)); // Calculate the next matrix // of dimension (N-1) x (N-1) for (int i = 1; i < N; i++) { for (int j = 1; j < N; j++) { // Calculate each element of // the matrix from previous // matrix M[i - 1,j - 1] = mat[0,0] * mat[i,j] - mat[i,0] * mat[0,j]; } } // Copy elements of the matrix // M into mat to use it in // next iteration for (int i = 0; i < (N - 1); i++) { for (int j = 0; j < (N - 1); j++) { mat[i,j] = M[i,j]; } } // Decrement N by one N--; } // Calculate the determinant // of reduced 2x2 matrix and // multiply it with factor mul int D = mul * (mat[0,0] * mat[1,1] - mat[0,1] * mat[1,0]); // Print the determinant Console.Write(D);} // Driver Codepublic static void Main(String[] args){ // Given matrix int[,] mat = { { 1, 0, 2, -1 }, { 3, 0, 0, 5 }, { 2, 1, 4, -3 }, { 1, 0, 5, 0 } }; // Size of the matrix int N = mat.GetLength(0); // Function Call determinantOfMatrix(mat, N);}} // This code is contributed by Rajput-Ji
<script> // Javascript program for the above approach // Function to find the determinant // of matrix M[][] function determinantOfMatrix(mat, N) { let mul = 1; // Iterate over N while N > 2 while (N > 2) { // Store the reduced matrix // of dimension (N-1)x(N-1) let M = new Array(N - 1); for(let i = 0; i < N - 1; i++) { M[i] = new Array(N - 1); for(let j = 0; j < N - 1; j++) { M[i][j] = 0; } } let next_index = 1; // Check if first element // of first row is zero while (mat[0][0] == 0) { if (mat[next_index][0] > 0) { // For swapping for (let k = 0; k < N; k++) { let temp = mat[0][k]; mat[0][k] = mat[next_index][k]; mat[next_index][k] = temp; } // Update mul mul = (mul * Math.pow((-1), (next_index))); } else if (next_index == (N - 1)) return; next_index++; } // Store the first element // of the matrix let p = mat[0][0]; // Multiply the mul by // (1/p) to the power n-2 mul = (mul * Math.pow(parseInt(1 / p, 10), N - 2)); // Calculate the next matrix // of dimension (N-1) x (N-1) for (let i = 1; i < N; i++) { for (let j = 1; j < N; j++) { // Calculate each element of // the matrix from previous // matrix M[i - 1][j - 1] = mat[0][0] * mat[i][j] - mat[i][0] * mat[0][j]; } } // Copy elements of the matrix // M into mat to use it in // next iteration for (let i = 0; i < (N - 1); i++) { for (let j = 0; j < (N - 1); j++) { mat[i][j] = M[i][j]; } } // Decrement N by one N--; } // Calculate the determinant // of reduced 2x2 matrix and // multiply it with factor mul let D = mul * (mat[0][0] * mat[1][1] - mat[0][1] * mat[1][0]); // Print the determinant document.write(D); } // Given matrix let mat = [ [ 1, 0, 2, -1 ], [ 3, 0, 0, 5 ], [ 2, 1, 4, -3 ], [ 1, 0, 5, 0 ] ]; // Size of the matrix let N = mat.length; // Function Call determinantOfMatrix(mat, N); // This code is contributed by decode2207.</script>
30
Time Complexity: O(N^3). Auxiliary Space: O(N^2).
bgangwar59
29AjayKumar
Rajput-Ji
decode2207
rajeev0719singh
Technical Scripter 2020
Mathematical
Matrix
Technical Scripter
Mathematical
Matrix | [
{
"code": null,
"e": 24301,
"s": 24273,
"text": "\n17 Aug, 2021"
},
{
"code": null,
"e": 24434,
"s": 24301,
"text": "Given a square matrix mat[][] of dimension N, the task is to find the determinant of the matrix using the pivot condensation method."
},
{
"code": null,
"e": 24444,
"s": 24434,
"text": "Examples:"
},
{
"code": null,
"e": 24827,
"s": 24444,
"text": "Input: mat[][] = {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}Output: 0Explanation:Performing R3 = R3 – R2 modifies the matrix mat[][] to {{1, 2, 3}, {4, 5, 6}, {1, 1, 1}}.Performing R2 = R2 – R1 modifies the matrix mat[][] to {{1, 2, 3}, {1, 1, 1}, {1, 1, 1}}.Now, the rows R2 and R3 are equal.Therefore, the determinant will of the matrix becomes equal to zero (using the property of matrix)."
},
{
"code": null,
"e": 24913,
"s": 24827,
"text": "Input: mat[][] = {{1, 0, 2, -1}, {3, 0, 0, 5}, {2, 1, 4, -3}, {1, 0, 5, 0}}Output: 30"
},
{
"code": null,
"e": 25084,
"s": 24913,
"text": "Approach: The idea is to use the Pivotal Condensation method to calculate the determinant of the matrix mat[][]. Below is the detailed explanation of the proposed method:"
},
{
"code": null,
"e": 25165,
"s": 25084,
"text": "In this method of calculating the determinant of dimension N × N, square matrix:"
},
{
"code": null,
"e": 25272,
"s": 25165,
"text": "First the matrix A[][] of dimension N*N is reduced to matrix B[][] of dimension (N – 1)*(N – 1) such that:"
},
{
"code": null,
"e": 25362,
"s": 25272,
"text": "Then the determinant value of A[][] can be found out from matrix B[][] using the formula,"
},
{
"code": null,
"e": 25458,
"s": 25362,
"text": "Now further reduce the matrix to (N – 2)*(N – 2) and calculate the determinant of matrix B[][]."
},
{
"code": null,
"e": 25530,
"s": 25458,
"text": "And repeat the above process until the matrix becomes of dimension 2*2."
},
{
"code": null,
"e": 25669,
"s": 25530,
"text": "Then the determinant of the matrix of dimension 2×2 is calculated using formula det(A) = ad-bc for a matrix say A[][] as {{a, b}, {c, d}}."
},
{
"code": null,
"e": 25714,
"s": 25669,
"text": "Follow the steps below to solve the problem:"
},
{
"code": null,
"e": 25784,
"s": 25714,
"text": "Initialize a variable, say D, to store the determinant of the matrix."
},
{
"code": null,
"e": 26250,
"s": 25784,
"text": "Iterate while N is greater than 2 and check for the following:Check if mat[0][0] is 0, then swap the current row with the next row such that mat[i][0] > 0 using the property of matrix.Otherwise, if no row is found such that mat[i][0] > 0, then print zero.Now, multiply D by pow(1 / mat[0][0], N – 2).Calculate the next matrix, say B[][], of dimension (N – 1) x (N – 1) using the formula b[i – 1][j – 1] = mat[0][0 * mat[i][i] – mat[0][j] * mat[i][0].Assign mat = B."
},
{
"code": null,
"e": 26373,
"s": 26250,
"text": "Check if mat[0][0] is 0, then swap the current row with the next row such that mat[i][0] > 0 using the property of matrix."
},
{
"code": null,
"e": 26445,
"s": 26373,
"text": "Otherwise, if no row is found such that mat[i][0] > 0, then print zero."
},
{
"code": null,
"e": 26491,
"s": 26445,
"text": "Now, multiply D by pow(1 / mat[0][0], N – 2)."
},
{
"code": null,
"e": 26642,
"s": 26491,
"text": "Calculate the next matrix, say B[][], of dimension (N – 1) x (N – 1) using the formula b[i – 1][j – 1] = mat[0][0 * mat[i][i] – mat[0][j] * mat[i][0]."
},
{
"code": null,
"e": 26658,
"s": 26642,
"text": "Assign mat = B."
},
{
"code": null,
"e": 26779,
"s": 26658,
"text": "Multiply D by the determinant of the matrix mat[][] of dimension 2×2, i.e mat[0][0] * mat[1][1] – mat[0][j] * mat[i][0]."
},
{
"code": null,
"e": 26817,
"s": 26779,
"text": "Finally, print the value stored in D."
},
{
"code": null,
"e": 26868,
"s": 26817,
"text": "Below is the implementation of the above approach:"
},
{
"code": null,
"e": 26872,
"s": 26868,
"text": "C++"
},
{
"code": null,
"e": 26877,
"s": 26872,
"text": "Java"
},
{
"code": null,
"e": 26885,
"s": 26877,
"text": "Python3"
},
{
"code": null,
"e": 26888,
"s": 26885,
"text": "C#"
},
{
"code": null,
"e": 26899,
"s": 26888,
"text": "Javascript"
},
{
"code": "// C++ program for the above approach#include <bits/stdc++.h>using namespace std; // Function to swap valuesvoid swap(float& i, float& j){ float temp = i; i = j; j = temp;} // Function to find the determinant// of matrix M[][]float determinantOfMatrix( vector<vector<float> > mat, int N){ float mul = 1; // Iterate over N while N > 2 while (N > 2) { // Store the reduced matrix // of dimension (N-1)x(N-1) float M[N - 1][N - 1]; int next_index = 1; // Check if first element // of first row is zero while (mat[0][0] == 0) { if (mat[next_index][0] > 0) { // For swapping for (int k = 0; k < N; k++) { swap(mat[0][k], mat[next_index][k]); } // Update mul mul = mul * pow((-1), (next_index)); } else if (next_index == (N - 1)) return 0; next_index++; } // Store the first element // of the matrix float p = mat[0][0]; // Multiply the mul by // (1/p) to the power n-2 mul = mul * pow(1 / p, N - 2); // Calculate the next matrix // of dimension (N-1) x (N-1) for (int i = 1; i < N; i++) { for (int j = 1; j < N; j++) { // Calculate each element of // the matrix from previous // matrix M[i - 1][j - 1] = mat[0][0] * mat[i][j] - mat[i][0] * mat[0][j]; } } // Copy elements of the matrix // M into mat to use it in // next iteration for (int i = 0; i < (N - 1); i++) { for (int j = 0; j < (N - 1); j++) { mat[i][j] = M[i][j]; } } // Decrement N by one N--; } // Calculate the determinant // of reduced 2x2 matrix and // multiply it with factor mul float D = mul * (mat[0][0] * mat[1][1] - mat[0][1] * mat[1][0]); // Print the determinant cout << D;} // Driver Codeint main(){ // Given matrix vector<vector<float> > mat = { { 1, 0, 2, -1 }, { 3, 0, 0, 5 }, { 2, 1, 4, -3 }, { 1, 0, 5, 0 } }; // Size of the matrix int N = mat.size(); // Function Call determinantOfMatrix(mat, N); return 0;}",
"e": 29577,
"s": 26899,
"text": null
},
{
"code": "// Java program for the above approachimport java.util.*;class GFG{ // Function to find the determinant// of matrix M[][]static void determinantOfMatrix(int[][] mat, int N){ int mul = 1; // Iterate over N while N > 2 while (N > 2) { // Store the reduced matrix // of dimension (N-1)x(N-1) int [][]M = new int[N - 1][N - 1]; int next_index = 1; // Check if first element // of first row is zero while (mat[0][0] == 0) { if (mat[next_index][0] > 0) { // For swapping for (int k = 0; k < N; k++) { int temp = mat[0][k]; mat[0][k] = mat[next_index][k]; mat[next_index][k] = temp; } // Update mul mul = (int) (mul * Math.pow((-1), (next_index))); } else if (next_index == (N - 1)) return; next_index++; } // Store the first element // of the matrix int p = mat[0][0]; // Multiply the mul by // (1/p) to the power n-2 mul = (int) (mul * Math.pow(1 / p, N - 2)); // Calculate the next matrix // of dimension (N-1) x (N-1) for (int i = 1; i < N; i++) { for (int j = 1; j < N; j++) { // Calculate each element of // the matrix from previous // matrix M[i - 1][j - 1] = mat[0][0] * mat[i][j] - mat[i][0] * mat[0][j]; } } // Copy elements of the matrix // M into mat to use it in // next iteration for (int i = 0; i < (N - 1); i++) { for (int j = 0; j < (N - 1); j++) { mat[i][j] = M[i][j]; } } // Decrement N by one N--; } // Calculate the determinant // of reduced 2x2 matrix and // multiply it with factor mul int D = mul * (mat[0][0] * mat[1][1] - mat[0][1] * mat[1][0]); // Print the determinant System.out.print(D);} // Driver Codepublic static void main(String[] args){ // Given matrix int[][] mat = { { 1, 0, 2, -1 }, { 3, 0, 0, 5 }, { 2, 1, 4, -3 }, { 1, 0, 5, 0 } }; // Size of the matrix int N = mat.length; // Function Call determinantOfMatrix(mat, N);}} // This code is contributed by 29AjayKumar",
"e": 32340,
"s": 29577,
"text": null
},
{
"code": "# Pyhton 3 program for the above approach # Function to find the determinant# of matrix M[][]def determinantOfMatrix(mat, N): mul = 1 # Iterate over N while N > 2 while (N > 2): # Store the reduced matrix # of dimension (N-1)x(N-1) M = [[0 for i in range(N-1)] for j in range(N-1)] next_index = 1 # Check if first element # of first row is zero while (mat[0][0] == 0): if (mat[next_index][0] > 0): # For swapping for k in range(N): temp = mat[0][k] mat[0][k] = mat[next_index][k] mat[next_index][k] = temp # Update mul mul = mul * pow((-1),(next_index)) elif (next_index == (N - 1)): return 0; next_index += 1 # Store the first element # of the matrix p = mat[0][0] # Multiply the mul by # (1/p) to the power n-2 mul = mul * pow(1 / p, N - 2) # Calculate the next matrix # of dimension (N-1) x (N-1) for i in range(1,N): for j in range(1,N,1): # Calculate each element of # the matrix from previous # matrix M[i - 1][j - 1] = mat[0][0] * mat[i][j] - mat[i][0] * mat[0][j] # Copy elements of the matrix # M into mat to use it in # next iteration for i in range(N - 1): for j in range(N - 1): mat[i][j] = M[i][j] # Decrement N by one N -= 1 # Calculate the determinant # of reduced 2x2 matrix and # multiply it with factor mul D = mul * (mat[0][0] * mat[1][1] - mat[0][1] * mat[1][0]) # Print the determinant print(int(D)) # Driver Codeif __name__ == '__main__': # Given matrix mat = [[1, 0, 2, -1],[3, 0, 0, 5], [2, 1, 4, -3], [1, 0, 5, 0]] # Size of the matrix N = len(mat) # Function Call determinantOfMatrix(mat, N) # This code is contributed by bgangwar59.",
"e": 34383,
"s": 32340,
"text": null
},
{
"code": "// C# program for the above approachusing System; public class GFG{ // Function to find the determinant// of matrix [,]Mstatic void determinantOfMatrix(int[,] mat, int N){ int mul = 1; // Iterate over N while N > 2 while (N > 2) { // Store the reduced matrix // of dimension (N-1)x(N-1) int [,]M = new int[N - 1,N - 1]; int next_index = 1; // Check if first element // of first row is zero while (mat[0,0] == 0) { if (mat[next_index,0] > 0) { // For swapping for (int k = 0; k < N; k++) { int temp = mat[0,k]; mat[0,k] = mat[next_index,k]; mat[next_index,k] = temp; } // Update mul mul = (int) (mul * Math.Pow((-1), (next_index))); } else if (next_index == (N - 1)) return; next_index++; } // Store the first element // of the matrix int p = mat[0,0]; // Multiply the mul by // (1/p) to the power n-2 mul = (int) (mul * Math.Pow(1 / p, N - 2)); // Calculate the next matrix // of dimension (N-1) x (N-1) for (int i = 1; i < N; i++) { for (int j = 1; j < N; j++) { // Calculate each element of // the matrix from previous // matrix M[i - 1,j - 1] = mat[0,0] * mat[i,j] - mat[i,0] * mat[0,j]; } } // Copy elements of the matrix // M into mat to use it in // next iteration for (int i = 0; i < (N - 1); i++) { for (int j = 0; j < (N - 1); j++) { mat[i,j] = M[i,j]; } } // Decrement N by one N--; } // Calculate the determinant // of reduced 2x2 matrix and // multiply it with factor mul int D = mul * (mat[0,0] * mat[1,1] - mat[0,1] * mat[1,0]); // Print the determinant Console.Write(D);} // Driver Codepublic static void Main(String[] args){ // Given matrix int[,] mat = { { 1, 0, 2, -1 }, { 3, 0, 0, 5 }, { 2, 1, 4, -3 }, { 1, 0, 5, 0 } }; // Size of the matrix int N = mat.GetLength(0); // Function Call determinantOfMatrix(mat, N);}} // This code is contributed by Rajput-Ji",
"e": 37124,
"s": 34383,
"text": null
},
{
"code": "<script> // Javascript program for the above approach // Function to find the determinant // of matrix M[][] function determinantOfMatrix(mat, N) { let mul = 1; // Iterate over N while N > 2 while (N > 2) { // Store the reduced matrix // of dimension (N-1)x(N-1) let M = new Array(N - 1); for(let i = 0; i < N - 1; i++) { M[i] = new Array(N - 1); for(let j = 0; j < N - 1; j++) { M[i][j] = 0; } } let next_index = 1; // Check if first element // of first row is zero while (mat[0][0] == 0) { if (mat[next_index][0] > 0) { // For swapping for (let k = 0; k < N; k++) { let temp = mat[0][k]; mat[0][k] = mat[next_index][k]; mat[next_index][k] = temp; } // Update mul mul = (mul * Math.pow((-1), (next_index))); } else if (next_index == (N - 1)) return; next_index++; } // Store the first element // of the matrix let p = mat[0][0]; // Multiply the mul by // (1/p) to the power n-2 mul = (mul * Math.pow(parseInt(1 / p, 10), N - 2)); // Calculate the next matrix // of dimension (N-1) x (N-1) for (let i = 1; i < N; i++) { for (let j = 1; j < N; j++) { // Calculate each element of // the matrix from previous // matrix M[i - 1][j - 1] = mat[0][0] * mat[i][j] - mat[i][0] * mat[0][j]; } } // Copy elements of the matrix // M into mat to use it in // next iteration for (let i = 0; i < (N - 1); i++) { for (let j = 0; j < (N - 1); j++) { mat[i][j] = M[i][j]; } } // Decrement N by one N--; } // Calculate the determinant // of reduced 2x2 matrix and // multiply it with factor mul let D = mul * (mat[0][0] * mat[1][1] - mat[0][1] * mat[1][0]); // Print the determinant document.write(D); } // Given matrix let mat = [ [ 1, 0, 2, -1 ], [ 3, 0, 0, 5 ], [ 2, 1, 4, -3 ], [ 1, 0, 5, 0 ] ]; // Size of the matrix let N = mat.length; // Function Call determinantOfMatrix(mat, N); // This code is contributed by decode2207.</script>",
"e": 40285,
"s": 37124,
"text": null
},
{
"code": null,
"e": 40291,
"s": 40288,
"text": "30"
},
{
"code": null,
"e": 40339,
"s": 40293,
"text": "Time Complexity: O(N3)Auxiliary Space: O(N2) "
},
{
"code": null,
"e": 40350,
"s": 40339,
"text": "bgangwar59"
},
{
"code": null,
"e": 40362,
"s": 40350,
"text": "29AjayKumar"
},
{
"code": null,
"e": 40372,
"s": 40362,
"text": "Rajput-Ji"
},
{
"code": null,
"e": 40383,
"s": 40372,
"text": "decode2207"
},
{
"code": null,
"e": 40399,
"s": 40383,
"text": "rajeev0719singh"
},
{
"code": null,
"e": 40423,
"s": 40399,
"text": "Technical Scripter 2020"
},
{
"code": null,
"e": 40436,
"s": 40423,
"text": "Mathematical"
},
{
"code": null,
"e": 40443,
"s": 40436,
"text": "Matrix"
},
{
"code": null,
"e": 40462,
"s": 40443,
"text": "Technical Scripter"
},
{
"code": null,
"e": 40475,
"s": 40462,
"text": "Mathematical"
},
{
"code": null,
"e": 40482,
"s": 40475,
"text": "Matrix"
},
{
"code": null,
"e": 40580,
"s": 40482,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 40589,
"s": 40580,
"text": "Comments"
},
{
"code": null,
"e": 40602,
"s": 40589,
"text": "Old Comments"
},
{
"code": null,
"e": 40633,
"s": 40602,
"text": "Modular multiplicative inverse"
},
{
"code": null,
"e": 40665,
"s": 40633,
"text": "Algorithm to solve Rubik's Cube"
},
{
"code": null,
"e": 40700,
"s": 40665,
"text": "Count ways to reach the n'th stair"
},
{
"code": null,
"e": 40733,
"s": 40700,
"text": "Program to multiply two matrices"
},
{
"code": null,
"e": 40786,
"s": 40733,
"text": "Convex Hull | Set 1 (Jarvis's Algorithm or Wrapping)"
},
{
"code": null,
"e": 40821,
"s": 40786,
"text": "Matrix Chain Multiplication | DP-8"
},
{
"code": null,
"e": 40865,
"s": 40821,
"text": "Program to find largest element in an array"
},
{
"code": null,
"e": 40889,
"s": 40865,
"text": "Sudoku | Backtracking-7"
},
{
"code": null,
"e": 40951,
"s": 40889,
"text": "Divide and Conquer | Set 5 (Strassen's Matrix Multiplication)"
}
]
|
File Upload in ElectronJS - GeeksforGeeks | 15 May, 2020
ElectronJS is an Open Source Framework used for building Cross-Platform native desktop applications using web technologies such as HTML, CSS, and JavaScript which are capable of running on Windows, macOS, and Linux operating systems. It combines the Chromium engine and NodeJS into a Single Runtime.
Any native desktop application should integrate itself with the System OS environment. The application should have the ability to interact with core OS functionalities such as the File System, System Tray, etc. Electron provides us with built-in dialog module to display the native System dialogs for interacting with files. This tutorial will use the instance method of the dialog module to demonstrate File Upload functionality in Electron.
We assume you are familiar with the prerequisites as covered in the above-mentioned link. For Electron to work, node and npm need to be pre-installed in the system.
Dialog Module: The dialog Module is part of the Main Process. To import and use the dialog Module in the Renderer Process, we will be using Electron remote module.
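For instance (a minimal sketch, assuming Electron 8 as used in this project, where the remote module is available to the Renderer Process with nodeIntegration enabled):
// In the Renderer Process (index.js), the dialog module is reached through remote
const { remote } = require('electron');
const dialog = remote.dialog;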
Project Structure:
Example: We will start by building the Electron Application for File Upload functionality by following the given steps.
Step 1: Navigate to an Empty Directory to setup the project, and run the following command,
npm init
To generate the package.json file. Install Electron using npm.
npm install electron --save
This will install the required node_modules dependencies. Install axios package using npm.
npm install axios --save
This package is a Promise based HTTP client for NodeJS. This package is used to make HTTP calls to REST APIs. For more detailed information on axios, Refer this link. Create the sample.txt file in the assets folder for demo purposes.
package.json:
{
"name": "electron-fileupload",
"version": "1.0.0",
"description": "File Upload in Electron",
"main": "main.js",
"scripts": {
"start": "electron ."
},
"keywords": [
"electron"
],
"author": "Radhesh Khanna",
"license": "ISC",
"dependencies": {
"axios": "^0.19.2",
"electron": "^8.2.5"
}
}
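Since axios is listed as a dependency above, a minimal helper for posting a chosen file's contents to a REST endpoint could look like the following. This is a hypothetical sketch: the endpoint URL and the JSON payload shape are assumptions and are not part of the original project.
// upload-helper.js - hypothetical sketch, not part of the original tutorial code
const fs = require('fs');
const path = require('path');
const axios = require('axios');

function uploadFile(filePath) {
  // Read the selected file and POST its contents as a simple JSON payload
  const content = fs.readFileSync(filePath, 'utf-8');
  return axios.post('https://example.com/api/upload', {
    name: path.basename(filePath),
    content: content
  });
}

module.exports = { uploadFile };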
Step 2: This is the main.js file. For the Boilerplate code of the main.js file, Refer this link. We have modified the code to suit our project needs.
main.js:
const { app, BrowserWindow } = require('electron')

function createWindow () {
  // Create the browser window.
  const win = new BrowserWindow({
    width: 800,
    height: 600,
    webPreferences: {
      nodeIntegration: true
    }
  })

  // Load the index.html of the app.
  win.loadFile('src/index.html')

  // Open the DevTools.
  win.webContents.openDevTools()
}

// This method will be called when Electron has finished
// initialization and is ready to create browser windows.
// Some APIs can only be used after this event occurs.
// This method is equivalent to 'app.on('ready', function())'
app.whenReady().then(createWindow)

// Quit when all windows are closed.
app.on('window-all-closed', () => {
  // On macOS it is common for applications and their menu bar
  // To stay active until the user quits explicitly with Cmd + Q
  if (process.platform !== 'darwin') {
    app.quit()
  }
})

app.on('activate', () => {
  // On macOS it's common to re-create a window in the
  // app when the dock icon is clicked and there are no
  // other windows open.
  if (BrowserWindow.getAllWindows().length === 0) {
    createWindow()
  }
})

// In this file, you can include the rest of your
// app's specific main process code. You can also
// put them in separate files and require them here.
Step 3: Create the index.html file and index.js file within the src directory. We will also copy the boilerplate code for the index.html file from the above-mentioned link. We have modified the code to suit our project needs.
index.html:
<!DOCTYPE html>
<html>
  <head>
    <meta charset="UTF-8">
    <title>Hello World!</title>
    <!-- https://electronjs.org/docs/tutorial/security#csp-meta-tag -->
    <meta http-equiv="Content-Security-Policy"
          content="script-src 'self' 'unsafe-inline';" />
  </head>
  <body>
    <h1>Hello World!</h1>
    We are using node
    <script>document.write(process.versions.node)</script>,
    Chrome
    <script>document.write(process.versions.chrome)</script>,
    and Electron
    <script>document.write(process.versions.electron)</script>.

    <h3>File Upload in Electron</h3>
    <button id="upload">Upload File</button>

    <!-- Adding Individual Renderer Process JS File -->
    <script src="index.js"></script>
  </body>
</html>
Output: At this point, our application is set up and we can launch the application to check the GUI output. To launch the Electron application, run the command:
npm start
Step 4: The Upload File button does not have any functionality associated with it yet. The dialog.showOpenDialog(browserWindow, options) method takes in the following parameters.
browserWindow: BrowserWindow (Optional) The BrowserWindow instance. This argument allows the dialog to attach itself to the parent window, making it a modal. A modal window is a child window that disables the parent window. If the BrowserWindow is not shown, the dialog will not be attached to it; in that case it will be displayed as an independent window. In the above code, the BrowserWindow instance is not being passed to the dialog, therefore the dialog opens as an independent window on clicking the Upload File button. A sketch of attaching the dialog to the parent window is shown after this parameter list.
options: Object It takes in the following parameters,
title: String (Optional) The title to be displayed on the dialog window.
defaultPath: String (Optional) The directory to be opened as defined by the default path on clicking the Upload File button.
buttonLabel: String (Optional) Custom label for the confirmation Button. If empty, the default label will be used. In the above code it is defined as Upload.
message: String (Optional) This parameter is supported in macOS only. This is used to display the custom message above input boxes.
securityScopedBookmarks: Boolean (Optional) This parameter is supported in macOS only. This parameter is used to create security scoped bookmarks when packaged for the Mac App Store. For more detailed Information, Refer this link.
filters: FileFilter[{}] (Optional) It is an Array of Objects. It defines an array of file types that can be displayed or selected when we want to limit the user to a specific type. We can define multiple file type objects belonging to different categories. The FileFilter object takes in the following parameters,
name: String The name of the category of extensions.
extensions: [] The extensions array should consist of extensions without wildcards or dots as demonstrated in the code. To show all files, use the * wildcard (no other wildcard is supported). For more detailed Information, Refer this link.
In the above code, we want to restrict the user to Text files only. Hence we have defined the name as Text Files and the extensions array as [‘txt’, ‘docx’].
properties: String[] (Optional) Contains a list of features which are available for the native dialog. It takes in the following values,
openFile: Allows the Files to be selected.
openDirectory: Allows directory/folders to be selected.
multiSelections: Allows multiple files to be selected in the dialog.
showHiddenFiles: Show Hidden files in dialog.
createDirectory: This value is supported in macOS only. It allows creating new directories from within the dialog. In Windows, the context-menu is pre-available in the dialog (right-click in dialog window) and we can create new files and directories from it.
promptToCreate: This value is supported in Windows only. This value is used when the filepath entered in the dialog does not exist on the System. At this time, it should prompt the user to create. This does not actually create the file at the path but allows non-existent paths to be returned that can further be created and used by the application.
noResolveAliases: This value is supported in macOS only. It disables the automatic resolution of filepath of the alias to its original target. Selected aliases will now return the alias path instead of their target path.
treatPackageAsDirectory: This value is supported in macOS only. It treats packages, such as .app folders, as a directory instead of a file.
dontAddToRecent: This value is supported in Windows only. This value signifies that the file/directory being chosen should not be added to the recent documents list.
Note: According to the official Electron documentation, the open dialog cannot be both a file selector and a directory selector in Windows and Linux OS. If both properties ([‘openFile’, ‘openDirectory’]) are specified on these platforms, the directory selector will be shown and we won’t be able to select files. Hence, in the code, we have specified different dialog properties for the win32/linux and darwin platforms.
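As referenced in the browserWindow parameter above, the following is a minimal sketch of attaching the dialog to the parent window as a modal. It assumes the remote module is enabled (as it is in the index.js of this project) and uses electron.remote.getCurrentWindow() to obtain the BrowserWindow instance; the options shown are only a shortened example.

const electron = require('electron');
const dialog = electron.remote.dialog;

// Obtaining the BrowserWindow instance of the current window via remote
const parentWindow = electron.remote.getCurrentWindow();

// Passing the BrowserWindow instance as the first argument
// attaches the dialog to that window as a modal
dialog.showOpenDialog(parentWindow, {
    title: 'Select the File to be uploaded',
    buttonLabel: 'Upload',
    properties: ['openFile']
}).then(file => {
    console.log(file.canceled, file.filePaths);
}).catch(err => {
    console.log(err);
});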
The dialog.showOpenDialog(browserWindow, options) returns a Promise. It resolves to an Object containing the following parameters,
canceled: Boolean Whether or not the dialog operation was cancelled.
filePaths: String[] An Array of filepaths chosen by the user. If the dialog operation is cancelled, it is going to be an empty Array. In case the multiSelections value is not provided in properties, the filePaths array will return a single element.
bookmarks: String[] (Optional) This String array is supported in macOS only. This is returned when the securityScopedBookmarks parameter is specified as true in the options Object.
index.js: Add the following snippet in that file.
const electron = require('electron');
const path = require('path');

// Importing dialog module using remote
const dialog = electron.remote.dialog;

var uploadFile = document.getElementById('upload');

// Defining a Global file path Variable to store
// user-selected file
global.filepath = undefined;

uploadFile.addEventListener('click', () => {
    // If the platform is 'win32' or 'Linux'
    if (process.platform !== 'darwin') {
        // Resolves to a Promise<Object>
        dialog.showOpenDialog({
            title: 'Select the File to be uploaded',
            defaultPath: path.join(__dirname, '../assets/'),
            buttonLabel: 'Upload',
            // Restricting the user to only Text Files.
            filters: [
                {
                    name: 'Text Files',
                    extensions: ['txt', 'docx']
                }
            ],
            // Specifying the File Selector Property
            properties: ['openFile']
        }).then(file => {
            // Stating whether dialog operation was
            // cancelled or not.
            console.log(file.canceled);
            if (!file.canceled) {
                // Updating the GLOBAL filepath variable
                // to user-selected file.
                global.filepath = file.filePaths[0].toString();
                console.log(global.filepath);
            }
        }).catch(err => {
            console.log(err)
        });
    } else {
        // If the platform is 'darwin' (macOS)
        dialog.showOpenDialog({
            title: 'Select the File to be uploaded',
            defaultPath: path.join(__dirname, '../assets/'),
            buttonLabel: 'Upload',
            filters: [
                {
                    name: 'Text Files',
                    extensions: ['txt', 'docx']
                }
            ],
            // Specifying the File Selector and Directory
            // Selector Property In macOS
            properties: ['openFile', 'openDirectory']
        }).then(file => {
            console.log(file.canceled);
            if (!file.canceled) {
                global.filepath = file.filePaths[0].toString();
                console.log(global.filepath);
            }
        }).catch(err => {
            console.log(err)
        });
    }
});
Output:
Step 5: Once we have obtained the file path from the dialog window, we can follow either of the two approaches:
Approach 1: Upload the file to a server by making an HTTP POST REST API call and let the server handle the file processing. We will use the axios package that was installed earlier to achieve this. In the index.js file, add the following code just after console.log(global.filepath); } within the Promise:
const fs = require('fs');
const axios = require('axios');

if (global.filepath && !file.canceled) {
    var formData = new FormData();
    formData.append('file', fs.createReadStream(global.filepath));
    axios.post('[Custom URL]', formData, {
        headers: {
            'Content-Type': 'multipart/form-data'
        }
    });
}
// ...
Explanation: We have appended the file to a formData object using the fs module, and set the Content-Type header to multipart/form-data for the POST Request. Replace the [Custom URL] with a URL of the REST API to receive the file on the server side. Once we have received the file on the server side, we can let the server handle the file processing and display the respective response.
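The server-side handling is outside the scope of this project, but as a minimal sketch of what the receiving end could look like: the snippet below assumes a NodeJS server using the express and multer packages (neither is part of this project), and the /upload route name is only an example.

// server.js (hypothetical receiving server, not part of this project)
const express = require('express');
const multer = require('multer');

const app = express();
// Store incoming files in an uploads/ directory
const upload = multer({ dest: 'uploads/' });

// 'file' must match the field name used in formData.append('file', ...)
app.post('/upload', upload.single('file'), (req, res) => {
    console.log('Received file:', req.file.originalname);
    res.json({ status: 'ok' });
});

app.listen(3000);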
Approach 2: Process the file on the system itself by reading/manipulating the contents of the file. We can use the fs module to read the contents of the file, and then perform further manipulations as required. In the index.js file, add the following code just after console.log(global.filepath); } within the Promise:
const fs = require('fs');

if (global.filepath && !file.canceled) {
    fs.readFile(global.filepath, { encoding: 'utf-8' }, function (err, data) {
        if (!err) {
            console.log('received data: ' + data);
        } else {
            console.log(err);
        }
    });
}
// ...
Explanation: We will read the file from the GLOBAL file path variable that was updated from the dialog window with standard UTF-8 encoding. Once we have successfully fetched the contents of the file, we can update/manipulate the contents as per the desired functionality.
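As a minimal sketch of such a further manipulation (the transformation and the output file name below are arbitrary examples, not part of this project), the fetched contents could be transformed and written back with the same fs module. The snippet is meant to go inside the success branch of the readFile callback above, where data holds the file contents.

// Inside the readFile callback, after 'data' has been fetched successfully
const transformed = data.toUpperCase();

// Writing the transformed contents to a new file next to the original
fs.writeFile(global.filepath + '.upper.txt', transformed, (err) => {
    if (!err) {
        console.log('Transformed copy written successfully');
    } else {
        console.log(err);
    }
});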
sample.txt:
Output:
ElectronJS
JavaScript
Web Technologies
{
"code": null,
"e": 23931,
"s": 23903,
"text": "\n15 May, 2020"
},
{
"code": null,
"e": 24231,
"s": 23931,
"text": "ElectronJS is an Open Source Framework used for building Cross-Platform native desktop applications using web technologies such as HTML, CSS, and JavaScript which are capable of running on Windows, macOS, and Linux operating systems. It combines the Chromium engine and NodeJS into a Single Runtime."
},
{
"code": null,
"e": 24674,
"s": 24231,
"text": "Any native desktop application should integrate itself with the System OS environment. The application should have the ability to interact with core OS functionalities such as the File System, System Tray, etc. Electron provides us with built-in dialog module to display the native System dialogs for interacting with files. This tutorial will use the instance method of the dialog module to demonstrate File Upload functionality in Electron."
},
{
"code": null,
"e": 24839,
"s": 24674,
"text": "We assume you are familiar with the prerequisites as covered in the above-mentioned link. For Electron to work, node and npm need to be pre-installed in the system."
},
{
"code": null,
"e": 25003,
"s": 24839,
"text": "Dialog Module: The dialog Module is part of the Main Process. To import and use the dialog Module in the Renderer Process, we will be using Electron remote module."
},
{
"code": null,
"e": 25022,
"s": 25003,
"text": "Project Structure:"
},
{
"code": null,
"e": 25142,
"s": 25022,
"text": "Example: We will start by building the Electron Application for File Upload functionality by following the given steps."
},
{
"code": null,
"e": 26020,
"s": 25142,
"text": "Step 1: Navigate to an Empty Directory to setup the project, and run the following command,npm initTo generate the package.json file. Install Electron using npm.npm install electron --saveThis will install the required node_modules dependencies. Install axios package using npm.npm install axios --saveThis package is a Promise based HTTP client for NodeJS. This package is used to make HTTP calls to REST APIs. For more detailed information on axios, Refer this link. Create the sample.txt file in the assets folder for demo purposes.package.json:{\n \"name\": \"electron-fileupload\",\n \"version\": \"1.0.0\",\n \"description\": \"File Upload in Electron\",\n \"main\": \"main.js\",\n \"scripts\": {\n \"start\": \"electron .\"\n },\n \"keywords\": [\n \"electron\"\n ],\n \"author\": \"Radhesh Khanna\",\n \"license\": \"ISC\",\n \"dependencies\": {\n \"axios\": \"^0.19.2\",\n \"electron\": \"^8.2.5\"\n }\n}\n"
},
{
"code": null,
"e": 26029,
"s": 26020,
"text": "npm init"
},
{
"code": null,
"e": 26092,
"s": 26029,
"text": "To generate the package.json file. Install Electron using npm."
},
{
"code": null,
"e": 26120,
"s": 26092,
"text": "npm install electron --save"
},
{
"code": null,
"e": 26211,
"s": 26120,
"text": "This will install the required node_modules dependencies. Install axios package using npm."
},
{
"code": null,
"e": 26236,
"s": 26211,
"text": "npm install axios --save"
},
{
"code": null,
"e": 26470,
"s": 26236,
"text": "This package is a Promise based HTTP client for NodeJS. This package is used to make HTTP calls to REST APIs. For more detailed information on axios, Refer this link. Create the sample.txt file in the assets folder for demo purposes."
},
{
"code": null,
"e": 26484,
"s": 26470,
"text": "package.json:"
},
{
"code": null,
"e": 26814,
"s": 26484,
"text": "{\n \"name\": \"electron-fileupload\",\n \"version\": \"1.0.0\",\n \"description\": \"File Upload in Electron\",\n \"main\": \"main.js\",\n \"scripts\": {\n \"start\": \"electron .\"\n },\n \"keywords\": [\n \"electron\"\n ],\n \"author\": \"Radhesh Khanna\",\n \"license\": \"ISC\",\n \"dependencies\": {\n \"axios\": \"^0.19.2\",\n \"electron\": \"^8.2.5\"\n }\n}\n"
},
{
"code": null,
"e": 28242,
"s": 26814,
"text": "Step 2: This is the main.js file. For the Boilerplate code of the main.js file, Refer this link. We have modified the code to suit our project needs.main.js:const { app, BrowserWindow } = require('electron') function createWindow () { // Create the browser window. const win = new BrowserWindow({ width: 800, height: 600, webPreferences: { nodeIntegration: true } }) // Load the index.html of the app. win.loadFile('src/index.html') // Open the DevTools. win.webContents.openDevTools()} // This method will be called when Electron has finished// initialization and is ready to create browser windows.// Some APIs can only be used after this event occurs.// This method is equivalent to 'app.on('ready', function())'app.whenReady().then(createWindow) // Quit when all windows are closed.app.on('window-all-closed', () => { // On macOS it is common for applications and their menu bar // To stay active until the user quits explicitly with Cmd + Q if (process.platform !== 'darwin') { app.quit() }}) app.on('activate', () => { // On macOS it's common to re-create a window in the // app when the dock icon is clicked and there are no // other windows open. if (BrowserWindow.getAllWindows().length === 0) { createWindow() }}) // In this file, you can include the rest of your // app's specific main process code. You can also // put them in separate files and require them here."
},
{
"code": null,
"e": 28251,
"s": 28242,
"text": "main.js:"
},
{
"code": "const { app, BrowserWindow } = require('electron') function createWindow () { // Create the browser window. const win = new BrowserWindow({ width: 800, height: 600, webPreferences: { nodeIntegration: true } }) // Load the index.html of the app. win.loadFile('src/index.html') // Open the DevTools. win.webContents.openDevTools()} // This method will be called when Electron has finished// initialization and is ready to create browser windows.// Some APIs can only be used after this event occurs.// This method is equivalent to 'app.on('ready', function())'app.whenReady().then(createWindow) // Quit when all windows are closed.app.on('window-all-closed', () => { // On macOS it is common for applications and their menu bar // To stay active until the user quits explicitly with Cmd + Q if (process.platform !== 'darwin') { app.quit() }}) app.on('activate', () => { // On macOS it's common to re-create a window in the // app when the dock icon is clicked and there are no // other windows open. if (BrowserWindow.getAllWindows().length === 0) { createWindow() }}) // In this file, you can include the rest of your // app's specific main process code. You can also // put them in separate files and require them here.",
"e": 29522,
"s": 28251,
"text": null
},
{
"code": null,
"e": 30547,
"s": 29522,
"text": "Step 3: Create the index.html file and index.js file within the src directory. We will also copy the boilerplate code for the index.html file from the above-mentioned link. We have modified the code to suit our project needs.index.html:<!DOCTYPE html><html> <head> <meta charset=\"UTF-8\"> <title>Hello World!</title> <!-- https://electronjs.org/docs/tutorial /security#csp-meta-tag --> <meta http-equiv=\"Content-Security-Policy\" content=\"script-src 'self' 'unsafe-inline';\" /> </head> <body> <h1>Hello World!</h1> We are using node <script> document.write(process.versions.node) </script>, Chrome <script> document.write(process.versions.chrome) </script>, and Electron <script> document.write(process.versions.electron) </script>. <h3>File Upload in Electron</h3> <button id=\"upload\">Upload File</button> <!-- Adding Individual Renderer Process JS File --> <script src=\"index.js\"></script> </body></html>"
},
{
"code": null,
"e": 30559,
"s": 30547,
"text": "index.html:"
},
{
"code": "<!DOCTYPE html><html> <head> <meta charset=\"UTF-8\"> <title>Hello World!</title> <!-- https://electronjs.org/docs/tutorial /security#csp-meta-tag --> <meta http-equiv=\"Content-Security-Policy\" content=\"script-src 'self' 'unsafe-inline';\" /> </head> <body> <h1>Hello World!</h1> We are using node <script> document.write(process.versions.node) </script>, Chrome <script> document.write(process.versions.chrome) </script>, and Electron <script> document.write(process.versions.electron) </script>. <h3>File Upload in Electron</h3> <button id=\"upload\">Upload File</button> <!-- Adding Individual Renderer Process JS File --> <script src=\"index.js\"></script> </body></html>",
"e": 31348,
"s": 30559,
"text": null
},
{
"code": null,
"e": 31518,
"s": 31348,
"text": "Output: At this point, our application is set up and we can launch the application to check the GUI Output. To launch the Electron Application, run the Command:npm start"
},
{
"code": null,
"e": 31528,
"s": 31518,
"text": "npm start"
},
{
"code": null,
"e": 38529,
"s": 31528,
"text": "Step 4: The Upload File button does not have any functionality associated with it yet. The dialog.showOpenDialog(browserWindow, options) takes in the following parameters.browserWindow: BrowserWindow (Optional) The BrowserWindow Instance. This argument allows the dialog to attach itself to the parent window, making it a modal. A modal window is a child window that disables the parent window. If BrowserWindow is not shown dialog will not be attached to it. In such case It will be displayed as independent window. In the above code, the BrowserWindow instance is not being passed to the dialog, therefore the dialog opens as an independent window on clicking the Upload File button.options: Object It takes in the following parameters,title: String (Optional) The title to be displayed on the dialog window.defaultPath: String (Optional) The directory to be opened as defined by the default path on clicking the Upload File button.buttonLabel: String (Optional) Custom label for the confirmation Button. If empty, the default label will be used. In the above code it is defined as Upload.message: String (Optional) This parameter is supported in macOS only. This is used to display the custom message above input boxes.securityScopedBookmarks: Boolean (Optional) This parameter is supported in macOS only. This parameter is used to create security scoped bookmarks when packaged for the Mac App Store. For more detailed Information, Refer this link.filters: FileFilter[{}] (Optional) It is an Array of Objects. It defines an array of file types that can be displayed or selected when we want to limit the user to a specific type. We can define multiple file types object belonging to different categories. The FileFilter object takes in the following parameters,name: String The name of the category of extensions.extensions: [] The extensions array should consist of extensions without wildcards or dots as demonstrated in the code. To show all files, use the * wildcard (no other wildcard is supported). For more detailed Information, Refer this link.In the above code, we want to restrict the user to Text files only. Hence we have defined the name as Text Files and the extensions array as [‘txt’, ‘docx’].properties: String[] (Optional) Contains a list of features which are available for the native dialog. It take take in the following values,openFile: Allows the Files to be selected.openDirectory: Allows directory/folders to be selected.multiSelections: Allows multiple files to be selected in the dialog.showHiddenFiles: Show Hidden files in dialog.createDirectory: This value is supported in macOS only. It allows creating new directories from within the dialog. In Windows, the context-menu is pre-available in the dialog (right-click in dialog window) and we can create new files and directories from it.promptToCreate: This value is supported in Windows only. This value is used when the filepath entered in the dialog does not exist on the System. At this time, it should prompt the user to create. This does not actually create the file at the path but allows non-existent paths to be returned that can further be created and used by the application.noResolveAliases: This value is supported in macOS only. It disables the automatic resolution of filepath of the alias to its original target. Selected aliases will now return the alias path instead of their target path.treatPackageAsDirectory: This value is supported in macOS only. 
It treats packages such as .app folders, as a directory instead of a file.dontAddToRecent: This value is supported in Windows only. This value signifies that the file/directory being chosen should not be added to the recent documents list.Note: According to the official Electron documentation, the open dialog cannot be both a file Selector and a directory selector in Windows and Linux OS. If both the properties are specified, [‘openFile’, ‘openDirectory’] on these platforms, directory selector will be shown and we won’t be able to select files. Hence, in the code, we have specified different dialog properties for win32/linux and darwin platform.The dialog.showOpenDialog(browserWindow, options) returns a Promise. It resolves to an Object containing the following parameters,canceled: Boolean Whether or not the dialog operation was cancelled.filePaths: String[] An Array of filepaths chosen by the user. If the dialog operation is cancelled, it is going to be an empty Array. In case the multiSelections value is not provided in properties, the filePaths array will return a single element.bookmarks: String[] (Optional) This String array is supported in macOS only. This is returned when the securityScopedBookmarks parameter is specified as true in the options Object.index.js: Add the following snippet in that file.const electron = require('electron');const path = require('path'); // Importing dialog module using remoteconst dialog = electron.remote.dialog; var uploadFile = document.getElementById('upload'); // Defining a Global file path Variable to store // user-selected fileglobal.filepath = undefined; uploadFile.addEventListener('click', () => {// If the platform is 'win32' or 'Linux' if (process.platform !== 'darwin') { // Resolves to a Promise<Object> dialog.showOpenDialog({ title: 'Select the File to be uploaded', defaultPath: path.join(__dirname, '../assets/'), buttonLabel: 'Upload', // Restricting the user to only Text Files. filters: [ { name: 'Text Files', extensions: ['txt', 'docx'] }, ], // Specifying the File Selector Property properties: ['openFile'] }).then(file => { // Stating whether dialog operation was // cancelled or not. console.log(file.canceled); if (!file.canceled) { // Updating the GLOBAL filepath variable // to user-selected file. global.filepath = file.filePaths[0].toString(); console.log(global.filepath); } }).catch(err => { console.log(err) }); } else { // If the platform is 'darwin' (macOS) dialog.showOpenDialog({ title: 'Select the File to be uploaded', defaultPath: path.join(__dirname, '../assets/'), buttonLabel: 'Upload', filters: [ { name: 'Text Files', extensions: ['txt', 'docx'] }, ], // Specifying the File Selector and Directory // Selector Property In macOS properties: ['openFile', 'openDirectory'] }).then(file => { console.log(file.canceled); if (!file.canceled) { global.filepath = file.filePaths[0].toString(); console.log(global.filepath); } }).catch(err => { console.log(err) }); }});Output:"
},
{
"code": null,
"e": 39044,
"s": 38529,
"text": "browserWindow: BrowserWindow (Optional) The BrowserWindow Instance. This argument allows the dialog to attach itself to the parent window, making it a modal. A modal window is a child window that disables the parent window. If BrowserWindow is not shown dialog will not be attached to it. In such case It will be displayed as independent window. In the above code, the BrowserWindow instance is not being passed to the dialog, therefore the dialog opens as an independent window on clicking the Upload File button."
},
{
"code": null,
"e": 42467,
"s": 39044,
"text": "options: Object It takes in the following parameters,title: String (Optional) The title to be displayed on the dialog window.defaultPath: String (Optional) The directory to be opened as defined by the default path on clicking the Upload File button.buttonLabel: String (Optional) Custom label for the confirmation Button. If empty, the default label will be used. In the above code it is defined as Upload.message: String (Optional) This parameter is supported in macOS only. This is used to display the custom message above input boxes.securityScopedBookmarks: Boolean (Optional) This parameter is supported in macOS only. This parameter is used to create security scoped bookmarks when packaged for the Mac App Store. For more detailed Information, Refer this link.filters: FileFilter[{}] (Optional) It is an Array of Objects. It defines an array of file types that can be displayed or selected when we want to limit the user to a specific type. We can define multiple file types object belonging to different categories. The FileFilter object takes in the following parameters,name: String The name of the category of extensions.extensions: [] The extensions array should consist of extensions without wildcards or dots as demonstrated in the code. To show all files, use the * wildcard (no other wildcard is supported). For more detailed Information, Refer this link.In the above code, we want to restrict the user to Text files only. Hence we have defined the name as Text Files and the extensions array as [‘txt’, ‘docx’].properties: String[] (Optional) Contains a list of features which are available for the native dialog. It take take in the following values,openFile: Allows the Files to be selected.openDirectory: Allows directory/folders to be selected.multiSelections: Allows multiple files to be selected in the dialog.showHiddenFiles: Show Hidden files in dialog.createDirectory: This value is supported in macOS only. It allows creating new directories from within the dialog. In Windows, the context-menu is pre-available in the dialog (right-click in dialog window) and we can create new files and directories from it.promptToCreate: This value is supported in Windows only. This value is used when the filepath entered in the dialog does not exist on the System. At this time, it should prompt the user to create. This does not actually create the file at the path but allows non-existent paths to be returned that can further be created and used by the application.noResolveAliases: This value is supported in macOS only. It disables the automatic resolution of filepath of the alias to its original target. Selected aliases will now return the alias path instead of their target path.treatPackageAsDirectory: This value is supported in macOS only. It treats packages such as .app folders, as a directory instead of a file.dontAddToRecent: This value is supported in Windows only. This value signifies that the file/directory being chosen should not be added to the recent documents list.Note: According to the official Electron documentation, the open dialog cannot be both a file Selector and a directory selector in Windows and Linux OS. If both the properties are specified, [‘openFile’, ‘openDirectory’] on these platforms, directory selector will be shown and we won’t be able to select files. Hence, in the code, we have specified different dialog properties for win32/linux and darwin platform."
},
{
"code": null,
"e": 42540,
"s": 42467,
"text": "title: String (Optional) The title to be displayed on the dialog window."
},
{
"code": null,
"e": 42665,
"s": 42540,
"text": "defaultPath: String (Optional) The directory to be opened as defined by the default path on clicking the Upload File button."
},
{
"code": null,
"e": 42823,
"s": 42665,
"text": "buttonLabel: String (Optional) Custom label for the confirmation Button. If empty, the default label will be used. In the above code it is defined as Upload."
},
{
"code": null,
"e": 42955,
"s": 42823,
"text": "message: String (Optional) This parameter is supported in macOS only. This is used to display the custom message above input boxes."
},
{
"code": null,
"e": 43186,
"s": 42955,
"text": "securityScopedBookmarks: Boolean (Optional) This parameter is supported in macOS only. This parameter is used to create security scoped bookmarks when packaged for the Mac App Store. For more detailed Information, Refer this link."
},
{
"code": null,
"e": 43948,
"s": 43186,
"text": "filters: FileFilter[{}] (Optional) It is an Array of Objects. It defines an array of file types that can be displayed or selected when we want to limit the user to a specific type. We can define multiple file types object belonging to different categories. The FileFilter object takes in the following parameters,name: String The name of the category of extensions.extensions: [] The extensions array should consist of extensions without wildcards or dots as demonstrated in the code. To show all files, use the * wildcard (no other wildcard is supported). For more detailed Information, Refer this link.In the above code, we want to restrict the user to Text files only. Hence we have defined the name as Text Files and the extensions array as [‘txt’, ‘docx’]."
},
{
"code": null,
"e": 44001,
"s": 43948,
"text": "name: String The name of the category of extensions."
},
{
"code": null,
"e": 44241,
"s": 44001,
"text": "extensions: [] The extensions array should consist of extensions without wildcards or dots as demonstrated in the code. To show all files, use the * wildcard (no other wildcard is supported). For more detailed Information, Refer this link."
},
{
"code": null,
"e": 44399,
"s": 44241,
"text": "In the above code, we want to restrict the user to Text files only. Hence we have defined the name as Text Files and the extensions array as [‘txt’, ‘docx’]."
},
{
"code": null,
"e": 46294,
"s": 44399,
"text": "properties: String[] (Optional) Contains a list of features which are available for the native dialog. It take take in the following values,openFile: Allows the Files to be selected.openDirectory: Allows directory/folders to be selected.multiSelections: Allows multiple files to be selected in the dialog.showHiddenFiles: Show Hidden files in dialog.createDirectory: This value is supported in macOS only. It allows creating new directories from within the dialog. In Windows, the context-menu is pre-available in the dialog (right-click in dialog window) and we can create new files and directories from it.promptToCreate: This value is supported in Windows only. This value is used when the filepath entered in the dialog does not exist on the System. At this time, it should prompt the user to create. This does not actually create the file at the path but allows non-existent paths to be returned that can further be created and used by the application.noResolveAliases: This value is supported in macOS only. It disables the automatic resolution of filepath of the alias to its original target. Selected aliases will now return the alias path instead of their target path.treatPackageAsDirectory: This value is supported in macOS only. It treats packages such as .app folders, as a directory instead of a file.dontAddToRecent: This value is supported in Windows only. This value signifies that the file/directory being chosen should not be added to the recent documents list.Note: According to the official Electron documentation, the open dialog cannot be both a file Selector and a directory selector in Windows and Linux OS. If both the properties are specified, [‘openFile’, ‘openDirectory’] on these platforms, directory selector will be shown and we won’t be able to select files. Hence, in the code, we have specified different dialog properties for win32/linux and darwin platform."
},
{
"code": null,
"e": 46337,
"s": 46294,
"text": "openFile: Allows the Files to be selected."
},
{
"code": null,
"e": 46393,
"s": 46337,
"text": "openDirectory: Allows directory/folders to be selected."
},
{
"code": null,
"e": 46462,
"s": 46393,
"text": "multiSelections: Allows multiple files to be selected in the dialog."
},
{
"code": null,
"e": 46508,
"s": 46462,
"text": "showHiddenFiles: Show Hidden files in dialog."
},
{
"code": null,
"e": 46767,
"s": 46508,
"text": "createDirectory: This value is supported in macOS only. It allows creating new directories from within the dialog. In Windows, the context-menu is pre-available in the dialog (right-click in dialog window) and we can create new files and directories from it."
},
{
"code": null,
"e": 47117,
"s": 46767,
"text": "promptToCreate: This value is supported in Windows only. This value is used when the filepath entered in the dialog does not exist on the System. At this time, it should prompt the user to create. This does not actually create the file at the path but allows non-existent paths to be returned that can further be created and used by the application."
},
{
"code": null,
"e": 47338,
"s": 47117,
"text": "noResolveAliases: This value is supported in macOS only. It disables the automatic resolution of filepath of the alias to its original target. Selected aliases will now return the alias path instead of their target path."
},
{
"code": null,
"e": 47477,
"s": 47338,
"text": "treatPackageAsDirectory: This value is supported in macOS only. It treats packages such as .app folders, as a directory instead of a file."
},
{
"code": null,
"e": 47643,
"s": 47477,
"text": "dontAddToRecent: This value is supported in Windows only. This value signifies that the file/directory being chosen should not be added to the recent documents list."
},
{
"code": null,
"e": 48058,
"s": 47643,
"text": "Note: According to the official Electron documentation, the open dialog cannot be both a file Selector and a directory selector in Windows and Linux OS. If both the properties are specified, [‘openFile’, ‘openDirectory’] on these platforms, directory selector will be shown and we won’t be able to select files. Hence, in the code, we have specified different dialog properties for win32/linux and darwin platform."
},
{
"code": null,
"e": 48189,
"s": 48058,
"text": "The dialog.showOpenDialog(browserWindow, options) returns a Promise. It resolves to an Object containing the following parameters,"
},
{
"code": null,
"e": 48258,
"s": 48189,
"text": "canceled: Boolean Whether or not the dialog operation was cancelled."
},
{
"code": null,
"e": 48507,
"s": 48258,
"text": "filePaths: String[] An Array of filepaths chosen by the user. If the dialog operation is cancelled, it is going to be an empty Array. In case the multiSelections value is not provided in properties, the filePaths array will return a single element."
},
{
"code": null,
"e": 50948,
"s": 48507,
"text": "bookmarks: String[] (Optional) This String array is supported in macOS only. This is returned when the securityScopedBookmarks parameter is specified as true in the options Object.index.js: Add the following snippet in that file.const electron = require('electron');const path = require('path'); // Importing dialog module using remoteconst dialog = electron.remote.dialog; var uploadFile = document.getElementById('upload'); // Defining a Global file path Variable to store // user-selected fileglobal.filepath = undefined; uploadFile.addEventListener('click', () => {// If the platform is 'win32' or 'Linux' if (process.platform !== 'darwin') { // Resolves to a Promise<Object> dialog.showOpenDialog({ title: 'Select the File to be uploaded', defaultPath: path.join(__dirname, '../assets/'), buttonLabel: 'Upload', // Restricting the user to only Text Files. filters: [ { name: 'Text Files', extensions: ['txt', 'docx'] }, ], // Specifying the File Selector Property properties: ['openFile'] }).then(file => { // Stating whether dialog operation was // cancelled or not. console.log(file.canceled); if (!file.canceled) { // Updating the GLOBAL filepath variable // to user-selected file. global.filepath = file.filePaths[0].toString(); console.log(global.filepath); } }).catch(err => { console.log(err) }); } else { // If the platform is 'darwin' (macOS) dialog.showOpenDialog({ title: 'Select the File to be uploaded', defaultPath: path.join(__dirname, '../assets/'), buttonLabel: 'Upload', filters: [ { name: 'Text Files', extensions: ['txt', 'docx'] }, ], // Specifying the File Selector and Directory // Selector Property In macOS properties: ['openFile', 'openDirectory'] }).then(file => { console.log(file.canceled); if (!file.canceled) { global.filepath = file.filePaths[0].toString(); console.log(global.filepath); } }).catch(err => { console.log(err) }); }});"
},
{
"code": null,
"e": 50998,
"s": 50948,
"text": "index.js: Add the following snippet in that file."
},
{
"code": "const electron = require('electron');const path = require('path'); // Importing dialog module using remoteconst dialog = electron.remote.dialog; var uploadFile = document.getElementById('upload'); // Defining a Global file path Variable to store // user-selected fileglobal.filepath = undefined; uploadFile.addEventListener('click', () => {// If the platform is 'win32' or 'Linux' if (process.platform !== 'darwin') { // Resolves to a Promise<Object> dialog.showOpenDialog({ title: 'Select the File to be uploaded', defaultPath: path.join(__dirname, '../assets/'), buttonLabel: 'Upload', // Restricting the user to only Text Files. filters: [ { name: 'Text Files', extensions: ['txt', 'docx'] }, ], // Specifying the File Selector Property properties: ['openFile'] }).then(file => { // Stating whether dialog operation was // cancelled or not. console.log(file.canceled); if (!file.canceled) { // Updating the GLOBAL filepath variable // to user-selected file. global.filepath = file.filePaths[0].toString(); console.log(global.filepath); } }).catch(err => { console.log(err) }); } else { // If the platform is 'darwin' (macOS) dialog.showOpenDialog({ title: 'Select the File to be uploaded', defaultPath: path.join(__dirname, '../assets/'), buttonLabel: 'Upload', filters: [ { name: 'Text Files', extensions: ['txt', 'docx'] }, ], // Specifying the File Selector and Directory // Selector Property In macOS properties: ['openFile', 'openDirectory'] }).then(file => { console.log(file.canceled); if (!file.canceled) { global.filepath = file.filePaths[0].toString(); console.log(global.filepath); } }).catch(err => { console.log(err) }); }});",
"e": 53210,
"s": 50998,
"text": null
},
{
"code": null,
"e": 53218,
"s": 53210,
"text": "Output:"
},
{
"code": null,
"e": 55435,
"s": 53218,
"text": "Step 5: Once we have obtained the file path from the dialog window, we can follow any of the two approaches:Approach 1: Upload the file to a server by making an HTTP POST REST API call and let the server handle the file processing. We will use the axios package that was installed earlier to achieve this.In index.js file, add the following code just after console.log(global.filepath); } within the Promiseindex.js:const fs = require('fs');const axios = require('axios'); if (global.filepath && !file.canceled) { var formData = new FormData(); formData.append('file', fs.createReadStream(global.filepath)); axios.post('[Custom URL]', formData, { headers: { 'Content-Type': 'multipart/form-data' } }); } // ...Explanation: We have appended the file to a formData object using the fs module, and set the Content-Type header to multipart/form-data for the POST Request. Replace the [Custom URL] with a URL of the REST API to receive the file on the server side. Once we have received the file on the server side, we can let the server handle the file processing and display the respective response.Approach 2: Process the file on the system itself by reading/manipulating the contents of the file. We can use the fs module to read the contents of the file, and then perform further manipulations as required.In index.js file, add the following code just after console.log(global.filepath); } within the Promiseconst fs = require('fs');const fs = require('fs'); if (global.filepath && !file.canceled) { fs.readFile(global.filepath, {encoding: 'utf-8'}, function(err,data) { if (!err) { console.log('received data: ' + data); } else { console.log(err); } }); } //...Explanation: We will read the file from the GLOBAL file path variable that was updated from the dialog window with standard UTF-8 encoding. Once we have successfully fetched the contents of the file, we can update/manipulate the contents as per the desired functionality.sample.txt:Output:Video Playerhttps://media.geeksforgeeks.org/wp-content/uploads/20200507193706/Output-22.mp400:0000:0000:18Use Up/Down Arrow keys to increase or decrease volume."
},
{
"code": null,
"e": 56493,
"s": 55435,
"text": "Approach 1: Upload the file to a server by making an HTTP POST REST API call and let the server handle the file processing. We will use the axios package that was installed earlier to achieve this.In index.js file, add the following code just after console.log(global.filepath); } within the Promiseindex.js:const fs = require('fs');const axios = require('axios'); if (global.filepath && !file.canceled) { var formData = new FormData(); formData.append('file', fs.createReadStream(global.filepath)); axios.post('[Custom URL]', formData, { headers: { 'Content-Type': 'multipart/form-data' } }); } // ...Explanation: We have appended the file to a formData object using the fs module, and set the Content-Type header to multipart/form-data for the POST Request. Replace the [Custom URL] with a URL of the REST API to receive the file on the server side. Once we have received the file on the server side, we can let the server handle the file processing and display the respective response."
},
{
"code": "const fs = require('fs');const axios = require('axios'); if (global.filepath && !file.canceled) { var formData = new FormData(); formData.append('file', fs.createReadStream(global.filepath)); axios.post('[Custom URL]', formData, { headers: { 'Content-Type': 'multipart/form-data' } }); } // ...",
"e": 56857,
"s": 56493,
"text": null
},
{
"code": null,
"e": 57244,
"s": 56857,
"text": "Explanation: We have appended the file to a formData object using the fs module, and set the Content-Type header to multipart/form-data for the POST Request. Replace the [Custom URL] with a URL of the REST API to receive the file on the server side. Once we have received the file on the server side, we can let the server handle the file processing and display the respective response."
},
{
"code": null,
"e": 58296,
"s": 57244,
"text": "Approach 2: Process the file on the system itself by reading/manipulating the contents of the file. We can use the fs module to read the contents of the file, and then perform further manipulations as required.In index.js file, add the following code just after console.log(global.filepath); } within the Promiseconst fs = require('fs');const fs = require('fs'); if (global.filepath && !file.canceled) { fs.readFile(global.filepath, {encoding: 'utf-8'}, function(err,data) { if (!err) { console.log('received data: ' + data); } else { console.log(err); } }); } //...Explanation: We will read the file from the GLOBAL file path variable that was updated from the dialog window with standard UTF-8 encoding. Once we have successfully fetched the contents of the file, we can update/manipulate the contents as per the desired functionality.sample.txt:Output:Video Playerhttps://media.geeksforgeeks.org/wp-content/uploads/20200507193706/Output-22.mp400:0000:0000:18Use Up/Down Arrow keys to increase or decrease volume."
},
{
"code": "const fs = require('fs');const fs = require('fs'); if (global.filepath && !file.canceled) { fs.readFile(global.filepath, {encoding: 'utf-8'}, function(err,data) { if (!err) { console.log('received data: ' + data); } else { console.log(err); } }); } //...",
"e": 58587,
"s": 58296,
"text": null
},
{
"code": null,
"e": 58859,
"s": 58587,
"text": "Explanation: We will read the file from the GLOBAL file path variable that was updated from the dialog window with standard UTF-8 encoding. Once we have successfully fetched the contents of the file, we can update/manipulate the contents as per the desired functionality."
},
{
"code": null,
"e": 58871,
"s": 58859,
"text": "sample.txt:"
},
{
"code": null,
"e": 58879,
"s": 58871,
"text": "Output:"
},
{
"code": null,
"e": 58890,
"s": 58879,
"text": "ElectronJS"
},
{
"code": null,
"e": 58901,
"s": 58890,
"text": "JavaScript"
},
{
"code": null,
"e": 58918,
"s": 58901,
"text": "Web Technologies"
}
]
|
Transfer Learning with Fruit Classification | by James Nelson | Towards Data Science | For those of you who don't know what deep learning is, it's a branch of machine learning that falls under the category of Artificial Intelligence. This type of technology isn't I, Robot artificial intelligence, and we as a species are pretty far from actually developing something like that. However, deep learning and many other machine learning approaches are as close as it gets when we talk about programmable computers acting like humans.
So, briefly, deep learning is a machine learning approach that goes beyond shallower methods that may only require one or two layers of learning, which could be called shallow learning. Deep learning, in contrast, is layers upon layers of learning. The models that represent these layers are referred to as neural networks; the name stems from the study of neurobiology, but don't get confused, because a neural network isn't actually built to emulate how a brain functions. Neural networks come in many different forms, but we will focus on only one for our transfer learning: the convolutional neural network.
So what is a convolutional neural network?
A convolutional neural network is just another method for training the network (model) in order to give accurate classifications. Out of all the neural networks out there, convolutional neural networks perform great for computer vision tasks. What makes CNNs (or convnets) amazing is that the patterns they learn from images are translation invariant: if they pick up on a pattern in the corner of one image, they will recognize that same pattern in the corner of any other image, whereas a regular network would have to re-learn it over and over again. Convnets are also able to learn spatial hierarchies of patterns, meaning each layer of a convnet will learn something different. The first layer may learn small patterns, and the next layer may learn bigger patterns built from the features of the first layer. These features are obtained through the convolution function.
The convolution function is used to obtain feature maps (feature matrices) for a convolutional layer. At the baseline, convnets have configurable weights that make up a kernel. A kernel is used to extract distinct features from an input image (the input layer); for example, it can be used to capture the image's sharpness or edges, or to gather the information needed to detect an edge. A kernel can be represented as an n*n matrix containing many unique values. The kernel convolutes (slides and multiplies) over the input image. Say the input image is (10,10) and the kernel is (3,3): the first slide (stride) multiplies the kernel against the 9 pixels in the very top left corner of the input image to produce a single pixel in the top left corner of a new matrix called a feature map.
This multiplication continues as the kernel slides across the input image like so.
This process continues until the entire feature matrix has been filled with these convoluted values, and once the feature matrix is complete, it is stacked inside a convolutional layer. If the network is designed to do so, another kernel will produce another feature matrix from the same input image, and that next feature matrix is stored within the same convolutional layer.
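The same sliding-and-multiplying idea can be written out in a few lines of NumPy. This is just a toy sketch with made-up values, separate from the Keras code used later in the post:
import numpy as np

np.random.seed(0)
image = np.random.rand(10, 10)   # toy (10,10) input image
kernel = np.random.rand(3, 3)    # toy (3,3) kernel

# The kernel slides over the image, multiplying and summing at each position,
# producing a (10 - 3 + 1) x (10 - 3 + 1) = 8 x 8 feature map.
h, w = image.shape
kh, kw = kernel.shape
feature_map = np.zeros((h - kh + 1, w - kw + 1))
for i in range(feature_map.shape[0]):
    for j in range(feature_map.shape[1]):
        patch = image[i:i + kh, j:j + kw]
        feature_map[i, j] = np.sum(patch * kernel)

print(feature_map.shape)  # (8, 8)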
To keep it short, training a convolutional neural network is about finding the right values for each of the kernels so that when an input image is passed through the layers, it activates the right neurons on the last output layer to predict and accurately classify the image.
Transfer learning makes life easier and better for everyone. Although creating convolutional neural networks from scratch is fun, they can be a bit pricey and cost a lot of computational power as well. So, to reduce the amount of power needed for the network, we use transfer learning: pre-trained weights that have already undergone training on another dataset, used to boost the performance of our own network. What makes pre-trained models an optimal choice is that they have already been configured and trained, for many days at a time, on millions of images spanning thousands of classes, providing the highly capable pre-trained weights we need to train a network of our own with ease (Aditya Ananthram, 2018).
Now in order to demonstrate the practical application of transfer learning’s capabilities, I will cover the data that was used, the pre-trained model of choice, the model architecture, and then the code.
The dataset contains 81,104 images of different fruits and vegetables, spread across 120 unique classes. The total number of images is split into training and testing datasets: the training dataset contains 60,486 images and the testing dataset contains 20,618 images.
All images are 100x100 pixels and were collected with a Logitech C920 camera that was used to film the fruits/vegetables (Mihai Oltean, 2019). Each fruit or vegetable was planted in the shaft of a low-speed motor and recorded for a short duration of 20 seconds. The testing images of the fruits and vegetables were taken with a Nexus 5X smartphone.
The transfer learning model of choice is called InceptionV3. The model is a convolutional neural network designed to be 48 layers deep and trained on image shapes of 299 by 299. The original Inception architecture was called "GoogLeNet", a 27-layer-deep convolutional neural network built back in 2014 (Shaikh, 2018). The name of the model derives from the movie "Inception" directed by Christopher Nolan, based on the concept of going deeper into a dream, "a dream within a dream", which translates here to a convolutional neural network within a convolutional neural network.
The idea behind GoogLeNet's design was to eliminate the overfitting issues commonly found when working with deeper neural networks. Overfitting usually occurs when a dataset that is too small is trained within a large neural network, and the problem overfitting presents is a misrepresentation of the validation (testing) accuracy of the model. Testing accuracy measures how precisely the trained network predicts images it hasn't seen. The solution that lets an enormous network still produce this accuracy is to create a sparsely connected neural network in place of a fully connected one (Shaikh, 2018), and that is the reason the GoogLeNet model won the ImageNet Visual Recognition Challenge with a predictive accuracy of 80%+ back in 2014.
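In practice, GoogLeNet approximates that sparse structure with inception modules that run several small convolutions in parallel and concatenate their outputs. Here is a simplified Keras sketch of such a block; it is not the exact InceptionV3 module, and the filter counts are arbitrary:
from tensorflow.keras import Input, Model, layers

inp = Input(shape=(32, 32, 192))  # toy input tensor

# Parallel branches concatenated along the channel axis (a naive inception-style block)
branch1 = layers.Conv2D(64, (1, 1), padding='same', activation='relu')(inp)
branch3 = layers.Conv2D(128, (3, 3), padding='same', activation='relu')(inp)
branch5 = layers.Conv2D(32, (5, 5), padding='same', activation='relu')(inp)
pool = layers.MaxPooling2D((3, 3), strides=1, padding='same')(inp)

block = layers.concatenate([branch1, branch3, branch5, pool], axis=-1)
Model(inp, block).summary()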
The InceptionV3 model is connected to two fully connected layers at the bottom, but has its dimensionality reduced from 3D to 1D with Global Average Pooling 2D before this connection. The pooling outputs one response for every feature matrix. After the pooling, the next layer of the architecture is the first dense hidden layer, with 512 units (neurons), which is connected to the final output layer with 120 neurons to match the number of fruit and vegetable classes. This is what the InceptionV3 architecture looks like.
And this is what the bottom fully connected layers attached to the architecture look like.
It is also worth mentioning that these pre-trained models, and the weights associated with them, can be fine-tuned. I can choose which weights I want to freeze in the model, whether the top half, the bottom half, the middle, or all of the weights. Whichever portion of the pre-trained model I freeze won't have trainable weights that can be updated for the model I am making. I can also choose which image dataset the weights were trained on, but for this example, and through trial and error, I chose not to freeze any weights. My implementation of InceptionV3 uses the weights pre-trained on ImageNet. "ImageNet is an image database organized according to the WordNet hierarchy (currently only the nouns), in which each node of the hierarchy is depicted by hundreds and thousands of images. Currently we have an average of over five hundred images per node." ("ImageNet," 2017)
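For reference, freezing part or all of the pre-trained base (the option I ultimately did not take) would look roughly like this sketch, where conv_base mirrors the InceptionV3 base defined later in the post:
# A sketch of freezing the ImageNet weights instead of fine-tuning them
# (not what is done in this post, where all layers stay trainable).
from tensorflow.keras.applications.inception_v3 import InceptionV3

conv_base = InceptionV3(weights='imagenet', include_top=False)

# Option 1: freeze every layer of the pre-trained base
conv_base.trainable = False

# Option 2: freeze only a portion, e.g. everything except the last 30 layers
for layer in conv_base.layers[:-30]:
    layer.trainable = False
for layer in conv_base.layers[-30:]:
    layer.trainable = True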
Now that you have an idea of what the dataset looks like and an idea of the model architecture, it’s time to execute.
Loading the Libraries.
First things first: we have to load the necessary libraries. When loading them, ensure that all the modules needed are imported so we can prepare the data and train the model.
# read in libraries
import tensorflow as tf
from tensorflow.keras import backend, models, layers, optimizers
import numpy as np
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import plot_model
from IPython.display import display
from PIL import Image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os, shutil
from tensorflow.keras.models import Model

np.random.seed(42)
Load the Data and Prepare it.
Next, in order to prepare the data, we need to set up a train_datagen and a test_datagen with ImageDataGenerator. Then, with those generators, we resize the images of the training and testing data to match the pre-trained model's pixel image inputs. This ensures that the neural network doesn't learn irrelevant patterns and, in return, boosts overall performance.
# Specify the base directory where images are located.
base_dir = '/kaggle/input/fruits/fruits-360/'

# Specify the training and test directories.
train_dir = os.path.join(base_dir, 'Training')
test_dir = os.path.join(base_dir, 'Test')

# Normalize the pixels in the train data images, resize and augment the data.
train_datagen = ImageDataGenerator(
    rescale=1./255,           # The image augmentation function in Keras
    shear_range=0.2,
    zoom_range=0.2,           # Zoom in on image by 20%
    horizontal_flip=True)     # Flip image horizontally

# Normalize the test data images, resize them but don't augment them
test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(299, 299),
    batch_size=16,
    class_mode='categorical')

test_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size=(299, 299),
    batch_size=16,
    class_mode='categorical')
Prepare the InceptionV3 Model
Now that the images are prepared it is time we import and set up the pre-trained InceptionV3 model for transfer learning.
# Load the InceptionV3 library
from tensorflow.keras.applications.inception_v3 import InceptionV3

# Always clear the backend before training a model
backend.clear_session()

# InceptionV3 model using the weights from ImageNet
conv_base = InceptionV3(weights='imagenet',  # Using the inception_v3 CNN that was trained on ImageNet data.
                        include_top=False)
Create a Functional API Model.
Now let's combine the pre-trained InceptionV3 model weights with the dense layers (fully connected layers) and reduce the dimensionality of the model in between the two.
# Connect the InceptionV3 output to the fully connected layers
InceptionV3_model = conv_base.output
pool = GlobalAveragePooling2D()(InceptionV3_model)
dense_1 = layers.Dense(512, activation='relu')(pool)
output = layers.Dense(120, activation='softmax')(dense_1)
Display the Functional API Model.
To get an understanding of the model architecture, we can display the functional API model as a whole to visually see the depth of the network.
# Create an example of the architecture to plot on a graph
model_example = models.Model(inputs=conv_base.input, outputs=output)

# plot graph
plot_model(model_example)
(The model is way too big to display here on Medium, click this link to see)
Define the Model and Compile it.
In order for us to train the model, we need to define the functional API model and compile it with categorical cross-entropy as the loss function and Stochastic Gradient Descent as the optimizer, with learning rate and momentum parameters.
# Define/Create the model for training
model_InceptionV3 = models.Model(inputs=conv_base.input, outputs=output)

# Compile the model with categorical crossentropy for the loss function and SGD for the optimizer
# with the learning rate at 1e-4 and momentum at 0.9
model_InceptionV3.compile(loss='categorical_crossentropy',
                          optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
                          metrics=['accuracy'])
Check the Device List for the GPU to Use.
Now, I recommend using a GPU to train this model, since the InceptionV3 model has over 21 million parameters and training on a CPU could take days to complete. If you have a GPU you can use your own, but I used the GPUs Kaggle supplies to their notebooks, which took me about 20–25 min for the training to complete. First, find the usable GPU device so the training process can be sped up.
# Import from tensorflow the module to read the GPU device and then print
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
Train the Model.
After finding the GPU to use we will incorporate it into our code to finally train the model with the train_generator for the training data and the validation_data parameter set to the test_generator.
# Execute the model with fit_generator within the with block utilizing the discovered GPU
import tensorflow as tf

with tf.device("/device:GPU:0"):
    history = model_InceptionV3.fit_generator(
        train_generator,
        epochs=5,
        validation_data=test_generator,
        verbose=1,
        callbacks=[EarlyStopping(monitor='val_accuracy', patience=5, restore_best_weights=True)])
99% Validation accuracy with a loss of 0.0187 is more than good.
Display the Model’s Testing Accuracy and Testing Loss Value
Now let’s see how our model looks by plotting the training accuracy/validation accuracy and training loss/validation loss across the epochs, then print the final test accuracy and test loss.
# Create a dictionary of the model history
import matplotlib.pyplot as plt

history_dict = history.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
acc_values = history_dict['accuracy']
val_acc_values = history_dict['val_accuracy']
epochs = range(1, len(history_dict['accuracy']) + 1)

# Plot the training/validation loss
plt.plot(epochs, loss_values, 'bo', label='Training loss')
plt.plot(epochs, val_loss_values, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()

# Plot the training/validation accuracy
plt.plot(epochs, acc_values, 'bo', label='Training accuracy')
plt.plot(epochs, val_acc_values, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

# Evaluate the test accuracy and test loss of the model
test_loss, test_acc = model_InceptionV3.evaluate_generator(test_generator)
print('Model testing accuracy/testing loss:', test_acc, " ", test_loss)
The results for accurately predicting 120 classes of fruit and vegetable images come out to 99% testing accuracy with a loss value of 0.0187. The loss value is a measure of how far off our outputs are from what we expected. The training was done over 5 epochs, each taking around 20–25 min to complete, to achieve this accuracy with the help of Kaggle's GPU to speed up the process. The training data was a 3781-step process (iterations) per epoch, taking a batch of 16 samples at a time to be propagated forward and backward to give us one pass. One pass equals one iteration.
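As a quick sanity check, that step count follows directly from the dataset size and the batch size:
import math

train_images = 60486   # training images in Fruits 360
batch_size = 16

steps_per_epoch = math.ceil(train_images / batch_size)
print(steps_per_epoch)  # 3781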
In conclusion, transfer learning is a very effective way to train models to recognize and classify images. It allows for a fast setup without designing a convolutional neural network architecture in detail from scratch, and it provides high accuracy thanks to the pre-trained model's previous training. You can head over to my project's Kaggle notebook for anything further.
Aditya Ananthram. (2018, October 17). Deep Learning For Beginners Using Transfer Learning In Keras. Retrieved April 24, 2020, from Medium website: https://towardsdatascience.com/keras-transfer-learning-for-beginners-6c9b8b7143e
Shaikh, F. (2018, October 18). Understanding Inception Network from Scratch (with Python codes). Retrieved May 7, 2020, from Analytics Vidhya website: https://www.analyticsvidhya.com/blog/2018/10/understanding-inception-network-from-scratch/
Chollet, F. (2018). Deep Learning with Python. Shelter Island (New York, Estados Unidos): Manning, Cop.
File:Valid-padding-convolution.gif — Wikimedia Commons. (2018, July 6). Retrieved May 6, 2020, from Wikimedia.org website: https://commons.wikimedia.org/wiki/File:Valid-padding-convolution.gif
Get Free Stock Photos of Concept of Intelligence with Human Brain on Blue Background Online | Download Latest Free Images and Free Illustrations. (2020). Retrieved May 9, 2020, from Freerangestock.com website: https://freerangestock.com/photos/65677/concept-of-intelligence-with-human-brain-on-blue-background.html
Get Free Stock Photos of fruit and vegetables vendor italy Online | Download Latest Free Images and Free Illustrations. (2020). Retrieved May 9, 2020, from Freerangestock.com website: https://freerangestock.com/photos/37652/fruit-and-vegetables-vendor-italy.html
https://github.com/syt123450, syt123450. (2020). Layer — GlobalPooling2d. Retrieved May 6, 2020, from Tensorspace.org website: https://tensorspace.org/html/docs/layerGlobalPooling2d.html
ImageNet. (2017). Retrieved May 7, 2020, from Image-net.org website: http://www.image-net.org/
Krut Patel. (2019, September 8). Convolutional Neural Networks — A Beginner’s Guide — Towards Data Science. Retrieved April 24, 2020, from Medium website: https://towardsdatascience.com/convolution-neural-networks-a-beginners-guide-implementing-a-mnist-hand-written-digit-8aa60330d022
Mihai Oltean. (2020). Fruits 360. Retrieved May 6, 2020, from Kaggle.com website: https://www.kaggle.com/moltean/fruits
Milton-Barker, A. (2019, February 17). Inception V3 Deep Convolutional Architecture For Classifying Acute Myeloid/Lymphoblastic Leukemia. Retrieved May 6, 2020, from Intel.com website: https://software.intel.com/en-us/articles/inception-v3-deep-convolutional-architecture-for-classifying-acute-myeloidlymphoblastic
Prakhar Ganesh. (2019, October 18). Types of Convolution Kernels : Simplified — Towards Data Science. Retrieved April 24, 2020, from Medium website: https://towardsdatascience.com/types-of-convolution-kernels-simplified-f040cb307c37
Unsplash. (2020). Ash from Modern Afflatus. Retrieved May 9, 2020, from Unsplash.com website: https://unsplash.com/@modernafflatusphotography
VisibleBreadcrumbs. (2016). Retrieved April 27, 2020, from Mathworks.com website: https://www.mathworks.com/help/deeplearning/ref/inceptionv3.html
Wikipedia Contributors. (2020, May 2). I, Robot (film). Retrieved May 5, 2020, from Wikipedia website: https://en.wikipedia.org/wiki/I,_Robot_(film) | [
{
"code": null,
"e": 629,
"s": 171,
"text": "For those of you who don’t know what deep learning is, its a genre within machine learning that falls under the category of Artificial Intelligence. This type of technology isn’t I,Robot artificial intelligence and we as a species are pretty far away from actually developing something like that. However, deep learning and many other types of machine learning approaches are as close as it gets when we talk about programmable computers acting like humans."
},
{
"code": null,
"e": 1270,
"s": 629,
"text": "So briefly, deep learning is a machine learning approach that goes beyond smaller methods of learning that may only require one or two layers of learning which could be known as shallow learning. But, deep learning is layers upon layers upon layers of learning. The models that represent these layers are referred to as neural networks in which the name stemmed from the study of neurobiology but don’t get confused because it isn’t actually a network made to emulate how a brain functions. Neural networks come in many different forms, but we will be focusing on only one for our transfer learning, which is a convolutional neural network."
},
{
"code": null,
"e": 1313,
"s": 1270,
"text": "So what is a convolutional neural network?"
},
{
"code": null,
"e": 2194,
"s": 1313,
"text": "A convolutional neural network is just another method for training the network (model) in order to give accurate classification. Out of all the other neural networks out there, convolutional neural networks perform great for computer vision learning. What makes CNNs or (covnets)amazing is the patterns they learn from images are translation invariant, meaning if they pick up on a pattern in the corner of an image they will recognize that same pattern in any different image’s corner, whereas a regular network would have to re-learn it over and over again. Covnets also are able to learn spatial hierarchies of patterns meaning each layer of a Covnet will learn something different. The first layer may learn small patterns and the next layer may learn bigger patterns that are features from the first layer. How these features are obtained is through the convolution function."
},
{
"code": null,
"e": 2988,
"s": 2194,
"text": "The convolution function is used to obtain feature maps (feature matrices) for a convolutional layer. At the baseline, covnets have configured weights that consist of a kernel. A kernel is used to get distinct features from an input image (input layer) like it can be used to gather the input image’s sharpness, edges, or gather information on how to detect an edge. This function can be represented as n*n which is a matrix containing lots of unique values. The kernel convolutes (slides and multiplies) on top of an input image and let's say the input image is (10,10) and the kernel is (3,3). The first slide (stride) will multiply by 9 pixels in the very top left corner of the input image to produce an output of a single-pixel in the top left corner of a new matrix called a feature map."
},
{
"code": null,
"e": 3071,
"s": 2988,
"text": "This multiplication continues as the kernel slides across the input image like so."
},
{
"code": null,
"e": 3454,
"s": 3071,
"text": "This continual process doesn’t stop until the entire feature matrix has been filled with these convoluted values and once the feature matrix is complete it is stacked inside of a convolutional layer. If the network is designed to do so another kernel will produce another feature matrix with the same input image to store the next feature matrix within the same convolutional layer."
},
{
"code": null,
"e": 3748,
"s": 3454,
"text": "To keep it short, the training of a convolutional neural network is about locating all the right values with each of the kernels so that when the input image is passed through the layers it will activated different neurons on the last output layer to predict and accurately classify the image."
},
{
"code": null,
"e": 4503,
"s": 3748,
"text": "Transfer learning makes life easier and better for everyone. Although creating convolutional neural networks from scratch is fun, they can be a bit pricey and cost a lot of computational power as well. So in order to reduce the amount of power needed for the network we use transfer learning which are pre-trained weights that already undergone training on another image in order to increase performance for our network. What makes using pre-trained models an optimal choice is the fact they have already been configured and trained on millions of other images that consist of thousands of classes for many days at a time to provide the highly capable pre-trained weights we need in order to train a network of our own with ease (Aditya Ananthram, 2018)."
},
{
"code": null,
"e": 4707,
"s": 4503,
"text": "Now in order to demonstrate the practical application of transfer learning’s capabilities, I will cover the data that was used, the pre-trained model of choice, the model architecture, and then the code."
},
{
"code": null,
"e": 5018,
"s": 4707,
"text": "The dataset contains 81,104 images of different fruits and vegetables, consisting of 120 unique classifications for each image of fruits and vegetables. The total number of images is split into training and testing datasets. The training dataset contains 60,486 images and the testing dataset is 20,618 images."
},
{
"code": null,
"e": 5396,
"s": 5018,
"text": "The size of all images is 100x100 pixels and was collected with a logitech C920 camera that was used to film the fruits/vegetables (Mihai Oltean, 2019). All fruits and vegetables were planted inside of a shaft with a low-speed motor where they were recorded in a short duration of 20 seconds each. The fruits and vegetables testing images were taken with a Nexus 5X smartphone."
},
{
"code": null,
"e": 5990,
"s": 5396,
"text": "The transfer learning model of choice is called InceptionV3. The model is a convolutional neural network architecturally designed to be 48 layers deep trained on image shapes of 299 by 299. The original Inception architecture network was called “GoogLeNet” which was a 27 layer deep convolutional neural network made back in 2014(Shaikh, 2018). The name of the model derives from the movie “Inception” directed by Christopher Nolan, based on the concept of going deeper into a dream “A dream within a dream,” translating to a convolutional neural network within a convolutional neural network."
},
{
"code": null,
"e": 6796,
"s": 5990,
"text": "The idea behind GoogLeNet’s design was to eliminate the issues found commonly found with overfitting when working on deeper neural networks. Overfitting usually occurs when the dataset is too small and is being trained within a large neural network and the problem overfitting presents is a misrepresentation of the validation accuracy (testing accuracy) of the model. Testing accuracy is the measure of how precise the trained network accurately predicts the images it hasn’t seen. The solution to designing an enormous network to produce this accuracy is to create a sparsely connected neural network in place of a fully connected neural network (Shaikh, 2018), and that is the reason why the GoogLeNet model won the ImageNet Visual Recognition Challenge with a predictive accuracy of 80%+ back in 2014."
},
{
"code": null,
"e": 7333,
"s": 6796,
"text": "The InceptionV3 model is connected to two fully connected layers at the bottom but has its dimensionality reduced from 3D to a 1D with Global Average Pooling 2D before this connection. The pooling will also output one response for every feature matrix. After the pooling, the next layer of the architecture is the first dense hidden layer with 512 units (neurons) which will be connected to the final output layer with 10 neurons to match the number of fruits and vegetable classes. This is what the InceptionV3 architecture looks like."
},
{
"code": null,
"e": 7428,
"s": 7333,
"text": "And this is what the bottom fully connected layers to attached to the architecture looks like."
},
{
"code": null,
"e": 8353,
"s": 7428,
"text": "It is also worth mentioning that fine-tuning these pre-trained models and the weights associated with them. I can choose which weights I want to use from the model and it can be either the top half, bottom half, the middle, or I can freeze all the weights. Do this means that whichever portion of the pre-trained model I freeze won’t be trainable weights that can be updated for the model I am making. I also can choose the weights of which image the model was trained on, but for this example though and through trial and error, I chose not to freeze the weights. My implementation of InceptionV3 will be using the pre-trained weights on ImageNet. “ImageNet is an image database organized according to the WordNet hierarchy (currently only the nouns), in which each node of the hierarchy is depicted by hundreds and thousands of images. Currently we have an average of over five hundred images per node.” (“ImageNet,” 2017)"
},
{
"code": null,
"e": 8471,
"s": 8353,
"text": "Now that you have an idea of what the dataset looks like and an idea of the model architecture, it’s time to execute."
},
{
"code": null,
"e": 8494,
"s": 8471,
"text": "Loading the Libraries."
},
{
"code": null,
"e": 8685,
"s": 8494,
"text": "First things first; we have to load the necessary libraries. When loading the libraries, ensure that all necessary modules needed are imported so we can prepare the data and train the model."
},
{
"code": null,
"e": 9216,
"s": 8685,
"text": "# read in librariesimport tensorflow as tffrom tensorflow.keras import backend, models, layers, optimizersimport numpy as npfrom tensorflow.keras.layers import GlobalAveragePooling2Dfrom tensorflow.keras.callbacks import EarlyStoppingfrom sklearn.model_selection import train_test_splitfrom tensorflow.keras.utils import plot_modelfrom IPython.display import displayfrom PIL import Imagefrom tensorflow.keras.preprocessing.image import ImageDataGeneratorimport os, shutilfrom tensorflow.keras.models import Modelnp.random.seed(42)"
},
{
"code": null,
"e": 9246,
"s": 9216,
"text": "Load the Data and Prepare it."
},
{
"code": null,
"e": 9604,
"s": 9246,
"text": "Next, in order to prepare the data, we need to set up a train_datagen and test_datagen with ImageDataGenerator. Then with those generators resize the images of the training data and testing data to match the pre-trained model’s pixel image inputs. To ensure that the neural network doesn’t learn irrelevant patterns and in return boosts overall performance."
},
{
"code": null,
"e": 10535,
"s": 9604,
"text": "# Specify the base directory where images are located.base_dir = '/kaggle/input/fruits/fruits-360/'# Specify the traning, validation, and test dirrectories. train_dir = os.path.join(base_dir, 'Training')test_dir = os.path.join(base_dir, 'Test')# Normalize the pixels in the train data images, resize and augment the data.train_datagen = ImageDataGenerator( rescale=1./255,# The image augmentaion function in Keras shear_range=0.2, zoom_range=0.2, # Zoom in on image by 20% horizontal_flip=True) # Flip image horizontally # Normalize the test data imagees, resize them but don't augment themtest_datagen = ImageDataGenerator(rescale=1./255) train_generator = train_datagen.flow_from_directory( train_dir, target_size=(299, 299), batch_size=16, class_mode='categorical')test_generator = test_datagen.flow_from_directory( test_dir, target_size=(299, 299), batch_size=16, class_mode='categorical')"
},
{
"code": null,
"e": 10565,
"s": 10535,
"text": "Prepare the InceptionV3 Model"
},
{
"code": null,
"e": 10687,
"s": 10565,
"text": "Now that the images are prepared it is time we import and set up the pre-trained InceptionV3 model for transfer learning."
},
{
"code": null,
"e": 11055,
"s": 10687,
"text": "# Load InceptionV3 libraryfrom tensorflow.keras.applications.inception_v3 import InceptionV3# Always clear the backend before training a modelbackend.clear_session()# InceptionV3 model and use the weights from imagenetconv_base = InceptionV3(weights = 'imagenet', #Useing the inception_v3 CNN that was trained on ImageNet data. include_top = False)"
},
{
"code": null,
"e": 11086,
"s": 11055,
"text": "Create a Functional API Model."
},
{
"code": null,
"e": 11256,
"s": 11086,
"text": "Now let's combine the pre-trained InceptionV3 model weights with the dense layers (fully connected layers) and reduce the dimensionality of the model in between the two."
},
{
"code": null,
"e": 11518,
"s": 11256,
"text": "# Connect the InceptionV3 output to the fully connected layersInceptionV3_model = conv_base.outputpool = GlobalAveragePooling2D()(InceptionV3_model)dense_1 = layers.Dense(512, activation = 'relu')(pool)output = layers.Dense(120, activation = 'softmax')(dense_1)"
},
{
"code": null,
"e": 11552,
"s": 11518,
"text": "Display the Functional API Model."
},
{
"code": null,
"e": 11696,
"s": 11552,
"text": "To get an understanding of the model architecture, we can display the functional API model as a whole to visually see the depth of the network."
},
{
"code": null,
"e": 11861,
"s": 11696,
"text": "# Create an example of the Archictecture to plot on a graphmodel_example = models.Model(inputs=conv_base.input, outputs=output)# plot graphplot_model(model_example)"
},
{
"code": null,
"e": 11938,
"s": 11861,
"text": "(The model is way too big to display here on Medium, click this link to see)"
},
{
"code": null,
"e": 11971,
"s": 11938,
"text": "Define the Model and Compile it."
},
{
"code": null,
"e": 12202,
"s": 11971,
"text": "In order for us to train the model, we need to define the functional API model and compile the model with categorical cross-entropy as the loss function and Stochastic Gradient Descent with a learning rate and momentum parameters."
},
{
"code": null,
"e": 12617,
"s": 12202,
"text": "# Define/Create the model for trainingmodel_InceptionV3 = models.Model(inputs=conv_base.input, outputs=output)# Compile the model with categorical crossentropy for the loss function and SGD for the optimizer with the learning# rate at 1e-4 and momentum at 0.9model_InceptionV3.compile(loss='categorical_crossentropy', optimizer=optimizers.SGD(lr=1e-4, momentum=0.9), metrics=['accuracy'])"
},
{
"code": null,
"e": 12659,
"s": 12617,
"text": "Check the Device List for the GPU to Use."
},
{
"code": null,
"e": 13036,
"s": 12659,
"text": "Now I recommend using a GPU to train this model since the InceptionV3 model has over 21 million parameters and training on a CPU could take days to complete. If you have a GPU you can use your own but I used Kaggle’s GPUs supplied to their notebooks which took me about 20–25 min for the training to complete. Find the GPU device usable so the training process can be sped up."
},
{
"code": null,
"e": 13195,
"s": 13036,
"text": "# Import from tensorflow the module to read the GPU device and then printfrom tensorflow.python.client import device_libprint(device_lib.list_local_devices())"
},
{
"code": null,
"e": 13212,
"s": 13195,
"text": "Train the Model."
},
{
"code": null,
"e": 13413,
"s": 13212,
"text": "After finding the GPU to use we will incorporate it into our code to finally train the model with the train_generator for the training data and the validation_data parameter set to the test_generator."
},
{
"code": null,
"e": 13805,
"s": 13413,
"text": "# Execute the model with fit_generator within the while loop utilizing the discovered GPUimport tensorflow as tfwith tf.device(\"/device:GPU:0\"): history = model_InceptionV3.fit_generator( train_generator, epochs=5, validation_data=test_generator, verbose = 1, callbacks=[EarlyStopping(monitor='val_accuracy', patience = 5, restore_best_weights = True)])"
},
{
"code": null,
"e": 13870,
"s": 13805,
"text": "99% Validation accuracy with a loss of 0.0187 is more than good."
},
{
"code": null,
"e": 13930,
"s": 13870,
"text": "Display the Model’s Testing Accuracy and Testing Loss Value"
},
{
"code": null,
"e": 14121,
"s": 13930,
"text": "Now let’s see how our model looks by plotting the training accuracy/validation accuracy and training loss/validation loss across the epochs, then print the final test accuracy and test loss."
},
{
"code": null,
"e": 15176,
"s": 14121,
"text": "# Create a dictionary of the model history import matplotlib.pyplot as plthistory_dict = history.historyloss_values = history_dict['loss']val_loss_values = history_dict['val_loss']acc_values = history_dict['accuracy']val_acc_values = history_dict['val_accuracy']epochs = range(1, len(history_dict['accuracy']) + 1)# Plot the training/validation lossplt.plot(epochs, loss_values, 'bo', label = 'Training loss')plt.plot(epochs, val_loss_values, 'b', label = 'Validation loss')plt.title('Training and validation loss')plt.xlabel('Epochs')plt.ylabel('Loss')plt.legend()plt.show()# Plot the training/validation accuracyplt.plot(epochs, acc_values, 'bo', label = 'Training accuracy')plt.plot(epochs, val_acc_values, 'b', label = 'Validation accuracy')plt.title('Training and validation accuracy')plt.xlabel('Epochs')plt.ylabel('Accuracy')plt.legend()plt.show()# Evaluate the test accuracy and test loss of the modeltest_loss, test_acc = model_InceptionV3.evaluate_generator(test_generator)print('Model testing accuracy/testing loss:', test_acc, \" \", test_loss)"
},
{
"code": null,
"e": 15768,
"s": 15176,
"text": "The results for accurately predicting 120 classes of fruits and vegetable images comes out to being 99% testing accuracy with a 1.8% loss value. Loss value is the measure of distance for how far off our outputs are and what we expected. The training was done through 5 epochs and took around 20–25min each to complete to achieve this accuracy with the help of kaggle’s GPU to speed up the process. The training data was a 3781 step process (iterations), taking a batch size of data by every 16 samples to be propagated forward and backward to give us one pass. One pass equals one iteration."
},
{
"code": null,
"e": 16143,
"s": 15768,
"text": "In conclusion, transfer learning is a very effective way to train datasets to recognize and classify images. It allows for fast setup without going in detail designing a convolutional neural network architecture from scratch and it provides high accuracy with the pre-trained model’s previous training. You can head over to my project’s Kaggle notebook for anything further."
},
{
"code": null,
"e": 16371,
"s": 16143,
"text": "Aditya Ananthram. (2018, October 17). Deep Learning For Beginners Using Transfer Learning In Keras. Retrieved April 24, 2020, from Medium website: https://towardsdatascience.com/keras-transfer-learning-for-beginners-6c9b8b7143e"
},
{
"code": null,
"e": 16613,
"s": 16371,
"text": "Shaikh, F. (2018, October 18). Understanding Inception Network from Scratch (with Python codes). Retrieved May 7, 2020, from Analytics Vidhya website: https://www.analyticsvidhya.com/blog/2018/10/understanding-inception-network-from-scratch/"
},
{
"code": null,
"e": 16717,
"s": 16613,
"text": "Chollet, F. (2018). Deep Learning with Python. Shelter Island (New York, Estados Unidos): Manning, Cop."
},
{
"code": null,
"e": 16910,
"s": 16717,
"text": "File:Valid-padding-convolution.gif — Wikimedia Commons. (2018, July 6). Retrieved May 6, 2020, from Wikimedia.org website: https://commons.wikimedia.org/wiki/File:Valid-padding-convolution.gif"
},
{
"code": null,
"e": 17225,
"s": 16910,
"text": "Get Free Stock Photos of Concept of Intelligence with Human Brain on Blue Background Online | Download Latest Free Images and Free Illustrations. (2020). Retrieved May 9, 2020, from Freerangestock.com website: https://freerangestock.com/photos/65677/concept-of-intelligence-with-human-brain-on-blue-background.html"
},
{
"code": null,
"e": 17488,
"s": 17225,
"text": "Get Free Stock Photos of fruit and vegetables vendor italy Online | Download Latest Free Images and Free Illustrations. (2020). Retrieved May 9, 2020, from Freerangestock.com website: https://freerangestock.com/photos/37652/fruit-and-vegetables-vendor-italy.html"
},
{
"code": null,
"e": 17675,
"s": 17488,
"text": "https://github.com/syt123450, syt123450. (2020). Layer — GlobalPooling2d. Retrieved May 6, 2020, from Tensorspace.org website: https://tensorspace.org/html/docs/layerGlobalPooling2d.html"
},
{
"code": null,
"e": 17770,
"s": 17675,
"text": "ImageNet. (2017). Retrieved May 7, 2020, from Image-net.org website: http://www.image-net.org/"
},
{
"code": null,
"e": 18055,
"s": 17770,
"text": "Krut Patel. (2019, September 8). Convolutional Neural Networks — A Beginner’s Guide — Towards Data Science. Retrieved April 24, 2020, from Medium website: https://towardsdatascience.com/convolution-neural-networks-a-beginners-guide-implementing-a-mnist-hand-written-digit-8aa60330d022"
},
{
"code": null,
"e": 18175,
"s": 18055,
"text": "Mihai Oltean. (2020). Fruits 360. Retrieved May 6, 2020, from Kaggle.com website: https://www.kaggle.com/moltean/fruits"
},
{
"code": null,
"e": 18490,
"s": 18175,
"text": "Milton-Barker, A. (2019, February 17). Inception V3 Deep Convolutional Architecture For Classifying Acute Myeloid/Lymphoblastic Leukemia. Retrieved May 6, 2020, from Intel.com website: https://software.intel.com/en-us/articles/inception-v3-deep-convolutional-architecture-for-classifying-acute-myeloidlymphoblastic"
},
{
"code": null,
"e": 18723,
"s": 18490,
"text": "Prakhar Ganesh. (2019, October 18). Types of Convolution Kernels : Simplified — Towards Data Science. Retrieved April 24, 2020, from Medium website: https://towardsdatascience.com/types-of-convolution-kernels-simplified-f040cb307c37"
},
{
"code": null,
"e": 18865,
"s": 18723,
"text": "Unsplash. (2020). Ash from Modern Afflatus. Retrieved May 9, 2020, from Unsplash.com website: https://unsplash.com/@modernafflatusphotography"
},
{
"code": null,
"e": 19012,
"s": 18865,
"text": "VisibleBreadcrumbs. (2016). Retrieved April 27, 2020, from Mathworks.com website: https://www.mathworks.com/help/deeplearning/ref/inceptionv3.html"
}
]
|
How to bind multiple events with one "bind" in Tkinter? | For a particular application, if we want to perform multiple tasks with the help of the buttons defined in it, we can use the widget.bind(event, callback) method, which binds a widget and an event handler together so that the callback runs whenever the event occurs in the application.
Let us suppose we want to bind multiple events or callbacks to widgets with a single block of bind calls. We can first iterate over all the widgets to treat them as one group, and then configure the bindings on each widget in that group, so the same handlers get attached to multiple widgets in the application.
# Import the required libraries
from tkinter import *
from tkinter import ttk
# Create an instance of tkinter frame or window
win = Tk()
# Set the size of the window
win.geometry("700x350")
def change_bgcolor(e):
label.config(background="#adad12")
def change_fgcolor(e):
label.config(foreground="white")
# Add a Label widget
label = Label(win, text="Hello World! Welcome to Tutorialspoint", font=('Georgia 19 italic'))
label.pack(pady=30)
# Add Buttons to trigger the event
b1 = ttk.Button(win, text="Button-1")
b1.pack()
# Bind the events
for b in [b1]:
b.bind("<Enter>", change_bgcolor)
b.bind("<Leave>", change_fgcolor)
win.mainloop()
If we run the above code, it will display a window that contains a button.
When we hover over the button, it will change the background color of the Label. Leaving the button will change the font color of the Label widget. | [
{
"code": null,
"e": 1322,
"s": 1062,
"text": "For a particular application, if we want to perform multiple tasks with the help of buttons defined in it, then we can use the bind(Button, callback) method which binds the button and the event together to schedule the running of the event in the application."
},
{
"code": null,
"e": 1563,
"s": 1322,
"text": "Let us suppose we want to bind multiple events or callback with a single <bind>, then we have to first iterate over all the widgets to get it as one entity. The entity can be now configurable to bind the multiple widgets in the application."
},
{
"code": null,
"e": 2221,
"s": 1563,
"text": "# Import the required libraries\nfrom tkinter import *\nfrom tkinter import ttk\n\n# Create an instance of tkinter frame or window\nwin = Tk()\n\n# Set the size of the window\nwin.geometry(\"700x350\")\n\ndef change_bgcolor(e):\n label.config(background=\"#adad12\")\n\ndef change_fgcolor(e):\n label.config(foreground=\"white\")\n\n# Add a Label widget\nlabel = Label(win, text=\"Hello World! Welcome to Tutorialspoint\", font=('Georgia 19 italic'))\nlabel.pack(pady=30)\n\n# Add Buttons to trigger the event\nb1 = ttk.Button(win, text=\"Button-1\")\nb1.pack()\n\n# Bind the events\nfor b in [b1]:\n b.bind(\"<Enter>\", change_bgcolor)\n b.bind(\"<Leave>\", change_fgcolor)\n\nwin.mainloop()"
},
{
"code": null,
"e": 2296,
"s": 2221,
"text": "If we run the above code, it will display a window that contains a button."
},
{
"code": null,
"e": 2444,
"s": 2296,
"text": "When we hover over the button, it will change the background color of the Label. Leaving the button will change the font color of the Label widget."
}
]
|
Creating a Custom Classifier for Text Cleaning | by Rodrigo Nader | Towards Data Science | Recently I've been studying NLP more than other data science fields, and one challenge that I face more often than not is the cleaning part of the process. Building NLP models requires many pre-processing steps, and if the data is not properly treated, it can result in poor models, which is exactly what we want to avoid.
In this article, we're going to focus on PDF documents. The goal here is to open a PDF file, convert it to plain text, understand the need for data cleaning and build a machine learning model for that purpose.
In this post we will:
Open a PDF file and convert it into a text string
Split that text into sentences and build a data set
Manually label that data with user interaction
Make a classifier to remove unwanted sentences
Some libraries we’re going to use:
pdfminer → read PDF files
textblob → text processing
pandas → data analysis
As always, I’ll try to explain the code used along the text, so feel free to skip the snippets if you'd like. Let's start by importing some modules:
from collections import Counter
from IPython.display import clear_output
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfinterp import PDFResourceManager
from pdfminer.pdfinterp import PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from textblob import TextBlob
import io
import math
import numpy as np
import pandas as pd
import string
We are going to use pdfminer to build our PDF reader:
def read_pdf(path):
    rsrcmgr = PDFResourceManager()
    retstr = io.StringIO()
    codec = 'utf-8'
    laparams = LAParams()
    device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
    fp = open(path, 'rb')
    interpreter = PDFPageInterpreter(rsrcmgr, device)
    password = ""
    maxpages = 0
    caching = True
    pagenos = set()
    for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages, password=password,
                                  caching=caching, check_extractable=True):
        interpreter.process_page(page)
    text = retstr.getvalue()
    text = " ".join(text.replace(u"\xa0", " ").strip().split())
    fp.close()
    device.close()
    retstr.close()
    return text
Although this function seems long, it's just reading a PDF file and returning its text as a string. We'll apply it to a paper called "A Hands-on Guide to Google Data":
By just looking at the first page, we quickly see that an article contains much more than simple sentences, including elements like dates, line counts, page numbers, titles and subtitles, section separators, equations, and so on. Let's check how those properties come out when the paper is converted to plain text (primer.pdf is the name of the file, stored locally on my computer):
read_pdf('primer.pdf')
It's clear here that we lost all the text structure. Line counts and page numbers are spread around as if they were part of sentences, while titles and references can't be clearly distinguished from the text bodies. There are probably many ways out there for you to conserve the text structure while reading a PDF, but let's keep this messed up for the sake of explanation (as this is very often how raw text data looks).
A full cleaning pipeline has many steps, and to become familiar with them I suggest following some tutorials (this and this are great starting points). In general lines, the cleaning process chain would include:
Tokenization
Normalization
Entity extraction
Spelling and grammar correction
Removing punctuation
Removing special characters
Word Stemming
Our goal here isn't to replace any of those stages, but instead to build a more general tool to remove what's unwanted for us. Take it as a complementary step that helps in the middle of the pipeline.
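For contrast, a couple of those standard stages take only a few lines with TextBlob and Python's string module. This is just a rough sketch on a made-up sentence, not a full pipeline:
from textblob import TextBlob
import string

raw = "Figure 3 shows the Results!!  They were good..."

# Tokenization + case normalization
blob = TextBlob(raw)
tokens = [word.lower() for word in blob.words]

# Punctuation removal
no_punct = raw.translate(str.maketrans('', '', string.punctuation))

print(tokens)
print(no_punct)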
Let's suppose we want to get rid of any sentence that does not look like it was written by a human. The idea is to classify those sentences as "unwanted" or "weird" and consider the remaining sentences "normal". For example:
32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 related.
Or
51 52 53 54 55 # read data from correlate and make it a zoo time series dat <- read.csv("Data/econ-HSN1FNSA.csv") y <- zoo(dat[,2],as.
Those sentences are clearly messed up because of the text transformation and in case we're making, let's say, a PDF summarizer, they shouldn't be included.
To remove them, we could manually analyze the text, figure out some patterns and apply regular expressions. But, in some cases, it might be better to build a model that finds those patterns for us. This is what we're doing here. We'll create a classifier to recognize weird sentences so that we can easily remove them from the text body.
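Just to illustrate the manual route we are skipping, a hand-written rule could flag sentences whose characters are mostly digits. This toy heuristic is only here for comparison and isn't part of the final approach:
import re

def looks_weird(sentence, digit_ratio=0.3):
    # Toy rule: flag sentences where digits make up a large share of the characters
    chars = re.sub(r'\s', '', sentence)
    if not chars:
        return True
    digits = sum(c.isdigit() for c in chars)
    return digits / len(chars) > digit_ratio

print(looks_weird("32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 related."))  # True
print(looks_weird("The idea is to classify those sentences as unwanted or weird."))      # False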
Let's build a function to open the PDF file, split the text into sentences and save them into a data frame with columns label and sentence:
def pdf_to_df(path):
    content = read_pdf(path)
    blob = TextBlob(content)
    sentences = blob.sentences
    df = pd.DataFrame({'sentence': sentences, 'label': np.nan})
    df['sentence'] = df.sentence.apply(''.join)
    return df

df = pdf_to_df('primer.pdf')
df.head()
Since we don't have the data labeled (as "weird" or "normal"), we're going to label it manually to fill our label column. This dataset will be updatable so that we can attach new documents to it and label their sentences.
Let's first save the unlabelled dataset into a .pickle file:
df.to_pickle('weird_sentences.pickle')
Now we'll create a user interaction function to manually classify the data points. For each sentence in the dataset, we'll display a text box for the user to type '1' or nothing. If the user types '1', the sentence is to be classified as "weird".
I'm using a Jupyter Notebook so I've called the clear_output() function from IPython.display to improve the interaction.
def manually_label(pickle_file):
    print('Is this sentence weird? Type 1 if yes. \n')
    df = pd.read_pickle(pickle_file)
    for index, row in df.iterrows():
        if pd.isnull(row.label):
            print(row.sentence)
            label = input()
            if label == '1':
                df.loc[index, 'label'] = 1
            if label == '':
                df.loc[index, 'label'] = 0
            clear_output()
            df.to_pickle('weird_sentences.pickle')
    print('No more labels to classify!')

manually_label('weird_sentences.pickle')
This is what the output looks like for each sentence:
Since this sentence looks pretty normal, I won't type '1', but simply press Enter and move on to the next one. This process repeats until the dataset is fully labeled or until you interrupt it. Every user input is saved to the pickle file, so the dataset is updated after each sentence. This easy interaction made it relatively fast to label the data: it took me about 20 minutes to label roughly 500 data points.
Two other functions were written to keep things simple: one to attach another PDF file to our dataset, and another to reset all the labels (setting the label column values to np.nan).
def append_pdf(pdf_path, df_pickle):
    new_data = pdf_to_df(pdf_path)
    df = pd.read_pickle(df_pickle)
    df = df.append(new_data)
    df = df.reset_index(drop=True)
    df.to_pickle(df_pickle)

def reset_labels(df_pickle):
    df = pd.read_pickle(df_pickle)
    df['label'] = np.nan
    df.to_pickle(df_pickle)
As we ended up with more "normal" than "weird" sentences, I built a function to undersample the dataset; otherwise, some machine learning algorithms wouldn't perform well:
def undersample(df, target_col, r=1):
    falses = df[target_col].value_counts()[0]
    trues = df[target_col].value_counts()[1]
    relation = float(trues)/float(falses)
    if trues >= r*falses:
        df_drop = df[df[target_col] == True]
        drop_size = int(math.fabs(int((relation - r) * (falses))))
    else:
        df_drop = df[df[target_col] == False]
        drop_size = int(math.fabs(int((r-relation) * (falses))))
    df_drop = df_drop.sample(drop_size)
    df = df.drop(labels=df_drop.index, axis=0)
    return df

df = pd.read_pickle('weird_sentences.pickle').dropna()
df = undersample(df, 'label')
df.label.value_counts()
645 labeled data points. Not enough to make a decent model, but we'll use it as a playground example.
Now we need to transform the sentences in a way the algorithm can understand. One form of doing that is counting the occurrence of each character inside the sentence. That would be something like a bag-of-words technique, but at the character level.
def bag_of_chars(df, text_col):
    chars = []
    df['char_list'] = df[text_col].apply(list)
    df['char_counts'] = df.char_list.apply(Counter)
    for index, row in df.iterrows():
        for c in row.char_counts:
            df.loc[index, c] = row.char_counts[c]
    chars = list(set(chars))
    df = df.fillna(0).drop(['sentence', 'char_list', 'char_counts'], 1)
    return df

data = bag_of_chars(df, 'sentence')
data.head()
Perfect! Now we're just left with a usual machine learning challenge. Many features and one target in a classification problem. Let's split the data into train and test sets:
data = data.sample(len(data)).reset_index(drop=True)
train_data = data.iloc[:400]
test_data = data.iloc[400:]
x_train = train_data.drop('label', 1)
y_train = train_data['label']
x_test = test_data.drop('label', 1)
y_test = test_data['label']
We're ready to choose an algorithm and check its performance. Here I'm using a Logistic Regression just to see what we can achieve:
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

lr = LogisticRegression()
lr.fit(x_train, y_train)
accuracy_score(y_test, lr.predict(x_test))
86% accuracy. That's pretty good for a tiny dataset, a shallow model and a bag-of-chars approach. The only problem is that, although we split into training and testing sets, we are evaluating the model on the same document that we trained on. A more appropriate approach would be to use a new document as the test set.
Let's make a function that enables us to predict any custom sentence:
def predict_sentence(sentence):
    sample_test = pd.DataFrame({'label': np.nan, 'sentence': sentence}, [0])
    for col in x_train.columns:
        sample_test[str(col)] = 0
    sample_test = bag_of_chars(sample_test, 'sentence')
    sample_test = sample_test.drop('label', 1)
    pred = lr.predict(sample_test)[0]
    if pred == 1:
        return 'WEIRD'
    else:
        return 'NORMAL'

weird_sentence = 'jdaij oadao //// fiajoaa32 32 5555'
Normal Sentence:
We just built a cool Machine Learning model
normal_sentence = 'We just built a cool machine learning model'
predict_sentence(normal_sentence)
Weird Sentence:
jdaij oadao //// fiajoaa32 32 5555
weird_sentence = 'jdaij oadao //// fiajoaa32 32 5555'
predict_sentence(weird_sentence)
And our model scores! Unfortunately, when I tried more sentences it performed poorly on some of them. The bag-of-words (in this case, bag-of-chars) method probably isn't the best option, the algorithm itself could be greatly improved, and we would need to label many more data points for the model to become reliable. The point here is that you could use this same approach to perform a lot of different tasks, e.g. recognizing specific elements (links, dates, names, topics, titles, equations, references, and more). Used the right way, text classification can be a powerful tool to help in the cleaning process, and should not be taken for granted. Good cleaning!
Thank you if you kept reading until the end. This was an article focused on using text classification to handle cleaning problems. Please follow my profile for more on data science, and feel free to leave me any comments or concerns. See you in the next post!
{
"code": null,
"e": 499,
"s": 171,
"text": "Recently I've been studying NLP more than other data science fields, and one challenge that I face more often than not is the cleaning part of the process. Building NLP models require many pre-processing steps, and if the data is not properly treated, it could result in poor models, which is necessarily what we want to avoid."
},
{
"code": null,
"e": 709,
"s": 499,
"text": "In this article, we're going to focus on PDF documents. The goal here is to open a PDF file, convert it to plain text, understand the need for data cleaning and build a machine learning model for that purpose."
},
{
"code": null,
"e": 731,
"s": 709,
"text": "In this post we will:"
},
{
"code": null,
"e": 781,
"s": 731,
"text": "Open a PDF file and convert it into a text string"
},
{
"code": null,
"e": 833,
"s": 781,
"text": "Split that text into sentences and build a data set"
},
{
"code": null,
"e": 880,
"s": 833,
"text": "Manually label that data with user interaction"
},
{
"code": null,
"e": 927,
"s": 880,
"text": "Make a classifier to remove unwanted sentences"
},
{
"code": null,
"e": 962,
"s": 927,
"text": "Some libraries we’re going to use:"
},
{
"code": null,
"e": 988,
"s": 962,
"text": "pdfminer → read PDF files"
},
{
"code": null,
"e": 1015,
"s": 988,
"text": "textblob → text processing"
},
{
"code": null,
"e": 1038,
"s": 1015,
"text": "pandas → data analysis"
},
{
"code": null,
"e": 1187,
"s": 1038,
"text": "As always, I’ll try to explain the code used along the text, so feel free to skip the snippets if you'd like. Let's start by importing some modules:"
},
{
"code": null,
"e": 1572,
"s": 1187,
"text": "from collections import Counterfrom IPython.display import clear_outputfrom pdfminer.converter import TextConverterfrom pdfminer.layout import LAParamsfrom pdfminer.pdfinterp import PDFResourceManagerfrom pdfminer.pdfinterp import PDFPageInterpreterfrom pdfminer.pdfpage import PDFPagefrom textblob import TextBlobimport ioimport mathimport numpy as npimport pandas as pdimport string"
},
{
"code": null,
"e": 1626,
"s": 1572,
"text": "We are going to use pdfminer to build our PDF reader:"
},
{
"code": null,
"e": 2294,
"s": 1626,
"text": "def read_pdf(path): rsrcmgr = PDFResourceManager() retstr = io.StringIO() codec = 'utf-8' laparams = LAParams() device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams) fp = open(path, 'rb') interpreter = PDFPageInterpreter(rsrcmgr, device) password = \"\" maxpages = 0 caching = True pagenos=set() for page in PDFPage.get_pages(fp, pagenos, maxpages=maxpages, password=password, caching=caching, check_extractable=True): interpreter.process_page(page) text = retstr.getvalue() text = \" \".join(text.replace(u\"\\xa0\", \" \").strip().split()) fp.close() device.close() retstr.close() return text"
},
{
"code": null,
"e": 2462,
"s": 2294,
"text": "Although this function seems long, it's just reading a PDF file and returning its text as a string. We'll apply it to a paper called \"A Hands-on Guide to Google Data\":"
},
{
"code": null,
"e": 2850,
"s": 2462,
"text": "By just looking at the first page, we quickly see that an article contains much more than simple sentences, including elements like dates, line counts, page numbers, titles and subtitles, section separators, equations, and so on. Let's check how those properties will come out when the paper is converted to plain text (primer.pdf is the name of the file, stored locally in my computer):"
},
{
"code": null,
"e": 2873,
"s": 2850,
"text": "read_pdf('primer.pdf')"
},
{
"code": null,
"e": 3294,
"s": 2873,
"text": "It's clear here that we lost the all the text structure. Line counts and page numbers are spread as they were part of sentences, while titles and references can't be clearly distinguished from the text bodies. There are probably many ways out there for you to conserve the text structure while reading a PDF, but let's keep this messed up for the sake of explanation (as this is very often how raw text data looks like)."
},
{
"code": null,
"e": 3506,
"s": 3294,
"text": "A full cleaning pipeline has many steps, and to become familiar with them I suggest following some tutorials (this and this are great starting points). In general lines, the cleaning process chain would include:"
},
{
"code": null,
"e": 3519,
"s": 3506,
"text": "Tokenization"
},
{
"code": null,
"e": 3533,
"s": 3519,
"text": "Normalization"
},
{
"code": null,
"e": 3551,
"s": 3533,
"text": "Entity extraction"
},
{
"code": null,
"e": 3583,
"s": 3551,
"text": "Spelling and grammar correction"
},
{
"code": null,
"e": 3604,
"s": 3583,
"text": "Removing punctuation"
},
{
"code": null,
"e": 3632,
"s": 3604,
"text": "Removing special characters"
},
{
"code": null,
"e": 3646,
"s": 3632,
"text": "Word Stemming"
},
{
"code": null,
"e": 3826,
"s": 3646,
"text": "Our goal here isn't to replace any of those stages, but instead, build a more general tool to remove what's unwanted for us. Take it as a complementary step to help in the middle."
},
{
"code": null,
"e": 4039,
"s": 3826,
"text": "Let's suppose we want to get rid of any sentence that does not look like human written. The idea is to classify those sentences as \"unwanted\" or “weird” and consider the remaining sentences \"normal\". For example:"
},
{
"code": null,
"e": 4105,
"s": 4039,
"text": "32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 related."
},
{
"code": null,
"e": 4108,
"s": 4105,
"text": "Or"
},
{
"code": null,
"e": 4243,
"s": 4108,
"text": "51 52 53 54 55 # read data from correlate and make it a zoo time series dat <- read.csv(\"Data/econ-HSN1FNSA.csv\") y <- zoo(dat[,2],as."
},
{
"code": null,
"e": 4399,
"s": 4243,
"text": "Those sentences are clearly messed up because of the text transformation and in case we're making, let's say, a PDF summarizer, they shouldn't be included."
},
{
"code": null,
"e": 4736,
"s": 4399,
"text": "To remove them, we could manually analyze the text, figure out some patterns and apply regular expressions. But, in some cases, it might be better to build a model that find those patterns for us. This is what we're doing here. We'll create a classifier to recognize weird sentences so that we can easily remove them from the text body."
},
{
"code": null,
"e": 4876,
"s": 4736,
"text": "Let's build a function to open the PDF file, split the text into sentences and save them into a data frame with columns label and sentence:"
},
{
"code": null,
"e": 5143,
"s": 4876,
"text": "def pdf_to_df(path): content = read_pdf(path) blob = TextBlob(content) sentences = blob.sentences df = pd.DataFrame({'sentence': sentences, 'label': np.nan}) df['sentence'] = df.sentence.apply(''.join) return dfdf = pdf_to_df('primer.pdf')df.head()"
},
{
"code": null,
"e": 5364,
"s": 5143,
"text": "Since we don't have the data labeled (in \"weird\" or \"normal\" ), we're going to do it manually to fill our label column. This data set will be updatable so that we can attach new documents to it and label their sentences."
},
{
"code": null,
"e": 5425,
"s": 5364,
"text": "Let's first save the unlabelled dataset into a .pickle file:"
},
{
"code": null,
"e": 5464,
"s": 5425,
"text": "df.to_pickle('weird_sentences.pickle')"
},
{
"code": null,
"e": 5711,
"s": 5464,
"text": "Now we'll create a user interaction function to manually classify the data points. For each sentence in the dataset, we'll display a text box for the user to type '1' or nothing. If the user types '1', the sentence is to be classified as \"weird\"."
},
{
"code": null,
"e": 5832,
"s": 5711,
"text": "I'm using a Jupyter Notebook so I've called the clear_output() function from IPython.display to improve the interaction."
},
{
"code": null,
"e": 6388,
"s": 5832,
"text": "def manually_label(pickle_file): print('Is this sentence weird? Type 1 if yes. \\n') df = pd.read_pickle(pickle_file) for index, row in df.iterrows(): if pd.isnull(row.label): print(row.sentence) label = input() if label == '1': df.loc[index, 'label'] = 1 if label == '': df.loc[index, 'label'] = 0 clear_output() df.to_pickle('weird_sentences.pickle') print('No more labels to classify!')manually_label('weird_sentences.pickle')"
},
{
"code": null,
"e": 6441,
"s": 6388,
"text": "This is how the output looks like for each sentence:"
},
{
"code": null,
"e": 6863,
"s": 6441,
"text": "Since this sentence looks pretty normal, I won't type '1', but simply press enter and move on to the next one. This process will repeat until the dataset is fully labeled or when you interrupt. Every user input is being saved to the pickle file, so the dataset is being updated at each sentence. This easy interaction made it relatively fast to label the data. It took me 20 minutes to have about 500 data points labeled."
},
{
"code": null,
"e": 7047,
"s": 6863,
"text": "Two other functions were written to keep things simple. One to attach another PDF file to our dataset and another one to reset all the labels (sets the label column values to np.nan)."
},
{
"code": null,
"e": 7354,
"s": 7047,
"text": "def append_pdf(pdf_path, df_pickle): new_data = pdf_to_df(pdf_path) df = pd.read_pickle(df_pickle) df = df.append(new_data) df = df.reset_index(drop=True) df.to_pickle(df_pickle)def reset_labels(df_pickle): df = pd.read_pickle(df_pickle) df['label'] = np.nan df.to_pickle(df_pickle)"
},
{
"code": null,
"e": 7526,
"s": 7354,
"text": "As we ended up with more \"normal\" than \"weird\" sentences, I built a function to undersample the dataset, otherwise, some machine learning algorithms wouldn't perform well:"
},
{
"code": null,
"e": 8152,
"s": 7526,
"text": "def undersample(df, target_col, r=1): falses = df[target_col].value_counts()[0] trues = df[target_col].value_counts()[1] relation = float(trues)/float(falses) if trues >= r*falses: df_drop = df[df[target_col] == True] drop_size = int(math.fabs(int((relation - r) * (falses)))) else: df_drop = df[df[target_col] == False] drop_size = int(math.fabs(int((r-relation) * (falses)))) df_drop = df_drop.sample(drop_size) df = df.drop(labels=df_drop.index, axis=0) return dfdf = pd.read_pickle('weird_sentences.pickle').dropna()df = undersample(df, 'label')df.label.value_counts()"
},
{
"code": null,
"e": 8254,
"s": 8152,
"text": "645 labeled data points. Not enough to make a decent model, but we'll use it as a playground example."
},
{
"code": null,
"e": 8504,
"s": 8254,
"text": "Now we need to transform the sentences in a way the algorithm can understand. One form of doing that is counting the occurrence of each character inside the sentence. That would be something like a bag-of-words technique, but at the character level."
},
{
"code": null,
"e": 8923,
"s": 8504,
"text": "def bag_of_chars(df, text_col): chars = [] df['char_list'] = df[text_col].apply(list) df['char_counts'] = df.char_list.apply(Counter) for index, row in df.iterrows(): for c in row.char_counts: df.loc[index, c] = row.char_counts[c] chars = list(set(chars)) df = df.fillna(0).drop(['sentence', 'char_list', 'char_counts'], 1) return dfdata = bag_of_chars(df, 'sentence')data.head()"
},
{
"code": null,
"e": 9098,
"s": 8923,
"text": "Perfect! Now we're just left with a usual machine learning challenge. Many features and one target in a classification problem. Let's split the data into train and test sets:"
},
{
"code": null,
"e": 9334,
"s": 9098,
"text": "data = data.sample(len(data)).reset_index(drop=True)train_data = data.iloc[:400]test_data = data.iloc[400:]x_train = train_data.drop('label', 1)y_train = train_data['label']x_test = test_data.drop('label', 1)y_test = test_data['label']"
},
{
"code": null,
"e": 9466,
"s": 9334,
"text": "We're ready to choose an algorithm and check its performance. Here I'm using a Logistic Regression just to see what we can achieve:"
},
{
"code": null,
"e": 9651,
"s": 9466,
"text": "from sklearn.linear_model import LogisticRegressionfrom sklearn.metrics import accuracy_scorelr = LogisticRegression()lr.fit(x_train, y_train)accuracy_score(y_test, lr.predict(x_test))"
},
{
"code": null,
"e": 9966,
"s": 9651,
"text": "86 % accuracy. That's pretty good for a tiny dataset, a shallow model and a bag-of-chars approach. The only problem is that although we split into training and testing, we are evaluating the model with the same document that we trained on. A more appropriate approach would be using a new document as the test set."
},
{
"code": null,
"e": 10036,
"s": 9966,
"text": "Let's make a function that enables us to predict any custom sentence:"
},
{
"code": null,
"e": 10470,
"s": 10036,
"text": "def predict_sentence(sentence): sample_test = pd.DataFrame({'label': np.nan, 'sentence': sentence}, [0]) for col in x_train.columns: sample_test[str(col)] = 0 sample_test = bag_of_chars(sample_test, 'sentence') sample_test = sample_test.drop('label', 1) pred = lr.predict(sample_test)[0] if pred == 1: return 'WEIRD' else: return 'NORMAL'weird_sentence = 'jdaij oadao //// fiajoaa32 32 5555'"
},
{
"code": null,
"e": 10487,
"s": 10470,
"text": "Normal Sentence:"
},
{
"code": null,
"e": 10531,
"s": 10487,
"text": "We just built a cool Machine Learning model"
},
{
"code": null,
"e": 10628,
"s": 10531,
"text": "normal_sentence = 'We just built a cool machine learning model'predict_sentence(normal_sentence)"
},
{
"code": null,
"e": 10644,
"s": 10628,
"text": "Weird Sentence:"
},
{
"code": null,
"e": 10679,
"s": 10644,
"text": "jdaij oadao //// fiajoaa32 32 5555"
},
{
"code": null,
"e": 10765,
"s": 10679,
"text": "weird_sentence = 'jdaij oadao //// fiajoaa32 32 5555'predict_sentence(weird_sentence)"
},
{
"code": null,
"e": 11434,
"s": 10765,
"text": "And our model scores! Unfortunately, when I tried more sentences it showed bad performance classifying some of them. The bag-of-words (in this case chars) method isn't probably the best option, the algorithm itself could be highly improved and we should label many more data points for the model to become reliable. The point here is that you could use this same approach to perform a lot of different tasks, e.g. recognizing specific elements (e.g. links, dates, names, topics, titles, equations, references, and more). Used the right way, text classification can be a powerful tool to help in the cleaning process, and should not be taken for granted. Good cleaning!"
}
]
|
How to Setup Sublime Text 3 for Java in Windows? - GeeksforGeeks | 02 Jul, 2020
Java is one of the most popular and widely-used object-oriented programming languages. Java syntax is similar to C/C++. But Java does not provide low-level programming functionalities like pointers. Also, Java codes are always written in the form of classes and objects. The Java codes are first compiled into byte code (machine-independent code). Then the byte code is run on Java Virtual Machine (JVM) regardless of the underlying architecture.
Written by a Google engineer, Sublime Text is a cross-platform editor developed in C++ and Python. It ships with basic built-in support for many languages, including Java. Sublime Text is fast, and you can customize the editor to your needs to create a full-fledged development environment. You can install packages for debugging, auto-completion, code linting, and so on, as well as packages targeting specific languages and frameworks.
Java can be downloaded from oracle.com. To install Java compiler on Windows, go through How to install Java on Windows?
Sublime Text 3 can be downloaded from its official site sublimetext.com. To install sublime text 3 on Windows, go through How to install Sublime Text 3 in Windows?
Step 1: Open the System page in Control Panel and click the Advanced system settings link.
Step 2: Click Environment Variables. In the section System Variables, find the PATH environment variable and select it. Click Edit. If the PATH environment variable does not exist, click New.
Step 3: In the Edit System Variable (or New System Variable) window, specify the value of the PATH environment variable (the path to your JDK's bin directory; see the example below). Click OK. Close all remaining windows by clicking OK.
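For example, assuming a default installation location (the exact folder name depends on the JDK version you installed, so treat this as a placeholder), the entry added to PATH looks something like:

C:\Program Files\Java\jdk1.8.0_261\bin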
After the installation process is over and we are done with the environment setup, we need to test whether our compiler and IDE are working or not. For this, we will write a simple output-based program:
public class HelloWorld {
    public static void main(String[] args)
    {
        System.out.println("GeeksforGeeks");
    }
}
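To compile and run the program directly from Sublime Text, one common approach (an optional sketch, not part of the original steps) is to create a custom build system via Tools > Build System > New Build System, save it as JavaC.sublime-build and select it before pressing Ctrl+B. This assumes javac and java are available on PATH:

{
    "shell_cmd": "javac \"$file_name\" && java \"$file_base_name\"",
    "working_dir": "$file_path",
    "selector": "source.java"
}

With this in place, building HelloWorld.java should print GeeksforGeeks in the build panel.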
How To
Java
Java
How to Align Text in HTML?
How to filter object array based on attributes?
How to Install FFmpeg on Windows?
How to integrate Git Bash with Visual Studio Code?
How to Install and Run Apache Kafka on Windows?
Arrays in Java
Split() String method in Java with examples
For-each loop in Java
Arrays.sort() in Java with examples
Reverse a string in Java | [
{
"code": null,
"e": 24648,
"s": 24620,
"text": "\n02 Jul, 2020"
},
{
"code": null,
"e": 25095,
"s": 24648,
"text": "Java is one of the most popular and widely-used object-oriented programming languages. Java syntax is similar to C/C++. But Java does not provide low-level programming functionalities like pointers. Also, Java codes are always written in the form of classes and objects. The Java codes are first compiled into byte code (machine-independent code). Then the byte code is run on Java Virtual Machine (JVM) regardless of the underlying architecture."
},
{
"code": null,
"e": 25527,
"s": 25095,
"text": "Written by a Google engineer sublime text is a cross-platform IDE developed in C++ and Python. It has basic built-in support for Python. Sublime text is fast and you can customize this editor as per your need to create a full-fledged Python development environment. You can install packages such as debugging, auto-completion, code linting, etc. There are also various packages for scientific development, Django, Flask, and so on."
},
{
"code": null,
"e": 25647,
"s": 25527,
"text": "Java can be downloaded from oracle.com. To install Java compiler on Windows, go through How to install Java on Windows?"
},
{
"code": null,
"e": 25811,
"s": 25647,
"text": "Sublime Text 3 can be downloaded from its official site sublimetext.com. To install sublime text 3 on Windows, go through How to install Sublime Text 3 in Windows?"
},
{
"code": null,
"e": 25860,
"s": 25811,
"text": "Step 1: Click the Advanced system settings link."
},
{
"code": null,
"e": 26052,
"s": 25860,
"text": "Step 2: Click Environment Variables. In the section System Variables, find the PATH environment variable and select it. Click Edit. If the PATH environment variable does not exist, click New."
},
{
"code": null,
"e": 26227,
"s": 26052,
"text": "Step 3: In the Edit System Variable (or New System Variable) window, specify the value of the PATH environment variable. Click OK. Close all remaining windows by clicking OK."
},
{
"code": null,
"e": 26425,
"s": 26227,
"text": "After the installation process is over and we are done with the environment setup, we need to test whether our compiler and IDE working or not. For this we will write a simple output based program:"
},
{
"code": "public class HelloWorld { public static void main(String[] args) { System.out.println(\"GeeksforGeeks\"); }}",
"e": 26548,
"s": 26425,
"text": null
},
{
"code": null,
"e": 26555,
"s": 26548,
"text": "How To"
},
{
"code": null,
"e": 26560,
"s": 26555,
"text": "Java"
},
{
"code": null,
"e": 26565,
"s": 26560,
"text": "Java"
},
{
"code": null,
"e": 26663,
"s": 26565,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 26672,
"s": 26663,
"text": "Comments"
},
{
"code": null,
"e": 26685,
"s": 26672,
"text": "Old Comments"
},
{
"code": null,
"e": 26712,
"s": 26685,
"text": "How to Align Text in HTML?"
},
{
"code": null,
"e": 26760,
"s": 26712,
"text": "How to filter object array based on attributes?"
},
{
"code": null,
"e": 26794,
"s": 26760,
"text": "How to Install FFmpeg on Windows?"
},
{
"code": null,
"e": 26845,
"s": 26794,
"text": "How to integrate Git Bash with Visual Studio Code?"
},
{
"code": null,
"e": 26893,
"s": 26845,
"text": "How to Install and Run Apache Kafka on Windows?"
},
{
"code": null,
"e": 26908,
"s": 26893,
"text": "Arrays in Java"
},
{
"code": null,
"e": 26952,
"s": 26908,
"text": "Split() String method in Java with examples"
},
{
"code": null,
"e": 26974,
"s": 26952,
"text": "For-each loop in Java"
},
{
"code": null,
"e": 27010,
"s": 26974,
"text": "Arrays.sort() in Java with examples"
}
]
|
How to create a radio button similar to toggle button using Bootstrap ? - GeeksforGeeks | 11 Jun, 2020
Toggle Buttons: Buttons that can change from one state to another, i.e. that can be toggled from the on state to the off state or vice versa, are called toggle buttons. For example:
A particular switch in our house can either be on or off. This is a very good example of a real life toggle switch.
The WiFi or Bluetooth option in our phone is another example, that can either be on or off.
Radio Button: As the name suggests, the idea comes from the buttons on a radio, where for the first station we select the first button, for the second station we select the second button, and so on. It is like a multiple-choice question where only one option can be active at a time. Here we select from a list of options. For example:
Buttons on an FM radio.
Multiple choice questions in an exam.
Example: Create a radio button using Bootstrap.
html
<!DOCTYPE html>
<html lang="en">

<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Radio Button</title>
</head>

<body>
    <div class="radio">
        <div class="btn-group btn-group-toggle" data-toggle="buttons">
            <label class="btn btn-primary">
                <input type="radio" name="button" id="button1"
                    autocomplete="off" checked>
                Radio button 1
            </label>
            <label class="btn btn-primary active">
                <input type="radio" name="button" id="button2"
                    autocomplete="off">
                Radio button 2
            </label>
            <label class="btn btn-primary">
                <input type="radio" name="button" id="button3"
                    autocomplete="off">
                Radio button 3
            </label>
            <label class="btn btn-primary">
                <input type="radio" name="button" id="button4"
                    autocomplete="off">
                Radio button 4
            </label>
            <label class="btn btn-primary">
                <input type="radio" name="button" id="button5"
                    autocomplete="off">
                Radio button 5
            </label>
        </div>
    </div>
</body>

</html>
Important points:
We have to add a checked attribute to any one of the radio buttons such that it is pre-selected when the page loads. It is a boolean attribute.
The same name attribute has to be given to all the radio inputs in the set.
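As a side note (not part of the original example), if you need to read which option the user picked, a small jQuery helper along these lines can be added to the page; it assumes jQuery is loaded, as in the full example further below.

<script>
    // Hypothetical helper: look up the currently selected radio input
    // in the "button" group and log its id.
    function getSelectedButton() {
        var selectedId = $('input[name="button"]:checked').attr('id');
        console.log('Selected: ' + selectedId);
    }
</script>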
Example 2: The following code will help us to understand the difference (in code) to design a toggle and a radio button.
html
<!DOCTYPE html>
<html>

<head>
    <!-- Latest compiled and minified CSS -->
    <link rel="stylesheet" href=
"https://maxcdn.bootstrapcdn.com/bootstrap/4.5.0/css/bootstrap.min.css">

    <!-- jQuery library -->
    <script src=
"https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js">
    </script>

    <!-- Popper JS -->
    <script src=
"https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.16.0/umd/popper.min.js">
    </script>

    <!-- Latest compiled JavaScript -->
    <script src=
"https://maxcdn.bootstrapcdn.com/bootstrap/4.5.0/js/bootstrap.min.js">
    </script>

    <meta name="viewport" content="width=device-width, initial-scale=1">
    <link rel="stylesheet" type="text/css" href="style.css">

    <link href=
"https://stackpath.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.min.css"
        rel="stylesheet">
    <link href=
"https://fonts.googleapis.com/css2?family=Lato:ital,wght@0,100;0,300;0,400;1,300&display=swap"
        rel="stylesheet">

    <script src=
"https://cdn.jsdelivr.net/npm/[email protected]/combined/js/gijgo.min.js"
        type="text/javascript">
    </script>
    <!-- Note: the original linked the gijgo .js file as a stylesheet;
         the CSS build is the intended file here. -->
    <link href=
"https://cdn.jsdelivr.net/npm/[email protected]/combined/css/gijgo.min.css"
        rel="stylesheet" type="text/css">

    <link rel="stylesheet" type="text/css" href="css/lightbox.min.css">
    <script type="text/javascript" src="js/lightbox-plus-jquery.min.js">
    </script>

    <title>Buttons</title>

    <style>
        .toggle,
        .radio {
            margin: 20px;
        }
    </style>
</head>

<body>
    <div class="toggle">
        <button type="button" class="btn btn-warning"
            data-toggle="button" autocomplete="off">
            Toggle Button
        </button>
    </div>

    <div class="radio">
        <div class="btn-group btn-group-toggle" data-toggle="buttons">
            <label class="btn btn-primary">
                <input type="radio" name="button" id="button1"
                    autocomplete="off" checked>
                Radio button 1
            </label>
            <label class="btn btn-primary active">
                <input type="radio" name="button" id="button2"
                    autocomplete="off">
                Radio button 2
            </label>
            <label class="btn btn-primary">
                <input type="radio" name="button" id="button3"
                    autocomplete="off">
                Radio button 3
            </label>
            <label class="btn btn-primary">
                <input type="radio" name="button" id="button4"
                    autocomplete="off">
                Radio button 4
            </label>
            <label class="btn btn-primary">
                <input type="radio" name="button" id="button5"
                    autocomplete="off">
                Radio button 5
            </label>
        </div>
    </div>
</body>

</html>
Output:
Bootstrap-4
Bootstrap-Misc
HTML-Misc
Picked
Bootstrap
HTML
Web Technologies
Web technologies Questions
HTML
How to pass data into a bootstrap modal?
How to Show Images on Click using HTML ?
How to set Bootstrap Timepicker using datetimepicker library ?
How to change the background color of the active nav-item?
How to Use Bootstrap with React?
Top 10 Projects For Beginners To Practice HTML and CSS Skills
How to insert spaces/tabs in text using HTML/CSS?
How to set the default value for an HTML <select> element ?
How to update Node.js and NPM to next version ?
How to set input type date in dd-mm-yyyy format using HTML ? | [
{
"code": null,
"e": 24838,
"s": 24810,
"text": "\n11 Jun, 2020"
},
{
"code": null,
"e": 25017,
"s": 24838,
"text": "Toggle Buttons: The buttons that can change from one state to another, i.e. which can be toggled from on state to off state or vice-versa are called toggle buttons. For example: "
},
{
"code": null,
"e": 25134,
"s": 25017,
"text": "A particular switch in our house can either be on or off. This is a very good example of a real life toggle switch. "
},
{
"code": null,
"e": 25227,
"s": 25134,
"text": "The WiFi or Bluetooth option in our phone is another example, that can either be on or off. "
},
{
"code": null,
"e": 25565,
"s": 25227,
"text": "Radio Button: As the name suggests, it is the concept of the buttons in a radio, where for the first station, we select the first button, for the second station we select the second button and so on. It is like a multiple-choice question where at a time only one button will be active. Here we select from a list of options. For example:"
},
{
"code": null,
"e": 25589,
"s": 25565,
"text": "Buttons in a FM Radio. "
},
{
"code": null,
"e": 25628,
"s": 25589,
"text": "Multiple choice questions in an exam. "
},
{
"code": null,
"e": 25677,
"s": 25628,
"text": "Example: Create a radio button using Bootstrap. "
},
{
"code": null,
"e": 25682,
"s": 25677,
"text": "html"
},
{
"code": "<!DOCTYPE html><html lang=\"en\"> <head> <meta charset=\"UTF-8\"> <meta name=\"viewport\" content= \"width=device-width, initial-scale=1.0\"> <title>Radio Button</title></head> <body> <div class=\"radio\"> <div class=\"btn-group btn-group-toggle\" data-toggle=\"buttons\"> <label class=\"btn btn-primary\"> <input type=\"radio\" name=\"button\" id=\"button1\" autocomplete=\"off\" checked> Radio button 1 </label> <label class=\"btn btn-primary active\"> <input type=\"radio\" name=\"button\" id=\"button2\" autocomplete=\"off\"> Radio button 2 </label> <label class=\"btn btn-primary\"> <input type=\"radio\" name=\"button\" id=\"button3\" autocomplete=\"off\"> Radio button 3 </label> <label class=\"btn btn-primary\"> <input type=\"radio\" name=\"button\" id=\"button4\" autocomplete=\"off\"> Radio button 4 </label> <label class=\"btn btn-primary\"> <input type=\"radio\" name=\"button\" id=\"button5\" autocomplete=\"off\"> Radio button 5 </label> </div> </div></body> </html>",
"e": 27010,
"s": 25682,
"text": null
},
{
"code": null,
"e": 27028,
"s": 27010,
"text": "Important points:"
},
{
"code": null,
"e": 27172,
"s": 27028,
"text": "We have to add a checked attribute to any one of the radio buttons such that it is pre-selected when the page loads. It is a boolean attribute."
},
{
"code": null,
"e": 27231,
"s": 27172,
"text": "The same name has to be given to all the set of elements. "
},
{
"code": null,
"e": 27353,
"s": 27231,
"text": "Example 2: The following code will help us to understand the difference (in code) to design a toggle and a radio button. "
},
{
"code": null,
"e": 27358,
"s": 27353,
"text": "html"
},
{
"code": "<!DOCTYPE html><html> <head> <!-- Latest compiled and minified CSS --> <link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.5.0/css/bootstrap.min.css\"> <!-- jQuery library --> <script src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js\"> </script> <!-- Popper JS --> <script src=\"https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.16.0/umd/popper.min.js\"> </script> <!-- Latest compiled JavaScript --> <script src=\"https://maxcdn.bootstrapcdn.com/bootstrap/4.5.0/js/bootstrap.min.js\"> </script> <meta name=\"viewport\" content= \"width=device-width, initial-scale=1\"> <link rel=\"stylesheet\" type=\"text/css\" href=\"style.css\"> <link href=\"https://stackpath.bootstrapcdn.com/font-awesome/4.7.0/css/font-awesome.min.css\" rel=\"stylesheet\"> <link href=\"https://fonts.googleapis.com/css2?family=Lato:ital,wght@0,100;0,300;0,400;1,300&display=swap\" rel=\"stylesheet\"> <script src=\"https://cdn.jsdelivr.net/npm/[email protected]/combined/js/gijgo.min.js\" type=\"text/javascript\"> </script> <link href=\"https://cdn.jsdelivr.net/npm/[email protected]/combined/css/gijgo.min.js\" rel=\"stylesheet\" type=\"text/css\"> <link rel=\"stylesheet\" type=\"text/css\" href=\"css/lightbox.min.css\"> <script type=\"text/javascript\" src= \"js/lightbox-plus-jquery.min.js\"> </script> <title>Buttons</title> <style> .toggle, .radio { margin: 20px; } </style></head> <body> <div class=\"toggle\"> <button type=\"button\" class=\"btn btn-warning\" data-toggle=\"button\" autocomplete=\"off\"> Toggle Button </button> </div> <div class=\"radio\"> <div class=\"btn-group btn-group-toggle\" data-toggle=\"buttons\"> <label class=\"btn btn-primary\"> <input type=\"radio\" name=\"button\" id=\"button1\" autocomplete=\"off\" checked> Radio button 1 </label> <label class=\"btn btn-primary active\"> <input type=\"radio\" name=\"button\" id=\"button2\" autocomplete=\"off\"> Radio button 2 </label> <label class=\"btn btn-primary\"> <input type=\"radio\" name=\"button\" id=\"button3\" autocomplete=\"off\"> Radio button 3 </label> <label class=\"btn btn-primary\"> <input type=\"radio\" name=\"button\" id=\"button4\" autocomplete=\"off\"> Radio button 4 </label> <label class=\"btn btn-primary\"> <input type=\"radio\" name=\"button\" id=\"button5\" autocomplete=\"off\"> Radio button 5 </label> </div> </div></body> </html>",
"e": 30293,
"s": 27358,
"text": null
},
{
"code": null,
"e": 30301,
"s": 30293,
"text": "Output:"
},
{
"code": null,
"e": 30438,
"s": 30301,
"text": "Attention reader! Don’t stop learning now. Get hold of all the important HTML concepts with the Web Design for Beginners | HTML course."
},
{
"code": null,
"e": 30450,
"s": 30438,
"text": "Bootstrap-4"
},
{
"code": null,
"e": 30465,
"s": 30450,
"text": "Bootstrap-Misc"
},
{
"code": null,
"e": 30475,
"s": 30465,
"text": "HTML-Misc"
},
{
"code": null,
"e": 30482,
"s": 30475,
"text": "Picked"
},
{
"code": null,
"e": 30492,
"s": 30482,
"text": "Bootstrap"
},
{
"code": null,
"e": 30497,
"s": 30492,
"text": "HTML"
},
{
"code": null,
"e": 30514,
"s": 30497,
"text": "Web Technologies"
},
{
"code": null,
"e": 30541,
"s": 30514,
"text": "Web technologies Questions"
},
{
"code": null,
"e": 30546,
"s": 30541,
"text": "HTML"
},
{
"code": null,
"e": 30644,
"s": 30546,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 30685,
"s": 30644,
"text": "How to pass data into a bootstrap modal?"
},
{
"code": null,
"e": 30726,
"s": 30685,
"text": "How to Show Images on Click using HTML ?"
},
{
"code": null,
"e": 30789,
"s": 30726,
"text": "How to set Bootstrap Timepicker using datetimepicker library ?"
},
{
"code": null,
"e": 30848,
"s": 30789,
"text": "How to change the background color of the active nav-item?"
},
{
"code": null,
"e": 30881,
"s": 30848,
"text": "How to Use Bootstrap with React?"
},
{
"code": null,
"e": 30943,
"s": 30881,
"text": "Top 10 Projects For Beginners To Practice HTML and CSS Skills"
},
{
"code": null,
"e": 30993,
"s": 30943,
"text": "How to insert spaces/tabs in text using HTML/CSS?"
},
{
"code": null,
"e": 31053,
"s": 30993,
"text": "How to set the default value for an HTML <select> element ?"
},
{
"code": null,
"e": 31101,
"s": 31053,
"text": "How to update Node.js and NPM to next version ?"
}
]
|
Intro to Applied Topological Data Analysis | by Ryan Duve | Towards Data Science | Topological data analysis, or TDA, is a set of approaches providing additional insight into datasets. It augments other forms of analysis, like statistical and geometric approaches, and is useful to any data scientist that wants a more complete understanding of their data.
This article paints a picture of the utility of the topological approach, outlines a prominent TDA algorithm called "mapper", and describes how to use a toolkit called Scikit-TDA for application to natural language processing. Finally, we share the specifics of applying all of the above to the problem of training data collection for a machine learning pipeline involving documents from the financial services industry.
Topology is the math concerning continuous objects. Not sizes and shapes, but just continuity. It is about how things are connected and where the gaps are.
A simple analogy demonstrating why topological data inspection is so useful is putting up holiday lights. When tasked with installing holiday lights on a house, it is natural to first think about the problem geometrically and statistically.
Geometry answers questions like how long is the house, how high is the roof, where are the outlets and how many lights are needed. Statistics answers questions like how much current will the lights draw, how much do they cost, and how many hours a day will they be lit on average.
After installation comes the big moment to plug in the lights for the first time and... nothing. Suddenly, the lengths and dimensions and colors don’t matter at all. How much they cost and how heavy they are do not factor into why they are not turning on. Instead, we jump to troubleshooting mode and think about strings of lights in a totally different way. We think of them as circuits.
If only a few bulbs don’t come on, the string is in parallel and we just have to replace those broken bulbs. If none of the lights come on, they might all be in series and there could just be a handful of broken bulbs we have to find. Or we could have series-strings plugged into parallel-strings together in series. Or parallel! In any event, we are now positioned to start debugging with the above picture in mind, keeping in mind the schematics look nothing like the actual strings of lights on the house.
Looking at a topological visualization of a dataset is like looking at the circuit diagram of holiday lights. We intentionally ignore the spatial features, the giant lengths and short distances between points, and instead focus on only the connectivity of the data. We ask what is connected, what is separate, where are the gaps. This gives us new information to answer tough questions. It is important to remember TDA does not replace geometric and statistic analyses, but complements them the way a circuit diagram complements a tape measure when stringing up holiday lights on a house.
There are many excellent introductory videos explaining the mapper algorithm invented by Carlsson, Memoli and Singh in 2007. Instead of trying to replicate those resources, a skeletal overview is as follows:
Start with data in a high dimensional space.
Create overlapping bins of data.
Within each bin, cluster the data.
For each cluster, create a node in a graph.
Connect the nodes that share data points.
In real data analysis, the original data is high dimensional and cannot be visualized the way the initial data points are here. Otherwise, we would be done! The resulting graph, expressed as nodes and edges, allows us to see topological relationships without concern for the geometric details of the initial data vectors.
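To make those steps concrete, here is a small illustrative sketch of the idea (not the real mapper implementation) that uses a one-dimensional projection as the lens, overlapping intervals as bins, and DBSCAN for the within-bin clustering. The function name and parameter values are made up for the example:

import numpy as np
from sklearn.cluster import DBSCAN

def toy_mapper(data, lens, n_bins=10, overlap=0.3, eps=0.5):
    # 1. Start from high dimensional data plus a 1-D projection (the lens)
    lo, hi = lens.min(), lens.max()
    width = (hi - lo) / n_bins
    nodes, edges, node_id = {}, set(), 0
    for i in range(n_bins):
        # 2. Overlapping bins along the lens
        start = lo + i * width - overlap * width
        end = lo + (i + 1) * width + overlap * width
        idx = np.where((lens >= start) & (lens <= end))[0]
        if len(idx) == 0:
            continue
        # 3. Cluster the points that fall in this bin
        labels = DBSCAN(eps=eps).fit_predict(data[idx])
        for lab in set(labels) - {-1}:
            # 4. Each cluster becomes a node holding its member indices
            nodes[node_id] = set(idx[labels == lab])
            node_id += 1
    # 5. Connect nodes that share data points
    for a in nodes:
        for b in nodes:
            if a < b and nodes[a] & nodes[b]:
                edges.add((a, b))
    return nodes, edges

X = np.random.default_rng(0).normal(size=(200, 5))
nodes, edges = toy_mapper(X, lens=X[:, 0])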
Answer: Scikit-TDA.
Scikit-TDA is a Python package that provides standard interfaces for previously independent algorithms. Like scikit-learn, methods in the family of .fit() and .fit_transform() are used to execute algorithms and transform data. An example looks like
import kmapper as km

mapper = km.KeplerMapper(verbose=2)
projected_data = mapper.fit_transform(data, projection=TSNE())
graph = mapper.map(
    projected_data,
    clusterer=DBSCAN(),
    cover=km.Cover(35, 0.4),
)
where TSNE and DBSCAN can be swapped with appropriate scikit-like objects with .fit() and .transform() methods of their own. For more detailed examples, see the Scikit-TDA example page.
Creating vectors out of text is important for machine learning models, which only know how to operate on lists of numbers. One of the simplest and easiest approaches is the bag of words model, where every unique word in a dataset gets its own direction in some high dimensional space and a document is represented by the coordinates of the words it contains.
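As a quick, self-contained illustration of the idea (using scikit-learn's CountVectorizer and a few made-up sentences, not the actual financial documents), each document becomes one row of word counts:

from sklearn.feature_extraction.text import CountVectorizer

docs = ["the lights are on", "the lights are off", "circuits and lights"]
vectorizer = CountVectorizer()
term_count_matrix = vectorizer.fit_transform(docs)

print(sorted(vectorizer.vocabulary_))   # one dimension per unique word
print(term_count_matrix.toarray())      # one count vector per document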
Following the Scikit-TDA’s KepplerMapper example, we vectorize the documents’ text using TFIDF scaling, reduce down to two dimensions and use a clustering algorithm to build up the graph.
mapper = km.KeplerMapper(verbose=2)
projected_X = mapper.fit_transform(
    term_count_matrix,
    projection=[TfidfTransformer(sublinear_tf=True),
                TruncatedSVD(n_components=100, random_state=0),
                Isomap(n_components=2, n_jobs=-1)],
    scaler=[None, None, MinMaxScaler()])
We made a few tweaks from the example in the documentation. Since we are dealing with so many large files, we use a custom parallel term counter with bounded memory to build up a term matrix, and then use a TFIDF transformer in the pipeline. We also experimented with different forms of dimensionality reduction, such as PCA, to build the projected data.
Finally, to build the graph:
graph = mapper.map(
    projected_X,
    clusterer=AgglomerativeClustering(n_clusters=3,
                                      linkage="complete",
                                      affinity="euclidean"),
    cover=km.Cover(n_cubes=12, perc_overlap=0.33))
Again, we experimented with different clustering algorithms such as DBSCAN and HDBSCAN as well as changing the affinity and overlap parameters.
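For instance, a DBSCAN-based variant of the same call might look like the following; the eps, min_samples and cover values here are placeholders, since useful settings depend heavily on the projection:

from sklearn.cluster import DBSCAN

graph = mapper.map(
    projected_X,
    clusterer=DBSCAN(eps=0.1, min_samples=5),
    cover=km.Cover(n_cubes=15, perc_overlap=0.4))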
Financial documents are at the heart of many workflows at S&P Global. Internally, machine learning classification models are used to prioritize the flow of documents for many use cases. Training data are regularly provided by content experts within the company to ensure models are operating at peak performance.
A simplification of the training data collection process is as follows. For a given workflow, a content expert provides a “positive” or “negative” tag for each ingested document. At training time, we retrieve text for each document and train a classifier to attempt to linearly separate documents based on a model confidence score. When building a new model, we might see considerable (undesirable) overlap between positive and negative documents.
For a new workflow, a reasonable explanation is a problem with the training data labeling. Looking at files the model got wrong, somewhere around the middle, is a good place to start. For a sense of scale, there are about 9000 documents between the scores 0.0 and 1.0 on the x-axis.
We now use TDA to group the documents based on their topological properties and look for patterns in classification errors. We want to learn if documents that confuse the model are topologically connected. If so, we can use clustered nodes with high confusion as candidates to send back to content experts for additional consideration.
Applying TDA to the documents as described in the previous section, we end up with nodes and edges colored by average document confusion.
In the image, each circle is a cluster of documents. The smallest circles represent only a few documents, while the biggest represent several thousand documents. The brightest yellow balls contain only documents classified correctly by the model with confidence, while the deep purple contain only incorrectly classified documents. The spectral range from yellow to purple is the continuum of “average correctness” for each circle.
Incorrectly classified document groups, the blue and purple nodes, seem to appear mostly in the north and northwest regions of the image. We can select all documents from those nodes, look up their model confidence scores and see where they are on the model confidence curve.
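With KeplerMapper, the returned graph is a plain dictionary whose "nodes" entry maps each cluster to the row indices of its member documents, so pulling out the documents that belong to low-correctness nodes can be done roughly like this (the correct array and the 0.5 threshold are stand-ins for our internal model outputs):

import numpy as np

# correct[i] is 1 if document i was classified correctly, else 0
# (a stand-in for the real model outputs).
node_members = graph["nodes"]            # node id -> list of row indices
avg_correct = {node: np.mean([correct[i] for i in members])
               for node, members in node_members.items()}

# All documents that belong to at least one low-correctness node
flagged = sorted({i for node, members in node_members.items()
                  if avg_correct[node] < 0.5 for i in members})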
Each green vertical bar represents one document from the blue/purple nodes of the TDA output, and a darker green section indicates a higher concentration of documents at that model confidence score. There are a little under 500 documents flagged by TDA, mostly between scores of 0.0 and 1.0 on the x-axis.
Before the TDA analysis, we had about 9000 documents to start looking at to determine if something was wrong with our data collection. That is now shrunk down to less than 500. This is not to say we could not have achieved gains with statistical or geometric methods, but we feel confident the documents we are sending back are related. With any luck, our business partners will find a pattern in the documents we send and correct a systematic error in training data collection, allowing for a better second round of model training.
If statistical methods are a data scientist’s measuring tape and making plots is the ladder, then TDA is the multimeter. It provides an orthogonal way to look at data by focusing on connections and gaps instead of size and shape. The linked resources provide a data scientist just getting started with TDA to get up to speed and be productive in a short time.
Scikit TDA | SciPy 2019 | Nathaniel Saul
“Topological Data Analysis for the Working Data Scientist” — Anthony Bak
TDA Mapper Part 2, Isabel K. Darcy
The mapper algorithm and Reeb graphs
An Introduction to Mapper, Bala Krishnamoorthy (2/11/2020)
Sliding Windows And Persistence, Perea and Harer
Scikit-TDA Github
Scikit-TDA documentation
Topological Data Analysis of Financial Time Series, Gidea and Katz
UMAP, McInnes, Healy and Melville
Extracting insights from the shape of complex data using topology, Carlsson et al
Topological Methods for the Analysis of High Dimensional Data Sets and 3D Object Recognition, Carlsson, Memoli and Singh
KeplerMapper 2.0.0 documentation
Applied Topology
An Introduction to a New Text Classification and Visualization for NLP Using TDA, Elyasi and Moghadam
Ryan Duve is a Data Scientist at S&P Global. He’s also a physicist, Linux hacker and a bad barista. | [
{
"code": null,
"e": 446,
"s": 172,
"text": "Topological data analysis, or TDA, is a set of approaches providing additional insight into datasets. It augments other forms of analysis, like statistical and geometric approaches, and is useful to any data scientist that wants a more complete understanding of their data."
},
{
"code": null,
"e": 860,
"s": 446,
"text": "This article paints a picture of the utility of the topological approach, outlines a prominent TDA algorithm called “mapper”, and describes how to use a toolkit called Scikit-TDA for application to natural language processing. Finally, we share a specifics of applying all the above to the problem of training data collection for a machine learning pipeline involve documents from the financial services industry."
},
{
"code": null,
"e": 1016,
"s": 860,
"text": "Topology is the math concerning continuous objects. Not sizes and shapes, but just continuity. It is about how things are connected and where the gaps are."
},
{
"code": null,
"e": 1257,
"s": 1016,
"text": "A simple analogy demonstrating why topological data inspection is so useful is putting up holiday lights. When tasked with installing holiday lights on a house, it is natural to first think about the problem geometrically and statistically."
},
{
"code": null,
"e": 1538,
"s": 1257,
"text": "Geometry answers questions like how long is the house, how high is the roof, where are the outlets and how many lights are needed. Statistics answers questions like how much current will the lights draw, how much do they cost, and how many hours a day will they be lit on average."
},
{
"code": null,
"e": 1927,
"s": 1538,
"text": "After installation comes the big moment to plug in the lights for the first time and... nothing. Suddenly, the lengths and dimensions and colors don’t matter at all. How much they cost and how heavy they are do not factor into why they are not turning on. Instead, we jump to troubleshooting mode and think about strings of lights in a totally different way. We think of them as circuits."
},
{
"code": null,
"e": 2436,
"s": 1927,
"text": "If only a few bulbs don’t come on, the string is in parallel and we just have to replace those broken bulbs. If none of the lights come on, they might all be in series and there could just be a handful of broken bulbs we have to find. Or we could have series-strings plugged into parallel-strings together in series. Or parallel! In any event, we are now positioned to start debugging with the above picture in mind, keeping in mind the schematics look nothing like the actual strings of lights on the house."
},
{
"code": null,
"e": 3025,
"s": 2436,
"text": "Looking at a topological visualization of a dataset is like looking at the circuit diagram of holiday lights. We intentionally ignore the spatial features, the giant lengths and short distances between points, and instead focus on only the connectivity of the data. We ask what is connected, what is separate, where are the gaps. This gives us new information to answer tough questions. It is important to remember TDA does not replace geometric and statistic analyses, but complements them the way a circuit diagram complements a tape measure when stringing up holiday lights on a house."
},
{
"code": null,
"e": 3233,
"s": 3025,
"text": "There are many excellent introductory videos explaining the mapper algorithm invented by Carlsson, Memoli and Singh in 2007. Instead of trying to replicate those resources, a skeletal overview is as follows:"
},
{
"code": null,
"e": 3428,
"s": 3233,
"text": "Start with data in a high dimensional space.Create overlapping bins of data.Within each bin, cluster the data.For each cluster, create a node in a graph.Connect the nodes that share data points."
},
{
"code": null,
"e": 3473,
"s": 3428,
"text": "Start with data in a high dimensional space."
},
{
"code": null,
"e": 3506,
"s": 3473,
"text": "Create overlapping bins of data."
},
{
"code": null,
"e": 3541,
"s": 3506,
"text": "Within each bin, cluster the data."
},
{
"code": null,
"e": 3585,
"s": 3541,
"text": "For each cluster, create a node in a graph."
},
{
"code": null,
"e": 3627,
"s": 3585,
"text": "Connect the nodes that share data points."
},
{
"code": null,
"e": 3949,
"s": 3627,
"text": "In real data analysis, the original data is high dimensional and cannot be visualized the way the initial data points are here. Otherwise, we would be done! The resulting graph, expressed as nodes and edges, allows us to see topological relationships without concern for the geometric details of the initial data vectors."
},
{
"code": null,
"e": 3969,
"s": 3949,
"text": "Answer: Scikit-TDA."
},
{
"code": null,
"e": 4219,
"s": 3969,
"text": "Scitkit-TDA is a Python package that provides standard interfaces for previously independent algorithms. Like scikit-learn, methods in the family of .fit() and .fit_transform() are used to execute algorithms and transform data. An example looks like"
},
{
"code": null,
"e": 4427,
"s": 4219,
"text": "import kmapper as kmmapper = km.KeplerMapper(verbose=2)projected_data = mapper.fit_transform(data, projection=TSNE())graph = mapper.map( projected_data, clusterer=DBSCAN(), cover=km.Cover(35, 0.4),)"
},
{
"code": null,
"e": 4613,
"s": 4427,
"text": "where TSNE and DBSCAN can be swapped with appropriate scikit-like objects with .fit() and .transform() methods of their own. For more detailed examples, see the Scikit-TDA example page."
},
{
"code": null,
"e": 4972,
"s": 4613,
"text": "Creating vectors out of text is important for machine learning models, which only know how to operate on lists of numbers. One of the simplest and easiest approaches is the bag of words model, where every unique word in a dataset gets its own direction in some high dimensional space and a document is represented by the coordinates of the words it contains."
},
{
"code": null,
"e": 5160,
"s": 4972,
"text": "Following the Scikit-TDA’s KepplerMapper example, we vectorize the documents’ text using TFIDF scaling, reduce down to two dimensions and use a clustering algorithm to build up the graph."
},
{
"code": null,
"e": 5455,
"s": 5160,
"text": "mapper = km.KeplerMapper(verbose=2)projected_X = mapper.fit_transform(term_count_matrix, projection=[TfidfTransformer(sublinear_tf=True), TruncatedSVD(n_components=100, random_state=0), Isomap(n_components=2, n_jobs=-1)], scaler=[None, None, MinMaxScaler()])"
},
{
"code": null,
"e": 5810,
"s": 5455,
"text": "We made a few tweaks from the example in the documentation. Since we are dealing with so many large files, we use a custom parallel term counter with bounded memory to build up a term matrix, and then use a TFIDF transformer in the pipeline. We also experimented with different forms of dimensionality reduction, such as PCA, to build the projected data."
},
{
"code": null,
"e": 5839,
"s": 5810,
"text": "Finally, to build the graph:"
},
{
"code": null,
"e": 6037,
"s": 5839,
"text": "graph = mapper.map(projected_X, clusterer=AgglomerativeClustering( n_clusters=3, linkage=\"complete\", affinity=\"euclidean\"), cover=km.Cover(n_cubes=12, perc_overlap=0.33))"
},
{
"code": null,
"e": 6181,
"s": 6037,
"text": "Again, we experimented with different clustering algorithms such as DBSCAN and HDBSCAN as well as changing the affinity and overlap parameters."
},
{
"code": null,
"e": 6494,
"s": 6181,
"text": "Financial documents are at the heart of many workflows at S&P Global. Internally, machine learning classification models are used to prioritize the flow of documents for many use cases. Training data are regularly provided by content experts within the company to ensure models are operating at peak performance."
},
{
"code": null,
"e": 6942,
"s": 6494,
"text": "A simplification of the training data collection process is as follows. For a given workflow, a content expert provides a “positive” or “negative” tag for each ingested document. At training time, we retrieve text for each document and train a classifier to attempt to linearly separate documents based on a model confidence score. When building a new model, we might see considerable (undesirable) overlap between positive and negative documents."
},
{
"code": null,
"e": 7225,
"s": 6942,
"text": "For a new workflow, a reasonable explanation is a problem with the training data labeling. Looking at files the model got wrong, somewhere around the middle, is a good place to start. For a sense of scale, there are about 9000 documents between the scores 0.0 and 1.0 on the x-axis."
},
{
"code": null,
"e": 7561,
"s": 7225,
"text": "We now use TDA to group the documents based on their topological properties and look for patterns in classification errors. We want to learn if documents that confuse the model are topologically connected. If so, we can use clustered nodes with high confusion as candidates to send back to content experts for additional consideration."
},
{
"code": null,
"e": 7699,
"s": 7561,
"text": "Applying TDA to the documents as described in the previous section, we end up with nodes and edges colored by average document confusion."
},
{
"code": null,
"e": 8131,
"s": 7699,
"text": "In the image, each circle is a cluster of documents. The smallest circles represent only a few documents, while the biggest represent several thousand documents. The brightest yellow balls contain only documents classified correctly by the model with confidence, while the deep purple contain only incorrectly classified documents. The spectral range from yellow to purple is the continuum of “average correctness” for each circle."
},
{
"code": null,
"e": 8407,
"s": 8131,
"text": "Incorrectly classified document groups, the blue and purple nodes, seem to appear mostly in the north and northwest regions of the image. We can select all documents from those nodes, look up their model confidence scores and see where they are on the model confidence curve."
},
{
"code": null,
"e": 8713,
"s": 8407,
"text": "Each green vertical bar represents one document from the blue/purple nodes of the TDA output, and a darker green section indicates a higher concentration of documents at that model confidence score. There are a little under 500 documents flagged by TDA, mostly between scores of 0.0 and 1.0 on the x-axis."
},
{
"code": null,
"e": 9246,
"s": 8713,
"text": "Before the TDA analysis, we had about 9000 documents to start looking at to determine if something was wrong with our data collection. That is now shrunk down to less than 500. This is not to say we could not have achieved gains with statistical or geometric methods, but we feel confident the documents we are sending back are related. With any luck, our business partners will find a pattern in the documents we send and correct a systematic error in training data collection, allowing for a better second round of model training."
},
{
"code": null,
"e": 9606,
"s": 9246,
"text": "If statistical methods are a data scientist’s measuring tape and making plots is the ladder, then TDA is the multimeter. It provides an orthogonal way to look at data by focusing on connections and gaps instead of size and shape. The linked resources provide a data scientist just getting started with TDA to get up to speed and be productive in a short time."
},
{
"code": null,
"e": 9646,
"s": 9606,
"text": "Scikit TDA| SciPy 2019 | Nathaniel Saul"
},
{
"code": null,
"e": 9719,
"s": 9646,
"text": "“Topological Data Analysis for the Working Data Scientist” — Anthony Bak"
},
{
"code": null,
"e": 9754,
"s": 9719,
"text": "TDA Mapper Part 2, Isabel K. Darcy"
},
{
"code": null,
"e": 9791,
"s": 9754,
"text": "The mapper algorithm and Reeb graphs"
},
{
"code": null,
"e": 9850,
"s": 9791,
"text": "An Introduction to Mapper, Bala Krishnamoorthy (2/11/2020)"
},
{
"code": null,
"e": 9899,
"s": 9850,
"text": "Sliding Windows And Persistence, Perea and Harer"
},
{
"code": null,
"e": 9917,
"s": 9899,
"text": "Scikit-TDA Github"
},
{
"code": null,
"e": 9942,
"s": 9917,
"text": "Scikit-TDA documentation"
},
{
"code": null,
"e": 10009,
"s": 9942,
"text": "Topological Data Analysis of Financial Time Series, Gidea and Katz"
},
{
"code": null,
"e": 10043,
"s": 10009,
"text": "UMAP, McInnes, Healy and Melville"
},
{
"code": null,
"e": 10125,
"s": 10043,
"text": "Extracting insights from the shape of complex data using topology, Carlsson et al"
},
{
"code": null,
"e": 10246,
"s": 10125,
"text": "Topological Methods for the Analysis of High Dimensional Data Sets and 3D Object Recognition, Carlsson, Memoli and Singh"
},
{
"code": null,
"e": 10279,
"s": 10246,
"text": "KeplerMapper 2.0.0 documentation"
},
{
"code": null,
"e": 10296,
"s": 10279,
"text": "Applied Topology"
},
{
"code": null,
"e": 10398,
"s": 10296,
"text": "An Introduction to a New Text Classification and Visualization for NLP Using TDA, Elyasi and Moghadam"
}
]
|
Simplified Fractions in C++ | Suppose we have an integer n, we have to find a list of all simplified fractions between 0 and 1 (exclusive) such that the denominator <= n. Here the fractions can be in any order.
So, if the input is like n = 4, then the output will be ["1/2","1/3","1/4","2/3","3/4"] as "2/4" is not a simplified fraction because it can be simplified to "1/2".
To solve this, we will follow these steps −
Define an array ret
for initialize i := 2, when i <= n, update (increase i by 1), do −
   for initialize j := 1, when j < i, update (increase j by 1), do −
      c := gcd of i and j
      a := j / c
      b := i / c
      insert (a as string concatenate "/" concatenate b as string) at the end of ret
return an array of all unique elements present in ret
Let us see the following implementation to get a better understanding −
Live Demo
#include <bits/stdc++.h>
using namespace std;
void print_vector(vector<string> v){
cout << "[";
for(int i = 0; i<v.size(); i++){
cout << v[i] << ", ";
}
cout << "]"<<endl;
}
class Solution {
public:
vector<string> simplifiedFractions(int n) {
vector<string> ret;
for (int i = 2; i <= n; i++) {
for (int j = 1; j < i; j++) {
int c = __gcd(i, j);
int a = j / c;
int b = i / c;
ret.push_back(to_string(a) + "/" + to_string(b));
}
}
set<string> s(ret.begin(), ret.end());
return vector<string>(s.begin(), s.end());
}
};
main(){
Solution ob;
print_vector(ob.simplifiedFractions(4));
}
4
[1/2, 1/3, 1/4, 2/3, 3/4, ] | [
{
"code": null,
"e": 1243,
"s": 1062,
"text": "Suppose we have an integer n, we have to find a list of all simplified fractions between 0 and 1 (exclusive) such that the denominator <= n. Here the fractions can be in any order."
},
{
"code": null,
"e": 1408,
"s": 1243,
"text": "So, if the input is like n = 4, then the output will be [\"1/2\",\"1/3\",\"1/4\",\"2/3\",\"3/4\"] as \"2/4\" is not a simplified fraction because it can be simplified to \"1/2\"."
},
{
"code": null,
"e": 1452,
"s": 1408,
"text": "To solve this, we will follow these steps −"
},
{
"code": null,
"e": 1472,
"s": 1452,
"text": "Define an array ret"
},
{
"code": null,
"e": 1492,
"s": 1472,
"text": "Define an array ret"
},
{
"code": null,
"e": 1741,
"s": 1492,
"text": "for initialize i := 2, when i <= n, update (increase i by 1), do −for initialize j := 1, when j < i, update (increase j by 1), do −c := gcd of i and ja := j / cb := i / cinsert (a as string concatenate \"/\" concatenate b as string) at the end of ret"
},
{
"code": null,
"e": 1808,
"s": 1741,
"text": "for initialize i := 2, when i <= n, update (increase i by 1), do −"
},
{
"code": null,
"e": 1991,
"s": 1808,
"text": "for initialize j := 1, when j < i, update (increase j by 1), do −c := gcd of i and ja := j / cb := i / cinsert (a as string concatenate \"/\" concatenate b as string) at the end of ret"
},
{
"code": null,
"e": 2057,
"s": 1991,
"text": "for initialize j := 1, when j < i, update (increase j by 1), do −"
},
{
"code": null,
"e": 2077,
"s": 2057,
"text": "c := gcd of i and j"
},
{
"code": null,
"e": 2097,
"s": 2077,
"text": "c := gcd of i and j"
},
{
"code": null,
"e": 2108,
"s": 2097,
"text": "a := j / c"
},
{
"code": null,
"e": 2119,
"s": 2108,
"text": "a := j / c"
},
{
"code": null,
"e": 2130,
"s": 2119,
"text": "b := i / c"
},
{
"code": null,
"e": 2141,
"s": 2130,
"text": "b := i / c"
},
{
"code": null,
"e": 2220,
"s": 2141,
"text": "insert (a as string concatenate \"/\" concatenate b as string) at the end of ret"
},
{
"code": null,
"e": 2299,
"s": 2220,
"text": "insert (a as string concatenate \"/\" concatenate b as string) at the end of ret"
},
{
"code": null,
"e": 2353,
"s": 2299,
"text": "return an array of all unique elements present in ret"
},
{
"code": null,
"e": 2407,
"s": 2353,
"text": "return an array of all unique elements present in ret"
},
{
"code": null,
"e": 2479,
"s": 2407,
"text": "Let us see the following implementation to get a better understanding −"
},
{
"code": null,
"e": 2490,
"s": 2479,
"text": " Live Demo"
},
{
"code": null,
"e": 3196,
"s": 2490,
"text": "#include <bits/stdc++.h>\nusing namespace std;\nvoid print_vector(vector<string> v){\n cout << \"[\";\n for(int i = 0; i<v.size(); i++){\n cout << v[i] << \", \";\n }\n cout << \"]\"<<endl;\n}\nclass Solution {\npublic:\n vector<string> simplifiedFractions(int n) {\n vector<string> ret;\n for (int i = 2; i <= n; i++) {\n for (int j = 1; j < i; j++) {\n int c = __gcd(i, j);\n int a = j / c;\n int b = i / c;\n ret.push_back(to_string(a) + \"/\" + to_string(b));\n }\n }\n set<string> s(ret.begin(), ret.end());\n return vector<string>(s.begin(), s.end());\n }\n};\nmain(){\n Solution ob;\n print_vector(ob.simplifiedFractions(4));\n}"
},
{
"code": null,
"e": 3198,
"s": 3196,
"text": "4"
},
{
"code": null,
"e": 3226,
"s": 3198,
"text": "[1/2, 1/3, 1/4, 2/3, 3/4, ]"
}
]
|
Insert Interval in C++ | Suppose we have a set of non-overlapping intervals; we have to insert a new interval into the intervals. We can merge if necessary. So if the input is like − [[1,4],[6,9]], and new interval is [2,5], then the output will be [[1,5],[6,9]].
To solve this, we will follow these steps −
Insert the new interval at the end of the previous interval list
sort the interval list based on the initial time of the intervals, n := number of intervals
create one array called ans, insert first interval into ans
index := 1
while index < n,
   last := size of ans – 1
   if max of ans[last, 0] and ans[last, 1] < min of intervals[index, 0], intervals[index, 1], then insert intervals[index] into ans
   otherwise
      set ans[last, 0] := min of ans[last, 0], intervals[index, 0]
      set ans[last, 1] := max of ans[last, 1], intervals[index, 1]
   increase index by 1
return ans
Let us see the following implementation to get a better understanding −
Live Demo
#include <bits/stdc++.h>
using namespace std;
void print_vector(vector<int> v){
cout << "[";
for(int i = 0; i<v.size(); i++){
cout << v[i] << ", ";
}
cout << "]"<<endl;
}
void print_vector(vector<vector<int> > v){
cout << "[";
for(int i = 0; i<v.size(); i++){
cout << "[";
for(int j = 0; j <v[i].size(); j++){
cout << v[i][j] << ", ";
}
cout << "],";
}
cout << "]"<<endl;
}
class Solution {
public:
static bool cmp(vector <int> a, vector <int> b){
return a[0]<b[0];
}
vector<vector <int>>insert(vector<vector <int> >& intervals, vector <int>& newInterval) {
intervals.push_back(newInterval);
sort(intervals.begin(),intervals.end(),cmp);
int n = intervals.size();
vector <vector <int>> ans;
ans.push_back(intervals[0]);
int index = 1;
bool done = false;
while(index<n){
int last = ans.size()-1;
         if(max(ans[last][0],ans[last][1])<min(intervals[index][0],intervals[index][1])){
ans.push_back(intervals[index]);
} else {
ans[last][0] = min(ans[last][0],intervals[index][0]);
ans[last][1] = max(ans[last][1],intervals[index][1]);
}
index++;
}
return ans;
}
};
main(){
vector<vector<int>> v = {{1,4},{6,9}};
vector<int> v1 = {2,5};
Solution ob;
print_vector(ob.insert(v, v1));
}
[[1,4],[6,9]]
[2,5]
[[1, 5, ],[6, 9, ],] | [
{
"code": null,
"e": 1301,
"s": 1062,
"text": "Suppose we have a set of non-overlapping intervals; we have to insert a new interval into the intervals. We can merge if necessary. So if the input is like − [[1,4],[6,9]], and new interval is [2,5], then the output will be [[1,5],[6,9]]."
},
{
"code": null,
"e": 1345,
"s": 1301,
"text": "To solve this, we will follow these steps −"
},
{
"code": null,
"e": 1406,
"s": 1345,
"text": "Insert new interval at the end of the previous interval list"
},
{
"code": null,
"e": 1467,
"s": 1406,
"text": "Insert new interval at the end of the previous interval list"
},
{
"code": null,
"e": 1559,
"s": 1467,
"text": "sort the interval list based on the initial time of the intervals, n := number of intervals"
},
{
"code": null,
"e": 1651,
"s": 1559,
"text": "sort the interval list based on the initial time of the intervals, n := number of intervals"
},
{
"code": null,
"e": 1711,
"s": 1651,
"text": "create one array called ans, insert first interval into ans"
},
{
"code": null,
"e": 1771,
"s": 1711,
"text": "create one array called ans, insert first interval into ans"
},
{
"code": null,
"e": 1782,
"s": 1771,
"text": "index := 1"
},
{
"code": null,
"e": 1793,
"s": 1782,
"text": "index := 1"
},
{
"code": null,
"e": 2111,
"s": 1793,
"text": "while index < n,last := size of ans – 1if max of ans[last, 0] and ans[last, 1] < min of intervals[index, 0], intervals[index, 1], then insert intervals[index] into ansotherwiseset ans[last, 0] := min of ans [last, 0], intervals[index, 0]set ans[last, 1] := min of ans [last, 1], intervals[index, 1]increase index by 1"
},
{
"code": null,
"e": 2128,
"s": 2111,
"text": "while index < n,"
},
{
"code": null,
"e": 2152,
"s": 2128,
"text": "last := size of ans – 1"
},
{
"code": null,
"e": 2176,
"s": 2152,
"text": "last := size of ans – 1"
},
{
"code": null,
"e": 2305,
"s": 2176,
"text": "if max of ans[last, 0] and ans[last, 1] < min of intervals[index, 0], intervals[index, 1], then insert intervals[index] into ans"
},
{
"code": null,
"e": 2434,
"s": 2305,
"text": "if max of ans[last, 0] and ans[last, 1] < min of intervals[index, 0], intervals[index, 1], then insert intervals[index] into ans"
},
{
"code": null,
"e": 2566,
"s": 2434,
"text": "otherwiseset ans[last, 0] := min of ans [last, 0], intervals[index, 0]set ans[last, 1] := min of ans [last, 1], intervals[index, 1]"
},
{
"code": null,
"e": 2576,
"s": 2566,
"text": "otherwise"
},
{
"code": null,
"e": 2638,
"s": 2576,
"text": "set ans[last, 0] := min of ans [last, 0], intervals[index, 0]"
},
{
"code": null,
"e": 2700,
"s": 2638,
"text": "set ans[last, 0] := min of ans [last, 0], intervals[index, 0]"
},
{
"code": null,
"e": 2762,
"s": 2700,
"text": "set ans[last, 1] := min of ans [last, 1], intervals[index, 1]"
},
{
"code": null,
"e": 2824,
"s": 2762,
"text": "set ans[last, 1] := min of ans [last, 1], intervals[index, 1]"
},
{
"code": null,
"e": 2844,
"s": 2824,
"text": "increase index by 1"
},
{
"code": null,
"e": 2864,
"s": 2844,
"text": "increase index by 1"
},
{
"code": null,
"e": 2875,
"s": 2864,
"text": "return ans"
},
{
"code": null,
"e": 2886,
"s": 2875,
"text": "return ans"
},
{
"code": null,
"e": 2958,
"s": 2886,
"text": "Let us see the following implementation to get a better understanding −"
},
{
"code": null,
"e": 2969,
"s": 2958,
"text": " Live Demo"
},
{
"code": null,
"e": 4379,
"s": 2969,
"text": "#include <bits/stdc++.h>\nusing namespace std;\nvoid print_vector(vector<auto> v){\n cout << \"[\";\n for(int i = 0; i<v.size(); i++){\n cout << v[i] << \", \";\n }\n cout << \"]\"<<endl;\n}\nvoid print_vector(vector<vector<auto> > v){\n cout << \"[\";\n for(int i = 0; i<v.size(); i++){\n cout << \"[\";\n for(int j = 0; j <v[i].size(); j++){\n cout << v[i][j] << \", \";\n }\n cout << \"],\";\n }\n cout << \"]\"<<endl;\n}\nclass Solution {\npublic:\n static bool cmp(vector <int> a, vector <int> b){\n return a[0]<b[0];\n }\n vector<vector <int>>insert(vector<vector <int> >& intervals, vector <int>& newInterval) {\n intervals.push_back(newInterval);\n sort(intervals.begin(),intervals.end(),cmp);\n int n = intervals.size();\n vector <vector <int>> ans;\n ans.push_back(intervals[0]);\n int index = 1;\n bool done = false;\n while(index<n){\n int last = ans.size()-1;\n if(max(ans[last][0],ans[last][1])<min(intervals[index][0],intervals[i ndex][1])){\n ans.push_back(intervals[index]);\n } else {\n ans[last][0] = min(ans[last][0],intervals[index][0]);\n ans[last][1] = max(ans[last][1],intervals[index][1]);\n }\n index++;\n }\n return ans;\n }\n};\nmain(){\n vector<vector<int>> v = {{1,4},{6,9}};\n vector<int> v1 = {2,5};\n Solution ob;\n print_vector(ob.insert(v, v1));\n}"
},
{
"code": null,
"e": 4399,
"s": 4379,
"text": "[[1,4],[6,9]]\n[2,5]"
},
{
"code": null,
"e": 4420,
"s": 4399,
"text": "[[1, 5, ],[6, 9, ],]"
}
]
|
Scale deep learning with 2 new libraries for distributed training on Amazon SageMaker | by Emily Webber | Towards Data Science | By Olivier Cruchant, Emily Webber, and the AWS team
The clouds of Hawaii. Beautiful reminders of peace, serenity, and potentially experiences awaiting us as we exit the global pandemic of 2020 and move back into a world allowing global travel. But until then, why not spend some time learning about how Amazon SageMaker’s new data parallelism and model parallelism libraries can dramatically improve distributed training for deep learning!
As a machine learning practitioner, you might find yourself in the following situations. You might have found just the perfect state-of-the-art transformer-based model, only to run into memory issues when you try to fine-tune it. You might have just added billions of parameters to your model, which should improve its accuracy, but this too only gets you an out-of-memory error. You might be able to comfortably fit a model on a single GPU, but struggle to take advantage of all your GPUs and find that your model still takes days to train. Should you just accept the status quo and limit your applications to models that fit within the existing hardware capacity or that train within an acceptable time? Of course not! Enter model parallelism and data parallelism on Amazon SageMaker.
While customers have always been able to take advantage of SageMaker’s built-in capabilities for distributed training, such as multi-node jobs, the parameter server and support for open source frameworks, we are excited to announce two new toolkits, model parallelism and data parallelism, that allow ML developers to scale distributed training by fitting larger models into RAM and speeding up their training jobs.
In this post we first describe the theory behind those two techniques and then explain how to use them in Amazon SageMaker. Right off the bat we’re seeing impressive results for PyTorch and TensorFlow. Using SageMaker model parallelism, we were able to train models much bigger than a single GPU’s memory. Using SageMaker data parallelism, we witnessed throughput improvements vs PyTorch DistributedDataParallel between 15% and 41% on BERT-Large training (cluster size 16–64 GPUs), and between 4% and 13% on MaskRCNN training (cluster size 16–64 GPUs).
This means two things. First, you can train bigger models. Second, you can do this faster than ever. Part of these improvements comes from our taking full advantage of the AWS network infrastructure and EC2 instance topology. We’re introducing these to developers in less than 10 lines of code.
In this post, we’ll explain how to use SageMaker distributed training toolkits with PyTorch, and the rationale is similar with TensorFlow. You can step through our examples on GitHub right here. If you like, you can dive into the full training guides right here for TensorFlow and PyTorch.
If you’re unfamiliar with mini-batch stochastic gradient descent (SGD), we’ll quickly recap it here, but you’d be better off getting caught up on the nuts and bolts elsewhere, such as the Optimization chapter (https://d2l.ai/chapter_optimization/index.html) of Dive Into Deep Learning (D2L.ai) co-authored by Amazon scientists Zhang, Lipton, Li and Smola. Simply put, we train neural networks by updating the parameters of the model in a direction that reduces its error. These changes happen iteratively, conducted on equally-sized sub-samples of the training dataset called mini-batches. We get predictions from the model per record for each of the mini-batches, measure the error, compute the partial derivative of the loss function with respect to the model parameters, and update the parameters. One pass over the training dataset is called an epoch, and full model trainings commonly consist of dozens to hundreds of epochs.
As it turns out, mini-batch SGD by itself works really nicely in a parallel world. Passing the records of a single mini-batch over different computing devices is called data parallel distributed training. Data-parallel training scales the mini-batch size and allows even faster mini-batch processing. However, data parallel training comes with the extra complexity of having to compute the mini-batch gradient average from the gradients coming from all the workers and communicate it back to all the workers, a step called AllReduce. The problem with generic AllReduce algorithms is that they tend to slow down the training process as clusters grow. This can dramatically penalize training time if not properly implemented.
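To make the AllReduce step concrete, here is a bare-bones sketch of gradient averaging written with plain PyTorch distributed primitives. It is illustrative only, not how SageMaker data parallelism is implemented, and it assumes a process group has already been initialized and that each worker has just run a backward pass on its own shard of the mini-batch.
import torch
import torch.distributed as dist

def average_gradients(model):
    # Number of data-parallel workers participating in training.
    world_size = dist.get_world_size()
    for param in model.parameters():
        if param.grad is not None:
            # Sum this parameter's gradient across all workers (the AllReduce step)...
            dist.all_reduce(param.grad, op=dist.ReduceOp.SUM)
            # ...then divide so every worker applies the same averaged update.
            param.grad.div_(world_size)
Every worker then calls optimizer.step() with identical gradients, which is what keeps the model replicas in sync.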
Using a smart parameter-server-based AllReduce, SageMaker data parallelism (SDP) trains faster than PyTorch DistributedDataParallel and Horovod on a number of specific NLP and vision tasks while remaining compatible with their popular APIs. SDP communicates parameters via equally-sized Balanced Fusion Buffers (BFB), which make better use of the available bandwidth than pure parameter-based communication. SDP requires only minor edits to existing compatible code, and you’ll get faster distributed jobs as a result.
Using SageMaker data parallelism in PyTorch
At launch SDP supports both PyTorch and TensorFlow, but here we’ll step through a PyTorch example. It’s actually pretty accessible! First you’ll import the SDK into your script. Then you’ll use the SDK to tell your data loader which node it’s on and the rank of that node. You’ll wrap the model itself in the SDP framework. Finally, you’ll also call out in the estimator for your job that you are going to use this method of distribution.
Import and initialize the SDP client in your script
Wrap your model with SDP’s DistributedDataParallel method
Use SDP’s methods to manage which nodes in your cluster are handling certain elements:
Which chunk of data is used by each worker — this can be controlled with torch.utils.data.distributed.DistributedSampler
Which worker saves the model — most of the time we’ll just save this on the 0th node.
If need be, scale the batch size and adapt hyperparameters to fully occupy the training cluster while maintaining convergence.
The below snippet of code summarizes edits in a model-agnostic template. For simplicity we’ll just focus on the main training function itself.
# SDP: Import SDP PyTorch API, DDP and initialize
import smdistributed.dataparallel.torch.distributed as dist
from smdistributed.dataparallel.torch.parallel.distributed import DistributedDataParallel as DDP

dist.init_process_group()

def main():
    # Prepare dataset
    train_dataset = torchvision.datasets.MNIST(...)

    # SDP: Set num_replicas and rank in DistributedSampler
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_dataset,
        num_replicas=dist.get_world_size(),
        rank=dist.get_rank())
    train_loader = torch.utils.data.DataLoader(train_sampler=..., ...)

    # SDP: Wrap the PyTorch model with SDP’s DDP
    model = DDP(Net().to(device))

    # SDP: Pin each GPU to a single SDP process.
    torch.cuda.set_device(local_rank)
    model.cuda(local_rank)

    # Train
    optimizer = optim.Adadelta(...)  # use your own optimizer and hyperparameters
    scheduler = StepLR(...)
    for epoch in range(1, args.epochs + 1):
        train(...)
        if rank == 0:
            test(...)
        scheduler.step()

    # SDP: Save model on master node.
    if dist.get_rank() == 0:
        torch.save(...)
In order to launch the distributed training in SageMaker, you can execute the job using the SageMaker Python SDK:
from sagemaker.pytorch import PyTorch

job = PyTorch(entry_point="my_sdp_script.py",
              role=role,  # IAM role for the training cluster
              framework_version='1.6.0',
              py_version="py36",
              train_instance_count=2,
              train_instance_type="ml.p3.16xlarge",
              # SDP distribution method enabled here
              distributions={"smdistributed": {"dataparallel": {"enabled": True}}})

job.fit({"mychannel": "s3://bucket/path/to/training/data"})
# In this case the S3 data will be copied locally to /opt/ml/input/data/mychannel
Now that we’ve learned how to use SDP, let’s check out SageMaker model parallelism (SMP)!
Current data parallelism tools such as PyTorch DistributedDataParallel and Uber’s Horovod still require developers to fit at least the model and a single record within a computing worker. These methods of distribution hold back innovation within deep learning, because recent results have shown that larger models tend to be associated with more accurate results. When training very large models — such as large transformers in NLP or segmentation models over high-resolution images — fitting the whole model on one device may not be feasible. An alternative way to divide the workload is to partition the model itself over multiple computing devices. This is called model parallel distributed training. The primary benefit of model parallel training is the ability to train models much larger than the memory of a single GPU. Model parallel distributed training has lighter device-to-device communication requirements and uses memory more frugally; it does not create one model copy per worker like data parallelism does.
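For intuition, the snippet below shows the naive, manual form of model parallelism in plain PyTorch: splitting a toy network across two GPUs by hand. This is only a sketch; the layer sizes and device names are made up, and it is not how SMP partitions models. Unlike SMP's pipelined execution over micro-batches, it also leaves one GPU idle while the other computes.
import torch
import torch.nn as nn

class ManuallyPartitionedNet(nn.Module):
    def __init__(self):
        super().__init__()
        # First half of the network lives on GPU 0...
        self.part1 = nn.Sequential(nn.Linear(1024, 4096), nn.ReLU()).to("cuda:0")
        # ...and the second half lives on GPU 1.
        self.part2 = nn.Linear(4096, 10).to("cuda:1")

    def forward(self, x):
        x = self.part1(x.to("cuda:0"))
        # The activations cross the device boundary here; this transfer is the
        # inter-device communication that model-parallel training must manage.
        return self.part2(x.to("cuda:1"))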
Historically, implementing model parallel training on your own has been prohibitively difficult. Even for experts in deep learning, naive implementations are filled with custom code to manage the distributed computation and inter-worker communication. These end up using GPU unevenly, or even fail to truly parallelize the computation.
SageMaker model parallelism (SMP) handles model parallelism for you in a few lines of code. It includes a model graph partitioning algorithm that optimizes for speed or memory, and supports manual partitioning. It trains in a pipelined fashion over micro-batches to maximize GPU usage even under the model-parallel design. With SMP, we were able to fit a 20-record batch of BERT-MEGA on the 8 V100 GPUs of a single p3dn.24xlarge instance, and we were able to fit a 16-record batch of GPT-2 XL on the 4 V100 GPUs of a single p3.8xlarge instance, two achievements difficult to complete without model parallelism.
In order to distribute your PyTorch model training code with SMP, you’ll need to do the following steps. Most of these are similar to the steps for the data parallel toolkit above, but we’re going to add decorators over train and test steps in the model script itself.
Import and initialize the SMP client in your script
Wrap your model with SMP’s DistributedModel method
Put the forward and backward logic in a function and decorate it with @smp.step
Replace torch.Tensor.backward/torch.autograd.backward with DistributedModelObj.backward.
Post-processing on the outputs across microbatches using StepOutput methods such as reduce_mean.
You also need to write or edit model-agnostic distribution management code if necessary, for example which worker saves the model, and any changes to your batch size and hyperparameters to fully occupy the training cluster while maintaining convergence.
The below snippet of code summarizes typical necessary edits in a fictional PyTorch training script:
# SMP: Import and initialize SMP API.
import smdistributed.modelparallel.torch as smp

smp.init()

model = load_my_model()

# Download dataset and create dataloader.
train_loader = load_my_data()

# SMP: Instantiate DistributedModel object using the model.
# This handles distributing the model among multiple ranks
# behind the scenes.
# If horovod is enabled this will do an overlapping_all_reduce by
# default.
model = smp.DistributedModel(model)

# SMP: Define the smp.step consisting of the forward and backward pass.
@smp.step()
def forward_backward(model, inputs, labels):
    loss = my_loss_function(model(inputs), labels)
    # SMP: Call backward on the model instead of the output tensor.
    # If loss is not a scalar or a 0d tensor, the backward call requires
    # out grad tensors in addition to the output tensors,
    # similar to a torch.autograd.backward call.
    model.backward(loss)
    return loss

for inputs, labels in train_loader:
    inputs, labels = inputs.to(device), labels.to(device)
    optimizer.zero_grad()
    loss_mb = forward_backward(model, inputs, labels)
    # SMP: Average the loss across microbatches.
    loss = loss_mb.reduce_mean()
    optimizer.step()
    # SMP: Print the loss only at rank 0.
    if smp.rank() == 0:
        print(f"Loss: {loss}")
In order to launch the distributed execution of this training script, you can use the high-level SageMaker SDK:
from sagemaker.pytorch.estimator import PyTorch

mpi_options = {
    "enabled": True,
    "processes_per_host": 4,
    "custom_mpi_options": "--mca btl_vader_single_copy_mechanism none "
}

dist_options = {
    "modelparallel": {
        "enabled": True,
        "parameters": {
            "partitions": 4,      # we'll partition the model among the 4 GPUs
            "microbatches": 8,    # mini-batches are split into micro-batches to increase parallelism
            "optimize": "memory"  # the automatic model partitioning can optimize for speed or memory
        }
    }
}

job = PyTorch(
    entry_point="my_script.py",
    role=role,  # IAM role for the training cluster
    instance_type="ml.p3.8xlarge",
    instance_count=1,
    framework_version="1.6",
    py_version="py36",
    distribution={"mpi": mpi_options, "smdistributed": dist_options})

job.fit()  # Can optionally use fit(wait=False) for asynchronous execution.
We enable MPI because SMP uses MPI as a communication library under the hood. The number of processes_per_host must be set to at most the number of GPUs available in your chosen instance type. Finally, the custom MPI flag "--mca btl_vader_single_copy_mechanism none" helps suppress certain warnings that show up when MPI is used with SageMaker containers, and keeps your training logs clean.
Distributed training means you can train on bigger batches and bigger models. One thing to keep in mind is that increases in batch sizes generally impact SGD convergence; you may need to adjust the learning rate or other hyperparameters to maintain convergence quality.
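A common heuristic is to scale the learning rate linearly with the global batch size and add a short warm-up period. The numbers below are purely illustrative, not values prescribed by SageMaker; treat this as a sketch of the bookkeeping, not a recipe.
base_lr = 0.1              # learning rate tuned for the single-GPU batch size
base_batch_size = 256      # batch size that base_lr was tuned for
per_gpu_batch_size = 256
world_size = 8             # number of data-parallel workers (hypothetical)

# Linear scaling rule: grow the learning rate with the effective batch size.
global_batch_size = per_gpu_batch_size * world_size
scaled_lr = base_lr * global_batch_size / base_batch_size  # 0.8 in this example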
SageMaker data parallelism improves the development experience and the performance of multi-machine data parallel training — this means you’re not going to see those speed improvements unless you’re truly using multiple instances in your job.
At launch we expect customers who need to use both methods of parallelism for the same jobs to use open source data distribution methods.
You can cruise over to the toolkit documentation for a deeper dive. Don’t hesitate to take a look, and let us know what you build!
All images by the authors | [
{
"code": null,
"e": 223,
"s": 171,
"text": "By Olivier Cruchant, Emily Webber, and the AWS team"
},
{
"code": null,
"e": 2426,
"s": 223,
"text": "The clouds of Hawaii. Beautiful reminders of peace, serenity, and potentially experiences awaiting us as we exit the global pandemic of 2020 and move back into a world allowing global travel. But until then, why not spend some time learning about how Amazon SageMaker’s new data parallelism and model parallelism libraries can dramatically improve our model performance on distributed training for deep learning! As a machine learning practitioner, you might find yourself in the following situations. You might have found just the perfect state of the art transformer-based model, only to find that when you try to fine-tune it you run into memory issues. You might have just added billions of parameters to your model, which should improve your model performance, but this too only gets you an out of memory issue. You might be able to comfortably fit a model on a single GPU, but are struggling to take advantage of all your GPU’s and find that your model still takes days to train. Should you just accept the status quo and limit your applications to models that fit within the existing hardware capacity or that train within an acceptable time? Of course not! Enter model parallelism and data parallelism on Amazon SageMaker. While customers have always been able to take advantage of SageMaker’s built-in capabilities for distributed training, such as multi-node jobs, the parameter server and support for open source frameworks, we are excited to announce two new toolkits, model parallelism and data parallelism, that allow ML developers to scale distributed training by fitting larger models into RAM and speeding up their training jobs. In this post we first describe the theory behind those two techniques and then explain how to use them in Amazon SageMaker. Right off the bat we’re seeing impressive results for PyTorch and TensorFlow. Using SageMaker model parallelism, we were able to train models much bigger than a single GPU’s memory. Using SageMaker data parallelism, we witnessed throughput improvements vs PyTorch DistributedDataParallel between 15% and 41% on BERT-Large training (cluster size 16–64 GPUs), and between 4% and 13% on MaskRCNN training (cluster size 16–64 GPUs)."
},
{
"code": null,
"e": 2720,
"s": 2426,
"text": "This means two things. First, you can train bigger models. Second, you can do this faster than ever. Part of these improvements come from our taking full advantage of the AWS network infrastructure and EC2 instance topology. We’re introducing these to developers in less than 10 lines of code."
},
{
"code": null,
"e": 3010,
"s": 2720,
"text": "In this post, we’ll explain how to use SageMaker distributed training toolkits with PyTorch, and the rationale is similar with TensorFlow. You can step through our examples on GitHub right here. If you like, you can dive into the full training guides right here for TensorFlow and PyTorch."
},
{
"code": null,
"e": 4667,
"s": 3010,
"text": "If you’re unfamiliar with mini-batch stochastic gradient descent (SGD), we’ll quickly recap that here, but you’d be better off getting caught up on the nuts and bolts of it elsewhere such as the Optimization chapter (https://d2l.ai/chapter_optimization/index.html) of Dive Into Deep Learning (D2L.ai) co-authored by Amazon scientists Zhang, Lipton, Li and Smola. Simply put, we train neural networks by updating the parameters of the model in a direction that reduces its error. These changes happen iteratively, conducted on equally-sized sub-samples of the training dataset called mini-batches. We get predictions from the model per record for each of the mini-batches, measure the error, compute the partial derivative of the loss function with respect to the model inputs, and update the parameters. One pass over the training dataset is called an epoch, and full model trainings commonly consist of dozens to hundreds of epochs. As it turns out, mini-batch SGD by itself works really nicely in a parallel world. Passing the records of a single mini-batch over different computing devices is called data parallel distributed training. Data-parallel training scales the mini-batch size and allows even faster mini-batch processing. However, data parallel training comes with the extra complexity of having to compute the mini-batch gradient average with gradients coming from all the workers and communicating it to all the workers, a step called AllReduce. The problem with generic AllReduce algorithms is that they tend to slow down the training process as datasets grow. This can dramatically penalize training time if not properly implemented."
},
{
"code": null,
"e": 5692,
"s": 4667,
"text": "Using a smart parameter-server-based AllReduce, SageMaker data parallelism (SDP) trains faster than PyTorch DistributedDataParallel and Horovod in a number of specific NLP and vision tasks while being compatible with their popular APIs. SDP communicates parameters via equally-sized Balanced Fusion Buffers (BFB), which contribute to a better use of available bandwidth than pure parameter-based communication. SDP requires minor edits to be used on existing compatible code, and you’ll get faster distributed jobs as a result. Using SageMaker data parallelism in PyTorch At launch SDP supports both PyTorch and TensorFlow, but here we’ll step through a PyTorch example. It’s actually pretty accessible! First you’ll import the SDK into your script. Then you’re using the SDK to tell your data loader which node it’s on, and the rank of that node. You’ll load the model itself into the SDP framework. Finally, you’ll also want to call out in the estimator for your job that you are going to use this method of distribution."
},
{
"code": null,
"e": 6218,
"s": 5692,
"text": "Import and initialize the SDP client in your scriptWrap your model with SDP’s DistributedDataParallel methodUse SDP’s methods to manage which nodes in your cluster are handling certain elements:Which chunk of data is used by each worker — this can be controlled with torch.utils.data.distributed.DistributedSamplerWhich worker saves the model — most of the time we’ll just save this on the 0th node.If need be, scale the batch size and adapt hyperparameters to fully occupy the training cluster while maintaining convergence."
},
{
"code": null,
"e": 6270,
"s": 6218,
"text": "Import and initialize the SDP client in your script"
},
{
"code": null,
"e": 6328,
"s": 6270,
"text": "Wrap your model with SDP’s DistributedDataParallel method"
},
{
"code": null,
"e": 6415,
"s": 6328,
"text": "Use SDP’s methods to manage which nodes in your cluster are handling certain elements:"
},
{
"code": null,
"e": 6536,
"s": 6415,
"text": "Which chunk of data is used by each worker — this can be controlled with torch.utils.data.distributed.DistributedSampler"
},
{
"code": null,
"e": 6622,
"s": 6536,
"text": "Which worker saves the model — most of the time we’ll just save this on the 0th node."
},
{
"code": null,
"e": 6749,
"s": 6622,
"text": "If need be, scale the batch size and adapt hyperparameters to fully occupy the training cluster while maintaining convergence."
},
{
"code": null,
"e": 6892,
"s": 6749,
"text": "The below snippet of code summarizes edits in a model-agnostic template. For simplicity we’ll just focus on the main training function itself."
},
{
"code": null,
"e": 8049,
"s": 6892,
"text": "# SDP: Import SDP PyTorch API, DDP and initializeimport smdistributed.dataparallel.torch.distributed as distfrom smdistributed.dataparallel.torch.parallel.distributed import DistributedDataParallel as DDPdist.init_process_group()def main(): # Prepare dataset train_dataset = torchvision.datasets.MNIST(...) # SDP: Set num_replicas and rank in DistributedSampler train_sampler = torch.utils.data.distributed.DistributedSampler( train_dataset, num_replicas=dist.get_world_size(), rank=dist.get_rank()) train_loader = torch.utils.data.DataLoader(train_sampler=..., ...) # SDP: Wrap the PyTorch model with SDP’s DDP model = DDP(Net().to(device)) # SDP: Pin each GPU to a single SDP process. torch.cuda.set_device(local_rank) model.cuda(local_rank) # Train optimizer = optim.Adadelta(...) # use your own optimizer and hyperparameters scheduler = StepLR(...) for epoch in range(1, args.epochs + 1): train(...) if rank == 0: test(...) scheduler.step() # SDP: Save model on master node. if dist.get_rank() == 0: torch.save(...)"
},
{
"code": null,
"e": 8163,
"s": 8049,
"text": "In order to launch the distributed training in SageMaker, you can execute the job using the SageMaker Python SDK:"
},
{
"code": null,
"e": 8737,
"s": 8163,
"text": "from sagemaker.pytorch import PyTorchjob = PyTorch(entry_point=\"my_sdp_script.py\", role=role, # IAM role for the training cluster framework_version='1.6.0', py_version=\"py36\", train_instance_count=2, train_instance_type=\"ml.p3.16xlarge\", distributions={\"smdistributed\": {\"dataparallel\": {\"enabled\": True}}} # SDP distribution method enabled here)job.fit({\"mychannel\":\"s3://bucket/path/to/training/data\"})# In this case the s3 data will by copied locally at opt/ml/input/data/mychannel"
},
{
"code": null,
"e": 8827,
"s": 8737,
"text": "Now that we’ve learned how to use SDP, let’s check out SageMaker model parallelism (SMP)!"
},
{
"code": null,
"e": 9841,
"s": 8827,
"text": "Current data parallelism tools such as PyTorch DistributedDataParallel and Uber’s Horovod still require developers to fit at least the model and a single record within a computing worker. These current methods of distribution block innovation within deep learning, because recent results have shown that larger models tend to be associated with more accurate results. When training very large models — such as large transformers in NLP or segmentation models over high-resolution images — this may not be feasible. An alternative way to fraction the workload to accommodate models is to partition the model over multiple computing devices. This is called model parallel distributed training. The primary benefit of model parallel training is the ability to train models much larger than the memory of a single GPU. Model parallel distributed training has lighter device-to-device communication requirements and uses memory more frugally; it does not create one model copy per worker like with data parallelism."
},
{
"code": null,
"e": 10177,
"s": 9841,
"text": "Historically, implementing model parallel training on your own has been prohibitively difficult. Even for experts in deep learning, naive implementations are filled with custom code to manage the distributed computation and inter-worker communication. These end up using GPU unevenly, or even fail to truly parallelize the computation."
},
{
"code": null,
"e": 10789,
"s": 10177,
"text": "SageMaker model parallelism (SMP) handles model parallelism for you in a few lines of code. It includes a model graph partitioning algorithm that optimizes for speed or memory, and supports manual partitioning. It trains in a pipelined fashion over micro-batches to maximize GPU usage even under the model-parallel design. With SMP, we were able to fit an 20-record batch of BERT-MEGA on the 8 V100 GPUs of a single p3dn.24xlarge instance, and we were able to fit a 16-record batch of GPT-2 XL on the 4 V100 GPUs of a single p3.8xlarge instance, two achievements difficult to complete without model parallelism."
},
{
"code": null,
"e": 11058,
"s": 10789,
"text": "In order to distribute your PyTorch model training code with SMP, you’ll need to do the following steps. Most of these are similar to the steps for the data parallel toolkit above, but we’re going to add decorators over train and test steps in the model script itself."
},
{
"code": null,
"e": 11676,
"s": 11058,
"text": "Import and initialize the SMP client in your scriptWrap your model with SMP’s DistributedModel methodPut the forward and backward logic in a function and decorate it with .smp.stepReplace torch.Tensor.backward/torch.autograd.backward with DistributedModelObj.backward.Post-processing on the outputs across microbatches using StepOutput methods such as reduce_mean.You also need to write or edit model-agnostic distribution management code if necessary, for example which worker saves the model, and any changes to your batch size and hyperparameters to fully occupy the training cluster while maintaining convergence."
},
{
"code": null,
"e": 11728,
"s": 11676,
"text": "Import and initialize the SMP client in your script"
},
{
"code": null,
"e": 11779,
"s": 11728,
"text": "Wrap your model with SMP’s DistributedModel method"
},
{
"code": null,
"e": 11859,
"s": 11779,
"text": "Put the forward and backward logic in a function and decorate it with .smp.step"
},
{
"code": null,
"e": 11948,
"s": 11859,
"text": "Replace torch.Tensor.backward/torch.autograd.backward with DistributedModelObj.backward."
},
{
"code": null,
"e": 12045,
"s": 11948,
"text": "Post-processing on the outputs across microbatches using StepOutput methods such as reduce_mean."
},
{
"code": null,
"e": 12299,
"s": 12045,
"text": "You also need to write or edit model-agnostic distribution management code if necessary, for example which worker saves the model, and any changes to your batch size and hyperparameters to fully occupy the training cluster while maintaining convergence."
},
{
"code": null,
"e": 12400,
"s": 12299,
"text": "The below snippet of code summarizes typical necessary edits in a fictional PyTorch training script:"
},
{
"code": null,
"e": 13651,
"s": 12400,
"text": "# SMP: Import and initialize SMP API.import smdistributed.modelparallel.torch as smpsmp.init()model = load_my_model()# Download dataset and create dataloader.train_loader = load_my_data()# SMP: Instantiate DistributedModel object using the model.# This handles distributing the model among multiple ranks# behind the scenes# If horovod is enabled this will do an overlapping_all_reduce by# default.model = smp.DistributedModel(model)# SMP: Define the smp.step consisting of forward and backward [email protected]()def forward_backward(model, inputs, labels): loss = my_loss_function(model(inputs), labels) # SMP: Call backward on the model instead of the output tensor. # If loss is not a scalar or a 0d tensor, the backward call requires # out grad tensors in addition to the output tensors, # similar to torch.autograd.backward call. model.backward(loss) return lossfor input, label in train_loader: image, label = image.to(device), label.to(device) optimizer.zero_grad() loss_mb = forward_backward(model, inputs, labels) # SMP: Average the loss across microbatches. loss = loss_mb.reduce_mean() optimizer.step() # SMP: Print the loss only at rank 0. if smp.rank() == 0: print(f\"Loss: {loss}\")"
},
{
"code": null,
"e": 13763,
"s": 13651,
"text": "In order to launch the distributed execution of this training script, you can use the high-level SageMaker SDK:"
},
{
"code": null,
"e": 14649,
"s": 13763,
"text": "from sagemaker.pytorch.estimator import PyTorchmpi_options = { \"enabled\": True, \"processes_per_host\": 4, \"custom_mpi_options\":\"--mca btl_vader_single_copy_mechanism none \" } dist_options = { \"modelparallel\":{ \"enabled\": True, \"parameters\": { \"partitions\": 4, # we'll partition the model among the 4 GPUs \"microbatches\": 8, # Mini-batchs are split in micro-batch to increase parallelism \"optimize\": \"memory\" # The automatic model partitioning can optimize speed or memory } }}job = PyTorch( entry_point=\"my_script.py\", role=role, # IAM role for the training cluster instance_type=\"ml.p3.8xlarge\", instance_count=1, framework_version=1.6, py_version=\"py36\", distribution={\"mpi\": mpi_options, \"smdistributed\": dist_options})job.fit() # Can optionally use fit(wait=False) for asynchronous execution."
},
{
"code": null,
"e": 15036,
"s": 14649,
"text": "We enable MPI because SMP uses MPI as a communication library under the hood. The number of processes_per_host must be set up to the number of GPUs available in your chosen instance type. Finally, the custom MPI flag \"--mca btl_vader_single_copy_mechanism none\" helps suppress certain warnings that show up when MPI is used with SageMaker containers, and keeps your training logs clean."
},
{
"code": null,
"e": 15306,
"s": 15036,
"text": "Distributed training means you can train on bigger batches and bigger models. One thing to keep in mind is that increases in batch sizes generally impact SGD convergence; you may need to adjust the learning rate or other hyperparameters to maintain convergence quality."
},
{
"code": null,
"e": 15549,
"s": 15306,
"text": "SageMaker data parallelism improves the development experience and the performance of multi-machine data parallel training — this means you’re not going to see those speed improvements unless you’re truly using multiple instances in your job."
},
{
"code": null,
"e": 15692,
"s": 15549,
"text": "At launch we expect customers who need to use both methods of parallelism for the same jobs to use open source data distribution methods while"
},
{
"code": null,
"e": 15823,
"s": 15692,
"text": "You can cruise over to the toolkit documentation for a deeper dive. Don’t hesitate to take a look, and let us know what you build!"
}
]
|
How to create your own Docker Customised Image ? - GeeksforGeeks | 01 Jul, 2021
Docker is an open-source container management service and one of the most popular DevOps tools among deployment teams. Docker is mostly used in Agile-based projects which require continuous delivery of software. The founder, Chief Technical Officer, and Chief Architect of the Docker open source project is Solomon Hykes. It was launched in 2013 by Dotcloud, and since then it has become the world’s leading software container platform. For more details about containerization using docker and its internal architecture click here.
In this article, we will see how we can create our own customized Docker images and how we can push them to a Docker Hub profile. It is good practice to push your images to your Docker Hub profile so that you don’t have to create them again; you can pull those images onto your local system or into the cloud with all your work saved in them.
Creating docker images is not a tedious task. We can create a docker image easily with a few commands. There are two ways of creating a docker image depending upon the purpose for which you want to create the image. The first method is using the commit command and the other method is by using the Dockerfile concept. To read more details about the components of docker, i.e., Docker images and Dockerfile, click here.
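The rest of this article demonstrates the commit-based method. For comparison, the Dockerfile route bakes the same kind of customization into an image at build time. The sketch below is hypothetical (the base image and package list are assumptions, not something the commit-based walkthrough uses) and produces an Ubuntu image with git and vim pre-installed:
FROM ubuntu:20.04
# Install the extra tools we want baked into the image.
RUN apt-get update && \
    DEBIAN_FRONTEND=noninteractive apt-get install -y git vim && \
    rm -rf /var/lib/apt/lists/*
CMD ["/bin/bash"]
You would then build and tag it with docker build -t alex43/ubuntu-basicbundle:v1.0 . and push it with the same docker push command shown later in this article.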
Now let’s start creating our own customized docker image using the commit command. Before going with our own docker image we should first set up and configure docker in our operating system. To learn more about how to setup docker you can refer to this article. After successful installation let’s learn some of the docker commands which we will be using.
The first command is pull command. This command will download/pull the complete operating system within seconds depending upon your internet connectivity. The syntax is like, docker pull image_name. Here I am pulling alex43/ubuntu-with-git:v1.0 which is my own customized image.
docker pull alex43/ubuntu-with-git:v1.0
The second command is the run command, which we will use to run the pulled image. This command will launch my image and we will get an interactive shell/terminal of that image. The syntax is like, -it for an interactive terminal, --name to give a reference name for the launched container and then my image_name.
docker run -it --name myos alex43/ubuntu-with-git:v1.0
The third command, and the most important command for creating our own image, is the commit command. By using this command we can simply create our own image with the packages which we want from the existing image. The syntax is like, docker commit Nameof_RunningContainer your_own_name:tag.
docker commit myos ubuntu-basicbundle:v1.0
The fourth command is tag command. By using this command we need to rename our image with syntax username/image-name:tag. Before executing this command you need to create an account on the Docker hub and you have to give the same username which you have given in the docker hub profile.
docker tag ubuntu-basicbundle:v1.0 alex43/ubuntu-basicbundle:v1.0
The fifth command is login command. By using this command we will logged in to the docker hub account through our terminal and it is required to upload our docker image to the docker hub profile.
docker login --username alex43 --password your_passwd
The sixth command is the push command. By using this command we can upload our own created docker image to the docker hub profile and can use it anywhere from our local system to the cloud by pulling it.
docker push alex43/ubuntu-basicbundle:v1.0
So these were the commands and concepts which we will be using in this tutorial, and I will be uploading one fresh image so that you can understand it in a better way.
Now we will create our own image from the existing alex43/ubuntu-with-git:v1.0 image, customize it to our needs, and upload it.
Step 1: The very first step is to pull the image as shown in the commands above. Use the pull command to download the image into your system.
docker pull alex43/ubuntu-with-git:v1.0
Step 2: Launch that image so that we can customize it as per our needs. In the picture below you can see that at first we did not have the vim editor in our image, so it gave a "command not found" error. Then I installed vim in our system by using the apt-get install vim command.
In the image below you can see that after installing the vim editor, when I opened abhi.txt with vim it didn't give any error and I was able to write content in it. You can also see the content using the cat command, which we already have in the alex43/ubuntu-with-git:v1.0 image.
So that is the only step you need to understand before applying your own ideas. You can create many more useful docker images to solve industry use cases, like launching a WordPress blogging site using a docker image, creating your own MySQL database image, etc. It is the same basic concept, applied as per your needs.
Step 3: Come out of the running container by pressing ctrl+p+q and commit the modified image as explained in the commands above.
Step 4: Create an account at Docker Hub and rename the image with the proper syntax as explained above.
Step 5: Log in to the Docker Hub profile from the terminal using the docker login command as explained above.
Step 6: The final task is to upload the image to the Docker Hub profile using the push command as explained above.
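Since the screenshots are not reproduced here, the full command sequence for steps 3 to 6, using the names from this tutorial, looks like this:
docker commit myos ubuntu-basicbundle:v1.0
docker tag ubuntu-basicbundle:v1.0 alex43/ubuntu-basicbundle:v1.0
docker login --username alex43 --password your_passwd
docker push alex43/ubuntu-basicbundle:v1.0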
For step-3, step-5, and step-6 you can see the image below. Pushing your image might take some time depending upon the size of the image and the internet speed of your system. In the image, you can see I have used the commands which I explained above and it successfully uploaded my image at my docker hub profile.
So in this way we can create our own docker image and push it to docker hub just by using few commands.
{
"code": null,
"e": 24978,
"s": 24950,
"text": "\n01 Jul, 2021"
},
{
"code": null,
"e": 25545,
"s": 24978,
"text": "Docker is an open-source container management service and one of the most popular tools of DevOps which is being popular among the deployment team. Docker is mostly used in Agile-based projects which require continuous delivery of the software. The founder, Chief Technical Officer, and Chief Architect Officer of the Docker Open source project is Soloman Hykes. It was launched in 2013 by Dotcloud, since then it is the world’s leading software container platform. For more details about the containerization using docker and it’s internal architecture click here. "
},
{
"code": null,
"e": 25872,
"s": 25545,
"text": "In this article, we will see how we can create our own customized Docker images and how we can push it to the docker hub profile. It is good practice to push your images to docker hub profile as you don’t have to create it again and you can pull those images in your system as well as in cloud with all your work saved in it. "
},
{
"code": null,
"e": 26277,
"s": 25872,
"text": "Creating docker images is not a tedious task. We can create a docker image easily with few commands. There are two ways of creating a docker image depending upon the purpose for which you want to create the image. The first method is using commit command and another method is by using Dockerfile concept. To read more details about the components of docker ie, Docker images and Docker File click here. "
},
{
"code": null,
"e": 26634,
"s": 26277,
"text": "Now let’s start creating our own customized docker image using the commit command. Before going with our own docker image we should first set up and configure docker in our operating system. To learn more about how to setup docker you can refer to this article. After successful installation let’s learn some of the docker commands which we will be using. "
},
{
"code": null,
"e": 26914,
"s": 26634,
"text": "The first command is pull command. This command will download/pull the complete operating system within seconds depending upon your internet connectivity. The syntax is like, docker pull image_name. Here I am pulling alex43/ubuntu-with-git:v1.0 which is my own customized image. "
},
{
"code": null,
"e": 26954,
"s": 26914,
"text": "docker pull alex43/ubuntu-with-git:v1.0"
},
{
"code": null,
"e": 27255,
"s": 26954,
"text": "The second command is run command which we will use to run the pulled image. This command will launch my image and we will get an interactive shell/terminal of that image. The syntax is like, -it for an interactive terminal, –name to give reference name for my image launched and then my image_name. "
},
{
"code": null,
"e": 27310,
"s": 27255,
"text": "docker run -it --name myos alex43/ubuntu-with-git:v1.0"
},
{
"code": null,
"e": 27593,
"s": 27310,
"text": "The third command and the most important command for creating our own image is commit command. By using this command we can simply create our own image with the packages which we want from the existing image. The syntax is like, docker commit Nameof_RunningImage your_own_name:tag. "
},
{
"code": null,
"e": 27636,
"s": 27593,
"text": "docker commit myos ubuntu-basicbundle:v1.0"
},
{
"code": null,
"e": 27924,
"s": 27636,
"text": "The fourth command is tag command. By using this command we need to rename our image with syntax username/image-name:tag. Before executing this command you need to create an account on the Docker hub and you have to give the same username which you have given in the docker hub profile. "
},
{
"code": null,
"e": 27994,
"s": 27924,
"text": "docker tag alex43/ubuntu-with-git:v1.0 alex43/ubuntu-basicbundle:v1.0"
},
{
"code": null,
"e": 28192,
"s": 27994,
"text": "The fifth command is login command. By using this command we will logged in to the docker hub account through our terminal and it is required to upload our docker image to the docker hub profile. "
},
{
"code": null,
"e": 28246,
"s": 28192,
"text": "docker login --username alex43 --password your_passwd"
},
{
"code": null,
"e": 28444,
"s": 28246,
"text": "The fifth command is push command. By using this command we can upload our own created docker image to the docker hub profile and can use it anywhere from our local system to cloud by pulling it. "
},
{
"code": null,
"e": 28487,
"s": 28444,
"text": "docker push alex43/ubuntu-basicbundle:v1.0"
},
{
"code": null,
"e": 28669,
"s": 28487,
"text": "So these were the few commands with the concept which we will be using in this tutorial and I will be uploading one fresh image so that you guys can understand it in a better way. "
},
{
"code": null,
"e": 28814,
"s": 28669,
"text": "Now we will create our own image from existing alex43/ubuntu-with-git:v1.0 image and we will customize it with our needs and we will upload it. "
},
{
"code": null,
"e": 28996,
"s": 28814,
"text": "Step 1: The very first step is to pull the image as I have shown in the upper commands. Use the command and pull the image into your system. docker pull alex43/ubuntu-with-git:v1.0 "
},
{
"code": null,
"e": 29275,
"s": 28996,
"text": "Step 2: Launch that image so that we can customize it as per our needs. In the below picture you can see that at 1st we were not having vim editor in our image so it was giving error of command not found. Then I installed vim in our system by using apt-get install vim command. "
},
{
"code": null,
"e": 29553,
"s": 29275,
"text": "In the below image you can see that after installing vim editor when I opened abhi.txt with vim editor it didn’t gave any error and I was able to write content in that. You can also see the content using cat command which we already have in alex43/ubuntu-with-git:v1.0 image. "
},
{
"code": null,
"e": 29907,
"s": 29553,
"text": "So it was only the step that you need to understand and apply your own concepts. You can create lots of more useful docker images to solve some industry use cases like launching a WordPress blogging site using a docker image, creating your own MySQL database image, etc. It is the basic concept which you need to understand and apply as per your needs. "
},
{
"code": null,
"e": 30350,
"s": 29907,
"text": "Step 3: Come out of the image by pressing ctrl+p+q and commit the modified image as I have explained in the above command. Step 4: Create an account at docker hub and change the name of the image with proper syntax as explained above. Step 5: Log in into docker hub profile from terminal using docker login command as explained above. Step 6: The final task is to upload the image using push command to docker hub profile as explained above. "
},
{
"code": null,
"e": 30667,
"s": 30350,
"text": "For step-3, step-5, and step-6 you can see the image below. Pushing your image might take some time depending upon the size of the image and the internet speed of your system. In the image, you can see I have used the commands which I explained above and it successfully uploaded my image at my docker hub profile. "
},
{
"code": null,
"e": 30772,
"s": 30667,
"text": "So in this way we can create our own docker image and push it to docker hub just by using few commands. "
},
{
"code": null,
"e": 30790,
"s": 30774,
"text": "rajeev0719singh"
},
{
"code": null,
"e": 30804,
"s": 30790,
"text": "sumitgumber28"
},
{
"code": null,
"e": 30821,
"s": 30804,
"text": "Docker Container"
},
{
"code": null,
"e": 30828,
"s": 30821,
"text": "How To"
},
{
"code": null,
"e": 30845,
"s": 30828,
"text": "Web Technologies"
},
{
"code": null,
"e": 30943,
"s": 30845,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 30977,
"s": 30943,
"text": "How to Install FFmpeg on Windows?"
},
{
"code": null,
"e": 31026,
"s": 30977,
"text": "How to Set Git Username and Password in GitBash?"
},
{
"code": null,
"e": 31084,
"s": 31026,
"text": "How to Add External JAR File to an IntelliJ IDEA Project?"
},
{
"code": null,
"e": 31126,
"s": 31084,
"text": "How to Install Jupyter Notebook on MacOS?"
},
{
"code": null,
"e": 31164,
"s": 31126,
"text": "How to Check the OS Version in Linux?"
},
{
"code": null,
"e": 31206,
"s": 31164,
"text": "Roadmap to Become a Web Developer in 2022"
},
{
"code": null,
"e": 31239,
"s": 31206,
"text": "Installation of Node.js on Linux"
},
{
"code": null,
"e": 31282,
"s": 31239,
"text": "How to fetch data from an API in ReactJS ?"
},
{
"code": null,
"e": 31344,
"s": 31282,
"text": "Top 10 Projects For Beginners To Practice HTML and CSS Skills"
}
]
|
Java Examples - Comparing performance | How to compare performance of string creation ?
The following example compares the performance of strings created in two different ways.
public class StringComparePerformance {
   public static void main(String[] args) {
      long startTime = System.currentTimeMillis();
      for(int i = 0; i < 50000; i++) {
         String s1 = "hello";
         String s2 = "hello";
      }
      long endTime = System.currentTimeMillis();
      System.out.println("Time taken for creation"
         + " of String literals : " + (endTime - startTime)
         + " milli seconds");
      long startTime1 = System.currentTimeMillis();
      for(int i = 0; i < 50000; i++) {
         String s3 = new String("hello");
         String s4 = new String("hello");
      }
      long endTime1 = System.currentTimeMillis();
      System.out.println("Time taken for creation"
         + " of String objects : " + (endTime1 - startTime1)
         + " milli seconds");
   }
}
The above code sample will produce the following result. The result may vary.
Time taken for creation of String literals : 0 milli seconds
Time taken for creation of String objects : 16 milli seconds
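The gap in timings comes from how the two forms are handled: string literals are interned in the string pool and reused, while new String("hello") always allocates a fresh object on the heap. A minimal illustration of this difference (not part of the original example; the class name here is made up) is shown below.

class StringPoolDemo {
   public static void main(String[] args) {
      String a = "hello";
      String b = "hello";              // reuses the pooled literal, no new object
      String c = new String("hello");  // always creates a new object
      System.out.println(a == b);      // true  - same pooled instance
      System.out.println(a == c);      // false - a distinct object
   }
}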
{
"code": null,
"e": 2116,
"s": 2068,
"text": "How to compare performance of string creation ?"
},
{
"code": null,
"e": 2205,
"s": 2116,
"text": "Following example compares the performance of two strings created in two different ways."
},
{
"code": null,
"e": 3050,
"s": 2205,
"text": "public class StringComparePerformance {\n public static void main(String[] args) { \n long startTime = System.currentTimeMillis();\n \n for(int i = 0; i < 50000; i++) {\n String s1 = \"hello\";\n String s2 = \"hello\"; \n }\n long endTime = System.currentTimeMillis();\n System.out.println(\"Time taken for creation\" \n + \" of String literals : \"+ (endTime - startTime) \n + \" milli seconds\" ); \n long startTime1 = System.currentTimeMillis();\n \n for(int i = 0; i < 50000; i++) {\n String s3 = new String(\"hello\");\n String s4 = new String(\"hello\");\n }\n long endTime1 = System.currentTimeMillis();\n System.out.println(\"Time taken for creation\" \n + \" of String objects : \" + (endTime1 - startTime1)\n + \" milli seconds\");\n }\n}"
},
{
"code": null,
"e": 3127,
"s": 3050,
"text": "The above code sample will produce the following result.The result may vary."
},
{
"code": null,
"e": 3250,
"s": 3127,
"text": "Time taken for creation of String literals : 0 milli seconds\nTime taken for creation of String objects : 16 milli seconds\n"
},
{
"code": null,
"e": 3257,
"s": 3250,
"text": " Print"
},
{
"code": null,
"e": 3268,
"s": 3257,
"text": " Add Notes"
}
]
|
How to create a database on command line in MySQL? | First, you need to open the command prompt. You can open it using the shortcut Windows+R key.
The screenshot is as follows −
Now type CMD and press the OK button −
Now the following command prompt would be visible −
Now reach the MySQL bin directory. The screenshot is as follows −
Following is the query to run MySQL in the command line to create a database −
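The screenshot for this step is not reproduced here. Assuming a root login and the database name commandline that appears in the listing below, the commands would be along these lines:

mysql -u root -p
CREATE DATABASE commandline;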
Now you can check whether the database has been created using the SHOW DATABASES command −
mysql> SHOW DATABASES;
This will produce the following output −
+---------------------------+
| Database |
+---------------------------+
| bothinnodbandmyisam |
| business |
| commandline |
| customer_tracker_database |
| customertracker |
| database1 |
| databasesample |
| demo |
| education |
| hb_student_tracker |
| hello |
| information_schema |
| javadatabase2 |
| javasampledatabase |
| login |
| mybusiness |
| mydatabase |
| mysql |
| onetomanyrelationship |
| onlinebookstore |
| performance_schema |
| rdb |
| sample |
| sampledatabase |
| schemasample |
| sys |
| test |
| test3 |
| tracker |
| universitydatabase |
| web |
| webtracker |
+---------------------------+
32 rows in set (0.00 sec) | [
{
"code": null,
"e": 1149,
"s": 1062,
"text": "First, you need to open the command prompt. You can open using shortcut windows+R key."
},
{
"code": null,
"e": 1180,
"s": 1149,
"text": "The screenshot is as follows −"
},
{
"code": null,
"e": 1215,
"s": 1180,
"text": "Now type CMD and press OK button −"
},
{
"code": null,
"e": 1267,
"s": 1215,
"text": "Now the following command prompt would be visible −"
},
{
"code": null,
"e": 1333,
"s": 1267,
"text": "Now reach the MySQL bin directory. The screenshot is as follows −"
},
{
"code": null,
"e": 1412,
"s": 1333,
"text": "Following is the query to run MySQL in the command line to create a database −"
},
{
"code": null,
"e": 1492,
"s": 1412,
"text": "Now you can check the database is created or not using SHOW DATABASES command −"
},
{
"code": null,
"e": 1515,
"s": 1492,
"text": "mysql> SHOW DATABASES;"
},
{
"code": null,
"e": 1556,
"s": 1515,
"text": "This will produce the following output −"
},
{
"code": null,
"e": 2662,
"s": 1556,
"text": "+---------------------------+\n| Database |\n+---------------------------+\n| bothinnodbandmyisam |\n| business |\n| commandline |\n| customer_tracker_database |\n| customertracker |\n| database1 |\n| databasesample |\n| demo |\n| education |\n| hb_student_tracker |\n| hello |\n| information_schema |\n| javadatabase2 |\n| javasampledatabase |\n| login |\n| mybusiness |\n| mydatabase |\n| mysql |\n| onetomanyrelationship |\n| onlinebookstore |\n| performance_schema |\n| rdb |\n| sample |\n| sampledatabase |\n| schemasample |\n| sys |\n| test |\n| test3 |\n| tracker |\n| universitydatabase |\n| web |\n| webtracker |\n+---------------------------+\n32 rows in set (0.00 sec)"
}
]
|
How to shuffle a std::vector in C++ | A vector shuffle can be done with the Fisher-Yates shuffle algorithm.
In this algorithm, a linear scan of the vector is done, and each element is swapped with a randomly chosen element from the remaining elements, including the element itself.
Begin
   Declare a function show().
   Pass a constructor of a vector as a parameter within show() function.
   for (auto const& i: input)
      Print the value of variable i.
   Declare v of vector type.
      Initialize some values into v vector in array pattern.
   Declare a variable size of the integer datatype.
      Call size() function to get the size of the vector.
      Initialize size = v.size().
   for (int i = 0; i < size - 1; i++)
      int j = i + rand() % (size - i).
      call swap() function to swap the values of v[i] and v[j].
   print “Elements after getting shuffled”.
   Call show() function to display the shuffled values of v vector.
End.
Live Demo
#include <iostream>
#include <vector>
#include <algorithm>
using namespace std;
void show(vector<int> const &input) {
   for (auto const& i: input) {
      std::cout << i << " ";
   }
}
int main() {
   vector<int> v = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
   int size = v.size();
   for (int i = 0; i < size - 1; i++) {
      int j = i + rand() % (size - i);
      swap(v[i], v[j]);
   }
   cout<<"Elements after getting shuffled"<<endl;
   show(v);
   return 0;
}
Elements after getting shuffled
2 8 5 3 1 9 4 7 6 | [
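Note that the program above uses rand() without seeding it, so it produces the same "shuffled" order on every run; call srand() with a varying seed if you want different results. As an alternative to hand-rolling Fisher-Yates, the standard library's std::shuffle does the same job with a proper random engine. The sketch below is separate from the example above:

#include <algorithm>
#include <random>
#include <vector>
#include <iostream>

int main() {
   std::vector<int> v = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
   std::random_device rd;
   std::mt19937 g(rd());
   std::shuffle(v.begin(), v.end(), g);   // shuffles using a uniform random engine
   for (int x : v) std::cout << x << " ";
   return 0;
}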
{
"code": null,
"e": 1130,
"s": 1062,
"text": "A vector shuffle can be done in the Fisher-Yates shuffle algorithm."
},
{
"code": null,
"e": 1295,
"s": 1130,
"text": "In this algorithm, a linear scan of a vector is done and then swap each element with a random element among all the remaining element, including the element itself."
},
{
"code": null,
"e": 1996,
"s": 1295,
"text": "Begin\n Declare a function show().\n Pass a constructor of a vector as a parameter within show() function.\n for (auto const& i: input)\n Print the value of variable i.\n Declare v of vector type.\n Initialize some values into v vector in array pattern.\n Declare a variable size of the integer datatype.\n Call size() function to get the size of the vector.\n Initialize size = v.size().\n for (int i = 0; i < size - 1; i++)\n int j = i + rand() % (size - i).\n call swap() function to swap the values of v[i] and v[j].\n print “Elements after getting shuffled”.\n Call show() function to display the suffled value of v vector.\nEnd."
},
{
"code": null,
"e": 2007,
"s": 1996,
"text": " Live Demo"
},
{
"code": null,
"e": 2465,
"s": 2007,
"text": "#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\nvoid show(vector<int> const &input) {\n for (auto const& i: input) {\n std::cout << i << \" \";\n }\n}\nint main() {\n vector<int> v = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };\n int size = v.size();\n for (int i = 0; i < size - 1; i++) {\n int j = i + rand() % (size - i);\n swap(v[i], v[j]);\n }\n cout<<\"Elements after getting shuffled\"<<endl;\n show(v);\n return 0;\n}"
},
{
"code": null,
"e": 2515,
"s": 2465,
"text": "Elements after getting shuffled\n2 8 5 3 1 9 4 7 6"
}
]
|
Symfony - Ajax Control | AJAX is a modern technology in web programming. It provides options to send and receive data in a webpage asynchronously, without refreshing the page. Let us learn Symfony AJAX programming in this chapter.
Symfony framework provides options to identify whether the request type is AJAX or not. The Request class of the Symfony HttpFoundation component has a method, isXmlHttpRequest(), for this purpose. If an AJAX request is made, the current request object's isXmlHttpRequest() method returns true, otherwise false.
This method is used to handle an AJAX request properly in the server side.
if ($request->isXmlHttpRequest()) {
// Ajax request
} else {
// Normal request
}
Symfony also provides a JSON-based response class, JsonResponse, to create responses in JSON format. We can combine these two methods to create a simple and clean AJAX-based web application.
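In isolation, returning JSON from a controller action is as simple as the following one-liner (the data values here are made up for illustration):

return new JsonResponse(array('name' => 'Jon', 'address' => 'Delhi'));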
Let us add a new page, student/ajax in student application and try to fetch the student information asynchronously.
Step 1 − Add ajaxAction method in StudentController(src/AppBundle/Controller/StudentController.php).
/**
 * @Route("/student/ajax")
 */
public function ajaxAction(Request $request) {
   $students = $this->getDoctrine()
      ->getRepository('AppBundle:Student')
      ->findAll();

   if ($request->isXmlHttpRequest() || $request->query->get('showJson') == 1) {
      $jsonData = array();
      $idx = 0;
      foreach($students as $student) {
         $temp = array(
            'name' => $student->getName(),
            'address' => $student->getAddress(),
         );
         $jsonData[$idx++] = $temp;
      }
      return new JsonResponse($jsonData);
   } else {
      return $this->render('student/ajax.html.twig');
   }
}
Here, if the request is AJAX, we fetch the student information, encode it as JSON and return it using a JsonResponse object. Otherwise, we just render the corresponding view.
Step 2 − Create a view file ajax.html.twig in Student views directory, app/Resources/views/student/ and add the following code.
{% extends 'base.html.twig' %}
{% block javascripts %}
<script language = "javascript"
src = "https://code.jquery.com/jquery-2.2.4.min.js"></script>
<script language = "javascript">
$(document).ready(function(){
$("#loadstudent").on("click", function(event){
$.ajax({
url: '/student/ajax',
type: 'POST',
dataType: 'json',
async: true,
success: function(data, status) {
var e = $('<tr><th>Name</th><th>Address</th></tr>');
$('#student').html('');
$('#student').append(e);
for(i = 0; i < data.length; i++) {
student = data[i];
var e = $('<tr><td id = "name"></td><td id = "address"></td></tr>');
$('#name', e).html(student['name']);
$('#address', e).html(student['address']);
$('#student').append(e);
}
},
error : function(xhr, textStatus, errorThrown) {
alert('Ajax request failed.');
}
});
});
});
</script>
{% endblock %}
{% block stylesheets %}
<style>
.table { border-collapse: collapse; }
.table th, td {
border-bottom: 1px solid #ddd;
width: 250px;
text-align: left;
align: left;
}
</style>
{% endblock %}
{% block body %}
<a id = "loadstudent" href = "#">Load student information</a>
</br>
</br>
<table class = "table">
<tbody id = "student"></tbody>
</table>
{% endblock %}
Here, we have created an anchor tag (id: loadstudent) to load the student information using an AJAX call. The AJAX call is done using jQuery. The event attached to the loadstudent tag fires when a user clicks it. It then fetches the student information through an AJAX call and generates the required HTML code dynamically.
Step 3 − Finally, run the application, http://localhost:8000/student/ajax, and click the Load student information anchor tab.
{
"code": null,
"e": 2409,
"s": 2203,
"text": "AJAX is a modern technology in web programming. It provides options to send and receive data in a webpage asynchronously, without refreshing the page. Let us learn Symfony AJAX programming in this chapter."
},
{
"code": null,
"e": 2712,
"s": 2409,
"text": "Symfony framework provides options to identity whether the request type is AJAX or not. Request class of Symfony HttpFoundation component has a method, isXmlHttpRequest() for this purpose. If an AJAX request is made, the current request object's isXmlHttpRequest() method returns true, otherwise false."
},
{
"code": null,
"e": 2787,
"s": 2712,
"text": "This method is used to handle an AJAX request properly in the server side."
},
{
"code": null,
"e": 2884,
"s": 2787,
"text": "if ($request->isXmlHttpRequest()) { \n // Ajax request \n} else { \n // Normal request \n} \n"
},
{
"code": null,
"e": 3077,
"s": 2884,
"text": "Symfony also provides a JSON based Response class, JsonResponse to create the response in JSON format. We can combine these two methods to create a simple and clean AJAX based web application."
},
{
"code": null,
"e": 3193,
"s": 3077,
"text": "Let us add a new page, student/ajax in student application and try to fetch the student information asynchronously."
},
{
"code": null,
"e": 3294,
"s": 3193,
"text": "Step 1 − Add ajaxAction method in StudentController(src/AppBundle/Controller/StudentController.php)."
},
{
"code": null,
"e": 3971,
"s": 3294,
"text": "/** \n * @Route(\"/student/ajax\") \n*/ \npublic function ajaxAction(Request $request) { \n $students = $this->getDoctrine() \n ->getRepository('AppBundle:Student') \n ->findAll(); \n \n if ($request->isXmlHttpRequest() || $request->query->get('showJson') == 1) { \n $jsonData = array(); \n $idx = 0; \n foreach($students as $student) { \n $temp = array(\n 'name' => $student->getName(), \n 'address' => $student->getAddress(), \n ); \n $jsonData[$idx++] = $temp; \n } \n return new JsonResponse($jsonData); \n } else { \n return $this->render('student/ajax.html.twig'); \n } \n} "
},
{
"code": null,
"e": 4140,
"s": 3971,
"text": "Here, if the request is AJAX, we fetch student information, encode it as JSON and return it using JsonResponse object. Otherwise, we just render the corresponding view."
},
{
"code": null,
"e": 4268,
"s": 4140,
"text": "Step 2 − Create a view file ajax.html.twig in Student views directory, app/Resources/views/student/ and add the following code."
},
{
"code": null,
"e": 6105,
"s": 4268,
"text": "{% extends 'base.html.twig' %} \n{% block javascripts %} \n <script language = \"javascript\" \n src = \"https://code.jquery.com/jquery-2.2.4.min.js\"></script> \n \n <script language = \"javascript\"> \n $(document).ready(function(){ \n $(\"#loadstudent\").on(\"click\", function(event){ \n $.ajax({ \n url: '/student/ajax', \n type: 'POST', \n dataType: 'json', \n async: true, \n \n success: function(data, status) { \n var e = $('<tr><th>Name</th><th>Address</th></tr>'); \n $('#student').html(''); \n $('#student').append(e); \n \n for(i = 0; i < data.length; i++) { \n student = data[i]; \n var e = $('<tr><td id = \"name\"></td><td id = \"address\"></td></tr>');\n \n $('#name', e).html(student['name']); \n $('#address', e).html(student['address']); \n $('#student').append(e); \n } \n }, \n error : function(xhr, textStatus, errorThrown) { \n alert('Ajax request failed.'); \n } \n }); \n }); \n }); \n </script> \n{% endblock %} \n\n{% block stylesheets %} \n <style> \n .table { border-collapse: collapse; } \n .table th, td { \n border-bottom: 1px solid #ddd; \n width: 250px; \n text-align: left; \n align: left; \n } \n </style> \n{% endblock %} \n\n{% block body %} \n <a id = \"loadstudent\" href = \"#\">Load student information</a> \n </br> \n </br> \n \n <table class = \"table\"> \n <tbody id = \"student\"></tbody> \n </table> \n{% endblock %} "
},
{
"code": null,
"e": 6420,
"s": 6105,
"text": "Here, we have created an anchor tag (id: loadstudent) to load the student information using AJAX call. The AJAX call is done using JQuery. Event attached to loadstudent tag activates when a user clicks it. Then, it will fetch the student information using AJAX call and generate the required HTML code dynamically."
},
{
"code": null,
"e": 6544,
"s": 6420,
"text": "Step 3− Finally, run the application, http://localhost:8000/student/ajax and click the Load student information anchor tab."
},
{
"code": null,
"e": 6551,
"s": 6544,
"text": " Print"
},
{
"code": null,
"e": 6562,
"s": 6551,
"text": " Add Notes"
}
]
|
frozenset() in Python | This function converts a mutable iterable, such as a list, into an immutable frozenset object. This is needed when we have declared a list whose items are changeable, but after certain steps we want to stop allowing its elements to change. In such a scenario, we apply the frozenset() function as shown below.
Syntax: frozenset(iterable_object_name)
In the example below we take a list, change one of its elements and print it. In the next step we apply the frozenset function and try changing an element again. In this second step we get an error showing that the collection cannot be modified anymore.
# Before applying frozenset()
some_days = ["Mon","Tue","Wed","Thu"]
# change element
some_days[2]="Fri"
print("some_days =",some_days)
# Apply frozenset()
fixed_days=frozenset(some_days)
print("fixed_days= ",fixed_days)
# Change element in frozenset
fixed_days[2]="Wed"
Running the above code gives us the following result −
some_days = ['Mon', 'Tue', 'Fri', 'Thu']
fixed_days= frozenset({'Thu', 'Mon', 'Fri', 'Tue'})
Traceback (most recent call last):
  File "/py3.py", line 14, in <module>
    fixed_days[2]="Wed"
TypeError: 'frozenset' object does not support item assignment
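A frozenset is only immutable with respect to its contents; read operations and set algebra still work on it. A small illustration, separate from the example above:

fixed_days = frozenset(["Mon", "Tue", "Wed", "Thu"])
print("Tue" in fixed_days)     # True  - membership tests still work
print(fixed_days | {"Fri"})    # union returns a new frozenset
print(len(fixed_days))         # 4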
{
"code": null,
"e": 1350,
"s": 1062,
"text": "This function helps in converting a mutable list to an immutable one. This is needed when we have declared a list whose items are changeable but after certain steps we want to stop allowing the elements in it to change. In such scenario, we apply the frozenset() function as shown below."
},
{
"code": null,
"e": 1390,
"s": 1350,
"text": "Syntax: frozenset(iterable_object_name)"
},
{
"code": null,
"e": 1638,
"s": 1390,
"text": "In the below example we take a list, change its element and print it. Then in the next step we apply the frozenset function, and try changing the element again. In the second step we get the error showing that the list can not be modified anymore."
},
{
"code": null,
"e": 1908,
"s": 1638,
"text": "# Before applying forzenset()\nsome_days = [\"Mom\",\"Tue\",\"Wed\",\"Thu\"]\n# change element\nsome_days[2]=\"Fri\"\nprint(\"some_days =\",some_days)\n# Apply frozenset()\nfixed_days=frozenset(some_days)\nprint(\"fixed_days= \",fixed_days)\n# Change element in frozenset\nfixed_days[2]=\"Wed\""
},
{
"code": null,
"e": 1963,
"s": 1908,
"text": "Running the above code gives us the following result −"
},
{
"code": null,
"e": 2202,
"s": 1963,
"text": "some_days = ['Mom', 'Tue', 'Fri', 'Thu']\nTraceback (most recent call last):\nfixed_days= frozenset({'Thu', 'Mom', 'Fri', 'Tue'})\nFile \"/py3.py\", line 14, in\nfixed_days[2]=\"Wed\"\nTypeError: 'frozenset' object does not support item assignment"
}
]
|
Can we override private methods in Java? - GeeksforGeeks | 06 Aug, 2021
Let us first consider the following Java program as a simple example of Overriding or Runtime Polymorphism.
Java
class Base {
    public void fun() {
        System.out.println("Base fun");
    }
}

class Derived extends Base {
    public void fun() {
        // overrides the Base's fun()
        System.out.println("Derived fun");
    }

    public static void main(String[] args) {
        Base obj = new Derived();
        obj.fun();
    }
}
The program prints “Derived fun”. The Base class reference ‘obj’ refers to a derived class object (see expression “Base obj = new Derived()”). When fun() is called on obj, the call is made according to the type of referred object, not according to the reference.
Is Overriding possible with private methods? Predict the output of the following program.
Java
class Base {
    private void fun() {
        System.out.println("Base fun");
    }
}

class Derived extends Base {
    private void fun() {
        System.out.println("Derived fun");
    }

    public static void main(String[] args) {
        Base obj = new Derived();
        obj.fun();
    }
}
We get the compiler error "fun() has private access in Base" (See this). So the compiler tries to call the base class function, not the derived class one, which means fun() is not overridden.
An inner class can access private members of its outer class. What happens if the inner class extends its outer class and defines its own fun()? In the following program, fun() of Inner accesses the private data member msg of Outer, which is fine by the compiler.
Java
/* Java program to demonstrate whether we can override
   private method of outer class inside its inner class */
class Outer {
    private String msg = "GeeksforGeeks";

    private void fun() {
        System.out.println("Outer fun()");
    }

    class Inner extends Outer {
        private void fun() {
            System.out.println("Accessing Private Member of Outer: " + msg);
        }
    }

    public static void main(String args[]) {
        // In order to create instance of Inner class, we need an Outer
        // class instance. So, first create Outer class instance and then
        // inner class instance.
        Outer o = new Outer();
        Inner i = o.new Inner();

        // This will call Inner's fun, the purpose of this call is to
        // show that private members of Outer can be accessed in Inner.
        i.fun();

        // o.fun() calls Outer's fun (No run-time polymorphism).
        o = i;
        o.fun();
    }
}
Output:
Accessing Private Member of Outer: GeeksforGeeks
Outer fun()
In the above program, we created an outer class and an inner class. We extended Inner from Outer and created a method fun() in both Outer and Inner. If we observe the output, it is clear that the method fun() has not been overridden. This is because private methods are bound at compile time, and it is the type of the reference variable, not the type of the object it refers to, that determines which method is called. As a side note, private methods may be performance-wise better (compared to non-private and non-final methods) due to static binding.
Comparison With C++
1) In Java, an inner class is allowed to access private data members of its outer class. This behavior is the same as in C++ (See this).
2) In Java, methods declared as private can never be overridden; they are in fact bound at compile time. This behavior is different from C++. In C++, we can have virtual private methods (See this), as the sketch below illustrates.
This article is contributed by Chandra Prakash. Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above.
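To illustrate the C++ side of that comparison, here is a minimal sketch (not from the original article): a private virtual function in the base class can still be overridden by a derived class, and the override is picked up through virtual dispatch.

#include <iostream>

class Base {
private:
    virtual void fun() { std::cout << "Base fun" << std::endl; }
public:
    void callFun() { fun(); }   // virtual dispatch happens here
};

class Derived : public Base {
private:
    void fun() override { std::cout << "Derived fun" << std::endl; }
};

int main() {
    Derived d;
    Base& b = d;
    b.callFun();   // prints "Derived fun" - the private virtual was overridden
    return 0;
}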
{
"code": null,
"e": 23788,
"s": 23760,
"text": "\n06 Aug, 2021"
},
{
"code": null,
"e": 23896,
"s": 23788,
"text": "Let us first consider the following Java program as a simple example of Overriding or Runtime Polymorphism."
},
{
"code": null,
"e": 23901,
"s": 23896,
"text": "Java"
},
{
"code": "class Base { public void fun() { System.out.println(\"Base fun\"); }} class Derived extends Base { public void fun() { // overrides the Base's fun() System.out.println(\"Derived fun\"); } public static void main(String[] args) { Base obj = new Derived(); obj.fun(); } }",
"e": 24202,
"s": 23901,
"text": null
},
{
"code": null,
"e": 24466,
"s": 24202,
"text": "The program prints “Derived fun”. The Base class reference ‘obj’ refers to a derived class object (see expression “Base obj = new Derived()”). When fun() is called on obj, the call is made according to the type of referred object, not according to the reference. "
},
{
"code": null,
"e": 24557,
"s": 24466,
"text": "Is Overriding possible with private methods? Predict the output of the following program. "
},
{
"code": null,
"e": 24562,
"s": 24557,
"text": "Java"
},
{
"code": "class Base { private void fun() { System.out.println(\"Base fun\"); }} class Derived extends Base { private void fun() { System.out.println(\"Derived fun\"); } public static void main(String[] args) { Base obj = new Derived(); obj.fun(); } }",
"e": 24834,
"s": 24562,
"text": null
},
{
"code": null,
"e": 25004,
"s": 24834,
"text": "We get compiler error “fun() has private access in Base” (See this). So the compiler tries to call base class function, not derived class, means fun() is not overridden."
},
{
"code": null,
"e": 25315,
"s": 25004,
"text": "An inner class can access private members of its outer class. What if we extend an inner class and create fun() in the inner class? An Inner class can access private members of its outer class, for example in the following program, fun() of Inner accesses private data member msg which is fine by the compiler."
},
{
"code": null,
"e": 25320,
"s": 25315,
"text": "Java"
},
{
"code": "/* Java program to demonstrate whether we can override private method of outer class inside its inner class */class Outer { private String msg = \"GeeksforGeeks\"; private void fun() { System.out.println(\"Outer fun()\"); } class Inner extends Outer { private void fun() { System.out.println(\"Accessing Private Member of Outer: \" + msg); } } public static void main(String args[]) { // In order to create instance of Inner class, we need an Outer // class instance. So, first create Outer class instance and then // inner class instance. Outer o = new Outer(); Inner i = o.new Inner(); // This will call Inner's fun, the purpose of this call is to // show that private members of Outer can be accessed in Inner. i.fun(); // o.fun() calls Outer's fun (No run-time polymorphism). o = i; o.fun(); }}",
"e": 26302,
"s": 25320,
"text": null
},
{
"code": null,
"e": 26311,
"s": 26302,
"text": "Output: "
},
{
"code": null,
"e": 26372,
"s": 26311,
"text": "Accessing Private Member of Outer: GeeksforGeeks\nOuter fun()"
},
{
"code": null,
"e": 26942,
"s": 26372,
"text": "In the above program, we created an outer class and an inner class. We extended Inner from Outer and created a method fun() in both Outer and Inner. If we observe our output, then it is clear that the method fun() has not been overridden. It is so because private methods are bonded during compile time and it is the type of the reference variable – not the type of object that it refers to – that determines what method to be called.. As a side note, private methods may be performance-wise better (compared to non-private and non-final methods) due to static binding."
},
{
"code": null,
"e": 27462,
"s": 26942,
"text": "Comparison With C++ 1) In Java, inner Class is allowed to access private data members of outer class. This behavior is same as C++ (See this). 2) In Java, methods declared as private can never be overridden, they are in-fact bounded during compile time. This behavior is different from C++. In C++, we can have virtual private methods (See this).This article is contributed by Chandra Prakash. Please write comments if you find anything incorrect, or you want to share more information about the topic discussed above. "
},
{
"code": null,
"e": 27479,
"s": 27462,
"text": "surinderdawra388"
},
{
"code": null,
"e": 27484,
"s": 27479,
"text": "Java"
},
{
"code": null,
"e": 27489,
"s": 27484,
"text": "Java"
},
{
"code": null,
"e": 27587,
"s": 27489,
"text": "Writing code in comment?\nPlease use ide.geeksforgeeks.org,\ngenerate link and share the link here."
},
{
"code": null,
"e": 27602,
"s": 27587,
"text": "Arrays in Java"
},
{
"code": null,
"e": 27646,
"s": 27602,
"text": "Split() String method in Java with examples"
},
{
"code": null,
"e": 27668,
"s": 27646,
"text": "For-each loop in Java"
},
{
"code": null,
"e": 27704,
"s": 27668,
"text": "Arrays.sort() in Java with examples"
},
{
"code": null,
"e": 27729,
"s": 27704,
"text": "Reverse a string in Java"
},
{
"code": null,
"e": 27761,
"s": 27729,
"text": "Initialize an ArrayList in Java"
},
{
"code": null,
"e": 27812,
"s": 27761,
"text": "Object Oriented Programming (OOPs) Concept in Java"
},
{
"code": null,
"e": 27842,
"s": 27812,
"text": "HashMap in Java with Examples"
},
{
"code": null,
"e": 27861,
"s": 27842,
"text": "Interfaces in Java"
}
]
|
LinkedList AddBefore method in C# | Add a node before a given node in C# using the AddBefore() method.
Our LinkedList with string nodes.
string [] students = {"Henry","David","Tom"};
LinkedList<string> list = new LinkedList<string>(students);
Now, let’s add a node at the end.
// adding a node at the end
var newNode = list.AddLast("Brad");
Use AddBefore() method to add a node before the node added above.
list.AddBefore(newNode, "Emma");
Live Demo
using System;
using System.Collections.Generic;
class Demo {
   static void Main() {
      string [] students = {"Henry","David","Tom"};
      LinkedList<string> list = new LinkedList<string>(students);
      foreach (var stu in list) {
         Console.WriteLine(stu);
      }
      // adding a node at the end
      var newNode = list.AddLast("Brad");
      // adding a new node before the node added above
      list.AddBefore(newNode, "Emma");
      Console.WriteLine("LinkedList after adding new nodes...");
      foreach (var stu in list) {
         Console.WriteLine(stu);
      }
   }
}
Henry
David
Tom
LinkedList after adding new nodes...
Henry
David
Tom
Emma
Brad | [
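AddBefore is not limited to nodes you just created; it also works with a node located by Find(). A small variation on the program above (assuming the same list contents; the name "Alice" is just illustrative):

// Insert "Alice" just before the existing "Tom" node
var tomNode = list.Find("Tom");
if (tomNode != null) {
   list.AddBefore(tomNode, "Alice");
}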
{
"code": null,
"e": 1129,
"s": 1062,
"text": "Add a node before a given node in C# using the AddBefore() method."
},
{
"code": null,
"e": 1163,
"s": 1129,
"text": "Our LinkedList with string nodes."
},
{
"code": null,
"e": 1269,
"s": 1163,
"text": "string [] students = {\"Henry\",\"David\",\"Tom\"};\nLinkedList<string> list = new LinkedList<string>(students);"
},
{
"code": null,
"e": 1301,
"s": 1269,
"text": "Now, let’s add node at the end."
},
{
"code": null,
"e": 1365,
"s": 1301,
"text": "// adding a node at the end\nvar newNode = list.AddLast(\"Brad\");"
},
{
"code": null,
"e": 1431,
"s": 1365,
"text": "Use AddBefore() method to add a node before the node added above."
},
{
"code": null,
"e": 1464,
"s": 1431,
"text": "list.AddBefore(newNode, \"Emma\");"
},
{
"code": null,
"e": 1475,
"s": 1464,
"text": " Live Demo"
},
{
"code": null,
"e": 2070,
"s": 1475,
"text": "using System;\nusing System.Collections.Generic;\nclass Demo {\n static void Main() {\n string [] students = {\"Henry\",\"David\",\"Tom\"};\n LinkedList<string> list = new LinkedList<string>(students);\n foreach (var stu in list) {\n Console.WriteLine(stu);\n }\n // adding a node at the end\n var newNode = list.AddLast(\"Brad\");\n // adding a new node before the node added above\n list.AddBefore(newNode, \"Emma\");\n Console.WriteLine(\"LinkedList after adding new nodes...\");\n foreach (var stu in list) {\n Console.WriteLine(stu);\n }\n }\n}"
},
{
"code": null,
"e": 2149,
"s": 2070,
"text": "Henry\nDavid\nTom\nLinkedList after adding new nodes...\nHenry\nDavid\nTom\nEmma\nBrad"
}
]
|