{"edges":[{"edgeType":"contains","source":673,"target":873},{"edgeType":"contains","source":2880,"target":3014},{"edgeType":"calls","source":1508,"target":1511},{"edgeType":"calls","source":2703,"target":2520},{"edgeType":"calls","source":1096,"target":806},{"edgeType":"calls","source":1760,"target":834},{"edgeType":"contains","source":2444,"target":146},{"edgeType":"contains","source":1913,"target":2072},{"edgeType":"contains","source":2752,"target":2754},{"edgeType":"calls","source":425,"target":466},{"edgeType":"contains","source":2299,"target":2527},{"edgeType":"contains","source":2193,"target":2642},{"edgeType":"calls","source":841,"target":255},{"edgeType":"calls","source":1394,"target":1345},{"edgeType":"contains","source":2832,"target":2916},{"edgeType":"contains","source":784,"target":867},{"edgeType":"contains","source":1913,"target":2073},{"edgeType":"contains","source":2958,"target":2974},{"edgeType":"contains","source":2519,"target":2577},{"edgeType":"calls","source":162,"target":163},{"edgeType":"contains","source":1348,"target":1354},{"edgeType":"contains","source":487,"target":825},{"edgeType":"contains","source":1,"target":300},{"edgeType":"contains","source":1835,"target":1848},{"edgeType":"contains","source":550,"target":986},{"edgeType":"contains","source":1570,"target":1607},{"edgeType":"calls","source":592,"target":573},{"edgeType":"calls","source":967,"target":47},{"edgeType":"contains","source":2730,"target":530},{"edgeType":"contains","source":1268,"target":1456},{"edgeType":"calls","source":1957,"target":540},{"edgeType":"contains","source":1430,"target":1455},{"edgeType":"contains","source":481,"target":482},{"edgeType":"contains","source":2299,"target":2528},{"edgeType":"contains","source":2752,"target":2755},{"edgeType":"calls","source":2053,"target":2056},{"edgeType":"calls","source":172,"target":116},{"edgeType":"contains","source":225,"target":226},{"edgeType":"calls","source":547,"target":383},{"edgeType":"calls","source":1974,"target":541},{"edgeType":"contains","source":550,"target":1249},{"edgeType":"contains","source":2832,"target":2917},{"edgeType":"contains","source":2273,"target":2277},{"edgeType":"imports","source":840,"target":266},{"edgeType":"contains","source":1929,"target":521},{"edgeType":"contains","source":3072,"target":1555},{"edgeType":"calls","source":551,"target":523},{"edgeType":"contains","source":1662,"target":1663},{"edgeType":"contains","source":1348,"target":1355},{"edgeType":"contains","source":487,"target":824},{"edgeType":"contains","source":994,"target":698},{"edgeType":"calls","source":121,"target":112},{"edgeType":"calls","source":1160,"target":938},{"edgeType":"calls","source":175,"target":26},{"edgeType":"contains","source":671,"target":673},{"edgeType":"calls","source":592,"target":576},{"edgeType":"contains","source":2299,"target":2525},{"edgeType":"contains","source":2920,"target":2300},{"edgeType":"calls","source":733,"target":698},{"edgeType":"contains","source":2519,"target":2575},{"edgeType":"calls","source":418,"target":421},{"edgeType":"contains","source":1381,"target":1386},{"edgeType":"contains","source":1348,"target":1352},{"edgeType":"contains","source":2966,"target":81},{"edgeType":"contains","source":1675,"target":464},{"edgeType":"contains","source":487,"target":827},{"edgeType":"contains","source":460,"target":1399},{"edgeType":"contains","source":1268,"target":1454},{"edgeType":"calls","source":1155,"target":1092},{"edgeType":"calls","source":1558,"target":1548},{"edgeType":"contains","source":2752,"target"
:2753},{"edgeType":"contains","source":1430,"target":1453},{"edgeType":"contains","source":1913,"target":2071},{"edgeType":"calls","source":1328,"target":486},{"edgeType":"contains","source":2299,"target":2526},{"edgeType":"contains","source":460,"target":2192},{"edgeType":"contains","source":1211,"target":1900},{"edgeType":"contains","source":2407,"target":2084},{"edgeType":"contains","source":910,"target":926},{"edgeType":"contains","source":2995,"target":2883},{"edgeType":"contains","source":2,"target":6},{"edgeType":"contains","source":2717,"target":1723},{"edgeType":"contains","source":487,"target":826},{"edgeType":"calls","source":490,"target":302},{"edgeType":"contains","source":1902,"target":2941},{"edgeType":"contains","source":1348,"target":1353},{"edgeType":"contains","source":550,"target":2044},{"edgeType":"contains","source":926,"target":962},{"edgeType":"contains","source":1430,"target":1458},{"edgeType":"contains","source":2511,"target":2301},{"edgeType":"calls","source":1588,"target":877},{"edgeType":"contains","source":2299,"target":2531},{"edgeType":"contains","source":1153,"target":1324},{"edgeType":"contains","source":2193,"target":2381},{"edgeType":"contains","source":1744,"target":1766},{"edgeType":"calls","source":2345,"target":1458},{"edgeType":"contains","source":1913,"target":2077},{"edgeType":"contains","source":840,"target":985},{"edgeType":"contains","source":1348,"target":1358},{"edgeType":"contains","source":2,"target":9},{"edgeType":"calls","source":514,"target":346},{"edgeType":"contains","source":1381,"target":1392},{"edgeType":"imports","source":3050,"target":2837},{"edgeType":"contains","source":550,"target":726},{"edgeType":"calls","source":682,"target":687},{"edgeType":"contains","source":1662,"target":1666},{"edgeType":"contains","source":487,"target":829},{"edgeType":"calls","source":905,"target":909},{"edgeType":"contains","source":1268,"target":1460},{"edgeType":"contains","source":1902,"target":2947},{"edgeType":"calls","source":1285,"target":1284},{"edgeType":"contains","source":666,"target":1093},{"edgeType":"contains","source":1495,"target":1558},{"edgeType":"contains","source":50,"target":105},{"edgeType":"contains","source":2299,"target":2532},{"edgeType":"contains","source":1982,"target":731},{"edgeType":"calls","source":1967,"target":754},{"edgeType":"contains","source":1744,"target":1767},{"edgeType":"calls","source":514,"target":80},{"edgeType":"calls","source":2771,"target":2787},{"edgeType":"calls","source":2218,"target":1961},{"edgeType":"contains","source":1348,"target":1359},{"edgeType":"contains","source":910,"target":928},{"edgeType":"contains","source":1495,"target":1559},{"edgeType":"calls","source":905,"target":908},{"edgeType":"contains","source":550,"target":989},{"edgeType":"contains","source":487,"target":828},{"edgeType":"contains","source":1268,"target":1461},{"edgeType":"contains","source":1084,"target":1087},{"edgeType":"contains","source":3041,"target":3046},{"edgeType":"contains","source":1268,"target":1457},{"edgeType":"contains","source":460,"target":1140},{"edgeType":"calls","source":2270,"target":2195},{"edgeType":"contains","source":2661,"target":2670},{"edgeType":"imports","source":1682,"target":319},{"edgeType":"calls","source":2850,"target":1128},{"edgeType":"contains","source":2299,"target":2529},{"edgeType":"contains","source":51,"target":77},{"edgeType":"contains","source":2752,"target":2756},{"edgeType":"calls","source":674,"target":144},{"edgeType":"contains","source":2519,"target":2579},{"edgeType":"contai
ns","source":1381,"target":1390},{"edgeType":"calls","source":2508,"target":2481},{"edgeType":"contains","source":1348,"target":1356},{"edgeType":"contains","source":1913,"target":2075},{"edgeType":"contains","source":2958,"target":2976},{"edgeType":"contains","source":1662,"target":1664},{"edgeType":"contains","source":1570,"target":1609},{"edgeType":"contains","source":2280,"target":2325},{"edgeType":"contains","source":1084,"target":1086},{"edgeType":"calls","source":1483,"target":1226},{"edgeType":"contains","source":673,"target":878},{"edgeType":"calls","source":491,"target":531},{"edgeType":"calls","source":1276,"target":1301},{"edgeType":"contains","source":2299,"target":2530},{"edgeType":"imports","source":729,"target":266},{"edgeType":"contains","source":1611,"target":1660},{"edgeType":"contains","source":2752,"target":2757},{"edgeType":"imports","source":1631,"target":1635},{"edgeType":"calls","source":3095,"target":1198},{"edgeType":"contains","source":2958,"target":2977},{"edgeType":"contains","source":1348,"target":1357},{"edgeType":"calls","source":2938,"target":251},{"edgeType":"calls","source":567,"target":25},{"edgeType":"contains","source":10,"target":2933},{"edgeType":"calls","source":1409,"target":877},{"edgeType":"contains","source":487,"target":830},{"edgeType":"imports","source":1682,"target":1640},{"edgeType":"calls","source":839,"target":842},{"edgeType":"contains","source":460,"target":1142},{"edgeType":"contains","source":8,"target":91},{"edgeType":"contains","source":1744,"target":1770},{"edgeType":"calls","source":2466,"target":2195},{"edgeType":"contains","source":2752,"target":2762},{"edgeType":"contains","source":2280,"target":2331},{"edgeType":"calls","source":294,"target":26},{"edgeType":"calls","source":1547,"target":558},{"edgeType":"calls","source":1990,"target":1888},{"edgeType":"calls","source":2619,"target":623},{"edgeType":"contains","source":1631,"target":252},{"edgeType":"contains","source":1084,"target":1089},{"edgeType":"contains","source":3000,"target":3002},{"edgeType":"contains","source":1268,"target":935},{"edgeType":"contains","source":50,"target":110},{"edgeType":"calls","source":1403,"target":1322},{"edgeType":"contains","source":1348,"target":1362},{"edgeType":"imports","source":2858,"target":81},{"edgeType":"contains","source":10,"target":1615},{"edgeType":"contains","source":3015,"target":3065},{"edgeType":"contains","source":883,"target":977},{"edgeType":"contains","source":711,"target":3138},{"edgeType":"calls","source":1301,"target":1313},{"edgeType":"calls","source":748,"target":751},{"edgeType":"contains","source":2717,"target":1734},{"edgeType":"contains","source":1744,"target":1771},{"edgeType":"contains","source":2752,"target":2763},{"edgeType":"contains","source":1631,"target":253},{"edgeType":"contains","source":2280,"target":2332},{"edgeType":"contains","source":460,"target":877},{"edgeType":"contains","source":50,"target":109},{"edgeType":"calls","source":3168,"target":3161},{"edgeType":"contains","source":550,"target":1258},{"edgeType":"contains","source":1381,"target":1397},{"edgeType":"contains","source":1153,"target":1327},{"edgeType":"contains","source":1348,"target":1363},{"edgeType":"calls","source":136,"target":167},{"edgeType":"calls","source":1846,"target":1858},{"edgeType":"calls","source":1856,"target":491},{"edgeType":"imports","source":1675,"target":550},{"edgeType":"contains","source":3015,"target":3066},{"edgeType":"calls","source":3110,"target":3109},{"edgeType":"contains","source":1268,"target":1465},{"edge
Type":"contains","source":2193,"target":2651},{"edgeType":"imports","source":3050,"target":470},{"edgeType":"contains","source":1913,"target":2082},{"edgeType":"contains","source":550,"target":1786},{"edgeType":"contains","source":1430,"target":1464},{"edgeType":"contains","source":1744,"target":1768},{"edgeType":"calls","source":1408,"target":641},{"edgeType":"imports","source":729,"target":277},{"edgeType":"contains","source":1835,"target":1854},{"edgeType":"calls","source":1990,"target":1890},{"edgeType":"calls","source":1311,"target":1270},{"edgeType":"calls","source":1802,"target":1640},{"edgeType":"calls","source":3102,"target":3089},{"edgeType":"contains","source":50,"target":112},{"edgeType":"contains","source":51,"target":81},{"edgeType":"contains","source":2193,"target":2383},{"edgeType":"calls","source":1714,"target":1725},{"edgeType":"calls","source":136,"target":170},{"edgeType":"contains","source":1348,"target":1360},{"edgeType":"imports","source":2636,"target":883},{"edgeType":"contains","source":1913,"target":2079},{"edgeType":"calls","source":3023,"target":518},{"edgeType":"calls","source":922,"target":116},{"edgeType":"contains","source":910,"target":671},{"edgeType":"calls","source":682,"target":685},{"edgeType":"calls","source":2922,"target":2856},{"edgeType":"imports","source":1600,"target":496},{"edgeType":"contains","source":1495,"target":1560},{"edgeType":"contains","source":711,"target":3140},{"edgeType":"contains","source":1268,"target":1462},{"edgeType":"calls","source":344,"target":327},{"edgeType":"calls","source":2695,"target":116},{"edgeType":"calls","source":500,"target":512},{"edgeType":"imports","source":910,"target":481},{"edgeType":"contains","source":1744,"target":1769},{"edgeType":"imports","source":2607,"target":460},{"edgeType":"contains","source":1835,"target":1855},{"edgeType":"contains","source":2280,"target":2330},{"edgeType":"contains","source":3000,"target":3001},{"edgeType":"contains","source":8,"target":92},{"edgeType":"contains","source":1631,"target":251},{"edgeType":"calls","source":1102,"target":877},{"edgeType":"contains","source":470,"target":569},{"edgeType":"contains","source":460,"target":879},{"edgeType":"calls","source":306,"target":184},{"edgeType":"contains","source":50,"target":111},{"edgeType":"calls","source":2364,"target":1128},{"edgeType":"contains","source":1656,"target":269},{"edgeType":"contains","source":1348,"target":1361},{"edgeType":"contains","source":10,"target":1616},{"edgeType":"calls","source":682,"target":684},{"edgeType":"contains","source":1495,"target":1561},{"edgeType":"calls","source":1723,"target":1709},{"edgeType":"contains","source":883,"target":978},{"edgeType":"contains","source":2,"target":14},{"edgeType":"contains","source":2967,"target":2972},{"edgeType":"contains","source":883,"target":982},{"edgeType":"contains","source":1744,"target":1774},{"edgeType":"contains","source":2245,"target":2363},{"edgeType":"calls","source":122,"target":69},{"edgeType":"calls","source":2028,"target":2027},{"edgeType":"calls","source":1161,"target":1157},{"edgeType":"contains","source":1348,"target":1366},{"edgeType":"calls","source":2774,"target":1892},{"edgeType":"contains","source":2767,"target":2037},{"edgeType":"contains","source":1835,"target":1860},{"edgeType":"contains","source":50,"target":115},{"edgeType":"contains","source":550,"target":1791},{"edgeType":"calls","source":1868,"target":116},{"edgeType":"calls","source":1786,"target":544},{"edgeType":"contains","source":910,"target":673},{"edgeType":"contains",
"source":1268,"target":1468},{"edgeType":"contains","source":1913,"target":2085},{"edgeType":"calls","source":2219,"target":1128},{"edgeType":"contains","source":550,"target":470},{"edgeType":"calls","source":2678,"target":376},{"edgeType":"contains","source":487,"target":573},{"edgeType":"calls","source":1695,"target":986},{"edgeType":"contains","source":1611,"target":1670},{"edgeType":"calls","source":1319,"target":486},{"edgeType":"imports","source":7,"target":460},{"edgeType":"contains","source":2717,"target":1738},{"edgeType":"contains","source":460,"target":1145},{"edgeType":"imports","source":3038,"target":2160},{"edgeType":"contains","source":1744,"target":1775},{"edgeType":"contains","source":2280,"target":2336},{"edgeType":"imports","source":2770,"target":1219},{"edgeType":"contains","source":1348,"target":1367},{"edgeType":"contains","source":1675,"target":479},{"edgeType":"contains","source":50,"target":113},{"edgeType":"imports","source":910,"target":743},{"edgeType":"contains","source":50,"target":114},{"edgeType":"calls","source":145,"target":148},{"edgeType":"contains","source":305,"target":400},{"edgeType":"calls","source":746,"target":544},{"edgeType":"contains","source":1844,"target":1847},{"edgeType":"contains","source":1744,"target":1776},{"edgeType":"contains","source":550,"target":1526},{"edgeType":"contains","source":2662,"target":536},{"edgeType":"calls","source":925,"target":544},{"edgeType":"contains","source":840,"target":992},{"edgeType":"contains","source":460,"target":1409},{"edgeType":"contains","source":487,"target":572},{"edgeType":"contains","source":883,"target":984},{"edgeType":"contains","source":3000,"target":3004},{"edgeType":"imports","source":1682,"target":1913},{"edgeType":"calls","source":1516,"target":1517},{"edgeType":"contains","source":2,"target":19},{"edgeType":"contains","source":2752,"target":2764},{"edgeType":"contains","source":2280,"target":2333},{"edgeType":"contains","source":2151,"target":2368},{"edgeType":"contains","source":550,"target":2058},{"edgeType":"contains","source":2767,"target":2035},{"edgeType":"contains","source":305,"target":404},{"edgeType":"contains","source":1348,"target":1364},{"edgeType":"contains","source":3015,"target":3067},{"edgeType":"calls","source":1459,"target":641},{"edgeType":"calls","source":1597,"target":327},{"edgeType":"calls","source":2037,"target":2014},{"edgeType":"calls","source":2094,"target":2094},{"edgeType":"contains","source":2880,"target":3024},{"edgeType":"calls","source":748,"target":749},{"edgeType":"contains","source":1268,"target":1466},{"edgeType":"contains","source":2672,"target":1280},{"edgeType":"contains","source":2193,"target":2652},{"edgeType":"contains","source":10,"target":1621},{"edgeType":"contains","source":3113,"target":2407},{"edgeType":"contains","source":3063,"target":1052},{"edgeType":"contains","source":3000,"target":3005},{"edgeType":"contains","source":883,"target":983},{"edgeType":"imports","source":2962,"target":550},{"edgeType":"contains","source":1611,"target":1668},{"edgeType":"calls","source":3064,"target":36},{"edgeType":"imports","source":3072,"target":1104},{"edgeType":"calls","source":1249,"target":544},{"edgeType":"contains","source":2280,"target":2334},{"edgeType":"calls","source":1724,"target":353},{"edgeType":"contains","source":10,"target":2148},{"edgeType":"calls","source":1396,"target":1008},{"edgeType":"contains","source":305,"target":403},{"edgeType":"contains","source":1348,"target":1365},{"edgeType":"calls","source":79,"target":82},{"edgeType":"c
alls","source":1316,"target":52},{"edgeType":"calls","source":616,"target":612},{"edgeType":"contains","source":10,"target":299},{"edgeType":"calls","source":1155,"target":1079},{"edgeType":"calls","source":1817,"target":641},{"edgeType":"imports","source":3072,"target":46},{"edgeType":"contains","source":1631,"target":256},{"edgeType":"contains","source":2742,"target":2783},{"edgeType":"contains","source":2900,"target":2906},{"edgeType":"contains","source":973,"target":1070},{"edgeType":"contains","source":1799,"target":1626},{"edgeType":"calls","source":2648,"target":2656},{"edgeType":"imports","source":3072,"target":2672},{"edgeType":"contains","source":2717,"target":1708},{"edgeType":"contains","source":784,"target":58},{"edgeType":"calls","source":2000,"target":1074},{"edgeType":"calls","source":1705,"target":970},{"edgeType":"contains","source":2223,"target":2224},{"edgeType":"contains","source":1,"target":548},{"edgeType":"calls","source":410,"target":155},{"edgeType":"calls","source":2395,"target":986},{"edgeType":"contains","source":2883,"target":2904},{"edgeType":"contains","source":1430,"target":1438},{"edgeType":"contains","source":2725,"target":1988},{"edgeType":"contains","source":3073,"target":3092},{"edgeType":"contains","source":1744,"target":1746},{"edgeType":"calls","source":359,"target":415},{"edgeType":"calls","source":1891,"target":753},{"edgeType":"imports","source":3041,"target":461},{"edgeType":"calls","source":1833,"target":1758},{"edgeType":"calls","source":2673,"target":2674},{"edgeType":"contains","source":666,"target":1073},{"edgeType":"calls","source":592,"target":591},{"edgeType":"contains","source":2900,"target":2907},{"edgeType":"contains","source":2742,"target":2784},{"edgeType":"contains","source":2958,"target":2959},{"edgeType":"calls","source":990,"target":936},{"edgeType":"calls","source":1335,"target":1340},{"edgeType":"contains","source":3041,"target":2500},{"edgeType":"contains","source":2725,"target":1990},{"edgeType":"calls","source":2664,"target":47},{"edgeType":"calls","source":2211,"target":877},{"edgeType":"contains","source":460,"target":588},{"edgeType":"contains","source":550,"target":2026},{"edgeType":"contains","source":2643,"target":2682},{"edgeType":"calls","source":2037,"target":2040},{"edgeType":"contains","source":2962,"target":2042},{"edgeType":"calls","source":1758,"target":383},{"edgeType":"contains","source":553,"target":611},{"edgeType":"contains","source":2883,"target":2905},{"edgeType":"contains","source":3073,"target":3093},{"edgeType":"contains","source":2151,"target":493},{"edgeType":"contains","source":84,"target":88},{"edgeType":"imports","source":549,"target":550},{"edgeType":"calls","source":1637,"target":698},{"edgeType":"contains","source":2717,"target":1709},{"edgeType":"calls","source":2192,"target":938},{"edgeType":"contains","source":2299,"target":2511},{"edgeType":"calls","source":304,"target":269},{"edgeType":"imports","source":729,"target":255},{"edgeType":"contains","source":973,"target":1072},{"edgeType":"contains","source":994,"target":421},{"edgeType":"calls","source":807,"target":270},{"edgeType":"contains","source":2962,"target":2039},{"edgeType":"contains","source":2519,"target":2559},{"edgeType":"contains","source":2717,"target":1706},{"edgeType":"calls","source":3152,"target":3150},{"edgeType":"calls","source":1165,"target":1064},{"edgeType":"imports","source":3072,"target":1084},{"edgeType":"calls","source":674,"target":693},{"edgeType":"contains","source":2487,"target":2494},{"edgeType":"contains","s
ource":550,"target":972},{"edgeType":"contains","source":673,"target":858},{"edgeType":"calls","source":146,"target":149},{"edgeType":"calls","source":2070,"target":2074},{"edgeType":"calls","source":402,"target":405},{"edgeType":"imports","source":2661,"target":83},{"edgeType":"calls","source":2766,"target":582},{"edgeType":"contains","source":926,"target":943},{"edgeType":"contains","source":2880,"target":2995},{"edgeType":"contains","source":2817,"target":2834},{"edgeType":"contains","source":1268,"target":1437},{"edgeType":"calls","source":1333,"target":1405},{"edgeType":"calls","source":1302,"target":1309},{"edgeType":"calls","source":725,"target":698},{"edgeType":"calls","source":359,"target":417},{"edgeType":"calls","source":2552,"target":2195},{"edgeType":"imports","source":3072,"target":819},{"edgeType":"contains","source":2966,"target":2973},{"edgeType":"contains","source":2832,"target":2899},{"edgeType":"calls","source":922,"target":932},{"edgeType":"contains","source":2519,"target":2560},{"edgeType":"contains","source":1611,"target":1639},{"edgeType":"calls","source":1525,"target":1530},{"edgeType":"contains","source":994,"target":684},{"edgeType":"imports","source":3072,"target":1083},{"edgeType":"contains","source":2768,"target":2769},{"edgeType":"calls","source":742,"target":698},{"edgeType":"calls","source":1075,"target":682},{"edgeType":"contains","source":550,"target":971},{"edgeType":"contains","source":2256,"target":2257},{"edgeType":"contains","source":1381,"target":49},{"edgeType":"contains","source":2883,"target":2903},{"edgeType":"imports","source":2661,"target":1932},{"edgeType":"contains","source":2501,"target":2589},{"edgeType":"contains","source":6,"target":394},{"edgeType":"contains","source":553,"target":613},{"edgeType":"calls","source":466,"target":534},{"edgeType":"calls","source":1961,"target":698},{"edgeType":"contains","source":2444,"target":128},{"edgeType":"calls","source":1276,"target":1321},{"edgeType":"calls","source":466,"target":270},{"edgeType":"calls","source":1302,"target":1303},{"edgeType":"contains","source":784,"target":855},{"edgeType":"calls","source":1532,"target":251},{"edgeType":"contains","source":550,"target":974},{"edgeType":"calls","source":230,"target":181},{"edgeType":"contains","source":2519,"target":2565},{"edgeType":"contains","source":3041,"target":2503},{"edgeType":"contains","source":844,"target":845},{"edgeType":"imports","source":6,"target":201},{"edgeType":"imports","source":3072,"target":818},{"edgeType":"contains","source":1314,"target":1338},{"edgeType":"imports","source":2636,"target":64},{"edgeType":"calls","source":2783,"target":52},{"edgeType":"calls","source":1564,"target":52},{"edgeType":"contains","source":550,"target":710},{"edgeType":"calls","source":1627,"target":1534},{"edgeType":"calls","source":3062,"target":2502},{"edgeType":"imports","source":1631,"target":36},{"edgeType":"calls","source":1791,"target":414},{"edgeType":"contains","source":3015,"target":402},{"edgeType":"imports","source":1523,"target":213},{"edgeType":"contains","source":1430,"target":1442},{"edgeType":"imports","source":2607,"target":698},{"edgeType":"contains","source":3007,"target":3028},{"edgeType":"calls","source":981,"target":684},{"edgeType":"contains","source":673,"target":860},{"edgeType":"calls","source":179,"target":177},{"edgeType":"contains","source":2982,"target":632},{"edgeType":"calls","source":539,"target":116},{"edgeType":"contains","source":4,"target":459},{"edgeType":"calls","source":1399,"target":938},{"edgeType":"cal
ls","source":814,"target":47},{"edgeType":"calls","source":1852,"target":108},{"edgeType":"calls","source":1500,"target":1507},{"edgeType":"contains","source":1645,"target":1648},{"edgeType":"contains","source":2469,"target":151},{"edgeType":"calls","source":175,"target":37},{"edgeType":"contains","source":7,"target":630},{"edgeType":"contains","source":460,"target":1121},{"edgeType":"contains","source":994,"target":158},{"edgeType":"contains","source":2886,"target":1758},{"edgeType":"imports","source":3072,"target":553},{"edgeType":"contains","source":550,"target":2030},{"edgeType":"contains","source":2519,"target":2566},{"edgeType":"imports","source":45,"target":50},{"edgeType":"contains","source":2935,"target":1562},{"edgeType":"imports","source":3072,"target":817},{"edgeType":"calls","source":3152,"target":3149},{"edgeType":"imports","source":3072,"target":24},{"edgeType":"calls","source":451,"target":464},{"edgeType":"contains","source":1268,"target":1444},{"edgeType":"contains","source":673,"target":859},{"edgeType":"contains","source":4,"target":458},{"edgeType":"contains","source":2403,"target":2462},{"edgeType":"contains","source":2444,"target":1191},{"edgeType":"contains","source":51,"target":58},{"edgeType":"contains","source":10,"target":1329},{"edgeType":"imports","source":2767,"target":494},{"edgeType":"contains","source":2193,"target":2366},{"edgeType":"contains","source":2883,"target":2909},{"edgeType":"calls","source":1640,"target":601},{"edgeType":"contains","source":1645,"target":1649},{"edgeType":"contains","source":2407,"target":2074},{"edgeType":"imports","source":458,"target":460},{"edgeType":"calls","source":2219,"target":1154},{"edgeType":"contains","source":550,"target":976},{"edgeType":"contains","source":2519,"target":2563},{"edgeType":"contains","source":3041,"target":2501},{"edgeType":"contains","source":1675,"target":451},{"edgeType":"contains","source":2643,"target":2683},{"edgeType":"contains","source":460,"target":1652},{"edgeType":"contains","source":1611,"target":1642},{"edgeType":"calls","source":772,"target":558},{"edgeType":"calls","source":785,"target":155},{"edgeType":"contains","source":1477,"target":1568},{"edgeType":"calls","source":1389,"target":1251},{"edgeType":"contains","source":2607,"target":2742},{"edgeType":"contains","source":2280,"target":2308},{"edgeType":"calls","source":1349,"target":641},{"edgeType":"contains","source":2900,"target":2908},{"edgeType":"contains","source":2962,"target":2043},{"edgeType":"contains","source":784,"target":857},{"edgeType":"imports","source":2858,"target":50},{"edgeType":"contains","source":10,"target":11},{"edgeType":"contains","source":673,"target":862},{"edgeType":"contains","source":2444,"target":1188},{"edgeType":"contains","source":1645,"target":1646},{"edgeType":"calls","source":1403,"target":1345},{"edgeType":"contains","source":2444,"target":131},{"edgeType":"calls","source":534,"target":537},{"edgeType":"calls","source":3169,"target":3147},{"edgeType":"calls","source":2059,"target":1885},{"edgeType":"contains","source":910,"target":914},{"edgeType":"contains","source":2519,"target":2564},{"edgeType":"calls","source":2253,"target":1154},{"edgeType":"contains","source":2767,"target":2011},{"edgeType":"imports","source":1682,"target":1887},{"edgeType":"contains","source":2729,"target":1868},{"edgeType":"calls","source":96,"target":108},{"edgeType":"contains","source":1704,"target":346},{"edgeType":"contains","source":2245,"target":2337},{"edgeType":"calls","source":1249,"target":569},{"edgeType":"ca
lls","source":1793,"target":353},{"edgeType":"contains","source":228,"target":386},{"edgeType":"imports","source":2962,"target":789},{"edgeType":"contains","source":2151,"target":2345},{"edgeType":"calls","source":1399,"target":939},{"edgeType":"contains","source":673,"target":861},{"edgeType":"contains","source":671,"target":923},{"edgeType":"calls","source":184,"target":23},{"edgeType":"contains","source":2403,"target":2460},{"edgeType":"contains","source":550,"target":1239},{"edgeType":"contains","source":2858,"target":2889},{"edgeType":"calls","source":2160,"target":2188},{"edgeType":"contains","source":3015,"target":3043},{"edgeType":"contains","source":2,"target":2636},{"edgeType":"contains","source":1645,"target":1647},{"edgeType":"calls","source":567,"target":570},{"edgeType":"contains","source":470,"target":812},{"edgeType":"contains","source":460,"target":2179},{"edgeType":"calls","source":359,"target":406},{"edgeType":"contains","source":2935,"target":1565},{"edgeType":"contains","source":305,"target":381},{"edgeType":"calls","source":1793,"target":348},{"edgeType":"calls","source":1758,"target":376},{"edgeType":"contains","source":1314,"target":1342},{"edgeType":"calls","source":2624,"target":2599},{"edgeType":"calls","source":175,"target":32},{"edgeType":"contains","source":2469,"target":2534},{"edgeType":"contains","source":228,"target":390},{"edgeType":"contains","source":2570,"target":2574},{"edgeType":"contains","source":2301,"target":2456},{"edgeType":"contains","source":2151,"target":2350},{"edgeType":"calls","source":2053,"target":2065},{"edgeType":"calls","source":2103,"target":2102},{"edgeType":"contains","source":673,"target":864},{"edgeType":"calls","source":411,"target":116},{"edgeType":"contains","source":2363,"target":2384},{"edgeType":"calls","source":2137,"target":1841},{"edgeType":"contains","source":1902,"target":2934},{"edgeType":"contains","source":2519,"target":2569},{"edgeType":"contains","source":1153,"target":1311},{"edgeType":"calls","source":600,"target":599},{"edgeType":"calls","source":1974,"target":1871},{"edgeType":"calls","source":306,"target":198},{"edgeType":"contains","source":550,"target":2034},{"edgeType":"calls","source":2798,"target":641},{"edgeType":"contains","source":1268,"target":1448},{"edgeType":"contains","source":586,"target":654},{"edgeType":"calls","source":1309,"target":1081},{"edgeType":"calls","source":1828,"target":1640},{"edgeType":"contains","source":1611,"target":64},{"edgeType":"imports","source":3072,"target":36},{"edgeType":"calls","source":1109,"target":938},{"edgeType":"calls","source":1574,"target":1586},{"edgeType":"contains","source":2966,"target":2983},{"edgeType":"calls","source":253,"target":256},{"edgeType":"calls","source":2160,"target":2186},{"edgeType":"calls","source":2262,"target":2195},{"edgeType":"contains","source":793,"target":1107},{"edgeType":"calls","source":1640,"target":1654},{"edgeType":"contains","source":1211,"target":1626},{"edgeType":"contains","source":1570,"target":1596},{"edgeType":"contains","source":1913,"target":2062},{"edgeType":"contains","source":844,"target":851},{"edgeType":"contains","source":2858,"target":2893},{"edgeType":"contains","source":2767,"target":2014},{"edgeType":"contains","source":2193,"target":2631},{"edgeType":"calls","source":1880,"target":1881},{"edgeType":"calls","source":359,"target":144},{"edgeType":"contains","source":550,"target":716},{"edgeType":"imports","source":840,"target":255},{"edgeType":"contains","source":586,"target":657},{"edgeType":"calls","source
":2766,"target":573},{"edgeType":"contains","source":460,"target":1127},{"edgeType":"contains","source":134,"target":135},{"edgeType":"contains","source":2672,"target":1259},{"edgeType":"contains","source":2403,"target":2463},{"edgeType":"contains","source":2982,"target":634},{"edgeType":"calls","source":1109,"target":677},{"edgeType":"contains","source":2151,"target":2348},{"edgeType":"contains","source":1835,"target":1838},{"edgeType":"calls","source":1613,"target":116},{"edgeType":"calls","source":3124,"target":2160},{"edgeType":"contains","source":2725,"target":2787},{"edgeType":"contains","source":2883,"target":2910},{"edgeType":"calls","source":988,"target":993},{"edgeType":"calls","source":490,"target":310},{"edgeType":"calls","source":1403,"target":1340},{"edgeType":"contains","source":2958,"target":2965},{"edgeType":"contains","source":1224,"target":1226},{"edgeType":"contains","source":1913,"target":2063},{"edgeType":"calls","source":2017,"target":540},{"edgeType":"calls","source":1526,"target":698},{"edgeType":"contains","source":2570,"target":2573},{"edgeType":"contains","source":228,"target":391},{"edgeType":"contains","source":2469,"target":2533},{"edgeType":"contains","source":1268,"target":1446},{"edgeType":"contains","source":586,"target":656},{"edgeType":"imports","source":6,"target":213},{"edgeType":"contains","source":2444,"target":136},{"edgeType":"calls","source":1632,"target":1640},{"edgeType":"calls","source":1957,"target":1871},{"edgeType":"calls","source":1555,"target":327},{"edgeType":"calls","source":2593,"target":2502},{"edgeType":"contains","source":2853,"target":406},{"edgeType":"calls","source":1957,"target":1078},{"edgeType":"calls","source":306,"target":201},{"edgeType":"contains","source":2407,"target":2076},{"edgeType":"contains","source":51,"target":64},{"edgeType":"contains","source":1477,"target":1574},{"edgeType":"contains","source":2519,"target":2568},{"edgeType":"contains","source":586,"target":659},{"edgeType":"calls","source":2697,"target":2710},{"edgeType":"contains","source":550,"target":718},{"edgeType":"contains","source":487,"target":821},{"edgeType":"calls","source":1409,"target":1413},{"edgeType":"contains","source":1268,"target":1451},{"edgeType":"contains","source":784,"target":863},{"edgeType":"calls","source":3079,"target":117},{"edgeType":"imports","source":3038,"target":2937},{"edgeType":"contains","source":844,"target":853},{"edgeType":"contains","source":673,"target":869},{"edgeType":"contains","source":460,"target":1394},{"edgeType":"contains","source":1913,"target":2068},{"edgeType":"calls","source":1345,"target":754},{"edgeType":"calls","source":63,"target":65},{"edgeType":"contains","source":134,"target":137},{"edgeType":"contains","source":1,"target":296},{"edgeType":"calls","source":514,"target":90},{"edgeType":"contains","source":460,"target":1129},{"edgeType":"contains","source":2832,"target":2912},{"edgeType":"imports","source":3015,"target":743},{"edgeType":"calls","source":988,"target":987},{"edgeType":"contains","source":1348,"target":1350},{"edgeType":"calls","source":2276,"target":1756},{"edgeType":"imports","source":3015,"target":1007},{"edgeType":"calls","source":1160,"target":939},{"edgeType":"calls","source":304,"target":256},{"edgeType":"contains","source":1125,"target":1126},{"edgeType":"calls","source":1311,"target":1279},{"edgeType":"contains","source":550,"target":981},{"edgeType":"contains","source":1268,"target":1452},{"edgeType":"calls","source":2737,"target":678},{"edgeType":"contains","source":228,"target
":393},{"edgeType":"contains","source":673,"target":868},{"edgeType":"contains","source":3041,"target":2512},{"edgeType":"contains","source":4,"target":994},{"edgeType":"imports","source":549,"target":26},{"edgeType":"contains","source":550,"target":717},{"edgeType":"contains","source":2306,"target":2307},{"edgeType":"contains","source":2299,"target":2524},{"edgeType":"calls","source":129,"target":132},{"edgeType":"calls","source":1327,"target":1311},{"edgeType":"calls","source":2053,"target":2060},{"edgeType":"calls","source":757,"target":748},{"edgeType":"contains","source":2958,"target":2178},{"edgeType":"contains","source":550,"target":2038},{"edgeType":"calls","source":842,"target":491},{"edgeType":"contains","source":2643,"target":2694},{"edgeType":"contains","source":1348,"target":1351},{"edgeType":"calls","source":2252,"target":383},{"edgeType":"calls","source":1490,"target":754},{"edgeType":"calls","source":1500,"target":1501},{"edgeType":"calls","source":370,"target":327},{"edgeType":"calls","source":2005,"target":116},{"edgeType":"calls","source":426,"target":177},{"edgeType":"calls","source":418,"target":425},{"edgeType":"contains","source":1381,"target":60},{"edgeType":"contains","source":1268,"target":1449},{"edgeType":"contains","source":2661,"target":2662},{"edgeType":"contains","source":673,"target":871},{"edgeType":"calls","source":266,"target":116},{"edgeType":"calls","source":1756,"target":1757},{"edgeType":"contains","source":2958,"target":2968},{"edgeType":"contains","source":258,"target":259},{"edgeType":"calls","source":1669,"target":754},{"edgeType":"contains","source":883,"target":967},{"edgeType":"contains","source":1835,"target":1842},{"edgeType":"contains","source":550,"target":1248},{"edgeType":"contains","source":460,"target":1131},{"edgeType":"contains","source":134,"target":139},{"edgeType":"calls","source":468,"target":461},{"edgeType":"contains","source":487,"target":823},{"edgeType":"contains","source":1381,"target":1382},{"edgeType":"contains","source":586,"target":660},{"edgeType":"contains","source":487,"target":822},{"edgeType":"calls","source":3079,"target":116},{"edgeType":"calls","source":1412,"target":1321},{"edgeType":"imports","source":6,"target":208},{"edgeType":"contains","source":673,"target":870},{"edgeType":"contains","source":1495,"target":1548},{"edgeType":"imports","source":549,"target":24},{"edgeType":"contains","source":1913,"target":2067},{"edgeType":"calls","source":1988,"target":1963},{"edgeType":"contains","source":134,"target":138},{"edgeType":"calls","source":2467,"target":2440},{"edgeType":"contains","source":2407,"target":2080},{"edgeType":"contains","source":2886,"target":1765},{"edgeType":"calls","source":1289,"target":1170},{"edgeType":"contains","source":2519,"target":2572},{"edgeType":"contains","source":10,"target":18},{"edgeType":"imports","source":2401,"target":2597},{"edgeType":"imports","source":14,"target":271},{"edgeType":"calls","source":1061,"target":1065},{"edgeType":"calls","source":466,"target":483},{"edgeType":"imports","source":3072,"target":1136},{"edgeType":"contains","source":2468,"target":2540},{"edgeType":"contains","source":793,"target":1084},{"edgeType":"calls","source":1689,"target":624},{"edgeType":"contains","source":3073,"target":3076},{"edgeType":"calls","source":2663,"target":2670},{"edgeType":"contains","source":3015,"target":117},{"edgeType":"contains","source":2717,"target":2749},{"edgeType":"contains","source":910,"target":892},{"edgeType":"calls","source":1437,"target":1301},{"edgeType":"imp
orts","source":1262,"target":1487},{"edgeType":"contains","source":1,"target":3},{"edgeType":"imports","source":3050,"target":496},{"edgeType":"contains","source":973,"target":1053},{"edgeType":"contains","source":1477,"target":1550},{"edgeType":"contains","source":2519,"target":2544},{"edgeType":"calls","source":2250,"target":939},{"edgeType":"calls","source":1206,"target":1062},{"edgeType":"calls","source":2665,"target":2080},{"edgeType":"contains","source":2717,"target":2748},{"edgeType":"calls","source":1623,"target":1612},{"edgeType":"contains","source":1900,"target":1914},{"edgeType":"imports","source":1682,"target":359},{"edgeType":"imports","source":666,"target":671},{"edgeType":"calls","source":1099,"target":943},{"edgeType":"contains","source":3073,"target":3077},{"edgeType":"contains","source":2725,"target":2766},{"edgeType":"calls","source":2383,"target":2366},{"edgeType":"contains","source":8,"target":313},{"edgeType":"contains","source":1153,"target":1286},{"edgeType":"contains","source":769,"target":770},{"edgeType":"contains","source":910,"target":891},{"edgeType":"calls","source":1308,"target":1335},{"edgeType":"contains","source":673,"target":839},{"edgeType":"imports","source":1262,"target":1486},{"edgeType":"contains","source":2932,"target":2955},{"edgeType":"contains","source":1,"target":2},{"edgeType":"contains","source":2880,"target":1396},{"edgeType":"calls","source":1953,"target":366},{"edgeType":"contains","source":1477,"target":1551},{"edgeType":"contains","source":784,"target":833},{"edgeType":"contains","source":1900,"target":1915},{"edgeType":"contains","source":2,"target":235},{"edgeType":"contains","source":1314,"target":1319},{"edgeType":"calls","source":1765,"target":116},{"edgeType":"calls","source":2688,"target":2689},{"edgeType":"calls","source":1867,"target":1182},{"edgeType":"contains","source":1570,"target":1575},{"edgeType":"contains","source":769,"target":773},{"edgeType":"imports","source":3050,"target":1023},{"edgeType":"contains","source":3073,"target":3074},{"edgeType":"contains","source":2817,"target":2818},{"edgeType":"contains","source":2767,"target":2782},{"edgeType":"calls","source":814,"target":796},{"edgeType":"contains","source":1211,"target":1605},{"edgeType":"imports","source":1262,"target":1485},{"edgeType":"imports","source":235,"target":24},{"edgeType":"contains","source":784,"target":836},{"edgeType":"calls","source":384,"target":385},{"edgeType":"imports","source":3050,"target":2608},{"edgeType":"contains","source":2932,"target":2952},{"edgeType":"calls","source":802,"target":376},{"edgeType":"contains","source":910,"target":894},{"edgeType":"calls","source":565,"target":588},{"edgeType":"contains","source":1570,"target":1572},{"edgeType":"contains","source":9,"target":21},{"edgeType":"contains","source":671,"target":903},{"edgeType":"calls","source":2687,"target":75},{"edgeType":"contains","source":550,"target":1747},{"edgeType":"contains","source":2817,"target":2819},{"edgeType":"calls","source":1289,"target":1133},{"edgeType":"contains","source":3073,"target":3075},{"edgeType":"contains","source":2193,"target":2343},{"edgeType":"imports","source":2880,"target":1007},{"edgeType":"contains","source":460,"target":1102},{"edgeType":"contains","source":460,"target":2159},{"edgeType":"contains","source":910,"target":893},{"edgeType":"calls","source":1412,"target":1020},{"edgeType":"contains","source":4,"target":2817},{"edgeType":"calls","source":1697,"target":1698},{"edgeType":"contains","source":711,"target":720},{"edgeType":"contai
ns","source":1,"target":4},{"edgeType":"calls","source":937,"target":418},{"edgeType":"contains","source":2932,"target":2953},{"edgeType":"contains","source":3041,"target":2481},{"edgeType":"contains","source":784,"target":835},{"edgeType":"contains","source":2016,"target":2017},{"edgeType":"imports","source":1211,"target":1216},{"edgeType":"calls","source":2302,"target":383},{"edgeType":"calls","source":2250,"target":938},{"edgeType":"contains","source":1570,"target":1573},{"edgeType":"calls","source":1995,"target":123},{"edgeType":"contains","source":1314,"target":1317},{"edgeType":"contains","source":2742,"target":2765},{"edgeType":"calls","source":1140,"target":726},{"edgeType":"calls","source":1289,"target":1128},{"edgeType":"contains","source":3073,"target":3080},{"edgeType":"calls","source":1809,"target":1128},{"edgeType":"calls","source":1389,"target":1199},{"edgeType":"calls","source":1407,"target":641},{"edgeType":"calls","source":981,"target":898},{"edgeType":"contains","source":1153,"target":1291},{"edgeType":"contains","source":711,"target":723},{"edgeType":"calls","source":412,"target":39},{"edgeType":"calls","source":1763,"target":1761},{"edgeType":"contains","source":2363,"target":2364},{"edgeType":"contains","source":784,"target":838},{"edgeType":"calls","source":1717,"target":544},{"edgeType":"contains","source":2832,"target":2887},{"edgeType":"contains","source":2519,"target":2548},{"edgeType":"contains","source":729,"target":693},{"edgeType":"calls","source":745,"target":551},{"edgeType":"calls","source":2327,"target":1455},{"edgeType":"contains","source":2920,"target":1216},{"edgeType":"calls","source":1180,"target":542},{"edgeType":"contains","source":673,"target":843},{"edgeType":"contains","source":671,"target":905},{"edgeType":"contains","source":2661,"target":526},{"edgeType":"contains","source":769,"target":774},{"edgeType":"calls","source":413,"target":271},{"edgeType":"contains","source":2982,"target":617},{"edgeType":"contains","source":1153,"target":1290},{"edgeType":"contains","source":1125,"target":2158},{"edgeType":"calls","source":682,"target":124},{"edgeType":"calls","source":732,"target":688},{"edgeType":"contains","source":3015,"target":3029},{"edgeType":"calls","source":617,"target":25},{"edgeType":"calls","source":2219,"target":2161},{"edgeType":"calls","source":1554,"target":47},{"edgeType":"contains","source":2817,"target":2825},{"edgeType":"contains","source":1913,"target":2045},{"edgeType":"imports","source":2770,"target":2300},{"edgeType":"contains","source":784,"target":837},{"edgeType":"calls","source":614,"target":383},{"edgeType":"calls","source":1284,"target":1285},{"edgeType":"contains","source":1,"target":1330},{"edgeType":"contains","source":926,"target":931},{"edgeType":"contains","source":2151,"target":2327},{"edgeType":"contains","source":3073,"target":3078},{"edgeType":"contains","source":460,"target":2164},{"edgeType":"contains","source":2817,"target":2822},{"edgeType":"contains","source":2886,"target":683},{"edgeType":"calls","source":714,"target":721},{"edgeType":"calls","source":2711,"target":383},{"edgeType":"calls","source":2663,"target":1871},{"edgeType":"calls","source":2833,"target":36},{"edgeType":"contains","source":8,"target":320},{"edgeType":"contains","source":2717,"target":2751},{"edgeType":"imports","source":2187,"target":24},{"edgeType":"contains","source":1153,"target":1293},{"edgeType":"imports","source":2151,"target":83},{"edgeType":"calls","source":2253,"target":2161},{"edgeType":"calls","source":2325,"target":986
},{"edgeType":"calls","source":2590,"target":2549},{"edgeType":"calls","source":1395,"target":486},{"edgeType":"calls","source":2971,"target":251},{"edgeType":"contains","source":586,"target":636},{"edgeType":"contains","source":769,"target":776},{"edgeType":"contains","source":3007,"target":3011},{"edgeType":"contains","source":2725,"target":2768},{"edgeType":"calls","source":1180,"target":544},{"edgeType":"calls","source":2913,"target":200},{"edgeType":"contains","source":1211,"target":1344},{"edgeType":"contains","source":1153,"target":1292},{"edgeType":"calls","source":1705,"target":1710},{"edgeType":"calls","source":3167,"target":3161},{"edgeType":"calls","source":1961,"target":1966},{"edgeType":"calls","source":1346,"target":1212},{"edgeType":"calls","source":1862,"target":1864},{"edgeType":"contains","source":2519,"target":2547},{"edgeType":"calls","source":2192,"target":1939},{"edgeType":"calls","source":848,"target":266},{"edgeType":"contains","source":3015,"target":120},{"edgeType":"calls","source":805,"target":806},{"edgeType":"calls","source":2308,"target":986},{"edgeType":"imports","source":1631,"target":83},{"edgeType":"contains","source":2932,"target":2957},{"edgeType":"contains","source":2935,"target":3134},{"edgeType":"contains","source":460,"target":1109},{"edgeType":"contains","source":550,"target":1490},{"edgeType":"contains","source":8,"target":322},{"edgeType":"contains","source":10,"target":260},{"edgeType":"calls","source":144,"target":151},{"edgeType":"contains","source":793,"target":1092},{"edgeType":"calls","source":2387,"target":383},{"edgeType":"contains","source":2717,"target":1700},{"edgeType":"contains","source":1153,"target":1295},{"edgeType":"contains","source":550,"target":961},{"edgeType":"contains","source":460,"target":1637},{"edgeType":"contains","source":487,"target":800},{"edgeType":"calls","source":103,"target":101},{"edgeType":"contains","source":550,"target":2018},{"edgeType":"calls","source":1319,"target":986},{"edgeType":"calls","source":2728,"target":383},{"edgeType":"contains","source":711,"target":727},{"edgeType":"calls","source":2302,"target":376},{"edgeType":"calls","source":2012,"target":1965},{"edgeType":"contains","source":2832,"target":2891},{"edgeType":"contains","source":2701,"target":2724},{"edgeType":"imports","source":2518,"target":83},{"edgeType":"contains","source":2962,"target":1241},{"edgeType":"contains","source":1430,"target":373},{"edgeType":"calls","source":2303,"target":2195},{"edgeType":"contains","source":4,"target":1238},{"edgeType":"imports","source":1905,"target":1909},{"edgeType":"contains","source":671,"target":909},{"edgeType":"contains","source":3072,"target":3116},{"edgeType":"calls","source":466,"target":474},{"edgeType":"contains","source":8,"target":321},{"edgeType":"calls","source":1548,"target":1550},{"edgeType":"contains","source":1153,"target":1294},{"edgeType":"contains","source":769,"target":778},{"edgeType":"contains","source":2519,"target":2554},{"edgeType":"contains","source":550,"target":960},{"edgeType":"imports","source":3050,"target":239},{"edgeType":"contains","source":973,"target":1060},{"edgeType":"contains","source":1631,"target":749},{"edgeType":"contains","source":1947,"target":731},{"edgeType":"contains","source":3015,"target":3033},{"edgeType":"calls","source":295,"target":490},{"edgeType":"calls","source":1816,"target":641},{"edgeType":"contains","source":550,"target":696},{"edgeType":"contains","source":2725,"target":2774},{"edgeType":"contains","source":1,"target":10},{"edgeType":"cont
ains","source":784,"target":841},{"edgeType":"contains","source":2468,"target":2546},{"edgeType":"contains","source":2725,"target":2771},{"edgeType":"calls","source":1738,"target":1741},{"edgeType":"contains","source":769,"target":781},{"edgeType":"contains","source":2519,"target":2551},{"edgeType":"contains","source":2518,"target":2582},{"edgeType":"imports","source":2607,"target":1023},{"edgeType":"contains","source":3073,"target":3082},{"edgeType":"imports","source":729,"target":46},{"edgeType":"contains","source":1569,"target":1611},{"edgeType":"contains","source":2519,"target":2550},{"edgeType":"contains","source":2704,"target":1836},{"edgeType":"calls","source":681,"target":683},{"edgeType":"contains","source":673,"target":850},{"edgeType":"contains","source":671,"target":912},{"edgeType":"contains","source":973,"target":1063},{"edgeType":"imports","source":2607,"target":2608},{"edgeType":"calls","source":939,"target":877},{"edgeType":"calls","source":2499,"target":2195},{"edgeType":"calls","source":1441,"target":641},{"edgeType":"imports","source":1905,"target":1907},{"edgeType":"calls","source":1146,"target":537},{"edgeType":"calls","source":742,"target":376},{"edgeType":"contains","source":8,"target":323},{"edgeType":"calls","source":341,"target":123},{"edgeType":"contains","source":2464,"target":2407},{"edgeType":"contains","source":3055,"target":734},{"edgeType":"imports","source":14,"target":278},{"edgeType":"contains","source":769,"target":780},{"edgeType":"imports","source":1682,"target":1685},{"edgeType":"contains","source":1153,"target":1296},{"edgeType":"imports","source":729,"target":47},{"edgeType":"calls","source":1639,"target":52},{"edgeType":"contains","source":2264,"target":2265},{"edgeType":"calls","source":2320,"target":2195},{"edgeType":"calls","source":329,"target":231},{"edgeType":"imports","source":3111,"target":460},{"edgeType":"imports","source":2187,"target":36},{"edgeType":"contains","source":1,"target":12},{"edgeType":"calls","source":922,"target":346},{"edgeType":"calls","source":936,"target":969},{"edgeType":"contains","source":2501,"target":2581},{"edgeType":"contains","source":2920,"target":1219},{"edgeType":"contains","source":2832,"target":2890},{"edgeType":"calls","source":2250,"target":1987},{"edgeType":"contains","source":2966,"target":2970},{"edgeType":"imports","source":3072,"target":83},{"edgeType":"contains","source":1611,"target":1636},{"edgeType":"contains","source":460,"target":2170},{"edgeType":"contains","source":460,"target":1113},{"edgeType":"contains","source":926,"target":673},{"edgeType":"contains","source":550,"target":966},{"edgeType":"contains","source":2519,"target":2557},{"edgeType":"imports","source":1682,"target":1684},{"edgeType":"calls","source":1213,"target":569},{"edgeType":"contains","source":2,"target":1569},{"edgeType":"contains","source":51,"target":50},{"edgeType":"calls","source":2491,"target":2440},{"edgeType":"contains","source":926,"target":937},{"edgeType":"contains","source":84,"target":85},{"edgeType":"calls","source":144,"target":147},{"edgeType":"contains","source":2701,"target":2728},{"edgeType":"contains","source":1430,"target":1434},{"edgeType":"contains","source":2832,"target":2895},{"edgeType":"contains","source":2469,"target":2521},{"edgeType":"contains","source":2501,"target":2586},{"edgeType":"calls","source":1731,"target":1687},{"edgeType":"contains","source":673,"target":852},{"edgeType":"calls","source":2045,"target":145},{"edgeType":"contains","source":2966,"target":2971},{"edgeType":"contains","so
urce":550,"target":2022},{"edgeType":"contains","source":1477,"target":1563},{"edgeType":"calls","source":1853,"target":1340},{"edgeType":"contains","source":769,"target":782},{"edgeType":"contains","source":2962,"target":1245},{"edgeType":"contains","source":2322,"target":2323},{"edgeType":"contains","source":2768,"target":1974},{"edgeType":"calls","source":1308,"target":1322},{"edgeType":"contains","source":3072,"target":3120},{"edgeType":"contains","source":2501,"target":2587},{"edgeType":"contains","source":2016,"target":2031},{"edgeType":"contains","source":2935,"target":1553},{"edgeType":"calls","source":1069,"target":1071},{"edgeType":"calls","source":1563,"target":820},{"edgeType":"contains","source":784,"target":849},{"edgeType":"contains","source":1844,"target":1810},{"edgeType":"calls","source":1180,"target":537},{"edgeType":"contains","source":2962,"target":1242},{"edgeType":"calls","source":419,"target":80},{"edgeType":"contains","source":2701,"target":2726},{"edgeType":"contains","source":1314,"target":1328},{"edgeType":"imports","source":2963,"target":24},{"edgeType":"calls","source":2981,"target":990},{"edgeType":"calls","source":1515,"target":986},{"edgeType":"contains","source":84,"target":87},{"edgeType":"contains","source":994,"target":152},{"edgeType":"calls","source":295,"target":489},{"edgeType":"contains","source":2982,"target":3000},{"edgeType":"contains","source":793,"target":1098},{"edgeType":"imports","source":1211,"target":1219},{"edgeType":"contains","source":1268,"target":1433},{"edgeType":"contains","source":7,"target":624},{"edgeType":"contains","source":673,"target":854},{"edgeType":"contains","source":973,"target":1067},{"edgeType":"contains","source":2501,"target":2584},{"edgeType":"imports","source":3050,"target":1555},{"edgeType":"contains","source":3035,"target":1887},{"edgeType":"calls","source":1201,"target":1206},{"edgeType":"imports","source":3072,"target":80},{"edgeType":"contains","source":460,"target":2171},{"edgeType":"contains","source":784,"target":848},{"edgeType":"contains","source":2966,"target":2969},{"edgeType":"contains","source":2900,"target":2901},{"edgeType":"contains","source":3072,"target":3118},{"edgeType":"contains","source":550,"target":2024},{"edgeType":"contains","source":550,"target":1759},{"edgeType":"contains","source":553,"target":609},{"edgeType":"contains","source":84,"target":86},{"edgeType":"contains","source":2701,"target":2727},{"edgeType":"calls","source":2679,"target":47},{"edgeType":"calls","source":2965,"target":1487},{"edgeType":"calls","source":1955,"target":558},{"edgeType":"contains","source":2501,"target":2585},{"edgeType":"contains","source":228,"target":378},{"edgeType":"contains","source":2880,"target":1406},{"edgeType":"calls","source":1044,"target":1052},{"edgeType":"calls","source":2845,"target":2300},{"edgeType":"contains","source":973,"target":1066},{"edgeType":"calls","source":1967,"target":743},{"edgeType":"contains","source":130,"target":215},{"edgeType":"calls","source":3151,"target":3150},{"edgeType":"calls","source":62,"target":75},{"edgeType":"contains","source":2800,"target":2802},{"edgeType":"contains","source":2962,"target":1215},{"edgeType":"imports","source":2880,"target":200},{"edgeType":"contains","source":2791,"target":2816},{"edgeType":"calls","source":491,"target":518},{"edgeType":"calls","source":402,"target":370},{"edgeType":"contains","source":487,"target":511},{"edgeType":"calls","source":1483,"target":1212},{"edgeType":"contains","source":1153,"target":1270},{"edgeType":"imports
","source":2401,"target":779},{"edgeType":"contains","source":2886,"target":664},{"edgeType":"calls","source":249,"target":92},{"edgeType":"calls","source":2094,"target":2092},{"edgeType":"calls","source":2771,"target":2774},{"edgeType":"contains","source":2608,"target":1090},{"edgeType":"calls","source":1020,"target":1031},{"edgeType":"calls","source":1527,"target":641},{"edgeType":"contains","source":2337,"target":2356},{"edgeType":"calls","source":2225,"target":938},{"edgeType":"calls","source":591,"target":589},{"edgeType":"contains","source":2191,"target":2654},{"edgeType":"calls","source":1078,"target":1082},{"edgeType":"contains","source":2191,"target":2655},{"edgeType":"calls","source":988,"target":965},{"edgeType":"contains","source":130,"target":214},{"edgeType":"calls","source":62,"target":74},{"edgeType":"contains","source":550,"target":1993},{"edgeType":"contains","source":2800,"target":2803},{"edgeType":"contains","source":2643,"target":2649},{"edgeType":"imports","source":1929,"target":83},{"edgeType":"contains","source":2511,"target":2513},{"edgeType":"calls","source":2004,"target":123},{"edgeType":"contains","source":1878,"target":731},{"edgeType":"calls","source":3081,"target":36},{"edgeType":"calls","source":249,"target":91},{"edgeType":"imports","source":729,"target":24},{"edgeType":"contains","source":2193,"target":2329},{"edgeType":"calls","source":2225,"target":939},{"edgeType":"contains","source":14,"target":375},{"edgeType":"calls","source":471,"target":80},{"edgeType":"contains","source":487,"target":510},{"edgeType":"contains","source":2301,"target":2416},{"edgeType":"contains","source":3155,"target":3158},{"edgeType":"contains","source":883,"target":922},{"edgeType":"contains","source":2932,"target":2936},{"edgeType":"contains","source":130,"target":217},{"edgeType":"calls","source":1534,"target":1219},{"edgeType":"contains","source":2643,"target":2646},{"edgeType":"calls","source":1852,"target":346},{"edgeType":"contains","source":2982,"target":593},{"edgeType":"calls","source":2064,"target":116},{"edgeType":"calls","source":1412,"target":1301},{"edgeType":"calls","source":2981,"target":3133},{"edgeType":"calls","source":1851,"target":641},{"edgeType":"contains","source":2193,"target":2326},{"edgeType":"calls","source":631,"target":144},{"edgeType":"calls","source":1619,"target":698},{"edgeType":"calls","source":2697,"target":2688},{"edgeType":"calls","source":500,"target":505},{"edgeType":"contains","source":1913,"target":2021},{"edgeType":"calls","source":136,"target":162},{"edgeType":"contains","source":550,"target":1467},{"edgeType":"calls","source":2046,"target":145},{"edgeType":"contains","source":1330,"target":1332},{"edgeType":"calls","source":1544,"target":116},{"edgeType":"contains","source":19,"target":223},{"edgeType":"contains","source":1164,"target":1195},{"edgeType":"contains","source":2932,"target":2937},{"edgeType":"contains","source":130,"target":216},{"edgeType":"calls","source":3151,"target":3149},{"edgeType":"contains","source":1434,"target":751},{"edgeType":"contains","source":2386,"target":2422},{"edgeType":"contains","source":2453,"target":2459},{"edgeType":"imports","source":2468,"target":21},{"edgeType":"contains","source":1866,"target":1893},{"edgeType":"calls","source":1515,"target":486},{"edgeType":"contains","source":2643,"target":2647},{"edgeType":"calls","source":486,"target":674},{"edgeType":"contains","source":17,"target":20},{"edgeType":"calls","source":2981,"target":1020},{"edgeType":"calls","source":754,"target":558},{"edgeTy
pe":"contains","source":1268,"target":1405},{"edgeType":"calls","source":1617,"target":759},{"edgeType":"contains","source":1430,"target":347},{"edgeType":"contains","source":19,"target":222},{"edgeType":"contains","source":840,"target":932},{"edgeType":"calls","source":2981,"target":1284},{"edgeType":"contains","source":3041,"target":2464},{"edgeType":"contains","source":4,"target":1744},{"edgeType":"calls","source":1278,"target":1226},{"edgeType":"contains","source":1164,"target":1194},{"edgeType":"contains","source":671,"target":886},{"edgeType":"contains","source":2191,"target":2653},{"edgeType":"calls","source":2024,"target":1086},{"edgeType":"contains","source":1132,"target":1133},{"edgeType":"calls","source":2601,"target":376},{"edgeType":"contains","source":1605,"target":1533},{"edgeType":"contains","source":2641,"target":2714},{"edgeType":"contains","source":4,"target":3068},{"edgeType":"contains","source":2800,"target":2806},{"edgeType":"contains","source":2445,"target":2448},{"edgeType":"imports","source":14,"target":250},{"edgeType":"contains","source":1844,"target":731},{"edgeType":"calls","source":120,"target":123},{"edgeType":"contains","source":3072,"target":3094},{"edgeType":"contains","source":3088,"target":2598},{"edgeType":"calls","source":2583,"target":2520},{"edgeType":"calls","source":558,"target":551},{"edgeType":"contains","source":2791,"target":2820},{"edgeType":"calls","source":505,"target":80},{"edgeType":"contains","source":1495,"target":1508},{"edgeType":"contains","source":487,"target":515},{"edgeType":"contains","source":2337,"target":2360},{"edgeType":"calls","source":236,"target":227},{"edgeType":"calls","source":888,"target":891},{"edgeType":"imports","source":459,"target":461},{"edgeType":"contains","source":2641,"target":2715},{"edgeType":"contains","source":1524,"target":81},{"edgeType":"contains","source":1164,"target":1197},{"edgeType":"calls","source":1248,"target":36},{"edgeType":"calls","source":359,"target":376},{"edgeType":"calls","source":1243,"target":1248},{"edgeType":"contains","source":1651,"target":108},{"edgeType":"calls","source":2283,"target":986},{"edgeType":"contains","source":130,"target":218},{"edgeType":"calls","source":617,"target":570},{"edgeType":"contains","source":2800,"target":2807},{"edgeType":"contains","source":671,"target":888},{"edgeType":"contains","source":2500,"target":2593},{"edgeType":"calls","source":2658,"target":2046},{"edgeType":"calls","source":343,"target":344},{"edgeType":"contains","source":2151,"target":2314},{"edgeType":"calls","source":1259,"target":1280},{"edgeType":"calls","source":277,"target":276},{"edgeType":"contains","source":2791,"target":2821},{"edgeType":"contains","source":2337,"target":2361},{"edgeType":"contains","source":1611,"target":1613},{"edgeType":"calls","source":1346,"target":1226},{"edgeType":"contains","source":994,"target":124},{"edgeType":"contains","source":1631,"target":2050},{"edgeType":"calls","source":2981,"target":1546},{"edgeType":"imports","source":840,"target":540},{"edgeType":"contains","source":1164,"target":1200},{"edgeType":"calls","source":3159,"target":3160},{"edgeType":"contains","source":973,"target":1043},{"edgeType":"contains","source":1913,"target":2025},{"edgeType":"contains","source":810,"target":811},{"edgeType":"contains","source":2725,"target":1958},{"edgeType":"contains","source":130,"target":221},{"edgeType":"contains","source":586,"target":620},{"edgeType":"contains","source":2800,"target":2804},{"edgeType":"contains","source":2445,"target":2446},{"edgeT
ype":"calls","source":1146,"target":558},{"edgeType":"contains","source":487,"target":517},{"edgeType":"contains","source":2500,"target":2590},{"edgeType":"contains","source":1631,"target":2047},{"edgeType":"contains","source":2337,"target":2358},{"edgeType":"calls","source":566,"target":569},{"edgeType":"calls","source":533,"target":535},{"edgeType":"calls","source":3168,"target":3145},{"edgeType":"contains","source":460,"target":2147},{"edgeType":"calls","source":1731,"target":1714},{"edgeType":"calls","source":690,"target":689},{"edgeType":"calls","source":376,"target":117},{"edgeType":"calls","source":1301,"target":1302},{"edgeType":"calls","source":1276,"target":1020},{"edgeType":"imports","source":1477,"target":83},{"edgeType":"contains","source":2191,"target":2657},{"edgeType":"contains","source":550,"target":1735},{"edgeType":"contains","source":130,"target":220},{"edgeType":"calls","source":2278,"target":82},{"edgeType":"contains","source":2701,"target":2703},{"edgeType":"contains","source":2725,"target":1959},{"edgeType":"contains","source":2800,"target":2805},{"edgeType":"calls","source":1005,"target":965},{"edgeType":"contains","source":2445,"target":2447},{"edgeType":"contains","source":2880,"target":2967},{"edgeType":"calls","source":2606,"target":2599},{"edgeType":"contains","source":1900,"target":1901},{"edgeType":"contains","source":550,"target":677},{"edgeType":"imports","source":3072,"target":319},{"edgeType":"calls","source":310,"target":312},{"edgeType":"contains","source":487,"target":516},{"edgeType":"calls","source":2901,"target":589},{"edgeType":"calls","source":1276,"target":1284},{"edgeType":"contains","source":2337,"target":2359},{"edgeType":"calls","source":566,"target":568},{"edgeType":"calls","source":376,"target":116},{"edgeType":"contains","source":994,"target":126},{"edgeType":"contains","source":2800,"target":2810},{"edgeType":"contains","source":1631,"target":731},{"edgeType":"contains","source":2641,"target":2718},{"edgeType":"contains","source":2932,"target":2946},{"edgeType":"contains","source":487,"target":520},{"edgeType":"contains","source":4,"target":3072},{"edgeType":"contains","source":1330,"target":2399},{"edgeType":"calls","source":583,"target":36},{"edgeType":"contains","source":1900,"target":1906},{"edgeType":"calls","source":1327,"target":1283},{"edgeType":"imports","source":3050,"target":1016},{"edgeType":"contains","source":460,"target":35},{"edgeType":"calls","source":466,"target":492},{"edgeType":"contains","source":1288,"target":1323},{"edgeType":"contains","source":2519,"target":2536},{"edgeType":"imports","source":2725,"target":521},{"edgeType":"contains","source":1732,"target":508},{"edgeType":"contains","source":550,"target":2001},{"edgeType":"contains","source":1913,"target":2032},{"edgeType":"contains","source":2800,"target":2811},{"edgeType":"contains","source":2641,"target":2719},{"edgeType":"calls","source":1791,"target":376},{"edgeType":"contains","source":550,"target":1737},{"edgeType":"calls","source":2664,"target":2648},{"edgeType":"calls","source":802,"target":116},{"edgeType":"calls","source":411,"target":346},{"edgeType":"contains","source":1,"target":258},{"edgeType":"contains","source":1835,"target":1808},{"edgeType":"contains","source":2280,"target":2283},{"edgeType":"contains","source":1211,"target":1861},{"edgeType":"calls","source":1731,"target":1707},{"edgeType":"contains","source":2518,"target":2832},{"edgeType":"contains","source":731,"target":89},{"edgeType":"calls","source":1487,"target":1079},{"edgeType":"cal
ls","source":1576,"target":698},{"edgeType":"contains","source":550,"target":1208},{"edgeType":"contains","source":1844,"target":1793},{"edgeType":"contains","source":460,"target":1091},{"edgeType":"calls","source":630,"target":692},{"edgeType":"imports","source":2187,"target":550},{"edgeType":"contains","source":2519,"target":2537},{"edgeType":"contains","source":2717,"target":1684},{"edgeType":"contains","source":2582,"target":2698},{"edgeType":"contains","source":2800,"target":2808},{"edgeType":"contains","source":460,"target":566},{"edgeType":"contains","source":1913,"target":2029},{"edgeType":"contains","source":2641,"target":2716},{"edgeType":"imports","source":2468,"target":30},{"edgeType":"contains","source":2858,"target":2860},{"edgeType":"contains","source":1126,"target":1325},{"edgeType":"imports","source":840,"target":24},{"edgeType":"contains","source":487,"target":522},{"edgeType":"contains","source":2791,"target":2823},{"edgeType":"calls","source":1458,"target":1453},{"edgeType":"contains","source":1268,"target":1412},{"edgeType":"contains","source":2500,"target":2594},{"edgeType":"calls","source":311,"target":278},{"edgeType":"contains","source":2337,"target":2362},{"edgeType":"calls","source":1213,"target":327},{"edgeType":"contains","source":2800,"target":2809},{"edgeType":"contains","source":1631,"target":730},{"edgeType":"contains","source":460,"target":565},{"edgeType":"contains","source":1989,"target":731},{"edgeType":"imports","source":2245,"target":1128},{"edgeType":"contains","source":711,"target":712},{"edgeType":"contains","source":2791,"target":2824},{"edgeType":"contains","source":973,"target":1046},{"edgeType":"contains","source":3015,"target":1429},{"edgeType":"contains","source":2445,"target":2451},{"edgeType":"contains","source":2500,"target":2596},{"edgeType":"calls","source":1827,"target":1640},{"edgeType":"calls","source":2011,"target":2014},{"edgeType":"contains","source":2725,"target":1963},{"edgeType":"calls","source":1613,"target":346},{"edgeType":"contains","source":460,"target":2150},{"edgeType":"calls","source":345,"target":280},{"edgeType":"contains","source":2962,"target":1222},{"edgeType":"contains","source":2500,"target":2595},{"edgeType":"calls","source":2305,"target":34},{"edgeType":"contains","source":673,"target":832},{"edgeType":"contains","source":8,"target":307},{"edgeType":"contains","source":2582,"target":2696},{"edgeType":"imports","source":14,"target":262},{"edgeType":"calls","source":2713,"target":2713},{"edgeType":"calls","source":1487,"target":1081},{"edgeType":"imports","source":666,"target":926},{"edgeType":"calls","source":2028,"target":2015},{"edgeType":"calls","source":1201,"target":698},{"edgeType":"contains","source":1153,"target":1279},{"edgeType":"contains","source":550,"target":1742},{"edgeType":"contains","source":3015,"target":3020},{"edgeType":"contains","source":2193,"target":2340},{"edgeType":"contains","source":1153,"target":1283},{"edgeType":"contains","source":2800,"target":2814},{"edgeType":"contains","source":487,"target":788},{"edgeType":"contains","source":2883,"target":2884},{"edgeType":"calls","source":295,"target":239},{"edgeType":"calls","source":849,"target":770},{"edgeType":"contains","source":3041,"target":2478},{"edgeType":"imports","source":2518,"target":328},{"edgeType":"contains","source":2791,"target":2829},{"edgeType":"calls","source":530,"target":353},{"edgeType":"contains","source":1211,"target":1864},{"edgeType":"calls","source":937,"target":421},{"edgeType":"calls","source":1842,"target":641}
,{"edgeType":"calls","source":1742,"target":834},{"edgeType":"contains","source":550,"target":1213},{"edgeType":"contains","source":3,"target":1522},{"edgeType":"contains","source":1900,"target":1910},{"edgeType":"calls","source":1185,"target":1188},{"edgeType":"contains","source":3015,"target":3021},{"edgeType":"calls","source":2160,"target":34},{"edgeType":"calls","source":1791,"target":1429},{"edgeType":"contains","source":550,"target":2005},{"edgeType":"contains","source":2767,"target":2781},{"edgeType":"contains","source":2800,"target":2815},{"edgeType":"contains","source":487,"target":787},{"edgeType":"calls","source":136,"target":146},{"edgeType":"contains","source":1126,"target":1326},{"edgeType":"contains","source":2501,"target":2571},{"edgeType":"contains","source":1913,"target":2036},{"edgeType":"calls","source":129,"target":99},{"edgeType":"calls","source":1845,"target":1340},{"edgeType":"imports","source":1682,"target":1931},{"edgeType":"calls","source":417,"target":420},{"edgeType":"calls","source":486,"target":395},{"edgeType":"calls","source":483,"target":488},{"edgeType":"contains","source":1570,"target":1571},{"edgeType":"contains","source":50,"target":64},{"edgeType":"contains","source":550,"target":1212},{"edgeType":"contains","source":3,"target":1521},{"edgeType":"calls","source":2329,"target":2195},{"edgeType":"contains","source":1314,"target":1315},{"edgeType":"contains","source":2692,"target":1932},{"edgeType":"contains","source":2767,"target":2778},{"edgeType":"contains","source":2800,"target":2812},{"edgeType":"calls","source":1241,"target":1305},{"edgeType":"calls","source":905,"target":886},{"edgeType":"contains","source":2511,"target":2522},{"edgeType":"calls","source":252,"target":253},{"edgeType":"contains","source":2444,"target":106},{"edgeType":"contains","source":2962,"target":1225},{"edgeType":"calls","source":986,"target":754},{"edgeType":"calls","source":630,"target":691},{"edgeType":"calls","source":2021,"target":1965},{"edgeType":"contains","source":3015,"target":1432},{"edgeType":"imports","source":3072,"target":64},{"edgeType":"contains","source":995,"target":633},{"edgeType":"imports","source":1682,"target":80},{"edgeType":"contains","source":2932,"target":2948},{"edgeType":"contains","source":3072,"target":3100},{"edgeType":"contains","source":2151,"target":2319},{"edgeType":"contains","source":2582,"target":2699},{"edgeType":"contains","source":2519,"target":2538},{"edgeType":"calls","source":1328,"target":986},{"edgeType":"contains","source":2511,"target":2523},{"edgeType":"contains","source":2701,"target":2711},{"edgeType":"contains","source":2767,"target":2779},{"edgeType":"contains","source":2800,"target":2813},{"edgeType":"contains","source":1,"target":792},{"edgeType":"calls","source":3080,"target":52},{"edgeType":"contains","source":2791,"target":2828},{"edgeType":"calls","source":2513,"target":2302},{"edgeType":"contains","source":2444,"target":107},{"edgeType":"calls","source":1702,"target":754},{"edgeType":"contains","source":2717,"target":2743},{"edgeType":"calls","source":1227,"target":1209},{"edgeType":"calls","source":1489,"target":486},{"edgeType":"calls","source":797,"target":798},{"edgeType":"contains","source":2519,"target":2539},{"edgeType":"calls","source":674,"target":647},{"edgeType":"contains","source":1330,"target":2931},{"edgeType":"calls","source":1188,"target":1191},{"edgeType":"contains","source":550,"target":1713},{"edgeType":"contains","source":7,"target":312},{"edgeType":"calls","source":2769,"target":540},{"edgeType
":"contains","source":462,"target":477},{"edgeType":"imports","source":1262,"target":1520},{"edgeType":"contains","source":1211,"target":1835},{"edgeType":"imports","source":6,"target":24},{"edgeType":"contains","source":3088,"target":3106},{"edgeType":"imports","source":2444,"target":24},{"edgeType":"contains","source":550,"target":1184},{"edgeType":"contains","source":1477,"target":1517},{"edgeType":"contains","source":1631,"target":2028},{"edgeType":"imports","source":3050,"target":1322},{"edgeType":"contains","source":1792,"target":1794},{"edgeType":"calls","source":1145,"target":938},{"edgeType":"calls","source":885,"target":278},{"edgeType":"calls","source":1413,"target":558},{"edgeType":"calls","source":2658,"target":2660},{"edgeType":"contains","source":10,"target":2069},{"edgeType":"contains","source":2,"target":1523},{"edgeType":"contains","source":2791,"target":2801},{"edgeType":"contains","source":4,"target":2518},{"edgeType":"calls","source":2673,"target":346},{"edgeType":"contains","source":487,"target":758},{"edgeType":"contains","source":1808,"target":1827},{"edgeType":"contains","source":305,"target":58},{"edgeType":"contains","source":2337,"target":2341},{"edgeType":"contains","source":1007,"target":1022},{"edgeType":"contains","source":673,"target":806},{"edgeType":"calls","source":1469,"target":1464},{"edgeType":"contains","source":2193,"target":2576},{"edgeType":"contains","source":3088,"target":3107},{"edgeType":"contains","source":97,"target":164},{"edgeType":"contains","source":1982,"target":1983},{"edgeType":"calls","source":570,"target":529},{"edgeType":"calls","source":2601,"target":993},{"edgeType":"contains","source":2982,"target":579},{"edgeType":"contains","source":2672,"target":2790},{"edgeType":"contains","source":673,"target":809},{"edgeType":"contains","source":586,"target":599},{"edgeType":"contains","source":2626,"target":2628},{"edgeType":"contains","source":2853,"target":2990},{"edgeType":"calls","source":3136,"target":788},{"edgeType":"contains","source":2337,"target":2338},{"edgeType":"calls","source":1857,"target":1859},{"edgeType":"calls","source":636,"target":600},{"edgeType":"calls","source":32,"target":33},{"edgeType":"contains","source":4,"target":3050},{"edgeType":"contains","source":1569,"target":1570},{"edgeType":"imports","source":3035,"target":1256},{"edgeType":"contains","source":3119,"target":2407},{"edgeType":"calls","source":2746,"target":987},{"edgeType":"calls","source":3079,"target":3084},{"edgeType":"contains","source":8,"target":282},{"edgeType":"contains","source":2791,"target":2799},{"edgeType":"calls","source":381,"target":47},{"edgeType":"contains","source":3072,"target":3073},{"edgeType":"calls","source":443,"target":239},{"edgeType":"calls","source":519,"target":526},{"edgeType":"imports","source":2704,"target":418},{"edgeType":"calls","source":106,"target":116},{"edgeType":"contains","source":2403,"target":2407},{"edgeType":"calls","source":1632,"target":641},{"edgeType":"contains","source":2886,"target":911},{"edgeType":"contains","source":1164,"target":1178},{"edgeType":"calls","source":1890,"target":1892},{"edgeType":"contains","source":673,"target":808},{"edgeType":"calls","source":1145,"target":939},{"edgeType":"contains","source":1631,"target":970},{"edgeType":"contains","source":3088,"target":3105},{"edgeType":"contains","source":1631,"target":2027},{"edgeType":"imports","source":1631,"target":913},{"edgeType":"contains","source":792,"target":2932},{"edgeType":"contains","source":1477,"target":1516},{"edgeType":"call
s","source":353,"target":123},{"edgeType":"contains","source":305,"target":324},{"edgeType":"calls","source":965,"target":970},{"edgeType":"imports","source":3072,"target":1429},{"edgeType":"calls","source":1196,"target":939},{"edgeType":"contains","source":550,"target":1717},{"edgeType":"contains","source":2982,"target":583},{"edgeType":"contains","source":2853,"target":2996},{"edgeType":"contains","source":2337,"target":2344},{"edgeType":"contains","source":2717,"target":2720},{"edgeType":"calls","source":2055,"target":2057},{"edgeType":"contains","source":19,"target":208},{"edgeType":"calls","source":73,"target":77},{"edgeType":"contains","source":1982,"target":1986},{"edgeType":"contains","source":1164,"target":1181},{"edgeType":"contains","source":2672,"target":2793},{"edgeType":"calls","source":742,"target":743},{"edgeType":"contains","source":130,"target":203},{"edgeType":"imports","source":2767,"target":49},{"edgeType":"contains","source":792,"target":2935},{"edgeType":"calls","source":519,"target":521},{"edgeType":"contains","source":2730,"target":731},{"edgeType":"calls","source":1804,"target":60},{"edgeType":"contains","source":460,"target":1335},{"edgeType":"calls","source":1318,"target":327},{"edgeType":"contains","source":673,"target":810},{"edgeType":"calls","source":2366,"target":2195},{"edgeType":"calls","source":2703,"target":2583},{"edgeType":"calls","source":1196,"target":938},{"edgeType":"calls","source":1419,"target":1424},{"edgeType":"contains","source":586,"target":600},{"edgeType":"contains","source":1675,"target":1726},{"edgeType":"contains","source":2582,"target":2678},{"edgeType":"contains","source":2918,"target":2832},{"edgeType":"contains","source":2717,"target":2721},{"edgeType":"calls","source":2221,"target":877},{"edgeType":"contains","source":2582,"target":2677},{"edgeType":"calls","source":1739,"target":1810},{"edgeType":"contains","source":2768,"target":1932},{"edgeType":"contains","source":1913,"target":2012},{"edgeType":"contains","source":130,"target":202},{"edgeType":"calls","source":1320,"target":529},{"edgeType":"calls","source":2862,"target":2675},{"edgeType":"contains","source":1808,"target":1828},{"edgeType":"calls","source":115,"target":98},{"edgeType":"contains","source":673,"target":813},{"edgeType":"imports","source":3035,"target":460},{"edgeType":"contains","source":19,"target":210},{"edgeType":"calls","source":2919,"target":376},{"edgeType":"contains","source":2468,"target":130},{"edgeType":"contains","source":2193,"target":2578},{"edgeType":"contains","source":19,"target":211},{"edgeType":"imports","source":1262,"target":1514},{"edgeType":"contains","source":2,"target":2852},{"edgeType":"contains","source":1164,"target":1183},{"edgeType":"contains","source":305,"target":329},{"edgeType":"calls","source":1845,"target":641},{"edgeType":"calls","source":230,"target":233},{"edgeType":"contains","source":1982,"target":1984},{"edgeType":"calls","source":345,"target":368},{"edgeType":"contains","source":2982,"target":580},{"edgeType":"contains","source":586,"target":603},{"edgeType":"calls","source":1560,"target":1548},{"edgeType":"contains","source":1541,"target":64},{"edgeType":"calls","source":1535,"target":1530},{"edgeType":"imports","source":2858,"target":132},{"edgeType":"calls","source":91,"target":48},{"edgeType":"contains","source":1808,"target":1829},{"edgeType":"calls","source":1504,"target":641},{"edgeType":"contains","source":1125,"target":2920},{"edgeType":"calls","source":1669,"target":1340},{"edgeType":"calls","source":1098,"targe
t":807},{"edgeType":"calls","source":1281,"target":1212},{"edgeType":"imports","source":793,"target":990},{"edgeType":"contains","source":4,"target":1732},{"edgeType":"contains","source":2672,"target":2792},{"edgeType":"contains","source":730,"target":366},{"edgeType":"calls","source":1694,"target":36},{"edgeType":"contains","source":1982,"target":1985},{"edgeType":"contains","source":3063,"target":3091},{"edgeType":"contains","source":2982,"target":581},{"edgeType":"calls","source":2600,"target":2603},{"edgeType":"contains","source":550,"target":925},{"edgeType":"calls","source":2456,"target":725},{"edgeType":"imports","source":729,"target":81},{"edgeType":"calls","source":2364,"target":2252},{"edgeType":"calls","source":732,"target":520},{"edgeType":"calls","source":3145,"target":3146},{"edgeType":"contains","source":673,"target":815},{"edgeType":"contains","source":1211,"target":1314},{"edgeType":"imports","source":2932,"target":496},{"edgeType":"contains","source":8,"target":289},{"edgeType":"calls","source":2021,"target":200},{"edgeType":"contains","source":1481,"target":1401},{"edgeType":"contains","source":2519,"target":2520},{"edgeType":"contains","source":1808,"target":1834},{"edgeType":"calls","source":466,"target":574},{"edgeType":"contains","source":19,"target":213},{"edgeType":"contains","source":2852,"target":3031},{"edgeType":"contains","source":994,"target":377},{"edgeType":"calls","source":466,"target":310},{"edgeType":"calls","source":106,"target":107},{"edgeType":"calls","source":1880,"target":1928},{"edgeType":"calls","source":1409,"target":938},{"edgeType":"contains","source":2643,"target":261},{"edgeType":"calls","source":1247,"target":939},{"edgeType":"contains","source":2501,"target":2549},{"edgeType":"contains","source":130,"target":207},{"edgeType":"imports","source":2607,"target":529},{"edgeType":"calls","source":1196,"target":1198},{"edgeType":"contains","source":2932,"target":1081},{"edgeType":"calls","source":1340,"target":698},{"edgeType":"calls","source":255,"target":243},{"edgeType":"calls","source":294,"target":91},{"edgeType":"imports","source":3050,"target":537},{"edgeType":"calls","source":2179,"target":589},{"edgeType":"contains","source":2193,"target":2320},{"edgeType":"contains","source":1675,"target":1730},{"edgeType":"calls","source":709,"target":704},{"edgeType":"calls","source":1886,"target":420},{"edgeType":"calls","source":2981,"target":2946},{"edgeType":"calls","source":3062,"target":2549},{"edgeType":"calls","source":1247,"target":938},{"edgeType":"contains","source":1495,"target":1497},{"edgeType":"calls","source":294,"target":92},{"edgeType":"imports","source":2176,"target":1469},{"edgeType":"calls","source":940,"target":942},{"edgeType":"calls","source":2278,"target":2009},{"edgeType":"imports","source":1569,"target":201},{"edgeType":"calls","source":2046,"target":2070},{"edgeType":"calls","source":566,"target":383},{"edgeType":"imports","source":3072,"target":911},{"edgeType":"calls","source":1315,"target":1212},{"edgeType":"contains","source":994,"target":908},{"edgeType":"imports","source":3050,"target":1064},{"edgeType":"contains","source":2717,"target":2722},{"edgeType":"calls","source":1547,"target":1419},{"edgeType":"contains","source":2193,"target":2317},{"edgeType":"contains","source":1808,"target":1832},{"edgeType":"calls","source":2663,"target":116},{"edgeType":"calls","source":1863,"target":1136},{"edgeType":"calls","source":1666,"target":108},{"edgeType":"contains","source":58,"target":63},{"edgeType":"contains","source":550,"t
arget":1987},{"edgeType":"contains","source":263,"target":578},{"edgeType":"calls","source":2845,"target":1345},{"edgeType":"calls","source":1728,"target":564},{"edgeType":"imports","source":2982,"target":529},{"edgeType":"contains","source":2052,"target":1932},{"edgeType":"calls","source":2535,"target":2499},{"edgeType":"contains","source":130,"target":209},{"edgeType":"imports","source":14,"target":47},{"edgeType":"contains","source":460,"target":2134},{"edgeType":"calls","source":630,"target":512},{"edgeType":"contains","source":2932,"target":1079},{"edgeType":"contains","source":3063,"target":3096},{"edgeType":"contains","source":673,"target":816},{"edgeType":"calls","source":799,"target":558},{"edgeType":"contains","source":2552,"target":2553},{"edgeType":"calls","source":2784,"target":594},{"edgeType":"calls","source":466,"target":575},{"edgeType":"calls","source":2220,"target":2223},{"edgeType":"contains","source":550,"target":929},{"edgeType":"imports","source":3050,"target":270},{"edgeType":"contains","source":263,"target":577},{"edgeType":"contains","source":2852,"target":3030},{"edgeType":"contains","source":2935,"target":1514},{"edgeType":"contains","source":1905,"target":148},{"edgeType":"contains","source":763,"target":140},{"edgeType":"calls","source":512,"target":471},{"edgeType":"contains","source":2444,"target":2465},{"edgeType":"imports","source":3072,"target":1966},{"edgeType":"contains","source":1956,"target":2002},{"edgeType":"calls","source":2278,"target":2275},{"edgeType":"contains","source":550,"target":1722},{"edgeType":"calls","source":1409,"target":939},{"edgeType":"calls","source":2002,"target":521},{"edgeType":"calls","source":1640,"target":1701},{"edgeType":"imports","source":458,"target":550},{"edgeType":"calls","source":1549,"target":558},{"edgeType":"contains","source":3155,"target":3156},{"edgeType":"calls","source":744,"target":144},{"edgeType":"calls","source":383,"target":500},{"edgeType":"contains","source":460,"target":2137},{"edgeType":"calls","source":1841,"target":491},{"edgeType":"calls","source":2676,"target":1032},{"edgeType":"calls","source":2850,"target":2773},{"edgeType":"contains","source":2742,"target":2746},{"edgeType":"calls","source":2634,"target":2599},{"edgeType":"calls","source":2005,"target":2013},{"edgeType":"calls","source":80,"target":116},{"edgeType":"calls","source":271,"target":273},{"edgeType":"calls","source":1490,"target":537},{"edgeType":"calls","source":2660,"target":2057},{"edgeType":"imports","source":3072,"target":908},{"edgeType":"contains","source":2,"target":2858},{"edgeType":"calls","source":527,"target":529},{"edgeType":"calls","source":854,"target":698},{"edgeType":"calls","source":494,"target":495},{"edgeType":"imports","source":7,"target":261},{"edgeType":"imports","source":2518,"target":2226},{"edgeType":"contains","source":3155,"target":3157},{"edgeType":"calls","source":987,"target":802},{"edgeType":"contains","source":1631,"target":2041},{"edgeType":"imports","source":840,"target":601},{"edgeType":"calls","source":353,"target":372},{"edgeType":"calls","source":732,"target":515},{"edgeType":"calls","source":3047,"target":2481},{"edgeType":"calls","source":2147,"target":1841},{"edgeType":"contains","source":1935,"target":545},{"edgeType":"calls","source":1146,"target":630},{"edgeType":"calls","source":2593,"target":2549},{"edgeType":"contains","source":2453,"target":2457},{"edgeType":"contains","source":2151,"target":2305},{"edgeType":"contains","source":2978,"target":1773},{"edgeType":"calls","source":1813,"t
arget":565},{"edgeType":"contains","source":1929,"target":2052},{"edgeType":"calls","source":423,"target":52},{"edgeType":"calls","source":1336,"target":818},{"edgeType":"calls","source":271,"target":272},{"edgeType":"calls","source":1642,"target":52},{"edgeType":"contains","source":2935,"target":1520},{"edgeType":"contains","source":1330,"target":1331},{"edgeType":"calls","source":3047,"target":2478},{"edgeType":"calls","source":2622,"target":2440},{"edgeType":"calls","source":251,"target":366},{"edgeType":"calls","source":253,"target":304},{"edgeType":"calls","source":370,"target":377},{"edgeType":"contains","source":548,"target":3111},{"edgeType":"contains","source":2742,"target":2744},{"edgeType":"calls","source":1997,"target":1999},{"edgeType":"contains","source":2453,"target":2454},{"edgeType":"calls","source":1708,"target":1709},{"edgeType":"calls","source":181,"target":158},{"edgeType":"contains","source":45,"target":206},{"edgeType":"contains","source":550,"target":1727},{"edgeType":"contains","source":4,"target":1477},{"edgeType":"contains","source":2193,"target":2322},{"edgeType":"calls","source":1412,"target":1107},{"edgeType":"calls","source":73,"target":71},{"edgeType":"imports","source":2982,"target":261},{"edgeType":"contains","source":19,"target":219},{"edgeType":"contains","source":14,"target":374},{"edgeType":"calls","source":940,"target":941},{"edgeType":"contains","source":1430,"target":342},{"edgeType":"contains","source":550,"target":1198},{"edgeType":"contains","source":130,"target":212},{"edgeType":"contains","source":228,"target":81},{"edgeType":"calls","source":674,"target":201},{"edgeType":"calls","source":327,"target":123},{"edgeType":"contains","source":2468,"target":140},{"edgeType":"calls","source":1302,"target":1081},{"edgeType":"calls","source":2306,"target":2195},{"edgeType":"imports","source":3041,"target":1074},{"edgeType":"contains","source":2329,"target":2335},{"edgeType":"contains","source":550,"target":934},{"edgeType":"contains","source":2742,"target":2745},{"edgeType":"contains","source":8,"target":294},{"edgeType":"calls","source":800,"target":787},{"edgeType":"contains","source":2982,"target":590},{"edgeType":"contains","source":1477,"target":1528},{"edgeType":"contains","source":2453,"target":2455},{"edgeType":"calls","source":2668,"target":2601},{"edgeType":"calls","source":80,"target":117},{"edgeType":"calls","source":841,"target":838},{"edgeType":"contains","source":550,"target":933},{"edgeType":"calls","source":1880,"target":1925},{"edgeType":"calls","source":1821,"target":1640},{"edgeType":"contains","source":784,"target":814},{"edgeType":"calls","source":73,"target":70},{"edgeType":"contains","source":1164,"target":1190},{"edgeType":"calls","source":486,"target":744},{"edgeType":"contains","source":1073,"target":1075},{"edgeType":"contains","source":3088,"target":3090},{"edgeType":"calls","source":1296,"target":1295},{"edgeType":"imports","source":3072,"target":624},{"edgeType":"contains","source":3055,"target":3056},{"edgeType":"contains","source":784,"target":785},{"edgeType":"imports","source":2636,"target":134},{"edgeType":"contains","source":550,"target":1961},{"edgeType":"contains","source":2151,"target":2275},{"edgeType":"contains","source":1344,"target":1395},{"edgeType":"calls","source":2798,"target":2300},{"edgeType":"calls","source":546,"target":497},{"edgeType":"calls","source":3079,"target":3103},{"edgeType":"contains","source":8,"target":265},{"edgeType":"calls","source":264,"target":255},{"edgeType":"calls","source":1246,"ta
rget":1259},{"edgeType":"contains","source":319,"target":401},{"edgeType":"calls","source":491,"target":617},{"edgeType":"contains","source":10,"target":1787},{"edgeType":"contains","source":2772,"target":2845},{"edgeType":"calls","source":1296,"target":1294},{"edgeType":"contains","source":460,"target":2107},{"edgeType":"contains","source":3055,"target":3057},{"edgeType":"imports","source":2187,"target":1103},{"edgeType":"calls","source":1409,"target":698},{"edgeType":"imports","source":14,"target":24},{"edgeType":"calls","source":900,"target":885},{"edgeType":"contains","source":305,"target":306},{"edgeType":"contains","source":460,"target":1843},{"edgeType":"calls","source":967,"target":922},{"edgeType":"contains","source":9,"target":232},{"edgeType":"calls","source":544,"target":558},{"edgeType":"contains","source":550,"target":1695},{"edgeType":"calls","source":486,"target":771},{"edgeType":"imports","source":2886,"target":46},{"edgeType":"contains","source":550,"target":638},{"edgeType":"contains","source":7,"target":295},{"edgeType":"imports","source":2962,"target":1654},{"edgeType":"contains","source":1401,"target":1479},{"edgeType":"calls","source":3145,"target":2115},{"edgeType":"contains","source":3038,"target":3055},{"edgeType":"calls","source":619,"target":612},{"edgeType":"contains","source":481,"target":663},{"edgeType":"contains","source":460,"target":2371},{"edgeType":"calls","source":1156,"target":877},{"edgeType":"contains","source":1631,"target":1220},{"edgeType":"contains","source":2151,"target":2009},{"edgeType":"contains","source":1007,"target":1009},{"edgeType":"calls","source":635,"target":647},{"edgeType":"contains","source":1073,"target":1077},{"edgeType":"contains","source":2642,"target":2644},{"edgeType":"contains","source":462,"target":463},{"edgeType":"imports","source":3035,"target":1240},{"edgeType":"contains","source":205,"target":238},{"edgeType":"calls","source":2161,"target":376},{"edgeType":"contains","source":1929,"target":2020},{"edgeType":"calls","source":381,"target":329},{"edgeType":"imports","source":2858,"target":913},{"edgeType":"calls","source":272,"target":273},{"edgeType":"contains","source":8,"target":267},{"edgeType":"contains","source":550,"target":641},{"edgeType":"contains","source":1973,"target":1978},{"edgeType":"contains","source":1,"target":484},{"edgeType":"calls","source":1100,"target":766},{"edgeType":"calls","source":619,"target":615},{"edgeType":"contains","source":2193,"target":2293},{"edgeType":"contains","source":1401,"target":1476},{"edgeType":"contains","source":994,"target":1147},{"edgeType":"contains","source":3,"target":2800},{"edgeType":"contains","source":3,"target":1743},{"edgeType":"calls","source":2660,"target":2084},{"edgeType":"imports","source":2725,"target":543},{"edgeType":"contains","source":1007,"target":1008},{"edgeType":"contains","source":3088,"target":3089},{"edgeType":"calls","source":1640,"target":1731},{"edgeType":"imports","source":2963,"target":36},{"edgeType":"contains","source":1,"target":3125},{"edgeType":"contains","source":2832,"target":2833},{"edgeType":"calls","source":254,"target":37},{"edgeType":"imports","source":3038,"target":89},{"edgeType":"imports","source":7,"target":239},{"edgeType":"calls","source":2253,"target":1753},{"edgeType":"contains","source":3038,"target":3053},{"edgeType":"contains","source":2320,"target":2321},{"edgeType":"contains","source":205,"target":237},{"edgeType":"calls","source":1837,"target":116},{"edgeType":"calls","source":2161,"target":1170},{"edgeType":"contai
ns","source":1808,"target":1809},{"edgeType":"calls","source":1552,"target":759},{"edgeType":"calls","source":1339,"target":1284},{"edgeType":"contains","source":2640,"target":64},{"edgeType":"contains","source":470,"target":742},{"edgeType":"calls","source":364,"target":327},{"edgeType":"calls","source":909,"target":80},{"edgeType":"contains","source":2193,"target":2558},{"edgeType":"contains","source":470,"target":481},{"edgeType":"calls","source":1094,"target":682},{"edgeType":"calls","source":2189,"target":34},{"edgeType":"contains","source":3055,"target":3060},{"edgeType":"contains","source":2642,"target":2650},{"edgeType":"imports","source":2636,"target":394},{"edgeType":"contains","source":2672,"target":2777},{"edgeType":"calls","source":512,"target":490},{"edgeType":"calls","source":3145,"target":788},{"edgeType":"contains","source":4,"target":3035},{"edgeType":"contains","source":1007,"target":1011},{"edgeType":"calls","source":760,"target":201},{"edgeType":"contains","source":2932,"target":1059},{"edgeType":"contains","source":305,"target":311},{"edgeType":"contains","source":763,"target":647},{"edgeType":"imports","source":2636,"target":130},{"edgeType":"calls","source":1926,"target":523},{"edgeType":"calls","source":1296,"target":1291},{"edgeType":"calls","source":833,"target":52},{"edgeType":"calls","source":2171,"target":2178},{"edgeType":"contains","source":140,"target":141},{"edgeType":"contains","source":1808,"target":1814},{"edgeType":"calls","source":254,"target":32},{"edgeType":"contains","source":1932,"target":1934},{"edgeType":"contains","source":460,"target":1055},{"edgeType":"contains","source":994,"target":1149},{"edgeType":"contains","source":7,"target":564},{"edgeType":"contains","source":1007,"target":1010},{"edgeType":"contains","source":3041,"target":2438},{"edgeType":"imports","source":2725,"target":541},{"edgeType":"contains","source":550,"target":1171},{"edgeType":"calls","source":512,"target":489},{"edgeType":"contains","source":1344,"target":1400},{"edgeType":"contains","source":2386,"target":2395},{"edgeType":"contains","source":2725,"target":1928},{"edgeType":"contains","source":2401,"target":2723},{"edgeType":"contains","source":1913,"target":145},{"edgeType":"calls","source":1296,"target":1290},{"edgeType":"calls","source":2968,"target":1520},{"edgeType":"calls","source":2667,"target":2659},{"edgeType":"contains","source":550,"target":1699},{"edgeType":"imports","source":549,"target":83},{"edgeType":"contains","source":8,"target":268},{"edgeType":"calls","source":379,"target":384},{"edgeType":"contains","source":2982,"target":567},{"edgeType":"contains","source":1866,"target":1867},{"edgeType":"imports","source":2962,"target":64},{"edgeType":"imports","source":2935,"target":901},{"edgeType":"contains","source":460,"target":1054},{"edgeType":"contains","source":994,"target":1148},{"edgeType":"calls","source":2220,"target":2246},{"edgeType":"calls","source":1576,"target":537},{"edgeType":"imports","source":840,"target":47},{"edgeType":"contains","source":19,"target":191},{"edgeType":"contains","source":550,"target":1967},{"edgeType":"contains","source":2772,"target":2846},{"edgeType":"imports","source":2725,"target":540},{"edgeType":"contains","source":1126,"target":1288},{"edgeType":"contains","source":2151,"target":2013},{"edgeType":"calls","source":1512,"target":939},{"edgeType":"contains","source":1007,"target":1013},{"edgeType":"calls","source":767,"target":779},{"edgeType":"calls","source":2278,"target":2294},{"edgeType":"calls","source":346,"targe
t":353},{"edgeType":"contains","source":2725,"target":1925},{"edgeType":"contains","source":205,"target":242},{"edgeType":"calls","source":3148,"target":2279},{"edgeType":"calls","source":1296,"target":1293},{"edgeType":"contains","source":2672,"target":2775},{"edgeType":"imports","source":1905,"target":1912},{"edgeType":"calls","source":409,"target":250},{"edgeType":"contains","source":2254,"target":2256},{"edgeType":"contains","source":140,"target":143},{"edgeType":"imports","source":2880,"target":491},{"edgeType":"contains","source":7,"target":302},{"edgeType":"contains","source":2852,"target":3008},{"edgeType":"contains","source":2444,"target":65},{"edgeType":"contains","source":1631,"target":1221},{"edgeType":"calls","source":2576,"target":2570},{"edgeType":"contains","source":19,"target":194},{"edgeType":"contains","source":460,"target":1057},{"edgeType":"contains","source":994,"target":1151},{"edgeType":"calls","source":1160,"target":752},{"edgeType":"calls","source":2189,"target":33},{"edgeType":"contains","source":2672,"target":2776},{"edgeType":"imports","source":2932,"target":200},{"edgeType":"calls","source":1512,"target":938},{"edgeType":"contains","source":1073,"target":1080},{"edgeType":"contains","source":1007,"target":1012},{"edgeType":"contains","source":1344,"target":1398},{"edgeType":"contains","source":2962,"target":1185},{"edgeType":"contains","source":550,"target":1702},{"edgeType":"contains","source":97,"target":153},{"edgeType":"calls","source":2634,"target":2622},{"edgeType":"calls","source":2032,"target":145},{"edgeType":"contains","source":205,"target":241},{"edgeType":"contains","source":2444,"target":2445},{"edgeType":"calls","source":1296,"target":1292},{"edgeType":"calls","source":1317,"target":641},{"edgeType":"contains","source":2151,"target":2278},{"edgeType":"contains","source":140,"target":142},{"edgeType":"calls","source":2419,"target":2417},{"edgeType":"contains","source":1932,"target":1933},{"edgeType":"contains","source":1631,"target":2015},{"edgeType":"calls","source":1887,"target":1997},{"edgeType":"contains","source":3073,"target":1179},{"edgeType":"contains","source":460,"target":1056},{"edgeType":"contains","source":994,"target":1150},{"edgeType":"calls","source":1215,"target":98},{"edgeType":"contains","source":883,"target":631},{"edgeType":"calls","source":1155,"target":901},{"edgeType":"contains","source":19,"target":196},{"edgeType":"contains","source":460,"target":1588},{"edgeType":"contains","source":2853,"target":2984},{"edgeType":"contains","source":2193,"target":2567},{"edgeType":"contains","source":205,"target":244},{"edgeType":"calls","source":1692,"target":1695},{"edgeType":"imports","source":2963,"target":47},{"edgeType":"contains","source":97,"target":156},{"edgeType":"contains","source":1007,"target":1015},{"edgeType":"contains","source":462,"target":469},{"edgeType":"contains","source":1262,"target":1567},{"edgeType":"calls","source":1408,"target":986},{"edgeType":"contains","source":1238,"target":1254},{"edgeType":"calls","source":850,"target":843},{"edgeType":"contains","source":2982,"target":570},{"edgeType":"contains","source":550,"target":383},{"edgeType":"imports","source":2158,"target":1219},{"edgeType":"calls","source":1694,"target":1104},{"edgeType":"calls","source":2512,"target":2438},{"edgeType":"imports","source":2962,"target":606},{"edgeType":"calls","source":1315,"target":1226},{"edgeType":"calls","source":1965,"target":1952},{"edgeType":"contains","source":2193,"target":2303},{"edgeType":"calls","source":2031,"targ
et":2020},{"edgeType":"contains","source":19,"target":195},{"edgeType":"contains","source":2853,"target":2985},{"edgeType":"calls","source":1811,"target":1705},{"edgeType":"contains","source":1007,"target":1014},{"edgeType":"contains","source":1164,"target":1168},{"edgeType":"contains","source":3113,"target":3117},{"edgeType":"calls","source":1725,"target":1728},{"edgeType":"calls","source":1385,"target":641},{"edgeType":"imports","source":2963,"target":46},{"edgeType":"contains","source":553,"target":554},{"edgeType":"contains","source":1238,"target":1255},{"edgeType":"contains","source":550,"target":1439},{"edgeType":"calls","source":1544,"target":1525},{"edgeType":"calls","source":379,"target":380},{"edgeType":"contains","source":1,"target":225},{"edgeType":"contains","source":1947,"target":417},{"edgeType":"contains","source":2982,"target":571},{"edgeType":"calls","source":1993,"target":819},{"edgeType":"calls","source":2002,"target":540},{"edgeType":"calls","source":123,"target":124},{"edgeType":"contains","source":3063,"target":3081},{"edgeType":"imports","source":2187,"target":319},{"edgeType":"imports","source":1682,"target":911},{"edgeType":"contains","source":550,"target":1175},{"edgeType":"calls","source":511,"target":516},{"edgeType":"contains","source":7,"target":303},{"edgeType":"calls","source":891,"target":892},{"edgeType":"calls","source":3136,"target":800},{"edgeType":"contains","source":460,"target":1058},{"edgeType":"contains","source":460,"target":2115},{"edgeType":"calls","source":2000,"target":1923},{"edgeType":"contains","source":19,"target":198},{"edgeType":"imports","source":3072,"target":630},{"edgeType":"contains","source":3113,"target":3114},{"edgeType":"contains","source":205,"target":246},{"edgeType":"contains","source":673,"target":801},{"edgeType":"contains","source":1007,"target":1017},{"edgeType":"contains","source":2967,"target":2883},{"edgeType":"calls","source":2735,"target":543},{"edgeType":"calls","source":1993,"target":558},{"edgeType":"contains","source":1073,"target":1085},{"edgeType":"imports","source":2725,"target":24},{"edgeType":"calls","source":2279,"target":938},{"edgeType":"contains","source":1238,"target":1252},{"edgeType":"calls","source":2429,"target":2366},{"edgeType":"calls","source":529,"target":490},{"edgeType":"calls","source":1758,"target":1765},{"edgeType":"contains","source":2932,"target":1061},{"edgeType":"calls","source":2002,"target":543},{"edgeType":"calls","source":1419,"target":1439},{"edgeType":"calls","source":419,"target":200},{"edgeType":"calls","source":1298,"target":698},{"edgeType":"imports","source":2636,"target":140},{"edgeType":"contains","source":1808,"target":1816},{"edgeType":"calls","source":329,"target":346},{"edgeType":"contains","source":3113,"target":3115},{"edgeType":"contains","source":3041,"target":2440},{"edgeType":"imports","source":840,"target":58},{"edgeType":"contains","source":2672,"target":2780},{"edgeType":"contains","source":711,"target":1736},{"edgeType":"contains","source":205,"target":245},{"edgeType":"imports","source":1523,"target":24},{"edgeType":"calls","source":1987,"target":743},{"edgeType":"contains","source":1262,"target":1566},{"edgeType":"contains","source":460,"target":2117},{"edgeType":"calls","source":2279,"target":939},{"edgeType":"calls","source":978,"target":47},{"edgeType":"imports","source":2401,"target":2403},{"edgeType":"contains","source":1288,"target":1289},{"edgeType":"contains","source":1808,"target":1817},{"edgeType":"contains","source":2852,"target":3019},{"edgeType"
:"contains","source":462,"target":473},{"edgeType":"calls","source":1526,"target":759},{"edgeType":"contains","source":2853,"target":81},{"edgeType":"contains","source":1164,"target":1173},{"edgeType":"contains","source":673,"target":803},{"edgeType":"calls","source":531,"target":421},{"edgeType":"contains","source":883,"target":635},{"edgeType":"contains","source":2791,"target":2796},{"edgeType":"contains","source":3088,"target":3102},{"edgeType":"contains","source":97,"target":160},{"edgeType":"contains","source":10,"target":2857},{"edgeType":"contains","source":2444,"target":76},{"edgeType":"contains","source":2966,"target":2656},{"edgeType":"contains","source":460,"target":2385},{"edgeType":"contains","source":3015,"target":80},{"edgeType":"contains","source":97,"target":161},{"edgeType":"contains","source":2444,"target":75},{"edgeType":"imports","source":2725,"target":1871},{"edgeType":"calls","source":1006,"target":231},{"edgeType":"contains","source":460,"target":2120},{"edgeType":"contains","source":2672,"target":2785},{"edgeType":"contains","source":2761,"target":1083},{"edgeType":"imports","source":2401,"target":2402},{"edgeType":"calls","source":115,"target":105},{"edgeType":"contains","source":2582,"target":2668},{"edgeType":"contains","source":550,"target":1180},{"edgeType":"contains","source":7,"target":308},{"edgeType":"contains","source":462,"target":472},{"edgeType":"calls","source":486,"target":494},{"edgeType":"calls","source":1441,"target":486},{"edgeType":"contains","source":2900,"target":1268},{"edgeType":"calls","source":1838,"target":1128},{"edgeType":"contains","source":1164,"target":1172},{"edgeType":"contains","source":784,"target":796},{"edgeType":"contains","source":1631,"target":1232},{"edgeType":"calls","source":2180,"target":2156},{"edgeType":"contains","source":2725,"target":2729},{"edgeType":"contains","source":97,"target":159},{"edgeType":"calls","source":395,"target":144},{"edgeType":"calls","source":1345,"target":819},{"edgeType":"contains","source":2672,"target":2786},{"edgeType":"calls","source":2264,"target":2195},{"edgeType":"calls","source":1396,"target":1088},{"edgeType":"imports","source":2962,"target":601},{"edgeType":"contains","source":81,"target":127},{"edgeType":"calls","source":348,"target":280},{"edgeType":"contains","source":2386,"target":2400},{"edgeType":"contains","source":1913,"target":2000},{"edgeType":"contains","source":2,"target":729},{"edgeType":"contains","source":58,"target":50},{"edgeType":"imports","source":2886,"target":1635},{"edgeType":"calls","source":1796,"target":1640},{"edgeType":"contains","source":673,"target":805},{"edgeType":"contains","source":2469,"target":2470},{"edgeType":"contains","source":1007,"target":1021},{"edgeType":"contains","source":1238,"target":1256},{"edgeType":"contains","source":550,"target":1182},{"edgeType":"calls","source":2697,"target":132},{"edgeType":"contains","source":2246,"target":2248},{"edgeType":"contains","source":2932,"target":1065},{"edgeType":"contains","source":1401,"target":1224},{"edgeType":"contains","source":2280,"target":2251},{"edgeType":"contains","source":1631,"target":1229},{"edgeType":"contains","source":2301,"target":2392},{"edgeType":"calls","source":758,"target":786},{"edgeType":"calls","source":740,"target":551},{"edgeType":"imports","source":729,"target":58},{"edgeType":"calls","source":1407,"target":486},{"edgeType":"calls","source":254,"target":26},{"edgeType":"contains","source":7,"target":310},{"edgeType":"contains","source":1477,"target":1511},{"edgeType":"call
s","source":602,"target":601},{"edgeType":"contains","source":2193,"target":2306},{"edgeType":"calls","source":2735,"target":540},{"edgeType":"contains","source":1153,"target":1251},{"edgeType":"contains","source":19,"target":201},{"edgeType":"contains","source":4,"target":666},{"edgeType":"contains","source":2444,"target":2453},{"edgeType":"calls","source":173,"target":157},{"edgeType":"contains","source":487,"target":756},{"edgeType":"contains","source":2193,"target":2570},{"edgeType":"contains","source":2692,"target":2693},{"edgeType":"contains","source":673,"target":804},{"edgeType":"contains","source":1238,"target":1257},{"edgeType":"imports","source":2900,"target":936},{"edgeType":"calls","source":2769,"target":543},{"edgeType":"contains","source":2444,"target":74},{"edgeType":"contains","source":2245,"target":2280},{"edgeType":"contains","source":883,"target":900},{"edgeType":"contains","source":1631,"target":1230},{"edgeType":"calls","source":1230,"target":158},{"edgeType":"contains","source":1808,"target":1821},{"edgeType":"contains","source":2858,"target":1246},{"edgeType":"calls","source":491,"target":567},{"edgeType":"imports","source":2767,"target":2729},{"edgeType":"calls","source":2773,"target":383},{"edgeType":"calls","source":1887,"target":1951},{"edgeType":"imports","source":1682,"target":1746},{"edgeType":"calls","source":1336,"target":270},{"edgeType":"contains","source":2501,"target":2508},{"edgeType":"imports","source":1262,"target":1553},{"edgeType":"contains","source":481,"target":648},{"edgeType":"calls","source":270,"target":283},{"edgeType":"calls","source":2041,"target":348},{"edgeType":"contains","source":2303,"target":2304},{"edgeType":"contains","source":319,"target":385},{"edgeType":"contains","source":4,"target":3015},{"edgeType":"contains","source":470,"target":724},{"edgeType":"calls","source":1559,"target":1548},{"edgeType":"calls","source":1145,"target":641},{"edgeType":"contains","source":883,"target":1134},{"edgeType":"calls","source":2946,"target":2948},{"edgeType":"calls","source":3029,"target":1432},{"edgeType":"calls","source":3079,"target":3054},{"edgeType":"contains","source":550,"target":622},{"edgeType":"imports","source":3072,"target":1201},{"edgeType":"contains","source":2725,"target":1907},{"edgeType":"calls","source":2728,"target":986},{"edgeType":"contains","source":319,"target":384},{"edgeType":"contains","source":460,"target":2355},{"edgeType":"contains","source":10,"target":713},{"edgeType":"calls","source":1988,"target":1990},{"edgeType":"calls","source":73,"target":47},{"edgeType":"contains","source":2270,"target":2271},{"edgeType":"imports","source":2607,"target":24},{"edgeType":"contains","source":2151,"target":1996},{"edgeType":"contains","source":1533,"target":1599},{"edgeType":"calls","source":1005,"target":1016},{"edgeType":"contains","source":1381,"target":233},{"edgeType":"contains","source":550,"target":625},{"edgeType":"contains","source":1023,"target":1025},{"edgeType":"contains","source":481,"target":650},{"edgeType":"contains","source":734,"target":735},{"edgeType":"contains","source":1401,"target":1459},{"edgeType":"calls","source":295,"target":303},{"edgeType":"contains","source":319,"target":387},{"edgeType":"contains","source":1789,"target":1851},{"edgeType":"contains","source":460,"target":1037},{"edgeType":"contains","source":3111,"target":3151},{"edgeType":"calls","source":3161,"target":3152},{"edgeType":"imports","source":7,"target":24},{"edgeType":"contains","source":1799,"target":1806},{"edgeType":"contains","so
urce":1989,"target":1994},{"edgeType":"calls","source":2218,"target":938},{"edgeType":"contains","source":64,"target":100},{"edgeType":"contains","source":792,"target":2900},{"edgeType":"calls","source":1747,"target":743},{"edgeType":"calls","source":2624,"target":2622},{"edgeType":"calls","source":1154,"target":1156},{"edgeType":"calls","source":1683,"target":877},{"edgeType":"contains","source":1023,"target":1024},{"edgeType":"contains","source":481,"target":649},{"edgeType":"contains","source":8,"target":249},{"edgeType":"contains","source":4,"target":1430},{"edgeType":"imports","source":2982,"target":25},{"edgeType":"contains","source":2401,"target":2700},{"edgeType":"calls","source":1962,"target":420},{"edgeType":"contains","source":2598,"target":2407},{"edgeType":"contains","source":3111,"target":3152},{"edgeType":"calls","source":295,"target":302},{"edgeType":"contains","source":460,"target":1036},{"edgeType":"imports","source":2444,"target":319},{"edgeType":"contains","source":2193,"target":2541},{"edgeType":"calls","source":1922,"target":1924},{"edgeType":"contains","source":840,"target":883},{"edgeType":"imports","source":7,"target":25},{"edgeType":"contains","source":1989,"target":1995},{"edgeType":"contains","source":550,"target":1152},{"edgeType":"contains","source":470,"target":725},{"edgeType":"contains","source":994,"target":601},{"edgeType":"calls","source":2218,"target":939},{"edgeType":"contains","source":2245,"target":2251},{"edgeType":"calls","source":2034,"target":1887},{"edgeType":"contains","source":1789,"target":1853},{"edgeType":"contains","source":2151,"target":2258},{"edgeType":"contains","source":2608,"target":2625},{"edgeType":"contains","source":1878,"target":415},{"edgeType":"calls","source":3079,"target":3052},{"edgeType":"contains","source":481,"target":652},{"edgeType":"imports","source":1523,"target":64},{"edgeType":"imports","source":2962,"target":1701},{"edgeType":"imports","source":7,"target":550},{"edgeType":"calls","source":1666,"target":1663},{"edgeType":"contains","source":1211,"target":1277},{"edgeType":"contains","source":734,"target":737},{"edgeType":"imports","source":2607,"target":550},{"edgeType":"imports","source":2982,"target":24},{"edgeType":"contains","source":1023,"target":1027},{"edgeType":"contains","source":550,"target":627},{"edgeType":"contains","source":3046,"target":3058},{"edgeType":"contains","source":1631,"target":1206},{"edgeType":"imports","source":910,"target":40},{"edgeType":"calls","source":105,"target":109},{"edgeType":"contains","source":64,"target":102},{"edgeType":"calls","source":1840,"target":1818},{"edgeType":"contains","source":1808,"target":1533},{"edgeType":"calls","source":709,"target":676},{"edgeType":"calls","source":1793,"target":368},{"edgeType":"contains","source":3015,"target":319},{"edgeType":"contains","source":1533,"target":1602},{"edgeType":"contains","source":673,"target":777},{"edgeType":"contains","source":1023,"target":1026},{"edgeType":"contains","source":481,"target":651},{"edgeType":"contains","source":1631,"target":1207},{"edgeType":"calls","source":1163,"target":78},{"edgeType":"contains","source":734,"target":736},{"edgeType":"contains","source":2151,"target":414},{"edgeType":"contains","source":550,"target":626},{"edgeType":"contains","source":319,"target":388},{"edgeType":"contains","source":470,"target":728},{"edgeType":"contains","source":460,"target":1038},{"edgeType":"contains","source":460,"target":2095},{"edgeType":"contains","source":3046,"target":3059},{"edgeType":"contains","source
":550,"target":1419},{"edgeType":"calls","source":3161,"target":3151},{"edgeType":"calls","source":612,"target":511},{"edgeType":"calls","source":3161,"target":3150},{"edgeType":"contains","source":64,"target":101},{"edgeType":"contains","source":1626,"target":1627},{"edgeType":"calls","source":1170,"target":1182},{"edgeType":"contains","source":734,"target":739},{"edgeType":"calls","source":1962,"target":417},{"edgeType":"contains","source":8,"target":254},{"edgeType":"contains","source":550,"target":629},{"edgeType":"contains","source":1023,"target":1029},{"edgeType":"calls","source":1220,"target":1221},{"edgeType":"imports","source":2401,"target":1913},{"edgeType":"contains","source":3000,"target":518},{"edgeType":"contains","source":1093,"target":973},{"edgeType":"contains","source":1153,"target":1227},{"edgeType":"calls","source":514,"target":116},{"edgeType":"contains","source":994,"target":1135},{"edgeType":"contains","source":994,"target":78},{"edgeType":"calls","source":1402,"target":1128},{"edgeType":"contains","source":550,"target":1157},{"edgeType":"imports","source":2982,"target":550},{"edgeType":"contains","source":64,"target":104},{"edgeType":"contains","source":487,"target":732},{"edgeType":"calls","source":253,"target":280},{"edgeType":"contains","source":994,"target":606},{"edgeType":"calls","source":1755,"target":491},{"edgeType":"calls","source":122,"target":111},{"edgeType":"contains","source":734,"target":738},{"edgeType":"calls","source":3102,"target":2599},{"edgeType":"contains","source":550,"target":628},{"edgeType":"contains","source":1023,"target":1028},{"edgeType":"contains","source":481,"target":653},{"edgeType":"contains","source":460,"target":1304},{"edgeType":"contains","source":2151,"target":412},{"edgeType":"imports","source":2963,"target":81},{"edgeType":"calls","source":1617,"target":1597},{"edgeType":"calls","source":576,"target":572},{"edgeType":"contains","source":2453,"target":563},{"edgeType":"calls","source":105,"target":110},{"edgeType":"calls","source":2578,"target":2195},{"edgeType":"calls","source":833,"target":797},{"edgeType":"calls","source":491,"target":36},{"edgeType":"contains","source":64,"target":103},{"edgeType":"contains","source":2729,"target":2842},{"edgeType":"contains","source":2725,"target":1909},{"edgeType":"imports","source":1682,"target":418},{"edgeType":"contains","source":1982,"target":371},{"edgeType":"calls","source":1414,"target":486},{"edgeType":"calls","source":885,"target":765},{"edgeType":"contains","source":550,"target":1424},{"edgeType":"imports","source":3015,"target":1656},{"edgeType":"calls","source":252,"target":304},{"edgeType":"contains","source":3063,"target":3064},{"edgeType":"imports","source":549,"target":673},{"edgeType":"contains","source":792,"target":793},{"edgeType":"calls","source":2070,"target":2084},{"edgeType":"imports","source":2982,"target":36},{"edgeType":"calls","source":1957,"target":37},{"edgeType":"calls","source":414,"target":39},{"edgeType":"calls","source":1838,"target":1612},{"edgeType":"contains","source":470,"target":733},{"edgeType":"contains","source":2295,"target":2296},{"edgeType":"contains","source":3111,"target":3161},{"edgeType":"calls","source":2690,"target":2684},{"edgeType":"calls","source":2508,"target":2512},{"edgeType":"calls","source":486,"target":714},{"edgeType":"contains","source":1989,"target":2004},{"edgeType":"contains","source":134,"target":50},{"edgeType":"calls","source":2665,"target":2666},{"edgeType":"calls","source":1107,"target":1018},{"edgeType":"contains","
source":319,"target":392},{"edgeType":"contains","source":481,"target":655},{"edgeType":"contains","source":9,"target":224},{"edgeType":"calls","source":1702,"target":542},{"edgeType":"contains","source":550,"target":1159},{"edgeType":"calls","source":1258,"target":36},{"edgeType":"calls","source":1497,"target":819},{"edgeType":"calls","source":2348,"target":338},{"edgeType":"imports","source":2880,"target":819},{"edgeType":"contains","source":460,"target":1042},{"edgeType":"contains","source":460,"target":2099},{"edgeType":"calls","source":800,"target":756},{"edgeType":"contains","source":994,"target":1136},{"edgeType":"contains","source":2518,"target":2519},{"edgeType":"calls","source":1674,"target":1674},{"edgeType":"calls","source":47,"target":52},{"edgeType":"contains","source":2262,"target":2263},{"edgeType":"calls","source":2690,"target":2685},{"edgeType":"contains","source":2193,"target":2552},{"edgeType":"calls","source":526,"target":530},{"edgeType":"contains","source":130,"target":173},{"edgeType":"calls","source":2281,"target":1887},{"edgeType":"contains","source":1788,"target":1894},{"edgeType":"contains","source":1611,"target":771},{"edgeType":"contains","source":1651,"target":1645},{"edgeType":"imports","source":2704,"target":460},{"edgeType":"contains","source":1211,"target":1808},{"edgeType":"imports","source":263,"target":24},{"edgeType":"contains","source":1153,"target":1231},{"edgeType":"calls","source":1343,"target":1103},{"edgeType":"calls","source":1748,"target":1761},{"edgeType":"calls","source":2409,"target":2408},{"edgeType":"contains","source":4,"target":2761},{"edgeType":"calls","source":1617,"target":537},{"edgeType":"calls","source":1520,"target":901},{"edgeType":"contains","source":1344,"target":1385},{"edgeType":"calls","source":774,"target":773},{"edgeType":"contains","source":481,"target":658},{"edgeType":"contains","source":2725,"target":1912},{"edgeType":"calls","source":909,"target":816},{"edgeType":"contains","source":1533,"target":1604},{"edgeType":"imports","source":2880,"target":24},{"edgeType":"contains","source":460,"target":1044},{"edgeType":"contains","source":3,"target":1205},{"edgeType":"imports","source":2607,"target":295},{"edgeType":"calls","source":2774,"target":1928},{"edgeType":"contains","source":550,"target":1161},{"edgeType":"calls","source":1818,"target":383},{"edgeType":"calls","source":3169,"target":3161},{"edgeType":"contains","source":4,"target":910},{"edgeType":"imports","source":6,"target":64},{"edgeType":"contains","source":550,"target":1689},{"edgeType":"contains","source":1989,"target":2003},{"edgeType":"contains","source":460,"target":2365},{"edgeType":"contains","source":460,"target":1308},{"edgeType":"calls","source":909,"target":815},{"edgeType":"calls","source":631,"target":184},{"edgeType":"calls","source":253,"target":269},{"edgeType":"contains","source":319,"target":397},{"edgeType":"contains","source":1989,"target":2008},{"edgeType":"calls","source":1297,"target":938},{"edgeType":"calls","source":1550,"target":1551},{"edgeType":"contains","source":9,"target":229},{"edgeType":"contains","source":460,"target":1840},{"edgeType":"calls","source":1862,"target":1128},{"edgeType":"calls","source":1311,"target":1296},{"edgeType":"contains","source":994,"target":84},{"edgeType":"contains","source":2935,"target":1486},{"edgeType":"calls","source":2095,"target":2098},{"edgeType":"contains","source":205,"target":231},{"edgeType":"contains","source":3111,"target":3165},{"edgeType":"contains","source":19,"target":184},{"edgeType":
"contains","source":7,"target":556},{"edgeType":"contains","source":1973,"target":1976},{"edgeType":"calls","source":1854,"target":1640},{"edgeType":"contains","source":2193,"target":2555},{"edgeType":"imports","source":3015,"target":1651},{"edgeType":"contains","source":319,"target":396},{"edgeType":"calls","source":834,"target":491},{"edgeType":"calls","source":534,"target":542},{"edgeType":"calls","source":376,"target":419},{"edgeType":"contains","source":550,"target":1163},{"edgeType":"calls","source":2031,"target":116},{"edgeType":"contains","source":2386,"target":2387},{"edgeType":"contains","source":1973,"target":1977},{"edgeType":"calls","source":1320,"target":1016},{"edgeType":"contains","source":550,"target":1955},{"edgeType":"contains","source":19,"target":183},{"edgeType":"contains","source":550,"target":898},{"edgeType":"calls","source":909,"target":809},{"edgeType":"calls","source":2017,"target":1871},{"edgeType":"contains","source":460,"target":2367},{"edgeType":"contains","source":1651,"target":1653},{"edgeType":"contains","source":2935,"target":1487},{"edgeType":"contains","source":130,"target":177},{"edgeType":"calls","source":567,"target":579},{"edgeType":"contains","source":1929,"target":2016},{"edgeType":"calls","source":328,"target":60},{"edgeType":"contains","source":1989,"target":2006},{"edgeType":"contains","source":883,"target":885},{"edgeType":"contains","source":1979,"target":730},{"edgeType":"contains","source":550,"target":1694},{"edgeType":"calls","source":2053,"target":494},{"edgeType":"contains","source":994,"target":1143},{"edgeType":"contains","source":2761,"target":1064},{"edgeType":"contains","source":3111,"target":3163},{"edgeType":"calls","source":2192,"target":939},{"edgeType":"calls","source":2673,"target":2676},{"edgeType":"calls","source":1552,"target":698},{"edgeType":"contains","source":319,"target":399},{"edgeType":"contains","source":1238,"target":1240},{"edgeType":"contains","source":481,"target":662},{"edgeType":"contains","source":1631,"target":420},{"edgeType":"contains","source":1626,"target":1632},{"edgeType":"calls","source":422,"target":52},{"edgeType":"contains","source":130,"target":179},{"edgeType":"calls","source":2648,"target":2659},{"edgeType":"contains","source":319,"target":398},{"edgeType":"calls","source":534,"target":544},{"edgeType":"calls","source":1071,"target":1074},{"edgeType":"calls","source":1031,"target":200},{"edgeType":"calls","source":295,"target":25},{"edgeType":"contains","source":1989,"target":2007},{"edgeType":"calls","source":923,"target":905},{"edgeType":"contains","source":2852,"target":359},{"edgeType":"imports","source":3050,"target":35},{"edgeType":"contains","source":550,"target":1165},{"edgeType":"contains","source":1477,"target":1495},{"edgeType":"calls","source":1297,"target":939},{"edgeType":"contains","source":2445,"target":555},{"edgeType":"contains","source":2935,"target":1485},{"edgeType":"contains","source":19,"target":185},{"edgeType":"contains","source":7,"target":557},{"edgeType":"calls","source":843,"target":743},{"edgeType":"calls","source":1880,"target":1892},{"edgeType":"calls","source":888,"target":405},{"edgeType":"contains","source":1973,"target":1975},{"edgeType":"contains","source":3111,"target":3164},{"edgeType":"contains","source":460,"target":1576},{"edgeType":"calls","source":2322,"target":2195},{"edgeType":"calls","source":1612,"target":1215},{"edgeType":"contains","source":1789,"target":1865},{"edgeType":"contains","source":1788,"target":1896},{"edgeType":"contains","source":46
0,"target":2369},{"edgeType":"contains","source":481,"target":661},{"edgeType":"calls","source":2340,"target":2195},{"edgeType":"calls","source":131,"target":116},{"edgeType":"calls","source":63,"target":110},{"edgeType":"calls","source":1215,"target":602},{"edgeType":"calls","source":2724,"target":2711},{"edgeType":"calls","source":1163,"target":1157},{"edgeType":"contains","source":1533,"target":1581},{"edgeType":"contains","source":883,"target":1118},{"edgeType":"calls","source":2005,"target":2009},{"edgeType":"contains","source":436,"target":441},{"edgeType":"calls","source":2981,"target":2936},{"edgeType":"contains","source":2770,"target":2873},{"edgeType":"calls","source":2735,"target":521},{"edgeType":"contains","source":2193,"target":2262},{"edgeType":"contains","source":487,"target":709},{"edgeType":"contains","source":2158,"target":2290},{"edgeType":"calls","source":114,"target":115},{"edgeType":"calls","source":2046,"target":2059},{"edgeType":"contains","source":1947,"target":1960},{"edgeType":"calls","source":2567,"target":2558},{"edgeType":"contains","source":3111,"target":3136},{"edgeType":"contains","source":460,"target":2339},{"edgeType":"calls","source":1690,"target":939},{"edgeType":"contains","source":914,"target":685},{"edgeType":"contains","source":2770,"target":2874},{"edgeType":"calls","source":510,"target":522},{"edgeType":"contains","source":1631,"target":1979},{"edgeType":"calls","source":63,"target":109},{"edgeType":"calls","source":1716,"target":1718},{"edgeType":"contains","source":1533,"target":1582},{"edgeType":"calls","source":2766,"target":353},{"edgeType":"contains","source":8,"target":230},{"edgeType":"calls","source":617,"target":376},{"edgeType":"calls","source":1170,"target":939},{"edgeType":"contains","source":2641,"target":2645},{"edgeType":"contains","source":883,"target":1117},{"edgeType":"contains","source":436,"target":440},{"edgeType":"calls","source":1881,"target":1888},{"edgeType":"calls","source":1839,"target":1340},{"edgeType":"contains","source":2158,"target":2291},{"edgeType":"calls","source":2456,"target":2300},{"edgeType":"contains","source":2725,"target":1890},{"edgeType":"calls","source":1633,"target":327},{"edgeType":"calls","source":732,"target":511},{"edgeType":"contains","source":2069,"target":2143},{"edgeType":"contains","source":666,"target":973},{"edgeType":"calls","source":1701,"target":1654},{"edgeType":"contains","source":2176,"target":940},{"edgeType":"calls","source":1690,"target":938},{"edgeType":"calls","source":1836,"target":1701},{"edgeType":"calls","source":800,"target":520},{"edgeType":"contains","source":460,"target":1813},{"edgeType":"contains","source":3153,"target":3154},{"edgeType":"calls","source":353,"target":371},{"edgeType":"calls","source":1802,"target":641},{"edgeType":"contains","source":2963,"target":2966},{"edgeType":"contains","source":2920,"target":1128},{"edgeType":"contains","source":883,"target":1120},{"edgeType":"calls","source":1336,"target":817},{"edgeType":"calls","source":1886,"target":415},{"edgeType":"contains","source":1631,"target":1712},{"edgeType":"calls","source":1215,"target":604},{"edgeType":"contains","source":2860,"target":81},{"edgeType":"contains","source":2770,"target":2871},{"edgeType":"imports","source":2151,"target":1458},{"edgeType":"calls","source":2219,"target":1716},{"edgeType":"contains","source":2932,"target":1020},{"edgeType":"calls","source":1855,"target":1640},{"edgeType":"contains","source":394,"target":423},{"edgeType":"calls","source":796,"target":116},{"edgeType":"c
alls","source":313,"target":26},{"edgeType":"calls","source":238,"target":237},{"edgeType":"calls","source":240,"target":175},{"edgeType":"calls","source":2850,"target":1182},{"edgeType":"contains","source":2158,"target":2288},{"edgeType":"calls","source":370,"target":373},{"edgeType":"calls","source":48,"target":49},{"edgeType":"contains","source":2256,"target":2421},{"edgeType":"contains","source":2932,"target":1284},{"edgeType":"calls","source":2096,"target":2094},{"edgeType":"contains","source":7,"target":529},{"edgeType":"contains","source":1788,"target":1866},{"edgeType":"calls","source":674,"target":198},{"edgeType":"imports","source":840,"target":81},{"edgeType":"contains","source":319,"target":370},{"edgeType":"calls","source":1171,"target":1174},{"edgeType":"contains","source":2962,"target":2998},{"edgeType":"contains","source":460,"target":1812},{"edgeType":"contains","source":883,"target":1119},{"edgeType":"calls","source":1887,"target":383},{"edgeType":"calls","source":992,"target":116},{"edgeType":"contains","source":2770,"target":2872},{"edgeType":"contains","source":394,"target":422},{"edgeType":"calls","source":147,"target":150},{"edgeType":"calls","source":618,"target":612},{"edgeType":"calls","source":1881,"target":1890},{"edgeType":"imports","source":2962,"target":1685},{"edgeType":"contains","source":81,"target":84},{"edgeType":"contains","source":2158,"target":2289},{"edgeType":"contains","source":2725,"target":1888},{"edgeType":"contains","source":1533,"target":1580},{"edgeType":"contains","source":2932,"target":1285},{"edgeType":"contains","source":2607,"target":261},{"edgeType":"contains","source":7,"target":528},{"edgeType":"contains","source":2069,"target":2141},{"edgeType":"calls","source":2674,"target":2675},{"edgeType":"calls","source":593,"target":594},{"edgeType":"calls","source":981,"target":986},{"edgeType":"contains","source":319,"target":369},{"edgeType":"contains","source":973,"target":975},{"edgeType":"contains","source":436,"target":445},{"edgeType":"contains","source":2770,"target":2877},{"edgeType":"calls","source":866,"target":846},{"edgeType":"imports","source":2935,"target":936},{"edgeType":"contains","source":1631,"target":1982},{"edgeType":"imports","source":1262,"target":1268},{"edgeType":"contains","source":2982,"target":533},{"edgeType":"calls","source":1039,"target":1033},{"edgeType":"contains","source":394,"target":426},{"edgeType":"calls","source":1747,"target":754},{"edgeType":"calls","source":1881,"target":1885},{"edgeType":"contains","source":2608,"target":2614},{"edgeType":"contains","source":1533,"target":1585},{"edgeType":"calls","source":1165,"target":1090},{"edgeType":"calls","source":2046,"target":2055},{"edgeType":"calls","source":1413,"target":537},{"edgeType":"contains","source":8,"target":236},{"edgeType":"contains","source":2193,"target":2266},{"edgeType":"contains","source":994,"target":1116},{"edgeType":"imports","source":1569,"target":208},{"edgeType":"contains","source":2256,"target":2427},{"edgeType":"contains","source":550,"target":1403},{"edgeType":"contains","source":2069,"target":2146},{"edgeType":"calls","source":1379,"target":270},{"edgeType":"calls","source":1867,"target":1790},{"edgeType":"contains","source":2932,"target":1556},{"edgeType":"imports","source":910,"target":553},{"edgeType":"imports","source":2151,"target":1455},{"edgeType":"imports","source":1523,"target":47},{"edgeType":"contains","source":394,"target":424},{"edgeType":"calls","source":345,"target":348},{"edgeType":"calls","source":2769,"target":5
21},{"edgeType":"calls","source":977,"target":47},{"edgeType":"calls","source":2974,"target":1565},{"edgeType":"contains","source":1675,"target":1677},{"edgeType":"contains","source":1386,"target":1387},{"edgeType":"imports","source":910,"target":24},{"edgeType":"contains","source":279,"target":293},{"edgeType":"contains","source":436,"target":447},{"edgeType":"imports","source":3050,"target":1597},{"edgeType":"contains","source":2770,"target":2875},{"edgeType":"calls","source":2261,"target":938},{"edgeType":"calls","source":418,"target":466},{"edgeType":"imports","source":2725,"target":573},{"edgeType":"contains","source":1789,"target":1839},{"edgeType":"contains","source":3041,"target":2402},{"edgeType":"contains","source":550,"target":1141},{"edgeType":"calls","source":2844,"target":1892},{"edgeType":"imports","source":2962,"target":97},{"edgeType":"contains","source":1704,"target":1831},{"edgeType":"contains","source":130,"target":155},{"edgeType":"contains","source":2982,"target":531},{"edgeType":"calls","source":91,"target":33},{"edgeType":"calls","source":327,"target":116},{"edgeType":"calls","source":1853,"target":641},{"edgeType":"contains","source":394,"target":428},{"edgeType":"calls","source":1170,"target":938},{"edgeType":"contains","source":2607,"target":2643},{"edgeType":"imports","source":1523,"target":46},{"edgeType":"calls","source":2009,"target":33},{"edgeType":"contains","source":883,"target":1124},{"edgeType":"calls","source":621,"target":516},{"edgeType":"contains","source":3072,"target":1969},{"edgeType":"contains","source":550,"target":1669},{"edgeType":"contains","source":2193,"target":2264},{"edgeType":"contains","source":481,"target":637},{"edgeType":"contains","source":2158,"target":2292},{"edgeType":"contains","source":1153,"target":1210},{"edgeType":"calls","source":732,"target":510},{"edgeType":"contains","source":81,"target":89},{"edgeType":"contains","source":1947,"target":1962},{"edgeType":"calls","source":2261,"target":939},{"edgeType":"calls","source":772,"target":327},{"edgeType":"contains","source":2176,"target":941},{"edgeType":"calls","source":2467,"target":2481},{"edgeType":"contains","source":436,"target":446},{"edgeType":"imports","source":2176,"target":1471},{"edgeType":"contains","source":2770,"target":2876},{"edgeType":"calls","source":510,"target":520},{"edgeType":"contains","source":2363,"target":2280},{"edgeType":"calls","source":197,"target":181},{"edgeType":"contains","source":3041,"target":2403},{"edgeType":"contains","source":130,"target":154},{"edgeType":"calls","source":1528,"target":145},{"edgeType":"calls","source":2570,"target":2195},{"edgeType":"calls","source":2981,"target":1081},{"edgeType":"contains","source":394,"target":427},{"edgeType":"calls","source":353,"target":366},{"edgeType":"calls","source":1759,"target":383},{"edgeType":"calls","source":1887,"target":1964},{"edgeType":"contains","source":2725,"target":1892},{"edgeType":"calls","source":1795,"target":588},{"edgeType":"calls","source":2364,"target":1182},{"edgeType":"imports","source":2963,"target":64},{"edgeType":"contains","source":1153,"target":1209},{"edgeType":"calls","source":2096,"target":2091},{"edgeType":"contains","source":2176,"target":942},{"edgeType":"calls","source":2252,"target":1219},{"edgeType":"calls","source":344,"target":117},{"edgeType":"calls","source":800,"target":515},{"edgeType":"contains","source":2742,"target":1101},{"edgeType":"calls","source":2856,"target":200},{"edgeType":"contains","source":7,"target":532},{"edgeType":"contains","source":1
947,"target":1968},{"edgeType":"calls","source":1212,"target":1215},{"edgeType":"calls","source":709,"target":688},{"edgeType":"contains","source":1913,"target":1965},{"edgeType":"contains","source":2608,"target":2618},{"edgeType":"contains","source":3111,"target":3145},{"edgeType":"calls","source":2261,"target":1993},{"edgeType":"contains","source":1533,"target":1589},{"edgeType":"calls","source":270,"target":291},{"edgeType":"contains","source":460,"target":1819},{"edgeType":"contains","source":436,"target":449},{"edgeType":"calls","source":1880,"target":1912},{"edgeType":"contains","source":130,"target":157},{"edgeType":"calls","source":2467,"target":2478},{"edgeType":"contains","source":840,"target":1138},{"edgeType":"contains","source":296,"target":297},{"edgeType":"contains","source":1789,"target":1845},{"edgeType":"calls","source":155,"target":157},{"edgeType":"imports","source":793,"target":1020},{"edgeType":"calls","source":1316,"target":633},{"edgeType":"calls","source":1633,"target":1641},{"edgeType":"contains","source":481,"target":639},{"edgeType":"contains","source":460,"target":2347},{"edgeType":"contains","source":1799,"target":1800},{"edgeType":"contains","source":2301,"target":2357},{"edgeType":"calls","source":510,"target":515},{"edgeType":"calls","source":490,"target":78},{"edgeType":"contains","source":8,"target":240},{"edgeType":"contains","source":2193,"target":2270},{"edgeType":"imports","source":3050,"target":818},{"edgeType":"contains","source":2608,"target":2619},{"edgeType":"contains","source":2886,"target":79},{"edgeType":"contains","source":1533,"target":1590},{"edgeType":"calls","source":1412,"target":1092},{"edgeType":"contains","source":300,"target":436},{"edgeType":"contains","source":3046,"target":3047},{"edgeType":"contains","source":2193,"target":2535},{"edgeType":"contains","source":436,"target":448},{"edgeType":"contains","source":585,"target":586},{"edgeType":"contains","source":550,"target":614},{"edgeType":"calls","source":2246,"target":2195},{"edgeType":"calls","source":631,"target":198},{"edgeType":"contains","source":840,"target":1137},{"edgeType":"calls","source":1698,"target":418},{"edgeType":"calls","source":617,"target":632},{"edgeType":"calls","source":2981,"target":1079},{"edgeType":"calls","source":131,"target":107},{"edgeType":"contains","source":2022,"target":2023},{"edgeType":"contains","source":460,"target":2346},{"edgeType":"calls","source":1526,"target":200},{"edgeType":"contains","source":2932,"target":1031},{"edgeType":"calls","source":1182,"target":558},{"edgeType":"calls","source":1220,"target":1230},{"edgeType":"imports","source":3050,"target":24},{"edgeType":"imports","source":3050,"target":817},{"edgeType":"contains","source":470,"target":715},{"edgeType":"calls","source":48,"target":38},{"edgeType":"contains","source":2608,"target":2616},{"edgeType":"calls","source":1739,"target":1793},{"edgeType":"contains","source":550,"target":1938},{"edgeType":"contains","source":2932,"target":1557},{"edgeType":"calls","source":621,"target":511},{"edgeType":"contains","source":481,"target":642},{"edgeType":"imports","source":6,"target":46},{"edgeType":"contains","source":1533,"target":1587},{"edgeType":"calls","source":2766,"target":1663},{"edgeType":"contains","source":2187,"target":2190},{"edgeType":"calls","source":1759,"target":908},{"edgeType":"calls","source":376,"target":443},{"edgeType":"imports","source":793,"target":1018},{"edgeType":"calls","source":1343,"target":327},{"edgeType":"contains","source":2193,"target":2268},{"edgeTy
pe":"calls","source":2844,"target":1888},{"edgeType":"contains","source":1153,"target":1214},{"edgeType":"contains","source":1386,"target":1388},{"edgeType":"imports","source":2191,"target":491},{"edgeType":"contains","source":2982,"target":1856},{"edgeType":"calls","source":851,"target":251},{"edgeType":"contains","source":2608,"target":2617},{"edgeType":"contains","source":3041,"target":2407},{"edgeType":"contains","source":994,"target":1122},{"edgeType":"calls","source":1435,"target":1438},{"edgeType":"contains","source":436,"target":450},{"edgeType":"calls","source":1597,"target":116},{"edgeType":"imports","source":6,"target":47},{"edgeType":"calls","source":1881,"target":1882},{"edgeType":"contains","source":460,"target":2349},{"edgeType":"contains","source":840,"target":1139},{"edgeType":"contains","source":481,"target":640},{"edgeType":"calls","source":1220,"target":1232},{"edgeType":"calls","source":2011,"target":494},{"edgeType":"imports","source":2725,"target":584},{"edgeType":"contains","source":1381,"target":227},{"edgeType":"calls","source":2319,"target":2305},{"edgeType":"calls","source":931,"target":937},{"edgeType":"contains","source":1211,"target":1533},{"edgeType":"contains","source":2886,"target":82},{"edgeType":"contains","source":1904,"target":3041},{"edgeType":"contains","source":3111,"target":3149},{"edgeType":"calls","source":807,"target":817},{"edgeType":"calls","source":1749,"target":1741},{"edgeType":"contains","source":1533,"target":1593},{"edgeType":"contains","source":481,"target":644},{"edgeType":"imports","source":3050,"target":550},{"edgeType":"contains","source":64,"target":93},{"edgeType":"contains","source":1947,"target":1972},{"edgeType":"calls","source":2695,"target":163},{"edgeType":"contains","source":883,"target":1130},{"edgeType":"imports","source":2852,"target":81},{"edgeType":"calls","source":1879,"target":353},{"edgeType":"contains","source":1989,"target":1992},{"edgeType":"contains","source":2151,"target":2255},{"edgeType":"calls","source":2046,"target":461},{"edgeType":"calls","source":2683,"target":799},{"edgeType":"calls","source":543,"target":545},{"edgeType":"contains","source":7,"target":538},{"edgeType":"calls","source":1127,"target":938},{"edgeType":"contains","source":550,"target":1939},{"edgeType":"contains","source":2608,"target":2623},{"edgeType":"contains","source":3111,"target":3150},{"edgeType":"contains","source":2069,"target":2155},{"edgeType":"contains","source":1533,"target":1594},{"edgeType":"calls","source":2690,"target":2697},{"edgeType":"imports","source":3041,"target":1885},{"edgeType":"contains","source":481,"target":643},{"edgeType":"contains","source":460,"target":2351},{"edgeType":"contains","source":319,"target":379},{"edgeType":"contains","source":1799,"target":1805},{"edgeType":"contains","source":470,"target":719},{"edgeType":"calls","source":567,"target":593},{"edgeType":"contains","source":1211,"target":1799},{"edgeType":"calls","source":1811,"target":346},{"edgeType":"imports","source":2886,"target":83},{"edgeType":"imports","source":263,"target":270},{"edgeType":"contains","source":460,"target":1822},{"edgeType":"imports","source":2725,"target":582},{"edgeType":"contains","source":2880,"target":2911},{"edgeType":"calls","source":2035,"target":1858},{"edgeType":"contains","source":279,"target":298},{"edgeType":"contains","source":550,"target":1146},{"edgeType":"calls","source":832,"target":834},{"edgeType":"contains","source":2069,"target":2152},{"edgeType":"contains","source":1947,"target":1970},{"edgeType":"c
alls","source":1345,"target":1318},{"edgeType":"contains","source":3111,"target":3147},{"edgeType":"calls","source":594,"target":551},{"edgeType":"contains","source":460,"target":2354},{"edgeType":"contains","source":1982,"target":1942},{"edgeType":"calls","source":849,"target":838},{"edgeType":"contains","source":2501,"target":2502},{"edgeType":"contains","source":64,"target":95},{"edgeType":"contains","source":481,"target":646},{"edgeType":"contains","source":460,"target":1297},{"edgeType":"imports","source":14,"target":58},{"edgeType":"calls","source":1077,"target":905},{"edgeType":"calls","source":2697,"target":99},{"edgeType":"contains","source":1799,"target":1802},{"edgeType":"contains","source":840,"target":1144},{"edgeType":"contains","source":550,"target":1413},{"edgeType":"contains","source":470,"target":722},{"edgeType":"calls","source":2599,"target":2080},{"edgeType":"contains","source":1631,"target":1989},{"edgeType":"calls","source":1220,"target":1229},{"edgeType":"contains","source":1533,"target":1592},{"edgeType":"contains","source":671,"target":40},{"edgeType":"contains","source":2193,"target":2273},{"edgeType":"contains","source":3111,"target":3148},{"edgeType":"calls","source":1245,"target":1246},{"edgeType":"calls","source":807,"target":818},{"edgeType":"contains","source":2069,"target":2153},{"edgeType":"contains","source":2468,"target":2469},{"edgeType":"calls","source":2046,"target":2048},{"edgeType":"contains","source":1947,"target":1971},{"edgeType":"calls","source":1829,"target":1640},{"edgeType":"contains","source":481,"target":645},{"edgeType":"contains","source":460,"target":2353},{"edgeType":"calls","source":898,"target":376},{"edgeType":"calls","source":1640,"target":1685},{"edgeType":"calls","source":1576,"target":1555},{"edgeType":"contains","source":1956,"target":1957},{"edgeType":"contains","source":1989,"target":1991},{"edgeType":"calls","source":986,"target":1083},{"edgeType":"imports","source":1682,"target":1731},{"edgeType":"calls","source":635,"target":601},{"edgeType":"calls","source":799,"target":802},{"edgeType":"contains","source":81,"target":96},{"edgeType":"contains","source":2151,"target":2490},{"edgeType":"imports","source":1682,"target":2043},{"edgeType":"contains","source":2343,"target":2352},{"edgeType":"contains","source":2069,"target":2125},{"edgeType":"contains","source":2962,"target":604},{"edgeType":"calls","source":2416,"target":2300},{"edgeType":"contains","source":550,"target":2175},{"edgeType":"contains","source":549,"target":2206},{"edgeType":"calls","source":1547,"target":698},{"edgeType":"calls","source":2462,"target":2460},{"edgeType":"contains","source":97,"target":99},{"edgeType":"contains","source":4,"target":2982},{"edgeType":"contains","source":881,"target":899},{"edgeType":"contains","source":550,"target":1911},{"edgeType":"calls","source":2213,"target":930},{"edgeType":"contains","source":2704,"target":1731},{"edgeType":"calls","source":579,"target":581},{"edgeType":"calls","source":1152,"target":523},{"edgeType":"contains","source":1344,"target":1346},{"edgeType":"contains","source":2245,"target":2483},{"edgeType":"imports","source":1682,"target":2042},{"edgeType":"calls","source":635,"target":165},{"edgeType":"contains","source":1153,"target":1187},{"edgeType":"contains","source":2193,"target":2246},{"edgeType":"calls","source":1096,"target":937},{"edgeType":"contains","source":2880,"target":2883},{"edgeType":"contains","source":994,"target":1095},{"edgeType":"imports","source":2963,"target":913},{"edgeType":"calls","s
ource":108,"target":118},{"edgeType":"calls","source":1832,"target":1640},{"edgeType":"contains","source":2692,"target":519},{"edgeType":"contains","source":2069,"target":2126},{"edgeType":"contains","source":2962,"target":605},{"edgeType":"calls","source":466,"target":383},{"edgeType":"contains","source":2761,"target":2608},{"edgeType":"calls","source":1925,"target":1928},{"edgeType":"contains","source":97,"target":98},{"edgeType":"contains","source":549,"target":2205},{"edgeType":"calls","source":1113,"target":938},{"edgeType":"imports","source":3072,"target":705},{"edgeType":"contains","source":1600,"target":1603},{"edgeType":"calls","source":2660,"target":2665},{"edgeType":"contains","source":1935,"target":731},{"edgeType":"calls","source":1810,"target":1793},{"edgeType":"contains","source":883,"target":1100},{"edgeType":"calls","source":1437,"target":936},{"edgeType":"contains","source":3061,"target":3085},{"edgeType":"calls","source":579,"target":580},{"edgeType":"contains","source":1277,"target":1310},{"edgeType":"calls","source":2266,"target":2195},{"edgeType":"contains","source":2069,"target":2123},{"edgeType":"contains","source":2401,"target":2666},{"edgeType":"imports","source":3111,"target":24},{"edgeType":"contains","source":910,"target":795},{"edgeType":"contains","source":1268,"target":1321},{"edgeType":"contains","source":1030,"target":1039},{"edgeType":"contains","source":7,"target":512},{"edgeType":"contains","source":2962,"target":602},{"edgeType":"calls","source":604,"target":601},{"edgeType":"imports","source":8,"target":26},{"edgeType":"contains","source":64,"target":67},{"edgeType":"contains","source":550,"target":2177},{"edgeType":"calls","source":993,"target":698},{"edgeType":"contains","source":1704,"target":1811},{"edgeType":"contains","source":2176,"target":2242},{"edgeType":"calls","source":579,"target":583},{"edgeType":"calls","source":1413,"target":1419},{"edgeType":"contains","source":4,"target":2191},{"edgeType":"contains","source":1541,"target":1579},{"edgeType":"contains","source":460,"target":2324},{"edgeType":"contains","source":548,"target":2767},{"edgeType":"calls","source":2161,"target":1219},{"edgeType":"contains","source":2151,"target":2489},{"edgeType":"contains","source":2069,"target":2124},{"edgeType":"contains","source":2256,"target":2405},{"edgeType":"contains","source":1905,"target":1923},{"edgeType":"calls","source":266,"target":243},{"edgeType":"contains","source":10,"target":2797},{"edgeType":"contains","source":3061,"target":3083},{"edgeType":"contains","source":64,"target":66},{"edgeType":"contains","source":549,"target":2207},{"edgeType":"contains","source":300,"target":2791},{"edgeType":"contains","source":460,"target":1795},{"edgeType":"contains","source":458,"target":1857},{"edgeType":"contains","source":2245,"target":2482},{"edgeType":"contains","source":458,"target":2389},{"edgeType":"calls","source":981,"target":799},{"edgeType":"imports","source":1682,"target":1246},{"edgeType":"calls","source":818,"target":831},{"edgeType":"contains","source":550,"target":594},{"edgeType":"contains","source":2962,"target":608},{"edgeType":"contains","source":673,"target":481},{"edgeType":"contains","source":1030,"target":1041},{"edgeType":"contains","source":2193,"target":2249},{"edgeType":"calls","source":1958,"target":1959},{"edgeType":"calls","source":443,"target":36},{"edgeType":"calls","source":1488,"target":938},{"edgeType":"calls","source":3064,"target":970},{"edgeType":"calls","source":598,"target":516},{"edgeType":"contains","source":311
1,"target":3124},{"edgeType":"contains","source":2558,"target":2562},{"edgeType":"contains","source":2069,"target":2129},{"edgeType":"imports","source":1682,"target":2039},{"edgeType":"contains","source":2193,"target":2514},{"edgeType":"calls","source":2437,"target":2438},{"edgeType":"contains","source":2772,"target":2798},{"edgeType":"contains","source":64,"target":69},{"edgeType":"imports","source":3111,"target":550},{"edgeType":"calls","source":2981,"target":901},{"edgeType":"calls","source":3100,"target":383},{"edgeType":"calls","source":2041,"target":2028},{"edgeType":"calls","source":411,"target":234},{"edgeType":"calls","source":564,"target":512},{"edgeType":"calls","source":2198,"target":2184},{"edgeType":"contains","source":2245,"target":2488},{"edgeType":"calls","source":2668,"target":2677},{"edgeType":"contains","source":1030,"target":1040},{"edgeType":"contains","source":2151,"target":2495},{"edgeType":"contains","source":2932,"target":1539},{"edgeType":"contains","source":460,"target":1005},{"edgeType":"calls","source":1471,"target":1464},{"edgeType":"calls","source":2317,"target":2195},{"edgeType":"imports","source":840,"target":130},{"edgeType":"contains","source":2069,"target":2130},{"edgeType":"calls","source":400,"target":47},{"edgeType":"contains","source":460,"target":1797},{"edgeType":"contains","source":458,"target":1859},{"edgeType":"calls","source":1489,"target":641},{"edgeType":"contains","source":2770,"target":2861},{"edgeType":"contains","source":64,"target":68},{"edgeType":"contains","source":2661,"target":162},{"edgeType":"calls","source":3082,"target":678},{"edgeType":"contains","source":1658,"target":1659},{"edgeType":"contains","source":1314,"target":1224},{"edgeType":"calls","source":2693,"target":2695},{"edgeType":"contains","source":2245,"target":2485},{"edgeType":"calls","source":2107,"target":2095},{"edgeType":"calls","source":1846,"target":1998},{"edgeType":"calls","source":1864,"target":383},{"edgeType":"calls","source":777,"target":783},{"edgeType":"contains","source":883,"target":1108},{"edgeType":"contains","source":2151,"target":2492},{"edgeType":"contains","source":2847,"target":2850},{"edgeType":"contains","source":3037,"target":3038},{"edgeType":"calls","source":2272,"target":2001},{"edgeType":"calls","source":179,"target":30},{"edgeType":"contains","source":550,"target":2182},{"edgeType":"contains","source":2069,"target":2127},{"edgeType":"calls","source":307,"target":26},{"edgeType":"contains","source":910,"target":535},{"edgeType":"imports","source":3015,"target":353},{"edgeType":"calls","source":987,"target":879},{"edgeType":"imports","source":263,"target":308},{"edgeType":"contains","source":1541,"target":1583},{"edgeType":"imports","source":3050,"target":589},{"edgeType":"calls","source":430,"target":177},{"edgeType":"contains","source":64,"target":71},{"edgeType":"contains","source":550,"target":2181},{"edgeType":"calls","source":1684,"target":1734},{"edgeType":"calls","source":3081,"target":970},{"edgeType":"contains","source":394,"target":411},{"edgeType":"contains","source":3061,"target":3086},{"edgeType":"contains","source":2245,"target":2486},{"edgeType":"imports","source":2962,"target":1731},{"edgeType":"imports","source":1788,"target":1128},{"edgeType":"contains","source":458,"target":2390},{"edgeType":"contains","source":2151,"target":2493},{"edgeType":"contains","source":2158,"target":2276},{"edgeType":"contains","source":470,"target":697},{"edgeType":"contains","source":2558,"target":2561},{"edgeType":"contains","source":2069,"t
arget":2128},{"edgeType":"contains","source":2962,"target":607},{"edgeType":"calls","source":1488,"target":939},{"edgeType":"imports","source":263,"target":310},{"edgeType":"contains","source":3111,"target":3123},{"edgeType":"contains","source":1541,"target":1584},{"edgeType":"calls","source":1402,"target":1226},{"edgeType":"contains","source":881,"target":904},{"edgeType":"calls","source":1666,"target":970},{"edgeType":"contains","source":1789,"target":1824},{"edgeType":"contains","source":64,"target":70},{"edgeType":"imports","source":2444,"target":83},{"edgeType":"contains","source":883,"target":1106},{"edgeType":"contains","source":1344,"target":1349},{"edgeType":"contains","source":394,"target":410},{"edgeType":"contains","source":1277,"target":1312},{"edgeType":"contains","source":1956,"target":1932},{"edgeType":"contains","source":3061,"target":3087},{"edgeType":"calls","source":1349,"target":486},{"edgeType":"calls","source":253,"target":108},{"edgeType":"calls","source":923,"target":743},{"edgeType":"contains","source":844,"target":205},{"edgeType":"contains","source":3015,"target":1876},{"edgeType":"contains","source":549,"target":2215},{"edgeType":"contains","source":1030,"target":1045},{"edgeType":"contains","source":994,"target":1104},{"edgeType":"contains","source":994,"target":47},{"edgeType":"calls","source":1685,"target":1697},{"edgeType":"contains","source":2191,"target":2580},{"edgeType":"contains","source":2770,"target":2864},{"edgeType":"contains","source":64,"target":73},{"edgeType":"contains","source":2725,"target":1881},{"edgeType":"contains","source":881,"target":907},{"edgeType":"contains","source":549,"target":2214},{"edgeType":"contains","source":2770,"target":2866},{"edgeType":"contains","source":2607,"target":2898},{"edgeType":"contains","source":2661,"target":167},{"edgeType":"calls","source":1246,"target":1300},{"edgeType":"contains","source":994,"target":1103},{"edgeType":"contains","source":994,"target":46},{"edgeType":"contains","source":2256,"target":2415},{"edgeType":"calls","source":1685,"target":1696},{"edgeType":"contains","source":2640,"target":2667},{"edgeType":"calls","source":1345,"target":80},{"edgeType":"calls","source":1246,"target":1299},{"edgeType":"contains","source":1947,"target":1953},{"edgeType":"calls","source":75,"target":76},{"edgeType":"contains","source":2932,"target":1543},{"edgeType":"contains","source":1782,"target":1783},{"edgeType":"contains","source":2770,"target":2865},{"edgeType":"contains","source":2962,"target":2991},{"edgeType":"contains","source":64,"target":72},{"edgeType":"contains","source":130,"target":140},{"edgeType":"imports","source":8,"target":37},{"edgeType":"contains","source":881,"target":906},{"edgeType":"contains","source":2725,"target":1882},{"edgeType":"calls","source":1396,"target":1406},{"edgeType":"contains","source":1268,"target":1333},{"edgeType":"calls","source":467,"target":80},{"edgeType":"calls","source":2064,"target":521},{"edgeType":"calls","source":1155,"target":1217},{"edgeType":"calls","source":282,"target":268},{"edgeType":"contains","source":2151,"target":2496},{"edgeType":"calls","source":1640,"target":1773},{"edgeType":"contains","source":279,"target":281},{"edgeType":"contains","source":2256,"target":2412},{"edgeType":"contains","source":1631,"target":118},{"edgeType":"contains","source":549,"target":2217},{"edgeType":"contains","source":1631,"target":1704},{"edgeType":"calls","source":395,"target":201},{"edgeType":"calls","source":1636,"target":47},{"edgeType":"contains","source":2640,"
target":2664},{"edgeType":"contains","source":1030,"target":1047},{"edgeType":"calls","source":2223,"target":2195},{"edgeType":"contains","source":3,"target":2759},{"edgeType":"contains","source":2661,"target":163},{"edgeType":"contains","source":2962,"target":610},{"edgeType":"contains","source":3015,"target":24},{"edgeType":"calls","source":1552,"target":537},{"edgeType":"imports","source":2767,"target":2768},{"edgeType":"calls","source":2734,"target":2738},{"edgeType":"contains","source":1631,"target":119},{"edgeType":"calls","source":529,"target":538},{"edgeType":"contains","source":45,"target":134},{"edgeType":"contains","source":549,"target":2216},{"edgeType":"contains","source":2151,"target":2497},{"edgeType":"contains","source":2607,"target":2896},{"edgeType":"contains","source":994,"target":1105},{"edgeType":"contains","source":1704,"target":1820},{"edgeType":"contains","source":2069,"target":2132},{"edgeType":"contains","source":2982,"target":1841},{"edgeType":"contains","source":2176,"target":930},{"edgeType":"calls","source":1206,"target":1220},{"edgeType":"contains","source":2770,"target":2863},{"edgeType":"calls","source":1473,"target":342},{"edgeType":"calls","source":1974,"target":1988},{"edgeType":"calls","source":1487,"target":1494},{"edgeType":"contains","source":2725,"target":1880},{"edgeType":"contains","source":2962,"target":2989},{"edgeType":"calls","source":1735,"target":1729},{"edgeType":"contains","source":319,"target":364},{"edgeType":"contains","source":1704,"target":1825},{"edgeType":"contains","source":2932,"target":1018},{"edgeType":"imports","source":263,"target":319},{"edgeType":"calls","source":2272,"target":938},{"edgeType":"calls","source":1926,"target":36},{"edgeType":"contains","source":436,"target":437},{"edgeType":"contains","source":2770,"target":2869},{"edgeType":"calls","source":1710,"target":1711},{"edgeType":"contains","source":2158,"target":2285},{"edgeType":"contains","source":2932,"target":1546},{"edgeType":"contains","source":2158,"target":2286},{"edgeType":"contains","source":2725,"target":1885},{"edgeType":"contains","source":2853,"target":2938},{"edgeType":"calls","source":1395,"target":641},{"edgeType":"contains","source":2636,"target":2794},{"edgeType":"calls","source":463,"target":465},{"edgeType":"contains","source":1401,"target":1441},{"edgeType":"contains","source":1631,"target":1710},{"edgeType":"imports","source":8,"target":32},{"edgeType":"contains","source":2932,"target":1019},{"edgeType":"imports","source":2636,"target":1541},{"edgeType":"contains","source":1704,"target":1826},{"edgeType":"imports","source":1682,"target":2046},{"edgeType":"calls","source":3147,"target":3147},{"edgeType":"imports","source":2852,"target":130},{"edgeType":"calls","source":2272,"target":939},{"edgeType":"contains","source":1631,"target":1711},{"edgeType":"calls","source":1553,"target":1556},{"edgeType":"contains","source":2770,"target":2870},{"edgeType":"contains","source":1153,"target":1199},{"edgeType":"contains","source":1030,"target":1048},{"edgeType":"contains","source":2158,"target":2287},{"edgeType":"contains","source":14,"target":305},{"edgeType":"contains","source":2853,"target":2939},{"edgeType":"contains","source":487,"target":704},{"edgeType":"contains","source":2193,"target":2259},{"edgeType":"contains","source":89,"target":94},{"edgeType":"calls","source":1684,"target":1723},{"edgeType":"contains","source":2256,"target":2420},{"edgeType":"contains","source":550,"target":1922},{"edgeType":"contains","source":2636,"target":2795},{"edgeTy
pe":"contains","source":2640,"target":2671},{"edgeType":"contains","source":2069,"target":2139},{"edgeType":"imports","source":8,"target":33},{"edgeType":"calls","source":1637,"target":537},{"edgeType":"contains","source":2016,"target":1932},{"edgeType":"contains","source":394,"target":416},{"edgeType":"calls","source":304,"target":108},{"edgeType":"contains","source":130,"target":144},{"edgeType":"imports","source":2900,"target":1020},{"edgeType":"calls","source":1198,"target":1201},{"edgeType":"contains","source":130,"target":147},{"edgeType":"contains","source":1989,"target":123},{"edgeType":"imports","source":2900,"target":1284},{"edgeType":"calls","source":1394,"target":939},{"edgeType":"contains","source":550,"target":1926},{"edgeType":"contains","source":2770,"target":2867},{"edgeType":"imports","source":487,"target":508},{"edgeType":"calls","source":1549,"target":1419},{"edgeType":"contains","source":1704,"target":1823},{"edgeType":"imports","source":1682,"target":460},{"edgeType":"contains","source":1,"target":711},{"edgeType":"contains","source":1782,"target":1784},{"edgeType":"calls","source":2002,"target":60},{"edgeType":"calls","source":2571,"target":2502},{"edgeType":"contains","source":883,"target":58},{"edgeType":"contains","source":1,"target":2295},{"edgeType":"calls","source":1394,"target":938},{"edgeType":"calls","source":512,"target":532},{"edgeType":"calls","source":1710,"target":1712},{"edgeType":"contains","source":2770,"target":2868},{"edgeType":"contains","source":2847,"target":2859},{"edgeType":"contains","source":458,"target":1870},{"edgeType":"contains","source":1631,"target":1973},{"edgeType":"contains","source":2158,"target":2284},{"edgeType":"contains","source":1913,"target":1952},{"edgeType":"contains","source":2932,"target":1545},{"edgeType":"contains","source":1782,"target":1785},{"edgeType":"imports","source":910,"target":80},{"edgeType":"contains","source":2640,"target":2669},{"edgeType":"calls","source":615,"target":511},{"edgeType":"contains","source":2962,"target":2993},{"edgeType":"calls","source":2648,"target":116},{"edgeType":"imports","source":2935,"target":990},{"edgeType":"calls","source":774,"target":366},{"edgeType":"calls","source":1455,"target":1453},{"edgeType":"contains","source":2176,"target":2227},{"edgeType":"calls","source":1546,"target":1539},{"edgeType":"contains","source":914,"target":917},{"edgeType":"calls","source":802,"target":819},{"edgeType":"contains","source":1541,"target":1564},{"edgeType":"calls","source":749,"target":348},{"edgeType":"calls","source":2430,"target":2411},{"edgeType":"contains","source":1929,"target":1956},{"edgeType":"contains","source":261,"target":547},{"edgeType":"calls","source":2578,"target":2580},{"edgeType":"contains","source":487,"target":676},{"edgeType":"contains","source":10,"target":2515},{"edgeType":"calls","source":1868,"target":541},{"edgeType":"calls","source":2064,"target":543},{"edgeType":"calls","source":629,"target":633},{"edgeType":"contains","source":2069,"target":2109},{"edgeType":"calls","source":535,"target":376},{"edgeType":"imports","source":2401,"target":2464},{"edgeType":"contains","source":1905,"target":2172},{"edgeType":"calls","source":1891,"target":1942},{"edgeType":"contains","source":3119,"target":3121},{"edgeType":"contains","source":2176,"target":2228},{"edgeType":"contains","source":914,"target":916},{"edgeType":"calls","source":1501,"target":819},{"edgeType":"imports","source":2761,"target":24},{"edgeType":"calls","source":2257,"target":376},{"edgeType":"contains","sou
rce":2932,"target":990},{"edgeType":"contains","source":8,"target":197},{"edgeType":"calls","source":117,"target":120},{"edgeType":"contains","source":792,"target":1262},{"edgeType":"calls","source":282,"target":26},{"edgeType":"contains","source":1434,"target":1447},{"edgeType":"imports","source":2725,"target":1932},{"edgeType":"contains","source":1523,"target":1595},{"edgeType":"contains","source":261,"target":546},{"edgeType":"contains","source":2151,"target":2475},{"edgeType":"contains","source":460,"target":1512},{"edgeType":"contains","source":994,"target":1078},{"edgeType":"contains","source":2982,"target":3139},{"edgeType":"calls","source":819,"target":820},{"edgeType":"contains","source":2069,"target":2110},{"edgeType":"calls","source":1677,"target":383},{"edgeType":"contains","source":228,"target":248},{"edgeType":"calls","source":1198,"target":698},{"edgeType":"calls","source":1121,"target":710},{"edgeType":"contains","source":914,"target":919},{"edgeType":"contains","source":460,"target":987},{"edgeType":"contains","source":2929,"target":2930},{"edgeType":"calls","source":2033,"target":2035},{"edgeType":"imports","source":3041,"target":1913},{"edgeType":"contains","source":2582,"target":2588},{"edgeType":"imports","source":3072,"target":2009},{"edgeType":"contains","source":3041,"target":779},{"edgeType":"contains","source":550,"target":575},{"edgeType":"imports","source":3111,"target":1064},{"edgeType":"contains","source":1495,"target":1667},{"edgeType":"contains","source":10,"target":2517},{"edgeType":"contains","source":14,"target":279},{"edgeType":"contains","source":2598,"target":2620},{"edgeType":"imports","source":2725,"target":1931},{"edgeType":"calls","source":1250,"target":939},{"edgeType":"contains","source":793,"target":1234},{"edgeType":"contains","source":914,"target":918},{"edgeType":"contains","source":2995,"target":2999},{"edgeType":"calls","source":1141,"target":1146},{"edgeType":"contains","source":2176,"target":2226},{"edgeType":"contains","source":881,"target":884},{"edgeType":"calls","source":324,"target":311},{"edgeType":"calls","source":1447,"target":1438},{"edgeType":"contains","source":2704,"target":1714},{"edgeType":"calls","source":93,"target":73},{"edgeType":"calls","source":1967,"target":116},{"edgeType":"contains","source":1211,"target":1224},{"edgeType":"calls","source":1843,"target":1846},{"edgeType":"contains","source":4,"target":1381},{"edgeType":"calls","source":466,"target":402},{"edgeType":"contains","source":550,"target":574},{"edgeType":"contains","source":2069,"target":2108},{"edgeType":"contains","source":2886,"target":35},{"edgeType":"calls","source":1684,"target":1754},{"edgeType":"contains","source":2911,"target":2960},{"edgeType":"contains","source":1401,"target":1411},{"edgeType":"contains","source":1153,"target":1436},{"edgeType":"contains","source":2598,"target":2621},{"edgeType":"contains","source":460,"target":1250},{"edgeType":"calls","source":1250,"target":938},{"edgeType":"contains","source":2982,"target":3137},{"edgeType":"imports","source":2245,"target":1219},{"edgeType":"contains","source":296,"target":2902},{"edgeType":"contains","source":914,"target":921},{"edgeType":"contains","source":881,"target":887},{"edgeType":"calls","source":1339,"target":1081},{"edgeType":"contains","source":550,"target":2163},{"edgeType":"contains","source":2982,"target":3143},{"edgeType":"imports","source":1682,"target":1758},{"edgeType":"contains","source":2176,"target":2231},{"edgeType":"contains","source":1495,"target":1673},{"edgeType":"ca
lls","source":2325,"target":641},{"edgeType":"calls","source":1689,"target":537},{"edgeType":"contains","source":1434,"target":1450},{"edgeType":"contains","source":261,"target":552},{"edgeType":"contains","source":4,"target":2176},{"edgeType":"calls","source":2213,"target":942},{"edgeType":"calls","source":1686,"target":1687},{"edgeType":"calls","source":2074,"target":2080},{"edgeType":"contains","source":460,"target":2310},{"edgeType":"imports","source":2725,"target":79},{"edgeType":"contains","source":2245,"target":2471},{"edgeType":"imports","source":1523,"target":81},{"edgeType":"calls","source":1215,"target":1225},{"edgeType":"calls","source":2845,"target":641},{"edgeType":"contains","source":793,"target":1236},{"edgeType":"contains","source":2069,"target":2113},{"edgeType":"calls","source":1083,"target":1088},{"edgeType":"contains","source":914,"target":920},{"edgeType":"contains","source":550,"target":2162},{"edgeType":"calls","source":1613,"target":1571},{"edgeType":"calls","source":622,"target":52},{"edgeType":"calls","source":604,"target":610},{"edgeType":"contains","source":2982,"target":3144},{"edgeType":"contains","source":2541,"target":2545},{"edgeType":"contains","source":2176,"target":2232},{"edgeType":"calls","source":2276,"target":2161},{"edgeType":"calls","source":1504,"target":986},{"edgeType":"imports","source":2299,"target":1128},{"edgeType":"contains","source":4,"target":326},{"edgeType":"contains","source":1334,"target":1380},{"edgeType":"contains","source":550,"target":1633},{"edgeType":"contains","source":460,"target":2309},{"edgeType":"contains","source":2245,"target":2472},{"edgeType":"calls","source":1335,"target":939},{"edgeType":"calls","source":1327,"target":1187},{"edgeType":"calls","source":1694,"target":1702},{"edgeType":"calls","source":1722,"target":834},{"edgeType":"imports","source":3072,"target":684},{"edgeType":"calls","source":1619,"target":327},{"edgeType":"calls","source":3090,"target":3089},{"edgeType":"calls","source":2308,"target":641},{"edgeType":"calls","source":2710,"target":2713},{"edgeType":"contains","source":460,"target":988},{"edgeType":"contains","source":994,"target":1082},{"edgeType":"contains","source":793,"target":1235},{"edgeType":"calls","source":2314,"target":2305},{"edgeType":"contains","source":4,"target":2704},{"edgeType":"contains","source":1905,"target":1913},{"edgeType":"imports","source":1262,"target":1565},{"edgeType":"contains","source":3119,"target":3122},{"edgeType":"calls","source":2157,"target":33},{"edgeType":"imports","source":3015,"target":1658},{"edgeType":"contains","source":2176,"target":2229},{"edgeType":"contains","source":910,"target":783},{"edgeType":"contains","source":1631,"target":1947},{"edgeType":"imports","source":910,"target":319},{"edgeType":"contains","source":394,"target":395},{"edgeType":"calls","source":2343,"target":2195},{"edgeType":"contains","source":881,"target":889},{"edgeType":"calls","source":2213,"target":940},{"edgeType":"calls","source":2727,"target":2711},{"edgeType":"contains","source":1401,"target":1414},{"edgeType":"contains","source":2151,"target":2476},{"edgeType":"calls","source":2416,"target":725},{"edgeType":"contains","source":460,"target":2312},{"edgeType":"contains","source":1268,"target":1309},{"edgeType":"calls","source":1182,"target":1193},{"edgeType":"contains","source":470,"target":681},{"edgeType":"contains","source":1905,"target":2174},{"edgeType":"contains","source":1153,"target":1177},{"edgeType":"calls","source":1891,"target":1940},{"edgeType":"calls","source":
2258,"target":2188},{"edgeType":"contains","source":2069,"target":2111},{"edgeType":"contains","source":319,"target":341},{"edgeType":"imports","source":2245,"target":1216},{"edgeType":"contains","source":7,"target":500},{"edgeType":"contains","source":3108,"target":2407},{"edgeType":"calls","source":486,"target":306},{"edgeType":"calls","source":1396,"target":1429},{"edgeType":"imports","source":1262,"target":1562},{"edgeType":"calls","source":2930,"target":383},{"edgeType":"contains","source":2982,"target":3142},{"edgeType":"contains","source":1789,"target":1807},{"edgeType":"contains","source":2176,"target":2230},{"edgeType":"contains","source":2886,"target":40},{"edgeType":"calls","source":2213,"target":941},{"edgeType":"contains","source":2995,"target":3003},{"edgeType":"calls","source":547,"target":529},{"edgeType":"contains","source":460,"target":2311},{"edgeType":"contains","source":2911,"target":2964},{"edgeType":"contains","source":1153,"target":1176},{"edgeType":"calls","source":2064,"target":540},{"edgeType":"imports","source":2935,"target":1494},{"edgeType":"contains","source":319,"target":340},{"edgeType":"calls","source":1182,"target":1192},{"edgeType":"contains","source":2982,"target":3141},{"edgeType":"contains","source":793,"target":1237},{"edgeType":"calls","source":2326,"target":2195},{"edgeType":"calls","source":547,"target":524},{"edgeType":"calls","source":835,"target":52},{"edgeType":"calls","source":1694,"target":1699},{"edgeType":"contains","source":1658,"target":1645},{"edgeType":"contains","source":2176,"target":2235},{"edgeType":"contains","source":1631,"target":368},{"edgeType":"contains","source":2401,"target":2660},{"edgeType":"contains","source":2929,"target":2940},{"edgeType":"imports","source":6,"target":81},{"edgeType":"imports","source":3072,"target":698},{"edgeType":"contains","source":926,"target":553},{"edgeType":"imports","source":729,"target":130},{"edgeType":"calls","source":1213,"target":754},{"edgeType":"calls","source":2074,"target":2076},{"edgeType":"calls","source":1677,"target":376},{"edgeType":"contains","source":319,"target":343},{"edgeType":"calls","source":1859,"target":1869},{"edgeType":"contains","source":550,"target":2167},{"edgeType":"contains","source":549,"target":2198},{"edgeType":"calls","source":1091,"target":1101},{"edgeType":"calls","source":604,"target":607},{"edgeType":"contains","source":2661,"target":149},{"edgeType":"contains","source":460,"target":993},{"edgeType":"calls","source":2258,"target":2186},{"edgeType":"contains","source":2069,"target":2118},{"edgeType":"calls","source":1438,"target":1442},{"edgeType":"imports","source":3015,"target":1143},{"edgeType":"calls","source":1515,"target":641},{"edgeType":"calls","source":1520,"target":1543},{"edgeType":"calls","source":1633,"target":418},{"edgeType":"contains","source":1495,"target":1678},{"edgeType":"contains","source":881,"target":890},{"edgeType":"contains","source":2176,"target":2236},{"edgeType":"contains","source":914,"target":924},{"edgeType":"calls","source":943,"target":931},{"edgeType":"contains","source":3,"target":1153},{"edgeType":"calls","source":489,"target":471},{"edgeType":"calls","source":1512,"target":1526},{"edgeType":"calls","source":759,"target":558},{"edgeType":"contains","source":460,"target":2313},{"edgeType":"calls","source":2099,"target":2095},{"edgeType":"contains","source":550,"target":2166},{"edgeType":"contains","source":549,"target":2197},{"edgeType":"calls","source":604,"target":606},{"edgeType":"calls","source":1718,"target":1482},{"e
dgeType":"imports","source":2444,"target":1931},{"edgeType":"calls","source":1822,"target":376},{"edgeType":"contains","source":2640,"target":2648},{"edgeType":"contains","source":2193,"target":2499},{"edgeType":"contains","source":2641,"target":767},{"edgeType":"contains","source":2176,"target":2233},{"edgeType":"contains","source":914,"target":927},{"edgeType":"calls","source":1520,"target":1546},{"edgeType":"contains","source":2995,"target":3006},{"edgeType":"contains","source":2401,"target":2658},{"edgeType":"contains","source":3055,"target":89},{"edgeType":"contains","source":1268,"target":1313},{"edgeType":"calls","source":2247,"target":939},{"edgeType":"contains","source":2245,"target":2473},{"edgeType":"contains","source":1330,"target":2298},{"edgeType":"calls","source":1335,"target":938},{"edgeType":"calls","source":1759,"target":743},{"edgeType":"contains","source":2151,"target":2480},{"edgeType":"contains","source":550,"target":2169},{"edgeType":"contains","source":549,"target":2200},{"edgeType":"imports","source":2444,"target":80},{"edgeType":"calls","source":984,"target":985},{"edgeType":"imports","source":1600,"target":83},{"edgeType":"calls","source":1839,"target":641},{"edgeType":"calls","source":579,"target":590},{"edgeType":"contains","source":2770,"target":2847},{"edgeType":"calls","source":1240,"target":1240},{"edgeType":"contains","source":2962,"target":595},{"edgeType":"calls","source":744,"target":760},{"edgeType":"contains","source":460,"target":2316},{"edgeType":"contains","source":2176,"target":2234},{"edgeType":"calls","source":1520,"target":1545},{"edgeType":"contains","source":3119,"target":3127},{"edgeType":"calls","source":1166,"target":1156},{"edgeType":"calls","source":1961,"target":558},{"edgeType":"calls","source":1786,"target":698},{"edgeType":"contains","source":1495,"target":1676},{"edgeType":"calls","source":1319,"target":641},{"edgeType":"contains","source":261,"target":292},{"edgeType":"imports","source":487,"target":496},{"edgeType":"contains","source":319,"target":344},{"edgeType":"calls","source":512,"target":25},{"edgeType":"contains","source":460,"target":2315},{"edgeType":"contains","source":2245,"target":2474},{"edgeType":"calls","source":604,"target":608},{"edgeType":"contains","source":550,"target":2168},{"edgeType":"contains","source":1093,"target":926},{"edgeType":"imports","source":3015,"target":83},{"edgeType":"imports","source":2444,"target":79},{"edgeType":"contains","source":2069,"target":2116},{"edgeType":"calls","source":1215,"target":1222},{"edgeType":"contains","source":460,"target":2051},{"edgeType":"imports","source":3050,"target":319},{"edgeType":"contains","source":1030,"target":1033},{"edgeType":"contains","source":487,"target":689},{"edgeType":"contains","source":2853,"target":2922},{"edgeType":"calls","source":2693,"target":2702},{"edgeType":"contains","source":2069,"target":2121},{"edgeType":"calls","source":1727,"target":1729},{"edgeType":"imports","source":7,"target":46},{"edgeType":"calls","source":292,"target":497},{"edgeType":"contains","source":2176,"target":2239},{"edgeType":"contains","source":2176,"target":2240},{"edgeType":"contains","source":883,"target":1097},{"edgeType":"calls","source":852,"target":843},{"edgeType":"contains","source":2962,"target":2978},{"edgeType":"calls","source":1520,"target":1539},{"edgeType":"calls","source":311,"target":173},{"edgeType":"imports","source":487,"target":493},{"edgeType":"imports","source":2607,"target":46},{"edgeType":"contains","source":549,"target":2202},{"edgeType":"c
ontains","source":1541,"target":1577},{"edgeType":"contains","source":7,"target":505},{"edgeType":"calls","source":902,"target":877},{"edgeType":"contains","source":1030,"target":1032},{"edgeType":"contains","source":840,"target":844},{"edgeType":"contains","source":487,"target":688},{"edgeType":"contains","source":2401,"target":2665},{"edgeType":"calls","source":1248,"target":986},{"edgeType":"calls","source":2247,"target":938},{"edgeType":"contains","source":2176,"target":2241},{"edgeType":"calls","source":1627,"target":1128},{"edgeType":"contains","source":470,"target":686},{"edgeType":"contains","source":1662,"target":734},{"edgeType":"contains","source":2704,"target":1728},{"edgeType":"contains","source":549,"target":2201},{"edgeType":"contains","source":1541,"target":1578},{"edgeType":"contains","source":1153,"target":1186},{"edgeType":"imports","source":3038,"target":1746},{"edgeType":"calls","source":270,"target":124},{"edgeType":"contains","source":1495,"target":1679},{"edgeType":"calls","source":1050,"target":1049},{"edgeType":"calls","source":108,"target":125},{"edgeType":"contains","source":1030,"target":1035},{"edgeType":"contains","source":2704,"target":1725},{"edgeType":"contains","source":2995,"target":3010},{"edgeType":"contains","source":2176,"target":2237},{"edgeType":"contains","source":910,"target":791},{"edgeType":"calls","source":1722,"target":36},{"edgeType":"contains","source":881,"target":897},{"edgeType":"calls","source":3083,"target":2502},{"edgeType":"contains","source":4,"target":2187},{"edgeType":"calls","source":2862,"target":368},{"edgeType":"contains","source":2151,"target":2484},{"edgeType":"calls","source":604,"target":605},{"edgeType":"contains","source":460,"target":1792},{"edgeType":"contains","source":550,"target":2173},{"edgeType":"contains","source":549,"target":2204},{"edgeType":"contains","source":487,"target":690},{"edgeType":"contains","source":460,"target":470},{"edgeType":"contains","source":994,"target":36},{"edgeType":"contains","source":1030,"target":1034},{"edgeType":"calls","source":1554,"target":1544},{"edgeType":"contains","source":550,"target":1908},{"edgeType":"calls","source":75,"target":90},{"edgeType":"calls","source":2249,"target":2195},{"edgeType":"calls","source":364,"target":116},{"edgeType":"contains","source":2500,"target":2501},{"edgeType":"calls","source":624,"target":512},{"edgeType":"contains","source":2176,"target":2238},{"edgeType":"imports","source":3050,"target":2958},{"edgeType":"contains","source":550,"target":1379},{"edgeType":"contains","source":2837,"target":2888},{"edgeType":"imports","source":910,"target":592},{"edgeType":"contains","source":1125,"target":1788},{"edgeType":"contains","source":2582,"target":2601},{"edgeType":"contains","source":1732,"target":1733},{"edgeType":"calls","source":893,"target":894},{"edgeType":"imports","source":1600,"target":1135},{"edgeType":"imports","source":3015,"target":1136},{"edgeType":"calls","source":1058,"target":1064},{"edgeType":"calls","source":80,"target":200},{"edgeType":"contains","source":549,"target":2203},{"edgeType":"calls","source":2054,"target":540},{"edgeType":"contains","source":2598,"target":2606},{"edgeType":"contains","source":2597,"target":2637},{"edgeType":"calls","source":909,"target":888},{"edgeType":"contains","source":2254,"target":2435},{"edgeType":"imports","source":840,"target":166},{"edgeType":"calls","source":231,"target":237},{"edgeType":"calls","source":1719,"target":1147},{"edgeType":"calls","source":495,"target":509},{"edgeType":"imports","s
ource":1682,"target":491},{"edgeType":"calls","source":1459,"target":486},{"edgeType":"contains","source":2672,"target":2690},{"edgeType":"contains","source":2704,"target":1698},{"edgeType":"imports","source":3050,"target":2742},{"edgeType":"calls","source":1848,"target":1640},{"edgeType":"calls","source":1098,"target":1107},{"edgeType":"contains","source":46,"target":61},{"edgeType":"contains","source":2444,"target":509},{"edgeType":"calls","source":1701,"target":383},{"edgeType":"contains","source":2597,"target":2638},{"edgeType":"imports","source":2858,"target":1030},{"edgeType":"calls","source":1129,"target":938},{"edgeType":"contains","source":51,"target":169},{"edgeType":"contains","source":994,"target":1062},{"edgeType":"calls","source":1511,"target":1516},{"edgeType":"contains","source":2499,"target":2505},{"edgeType":"calls","source":1734,"target":1738},{"edgeType":"calls","source":1304,"target":270},{"edgeType":"contains","source":2069,"target":2093},{"edgeType":"contains","source":548,"target":1675},{"edgeType":"contains","source":351,"target":382},{"edgeType":"contains","source":1732,"target":913},{"edgeType":"contains","source":2176,"target":2212},{"edgeType":"calls","source":100,"target":69},{"edgeType":"calls","source":1528,"target":1517},{"edgeType":"contains","source":1344,"target":1314},{"edgeType":"contains","source":460,"target":1760},{"edgeType":"contains","source":8,"target":181},{"edgeType":"contains","source":1495,"target":1650},{"edgeType":"contains","source":2860,"target":2674},{"edgeType":"calls","source":937,"target":815},{"edgeType":"calls","source":1684,"target":1706},{"edgeType":"contains","source":1631,"target":1662},{"edgeType":"contains","source":2069,"target":2090},{"edgeType":"calls","source":269,"target":118},{"edgeType":"calls","source":2048,"target":2049},{"edgeType":"contains","source":1153,"target":1421},{"edgeType":"imports","source":2880,"target":83},{"edgeType":"calls","source":486,"target":790},{"edgeType":"contains","source":793,"target":1218},{"edgeType":"contains","source":2176,"target":2209},{"edgeType":"imports","source":2935,"target":1020},{"edgeType":"calls","source":1006,"target":262},{"edgeType":"calls","source":1701,"target":1707},{"edgeType":"imports","source":459,"target":83},{"edgeType":"contains","source":1477,"target":1680},{"edgeType":"imports","source":2935,"target":1284},{"edgeType":"contains","source":2672,"target":2688},{"edgeType":"calls","source":123,"target":152},{"edgeType":"contains","source":2704,"target":1696},{"edgeType":"imports","source":51,"target":47},{"edgeType":"contains","source":2860,"target":2675},{"edgeType":"calls","source":620,"target":599},{"edgeType":"contains","source":550,"target":558},{"edgeType":"contains","source":2187,"target":283},{"edgeType":"calls","source":2693,"target":1871},{"edgeType":"calls","source":682,"target":791},{"edgeType":"calls","source":1345,"target":1379},{"edgeType":"calls","source":346,"target":108},{"edgeType":"imports","source":840,"target":165},{"edgeType":"contains","source":51,"target":171},{"edgeType":"contains","source":228,"target":234},{"edgeType":"contains","source":1153,"target":1420},{"edgeType":"contains","source":2598,"target":2605},{"edgeType":"calls","source":486,"target":789},{"edgeType":"calls","source":491,"target":634},{"edgeType":"contains","source":1477,"target":1681},{"edgeType":"contains","source":484,"target":3132},{"edgeType":"contains","source":2176,"target":2210},{"edgeType":"calls","source":33,"target":34},{"edgeType":"calls","source":1298,"target":9
86},{"edgeType":"calls","source":289,"target":26},{"edgeType":"contains","source":2767,"target":1858},{"edgeType":"calls","source":157,"target":154},{"edgeType":"contains","source":2704,"target":1697},{"edgeType":"calls","source":1066,"target":1043},{"edgeType":"calls","source":3147,"target":3124},{"edgeType":"calls","source":1455,"target":347},{"edgeType":"calls","source":416,"target":47},{"edgeType":"calls","source":1955,"target":698},{"edgeType":"contains","source":2886,"target":553},{"edgeType":"calls","source":115,"target":129},{"edgeType":"calls","source":785,"target":764},{"edgeType":"calls","source":1222,"target":1223},{"edgeType":"calls","source":254,"target":48},{"edgeType":"contains","source":2151,"target":2461},{"edgeType":"contains","source":2598,"target":2610},{"edgeType":"contains","source":4,"target":1631},{"edgeType":"contains","source":2817,"target":1635},{"edgeType":"calls","source":2258,"target":26},{"edgeType":"imports","source":3111,"target":319},{"edgeType":"contains","source":1153,"target":1423},{"edgeType":"calls","source":74,"target":79},{"edgeType":"contains","source":2187,"target":817},{"edgeType":"contains","source":3,"target":1927},{"edgeType":"contains","source":1979,"target":1980},{"edgeType":"calls","source":937,"target":809},{"edgeType":"contains","source":235,"target":284},{"edgeType":"contains","source":2911,"target":2950},{"edgeType":"contains","source":550,"target":1617},{"edgeType":"imports","source":3072,"target":470},{"edgeType":"imports","source":2935,"target":1546},{"edgeType":"contains","source":2742,"target":261},{"edgeType":"contains","source":2598,"target":2611},{"edgeType":"calls","source":1306,"target":732},{"edgeType":"contains","source":1631,"target":348},{"edgeType":"contains","source":1541,"target":760},{"edgeType":"calls","source":49,"target":60},{"edgeType":"contains","source":2978,"target":2987},{"edgeType":"contains","source":1153,"target":1422},{"edgeType":"contains","source":2069,"target":2097},{"edgeType":"contains","source":550,"target":2145},{"edgeType":"contains","source":994,"target":537},{"edgeType":"contains","source":3068,"target":461},{"edgeType":"contains","source":2963,"target":2659},{"edgeType":"imports","source":263,"target":80},{"edgeType":"contains","source":2187,"target":818},{"edgeType":"contains","source":1979,"target":1981},{"edgeType":"contains","source":2444,"target":514},{"edgeType":"contains","source":1277,"target":1281},{"edgeType":"imports","source":1523,"target":130},{"edgeType":"contains","source":2187,"target":25},{"edgeType":"contains","source":1434,"target":1435},{"edgeType":"imports","source":1682,"target":486},{"edgeType":"contains","source":1631,"target":345},{"edgeType":"contains","source":2597,"target":2639},{"edgeType":"calls","source":3081,"target":1201},{"edgeType":"imports","source":2935,"target":1545},{"edgeType":"contains","source":2187,"target":1343},{"edgeType":"contains","source":548,"target":1682},{"edgeType":"contains","source":2672,"target":50},{"edgeType":"calls","source":1214,"target":1209},{"edgeType":"contains","source":2499,"target":2506},{"edgeType":"contains","source":2301,"target":2302},{"edgeType":"contains","source":10,"target":2504},{"edgeType":"contains","source":1153,"target":1425},{"edgeType":"imports","source":549,"target":460},{"edgeType":"imports","source":1569,"target":24},{"edgeType":"contains","source":2176,"target":2213},{"edgeType":"calls","source":2213,"target":2212},{"edgeType":"contains","source":1789,"target":1790},{"edgeType":"calls","source":1141,"target":36}
,{"edgeType":"contains","source":301,"target":354},{"edgeType":"calls","source":1231,"target":1210},{"edgeType":"contains","source":1533,"target":1534},{"edgeType":"contains","source":1495,"target":1655},{"edgeType":"contains","source":235,"target":286},{"edgeType":"contains","source":1277,"target":1278},{"edgeType":"contains","source":2407,"target":2452},{"edgeType":"contains","source":550,"target":1619},{"edgeType":"contains","source":2499,"target":2507},{"edgeType":"contains","source":470,"target":664},{"edgeType":"contains","source":994,"target":1068},{"edgeType":"contains","source":2268,"target":2269},{"edgeType":"calls","source":2502,"target":2503},{"edgeType":"contains","source":1902,"target":2516},{"edgeType":"calls","source":2222,"target":877},{"edgeType":"calls","source":1231,"target":1209},{"edgeType":"contains","source":235,"target":285},{"edgeType":"calls","source":629,"target":52},{"edgeType":"contains","source":263,"target":474},{"edgeType":"calls","source":253,"target":345},{"edgeType":"imports","source":3015,"target":913},{"edgeType":"contains","source":2704,"target":1701},{"edgeType":"calls","source":1684,"target":1700},{"edgeType":"contains","source":261,"target":275},{"edgeType":"imports","source":2661,"target":540},{"edgeType":"contains","source":319,"target":327},{"edgeType":"calls","source":493,"target":34},{"edgeType":"contains","source":1704,"target":731},{"edgeType":"contains","source":2193,"target":2220},{"edgeType":"contains","source":1153,"target":1427},{"edgeType":"calls","source":173,"target":177},{"edgeType":"contains","source":2069,"target":2101},{"edgeType":"contains","source":1878,"target":1944},{"edgeType":"imports","source":910,"target":911},{"edgeType":"calls","source":2046,"target":779},{"edgeType":"calls","source":1520,"target":1494},{"edgeType":"calls","source":2213,"target":2210},{"edgeType":"contains","source":235,"target":288},{"edgeType":"calls","source":198,"target":195},{"edgeType":"contains","source":1495,"target":1661},{"edgeType":"calls","source":1627,"target":1612},{"edgeType":"contains","source":2466,"target":2479},{"edgeType":"contains","source":3094,"target":3095},{"edgeType":"contains","source":261,"target":274},{"edgeType":"calls","source":1800,"target":1534},{"edgeType":"calls","source":1031,"target":1061},{"edgeType":"calls","source":493,"target":33},{"edgeType":"contains","source":2582,"target":2583},{"edgeType":"contains","source":3011,"target":3025},{"edgeType":"contains","source":460,"target":2033},{"edgeType":"contains","source":1153,"target":1426},{"edgeType":"contains","source":2704,"target":1707},{"edgeType":"contains","source":1905,"target":2165},{"edgeType":"contains","source":1878,"target":1945},{"edgeType":"contains","source":459,"target":478},{"edgeType":"contains","source":1631,"target":353},{"edgeType":"contains","source":235,"target":287},{"edgeType":"contains","source":2598,"target":2615},{"edgeType":"contains","source":3094,"target":980},{"edgeType":"calls","source":3148,"target":2030},{"edgeType":"contains","source":2407,"target":2458},{"edgeType":"calls","source":466,"target":78},{"edgeType":"contains","source":460,"target":1243},{"edgeType":"contains","source":2598,"target":2612},{"edgeType":"calls","source":2690,"target":105},{"edgeType":"contains","source":460,"target":1772},{"edgeType":"contains","source":2886,"target":819},{"edgeType":"contains","source":460,"target":979},{"edgeType":"imports","source":2886,"target":698},{"edgeType":"contains","source":1211,"target":1481},{"edgeType":"contains","source":2978,
"target":2988},{"edgeType":"calls","source":1527,"target":486},{"edgeType":"contains","source":1,"target":2788},{"edgeType":"contains","source":22,"target":23},{"edgeType":"contains","source":1541,"target":1554},{"edgeType":"calls","source":2213,"target":2208},{"edgeType":"contains","source":235,"target":290},{"edgeType":"contains","source":1132,"target":1287},{"edgeType":"contains","source":1401,"target":1402},{"edgeType":"calls","source":1757,"target":491},{"edgeType":"imports","source":2636,"target":784},{"edgeType":"calls","source":388,"target":385},{"edgeType":"contains","source":2401,"target":2641},{"edgeType":"calls","source":1880,"target":1963},{"edgeType":"contains","source":2466,"target":2477},{"edgeType":"calls","source":303,"target":376},{"edgeType":"calls","source":2029,"target":779},{"edgeType":"contains","source":2886,"target":820},{"edgeType":"imports","source":2935,"target":1556},{"edgeType":"contains","source":2187,"target":291},{"edgeType":"calls","source":1610,"target":1612},{"edgeType":"contains","source":549,"target":2183},{"edgeType":"contains","source":1904,"target":1929},{"edgeType":"contains","source":17,"target":176},{"edgeType":"calls","source":512,"target":239},{"edgeType":"calls","source":3064,"target":1196},{"edgeType":"calls","source":3145,"target":800},{"edgeType":"contains","source":2672,"target":2697},{"edgeType":"contains","source":1153,"target":1428},{"edgeType":"calls","source":198,"target":196},{"edgeType":"calls","source":197,"target":227},{"edgeType":"contains","source":301,"target":357},{"edgeType":"calls","source":2411,"target":2413},{"edgeType":"contains","source":1878,"target":1943},{"edgeType":"calls","source":2766,"target":2771},{"edgeType":"calls","source":2213,"target":2209},{"edgeType":"contains","source":459,"target":480},{"edgeType":"imports","source":6,"target":130},{"edgeType":"calls","source":2273,"target":2195},{"edgeType":"calls","source":1411,"target":641},{"edgeType":"calls","source":1805,"target":1640},{"edgeType":"contains","source":3094,"target":3098},{"edgeType":"calls","source":1552,"target":1555},{"edgeType":"calls","source":115,"target":121},{"edgeType":"contains","source":2962,"target":1640},{"edgeType":"contains","source":7,"target":490},{"edgeType":"contains","source":1495,"target":1665},{"edgeType":"contains","source":1153,"target":1431},{"edgeType":"contains","source":1125,"target":2299},{"edgeType":"contains","source":2069,"target":2105},{"edgeType":"contains","source":1401,"target":1408},{"edgeType":"contains","source":1268,"target":1303},{"edgeType":"imports","source":3072,"target":743},{"edgeType":"calls","source":1660,"target":1642},{"edgeType":"contains","source":1,"target":1203},{"edgeType":"contains","source":3094,"target":3099},{"edgeType":"contains","source":2995,"target":2997},{"edgeType":"calls","source":1303,"target":1081},{"edgeType":"imports","source":910,"target":908},{"edgeType":"contains","source":2293,"target":2297},{"edgeType":"contains","source":550,"target":2154},{"edgeType":"contains","source":2158,"target":2253},{"edgeType":"calls","source":742,"target":767},{"edgeType":"contains","source":7,"target":489},{"edgeType":"contains","source":550,"target":1889},{"edgeType":"calls","source":619,"target":616},{"edgeType":"calls","source":535,"target":49},{"edgeType":"calls","source":338,"target":342},{"edgeType":"contains","source":2069,"target":2106},{"edgeType":"imports","source":1682,"target":494},{"edgeType":"calls","source":1669,"target":569},{"edgeType":"calls","source":486,"target":775},{"edgeType"
:"calls","source":682,"target":777},{"edgeType":"contains","source":1523,"target":1591},{"edgeType":"contains","source":1631,"target":89},{"edgeType":"contains","source":1904,"target":2725},{"edgeType":"contains","source":2607,"target":1016},{"edgeType":"calls","source":2050,"target":125},{"edgeType":"contains","source":914,"target":915},{"edgeType":"contains","source":1866,"target":1789},{"edgeType":"contains","source":2337,"target":2251},{"edgeType":"contains","source":3061,"target":3062},{"edgeType":"contains","source":2251,"target":2274},{"edgeType":"calls","source":1412,"target":1405},{"edgeType":"contains","source":3011,"target":3026},{"edgeType":"contains","source":2326,"target":2328},{"edgeType":"contains","source":1913,"target":1918},{"edgeType":"contains","source":263,"target":483},{"edgeType":"contains","source":1878,"target":1946},{"edgeType":"calls","source":776,"target":773},{"edgeType":"calls","source":2648,"target":346},{"edgeType":"calls","source":1820,"target":1705},{"edgeType":"calls","source":1619,"target":537},{"edgeType":"contains","source":17,"target":182},{"edgeType":"contains","source":2193,"target":2487},{"edgeType":"contains","source":460,"target":1247},{"edgeType":"contains","source":1989,"target":1940},{"edgeType":"contains","source":228,"target":247},{"edgeType":"contains","source":1268,"target":1301},{"edgeType":"contains","source":3,"target":880},{"edgeType":"calls","source":2631,"target":2195},{"edgeType":"contains","source":1203,"target":1204},{"edgeType":"contains","source":2982,"target":491},{"edgeType":"calls","source":1031,"target":1059},{"edgeType":"contains","source":2772,"target":2773},{"edgeType":"contains","source":2911,"target":2956},{"edgeType":"calls","source":115,"target":122},{"edgeType":"calls","source":117,"target":60},{"edgeType":"calls","source":1092,"target":1018},{"edgeType":"contains","source":2193,"target":2223},{"edgeType":"contains","source":2069,"target":2104},{"edgeType":"contains","source":3011,"target":3027},{"edgeType":"calls","source":544,"target":36},{"edgeType":"calls","source":1880,"target":1958},{"edgeType":"calls","source":1999,"target":383},{"edgeType":"contains","source":1401,"target":1407},{"edgeType":"calls","source":2018,"target":1908},{"edgeType":"calls","source":2649,"target":52},{"edgeType":"contains","source":1268,"target":1302},{"edgeType":"calls","source":1241,"target":98},{"edgeType":"contains","source":840,"target":271},{"edgeType":"contains","source":3041,"target":2598},{"edgeType":"imports","source":3072,"target":2837},{"edgeType":"calls","source":2005,"target":754},{"edgeType":"contains","source":460,"target":952},{"edgeType":"calls","source":1901,"target":1864},{"edgeType":"calls","source":2095,"target":78},{"edgeType":"calls","source":612,"target":598},{"edgeType":"calls","source":2667,"target":52},{"edgeType":"calls","source":1321,"target":1081},{"edgeType":"calls","source":2659,"target":36},{"edgeType":"contains","source":301,"target":331},{"edgeType":"contains","source":2661,"target":2750},{"edgeType":"contains","source":1268,"target":1274},{"edgeType":"contains","source":326,"target":349},{"edgeType":"calls","source":2690,"target":132},{"edgeType":"contains","source":1524,"target":1530},{"edgeType":"contains","source":351,"target":367},{"edgeType":"contains","source":460,"target":951},{"edgeType":"calls","source":1429,"target":1432},{"edgeType":"calls","source":1402,"target":1212},{"edgeType":"imports","source":793,"target":1079},{"edgeType":"contains","source":2444,"target":494},{"edgeType":"calls","
source":546,"target":529},{"edgeType":"calls","source":1345,"target":1393},{"edgeType":"calls","source":1189,"target":1208},{"edgeType":"contains","source":301,"target":330},{"edgeType":"contains","source":3041,"target":2070},{"edgeType":"contains","source":2151,"target":2442},{"edgeType":"calls","source":1318,"target":116},{"edgeType":"contains","source":2407,"target":2433},{"edgeType":"calls","source":471,"target":476},{"edgeType":"contains","source":1268,"target":1275},{"edgeType":"calls","source":2513,"target":1128},{"edgeType":"calls","source":1273,"target":1247},{"edgeType":"contains","source":2962,"target":1612},{"edgeType":"calls","source":2017,"target":116},{"edgeType":"calls","source":1121,"target":938},{"edgeType":"contains","source":460,"target":2272},{"edgeType":"calls","source":1952,"target":1074},{"edgeType":"contains","source":1658,"target":280},{"edgeType":"contains","source":460,"target":954},{"edgeType":"contains","source":2701,"target":2830},{"edgeType":"calls","source":1699,"target":200},{"edgeType":"contains","source":2911,"target":2926},{"edgeType":"calls","source":989,"target":1069},{"edgeType":"contains","source":2607,"target":2837},{"edgeType":"contains","source":301,"target":334},{"edgeType":"contains","source":2187,"target":1322},{"edgeType":"contains","source":840,"target":273},{"edgeType":"calls","source":1304,"target":818},{"edgeType":"contains","source":1495,"target":1634},{"edgeType":"contains","source":2193,"target":2194},{"edgeType":"contains","source":2151,"target":2439},{"edgeType":"contains","source":550,"target":542},{"edgeType":"contains","source":2,"target":2468},{"edgeType":"calls","source":1345,"target":1396},{"edgeType":"calls","source":942,"target":941},{"edgeType":"contains","source":2407,"target":2430},{"edgeType":"calls","source":1241,"target":1185},{"edgeType":"calls","source":2487,"target":2195},{"edgeType":"calls","source":1297,"target":1298},{"edgeType":"contains","source":3041,"target":2597},{"edgeType":"calls","source":1074,"target":1076},{"edgeType":"contains","source":460,"target":2010},{"edgeType":"contains","source":460,"target":953},{"edgeType":"calls","source":2414,"target":2080},{"edgeType":"contains","source":550,"target":1863},{"edgeType":"calls","source":1304,"target":817},{"edgeType":"contains","source":2672,"target":2673},{"edgeType":"contains","source":2701,"target":2831},{"edgeType":"contains","source":6,"target":228},{"edgeType":"contains","source":1979,"target":372},{"edgeType":"calls","source":512,"target":528},{"edgeType":"contains","source":301,"target":333},{"edgeType":"contains","source":840,"target":272},{"edgeType":"contains","source":548,"target":2717},{"edgeType":"imports","source":3050,"target":80},{"edgeType":"contains","source":10,"target":1162},{"edgeType":"contains","source":301,"target":332},{"edgeType":"contains","source":235,"target":264},{"edgeType":"contains","source":2193,"target":2195},{"edgeType":"contains","source":1904,"target":1905},{"edgeType":"contains","source":21,"target":28},{"edgeType":"contains","source":2407,"target":2431},{"edgeType":"contains","source":2911,"target":2927},{"edgeType":"calls","source":1239,"target":1241},{"edgeType":"calls","source":512,"target":523},{"edgeType":"contains","source":460,"target":692},{"edgeType":"calls","source":993,"target":939},{"edgeType":"calls","source":1824,"target":1340},{"edgeType":"calls","source":1206,"target":1207},{"edgeType":"calls","source":1248,"target":698},{"edgeType":"contains","source":2935,"target":2981},{"edgeType":"calls","source":194
3,"target":1879},{"edgeType":"calls","source":1852,"target":1793},{"edgeType":"contains","source":2704,"target":1686},{"edgeType":"contains","source":2540,"target":2542},{"edgeType":"calls","source":2726,"target":2711},{"edgeType":"calls","source":1718,"target":1719},{"edgeType":"contains","source":301,"target":336},{"edgeType":"contains","source":550,"target":544},{"edgeType":"contains","source":2597,"target":2624},{"edgeType":"contains","source":2251,"target":2252},{"edgeType":"contains","source":2407,"target":2436},{"edgeType":"calls","source":248,"target":251},{"edgeType":"contains","source":731,"target":747},{"edgeType":"calls","source":728,"target":698},{"edgeType":"contains","source":460,"target":956},{"edgeType":"contains","source":994,"target":1050},{"edgeType":"calls","source":1692,"target":939},{"edgeType":"imports","source":3050,"target":2193},{"edgeType":"contains","source":3041,"target":3130},{"edgeType":"calls","source":1734,"target":1751},{"edgeType":"contains","source":2445,"target":731},{"edgeType":"calls","source":993,"target":938},{"edgeType":"contains","source":460,"target":955},{"edgeType":"calls","source":3122,"target":3121},{"edgeType":"contains","source":2730,"target":2731},{"edgeType":"contains","source":460,"target":691},{"edgeType":"contains","source":2662,"target":1932},{"edgeType":"contains","source":2672,"target":2679},{"edgeType":"contains","source":2704,"target":1687},{"edgeType":"contains","source":3041,"target":2603},{"edgeType":"contains","source":2540,"target":2543},{"edgeType":"contains","source":301,"target":335},{"edgeType":"contains","source":1524,"target":1535},{"edgeType":"contains","source":2407,"target":2437},{"edgeType":"contains","source":235,"target":266},{"edgeType":"calls","source":1511,"target":1528},{"edgeType":"contains","source":1334,"target":1347},{"edgeType":"contains","source":994,"target":1049},{"edgeType":"calls","source":623,"target":517},{"edgeType":"calls","source":1246,"target":494},{"edgeType":"contains","source":1541,"target":744},{"edgeType":"calls","source":929,"target":544},{"edgeType":"calls","source":1060,"target":975},{"edgeType":"calls","source":1006,"target":271},{"edgeType":"contains","source":550,"target":1336},{"edgeType":"calls","source":1692,"target":938},{"edgeType":"calls","source":1734,"target":1750},{"edgeType":"calls","source":1218,"target":1098},{"edgeType":"contains","source":3041,"target":3131},{"edgeType":"calls","source":265,"target":254},{"edgeType":"calls","source":2115,"target":2095},{"edgeType":"contains","source":2444,"target":495},{"edgeType":"contains","source":2317,"target":2318},{"edgeType":"calls","source":2247,"target":1967},{"edgeType":"contains","source":460,"target":2279},{"edgeType":"contains","source":2187,"target":270},{"edgeType":"calls","source":892,"target":373},{"edgeType":"contains","source":2597,"target":2622},{"edgeType":"contains","source":1524,"target":1532},{"edgeType":"contains","source":359,"target":389},{"edgeType":"calls","source":1807,"target":1340},{"edgeType":"contains","source":3041,"target":3128},{"edgeType":"contains","source":1495,"target":1638},{"edgeType":"calls","source":909,"target":903},{"edgeType":"contains","source":2151,"target":2443},{"edgeType":"calls","source":471,"target":475},{"edgeType":"contains","source":1268,"target":1276},{"edgeType":"contains","source":460,"target":958},{"edgeType":"imports","source":793,"target":807},{"edgeType":"calls","source":902,"target":327},{"edgeType":"contains","source":2069,"target":2078},{"edgeType":"calls","source":788,
"target":690},{"edgeType":"calls","source":2465,"target":495},{"edgeType":"contains","source":550,"target":2131},{"edgeType":"calls","source":1718,"target":1720},{"edgeType":"contains","source":301,"target":337},{"edgeType":"calls","source":1821,"target":641},{"edgeType":"calls","source":1889,"target":383},{"edgeType":"calls","source":1340,"target":754},{"edgeType":"contains","source":1631,"target":1651},{"edgeType":"contains","source":4,"target":2144},{"edgeType":"calls","source":1685,"target":1686},{"edgeType":"contains","source":2767,"target":1846},{"edgeType":"contains","source":2704,"target":1685},{"edgeType":"calls","source":1345,"target":1391},{"edgeType":"contains","source":2193,"target":2199},{"edgeType":"contains","source":2963,"target":2640},{"edgeType":"calls","source":1734,"target":1752},{"edgeType":"contains","source":460,"target":957},{"edgeType":"contains","source":994,"target":1051},{"edgeType":"contains","source":3041,"target":3129},{"edgeType":"contains","source":1631,"target":1656},{"edgeType":"calls","source":808,"target":592},{"edgeType":"calls","source":2959,"target":1562},{"edgeType":"imports","source":2935,"target":1543},{"edgeType":"contains","source":460,"target":1753},{"edgeType":"calls","source":1669,"target":327},{"edgeType":"contains","source":2631,"target":2632},{"edgeType":"contains","source":2407,"target":2441},{"edgeType":"calls","source":2279,"target":2030},{"edgeType":"contains","source":300,"target":1164},{"edgeType":"contains","source":2730,"target":2734},{"edgeType":"contains","source":1495,"target":1644},{"edgeType":"contains","source":3035,"target":1999},{"edgeType":"imports","source":3072,"target":995},{"edgeType":"contains","source":1524,"target":1538},{"edgeType":"contains","source":46,"target":53},{"edgeType":"contains","source":1929,"target":1932},{"edgeType":"contains","source":460,"target":2281},{"edgeType":"calls","source":2555,"target":2195},{"edgeType":"contains","source":2151,"target":2449},{"edgeType":"calls","source":570,"target":571},{"edgeType":"contains","source":550,"target":1341},{"edgeType":"calls","source":298,"target":281},{"edgeType":"contains","source":2932,"target":1494},{"edgeType":"contains","source":2920,"target":2923},{"edgeType":"calls","source":467,"target":327},{"edgeType":"contains","source":840,"target":278},{"edgeType":"contains","source":459,"target":462},{"edgeType":"contains","source":2598,"target":2599},{"edgeType":"contains","source":301,"target":339},{"edgeType":"contains","source":460,"target":1488},{"edgeType":"contains","source":994,"target":789},{"edgeType":"calls","source":124,"target":126},{"edgeType":"calls","source":1636,"target":1613},{"edgeType":"contains","source":46,"target":52},{"edgeType":"calls","source":2053,"target":49},{"edgeType":"contains","source":3041,"target":3135},{"edgeType":"calls","source":1157,"target":78},{"edgeType":"contains","source":550,"target":1340},{"edgeType":"contains","source":2193,"target":2466},{"edgeType":"contains","source":2672,"target":2680},{"edgeType":"calls","source":1619,"target":558},{"edgeType":"contains","source":7,"target":471},{"edgeType":"calls","source":922,"target":231},{"edgeType":"contains","source":261,"target":525},{"edgeType":"contains","source":550,"target":551},{"edgeType":"calls","source":1561,"target":1563},{"edgeType":"contains","source":2730,"target":2732},{"edgeType":"contains","source":459,"target":465},{"edgeType":"contains","source":3015,"target":239},{"edgeType":"calls","source":329,"target":116},{"edgeType":"contains","source":1334,"targ
et":1348},{"edgeType":"calls","source":2054,"target":1871},{"edgeType":"contains","source":1524,"target":1536},{"edgeType":"contains","source":3035,"target":1997},{"edgeType":"contains","source":46,"target":55},{"edgeType":"calls","source":2039,"target":486},{"edgeType":"calls","source":240,"target":233},{"edgeType":"imports","source":2982,"target":83},{"edgeType":"calls","source":1189,"target":939},{"edgeType":"calls","source":1734,"target":1749},{"edgeType":"imports","source":2661,"target":521},{"edgeType":"calls","source":269,"target":125},{"edgeType":"calls","source":1189,"target":938},{"edgeType":"calls","source":2658,"target":2700},{"edgeType":"contains","source":2672,"target":2681},{"edgeType":"contains","source":2920,"target":2921},{"edgeType":"calls","source":1705,"target":269},{"edgeType":"contains","source":261,"target":524},{"edgeType":"contains","source":2730,"target":2733},{"edgeType":"contains","source":1495,"target":1643},{"edgeType":"calls","source":1925,"target":584},{"edgeType":"calls","source":1701,"target":1714},{"edgeType":"calls","source":2663,"target":167},{"edgeType":"contains","source":460,"target":2282},{"edgeType":"contains","source":8,"target":175},{"edgeType":"contains","source":1524,"target":1537},{"edgeType":"imports","source":14,"target":130},{"edgeType":"calls","source":1858,"target":1868},{"edgeType":"contains","source":1929,"target":1931},{"edgeType":"contains","source":46,"target":54},{"edgeType":"imports","source":2935,"target":1539},{"edgeType":"contains","source":2069,"target":2083},{"edgeType":"calls","source":1247,"target":1249},{"edgeType":"contains","source":51,"target":166},{"edgeType":"contains","source":1902,"target":2244},{"edgeType":"contains","source":2672,"target":2686},{"edgeType":"contains","source":261,"target":527},{"edgeType":"calls","source":2258,"target":34},{"edgeType":"contains","source":3011,"target":3012},{"edgeType":"contains","source":1153,"target":1415},{"edgeType":"contains","source":235,"target":276},{"edgeType":"calls","source":1684,"target":1708},{"edgeType":"contains","source":550,"target":553},{"edgeType":"contains","source":484,"target":485},{"edgeType":"imports","source":8,"target":48},{"edgeType":"calls","source":495,"target":513},{"edgeType":"contains","source":1132,"target":1273},{"edgeType":"calls","source":329,"target":374},{"edgeType":"contains","source":10,"target":2758},{"edgeType":"contains","source":1,"target":3037},{"edgeType":"calls","source":2120,"target":877},{"edgeType":"contains","source":550,"target":1345},{"edgeType":"contains","source":460,"target":964},{"edgeType":"contains","source":2151,"target":2189},{"edgeType":"contains","source":1541,"target":1544},{"edgeType":"contains","source":46,"target":57},{"edgeType":"calls","source":1408,"target":486},{"edgeType":"calls","source":937,"target":816},{"edgeType":"contains","source":2730,"target":2739},{"edgeType":"contains","source":2254,"target":2432},{"edgeType":"contains","source":51,"target":165},{"edgeType":"calls","source":2348,"target":1473},{"edgeType":"contains","source":2978,"target":2979},{"edgeType":"contains","source":3011,"target":3013},{"edgeType":"imports","source":7,"target":80},{"edgeType":"calls","source":1082,"target":1078},{"edgeType":"calls","source":909,"target":891},{"edgeType":"calls","source":1974,"target":116},{"edgeType":"contains","source":2597,"target":2634},{"edgeType":"contains","source":2176,"target":2208},{"edgeType":"contains","source":4,"target":2151},{"edgeType":"contains","source":2932,"target":969},{"edgeType":"conta
ins","source":300,"target":2752},{"edgeType":"contains","source":460,"target":1756},{"edgeType":"imports","source":2401,"target":2500},{"edgeType":"calls","source":991,"target":992},{"edgeType":"contains","source":46,"target":56},{"edgeType":"contains","source":460,"target":963},{"edgeType":"calls","source":1814,"target":1640},{"edgeType":"calls","source":698,"target":36},{"edgeType":"calls","source":1246,"target":1282},{"edgeType":"contains","source":263,"target":467},{"edgeType":"contains","source":1631,"target":1658},{"edgeType":"contains","source":2069,"target":2086},{"edgeType":"contains","source":51,"target":168},{"edgeType":"contains","source":2672,"target":2684},{"edgeType":"contains","source":2598,"target":2600},{"edgeType":"contains","source":1524,"target":1540},{"edgeType":"calls","source":759,"target":523},{"edgeType":"contains","source":10,"target":2760},{"edgeType":"calls","source":1734,"target":1745},{"edgeType":"calls","source":415,"target":353},{"edgeType":"calls","source":1809,"target":1534},{"edgeType":"contains","source":2900,"target":901},{"edgeType":"calls","source":92,"target":60},{"edgeType":"imports","source":3072,"target":460},{"edgeType":"contains","source":1541,"target":1542},{"edgeType":"calls","source":2673,"target":116},{"edgeType":"calls","source":892,"target":893},{"edgeType":"imports","source":3035,"target":550},{"edgeType":"contains","source":2518,"target":2701},{"edgeType":"contains","source":46,"target":59},{"edgeType":"calls","source":1215,"target":1185},{"edgeType":"contains","source":1902,"target":2243},{"edgeType":"contains","source":459,"target":468},{"edgeType":"contains","source":235,"target":277},{"edgeType":"contains","source":263,"target":466},{"edgeType":"contains","source":2672,"target":2685},{"edgeType":"contains","source":2788,"target":2789},{"edgeType":"imports","source":7,"target":78},{"edgeType":"calls","source":495,"target":514},{"edgeType":"imports","source":2636,"target":763},{"edgeType":"calls","source":1343,"target":124},{"edgeType":"calls","source":620,"target":603},{"edgeType":"contains","source":2187,"target":807},{"edgeType":"contains","source":2631,"target":2635},{"edgeType":"calls","source":682,"target":795},{"edgeType":"contains","source":460,"target":965},{"edgeType":"calls","source":1419,"target":1467},{"edgeType":"contains","source":1929,"target":1935},{"edgeType":"calls","source":1512,"target":698},{"edgeType":"imports","source":793,"target":1081},{"edgeType":"calls","source":2694,"target":523},{"edgeType":"calls","source":1957,"target":116},{"edgeType":"calls","source":595,"target":415},{"edgeType":"imports","source":2187,"target":460},{"edgeType":"contains","source":3038,"target":3203},{"edgeType":"calls","source":2514,"target":2499},{"edgeType":"contains","source":261,"target":498},{"edgeType":"contains","source":301,"target":315},{"edgeType":"calls","source":1792,"target":569},{"edgeType":"contains","source":995,"target":999},{"edgeType":"contains","source":994,"target":1030},{"edgeType":"contains","source":1125,"target":2254},{"edgeType":"calls","source":1988,"target":1892},{"edgeType":"calls","source":2051,"target":2053},{"edgeType":"contains","source":2886,"target":2894},{"edgeType":"contains","source":7,"target":708},{"edgeType":"calls","source":1243,"target":939},{"edgeType":"contains","source":550,"target":1316},{"edgeType":"calls","source":292,"target":295},{"edgeType":"contains","source":2151,"target":2160},{"edgeType":"contains","source":730,"target":757},{"edgeType":"calls","source":275,"target":292},{"edgeT
ype":"contains","source":995,"target":998},{"edgeType":"contains","source":301,"target":314},{"edgeType":"contains","source":261,"target":497},{"edgeType":"contains","source":3038,"target":3204},{"edgeType":"imports","source":51,"target":83},{"edgeType":"contains","source":2407,"target":2417},{"edgeType":"contains","source":2911,"target":2913},{"edgeType":"contains","source":1495,"target":1620},{"edgeType":"calls","source":2677,"target":1147},{"edgeType":"contains","source":7,"target":707},{"edgeType":"calls","source":1243,"target":938},{"edgeType":"contains","source":4,"target":8},{"edgeType":"calls","source":1654,"target":1674},{"edgeType":"calls","source":2502,"target":2080},{"edgeType":"contains","source":2886,"target":2892},{"edgeType":"contains","source":1430,"target":1518},{"edgeType":"contains","source":995,"target":1001},{"edgeType":"calls","source":1184,"target":1185},{"edgeType":"calls","source":2043,"target":2039},{"edgeType":"contains","source":3041,"target":3108},{"edgeType":"contains","source":3038,"target":3201},{"edgeType":"contains","source":2407,"target":2414},{"edgeType":"contains","source":1746,"target":1764},{"edgeType":"contains","source":2299,"target":2591},{"edgeType":"calls","source":595,"target":417},{"edgeType":"contains","source":2501,"target":2407},{"edgeType":"contains","source":1861,"target":1899},{"edgeType":"imports","source":2900,"target":1081},{"edgeType":"contains","source":2935,"target":2958},{"edgeType":"calls","source":898,"target":802},{"edgeType":"calls","source":1395,"target":986},{"edgeType":"contains","source":301,"target":317},{"edgeType":"calls","source":1520,"target":1339},{"edgeType":"contains","source":261,"target":499},{"edgeType":"contains","source":1495,"target":1618},{"edgeType":"contains","source":3038,"target":3202},{"edgeType":"contains","source":301,"target":316},{"edgeType":"contains","source":1430,"target":1519},{"edgeType":"calls","source":181,"target":37},{"edgeType":"contains","source":995,"target":1000},{"edgeType":"imports","source":840,"target":198},{"edgeType":"contains","source":550,"target":1318},{"edgeType":"contains","source":2299,"target":2592},{"edgeType":"imports","source":2704,"target":550},{"edgeType":"contains","source":351,"target":352},{"edgeType":"calls","source":1534,"target":376},{"edgeType":"contains","source":2464,"target":2498},{"edgeType":"contains","source":2880,"target":1493},{"edgeType":"calls","source":819,"target":79},{"edgeType":"calls","source":2345,"target":2188},{"edgeType":"contains","source":279,"target":205},{"edgeType":"calls","source":1652,"target":681},{"edgeType":"contains","source":2880,"target":1757},{"edgeType":"calls","source":962,"target":931},{"edgeType":"calls","source":2658,"target":145},{"edgeType":"contains","source":460,"target":2261},{"edgeType":"contains","source":2151,"target":2428},{"edgeType":"contains","source":2518,"target":2943},{"edgeType":"contains","source":3068,"target":3070},{"edgeType":"contains","source":730,"target":762},{"edgeType":"contains","source":2661,"target":2738},{"edgeType":"imports","source":729,"target":201},{"edgeType":"calls","source":181,"target":32},{"edgeType":"contains","source":351,"target":355},{"edgeType":"imports","source":1682,"target":1841},{"edgeType":"calls","source":1318,"target":989},{"edgeType":"contains","source":2366,"target":2370},{"edgeType":"contains","source":1905,"target":1069},{"edgeType":"calls","source":838,"target":277},{"edgeType":"contains","source":3,"target":1629},{"edgeType":"contains","source":995,"target":1003},{"edge
Type":"contains","source":1481,"target":1529},{"edgeType":"contains","source":3041,"target":2057},{"edgeType":"contains","source":3038,"target":3207},{"edgeType":"contains","source":460,"target":939},{"edgeType":"calls","source":1637,"target":877},{"edgeType":"contains","source":769,"target":81},{"edgeType":"contains","source":730,"target":761},{"edgeType":"calls","source":2278,"target":34},{"edgeType":"calls","source":416,"target":411},{"edgeType":"calls","source":2599,"target":2503},{"edgeType":"calls","source":2819,"target":2818},{"edgeType":"calls","source":1549,"target":698},{"edgeType":"contains","source":261,"target":502},{"edgeType":"calls","source":1225,"target":1228},{"edgeType":"calls","source":1208,"target":698},{"edgeType":"contains","source":995,"target":1002},{"edgeType":"contains","source":962,"target":968},{"edgeType":"contains","source":2518,"target":2944},{"edgeType":"contains","source":3068,"target":3071},{"edgeType":"contains","source":2453,"target":731},{"edgeType":"calls","source":818,"target":896},{"edgeType":"contains","source":2858,"target":2710},{"edgeType":"contains","source":550,"target":1320},{"edgeType":"calls","source":1993,"target":1996},{"edgeType":"contains","source":1153,"target":1389},{"edgeType":"calls","source":2450,"target":2366},{"edgeType":"calls","source":1155,"target":1020},{"edgeType":"contains","source":3038,"target":3208},{"edgeType":"calls","source":2345,"target":2186},{"edgeType":"contains","source":12,"target":1614},{"edgeType":"contains","source":319,"target":24},{"edgeType":"contains","source":460,"target":938},{"edgeType":"imports","source":729,"target":730},{"edgeType":"calls","source":2276,"target":1154},{"edgeType":"calls","source":2696,"target":2668},{"edgeType":"calls","source":1458,"target":347},{"edgeType":"contains","source":301,"target":318},{"edgeType":"calls","source":2268,"target":2195},{"edgeType":"contains","source":261,"target":501},{"edgeType":"calls","source":1694,"target":698},{"edgeType":"contains","source":3041,"target":2055},{"edgeType":"contains","source":3038,"target":3205},{"edgeType":"contains","source":2407,"target":2418},{"edgeType":"calls","source":1411,"target":486},{"edgeType":"contains","source":300,"target":881},{"edgeType":"calls","source":697,"target":686},{"edgeType":"contains","source":3050,"target":3097},{"edgeType":"contains","source":4,"target":1600},{"edgeType":"contains","source":1481,"target":1527},{"edgeType":"calls","source":1233,"target":1247},{"edgeType":"contains","source":2176,"target":2180},{"edgeType":"imports","source":729,"target":198},{"edgeType":"contains","source":1600,"target":481},{"edgeType":"contains","source":2151,"target":48},{"edgeType":"calls","source":2195,"target":491},{"edgeType":"contains","source":261,"target":504},{"edgeType":"contains","source":550,"target":794},{"edgeType":"contains","source":2880,"target":1496},{"edgeType":"contains","source":326,"target":338},{"edgeType":"contains","source":2407,"target":2419},{"edgeType":"contains","source":2911,"target":2915},{"edgeType":"contains","source":1495,"target":1622},{"edgeType":"contains","source":2518,"target":2942},{"edgeType":"contains","source":3068,"target":3069},{"edgeType":"contains","source":351,"target":356},{"edgeType":"calls","source":1572,"target":251},{"edgeType":"calls","source":2981,"target":1494},{"edgeType":"contains","source":995,"target":1004},{"edgeType":"contains","source":3041,"target":3113},{"edgeType":"contains","source":3038,"target":3206},{"edgeType":"contains","source":2886,"target":2897},{"edg
eType":"calls","source":2693,"target":116},{"edgeType":"calls","source":136,"target":107},{"edgeType":"calls","source":203,"target":144},{"edgeType":"contains","source":784,"target":674},{"edgeType":"contains","source":1788,"target":1789},{"edgeType":"calls","source":1817,"target":1640},{"edgeType":"calls","source":2787,"target":1959},{"edgeType":"contains","source":261,"target":503},{"edgeType":"calls","source":1511,"target":820},{"edgeType":"calls","source":2645,"target":2658},{"edgeType":"contains","source":3035,"target":3040},{"edgeType":"contains","source":548,"target":2444},{"edgeType":"imports","source":3015,"target":698},{"edgeType":"contains","source":729,"target":797},{"edgeType":"contains","source":2245,"target":2425},{"edgeType":"calls","source":1159,"target":1157},{"edgeType":"contains","source":2858,"target":2713},{"edgeType":"calls","source":400,"target":374},{"edgeType":"contains","source":3038,"target":3211},{"edgeType":"contains","source":460,"target":944},{"edgeType":"calls","source":1213,"target":1068},{"edgeType":"contains","source":1905,"target":1074},{"edgeType":"contains","source":840,"target":262},{"edgeType":"calls","source":965,"target":36},{"edgeType":"calls","source":2084,"target":2080},{"edgeType":"contains","source":235,"target":255},{"edgeType":"contains","source":261,"target":506},{"edgeType":"contains","source":1495,"target":1628},{"edgeType":"calls","source":1317,"target":486},{"edgeType":"contains","source":2299,"target":2602},{"edgeType":"contains","source":673,"target":682},{"edgeType":"contains","source":731,"target":734},{"edgeType":"calls","source":741,"target":108},{"edgeType":"contains","source":2245,"target":2426},{"edgeType":"contains","source":351,"target":358},{"edgeType":"contains","source":3041,"target":3119},{"edgeType":"contains","source":3038,"target":3212},{"edgeType":"contains","source":2704,"target":1674},{"edgeType":"contains","source":3050,"target":3104},{"edgeType":"contains","source":1626,"target":1796},{"edgeType":"calls","source":514,"target":539},{"edgeType":"imports","source":2636,"target":1611},{"edgeType":"calls","source":1133,"target":376},{"edgeType":"imports","source":2852,"target":200},{"edgeType":"calls","source":2392,"target":725},{"edgeType":"contains","source":2245,"target":2423},{"edgeType":"calls","source":818,"target":895},{"edgeType":"contains","source":2518,"target":2945},{"edgeType":"contains","source":351,"target":361},{"edgeType":"contains","source":2366,"target":2372},{"edgeType":"contains","source":1905,"target":1071},{"edgeType":"contains","source":460,"target":946},{"edgeType":"contains","source":1481,"target":1531},{"edgeType":"contains","source":3041,"target":2059},{"edgeType":"calls","source":177,"target":154},{"edgeType":"contains","source":3038,"target":3209},{"edgeType":"calls","source":1241,"target":1259},{"edgeType":"contains","source":2193,"target":2450},{"edgeType":"calls","source":236,"target":175},{"edgeType":"contains","source":3050,"target":3101},{"edgeType":"contains","source":550,"target":534},{"edgeType":"contains","source":2880,"target":1500},{"edgeType":"contains","source":2052,"target":2064},{"edgeType":"contains","source":729,"target":798},{"edgeType":"contains","source":2245,"target":2424},{"edgeType":"contains","source":351,"target":360},{"edgeType":"contains","source":2540,"target":150},{"edgeType":"contains","source":3050,"target":460},{"edgeType":"contains","source":550,"target":2119},{"edgeType":"contains","source":3038,"target":3210},{"edgeType":"calls","source":1116,"target":377
},{"edgeType":"contains","source":2366,"target":2373},{"edgeType":"contains","source":460,"target":945},{"edgeType":"calls","source":3124,"target":493},{"edgeType":"contains","source":261,"target":507},{"edgeType":"contains","source":1656,"target":1657},{"edgeType":"contains","source":2880,"target":1501},{"edgeType":"calls","source":1565,"target":1520},{"edgeType":"calls","source":1715,"target":834},{"edgeType":"calls","source":2765,"target":523},{"edgeType":"calls","source":519,"target":116},{"edgeType":"contains","source":351,"target":363},{"edgeType":"contains","source":460,"target":948},{"edgeType":"contains","source":2837,"target":2840},{"edgeType":"contains","source":2880,"target":1507},{"edgeType":"calls","source":1035,"target":1033},{"edgeType":"calls","source":1803,"target":1801},{"edgeType":"contains","source":2911,"target":2924},{"edgeType":"contains","source":2158,"target":2219},{"edgeType":"calls","source":413,"target":231},{"edgeType":"contains","source":550,"target":2122},{"edgeType":"calls","source":989,"target":80},{"edgeType":"calls","source":2001,"target":2005},{"edgeType":"calls","source":1078,"target":493},{"edgeType":"calls","source":117,"target":158},{"edgeType":"contains","source":2259,"target":2260},{"edgeType":"contains","source":3038,"target":3215},{"edgeType":"calls","source":514,"target":536},{"edgeType":"calls","source":1715,"target":36},{"edgeType":"calls","source":2084,"target":2076},{"edgeType":"contains","source":548,"target":2712},{"edgeType":"calls","source":975,"target":1043},{"edgeType":"contains","source":2837,"target":2841},{"edgeType":"calls","source":1652,"target":1196},{"edgeType":"contains","source":351,"target":362},{"edgeType":"calls","source":1588,"target":537},{"edgeType":"contains","source":2911,"target":2925},{"edgeType":"imports","source":2299,"target":1219},{"edgeType":"contains","source":460,"target":947},{"edgeType":"imports","source":2852,"target":460},{"edgeType":"contains","source":550,"target":799},{"edgeType":"calls","source":280,"target":125},{"edgeType":"calls","source":465,"target":468},{"edgeType":"contains","source":3038,"target":3216},{"edgeType":"contains","source":1631,"target":1645},{"edgeType":"calls","source":293,"target":251},{"edgeType":"calls","source":802,"target":327},{"edgeType":"contains","source":2661,"target":2747},{"edgeType":"imports","source":51,"target":84},{"edgeType":"calls","source":126,"target":145},{"edgeType":"contains","source":460,"target":950},{"edgeType":"contains","source":3035,"target":3042},{"edgeType":"contains","source":351,"target":365},{"edgeType":"contains","source":2837,"target":2838},{"edgeType":"calls","source":2011,"target":376},{"edgeType":"contains","source":2151,"target":2434},{"edgeType":"contains","source":3038,"target":3213},{"edgeType":"contains","source":4,"target":2401},{"edgeType":"contains","source":731,"target":741},{"edgeType":"calls","source":1251,"target":1210},{"edgeType":"contains","source":10,"target":1158},{"edgeType":"calls","source":1311,"target":1199},{"edgeType":"imports","source":2886,"target":462},{"edgeType":"imports","source":840,"target":201},{"edgeType":"calls","source":1588,"target":1597},{"edgeType":"contains","source":2176,"target":1924},{"edgeType":"contains","source":1905,"target":1076},{"edgeType":"contains","source":1125,"target":1211},{"edgeType":"contains","source":1626,"target":1533},{"edgeType":"contains","source":1495,"target":1630},{"edgeType":"contains","source":1073,"target":973},{"edgeType":"calls","source":1201,"target":116},{"edgeType":"call
s","source":1830,"target":1758},{"edgeType":"contains","source":460,"target":949},{"edgeType":"contains","source":2837,"target":2839},{"edgeType":"calls","source":2798,"target":1345},{"edgeType":"calls","source":1850,"target":345},{"edgeType":"imports","source":1569,"target":64},{"edgeType":"calls","source":2668,"target":1147},{"edgeType":"calls","source":3090,"target":2599},{"edgeType":"contains","source":3061,"target":2501},{"edgeType":"contains","source":3038,"target":3214},{"edgeType":"contains","source":1626,"target":1798},{"edgeType":"contains","source":1524,"target":1525},{"edgeType":"contains","source":1334,"target":1337},{"edgeType":"contains","source":2299,"target":2604},{"edgeType":"imports","source":1569,"target":46},{"edgeType":"imports","source":840,"target":183},{"edgeType":"imports","source":1675,"target":460},{"edgeType":"calls","source":1389,"target":1187},{"edgeType":"contains","source":305,"target":439},{"edgeType":"contains","source":2704,"target":2706},{"edgeType":"calls","source":2035,"target":2037},{"edgeType":"imports","source":51,"target":65},{"edgeType":"calls","source":1868,"target":79},{"edgeType":"calls","source":1790,"target":383},{"edgeType":"calls","source":1226,"target":1219},{"edgeType":"contains","source":3038,"target":3186},{"edgeType":"calls","source":466,"target":467},{"edgeType":"calls","source":943,"target":743},{"edgeType":"calls","source":342,"target":347},{"edgeType":"calls","source":879,"target":877},{"edgeType":"contains","source":2390,"target":1869},{"edgeType":"calls","source":857,"target":766},{"edgeType":"calls","source":1858,"target":1974},{"edgeType":"contains","source":1878,"target":1886},{"edgeType":"contains","source":664,"target":672},{"edgeType":"calls","source":2946,"target":1545},{"edgeType":"calls","source":732,"target":676},{"edgeType":"imports","source":840,"target":184},{"edgeType":"contains","source":305,"target":438},{"edgeType":"calls","source":1690,"target":575},{"edgeType":"contains","source":3000,"target":3044},{"edgeType":"contains","source":1645,"target":125},{"edgeType":"contains","source":2704,"target":2707},{"edgeType":"contains","source":350,"target":3007},{"edgeType":"contains","source":2729,"target":1932},{"edgeType":"calls","source":514,"target":563},{"edgeType":"calls","source":682,"target":376},{"edgeType":"calls","source":1292,"target":1286},{"edgeType":"calls","source":754,"target":523},{"edgeType":"contains","source":2858,"target":2689},{"edgeType":"contains","source":3038,"target":3187},{"edgeType":"contains","source":2725,"target":2848},{"edgeType":"contains","source":263,"target":418},{"edgeType":"contains","source":1430,"target":1505},{"edgeType":"calls","source":2663,"target":541},{"edgeType":"contains","source":470,"target":872},{"edgeType":"imports","source":1905,"target":200},{"edgeType":"contains","source":2,"target":2963},{"edgeType":"contains","source":2837,"target":959},{"edgeType":"contains","source":664,"target":675},{"edgeType":"contains","source":2390,"target":2396},{"edgeType":"contains","source":134,"target":192},{"edgeType":"contains","source":2770,"target":2772},{"edgeType":"contains","source":1878,"target":1884},{"edgeType":"contains","source":1495,"target":1601},{"edgeType":"calls","source":306,"target":144},{"edgeType":"contains","source":1430,"target":1502},{"edgeType":"contains","source":1277,"target":1224},{"edgeType":"contains","source":487,"target":612},{"edgeType":"calls","source":555,"target":353},{"edgeType":"contains","source":3038,"target":3184},{"edgeType":"imports","source":
910,"target":124},{"edgeType":"calls","source":392,"target":385},{"edgeType":"calls","source":812,"target":842},{"edgeType":"contains","source":1746,"target":1748},{"edgeType":"calls","source":1333,"target":1339},{"edgeType":"contains","source":7,"target":694},{"edgeType":"contains","source":470,"target":875},{"edgeType":"contains","source":1861,"target":1883},{"edgeType":"contains","source":2,"target":2962},{"edgeType":"contains","source":2390,"target":2397},{"edgeType":"contains","source":1631,"target":1878},{"edgeType":"contains","source":2151,"target":2406},{"edgeType":"contains","source":1,"target":350},{"edgeType":"contains","source":2858,"target":2687},{"edgeType":"calls","source":1544,"target":346},{"edgeType":"contains","source":550,"target":772},{"edgeType":"contains","source":1430,"target":1503},{"edgeType":"calls","source":3149,"target":1064},{"edgeType":"contains","source":3038,"target":3185},{"edgeType":"contains","source":225,"target":1334},{"edgeType":"contains","source":470,"target":874},{"edgeType":"contains","source":460,"target":1716},{"edgeType":"imports","source":2187,"target":1232},{"edgeType":"calls","source":2185,"target":26},{"edgeType":"contains","source":557,"target":559},{"edgeType":"contains","source":2151,"target":33},{"edgeType":"contains","source":487,"target":615},{"edgeType":"calls","source":1995,"target":366},{"edgeType":"imports","source":3072,"target":1016},{"edgeType":"contains","source":2880,"target":1482},{"edgeType":"contains","source":1125,"target":2770},{"edgeType":"contains","source":2468,"target":2626},{"edgeType":"contains","source":12,"target":13},{"edgeType":"contains","source":2725,"target":2851},{"edgeType":"calls","source":788,"target":787},{"edgeType":"contains","source":3038,"target":3190},{"edgeType":"imports","source":2636,"target":2640},{"edgeType":"imports","source":2661,"target":543},{"edgeType":"contains","source":7,"target":695},{"edgeType":"imports","source":2963,"target":166},{"edgeType":"contains","source":305,"target":442},{"edgeType":"contains","source":2151,"target":34},{"edgeType":"contains","source":1495,"target":1608},{"edgeType":"imports","source":3015,"target":1989},{"edgeType":"contains","source":134,"target":193},{"edgeType":"contains","source":2704,"target":1654},{"edgeType":"contains","source":460,"target":1715},{"edgeType":"imports","source":51,"target":62},{"edgeType":"imports","source":2770,"target":1128},{"edgeType":"calls","source":2134,"target":2137},{"edgeType":"contains","source":1430,"target":1509},{"edgeType":"calls","source":2605,"target":2599},{"edgeType":"contains","source":2662,"target":2691},{"edgeType":"contains","source":1744,"target":496},{"edgeType":"calls","source":181,"target":48},{"edgeType":"contains","source":1878,"target":1891},{"edgeType":"contains","source":1904,"target":2142},{"edgeType":"contains","source":470,"target":876},{"edgeType":"contains","source":3038,"target":3191},{"edgeType":"calls","source":359,"target":346},{"edgeType":"imports","source":2962,"target":196},{"edgeType":"calls","source":1399,"target":1403},{"edgeType":"calls","source":1409,"target":36},{"edgeType":"contains","source":664,"target":679},{"edgeType":"calls","source":1690,"target":574},{"edgeType":"contains","source":3000,"target":3045},{"edgeType":"imports","source":2962,"target":460},{"edgeType":"calls","source":1556,"target":1557},{"edgeType":"contains","source":2704,"target":2708},{"edgeType":"contains","source":557,"target":561},{"edgeType":"calls","source":1800,"target":1128},{"edgeType":"contains","source"
:1430,"target":1506},{"edgeType":"contains","source":4,"target":263},{"edgeType":"imports","source":910,"target":913},{"edgeType":"contains","source":12,"target":15},{"edgeType":"contains","source":460,"target":1189},{"edgeType":"contains","source":3038,"target":3188},{"edgeType":"contains","source":2725,"target":2849},{"edgeType":"contains","source":2176,"target":842},{"edgeType":"contains","source":394,"target":64},{"edgeType":"calls","source":631,"target":635},{"edgeType":"contains","source":550,"target":1306},{"edgeType":"contains","source":2193,"target":2429},{"edgeType":"calls","source":2665,"target":2057},{"edgeType":"calls","source":267,"target":26},{"edgeType":"imports","source":2661,"target":541},{"edgeType":"contains","source":3072,"target":550},{"edgeType":"contains","source":1132,"target":1233},{"edgeType":"contains","source":664,"target":678},{"edgeType":"contains","source":305,"target":444},{"edgeType":"calls","source":1345,"target":698},{"edgeType":"calls","source":95,"target":73},{"edgeType":"contains","source":557,"target":560},{"edgeType":"contains","source":487,"target":616},{"edgeType":"calls","source":1663,"target":353},{"edgeType":"calls","source":1705,"target":108},{"edgeType":"contains","source":2151,"target":32},{"edgeType":"contains","source":263,"target":425},{"edgeType":"contains","source":2704,"target":2709},{"edgeType":"calls","source":1226,"target":1216},{"edgeType":"contains","source":3038,"target":3189},{"edgeType":"calls","source":2022,"target":1908},{"edgeType":"calls","source":2946,"target":1539},{"edgeType":"calls","source":1838,"target":1534},{"edgeType":"calls","source":565,"target":566},{"edgeType":"imports","source":3072,"target":500},{"edgeType":"calls","source":1534,"target":383},{"edgeType":"calls","source":482,"target":491},{"edgeType":"contains","source":1905,"target":2114},{"edgeType":"calls","source":268,"target":254},{"edgeType":"contains","source":7,"target":700},{"edgeType":"calls","source":2010,"target":2011},{"edgeType":"calls","source":486,"target":631},{"edgeType":"contains","source":2151,"target":37},{"edgeType":"calls","source":494,"target":383},{"edgeType":"calls","source":2392,"target":1797},{"edgeType":"contains","source":550,"target":1837},{"edgeType":"contains","source":487,"target":619},{"edgeType":"contains","source":2407,"target":2408},{"edgeType":"calls","source":1413,"target":698},{"edgeType":"contains","source":3038,"target":3194},{"edgeType":"contains","source":1704,"target":1739},{"edgeType":"contains","source":1125,"target":2245},{"edgeType":"calls","source":984,"target":255},{"edgeType":"calls","source":2417,"target":2080},{"edgeType":"calls","source":514,"target":555},{"edgeType":"contains","source":7,"target":699},{"edgeType":"contains","source":550,"target":2100},{"edgeType":"calls","source":813,"target":535},{"edgeType":"contains","source":2886,"target":1830},{"edgeType":"contains","source":487,"target":618},{"edgeType":"calls","source":788,"target":517},{"edgeType":"contains","source":2407,"target":2409},{"edgeType":"contains","source":2151,"target":38},{"edgeType":"contains","source":557,"target":562},{"edgeType":"contains","source":1430,"target":1513},{"edgeType":"calls","source":1824,"target":641},{"edgeType":"calls","source":755,"target":748},{"edgeType":"calls","source":251,"target":252},{"edgeType":"contains","source":460,"target":2247},{"edgeType":"contains","source":2151,"target":39},{"edgeType":"imports","source":1682,"target":1836},{"edgeType":"calls","source":1038,"target":959},{"edgeType":"contains","s
ource":730,"target":748},{"edgeType":"contains","source":2464,"target":2491},{"edgeType":"contains","source":664,"target":680},{"edgeType":"contains","source":3038,"target":3195},{"edgeType":"calls","source":1718,"target":491},{"edgeType":"contains","source":2729,"target":90},{"edgeType":"contains","source":550,"target":1307},{"edgeType":"calls","source":1250,"target":1258},{"edgeType":"calls","source":818,"target":383},{"edgeType":"contains","source":2982,"target":3079},{"edgeType":"contains","source":7,"target":702},{"edgeType":"contains","source":301,"target":309},{"edgeType":"calls","source":2678,"target":328},{"edgeType":"contains","source":1430,"target":1510},{"edgeType":"calls","source":1133,"target":1189},{"edgeType":"contains","source":487,"target":621},{"edgeType":"calls","source":788,"target":520},{"edgeType":"imports","source":8,"target":83},{"edgeType":"imports","source":2880,"target":1429},{"edgeType":"calls","source":912,"target":905},{"edgeType":"contains","source":460,"target":2250},{"edgeType":"contains","source":3035,"target":1964},{"edgeType":"contains","source":1905,"target":2112},{"edgeType":"calls","source":986,"target":989},{"edgeType":"contains","source":994,"target":1023},{"edgeType":"contains","source":1481,"target":1515},{"edgeType":"contains","source":3038,"target":3192},{"edgeType":"calls","source":598,"target":597},{"edgeType":"calls","source":1242,"target":1244},{"edgeType":"contains","source":7,"target":701},{"edgeType":"calls","source":2696,"target":2678},{"edgeType":"calls","source":1298,"target":36},{"edgeType":"contains","source":2847,"target":2772},{"edgeType":"imports","source":1631,"target":508},{"edgeType":"contains","source":3050,"target":443},{"edgeType":"contains","source":2518,"target":2929},{"edgeType":"calls","source":1807,"target":641},{"edgeType":"calls","source":507,"target":510},{"edgeType":"calls","source":1870,"target":1859},{"edgeType":"calls","source":598,"target":596},{"edgeType":"calls","source":697,"target":698},{"edgeType":"contains","source":3038,"target":3193},{"edgeType":"calls","source":1688,"target":1683},{"edgeType":"calls","source":2973,"target":166},{"edgeType":"calls","source":1926,"target":383},{"edgeType":"contains","source":487,"target":623},{"edgeType":"contains","source":2853,"target":2856},{"edgeType":"contains","source":2886,"target":1833},{"edgeType":"imports","source":3072,"target":496},{"edgeType":"contains","source":3041,"target":2049},{"edgeType":"contains","source":3038,"target":3199},{"edgeType":"contains","source":235,"target":243},{"edgeType":"contains","source":1746,"target":1762},{"edgeType":"calls","source":2541,"target":2195},{"edgeType":"contains","source":2643,"target":1174},{"edgeType":"contains","source":58,"target":180},{"edgeType":"contains","source":460,"target":1196},{"edgeType":"calls","source":234,"target":247},{"edgeType":"contains","source":1656,"target":1645},{"edgeType":"imports","source":2661,"target":24},{"edgeType":"contains","source":2661,"target":2730},{"edgeType":"contains","source":730,"target":753},{"edgeType":"contains","source":729,"target":784},{"edgeType":"imports","source":1682,"target":1833},{"edgeType":"contains","source":2052,"target":2054},{"edgeType":"contains","source":1861,"target":1897},{"edgeType":"contains","source":840,"target":250},{"edgeType":"contains","source":2151,"target":2156},{"edgeType":"calls","source":2042,"target":486},{"edgeType":"contains","source":3041,"target":2048},{"edgeType":"contains","source":51,"target":132},{"edgeType":"calls","source":1341,"ta
rget":1340},{"edgeType":"calls","source":818,"target":376},{"edgeType":"calls","source":2600,"target":2481},{"edgeType":"calls","source":1185,"target":98},{"edgeType":"contains","source":3038,"target":3200},{"edgeType":"contains","source":2407,"target":2413},{"edgeType":"contains","source":1523,"target":1541},{"edgeType":"contains","source":1746,"target":1763},{"edgeType":"contains","source":7,"target":703},{"edgeType":"calls","source":563,"target":353},{"edgeType":"contains","source":326,"target":328},{"edgeType":"calls","source":2734,"target":970},{"edgeType":"contains","source":3,"target":2149},{"edgeType":"contains","source":2932,"target":936},{"edgeType":"contains","source":1861,"target":1898},{"edgeType":"contains","source":1732,"target":83},{"edgeType":"contains","source":2151,"target":2157},{"edgeType":"contains","source":2880,"target":1755},{"edgeType":"calls","source":117,"target":174},{"edgeType":"calls","source":573,"target":572},{"edgeType":"contains","source":995,"target":997},{"edgeType":"calls","source":1131,"target":718},{"edgeType":"imports","source":3072,"target":2608},{"edgeType":"contains","source":2853,"target":2854},{"edgeType":"contains","source":7,"target":706},{"edgeType":"contains","source":2597,"target":2598},{"edgeType":"contains","source":1631,"target":304},{"edgeType":"calls","source":136,"target":116},{"edgeType":"calls","source":1046,"target":975},{"edgeType":"contains","source":2407,"target":2410},{"edgeType":"imports","source":2661,"target":1871},{"edgeType":"contains","source":4,"target":7},{"edgeType":"contains","source":1570,"target":81},{"edgeType":"contains","source":1861,"target":1895},{"edgeType":"calls","source":796,"target":797},{"edgeType":"contains","source":305,"target":453},{"edgeType":"calls","source":1520,"target":1079},{"edgeType":"calls","source":2054,"target":116},{"edgeType":"contains","source":3041,"target":2046},{"edgeType":"calls","source":1713,"target":1702},{"edgeType":"contains","source":3038,"target":3196},{"edgeType":"calls","source":540,"target":541},{"edgeType":"contains","source":730,"target":755},{"edgeType":"calls","source":295,"target":471},{"edgeType":"calls","source":1142,"target":376},{"edgeType":"contains","source":995,"target":996},{"edgeType":"contains","source":2982,"target":3084},{"edgeType":"imports","source":1569,"target":47},{"edgeType":"calls","source":3167,"target":3136},{"edgeType":"contains","source":3038,"target":3198},{"edgeType":"contains","source":2853,"target":2855},{"edgeType":"calls","source":2036,"target":145},{"edgeType":"calls","source":2558,"target":2195},{"edgeType":"contains","source":7,"target":705},{"edgeType":"contains","source":2407,"target":2411},{"edgeType":"contains","source":2729,"target":2735},{"edgeType":"calls","source":2209,"target":2180},{"edgeType":"calls","source":2679,"target":2673},{"edgeType":"calls","source":598,"target":592},{"edgeType":"calls","source":1810,"target":280},{"edgeType":"contains","source":134,"target":204},{"edgeType":"contains","source":1746,"target":1761},{"edgeType":"imports","source":3050,"target":117},{"edgeType":"contains","source":3038,"target":3197},{"edgeType":"imports","source":1682,"target":1830},{"edgeType":"calls","source":1063,"target":975},{"edgeType":"contains","source":305,"target":452},{"edgeType":"contains","source":3038,"target":3170},{"edgeType":"contains","source":2858,"target":2672},{"edgeType":"calls","source":464,"target":479},{"edgeType":"imports","source":14,"target":205},{"edgeType":"contains","source":1929,"target":2138},{"edgeType":
"contains","source":1093,"target":1099},{"edgeType":"contains","source":460,"target":1167},{"edgeType":"calls","source":756,"target":676},{"edgeType":"calls","source":1952,"target":1918},{"edgeType":"contains","source":460,"target":902},{"edgeType":"calls","source":1652,"target":383},{"edgeType":"calls","source":2063,"target":1912},{"edgeType":"contains","source":1224,"target":1267},{"edgeType":"calls","source":1226,"target":376},{"edgeType":"contains","source":10,"target":2961},{"edgeType":"imports","source":2158,"target":1128},{"edgeType":"imports","source":1523,"target":201},{"edgeType":"imports","source":3072,"target":1069},{"edgeType":"contains","source":1569,"target":1671},{"edgeType":"contains","source":2967,"target":2992},{"edgeType":"contains","source":3050,"target":419},{"edgeType":"calls","source":2327,"target":2186},{"edgeType":"contains","source":3038,"target":3171},{"edgeType":"contains","source":2701,"target":2519},{"edgeType":"contains","source":2661,"target":2702},{"edgeType":"contains","source":550,"target":754},{"edgeType":"contains","source":1126,"target":1132},{"edgeType":"contains","source":14,"target":457},{"edgeType":"contains","source":460,"target":1166},{"edgeType":"contains","source":3050,"target":3063},{"edgeType":"calls","source":1926,"target":1930},{"edgeType":"contains","source":550,"target":1547},{"edgeType":"contains","source":2464,"target":2467},{"edgeType":"imports","source":1682,"target":556},{"edgeType":"calls","source":682,"target":327},{"edgeType":"contains","source":1,"target":1125},{"edgeType":"calls","source":1816,"target":1640},{"edgeType":"contains","source":1682,"target":2133},{"edgeType":"contains","source":1569,"target":1672},{"edgeType":"contains","source":487,"target":596},{"edgeType":"contains","source":3038,"target":3168},{"edgeType":"contains","source":460,"target":376},{"edgeType":"calls","source":1220,"target":36},{"edgeType":"contains","source":3108,"target":3112},{"edgeType":"calls","source":1083,"target":1112},{"edgeType":"imports","source":1682,"target":1876},{"edgeType":"calls","source":1411,"target":986},{"edgeType":"contains","source":350,"target":351},{"edgeType":"calls","source":635,"target":201},{"edgeType":"contains","source":2982,"target":3054},{"edgeType":"contains","source":460,"target":1169},{"edgeType":"calls","source":417,"target":353},{"edgeType":"imports","source":1682,"target":1612},{"edgeType":"calls","source":1526,"target":327},{"edgeType":"imports","source":2935,"target":2936},{"edgeType":"contains","source":1224,"target":1265},{"edgeType":"calls","source":2357,"target":725},{"edgeType":"calls","source":547,"target":551},{"edgeType":"contains","source":2578,"target":2629},{"edgeType":"imports","source":1682,"target":1348},{"edgeType":"contains","source":1935,"target":1950},{"edgeType":"contains","source":1929,"target":2136},{"edgeType":"calls","source":1864,"target":1212},{"edgeType":"calls","source":2283,"target":641},{"edgeType":"calls","source":770,"target":773},{"edgeType":"calls","source":2981,"target":936},{"edgeType":"contains","source":994,"target":734},{"edgeType":"contains","source":1605,"target":1610},{"edgeType":"calls","source":2549,"target":2437},{"edgeType":"calls","source":2581,"target":2502},{"edgeType":"contains","source":1662,"target":1693},{"edgeType":"calls","source":1335,"target":698},{"edgeType":"contains","source":3038,"target":3169},{"edgeType":"calls","source":564,"target":551},{"edgeType":"contains","source":550,"target":1549},{"edgeType":"contains","source":3,"target":2914},{"edgeType":"
contains","source":460,"target":2225},{"edgeType":"contains","source":2767,"target":2056},{"edgeType":"calls","source":1868,"target":1880},{"edgeType":"contains","source":1224,"target":1266},{"edgeType":"calls","source":2743,"target":1709},{"edgeType":"calls","source":1689,"target":558},{"edgeType":"contains","source":1288,"target":1132},{"edgeType":"contains","source":2578,"target":2630},{"edgeType":"calls","source":2225,"target":1379},{"edgeType":"contains","source":1430,"target":1491},{"edgeType":"contains","source":2911,"target":2883},{"edgeType":"contains","source":550,"target":1552},{"edgeType":"calls","source":1348,"target":1347},{"edgeType":"contains","source":487,"target":598},{"edgeType":"contains","source":2661,"target":2705},{"edgeType":"calls","source":2050,"target":2047},{"edgeType":"contains","source":2555,"target":2556},{"edgeType":"calls","source":368,"target":280},{"edgeType":"contains","source":3038,"target":3174},{"edgeType":"contains","source":2858,"target":2676},{"edgeType":"imports","source":1682,"target":24},{"edgeType":"imports","source":3072,"target":537},{"edgeType":"contains","source":1224,"target":1271},{"edgeType":"contains","source":2860,"target":2878},{"edgeType":"contains","source":2742,"target":2836},{"edgeType":"calls","source":2438,"target":2440},{"edgeType":"contains","source":550,"target":759},{"edgeType":"contains","source":134,"target":178},{"edgeType":"contains","source":2183,"target":2196},{"edgeType":"imports","source":14,"target":201},{"edgeType":"contains","source":883,"target":1006},{"edgeType":"imports","source":2401,"target":2046},{"edgeType":"contains","source":1430,"target":1492},{"edgeType":"contains","source":487,"target":597},{"edgeType":"contains","source":3000,"target":3032},{"edgeType":"calls","source":2031,"target":521},{"edgeType":"calls","source":588,"target":594},{"edgeType":"calls","source":1131,"target":938},{"edgeType":"contains","source":1905,"target":2094},{"edgeType":"contains","source":460,"target":1170},{"edgeType":"contains","source":2299,"target":2301},{"edgeType":"contains","source":3038,"target":3175},{"edgeType":"contains","source":1744,"target":743},{"edgeType":"contains","source":1224,"target":1272},{"edgeType":"contains","source":2640,"target":714},{"edgeType":"contains","source":2266,"target":2267},{"edgeType":"calls","source":1814,"target":641},{"edgeType":"calls","source":909,"target":421},{"edgeType":"contains","source":4,"target":2886},{"edgeType":"imports","source":729,"target":763},{"edgeType":"contains","source":2860,"target":2879},{"edgeType":"contains","source":550,"target":1815},{"edgeType":"calls","source":1105,"target":158},{"edgeType":"calls","source":935,"target":936},{"edgeType":"calls","source":2642,"target":2195},{"edgeType":"imports","source":3072,"target":1064},{"edgeType":"imports","source":2299,"target":2300},{"edgeType":"contains","source":3038,"target":3172},{"edgeType":"contains","source":2151,"target":2393},{"edgeType":"contains","source":763,"target":765},{"edgeType":"calls","source":1058,"target":1090},{"edgeType":"contains","source":730,"target":731},{"edgeType":"contains","source":1935,"target":1954},{"edgeType":"contains","source":1929,"target":2140},{"edgeType":"calls","source":1155,"target":990},{"edgeType":"contains","source":3,"target":1598},{"edgeType":"contains","source":2918,"target":2928},{"edgeType":"calls","source":1483,"target":1128},{"edgeType":"contains","source":1905,"target":2091},{"edgeType":"imports","source":6,"target":183},{"edgeType":"contains","source":2880,"target
":1463},{"edgeType":"contains","source":1224,"target":1269},{"edgeType":"calls","source":2513,"target":1170},{"edgeType":"imports","source":2468,"target":496},{"edgeType":"calls","source":1993,"target":116},{"edgeType":"imports","source":910,"target":698},{"edgeType":"imports","source":3072,"target":270},{"edgeType":"contains","source":550,"target":1818},{"edgeType":"contains","source":2245,"target":2386},{"edgeType":"contains","source":2967,"target":2994},{"edgeType":"contains","source":2468,"target":2609},{"edgeType":"contains","source":2151,"target":2394},{"edgeType":"contains","source":3038,"target":3173},{"edgeType":"calls","source":2695,"target":1871},{"edgeType":"contains","source":2880,"target":1729},{"edgeType":"contains","source":763,"target":764},{"edgeType":"contains","source":793,"target":1155},{"edgeType":"contains","source":1905,"target":2092},{"edgeType":"calls","source":2063,"target":1909},{"edgeType":"imports","source":6,"target":184},{"edgeType":"contains","source":550,"target":2081},{"edgeType":"calls","source":1463,"target":1482},{"edgeType":"imports","source":2935,"target":1081},{"edgeType":"contains","source":2742,"target":2835},{"edgeType":"contains","source":2767,"target":2060},{"edgeType":"calls","source":2645,"target":779},{"edgeType":"calls","source":1811,"target":1793},{"edgeType":"contains","source":1682,"target":2135},{"edgeType":"calls","source":901,"target":935},{"edgeType":"calls","source":2600,"target":2438},{"edgeType":"imports","source":1682,"target":550},{"edgeType":"contains","source":470,"target":865},{"edgeType":"calls","source":2257,"target":383},{"edgeType":"contains","source":1844,"target":1874},{"edgeType":"contains","source":3038,"target":3178},{"edgeType":"contains","source":664,"target":665},{"edgeType":"contains","source":185,"target":187},{"edgeType":"calls","source":1868,"target":1871},{"edgeType":"contains","source":305,"target":430},{"edgeType":"contains","source":1662,"target":1703},{"edgeType":"calls","source":1328,"target":641},{"edgeType":"calls","source":1848,"target":641},{"edgeType":"contains","source":2860,"target":2882},{"edgeType":"calls","source":1995,"target":1105},{"edgeType":"calls","source":905,"target":806},{"edgeType":"imports","source":2935,"target":2946},{"edgeType":"imports","source":3050,"target":1495},{"edgeType":"calls","source":1396,"target":1440},{"edgeType":"contains","source":3035,"target":1951},{"edgeType":"contains","source":3000,"target":3036},{"edgeType":"contains","source":763,"target":766},{"edgeType":"calls","source":535,"target":383},{"edgeType":"calls","source":1851,"target":1340},{"edgeType":"contains","source":1844,"target":1875},{"edgeType":"contains","source":10,"target":325},{"edgeType":"imports","source":1682,"target":564},{"edgeType":"contains","source":3038,"target":3179},{"edgeType":"contains","source":2643,"target":2740},{"edgeType":"contains","source":1913,"target":1586},{"edgeType":"contains","source":185,"target":186},{"edgeType":"calls","source":512,"target":303},{"edgeType":"contains","source":2390,"target":2391},{"edgeType":"contains","source":305,"target":429},{"edgeType":"calls","source":1343,"target":1232},{"edgeType":"contains","source":1878,"target":1879},{"edgeType":"contains","source":2,"target":45},{"edgeType":"calls","source":1697,"target":564},{"edgeType":"imports","source":2858,"target":47},{"edgeType":"calls","source":3095,"target":36},{"edgeType":"contains","source":1844,"target":1872},{"edgeType":"contains","source":185,"target":189},{"edgeType":"contains","source":664,"tar
get":667},{"edgeType":"contains","source":305,"target":433},{"edgeType":"contains","source":994,"target":1007},{"edgeType":"contains","source":3038,"target":3176},{"edgeType":"calls","source":392,"target":327},{"edgeType":"contains","source":2962,"target":775},{"edgeType":"imports","source":2401,"target":2057},{"edgeType":"contains","source":2643,"target":2736},{"edgeType":"contains","source":3,"target":17},{"edgeType":"contains","source":2962,"target":1305},{"edgeType":"contains","source":763,"target":768},{"edgeType":"contains","source":2151,"target":2398},{"edgeType":"contains","source":470,"target":866},{"edgeType":"contains","source":3072,"target":802},{"edgeType":"contains","source":3000,"target":3034},{"edgeType":"calls","source":981,"target":36},{"edgeType":"contains","source":1704,"target":1721},{"edgeType":"contains","source":2767,"target":2065},{"edgeType":"contains","source":1905,"target":2096},{"edgeType":"contains","source":1844,"target":1873},{"edgeType":"contains","source":185,"target":188},{"edgeType":"contains","source":1073,"target":671},{"edgeType":"calls","source":909,"target":418},{"edgeType":"contains","source":3038,"target":3177},{"edgeType":"contains","source":2860,"target":2881},{"edgeType":"calls","source":993,"target":986},{"edgeType":"calls","source":1053,"target":975},{"edgeType":"calls","source":1546,"target":1019},{"edgeType":"contains","source":2,"target":840},{"edgeType":"calls","source":847,"target":491},{"edgeType":"contains","source":2643,"target":2737},{"edgeType":"calls","source":1258,"target":698},{"edgeType":"calls","source":1241,"target":1225},{"edgeType":"imports","source":487,"target":83},{"edgeType":"contains","source":2468,"target":763},{"edgeType":"contains","source":2468,"target":2613},{"edgeType":"calls","source":489,"target":490},{"edgeType":"contains","source":3,"target":16},{"edgeType":"contains","source":1904,"target":2661},{"edgeType":"calls","source":1911,"target":270},{"edgeType":"contains","source":3000,"target":3039},{"edgeType":"calls","source":2025,"target":2021},{"edgeType":"calls","source":1113,"target":696},{"edgeType":"calls","source":1576,"target":877},{"edgeType":"calls","source":704,"target":690},{"edgeType":"contains","source":10,"target":1123},{"edgeType":"calls","source":1772,"target":879},{"edgeType":"contains","source":1605,"target":1624},{"edgeType":"calls","source":2357,"target":2300},{"edgeType":"contains","source":1481,"target":1504},{"edgeType":"contains","source":664,"target":669},{"edgeType":"calls","source":1967,"target":1969},{"edgeType":"contains","source":1905,"target":2102},{"edgeType":"contains","source":734,"target":84},{"edgeType":"contains","source":1093,"target":1111},{"edgeType":"contains","source":3038,"target":3182},{"edgeType":"calls","source":991,"target":250},{"edgeType":"calls","source":75,"target":106},{"edgeType":"calls","source":3115,"target":3114},{"edgeType":"contains","source":1523,"target":1524},{"edgeType":"contains","source":2518,"target":2918},{"edgeType":"calls","source":2030,"target":544},{"edgeType":"calls","source":1731,"target":564},{"edgeType":"contains","source":2301,"target":2510},{"edgeType":"contains","source":2725,"target":2843},{"edgeType":"contains","source":2151,"target":26},{"edgeType":"calls","source":1488,"target":1490},{"edgeType":"imports","source":3050,"target":698},{"edgeType":"contains","source":3046,"target":2407},{"edgeType":"contains","source":854,"target":856},{"edgeType":"contains","source":185,"target":190},{"edgeType":"contains","source":664,"target":668},{"
edgeType":"contains","source":2151,"target":2404},{"edgeType":"calls","source":1290,"target":1286},{"edgeType":"calls","source":1711,"target":1712},{"edgeType":"contains","source":1093,"target":1110},{"edgeType":"calls","source":1092,"target":1081},{"edgeType":"calls","source":934,"target":959},{"edgeType":"calls","source":1999,"target":1240},{"edgeType":"contains","source":1911,"target":1917},{"edgeType":"contains","source":1905,"target":2103},{"edgeType":"calls","source":1348,"target":1337},{"edgeType":"contains","source":3038,"target":3183},{"edgeType":"imports","source":793,"target":901},{"edgeType":"calls","source":2702,"target":2705},{"edgeType":"calls","source":1577,"target":52},{"edgeType":"calls","source":1574,"target":145},{"edgeType":"contains","source":2725,"target":2844},{"edgeType":"calls","source":1327,"target":1199},{"edgeType":"imports","source":840,"target":763},{"edgeType":"contains","source":3125,"target":3126},{"edgeType":"calls","source":771,"target":201},{"edgeType":"contains","source":550,"target":1298},{"edgeType":"contains","source":10,"target":2975},{"edgeType":"calls","source":1034,"target":1033},{"edgeType":"contains","source":2643,"target":2741},{"edgeType":"contains","source":3038,"target":3180},{"edgeType":"contains","source":1704,"target":1724},{"edgeType":"calls","source":512,"target":302},{"edgeType":"contains","source":2176,"target":834},{"edgeType":"calls","source":1385,"target":986},{"edgeType":"calls","source":1987,"target":558},{"edgeType":"contains","source":1844,"target":1877},{"edgeType":"contains","source":1430,"target":1498},{"edgeType":"contains","source":3072,"target":1597},{"edgeType":"calls","source":167,"target":163},{"edgeType":"calls","source":1241,"target":1222},{"edgeType":"calls","source":1199,"target":1202},{"edgeType":"calls","source":1396,"target":116},{"edgeType":"imports","source":2886,"target":1023},{"edgeType":"imports","source":2900,"target":589},{"edgeType":"contains","source":2,"target":51},{"edgeType":"calls","source":1606,"target":1534},{"edgeType":"contains","source":1605,"target":1623},{"edgeType":"contains","source":2640,"target":721},{"edgeType":"contains","source":2582,"target":2519},{"edgeType":"contains","source":3041,"target":3088},{"edgeType":"contains","source":3038,"target":3181},{"edgeType":"contains","source":664,"target":670},{"edgeType":"contains","source":729,"target":769},{"edgeType":"calls","source":539,"target":521},{"edgeType":"calls","source":857,"target":177},{"edgeType":"calls","source":2827,"target":678},{"edgeType":"calls","source":266,"target":264},{"edgeType":"contains","source":1430,"target":1499},{"edgeType":"calls","source":922,"target":540},{"edgeType":"contains","source":2860,"target":2885},{"edgeType":"calls","source":976,"target":980},{"edgeType":"calls","source":522,"target":520},{"edgeType":"contains","source":2301,"target":2509},{"edgeType":"contains","source":840,"target":205},{"edgeType":"calls","source":3083,"target":2549},{"edgeType":"contains","source":1913,"target":2089},{"edgeType":"calls","source":1737,"target":1729},{"edgeType":"contains","source":1348,"target":1370},{"edgeType":"imports","source":1523,"target":184},{"edgeType":"contains","source":1381,"target":1404},{"edgeType":"contains","source":2151,"target":2374},{"edgeType":"calls","source":249,"target":26},{"edgeType":"contains","source":1844,"target":1850},{"edgeType":"contains","source":2151,"target":2375},{"edgeType":"contains","source":1268,"target":1472},{"edgeType":"contains","source":1935,"target":1936},{"edgeType":
"contains","source":1430,"target":1471},{"edgeType":"calls","source":1258,"target":986},{"edgeType":"contains","source":3,"target":2633},{"edgeType":"contains","source":3038,"target":3153},{"edgeType":"contains","source":2929,"target":2832},{"edgeType":"contains","source":2717,"target":1741},{"edgeType":"calls","source":986,"target":698},{"edgeType":"calls","source":1102,"target":537},{"edgeType":"contains","source":1744,"target":1779},{"edgeType":"calls","source":531,"target":533},{"edgeType":"contains","source":1,"target":1902},{"edgeType":"contains","source":2662,"target":539},{"edgeType":"calls","source":877,"target":376},{"edgeType":"imports","source":1523,"target":183},{"edgeType":"calls","source":2005,"target":819},{"edgeType":"contains","source":1348,"target":1371},{"edgeType":"calls","source":788,"target":493},{"edgeType":"contains","source":2151,"target":2376},{"edgeType":"contains","source":3041,"target":3061},{"edgeType":"contains","source":19,"target":22},{"edgeType":"contains","source":4,"target":487},{"edgeType":"contains","source":487,"target":576},{"edgeType":"imports","source":1126,"target":1128},{"edgeType":"calls","source":589,"target":584},{"edgeType":"contains","source":2191,"target":2193},{"edgeType":"imports","source":2852,"target":208},{"edgeType":"contains","source":1902,"target":1903},{"edgeType":"contains","source":1935,"target":1937},{"edgeType":"contains","source":1211,"target":1919},{"edgeType":"contains","source":1744,"target":1780},{"edgeType":"imports","source":1631,"target":1062},{"edgeType":"calls","source":1960,"target":1953},{"edgeType":"contains","source":305,"target":408},{"edgeType":"contains","source":1348,"target":1368},{"edgeType":"calls","source":1939,"target":754},{"edgeType":"contains","source":1631,"target":1844},{"edgeType":"contains","source":2962,"target":486},{"edgeType":"contains","source":550,"target":740},{"edgeType":"contains","source":1268,"target":1470},{"edgeType":"contains","source":50,"target":121},{"edgeType":"imports","source":840,"target":213},{"edgeType":"contains","source":1744,"target":1777},{"edgeType":"contains","source":1430,"target":1469},{"edgeType":"contains","source":1913,"target":2087},{"edgeType":"calls","source":2048,"target":1074},{"edgeType":"calls","source":1341,"target":1322},{"edgeType":"calls","source":589,"target":587},{"edgeType":"contains","source":10,"target":1625},{"edgeType":"contains","source":1211,"target":1916},{"edgeType":"contains","source":1,"target":1904},{"edgeType":"contains","source":2767,"target":2040},{"edgeType":"calls","source":1809,"target":1612},{"edgeType":"contains","source":305,"target":407},{"edgeType":"contains","source":1348,"target":1369},{"edgeType":"contains","source":3000,"target":3009},{"edgeType":"calls","source":1385,"target":486},{"edgeType":"contains","source":550,"target":2061},{"edgeType":"calls","source":1563,"target":1574},{"edgeType":"contains","source":1844,"target":1849},{"edgeType":"calls","source":2392,"target":2300},{"edgeType":"calls","source":2259,"target":2195},{"edgeType":"calls","source":75,"target":136},{"edgeType":"contains","source":1744,"target":1778},{"edgeType":"contains","source":1913,"target":2088},{"edgeType":"calls","source":2031,"target":543},{"edgeType":"calls","source":1201,"target":376},{"edgeType":"calls","source":2735,"target":2766},{"edgeType":"calls","source":1241,"target":1245},{"edgeType":"calls","source":796,"target":770},{"edgeType":"contains","source":1348,"target":1374},{"edgeType":"contains","source":2860,"target":2862},{"edgeType":"
contains","source":1929,"target":540},{"edgeType":"contains","source":2967,"target":2980},{"edgeType":"imports","source":14,"target":184},{"edgeType":"calls","source":2669,"target":52},{"edgeType":"calls","source":486,"target":602},{"edgeType":"calls","source":1728,"target":418},{"edgeType":"imports","source":910,"target":684},{"edgeType":"contains","source":300,"target":301},{"edgeType":"contains","source":460,"target":1683},{"edgeType":"calls","source":937,"target":891},{"edgeType":"calls","source":2002,"target":116},{"edgeType":"calls","source":374,"target":375},{"edgeType":"calls","source":280,"target":118},{"edgeType":"calls","source":2293,"target":2195},{"edgeType":"contains","source":2151,"target":2379},{"edgeType":"calls","source":118,"target":119},{"edgeType":"contains","source":2886,"target":470},{"edgeType":"calls","source":1937,"target":545},{"edgeType":"contains","source":1,"target":585},{"edgeType":"calls","source":75,"target":131},{"edgeType":"contains","source":460,"target":1154},{"edgeType":"contains","source":460,"target":2211},{"edgeType":"contains","source":2717,"target":1745},{"edgeType":"contains","source":1929,"target":541},{"edgeType":"calls","source":1842,"target":1640},{"edgeType":"contains","source":305,"target":409},{"edgeType":"contains","source":1348,"target":1375},{"edgeType":"imports","source":1682,"target":1856},{"edgeType":"calls","source":1563,"target":1568},{"edgeType":"calls","source":486,"target":601},{"edgeType":"calls","source":2590,"target":2502},{"edgeType":"contains","source":3,"target":257},{"edgeType":"imports","source":910,"target":421},{"edgeType":"calls","source":419,"target":36},{"edgeType":"contains","source":1935,"target":1941},{"edgeType":"contains","source":2151,"target":2380},{"edgeType":"contains","source":50,"target":122},{"edgeType":"imports","source":1682,"target":1857},{"edgeType":"imports","source":3050,"target":1201},{"edgeType":"imports","source":2636,"target":2672},{"edgeType":"imports","source":2704,"target":564},{"edgeType":"imports","source":910,"target":418},{"edgeType":"calls","source":1858,"target":90},{"edgeType":"calls","source":1175,"target":1179},{"edgeType":"calls","source":1208,"target":1213},{"edgeType":"imports","source":2767,"target":460},{"edgeType":"contains","source":550,"target":2066},{"edgeType":"calls","source":1463,"target":1500},{"edgeType":"contains","source":1348,"target":1372},{"edgeType":"calls","source":1396,"target":1463},{"edgeType":"contains","source":550,"target":745},{"edgeType":"contains","source":487,"target":584},{"edgeType":"calls","source":1414,"target":641},{"edgeType":"contains","source":10,"target":44},{"edgeType":"contains","source":1268,"target":1474},{"edgeType":"contains","source":3038,"target":3155},{"edgeType":"contains","source":460,"target":1156},{"edgeType":"contains","source":470,"target":846},{"edgeType":"contains","source":2151,"target":2377},{"edgeType":"calls","source":2031,"target":540},{"edgeType":"contains","source":1430,"target":1473},{"edgeType":"imports","source":1682,"target":270},{"edgeType":"contains","source":6,"target":432},{"edgeType":"calls","source":788,"target":756},{"edgeType":"calls","source":2771,"target":1909},{"edgeType":"contains","source":1211,"target":1920},{"edgeType":"calls","source":564,"target":565},{"edgeType":"contains","source":1744,"target":1781},{"edgeType":"contains","source":1844,"target":1852},{"edgeType":"imports","source":1211,"target":1128},{"edgeType":"calls","source":1333,"target":1301},{"edgeType":"contains","source":1224,"target":125
3},{"edgeType":"calls","source":3124,"target":3123},{"edgeType":"contains","source":1348,"target":1373},{"edgeType":"imports","source":14,"target":183},{"edgeType":"contains","source":883,"target":991},{"edgeType":"contains","source":550,"target":1801},{"edgeType":"contains","source":10,"target":2949},{"edgeType":"contains","source":2761,"target":1966},{"edgeType":"contains","source":1268,"target":1475},{"edgeType":"contains","source":2151,"target":2378},{"edgeType":"contains","source":2158,"target":2161},{"edgeType":"contains","source":10,"target":43},{"edgeType":"contains","source":487,"target":582},{"edgeType":"calls","source":2159,"target":270},{"edgeType":"imports","source":1675,"target":486},{"edgeType":"calls","source":486,"target":604},{"edgeType":"calls","source":631,"target":601},{"edgeType":"contains","source":1211,"target":1921},{"edgeType":"contains","source":4,"target":2607},{"edgeType":"contains","source":6,"target":431},{"edgeType":"contains","source":1744,"target":1782},{"edgeType":"contains","source":1348,"target":1378},{"edgeType":"contains","source":844,"target":882},{"edgeType":"calls","source":1018,"target":1019},{"edgeType":"calls","source":1340,"target":1343},{"edgeType":"contains","source":2982,"target":3048},{"edgeType":"calls","source":752,"target":544},{"edgeType":"contains","source":666,"target":1115},{"edgeType":"contains","source":1268,"target":1480},{"edgeType":"calls","source":1918,"target":1923},{"edgeType":"contains","source":550,"target":1804},{"edgeType":"contains","source":2183,"target":2184},{"edgeType":"contains","source":2444,"target":170},{"edgeType":"contains","source":2717,"target":1749},{"edgeType":"calls","source":1167,"target":1156},{"edgeType":"imports","source":1682,"target":1870},{"edgeType":"contains","source":2661,"target":2692},{"edgeType":"contains","source":3000,"target":3018},{"edgeType":"calls","source":2735,"target":116},{"edgeType":"calls","source":378,"target":247},{"edgeType":"contains","source":6,"target":434},{"edgeType":"calls","source":812,"target":270},{"edgeType":"contains","source":305,"target":413},{"edgeType":"calls","source":1463,"target":1493},{"edgeType":"calls","source":1215,"target":196},{"edgeType":"contains","source":2982,"target":3049},{"edgeType":"contains","source":2918,"target":2919},{"edgeType":"calls","source":443,"target":80},{"edgeType":"contains","source":666,"target":1114},{"edgeType":"calls","source":1637,"target":327},{"edgeType":"contains","source":2662,"target":2663},{"edgeType":"contains","source":550,"target":1803},{"edgeType":"contains","source":550,"target":746},{"edgeType":"contains","source":3072,"target":523},{"edgeType":"contains","source":2717,"target":1750},{"edgeType":"contains","source":2880,"target":1718},{"edgeType":"contains","source":470,"target":847},{"edgeType":"calls","source":1226,"target":383},{"edgeType":"contains","source":2183,"target":2185},{"edgeType":"imports","source":2935,"target":1079},{"edgeType":"calls","source":1504,"target":486},{"edgeType":"contains","source":3038,"target":3162},{"edgeType":"contains","source":19,"target":30},{"edgeType":"imports","source":14,"target":198},{"edgeType":"contains","source":1224,"target":1260},{"edgeType":"contains","source":1382,"target":1383},{"edgeType":"calls","source":539,"target":540},{"edgeType":"contains","source":1381,"target":1410},{"edgeType":"calls","source":75,"target":128},{"edgeType":"calls","source":1463,"target":1496},{"edgeType":"contains","source":1348,"target":1376},{"edgeType":"calls","source":1867,"target":1128},{"
edgeType":"calls","source":1311,"target":1187},{"edgeType":"calls","source":1753,"target":1755},{"edgeType":"contains","source":205,"target":81},{"edgeType":"calls","source":2513,"target":1182},{"edgeType":"contains","source":3000,"target":3016},{"edgeType":"contains","source":460,"target":1160},{"edgeType":"calls","source":539,"target":543},{"edgeType":"imports","source":2636,"target":305},{"edgeType":"contains","source":3038,"target":3159},{"edgeType":"contains","source":50,"target":129},{"edgeType":"contains","source":3015,"target":174},{"edgeType":"contains","source":1348,"target":1377},{"edgeType":"calls","source":3136,"target":1161},{"edgeType":"contains","source":1929,"target":543},{"edgeType":"contains","source":910,"target":687},{"edgeType":"contains","source":460,"target":1688},{"edgeType":"calls","source":1241,"target":1242},{"edgeType":"contains","source":1430,"target":1478},{"edgeType":"calls","source":1140,"target":938},{"edgeType":"contains","source":487,"target":587},{"edgeType":"contains","source":10,"target":2954},{"edgeType":"calls","source":2690,"target":1246},{"edgeType":"contains","source":3000,"target":3017},{"edgeType":"calls","source":1324,"target":1311},{"edgeType":"contains","source":2151,"target":2382},{"edgeType":"imports","source":3015,"target":2825},{"edgeType":"imports","source":1929,"target":24},{"edgeType":"calls","source":2481,"target":2440},{"edgeType":"contains","source":1704,"target":1705},{"edgeType":"contains","source":1481,"target":1483},{"edgeType":"contains","source":3038,"target":3160},{"edgeType":"contains","source":51,"target":97},{"edgeType":"contains","source":6,"target":435},{"edgeType":"imports","source":3072,"target":2643},{"edgeType":"contains","source":3108,"target":3110},{"edgeType":"contains","source":484,"target":1740},{"edgeType":"contains","source":2982,"target":3052},{"edgeType":"calls","source":1310,"target":1212},{"edgeType":"imports","source":2886,"target":481},{"edgeType":"contains","source":3038,"target":3166},{"edgeType":"calls","source":991,"target":266},{"edgeType":"contains","source":994,"target":200},{"edgeType":"calls","source":1051,"target":1049},{"edgeType":"imports","source":3072,"target":529},{"edgeType":"contains","source":1268,"target":1484},{"edgeType":"contains","source":14,"target":454},{"edgeType":"contains","source":548,"target":549},{"edgeType":"contains","source":2340,"target":2342},{"edgeType":"imports","source":45,"target":24},{"edgeType":"calls","source":853,"target":845},{"edgeType":"contains","source":3000,"target":3022},{"edgeType":"contains","source":300,"target":2951},{"edgeType":"calls","source":514,"target":519},{"edgeType":"contains","source":1224,"target":1263},{"edgeType":"contains","source":1935,"target":1948},{"edgeType":"contains","source":1381,"target":1417},{"edgeType":"contains","source":2578,"target":2627},{"edgeType":"calls","source":791,"target":124},{"edgeType":"calls","source":938,"target":588},{"edgeType":"calls","source":2053,"target":376},{"edgeType":"contains","source":2767,"target":2053},{"edgeType":"contains","source":1093,"target":1094},{"edgeType":"calls","source":2381,"target":2366},{"edgeType":"contains","source":550,"target":750},{"edgeType":"contains","source":2935,"target":1339},{"edgeType":"contains","source":460,"target":1690},{"edgeType":"contains","source":1662,"target":1691},{"edgeType":"contains","source":3038,"target":3167},{"edgeType":"imports","source":1929,"target":1078},{"edgeType":"contains","source":2607,"target":1201},{"edgeType":"calls","source":1800,"target
":1612},{"edgeType":"calls","source":1852,"target":1850},{"edgeType":"contains","source":487,"target":589},{"edgeType":"calls","source":2017,"target":2020},{"edgeType":"contains","source":2018,"target":2019},{"edgeType":"imports","source":263,"target":402},{"edgeType":"contains","source":3000,"target":3023},{"edgeType":"calls","source":2208,"target":2180},{"edgeType":"contains","source":1929,"target":1871},{"edgeType":"contains","source":460,"target":2218},{"edgeType":"contains","source":2717,"target":1754},{"edgeType":"contains","source":1481,"target":1489},{"edgeType":"contains","source":1224,"target":1264},{"edgeType":"contains","source":1381,"target":1418},{"edgeType":"calls","source":2199,"target":2195},{"edgeType":"calls","source":2769,"target":116},{"edgeType":"contains","source":1935,"target":1949},{"edgeType":"contains","source":2151,"target":2388},{"edgeType":"contains","source":994,"target":995},{"edgeType":"contains","source":487,"target":592},{"edgeType":"contains","source":50,"target":133},{"edgeType":"imports","source":1477,"target":819},{"edgeType":"calls","source":514,"target":521},{"edgeType":"contains","source":14,"target":456},{"edgeType":"contains","source":460,"target":2222},{"edgeType":"contains","source":2661,"target":2695},{"edgeType":"contains","source":4,"target":2880},{"edgeType":"contains","source":3,"target":5},{"edgeType":"contains","source":2880,"target":1719},{"edgeType":"calls","source":758,"target":621},{"edgeType":"contains","source":2717,"target":1751},{"edgeType":"contains","source":134,"target":172},{"edgeType":"contains","source":2967,"target":2986},{"edgeType":"contains","source":1861,"target":1862},{"edgeType":"calls","source":464,"target":486},{"edgeType":"imports","source":2886,"target":743},{"edgeType":"contains","source":1224,"target":1261},{"edgeType":"calls","source":1198,"target":986},{"edgeType":"contains","source":1605,"target":1606},{"edgeType":"imports","source":1477,"target":820},{"edgeType":"contains","source":2742,"target":2826},{"edgeType":"calls","source":545,"target":353},{"edgeType":"contains","source":1382,"target":1384},{"edgeType":"imports","source":1682,"target":807},{"edgeType":"contains","source":550,"target":752},{"edgeType":"calls","source":486,"target":595},{"edgeType":"contains","source":2982,"target":3051},{"edgeType":"contains","source":1093,"target":1096},{"edgeType":"contains","source":3108,"target":3109},{"edgeType":"contains","source":487,"target":591},{"edgeType":"calls","source":2327,"target":2188},{"edgeType":"contains","source":2852,"target":2853},{"edgeType":"calls","source":1051,"target":1050},{"edgeType":"contains","source":14,"target":455},{"edgeType":"contains","source":460,"target":2221},{"edgeType":"contains","source":2717,"target":1752},{"edgeType":"imports","source":2858,"target":24},{"edgeType":"calls","source":3033,"target":3029},{"edgeType":"imports","source":1569,"target":81},{"edgeType":"contains","source":460,"target":1692},{"edgeType":"calls","source":931,"target":806},{"edgeType":"calls","source":2686,"target":2687},{"edgeType":"contains","source":2742,"target":2827},{"edgeType":"contains","source":1381,"target":1416},{"edgeType":"contains","source":1211,"target":1401},{"edgeType":"contains","source":2880,"target":1720}],"nodes":[{"groupName":"","id":1,"nodeType":"Repo","repoName":"pydata#xarray#863e49066ca4d61c9adfe62aca3bf21b90e1af8c"},{"id":2,"name":"xarray/backends","nodeType":"Package"},{"id":3,"name":"","nodeType":"Package"},{"id":4,"name":"xarray/core","nodeType":"Package"},{"id":5,"name"
:"azure-pipelines.yml","nodeType":"TextFile","path":"","text":"variables:\n pytest_extra_flags: ''\n allow_failure: false\n upstream_dev: false\n\njobs:\n\n- job: Linux\n strategy:\n matrix:\n py36-bare-minimum:\n conda_env: py36-bare-minimum\n py36-min-all-deps:\n conda_env: py36-min-all-deps\n py36-min-nep18:\n conda_env: py36-min-nep18\n py36:\n conda_env: py36\n py37:\n conda_env: py37\n py37-upstream-dev:\n conda_env: py37\n upstream_dev: true\n py36-flaky:\n conda_env: py36\n pytest_extra_flags: --run-flaky --run-network-tests\n allow_failure: true\n pool:\n vmImage: 'ubuntu-16.04'\n steps:\n - template: ci/azure/unit-tests.yml\n\n- job: MacOSX\n strategy:\n matrix:\n py36:\n conda_env: py36\n pool:\n vmImage: 'macOS-10.13'\n steps:\n - template: ci/azure/unit-tests.yml\n\n- job: Windows\n strategy:\n matrix:\n py37:\n conda_env: py37-windows\n pool:\n vmImage: 'vs2017-win2016'\n steps:\n - template: ci/azure/unit-tests.yml\n\n- job: LintFlake8\n pool:\n vmImage: 'ubuntu-16.04'\n steps:\n - task: UsePythonVersion@0\n - bash: python -m pip install flake8\n displayName: Install flake8\n - bash: flake8\n displayName: flake8 lint checks\n\n- job: FormattingBlack\n pool:\n vmImage: 'ubuntu-16.04'\n steps:\n - task: UsePythonVersion@0\n - bash: python -m pip install black\n displayName: Install black\n - bash: black --check .\n displayName: black formatting check\n\n- job: TypeChecking\n variables:\n conda_env: py37\n pool:\n vmImage: 'ubuntu-16.04'\n steps:\n - template: ci/azure/install.yml\n - bash: |\n source activate xarray-tests\n mypy .\n displayName: mypy type checks\n\n- job: MinimumVersionsPolicy\n pool:\n vmImage: 'ubuntu-16.04'\n steps:\n - template: ci/azure/add-conda-to-path.yml\n - bash: |\n conda install -y pyyaml\n python ci/min_deps_check.py ci/requirements/py36-bare-minimum.yml\n python ci/min_deps_check.py ci/requirements/py36-min-all-deps.yml\n displayName: minimum versions policy\n\n- job: Docs\n pool:\n vmImage: 'ubuntu-16.04'\n steps:\n - template: ci/azure/install.yml\n parameters:\n env_file: ci/requirements/doc.yml\n - bash: |\n source activate xarray-tests\n # Replicate the exact environment created by the readthedocs CI\n conda install --yes --quiet -c pkgs/main mock pillow sphinx sphinx_rtd_theme\n displayName: Replicate readthedocs CI environment\n - bash: |\n source activate xarray-tests\n cd doc\n sphinx-build -n -j auto -b html -d _build/doctrees . _build/html\n displayName: Build HTML docs\n"},{"fileName":"pynio_.py","filePath":"xarray/backends","id":6,"nodeType":"File","text":"import numpy as np\n\nfrom .. 
import Variable\nfrom ..core import indexing\nfrom ..core.utils import Frozen, FrozenDict\nfrom .common import AbstractDataStore, BackendArray\nfrom .file_manager import CachingFileManager\nfrom .locks import HDF5_LOCK, NETCDFC_LOCK, SerializableLock, combine_locks, ensure_lock\n\n# PyNIO can invoke netCDF libraries internally\n# Add a dedicated lock just in case NCL as well isn't thread-safe.\nNCL_LOCK = SerializableLock()\nPYNIO_LOCK = combine_locks([HDF5_LOCK, NETCDFC_LOCK, NCL_LOCK])\n\n\nclass NioArrayWrapper(BackendArray):\n def __init__(self, variable_name, datastore):\n self.datastore = datastore\n self.variable_name = variable_name\n array = self.get_array()\n self.shape = array.shape\n self.dtype = np.dtype(array.typecode())\n\n def get_array(self, needs_lock=True):\n ds = self.datastore._manager.acquire(needs_lock)\n return ds.variables[self.variable_name]\n\n def __getitem__(self, key):\n return indexing.explicit_indexing_adapter(\n key, self.shape, indexing.IndexingSupport.BASIC, self._getitem\n )\n\n def _getitem(self, key):\n with self.datastore.lock:\n array = self.get_array(needs_lock=False)\n\n if key == () and self.ndim == 0:\n return array.get_value()\n\n return array[key]\n\n\nclass NioDataStore(AbstractDataStore):\n \"\"\"Store for accessing datasets via PyNIO\n \"\"\"\n\n def __init__(self, filename, mode=\"r\", lock=None, **kwargs):\n import Nio\n\n if lock is None:\n lock = PYNIO_LOCK\n self.lock = ensure_lock(lock)\n self._manager = CachingFileManager(\n Nio.open_file, filename, lock=lock, mode=mode, kwargs=kwargs\n )\n # xarray provides its own support for FillValue,\n # so turn off PyNIO's support for the same.\n self.ds.set_option(\"MaskedArrayMode\", \"MaskedNever\")\n\n @property\n def ds(self):\n return self._manager.acquire()\n\n def open_store_variable(self, name, var):\n data = indexing.LazilyOuterIndexedArray(NioArrayWrapper(name, self))\n return Variable(var.dimensions, data, var.attributes)\n\n def get_variables(self):\n return FrozenDict(\n (k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()\n )\n\n def get_attrs(self):\n return Frozen(self.ds.attributes)\n\n def get_dimensions(self):\n return Frozen(self.ds.dimensions)\n\n def get_encoding(self):\n return {\n \"unlimited_dims\": {k for k in self.ds.dimensions if self.ds.unlimited(k)}\n }\n\n def close(self):\n self._manager.close()\n"},{"fileName":"merge.py","filePath":"xarray/core","id":7,"nodeType":"File","text":"from typing import (\n TYPE_CHECKING,\n AbstractSet,\n Any,\n Dict,\n Hashable,\n Iterable,\n List,\n Mapping,\n NamedTuple,\n Optional,\n Sequence,\n Set,\n Tuple,\n Union,\n)\n\nimport pandas as pd\n\nfrom . 
import dtypes, pdcompat\nfrom .alignment import deep_align\nfrom .utils import Frozen, dict_equiv\nfrom .variable import Variable, as_variable, assert_unique_multiindex_level_names\n\nif TYPE_CHECKING:\n from .coordinates import Coordinates\n from .dataarray import DataArray\n from .dataset import Dataset\n\n DimsLike = Union[Hashable, Sequence[Hashable]]\n ArrayLike = Any\n VariableLike = Union[\n ArrayLike,\n Tuple[DimsLike, ArrayLike],\n Tuple[DimsLike, ArrayLike, Mapping],\n Tuple[DimsLike, ArrayLike, Mapping, Mapping],\n ]\n XarrayValue = Union[DataArray, Variable, VariableLike]\n DatasetLike = Union[Dataset, Mapping[Hashable, XarrayValue]]\n CoercibleValue = Union[XarrayValue, pd.Series, pd.DataFrame]\n CoercibleMapping = Union[Dataset, Mapping[Hashable, CoercibleValue]]\n\n\nPANDAS_TYPES = (pd.Series, pd.DataFrame, pdcompat.Panel)\n\n_VALID_COMPAT = Frozen(\n {\n \"identical\": 0,\n \"equals\": 1,\n \"broadcast_equals\": 2,\n \"minimal\": 3,\n \"no_conflicts\": 4,\n \"override\": 5,\n }\n)\n\n\ndef broadcast_dimension_size(variables: List[Variable],) -> Dict[Hashable, int]:\n \"\"\"Extract dimension sizes from a dictionary of variables.\n\n Raises ValueError if any dimensions have different sizes.\n \"\"\"\n dims: Dict[Hashable, int] = {}\n for var in variables:\n for dim, size in zip(var.dims, var.shape):\n if dim in dims and size != dims[dim]:\n raise ValueError(\"index %r not aligned\" % dim)\n dims[dim] = size\n return dims\n\n\nclass MergeError(ValueError):\n \"\"\"Error class for merge failures due to incompatible arguments.\n \"\"\"\n\n # inherits from ValueError for backward compatibility\n # TODO: move this to an xarray.exceptions module?\n\n\ndef unique_variable(\n name: Hashable,\n variables: List[Variable],\n compat: str = \"broadcast_equals\",\n equals: bool = None,\n) -> Variable:\n \"\"\"Return the unique variable from a list of variables or raise MergeError.\n\n Parameters\n ----------\n name : hashable\n Name for this variable.\n variables : list of xarray.Variable\n List of Variable objects, all of which go by the same name in different\n inputs.\n compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional\n Type of equality check to use.\n equals: None or bool,\n corresponding to result of compat test\n\n Returns\n -------\n Variable to use in the result.\n\n Raises\n ------\n MergeError: if any of the variables are not equal.\n \"\"\"\n out = variables[0]\n\n if len(variables) == 1 or compat == \"override\":\n return out\n\n combine_method = None\n\n if compat == \"minimal\":\n compat = \"broadcast_equals\"\n\n if compat == \"broadcast_equals\":\n dim_lengths = broadcast_dimension_size(variables)\n out = out.set_dims(dim_lengths)\n\n if compat == \"no_conflicts\":\n combine_method = \"fillna\"\n\n if equals is None:\n out = out.compute()\n for var in variables[1:]:\n equals = getattr(out, compat)(var)\n if not equals:\n break\n\n if not equals:\n raise MergeError(\n \"conflicting values for variable {!r} on objects to be combined. 
\"\n \"You can skip this check by specifying compat='override'.\".format(name)\n )\n\n if combine_method:\n for var in variables[1:]:\n out = getattr(out, combine_method)(var)\n\n return out\n\n\ndef _assert_compat_valid(compat):\n if compat not in _VALID_COMPAT:\n raise ValueError(\"compat=%r invalid: must be %s\" % (compat, set(_VALID_COMPAT)))\n\n\nMergeElement = Tuple[Variable, Optional[pd.Index]]\n\n\ndef merge_collected(\n grouped: Dict[Hashable, List[MergeElement]],\n prioritized: Mapping[Hashable, MergeElement] = None,\n compat: str = \"minimal\",\n) -> Tuple[Dict[Hashable, Variable], Dict[Hashable, pd.Index]]:\n \"\"\"Merge dicts of variables, while resolving conflicts appropriately.\n\n Parameters\n ----------\n\n Type of equality check to use when checking for conflicts.\n\n Returns\n -------\n Dict with keys taken by the union of keys on list_of_mappings,\n and Variable values corresponding to those that should be found on the\n merged result.\n \"\"\"\n if prioritized is None:\n prioritized = {}\n\n _assert_compat_valid(compat)\n\n merged_vars: Dict[Hashable, Variable] = {}\n merged_indexes: Dict[Hashable, pd.Index] = {}\n\n for name, elements_list in grouped.items():\n if name in prioritized:\n variable, index = prioritized[name]\n merged_vars[name] = variable\n if index is not None:\n merged_indexes[name] = index\n else:\n indexed_elements = [\n (variable, index)\n for variable, index in elements_list\n if index is not None\n ]\n\n if indexed_elements:\n # TODO(shoyer): consider adjusting this logic. Are we really\n # OK throwing away variable without an index in favor of\n # indexed variables, without even checking if values match?\n variable, index = indexed_elements[0]\n for _, other_index in indexed_elements[1:]:\n if not index.equals(other_index):\n raise MergeError(\n \"conflicting values for index %r on objects to be \"\n \"combined:\\nfirst value: %r\\nsecond value: %r\"\n % (name, index, other_index)\n )\n if compat == \"identical\":\n for other_variable, _ in indexed_elements[1:]:\n if not dict_equiv(variable.attrs, other_variable.attrs):\n raise MergeError(\n \"conflicting attribute values on combined \"\n \"variable %r:\\nfirst value: %r\\nsecond value: %r\"\n % (name, variable.attrs, other_variable.attrs)\n )\n merged_vars[name] = variable\n merged_indexes[name] = index\n else:\n variables = [variable for variable, _ in elements_list]\n try:\n merged_vars[name] = unique_variable(name, variables, compat)\n except MergeError:\n if compat != \"minimal\":\n # we need more than \"minimal\" compatibility (for which\n # we drop conflicting coordinates)\n raise\n\n return merged_vars, merged_indexes\n\n\ndef collect_variables_and_indexes(\n list_of_mappings: \"List[DatasetLike]\",\n) -> Dict[Hashable, List[MergeElement]]:\n \"\"\"Collect variables and indexes from list of mappings of xarray objects.\n\n Mappings must either be Dataset objects, or have values of one of the\n following types:\n - an xarray.Variable\n - a tuple `(dims, data[, attrs[, encoding]])` that can be converted in\n an xarray.Variable\n - or an xarray.DataArray\n \"\"\"\n from .dataarray import DataArray\n from .dataset import Dataset\n\n grouped: Dict[Hashable, List[Tuple[Variable, pd.Index]]] = {}\n\n def append(name, variable, index):\n values = grouped.setdefault(name, [])\n values.append((variable, index))\n\n def append_all(variables, indexes):\n for name, variable in variables.items():\n append(name, variable, indexes.get(name))\n\n for mapping in list_of_mappings:\n if isinstance(mapping, 
Dataset):\n append_all(mapping.variables, mapping.indexes)\n continue\n\n for name, variable in mapping.items():\n if isinstance(variable, DataArray):\n coords = variable._coords.copy() # use private API for speed\n indexes = dict(variable.indexes)\n # explicitly overwritten variables should take precedence\n coords.pop(name, None)\n indexes.pop(name, None)\n append_all(coords, indexes)\n\n variable = as_variable(variable, name=name)\n if variable.dims == (name,):\n variable = variable.to_index_variable()\n index = variable.to_index()\n else:\n index = None\n append(name, variable, index)\n\n return grouped\n\n\ndef collect_from_coordinates(\n list_of_coords: \"List[Coordinates]\"\n) -> Dict[Hashable, List[MergeElement]]:\n \"\"\"Collect variables and indexes to be merged from Coordinate objects.\"\"\"\n grouped: Dict[Hashable, List[Tuple[Variable, pd.Index]]] = {}\n\n for coords in list_of_coords:\n variables = coords.variables\n indexes = coords.indexes\n for name, variable in variables.items():\n value = grouped.setdefault(name, [])\n value.append((variable, indexes.get(name)))\n return grouped\n\n\ndef merge_coordinates_without_align(\n objects: \"List[Coordinates]\",\n prioritized: Mapping[Hashable, MergeElement] = None,\n exclude_dims: AbstractSet = frozenset(),\n) -> Tuple[Dict[Hashable, Variable], Dict[Hashable, pd.Index]]:\n \"\"\"Merge variables/indexes from coordinates without automatic alignments.\n\n This function is used for merging coordinate from pre-existing xarray\n objects.\n \"\"\"\n collected = collect_from_coordinates(objects)\n\n if exclude_dims:\n filtered: Dict[Hashable, List[MergeElement]] = {}\n for name, elements in collected.items():\n new_elements = [\n (variable, index)\n for variable, index in elements\n if exclude_dims.isdisjoint(variable.dims)\n ]\n if new_elements:\n filtered[name] = new_elements\n else:\n filtered = collected\n\n return merge_collected(filtered, prioritized)\n\n\ndef determine_coords(\n list_of_mappings: Iterable[\"DatasetLike\"]\n) -> Tuple[Set[Hashable], Set[Hashable]]:\n \"\"\"Given a list of dicts with xarray object values, identify coordinates.\n\n Parameters\n ----------\n list_of_mappings : list of dict or Dataset objects\n Of the same form as the arguments to expand_variable_dicts.\n\n Returns\n -------\n coord_names : set of variable names\n noncoord_names : set of variable names\n All variable found in the input should appear in either the set of\n coordinate or non-coordinate names.\n \"\"\"\n from .dataarray import DataArray\n from .dataset import Dataset\n\n coord_names: Set[Hashable] = set()\n noncoord_names: Set[Hashable] = set()\n\n for mapping in list_of_mappings:\n if isinstance(mapping, Dataset):\n coord_names.update(mapping.coords)\n noncoord_names.update(mapping.data_vars)\n else:\n for name, var in mapping.items():\n if isinstance(var, DataArray):\n coords = set(var._coords) # use private API for speed\n # explicitly overwritten variables should take precedence\n coords.discard(name)\n coord_names.update(coords)\n\n return coord_names, noncoord_names\n\n\ndef coerce_pandas_values(objects: Iterable[\"CoercibleMapping\"]) -> List[\"DatasetLike\"]:\n \"\"\"Convert pandas values found in a list of labeled objects.\n\n Parameters\n ----------\n objects : list of Dataset or mappings\n The mappings may contain any sort of objects coercible to\n xarray.Variables as keys, including pandas objects.\n\n Returns\n -------\n List of Dataset or dictionary objects. 
Any inputs or values in the inputs\n that were pandas objects have been converted into native xarray objects.\n \"\"\"\n from .dataarray import DataArray\n from .dataset import Dataset\n\n out = []\n for obj in objects:\n if isinstance(obj, Dataset):\n variables: \"DatasetLike\" = obj\n else:\n variables = {}\n if isinstance(obj, PANDAS_TYPES):\n obj = dict(obj.iteritems())\n for k, v in obj.items():\n if isinstance(v, PANDAS_TYPES):\n v = DataArray(v)\n variables[k] = v\n out.append(variables)\n return out\n\n\ndef _get_priority_vars_and_indexes(\n objects: List[\"DatasetLike\"], priority_arg: Optional[int], compat: str = \"equals\"\n) -> Dict[Hashable, MergeElement]:\n \"\"\"Extract the priority variable from a list of mappings.\n\n We need this method because in some cases the priority argument itself\n might have conflicting values (e.g., if it is a dict with two DataArray\n values with conflicting coordinate values).\n\n Parameters\n ----------\n objects : list of dictionaries of variables\n Dictionaries in which to find the priority variables.\n priority_arg : int or None\n Integer object whose variable should take priority.\n compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts'}, optional\n Compatibility checks to use when merging variables.\n\n Returns\n -------\n A dictionary of variables and associated indexes (if any) to prioritize.\n \"\"\"\n if priority_arg is None:\n return {}\n\n collected = collect_variables_and_indexes([objects[priority_arg]])\n variables, indexes = merge_collected(collected, compat=compat)\n grouped: Dict[Hashable, MergeElement] = {}\n for name, variable in variables.items():\n grouped[name] = (variable, indexes.get(name))\n return grouped\n\n\ndef merge_coords(\n objects: Iterable[\"CoercibleMapping\"],\n compat: str = \"minimal\",\n join: str = \"outer\",\n priority_arg: Optional[int] = None,\n indexes: Optional[Mapping[Hashable, pd.Index]] = None,\n fill_value: object = dtypes.NA,\n) -> Tuple[Dict[Hashable, Variable], Dict[Hashable, pd.Index]]:\n \"\"\"Merge coordinate variables.\n\n See merge_core below for argument descriptions. 
This works similarly to\n merge_core, except everything we don't worry about whether variables are\n coordinates or not.\n \"\"\"\n _assert_compat_valid(compat)\n coerced = coerce_pandas_values(objects)\n aligned = deep_align(\n coerced, join=join, copy=False, indexes=indexes, fill_value=fill_value\n )\n collected = collect_variables_and_indexes(aligned)\n prioritized = _get_priority_vars_and_indexes(aligned, priority_arg, compat=compat)\n variables, out_indexes = merge_collected(collected, prioritized, compat=compat)\n assert_unique_multiindex_level_names(variables)\n return variables, out_indexes\n\n\ndef merge_data_and_coords(data, coords, compat=\"broadcast_equals\", join=\"outer\"):\n \"\"\"Used in Dataset.__init__.\"\"\"\n objects = [data, coords]\n explicit_coords = coords.keys()\n indexes = dict(_extract_indexes_from_coords(coords))\n return merge_core(\n objects, compat, join, explicit_coords=explicit_coords, indexes=indexes\n )\n\n\ndef _extract_indexes_from_coords(coords):\n \"\"\"Yields the name & index of valid indexes from a mapping of coords\"\"\"\n for name, variable in coords.items():\n variable = as_variable(variable, name=name)\n if variable.dims == (name,):\n yield name, variable.to_index()\n\n\ndef assert_valid_explicit_coords(variables, dims, explicit_coords):\n \"\"\"Validate explicit coordinate names/dims.\n\n Raise a MergeError if an explicit coord shares a name with a dimension\n but is comprised of arbitrary dimensions.\n \"\"\"\n for coord_name in explicit_coords:\n if coord_name in dims and variables[coord_name].dims != (coord_name,):\n raise MergeError(\n \"coordinate %s shares a name with a dataset dimension, but is \"\n \"not a 1D variable along that dimension. This is disallowed \"\n \"by the xarray data model.\" % coord_name\n )\n\n\nclass _MergeResult(NamedTuple):\n variables: Dict[Hashable, Variable]\n coord_names: Set[Hashable]\n dims: Dict[Hashable, int]\n indexes: Dict[Hashable, pd.Index]\n\n\ndef merge_core(\n objects: Iterable[\"CoercibleMapping\"],\n compat: str = \"broadcast_equals\",\n join: str = \"outer\",\n priority_arg: Optional[int] = None,\n explicit_coords: Optional[Sequence] = None,\n indexes: Optional[Mapping[Hashable, pd.Index]] = None,\n fill_value: object = dtypes.NA,\n) -> _MergeResult:\n \"\"\"Core logic for merging labeled objects.\n\n This is not public API.\n\n Parameters\n ----------\n objects : list of mappings\n All values must be convertable to labeled arrays.\n compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional\n Compatibility checks to use when merging variables.\n join : {'outer', 'inner', 'left', 'right'}, optional\n How to combine objects with different indexes.\n priority_arg : integer, optional\n Optional argument in `objects` that takes precedence over the others.\n explicit_coords : set, optional\n An explicit list of variables from `objects` that are coordinates.\n indexes : dict, optional\n Dictionary with values given by pandas.Index objects.\n fill_value : scalar, optional\n Value to use for newly missing values\n\n Returns\n -------\n variables : dict\n Dictionary of Variable objects.\n coord_names : set\n Set of coordinate names.\n dims : dict\n Dictionary mapping from dimension names to sizes.\n\n Raises\n ------\n MergeError if the merge cannot be done successfully.\n \"\"\"\n from .dataset import calculate_dimensions\n\n _assert_compat_valid(compat)\n\n coerced = coerce_pandas_values(objects)\n aligned = deep_align(\n coerced, join=join, copy=False, indexes=indexes, 
fill_value=fill_value\n )\n collected = collect_variables_and_indexes(aligned)\n\n prioritized = _get_priority_vars_and_indexes(aligned, priority_arg, compat=compat)\n variables, out_indexes = merge_collected(collected, prioritized, compat=compat)\n assert_unique_multiindex_level_names(variables)\n\n dims = calculate_dimensions(variables)\n\n coord_names, noncoord_names = determine_coords(coerced)\n if explicit_coords is not None:\n assert_valid_explicit_coords(variables, dims, explicit_coords)\n coord_names.update(explicit_coords)\n for dim, size in dims.items():\n if dim in variables:\n coord_names.add(dim)\n ambiguous_coords = coord_names.intersection(noncoord_names)\n if ambiguous_coords:\n raise MergeError(\n \"unable to determine if these variables should be \"\n \"coordinates or not in the merged result: %s\" % ambiguous_coords\n )\n\n return _MergeResult(variables, coord_names, dims, out_indexes)\n\n\ndef merge(\n objects: Iterable[Union[\"DataArray\", \"CoercibleMapping\"]],\n compat: str = \"no_conflicts\",\n join: str = \"outer\",\n fill_value: object = dtypes.NA,\n) -> \"Dataset\":\n \"\"\"Merge any number of xarray objects into a single Dataset as variables.\n\n Parameters\n ----------\n objects : Iterable[Union[xarray.Dataset, xarray.DataArray, dict]]\n Merge together all variables from these objects. If any of them are\n DataArray objects, they must have a name.\n compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional\n String indicating how to compare variables of the same name for\n potential conflicts:\n\n - 'broadcast_equals': all values must be equal when variables are\n broadcast against each other to ensure common dimensions.\n - 'equals': all values and dimensions must be the same.\n - 'identical': all values, dimensions and attributes must be the\n same.\n - 'no_conflicts': only values which are not null in both datasets\n must be equal. The returned dataset then contains the combination\n of all non-null values.\n - 'override': skip comparing and pick variable from first dataset\n join : {'outer', 'inner', 'left', 'right', 'exact'}, optional\n String indicating how to combine differing indexes in objects.\n\n - 'outer': use the union of object indexes\n - 'inner': use the intersection of object indexes\n - 'left': use indexes from the first object with each dimension\n - 'right': use indexes from the last object with each dimension\n - 'exact': instead of aligning, raise `ValueError` when indexes to be\n aligned are not equal\n - 'override': if indexes are of same size, rewrite indexes to be\n those of the first object with that dimension. Indexes for the same\n dimension must have the same size in all objects.\n fill_value : scalar, optional\n Value to use for newly missing values\n\n Returns\n -------\n Dataset\n Dataset with combined variables from each object.\n\n Examples\n --------\n >>> import xarray as xr\n >>> x = xr.DataArray(\n ... [[1.0, 2.0], [3.0, 5.0]],\n ... dims=(\"lat\", \"lon\"),\n ... coords={\"lat\": [35.0, 40.0], \"lon\": [100.0, 120.0]},\n ... name=\"var1\",\n ... )\n >>> y = xr.DataArray(\n ... [[5.0, 6.0], [7.0, 8.0]],\n ... dims=(\"lat\", \"lon\"),\n ... coords={\"lat\": [35.0, 42.0], \"lon\": [100.0, 150.0]},\n ... name=\"var2\",\n ... )\n >>> z = xr.DataArray(\n ... [[0.0, 3.0], [4.0, 9.0]],\n ... dims=(\"time\", \"lon\"),\n ... coords={\"time\": [30.0, 60.0], \"lon\": [100.0, 150.0]},\n ... name=\"var3\",\n ... 
)\n\n >>> x\n \n array([[1., 2.],\n [3., 5.]])\n Coordinates:\n * lat (lat) float64 35.0 40.0\n * lon (lon) float64 100.0 120.0\n\n >>> y\n \n array([[5., 6.],\n [7., 8.]])\n Coordinates:\n * lat (lat) float64 35.0 42.0\n * lon (lon) float64 100.0 150.0\n\n >>> z\n \n array([[0., 3.],\n [4., 9.]])\n Coordinates:\n * time (time) float64 30.0 60.0\n * lon (lon) float64 100.0 150.0\n\n >>> xr.merge([x, y, z])\n \n Dimensions: (lat: 3, lon: 3, time: 2)\n Coordinates:\n * lat (lat) float64 35.0 40.0 42.0\n * lon (lon) float64 100.0 120.0 150.0\n * time (time) float64 30.0 60.0\n Data variables:\n var1 (lat, lon) float64 1.0 2.0 nan 3.0 5.0 nan nan nan nan\n var2 (lat, lon) float64 5.0 nan 6.0 nan nan nan 7.0 nan 8.0\n var3 (time, lon) float64 0.0 nan 3.0 4.0 nan 9.0\n\n >>> xr.merge([x, y, z], compat='identical')\n \n Dimensions: (lat: 3, lon: 3, time: 2)\n Coordinates:\n * lat (lat) float64 35.0 40.0 42.0\n * lon (lon) float64 100.0 120.0 150.0\n * time (time) float64 30.0 60.0\n Data variables:\n var1 (lat, lon) float64 1.0 2.0 nan 3.0 5.0 nan nan nan nan\n var2 (lat, lon) float64 5.0 nan 6.0 nan nan nan 7.0 nan 8.0\n var3 (time, lon) float64 0.0 nan 3.0 4.0 nan 9.0\n\n >>> xr.merge([x, y, z], compat='equals')\n \n Dimensions: (lat: 3, lon: 3, time: 2)\n Coordinates:\n * lat (lat) float64 35.0 40.0 42.0\n * lon (lon) float64 100.0 120.0 150.0\n * time (time) float64 30.0 60.0\n Data variables:\n var1 (lat, lon) float64 1.0 2.0 nan 3.0 5.0 nan nan nan nan\n var2 (lat, lon) float64 5.0 nan 6.0 nan nan nan 7.0 nan 8.0\n var3 (time, lon) float64 0.0 nan 3.0 4.0 nan 9.0\n\n >>> xr.merge([x, y, z], compat='equals', fill_value=-999.)\n \n Dimensions: (lat: 3, lon: 3, time: 2)\n Coordinates:\n * lat (lat) float64 35.0 40.0 42.0\n * lon (lon) float64 100.0 120.0 150.0\n * time (time) float64 30.0 60.0\n Data variables:\n var1 (lat, lon) float64 1.0 2.0 -999.0 3.0 ... -999.0 -999.0 -999.0\n var2 (lat, lon) float64 5.0 -999.0 6.0 -999.0 ... 
-999.0 7.0 -999.0 8.0\n var3 (time, lon) float64 0.0 -999.0 3.0 4.0 -999.0 9.0\n\n >>> xr.merge([x, y, z], join='override')\n \n Dimensions: (lat: 2, lon: 2, time: 2)\n Coordinates:\n * lat (lat) float64 35.0 40.0\n * lon (lon) float64 100.0 120.0\n * time (time) float64 30.0 60.0\n Data variables:\n var1 (lat, lon) float64 1.0 2.0 3.0 5.0\n var2 (lat, lon) float64 5.0 6.0 7.0 8.0\n var3 (time, lon) float64 0.0 3.0 4.0 9.0\n\n >>> xr.merge([x, y, z], join='inner')\n \n Dimensions: (lat: 1, lon: 1, time: 2)\n Coordinates:\n * lat (lat) float64 35.0\n * lon (lon) float64 100.0\n * time (time) float64 30.0 60.0\n Data variables:\n var1 (lat, lon) float64 1.0\n var2 (lat, lon) float64 5.0\n var3 (time, lon) float64 0.0 4.0\n\n >>> xr.merge([x, y, z], compat='identical', join='inner')\n \n Dimensions: (lat: 1, lon: 1, time: 2)\n Coordinates:\n * lat (lat) float64 35.0\n * lon (lon) float64 100.0\n * time (time) float64 30.0 60.0\n Data variables:\n var1 (lat, lon) float64 1.0\n var2 (lat, lon) float64 5.0\n var3 (time, lon) float64 0.0 4.0\n\n >>> xr.merge([x, y, z], compat='broadcast_equals', join='outer')\n \n Dimensions: (lat: 3, lon: 3, time: 2)\n Coordinates:\n * lat (lat) float64 35.0 40.0 42.0\n * lon (lon) float64 100.0 120.0 150.0\n * time (time) float64 30.0 60.0\n Data variables:\n var1 (lat, lon) float64 1.0 2.0 nan 3.0 5.0 nan nan nan nan\n var2 (lat, lon) float64 5.0 nan 6.0 nan nan nan 7.0 nan 8.0\n var3 (time, lon) float64 0.0 nan 3.0 4.0 nan 9.0\n\n >>> xr.merge([x, y, z], join='exact')\n Traceback (most recent call last):\n ...\n ValueError: indexes along dimension 'lat' are not equal\n\n Raises\n ------\n xarray.MergeError\n If any variables with the same name have conflicting values.\n\n See also\n --------\n concat\n \"\"\"\n from .dataarray import DataArray\n from .dataset import Dataset\n\n dict_like_objects = list()\n for obj in objects:\n if not isinstance(obj, (DataArray, Dataset, dict)):\n raise TypeError(\n \"objects must be an iterable containing only \"\n \"Dataset(s), DataArray(s), and dictionaries.\"\n )\n\n obj = obj.to_dataset() if isinstance(obj, DataArray) else obj\n dict_like_objects.append(obj)\n\n merge_result = merge_core(dict_like_objects, compat, join, fill_value=fill_value)\n merged = Dataset._construct_direct(**merge_result._asdict())\n return merged\n\n\ndef dataset_merge_method(\n dataset: \"Dataset\",\n other: \"CoercibleMapping\",\n overwrite_vars: Union[Hashable, Iterable[Hashable]],\n compat: str,\n join: str,\n fill_value: Any,\n) -> _MergeResult:\n \"\"\"Guts of the Dataset.merge method.\n \"\"\"\n # we are locked into supporting overwrite_vars for the Dataset.merge\n # method due for backwards compatibility\n # TODO: consider deprecating it?\n\n if isinstance(overwrite_vars, Iterable) and not isinstance(overwrite_vars, str):\n overwrite_vars = set(overwrite_vars)\n else:\n overwrite_vars = {overwrite_vars}\n\n if not overwrite_vars:\n objs = [dataset, other]\n priority_arg = None\n elif overwrite_vars == set(other):\n objs = [dataset, other]\n priority_arg = 1\n else:\n other_overwrite: Dict[Hashable, CoercibleValue] = {}\n other_no_overwrite: Dict[Hashable, CoercibleValue] = {}\n for k, v in other.items():\n if k in overwrite_vars:\n other_overwrite[k] = v\n else:\n other_no_overwrite[k] = v\n objs = [dataset, other_no_overwrite, other_overwrite]\n priority_arg = 2\n\n return merge_core(\n objs, compat, join, priority_arg=priority_arg, fill_value=fill_value\n )\n\n\ndef dataset_update_method(\n dataset: \"Dataset\", other: 
\"CoercibleMapping\"\n) -> _MergeResult:\n \"\"\"Guts of the Dataset.update method.\n\n This drops a duplicated coordinates from `other` if `other` is not an\n `xarray.Dataset`, e.g., if it's a dict with DataArray values (GH2068,\n GH2180).\n \"\"\"\n from .dataarray import DataArray\n from .dataset import Dataset\n\n if not isinstance(other, Dataset):\n other = dict(other)\n for key, value in other.items():\n if isinstance(value, DataArray):\n # drop conflicting coordinates\n coord_names = [\n c\n for c in value.coords\n if c not in value.dims and c in dataset.coords\n ]\n if coord_names:\n other[key] = value.drop(coord_names)\n\n return merge_core([dataset, other], priority_arg=1, indexes=dataset.indexes)\n"},{"fileName":"nanops.py","filePath":"xarray/core","id":8,"nodeType":"File","text":"import numpy as np\n\nfrom . import dtypes, nputils, utils\nfrom .duck_array_ops import _dask_or_eager_func, count, fillna, isnull, where_method\nfrom .pycompat import dask_array_type\n\ntry:\n import dask.array as dask_array\nexcept ImportError:\n dask_array = None\n\n\ndef _replace_nan(a, val):\n \"\"\"\n replace nan in a by val, and returns the replaced array and the nan\n position\n \"\"\"\n mask = isnull(a)\n return where_method(val, mask, a), mask\n\n\ndef _maybe_null_out(result, axis, mask, min_count=1):\n \"\"\"\n xarray version of pandas.core.nanops._maybe_null_out\n \"\"\"\n if hasattr(axis, \"__len__\"): # if tuple or list\n raise ValueError(\n \"min_count is not available for reduction \" \"with more than one dimensions.\"\n )\n\n if axis is not None and getattr(result, \"ndim\", False):\n null_mask = (mask.shape[axis] - mask.sum(axis) - min_count) < 0\n if null_mask.any():\n dtype, fill_value = dtypes.maybe_promote(result.dtype)\n result = result.astype(dtype)\n result[null_mask] = fill_value\n\n elif getattr(result, \"dtype\", None) not in dtypes.NAT_TYPES:\n null_mask = mask.size - mask.sum()\n if null_mask < min_count:\n result = np.nan\n\n return result\n\n\ndef _nan_argminmax_object(func, fill_value, value, axis=None, **kwargs):\n \"\"\" In house nanargmin, nanargmax for object arrays. 
Always return integer\n type\n \"\"\"\n valid_count = count(value, axis=axis)\n value = fillna(value, fill_value)\n data = _dask_or_eager_func(func)(value, axis=axis, **kwargs)\n\n # TODO This will evaluate dask arrays and might be costly.\n if (valid_count == 0).any():\n raise ValueError(\"All-NaN slice encountered\")\n\n return data\n\n\ndef _nan_minmax_object(func, fill_value, value, axis=None, **kwargs):\n \"\"\" In house nanmin and nanmax for object array \"\"\"\n valid_count = count(value, axis=axis)\n filled_value = fillna(value, fill_value)\n data = getattr(np, func)(filled_value, axis=axis, **kwargs)\n if not hasattr(data, \"dtype\"): # scalar case\n data = fill_value if valid_count == 0 else data\n # we've computed a single min, max value of type object.\n # don't let np.array turn a tuple back into an array\n return utils.to_0d_object_array(data)\n return where_method(data, valid_count != 0)\n\n\ndef nanmin(a, axis=None, out=None):\n if a.dtype.kind == \"O\":\n return _nan_minmax_object(\"min\", dtypes.get_pos_infinity(a.dtype), a, axis)\n\n module = dask_array if isinstance(a, dask_array_type) else nputils\n return module.nanmin(a, axis=axis)\n\n\ndef nanmax(a, axis=None, out=None):\n if a.dtype.kind == \"O\":\n return _nan_minmax_object(\"max\", dtypes.get_neg_infinity(a.dtype), a, axis)\n\n module = dask_array if isinstance(a, dask_array_type) else nputils\n return module.nanmax(a, axis=axis)\n\n\ndef nanargmin(a, axis=None):\n if a.dtype.kind == \"O\":\n fill_value = dtypes.get_pos_infinity(a.dtype)\n return _nan_argminmax_object(\"argmin\", fill_value, a, axis=axis)\n\n module = dask_array if isinstance(a, dask_array_type) else nputils\n return module.nanargmin(a, axis=axis)\n\n\ndef nanargmax(a, axis=None):\n if a.dtype.kind == \"O\":\n fill_value = dtypes.get_neg_infinity(a.dtype)\n return _nan_argminmax_object(\"argmax\", fill_value, a, axis=axis)\n\n module = dask_array if isinstance(a, dask_array_type) else nputils\n return module.nanargmax(a, axis=axis)\n\n\ndef nansum(a, axis=None, dtype=None, out=None, min_count=None):\n a, mask = _replace_nan(a, 0)\n result = _dask_or_eager_func(\"sum\")(a, axis=axis, dtype=dtype)\n if min_count is not None:\n return _maybe_null_out(result, axis, mask, min_count)\n else:\n return result\n\n\ndef _nanmean_ddof_object(ddof, value, axis=None, dtype=None, **kwargs):\n \"\"\" In house nanmean. 
ddof argument will be used in _nanvar method \"\"\"\n from .duck_array_ops import count, fillna, _dask_or_eager_func, where_method\n\n valid_count = count(value, axis=axis)\n value = fillna(value, 0)\n # As dtype inference is impossible for object dtype, we assume float\n # https://github.com/dask/dask/issues/3162\n if dtype is None and value.dtype.kind == \"O\":\n dtype = value.dtype if value.dtype.kind in [\"cf\"] else float\n\n data = _dask_or_eager_func(\"sum\")(value, axis=axis, dtype=dtype, **kwargs)\n data = data / (valid_count - ddof)\n return where_method(data, valid_count != 0)\n\n\ndef nanmean(a, axis=None, dtype=None, out=None):\n if a.dtype.kind == \"O\":\n return _nanmean_ddof_object(0, a, axis=axis, dtype=dtype)\n\n if isinstance(a, dask_array_type):\n return dask_array.nanmean(a, axis=axis, dtype=dtype)\n\n return np.nanmean(a, axis=axis, dtype=dtype)\n\n\ndef nanmedian(a, axis=None, out=None):\n return _dask_or_eager_func(\"nanmedian\", eager_module=nputils)(a, axis=axis)\n\n\ndef _nanvar_object(value, axis=None, ddof=0, keepdims=False, **kwargs):\n value_mean = _nanmean_ddof_object(\n ddof=0, value=value, axis=axis, keepdims=True, **kwargs\n )\n squared = (value.astype(value_mean.dtype) - value_mean) ** 2\n return _nanmean_ddof_object(ddof, squared, axis=axis, keepdims=keepdims, **kwargs)\n\n\ndef nanvar(a, axis=None, dtype=None, out=None, ddof=0):\n if a.dtype.kind == \"O\":\n return _nanvar_object(a, axis=axis, dtype=dtype, ddof=ddof)\n\n return _dask_or_eager_func(\"nanvar\", eager_module=nputils)(\n a, axis=axis, dtype=dtype, ddof=ddof\n )\n\n\ndef nanstd(a, axis=None, dtype=None, out=None, ddof=0):\n return _dask_or_eager_func(\"nanstd\", eager_module=nputils)(\n a, axis=axis, dtype=dtype, ddof=ddof\n )\n\n\ndef nanprod(a, axis=None, dtype=None, out=None, min_count=None):\n a, mask = _replace_nan(a, 1)\n result = _dask_or_eager_func(\"nanprod\")(a, axis=axis, dtype=dtype, out=out)\n if min_count is not None:\n return _maybe_null_out(result, axis, mask, min_count)\n else:\n return result\n\n\ndef nancumsum(a, axis=None, dtype=None, out=None):\n return _dask_or_eager_func(\"nancumsum\", eager_module=nputils)(\n a, axis=axis, dtype=dtype\n )\n\n\ndef nancumprod(a, axis=None, dtype=None, out=None):\n return _dask_or_eager_func(\"nancumprod\", eager_module=nputils)(\n a, axis=axis, dtype=dtype\n )\n"},{"fileName":"lru_cache.py","filePath":"xarray/backends","id":9,"nodeType":"File","text":"import threading\nfrom collections import OrderedDict\nfrom typing import Any, Callable, Iterator, MutableMapping, Optional, TypeVar\n\nK = TypeVar(\"K\")\nV = TypeVar(\"V\")\n\n\nclass LRUCache(MutableMapping[K, V]):\n \"\"\"Thread-safe LRUCache based on an OrderedDict.\n\n All dict operations (__getitem__, __setitem__, __contains__) update the\n priority of the relevant key and take O(1) time. 
The dict is iterated over\n in order from the oldest to newest key, which means that a complete pass\n over the dict should not affect the order of any entries.\n\n When a new item is set and the maximum size of the cache is exceeded, the\n oldest item is dropped and called with ``on_evict(key, value)``.\n\n The ``maxsize`` property can be used to view or adjust the capacity of\n the cache, e.g., ``cache.maxsize = new_size``.\n \"\"\"\n\n _cache: \"OrderedDict[K, V]\"\n _maxsize: int\n _lock: threading.RLock\n _on_evict: Optional[Callable[[K, V], Any]]\n\n __slots__ = (\"_cache\", \"_lock\", \"_maxsize\", \"_on_evict\")\n\n def __init__(self, maxsize: int, on_evict: Callable[[K, V], Any] = None):\n \"\"\"\n Parameters\n ----------\n maxsize : int\n Integer maximum number of items to hold in the cache.\n on_evict: callable, optional\n Function to call like ``on_evict(key, value)`` when items are\n evicted.\n \"\"\"\n if not isinstance(maxsize, int):\n raise TypeError(\"maxsize must be an integer\")\n if maxsize < 0:\n raise ValueError(\"maxsize must be non-negative\")\n self._maxsize = maxsize\n self._cache = OrderedDict()\n self._lock = threading.RLock()\n self._on_evict = on_evict\n\n def __getitem__(self, key: K) -> V:\n # record recent use of the key by moving it to the front of the list\n with self._lock:\n value = self._cache[key]\n self._cache.move_to_end(key)\n return value\n\n def _enforce_size_limit(self, capacity: int) -> None:\n \"\"\"Shrink the cache if necessary, evicting the oldest items.\n \"\"\"\n while len(self._cache) > capacity:\n key, value = self._cache.popitem(last=False)\n if self._on_evict is not None:\n self._on_evict(key, value)\n\n def __setitem__(self, key: K, value: V) -> None:\n with self._lock:\n if key in self._cache:\n # insert the new value at the end\n del self._cache[key]\n self._cache[key] = value\n elif self._maxsize:\n # make room if necessary\n self._enforce_size_limit(self._maxsize - 1)\n self._cache[key] = value\n elif self._on_evict is not None:\n # not saving, immediately evict\n self._on_evict(key, value)\n\n def __delitem__(self, key: K) -> None:\n del self._cache[key]\n\n def __iter__(self) -> Iterator[K]:\n # create a list, so accessing the cache during iteration cannot change\n # the iteration order\n return iter(list(self._cache))\n\n def __len__(self) -> int:\n return len(self._cache)\n\n @property\n def maxsize(self) -> int:\n \"\"\"Maximum number of items can be held in the cache.\"\"\"\n return self._maxsize\n\n @maxsize.setter\n def maxsize(self, size: int) -> None:\n \"\"\"Resize the cache, evicting the oldest items if necessary.\"\"\"\n if size < 0:\n raise ValueError(\"maxsize must be non-negative\")\n with self._lock:\n self._enforce_size_limit(size)\n self._maxsize = size\n"},{"id":10,"name":"doc","nodeType":"Package"},{"id":11,"name":"pandas.rst","nodeType":"TextFile","path":"doc","text":".. _pandas:\n\n===================\nWorking with pandas\n===================\n\nOne of the most important features of xarray is the ability to convert to and\nfrom :py:mod:`pandas` objects to interact with the rest of the PyData\necosystem. For example, for plotting labeled data, we highly recommend\nusing the visualization `built in to pandas itself`__ or provided by the pandas\naware libraries such as `Seaborn`__.\n\n__ http://pandas.pydata.org/pandas-docs/stable/visualization.html\n__ http://stanford.edu/~mwaskom/software/seaborn/\n\n.. 
ipython:: python\n :suppress:\n\n import numpy as np\n import pandas as pd\n import xarray as xr\n np.random.seed(123456)\n\nHierarchical and tidy data\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nTabular data is easiest to work with when it meets the criteria for\n`tidy data`__:\n\n* Each column holds a different variable.\n* Each rows holds a different observation.\n\n__ http://www.jstatsoft.org/v59/i10/\n\nIn this \"tidy data\" format, we can represent any :py:class:`~xarray.Dataset` and\n:py:class:`~xarray.DataArray` in terms of :py:class:`pandas.DataFrame` and\n:py:class:`pandas.Series`, respectively (and vice-versa). The representation\nworks by flattening non-coordinates to 1D, and turning the tensor product of\ncoordinate indexes into a :py:class:`pandas.MultiIndex`.\n\nDataset and DataFrame\n---------------------\n\nTo convert any dataset to a ``DataFrame`` in tidy form, use the\n:py:meth:`Dataset.to_dataframe() ` method:\n\n.. ipython:: python\n\n ds = xr.Dataset({'foo': (('x', 'y'), np.random.randn(2, 3))},\n coords={'x': [10, 20], 'y': ['a', 'b', 'c'],\n 'along_x': ('x', np.random.randn(2)),\n 'scalar': 123})\n ds\n df = ds.to_dataframe()\n df\n\nWe see that each variable and coordinate in the Dataset is now a column in the\nDataFrame, with the exception of indexes which are in the index.\nTo convert the ``DataFrame`` to any other convenient representation,\nuse ``DataFrame`` methods like :py:meth:`~pandas.DataFrame.reset_index`,\n:py:meth:`~pandas.DataFrame.stack` and :py:meth:`~pandas.DataFrame.unstack`.\n\nFor datasets containing dask arrays where the data should be lazily loaded, see the\n:py:meth:`Dataset.to_dask_dataframe() ` method.\n\nTo create a ``Dataset`` from a ``DataFrame``, use the\n:py:meth:`~xarray.Dataset.from_dataframe` class method or the equivalent\n:py:meth:`pandas.DataFrame.to_xarray ` method:\n\n.. ipython:: python\n\n xr.Dataset.from_dataframe(df)\n\nNotice that that dimensions of variables in the ``Dataset`` have now\nexpanded after the round-trip conversion to a ``DataFrame``. This is because\nevery object in a ``DataFrame`` must have the same indices, so we need to\nbroadcast the data of each array to the full size of the new ``MultiIndex``.\n\nLikewise, all the coordinates (other than indexes) ended up as variables,\nbecause pandas does not distinguish non-index coordinates.\n\nDataArray and Series\n--------------------\n\n``DataArray`` objects have a complementary representation in terms of a\n:py:class:`pandas.Series`. Using a Series preserves the ``Dataset`` to\n``DataArray`` relationship, because ``DataFrames`` are dict-like containers\nof ``Series``. The methods are very similar to those for working with\nDataFrames:\n\n.. ipython:: python\n\n s = ds['foo'].to_series()\n s\n # or equivalently, with Series.to_xarray()\n xr.DataArray.from_series(s)\n\nBoth the ``from_series`` and ``from_dataframe`` methods use reindexing, so they\nwork even if not the hierarchical index is not a full tensor product:\n\n.. ipython:: python\n\n s[::2]\n s[::2].to_xarray()\n\nMulti-dimensional data\n~~~~~~~~~~~~~~~~~~~~~~\n\nTidy data is great, but it sometimes you want to preserve dimensions instead of\nautomatically stacking them into a ``MultiIndex``.\n\n:py:meth:`DataArray.to_pandas() ` is a shortcut that\nlets you convert a DataArray directly into a pandas object with the same\ndimensionality (i.e., a 1D array is converted to a :py:class:`~pandas.Series`,\n2D to :py:class:`~pandas.DataFrame` and 3D to :py:class:`~pandas.Panel`):\n\n.. 
ipython:: python\n\n arr = xr.DataArray(np.random.randn(2, 3),\n coords=[('x', [10, 20]), ('y', ['a', 'b', 'c'])])\n df = arr.to_pandas()\n df\n\nTo perform the inverse operation of converting any pandas objects into a data\narray with the same shape, simply use the :py:class:`~xarray.DataArray`\nconstructor:\n\n.. ipython:: python\n\n xr.DataArray(df)\n\nBoth the ``DataArray`` and ``Dataset`` constructors directly convert pandas\nobjects into xarray objects with the same shape. This means that they\npreserve all use of multi-indexes:\n\n.. ipython:: python\n\n index = pd.MultiIndex.from_arrays([['a', 'a', 'b'], [0, 1, 2]],\n names=['one', 'two'])\n df = pd.DataFrame({'x': 1, 'y': 2}, index=index)\n ds = xr.Dataset(df)\n ds\n\nHowever, you will need to set dimension names explicitly, either with the\n``dims`` argument on in the ``DataArray`` constructor or by calling\n:py:class:`~xarray.Dataset.rename` on the new object.\n\n.. _panel transition:\n\nTransitioning from pandas.Panel to xarray\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n``Panel``, pandas' data structure for 3D arrays, has always\nbeen a second class data structure compared to the Series and DataFrame. To\nallow pandas developers to focus more on its core functionality built around\nthe DataFrame, pandas has deprecated ``Panel``. It will be removed in pandas\n0.25.\n\nxarray has most of ``Panel``'s features, a more explicit API (particularly around\nindexing), and the ability to scale to >3 dimensions with the same interface.\n\nAs discussed :ref:`elsewhere ` in the docs, there are two primary data structures in\nxarray: ``DataArray`` and ``Dataset``. You can imagine a ``DataArray`` as a\nn-dimensional pandas ``Series`` (i.e. a single typed array), and a ``Dataset``\nas the ``DataFrame`` equivalent (i.e. a dict of aligned ``DataArray`` objects).\n\nSo you can represent a Panel, in two ways:\n\n- As a 3-dimensional ``DataArray``,\n- Or as a ``Dataset`` containing a number of 2-dimensional DataArray objects.\n\nLet's take a look:\n\n.. ipython:: python\n\n data = np.random.RandomState(0).rand(2, 3, 4)\n items = list('ab')\n major_axis = list('mno')\n minor_axis = pd.date_range(start='2000', periods=4, name='date')\n\nWith old versions of pandas (prior to 0.25), this could stored in a ``Panel``:\n\n.. ipython::\n :verbatim:\n\n In [1]: pd.Panel(data, items, major_axis, minor_axis)\n Out[1]:\n \n Dimensions: 2 (items) x 3 (major_axis) x 4 (minor_axis)\n Items axis: a to b\n Major_axis axis: m to o\n Minor_axis axis: 2000-01-01 00:00:00 to 2000-01-04 00:00:00\n\nTo put this data in a ``DataArray``, write:\n\n.. ipython:: python\n\n array = xr.DataArray(data, [items, major_axis, minor_axis])\n array\n\nAs you can see, there are three dimensions (each is also a coordinate). Two of\nthe axes of were unnamed, so have been assigned ``dim_0`` and ``dim_1``\nrespectively, while the third retains its name ``date``.\n\nYou can also easily convert this data into ``Dataset``:\n\n.. ipython:: python\n\n array.to_dataset(dim='dim_0')\n\nHere, there are two data variables, each representing a DataFrame on panel's\n``items`` axis, and labelled as such. Each variable is a 2D array of the\nrespective values along the ``items`` dimension.\n\nWhile the xarray docs are relatively complete, a few items stand out for Panel users:\n\n- A DataArray's data is stored as a numpy array, and so can only contain a single\n type. As a result, a Panel that contains :py:class:`~pandas.DataFrame` objects\n with multiple types will be converted to ``dtype=object``. 
A ``Dataset`` of\n multiple ``DataArray`` objects each with its own dtype will allow original\n types to be preserved.\n- :ref:`Indexing ` is similar to pandas, but more explicit and\n leverages xarray's naming of dimensions.\n- Because of those features, making much higher dimensional data is very\n practical.\n- Variables in ``Dataset`` objects can use a subset of its dimensions. For\n example, you can have one dataset with Person x Score x Time, and another with\n Person x Score.\n- You can use coordinates are used for both dimensions and for variables which\n _label_ the data variables, so you could have a coordinate Age, that labelled\n the Person dimension of a Dataset of Person x Score x Time.\n\nWhile xarray may take some getting used to, it's worth it! If anything is unclear,\nplease post an issue on `GitHub `__ or\n`StackOverflow `__,\nand we'll endeavor to respond to the specific case or improve the general docs.\n"},{"id":12,"name":"doc/examples","nodeType":"Package"},{"id":13,"name":"multidimensional-coords.rst","nodeType":"TextFile","path":"doc/examples","text":".. _examples.multidim:\n\nWorking with Multidimensional Coordinates\n=========================================\n\nAuthor: `Ryan Abernathey `__\n\nMany datasets have *physical coordinates* which differ from their\n*logical coordinates*. Xarray provides several ways to plot and analyze\nsuch datasets.\n\n\n.. ipython:: python\n\n import numpy as np\n import pandas as pd\n import xarray as xr\n import netCDF4\n import cartopy.crs as ccrs\n import matplotlib.pyplot as plt\n\nAs an example, consider this dataset from the\n`xarray-data `__ repository.\n\n\n.. ipython:: python\n\n ds = xr.tutorial.open_dataset('rasm').load()\n ds\n\nIn this example, the *logical coordinates* are ``x`` and ``y``, while\nthe *physical coordinates* are ``xc`` and ``yc``, which represent the\nlatitudes and longitude of the data.\n\n\n.. ipython:: python\n\n ds.xc.attrs\n ds.yc.attrs\n\n\nPlotting\n--------\n\nLet's examine these coordinate variables by plotting them.\n\n.. ipython:: python\n\n fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(9,3))\n ds.xc.plot(ax=ax1);\n @savefig xarray_multidimensional_coords_8_2.png width=100%\n ds.yc.plot(ax=ax2);\n\nNote that the variables ``xc`` (longitude) and ``yc`` (latitude) are\ntwo-dimensional scalar fields.\n\nIf we try to plot the data variable ``Tair``, by default we get the\nlogical coordinates.\n\n.. ipython:: python\n :suppress:\n\n f = plt.figure(figsize=(6, 4))\n\n.. ipython:: python\n\n @savefig xarray_multidimensional_coords_10_1.png width=5in\n ds.Tair[0].plot();\n\n\nIn order to visualize the data on a conventional latitude-longitude\ngrid, we can take advantage of xarray's ability to apply\n`cartopy `__ map projections.\n\n.. ipython:: python\n\n plt.figure(figsize=(7,2));\n ax = plt.axes(projection=ccrs.PlateCarree());\n ds.Tair[0].plot.pcolormesh(ax=ax, transform=ccrs.PlateCarree(),\n x='xc', y='yc', add_colorbar=False);\n @savefig xarray_multidimensional_coords_12_0.png width=100%\n ax.coastlines();\n\nMultidimensional Groupby\n------------------------\n\nThe above example allowed us to visualize the data on a regular\nlatitude-longitude grid. But what if we want to do a calculation that\ninvolves grouping over one of these physical coordinates (rather than\nthe logical coordinates), for example, calculating the mean temperature\nat each latitude. This can be achieved using xarray's ``groupby``\nfunction, which accepts multidimensional variables. 
By default,\n``groupby`` will use every unique value in the variable, which is\nprobably not what we want. Instead, we can use the ``groupby_bins``\nfunction to specify the output coordinates of the group.\n\n.. ipython:: python\n :suppress:\n\n f = plt.figure(figsize=(6, 4.5))\n\n.. ipython:: python\n\n # define two-degree wide latitude bins\n lat_bins = np.arange(0, 91, 2)\n # define a label for each bin corresponding to the central latitude\n lat_center = np.arange(1, 90, 2)\n # group according to those bins and take the mean\n Tair_lat_mean = (ds.Tair.groupby_bins('xc', lat_bins, labels=lat_center)\n\t .mean(xr.ALL_DIMS))\n # plot the result\n @savefig xarray_multidimensional_coords_14_1.png width=5in\n Tair_lat_mean.plot();\n\n\nNote that the resulting coordinate for the ``groupby_bins`` operation\ngot the ``_bins`` suffix appended: ``xc_bins``. This help us distinguish\nit from the original multidimensional variable ``xc``.\n"},{"fileName":"h5netcdf_.py","filePath":"xarray/backends","id":14,"nodeType":"File","text":"import functools\n\nimport numpy as np\n\nfrom .. import Variable\nfrom ..core import indexing\nfrom ..core.utils import FrozenDict\nfrom .common import WritableCFDataStore\nfrom .file_manager import CachingFileManager\nfrom .locks import HDF5_LOCK, combine_locks, ensure_lock, get_write_lock\nfrom .netCDF4_ import (\n BaseNetCDF4Array,\n _encode_nc4_variable,\n _extract_nc4_variable_encoding,\n _get_datatype,\n _nc4_require_group,\n)\n\n\nclass H5NetCDFArrayWrapper(BaseNetCDF4Array):\n def get_array(self, needs_lock=True):\n ds = self.datastore._acquire(needs_lock)\n variable = ds.variables[self.variable_name]\n return variable\n\n def __getitem__(self, key):\n return indexing.explicit_indexing_adapter(\n key, self.shape, indexing.IndexingSupport.OUTER_1VECTOR, self._getitem\n )\n\n def _getitem(self, key):\n # h5py requires using lists for fancy indexing:\n # https://github.com/h5py/h5py/issues/992\n key = tuple(list(k) if isinstance(k, np.ndarray) else k for k in key)\n with self.datastore.lock:\n array = self.get_array(needs_lock=False)\n return array[key]\n\n\ndef maybe_decode_bytes(txt):\n if isinstance(txt, bytes):\n return txt.decode(\"utf-8\")\n else:\n return txt\n\n\ndef _read_attributes(h5netcdf_var):\n # GH451\n # to ensure conventions decoding works properly on Python 3, decode all\n # bytes attributes to strings\n attrs = {}\n for k, v in h5netcdf_var.attrs.items():\n if k not in [\"_FillValue\", \"missing_value\"]:\n v = maybe_decode_bytes(v)\n attrs[k] = v\n return attrs\n\n\n_extract_h5nc_encoding = functools.partial(\n _extract_nc4_variable_encoding, lsd_okay=False, h5py_okay=True, backend=\"h5netcdf\"\n)\n\n\ndef _h5netcdf_create_group(dataset, name):\n return dataset.create_group(name)\n\n\nclass H5NetCDFStore(WritableCFDataStore):\n \"\"\"Store for reading and writing data via h5netcdf\n \"\"\"\n\n def __init__(\n self,\n filename,\n mode=\"r\",\n format=None,\n group=None,\n lock=None,\n autoclose=False,\n invalid_netcdf=None,\n ):\n import h5netcdf\n\n if format not in [None, \"NETCDF4\"]:\n raise ValueError(\"invalid format for h5netcdf backend\")\n\n kwargs = {\"invalid_netcdf\": invalid_netcdf}\n\n self._manager = CachingFileManager(\n h5netcdf.File, filename, mode=mode, kwargs=kwargs\n )\n\n if lock is None:\n if mode == \"r\":\n lock = HDF5_LOCK\n else:\n lock = combine_locks([HDF5_LOCK, get_write_lock(filename)])\n\n self._group = group\n self.format = format\n self._filename = filename\n self._mode = mode\n self.lock = ensure_lock(lock)\n 
self.autoclose = autoclose\n\n def _acquire(self, needs_lock=True):\n with self._manager.acquire_context(needs_lock) as root:\n ds = _nc4_require_group(\n root, self._group, self._mode, create_group=_h5netcdf_create_group\n )\n return ds\n\n @property\n def ds(self):\n return self._acquire()\n\n def open_store_variable(self, name, var):\n import h5py\n\n dimensions = var.dimensions\n data = indexing.LazilyOuterIndexedArray(H5NetCDFArrayWrapper(name, self))\n attrs = _read_attributes(var)\n\n # netCDF4 specific encoding\n encoding = {\n \"chunksizes\": var.chunks,\n \"fletcher32\": var.fletcher32,\n \"shuffle\": var.shuffle,\n }\n # Convert h5py-style compression options to NetCDF4-Python\n # style, if possible\n if var.compression == \"gzip\":\n encoding[\"zlib\"] = True\n encoding[\"complevel\"] = var.compression_opts\n elif var.compression is not None:\n encoding[\"compression\"] = var.compression\n encoding[\"compression_opts\"] = var.compression_opts\n\n # save source so __repr__ can detect if it's local or not\n encoding[\"source\"] = self._filename\n encoding[\"original_shape\"] = var.shape\n\n vlen_dtype = h5py.check_dtype(vlen=var.dtype)\n if vlen_dtype is str:\n encoding[\"dtype\"] = str\n elif vlen_dtype is not None: # pragma: no cover\n # xarray doesn't support writing arbitrary vlen dtypes yet.\n pass\n else:\n encoding[\"dtype\"] = var.dtype\n\n return Variable(dimensions, data, attrs, encoding)\n\n def get_variables(self):\n return FrozenDict(\n (k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()\n )\n\n def get_attrs(self):\n return FrozenDict(_read_attributes(self.ds))\n\n def get_dimensions(self):\n return self.ds.dimensions\n\n def get_encoding(self):\n encoding = {}\n encoding[\"unlimited_dims\"] = {\n k for k, v in self.ds.dimensions.items() if v is None\n }\n return encoding\n\n def set_dimension(self, name, length, is_unlimited=False):\n if is_unlimited:\n self.ds.dimensions[name] = None\n self.ds.resize_dimension(name, length)\n else:\n self.ds.dimensions[name] = length\n\n def set_attribute(self, key, value):\n self.ds.attrs[key] = value\n\n def encode_variable(self, variable):\n return _encode_nc4_variable(variable)\n\n def prepare_variable(\n self, name, variable, check_encoding=False, unlimited_dims=None\n ):\n import h5py\n\n attrs = variable.attrs.copy()\n dtype = _get_datatype(variable, raise_on_invalid_encoding=check_encoding)\n\n fillvalue = attrs.pop(\"_FillValue\", None)\n if dtype is str and fillvalue is not None:\n raise NotImplementedError(\n \"h5netcdf does not yet support setting a fill value for \"\n \"variable-length strings \"\n \"(https://github.com/shoyer/h5netcdf/issues/37). 
\"\n \"Either remove '_FillValue' from encoding on variable %r \"\n \"or set {'dtype': 'S1'} in encoding to use the fixed width \"\n \"NC_CHAR type.\" % name\n )\n\n if dtype is str:\n dtype = h5py.special_dtype(vlen=str)\n\n encoding = _extract_h5nc_encoding(variable, raise_on_invalid=check_encoding)\n kwargs = {}\n\n # Convert from NetCDF4-Python style compression settings to h5py style\n # If both styles are used together, h5py takes precedence\n # If set_encoding=True, raise ValueError in case of mismatch\n if encoding.pop(\"zlib\", False):\n if check_encoding and encoding.get(\"compression\") not in (None, \"gzip\"):\n raise ValueError(\"'zlib' and 'compression' encodings mismatch\")\n encoding.setdefault(\"compression\", \"gzip\")\n\n if (\n check_encoding\n and \"complevel\" in encoding\n and \"compression_opts\" in encoding\n and encoding[\"complevel\"] != encoding[\"compression_opts\"]\n ):\n raise ValueError(\"'complevel' and 'compression_opts' encodings \" \"mismatch\")\n complevel = encoding.pop(\"complevel\", 0)\n if complevel != 0:\n encoding.setdefault(\"compression_opts\", complevel)\n\n encoding[\"chunks\"] = encoding.pop(\"chunksizes\", None)\n\n # Do not apply compression, filters or chunking to scalars.\n if variable.shape:\n for key in [\n \"compression\",\n \"compression_opts\",\n \"shuffle\",\n \"chunks\",\n \"fletcher32\",\n ]:\n if key in encoding:\n kwargs[key] = encoding[key]\n if name not in self.ds:\n nc4_var = self.ds.create_variable(\n name,\n dtype=dtype,\n dimensions=variable.dims,\n fillvalue=fillvalue,\n **kwargs\n )\n else:\n nc4_var = self.ds[name]\n\n for k, v in attrs.items():\n nc4_var.attrs[k] = v\n\n target = H5NetCDFArrayWrapper(name, self)\n\n return target, variable.data\n\n def sync(self):\n self.ds.sync()\n\n def close(self, **kwargs):\n self._manager.close(**kwargs)\n"},{"id":15,"name":"monthly-means.rst","nodeType":"TextFile","path":"doc/examples","text":".. _monthly means example:\n\nCalculating Seasonal Averages from Timeseries of Monthly Means\n==============================================================\n\nAuthor: `Joe Hamman `__\n\nThe data used for this example can be found in the\n`xarray-data `__ repository.\n\nSuppose we have a netCDF or ``xarray.Dataset`` of monthly mean data and\nwe want to calculate the seasonal average. To do this properly, we need\nto calculate the weighted average considering that each month has a\ndifferent number of days.\n\n.. code:: python\n\n %matplotlib inline\n import numpy as np\n import pandas as pd\n import xarray as xr\n from netCDF4 import num2date\n import matplotlib.pyplot as plt\n\n print(\"numpy version : \", np.__version__)\n print(\"pandas version : \", pd.__version__)\n print(\"xarray version : \", xr.__version__)\n\n\n.. parsed-literal::\n\n numpy version : 1.11.1\n pandas version : 0.18.1\n xarray version : 0.8.2\n\n\nSome calendar information so we can support any netCDF calendar.\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. 
code:: python\n\n dpm = {'noleap': [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],\n '365_day': [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],\n 'standard': [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],\n 'gregorian': [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],\n 'proleptic_gregorian': [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],\n 'all_leap': [0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],\n '366_day': [0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],\n '360_day': [0, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30]}\n\nA few calendar functions to determine the number of days in each month\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nIf you were just using the standard calendar, it would be easy to use\nthe ``calendar.month_range`` function.\n\n.. code:: python\n\n def leap_year(year, calendar='standard'):\n \"\"\"Determine if year is a leap year\"\"\"\n leap = False\n if ((calendar in ['standard', 'gregorian',\n 'proleptic_gregorian', 'julian']) and\n (year % 4 == 0)):\n leap = True\n if ((calendar == 'proleptic_gregorian') and\n (year % 100 == 0) and\n (year % 400 != 0)):\n leap = False\n elif ((calendar in ['standard', 'gregorian']) and\n (year % 100 == 0) and (year % 400 != 0) and\n (year < 1583)):\n leap = False\n return leap\n\n def get_dpm(time, calendar='standard'):\n \"\"\"\n return a array of days per month corresponding to the months provided in `months`\n \"\"\"\n month_length = np.zeros(len(time), dtype=np.int)\n\n cal_days = dpm[calendar]\n\n for i, (month, year) in enumerate(zip(time.month, time.year)):\n month_length[i] = cal_days[month]\n if leap_year(year, calendar=calendar):\n month_length[i] += 1\n return month_length\n\nOpen the ``Dataset``\n^^^^^^^^^^^^^^^^^^^^\n\n.. code:: python\n\n ds = xr.tutorial.load_dataset('rasm')\n print(ds)\n\n\n.. parsed-literal::\n\n \n Dimensions: (time: 36, x: 275, y: 205)\n Coordinates:\n * time (time) datetime64[ns] 1980-09-16T12:00:00 1980-10-17 ...\n * y (y) int64 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 ...\n * x (x) int64 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 ...\n Data variables:\n Tair (time, y, x) float64 nan nan nan nan nan nan nan nan nan nan ...\n yc (y, x) float64 16.53 16.78 17.02 17.27 17.51 17.76 18.0 18.25 ...\n xc (y, x) float64 189.2 189.4 189.6 189.7 189.9 190.1 190.2 190.4 ...\n Attributes:\n title: /workspace/jhamman/processed/R1002RBRxaaa01a/lnd/temp/R1002RBRxaaa01a.vic.ha.1979-09-01.nc\n institution: U.W.\n source: RACM R1002RBRxaaa01a\n output_frequency: daily\n output_mode: averaged\n convention: CF-1.4\n references: Based on the initial model of Liang et al., 1994, JGR, 99, 14,415- 14,429.\n comment: Output from the Variable Infiltration Capacity (VIC) model.\n nco_openmp_thread_number: 1\n NCO: 4.3.7\n history: history deleted for brevity\n\n\nNow for the heavy lifting:\n^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nWe first have to come up with the weights, - calculate the month lengths\nfor each monthly data record - calculate weights using\n``groupby('time.season')``\n\nFinally, we just need to multiply our weights by the ``Dataset`` and sum\nalong the time dimension.\n\n.. 
code:: python\n\n # Make a DataArray with the number of days in each month, size = len(time)\n month_length = xr.DataArray(get_dpm(ds.time.to_index(), calendar='noleap'),\n coords=[ds.time], name='month_length')\n\n # Calculate the weights by grouping by 'time.season'.\n # Conversion to float type ('astype(float)') only necessary for Python 2.x\n weights = month_length.groupby('time.season') / month_length.astype(float).groupby('time.season').sum()\n\n # Test that the sum of the weights for each season is 1.0\n np.testing.assert_allclose(weights.groupby('time.season').sum().values, np.ones(4))\n\n # Calculate the weighted average\n ds_weighted = (ds * weights).groupby('time.season').sum(dim='time')\n\n.. code:: python\n\n print(ds_weighted)\n\n\n.. parsed-literal::\n\n \n Dimensions: (season: 4, x: 275, y: 205)\n Coordinates:\n * y (y) int64 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 ...\n * x (x) int64 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 ...\n * season (season) object 'DJF' 'JJA' 'MAM' 'SON'\n Data variables:\n Tair (season, y, x) float64 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ...\n xc (season, y, x) float64 189.2 189.4 189.6 189.7 189.9 190.1 ...\n yc (season, y, x) float64 16.53 16.78 17.02 17.27 17.51 17.76 18.0 ...\n\n\n.. code:: python\n\n # only used for comparisons\n ds_unweighted = ds.groupby('time.season').mean('time')\n ds_diff = ds_weighted - ds_unweighted\n\n.. code:: python\n\n # Quick plot to show the results\n notnull = pd.notnull(ds_unweighted['Tair'][0])\n\n fig, axes = plt.subplots(nrows=4, ncols=3, figsize=(14,12))\n for i, season in enumerate(('DJF', 'MAM', 'JJA', 'SON')):\n ds_weighted['Tair'].sel(season=season).where(notnull).plot.pcolormesh(\n ax=axes[i, 0], vmin=-30, vmax=30, cmap='Spectral_r',\n add_colorbar=True, extend='both')\n\n ds_unweighted['Tair'].sel(season=season).where(notnull).plot.pcolormesh(\n ax=axes[i, 1], vmin=-30, vmax=30, cmap='Spectral_r',\n add_colorbar=True, extend='both')\n\n ds_diff['Tair'].sel(season=season).where(notnull).plot.pcolormesh(\n ax=axes[i, 2], vmin=-0.1, vmax=.1, cmap='RdBu_r',\n add_colorbar=True, extend='both')\n\n axes[i, 0].set_ylabel(season)\n axes[i, 1].set_ylabel('')\n axes[i, 2].set_ylabel('')\n\n for ax in axes.flat:\n ax.axes.get_xaxis().set_ticklabels([])\n ax.axes.get_yaxis().set_ticklabels([])\n ax.axes.axis('tight')\n ax.set_xlabel('')\n\n axes[0, 0].set_title('Weighted by DPM')\n axes[0, 1].set_title('Equal Weighting')\n axes[0, 2].set_title('Difference')\n\n plt.tight_layout()\n\n fig.suptitle('Seasonal Surface Air Temperature', fontsize=16, y=1.02)\n\n\n\n\n.. parsed-literal::\n\n \n\n\n\n\n.. image:: monthly_means_output.png\n\n\n.. 
code:: python\n\n # Wrap it into a simple function\n def season_mean(ds, calendar='standard'):\n # Make a DataArray of season/year groups\n year_season = xr.DataArray(ds.time.to_index().to_period(freq='Q-NOV').to_timestamp(how='E'),\n coords=[ds.time], name='year_season')\n\n # Make a DataArray with the number of days in each month, size = len(time)\n month_length = xr.DataArray(get_dpm(ds.time.to_index(), calendar=calendar),\n coords=[ds.time], name='month_length')\n # Calculate the weights by grouping by 'time.season'\n weights = month_length.groupby('time.season') / month_length.groupby('time.season').sum()\n\n # Test that the sum of the weights for each season is 1.0\n np.testing.assert_allclose(weights.groupby('time.season').sum().values, np.ones(4))\n\n # Calculate the weighted average\n return (ds * weights).groupby('time.season').sum(dim='time')\n"},{"id":16,"name":"CODE_OF_CONDUCT.md","nodeType":"TextFile","path":"","text":"# Contributor Covenant Code of Conduct\n\n## Our Pledge\n\nIn the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.\n\n## Our Standards\n\nExamples of behavior that contributes to creating a positive environment include:\n\n* Using welcoming and inclusive language\n* Being respectful of differing viewpoints and experiences\n* Gracefully accepting constructive criticism\n* Focusing on what is best for the community\n* Showing empathy towards other community members\n\nExamples of unacceptable behavior by participants include:\n\n* The use of sexualized language or imagery and unwelcome sexual attention or advances\n* Trolling, insulting/derogatory comments, and personal or political attacks\n* Public or private harassment\n* Publishing others' private information, such as a physical or electronic address, without explicit permission\n* Other conduct which could reasonably be considered inappropriate in a professional setting\n\n## Our Responsibilities\n\nProject maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.\n\nProject maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.\n\n## Scope\n\nThis Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.\n\n## Enforcement\n\nInstances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at xarray-core-team@googlegroups.com. 
The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.\n\nProject maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.\n\n## Attribution\n\nThis Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]\n\n[homepage]: http://contributor-covenant.org\n[version]: http://contributor-covenant.org/version/1/4/\n"},{"fileName":"conftest.py","filePath":"","id":17,"nodeType":"File","text":"\"\"\"Configuration for pytest.\"\"\"\n\nimport pytest\n\n\ndef pytest_addoption(parser):\n \"\"\"Add command-line flags for pytest.\"\"\"\n parser.addoption(\"--run-flaky\", action=\"store_true\", help=\"runs flaky tests\")\n parser.addoption(\n \"--run-network-tests\",\n action=\"store_true\",\n help=\"runs tests requiring a network connection\",\n )\n\n\ndef pytest_runtest_setup(item):\n # based on https://stackoverflow.com/questions/47559524\n if \"flaky\" in item.keywords and not item.config.getoption(\"--run-flaky\"):\n pytest.skip(\"set --run-flaky option to run flaky tests\")\n if \"network\" in item.keywords and not item.config.getoption(\"--run-network-tests\"):\n pytest.skip(\n \"set --run-network-tests to run test requiring an \" \"internet connection\"\n )\n"},{"id":18,"name":"data-structures.rst","nodeType":"TextFile","path":"doc","text":".. _data structures:\n\nData Structures\n===============\n\n.. ipython:: python\n :suppress:\n\n import numpy as np\n import pandas as pd\n import xarray as xr\n np.random.seed(123456)\n np.set_printoptions(threshold=10)\n\nDataArray\n---------\n\n:py:class:`xarray.DataArray` is xarray's implementation of a labeled,\nmulti-dimensional array. It has several key properties:\n\n- ``values``: a :py:class:`numpy.ndarray` holding the array's values\n- ``dims``: dimension names for each axis (e.g., ``('x', 'y', 'z')``)\n- ``coords``: a dict-like container of arrays (*coordinates*) that label each\n point (e.g., 1-dimensional arrays of numbers, datetime objects or\n strings)\n- ``attrs``: :py:class:`dict` to hold arbitrary metadata (*attributes*)\n\nxarray uses ``dims`` and ``coords`` to enable its core metadata aware operations.\nDimensions provide names that xarray uses instead of the ``axis`` argument found\nin many numpy functions. Coordinates enable fast label based indexing and\nalignment, building on the functionality of the ``index`` found on a pandas\n:py:class:`~pandas.DataFrame` or :py:class:`~pandas.Series`.\n\nDataArray objects also can have a ``name`` and can hold arbitrary metadata in\nthe form of their ``attrs`` property. Names and attributes are strictly for\nusers and user-written code: xarray makes no attempt to interpret them, and\npropagates them only in unambiguous cases\n(see FAQ, :ref:`approach to metadata`).\n\n.. _creating a dataarray:\n\nCreating a DataArray\n~~~~~~~~~~~~~~~~~~~~\n\nThe :py:class:`~xarray.DataArray` constructor takes:\n\n- ``data``: a multi-dimensional array of values (e.g., a numpy ndarray,\n :py:class:`~pandas.Series`, :py:class:`~pandas.DataFrame` or :py:class:`~pandas.Panel`)\n- ``coords``: a list or dictionary of coordinates. 
If a list, it should be a\n list of tuples where the first element is the dimension name and the second\n element is the corresponding coordinate array_like object.\n- ``dims``: a list of dimension names. If omitted and ``coords`` is a list of\n tuples, dimension names are taken from ``coords``.\n- ``attrs``: a dictionary of attributes to add to the instance\n- ``name``: a string that names the instance\n\n.. ipython:: python\n\n data = np.random.rand(4, 3)\n locs = ['IA', 'IL', 'IN']\n times = pd.date_range('2000-01-01', periods=4)\n foo = xr.DataArray(data, coords=[times, locs], dims=['time', 'space'])\n foo\n\nOnly ``data`` is required; all of other arguments will be filled\nin with default values:\n\n.. ipython:: python\n\n xr.DataArray(data)\n\nAs you can see, dimension names are always present in the xarray data model: if\nyou do not provide them, defaults of the form ``dim_N`` will be created.\nHowever, coordinates are always optional, and dimensions do not have automatic\ncoordinate labels.\n\n.. note::\n\n This is different from pandas, where axes always have tick labels, which\n default to the integers ``[0, ..., n-1]``.\n\n Prior to xarray v0.9, xarray copied this behavior: default coordinates for\n each dimension would be created if coordinates were not supplied explicitly.\n This is no longer the case.\n\nCoordinates can be specified in the following ways:\n\n- A list of values with length equal to the number of dimensions, providing\n coordinate labels for each dimension. Each value must be of one of the\n following forms:\n\n * A :py:class:`~xarray.DataArray` or :py:class:`~xarray.Variable`\n * A tuple of the form ``(dims, data[, attrs])``, which is converted into\n arguments for :py:class:`~xarray.Variable`\n * A pandas object or scalar value, which is converted into a ``DataArray``\n * A 1D array or list, which is interpreted as values for a one dimensional\n coordinate variable along the same dimension as it's name\n\n- A dictionary of ``{coord_name: coord}`` where values are of the same form\n as the list. Supplying coordinates as a dictionary allows other coordinates\n than those corresponding to dimensions (more on these later). If you supply\n ``coords`` as a dictionary, you must explicitly provide ``dims``.\n\nAs a list of tuples:\n\n.. ipython:: python\n\n xr.DataArray(data, coords=[('time', times), ('space', locs)])\n\nAs a dictionary:\n\n.. ipython:: python\n\n xr.DataArray(data, coords={'time': times, 'space': locs, 'const': 42,\n 'ranking': ('space', [1, 2, 3])},\n dims=['time', 'space'])\n\nAs a dictionary with coords across multiple dimensions:\n\n.. ipython:: python\n\n xr.DataArray(data, coords={'time': times, 'space': locs, 'const': 42,\n 'ranking': (('time', 'space'), np.arange(12).reshape(4,3))},\n dims=['time', 'space'])\n\nIf you create a ``DataArray`` by supplying a pandas\n:py:class:`~pandas.Series`, :py:class:`~pandas.DataFrame` or\n:py:class:`~pandas.Panel`, any non-specified arguments in the\n``DataArray`` constructor will be filled in from the pandas object:\n\n.. ipython:: python\n\n df = pd.DataFrame({'x': [0, 1], 'y': [2, 3]}, index=['a', 'b'])\n df.index.name = 'abc'\n df.columns.name = 'xyz'\n df\n xr.DataArray(df)\n\nDataArray properties\n~~~~~~~~~~~~~~~~~~~~\n\nLet's take a look at the important properties on our array:\n\n.. ipython:: python\n\n foo.values\n foo.dims\n foo.coords\n foo.attrs\n print(foo.name)\n\nYou can modify ``values`` inplace:\n\n.. ipython:: python\n\n foo.values = 1.0 * foo.values\n\n.. 
note::\n\n The array values in a :py:class:`~xarray.DataArray` have a single\n (homogeneous) data type. To work with heterogeneous or structured data\n types in xarray, use coordinates, or put separate ``DataArray`` objects\n in a single :py:class:`~xarray.Dataset` (see below).\n\nNow fill in some of that missing metadata:\n\n.. ipython:: python\n\n foo.name = 'foo'\n foo.attrs['units'] = 'meters'\n foo\n\nThe :py:meth:`~xarray.DataArray.rename` method is another option, returning a\nnew data array:\n\n.. ipython:: python\n\n foo.rename('bar')\n\nDataArray Coordinates\n~~~~~~~~~~~~~~~~~~~~~\n\nThe ``coords`` property is ``dict`` like. Individual coordinates can be\naccessed from the coordinates by name, or even by indexing the data array\nitself:\n\n.. ipython:: python\n\n foo.coords['time']\n foo['time']\n\nThese are also :py:class:`~xarray.DataArray` objects, which contain tick-labels\nfor each dimension.\n\nCoordinates can also be set or removed by using the dictionary like syntax:\n\n.. ipython:: python\n\n foo['ranking'] = ('space', [1, 2, 3])\n foo.coords\n del foo['ranking']\n foo.coords\n\nFor more details, see :ref:`coordinates` below.\n\nDataset\n-------\n\n:py:class:`xarray.Dataset` is xarray's multi-dimensional equivalent of a\n:py:class:`~pandas.DataFrame`. It is a dict-like\ncontainer of labeled arrays (:py:class:`~xarray.DataArray` objects) with aligned\ndimensions. It is designed as an in-memory representation of the data model\nfrom the `netCDF`__ file format.\n\n__ http://www.unidata.ucar.edu/software/netcdf/\n\nIn addition to the dict-like interface of the dataset itself, which can be used\nto access any variable in a dataset, datasets have four key properties:\n\n- ``dims``: a dictionary mapping from dimension names to the fixed length of\n each dimension (e.g., ``{'x': 6, 'y': 6, 'time': 8}``)\n- ``data_vars``: a dict-like container of DataArrays corresponding to variables\n- ``coords``: another dict-like container of DataArrays intended to label points\n used in ``data_vars`` (e.g., arrays of numbers, datetime objects or strings)\n- ``attrs``: :py:class:`dict` to hold arbitrary metadata\n\nThe distinction between whether a variables falls in data or coordinates\n(borrowed from `CF conventions`_) is mostly semantic, and you can probably get\naway with ignoring it if you like: dictionary like access on a dataset will\nsupply variables found in either category. However, xarray does make use of the\ndistinction for indexing and computations. Coordinates indicate\nconstant/fixed/independent quantities, unlike the varying/measured/dependent\nquantities that belong in data.\n\n.. _CF conventions: http://cfconventions.org/\n\nHere is an example of how we might structure a dataset for a weather forecast:\n\n.. image:: _static/dataset-diagram.png\n\nIn this example, it would be natural to call ``temperature`` and\n``precipitation`` \"data variables\" and all the other arrays \"coordinate\nvariables\" because they label the points along the dimensions. (see [1]_ for\nmore background on this example).\n\n.. 
_dataarray constructor:\n\nCreating a Dataset\n~~~~~~~~~~~~~~~~~~\n\nTo make an :py:class:`~xarray.Dataset` from scratch, supply dictionaries for any\nvariables (``data_vars``), coordinates (``coords``) and attributes (``attrs``).\n\n- ``data_vars`` should be a dictionary with each key as the name of the variable\n and each value as one of:\n\n * A :py:class:`~xarray.DataArray` or :py:class:`~xarray.Variable`\n * A tuple of the form ``(dims, data[, attrs])``, which is converted into\n arguments for :py:class:`~xarray.Variable`\n * A pandas object, which is converted into a ``DataArray``\n * A 1D array or list, which is interpreted as values for a one dimensional\n coordinate variable along the same dimension as it's name\n\n- ``coords`` should be a dictionary of the same form as ``data_vars``.\n\n- ``attrs`` should be a dictionary.\n\nLet's create some fake data for the example we show above:\n\n.. ipython:: python\n\n temp = 15 + 8 * np.random.randn(2, 2, 3)\n precip = 10 * np.random.rand(2, 2, 3)\n lon = [[-99.83, -99.32], [-99.79, -99.23]]\n lat = [[42.25, 42.21], [42.63, 42.59]]\n\n # for real use cases, its good practice to supply array attributes such as\n # units, but we won't bother here for the sake of brevity\n ds = xr.Dataset({'temperature': (['x', 'y', 'time'], temp),\n 'precipitation': (['x', 'y', 'time'], precip)},\n coords={'lon': (['x', 'y'], lon),\n 'lat': (['x', 'y'], lat),\n 'time': pd.date_range('2014-09-06', periods=3),\n 'reference_time': pd.Timestamp('2014-09-05')})\n ds\n\nHere we pass :py:class:`xarray.DataArray` objects or a pandas object as values\nin the dictionary:\n\n.. ipython:: python\n\n xr.Dataset({'bar': foo})\n\n\n.. ipython:: python\n\n xr.Dataset({'bar': foo.to_pandas()})\n\nWhere a pandas object is supplied as a value, the names of its indexes are used as dimension\nnames, and its data is aligned to any existing dimensions.\n\nYou can also create an dataset from:\n\n- A :py:class:`pandas.DataFrame` or :py:class:`pandas.Panel` along its columns and items\n respectively, by passing it into the :py:class:`~xarray.Dataset` directly\n- A :py:class:`pandas.DataFrame` with :py:meth:`Dataset.from_dataframe `,\n which will additionally handle MultiIndexes See :ref:`pandas`\n- A netCDF file on disk with :py:func:`~xarray.open_dataset`. See :ref:`io`.\n\nDataset contents\n~~~~~~~~~~~~~~~~\n\n:py:class:`~xarray.Dataset` implements the Python mapping interface, with\nvalues given by :py:class:`xarray.DataArray` objects:\n\n.. ipython:: python\n\n 'temperature' in ds\n ds['temperature']\n\nValid keys include each listed coordinate and data variable.\n\nData and coordinate variables are also contained separately in the\n:py:attr:`~xarray.Dataset.data_vars` and :py:attr:`~xarray.Dataset.coords`\ndictionary-like attributes:\n\n.. ipython:: python\n\n ds.data_vars\n ds.coords\n\nFinally, like data arrays, datasets also store arbitrary metadata in the form\nof `attributes`:\n\n.. ipython:: python\n\n ds.attrs\n\n ds.attrs['title'] = 'example attribute'\n ds\n\nxarray does not enforce any restrictions on attributes, but serialization to\nsome file formats may fail if you use objects that are not strings, numbers\nor :py:class:`numpy.ndarray` objects.\n\nAs a useful shortcut, you can use attribute style access for reading (but not\nsetting) variables and attributes:\n\n.. 
ipython:: python\n\n ds.temperature\n\nThis is particularly useful in an exploratory context, because you can\ntab-complete these variable names with tools like IPython.\n\nDictionary like methods\n~~~~~~~~~~~~~~~~~~~~~~~\n\nWe can update a dataset in-place using Python's standard dictionary syntax. For\nexample, to create this example dataset from scratch, we could have written:\n\n.. ipython:: python\n\n ds = xr.Dataset()\n ds['temperature'] = (('x', 'y', 'time'), temp)\n ds['temperature_double'] = (('x', 'y', 'time'), temp * 2 )\n ds['precipitation'] = (('x', 'y', 'time'), precip)\n ds.coords['lat'] = (('x', 'y'), lat)\n ds.coords['lon'] = (('x', 'y'), lon)\n ds.coords['time'] = pd.date_range('2014-09-06', periods=3)\n ds.coords['reference_time'] = pd.Timestamp('2014-09-05')\n\nTo change the variables in a ``Dataset``, you can use all the standard dictionary\nmethods, including ``values``, ``items``, ``__delitem__``, ``get`` and\n:py:meth:`~xarray.Dataset.update`. Note that assigning a ``DataArray`` or pandas\nobject to a ``Dataset`` variable using ``__setitem__`` or ``update`` will\n:ref:`automatically align` the array(s) to the original\ndataset's indexes.\n\nYou can copy a ``Dataset`` by calling the :py:meth:`~xarray.Dataset.copy`\nmethod. By default, the copy is shallow, so only the container will be copied:\nthe arrays in the ``Dataset`` will still be stored in the same underlying\n:py:class:`numpy.ndarray` objects. You can copy all data by calling\n``ds.copy(deep=True)``.\n\n.. _transforming datasets:\n\nTransforming datasets\n~~~~~~~~~~~~~~~~~~~~~\n\nIn addition to dictionary-like methods (described above), xarray has additional\nmethods (like pandas) for transforming datasets into new objects.\n\nFor removing variables, you can select and drop an explicit list of\nvariables by indexing with a list of names or using the\n:py:meth:`~xarray.Dataset.drop` methods to return a new ``Dataset``. These\noperations keep around coordinates:\n\n.. ipython:: python\n\n ds[['temperature']]\n ds[['temperature', 'temperature_double']]\n ds.drop('temperature')\n\nTo remove a dimension, you can use :py:meth:`~xarray.Dataset.drop_dims` method.\nAny variables using that dimension are dropped:\n\n.. ipython:: python\n\n ds.drop_dims('time')\n\nAs an alternate to dictionary-like modifications, you can use\n:py:meth:`~xarray.Dataset.assign` and :py:meth:`~xarray.Dataset.assign_coords`.\nThese methods return a new dataset with additional (or replaced) or values:\n\n.. ipython:: python\n\n ds.assign(temperature2 = 2 * ds.temperature)\n\nThere is also the :py:meth:`~xarray.Dataset.pipe` method that allows you to use\na method call with an external function (e.g., ``ds.pipe(func)``) instead of\nsimply calling it (e.g., ``func(ds)``). This allows you to write pipelines for\ntransforming you data (using \"method chaining\") instead of writing hard to\nfollow nested function calls:\n\n.. ipython:: python\n\n # these lines are equivalent, but with pipe we can make the logic flow\n # entirely from left to right\n plt.plot((2 * ds.temperature.sel(x=0)).mean('y'))\n (ds.temperature\n .sel(x=0)\n .pipe(lambda x: 2 * x)\n .mean('y')\n .pipe(plt.plot))\n\nBoth ``pipe`` and ``assign`` replicate the pandas methods of the same names\n(:py:meth:`DataFrame.pipe ` and\n:py:meth:`DataFrame.assign `).\n\nWith xarray, there is no performance penalty for creating new datasets, even if\nvariables are lazily loaded from a file on disk. 
Creating new objects instead\nof mutating existing objects often results in easier to understand code, so we\nencourage using this approach.\n\nRenaming variables\n~~~~~~~~~~~~~~~~~~\n\nAnother useful option is the :py:meth:`~xarray.Dataset.rename` method to rename\ndataset variables:\n\n.. ipython:: python\n\n ds.rename({'temperature': 'temp', 'precipitation': 'precip'})\n\nThe related :py:meth:`~xarray.Dataset.swap_dims` method allows you do to swap\ndimension and non-dimension variables:\n\n.. ipython:: python\n\n ds.coords['day'] = ('time', [6, 7, 8])\n ds.swap_dims({'time': 'day'})\n\n.. _coordinates:\n\nCoordinates\n-----------\n\nCoordinates are ancillary variables stored for ``DataArray`` and ``Dataset``\nobjects in the ``coords`` attribute:\n\n.. ipython:: python\n\n ds.coords\n\nUnlike attributes, xarray *does* interpret and persist coordinates in\noperations that transform xarray objects. There are two types of coordinates\nin xarray:\n\n- **dimension coordinates** are one dimensional coordinates with a name equal\n to their sole dimension (marked by ``*`` when printing a dataset or data\n array). They are used for label based indexing and alignment,\n like the ``index`` found on a pandas :py:class:`~pandas.DataFrame` or\n :py:class:`~pandas.Series`. Indeed, these \"dimension\" coordinates use a\n :py:class:`pandas.Index` internally to store their values.\n\n- **non-dimension coordinates** are variables that contain coordinate\n data, but are not a dimension coordinate. They can be multidimensional\n (see :ref:`examples.multidim`), and there is no relationship between the\n name of a non-dimension coordinate and the name(s) of its dimension(s).\n Non-dimension coordinates can be useful for indexing or plotting; otherwise,\n xarray does not make any direct use of the values associated with them.\n They are not used for alignment or automatic indexing, nor are they required\n to match when doing arithmetic\n (see :ref:`coordinates math`).\n\n.. note::\n\n xarray's terminology differs from the `CF terminology`_, where the\n \"dimension coordinates\" are called \"coordinate variables\", and the\n \"non-dimension coordinates\" are called \"auxiliary coordinate variables\"\n (see :issue:`1295` for more details).\n\n.. _CF terminology: http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#terminology\n\n\nModifying coordinates\n~~~~~~~~~~~~~~~~~~~~~\n\nTo entirely add or remove coordinate arrays, you can use dictionary like\nsyntax, as shown above.\n\nTo convert back and forth between data and coordinates, you can use the\n:py:meth:`~xarray.Dataset.set_coords` and\n:py:meth:`~xarray.Dataset.reset_coords` methods:\n\n.. ipython:: python\n\n ds.reset_coords()\n ds.set_coords(['temperature', 'precipitation'])\n ds['temperature'].reset_coords(drop=True)\n\nNotice that these operations skip coordinates with names given by dimensions,\nas used for indexing. This mostly because we are not entirely sure how to\ndesign the interface around the fact that xarray cannot store a coordinate and\nvariable with the name but different values in the same dictionary. But we do\nrecognize that supporting something like this would be useful.\n\nCoordinates methods\n~~~~~~~~~~~~~~~~~~~\n\n``Coordinates`` objects also have a few useful methods, mostly for converting\nthem into dataset objects:\n\n.. 
ipython:: python\n\n ds.coords.to_dataset()\n\nThe merge method is particularly interesting, because it implements the same\nlogic used for merging coordinates in arithmetic operations\n(see :ref:`comput`):\n\n.. ipython:: python\n\n alt = xr.Dataset(coords={'z': [10], 'lat': 0, 'lon': 0})\n ds.coords.merge(alt.coords)\n\nThe ``coords.merge`` method may be useful if you want to implement your own\nbinary operations that act on xarray objects. In the future, we hope to write\nmore helper functions so that you can easily make your functions act like\nxarray's built-in arithmetic.\n\nIndexes\n~~~~~~~\n\nTo convert a coordinate (or any ``DataArray``) into an actual\n:py:class:`pandas.Index`, use the :py:meth:`~xarray.DataArray.to_index` method:\n\n.. ipython:: python\n\n ds['time'].to_index()\n\nA useful shortcut is the ``indexes`` property (on both ``DataArray`` and\n``Dataset``), which lazily constructs a dictionary whose keys are given by each\ndimension and whose the values are ``Index`` objects:\n\n.. ipython:: python\n\n ds.indexes\n\nMultiIndex coordinates\n~~~~~~~~~~~~~~~~~~~~~~\n\nXarray supports labeling coordinate values with a :py:class:`pandas.MultiIndex`:\n\n.. ipython:: python\n\n midx = pd.MultiIndex.from_arrays([['R', 'R', 'V', 'V'], [.1, .2, .7, .9]],\n names=('band', 'wn'))\n mda = xr.DataArray(np.random.rand(4), coords={'spec': midx}, dims='spec')\n mda\n\nFor convenience multi-index levels are directly accessible as \"virtual\" or\n\"derived\" coordinates (marked by ``-`` when printing a dataset or data array):\n\n.. ipython:: python\n\n mda['band']\n mda.wn\n\nIndexing with multi-index levels is also possible using the ``sel`` method\n(see :ref:`multi-level indexing`).\n\nUnlike other coordinates, \"virtual\" level coordinates are not stored in\nthe ``coords`` attribute of ``DataArray`` and ``Dataset`` objects\n(although they are shown when printing the ``coords`` attribute).\nConsequently, most of the coordinates related methods don't apply for them.\nIt also can't be used to replace one particular level.\n\nBecause in a ``DataArray`` or ``Dataset`` object each multi-index level is\naccessible as a \"virtual\" coordinate, its name must not conflict with the names\nof the other levels, coordinates and data variables of the same object.\nEven though Xarray set default names for multi-indexes with unnamed levels,\nit is recommended that you explicitly set the names of the levels.\n\n.. [1] Latitude and longitude are 2D arrays because the dataset uses\n `projected coordinates`__. 
``reference_time`` refers to the reference time\n at which the forecast was made, rather than ``time`` which is the valid time\n for which the forecast applies.\n\n__ http://en.wikipedia.org/wiki/Map_projection\n"},{"fileName":"locks.py","filePath":"xarray/backends","id":19,"nodeType":"File","text":"import multiprocessing\nimport threading\nimport weakref\nfrom typing import Any, MutableMapping\n\ntry:\n from dask.utils import SerializableLock\nexcept ImportError:\n # no need to worry about serializing the lock\n SerializableLock = threading.Lock\n\ntry:\n from dask.distributed import Lock as DistributedLock\nexcept ImportError:\n DistributedLock = None\n\n\n# Locks used by multiple backends.\n# Neither HDF5 nor the netCDF-C library are thread-safe.\nHDF5_LOCK = SerializableLock()\nNETCDFC_LOCK = SerializableLock()\n\n\n_FILE_LOCKS: MutableMapping[Any, threading.Lock] = weakref.WeakValueDictionary()\n\n\ndef _get_threaded_lock(key):\n try:\n lock = _FILE_LOCKS[key]\n except KeyError:\n lock = _FILE_LOCKS[key] = threading.Lock()\n return lock\n\n\ndef _get_multiprocessing_lock(key):\n # TODO: make use of the key -- maybe use locket.py?\n # https://github.com/mwilliamson/locket.py\n del key # unused\n return multiprocessing.Lock()\n\n\n_LOCK_MAKERS = {\n None: _get_threaded_lock,\n \"threaded\": _get_threaded_lock,\n \"multiprocessing\": _get_multiprocessing_lock,\n \"distributed\": DistributedLock,\n}\n\n\ndef _get_lock_maker(scheduler=None):\n \"\"\"Returns an appropriate function for creating resource locks.\n\n Parameters\n ----------\n scheduler : str or None\n Dask scheduler being used.\n\n See Also\n --------\n dask.utils.get_scheduler_lock\n \"\"\"\n return _LOCK_MAKERS[scheduler]\n\n\ndef _get_scheduler(get=None, collection=None):\n \"\"\"Determine the dask scheduler that is being used.\n\n None is returned if no dask scheduler is active.\n\n See also\n --------\n dask.base.get_scheduler\n \"\"\"\n try:\n import dask # noqa: F401\n except ImportError:\n return None\n\n actual_get = dask.base.get_scheduler(get, collection)\n\n try:\n from dask.distributed import Client\n\n if isinstance(actual_get.__self__, Client):\n return \"distributed\"\n except (ImportError, AttributeError):\n pass\n\n if actual_get is dask.multiprocessing.get:\n return \"multiprocessing\"\n else:\n return \"threaded\"\n\n\ndef get_write_lock(key):\n \"\"\"Get a scheduler appropriate lock for writing to the given resource.\n\n Parameters\n ----------\n key : str\n Name of the resource for which to acquire a lock. 
Typically a filename.\n\n Returns\n -------\n Lock object that can be used like a threading.Lock object.\n \"\"\"\n scheduler = _get_scheduler()\n lock_maker = _get_lock_maker(scheduler)\n return lock_maker(key)\n\n\ndef acquire(lock, blocking=True):\n \"\"\"Acquire a lock, possibly in a non-blocking fashion.\n\n Includes backwards compatibility hacks for old versions of Python, dask\n and dask-distributed.\n \"\"\"\n if blocking:\n # no arguments needed\n return lock.acquire()\n elif DistributedLock is not None and isinstance(lock, DistributedLock):\n # distributed.Lock doesn't support the blocking argument yet:\n # https://github.com/dask/distributed/pull/2412\n return lock.acquire(timeout=0)\n else:\n # \"blocking\" keyword argument not supported for:\n # - threading.Lock on Python 2.\n # - dask.SerializableLock with dask v1.0.0 or earlier.\n # - multiprocessing.Lock calls the argument \"block\" instead.\n return lock.acquire(blocking)\n\n\nclass CombinedLock:\n \"\"\"A combination of multiple locks.\n\n Like a locked door, a CombinedLock is locked if any of its constituent\n locks are locked.\n \"\"\"\n\n def __init__(self, locks):\n self.locks = tuple(set(locks)) # remove duplicates\n\n def acquire(self, blocking=True):\n return all(acquire(lock, blocking=blocking) for lock in self.locks)\n\n def release(self):\n for lock in self.locks:\n lock.release()\n\n def __enter__(self):\n for lock in self.locks:\n lock.__enter__()\n\n def __exit__(self, *args):\n for lock in self.locks:\n lock.__exit__(*args)\n\n def locked(self):\n return any(lock.locked for lock in self.locks)\n\n def __repr__(self):\n return \"CombinedLock(%r)\" % list(self.locks)\n\n\nclass DummyLock:\n \"\"\"DummyLock provides the lock API without any actual locking.\"\"\"\n\n def acquire(self, blocking=True):\n pass\n\n def release(self):\n pass\n\n def __enter__(self):\n pass\n\n def __exit__(self, *args):\n pass\n\n def locked(self):\n return False\n\n\ndef combine_locks(locks):\n \"\"\"Combine a sequence of locks into a single lock.\"\"\"\n all_locks = []\n for lock in locks:\n if isinstance(lock, CombinedLock):\n all_locks.extend(lock.locks)\n elif lock is not None:\n all_locks.append(lock)\n\n num_locks = len(all_locks)\n if num_locks > 1:\n return CombinedLock(all_locks)\n elif num_locks == 1:\n return all_locks[0]\n else:\n return DummyLock()\n\n\ndef ensure_lock(lock):\n \"\"\"Ensure that the given object is a lock.\"\"\"\n if lock is None or lock is False:\n return DummyLock()\n return lock\n"},{"col":0,"comment":"Add command-line flags for pytest.","endLoc":13,"header":"def pytest_addoption(parser)","id":20,"name":"pytest_addoption","nodeType":"Function","startLoc":6,"text":"def pytest_addoption(parser):\n \"\"\"Add command-line flags for pytest.\"\"\"\n parser.addoption(\"--run-flaky\", action=\"store_true\", help=\"runs flaky tests\")\n parser.addoption(\n \"--run-network-tests\",\n action=\"store_true\",\n help=\"runs tests requiring a network connection\",\n )"},{"className":"LRUCache","col":0,"comment":"Thread-safe LRUCache based on an OrderedDict.\n\n All dict operations (__getitem__, __setitem__, __contains__) update the\n priority of the relevant key and take O(1) time. 
The dict is iterated over\n in order from the oldest to newest key, which means that a complete pass\n over the dict should not affect the order of any entries.\n\n When a new item is set and the maximum size of the cache is exceeded, the\n oldest item is dropped and called with ``on_evict(key, value)``.\n\n The ``maxsize`` property can be used to view or adjust the capacity of\n the cache, e.g., ``cache.maxsize = new_size``.\n ","endLoc":102,"id":21,"nodeType":"Class","startLoc":9,"text":"class LRUCache(MutableMapping[K, V]):\n \"\"\"Thread-safe LRUCache based on an OrderedDict.\n\n All dict operations (__getitem__, __setitem__, __contains__) update the\n priority of the relevant key and take O(1) time. The dict is iterated over\n in order from the oldest to newest key, which means that a complete pass\n over the dict should not affect the order of any entries.\n\n When a new item is set and the maximum size of the cache is exceeded, the\n oldest item is dropped and called with ``on_evict(key, value)``.\n\n The ``maxsize`` property can be used to view or adjust the capacity of\n the cache, e.g., ``cache.maxsize = new_size``.\n \"\"\"\n\n _cache: \"OrderedDict[K, V]\"\n _maxsize: int\n _lock: threading.RLock\n _on_evict: Optional[Callable[[K, V], Any]]\n\n __slots__ = (\"_cache\", \"_lock\", \"_maxsize\", \"_on_evict\")\n\n def __init__(self, maxsize: int, on_evict: Callable[[K, V], Any] = None):\n \"\"\"\n Parameters\n ----------\n maxsize : int\n Integer maximum number of items to hold in the cache.\n on_evict: callable, optional\n Function to call like ``on_evict(key, value)`` when items are\n evicted.\n \"\"\"\n if not isinstance(maxsize, int):\n raise TypeError(\"maxsize must be an integer\")\n if maxsize < 0:\n raise ValueError(\"maxsize must be non-negative\")\n self._maxsize = maxsize\n self._cache = OrderedDict()\n self._lock = threading.RLock()\n self._on_evict = on_evict\n\n def __getitem__(self, key: K) -> V:\n # record recent use of the key by moving it to the front of the list\n with self._lock:\n value = self._cache[key]\n self._cache.move_to_end(key)\n return value\n\n def _enforce_size_limit(self, capacity: int) -> None:\n \"\"\"Shrink the cache if necessary, evicting the oldest items.\n \"\"\"\n while len(self._cache) > capacity:\n key, value = self._cache.popitem(last=False)\n if self._on_evict is not None:\n self._on_evict(key, value)\n\n def __setitem__(self, key: K, value: V) -> None:\n with self._lock:\n if key in self._cache:\n # insert the new value at the end\n del self._cache[key]\n self._cache[key] = value\n elif self._maxsize:\n # make room if necessary\n self._enforce_size_limit(self._maxsize - 1)\n self._cache[key] = value\n elif self._on_evict is not None:\n # not saving, immediately evict\n self._on_evict(key, value)\n\n def __delitem__(self, key: K) -> None:\n del self._cache[key]\n\n def __iter__(self) -> Iterator[K]:\n # create a list, so accessing the cache during iteration cannot change\n # the iteration order\n return iter(list(self._cache))\n\n def __len__(self) -> int:\n return len(self._cache)\n\n @property\n def maxsize(self) -> int:\n \"\"\"Maximum number of items can be held in the cache.\"\"\"\n return self._maxsize\n\n @maxsize.setter\n def maxsize(self, size: int) -> None:\n \"\"\"Resize the cache, evicting the oldest items if necessary.\"\"\"\n if size < 0:\n raise ValueError(\"maxsize must be non-negative\")\n with self._lock:\n self._enforce_size_limit(size)\n self._maxsize = size"},{"className":"CombinedLock","col":0,"comment":"A 
combination of multiple locks.\n\n Like a locked door, a CombinedLock is locked if any of its constituent\n locks are locked.\n ","endLoc":162,"id":22,"nodeType":"Class","startLoc":133,"text":"class CombinedLock:\n \"\"\"A combination of multiple locks.\n\n Like a locked door, a CombinedLock is locked if any of its constituent\n locks are locked.\n \"\"\"\n\n def __init__(self, locks):\n self.locks = tuple(set(locks)) # remove duplicates\n\n def acquire(self, blocking=True):\n return all(acquire(lock, blocking=blocking) for lock in self.locks)\n\n def release(self):\n for lock in self.locks:\n lock.release()\n\n def __enter__(self):\n for lock in self.locks:\n lock.__enter__()\n\n def __exit__(self, *args):\n for lock in self.locks:\n lock.__exit__(*args)\n\n def locked(self):\n return any(lock.locked for lock in self.locks)\n\n def __repr__(self):\n return \"CombinedLock(%r)\" % list(self.locks)"},{"col":4,"comment":"null","endLoc":141,"header":"def __init__(self, locks)","id":23,"name":"__init__","nodeType":"Function","startLoc":140,"text":"def __init__(self, locks):\n self.locks = tuple(set(locks)) # remove duplicates"},{"className":"Variable","col":0,"comment":"A netcdf-like variable consisting of dimensions, data and attributes\n which describe a single Array. A single Variable object is not fully\n described outside the context of its parent Dataset (if you want such a\n fully described object, use a DataArray instead).\n\n The main functional difference between Variables and numpy arrays is that\n numerical operations on Variables implement array broadcasting by dimension\n name. For example, adding an Variable with dimensions `('time',)` to\n another Variable with dimensions `('space',)` results in a new Variable\n with dimensions `('time', 'space')`. Furthermore, numpy reduce operations\n like ``mean`` or ``sum`` are overwritten to take a \"dimension\" argument\n instead of an \"axis\".\n\n Variables are light-weight objects used as the building block for datasets.\n They are more primitive objects, so operations with them provide marginally\n higher performance than using DataArrays. However, manipulating data in the\n form of a Dataset or DataArray should almost always be preferred, because\n they can use more complete metadata in context of coordinate labels.\n ","endLoc":1936,"id":24,"nodeType":"Class","startLoc":259,"text":"class Variable(\n common.AbstractArray, arithmetic.SupportsArithmetic, utils.NdimSizeLenMixin\n):\n \"\"\"A netcdf-like variable consisting of dimensions, data and attributes\n which describe a single Array. A single Variable object is not fully\n described outside the context of its parent Dataset (if you want such a\n fully described object, use a DataArray instead).\n\n The main functional difference between Variables and numpy arrays is that\n numerical operations on Variables implement array broadcasting by dimension\n name. For example, adding an Variable with dimensions `('time',)` to\n another Variable with dimensions `('space',)` results in a new Variable\n with dimensions `('time', 'space')`. Furthermore, numpy reduce operations\n like ``mean`` or ``sum`` are overwritten to take a \"dimension\" argument\n instead of an \"axis\".\n\n Variables are light-weight objects used as the building block for datasets.\n They are more primitive objects, so operations with them provide marginally\n higher performance than using DataArrays. 
However, manipulating data in the\n form of a Dataset or DataArray should almost always be preferred, because\n they can use more complete metadata in context of coordinate labels.\n \"\"\"\n\n __slots__ = (\"_dims\", \"_data\", \"_attrs\", \"_encoding\")\n\n def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):\n \"\"\"\n Parameters\n ----------\n dims : str or sequence of str\n Name(s) of the the data dimension(s). Must be either a string (only\n for 1D data) or a sequence of strings with length equal to the\n number of dimensions.\n data : array_like\n Data array which supports numpy-like data access.\n attrs : dict_like or None, optional\n Attributes to assign to the new variable. If None (default), an\n empty attribute dictionary is initialized.\n encoding : dict_like or None, optional\n Dictionary specifying how to encode this array's data into a\n serialized format like netCDF4. Currently used keys (for netCDF)\n include '_FillValue', 'scale_factor', 'add_offset' and 'dtype'.\n Well-behaved code to serialize a Variable should ignore\n unrecognized encoding items.\n \"\"\"\n self._data = as_compatible_data(data, fastpath=fastpath)\n self._dims = self._parse_dimensions(dims)\n self._attrs = None\n self._encoding = None\n if attrs is not None:\n self.attrs = attrs\n if encoding is not None:\n self.encoding = encoding\n\n @property\n def dtype(self):\n return self._data.dtype\n\n @property\n def shape(self):\n return self._data.shape\n\n @property\n def nbytes(self):\n return self.size * self.dtype.itemsize\n\n @property\n def _in_memory(self):\n return isinstance(self._data, (np.ndarray, np.number, PandasIndexAdapter)) or (\n isinstance(self._data, indexing.MemoryCachedArray)\n and isinstance(self._data.array, indexing.NumpyIndexingAdapter)\n )\n\n @property\n def data(self):\n if hasattr(self._data, \"__array_function__\") or isinstance(\n self._data, dask_array_type\n ):\n return self._data\n else:\n return self.values\n\n @data.setter\n def data(self, data):\n data = as_compatible_data(data)\n if data.shape != self.shape:\n raise ValueError(\"replacement data must match the Variable's shape\")\n self._data = data\n\n def load(self, **kwargs):\n \"\"\"Manually trigger loading of this variable's data from disk or a\n remote source into memory and return this variable.\n\n Normally, it should not be necessary to call this method in user code,\n because all xarray functions should either work on deferred data or\n load data automatically.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.array.compute``.\n\n See Also\n --------\n dask.array.compute\n \"\"\"\n if isinstance(self._data, dask_array_type):\n self._data = as_compatible_data(self._data.compute(**kwargs))\n elif not hasattr(self._data, \"__array_function__\"):\n self._data = np.asarray(self._data)\n return self\n\n def compute(self, **kwargs):\n \"\"\"Manually trigger loading of this variable's data from disk or a\n remote source into memory and return a new variable. 
The original is\n left unaltered.\n\n Normally, it should not be necessary to call this method in user code,\n because all xarray functions should either work on deferred data or\n load data automatically.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.array.compute``.\n\n See Also\n --------\n dask.array.compute\n \"\"\"\n new = self.copy(deep=False)\n return new.load(**kwargs)\n\n def __dask_graph__(self):\n if isinstance(self._data, dask_array_type):\n return self._data.__dask_graph__()\n else:\n return None\n\n def __dask_keys__(self):\n return self._data.__dask_keys__()\n\n def __dask_layers__(self):\n return self._data.__dask_layers__()\n\n @property\n def __dask_optimize__(self):\n return self._data.__dask_optimize__\n\n @property\n def __dask_scheduler__(self):\n return self._data.__dask_scheduler__\n\n def __dask_postcompute__(self):\n array_func, array_args = self._data.__dask_postcompute__()\n return (\n self._dask_finalize,\n (array_func, array_args, self._dims, self._attrs, self._encoding),\n )\n\n def __dask_postpersist__(self):\n array_func, array_args = self._data.__dask_postpersist__()\n return (\n self._dask_finalize,\n (array_func, array_args, self._dims, self._attrs, self._encoding),\n )\n\n @staticmethod\n def _dask_finalize(results, array_func, array_args, dims, attrs, encoding):\n if isinstance(results, dict): # persist case\n name = array_args[0]\n results = {k: v for k, v in results.items() if k[0] == name}\n data = array_func(results, *array_args)\n return Variable(dims, data, attrs=attrs, encoding=encoding)\n\n @property\n def values(self):\n \"\"\"The variable's data as a numpy.ndarray\"\"\"\n return _as_array_or_item(self._data)\n\n @values.setter\n def values(self, values):\n self.data = values\n\n def to_base_variable(self):\n \"\"\"Return this variable as a base xarray.Variable\"\"\"\n return Variable(\n self.dims, self._data, self._attrs, encoding=self._encoding, fastpath=True\n )\n\n to_variable = utils.alias(to_base_variable, \"to_variable\")\n\n def to_index_variable(self):\n \"\"\"Return this variable as an xarray.IndexVariable\"\"\"\n return IndexVariable(\n self.dims, self._data, self._attrs, encoding=self._encoding, fastpath=True\n )\n\n to_coord = utils.alias(to_index_variable, \"to_coord\")\n\n def to_index(self):\n \"\"\"Convert this variable to a pandas.Index\"\"\"\n return self.to_index_variable().to_index()\n\n def to_dict(self, data=True):\n \"\"\"Dictionary representation of variable.\"\"\"\n item = {\"dims\": self.dims, \"attrs\": decode_numpy_dict_values(self.attrs)}\n if data:\n item[\"data\"] = ensure_us_time_resolution(self.values).tolist()\n else:\n item.update({\"dtype\": str(self.dtype), \"shape\": self.shape})\n return item\n\n @property\n def dims(self):\n \"\"\"Tuple of dimension names with which this variable is associated.\n \"\"\"\n return self._dims\n\n @dims.setter\n def dims(self, value):\n self._dims = self._parse_dimensions(value)\n\n def _parse_dimensions(self, dims):\n if isinstance(dims, str):\n dims = (dims,)\n dims = tuple(dims)\n if len(dims) != self.ndim:\n raise ValueError(\n \"dimensions %s must have the same length as the \"\n \"number of data dimensions, ndim=%s\" % (dims, self.ndim)\n )\n return dims\n\n def _item_key_to_tuple(self, key):\n if utils.is_dict_like(key):\n return tuple(key.get(dim, slice(None)) for dim in self.dims)\n else:\n return key\n\n def _broadcast_indexes(self, key):\n \"\"\"Prepare an indexing key for an indexing operation.\n\n Parameters\n 
-----------\n key: int, slice, array, dict or tuple of integer, slices and arrays\n Any valid input for indexing.\n\n Returns\n -------\n dims: tuple\n Dimension of the resultant variable.\n indexers: IndexingTuple subclass\n Tuple of integer, array-like, or slices to use when indexing\n self._data. The type of this argument indicates the type of\n indexing to perform, either basic, outer or vectorized.\n new_order : Optional[Sequence[int]]\n Optional reordering to do on the result of indexing. If not None,\n the first len(new_order) indexing should be moved to these\n positions.\n \"\"\"\n key = self._item_key_to_tuple(key) # key is a tuple\n # key is a tuple of full size\n key = indexing.expanded_indexer(key, self.ndim)\n # Convert a scalar Variable to an integer\n key = tuple(\n k.data.item() if isinstance(k, Variable) and k.ndim == 0 else k for k in key\n )\n # Convert a 0d-array to an integer\n key = tuple(\n k.item() if isinstance(k, np.ndarray) and k.ndim == 0 else k for k in key\n )\n\n if all(isinstance(k, BASIC_INDEXING_TYPES) for k in key):\n return self._broadcast_indexes_basic(key)\n\n self._validate_indexers(key)\n # Detect it can be mapped as an outer indexer\n # If all key is unlabeled, or\n # key can be mapped as an OuterIndexer.\n if all(not isinstance(k, Variable) for k in key):\n return self._broadcast_indexes_outer(key)\n\n # If all key is 1-dimensional and there are no duplicate labels,\n # key can be mapped as an OuterIndexer.\n dims = []\n for k, d in zip(key, self.dims):\n if isinstance(k, Variable):\n if len(k.dims) > 1:\n return self._broadcast_indexes_vectorized(key)\n dims.append(k.dims[0])\n elif not isinstance(k, integer_types):\n dims.append(d)\n if len(set(dims)) == len(dims):\n return self._broadcast_indexes_outer(key)\n\n return self._broadcast_indexes_vectorized(key)\n\n def _broadcast_indexes_basic(self, key):\n dims = tuple(\n dim for k, dim in zip(key, self.dims) if not isinstance(k, integer_types)\n )\n return dims, BasicIndexer(key), None\n\n def _validate_indexers(self, key):\n \"\"\" Make sanity checks \"\"\"\n for dim, k in zip(self.dims, key):\n if isinstance(k, BASIC_INDEXING_TYPES):\n pass\n else:\n if not isinstance(k, Variable):\n k = np.asarray(k)\n if k.ndim > 1:\n raise IndexError(\n \"Unlabeled multi-dimensional array cannot be \"\n \"used for indexing: {}\".format(k)\n )\n if k.dtype.kind == \"b\":\n if self.shape[self.get_axis_num(dim)] != len(k):\n raise IndexError(\n \"Boolean array size {:d} is used to index array \"\n \"with shape {:s}.\".format(len(k), str(self.shape))\n )\n if k.ndim > 1:\n raise IndexError(\n \"{}-dimensional boolean indexing is \"\n \"not supported. \".format(k.ndim)\n )\n if getattr(k, \"dims\", (dim,)) != (dim,):\n raise IndexError(\n \"Boolean indexer should be unlabeled or on the \"\n \"same dimension to the indexed array. Indexer is \"\n \"on {:s} but the target dimension is {:s}.\".format(\n str(k.dims), dim\n )\n )\n\n def _broadcast_indexes_outer(self, key):\n dims = tuple(\n k.dims[0] if isinstance(k, Variable) else dim\n for k, dim in zip(key, self.dims)\n if not isinstance(k, integer_types)\n )\n\n new_key = []\n for k in key:\n if isinstance(k, Variable):\n k = k.data\n if not isinstance(k, BASIC_INDEXING_TYPES):\n k = np.asarray(k)\n if k.dtype.kind == \"b\":\n (k,) = np.nonzero(k)\n new_key.append(k)\n\n return dims, OuterIndexer(tuple(new_key)), None\n\n def _nonzero(self):\n \"\"\" Equivalent numpy's nonzero but returns a tuple of Varibles. 
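To make the basic/outer/vectorized decision in `_broadcast_indexes` concrete, here is a small sketch (not from the source): unlabeled array keys are applied orthogonally (outer indexing), while `Variable` keys sharing a dimension are broadcast and applied pointwise (vectorized indexing).

```python
import numpy as np
import xarray as xr

v = xr.Variable(("x", "y"), np.arange(12).reshape(3, 4))

# plain lists/arrays -> outer (orthogonal) indexing: a 2x2 block
v[[0, 2], [1, 3]].shape            # (2, 2)

# Variable indexers on a shared dimension -> vectorized (pointwise) indexing
ix = xr.Variable(("points",), [0, 2])
iy = xr.Variable(("points",), [1, 3])
v[ix, iy].dims                     # ('points',), i.e. two selected points
```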
\"\"\"\n # TODO we should replace dask's native nonzero\n # after https://github.com/dask/dask/issues/1076 is implemented.\n nonzeros = np.nonzero(self.data)\n return tuple(Variable((dim), nz) for nz, dim in zip(nonzeros, self.dims))\n\n def _broadcast_indexes_vectorized(self, key):\n variables = []\n out_dims_set = OrderedSet()\n for dim, value in zip(self.dims, key):\n if isinstance(value, slice):\n out_dims_set.add(dim)\n else:\n variable = (\n value\n if isinstance(value, Variable)\n else as_variable(value, name=dim)\n )\n if variable.dtype.kind == \"b\": # boolean indexing case\n (variable,) = variable._nonzero()\n\n variables.append(variable)\n out_dims_set.update(variable.dims)\n\n variable_dims = set()\n for variable in variables:\n variable_dims.update(variable.dims)\n\n slices = []\n for i, (dim, value) in enumerate(zip(self.dims, key)):\n if isinstance(value, slice):\n if dim in variable_dims:\n # We only convert slice objects to variables if they share\n # a dimension with at least one other variable. Otherwise,\n # we can equivalently leave them as slices aknd transpose\n # the result. This is significantly faster/more efficient\n # for most array backends.\n values = np.arange(*value.indices(self.sizes[dim]))\n variables.insert(i - len(slices), Variable((dim,), values))\n else:\n slices.append((i, value))\n\n try:\n variables = _broadcast_compat_variables(*variables)\n except ValueError:\n raise IndexError(\"Dimensions of indexers mismatch: {}\".format(key))\n\n out_key = [variable.data for variable in variables]\n out_dims = tuple(out_dims_set)\n slice_positions = set()\n for i, value in slices:\n out_key.insert(i, value)\n new_position = out_dims.index(self.dims[i])\n slice_positions.add(new_position)\n\n if slice_positions:\n new_order = [i for i in range(len(out_dims)) if i not in slice_positions]\n else:\n new_order = None\n\n return out_dims, VectorizedIndexer(tuple(out_key)), new_order\n\n def __getitem__(self: VariableType, key) -> VariableType:\n \"\"\"Return a new Variable object whose contents are consistent with\n getting the provided key from the underlying data.\n\n NB. __getitem__ and __setitem__ implement xarray-style indexing,\n where if keys are unlabeled arrays, we index the array orthogonally\n with them. If keys are labeled array (such as Variables), they are\n broadcasted with our usual scheme and then the array is indexed with\n the broadcasted key, like numpy's fancy indexing.\n\n If you really want to do indexing like `x[x > 0]`, manipulate the numpy\n array `x.values` directly.\n \"\"\"\n dims, indexer, new_order = self._broadcast_indexes(key)\n data = as_indexable(self._data)[indexer]\n if new_order:\n data = duck_array_ops.moveaxis(data, range(len(new_order)), new_order)\n return self._finalize_indexing_result(dims, data)\n\n def _finalize_indexing_result(self: VariableType, dims, data) -> VariableType:\n \"\"\"Used by IndexVariable to return IndexVariable objects when possible.\n \"\"\"\n return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)\n\n def _getitem_with_mask(self, key, fill_value=dtypes.NA):\n \"\"\"Index this Variable with -1 remapped to fill_value.\"\"\"\n # TODO(shoyer): expose this method in public API somewhere (isel?) 
and\n # use it for reindex.\n # TODO(shoyer): add a sanity check that all other integers are\n # non-negative\n # TODO(shoyer): add an optimization, remapping -1 to an adjacent value\n # that is actually indexed rather than mapping it to the last value\n # along each axis.\n\n if fill_value is dtypes.NA:\n fill_value = dtypes.get_fill_value(self.dtype)\n\n dims, indexer, new_order = self._broadcast_indexes(key)\n\n if self.size:\n if isinstance(self._data, dask_array_type):\n # dask's indexing is faster this way; also vindex does not\n # support negative indices yet:\n # https://github.com/dask/dask/pull/2967\n actual_indexer = indexing.posify_mask_indexer(indexer)\n else:\n actual_indexer = indexer\n\n data = as_indexable(self._data)[actual_indexer]\n mask = indexing.create_mask(indexer, self.shape, data)\n data = duck_array_ops.where(mask, fill_value, data)\n else:\n # array cannot be indexed along dimensions of size 0, so just\n # build the mask directly instead.\n mask = indexing.create_mask(indexer, self.shape)\n data = np.broadcast_to(fill_value, getattr(mask, \"shape\", ()))\n\n if new_order:\n data = duck_array_ops.moveaxis(data, range(len(new_order)), new_order)\n return self._finalize_indexing_result(dims, data)\n\n def __setitem__(self, key, value):\n \"\"\"__setitem__ is overloaded to access the underlying numpy values with\n orthogonal indexing.\n\n See __getitem__ for more details.\n \"\"\"\n dims, index_tuple, new_order = self._broadcast_indexes(key)\n\n if not isinstance(value, Variable):\n value = as_compatible_data(value)\n if value.ndim > len(dims):\n raise ValueError(\n \"shape mismatch: value array of shape %s could not be \"\n \"broadcast to indexing result with %s dimensions\"\n % (value.shape, len(dims))\n )\n if value.ndim == 0:\n value = Variable((), value)\n else:\n value = Variable(dims[-value.ndim :], value)\n # broadcast to become assignable\n value = value.set_dims(dims).data\n\n if new_order:\n value = duck_array_ops.asarray(value)\n value = value[(len(dims) - value.ndim) * (np.newaxis,) + (Ellipsis,)]\n value = duck_array_ops.moveaxis(value, new_order, range(len(new_order)))\n\n indexable = as_indexable(self._data)\n indexable[index_tuple] = value\n\n @property\n def attrs(self) -> Dict[Hashable, Any]:\n \"\"\"Dictionary of local attributes on this variable.\n \"\"\"\n if self._attrs is None:\n self._attrs = {}\n return self._attrs\n\n @attrs.setter\n def attrs(self, value: Mapping[Hashable, Any]) -> None:\n self._attrs = dict(value)\n\n @property\n def encoding(self):\n \"\"\"Dictionary of encodings on this variable.\n \"\"\"\n if self._encoding is None:\n self._encoding = {}\n return self._encoding\n\n @encoding.setter\n def encoding(self, value):\n try:\n self._encoding = dict(value)\n except ValueError:\n raise ValueError(\"encoding must be castable to a dictionary\")\n\n def copy(self, deep=True, data=None):\n \"\"\"Returns a copy of this object.\n\n If `deep=True`, the data array is loaded into memory and copied onto\n the new object. Dimensions, attributes and encodings are always copied.\n\n Use `data` to create a new object with the same structure as\n original but entirely new data.\n\n Parameters\n ----------\n deep : bool, optional\n Whether the data array is loaded into memory and copied onto\n the new object. Default is True.\n data : array_like, optional\n Data to use in the new object. 
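A minimal sketch of the orthogonal `__setitem__` behaviour described above: the same broadcasting rules as `__getitem__` apply, so unlabeled array keys assign to the full outer product of the given positions.

```python
import numpy as np
import xarray as xr

v = xr.Variable(("x", "y"), np.zeros((3, 4), dtype=int))

# assigns the whole 2x2 block at rows [0, 2] x columns [1, 3]
v[[0, 2], [1, 3]] = 1
int(v.values.sum())        # 4: four positions were written
```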
Must have same shape as original.\n When `data` is used, `deep` is ignored.\n\n Returns\n -------\n object : Variable\n New object with dimensions, attributes, encodings, and optionally\n data copied from original.\n\n Examples\n --------\n\n Shallow copy versus deep copy\n\n >>> var = xr.Variable(data=[1, 2, 3], dims='x')\n >>> var.copy()\n \n array([1, 2, 3])\n >>> var_0 = var.copy(deep=False)\n >>> var_0[0] = 7\n >>> var_0\n \n array([7, 2, 3])\n >>> var\n \n array([7, 2, 3])\n\n Changing the data using the ``data`` argument maintains the\n structure of the original object, but with the new data. Original\n object is unaffected.\n\n >>> var.copy(data=[0.1, 0.2, 0.3])\n \n array([ 0.1, 0.2, 0.3])\n >>> var\n \n array([7, 2, 3])\n\n See Also\n --------\n pandas.DataFrame.copy\n \"\"\"\n if data is None:\n data = self._data\n\n if isinstance(data, indexing.MemoryCachedArray):\n # don't share caching between copies\n data = indexing.MemoryCachedArray(data.array)\n\n if deep:\n if hasattr(data, \"__array_function__\") or isinstance(\n data, dask_array_type\n ):\n data = data.copy()\n elif not isinstance(data, PandasIndexAdapter):\n # pandas.Index is immutable\n data = np.array(data)\n else:\n data = as_compatible_data(data)\n if self.shape != data.shape:\n raise ValueError(\n \"Data shape {} must match shape of object {}\".format(\n data.shape, self.shape\n )\n )\n\n # note:\n # dims is already an immutable tuple\n # attributes and encoding will be copied when the new Array is created\n return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True)\n\n def __copy__(self):\n return self.copy(deep=False)\n\n def __deepcopy__(self, memo=None):\n # memo does nothing but is required for compatibility with\n # copy.deepcopy\n return self.copy(deep=True)\n\n # mutable objects should not be hashable\n # https://github.com/python/mypy/issues/4266\n __hash__ = None # type: ignore\n\n @property\n def chunks(self):\n \"\"\"Block dimensions for this array's data or None if it's not a dask\n array.\n \"\"\"\n return getattr(self._data, \"chunks\", None)\n\n _array_counter = itertools.count()\n\n def chunk(self, chunks=None, name=None, lock=False):\n \"\"\"Coerce this array's data into a dask arrays with the given chunks.\n\n If this variable is a non-dask array, it will be converted to dask\n array. If it's a dask array, it will be rechunked to the given chunk\n sizes.\n\n If neither chunks is not provided for one or more dimensions, chunk\n sizes along that dimension will not be updated; non-dask arrays will be\n converted into dask arrays with a single block.\n\n Parameters\n ----------\n chunks : int, tuple or dict, optional\n Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or\n ``{'x': 5, 'y': 5}``.\n name : str, optional\n Used to generate the name for this array in the internal dask\n graph. Does not need not be unique.\n lock : optional\n Passed on to :py:func:`dask.array.from_array`, if the array is not\n already as dask array.\n\n Returns\n -------\n chunked : xarray.Variable\n \"\"\"\n import dask\n import dask.array as da\n\n if utils.is_dict_like(chunks):\n chunks = {self.get_axis_num(dim): chunk for dim, chunk in chunks.items()}\n\n if chunks is None:\n chunks = self.chunks or self.shape\n\n data = self._data\n if isinstance(data, da.Array):\n data = data.rechunk(chunks)\n else:\n if isinstance(data, indexing.ExplicitlyIndexed):\n # Unambiguously handle array storage backends (like NetCDF4 and h5py)\n # that can't handle general array indexing. 
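To illustrate `chunk` as described above (a sketch, assuming dask is installed): dict chunks map dimension names to block sizes, and the result is a new dask-backed Variable while the original stays in memory.

```python
import numpy as np
import xarray as xr

v = xr.Variable(("x", "y"), np.arange(12).reshape(3, 4))

vc = v.chunk({"x": 1})     # convert to a dask array, one row per block
vc.chunks                  # ((1, 1, 1), (4,))
v.chunks                   # None: the original numpy-backed Variable is unchanged
```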
For example, in netCDF4 you\n # can do \"outer\" indexing along two dimensions independent, which works\n # differently from how NumPy handles it.\n # da.from_array works by using lazy indexing with a tuple of slices.\n # Using OuterIndexer is a pragmatic choice: dask does not yet handle\n # different indexing types in an explicit way:\n # https://github.com/dask/dask/issues/2883\n data = indexing.ImplicitToExplicitIndexingAdapter(\n data, indexing.OuterIndexer\n )\n if LooseVersion(dask.__version__) < \"2.0.0\":\n kwargs = {}\n else:\n # All of our lazily loaded backend array classes should use NumPy\n # array operations.\n kwargs = {\"meta\": np.ndarray}\n else:\n kwargs = {}\n\n if utils.is_dict_like(chunks):\n chunks = tuple(chunks.get(n, s) for n, s in enumerate(self.shape))\n\n data = da.from_array(data, chunks, name=name, lock=lock, **kwargs)\n\n return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True)\n\n def isel(\n self: VariableType,\n indexers: Mapping[Hashable, Any] = None,\n **indexers_kwargs: Any\n ) -> VariableType:\n \"\"\"Return a new array indexed along the specified dimension(s).\n\n Parameters\n ----------\n **indexers : {dim: indexer, ...}\n Keyword arguments with names matching dimensions and values given\n by integers, slice objects or arrays.\n\n Returns\n -------\n obj : Array object\n A new Array with the selected data and dimensions. In general,\n the new variable's data will be a view of this variable's data,\n unless numpy fancy indexing was triggered by using an array\n indexer, in which case the data will be a copy.\n \"\"\"\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"isel\")\n\n invalid = indexers.keys() - set(self.dims)\n if invalid:\n raise ValueError(\"dimensions %r do not exist\" % invalid)\n\n key = tuple(indexers.get(dim, slice(None)) for dim in self.dims)\n return self[key]\n\n def squeeze(self, dim=None):\n \"\"\"Return a new object with squeezed data.\n\n Parameters\n ----------\n dim : None or str or tuple of str, optional\n Selects a subset of the length one dimensions. If a dimension is\n selected with length greater than one, an error is raised. 
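A brief sketch of `isel` usage as documented above: integer indexers drop the dimension, slices and arrays keep it, and the mapping form is equivalent to keyword arguments.

```python
import numpy as np
import xarray as xr

v = xr.Variable(("x", "y"), np.arange(12).reshape(3, 4))

v.isel(x=0).dims              # ('y',): integer indexers drop the dimension
v.isel(y=slice(1, 3)).shape   # (3, 2): slices keep it
v.isel({"x": [0, 2]}).shape   # (2, 4): mapping form, same result as keywords
```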
If\n None, all length one dimensions are squeezed.\n\n Returns\n -------\n squeezed : same type as caller\n This object, but with with all or a subset of the dimensions of\n length 1 removed.\n\n See Also\n --------\n numpy.squeeze\n \"\"\"\n dims = common.get_squeeze_dims(self, dim)\n return self.isel({d: 0 for d in dims})\n\n def _shift_one_dim(self, dim, count, fill_value=dtypes.NA):\n axis = self.get_axis_num(dim)\n\n if count > 0:\n keep = slice(None, -count)\n elif count < 0:\n keep = slice(-count, None)\n else:\n keep = slice(None)\n\n trimmed_data = self[(slice(None),) * axis + (keep,)].data\n\n if fill_value is dtypes.NA:\n dtype, fill_value = dtypes.maybe_promote(self.dtype)\n else:\n dtype = self.dtype\n\n shape = list(self.shape)\n shape[axis] = min(abs(count), shape[axis])\n\n if isinstance(trimmed_data, dask_array_type):\n chunks = list(trimmed_data.chunks)\n chunks[axis] = (shape[axis],)\n full = functools.partial(da.full, chunks=chunks)\n else:\n full = np.full\n\n filler = full(shape, fill_value, dtype=dtype)\n\n if count > 0:\n arrays = [filler, trimmed_data]\n else:\n arrays = [trimmed_data, filler]\n\n data = duck_array_ops.concatenate(arrays, axis)\n\n if isinstance(data, dask_array_type):\n # chunked data should come out with the same chunks; this makes\n # it feasible to combine shifted and unshifted data\n # TODO: remove this once dask.array automatically aligns chunks\n data = data.rechunk(self.data.chunks)\n\n return type(self)(self.dims, data, self._attrs, fastpath=True)\n\n def shift(self, shifts=None, fill_value=dtypes.NA, **shifts_kwargs):\n \"\"\"\n Return a new Variable with shifted data.\n\n Parameters\n ----------\n shifts : mapping of the form {dim: offset}\n Integer offset to shift along each of the given dimensions.\n Positive offsets shift to the right; negative offsets shift to the\n left.\n fill_value: scalar, optional\n Value to use for newly missing values\n **shifts_kwargs:\n The keyword arguments form of ``shifts``.\n One of shifts or shifts_kwarg must be provided.\n\n Returns\n -------\n shifted : Variable\n Variable with the same dimensions and attributes but shifted data.\n \"\"\"\n shifts = either_dict_or_kwargs(shifts, shifts_kwargs, \"shift\")\n result = self\n for dim, count in shifts.items():\n result = result._shift_one_dim(dim, count, fill_value=fill_value)\n return result\n\n def pad_with_fill_value(\n self, pad_widths=None, fill_value=dtypes.NA, **pad_widths_kwargs\n ):\n \"\"\"\n Return a new Variable with paddings.\n\n Parameters\n ----------\n pad_width: Mapping of the form {dim: (before, after)}\n Number of values padded to the edges of each dimension.\n **pad_widths_kwargs:\n Keyword argument for pad_widths\n \"\"\"\n pad_widths = either_dict_or_kwargs(pad_widths, pad_widths_kwargs, \"pad\")\n\n if fill_value is dtypes.NA:\n dtype, fill_value = dtypes.maybe_promote(self.dtype)\n else:\n dtype = self.dtype\n\n if isinstance(self.data, dask_array_type):\n array = self.data\n\n # Dask does not yet support pad. 
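A minimal sketch of `shift` as described above: positive offsets shift to the right, the vacated positions are filled with `fill_value` (NaN by default, which promotes integer data to float).

```python
import numpy as np
import xarray as xr

v = xr.Variable(("x",), np.array([1, 2, 3, 4]))

v.shift(x=1).values                  # [nan, 1., 2., 3.]; dtype promoted for NaN
v.shift(x=-2, fill_value=0).values   # [3, 4, 0, 0]; integer dtype preserved
```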
We manually implement it.\n # https://github.com/dask/dask/issues/1926\n for d, pad in pad_widths.items():\n axis = self.get_axis_num(d)\n before_shape = list(array.shape)\n before_shape[axis] = pad[0]\n before_chunks = list(array.chunks)\n before_chunks[axis] = (pad[0],)\n after_shape = list(array.shape)\n after_shape[axis] = pad[1]\n after_chunks = list(array.chunks)\n after_chunks[axis] = (pad[1],)\n\n arrays = []\n if pad[0] > 0:\n arrays.append(\n da.full(\n before_shape, fill_value, dtype=dtype, chunks=before_chunks\n )\n )\n arrays.append(array)\n if pad[1] > 0:\n arrays.append(\n da.full(\n after_shape, fill_value, dtype=dtype, chunks=after_chunks\n )\n )\n if len(arrays) > 1:\n array = da.concatenate(arrays, axis=axis)\n else:\n pads = [(0, 0) if d not in pad_widths else pad_widths[d] for d in self.dims]\n array = np.pad(\n self.data.astype(dtype, copy=False),\n pads,\n mode=\"constant\",\n constant_values=fill_value,\n )\n return type(self)(self.dims, array)\n\n def _roll_one_dim(self, dim, count):\n axis = self.get_axis_num(dim)\n\n count %= self.shape[axis]\n if count != 0:\n indices = [slice(-count, None), slice(None, -count)]\n else:\n indices = [slice(None)]\n\n arrays = [self[(slice(None),) * axis + (idx,)].data for idx in indices]\n\n data = duck_array_ops.concatenate(arrays, axis)\n\n if isinstance(data, dask_array_type):\n # chunked data should come out with the same chunks; this makes\n # it feasible to combine shifted and unshifted data\n # TODO: remove this once dask.array automatically aligns chunks\n data = data.rechunk(self.data.chunks)\n\n return type(self)(self.dims, data, self._attrs, fastpath=True)\n\n def roll(self, shifts=None, **shifts_kwargs):\n \"\"\"\n Return a new Variable with rolld data.\n\n Parameters\n ----------\n shifts : mapping of the form {dim: offset}\n Integer offset to roll along each of the given dimensions.\n Positive offsets roll to the right; negative offsets roll to the\n left.\n **shifts_kwargs:\n The keyword arguments form of ``shifts``.\n One of shifts or shifts_kwarg must be provided.\n\n Returns\n -------\n shifted : Variable\n Variable with the same dimensions and attributes but rolled data.\n \"\"\"\n shifts = either_dict_or_kwargs(shifts, shifts_kwargs, \"roll\")\n\n result = self\n for dim, count in shifts.items():\n result = result._roll_one_dim(dim, count)\n return result\n\n def transpose(self, *dims) -> \"Variable\":\n \"\"\"Return a new Variable object with transposed dimensions.\n\n Parameters\n ----------\n *dims : str, optional\n By default, reverse the dimensions. Otherwise, reorder the\n dimensions to this order.\n\n Returns\n -------\n transposed : Variable\n The returned object has transposed data and dimensions with the\n same attributes as the original.\n\n Notes\n -----\n This operation returns a view of this variable's data. 
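A short sketch contrasting `roll` and `pad_with_fill_value` (the padding method as defined in this version of the source): `roll` wraps values around, while padding grows the dimension with fill values.

```python
import numpy as np
import xarray as xr

v = xr.Variable(("x",), np.array([1, 2, 3]))

v.roll(x=1).values                            # [3, 1, 2]: values wrap around
v.pad_with_fill_value({"x": (1, 2)}).values   # [nan, 1., 2., 3., nan, nan]
```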
It is\n lazy for dask-backed Variables but not for numpy-backed Variables.\n\n See Also\n --------\n numpy.transpose\n \"\"\"\n if len(dims) == 0:\n dims = self.dims[::-1]\n axes = self.get_axis_num(dims)\n if len(dims) < 2: # no need to transpose if only one dimension\n return self.copy(deep=False)\n\n data = as_indexable(self._data).transpose(axes)\n return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)\n\n @property\n def T(self) -> \"Variable\":\n return self.transpose()\n\n def set_dims(self, dims, shape=None):\n \"\"\"Return a new variable with given set of dimensions.\n This method might be used to attach new dimension(s) to variable.\n\n When possible, this operation does not copy this variable's data.\n\n Parameters\n ----------\n dims : str or sequence of str or dict\n Dimensions to include on the new variable. If a dict, values are\n used to provide the sizes of new dimensions; otherwise, new\n dimensions are inserted with length 1.\n\n Returns\n -------\n Variable\n \"\"\"\n if isinstance(dims, str):\n dims = [dims]\n\n if shape is None and utils.is_dict_like(dims):\n shape = dims.values()\n\n missing_dims = set(self.dims) - set(dims)\n if missing_dims:\n raise ValueError(\n \"new dimensions %r must be a superset of \"\n \"existing dimensions %r\" % (dims, self.dims)\n )\n\n self_dims = set(self.dims)\n expanded_dims = tuple(d for d in dims if d not in self_dims) + self.dims\n\n if self.dims == expanded_dims:\n # don't use broadcast_to unless necessary so the result remains\n # writeable if possible\n expanded_data = self.data\n elif shape is not None:\n dims_map = dict(zip(dims, shape))\n tmp_shape = tuple(dims_map[d] for d in expanded_dims)\n expanded_data = duck_array_ops.broadcast_to(self.data, tmp_shape)\n else:\n expanded_data = self.data[(None,) * (len(expanded_dims) - self.ndim)]\n\n expanded_var = Variable(\n expanded_dims, expanded_data, self._attrs, self._encoding, fastpath=True\n )\n return expanded_var.transpose(*dims)\n\n def _stack_once(self, dims, new_dim):\n if not set(dims) <= set(self.dims):\n raise ValueError(\"invalid existing dimensions: %s\" % dims)\n\n if new_dim in self.dims:\n raise ValueError(\n \"cannot create a new dimension with the same \"\n \"name as an existing dimension\"\n )\n\n if len(dims) == 0:\n # don't stack\n return self.copy(deep=False)\n\n other_dims = [d for d in self.dims if d not in dims]\n dim_order = other_dims + list(dims)\n reordered = self.transpose(*dim_order)\n\n new_shape = reordered.shape[: len(other_dims)] + (-1,)\n new_data = reordered.data.reshape(new_shape)\n new_dims = reordered.dims[: len(other_dims)] + (new_dim,)\n\n return Variable(new_dims, new_data, self._attrs, self._encoding, fastpath=True)\n\n def stack(self, dimensions=None, **dimensions_kwargs):\n \"\"\"\n Stack any number of existing dimensions into a single new dimension.\n\n New dimensions will be added at the end, and the order of the data\n along each new dimension will be in contiguous (C) order.\n\n Parameters\n ----------\n dimensions : Mapping of form new_name=(dim1, dim2, ...)\n Names of new dimensions, and the existing dimensions that they\n replace.\n **dimensions_kwargs:\n The keyword arguments form of ``dimensions``.\n One of dimensions or dimensions_kwargs must be provided.\n\n Returns\n -------\n stacked : Variable\n Variable with the same attributes but stacked data.\n\n See also\n --------\n Variable.unstack\n \"\"\"\n dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, \"stack\")\n result = self\n for 
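To illustrate `transpose` and `set_dims` as documented above (a sketch, not from the source): a sequence of names inserts new dimensions with length 1, while a dict also provides the sizes to broadcast to.

```python
import numpy as np
import xarray as xr

v = xr.Variable(("x", "y"), np.arange(6).reshape(2, 3))

v.transpose("y", "x").shape                 # (3, 2)
v.set_dims(("t", "x", "y")).shape           # (1, 2, 3): new dims get length 1
v.set_dims({"t": 4, "x": 2, "y": 3}).shape  # (4, 2, 3): dict values give new sizes
```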
new_dim, dims in dimensions.items():\n result = result._stack_once(dims, new_dim)\n return result\n\n def _unstack_once(self, dims, old_dim):\n new_dim_names = tuple(dims.keys())\n new_dim_sizes = tuple(dims.values())\n\n if old_dim not in self.dims:\n raise ValueError(\"invalid existing dimension: %s\" % old_dim)\n\n if set(new_dim_names).intersection(self.dims):\n raise ValueError(\n \"cannot create a new dimension with the same \"\n \"name as an existing dimension\"\n )\n\n if np.prod(new_dim_sizes) != self.sizes[old_dim]:\n raise ValueError(\n \"the product of the new dimension sizes must \"\n \"equal the size of the old dimension\"\n )\n\n other_dims = [d for d in self.dims if d != old_dim]\n dim_order = other_dims + [old_dim]\n reordered = self.transpose(*dim_order)\n\n new_shape = reordered.shape[: len(other_dims)] + new_dim_sizes\n new_data = reordered.data.reshape(new_shape)\n new_dims = reordered.dims[: len(other_dims)] + new_dim_names\n\n return Variable(new_dims, new_data, self._attrs, self._encoding, fastpath=True)\n\n def unstack(self, dimensions=None, **dimensions_kwargs):\n \"\"\"\n Unstack an existing dimension into multiple new dimensions.\n\n New dimensions will be added at the end, and the order of the data\n along each new dimension will be in contiguous (C) order.\n\n Parameters\n ----------\n dimensions : mapping of the form old_dim={dim1: size1, ...}\n Names of existing dimensions, and the new dimensions and sizes\n that they map to.\n **dimensions_kwargs:\n The keyword arguments form of ``dimensions``.\n One of dimensions or dimensions_kwargs must be provided.\n\n Returns\n -------\n unstacked : Variable\n Variable with the same attributes but unstacked data.\n\n See also\n --------\n Variable.stack\n \"\"\"\n dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, \"unstack\")\n result = self\n for old_dim, dims in dimensions.items():\n result = result._unstack_once(dims, old_dim)\n return result\n\n def fillna(self, value):\n return ops.fillna(self, value)\n\n def where(self, cond, other=dtypes.NA):\n return ops.where_method(self, cond, other)\n\n def reduce(\n self,\n func,\n dim=None,\n axis=None,\n keep_attrs=None,\n keepdims=False,\n allow_lazy=False,\n **kwargs\n ):\n \"\"\"Reduce this array by applying `func` along some dimension(s).\n\n Parameters\n ----------\n func : function\n Function which can be called in the form\n `func(x, axis=axis, **kwargs)` to return the result of reducing an\n np.ndarray over an integer valued axis.\n dim : str or sequence of str, optional\n Dimension(s) over which to apply `func`.\n axis : int or sequence of int, optional\n Axis(es) over which to apply `func`. Only one of the 'dim'\n and 'axis' arguments can be supplied. If neither are supplied, then\n the reduction is calculated over the flattened array (by calling\n `func(x)` without an axis argument).\n keep_attrs : bool, optional\n If True, the variable's attributes (`attrs`) will be copied from\n the original object to the new one. 
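A minimal round-trip sketch of `stack`/`unstack` as described above: stacking flattens the listed dimensions into one new trailing dimension in C order, and unstacking reverses it given the original sizes.

```python
import numpy as np
import xarray as xr

v = xr.Variable(("x", "y"), np.arange(6).reshape(2, 3))

s = v.stack(z=("x", "y"))            # dims ('z',), shape (6,), C-order flattening
u = s.unstack(z={"x": 2, "y": 3})    # back to dims ('x', 'y'), shape (2, 3)
np.array_equal(u.values, v.values)   # True
```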
If False (default), the new\n object will be returned without attributes.\n keepdims : bool, default False\n If True, the dimensions which are reduced are left in the result\n as dimensions of size one\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : Array\n Array with summarized data and the indicated dimension(s)\n removed.\n \"\"\"\n if dim is common.ALL_DIMS:\n dim = None\n if dim is not None and axis is not None:\n raise ValueError(\"cannot supply both 'axis' and 'dim' arguments\")\n\n if dim is not None:\n axis = self.get_axis_num(dim)\n input_data = self.data if allow_lazy else self.values\n if axis is not None:\n data = func(input_data, axis=axis, **kwargs)\n else:\n data = func(input_data, **kwargs)\n\n if getattr(data, \"shape\", ()) == self.shape:\n dims = self.dims\n else:\n removed_axes = (\n range(self.ndim) if axis is None else np.atleast_1d(axis) % self.ndim\n )\n if keepdims:\n # Insert np.newaxis for removed dims\n slices = tuple(\n np.newaxis if i in removed_axes else slice(None, None)\n for i in range(self.ndim)\n )\n if getattr(data, \"shape\", None) is None:\n # Reduce has produced a scalar value, not an array-like\n data = np.asanyarray(data)[slices]\n else:\n data = data[slices]\n dims = self.dims\n else:\n dims = [\n adim for n, adim in enumerate(self.dims) if n not in removed_axes\n ]\n\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n attrs = self._attrs if keep_attrs else None\n\n return Variable(dims, data, attrs=attrs)\n\n @classmethod\n def concat(cls, variables, dim=\"concat_dim\", positions=None, shortcut=False):\n \"\"\"Concatenate variables along a new or existing dimension.\n\n Parameters\n ----------\n variables : iterable of Array\n Arrays to stack together. Each variable is expected to have\n matching dimensions and shape except for along the stacked\n dimension.\n dim : str or DataArray, optional\n Name of the dimension to stack along. This can either be a new\n dimension name, in which case it is added along axis=0, or an\n existing dimension name, in which case the location of the\n dimension is unchanged. 
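A brief sketch of `reduce` as documented above: `func` is applied along the axis corresponding to `dim`, and `keepdims=True` keeps the reduced dimension with size one.

```python
import numpy as np
import xarray as xr

v = xr.Variable(("x", "y"), np.arange(6.0).reshape(2, 3))

v.reduce(np.sum, dim="y").values                 # [3., 12.]; dims ('x',)
v.reduce(np.mean, dim="y", keepdims=True).shape  # (2, 1): reduced dim kept, size 1
```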
Where to insert the new dimension is\n determined by the first variable.\n positions : None or list of integer arrays, optional\n List of integer arrays which specifies the integer positions to\n which to assign each dataset along the concatenated dimension.\n If not supplied, objects are concatenated in the provided order.\n shortcut : bool, optional\n This option is used internally to speed-up groupby operations.\n If `shortcut` is True, some checks of internal consistency between\n arrays to concatenate are skipped.\n\n Returns\n -------\n stacked : Variable\n Concatenated Variable formed by stacking all the supplied variables\n along the given dimension.\n \"\"\"\n if not isinstance(dim, str):\n dim, = dim.dims\n\n # can't do this lazily: we need to loop through variables at least\n # twice\n variables = list(variables)\n first_var = variables[0]\n\n arrays = [v.data for v in variables]\n\n if dim in first_var.dims:\n axis = first_var.get_axis_num(dim)\n dims = first_var.dims\n data = duck_array_ops.concatenate(arrays, axis=axis)\n if positions is not None:\n # TODO: deprecate this option -- we don't need it for groupby\n # any more.\n indices = nputils.inverse_permutation(np.concatenate(positions))\n data = duck_array_ops.take(data, indices, axis=axis)\n else:\n axis = 0\n dims = (dim,) + first_var.dims\n data = duck_array_ops.stack(arrays, axis=axis)\n\n attrs = dict(first_var.attrs)\n encoding = dict(first_var.encoding)\n if not shortcut:\n for var in variables:\n if var.dims != first_var.dims:\n raise ValueError(\"inconsistent dimensions\")\n utils.remove_incompatible_items(attrs, var.attrs)\n\n return cls(dims, data, attrs, encoding)\n\n def equals(self, other, equiv=duck_array_ops.array_equiv):\n \"\"\"True if two Variables have the same dimensions and values;\n otherwise False.\n\n Variables can still be equal (like pandas objects) if they have NaN\n values in the same locations.\n\n This method is necessary because `v1 == v2` for Variables\n does element-wise comparisons (like numpy.ndarrays).\n \"\"\"\n other = getattr(other, \"variable\", other)\n try:\n return self.dims == other.dims and (\n self._data is other._data or equiv(self.data, other.data)\n )\n except (TypeError, AttributeError):\n return False\n\n def broadcast_equals(self, other, equiv=duck_array_ops.array_equiv):\n \"\"\"True if two Variables have the values after being broadcast against\n each other; otherwise False.\n\n Variables can still be equal (like pandas objects) if they have NaN\n values in the same locations.\n \"\"\"\n try:\n self, other = broadcast_variables(self, other)\n except (ValueError, AttributeError):\n return False\n return self.equals(other, equiv=equiv)\n\n def identical(self, other):\n \"\"\"Like equals, but also checks attributes.\n \"\"\"\n try:\n return utils.dict_equiv(self.attrs, other.attrs) and self.equals(other)\n except (TypeError, AttributeError):\n return False\n\n def no_conflicts(self, other):\n \"\"\"True if the intersection of two Variable's non-null data is\n equal; otherwise false.\n\n Variables can thus still be equal if there are locations where either,\n or both, contain NaN values.\n \"\"\"\n return self.broadcast_equals(other, equiv=duck_array_ops.array_notnull_equiv)\n\n def quantile(self, q, dim=None, interpolation=\"linear\", keep_attrs=None):\n \"\"\"Compute the qth quantile of the data along the specified dimension.\n\n Returns the qth quantiles(s) of the array elements.\n\n Parameters\n ----------\n q : float in range of [0,1] (or sequence of floats)\n 
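A small sketch of `Variable.concat` plus the equality helpers described above: an existing dimension name concatenates along that axis, a new name stacks along a new leading axis, and `identical` additionally compares attributes.

```python
import xarray as xr

a = xr.Variable(("x",), [1, 2, 3])
b = xr.Variable(("x",), [4, 5, 6])

xr.Variable.concat([a, b], dim="x").shape   # (6,): concatenate along an existing dim
xr.Variable.concat([a, b], dim="t").dims    # ('t', 'x'): new dim added at axis 0

a.equals(xr.Variable(("x",), [1, 2, 3]))                              # True
a.identical(xr.Variable(("x",), [1, 2, 3], attrs={"units": "m"}))     # False: attrs differ
```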
Quantile to compute, which must be between 0 and 1\n inclusive.\n dim : str or sequence of str, optional\n Dimension(s) over which to apply quantile.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to\n use when the desired quantile lies between two data points\n ``i < j``:\n * linear: ``i + (j - i) * fraction``, where ``fraction`` is\n the fractional part of the index surrounded by ``i`` and\n ``j``.\n * lower: ``i``.\n * higher: ``j``.\n * nearest: ``i`` or ``j``, whichever is nearest.\n * midpoint: ``(i + j) / 2``.\n keep_attrs : bool, optional\n If True, the variable's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n\n Returns\n -------\n quantiles : Variable\n If `q` is a single quantile, then the result\n is a scalar. If multiple percentiles are given, first axis of\n the result corresponds to the quantile and a quantile dimension\n is added to the return array. The other dimensions are the\n dimensions that remain after the reduction of the array.\n\n See Also\n --------\n numpy.nanpercentile, pandas.Series.quantile, Dataset.quantile,\n DataArray.quantile\n \"\"\"\n if isinstance(self.data, dask_array_type):\n raise TypeError(\n \"quantile does not work for arrays stored as dask \"\n \"arrays. Load the data via .compute() or .load() \"\n \"prior to calling this method.\"\n )\n\n q = np.asarray(q, dtype=np.float64)\n\n new_dims = list(self.dims)\n if dim is not None:\n axis = self.get_axis_num(dim)\n if utils.is_scalar(dim):\n new_dims.remove(dim)\n else:\n for d in dim:\n new_dims.remove(d)\n else:\n axis = None\n new_dims = []\n\n # Only add the quantile dimension if q is array-like\n if q.ndim != 0:\n new_dims = [\"quantile\"] + new_dims\n\n qs = np.nanpercentile(\n self.data, q * 100.0, axis=axis, interpolation=interpolation\n )\n\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n attrs = self._attrs if keep_attrs else None\n\n return Variable(new_dims, qs, attrs)\n\n def rank(self, dim, pct=False):\n \"\"\"Ranks the data.\n\n Equal values are assigned a rank that is the average of the ranks that\n would have been otherwise assigned to all of the values within that\n set. Ranks begin at 1, not 0. If `pct`, computes percentage ranks.\n\n NaNs in the input array are returned as NaNs.\n\n The `bottleneck` library is required.\n\n Parameters\n ----------\n dim : str\n Dimension over which to compute rank.\n pct : bool, optional\n If True, compute percentage ranks, otherwise compute integer ranks.\n\n Returns\n -------\n ranked : Variable\n\n See Also\n --------\n Dataset.rank, DataArray.rank\n \"\"\"\n import bottleneck as bn\n\n data = self.data\n\n if isinstance(data, dask_array_type):\n raise TypeError(\n \"rank does not work for arrays stored as dask \"\n \"arrays. 
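To illustrate the `quantile` behaviour documented above (a sketch): a scalar `q` produces no extra dimension, while an array-like `q` adds a leading "quantile" dimension.

```python
import numpy as np
import xarray as xr

v = xr.Variable(("x", "y"), np.arange(12.0).reshape(3, 4))

v.quantile(0.5, dim="x").dims            # ('y',): scalar q adds no extra dimension
v.quantile([0.25, 0.75], dim="y").dims   # ('quantile', 'x'): array-like q adds one
```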
Load the data via .compute() or .load() \"\n \"prior to calling this method.\"\n )\n elif not isinstance(data, np.ndarray):\n raise TypeError(\n \"rank is not implemented for {} objects.\".format(type(data))\n )\n\n axis = self.get_axis_num(dim)\n func = bn.nanrankdata if self.dtype.kind == \"f\" else bn.rankdata\n ranked = func(data, axis=axis)\n if pct:\n count = np.sum(~np.isnan(data), axis=axis, keepdims=True)\n ranked /= count\n return Variable(self.dims, ranked)\n\n def rolling_window(\n self, dim, window, window_dim, center=False, fill_value=dtypes.NA\n ):\n \"\"\"\n Make a rolling_window along dim and add a new_dim to the last place.\n\n Parameters\n ----------\n dim: str\n Dimension over which to compute rolling_window\n window: int\n Window size of the rolling\n window_dim: str\n New name of the window dimension.\n center: boolean. default False.\n If True, pad fill_value for both ends. Otherwise, pad in the head\n of the axis.\n fill_value:\n value to be filled.\n\n Returns\n -------\n Variable that is a view of the original array with a added dimension of\n size w.\n The return dim: self.dims + (window_dim, )\n The return shape: self.shape + (window, )\n\n Examples\n --------\n >>> v=Variable(('a', 'b'), np.arange(8).reshape((2,4)))\n >>> v.rolling_window(x, 'b', 3, 'window_dim')\n \n array([[[nan, nan, 0], [nan, 0, 1], [0, 1, 2], [1, 2, 3]],\n [[nan, nan, 4], [nan, 4, 5], [4, 5, 6], [5, 6, 7]]])\n\n >>> v.rolling_window(x, 'b', 3, 'window_dim', center=True)\n \n array([[[nan, 0, 1], [0, 1, 2], [1, 2, 3], [2, 3, nan]],\n [[nan, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, nan]]])\n \"\"\"\n if fill_value is dtypes.NA: # np.nan is passed\n dtype, fill_value = dtypes.maybe_promote(self.dtype)\n array = self.astype(dtype, copy=False).data\n else:\n dtype = self.dtype\n array = self.data\n\n new_dims = self.dims + (window_dim,)\n return Variable(\n new_dims,\n duck_array_ops.rolling_window(\n array,\n axis=self.get_axis_num(dim),\n window=window,\n center=center,\n fill_value=fill_value,\n ),\n )\n\n def coarsen(self, windows, func, boundary=\"exact\", side=\"left\"):\n \"\"\"\n Apply\n \"\"\"\n windows = {k: v for k, v in windows.items() if k in self.dims}\n if not windows:\n return self.copy()\n\n reshaped, axes = self._coarsen_reshape(windows, boundary, side)\n if isinstance(func, str):\n name = func\n func = getattr(duck_array_ops, name, None)\n if func is None:\n raise NameError(\"{} is not a valid method.\".format(name))\n return type(self)(self.dims, func(reshaped, axis=axes), self._attrs)\n\n def _coarsen_reshape(self, windows, boundary, side):\n \"\"\"\n Construct a reshaped-array for corsen\n \"\"\"\n if not utils.is_dict_like(boundary):\n boundary = {d: boundary for d in windows.keys()}\n\n if not utils.is_dict_like(side):\n side = {d: side for d in windows.keys()}\n\n # remove unrelated dimensions\n boundary = {k: v for k, v in boundary.items() if k in windows}\n side = {k: v for k, v in side.items() if k in windows}\n\n for d, window in windows.items():\n if window <= 0:\n raise ValueError(\"window must be > 0. 
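A hedged sketch of `rolling_window` following the signature shown above, i.e. `rolling_window(dim, window, window_dim, center=False)` (the docstring's example appears to pass a stray extra argument):

```python
import numpy as np
import xarray as xr

v = xr.Variable(("a", "b"), np.arange(8).reshape(2, 4))

w = v.rolling_window("b", 3, "window_dim")
w.dims    # ('a', 'b', 'window_dim'): the window dimension is appended last
w.shape   # (2, 4, 3); positions without a full window are padded with NaN
```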
Given {}\".format(window))\n\n variable = self\n for d, window in windows.items():\n # trim or pad the object\n size = variable.shape[self._get_axis_num(d)]\n n = int(size / window)\n if boundary[d] == \"exact\":\n if n * window != size:\n raise ValueError(\n \"Could not coarsen a dimension of size {} with \"\n \"window {}\".format(size, window)\n )\n elif boundary[d] == \"trim\":\n if side[d] == \"left\":\n variable = variable.isel({d: slice(0, window * n)})\n else:\n excess = size - window * n\n variable = variable.isel({d: slice(excess, None)})\n elif boundary[d] == \"pad\": # pad\n pad = window * n - size\n if pad < 0:\n pad += window\n if side[d] == \"left\":\n pad_widths = {d: (0, pad)}\n else:\n pad_widths = {d: (pad, 0)}\n variable = variable.pad_with_fill_value(pad_widths)\n else:\n raise TypeError(\n \"{} is invalid for boundary. Valid option is 'exact', \"\n \"'trim' and 'pad'\".format(boundary[d])\n )\n\n shape = []\n axes = []\n axis_count = 0\n for i, d in enumerate(variable.dims):\n if d in windows:\n size = variable.shape[i]\n shape.append(int(size / windows[d]))\n shape.append(windows[d])\n axis_count += 1\n axes.append(i + axis_count)\n else:\n shape.append(variable.shape[i])\n\n return variable.data.reshape(shape), tuple(axes)\n\n @property\n def real(self):\n return type(self)(self.dims, self.data.real, self._attrs)\n\n @property\n def imag(self):\n return type(self)(self.dims, self.data.imag, self._attrs)\n\n def __array_wrap__(self, obj, context=None):\n return Variable(self.dims, obj)\n\n @staticmethod\n def _unary_op(f):\n @functools.wraps(f)\n def func(self, *args, **kwargs):\n with np.errstate(all=\"ignore\"):\n return self.__array_wrap__(f(self.data, *args, **kwargs))\n\n return func\n\n @staticmethod\n def _binary_op(f, reflexive=False, **ignored_kwargs):\n @functools.wraps(f)\n def func(self, other):\n if isinstance(other, (xr.DataArray, xr.Dataset)):\n return NotImplemented\n self_data, other_data, dims = _broadcast_compat_data(self, other)\n keep_attrs = _get_keep_attrs(default=False)\n attrs = self._attrs if keep_attrs else None\n with np.errstate(all=\"ignore\"):\n new_data = (\n f(self_data, other_data)\n if not reflexive\n else f(other_data, self_data)\n )\n result = Variable(dims, new_data, attrs=attrs)\n return result\n\n return func\n\n @staticmethod\n def _inplace_binary_op(f):\n @functools.wraps(f)\n def func(self, other):\n if isinstance(other, xr.Dataset):\n raise TypeError(\"cannot add a Dataset to a Variable in-place\")\n self_data, other_data, dims = _broadcast_compat_data(self, other)\n if dims != self.dims:\n raise ValueError(\"dimensions cannot change for in-place \" \"operations\")\n with np.errstate(all=\"ignore\"):\n self.values = f(self_data, other_data)\n return self\n\n return func\n\n def _to_numeric(self, offset=None, datetime_unit=None, dtype=float):\n \"\"\" A (private) method to convert datetime array to numeric dtype\n See duck_array_ops.datetime_to_numeric\n \"\"\"\n numeric_array = duck_array_ops.datetime_to_numeric(\n self.data, offset, datetime_unit, dtype\n )\n return type(self)(self.dims, numeric_array, self._attrs)"},{"col":0,"comment":"Align objects for merging, recursing into dictionary values.\n\n This function is not public API.\n ","endLoc":420,"header":"def deep_align(\n objects,\n join=\"inner\",\n copy=True,\n indexes=None,\n exclude=frozenset(),\n raise_on_invalid=True,\n fill_value=dtypes.NA,\n)","id":25,"name":"deep_align","nodeType":"Function","startLoc":340,"text":"def deep_align(\n objects,\n join=\"inner\",\n 
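A minimal sketch of `coarsen` as defined above: windows map dimension names to block sizes, `func` may be a string naming a reduction, and `boundary` controls what happens when the size is not a multiple of the window.

```python
import numpy as np
import xarray as xr

v = xr.Variable(("x",), np.arange(8.0))

v.coarsen({"x": 2}, func="mean").values                  # [0.5, 2.5, 4.5, 6.5]
v.coarsen({"x": 3}, func="sum", boundary="trim").values  # [3., 12.]; the excess is trimmed
```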
copy=True,\n indexes=None,\n exclude=frozenset(),\n raise_on_invalid=True,\n fill_value=dtypes.NA,\n):\n \"\"\"Align objects for merging, recursing into dictionary values.\n\n This function is not public API.\n \"\"\"\n from .dataarray import DataArray\n from .dataset import Dataset\n\n if indexes is None:\n indexes = {}\n\n def is_alignable(obj):\n return isinstance(obj, (DataArray, Dataset))\n\n positions = []\n keys = []\n out = []\n targets = []\n no_key = object()\n not_replaced = object()\n for position, variables in enumerate(objects):\n if is_alignable(variables):\n positions.append(position)\n keys.append(no_key)\n targets.append(variables)\n out.append(not_replaced)\n elif is_dict_like(variables):\n current_out = {}\n for k, v in variables.items():\n if is_alignable(v) and k not in indexes:\n # Skip variables in indexes for alignment, because these\n # should to be overwritten instead:\n # https://github.com/pydata/xarray/issues/725\n # https://github.com/pydata/xarray/issues/3377\n # TODO(shoyer): doing this here feels super-hacky -- can we\n # move it explicitly into merge instead?\n positions.append(position)\n keys.append(k)\n targets.append(v)\n current_out[k] = not_replaced\n else:\n current_out[k] = v\n out.append(current_out)\n elif raise_on_invalid:\n raise ValueError(\n \"object to align is neither an xarray.Dataset, \"\n \"an xarray.DataArray nor a dictionary: {!r}\".format(variables)\n )\n else:\n out.append(variables)\n\n aligned = align(\n *targets,\n join=join,\n copy=copy,\n indexes=indexes,\n exclude=exclude,\n fill_value=fill_value\n )\n\n for position, key, aligned_obj in zip(positions, keys, aligned):\n if key is no_key:\n out[position] = aligned_obj\n else:\n out[position][key] = aligned_obj\n\n # something went wrong: we should have replaced all sentinel values\n for arg in out:\n assert arg is not not_replaced\n if is_dict_like(arg):\n assert all(value is not not_replaced for value in arg.values())\n\n return out"},{"col":0,"comment":"Create a function that dispatches to dask for dask array inputs.","endLoc":54,"header":"def _dask_or_eager_func(\n name,\n eager_module=np,\n dask_module=dask_array,\n list_of_args=False,\n array_args=slice(1),\n requires_dask=None,\n)","id":26,"name":"_dask_or_eager_func","nodeType":"Function","startLoc":24,"text":"def _dask_or_eager_func(\n name,\n eager_module=np,\n dask_module=dask_array,\n list_of_args=False,\n array_args=slice(1),\n requires_dask=None,\n):\n \"\"\"Create a function that dispatches to dask for dask array inputs.\"\"\"\n if dask_module is not None:\n\n def f(*args, **kwargs):\n if list_of_args:\n dispatch_args = args[0]\n else:\n dispatch_args = args[array_args]\n if any(isinstance(a, dask_array.Array) for a in dispatch_args):\n try:\n wrapped = getattr(dask_module, name)\n except AttributeError as e:\n raise AttributeError(\"%s: requires dask >=%s\" % (e, requires_dask))\n else:\n wrapped = getattr(eager_module, name)\n return wrapped(*args, **kwargs)\n\n else:\n\n def f(*args, **kwargs):\n return getattr(eager_module, name)(*args, **kwargs)\n\n return f"},{"col":4,"comment":"\n Parameters\n ----------\n maxsize : int\n Integer maximum number of items to hold in the cache.\n on_evict: callable, optional\n Function to call like ``on_evict(key, value)`` when items are\n evicted.\n ","endLoc":48,"header":"def __init__(self, maxsize: int, on_evict: Callable[[K, V], Any] = None)","id":28,"name":"__init__","nodeType":"Function","startLoc":31,"text":"def __init__(self, maxsize: int, on_evict: Callable[[K, V], 
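`deep_align` above is internal; the public entry point with the same `join` semantics is `xr.align`. A small sketch, assuming the public xarray namespace:

```python
import xarray as xr

a = xr.DataArray([1, 2, 3], dims="x", coords={"x": [0, 1, 2]})
b = xr.DataArray([10, 20, 30], dims="x", coords={"x": [1, 2, 3]})

a2, b2 = xr.align(a, b, join="inner")   # both restricted to the shared labels x=[1, 2]
a3, b3 = xr.align(a, b, join="outer")   # union index x=[0, 1, 2, 3]; gaps become NaN
```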
Any] = None):\n \"\"\"\n Parameters\n ----------\n maxsize : int\n Integer maximum number of items to hold in the cache.\n on_evict: callable, optional\n Function to call like ``on_evict(key, value)`` when items are\n evicted.\n \"\"\"\n if not isinstance(maxsize, int):\n raise TypeError(\"maxsize must be an integer\")\n if maxsize < 0:\n raise ValueError(\"maxsize must be non-negative\")\n self._maxsize = maxsize\n self._cache = OrderedDict()\n self._lock = threading.RLock()\n self._on_evict = on_evict"},{"col":0,"comment":"Acquire a lock, possibly in a non-blocking fashion.\n\n Includes backwards compatibility hacks for old versions of Python, dask\n and dask-distributed.\n ","endLoc":130,"header":"def acquire(lock, blocking=True)","id":30,"name":"acquire","nodeType":"Function","startLoc":112,"text":"def acquire(lock, blocking=True):\n \"\"\"Acquire a lock, possibly in a non-blocking fashion.\n\n Includes backwards compatibility hacks for old versions of Python, dask\n and dask-distributed.\n \"\"\"\n if blocking:\n # no arguments needed\n return lock.acquire()\n elif DistributedLock is not None and isinstance(lock, DistributedLock):\n # distributed.Lock doesn't support the blocking argument yet:\n # https://github.com/dask/distributed/pull/2412\n return lock.acquire(timeout=0)\n else:\n # \"blocking\" keyword argument not supported for:\n # - threading.Lock on Python 2.\n # - dask.SerializableLock with dask v1.0.0 or earlier.\n # - multiprocessing.Lock calls the argument \"block\" instead.\n return lock.acquire(blocking)"},{"col":0,"comment":"Count the number of non-NA in this array along the given axis or axes\n ","endLoc":217,"header":"def count(data, axis=None)","id":32,"name":"count","nodeType":"Function","startLoc":214,"text":"def count(data, axis=None):\n \"\"\"Count the number of non-NA in this array along the given axis or axes\n \"\"\"\n return np.sum(np.logical_not(isnull(data)), axis=axis)"},{"col":0,"comment":"null","endLoc":112,"header":"def isnull(data)","id":33,"name":"isnull","nodeType":"Function","startLoc":89,"text":"def isnull(data):\n data = asarray(data)\n scalar_type = data.dtype.type\n if issubclass(scalar_type, (np.datetime64, np.timedelta64)):\n # datetime types use NaT for null\n # note: must check timedelta64 before integers, because currently\n # timedelta64 inherits from np.integer\n return isnat(data)\n elif issubclass(scalar_type, np.inexact):\n # float types use NaN for null\n return isnan(data)\n elif issubclass(scalar_type, (np.bool_, np.integer, np.character, np.void)):\n # these types cannot represent missing values\n return zeros_like(data, dtype=bool)\n else:\n # at this point, array should have dtype=object\n if isinstance(data, (np.ndarray, dask_array_type)):\n return pandas_isnull(data)\n else:\n # Not reachable yet, but intended for use with other duck array\n # types. 
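To show the effect of the dask-or-eager dispatch pattern above, here is a sketch that calls two of the functions defined in this module on both numpy and dask inputs; importing the private `duck_array_ops` module is done here only for illustration.

```python
import numpy as np
import dask.array as da
from xarray.core import duck_array_ops  # private module, used only to illustrate dispatch

x = np.array([1.0, np.nan, 3.0])

duck_array_ops.isnull(x)                           # eager: numpy boolean array
duck_array_ops.isnull(da.from_array(x, chunks=2))  # lazy: dask boolean array
duck_array_ops.count(x)                            # 2 non-NaN values
```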
For full consistency with pandas, we should accept None as\n # a null value as well as NaN, but it isn't clear how to do this\n # with duck typing.\n return data != data"},{"col":0,"comment":"null","endLoc":163,"header":"def asarray(data)","id":34,"name":"asarray","nodeType":"Function","startLoc":158,"text":"def asarray(data):\n return (\n data\n if (isinstance(data, dask_array_type) or hasattr(data, \"__array_function__\"))\n else np.asarray(data)\n )"},{"className":"AbstractArray","col":0,"comment":"Shared base class for DataArray and Variable.\n ","endLoc":182,"id":35,"nodeType":"Class","startLoc":113,"text":"class AbstractArray(ImplementsArrayReduce):\n \"\"\"Shared base class for DataArray and Variable.\n \"\"\"\n\n __slots__ = ()\n\n def __bool__(self: Any) -> bool:\n return bool(self.values)\n\n def __float__(self: Any) -> float:\n return float(self.values)\n\n def __int__(self: Any) -> int:\n return int(self.values)\n\n def __complex__(self: Any) -> complex:\n return complex(self.values)\n\n def __array__(self: Any, dtype: DTypeLike = None) -> np.ndarray:\n return np.asarray(self.values, dtype=dtype)\n\n def __repr__(self) -> str:\n return formatting.array_repr(self)\n\n def _iter(self: Any) -> Iterator[Any]:\n for n in range(len(self)):\n yield self[n]\n\n def __iter__(self: Any) -> Iterator[Any]:\n if self.ndim == 0:\n raise TypeError(\"iteration over a 0-d array\")\n return self._iter()\n\n def get_axis_num(\n self, dim: Union[Hashable, Iterable[Hashable]]\n ) -> Union[int, Tuple[int, ...]]:\n \"\"\"Return axis number(s) corresponding to dimension(s) in this array.\n\n Parameters\n ----------\n dim : str or iterable of str\n Dimension name(s) for which to lookup axes.\n\n Returns\n -------\n int or tuple of int\n Axis number or numbers corresponding to the given dimensions.\n \"\"\"\n if isinstance(dim, Iterable) and not isinstance(dim, str):\n return tuple(self._get_axis_num(d) for d in dim)\n else:\n return self._get_axis_num(dim)\n\n def _get_axis_num(self: Any, dim: Hashable) -> int:\n try:\n return self.dims.index(dim)\n except ValueError:\n raise ValueError(\"%r not found in array dimensions %r\" % (dim, self.dims))\n\n @property\n def sizes(self: Any) -> Mapping[Hashable, int]:\n \"\"\"Ordered mapping from dimension names to lengths.\n\n Immutable.\n\n See also\n --------\n Dataset.sizes\n \"\"\"\n return Frozen(dict(zip(self.dims, self.shape)))"},{"col":0,"comment":"null","endLoc":236,"header":"def is_dict_like(value: Any) -> bool","id":36,"name":"is_dict_like","nodeType":"Function","startLoc":235,"text":"def is_dict_like(value: Any) -> bool:\n return hasattr(value, \"keys\") and hasattr(value, \"__getitem__\")"},{"col":0,"comment":"null","endLoc":232,"header":"def fillna(data, other)","id":37,"name":"fillna","nodeType":"Function","startLoc":231,"text":"def fillna(data, other):\n return where(isnull(data), other, data)"},{"col":0,"comment":"Three argument where() with better dtype promotion rules.","endLoc":222,"header":"def where(condition, x, y)","id":38,"name":"where","nodeType":"Function","startLoc":220,"text":"def where(condition, x, y):\n \"\"\"Three argument where() with better dtype promotion rules.\"\"\"\n return _where(condition, *as_shared_dtype([x, y]))"},{"col":0,"comment":"Cast a arrays to a shared dtype using xarray's type promotion rules.","endLoc":174,"header":"def as_shared_dtype(scalars_or_arrays)","id":39,"name":"as_shared_dtype","nodeType":"Function","startLoc":166,"text":"def as_shared_dtype(scalars_or_arrays):\n \"\"\"Cast a arrays to a shared 
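A brief sketch of the `AbstractArray` helpers shown above: `get_axis_num` maps dimension names to axis positions (an iterable of names returns a tuple), and `sizes` is an immutable name-to-length mapping.

```python
import numpy as np
import xarray as xr

v = xr.Variable(("time", "space"), np.zeros((4, 3)))

v.get_axis_num("space")             # 1
v.get_axis_num(("space", "time"))   # (1, 0): an iterable of names gives a tuple
dict(v.sizes)                       # {'time': 4, 'space': 3}
```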
dtype using xarray's type promotion rules.\"\"\"\n arrays = [asarray(x) for x in scalars_or_arrays]\n # Pass arrays directly instead of dtypes to result_type so scalars\n # get handled properly.\n # Note that result_type() safely gets the dtype from dask arrays without\n # evaluating them.\n out_type = dtypes.result_type(*arrays)\n return [x.astype(out_type, copy=False) for x in arrays]"},{"className":"ImplementsArrayReduce","col":0,"comment":"null","endLoc":73,"id":40,"nodeType":"Class","startLoc":37,"text":"class ImplementsArrayReduce:\n __slots__ = ()\n\n @classmethod\n def _reduce_method(cls, func: Callable, include_skipna: bool, numeric_only: bool):\n if include_skipna:\n\n def wrapped_func(self, dim=None, axis=None, skipna=None, **kwargs):\n return self.reduce(\n func, dim, axis, skipna=skipna, allow_lazy=True, **kwargs\n )\n\n else:\n\n def wrapped_func(self, dim=None, axis=None, **kwargs): # type: ignore\n return self.reduce(func, dim, axis, allow_lazy=True, **kwargs)\n\n return wrapped_func\n\n _reduce_extra_args_docstring = dedent(\n \"\"\"\\\n dim : str or sequence of str, optional\n Dimension(s) over which to apply `{name}`.\n axis : int or sequence of int, optional\n Axis(es) over which to apply `{name}`. Only one of the 'dim'\n and 'axis' arguments can be supplied. If neither are supplied, then\n `{name}` is calculated over axes.\"\"\"\n )\n\n _cum_extra_args_docstring = dedent(\n \"\"\"\\\n dim : str or sequence of str, optional\n Dimension over which to apply `{name}`.\n axis : int or sequence of int, optional\n Axis over which to apply `{name}`. Only one of the 'dim'\n and 'axis' arguments can be supplied.\"\"\"\n )"},{"id":43,"name":"roadmap.rst","nodeType":"TextFile","path":"doc","text":".. _roadmap:\n\nDevelopment roadmap\n===================\n\nAuthors: Stephan Hoyer, Joe Hamman and xarray developers\n\nDate: July 24, 2018\n\nXarray is an open source Python library for labeled multidimensional\narrays and datasets.\n\nOur philosophy\n--------------\n\nWhy has xarray been successful? 
In our opinion:\n\n- Xarray does a great job of solving **specific use-cases** for\n multidimensional data analysis:\n\n - The dominant use-case for xarray is for analysis of gridded\n dataset in the geosciences, e.g., as part of the\n `Pangeo `__ project.\n - Xarray is also used more broadly in the physical sciences, where\n we've found the needs for analyzing multidimensional datasets are\n remarkably consistent (e.g., see\n `SunPy `__ and\n `PlasmaPy `__).\n - Finally, xarray is used in a variety of other domains, including\n finance, `probabilistic\n programming `__ and\n genomics.\n\n- Xarray is also a **domain agnostic** solution:\n\n - We focus on providing a flexible set of functionality related\n labeled multidimensional arrays, rather than solving particular\n problems.\n - This facilitates collaboration between users with different needs,\n and helps us attract a broad community of contributers.\n - Importantly, this retains flexibility, for use cases that don't\n fit particularly well into existing frameworks.\n\n- Xarray **integrates well** with other libraries in the scientific\n Python stack.\n\n - We leverage first-class external libraries for core features of\n xarray (e.g., NumPy for ndarrays, pandas for indexing, dask for\n parallel computing)\n - We expose our internal abstractions to users (e.g.,\n ``apply_ufunc()``), which facilitates extending xarray in various\n ways.\n\nTogether, these features have made xarray a first-class choice for\nlabeled multidimensional arrays in Python.\n\nWe want to double-down on xarray's strengths by making it an even more\nflexible and powerful tool for multidimensional data analysis. We want\nto continue to engage xarray's core geoscience users, and to also reach\nout to new domains to learn from other successful data models like those\nof `yt `__ or the `OLAP\ncube `__.\n\nSpecific needs\n--------------\n\nThe user community has voiced a number specific needs related to how\nxarray interfaces with domain specific problems. Xarray may not solve\nall of these issues directly, but these areas provide opportunities for\nxarray to provide better, more extensible, interfaces. Some examples of\nthese common needs are:\n\n- Non-regular grids (e.g., staggered and unstructured meshes).\n- Physical units.\n- Lazily computed arrays (e.g., for coordinate systems).\n- New file-formats.\n\nTechnical vision\n----------------\n\nWe think the right approach to extending xarray's user community and the\nusefulness of the project is to focus on improving key interfaces that\ncan be used externally to meet domain-specific needs.\n\nWe can generalize the community's needs into three main catagories:\n\n- More flexible grids/indexing.\n- More flexible arrays/computing.\n- More flexible storage backends.\n\nEach of these are detailed further in the subsections below.\n\nFlexible indexes\n~~~~~~~~~~~~~~~~\n\nXarray currently keeps track of indexes associated with coordinates by\nstoring them in the form of a ``pandas.Index`` in special\n``xarray.IndexVariable`` objects.\n\nThe limitations of this model became clear with the addition of\n``pandas.MultiIndex`` support in xarray 0.9, where a single index\ncorresponds to multiple xarray variables. MultiIndex support is highly\nuseful, but xarray now has numerous special cases to check for\nMultiIndex levels.\n\nA cleaner model would be to elevate ``indexes`` to an explicit part of\nxarray's data model, e.g., as attributes on the ``Dataset`` and\n``DataArray`` classes. 
A cleaner model would be to elevate ``indexes`` to an explicit part of\nxarray's data model, e.g., as attributes on the ``Dataset`` and\n``DataArray`` classes. Indexes would need to be propagated along with\ncoordinates in xarray operations, but would no longer need to have\na one-to-one correspondence with coordinate variables. Instead, an index\nshould be able to refer to multiple (possibly multidimensional)\ncoordinates that define it. See `GH\n 1603 `__ for full details.\n\nSpecific tasks:\n\n- Add an ``indexes`` attribute to ``xarray.Dataset`` and\n ``xarray.DataArray``, as dictionaries that map from coordinate names to\n xarray index objects.\n- Use the new index interface to write wrappers for ``pandas.Index``,\n ``pandas.MultiIndex`` and ``scipy.spatial.KDTree``.\n- Expose the interface externally to allow third-party libraries to\n implement custom indexing routines, e.g., for geospatial look-ups on\n the surface of the Earth.\n\nIn addition to the new features it directly enables, this cleanup will\nallow xarray to more easily implement some long-awaited features that\nbuild upon indexing, such as groupby operations with multiple variables.\n\nFlexible arrays\n~~~~~~~~~~~~~~~\n\nXarray currently supports wrapping multidimensional arrays defined by\nNumPy, dask and, to a limited extent, pandas. It would be nice to have\ninterfaces that allow xarray to wrap alternative N-D array\nimplementations, e.g.:\n\n- Arrays holding physical units.\n- Lazily computed arrays.\n- Other ndarray objects, e.g., sparse, xnd, xtensor.\n\nOur strategy has been to pursue upstream improvements in NumPy (see\n`NEP-22 `__)\nfor supporting a complete duck-typing interface using NumPy's\nhigher-level array API. Improvements in NumPy's support for custom data\ntypes would also be highly useful for xarray users.\n\nBy pursuing these improvements in NumPy we hope to extend the benefits\nto the full scientific Python community, and avoid tight coupling\nbetween xarray and specific third-party libraries (e.g., for\nimplementing units). This will allow xarray to maintain its domain\nagnostic strengths.\n\nWe expect that we may eventually add some minimal interfaces in xarray\nfor features that we delegate to external array libraries (e.g., for\ngetting units and changing units). If we do add these features, we\nexpect them to be thin wrappers, with core functionality implemented by\nthird-party libraries.\n\nFlexible storage\n~~~~~~~~~~~~~~~~\n\nThe xarray backends module has grown in size and complexity. Much of\nthis growth has been \"organic\" and mostly to support incremental\nadditions to the supported backends. This has left us with a fragile\ninternal API that is difficult for even experienced xarray developers to\nuse. Moreover, the lack of a public-facing API for building xarray\nbackends means that users cannot easily build backend interfaces for\nxarray in third-party libraries.\n\nThe idea of refactoring the backends API and exposing it to users was\noriginally proposed in `GH\n 1970 `__. The idea would\nbe to develop a well-tested and generic backend base class and\nassociated utilities for external use. Specific tasks for this\ndevelopment would include:\n\n- Exposing an abstract backend for writing new storage systems.\n- Exposing utilities for features like automatic closing of files,\n LRU-caching and explicit/lazy indexing.\n- Possibly moving some infrequently used backends to third-party\n packages.\n\nEngaging more users\n-------------------\n\nLike many open-source projects, the documentation of xarray has grown\ntogether with the library's features. 
While we think that the xarray\ndocumentation is comprehensive already, we acknowledge that the adoption\nof xarray might be slowed down because of the substantial time\ninvestment required to learn its working principles. In particular,\nnon-computer scientists or users less familiar with the pydata ecosystem\nmight find it difficult to learn xarray and realize how xarray can help\nthem in their daily work.\n\nIn order to lower this adoption barrier, we propose to:\n\n- Develop entry-level tutorials for users with different backgrounds. For\n example, we would like to develop tutorials for users with or without\n previous knowledge of pandas, numpy, netCDF, etc. These tutorials may be\n built as part of xarray's documentation or included in a separate repository\n to enable interactive use (e.g. mybinder.org).\n- Document typical user workflows in a dedicated website, following the example\n of `dask-stories\n `__.\n- Write a basic glossary that defines terms that might not be familiar to all\n (e.g. \"lazy\", \"labeled\", \"serialization\", \"indexing\", \"backend\").\n\nAdministrative\n--------------\n\nCurrent core developers\n~~~~~~~~~~~~~~~~~~~~~~~\n\n- Stephan Hoyer\n- Ryan Abernathey\n- Joe Hamman\n- Benoit Bovy\n- Fabien Maussion\n- Keisuke Fujii\n- Maximilian Roos\n\nNumFOCUS\n~~~~~~~~\n\nOn July 16, 2018, Joe and Stephan submitted xarray's fiscal sponsorship\napplication to NumFOCUS.\n"},{"id":44,"name":"whats-new.rst","nodeType":"TextFile","path":"doc","text":".. currentmodule:: xarray\n\nWhat's New\n==========\n\n.. ipython:: python\n :suppress:\n\n import numpy as np\n import pandas as pd\n import xarray as xray\n import xarray\n import xarray as xr\n np.random.seed(123456)\n\n.. _whats-new.0.14.0:\n\nv0.14.0 (unreleased)\n--------------------\n\nBreaking changes\n~~~~~~~~~~~~~~~~\n- This release introduces a rolling policy for minimum dependency versions:\n :ref:`mindeps_policy`.\n\n Several minimum versions have been increased:\n\n ============ ================== ====\n Package Old New\n ============ ================== ====\n Python 3.5.3 3.6\n numpy 1.12 1.14\n pandas 0.19.2 0.24\n dask 0.16 (tested: 2.4) 1.2\n bottleneck 1.1 (tested: 1.2) 1.2\n matplotlib 1.5 (tested: 3.1) 3.1\n ============ ================== ====\n\n Obsolete patch versions (x.y.Z) are not tested anymore.\n The oldest supported versions of all optional dependencies are now covered by\n automated tests (before, only the very latest versions were tested).\n\n (:issue:`3222`, :issue:`3293`, :issue:`3340`, :issue:`3346`, :issue:`3358`).\n By `Guido Imperiale `_.\n- Dropped the 'drop=False' optional parameter from :meth:`Variable.isel`.\n It was unused and doesn't make sense for a Variable.\n (:pull:`3375`) by `Guido Imperiale `_.\n\n- Remove internal usage of `collections.OrderedDict`. After dropping support for\n Python <=3.5, most uses of `OrderedDict` in Xarray were no longer necessary. We\n have removed the internal use of the `OrderedDict` in favor of Python's builtin\n `dict` object which is now ordered itself. This change will be most obvious when\n interacting with the `attrs` property on the Dataset and DataArray objects.\n\n (:issue:`3380`, :issue:`3389`). By `Joe Hamman `_.\n\nNew functions/methods\n~~~~~~~~~~~~~~~~~~~~~\n\n- Added :py:func:`~xarray.map_blocks`, modeled after :py:func:`dask.array.map_blocks`.\n Also added :py:meth:`Dataset.unify_chunks`, :py:meth:`DataArray.unify_chunks` and\n :py:meth:`testing.assert_chunks_equal`. 
By `Deepak Cherian `_\n and `Guido Imperiale `_.\n\nEnhancements\n~~~~~~~~~~~~\n\n- :py:class:`~xarray.core.GroupBy` enhancements. By `Deepak Cherian `_.\n\n - Added a repr. Example::\n\n >>> da.groupby(\"time.season\")\n DataArrayGroupBy, grouped over 'season'\n 4 groups with labels 'DJF', 'JJA', 'MAM', 'SON'\n\n - Added a ``GroupBy.dims`` property that mirrors the dimensions\n of each group.(:issue:`3344`)\n \n- Speed up :meth:`Dataset.isel` up to 33% and :meth:`DataArray.isel` up to 25% for small\n arrays (:issue:`2799`, :pull:`3375`) by\n `Guido Imperiale `_.\n\nBug fixes\n~~~~~~~~~\n- Reintroduce support for :mod:`weakref` (broken in v0.13.0). Support has been\n reinstated for :class:`DataArray` and :class:`Dataset` objects only. Internal xarray\n objects remain unaddressable by weakref in order to save memory\n (:issue:`3317`). By `Guido Imperiale `_.\n- Line plots with the ``x`` or ``y`` argument set to a 1D non-dimensional coord\n now plot the correct data for 2D DataArrays\n (:issue:`3334`). By `Tom Nicholas `_.\n- The default behaviour of reducing across all dimensions for\n :py:class:`~xarray.core.groupby.DataArrayGroupBy` objects has now been properly removed\n as was done for :py:class:`~xarray.core.groupby.DatasetGroupBy` in 0.13.0 (:issue:`3337`).\n Use `xarray.ALL_DIMS` if you need to replicate previous behaviour.\n Also raise nicer error message when no groups are created (:issue:`1764`).\n By `Deepak Cherian `_.\n- Fix error in concatenating unlabeled dimensions (:pull:`3362`).\n By `Deepak Cherian `_.\n\nDocumentation\n~~~~~~~~~~~~~\n\n- Created a glossary of important xarray terms (:issue:`2410`, :pull:`3352`).\n By `Gregory Gundersen `_.\n- Created a \"How do I...\" section (:ref:`howdoi`) for solutions to common questions. (:pull:`3357`).\n By `Deepak Cherian `_.\n- Add examples for :py:meth:`Dataset.swap_dims` and :py:meth:`DataArray.swap_dims`.\n By `Justus Magin `_.\n- Add examples for :py:meth:`align`, :py:meth:`merge`, :py:meth:`combine_by_coords`, \n :py:meth:`full_like`, :py:meth:`zeros_like`, :py:meth:`ones_like`, :py:meth:`Dataset.pipe`, \n :py:meth:`Dataset.assign`, :py:meth:`Dataset.reindex`, :py:meth:`Dataset.fillna`.\n By `Anderson Banihirwe `_.\n- Fixed documentation to clean up an unwanted file created in ``ipython`` example\n (:pull:`3353`).\n By `Gregory Gundersen `_.\n\n\n.. _whats-new.0.13.0:\n\nv0.13.0 (17 Sep 2019)\n---------------------\n\nThis release includes many exciting changes: wrapping of\n`NEP18 `_ compliant\nnumpy-like arrays; new :py:meth:`~Dataset.plot.scatter` plotting method that can scatter\ntwo ``DataArrays`` in a ``Dataset`` against each other; support for converting pandas\nDataFrames to xarray objects that wrap ``pydata/sparse``; and more!\n\nBreaking changes\n~~~~~~~~~~~~~~~~\n\n- This release increases the minimum required Python version from 3.5.0 to 3.5.3\n (:issue:`3089`). By `Guido Imperiale `_.\n- The ``isel_points`` and ``sel_points`` methods are removed, having been deprecated\n since v0.10.0. These are redundant with the ``isel`` / ``sel`` methods.\n See :ref:`vectorized_indexing` for the details\n By `Maximilian Roos `_\n- The ``inplace`` kwarg for public methods now raises an error, having been deprecated\n since v0.11.0.\n By `Maximilian Roos `_\n- :py:func:`~xarray.concat` now requires the ``dim`` argument. 
Its ``indexers``, ``mode``\n and ``concat_over`` kwargs have now been removed.\n By `Deepak Cherian `_\n- Passing a list of colors in ``cmap`` will now raise an error, having been deprecated since\n v0.6.1.\n- Most xarray objects now define ``__slots__``. This reduces overall RAM usage by ~22%\n (not counting the underlying numpy buffers); on CPython 3.7/x64, a trivial DataArray\n has gone down from 1.9kB to 1.5kB.\n\n Caveats:\n\n - Pickle streams produced by older versions of xarray can't be loaded using this\n release, and vice versa.\n - Any user code that was accessing the ``__dict__`` attribute of\n xarray objects will break. The best practice to attach custom metadata to xarray\n objects is to use the ``attrs`` dictionary.\n - Any user code that defines custom subclasses of xarray classes must now explicitly\n define ``__slots__`` itself. Subclasses that don't add any attributes must state so\n by defining ``__slots__ = ()`` right after the class header.\n Omitting ``__slots__`` will now cause a ``FutureWarning`` to be logged, and will raise an\n error in a later release.\n\n (:issue:`3250`) by `Guido Imperiale `_.\n- The default dimension for :py:meth:`Dataset.groupby`, :py:meth:`Dataset.resample`,\n :py:meth:`DataArray.groupby` and :py:meth:`DataArray.resample` reductions is now the\n grouping or resampling dimension.\n- :py:meth:`DataArray.to_dataset` requires ``name`` to be passed as a kwarg (previously ambiguous\n positional arguments were deprecated)\n- Reindexing with variables of a different dimension now raise an error (previously deprecated)\n- :py:func:`~xarray.broadcast_array` is removed (previously deprecated in favor of\n :py:func:`~xarray.broadcast`)\n- :py:meth:`Variable.expand_dims` is removed (previously deprecated in favor of\n :py:meth:`Variable.set_dims`)\n\nNew functions/methods\n~~~~~~~~~~~~~~~~~~~~~\n\n- xarray can now wrap around any\n `NEP18 `_ compliant\n numpy-like library (important: read notes about NUMPY_EXPERIMENTAL_ARRAY_FUNCTION in\n the above link). Added explicit test coverage for\n `sparse `_. (:issue:`3117`, :issue:`3202`).\n This requires `sparse>=0.8.0`. By `Nezar Abdennur `_\n and `Guido Imperiale `_.\n\n- :py:meth:`~Dataset.from_dataframe` and :py:meth:`~DataArray.from_series` now\n support ``sparse=True`` for converting pandas objects into xarray objects\n wrapping sparse arrays. This is particularly useful with sparsely populated\n hierarchical indexes. (:issue:`3206`)\n By `Stephan Hoyer `_.\n\n- The xarray package is now discoverable by mypy (although typing hints coverage is not\n complete yet). mypy type checking is now enforced by CI. 
Libraries that depend on\n xarray and use mypy can now remove from their setup.cfg the lines::\n\n [mypy-xarray]\n ignore_missing_imports = True\n\n (:issue:`2877`, :issue:`3088`, :issue:`3090`, :issue:`3112`, :issue:`3117`,\n :issue:`3207`)\n By `Guido Imperiale `_\n and `Maximilian Roos `_.\n\n- Added :py:meth:`DataArray.broadcast_like` and :py:meth:`Dataset.broadcast_like`.\n By `Deepak Cherian `_ and `David Mertz\n `_.\n\n- Dataset plotting API for visualizing dependencies between two DataArrays!\n Currently only :py:meth:`Dataset.plot.scatter` is implemented.\n By `Yohai Bar Sinai `_ and `Deepak Cherian `_\n\n- Added :py:meth:`DataArray.head`, :py:meth:`DataArray.tail` and :py:meth:`DataArray.thin`;\n as well as :py:meth:`Dataset.head`, :py:meth:`Dataset.tail` and :py:meth:`Dataset.thin` methods.\n (:issue:`319`) By `Gerardo Rivera `_.\n\nEnhancements\n~~~~~~~~~~~~\n\n- Multiple enhancements to :py:func:`~xarray.concat` and :py:func:`~xarray.open_mfdataset`.\n By `Deepak Cherian `_\n\n - Added ``compat='override'``. When merging, this option picks the variable from the first dataset\n and skips all comparisons.\n\n - Added ``join='override'``. When aligning, this only checks that index sizes are equal among objects\n and skips checking indexes for equality.\n\n - :py:func:`~xarray.concat` and :py:func:`~xarray.open_mfdataset` now support the ``join`` kwarg.\n It is passed down to :py:func:`~xarray.align`.\n\n - :py:func:`~xarray.concat` now calls :py:func:`~xarray.merge` on variables that are not concatenated\n (i.e. variables without ``concat_dim`` when ``data_vars`` or ``coords`` are ``\"minimal\"``).\n :py:func:`~xarray.concat` passes its new ``compat`` kwarg down to :py:func:`~xarray.merge`.\n (:issue:`2064`)\n\n Users can avoid a common bottleneck when using :py:func:`~xarray.open_mfdataset` on a large number of\n files with variables that are known to be aligned and some of which need not be concatenated.\n Slow equality comparisons can now be avoided, for e.g.::\n\n data = xr.open_mfdataset(files, concat_dim='time', data_vars='minimal',\n coords='minimal', compat='override', join='override')\n\n- In :py:meth:`~xarray.Dataset.to_zarr`, passing ``mode`` is not mandatory if\n ``append_dim`` is set, as it will automatically be set to ``'a'`` internally.\n By `David Brochart `_.\n\n- Added the ability to initialize an empty or full DataArray\n with a single value. (:issue:`277`)\n By `Gerardo Rivera `_.\n\n- :py:func:`~xarray.Dataset.to_netcdf()` now supports the ``invalid_netcdf`` kwarg when used\n with ``engine=\"h5netcdf\"``. 
It is passed to :py:func:`h5netcdf.File`.\n By `Ulrich Herter `_.\n\n- :py:meth:`~xarray.Dataset.drop` now supports keyword arguments; dropping index\n labels by using both ``dim`` and ``labels`` or using a\n :py:class:`~xarray.core.coordinates.DataArrayCoordinates` object are\n deprecated (:issue:`2910`).\n By `Gregory Gundersen `_.\n\n- Added examples of :py:meth:`Dataset.set_index` and\n :py:meth:`DataArray.set_index`, as well are more specific error messages\n when the user passes invalid arguments (:issue:`3176`).\n By `Gregory Gundersen `_.\n\n- :py:func:`filter_by_attrs` now filters the coordinates as well as the variables.\n By `Spencer Jones `_.\n\nBug fixes\n~~~~~~~~~\n\n- Improve \"missing dimensions\" error message for :py:func:`~xarray.apply_ufunc`\n (:issue:`2078`).\n By `Rick Russotto `_.\n- :py:meth:`~xarray.DataArray.assign_coords` now supports dictionary arguments\n (:issue:`3231`).\n By `Gregory Gundersen `_.\n- Fix regression introduced in v0.12.2 where ``copy(deep=True)`` would convert\n unicode indices to dtype=object (:issue:`3094`).\n By `Guido Imperiale `_.\n- Improved error handling and documentation for `.expand_dims()`\n read-only view.\n- Fix tests for big-endian systems (:issue:`3125`).\n By `Graham Inggs `_.\n- XFAIL several tests which are expected to fail on ARM systems\n due to a ``datetime`` issue in NumPy (:issue:`2334`).\n By `Graham Inggs `_.\n- Fix KeyError that arises when using .sel method with float values\n different from coords float type (:issue:`3137`).\n By `Hasan Ahmad `_.\n- Fixed bug in ``combine_by_coords()`` causing a `ValueError` if the input had\n an unused dimension with coordinates which were not monotonic (:issue:`3150`).\n By `Tom Nicholas `_.\n- Fixed crash when applying ``distributed.Client.compute()`` to a DataArray\n (:issue:`3171`). By `Guido Imperiale `_.\n- Better error message when using groupby on an empty DataArray (:issue:`3037`).\n By `Hasan Ahmad `_.\n- Fix error that arises when using open_mfdataset on a series of netcdf files\n having differing values for a variable attribute of type list. (:issue:`3034`)\n By `Hasan Ahmad `_.\n- Prevent :py:meth:`~xarray.DataArray.argmax` and :py:meth:`~xarray.DataArray.argmin` from calling\n dask compute (:issue:`3237`). By `Ulrich Herter `_.\n- Plots in 2 dimensions (pcolormesh, contour) now allow to specify levels as numpy\n array (:issue:`3284`). By `Mathias Hauser `_.\n- Fixed bug in :meth:`DataArray.quantile` failing to keep attributes when\n `keep_attrs` was True (:issue:`3304`). By David Huard ``_.\n\nDocumentation\n~~~~~~~~~~~~~\n\n- Created a `PR checklist `_ as a quick reference for tasks before creating a new PR\n or pushing new commits.\n By `Gregory Gundersen `_.\n\n- Fixed documentation to clean up unwanted files created in ``ipython`` examples\n (:issue:`3227`).\n By `Gregory Gundersen `_.\n\n.. 
_whats-new.0.12.3:\n\nv0.12.3 (10 July 2019)\n----------------------\n\nNew functions/methods\n~~~~~~~~~~~~~~~~~~~~~\n\n- New methods :py:meth:`Dataset.to_stacked_array` and\n :py:meth:`DataArray.to_unstacked_dataset` for reshaping Datasets of variables\n with different dimensions\n (:issue:`1317`).\n This is useful for feeding data from xarray into machine learning models,\n as described in :ref:`reshape.stacking_different`.\n By `Noah Brenowitz `_.\n\nEnhancements\n~~~~~~~~~~~~\n\n- Support for renaming ``Dataset`` variables and dimensions independently\n with :py:meth:`~Dataset.rename_vars` and :py:meth:`~Dataset.rename_dims`\n (:issue:`3026`).\n By `Julia Kent `_.\n\n- Add ``scales``, ``offsets``, ``units`` and ``descriptions``\n attributes to :py:class:`~xarray.DataArray` returned by\n :py:func:`~xarray.open_rasterio`. (:issue:`3013`)\n By `Erle Carrara `_.\n\nBug fixes\n~~~~~~~~~\n\n- Resolved deprecation warnings from newer versions of matplotlib and dask.\n- Compatibility fixes for the upcoming pandas 0.25 and NumPy 1.17 releases.\n By `Stephan Hoyer `_.\n- Fix summaries for multiindex coordinates (:issue:`3079`).\n By `Jonas Hörsch `_.\n- Fix HDF5 error that could arise when reading multiple groups from a file at\n once (:issue:`2954`).\n By `Stephan Hoyer `_.\n\n.. _whats-new.0.12.2:\n\nv0.12.2 (29 June 2019)\n----------------------\n\nNew functions/methods\n~~~~~~~~~~~~~~~~~~~~~\n\n- Two new functions, :py:func:`~xarray.combine_nested` and\n :py:func:`~xarray.combine_by_coords`, allow for combining datasets along any\n number of dimensions, instead of the one-dimensional list of datasets\n supported by :py:func:`~xarray.concat`.\n\n The new ``combine_nested`` will accept the datasets as a nested\n list-of-lists, and combine by applying a series of concat and merge\n operations. The new ``combine_by_coords`` instead uses the dimension\n coordinates of datasets to order them.\n\n :py:func:`~xarray.open_mfdataset` can use either ``combine_nested`` or\n ``combine_by_coords`` to combine datasets along multiple dimensions, by\n specifying the argument ``combine='nested'`` or ``combine='by_coords'``.\n\n The older function :py:func:`~xarray.auto_combine` has been deprecated,\n because its functionality has been subsumed by the new functions.\n To avoid FutureWarnings switch to using ``combine_nested`` or\n ``combine_by_coords``, (or set the ``combine`` argument in\n ``open_mfdataset``). (:issue:`2159`)\n By `Tom Nicholas `_.\n\n- :py:meth:`~xarray.DataArray.rolling_exp` and\n :py:meth:`~xarray.Dataset.rolling_exp` added, similar to pandas'\n ``pd.DataFrame.ewm`` method. Calling ``.mean`` on the resulting object\n will return an exponentially weighted moving average.\n By `Maximilian Roos `_.\n\n- New :py:func:`DataArray.str ` for string\n related manipulations, based on ``pandas.Series.str``.\n By `0x0L `_.\n\n- Added ``strftime`` method to ``.dt`` accessor, making it simpler to hand a\n datetime ``DataArray`` to other code expecting formatted dates and times.\n (:issue:`2090`). :py:meth:`~xarray.CFTimeIndex.strftime` is also now\n available on :py:class:`CFTimeIndex`.\n By `Alan Brammer `_ and\n `Ryan May `_.\n\n- :py:meth:`~xarray.core.GroupBy.quantile` is now a method of ``GroupBy``\n objects (:issue:`3018`).\n By `David Huard `_.\n\n- Argument and return types are added to most methods on ``DataArray`` and\n ``Dataset``, allowing static type checking both within xarray and external\n libraries. 
Type checking with `mypy `_ is enabled in\n CI (though not required yet).\n By `Guido Imperiale `_\n and `Maximilian Roos `_.\n\nEnhancements to existing functionality\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n- Add ``keepdims`` argument for reduce operations (:issue:`2170`)\n By `Scott Wales `_.\n- Enable ``@`` operator for DataArray. This is equivalent to :py:meth:`DataArray.dot`\n By `Maximilian Roos `_.\n- Add ``fill_value`` argument for reindex, align, and merge operations\n to enable custom fill values. (:issue:`2876`)\n By `Zach Griffith `_.\n- :py:meth:`DataArray.transpose` now accepts a keyword argument\n ``transpose_coords`` which enables transposition of coordinates in the\n same way as :py:meth:`Dataset.transpose`. :py:meth:`DataArray.groupby`\n :py:meth:`DataArray.groupby_bins`, and :py:meth:`DataArray.resample` now\n accept a keyword argument ``restore_coord_dims`` which keeps the order\n of the dimensions of multi-dimensional coordinates intact (:issue:`1856`).\n By `Peter Hausamann `_.\n- Clean up Python 2 compatibility in code (:issue:`2950`)\n By `Guido Imperiale `_.\n- Better warning message when supplying invalid objects to ``xr.merge``\n (:issue:`2948`). By `Mathias Hauser `_.\n- Add ``errors`` keyword argument to :py:meth:`Dataset.drop` and :py:meth:`Dataset.drop_dims`\n that allows ignoring errors if a passed label or dimension is not in the dataset\n (:issue:`2994`).\n By `Andrew Ross `_.\n\nIO related enhancements\n~~~~~~~~~~~~~~~~~~~~~~~\n\n- Implement :py:func:`~xarray.load_dataset` and\n :py:func:`~xarray.load_dataarray` as alternatives to\n :py:func:`~xarray.open_dataset` and :py:func:`~xarray.open_dataarray` to\n open, load into memory, and close files, returning the Dataset or DataArray.\n These functions are helpful for avoiding file-lock errors when trying to\n write to files opened using ``open_dataset()`` or ``open_dataarray()``.\n (:issue:`2887`)\n By `Dan Nowacki `_.\n- It is now possible to extend existing :ref:`io.zarr` datasets, by using\n ``mode='a'`` and the new ``append_dim`` argument in\n :py:meth:`~xarray.Dataset.to_zarr`.\n By `Jendrik Jördening `_,\n `David Brochart `_,\n `Ryan Abernathey `_ and\n `Shikhar Goenka `_.\n- ``xr.open_zarr`` now accepts manually specified chunks with the ``chunks=``\n parameter. ``auto_chunk=True`` is equivalent to ``chunks='auto'`` for\n backwards compatibility. The ``overwrite_encoded_chunks`` parameter is\n added to remove the original zarr chunk encoding.\n By `Lily Wang `_.\n- netCDF chunksizes are now only dropped when original_shape is different,\n not when it isn't found. (:issue:`2207`)\n By `Karel van de Plassche `_.\n- Character arrays' character dimension name decoding and encoding handled by\n ``var.encoding['char_dim_name']`` (:issue:`2895`)\n By `James McCreight `_.\n- open_rasterio() now supports rasterio.vrt.WarpedVRT with custom transform,\n width and height (:issue:`2864`).\n By `Julien Michel `_.\n\nBug fixes\n~~~~~~~~~\n\n- Rolling operations on xarray objects containing dask arrays could silently\n compute the incorrect result or use large amounts of memory (:issue:`2940`).\n By `Stephan Hoyer `_.\n- Don't set encoding attributes on bounds variables when writing to netCDF.\n (:issue:`2921`)\n By `Deepak Cherian `_.\n- NetCDF4 output: variables with unlimited dimensions must be chunked (not\n contiguous) on output. 
(:issue:`1849`)\n By `James McCreight `_.\n- indexing with an empty list creates an object with zero-length axis (:issue:`2882`)\n By `Mayeul d'Avezac `_.\n- Return correct count for scalar datetime64 arrays (:issue:`2770`)\n By `Dan Nowacki `_.\n- Fixed max, min exception when applied to a multiIndex (:issue:`2923`)\n By `Ian Castleden `_\n- A deep copy deep-copies the coords (:issue:`1463`)\n By `Martin Pletcher `_.\n- Increased support for `missing_value` (:issue:`2871`)\n By `Deepak Cherian `_.\n- Removed usages of `pytest.config`, which is deprecated (:issue:`2988`)\n By `Maximilian Roos `_.\n- Fixed performance issues with cftime installed (:issue:`3000`)\n By `0x0L `_.\n- Replace incorrect usages of `message` in pytest assertions\n with `match` (:issue:`3011`)\n By `Maximilian Roos `_.\n- Add explicit pytest markers, now required by pytest\n (:issue:`3032`).\n By `Maximilian Roos `_.\n- Test suite fixes for newer versions of pytest (:issue:`3011`, :issue:`3032`).\n By `Maximilian Roos `_\n and `Stephan Hoyer `_.\n\n.. _whats-new.0.12.1:\n\nv0.12.1 (4 April 2019)\n----------------------\n\nEnhancements\n~~~~~~~~~~~~\n\n- Allow ``expand_dims`` method to support inserting/broadcasting dimensions\n with size > 1. (:issue:`2710`)\n By `Martin Pletcher `_.\n\nBug fixes\n~~~~~~~~~\n\n- Dataset.copy(deep=True) now creates a deep copy of the attrs (:issue:`2835`).\n By `Andras Gefferth `_.\n- Fix incorrect ``indexes`` resulting from various ``Dataset`` operations\n (e.g., ``swap_dims``, ``isel``, ``reindex``, ``[]``) (:issue:`2842`,\n :issue:`2856`).\n By `Stephan Hoyer `_.\n\n.. _whats-new.0.12.0:\n\nv0.12.0 (15 March 2019)\n-----------------------\n\nHighlights include:\n\n- Removed support for Python 2. This is the first version of xarray that is\n Python 3 only!\n- New :py:meth:`~xarray.DataArray.coarsen` and\n :py:meth:`~xarray.DataArray.integrate` methods. See :ref:`comput.coarsen`\n and :ref:`compute.using_coordinates` for details.\n- Many improvements to cftime support. See below for details.\n\nDeprecations\n~~~~~~~~~~~~\n\n- The ``compat`` argument to ``Dataset`` and the ``encoding`` argument to\n ``DataArray`` are deprecated and will be removed in a future release.\n (:issue:`1188`)\n By `Maximilian Roos `_.\n\ncftime related enhancements\n~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n- Resampling of standard and non-standard calendars indexed by\n :py:class:`~xarray.CFTimeIndex` is now possible. (:issue:`2191`).\n By `Jwen Fai Low `_ and\n `Spencer Clark `_.\n\n- Taking the mean of arrays of :py:class:`cftime.datetime` objects, and\n by extension, use of :py:meth:`~xarray.DataArray.coarsen` with\n :py:class:`cftime.datetime` coordinates is now possible. By `Spencer Clark\n `_.\n\n- Internal plotting now supports ``cftime.datetime`` objects as time series.\n (:issue:`2164`)\n By `Julius Busecke `_ and\n `Spencer Clark `_.\n\n- :py:meth:`~xarray.cftime_range` now supports QuarterBegin and QuarterEnd offsets (:issue:`2663`).\n By `Jwen Fai Low `_\n\n- :py:meth:`~xarray.open_dataset` now accepts a ``use_cftime`` argument, which\n can be used to require that ``cftime.datetime`` objects are always used, or\n never used when decoding dates encoded with a standard calendar. This can be\n used to ensure consistent date types are returned when using\n :py:meth:`~xarray.open_mfdataset` (:issue:`1263`) and/or to silence\n serialization warnings raised if dates from a standard calendar are found to\n be outside the :py:class:`pandas.Timestamp`-valid range (:issue:`2754`). 
By\n `Spencer Clark `_.\n\n- :py:meth:`pandas.Series.dropna` is now supported for a\n :py:class:`pandas.Series` indexed by a :py:class:`~xarray.CFTimeIndex`\n (:issue:`2688`). By `Spencer Clark `_.\n\nOther enhancements\n~~~~~~~~~~~~~~~~~~\n\n- Added ability to open netcdf4/hdf5 file-like objects with ``open_dataset``.\n Requires (h5netcdf>0.7 and h5py>2.9.0). (:issue:`2781`)\n By `Scott Henderson `_\n- Add ``data=False`` option to ``to_dict()`` methods. (:issue:`2656`)\n By `Ryan Abernathey `_\n- :py:meth:`DataArray.coarsen` and\n :py:meth:`Dataset.coarsen` are newly added.\n See :ref:`comput.coarsen` for details.\n (:issue:`2525`)\n By `Keisuke Fujii `_.\n- Upsampling an array via interpolation with resample is now dask-compatible,\n as long as the array is not chunked along the resampling dimension.\n By `Spencer Clark `_.\n- :py:func:`xarray.testing.assert_equal` and\n :py:func:`xarray.testing.assert_identical` now provide a more detailed\n report showing what exactly differs between the two objects (dimensions /\n coordinates / variables / attributes) (:issue:`1507`).\n By `Benoit Bovy `_.\n- Add ``tolerance`` option to ``resample()`` methods ``bfill``, ``pad``,\n ``nearest``. (:issue:`2695`)\n By `Hauke Schulz `_.\n- :py:meth:`DataArray.integrate` and\n :py:meth:`Dataset.integrate` are newly added.\n See :ref:`compute.using_coordinates` for the detail.\n (:issue:`1332`)\n By `Keisuke Fujii `_.\n- Added :py:meth:`~xarray.Dataset.drop_dims` (:issue:`1949`).\n By `Kevin Squire `_.\n\nBug fixes\n~~~~~~~~~\n\n- Silenced warnings that appear when using pandas 0.24.\n By `Stephan Hoyer `_\n- Interpolating via resample now internally specifies ``bounds_error=False``\n as an argument to ``scipy.interpolate.interp1d``, allowing for interpolation\n from higher frequencies to lower frequencies. Datapoints outside the bounds\n of the original time coordinate are now filled with NaN (:issue:`2197`). By\n `Spencer Clark `_.\n- Line plots with the ``x`` argument set to a non-dimensional coord now plot\n the correct data for 1D DataArrays.\n (:issue:`2725`). By `Tom Nicholas `_.\n- Subtracting a scalar ``cftime.datetime`` object from a\n :py:class:`CFTimeIndex` now results in a :py:class:`pandas.TimedeltaIndex`\n instead of raising a ``TypeError`` (:issue:`2671`). By `Spencer Clark\n `_.\n- backend_kwargs are no longer ignored when using open_dataset with pynio engine\n (:issue:'2380')\n By `Jonathan Joyce `_.\n- Fix ``open_rasterio`` creating a WKT CRS instead of PROJ.4 with\n ``rasterio`` 1.0.14+ (:issue:`2715`).\n By `David Hoese `_.\n- Masking data arrays with :py:meth:`xarray.DataArray.where` now returns an\n array with the name of the original masked array (:issue:`2748` and :issue:`2457`).\n By `Yohai Bar-Sinai `_.\n- Fixed error when trying to reduce a DataArray using a function which does not\n require an axis argument. (:issue:`2768`)\n By `Tom Nicholas `_.\n- Concatenating a sequence of :py:class:`~xarray.DataArray` with varying names\n sets the name of the output array to ``None``, instead of the name of the\n first input array. If the names are the same it sets the name to that,\n instead to the name of the first DataArray in the list as it did before.\n (:issue:`2775`). By `Tom Nicholas `_.\n\n- Per `CF conventions\n `_,\n specifying ``'standard'`` as the calendar type in\n :py:meth:`~xarray.cftime_range` now correctly refers to the ``'gregorian'``\n calendar instead of the ``'proleptic_gregorian'`` calendar (:issue:`2761`).\n\n.. 
_whats-new.0.11.3:\n\nv0.11.3 (26 January 2019)\n-------------------------\n\nBug fixes\n~~~~~~~~~\n\n- Saving files with times encoded with reference dates with timezones\n (e.g. '2000-01-01T00:00:00-05:00') no longer raises an error\n (:issue:`2649`). By `Spencer Clark `_.\n- Fixed performance regression with ``open_mfdataset`` (:issue:`2662`).\n By `Tom Nicholas `_.\n- Fixed supplying an explicit dimension in the ``concat_dim`` argument\n to ``open_mfdataset`` (:issue:`2647`).\n By `Ben Root `_.\n\n.. _whats-new.0.11.2:\n\nv0.11.2 (2 January 2019)\n------------------------\n\nRemoves inadvertently introduced setup dependency on pytest-runner\n(:issue:`2641`). Otherwise, this release is exactly equivalent to 0.11.1.\n\n.. warning::\n\n This is the last xarray release that will support Python 2.7. Future releases\n will be Python 3 only, but older versions of xarray will always be available\n for Python 2.7 users. For more details, see:\n\n - `Xarray Github issue discussing dropping Python 2 `__\n - `Python 3 Statement `__\n - `Tips on porting to Python 3 `__\n\n.. _whats-new.0.11.1:\n\nv0.11.1 (29 December 2018)\n--------------------------\n\nThis minor release includes a number of enhancements and bug fixes, and two\n(slightly) breaking changes.\n\nBreaking changes\n~~~~~~~~~~~~~~~~\n\n- Minimum rasterio version increased from 0.36 to 1.0 (for ``open_rasterio``).\n- Time bounds variables are now also decoded according to CF conventions\n (:issue:`2565`). The previous behavior was to decode them only if they\n had specific time attributes; now these attributes are copied\n automatically from the corresponding time coordinate. This might\n break downstream code that was relying on these variables not being\n decoded.\n By `Fabien Maussion `_.\n\nEnhancements\n~~~~~~~~~~~~\n\n- Ability to read and write consolidated metadata in zarr stores (:issue:`2558`).\n By `Ryan Abernathey `_.\n- :py:class:`CFTimeIndex` uses slicing for string indexing when possible (like\n :py:class:`pandas.DatetimeIndex`), which avoids unnecessary copies.\n By `Stephan Hoyer `_\n- Enable passing ``rasterio.io.DatasetReader`` or ``rasterio.vrt.WarpedVRT`` to\n ``open_rasterio`` instead of a file path string. Allows for in-memory\n reprojection; see (:issue:`2588`).\n By `Scott Henderson `_.\n- Like :py:class:`pandas.DatetimeIndex`, :py:class:`CFTimeIndex` now supports\n \"dayofyear\" and \"dayofweek\" accessors (:issue:`2597`). Note this requires a\n version of cftime greater than 1.0.2. By `Spencer Clark\n `_.\n- The option ``'warn_for_unclosed_files'`` (False by default) has been added to\n allow users to enable a warning when files opened by xarray are deallocated\n but were not explicitly closed. 
This is mostly useful for debugging; we\n recommend enabling it in your test suites if you use xarray for IO.\n By `Stephan Hoyer `_\n- Support Dask ``HighLevelGraphs`` by `Matthew Rocklin `_.\n- :py:meth:`DataArray.resample` and :py:meth:`Dataset.resample` now supports the\n ``loffset`` kwarg just like Pandas.\n By `Deepak Cherian `_\n- Datasets are now guaranteed to have a ``'source'`` encoding, so the source\n file name is always stored (:issue:`2550`).\n By `Tom Nicholas `_.\n- The ``apply`` methods for ``DatasetGroupBy``, ``DataArrayGroupBy``,\n ``DatasetResample`` and ``DataArrayResample`` now support passing positional\n arguments to the applied function as a tuple to the ``args`` argument.\n By `Matti Eskelinen `_.\n- 0d slices of ndarrays are now obtained directly through indexing, rather than\n extracting and wrapping a scalar, avoiding unnecessary copying. By `Daniel\n Wennberg `_.\n- Added support for ``fill_value`` with\n :py:meth:`~xarray.DataArray.shift` and :py:meth:`~xarray.Dataset.shift`\n By `Maximilian Roos `_\n\nBug fixes\n~~~~~~~~~\n\n- Ensure files are automatically closed, if possible, when no longer referenced\n by a Python variable (:issue:`2560`).\n By `Stephan Hoyer `_\n- Fixed possible race conditions when reading/writing to disk in parallel\n (:issue:`2595`).\n By `Stephan Hoyer `_\n- Fix h5netcdf saving scalars with filters or chunks (:issue:`2563`).\n By `Martin Raspaud `_.\n- Fix parsing of ``_Unsigned`` attribute set by OPENDAP servers. (:issue:`2583`).\n By `Deepak Cherian `_\n- Fix failure in time encoding when exporting to netCDF with versions of pandas\n less than 0.21.1 (:issue:`2623`). By `Spencer Clark\n `_.\n- Fix MultiIndex selection to update label and level (:issue:`2619`).\n By `Keisuke Fujii `_.\n\n.. _whats-new.0.11.0:\n\nv0.11.0 (7 November 2018)\n-------------------------\n\nBreaking changes\n~~~~~~~~~~~~~~~~\n\n- Finished deprecations (changed behavior with this release):\n\n - ``Dataset.T`` has been removed as a shortcut for :py:meth:`Dataset.transpose`.\n Call :py:meth:`Dataset.transpose` directly instead.\n - Iterating over a ``Dataset`` now includes only data variables, not coordinates.\n Similarily, calling ``len`` and ``bool`` on a ``Dataset`` now\n includes only data variables.\n - ``DataArray.__contains__`` (used by Python's ``in`` operator) now checks\n array data, not coordinates.\n - The old resample syntax from before xarray 0.10, e.g.,\n ``data.resample('1D', dim='time', how='mean')``, is no longer supported will\n raise an error in most cases. You need to use the new resample syntax\n instead, e.g., ``data.resample(time='1D').mean()`` or\n ``data.resample({'time': '1D'}).mean()``.\n\n\n- New deprecations (behavior will be changed in xarray 0.12):\n\n - Reduction of :py:meth:`DataArray.groupby` and :py:meth:`DataArray.resample`\n without dimension argument will change in the next release.\n Now we warn a FutureWarning.\n By `Keisuke Fujii `_.\n - The ``inplace`` kwarg of a number of `DataArray` and `Dataset` methods is being\n deprecated and will be removed in the next release.\n By `Deepak Cherian `_.\n\n\n- Refactored storage backends:\n\n - Xarray's storage backends now automatically open and close files when\n necessary, rather than requiring opening a file with ``autoclose=True``. A\n global least-recently-used cache is used to store open files; the default\n limit of 128 open files should suffice in most cases, but can be adjusted if\n necessary with\n ``xarray.set_options(file_cache_maxsize=...)``. 
The ``autoclose`` argument\n to ``open_dataset`` and related functions has been deprecated and is now a\n no-op.\n\n This change, along with an internal refactor of xarray's storage backends,\n should significantly improve performance when reading and writing\n netCDF files with Dask, especially when working with many files or using\n Dask Distributed. By `Stephan Hoyer `_\n\n\n- Support for non-standard calendars used in climate science:\n\n - Xarray will now always use :py:class:`cftime.datetime` objects, rather\n than by default trying to coerce them into ``np.datetime64[ns]`` objects.\n A :py:class:`~xarray.CFTimeIndex` will be used for indexing along time\n coordinates in these cases.\n - A new method :py:meth:`~xarray.CFTimeIndex.to_datetimeindex` has been added\n to aid in converting from a :py:class:`~xarray.CFTimeIndex` to a\n :py:class:`pandas.DatetimeIndex` for the remaining use-cases where\n using a :py:class:`~xarray.CFTimeIndex` is still a limitation (e.g. for\n resample or plotting).\n - Setting the ``enable_cftimeindex`` option is now a no-op and emits a\n ``FutureWarning``.\n\nEnhancements\n~~~~~~~~~~~~\n\n- :py:meth:`xarray.DataArray.plot.line` can now accept multidimensional\n coordinate variables as input. `hue` must be a dimension name in this case.\n (:issue:`2407`)\n By `Deepak Cherian `_.\n- Added support for Python 3.7. (:issue:`2271`).\n By `Joe Hamman `_.\n- Added support for plotting data with `pandas.Interval` coordinates, such as those\n created by :py:meth:`~xarray.DataArray.groupby_bins`\n By `Maximilian Maahn `_.\n- Added :py:meth:`~xarray.CFTimeIndex.shift` for shifting the values of a\n CFTimeIndex by a specified frequency. (:issue:`2244`).\n By `Spencer Clark `_.\n- Added support for using ``cftime.datetime`` coordinates with\n :py:meth:`~xarray.DataArray.differentiate`,\n :py:meth:`~xarray.Dataset.differentiate`,\n :py:meth:`~xarray.DataArray.interp`, and\n :py:meth:`~xarray.Dataset.interp`.\n By `Spencer Clark `_\n- There is now a global option to either always keep or always discard\n dataset and dataarray attrs upon operations. The option is set with\n ``xarray.set_options(keep_attrs=True)``, and the default is to use the old\n behaviour.\n By `Tom Nicholas `_.\n- Added a new backend for the GRIB file format based on ECMWF *cfgrib*\n python driver and *ecCodes* C-library. (:issue:`2475`)\n By `Alessandro Amici `_,\n sponsored by `ECMWF `_.\n- Resample now supports a dictionary mapping from dimension to frequency as\n its first argument, e.g., ``data.resample({'time': '1D'}).mean()``. This is\n consistent with other xarray functions that accept either dictionaries or\n keyword arguments. By `Stephan Hoyer `_.\n\n- The preferred way to access tutorial data is now to load it lazily with\n :py:meth:`xarray.tutorial.open_dataset`.\n :py:meth:`xarray.tutorial.load_dataset` calls `Dataset.load()` prior\n to returning (and is now deprecated). This was changed in order to facilitate\n using tutorial datasets with dask.\n By `Joe Hamman `_.\n- ``DataArray`` can now use ``xr.set_option(keep_attrs=True)`` and retain attributes in binary operations,\n such as (``+, -, * ,/``). Default behaviour is unchanged (*Attributes will be dismissed*). By `Michael Blaschek `_\n\nBug fixes\n~~~~~~~~~\n\n- ``FacetGrid`` now properly uses the ``cbar_kwargs`` keyword argument.\n (:issue:`1504`, :issue:`1717`)\n By `Deepak Cherian `_.\n- Addition and subtraction operators used with a CFTimeIndex now preserve the\n index's type. 
(:issue:`2244`).\n By `Spencer Clark `_.\n- We now properly handle arrays of ``datetime.datetime`` and ``datetime.timedelta``\n provided as coordinates. (:issue:`2512`)\n By `Deepak Cherian `_.\n- ``xarray.DataArray.roll`` correctly handles multidimensional arrays.\n (:issue:`2445`)\n By `Keisuke Fujii `_.\n- ``xarray.plot()`` now properly accepts a ``norm`` argument and does not override\n the norm's ``vmin`` and ``vmax``. (:issue:`2381`)\n By `Deepak Cherian `_.\n- ``xarray.DataArray.std()`` now correctly accepts ``ddof`` keyword argument.\n (:issue:`2240`)\n By `Keisuke Fujii `_.\n- Restore matplotlib's default of plotting dashed negative contours when\n a single color is passed to ``DataArray.contour()`` e.g. ``colors='k'``.\n By `Deepak Cherian `_.\n\n\n- Fix a bug that caused some indexing operations on arrays opened with\n ``open_rasterio`` to error (:issue:`2454`).\n By `Stephan Hoyer `_.\n\n- Subtracting one CFTimeIndex from another now returns a\n ``pandas.TimedeltaIndex``, analogous to the behavior for DatetimeIndexes\n (:issue:`2484`). By `Spencer Clark `_.\n- Adding a TimedeltaIndex to, or subtracting a TimedeltaIndex from a\n CFTimeIndex is now allowed (:issue:`2484`).\n By `Spencer Clark `_.\n- Avoid use of Dask's deprecated ``get=`` parameter in tests\n by `Matthew Rocklin `_.\n- An ``OverflowError`` is now accurately raised and caught during the\n encoding process if a reference date is used that is so distant that\n the dates must be encoded using cftime rather than NumPy (:issue:`2272`).\n By `Spencer Clark `_.\n\n- Chunked datasets can now roundtrip to Zarr storage continually\n with `to_zarr` and ``open_zarr`` (:issue:`2300`).\n By `Lily Wang `_.\n\n.. _whats-new.0.10.9:\n\nv0.10.9 (21 September 2018)\n---------------------------\n\nThis minor release contains a number of backwards compatible enhancements.\n\nAnnouncements of note:\n\n- Xarray is now a NumFOCUS fiscally sponsored project! Read\n `the anouncement `_\n for more details.\n- We have a new :doc:`roadmap` that outlines our future development plans.\n\n- `Dataset.apply` now properly documents the way `func` is called.\n By `Matti Eskelinen `_.\n\nEnhancements\n~~~~~~~~~~~~\n\n- :py:meth:`~xarray.DataArray.differentiate` and\n :py:meth:`~xarray.Dataset.differentiate` are newly added.\n (:issue:`1332`)\n By `Keisuke Fujii `_.\n\n- Default colormap for sequential and divergent data can now be set via\n :py:func:`~xarray.set_options()`\n (:issue:`2394`)\n By `Julius Busecke `_.\n\n- min_count option is newly supported in :py:meth:`~xarray.DataArray.sum`,\n :py:meth:`~xarray.DataArray.prod` and :py:meth:`~xarray.Dataset.sum`, and\n :py:meth:`~xarray.Dataset.prod`.\n (:issue:`2230`)\n By `Keisuke Fujii `_.\n\n- :py:meth:`plot()` now accepts the kwargs\n ``xscale, yscale, xlim, ylim, xticks, yticks`` just like Pandas. Also ``xincrease=False, yincrease=False`` now use matplotlib's axis inverting methods instead of setting limits.\n By `Deepak Cherian `_. (:issue:`2224`)\n\n- DataArray coordinates and Dataset coordinates and data variables are\n now displayed as `a b ... y z` rather than `a b c d ...`.\n (:issue:`1186`)\n By `Seth P `_.\n- A new CFTimeIndex-enabled :py:func:`cftime_range` function for use in\n generating dates from standard or non-standard calendars. By `Spencer Clark\n `_.\n\n- When interpolating over a ``datetime64`` axis, you can now provide a datetime string instead of a ``datetime64`` object. E.g. 
``da.interp(time='1991-02-01')``\n (:issue:`2284`)\n By `Deepak Cherian `_.\n\n- A clear error message is now displayed if a ``set`` or ``dict`` is passed in place of an array\n (:issue:`2331`)\n By `Maximilian Roos `_.\n\n- Applying ``unstack`` to a large DataArray or Dataset is now much faster if the MultiIndex has not been modified after stacking the indices.\n (:issue:`1560`)\n By `Maximilian Maahn `_.\n\n- You can now control whether or not to offset the coordinates when using\n the ``roll`` method. The current behavior (coordinates rolled by default)\n raises a deprecation warning unless the keyword argument is set explicitly.\n (:issue:`1875`)\n By `Andrew Huang `_.\n\n- You can now call ``unstack`` without arguments to unstack every MultiIndex in a DataArray or Dataset.\n By `Julia Signell `_.\n\n- Added the ability to pass a ``data`` kwarg to ``copy`` to create a new object with the\n same metadata as the original object but using new values.\n By `Julia Signell `_.\n\nBug fixes\n~~~~~~~~~\n\n- ``xarray.plot.imshow()`` correctly uses the ``origin`` argument.\n (:issue:`2379`)\n By `Deepak Cherian `_.\n\n- Fixed ``DataArray.to_iris()`` failure while creating ``DimCoord`` by\n falling back to creating ``AuxCoord``. Fixed dependency on ``var_name``\n attribute being set.\n (:issue:`2201`)\n By `Thomas Voigt `_.\n- Fixed a bug in ``zarr`` backend which prevented use with datasets with\n invalid chunk size encoding after reading from an existing store\n (:issue:`2278`).\n By `Joe Hamman `_.\n\n- Tests can be run in parallel with pytest-xdist.\n By `Tony Tung `_.\n\n- Follow up the renamings in dask; from dask.ghost to dask.overlap.\n By `Keisuke Fujii `_.\n\n- Now raises a ValueError when there is a conflict between dimension names and\n level names of MultiIndex. (:issue:`2299`)\n By `Keisuke Fujii `_.\n\n- Now :py:func:`xr.apply_ufunc` raises a ValueError when the size of\n ``input_core_dims`` is inconsistent with the number of arguments.\n (:issue:`2341`)\n By `Keisuke Fujii `_.\n\n- Fixed ``Dataset.filter_by_attrs()`` behavior not matching ``netCDF4.Dataset.get_variables_by_attributes()``.\n When more than one ``key=value`` is passed into ``Dataset.filter_by_attrs()`` it will now return a Dataset with variables which pass\n all the filters.\n (:issue:`2315`)\n By `Andrew Barna `_.\n\n.. _whats-new.0.10.8:\n\nv0.10.8 (18 July 2018)\n----------------------\n\nBreaking changes\n~~~~~~~~~~~~~~~~\n\n- Xarray no longer supports Python 3.4. Additionally, the minimum supported\n versions of the following dependencies have been updated and/or clarified:\n\n - Pandas: 0.18 -> 0.19\n - NumPy: 1.11 -> 1.12\n - Dask: 0.9 -> 0.16\n - Matplotlib: unspecified -> 1.5\n\n (:issue:`2204`). 
By `Joe Hamman `_.\n\nEnhancements\n~~~~~~~~~~~~\n\n- :py:meth:`~xarray.DataArray.interp_like` and\n :py:meth:`~xarray.Dataset.interp_like` methods are newly added.\n (:issue:`2218`)\n By `Keisuke Fujii `_.\n\n- Added support for curvilinear and unstructured generic grids\n to :py:meth:`~xarray.DataArray.to_cdms2` and\n :py:meth:`~xarray.DataArray.from_cdms2` (:issue:`2262`).\n By `Stephane Raynaud `_.\n\nBug fixes\n~~~~~~~~~\n\n- Fixed a bug in ``zarr`` backend which prevented use with datasets with\n incomplete chunks in multiple dimensions (:issue:`2225`).\n By `Joe Hamman `_.\n\n- Fixed a bug in :py:meth:`~Dataset.to_netcdf` which prevented writing\n datasets when the arrays had different chunk sizes (:issue:`2254`).\n By `Mike Neish `_.\n\n- Fixed masking during the conversion to cdms2 objects by\n :py:meth:`~xarray.DataArray.to_cdms2` (:issue:`2262`).\n By `Stephane Raynaud `_.\n\n- Fixed a bug in 2D plots which incorrectly raised an error when 2D coordinates\n weren't monotonic (:issue:`2250`).\n By `Fabien Maussion `_.\n\n- Fixed warning raised in :py:meth:`~Dataset.to_netcdf` due to deprecation of\n `effective_get` in dask (:issue:`2238`).\n By `Joe Hamman `_.\n\n.. _whats-new.0.10.7:\n\nv0.10.7 (7 June 2018)\n---------------------\n\nEnhancements\n~~~~~~~~~~~~\n\n- Plot labels now make use of metadata that follow CF conventions\n (:issue:`2135`).\n By `Deepak Cherian `_ and `Ryan Abernathey `_.\n\n- Line plots now support facetting with ``row`` and ``col`` arguments\n (:issue:`2107`).\n By `Yohai Bar Sinai `_.\n\n- :py:meth:`~xarray.DataArray.interp` and :py:meth:`~xarray.Dataset.interp`\n methods are newly added.\n See :ref:`interpolating values with interp` for the detail.\n (:issue:`2079`)\n By `Keisuke Fujii `_.\n\nBug fixes\n~~~~~~~~~\n\n- Fixed a bug in ``rasterio`` backend which prevented use with ``distributed``.\n The ``rasterio`` backend now returns pickleable objects (:issue:`2021`).\n By `Joe Hamman `_.\n\n.. _whats-new.0.10.6:\n\nv0.10.6 (31 May 2018)\n---------------------\n\nThe minor release includes a number of bug-fixes and backwards compatible\nenhancements.\n\nEnhancements\n~~~~~~~~~~~~\n\n- New PseudoNetCDF backend for many Atmospheric data formats including\n GEOS-Chem, CAMx, NOAA arlpacked bit and many others. See\n :ref:`io.PseudoNetCDF` for more details.\n By `Barron Henderson `_.\n\n- The :py:class:`Dataset` constructor now aligns :py:class:`DataArray`\n arguments in ``data_vars`` to indexes set explicitly in ``coords``,\n where previously an error would be raised.\n (:issue:`674`)\n By `Maximilian Roos `_.\n\n- :py:meth:`~DataArray.sel`, :py:meth:`~DataArray.isel` & :py:meth:`~DataArray.reindex`,\n (and their :py:class:`Dataset` counterparts) now support supplying a ``dict``\n as a first argument, as an alternative to the existing approach\n of supplying `kwargs`. This allows for more robust behavior\n of dimension names which conflict with other keyword names, or are\n not strings.\n By `Maximilian Roos `_.\n\n- :py:meth:`~DataArray.rename` now supports supplying ``**kwargs``, as an\n alternative to the existing approach of supplying a ``dict`` as the\n first argument.\n By `Maximilian Roos `_.\n\n- :py:meth:`~DataArray.cumsum` and :py:meth:`~DataArray.cumprod` now support\n aggregation over multiple dimensions at the same time. This is the default\n behavior when dimensions are not specified (previously this raised an error).\n By `Stephan Hoyer `_\n\n- :py:meth:`DataArray.dot` and :py:func:`dot` are partly supported with older\n dask<0.17.4. 
(related to :issue:`2203`)\n By `Keisuke Fujii `_.\n\n- Xarray now uses `Versioneer `__\n to manage its version strings. (:issue:`1300`).\n By `Joe Hamman `_.\n\nBug fixes\n~~~~~~~~~\n\n- Fixed a regression in 0.10.4, where explicitly specifying ``dtype='S1'`` or\n ``dtype=str`` in ``encoding`` with ``to_netcdf()`` raised an error\n (:issue:`2149`).\n `Stephan Hoyer `_\n\n- :py:func:`apply_ufunc` now directly validates output variables\n (:issue:`1931`).\n By `Stephan Hoyer `_.\n\n- Fixed a bug where ``to_netcdf(..., unlimited_dims='bar')`` yielded NetCDF\n files with spurious 0-length dimensions (i.e. ``b``, ``a``, and ``r``)\n (:issue:`2134`).\n By `Joe Hamman `_.\n\n- Removed spurious warnings with ``Dataset.update(Dataset)`` (:issue:`2161`)\n and ``array.equals(array)`` when ``array`` contains ``NaT`` (:issue:`2162`).\n By `Stephan Hoyer `_.\n\n- Aggregations with :py:meth:`Dataset.reduce` (including ``mean``, ``sum``,\n etc) no longer drop unrelated coordinates (:issue:`1470`). Also fixed a\n bug where non-scalar data-variables that did not include the aggregation\n dimension were improperly skipped.\n By `Stephan Hoyer `_\n\n- Fix :meth:`~DataArray.stack` with non-unique coordinates on pandas 0.23\n (:issue:`2160`).\n By `Stephan Hoyer `_\n\n- Selecting data indexed by a length-1 ``CFTimeIndex`` with a slice of strings\n now behaves as it does when using a length-1 ``DatetimeIndex`` (i.e. it no\n longer falsely returns an empty array when the slice includes the value in\n the index) (:issue:`2165`).\n By `Spencer Clark `_.\n\n- Fix ``DataArray.groupby().reduce()`` mutating coordinates on the input array\n when grouping over dimension coordinates with duplicated entries\n (:issue:`2153`).\n By `Stephan Hoyer `_\n\n- Fix ``Dataset.to_netcdf()`` cannot create group with ``engine=\"h5netcdf\"``\n (:issue:`2177`).\n By `Stephan Hoyer `_\n\n.. _whats-new.0.10.4:\n\nv0.10.4 (16 May 2018)\n----------------------\n\nThe minor release includes a number of bug-fixes and backwards compatible\nenhancements. A highlight is ``CFTimeIndex``, which offers support for\nnon-standard calendars used in climate modeling.\n\nDocumentation\n~~~~~~~~~~~~~\n\n- New FAQ entry, :ref:`faq.other_projects`.\n By `Deepak Cherian `_.\n- :ref:`assigning_values` now includes examples on how to select and assign\n values to a :py:class:`~xarray.DataArray` with ``.loc``.\n By `Chiara Lepore `_.\n\nEnhancements\n~~~~~~~~~~~~\n\n- Add an option for using a ``CFTimeIndex`` for indexing times with\n non-standard calendars and/or outside the Timestamp-valid range; this index\n enables a subset of the functionality of a standard\n ``pandas.DatetimeIndex``.\n See :ref:`CFTimeIndex` for full details.\n (:issue:`789`, :issue:`1084`, :issue:`1252`)\n By `Spencer Clark `_ with help from\n `Stephan Hoyer `_.\n- Allow for serialization of ``cftime.datetime`` objects (:issue:`789`,\n :issue:`1084`, :issue:`2008`, :issue:`1252`) using the standalone ``cftime``\n library.\n By `Spencer Clark `_.\n- Support writing lists of strings as netCDF attributes (:issue:`2044`).\n By `Dan Nowacki `_.\n- :py:meth:`~xarray.Dataset.to_netcdf` with ``engine='h5netcdf'`` now accepts h5py\n encoding settings ``compression`` and ``compression_opts``, along with the\n NetCDF4-Python style settings ``gzip=True`` and ``complevel``.\n This allows using any compression plugin installed in hdf5, e.g. LZF\n (:issue:`1536`). 
By `Guido Imperiale `_.\n- :py:meth:`~xarray.dot` on dask-backed data will now call :func:`dask.array.einsum`.\n This greatly boosts speed and allows chunking on the core dims.\n The function now requires dask >= 0.17.3 to work on dask-backed data\n (:issue:`2074`). By `Guido Imperiale `_.\n- ``plot.line()`` learned new kwargs: ``xincrease``, ``yincrease`` that change\n the direction of the respective axes.\n By `Deepak Cherian `_.\n\n- Added the ``parallel`` option to :py:func:`open_mfdataset`. This option uses\n ``dask.delayed`` to parallelize the open and preprocessing steps within\n ``open_mfdataset``. This is expected to provide performance improvements when\n opening many files, particularly when used in conjunction with dask's\n multiprocessing or distributed schedulers (:issue:`1981`).\n By `Joe Hamman `_.\n\n- New ``compute`` option in :py:meth:`~xarray.Dataset.to_netcdf`,\n :py:meth:`~xarray.Dataset.to_zarr`, and :py:func:`~xarray.save_mfdataset` to\n allow for the lazy computation of netCDF and zarr stores. This feature is\n currently only supported by the netCDF4 and zarr backends. (:issue:`1784`).\n By `Joe Hamman `_.\n\n\nBug fixes\n~~~~~~~~~\n\n- ``ValueError`` is raised when coordinates with the wrong size are assigned to\n a :py:class:`DataArray`. (:issue:`2112`)\n By `Keisuke Fujii `_.\n- Fixed a bug in :py:meth:`~xarary.DatasArray.rolling` with bottleneck. Also,\n fixed a bug in rolling an integer dask array. (:issue:`2113`)\n By `Keisuke Fujii `_.\n- Fixed a bug where `keep_attrs=True` flag was neglected if\n :py:func:`apply_ufunc` was used with :py:class:`Variable`. (:issue:`2114`)\n By `Keisuke Fujii `_.\n- When assigning a :py:class:`DataArray` to :py:class:`Dataset`, any conflicted\n non-dimensional coordinates of the DataArray are now dropped.\n (:issue:`2068`)\n By `Keisuke Fujii `_.\n- Better error handling in ``open_mfdataset`` (:issue:`2077`).\n By `Stephan Hoyer `_.\n- ``plot.line()`` does not call ``autofmt_xdate()`` anymore. Instead it changes\n the rotation and horizontal alignment of labels without removing the x-axes of\n any other subplots in the figure (if any).\n By `Deepak Cherian `_.\n- Colorbar limits are now determined by excluding ±Infs too.\n By `Deepak Cherian `_.\n By `Joe Hamman `_.\n- Fixed ``to_iris`` to maintain lazy dask array after conversion (:issue:`2046`).\n By `Alex Hilson `_ and `Stephan Hoyer `_.\n\n.. _whats-new.0.10.3:\n\nv0.10.3 (13 April 2018)\n------------------------\n\nThe minor release includes a number of bug-fixes and backwards compatible enhancements.\n\nEnhancements\n~~~~~~~~~~~~\n\n- :py:meth:`~xarray.DataArray.isin` and :py:meth:`~xarray.Dataset.isin` methods,\n which test each value in the array for whether it is contained in the\n supplied list, returning a bool array. See :ref:`selecting values with isin`\n for full details. Similar to the ``np.isin`` function.\n By `Maximilian Roos `_.\n- Some speed improvement to construct :py:class:`~xarray.DataArrayRolling`\n object (:issue:`1993`)\n By `Keisuke Fujii `_.\n- Handle variables with different values for ``missing_value`` and\n ``_FillValue`` by masking values for both attributes; previously this\n resulted in a ``ValueError``. (:issue:`2016`)\n By `Ryan May `_.\n\nBug fixes\n~~~~~~~~~\n\n- Fixed ``decode_cf`` function to operate lazily on dask arrays\n (:issue:`1372`). 
By `Ryan Abernathey `_.\n- Fixed labeled indexing with slice bounds given by xarray objects with\n datetime64 or timedelta64 dtypes (:issue:`1240`).\n By `Stephan Hoyer `_.\n- Attempting to convert an xarray.Dataset into a numpy array now raises an\n informative error message.\n By `Stephan Hoyer `_.\n- Fixed a bug in decode_cf_datetime where ``int32`` arrays weren't parsed\n correctly (:issue:`2002`).\n By `Fabien Maussion `_.\n- When calling `xr.auto_combine()` or `xr.open_mfdataset()` with a `concat_dim`,\n the resulting dataset will have that one-element dimension (it was\n silently dropped, previously) (:issue:`1988`).\n By `Ben Root `_.\n\n.. _whats-new.0.10.2:\n\nv0.10.2 (13 March 2018)\n-----------------------\n\nThe minor release includes a number of bug-fixes and enhancements, along with\none possibly **backwards incompatible change**.\n\nBackwards incompatible changes\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n- The addition of ``__array_ufunc__`` for xarray objects (see below) means that\n NumPy `ufunc methods`_ (e.g., ``np.add.reduce``) that previously worked on\n ``xarray.DataArray`` objects by converting them into NumPy arrays will now\n raise ``NotImplementedError`` instead. In all cases, the work-around is\n simple: convert your objects explicitly into NumPy arrays before calling the\n ufunc (e.g., with ``.values``).\n\n.. _ufunc methods: https://docs.scipy.org/doc/numpy/reference/ufuncs.html#methods\n\nEnhancements\n~~~~~~~~~~~~\n\n- Added :py:func:`~xarray.dot`, equivalent to :py:func:`np.einsum`.\n Also, :py:func:`~xarray.DataArray.dot` now supports ``dims`` option,\n which specifies the dimensions to sum over.\n (:issue:`1951`)\n By `Keisuke Fujii `_.\n\n- Support for writing xarray datasets to netCDF files (netcdf4 backend only)\n when using the `dask.distributed `_\n scheduler (:issue:`1464`).\n By `Joe Hamman `_.\n\n- Support lazy vectorized-indexing. After this change, flexible indexing such\n as orthogonal/vectorized indexing, becomes possible for all the backend\n arrays. Also, lazy ``transpose`` is now also supported. (:issue:`1897`)\n By `Keisuke Fujii `_.\n\n- Implemented NumPy's ``__array_ufunc__`` protocol for all xarray objects\n (:issue:`1617`). This enables using NumPy ufuncs directly on\n ``xarray.Dataset`` objects with recent versions of NumPy (v1.13 and newer):\n\n .. ipython:: python\n\n ds = xr.Dataset({'a': 1})\n np.sin(ds)\n\n This obliviates the need for the ``xarray.ufuncs`` module, which will be\n deprecated in the future when xarray drops support for older versions of\n NumPy. By `Stephan Hoyer `_.\n\n- Improve :py:func:`~xarray.DataArray.rolling` logic.\n :py:func:`~xarray.DataArrayRolling` object now supports\n :py:func:`~xarray.DataArrayRolling.construct` method that returns a view\n of the DataArray / Dataset object with the rolling-window dimension added\n to the last axis. This enables more flexible operation, such as strided\n rolling, windowed rolling, ND-rolling, short-time FFT and convolution.\n (:issue:`1831`, :issue:`1142`, :issue:`819`)\n By `Keisuke Fujii `_.\n- :py:func:`~plot.line()` learned to make plots with data on x-axis if so specified. 
(:issue:`575`)\n By `Deepak Cherian `_.\n\nBug fixes\n~~~~~~~~~\n\n- Raise an informative error message when using ``apply_ufunc`` with numpy\n v1.11 (:issue:`1956`).\n By `Stephan Hoyer `_.\n- Fix the precision drop after indexing datetime64 arrays (:issue:`1932`).\n By `Keisuke Fujii `_.\n- Silenced irrelevant warnings issued by ``open_rasterio`` (:issue:`1964`).\n By `Stephan Hoyer `_.\n- Fix kwarg `colors` clashing with auto-inferred `cmap` (:issue:`1461`)\n By `Deepak Cherian `_.\n- Fix :py:func:`~xarray.plot.imshow` error when passed an RGB array with\n size one in a spatial dimension.\n By `Zac Hatfield-Dodds `_.\n\n.. _whats-new.0.10.1:\n\nv0.10.1 (25 February 2018)\n--------------------------\n\nThe minor release includes a number of bug-fixes and backwards compatible enhancements.\n\nDocumentation\n~~~~~~~~~~~~~\n\n- Added a new guide on :ref:`contributing` (:issue:`640`)\n By `Joe Hamman `_.\n- Added apply_ufunc example to :ref:`toy weather data` (:issue:`1844`).\n By `Liam Brannigan `_.\n- New entry `Why don’t aggregations return Python scalars?` in the\n :doc:`faq` (:issue:`1726`).\n By `0x0L `_.\n\nEnhancements\n~~~~~~~~~~~~\n**New functions and methods**:\n\n- Added :py:meth:`DataArray.to_iris` and\n :py:meth:`DataArray.from_iris` for\n converting data arrays to and from Iris_ Cubes with the same data and coordinates\n (:issue:`621` and :issue:`37`).\n By `Neil Parley `_ and `Duncan Watson-Parris `_.\n- Experimental support for using `Zarr`_ as storage layer for xarray\n (:issue:`1223`).\n By `Ryan Abernathey `_ and\n `Joe Hamman `_.\n- New :py:meth:`~xarray.DataArray.rank` on arrays and datasets. Requires\n bottleneck (:issue:`1731`).\n By `0x0L `_.\n- ``.dt`` accessor can now ceil, floor and round timestamps to specified frequency.\n By `Deepak Cherian `_.\n\n**Plotting enhancements**:\n\n- :func:`xarray.plot.imshow` now handles RGB and RGBA images.\n Saturation can be adjusted with ``vmin`` and ``vmax``, or with ``robust=True``.\n By `Zac Hatfield-Dodds `_.\n- :py:func:`~plot.contourf()` learned to contour 2D variables that have both a\n 1D coordinate (e.g. time) and a 2D coordinate (e.g. depth as a function of\n time) (:issue:`1737`).\n By `Deepak Cherian `_.\n- :py:func:`~plot()` rotates x-axis ticks if x-axis is time.\n By `Deepak Cherian `_.\n- :py:func:`~plot.line()` can draw multiple lines if provided with a\n 2D variable.\n By `Deepak Cherian `_.\n\n**Other enhancements**:\n\n- Reduce methods such as :py:func:`DataArray.sum()` now handles object-type array.\n\n .. ipython:: python\n\n da = xr.DataArray(np.array([True, False, np.nan], dtype=object), dims='x')\n da.sum()\n\n (:issue:`1866`)\n By `Keisuke Fujii `_.\n- Reduce methods such as :py:func:`DataArray.sum()` now accepts ``dtype``\n arguments. (:issue:`1838`)\n By `Keisuke Fujii `_.\n- Added nodatavals attribute to DataArray when using :py:func:`~xarray.open_rasterio`. 
(:issue:`1736`).\n By `Alan Snow `_.\n- Use ``pandas.Grouper`` class in xarray resample methods rather than the\n deprecated ``pandas.TimeGrouper`` class (:issue:`1766`).\n By `Joe Hamman `_.\n- Experimental support for parsing ENVI metadata to coordinates and attributes\n in :py:func:`xarray.open_rasterio`.\n By `Matti Eskelinen `_.\n- Reduce memory usage when decoding a variable with a scale_factor, by\n converting 8-bit and 16-bit integers to float32 instead of float64\n (:pull:`1840`), and keeping float16 and float32 as float32 (:issue:`1842`).\n Correspondingly, encoded variables may also be saved with a smaller dtype.\n By `Zac Hatfield-Dodds `_.\n- Speed of reindexing/alignment with dask array is orders of magnitude faster\n when inserting missing values (:issue:`1847`).\n By `Stephan Hoyer `_.\n- Fix ``axis`` keyword ignored when applying ``np.squeeze`` to ``DataArray`` (:issue:`1487`).\n By `Florian Pinault `_.\n- ``netcdf4-python`` has moved its time handling in the ``netcdftime`` module to\n a standalone package (`netcdftime`_). As such, xarray now considers `netcdftime`_\n an optional dependency. One benefit of this change is that it allows for\n encoding/decoding of datetimes with non-standard calendars without the\n ``netcdf4-python`` dependency (:issue:`1084`).\n By `Joe Hamman `_.\n\n.. _Zarr: http://zarr.readthedocs.io/\n\n.. _Iris: http://scitools.org.uk/iris\n\n.. _netcdftime: https://unidata.github.io/netcdftime\n\nBug fixes\n~~~~~~~~~\n- Rolling aggregation with ``center=True`` option now gives the same result\n with pandas including the last element (:issue:`1046`).\n By `Keisuke Fujii `_.\n\n- Support indexing with a 0d-np.ndarray (:issue:`1921`).\n By `Keisuke Fujii `_.\n- Added warning in api.py of a netCDF4 bug that occurs when\n the filepath has 88 characters (:issue:`1745`).\n By `Liam Brannigan `_.\n- Fixed encoding of multi-dimensional coordinates in\n :py:meth:`~Dataset.to_netcdf` (:issue:`1763`).\n By `Mike Neish `_.\n- Fixed chunking with non-file-based rasterio datasets (:issue:`1816`) and\n refactored rasterio test suite.\n By `Ryan Abernathey `_.\n- Bug fix in open_dataset(engine='pydap') (:issue:`1775`).\n By `Keisuke Fujii `_.\n- Bug fix in vectorized assignment (:issue:`1743`, :issue:`1744`).\n Now item assignment to :py:meth:`~DataArray.__setitem__` checks\n coordinates of target, destination and keys. If there are any conflicts among\n these coordinates, ``IndexError`` will be raised.\n By `Keisuke Fujii `_.\n- Properly point :py:meth:`DataArray.__dask_scheduler__` to\n ``dask.threaded.get``. 
By `Matthew Rocklin `_.\n- Bug fixes in :py:meth:`DataArray.plot.imshow`: all-NaN arrays and arrays\n with size one in some dimension can now be plotted, which is good for\n exploring satellite imagery (:issue:`1780`).\n By `Zac Hatfield-Dodds `_.\n- Fixed ``UnboundLocalError`` when opening netCDF file (:issue:`1781`).\n By `Stephan Hoyer `_.\n- The ``variables``, ``attrs``, and ``dimensions`` properties have been\n deprecated as part of a bug fix addressing an issue where backends were\n unintentionally loading the datastores data and attributes repeatedly during\n writes (:issue:`1798`).\n By `Joe Hamman `_.\n- Compatibility fixes to plotting module for Numpy 1.14 and Pandas 0.22\n (:issue:`1813`).\n By `Joe Hamman `_.\n- Bug fix in encoding coordinates with ``{'_FillValue': None}`` in netCDF\n metadata (:issue:`1865`).\n By `Chris Roth `_.\n- Fix indexing with lists for arrays loaded from netCDF files with\n ``engine='h5netcdf`` (:issue:`1864`).\n By `Stephan Hoyer `_.\n- Corrected a bug with incorrect coordinates for non-georeferenced geotiff\n files (:issue:`1686`). Internally, we now use the rasterio coordinate\n transform tool instead of doing the computations ourselves. A\n ``parse_coordinates`` kwarg has beed added to :py:func:`~open_rasterio`\n (set to ``True`` per default).\n By `Fabien Maussion `_.\n- The colors of discrete colormaps are now the same regardless if `seaborn`\n is installed or not (:issue:`1896`).\n By `Fabien Maussion `_.\n- Fixed dtype promotion rules in :py:func:`where` and :py:func:`concat` to\n match pandas (:issue:`1847`). A combination of strings/numbers or\n unicode/bytes now promote to object dtype, instead of strings or unicode.\n By `Stephan Hoyer `_.\n- Fixed bug where :py:meth:`~xarray.DataArray.isnull` was loading data\n stored as dask arrays (:issue:`1937`).\n By `Joe Hamman `_.\n\n.. _whats-new.0.10.0:\n\nv0.10.0 (20 November 2017)\n--------------------------\n\nThis is a major release that includes bug fixes, new features and a few\nbackwards incompatible changes. Highlights include:\n\n- Indexing now supports broadcasting over dimensions, similar to NumPy's\n vectorized indexing (but better!).\n- :py:meth:`~DataArray.resample` has a new groupby-like API like pandas.\n- :py:func:`~xarray.apply_ufunc` facilitates wrapping and parallelizing\n functions written for NumPy arrays.\n- Performance improvements, particularly for dask and :py:func:`open_mfdataset`.\n\nBreaking changes\n~~~~~~~~~~~~~~~~\n\n- xarray now supports a form of vectorized indexing with broadcasting, where\n the result of indexing depends on dimensions of indexers,\n e.g., ``array.sel(x=ind)`` with ``ind.dims == ('y',)``. Alignment between\n coordinates on indexed and indexing objects is also now enforced.\n Due to these changes, existing uses of xarray objects to index other xarray\n objects will break in some cases.\n\n The new indexing API is much more powerful, supporting outer, diagonal and\n vectorized indexing in a single interface.\n The ``isel_points`` and ``sel_points`` methods are deprecated, since they are\n now redundant with the ``isel`` / ``sel`` methods.\n See :ref:`vectorized_indexing` for the details (:issue:`1444`,\n :issue:`1436`).\n By `Keisuke Fujii `_ and\n `Stephan Hoyer `_.\n\n- A new resampling interface to match pandas' groupby-like API was added to\n :py:meth:`Dataset.resample` and :py:meth:`DataArray.resample`\n (:issue:`1272`). 
:ref:`Timeseries resampling ` is\n fully supported for data with arbitrary dimensions as is both downsampling\n and upsampling (including linear, quadratic, cubic, and spline interpolation).\n\n Old syntax:\n\n .. ipython::\n :verbatim:\n\n In [1]: ds.resample('24H', dim='time', how='max')\n Out[1]:\n \n [...]\n\n New syntax:\n\n .. ipython::\n :verbatim:\n\n In [1]: ds.resample(time='24H').max()\n Out[1]:\n \n [...]\n\n Note that both versions are currently supported, but using the old syntax will\n produce a warning encouraging users to adopt the new syntax.\n By `Daniel Rothenberg `_.\n\n- Calling ``repr()`` or printing xarray objects at the command line or in a\n Jupyter Notebook will not longer automatically compute dask variables or\n load data on arrays lazily loaded from disk (:issue:`1522`).\n By `Guido Imperiale `_.\n\n- Supplying ``coords`` as a dictionary to the ``DataArray`` constructor without\n also supplying an explicit ``dims`` argument is no longer supported. This\n behavior was deprecated in version 0.9 but will now raise an error\n (:issue:`727`).\n\n- Several existing features have been deprecated and will change to new\n behavior in xarray v0.11. If you use any of them with xarray v0.10, you\n should see a ``FutureWarning`` that describes how to update your code:\n\n - ``Dataset.T`` has been deprecated an alias for ``Dataset.transpose()``\n (:issue:`1232`). In the next major version of xarray, it will provide short-\n cut lookup for variables or attributes with name ``'T'``.\n - ``DataArray.__contains__`` (e.g., ``key in data_array``) currently checks\n for membership in ``DataArray.coords``. In the next major version of\n xarray, it will check membership in the array data found in\n ``DataArray.values`` instead (:issue:`1267`).\n - Direct iteration over and counting a ``Dataset`` (e.g., ``[k for k in ds]``,\n ``ds.keys()``, ``ds.values()``, ``len(ds)`` and ``if ds``) currently\n includes all variables, both data and coordinates. For improved usability\n and consistency with pandas, in the next major version of xarray these will\n change to only include data variables (:issue:`884`). Use ``ds.variables``,\n ``ds.data_vars`` or ``ds.coords`` as alternatives.\n\n- Changes to minimum versions of dependencies:\n\n - Old numpy < 1.11 and pandas < 0.18 are no longer supported (:issue:`1512`).\n By `Keisuke Fujii `_.\n - The minimum supported version bottleneck has increased to 1.1\n (:issue:`1279`).\n By `Joe Hamman `_.\n\nEnhancements\n~~~~~~~~~~~~\n\n**New functions/methods**\n\n- New helper function :py:func:`~xarray.apply_ufunc` for wrapping functions\n written to work on NumPy arrays to support labels on xarray objects\n (:issue:`770`). ``apply_ufunc`` also support automatic parallelization for\n many functions with dask. See :ref:`comput.wrapping-custom` and\n :ref:`dask.automatic-parallelization` for details.\n By `Stephan Hoyer `_.\n\n- Added new method :py:meth:`Dataset.to_dask_dataframe`, convert a dataset into\n a dask dataframe.\n This allows lazy loading of data from a dataset containing dask arrays (:issue:`1462`).\n By `James Munroe `_.\n\n- New function :py:func:`~xarray.where` for conditionally switching between\n values in xarray objects, like :py:func:`numpy.where`:\n\n .. 
ipython::\n :verbatim:\n\n In [1]: import xarray as xr\n\n In [2]: arr = xr.DataArray([[1, 2, 3], [4, 5, 6]], dims=('x', 'y'))\n\n In [3]: xr.where(arr % 2, 'even', 'odd')\n Out[3]:\n \n array([['even', 'odd', 'even'],\n ['odd', 'even', 'odd']],\n dtype='`_.\n\n- Added :py:func:`~xarray.show_versions` function to aid in debugging\n (:issue:`1485`).\n By `Joe Hamman `_.\n\n**Performance improvements**\n\n- :py:func:`~xarray.concat` was computing variables that aren't in memory\n (e.g. dask-based) multiple times; :py:func:`~xarray.open_mfdataset`\n was loading them multiple times from disk. Now, both functions will instead\n load them at most once and, if they do, store them in memory in the\n concatenated array/dataset (:issue:`1521`).\n By `Guido Imperiale `_.\n\n- Speed-up (x 100) of :py:func:`~xarray.conventions.decode_cf_datetime`.\n By `Christian Chwala `_.\n\n**IO related improvements**\n\n- Unicode strings (``str`` on Python 3) are now round-tripped successfully even\n when written as character arrays (e.g., as netCDF3 files or when using\n ``engine='scipy'``) (:issue:`1638`). This is controlled by the ``_Encoding``\n attribute convention, which is also understood directly by the netCDF4-Python\n interface. See :ref:`io.string-encoding` for full details.\n By `Stephan Hoyer `_.\n\n- Support for ``data_vars`` and ``coords`` keywords from\n :py:func:`~xarray.concat` added to :py:func:`~xarray.open_mfdataset`\n (:issue:`438`). Using these keyword arguments can significantly reduce\n memory usage and increase speed.\n By `Oleksandr Huziy `_.\n\n- Support for :py:class:`pathlib.Path` objects added to\n :py:func:`~xarray.open_dataset`, :py:func:`~xarray.open_mfdataset`,\n :py:func:`~xarray.to_netcdf`, and :py:func:`~xarray.save_mfdataset`\n (:issue:`799`):\n\n .. ipython::\n :verbatim:\n\n In [2]: from pathlib import Path # In Python 2, use pathlib2!\n\n In [3]: data_dir = Path(\"data/\")\n\n In [4]: one_file = data_dir / \"dta_for_month_01.nc\"\n\n In [5]: xr.open_dataset(one_file)\n Out[5]:\n \n [...]\n\n By `Willi Rath `_.\n\n- You can now explicitly disable any default ``_FillValue`` (``NaN`` for\n floating point values) by passing the enconding ``{'_FillValue': None}``\n (:issue:`1598`).\n By `Stephan Hoyer `_.\n\n- More attributes available in :py:attr:`~xarray.Dataset.attrs` dictionary when\n raster files are opened with :py:func:`~xarray.open_rasterio`.\n By `Greg Brener `_.\n\n- Support for NetCDF files using an ``_Unsigned`` attribute to indicate that a\n a signed integer data type should be interpreted as unsigned bytes\n (:issue:`1444`).\n By `Eric Bruning `_.\n\n- Support using an existing, opened netCDF4 ``Dataset`` with\n :py:class:`~xarray.backends.NetCDF4DataStore`. This permits creating an\n :py:class:`~xarray.Dataset` from a netCDF4 ``Dataset`` that has been opened using\n other means (:issue:`1459`).\n By `Ryan May `_.\n\n- Changed :py:class:`~xarray.backends.PydapDataStore` to take a Pydap dataset.\n This permits opening Opendap datasets that require authentication, by\n instantiating a Pydap dataset with a session object. 
Also added\n :py:meth:`xarray.backends.PydapDataStore.open` which takes a url and session\n object (:issue:`1068`).\n By `Philip Graae `_.\n\n- Support reading and writing unlimited dimensions with h5netcdf (:issue:`1636`).\n By `Joe Hamman `_.\n\n**Other improvements**\n\n- Added ``_ipython_key_completions_`` to xarray objects, to enable\n autocompletion for dictionary-like access in IPython, e.g.,\n ``ds['tem`` + tab -> ``ds['temperature']`` (:issue:`1628`).\n By `Keisuke Fujii `_.\n\n- Support passing keyword arguments to ``load``, ``compute``, and ``persist``\n methods. Any keyword arguments supplied to these methods are passed on to\n the corresponding dask function (:issue:`1523`).\n By `Joe Hamman `_.\n\n- Encoding attributes are now preserved when xarray objects are concatenated.\n The encoding is copied from the first object (:issue:`1297`).\n By `Joe Hamman `_ and\n `Gerrit Holl `_.\n\n- Support applying rolling window operations using bottleneck's moving window\n functions on data stored as dask arrays (:issue:`1279`).\n By `Joe Hamman `_.\n\n- Experimental support for the Dask collection interface (:issue:`1674`).\n By `Matthew Rocklin `_.\n\nBug fixes\n~~~~~~~~~\n\n- Suppress ``RuntimeWarning`` issued by ``numpy`` for \"invalid value comparisons\"\n (e.g. ``NaN``). Xarray now behaves similarly to Pandas in its treatment of\n binary and unary operations on objects with NaNs (:issue:`1657`).\n By `Joe Hamman `_.\n\n- Unsigned int support for reduce methods with ``skipna=True``\n (:issue:`1562`).\n By `Keisuke Fujii `_.\n\n- Fixes to ensure xarray works properly with pandas 0.21:\n\n - Fix :py:meth:`~xarray.DataArray.isnull` method (:issue:`1549`).\n - :py:meth:`~xarray.DataArray.to_series` and\n :py:meth:`~xarray.Dataset.to_dataframe` should not return a ``pandas.MultiIndex``\n for 1D data (:issue:`1548`).\n - Fix plotting with datetime64 axis labels (:issue:`1661`).\n\n By `Stephan Hoyer `_.\n\n- :py:func:`~xarray.open_rasterio` method now shifts the rasterio\n coordinates so that they are centered in each pixel (:issue:`1468`).\n By `Greg Brener `_.\n\n- :py:meth:`~xarray.Dataset.rename` method now doesn't throw errors\n if some ``Variable`` is renamed to the same name as another ``Variable``\n as long as that other ``Variable`` is also renamed (:issue:`1477`). 
This\n method now does throw when two ``Variables`` would end up with the same name\n after the rename (since one of them would get overwritten in this case).\n By `Prakhar Goel `_.\n\n- Fix :py:func:`xarray.testing.assert_allclose` to actually use ``atol`` and\n ``rtol`` arguments when called on ``DataArray`` objects (:issue:`1488`).\n By `Stephan Hoyer `_.\n\n- xarray ``quantile`` methods now properly raise a ``TypeError`` when applied to\n objects with data stored as ``dask`` arrays (:issue:`1529`).\n By `Joe Hamman `_.\n\n- Fix positional indexing to allow the use of unsigned integers (:issue:`1405`).\n By `Joe Hamman `_ and\n `Gerrit Holl `_.\n\n- Creating a :py:class:`Dataset` now raises ``MergeError`` if a coordinate\n shares a name with a dimension but is comprised of arbitrary dimensions\n (:issue:`1120`).\n By `Joe Hamman `_.\n\n- :py:func:`~xarray.open_rasterio` method now skips rasterio's ``crs``\n attribute if its value is ``None`` (:issue:`1520`).\n By `Leevi Annala `_.\n\n- Fix :py:func:`xarray.DataArray.to_netcdf` to return bytes when no path is\n provided (:issue:`1410`).\n By `Joe Hamman `_.\n\n- Fix :py:func:`xarray.save_mfdataset` to properly raise an informative error\n when objects other than ``Dataset`` are provided (:issue:`1555`).\n By `Joe Hamman `_.\n\n- :py:func:`xarray.Dataset.copy` would not preserve the encoding property\n (:issue:`1586`).\n By `Guido Imperiale `_.\n\n- :py:func:`xarray.concat` would eagerly load dask variables into memory if\n the first argument was a numpy variable (:issue:`1588`).\n By `Guido Imperiale `_.\n\n- Fix bug in :py:meth:`~xarray.Dataset.to_netcdf` when writing in append mode\n (:issue:`1215`).\n By `Joe Hamman `_.\n\n- Fix ``netCDF4`` backend to properly roundtrip the ``shuffle`` encoding option\n (:issue:`1606`).\n By `Joe Hamman `_.\n\n- Fix bug when using ``pytest`` class decorators to skiping certain unittests.\n The previous behavior unintentionally causing additional tests to be skipped\n (:issue:`1531`). By `Joe Hamman `_.\n\n- Fix pynio backend for upcoming release of pynio with Python 3 support\n (:issue:`1611`). By `Ben Hillman `_.\n\n- Fix ``seaborn`` import warning for Seaborn versions 0.8 and newer when the\n ``apionly`` module was deprecated.\n (:issue:`1633`). By `Joe Hamman `_.\n\n- Fix COMPAT: MultiIndex checking is fragile\n (:issue:`1833`). By `Florian Pinault `_.\n\n- Fix ``rasterio`` backend for Rasterio versions 1.0alpha10 and newer.\n (:issue:`1641`). By `Chris Holden `_.\n\nBug fixes after rc1\n~~~~~~~~~~~~~~~~~~~\n\n- Suppress warning in IPython autocompletion, related to the deprecation\n of ``.T`` attributes (:issue:`1675`).\n By `Keisuke Fujii `_.\n\n- Fix a bug in lazily-indexing netCDF array. (:issue:`1688`)\n By `Keisuke Fujii `_.\n\n- (Internal bug) MemoryCachedArray now supports the orthogonal indexing.\n Also made some internal cleanups around array wrappers (:issue:`1429`).\n By `Keisuke Fujii `_.\n\n- (Internal bug) MemoryCachedArray now always wraps ``np.ndarray`` by\n ``NumpyIndexingAdapter``. 
(:issue:`1694`)\n By `Keisuke Fujii `_.\n\n- Fix importing xarray when running Python with ``-OO`` (:issue:`1706`).\n By `Stephan Hoyer `_.\n\n- Saving a netCDF file with a coordinates with a spaces in its names now raises\n an appropriate warning (:issue:`1689`).\n By `Stephan Hoyer `_.\n\n- Fix two bugs that were preventing dask arrays from being specified as\n coordinates in the DataArray constructor (:issue:`1684`).\n By `Joe Hamman `_.\n\n- Fixed ``apply_ufunc`` with ``dask='parallelized'`` for scalar arguments\n (:issue:`1697`).\n By `Stephan Hoyer `_.\n\n- Fix \"Chunksize cannot exceed dimension size\" error when writing netCDF4 files\n loaded from disk (:issue:`1225`).\n By `Stephan Hoyer `_.\n\n- Validate the shape of coordinates with names matching dimensions in the\n DataArray constructor (:issue:`1709`).\n By `Stephan Hoyer `_.\n\n- Raise ``NotImplementedError`` when attempting to save a MultiIndex to a\n netCDF file (:issue:`1547`).\n By `Stephan Hoyer `_.\n\n- Remove netCDF dependency from rasterio backend tests.\n By `Matti Eskelinen `_\n\nBug fixes after rc2\n~~~~~~~~~~~~~~~~~~~\n\n- Fixed unexpected behavior in ``Dataset.set_index()`` and\n ``DataArray.set_index()`` introduced by Pandas 0.21.0. Setting a new\n index with a single variable resulted in 1-level\n ``pandas.MultiIndex`` instead of a simple ``pandas.Index``\n (:issue:`1722`). By `Benoit Bovy `_.\n\n- Fixed unexpected memory loading of backend arrays after ``print``.\n (:issue:`1720`). By `Keisuke Fujii `_.\n\n.. _whats-new.0.9.6:\n\nv0.9.6 (8 June 2017)\n--------------------\n\nThis release includes a number of backwards compatible enhancements and bug\nfixes.\n\nEnhancements\n~~~~~~~~~~~~\n\n- New :py:meth:`~xarray.Dataset.sortby` method to ``Dataset`` and ``DataArray``\n that enable sorting along dimensions (:issue:`967`).\n See :ref:`the docs ` for examples.\n By `Chun-Wei Yuan `_ and\n `Kyle Heuton `_.\n\n- Add ``.dt`` accessor to DataArrays for computing datetime-like properties\n for the values they contain, similar to ``pandas.Series`` (:issue:`358`).\n By `Daniel Rothenberg `_.\n\n- Renamed internal dask arrays created by ``open_dataset`` to match new dask\n conventions (:issue:`1343`).\n By `Ryan Abernathey `_.\n\n- :py:meth:`~xarray.as_variable` is now part of the public API (:issue:`1303`).\n By `Benoit Bovy `_.\n\n- :py:func:`~xarray.align` now supports ``join='exact'``, which raises\n an error instead of aligning when indexes to be aligned are not equal.\n By `Stephan Hoyer `_.\n\n- New function :py:func:`~xarray.open_rasterio` for opening raster files with\n the `rasterio `_ library.\n See :ref:`the docs ` for details.\n By `Joe Hamman `_,\n `Nic Wayand `_ and\n `Fabien Maussion `_\n\nBug fixes\n~~~~~~~~~\n\n- Fix error from repeated indexing of datasets loaded from disk (:issue:`1374`).\n By `Stephan Hoyer `_.\n\n- Fix a bug where ``.isel_points`` wrongly assigns unselected coordinate to\n ``data_vars``.\n By `Keisuke Fujii `_.\n\n- Tutorial datasets are now checked against a reference MD5 sum to confirm\n successful download (:issue:`1392`). By `Matthew Gidden\n `_.\n\n- ``DataArray.chunk()`` now accepts dask specific kwargs like\n ``Dataset.chunk()`` does. 
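\n\n For example (a sketch that assumes the ``name_prefix`` and ``lock`` keywords already accepted by ``Dataset.chunk()``):\n\n .. ipython::\n :verbatim:\n\n In [1]: da = xr.DataArray(np.zeros((100, 100)), dims=('x', 'y'))\n\n In [2]: da.chunk(50, name_prefix='custom-', lock=True)\n\n 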
By `Fabien Maussion `_.\n\n- Support for ``engine='pydap'`` with recent releases of Pydap (3.2.2+),\n including on Python 3 (:issue:`1174`).\n\nDocumentation\n~~~~~~~~~~~~~\n\n- A new `gallery `_\n allows adding interactive examples to the documentation.\n By `Fabien Maussion `_.\n\nTesting\n~~~~~~~\n\n- Fix test suite failure caused by changes to ``pandas.cut`` function\n (:issue:`1386`).\n By `Ryan Abernathey `_.\n\n- Enhanced the test suite by use of the ``@network`` decorator, which is\n controlled via the ``--run-network-tests`` command line argument\n to ``py.test`` (:issue:`1393`).\n By `Matthew Gidden `_.\n\n.. _whats-new.0.9.5:\n\nv0.9.5 (17 April, 2017)\n-----------------------\n\nRemove an inadvertently introduced print statement.\n\n.. _whats-new.0.9.3:\n\nv0.9.3 (16 April, 2017)\n-----------------------\n\nThis minor release includes bug-fixes and backwards compatible enhancements.\n\nEnhancements\n~~~~~~~~~~~~\n\n- New :py:meth:`~xarray.DataArray.persist` method to Datasets and DataArrays to\n enable persisting data in distributed memory when using Dask (:issue:`1344`).\n By `Matthew Rocklin `_.\n\n- New :py:meth:`~xarray.DataArray.expand_dims` method for ``DataArray`` and\n ``Dataset`` (:issue:`1326`).\n By `Keisuke Fujii `_.\n\nBug fixes\n~~~~~~~~~\n\n- Fix ``.where()`` with ``drop=True`` when arguments do not have indexes\n (:issue:`1350`). This bug, introduced in v0.9, resulted in xarray producing\n incorrect results in some cases.\n By `Stephan Hoyer `_.\n\n- Fixed writing to file-like objects with :py:meth:`~xarray.Dataset.to_netcdf`\n (:issue:`1320`).\n `Stephan Hoyer `_.\n\n- Fixed explicitly setting ``engine='scipy'`` with ``to_netcdf`` when not\n providing a path (:issue:`1321`).\n `Stephan Hoyer `_.\n\n- Fixed ``open_dataarray`` not properly passing its parameters to ``open_dataset``\n (:issue:`1359`).\n `Stephan Hoyer `_.\n\n- Ensure the test suite works when run from an installed version of xarray\n (:issue:`1336`). Use ``@pytest.mark.slow`` instead of a custom flag to mark\n slow tests.\n By `Stephan Hoyer `_.\n\n.. _whats-new.0.9.2:\n\nv0.9.2 (2 April 2017)\n---------------------\n\nThe minor release includes bug-fixes and backwards compatible enhancements.\n\nEnhancements\n~~~~~~~~~~~~\n\n- ``.rolling()`` on Dataset is now supported (:issue:`859`).\n By `Keisuke Fujii `_.\n\n- When bottleneck version 1.1 or later is installed, use bottleneck for rolling\n ``var``, ``argmin``, ``argmax``, and ``rank`` computations. Also, rolling\n median now accepts a ``min_periods`` argument (:issue:`1276`).\n By `Joe Hamman `_.\n\n- When ``.plot()`` is called on a 2D DataArray and only one dimension is\n specified with ``x=`` or ``y=``, the other dimension is now guessed\n (:issue:`1291`).\n By `Vincent Noel `_.\n\n- Added new method :py:meth:`~Dataset.assign_attrs` to ``DataArray`` and\n ``Dataset``, a chained-method compatible implementation of the\n ``dict.update`` method on attrs (:issue:`1281`).\n By `Henry S. Harrison `_.\n\n- Added new ``autoclose=True`` argument to\n :py:func:`~xarray.open_mfdataset` to explicitly close opened files when not in\n use to prevent occurrence of an OS Error related to too many open files\n (:issue:`1198`).\n Note, the default is ``autoclose=False``, which is consistent with\n previous xarray behavior.
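\n\n For example (a sketch; the glob pattern is hypothetical):\n\n .. ipython::\n :verbatim:\n\n In [1]: ds = xr.open_mfdataset('data/*.nc', autoclose=True)\n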
\n By `Phillip J. Wolfram `_.\n\n- The ``repr()`` of ``Dataset`` and ``DataArray`` attributes uses a similar\n format to coordinates and variables, with vertically aligned entries\n truncated to fit on a single line (:issue:`1319`). Hopefully this will stop\n people writing ``data.attrs = {}`` and discarding metadata in notebooks for\n the sake of cleaner output. The full metadata is still available as\n ``data.attrs``.\n By `Zac Hatfield-Dodds `_.\n\n- Enhanced the test suite by use of the ``@slow`` and ``@flaky`` decorators, which are\n controlled via the ``--run-flaky`` and ``--skip-slow`` command line arguments\n to ``py.test`` (:issue:`1336`).\n By `Stephan Hoyer `_ and\n `Phillip J. Wolfram `_.\n\n- New aggregation on rolling objects :py:meth:`DataArray.rolling(...).count()`\n which provides a rolling count of valid values (:issue:`1138`).\n\nBug fixes\n~~~~~~~~~\n- Rolling operations now preserve the original dimension order (:issue:`1125`).\n By `Keisuke Fujii `_.\n\n- Fixed ``sel`` with ``method='nearest'`` on Python 2.7 and 64-bit Windows\n (:issue:`1140`).\n `Stephan Hoyer `_.\n\n- Fixed ``where`` with ``drop=True`` for empty masks (:issue:`1341`).\n By `Stephan Hoyer `_ and\n `Phillip J. Wolfram `_.\n\n.. _whats-new.0.9.1:\n\nv0.9.1 (30 January 2017)\n------------------------\n\nRenamed the \"Unindexed dimensions\" section in the ``Dataset`` and\n``DataArray`` repr (added in v0.9.0) to \"Dimensions without coordinates\"\n(:issue:`1199`).\n\n.. _whats-new.0.9.0:\n\nv0.9.0 (25 January 2017)\n------------------------\n\nThis major release includes five months worth of enhancements and bug fixes from\n24 contributors, including some significant changes that are not fully backwards\ncompatible. Highlights include:\n\n- Coordinates are now *optional* in the xarray data model, even for dimensions.\n- Changes to caching, lazy loading and pickling to improve xarray's experience\n for parallel computing.\n- Improvements for accessing and manipulating ``pandas.MultiIndex`` levels.\n- Many new methods and functions, including\n :py:meth:`~DataArray.quantile`,\n :py:meth:`~DataArray.cumsum`,\n :py:meth:`~DataArray.cumprod`,\n :py:attr:`~DataArray.combine_first`,\n :py:meth:`~DataArray.set_index`,\n :py:meth:`~DataArray.reset_index`,\n :py:meth:`~DataArray.reorder_levels`,\n :py:func:`~xarray.full_like`,\n :py:func:`~xarray.zeros_like`,\n :py:func:`~xarray.ones_like`,\n :py:func:`~xarray.open_dataarray`,\n :py:meth:`~DataArray.compute`,\n :py:meth:`Dataset.info`,\n :py:func:`testing.assert_equal`,\n :py:func:`testing.assert_identical`, and\n :py:func:`testing.assert_allclose`.\n\nBreaking changes\n~~~~~~~~~~~~~~~~\n\n- Index coordinates for each dimension are now optional, and no longer created\n by default :issue:`1017`. You can identify such dimensions without coordinates\n by their appearance in the list of \"Dimensions without coordinates\" in the\n ``Dataset`` or ``DataArray`` repr:\n\n .. ipython::\n :verbatim:\n\n In [1]: xr.Dataset({'foo': (('x', 'y'), [[1, 2]])})\n Out[1]:\n \n Dimensions: (x: 1, y: 2)\n Dimensions without coordinates: x, y\n Data variables:\n foo (x, y) int64 1 2\n\n This has a number of implications:\n\n - :py:func:`~align` and :py:meth:`~Dataset.reindex` can now error if\n dimension labels are missing and dimensions have different sizes.\n - Because pandas does not support missing indexes, methods such as\n ``to_dataframe``/``from_dataframe`` and ``stack``/``unstack`` no longer\n roundtrip faithfully on all inputs. 
Use :py:meth:`~Dataset.reset_index` to\n remove undesired indexes.\n - ``Dataset.__delitem__`` and :py:meth:`~Dataset.drop` no longer delete/drop\n variables that have dimensions matching a deleted/dropped variable.\n - ``DataArray.coords.__delitem__`` is now allowed on variables matching\n dimension names.\n - ``.sel`` and ``.loc`` now handle indexing along a dimension without\n coordinate labels by doing integer based indexing. See\n :ref:`indexing.missing_coordinates` for an example.\n - :py:attr:`~Dataset.indexes` is no longer guaranteed to include all\n dimensions names as keys. The new method :py:meth:`~Dataset.get_index` has\n been added to get an index for a dimension guaranteed, falling back to\n produce a default ``RangeIndex`` if necessary.\n\n- The default behavior of ``merge`` is now ``compat='no_conflicts'``, so some\n merges will now succeed in cases that previously raised\n ``xarray.MergeError``. Set ``compat='broadcast_equals'`` to restore the\n previous default. See :ref:`combining.no_conflicts` for more details.\n\n- Reading :py:attr:`~DataArray.values` no longer always caches values in a NumPy\n array :issue:`1128`. Caching of ``.values`` on variables read from netCDF\n files on disk is still the default when :py:func:`open_dataset` is called with\n ``cache=True``.\n By `Guido Imperiale `_ and\n `Stephan Hoyer `_.\n- Pickling a ``Dataset`` or ``DataArray`` linked to a file on disk no longer\n caches its values into memory before pickling (:issue:`1128`). Instead, pickle\n stores file paths and restores objects by reopening file references. This\n enables preliminary, experimental use of xarray for opening files with\n `dask.distributed `_.\n By `Stephan Hoyer `_.\n- Coordinates used to index a dimension are now loaded eagerly into\n :py:class:`pandas.Index` objects, instead of loading the values lazily.\n By `Guido Imperiale `_.\n- Automatic levels for 2d plots are now guaranteed to land on ``vmin`` and\n ``vmax`` when these kwargs are explicitly provided (:issue:`1191`). The\n automated level selection logic also slightly changed.\n By `Fabien Maussion `_.\n\n- ``DataArray.rename()`` behavior changed to strictly change the ``DataArray.name``\n if called with string argument, or strictly change coordinate names if called with\n dict-like argument.\n By `Markus Gonser `_.\n\n- By default ``to_netcdf()`` add a ``_FillValue = NaN`` attributes to float types.\n By `Frederic Laliberte `_.\n\n- ``repr`` on ``DataArray`` objects uses an shortened display for NumPy array\n data that is less likely to overflow onto multiple pages (:issue:`1207`).\n By `Stephan Hoyer `_.\n\n- xarray no longer supports python 3.3, versions of dask prior to v0.9.0,\n or versions of bottleneck prior to v1.0.\n\nDeprecations\n~~~~~~~~~~~~\n\n- Renamed the ``Coordinate`` class from xarray's low level API to\n :py:class:`~xarray.IndexVariable`. ``Variable.to_variable`` and\n ``Variable.to_coord`` have been renamed to\n :py:meth:`~xarray.Variable.to_base_variable` and\n :py:meth:`~xarray.Variable.to_index_variable`.\n- Deprecated supplying ``coords`` as a dictionary to the ``DataArray``\n constructor without also supplying an explicit ``dims`` argument. 
The old\n behavior encouraged relying on the iteration order of dictionaries, which is\n a bad practice (:issue:`727`).\n- Removed a number of methods deprecated since v0.7.0 or earlier:\n ``load_data``, ``vars``, ``drop_vars``, ``dump``, ``dumps`` and the\n ``variables`` keyword argument to ``Dataset``.\n- Removed the dummy module that enabled ``import xray``.\n\nEnhancements\n~~~~~~~~~~~~\n\n- Added new method :py:meth:`~DataArray.combine_first` to ``DataArray`` and\n ``Dataset``, based on the pandas method of the same name (see :ref:`combine`).\n By `Chun-Wei Yuan `_.\n\n- Added the ability to change default automatic alignment (arithmetic_join=\"inner\")\n for binary operations via :py:func:`~xarray.set_options()`\n (see :ref:`math automatic alignment`).\n By `Chun-Wei Yuan `_.\n\n- Add checking of ``attr`` names and values when saving to netCDF, raising useful\n error messages if they are invalid. (:issue:`911`).\n By `Robin Wilson `_.\n- Added ability to save ``DataArray`` objects directly to netCDF files using\n :py:meth:`~xarray.DataArray.to_netcdf`, and to load directly from netCDF files\n using :py:func:`~xarray.open_dataarray` (:issue:`915`). These remove the need\n to convert a ``DataArray`` to a ``Dataset`` before saving as a netCDF file,\n and deals with names to ensure a perfect 'roundtrip' capability.\n By `Robin Wilson `_.\n- Multi-index levels are now accessible as \"virtual\" coordinate variables,\n e.g., ``ds['time']`` can pull out the ``'time'`` level of a multi-index\n (see :ref:`coordinates`). ``sel`` also accepts providing multi-index levels\n as keyword arguments, e.g., ``ds.sel(time='2000-01')``\n (see :ref:`multi-level indexing`).\n By `Benoit Bovy `_.\n- Added ``set_index``, ``reset_index`` and ``reorder_levels`` methods to\n easily create and manipulate (multi-)indexes (see :ref:`reshape.set_index`).\n By `Benoit Bovy `_.\n- Added the ``compat`` option ``'no_conflicts'`` to ``merge``, allowing the\n combination of xarray objects with disjoint (:issue:`742`) or\n overlapping (:issue:`835`) coordinates as long as all present data agrees.\n By `Johnnie Gray `_. See\n :ref:`combining.no_conflicts` for more details.\n- It is now possible to set ``concat_dim=None`` explicitly in\n :py:func:`~xarray.open_mfdataset` to disable inferring a dimension along\n which to concatenate.\n By `Stephan Hoyer `_.\n- Added methods :py:meth:`DataArray.compute`, :py:meth:`Dataset.compute`, and\n :py:meth:`Variable.compute` as a non-mutating alternative to\n :py:meth:`~DataArray.load`.\n By `Guido Imperiale `_.\n- Adds DataArray and Dataset methods :py:meth:`~xarray.DataArray.cumsum` and\n :py:meth:`~xarray.DataArray.cumprod`. By `Phillip J. 
Wolfram\n `_.\n\n- New properties :py:attr:`Dataset.sizes` and :py:attr:`DataArray.sizes` for\n providing consistent access to dimension length on both ``Dataset`` and\n ``DataArray`` (:issue:`921`).\n By `Stephan Hoyer `_.\n- New keyword argument ``drop=True`` for :py:meth:`~DataArray.sel`,\n :py:meth:`~DataArray.isel` and :py:meth:`~DataArray.squeeze` for dropping\n scalar coordinates that arise from indexing.\n ``DataArray`` (:issue:`242`).\n By `Stephan Hoyer `_.\n\n- New top-level functions :py:func:`~xarray.full_like`,\n :py:func:`~xarray.zeros_like`, and :py:func:`~xarray.ones_like`\n By `Guido Imperiale `_.\n- Overriding a preexisting attribute with\n :py:func:`~xarray.register_dataset_accessor` or\n :py:func:`~xarray.register_dataarray_accessor` now issues a warning instead of\n raising an error (:issue:`1082`).\n By `Stephan Hoyer `_.\n- Options for axes sharing between subplots are exposed to\n :py:class:`FacetGrid` and :py:func:`~xarray.plot.plot`, so axes\n sharing can be disabled for polar plots.\n By `Bas Hoonhout `_.\n- New utility functions :py:func:`~xarray.testing.assert_equal`,\n :py:func:`~xarray.testing.assert_identical`, and\n :py:func:`~xarray.testing.assert_allclose` for asserting relationships\n between xarray objects, designed for use in a pytest test suite.\n- ``figsize``, ``size`` and ``aspect`` plot arguments are now supported for all\n plots (:issue:`897`). See :ref:`plotting.figsize` for more details.\n By `Stephan Hoyer `_ and\n `Fabien Maussion `_.\n- New :py:meth:`~Dataset.info` method to summarize ``Dataset`` variables\n and attributes. The method prints to a buffer (e.g. ``stdout``) with output\n similar to what the command line utility ``ncdump -h`` produces (:issue:`1150`).\n By `Joe Hamman `_.\n- Added the ability write unlimited netCDF dimensions with the ``scipy`` and\n ``netcdf4`` backends via the new :py:attr:`~xray.Dataset.encoding` attribute\n or via the ``unlimited_dims`` argument to :py:meth:`~xray.Dataset.to_netcdf`.\n By `Joe Hamman `_.\n- New :py:meth:`~DataArray.quantile` method to calculate quantiles from\n DataArray objects (:issue:`1187`).\n By `Joe Hamman `_.\n\n\nBug fixes\n~~~~~~~~~\n- ``groupby_bins`` now restores empty bins by default (:issue:`1019`).\n By `Ryan Abernathey `_.\n\n- Fix issues for dates outside the valid range of pandas timestamps\n (:issue:`975`). By `Mathias Hauser `_.\n\n- Unstacking produced flipped array after stacking decreasing coordinate values\n (:issue:`980`).\n By `Stephan Hoyer `_.\n\n- Setting ``dtype`` via the ``encoding`` parameter of ``to_netcdf`` failed if\n the encoded dtype was the same as the dtype of the original array\n (:issue:`873`).\n By `Stephan Hoyer `_.\n\n- Fix issues with variables where both attributes ``_FillValue`` and\n ``missing_value`` are set to ``NaN`` (:issue:`997`).\n By `Marco Zühlke `_.\n\n- ``.where()`` and ``.fillna()`` now preserve attributes (:issue:`1009`).\n By `Fabien Maussion `_.\n\n- Applying :py:func:`broadcast()` to an xarray object based on the dask backend\n won't accidentally convert the array from dask to numpy anymore (:issue:`978`).\n By `Guido Imperiale `_.\n\n- ``Dataset.concat()`` now preserves variables order (:issue:`1027`).\n By `Fabien Maussion `_.\n\n- Fixed an issue with pcolormesh (:issue:`781`). 
A new\n ``infer_intervals`` keyword gives control on whether the cell intervals\n should be computed or not.\n By `Fabien Maussion `_.\n\n- Grouping over an dimension with non-unique values with ``groupby`` gives\n correct groups.\n By `Stephan Hoyer `_.\n\n- Fixed accessing coordinate variables with non-string names from ``.coords``.\n By `Stephan Hoyer `_.\n\n- :py:meth:`~xarray.DataArray.rename` now simultaneously renames the array and\n any coordinate with the same name, when supplied via a :py:class:`dict`\n (:issue:`1116`).\n By `Yves Delley `_.\n\n- Fixed sub-optimal performance in certain operations with object arrays (:issue:`1121`).\n By `Yves Delley `_.\n\n- Fix ``.groupby(group)`` when ``group`` has datetime dtype (:issue:`1132`).\n By `Jonas Sølvsteen `_.\n\n- Fixed a bug with facetgrid (the ``norm`` keyword was ignored, :issue:`1159`).\n By `Fabien Maussion `_.\n\n- Resolved a concurrency bug that could cause Python to crash when\n simultaneously reading and writing netCDF4 files with dask (:issue:`1172`).\n By `Stephan Hoyer `_.\n\n- Fix to make ``.copy()`` actually copy dask arrays, which will be relevant for\n future releases of dask in which dask arrays will be mutable (:issue:`1180`).\n By `Stephan Hoyer `_.\n\n- Fix opening NetCDF files with multi-dimensional time variables\n (:issue:`1229`).\n By `Stephan Hoyer `_.\n\nPerformance improvements\n~~~~~~~~~~~~~~~~~~~~~~~~\n\n- :py:meth:`~xarray.Dataset.isel_points` and\n :py:meth:`~xarray.Dataset.sel_points` now use vectorised indexing in numpy\n and dask (:issue:`1161`), which can result in several orders of magnitude\n speedup.\n By `Jonathan Chambers `_.\n\n.. _whats-new.0.8.2:\n\nv0.8.2 (18 August 2016)\n-----------------------\n\nThis release includes a number of bug fixes and minor enhancements.\n\nBreaking changes\n~~~~~~~~~~~~~~~~\n\n- :py:func:`~xarray.broadcast` and :py:func:`~xarray.concat` now auto-align\n inputs, using ``join=outer``. Previously, these functions raised\n ``ValueError`` for non-aligned inputs.\n By `Guido Imperiale `_.\n\nEnhancements\n~~~~~~~~~~~~\n\n- New documentation on :ref:`panel transition`. By\n `Maximilian Roos `_.\n- New ``Dataset`` and ``DataArray`` methods :py:meth:`~xarray.Dataset.to_dict`\n and :py:meth:`~xarray.Dataset.from_dict` to allow easy conversion between\n dictionaries and xarray objects (:issue:`432`). See\n :ref:`dictionary IO` for more details.\n By `Julia Signell `_.\n- Added ``exclude`` and ``indexes`` optional parameters to :py:func:`~xarray.align`,\n and ``exclude`` optional parameter to :py:func:`~xarray.broadcast`.\n By `Guido Imperiale `_.\n- Better error message when assigning variables without dimensions\n (:issue:`971`). By `Stephan Hoyer `_.\n- Better error message when reindex/align fails due to duplicate index values\n (:issue:`956`). By `Stephan Hoyer `_.\n\nBug fixes\n~~~~~~~~~\n\n- Ensure xarray works with h5netcdf v0.3.0 for arrays with ``dtype=str``\n (:issue:`953`). By `Stephan Hoyer `_.\n- ``Dataset.__dir__()`` (i.e. the method python calls to get autocomplete\n options) failed if one of the dataset's keys was not a string (:issue:`852`).\n By `Maximilian Roos `_.\n- ``Dataset`` constructor can now take arbitrary objects as values\n (:issue:`647`). 
By `Maximilian Roos `_.\n- Clarified ``copy`` argument for :py:meth:`~xarray.DataArray.reindex` and\n :py:func:`~xarray.align`, which now consistently always return new xarray\n objects (:issue:`927`).\n- Fix ``open_mfdataset`` with ``engine='pynio'`` (:issue:`936`).\n By `Stephan Hoyer `_.\n- ``groupby_bins`` sorted bin labels as strings (:issue:`952`).\n By `Stephan Hoyer `_.\n- Fix bug introduced by v0.8.0 that broke assignment to datasets when both the\n left and right side have the same non-unique index values (:issue:`956`).\n\n.. _whats-new.0.8.1:\n\nv0.8.1 (5 August 2016)\n----------------------\n\nBug fixes\n~~~~~~~~~\n\n- Fix bug in v0.8.0 that broke assignment to Datasets with non-unique\n indexes (:issue:`943`). By `Stephan Hoyer `_.\n\n.. _whats-new.0.8.0:\n\nv0.8.0 (2 August 2016)\n----------------------\n\nThis release includes four months of new features and bug fixes, including\nseveral breaking changes.\n\n.. _v0.8.0.breaking:\n\nBreaking changes\n~~~~~~~~~~~~~~~~\n\n- Dropped support for Python 2.6 (:issue:`855`).\n- Indexing on multi-index now drop levels, which is consistent with pandas.\n It also changes the name of the dimension / coordinate when the multi-index is\n reduced to a single index (:issue:`802`).\n- Contour plots no longer add a colorbar per default (:issue:`866`). Filled\n contour plots are unchanged.\n- ``DataArray.values`` and ``.data`` now always returns an NumPy array-like\n object, even for 0-dimensional arrays with object dtype (:issue:`867`).\n Previously, ``.values`` returned native Python objects in such cases. To\n convert the values of scalar arrays to Python objects, use the ``.item()``\n method.\n\nEnhancements\n~~~~~~~~~~~~\n\n- Groupby operations now support grouping over multidimensional variables. A new\n method called :py:meth:`~xarray.Dataset.groupby_bins` has also been added to\n allow users to specify bins for grouping. The new features are described in\n :ref:`groupby.multidim` and :ref:`examples.multidim`.\n By `Ryan Abernathey `_.\n\n- DataArray and Dataset method :py:meth:`where` now supports a ``drop=True``\n option that clips coordinate elements that are fully masked. By\n `Phillip J. Wolfram `_.\n\n- New top level :py:func:`merge` function allows for combining variables from\n any number of ``Dataset`` and/or ``DataArray`` variables. See :ref:`merge`\n for more details. By `Stephan Hoyer `_.\n\n- DataArray and Dataset method :py:meth:`resample` now supports the\n ``keep_attrs=False`` option that determines whether variable and dataset\n attributes are retained in the resampled object. By\n `Jeremy McGibbon `_.\n\n- Better multi-index support in DataArray and Dataset :py:meth:`sel` and\n :py:meth:`loc` methods, which now behave more closely to pandas and which\n also accept dictionaries for indexing based on given level names and labels\n (see :ref:`multi-level indexing`). By\n `Benoit Bovy `_.\n\n- New (experimental) decorators :py:func:`~xarray.register_dataset_accessor` and\n :py:func:`~xarray.register_dataarray_accessor` for registering custom xarray\n extensions without subclassing. They are described in the new documentation\n page on :ref:`internals`. By `Stephan Hoyer `_.\n\n- Round trip boolean datatypes. Previously, writing boolean datatypes to netCDF\n formats would raise an error since netCDF does not have a `bool` datatype.\n This feature reads/writes a `dtype` attribute to boolean variables in netCDF\n files. 
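\n\n For example (a sketch; the file and variable names are hypothetical):\n\n .. ipython::\n :verbatim:\n\n In [1]: ds = xr.Dataset({'flag': ('x', np.array([True, False, True]))})\n\n In [2]: ds.to_netcdf('flags.nc')\n\n 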
By `Joe Hamman `_.\n\n- 2D plotting methods now have two new keywords (`cbar_ax` and `cbar_kwargs`),\n allowing more control on the colorbar (:issue:`872`).\n By `Fabien Maussion `_.\n\n- New Dataset method :py:meth:`filter_by_attrs`, akin to\n ``netCDF4.Dataset.get_variables_by_attributes``, to easily filter\n data variables using its attributes.\n `Filipe Fernandes `_.\n\nBug fixes\n~~~~~~~~~\n\n- Attributes were being retained by default for some resampling\n operations when they should not. With the ``keep_attrs=False`` option, they\n will no longer be retained by default. This may be backwards-incompatible\n with some scripts, but the attributes may be kept by adding the\n ``keep_attrs=True`` option. By\n `Jeremy McGibbon `_.\n\n- Concatenating xarray objects along an axis with a MultiIndex or PeriodIndex\n preserves the nature of the index (:issue:`875`). By\n `Stephan Hoyer `_.\n\n- Fixed bug in arithmetic operations on DataArray objects whose dimensions\n are numpy structured arrays or recarrays :issue:`861`, :issue:`837`. By\n `Maciek Swat `_.\n\n- ``decode_cf_timedelta`` now accepts arrays with ``ndim`` >1 (:issue:`842`).\n This fixes issue :issue:`665`.\n `Filipe Fernandes `_.\n\n- Fix a bug where `xarray.ufuncs` that take two arguments would incorrectly\n use to numpy functions instead of dask.array functions (:issue:`876`). By\n `Stephan Hoyer `_.\n\n- Support for pickling functions from ``xarray.ufuncs`` (:issue:`901`). By\n `Stephan Hoyer `_.\n\n- ``Variable.copy(deep=True)`` no longer converts MultiIndex into a base Index\n (:issue:`769`). By `Benoit Bovy `_.\n\n- Fixes for groupby on dimensions with a multi-index (:issue:`867`). By\n `Stephan Hoyer `_.\n\n- Fix printing datasets with unicode attributes on Python 2 (:issue:`892`). By\n `Stephan Hoyer `_.\n\n- Fixed incorrect test for dask version (:issue:`891`). By\n `Stephan Hoyer `_.\n\n- Fixed `dim` argument for `isel_points`/`sel_points` when a `pandas.Index` is\n passed. By `Stephan Hoyer `_.\n\n- :py:func:`~xarray.plot.contour` now plots the correct number of contours\n (:issue:`866`). By `Fabien Maussion `_.\n\n.. _whats-new.0.7.2:\n\nv0.7.2 (13 March 2016)\n----------------------\n\nThis release includes two new, entirely backwards compatible features and\nseveral bug fixes.\n\nEnhancements\n~~~~~~~~~~~~\n\n- New DataArray method :py:meth:`DataArray.dot` for calculating the dot\n product of two DataArrays along shared dimensions. By\n `Dean Pospisil `_.\n\n- Rolling window operations on DataArray objects are now supported via a new\n :py:meth:`DataArray.rolling` method. For example:\n\n .. ipython::\n :verbatim:\n\n In [1]: import xarray as xr; import numpy as np\n\n In [2]: arr = xr.DataArray(np.arange(0, 7.5, 0.5).reshape(3, 5),\n dims=('x', 'y'))\n\n In [3]: arr\n Out[3]:\n \n array([[ 0. , 0.5, 1. , 1.5, 2. ],\n [ 2.5, 3. , 3.5, 4. , 4.5],\n [ 5. , 5.5, 6. , 6.5, 7. ]])\n Coordinates:\n * x (x) int64 0 1 2\n * y (y) int64 0 1 2 3 4\n\n In [4]: arr.rolling(y=3, min_periods=2).mean()\n Out[4]:\n \n array([[ nan, 0.25, 0.5 , 1. , 1.5 ],\n [ nan, 2.75, 3. , 3.5 , 4. ],\n [ nan, 5.25, 5.5 , 6. , 6.5 ]])\n Coordinates:\n * x (x) int64 0 1 2\n * y (y) int64 0 1 2 3 4\n\n See :ref:`comput.rolling` for more details. By\n `Joe Hamman `_.\n\nBug fixes\n~~~~~~~~~\n\n- Fixed an issue where plots using pcolormesh and Cartopy axes were being distorted\n by the inference of the axis interval breaks. 
This change chooses not to modify\n the coordinate variables when the axes have the attribute ``projection``, allowing\n Cartopy to handle the extent of pcolormesh plots (:issue:`781`). By\n `Joe Hamman `_.\n\n- 2D plots now better handle additional coordinates which are not ``DataArray``\n dimensions (:issue:`788`). By `Fabien Maussion `_.\n\n\n.. _whats-new.0.7.1:\n\nv0.7.1 (16 February 2016)\n-------------------------\n\nThis is a bug fix release that includes two small, backwards compatible enhancements.\nWe recommend that all users upgrade.\n\nEnhancements\n~~~~~~~~~~~~\n\n- Numerical operations now return empty objects on no overlapping labels rather\n than raising ``ValueError`` (:issue:`739`).\n- :py:class:`~pd.Series` is now supported as valid input to the ``Dataset``\n constructor (:issue:`740`).\n\nBug fixes\n~~~~~~~~~\n\n- Restore checks for shape consistency between data and coordinates in the\n DataArray constructor (:issue:`758`).\n- Single dimension variables no longer transpose as part of a broader\n ``.transpose``. This behavior was causing ``pandas.PeriodIndex`` dimensions\n to lose their type (:issue:`749`)\n- :py:class:`~xarray.Dataset` labels remain as their native type on ``.to_dataset``.\n Previously they were coerced to strings (:issue:`745`)\n- Fixed a bug where replacing a ``DataArray`` index coordinate would improperly\n align the coordinate (:issue:`725`).\n- ``DataArray.reindex_like`` now maintains the dtype of complex numbers when\n reindexing leads to NaN values (:issue:`738`).\n- ``Dataset.rename`` and ``DataArray.rename`` support the old and new names\n being the same (:issue:`724`).\n- Fix :py:meth:`~xarray.Dataset.from_dataset` for DataFrames with Categorical\n column and a MultiIndex index (:issue:`737`).\n- Fixes to ensure xarray works properly after the upcoming pandas v0.18 and\n NumPy v1.11 releases.\n\nAcknowledgments\n~~~~~~~~~~~~~~~\n\nThe following individuals contributed to this release:\n\n- Edward Richards\n- Maximilian Roos\n- Rafael Guedes\n- Spencer Hill\n- Stephan Hoyer\n\n.. _whats-new.0.7.0:\n\nv0.7.0 (21 January 2016)\n------------------------\n\nThis major release includes redesign of :py:class:`~xarray.DataArray`\ninternals, as well as new methods for reshaping, rolling and shifting\ndata. It includes preliminary support for :py:class:`pandas.MultiIndex`,\nas well as a number of other features and bug fixes, several of which\noffer improved compatibility with pandas.\n\nNew name\n~~~~~~~~\n\nThe project formerly known as \"xray\" is now \"xarray\", pronounced \"x-array\"!\nThis avoids a namespace conflict with the entire field of x-ray science. Renaming\nour project seemed like the right thing to do, especially because some\nscientists who work with actual x-rays are interested in using this project in\ntheir work. Thanks for your understanding and patience in this transition. You\ncan now find our documentation and code repository at new URLs:\n\n- http://xarray.pydata.org\n- http://github.com/pydata/xarray/\n\nTo ease the transition, we have simultaneously released v0.7.0 of both\n``xray`` and ``xarray`` on the Python Package Index. These packages are\nidentical. For now, ``import xray`` still works, except it issues a\ndeprecation warning. This will be the last xray release. Going forward, we\nrecommend switching your import statements to ``import xarray as xr``.\n\n.. 
_v0.7.0.breaking:\n\nBreaking changes\n~~~~~~~~~~~~~~~~\n\n- The internal data model used by :py:class:`~xray.DataArray` has been\n rewritten to fix several outstanding issues (:issue:`367`, :issue:`634`,\n `this stackoverflow report`_). Internally, ``DataArray`` is now implemented\n in terms of ``._variable`` and ``._coords`` attributes instead of holding\n variables in a ``Dataset`` object.\n\n This refactor ensures that if a DataArray has the\n same name as one of its coordinates, the array and the coordinate no longer\n share the same data.\n\n In practice, this means that creating a DataArray with the same ``name`` as\n one of its dimensions no longer automatically uses that array to label the\n corresponding coordinate. You will now need to provide coordinate labels\n explicitly. Here's the old behavior:\n\n .. ipython::\n :verbatim:\n\n In [2]: xray.DataArray([4, 5, 6], dims='x', name='x')\n Out[2]:\n \n array([4, 5, 6])\n Coordinates:\n * x (x) int64 4 5 6\n\n and the new behavior (compare the values of the ``x`` coordinate):\n\n .. ipython::\n :verbatim:\n\n In [2]: xray.DataArray([4, 5, 6], dims='x', name='x')\n Out[2]:\n \n array([4, 5, 6])\n Coordinates:\n * x (x) int64 0 1 2\n\n- It is no longer possible to convert a DataArray to a Dataset with\n :py:meth:`xray.DataArray.to_dataset` if it is unnamed. This will now\n raise ``ValueError``. If the array is unnamed, you need to supply the\n ``name`` argument.\n\n.. _this stackoverflow report: http://stackoverflow.com/questions/33158558/python-xray-extract-first-and-last-time-value-within-each-month-of-a-timeseries\n\nEnhancements\n~~~~~~~~~~~~\n\n- Basic support for :py:class:`~pandas.MultiIndex` coordinates on xray objects, including\n indexing, :py:meth:`~DataArray.stack` and :py:meth:`~DataArray.unstack`:\n\n .. ipython::\n :verbatim:\n\n In [7]: df = pd.DataFrame({'foo': range(3),\n ...: 'x': ['a', 'b', 'b'],\n ...: 'y': [0, 0, 1]})\n\n In [8]: s = df.set_index(['x', 'y'])['foo']\n\n In [12]: arr = xray.DataArray(s, dims='z')\n\n In [13]: arr\n Out[13]:\n \n array([0, 1, 2])\n Coordinates:\n * z (z) object ('a', 0) ('b', 0) ('b', 1)\n\n In [19]: arr.indexes['z']\n Out[19]:\n MultiIndex(levels=[[u'a', u'b'], [0, 1]],\n labels=[[0, 1, 1], [0, 0, 1]],\n names=[u'x', u'y'])\n\n In [14]: arr.unstack('z')\n Out[14]:\n \n array([[ 0., nan],\n [ 1., 2.]])\n Coordinates:\n * x (x) object 'a' 'b'\n * y (y) int64 0 1\n\n In [26]: arr.unstack('z').stack(z=('x', 'y'))\n Out[26]:\n \n array([ 0., nan, 1., 2.])\n Coordinates:\n * z (z) object ('a', 0) ('a', 1) ('b', 0) ('b', 1)\n\n See :ref:`reshape.stack` for more details.\n\n .. warning::\n\n xray's MultiIndex support is still experimental, and we have a long to-\n do list of desired additions (:issue:`719`), including better display of\n multi-index levels when printing a ``Dataset``, and support for saving\n datasets with a MultiIndex to a netCDF file. User contributions in this\n area would be greatly appreciated.\n\n- Support for reading GRIB, HDF4 and other file formats via PyNIO_. See\n :ref:`io.pynio` for more details.\n- Better error message when a variable is supplied with the same name as\n one of its dimensions.\n- Plotting: more control on colormap parameters (:issue:`642`). ``vmin`` and\n ``vmax`` will not be silently ignored anymore. Setting ``center=False``\n prevents automatic selection of a divergent colormap.\n- New :py:meth:`~xray.Dataset.shift` and :py:meth:`~xray.Dataset.roll` methods\n for shifting/rotating datasets or arrays along a dimension:\n\n .. 
ipython:: python\n :okwarning:\n\n array = xray.DataArray([5, 6, 7, 8], dims='x')\n array.shift(x=2)\n array.roll(x=2)\n\n Notice that ``shift`` moves data independently of coordinates, but ``roll``\n moves both data and coordinates.\n- Assigning a ``pandas`` object directly as a ``Dataset`` variable is now permitted. Its\n index names correspond to the ``dims`` of the ``Dataset``, and its data is aligned.\n- Passing a :py:class:`pandas.DataFrame` or :py:class:`pandas.Panel` to a Dataset constructor\n is now permitted.\n- New function :py:func:`~xray.broadcast` for explicitly broadcasting\n ``DataArray`` and ``Dataset`` objects against each other. For example:\n\n .. ipython:: python\n\n a = xray.DataArray([1, 2, 3], dims='x')\n b = xray.DataArray([5, 6], dims='y')\n a\n b\n a2, b2 = xray.broadcast(a, b)\n a2\n b2\n\n.. _PyNIO: https://www.pyngl.ucar.edu/Nio.shtml\n\nBug fixes\n~~~~~~~~~\n\n- Fixes for several issues found on ``DataArray`` objects with the same name\n as one of their coordinates (see :ref:`v0.7.0.breaking` for more details).\n- ``DataArray.to_masked_array`` always returns masked array with mask being an\n array (not a scalar value) (:issue:`684`)\n- Allows for (imperfect) repr of Coords when underlying index is PeriodIndex (:issue:`645`).\n- Fixes for several issues found on ``DataArray`` objects with the same name\n as one of their coordinates (see :ref:`v0.7.0.breaking` for more details).\n- Attempting to assign a ``Dataset`` or ``DataArray`` variable/attribute using\n attribute-style syntax (e.g., ``ds.foo = 42``) now raises an error rather\n than silently failing (:issue:`656`, :issue:`714`).\n- You can now pass pandas objects with non-numpy dtypes (e.g., ``categorical``\n or ``datetime64`` with a timezone) into xray without an error\n (:issue:`716`).\n\nAcknowledgments\n~~~~~~~~~~~~~~~\n\nThe following individuals contributed to this release:\n\n- Antony Lee\n- Fabien Maussion\n- Joe Hamman\n- Maximilian Roos\n- Stephan Hoyer\n- Takeshi Kanmae\n- femtotrader\n\nv0.6.1 (21 October 2015)\n------------------------\n\nThis release contains a number of bug and compatibility fixes, as well\nas enhancements to plotting, indexing and writing files to disk.\n\nNote that the minimum required version of dask for use with xray is now\nversion 0.6.\n\nAPI Changes\n~~~~~~~~~~~\n\n- The handling of colormaps and discrete color lists for 2D plots in\n :py:meth:`~xray.DataArray.plot` was changed to provide more compatibility\n with matplotlib's ``contour`` and ``contourf`` functions (:issue:`538`).\n Now discrete lists of colors should be specified using ``colors`` keyword,\n rather than ``cmap``.\n\nEnhancements\n~~~~~~~~~~~~\n\n- Faceted plotting through :py:class:`~xray.plot.FacetGrid` and the\n :py:meth:`~xray.plot.plot` method. See :ref:`plotting.faceting` for more details\n and examples.\n- :py:meth:`~xray.Dataset.sel` and :py:meth:`~xray.Dataset.reindex` now support\n the ``tolerance`` argument for controlling nearest-neighbor selection\n (:issue:`629`):\n\n .. 
ipython::\n :verbatim:\n\n In [5]: array = xray.DataArray([1, 2, 3], dims='x')\n\n In [6]: array.reindex(x=[0.9, 1.5], method='nearest', tolerance=0.2)\n Out[6]:\n \n array([ 2., nan])\n Coordinates:\n * x (x) float64 0.9 1.5\n\n This feature requires pandas v0.17 or newer.\n- New ``encoding`` argument in :py:meth:`~xray.Dataset.to_netcdf` for writing\n netCDF files with compression, as described in the new documentation\n section on :ref:`io.netcdf.writing_encoded`.\n- Add :py:attr:`~xray.Dataset.real` and :py:attr:`~xray.Dataset.imag`\n attributes to Dataset and DataArray (:issue:`553`).\n- More informative error message with :py:meth:`~xray.Dataset.from_dataframe`\n if the frame has duplicate columns.\n- xray now uses deterministic names for dask arrays it creates or opens from\n disk. This allows xray users to take advantage of dask's nascent support for\n caching intermediate computation results. See :issue:`555` for an example.\n\nBug fixes\n~~~~~~~~~\n\n- Forwards compatibility with the latest pandas release (v0.17.0). We were\n using some internal pandas routines for datetime conversion, which\n unfortunately have now changed upstream (:issue:`569`).\n- Aggregation functions now correctly skip ``NaN`` for data for ``complex128``\n dtype (:issue:`554`).\n- Fixed indexing 0d arrays with unicode dtype (:issue:`568`).\n- :py:meth:`~xray.DataArray.name` and Dataset keys must be a string or None to\n be written to netCDF (:issue:`533`).\n- :py:meth:`~xray.DataArray.where` now uses dask instead of numpy if either the\n array or ``other`` is a dask array. Previously, if ``other`` was a numpy array\n the method was evaluated eagerly.\n- Global attributes are now handled more consistently when loading remote\n datasets using ``engine='pydap'`` (:issue:`574`).\n- It is now possible to assign to the ``.data`` attribute of DataArray objects.\n- ``coordinates`` attribute is now kept in the encoding dictionary after\n decoding (:issue:`610`).\n- Compatibility with numpy 1.10 (:issue:`617`).\n\nAcknowledgments\n~~~~~~~~~~~~~~~\n\nThe following individuals contributed to this release:\n\n- Ryan Abernathey\n- Pete Cable\n- Clark Fitzgerald\n- Joe Hamman\n- Stephan Hoyer\n- Scott Sinclair\n\nv0.6.0 (21 August 2015)\n-----------------------\n\nThis release includes numerous bug fixes and enhancements. Highlights\ninclude the introduction of a plotting module and the new Dataset and DataArray\nmethods :py:meth:`~xray.Dataset.isel_points`, :py:meth:`~xray.Dataset.sel_points`,\n:py:meth:`~xray.Dataset.where` and :py:meth:`~xray.Dataset.diff`. There are no\nbreaking changes from v0.5.2.\n\nEnhancements\n~~~~~~~~~~~~\n\n- Plotting methods have been implemented on DataArray objects\n :py:meth:`~xray.DataArray.plot` through integration with matplotlib\n (:issue:`185`). For an introduction, see :ref:`plotting`.\n- Variables in netCDF files with multiple missing values are now decoded as NaN\n after issuing a warning if open_dataset is called with mask_and_scale=True.\n- We clarified our rules for when the result from an xray operation is a copy\n vs. a view (see :ref:`copies vs views` for more details).\n- Dataset variables are now written to netCDF files in order of appearance\n when using the netcdf4 backend (:issue:`479`).\n\n- Added :py:meth:`~xray.Dataset.isel_points` and :py:meth:`~xray.Dataset.sel_points`\n to support pointwise indexing of Datasets and DataArrays (:issue:`475`).\n\n .. 
ipython::\n :verbatim:\n\n In [1]: da = xray.DataArray(np.arange(56).reshape((7, 8)),\n ...: coords={'x': list('abcdefg'),\n ...: 'y': 10 * np.arange(8)},\n ...: dims=['x', 'y'])\n\n In [2]: da\n Out[2]:\n \n array([[ 0, 1, 2, 3, 4, 5, 6, 7],\n [ 8, 9, 10, 11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 28, 29, 30, 31],\n [32, 33, 34, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 45, 46, 47],\n [48, 49, 50, 51, 52, 53, 54, 55]])\n Coordinates:\n * y (y) int64 0 10 20 30 40 50 60 70\n * x (x) |S1 'a' 'b' 'c' 'd' 'e' 'f' 'g'\n\n # we can index by position along each dimension\n In [3]: da.isel_points(x=[0, 1, 6], y=[0, 1, 0], dim='points')\n Out[3]:\n \n array([ 0, 9, 48])\n Coordinates:\n y (points) int64 0 10 0\n x (points) |S1 'a' 'b' 'g'\n * points (points) int64 0 1 2\n\n # or equivalently by label\n In [9]: da.sel_points(x=['a', 'b', 'g'], y=[0, 10, 0], dim='points')\n Out[9]:\n \n array([ 0, 9, 48])\n Coordinates:\n y (points) int64 0 10 0\n x (points) |S1 'a' 'b' 'g'\n * points (points) int64 0 1 2\n\n- New :py:meth:`~xray.Dataset.where` method for masking xray objects according\n to some criteria. This works particularly well with multi-dimensional data:\n\n .. ipython:: python\n\n ds = xray.Dataset(coords={'x': range(100), 'y': range(100)})\n ds['distance'] = np.sqrt(ds.x ** 2 + ds.y ** 2)\n\n @savefig where_example.png width=4in height=4in\n ds.distance.where(ds.distance < 100).plot()\n\n- Added new methods :py:meth:`DataArray.diff `\n and :py:meth:`Dataset.diff ` for finite\n difference calculations along a given axis.\n\n- New :py:meth:`~xray.DataArray.to_masked_array` convenience method for\n returning a numpy.ma.MaskedArray.\n\n .. ipython:: python\n\n da = xray.DataArray(np.random.random_sample(size=(5, 4)))\n da.where(da < 0.5)\n da.where(da < 0.5).to_masked_array(copy=True)\n\n- Added new flag \"drop_variables\" to :py:meth:`~xray.open_dataset` for\n excluding variables from being parsed. This may be useful to drop\n variables with problems or inconsistent values.\n\nBug fixes\n~~~~~~~~~\n\n- Fixed aggregation functions (e.g., sum and mean) on big-endian arrays when\n bottleneck is installed (:issue:`489`).\n- Dataset aggregation functions dropped variables with unsigned integer dtype\n (:issue:`505`).\n- ``.any()`` and ``.all()`` were not lazy when used on xray objects containing\n dask arrays.\n- Fixed an error when attempting to saving datetime64 variables to netCDF\n files when the first element is ``NaT`` (:issue:`528`).\n- Fix pickle on DataArray objects (:issue:`515`).\n- Fixed unnecessary coercion of float64 to float32 when using netcdf3 and\n netcdf4_classic formats (:issue:`526`).\n\nv0.5.2 (16 July 2015)\n---------------------\n\nThis release contains bug fixes, several additional options for opening and\nsaving netCDF files, and a backwards incompatible rewrite of the advanced\noptions for ``xray.concat``.\n\nBackwards incompatible changes\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n- The optional arguments ``concat_over`` and ``mode`` in :py:func:`~xray.concat` have\n been removed and replaced by ``data_vars`` and ``coords``. The new arguments are both\n more easily understood and more robustly implemented, and allowed us to fix a bug\n where ``concat`` accidentally loaded data into memory. If you set values for\n these optional arguments manually, you will need to update your code. 
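For instance, a sketch of an updated call under the new keywords (the ``datasets`` list and the ``'time'`` dimension here are illustrative):\n\n .. code-block:: python\n\n # 'datasets' stands for any list of xray.Dataset objects\n combined = xray.concat(datasets, dim='time',\n data_vars='minimal', coords='minimal')\n\n 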
The default\n behavior should be unchanged.\n\nEnhancements\n~~~~~~~~~~~~\n\n- :py:func:`~xray.open_mfdataset` now supports a ``preprocess`` argument for\n preprocessing datasets prior to concatenaton. This is useful if datasets\n cannot be otherwise merged automatically, e.g., if the original datasets\n have conflicting index coordinates (:issue:`443`).\n- :py:func:`~xray.open_dataset` and :py:func:`~xray.open_mfdataset` now use a\n global thread lock by default for reading from netCDF files with dask. This\n avoids possible segmentation faults for reading from netCDF4 files when HDF5\n is not configured properly for concurrent access (:issue:`444`).\n- Added support for serializing arrays of complex numbers with `engine='h5netcdf'`.\n- The new :py:func:`~xray.save_mfdataset` function allows for saving multiple\n datasets to disk simultaneously. This is useful when processing large datasets\n with dask.array. For example, to save a dataset too big to fit into memory\n to one file per year, we could write:\n\n .. ipython::\n :verbatim:\n\n In [1]: years, datasets = zip(*ds.groupby('time.year'))\n\n In [2]: paths = ['%s.nc' % y for y in years]\n\n In [3]: xray.save_mfdataset(datasets, paths)\n\nBug fixes\n~~~~~~~~~\n\n- Fixed ``min``, ``max``, ``argmin`` and ``argmax`` for arrays with string or\n unicode types (:issue:`453`).\n- :py:func:`~xray.open_dataset` and :py:func:`~xray.open_mfdataset` support\n supplying chunks as a single integer.\n- Fixed a bug in serializing scalar datetime variable to netCDF.\n- Fixed a bug that could occur in serialization of 0-dimensional integer arrays.\n- Fixed a bug where concatenating DataArrays was not always lazy (:issue:`464`).\n- When reading datasets with h5netcdf, bytes attributes are decoded to strings.\n This allows conventions decoding to work properly on Python 3 (:issue:`451`).\n\nv0.5.1 (15 June 2015)\n---------------------\n\nThis minor release fixes a few bugs and an inconsistency with pandas. It also\nadds the ``pipe`` method, copied from pandas.\n\nEnhancements\n~~~~~~~~~~~~\n\n- Added :py:meth:`~xray.Dataset.pipe`, replicating the `new pandas method`_ in version\n 0.16.2. See :ref:`transforming datasets` for more details.\n- :py:meth:`~xray.Dataset.assign` and :py:meth:`~xray.Dataset.assign_coords`\n now assign new variables in sorted (alphabetical) order, mirroring the\n behavior in pandas. Previously, the order was arbitrary.\n\n.. _new pandas method: http://pandas.pydata.org/pandas-docs/version/0.16.2/whatsnew.html#pipe\n\nBug fixes\n~~~~~~~~~\n\n- ``xray.concat`` fails in an edge case involving identical coordinate variables (:issue:`425`)\n- We now decode variables loaded from netCDF3 files with the scipy engine using native\n endianness (:issue:`416`). This resolves an issue when aggregating these arrays with\n bottleneck installed.\n\nv0.5 (1 June 2015)\n------------------\n\nHighlights\n~~~~~~~~~~\n\nThe headline feature in this release is experimental support for out-of-core\ncomputing (data that doesn't fit into memory) with dask_. This includes a new\ntop-level function :py:func:`~xray.open_mfdataset` that makes it easy to open\na collection of netCDF (using dask) as a single ``xray.Dataset`` object. For\nmore on dask, read the `blog post introducing xray + dask`_ and the new\ndocumentation section :doc:`dask`.\n\n.. 
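_open-mfdataset-sketch:\n\nA minimal sketch of that workflow (the glob pattern and variable name are illustrative):\n\n.. code-block:: python\n\n # lazily open many netCDF files as a single dask-backed Dataset\n ds = xray.open_mfdataset('data/*.nc')\n # computations stay lazy until the values are actually needed\n monthly_means = ds['temperature'].groupby('time.month').mean('time')\n\n.. 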
_blog post introducing xray + dask: https://www.anaconda.com/blog/developer-blog/xray-dask-out-core-labeled-arrays-python/\n\nDask makes it possible to harness parallelism and manipulate gigantic datasets\nwith xray. It is currently an optional dependency, but it may become required\nin the future.\n\nBackwards incompatible changes\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n- The logic used for choosing which variables are concatenated with\n :py:func:`~xray.concat` has changed. Previously, by default any variables\n which were equal across a dimension were not concatenated. This lead to some\n surprising behavior, where the behavior of groupby and concat operations\n could depend on runtime values (:issue:`268`). For example:\n\n .. ipython::\n :verbatim:\n\n In [1]: ds = xray.Dataset({'x': 0})\n\n In [2]: xray.concat([ds, ds], dim='y')\n Out[2]:\n \n Dimensions: ()\n Coordinates:\n *empty*\n Data variables:\n x int64 0\n\n Now, the default always concatenates data variables:\n\n .. ipython:: python\n :suppress:\n\n ds = xray.Dataset({'x': 0})\n\n .. ipython:: python\n\n xray.concat([ds, ds], dim='y')\n\n To obtain the old behavior, supply the argument ``concat_over=[]``.\n\nEnhancements\n~~~~~~~~~~~~\n\n- New :py:meth:`~xray.Dataset.to_array` and enhanced\n :py:meth:`~xray.DataArray.to_dataset` methods make it easy to switch back\n and forth between arrays and datasets:\n\n .. ipython:: python\n\n ds = xray.Dataset({'a': 1, 'b': ('x', [1, 2, 3])},\n coords={'c': 42}, attrs={'Conventions': 'None'})\n ds.to_array()\n ds.to_array().to_dataset(dim='variable')\n\n- New :py:meth:`~xray.Dataset.fillna` method to fill missing values, modeled\n off the pandas method of the same name:\n\n .. ipython:: python\n\n array = xray.DataArray([np.nan, 1, np.nan, 3], dims='x')\n array.fillna(0)\n\n ``fillna`` works on both ``Dataset`` and ``DataArray`` objects, and uses\n index based alignment and broadcasting like standard binary operations. It\n also can be applied by group, as illustrated in\n :ref:`fill with climatology`.\n- New :py:meth:`~xray.Dataset.assign` and :py:meth:`~xray.Dataset.assign_coords`\n methods patterned off the new :py:meth:`DataFrame.assign `\n method in pandas:\n\n .. ipython:: python\n\n ds = xray.Dataset({'y': ('x', [1, 2, 3])})\n ds.assign(z = lambda ds: ds.y ** 2)\n ds.assign_coords(z = ('x', ['a', 'b', 'c']))\n\n These methods return a new Dataset (or DataArray) with updated data or\n coordinate variables.\n- :py:meth:`~xray.Dataset.sel` now supports the ``method`` parameter, which works\n like the paramter of the same name on :py:meth:`~xray.Dataset.reindex`. It\n provides a simple interface for doing nearest-neighbor interpolation:\n\n .. use verbatim because I can't seem to install pandas 0.16.1 on RTD :(\n\n .. ipython::\n :verbatim:\n\n In [12]: ds.sel(x=1.1, method='nearest')\n Out[12]:\n \n Dimensions: ()\n Coordinates:\n x int64 1\n Data variables:\n y int64 2\n\n In [13]: ds.sel(x=[1.1, 2.1], method='pad')\n Out[13]:\n \n Dimensions: (x: 2)\n Coordinates:\n * x (x) int64 1 2\n Data variables:\n y (x) int64 2 3\n\n See :ref:`nearest neighbor lookups` for more details.\n- You can now control the underlying backend used for accessing remote\n datasets (via OPeNDAP) by specifying ``engine='netcdf4'`` or\n ``engine='pydap'``.\n- xray now provides experimental support for reading and writing netCDF4 files directly\n via `h5py`_ with the `h5netcdf`_ package, avoiding the netCDF4-Python package. 
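A sketch of the round trip once h5netcdf is installed (the file name is illustrative and ``ds`` stands for any ``xray.Dataset``):\n\n .. code-block:: python\n\n ds.to_netcdf('output.nc', engine='h5netcdf') # write via h5py\n reopened = xray.open_dataset('output.nc', engine='h5netcdf') # read it back\n\n 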
You\n will need to install h5netcdf and specify ``engine='h5netcdf'`` to try this\n feature.\n- Accessing data from remote datasets now has retrying logic (with exponential\n backoff) that should make it robust to occasional bad responses from DAP\n servers.\n- You can control the width of the Dataset repr with :py:class:`xray.set_options`.\n It can be used either as a context manager, in which case the default is restored\n outside the context:\n\n .. ipython:: python\n\n ds = xray.Dataset({'x': np.arange(1000)})\n with xray.set_options(display_width=40):\n print(ds)\n\n Or to set a global option:\n\n .. ipython::\n :verbatim:\n\n In [1]: xray.set_options(display_width=80)\n\n The default value for the ``display_width`` option is 80.\n\n.. _h5py: http://www.h5py.org/\n.. _h5netcdf: https://github.com/shoyer/h5netcdf\n\nDeprecations\n~~~~~~~~~~~~\n\n- The method ``load_data()`` has been renamed to the more succinct\n :py:meth:`~xray.Dataset.load`.\n\nv0.4.1 (18 March 2015)\n----------------------\n\nThe release contains bug fixes and several new features. All changes should be\nfully backwards compatible.\n\nEnhancements\n~~~~~~~~~~~~\n\n- New documentation sections on :ref:`time-series` and\n :ref:`combining multiple files`.\n- :py:meth:`~xray.Dataset.resample` lets you resample a dataset or data array to\n a new temporal resolution. The syntax is the `same as pandas`_, except you\n need to supply the time dimension explicitly:\n\n .. ipython:: python\n :verbatim:\n\n time = pd.date_range('2000-01-01', freq='6H', periods=10)\n array = xray.DataArray(np.arange(10), [('time', time)])\n array.resample('1D', dim='time')\n\n You can specify how to do the resampling with the ``how`` argument and other\n options such as ``closed`` and ``label`` let you control labeling:\n\n .. ipython:: python\n :verbatim:\n\n array.resample('1D', dim='time', how='sum', label='right')\n\n If the desired temporal resolution is higher than the original data\n (upsampling), xray will insert missing values:\n\n .. ipython:: python\n :verbatim:\n\n array.resample('3H', 'time')\n\n- ``first`` and ``last`` methods on groupby objects let you take the first or\n last examples from each group along the grouped axis:\n\n .. ipython:: python\n :verbatim:\n\n array.groupby('time.day').first()\n\n These methods combine well with ``resample``:\n\n .. ipython:: python\n :verbatim:\n\n array.resample('1D', dim='time', how='first')\n\n\n- :py:meth:`~xray.Dataset.swap_dims` allows for easily swapping one dimension\n out for another:\n\n .. ipython:: python\n\n ds = xray.Dataset({'x': range(3), 'y': ('x', list('abc'))})\n ds\n ds.swap_dims({'x': 'y'})\n\n This was possible in earlier versions of xray, but required some contortions.\n- :py:func:`~xray.open_dataset` and :py:meth:`~xray.Dataset.to_netcdf` now\n accept an ``engine`` argument to explicitly select which underlying library\n (netcdf4 or scipy) is used for reading/writing a netCDF file.\n\n.. _same as pandas: http://pandas.pydata.org/pandas-docs/stable/timeseries.html#up-and-downsampling\n\nBug fixes\n~~~~~~~~~\n\n- Fixed a bug where data netCDF variables read from disk with\n ``engine='scipy'`` could still be associated with the file on disk, even\n after closing the file (:issue:`341`). 
This manifested itself in warnings\n about mmapped arrays and segmentation faults (if the data was accessed).\n- Silenced spurious warnings about all-NaN slices when using nan-aware\n aggregation methods (:issue:`344`).\n- Dataset aggregations with ``keep_attrs=True`` now preserve attributes on\n data variables, not just the dataset itself.\n- Tests for xray now pass when run on Windows (:issue:`360`).\n- Fixed a regression in v0.4 where saving to netCDF could fail with the error\n ``ValueError: could not automatically determine time units``.\n\nv0.4 (2 March, 2015)\n--------------------\n\nThis is one of the biggest releases yet for xray: it includes some major\nchanges that may break existing code, along with the usual collection of minor\nenhancements and bug fixes. On the plus side, this release includes all\nhitherto planned breaking changes, so the upgrade path for xray should be\nsmoother going forward.\n\nBreaking changes\n~~~~~~~~~~~~~~~~\n\n- We now automatically align index labels in arithmetic, dataset construction,\n merging and updating. This means the need for manually invoking methods like\n :py:func:`~xray.align` and :py:meth:`~xray.Dataset.reindex_like` should be\n vastly reduced.\n\n :ref:`For arithmetic`, we align\n based on the **intersection** of labels:\n\n .. ipython:: python\n\n lhs = xray.DataArray([1, 2, 3], [('x', [0, 1, 2])])\n rhs = xray.DataArray([2, 3, 4], [('x', [1, 2, 3])])\n lhs + rhs\n\n :ref:`For dataset construction and merging`, we align based on the\n **union** of labels:\n\n .. ipython:: python\n\n xray.Dataset({'foo': lhs, 'bar': rhs})\n\n :ref:`For update and __setitem__`, we align based on the **original**\n object:\n\n .. ipython:: python\n\n lhs.coords['rhs'] = rhs\n lhs\n\n- Aggregations like ``mean`` or ``median`` now skip missing values by default:\n\n .. ipython:: python\n\n xray.DataArray([1, 2, np.nan, 3]).mean()\n\n You can turn this behavior off by supplying the keyword arugment\n ``skipna=False``.\n\n These operations are lightning fast thanks to integration with bottleneck_,\n which is a new optional dependency for xray (numpy is used if bottleneck is\n not installed).\n- Scalar coordinates no longer conflict with constant arrays with the same\n value (e.g., in arithmetic, merging datasets and concat), even if they have\n different shape (:issue:`243`). For example, the coordinate ``c`` here\n persists through arithmetic, even though it has different shapes on each\n DataArray:\n\n .. ipython:: python\n\n a = xray.DataArray([1, 2], coords={'c': 0}, dims='x')\n b = xray.DataArray([1, 2], coords={'c': ('x', [0, 0])}, dims='x')\n (a + b).coords\n\n This functionality can be controlled through the ``compat`` option, which\n has also been added to the :py:class:`~xray.Dataset` constructor.\n- Datetime shortcuts such as ``'time.month'`` now return a ``DataArray`` with\n the name ``'month'``, not ``'time.month'`` (:issue:`345`). This makes it\n easier to index the resulting arrays when they are used with ``groupby``:\n\n .. ipython:: python\n\n time = xray.DataArray(pd.date_range('2000-01-01', periods=365),\n dims='time', name='time')\n counts = time.groupby('time.month').count()\n counts.sel(month=2)\n\n Previously, you would need to use something like\n ``counts.sel(**{'time.month': 2}})``, which is much more awkward.\n- The ``season`` datetime shortcut now returns an array of string labels\n such `'DJF'`:\n\n .. 
ipython:: python\n\n ds = xray.Dataset({'t': pd.date_range('2000-01-01', periods=12, freq='M')})\n ds['t.season']\n\n Previously, it returned numbered seasons 1 through 4.\n- We have updated our use of the terms of \"coordinates\" and \"variables\". What\n were known in previous versions of xray as \"coordinates\" and \"variables\" are\n now referred to throughout the documentation as \"coordinate variables\" and\n \"data variables\". This brings xray in closer alignment to `CF Conventions`_.\n The only visible change besides the documentation is that ``Dataset.vars``\n has been renamed ``Dataset.data_vars``.\n- You will need to update your code if you have been ignoring deprecation\n warnings: methods and attributes that were deprecated in xray v0.3 or earlier\n (e.g., ``dimensions``, ``attributes```) have gone away.\n\n.. _bottleneck: https://github.com/kwgoodman/bottleneck\n\nEnhancements\n~~~~~~~~~~~~\n\n- Support for :py:meth:`~xray.Dataset.reindex` with a fill method. This\n provides a useful shortcut for upsampling:\n\n .. ipython:: python\n\n data = xray.DataArray([1, 2, 3], [('x', range(3))])\n data.reindex(x=[0.5, 1, 1.5, 2, 2.5], method='pad')\n\n This will be especially useful once pandas 0.16 is released, at which point\n xray will immediately support reindexing with\n `method='nearest' `_.\n- Use functions that return generic ndarrays with DataArray.groupby.apply and\n Dataset.apply (:issue:`327` and :issue:`329`). Thanks Jeff Gerard!\n- Consolidated the functionality of ``dumps`` (writing a dataset to a netCDF3\n bytestring) into :py:meth:`~xray.Dataset.to_netcdf` (:issue:`333`).\n- :py:meth:`~xray.Dataset.to_netcdf` now supports writing to groups in netCDF4\n files (:issue:`333`). It also finally has a full docstring -- you should read\n it!\n- :py:func:`~xray.open_dataset` and :py:meth:`~xray.Dataset.to_netcdf` now\n work on netCDF3 files when netcdf4-python is not installed as long as scipy\n is available (:issue:`333`).\n- The new :py:meth:`Dataset.drop ` and\n :py:meth:`DataArray.drop ` methods makes it easy to drop\n explicitly listed variables or index labels:\n\n .. ipython:: python\n\n # drop variables\n ds = xray.Dataset({'x': 0, 'y': 1})\n ds.drop('x')\n\n # drop index labels\n arr = xray.DataArray([1, 2, 3], coords=[('x', list('abc'))])\n arr.drop(['a', 'c'], dim='x')\n\n- :py:meth:`~xray.Dataset.broadcast_equals` has been added to correspond to\n the new ``compat`` option.\n- Long attributes are now truncated at 500 characters when printing a dataset\n (:issue:`338`). This should make things more convenient for working with\n datasets interactively.\n- Added a new documentation example, :ref:`monthly means example`. Thanks Joe\n Hamman!\n\nBug fixes\n~~~~~~~~~\n\n- Several bug fixes related to decoding time units from netCDF files\n (:issue:`316`, :issue:`330`). 
Thanks Stefan Pfenninger!\n- xray no longer requires ``decode_coords=False`` when reading datasets with\n unparseable coordinate attributes (:issue:`308`).\n- Fixed ``DataArray.loc`` indexing with ``...`` (:issue:`318`).\n- Fixed an edge case that resulted in an error when reindexing\n multi-dimensional variables (:issue:`315`).\n- Fixed slicing with negative step sizes (:issue:`312`).\n- Fixed invalid conversion of string arrays to numeric dtype (:issue:`305`).\n- Fixed ``repr()`` on dataset objects with non-standard dates (:issue:`347`).\n\nDeprecations\n~~~~~~~~~~~~\n\n- ``dump`` and ``dumps`` have been deprecated in favor of\n :py:meth:`~xray.Dataset.to_netcdf`.\n- ``drop_vars`` has been deprecated in favor of :py:meth:`~xray.Dataset.drop`.\n\nFuture plans\n~~~~~~~~~~~~\n\nThe biggest feature I'm excited about working toward in the immediate future\nis supporting out-of-core operations in xray using Dask_, a part of the Blaze_\nproject. For a preview of using Dask with weather data, read\n`this blog post`_ by Matthew Rocklin. See :issue:`328` for more details.\n\n.. _Dask: http://dask.pydata.org\n.. _Blaze: http://blaze.pydata.org\n.. _this blog post: http://matthewrocklin.com/blog/work/2015/02/13/Towards-OOC-Slicing-and-Stacking/\n\nv0.3.2 (23 December, 2014)\n--------------------------\n\nThis release focused on bug fixes, speedups and resolving some niggling\ninconsistencies.\n\nThere are a few cases where the behavior of xray differs from the previous\nversion. However, I expect that in almost all cases your code will continue to\nrun unmodified.\n\n.. warning::\n\n xray now requires pandas v0.15.0 or later. This was necessary for\n supporting TimedeltaIndex without too many painful hacks.\n\nBackwards incompatible changes\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n- Arrays of :py:class:`datetime.datetime` objects are now automatically cast to\n ``datetime64[ns]`` arrays when stored in an xray object, using machinery\n borrowed from pandas:\n\n .. ipython:: python\n\n from datetime import datetime\n xray.Dataset({'t': [datetime(2000, 1, 1)]})\n\n- xray now has support (including serialization to netCDF) for\n :py:class:`~pandas.TimedeltaIndex`. :py:class:`datetime.timedelta` objects\n are accordingly cast to ``timedelta64[ns]`` objects when appropriate.\n- Masked arrays are now properly coerced to use ``NaN`` as a sentinel value\n (:issue:`259`).\n\nEnhancements\n~~~~~~~~~~~~\n\n- Due to popular demand, we have added experimental attribute style access as\n a shortcut for dataset variables, coordinates and attributes:\n\n .. ipython:: python\n\n ds = xray.Dataset({'tmin': ([], 25, {'units': 'celsius'})})\n ds.tmin.units\n\n Tab-completion for these variables should work in editors such as IPython.\n However, setting variables or attributes in this fashion is not yet\n supported because there are some unresolved ambiguities (:issue:`300`).\n- You can now use a dictionary for indexing with labeled dimensions. This\n provides a safe way to do assignment with labeled dimensions:\n\n .. ipython:: python\n\n array = xray.DataArray(np.zeros(5), dims=['x'])\n array[dict(x=slice(3))] = 1\n array\n\n- Non-index coordinates can now be faithfully written to and restored from\n netCDF files. This is done according to CF conventions when possible by\n using the ``coordinates`` attribute on a data variable. 
When not possible,\n xray defines a global ``coordinates`` attribute.\n- Preliminary support for converting ``xray.DataArray`` objects to and from\n CDAT_ ``cdms2`` variables.\n- We sped up any operation that involves creating a new Dataset or DataArray\n (e.g., indexing, aggregation, arithmetic) by a factor of 30 to 50%. The full\n speed up requires cyordereddict_ to be installed.\n\n.. _CDAT: http://uvcdat.llnl.gov/\n.. _cyordereddict: https://github.com/shoyer/cyordereddict\n\nBug fixes\n~~~~~~~~~\n\n- Fix for ``to_dataframe()`` with 0d string/object coordinates (:issue:`287`)\n- Fix for ``to_netcdf`` with 0d string variable (:issue:`284`)\n- Fix writing datetime64 arrays to netcdf if NaT is present (:issue:`270`)\n- Fix align silently upcasts data arrays when NaNs are inserted (:issue:`264`)\n\nFuture plans\n~~~~~~~~~~~~\n\n- I am contemplating switching to the terms \"coordinate variables\" and \"data\n variables\" instead of the (currently used) \"coordinates\" and \"variables\",\n following their use in `CF Conventions`_ (:issue:`293`). This would mostly\n have implications for the documentation, but I would also change the\n ``Dataset`` attribute ``vars`` to ``data``.\n- I no longer certain that automatic label alignment for arithmetic would be a\n good idea for xray -- it is a feature from pandas that I have not missed\n (:issue:`186`).\n- The main API breakage that I *do* anticipate in the next release is finally\n making all aggregation operations skip missing values by default\n (:issue:`130`). I'm pretty sick of writing ``ds.reduce(np.nanmean, 'time')``.\n- The next version of xray (0.4) will remove deprecated features and aliases\n whose use currently raises a warning.\n\nIf you have opinions about any of these anticipated changes, I would love to\nhear them -- please add a note to any of the referenced GitHub issues.\n\n.. _CF Conventions: http://cfconventions.org/Data/cf-conventions/cf-conventions-1.6/build/cf-conventions.html\n\nv0.3.1 (22 October, 2014)\n-------------------------\n\nThis is mostly a bug-fix release to make xray compatible with the latest\nrelease of pandas (v0.15).\n\nWe added several features to better support working with missing values and\nexporting xray objects to pandas. We also reorganized the internal API for\nserializing and deserializing datasets, but this change should be almost\nentirely transparent to users.\n\nOther than breaking the experimental DataStore API, there should be no\nbackwards incompatible changes.\n\nNew features\n~~~~~~~~~~~~\n\n- Added :py:meth:`~xray.Dataset.count` and :py:meth:`~xray.Dataset.dropna`\n methods, copied from pandas, for working with missing values (:issue:`247`,\n :issue:`58`).\n- Added :py:meth:`DataArray.to_pandas ` for\n converting a data array into the pandas object with the same dimensionality\n (1D to Series, 2D to DataFrame, etc.) 
(:issue:`255`).\n- Support for reading gzipped netCDF3 files (:issue:`239`).\n- Reduced memory usage when writing netCDF files (:issue:`251`).\n- 'missing_value' is now supported as an alias for the '_FillValue' attribute\n on netCDF variables (:issue:`245`).\n- Trivial indexes, equivalent to ``range(n)`` where ``n`` is the length of the\n dimension, are no longer written to disk (:issue:`245`).\n\nBug fixes\n~~~~~~~~~\n\n- Compatibility fixes for pandas v0.15 (:issue:`262`).\n- Fixes for display and indexing of ``NaT`` (not-a-time) (:issue:`238`,\n :issue:`240`).\n- Fix slicing by label when an argument is a data array (:issue:`250`).\n- Test data is now shipped with the source distribution (:issue:`253`).\n- Ensure order does not matter when doing arithmetic with scalar data arrays\n (:issue:`254`).\n- Order of dimensions preserved with ``DataArray.to_dataframe`` (:issue:`260`).\n\nv0.3 (21 September 2014)\n------------------------\n\nNew features\n~~~~~~~~~~~~\n\n- **Revamped coordinates**: \"coordinates\" now refers to all arrays that are not\n used to index a dimension. Coordinates are intended to allow for keeping track\n of arrays of metadata that describe the grid on which the points in \"variable\"\n arrays lie. They are preserved (when unambiguous) even through mathematical\n operations.\n- **Dataset math**: :py:class:`~xray.Dataset` objects now support all arithmetic\n operations directly. Dataset-array operations map across all dataset\n variables; dataset-dataset operations act on each pair of variables with the\n same name.\n- **GroupBy math**: grouped arithmetic provides a convenient shortcut for normalizing by the\n average value of a group.\n- The dataset ``__repr__`` method has been entirely overhauled; dataset\n objects now show their values when printed.\n- You can now index a dataset with a list of variables to return a new dataset:\n ``ds[['foo', 'bar']]``.\n\nBackwards incompatible changes\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n- ``Dataset.__eq__`` and ``Dataset.__ne__`` are now element-wise operations\n instead of comparing all values to obtain a single boolean. Use the method\n :py:meth:`~xray.Dataset.equals` instead.\n\nDeprecations\n~~~~~~~~~~~~\n\n- ``Dataset.noncoords`` is deprecated: use ``Dataset.vars`` instead.\n- ``Dataset.select_vars`` is deprecated: index a ``Dataset`` with a list of\n variable names instead.\n- ``DataArray.select_vars`` and ``DataArray.drop_vars`` are deprecated: use\n :py:meth:`~xray.DataArray.reset_coords` instead.\n\nv0.2 (14 August 2014)\n---------------------\n\nThis is a major release that includes some new features and quite a few bug\nfixes. Here are the highlights:\n\n- There is now a direct constructor for ``DataArray`` objects, which makes it\n possible to create a DataArray without using a Dataset. This is highlighted\n in the refreshed :doc:`tutorial`.\n- You can perform aggregation operations like ``mean`` directly on\n :py:class:`~xray.Dataset` objects, thanks to Joe Hamman. 
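A minimal sketch (the variable and dimension names are illustrative):\n\n .. code-block:: python\n\n ds = xray.Dataset({'temperature': ('time', [10.1, 11.3, 9.8])})\n ds.mean() # reduces every data variable in the Dataset\n\n 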
These aggregation\n methods also worked on grouped datasets.\n- xray now works on Python 2.6, thanks to Anna Kuznetsova.\n- A number of methods and attributes were given more sensible (usually shorter)\n names: ``labeled`` -> ``sel``, ``indexed`` -> ``isel``, ``select`` ->\n ``select_vars``, ``unselect`` -> ``drop_vars``, ``dimensions`` -> ``dims``,\n ``coordinates`` -> ``coords``, ``attributes`` -> ``attrs``.\n- New :py:meth:`~xray.Dataset.load_data` and :py:meth:`~xray.Dataset.close`\n methods for datasets facilitate lower level of control of data loaded from\n disk.\n\nv0.1.1 (20 May 2014)\n--------------------\n\nxray 0.1.1 is a bug-fix release that includes changes that should be almost\nentirely backwards compatible with v0.1:\n\n- Python 3 support (:issue:`53`)\n- Required numpy version relaxed to 1.7 (:issue:`129`)\n- Return numpy.datetime64 arrays for non-standard calendars (:issue:`126`)\n- Support for opening datasets associated with NetCDF4 groups (:issue:`127`)\n- Bug-fixes for concatenating datetime arrays (:issue:`134`)\n\nSpecial thanks to new contributors Thomas Kluyver, Joe Hamman and Alistair\nMiles.\n\nv0.1 (2 May 2014)\n-----------------\n\nInitial release.\n"},{"fileName":"memory.py","filePath":"xarray/backends","id":45,"nodeType":"File","text":"import copy\n\nimport numpy as np\n\nfrom ..core.variable import Variable\nfrom .common import AbstractWritableDataStore\n\n\nclass InMemoryDataStore(AbstractWritableDataStore):\n \"\"\"\n Stores dimensions, variables and attributes in ordered dictionaries, making\n this store fast compared to stores which save to disk.\n\n This store exists purely for internal testing purposes.\n \"\"\"\n\n def __init__(self, variables=None, attributes=None):\n self._variables = {} if variables is None else variables\n self._attributes = {} if attributes is None else attributes\n\n def get_attrs(self):\n return self._attributes\n\n def get_variables(self):\n return self._variables\n\n def get_dimensions(self):\n dims = {}\n for v in self._variables.values():\n for d, s in v.dims.items():\n dims[d] = s\n return dims\n\n def prepare_variable(self, k, v, *args, **kwargs):\n new_var = Variable(v.dims, np.empty_like(v), v.attrs)\n self._variables[k] = new_var\n return new_var, v.data\n\n def set_attribute(self, k, v):\n # copy to imitate writing to disk.\n self._attributes[k] = copy.deepcopy(v)\n\n def set_dimension(self, d, l, unlimited_dims=None):\n # in this model, dimensions are accounted for in the variables\n pass\n"},{"className":"Frozen","col":0,"comment":"Wrapper around an object implementing the mapping interface to make it\n immutable. If you really want to modify the mapping, the mutable version is\n saved under the `mapping` attribute.\n ","endLoc":396,"id":46,"nodeType":"Class","startLoc":372,"text":"class Frozen(Mapping[K, V]):\n \"\"\"Wrapper around an object implementing the mapping interface to make it\n immutable. 
If you really want to modify the mapping, the mutable version is\n saved under the `mapping` attribute.\n \"\"\"\n\n __slots__ = (\"mapping\",)\n\n def __init__(self, mapping: Mapping[K, V]):\n self.mapping = mapping\n\n def __getitem__(self, key: K) -> V:\n return self.mapping[key]\n\n def __iter__(self) -> Iterator[K]:\n return iter(self.mapping)\n\n def __len__(self) -> int:\n return len(self.mapping)\n\n def __contains__(self, key: object) -> bool:\n return key in self.mapping\n\n def __repr__(self) -> str:\n return \"%s(%r)\" % (type(self).__name__, self.mapping)"},{"col":0,"comment":"null","endLoc":400,"header":"def FrozenDict(*args, **kwargs) -> Frozen","id":47,"name":"FrozenDict","nodeType":"Function","startLoc":399,"text":"def FrozenDict(*args, **kwargs) -> Frozen:\n return Frozen(dict(*args, **kwargs))"},{"col":0,"comment":"null","endLoc":228,"header":"def where_method(data, cond, other=dtypes.NA)","id":48,"name":"where_method","nodeType":"Function","startLoc":225,"text":"def where_method(data, cond, other=dtypes.NA):\n if other is dtypes.NA:\n other = dtypes.get_fill_value(data.dtype)\n return where(cond, data, other)"},{"col":0,"comment":"Return an appropriate fill value for this dtype.\n\n Parameters\n ----------\n dtype : np.dtype\n\n Returns\n -------\n fill_value : Missing value corresponding to this dtype.\n ","endLoc":96,"header":"def get_fill_value(dtype)","id":49,"name":"get_fill_value","nodeType":"Function","startLoc":84,"text":"def get_fill_value(dtype):\n \"\"\"Return an appropriate fill value for this dtype.\n\n Parameters\n ----------\n dtype : np.dtype\n\n Returns\n -------\n fill_value : Missing value corresponding to this dtype.\n \"\"\"\n _, fill_value = maybe_promote(dtype)\n return fill_value"},{"className":"AbstractWritableDataStore","col":0,"comment":"null","endLoc":373,"id":50,"nodeType":"Class","startLoc":212,"text":"class AbstractWritableDataStore(AbstractDataStore):\n __slots__ = ()\n\n def encode(self, variables, attributes):\n \"\"\"\n Encode the variables and attributes in this store\n\n Parameters\n ----------\n variables : dict-like\n Dictionary of key/value (variable name / xr.Variable) pairs\n attributes : dict-like\n Dictionary of key/value (attribute name / attribute) pairs\n\n Returns\n -------\n variables : dict-like\n attributes : dict-like\n\n \"\"\"\n variables = {k: self.encode_variable(v) for k, v in variables.items()}\n attributes = {k: self.encode_attribute(v) for k, v in attributes.items()}\n return variables, attributes\n\n def encode_variable(self, v):\n \"\"\"encode one variable\"\"\"\n return v\n\n def encode_attribute(self, a):\n \"\"\"encode one attribute\"\"\"\n return a\n\n def set_dimension(self, d, l): # pragma: no cover\n raise NotImplementedError()\n\n def set_attribute(self, k, v): # pragma: no cover\n raise NotImplementedError()\n\n def set_variable(self, k, v): # pragma: no cover\n raise NotImplementedError()\n\n def store_dataset(self, dataset):\n \"\"\"\n in stores, variables are all variables AND coordinates\n in xarray.Dataset variables are variables NOT coordinates,\n so here we pass the whole dataset in instead of doing\n dataset.variables\n \"\"\"\n self.store(dataset, dataset.attrs)\n\n def store(\n self,\n variables,\n attributes,\n check_encoding_set=frozenset(),\n writer=None,\n unlimited_dims=None,\n ):\n \"\"\"\n Top level method for putting data on this store, this method:\n - encodes variables/attributes\n - sets dimensions\n - sets variables\n\n Parameters\n ----------\n variables : dict-like\n 
Dictionary of key/value (variable name / xr.Variable) pairs\n attributes : dict-like\n Dictionary of key/value (attribute name / attribute) pairs\n check_encoding_set : list-like\n List of variables that should be checked for invalid encoding\n values\n writer : ArrayWriter\n unlimited_dims : list-like\n List of dimension names that should be treated as unlimited\n dimensions.\n \"\"\"\n if writer is None:\n writer = ArrayWriter()\n\n variables, attributes = self.encode(variables, attributes)\n\n self.set_attributes(attributes)\n self.set_dimensions(variables, unlimited_dims=unlimited_dims)\n self.set_variables(\n variables, check_encoding_set, writer, unlimited_dims=unlimited_dims\n )\n\n def set_attributes(self, attributes):\n \"\"\"\n This provides a centralized method to set the dataset attributes on the\n data store.\n\n Parameters\n ----------\n attributes : dict-like\n Dictionary of key/value (attribute name / attribute) pairs\n \"\"\"\n for k, v in attributes.items():\n self.set_attribute(k, v)\n\n def set_variables(self, variables, check_encoding_set, writer, unlimited_dims=None):\n \"\"\"\n This provides a centralized method to set the variables on the data\n store.\n\n Parameters\n ----------\n variables : dict-like\n Dictionary of key/value (variable name / xr.Variable) pairs\n check_encoding_set : list-like\n List of variables that should be checked for invalid encoding\n values\n writer : ArrayWriter\n unlimited_dims : list-like\n List of dimension names that should be treated as unlimited\n dimensions.\n \"\"\"\n\n for vn, v in variables.items():\n name = _encode_variable_name(vn)\n check = vn in check_encoding_set\n target, source = self.prepare_variable(\n name, v, check, unlimited_dims=unlimited_dims\n )\n\n writer.add(source, target)\n\n def set_dimensions(self, variables, unlimited_dims=None):\n \"\"\"\n This provides a centralized method to set the dimensions on the data\n store.\n\n Parameters\n ----------\n variables : dict-like\n Dictionary of key/value (variable name / xr.Variable) pairs\n unlimited_dims : list-like\n List of dimension names that should be treated as unlimited\n dimensions.\n \"\"\"\n if unlimited_dims is None:\n unlimited_dims = set()\n\n existing_dims = self.get_dimensions()\n\n dims = {}\n for v in unlimited_dims: # put unlimited_dims first\n dims[v] = None\n for v in variables.values():\n dims.update(dict(zip(v.dims, v.shape)))\n\n for dim, length in dims.items():\n if dim in existing_dims and length != existing_dims[dim]:\n raise ValueError(\n \"Unable to update size for existing dimension\"\n \"%r (%d != %d)\" % (dim, length, existing_dims[dim])\n )\n elif dim not in existing_dims:\n is_unlimited = dim in unlimited_dims\n self.set_dimension(dim, length, is_unlimited)"},{"fileName":"common.py","filePath":"xarray/backends","id":51,"nodeType":"File","text":"import logging\nimport time\nimport traceback\nimport warnings\nfrom collections.abc import Mapping\n\nimport numpy as np\n\nfrom ..conventions import cf_encoder\nfrom ..core import indexing\nfrom ..core.pycompat import dask_array_type\nfrom ..core.utils import FrozenDict, NdimSizeLenMixin\n\n# Create a logger object, but don't add any handlers. 
Leave that to user code.\nlogger = logging.getLogger(__name__)\n\n\nNONE_VAR_NAME = \"__values__\"\n\n\ndef _encode_variable_name(name):\n if name is None:\n name = NONE_VAR_NAME\n return name\n\n\ndef _decode_variable_name(name):\n if name == NONE_VAR_NAME:\n name = None\n return name\n\n\ndef find_root_and_group(ds):\n \"\"\"Find the root and group name of a netCDF4/h5netcdf dataset.\"\"\"\n hierarchy = ()\n while ds.parent is not None:\n hierarchy = (ds.name,) + hierarchy\n ds = ds.parent\n group = \"/\" + \"/\".join(hierarchy)\n return ds, group\n\n\ndef robust_getitem(array, key, catch=Exception, max_retries=6, initial_delay=500):\n \"\"\"\n Robustly index an array, using retry logic with exponential backoff if any\n of the errors ``catch`` are raised. The initial_delay is measured in ms.\n\n With the default settings, the maximum delay will be in the range of 32-64\n seconds.\n \"\"\"\n assert max_retries >= 0\n for n in range(max_retries + 1):\n try:\n return array[key]\n except catch:\n if n == max_retries:\n raise\n base_delay = initial_delay * 2 ** n\n next_delay = base_delay + np.random.randint(base_delay)\n msg = (\n \"getitem failed, waiting %s ms before trying again \"\n \"(%s tries remaining). Full traceback: %s\"\n % (next_delay, max_retries - n, traceback.format_exc())\n )\n logger.debug(msg)\n time.sleep(1e-3 * next_delay)\n\n\nclass BackendArray(NdimSizeLenMixin, indexing.ExplicitlyIndexed):\n __slots__ = ()\n\n def __array__(self, dtype=None):\n key = indexing.BasicIndexer((slice(None),) * self.ndim)\n return np.asarray(self[key], dtype=dtype)\n\n\nclass AbstractDataStore(Mapping):\n __slots__ = ()\n\n def __iter__(self):\n return iter(self.variables)\n\n def __getitem__(self, key):\n return self.variables[key]\n\n def __len__(self):\n return len(self.variables)\n\n def get_dimensions(self): # pragma: no cover\n raise NotImplementedError()\n\n def get_attrs(self): # pragma: no cover\n raise NotImplementedError()\n\n def get_variables(self): # pragma: no cover\n raise NotImplementedError()\n\n def get_encoding(self):\n return {}\n\n def load(self):\n \"\"\"\n This loads the variables and attributes simultaneously.\n A centralized loading function makes it easier to create\n data stores that do automatic encoding/decoding.\n\n For example::\n\n class SuffixAppendingDataStore(AbstractDataStore):\n\n def load(self):\n variables, attributes = AbstractDataStore.load(self)\n variables = {'%s_suffix' % k: v\n for k, v in variables.items()}\n attributes = {'%s_suffix' % k: v\n for k, v in attributes.items()}\n return variables, attributes\n\n This function will be called anytime variables or attributes\n are requested, so care should be taken to make sure its fast.\n \"\"\"\n variables = FrozenDict(\n (_decode_variable_name(k), v) for k, v in self.get_variables().items()\n )\n attributes = FrozenDict(self.get_attrs())\n return variables, attributes\n\n @property\n def variables(self): # pragma: no cover\n warnings.warn(\n \"The ``variables`` property has been deprecated and \"\n \"will be removed in xarray v0.11.\",\n FutureWarning,\n stacklevel=2,\n )\n variables, _ = self.load()\n return variables\n\n @property\n def attrs(self): # pragma: no cover\n warnings.warn(\n \"The ``attrs`` property has been deprecated and \"\n \"will be removed in xarray v0.11.\",\n FutureWarning,\n stacklevel=2,\n )\n _, attrs = self.load()\n return attrs\n\n @property\n def dimensions(self): # pragma: no cover\n warnings.warn(\n \"The ``dimensions`` property has been deprecated and \"\n \"will be 
removed in xarray v0.11.\",\n FutureWarning,\n stacklevel=2,\n )\n return self.get_dimensions()\n\n def close(self):\n pass\n\n def __enter__(self):\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n self.close()\n\n\nclass ArrayWriter:\n __slots__ = (\"sources\", \"targets\", \"regions\", \"lock\")\n\n def __init__(self, lock=None):\n self.sources = []\n self.targets = []\n self.regions = []\n self.lock = lock\n\n def add(self, source, target, region=None):\n if isinstance(source, dask_array_type):\n self.sources.append(source)\n self.targets.append(target)\n self.regions.append(region)\n else:\n if region:\n target[region] = source\n else:\n target[...] = source\n\n def sync(self, compute=True):\n if self.sources:\n import dask.array as da\n\n # TODO: consider wrapping targets with dask.delayed, if this makes\n # for any discernable difference in perforance, e.g.,\n # targets = [dask.delayed(t) for t in self.targets]\n\n delayed_store = da.store(\n self.sources,\n self.targets,\n lock=self.lock,\n compute=compute,\n flush=True,\n regions=self.regions,\n )\n self.sources = []\n self.targets = []\n self.regions = []\n return delayed_store\n\n\nclass AbstractWritableDataStore(AbstractDataStore):\n __slots__ = ()\n\n def encode(self, variables, attributes):\n \"\"\"\n Encode the variables and attributes in this store\n\n Parameters\n ----------\n variables : dict-like\n Dictionary of key/value (variable name / xr.Variable) pairs\n attributes : dict-like\n Dictionary of key/value (attribute name / attribute) pairs\n\n Returns\n -------\n variables : dict-like\n attributes : dict-like\n\n \"\"\"\n variables = {k: self.encode_variable(v) for k, v in variables.items()}\n attributes = {k: self.encode_attribute(v) for k, v in attributes.items()}\n return variables, attributes\n\n def encode_variable(self, v):\n \"\"\"encode one variable\"\"\"\n return v\n\n def encode_attribute(self, a):\n \"\"\"encode one attribute\"\"\"\n return a\n\n def set_dimension(self, d, l): # pragma: no cover\n raise NotImplementedError()\n\n def set_attribute(self, k, v): # pragma: no cover\n raise NotImplementedError()\n\n def set_variable(self, k, v): # pragma: no cover\n raise NotImplementedError()\n\n def store_dataset(self, dataset):\n \"\"\"\n in stores, variables are all variables AND coordinates\n in xarray.Dataset variables are variables NOT coordinates,\n so here we pass the whole dataset in instead of doing\n dataset.variables\n \"\"\"\n self.store(dataset, dataset.attrs)\n\n def store(\n self,\n variables,\n attributes,\n check_encoding_set=frozenset(),\n writer=None,\n unlimited_dims=None,\n ):\n \"\"\"\n Top level method for putting data on this store, this method:\n - encodes variables/attributes\n - sets dimensions\n - sets variables\n\n Parameters\n ----------\n variables : dict-like\n Dictionary of key/value (variable name / xr.Variable) pairs\n attributes : dict-like\n Dictionary of key/value (attribute name / attribute) pairs\n check_encoding_set : list-like\n List of variables that should be checked for invalid encoding\n values\n writer : ArrayWriter\n unlimited_dims : list-like\n List of dimension names that should be treated as unlimited\n dimensions.\n \"\"\"\n if writer is None:\n writer = ArrayWriter()\n\n variables, attributes = self.encode(variables, attributes)\n\n self.set_attributes(attributes)\n self.set_dimensions(variables, unlimited_dims=unlimited_dims)\n self.set_variables(\n variables, check_encoding_set, writer, unlimited_dims=unlimited_dims\n )\n\n 
def set_attributes(self, attributes):\n \"\"\"\n This provides a centralized method to set the dataset attributes on the\n data store.\n\n Parameters\n ----------\n attributes : dict-like\n Dictionary of key/value (attribute name / attribute) pairs\n \"\"\"\n for k, v in attributes.items():\n self.set_attribute(k, v)\n\n def set_variables(self, variables, check_encoding_set, writer, unlimited_dims=None):\n \"\"\"\n This provides a centralized method to set the variables on the data\n store.\n\n Parameters\n ----------\n variables : dict-like\n Dictionary of key/value (variable name / xr.Variable) pairs\n check_encoding_set : list-like\n List of variables that should be checked for invalid encoding\n values\n writer : ArrayWriter\n unlimited_dims : list-like\n List of dimension names that should be treated as unlimited\n dimensions.\n \"\"\"\n\n for vn, v in variables.items():\n name = _encode_variable_name(vn)\n check = vn in check_encoding_set\n target, source = self.prepare_variable(\n name, v, check, unlimited_dims=unlimited_dims\n )\n\n writer.add(source, target)\n\n def set_dimensions(self, variables, unlimited_dims=None):\n \"\"\"\n This provides a centralized method to set the dimensions on the data\n store.\n\n Parameters\n ----------\n variables : dict-like\n Dictionary of key/value (variable name / xr.Variable) pairs\n unlimited_dims : list-like\n List of dimension names that should be treated as unlimited\n dimensions.\n \"\"\"\n if unlimited_dims is None:\n unlimited_dims = set()\n\n existing_dims = self.get_dimensions()\n\n dims = {}\n for v in unlimited_dims: # put unlimited_dims first\n dims[v] = None\n for v in variables.values():\n dims.update(dict(zip(v.dims, v.shape)))\n\n for dim, length in dims.items():\n if dim in existing_dims and length != existing_dims[dim]:\n raise ValueError(\n \"Unable to update size for existing dimension\"\n \"%r (%d != %d)\" % (dim, length, existing_dims[dim])\n )\n elif dim not in existing_dims:\n is_unlimited = dim in unlimited_dims\n self.set_dimension(dim, length, is_unlimited)\n\n\nclass WritableCFDataStore(AbstractWritableDataStore):\n __slots__ = ()\n\n def encode(self, variables, attributes):\n # All NetCDF files get CF encoded by default, without this attempting\n # to write times, for example, would fail.\n variables, attributes = cf_encoder(variables, attributes)\n variables = {k: self.encode_variable(v) for k, v in variables.items()}\n attributes = {k: self.encode_attribute(v) for k, v in attributes.items()}\n return variables, attributes\n"},{"col":4,"comment":"null","endLoc":381,"header":"def __init__(self, mapping: Mapping[K, V])","id":52,"name":"__init__","nodeType":"Function","startLoc":380,"text":"def __init__(self, mapping: Mapping[K, V]):\n self.mapping = mapping"},{"col":4,"comment":"null","endLoc":384,"header":"def __getitem__(self, key: K) -> V","id":53,"name":"__getitem__","nodeType":"Function","startLoc":383,"text":"def __getitem__(self, key: K) -> V:\n return self.mapping[key]"},{"col":4,"comment":"null","endLoc":387,"header":"def __iter__(self) -> Iterator[K]","id":54,"name":"__iter__","nodeType":"Function","startLoc":386,"text":"def __iter__(self) -> Iterator[K]:\n return iter(self.mapping)"},{"col":4,"comment":"null","endLoc":390,"header":"def __len__(self) -> int","id":55,"name":"__len__","nodeType":"Function","startLoc":389,"text":"def __len__(self) -> int:\n return len(self.mapping)"},{"col":4,"comment":"null","endLoc":393,"header":"def __contains__(self, key: object) -> 
bool","id":56,"name":"__contains__","nodeType":"Function","startLoc":392,"text":"def __contains__(self, key: object) -> bool:\n return key in self.mapping"},{"col":4,"comment":"null","endLoc":396,"header":"def __repr__(self) -> str","id":57,"name":"__repr__","nodeType":"Function","startLoc":395,"text":"def __repr__(self) -> str:\n return \"%s(%r)\" % (type(self).__name__, self.mapping)"},{"className":"WritableCFDataStore","col":0,"comment":"null","endLoc":385,"id":58,"nodeType":"Class","startLoc":376,"text":"class WritableCFDataStore(AbstractWritableDataStore):\n __slots__ = ()\n\n def encode(self, variables, attributes):\n # All NetCDF files get CF encoded by default, without this attempting\n # to write times, for example, would fail.\n variables, attributes = cf_encoder(variables, attributes)\n variables = {k: self.encode_variable(v) for k, v in variables.items()}\n attributes = {k: self.encode_attribute(v) for k, v in attributes.items()}\n return variables, attributes"},{"attributeType":"null","col":4,"comment":"null","endLoc":378,"id":59,"name":"__slots__","nodeType":"Attribute","startLoc":378,"text":"__slots__"},{"col":0,"comment":"Simpler equivalent of pandas.core.common._maybe_promote\n\n Parameters\n ----------\n dtype : np.dtype\n\n Returns\n -------\n dtype : Promoted dtype that can hold missing values.\n fill_value : Valid missing value for the promoted dtype.\n ","endLoc":78,"header":"def maybe_promote(dtype)","id":60,"name":"maybe_promote","nodeType":"Function","startLoc":45,"text":"def maybe_promote(dtype):\n \"\"\"Simpler equivalent of pandas.core.common._maybe_promote\n\n Parameters\n ----------\n dtype : np.dtype\n\n Returns\n -------\n dtype : Promoted dtype that can hold missing values.\n fill_value : Valid missing value for the promoted dtype.\n \"\"\"\n # N.B. 
these casting rules should match pandas\n if np.issubdtype(dtype, np.floating):\n fill_value = np.nan\n elif np.issubdtype(dtype, np.timedelta64):\n # See https://github.com/numpy/numpy/issues/10685\n # np.timedelta64 is a subclass of np.integer\n # Check np.timedelta64 before np.integer\n fill_value = np.timedelta64(\"NaT\")\n elif np.issubdtype(dtype, np.integer):\n if dtype.itemsize <= 2:\n dtype = np.float32\n else:\n dtype = np.float64\n fill_value = np.nan\n elif np.issubdtype(dtype, np.complexfloating):\n fill_value = np.nan + np.nan * 1j\n elif np.issubdtype(dtype, np.datetime64):\n fill_value = np.datetime64(\"NaT\")\n else:\n dtype = object\n fill_value = np.nan\n return np.dtype(dtype), fill_value"},{"attributeType":"null","col":8,"comment":"null","endLoc":381,"id":61,"name":"mapping","nodeType":"Attribute","startLoc":381,"text":"self.mapping"},{"col":0,"comment":"\n Encode a set of CF encoded variables and attributes.\n Takes a dicts of variables and attributes and encodes them\n to conform to CF conventions as much as possible.\n This includes masking, scaling, character array handling,\n and CF-time encoding.\n\n\n Parameters\n ----------\n variables : dict\n A dictionary mapping from variable name to xarray.Variable\n attributes : dict\n A dictionary mapping from attribute name to value\n\n Returns\n -------\n encoded_variables : dict\n A dictionary mapping from variable name to xarray.Variable,\n encoded_attributes : dict\n A dictionary mapping from attribute name to value\n\n See also\n --------\n decode_cf_variable, encode_cf_variable\n ","endLoc":770,"header":"def cf_encoder(variables, attributes)","id":62,"name":"cf_encoder","nodeType":"Function","startLoc":717,"text":"def cf_encoder(variables, attributes):\n \"\"\"\n Encode a set of CF encoded variables and attributes.\n Takes a dicts of variables and attributes and encodes them\n to conform to CF conventions as much as possible.\n This includes masking, scaling, character array handling,\n and CF-time encoding.\n\n\n Parameters\n ----------\n variables : dict\n A dictionary mapping from variable name to xarray.Variable\n attributes : dict\n A dictionary mapping from attribute name to value\n\n Returns\n -------\n encoded_variables : dict\n A dictionary mapping from variable name to xarray.Variable,\n encoded_attributes : dict\n A dictionary mapping from attribute name to value\n\n See also\n --------\n decode_cf_variable, encode_cf_variable\n \"\"\"\n\n # add encoding for time bounds variables if present.\n _update_bounds_encoding(variables)\n\n new_vars = {k: encode_cf_variable(v, name=k) for k, v in variables.items()}\n\n # Remove attrs from bounds variables (issue #2921)\n for var in new_vars.values():\n bounds = var.attrs[\"bounds\"] if \"bounds\" in var.attrs else None\n if bounds and bounds in new_vars:\n # see http://cfconventions.org/cf-conventions/cf-conventions.html#cell-boundaries\n for attr in [\n \"units\",\n \"standard_name\",\n \"axis\",\n \"positive\",\n \"calendar\",\n \"long_name\",\n \"leap_month\",\n \"leap_year\",\n \"month_lengths\",\n ]:\n if attr in new_vars[bounds].attrs and attr in var.attrs:\n if new_vars[bounds].attrs[attr] == var.attrs[attr]:\n new_vars[bounds].attrs.pop(attr)\n\n return new_vars, attributes"},{"col":4,"comment":"null","endLoc":385,"header":"def encode(self, variables, attributes)","id":63,"name":"encode","nodeType":"Function","startLoc":379,"text":"def encode(self, variables, attributes):\n # All NetCDF files get CF encoded by default, without this attempting\n # to write 
times, for example, would fail.\n variables, attributes = cf_encoder(variables, attributes)\n variables = {k: self.encode_variable(v) for k, v in variables.items()}\n attributes = {k: self.encode_attribute(v) for k, v in attributes.items()}\n return variables, attributes"},{"className":"AbstractDataStore","col":0,"comment":"null","endLoc":167,"id":64,"nodeType":"Class","startLoc":77,"text":"class AbstractDataStore(Mapping):\n __slots__ = ()\n\n def __iter__(self):\n return iter(self.variables)\n\n def __getitem__(self, key):\n return self.variables[key]\n\n def __len__(self):\n return len(self.variables)\n\n def get_dimensions(self): # pragma: no cover\n raise NotImplementedError()\n\n def get_attrs(self): # pragma: no cover\n raise NotImplementedError()\n\n def get_variables(self): # pragma: no cover\n raise NotImplementedError()\n\n def get_encoding(self):\n return {}\n\n def load(self):\n \"\"\"\n This loads the variables and attributes simultaneously.\n A centralized loading function makes it easier to create\n data stores that do automatic encoding/decoding.\n\n For example::\n\n class SuffixAppendingDataStore(AbstractDataStore):\n\n def load(self):\n variables, attributes = AbstractDataStore.load(self)\n variables = {'%s_suffix' % k: v\n for k, v in variables.items()}\n attributes = {'%s_suffix' % k: v\n for k, v in attributes.items()}\n return variables, attributes\n\n This function will be called anytime variables or attributes\n are requested, so care should be taken to make sure its fast.\n \"\"\"\n variables = FrozenDict(\n (_decode_variable_name(k), v) for k, v in self.get_variables().items()\n )\n attributes = FrozenDict(self.get_attrs())\n return variables, attributes\n\n @property\n def variables(self): # pragma: no cover\n warnings.warn(\n \"The ``variables`` property has been deprecated and \"\n \"will be removed in xarray v0.11.\",\n FutureWarning,\n stacklevel=2,\n )\n variables, _ = self.load()\n return variables\n\n @property\n def attrs(self): # pragma: no cover\n warnings.warn(\n \"The ``attrs`` property has been deprecated and \"\n \"will be removed in xarray v0.11.\",\n FutureWarning,\n stacklevel=2,\n )\n _, attrs = self.load()\n return attrs\n\n @property\n def dimensions(self): # pragma: no cover\n warnings.warn(\n \"The ``dimensions`` property has been deprecated and \"\n \"will be removed in xarray v0.11.\",\n FutureWarning,\n stacklevel=2,\n )\n return self.get_dimensions()\n\n def close(self):\n pass\n\n def __enter__(self):\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n self.close()"},{"col":4,"comment":"null","endLoc":81,"header":"def __iter__(self)","id":66,"name":"__iter__","nodeType":"Function","startLoc":80,"text":"def __iter__(self):\n return iter(self.variables)"},{"col":4,"comment":"null","endLoc":84,"header":"def __getitem__(self, key)","id":67,"name":"__getitem__","nodeType":"Function","startLoc":83,"text":"def __getitem__(self, key):\n return self.variables[key]"},{"col":4,"comment":"null","endLoc":87,"header":"def __len__(self)","id":68,"name":"__len__","nodeType":"Function","startLoc":86,"text":"def __len__(self):\n return len(self.variables)"},{"col":4,"comment":"null","endLoc":90,"header":"def get_dimensions(self)","id":69,"name":"get_dimensions","nodeType":"Function","startLoc":89,"text":"def get_dimensions(self): # pragma: no cover\n raise NotImplementedError()"},{"col":4,"comment":"null","endLoc":93,"header":"def get_attrs(self)","id":70,"name":"get_attrs","nodeType":"Function","startLoc":92,"text":"def 
get_attrs(self): # pragma: no cover\n raise NotImplementedError()"},{"col":4,"comment":"null","endLoc":96,"header":"def get_variables(self)","id":71,"name":"get_variables","nodeType":"Function","startLoc":95,"text":"def get_variables(self): # pragma: no cover\n raise NotImplementedError()"},{"col":4,"comment":"null","endLoc":99,"header":"def get_encoding(self)","id":72,"name":"get_encoding","nodeType":"Function","startLoc":98,"text":"def get_encoding(self):\n return {}"},{"col":4,"comment":"\n This loads the variables and attributes simultaneously.\n A centralized loading function makes it easier to create\n data stores that do automatic encoding/decoding.\n\n For example::\n\n class SuffixAppendingDataStore(AbstractDataStore):\n\n def load(self):\n variables, attributes = AbstractDataStore.load(self)\n variables = {'%s_suffix' % k: v\n for k, v in variables.items()}\n attributes = {'%s_suffix' % k: v\n for k, v in attributes.items()}\n return variables, attributes\n\n This function will be called anytime variables or attributes\n are requested, so care should be taken to make sure its fast.\n ","endLoc":126,"header":"def load(self)","id":73,"name":"load","nodeType":"Function","startLoc":101,"text":"def load(self):\n \"\"\"\n This loads the variables and attributes simultaneously.\n A centralized loading function makes it easier to create\n data stores that do automatic encoding/decoding.\n\n For example::\n\n class SuffixAppendingDataStore(AbstractDataStore):\n\n def load(self):\n variables, attributes = AbstractDataStore.load(self)\n variables = {'%s_suffix' % k: v\n for k, v in variables.items()}\n attributes = {'%s_suffix' % k: v\n for k, v in attributes.items()}\n return variables, attributes\n\n This function will be called anytime variables or attributes\n are requested, so care should be taken to make sure its fast.\n \"\"\"\n variables = FrozenDict(\n (_decode_variable_name(k), v) for k, v in self.get_variables().items()\n )\n attributes = FrozenDict(self.get_attrs())\n return variables, attributes"},{"col":0,"comment":"Adds time encoding to time bounds variables.\n\n Variables handling time bounds (\"Cell boundaries\" in the CF\n conventions) do not necessarily carry the necessary attributes to be\n decoded. This copies the encoding from the time variable to the\n associated bounds variable so that we write CF-compliant files.\n\n See Also:\n\n http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/\n cf-conventions.html#cell-boundaries\n\n https://github.com/pydata/xarray/issues/2565\n ","endLoc":433,"header":"def _update_bounds_encoding(variables)","id":74,"name":"_update_bounds_encoding","nodeType":"Function","startLoc":386,"text":"def _update_bounds_encoding(variables):\n \"\"\"Adds time encoding to time bounds variables.\n\n Variables handling time bounds (\"Cell boundaries\" in the CF\n conventions) do not necessarily carry the necessary attributes to be\n decoded. 
This copies the encoding from the time variable to the\n associated bounds variable so that we write CF-compliant files.\n\n See Also:\n\n http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/\n cf-conventions.html#cell-boundaries\n\n https://github.com/pydata/xarray/issues/2565\n \"\"\"\n\n # For all time variables with bounds\n for v in variables.values():\n attrs = v.attrs\n encoding = v.encoding\n has_date_units = \"units\" in encoding and \"since\" in encoding[\"units\"]\n is_datetime_type = np.issubdtype(\n v.dtype, np.datetime64\n ) or contains_cftime_datetimes(v)\n\n if (\n is_datetime_type\n and not has_date_units\n and \"bounds\" in attrs\n and attrs[\"bounds\"] in variables\n ):\n warnings.warn(\n \"Variable '{0}' has datetime type and a \"\n \"bounds variable but {0}.encoding does not have \"\n \"units specified. The units encodings for '{0}' \"\n \"and '{1}' will be determined independently \"\n \"and may not be equal, counter to CF-conventions. \"\n \"If this is a concern, specify a units encoding for \"\n \"'{0}' before writing to a file.\".format(v.name, attrs[\"bounds\"]),\n UserWarning,\n )\n\n if has_date_units and \"bounds\" in attrs:\n if attrs[\"bounds\"] in variables:\n bounds_encoding = variables[attrs[\"bounds\"]].encoding\n bounds_encoding.setdefault(\"units\", encoding[\"units\"])\n if \"calendar\" in encoding:\n bounds_encoding.setdefault(\"calendar\", encoding[\"calendar\"])"},{"col":0,"comment":"\n Converts an Variable into an Variable which follows some\n of the CF conventions:\n\n - Nans are masked using _FillValue (or the deprecated missing_value)\n - Rescaling via: scale_factor and add_offset\n - datetimes are converted to the CF 'units since time' format\n - dtype encodings are enforced.\n\n Parameters\n ----------\n var : xarray.Variable\n A variable holding un-encoded data.\n\n Returns\n -------\n out : xarray.Variable\n A variable which has been encoded as described above.\n ","endLoc":257,"header":"def encode_cf_variable(var, needs_copy=True, name=None)","id":75,"name":"encode_cf_variable","nodeType":"Function","startLoc":221,"text":"def encode_cf_variable(var, needs_copy=True, name=None):\n \"\"\"\n Converts an Variable into an Variable which follows some\n of the CF conventions:\n\n - Nans are masked using _FillValue (or the deprecated missing_value)\n - Rescaling via: scale_factor and add_offset\n - datetimes are converted to the CF 'units since time' format\n - dtype encodings are enforced.\n\n Parameters\n ----------\n var : xarray.Variable\n A variable holding un-encoded data.\n\n Returns\n -------\n out : xarray.Variable\n A variable which has been encoded as described above.\n \"\"\"\n ensure_not_multiindex(var, name=name)\n\n for coder in [\n times.CFDatetimeCoder(),\n times.CFTimedeltaCoder(),\n variables.CFScaleOffsetCoder(),\n variables.CFMaskCoder(),\n variables.UnsignedIntegerCoder(),\n ]:\n var = coder.encode(var, name=name)\n\n # TODO(shoyer): convert all of these to use coders, too:\n var = maybe_encode_nonstring_dtype(var, name=name)\n var = maybe_default_fill_value(var)\n var = maybe_encode_bools(var)\n var = ensure_dtype_not_object(var, name=name)\n return var"},{"col":0,"comment":"null","endLoc":162,"header":"def ensure_not_multiindex(var, name=None)","id":76,"name":"ensure_not_multiindex","nodeType":"Function","startLoc":154,"text":"def ensure_not_multiindex(var, name=None):\n if isinstance(var, IndexVariable) and isinstance(var.to_index(), pd.MultiIndex):\n raise NotImplementedError(\n \"variable {!r} is a MultiIndex, 
which cannot yet be \"\n \"serialized to netCDF files \"\n \"(https://github.com/pydata/xarray/issues/1077). Use \"\n \"reset_index() to convert MultiIndex levels into coordinate \"\n \"variables instead.\".format(name)\n )"},{"col":0,"comment":"null","endLoc":30,"header":"def _decode_variable_name(name)","id":77,"name":"_decode_variable_name","nodeType":"Function","startLoc":27,"text":"def _decode_variable_name(name):\n if name == NONE_VAR_NAME:\n name = None\n return name"},{"col":0,"comment":"Test equivalence of two dict-like objects. If any of the values are\n numpy arrays, compare them correctly.\n\n Parameters\n ----------\n first, second : dict-like\n Dictionaries to compare for equality\n compat : function, optional\n Binary operator to determine if two values are compatible. By default,\n checks for equivalence.\n\n Returns\n -------\n equals : bool\n True if the dictionaries are equal\n ","endLoc":341,"header":"def dict_equiv(\n first: Mapping[K, V],\n second: Mapping[K, V],\n compat: Callable[[V, V], bool] = equivalent,\n) -> bool","id":78,"name":"dict_equiv","nodeType":"Function","startLoc":314,"text":"def dict_equiv(\n first: Mapping[K, V],\n second: Mapping[K, V],\n compat: Callable[[V, V], bool] = equivalent,\n) -> bool:\n \"\"\"Test equivalence of two dict-like objects. If any of the values are\n numpy arrays, compare them correctly.\n\n Parameters\n ----------\n first, second : dict-like\n Dictionaries to compare for equality\n compat : function, optional\n Binary operator to determine if two values are compatible. By default,\n checks for equivalence.\n\n Returns\n -------\n equals : bool\n True if the dictionaries are equal\n \"\"\"\n for k in first:\n if k not in second or not compat(first[k], second[k]):\n return False\n for k in second:\n if k not in first:\n return False\n return True"},{"col":0,"comment":"Check if an xarray.Variable contains cftime.datetime objects\n ","endLoc":1454,"header":"def contains_cftime_datetimes(var) -> bool","id":79,"name":"contains_cftime_datetimes","nodeType":"Function","startLoc":1451,"text":"def contains_cftime_datetimes(var) -> bool:\n \"\"\"Check if an xarray.Variable contains cftime.datetime objects\n \"\"\"\n return _contains_cftime_datetimes(var.data)"},{"col":0,"comment":"Convert an object into a Variable.\n\n Parameters\n ----------\n obj : object\n Object to convert into a Variable.\n\n - If the object is already a Variable, return a shallow copy.\n - Otherwise, if the object has 'dims' and 'data' attributes, convert\n it into a new Variable.\n - If all else fails, attempt to convert the object into a Variable by\n unpacking it into the arguments for creating a new Variable.\n name : str, optional\n If provided:\n\n - `obj` can be a 1D array, which is assumed to label coordinate values\n along a dimension of this given name.\n - Variables with name matching one of their dimensions are converted\n into `IndexVariable` objects.\n\n Returns\n -------\n var : Variable\n The newly created variable.\n\n ","endLoc":143,"header":"def as_variable(obj, name=None) -> \"Union[Variable, IndexVariable]\"","id":80,"name":"as_variable","nodeType":"Function","startLoc":65,"text":"def as_variable(obj, name=None) -> \"Union[Variable, IndexVariable]\":\n \"\"\"Convert an object into a Variable.\n\n Parameters\n ----------\n obj : object\n Object to convert into a Variable.\n\n - If the object is already a Variable, return a shallow copy.\n - Otherwise, if the object has 'dims' and 'data' attributes, convert\n it into a new Variable.\n - If all 
else fails, attempt to convert the object into a Variable by\n unpacking it into the arguments for creating a new Variable.\n name : str, optional\n If provided:\n\n - `obj` can be a 1D array, which is assumed to label coordinate values\n along a dimension of this given name.\n - Variables with name matching one of their dimensions are converted\n into `IndexVariable` objects.\n\n Returns\n -------\n var : Variable\n The newly created variable.\n\n \"\"\"\n from .dataarray import DataArray\n\n # TODO: consider extending this method to automatically handle Iris and\n if isinstance(obj, DataArray):\n # extract the primary Variable from DataArrays\n obj = obj.variable\n\n if isinstance(obj, Variable):\n obj = obj.copy(deep=False)\n elif isinstance(obj, tuple):\n try:\n obj = Variable(*obj)\n except (TypeError, ValueError) as error:\n # use .format() instead of % because it handles tuples consistently\n raise error.__class__(\n \"Could not convert tuple of form \"\n \"(dims, data[, attrs, encoding]): \"\n \"{} to Variable.\".format(obj)\n )\n elif utils.is_scalar(obj):\n obj = Variable([], obj)\n elif isinstance(obj, (pd.Index, IndexVariable)) and obj.name is not None:\n obj = Variable(obj.name, obj)\n elif isinstance(obj, (set, dict)):\n raise TypeError(\"variable %r has invalid type %r\" % (name, type(obj)))\n elif name is not None:\n data = as_compatible_data(obj)\n if data.ndim != 1:\n raise MissingDimensionsError(\n \"cannot set variable %r with %r-dimensional data \"\n \"without explicit dimension names. Pass a tuple of \"\n \"(dims, data) instead.\" % (name, data.ndim)\n )\n obj = Variable(name, data, fastpath=True)\n else:\n raise TypeError(\n \"unable to convert object into a variable without an \"\n \"explicit list of dimensions: %r\" % obj\n )\n\n if name is not None and name in obj.dims:\n # convert the Variable into an Index\n if obj.ndim != 1:\n raise MissingDimensionsError(\n \"%r has more than 1-dimension and the same name as one of its \"\n \"dimensions %r. 
xarray disallows such variables because they \"\n \"conflict with the coordinates used to label \"\n \"dimensions.\" % (name, obj.dims)\n )\n obj = obj.to_index_variable()\n\n return obj"},{"className":"BackendArray","col":0,"comment":"null","endLoc":74,"id":81,"nodeType":"Class","startLoc":69,"text":"class BackendArray(NdimSizeLenMixin, indexing.ExplicitlyIndexed):\n __slots__ = ()\n\n def __array__(self, dtype=None):\n key = indexing.BasicIndexer((slice(None),) * self.ndim)\n return np.asarray(self[key], dtype=dtype)"},{"col":0,"comment":"Check if an array contains cftime.datetime objects\n ","endLoc":1448,"header":"def _contains_cftime_datetimes(array) -> bool","id":82,"name":"_contains_cftime_datetimes","nodeType":"Function","startLoc":1432,"text":"def _contains_cftime_datetimes(array) -> bool:\n \"\"\"Check if an array contains cftime.datetime objects\n \"\"\"\n try:\n from cftime import datetime as cftime_datetime\n except ImportError:\n return False\n else:\n if array.dtype == np.dtype(\"O\") and array.size > 0:\n sample = array.ravel()[0]\n if isinstance(sample, dask_array_type):\n sample = sample.compute()\n if isinstance(sample, np.ndarray):\n sample = sample.item()\n return isinstance(sample, cftime_datetime)\n else:\n return False"},{"attributeType":"null","col":4,"comment":"null","endLoc":9,"id":83,"name":"dask_array_type","nodeType":"Attribute","startLoc":9,"text":"dask_array_type"},{"className":"NdimSizeLenMixin","col":0,"comment":"Mixin class that extends a class that defines a ``shape`` property to\n one that also defines ``ndim``, ``size`` and ``__len__``.\n ","endLoc":502,"id":84,"nodeType":"Class","startLoc":482,"text":"class NdimSizeLenMixin:\n \"\"\"Mixin class that extends a class that defines a ``shape`` property to\n one that also defines ``ndim``, ``size`` and ``__len__``.\n \"\"\"\n\n __slots__ = ()\n\n @property\n def ndim(self: Any) -> int:\n return len(self.shape)\n\n @property\n def size(self: Any) -> int:\n # cast to int so that shape = () gives size = 1\n return int(np.prod(self.shape))\n\n def __len__(self: Any) -> int:\n try:\n return self.shape[0]\n except IndexError:\n raise TypeError(\"len() of unsized object\")"},{"col":4,"comment":"null","endLoc":491,"header":"@property\n def ndim(self: Any) -> int","id":85,"name":"ndim","nodeType":"Function","startLoc":489,"text":"@property\n def ndim(self: Any) -> int:\n return len(self.shape)"},{"col":4,"comment":"null","endLoc":496,"header":"@property\n def size(self: Any) -> int","id":86,"name":"size","nodeType":"Function","startLoc":493,"text":"@property\n def size(self: Any) -> int:\n # cast to int so that shape = () gives size = 1\n return int(np.prod(self.shape))"},{"col":4,"comment":"null","endLoc":502,"header":"def __len__(self: Any) -> int","id":87,"name":"__len__","nodeType":"Function","startLoc":498,"text":"def __len__(self: Any) -> int:\n try:\n return self.shape[0]\n except IndexError:\n raise TypeError(\"len() of unsized object\")"},{"attributeType":"null","col":4,"comment":"null","endLoc":487,"id":88,"name":"__slots__","nodeType":"Attribute","startLoc":487,"text":"__slots__"},{"className":"ExplicitlyIndexed","col":0,"comment":"Mixin to mark support for Indexer subclasses in indexing.\n ","endLoc":469,"id":89,"nodeType":"Class","startLoc":465,"text":"class ExplicitlyIndexed:\n \"\"\"Mixin to mark support for Indexer subclasses in indexing.\n \"\"\"\n\n __slots__ = ()"},{"col":4,"comment":"null","endLoc":438,"header":"def __init__(self, 
use_cftime=None)","id":90,"name":"__init__","nodeType":"Function","startLoc":437,"text":"def __init__(self, use_cftime=None):\n self.use_cftime = use_cftime"},{"col":0,"comment":"\n replace nan in a by val, and returns the replaced array and the nan\n position\n ","endLoc":19,"header":"def _replace_nan(a, val)","id":91,"name":"_replace_nan","nodeType":"Function","startLoc":13,"text":"def _replace_nan(a, val):\n \"\"\"\n replace nan in a by val, and returns the replaced array and the nan\n position\n \"\"\"\n mask = isnull(a)\n return where_method(val, mask, a), mask"},{"col":0,"comment":"\n xarray version of pandas.core.nanops._maybe_null_out\n ","endLoc":43,"header":"def _maybe_null_out(result, axis, mask, min_count=1)","id":92,"name":"_maybe_null_out","nodeType":"Function","startLoc":22,"text":"def _maybe_null_out(result, axis, mask, min_count=1):\n \"\"\"\n xarray version of pandas.core.nanops._maybe_null_out\n \"\"\"\n if hasattr(axis, \"__len__\"): # if tuple or list\n raise ValueError(\n \"min_count is not available for reduction \" \"with more than one dimensions.\"\n )\n\n if axis is not None and getattr(result, \"ndim\", False):\n null_mask = (mask.shape[axis] - mask.sum(axis) - min_count) < 0\n if null_mask.any():\n dtype, fill_value = dtypes.maybe_promote(result.dtype)\n result = result.astype(dtype)\n result[null_mask] = fill_value\n\n elif getattr(result, \"dtype\", None) not in dtypes.NAT_TYPES:\n null_mask = mask.size - mask.sum()\n if null_mask < min_count:\n result = np.nan\n\n return result"},{"col":4,"comment":"null","endLoc":137,"header":"@property\n def variables(self)","id":93,"name":"variables","nodeType":"Function","startLoc":128,"text":"@property\n def variables(self): # pragma: no cover\n warnings.warn(\n \"The ``variables`` property has been deprecated and \"\n \"will be removed in xarray v0.11.\",\n FutureWarning,\n stacklevel=2,\n )\n variables, _ = self.load()\n return variables"},{"attributeType":"null","col":4,"comment":"null","endLoc":469,"id":94,"name":"__slots__","nodeType":"Attribute","startLoc":469,"text":"__slots__"},{"col":4,"comment":"null","endLoc":148,"header":"@property\n def attrs(self)","id":95,"name":"attrs","nodeType":"Function","startLoc":139,"text":"@property\n def attrs(self): # pragma: no cover\n warnings.warn(\n \"The ``attrs`` property has been deprecated and \"\n \"will be removed in xarray v0.11.\",\n FutureWarning,\n stacklevel=2,\n )\n _, attrs = self.load()\n return attrs"},{"col":4,"comment":"null","endLoc":74,"header":"def __array__(self, dtype=None)","id":96,"name":"__array__","nodeType":"Function","startLoc":72,"text":"def __array__(self, dtype=None):\n key = indexing.BasicIndexer((slice(None),) * self.ndim)\n return np.asarray(self[key], dtype=dtype)"},{"className":"ArrayWriter","col":0,"comment":"null","endLoc":209,"id":97,"nodeType":"Class","startLoc":170,"text":"class ArrayWriter:\n __slots__ = (\"sources\", \"targets\", \"regions\", \"lock\")\n\n def __init__(self, lock=None):\n self.sources = []\n self.targets = []\n self.regions = []\n self.lock = lock\n\n def add(self, source, target, region=None):\n if isinstance(source, dask_array_type):\n self.sources.append(source)\n self.targets.append(target)\n self.regions.append(region)\n else:\n if region:\n target[region] = source\n else:\n target[...] 
= source\n\n def sync(self, compute=True):\n if self.sources:\n import dask.array as da\n\n # TODO: consider wrapping targets with dask.delayed, if this makes\n # for any discernable difference in perforance, e.g.,\n # targets = [dask.delayed(t) for t in self.targets]\n\n delayed_store = da.store(\n self.sources,\n self.targets,\n lock=self.lock,\n compute=compute,\n flush=True,\n regions=self.regions,\n )\n self.sources = []\n self.targets = []\n self.regions = []\n return delayed_store"},{"col":4,"comment":"null","endLoc":177,"header":"def __init__(self, lock=None)","id":98,"name":"__init__","nodeType":"Function","startLoc":173,"text":"def __init__(self, lock=None):\n self.sources = []\n self.targets = []\n self.regions = []\n self.lock = lock"},{"col":4,"comment":"null","endLoc":188,"header":"def add(self, source, target, region=None)","id":99,"name":"add","nodeType":"Function","startLoc":179,"text":"def add(self, source, target, region=None):\n if isinstance(source, dask_array_type):\n self.sources.append(source)\n self.targets.append(target)\n self.regions.append(region)\n else:\n if region:\n target[region] = source\n else:\n target[...] = source"},{"col":4,"comment":"null","endLoc":158,"header":"@property\n def dimensions(self)","id":100,"name":"dimensions","nodeType":"Function","startLoc":150,"text":"@property\n def dimensions(self): # pragma: no cover\n warnings.warn(\n \"The ``dimensions`` property has been deprecated and \"\n \"will be removed in xarray v0.11.\",\n FutureWarning,\n stacklevel=2,\n )\n return self.get_dimensions()"},{"col":4,"comment":"null","endLoc":161,"header":"def close(self)","id":101,"name":"close","nodeType":"Function","startLoc":160,"text":"def close(self):\n pass"},{"col":4,"comment":"null","endLoc":164,"header":"def __enter__(self)","id":102,"name":"__enter__","nodeType":"Function","startLoc":163,"text":"def __enter__(self):\n return self"},{"col":4,"comment":"null","endLoc":167,"header":"def __exit__(self, exception_type, exception_value, traceback)","id":103,"name":"__exit__","nodeType":"Function","startLoc":166,"text":"def __exit__(self, exception_type, exception_value, traceback):\n self.close()"},{"attributeType":"null","col":4,"comment":"null","endLoc":78,"id":104,"name":"__slots__","nodeType":"Attribute","startLoc":78,"text":"__slots__"},{"col":4,"comment":"\n Encode the variables and attributes in this store\n\n Parameters\n ----------\n variables : dict-like\n Dictionary of key/value (variable name / xr.Variable) pairs\n attributes : dict-like\n Dictionary of key/value (attribute name / attribute) pairs\n\n Returns\n -------\n variables : dict-like\n attributes : dict-like\n\n ","endLoc":234,"header":"def encode(self, variables, attributes)","id":105,"name":"encode","nodeType":"Function","startLoc":215,"text":"def encode(self, variables, attributes):\n \"\"\"\n Encode the variables and attributes in this store\n\n Parameters\n ----------\n variables : dict-like\n Dictionary of key/value (variable name / xr.Variable) pairs\n attributes : dict-like\n Dictionary of key/value (attribute name / attribute) pairs\n\n Returns\n -------\n variables : dict-like\n attributes : dict-like\n\n \"\"\"\n variables = {k: self.encode_variable(v) for k, v in variables.items()}\n attributes = {k: self.encode_attribute(v) for k, v in attributes.items()}\n return variables, attributes"},{"col":0,"comment":"null","endLoc":103,"header":"def maybe_encode_nonstring_dtype(var, 
name=None)","id":106,"name":"maybe_encode_nonstring_dtype","nodeType":"Function","startLoc":82,"text":"def maybe_encode_nonstring_dtype(var, name=None):\n if \"dtype\" in var.encoding and var.encoding[\"dtype\"] not in (\"S1\", str):\n dims, data, attrs, encoding = _var_as_tuple(var)\n dtype = np.dtype(encoding.pop(\"dtype\"))\n if dtype != var.dtype:\n if np.issubdtype(dtype, np.integer):\n if (\n np.issubdtype(var.dtype, np.floating)\n and \"_FillValue\" not in var.attrs\n and \"missing_value\" not in var.attrs\n ):\n warnings.warn(\n \"saving variable %s with floating \"\n \"point data as an integer dtype without \"\n \"any _FillValue to use for NaNs\" % name,\n SerializationWarning,\n stacklevel=10,\n )\n data = duck_array_ops.around(data)[...]\n data = data.astype(dtype=dtype)\n var = Variable(dims, data, attrs, encoding)\n return var"},{"col":0,"comment":"null","endLoc":79,"header":"def _var_as_tuple(var)","id":107,"name":"_var_as_tuple","nodeType":"Function","startLoc":78,"text":"def _var_as_tuple(var):\n return var.dims, var.data, var.attrs.copy(), var.encoding.copy()"},{"col":4,"comment":"null","endLoc":371,"header":"def __init__(self, key)","id":108,"name":"__init__","nodeType":"Function","startLoc":353,"text":"def __init__(self, key):\n if not isinstance(key, tuple):\n raise TypeError(\"key must be a tuple: {!r}\".format(key))\n\n new_key = []\n for k in key:\n if isinstance(k, integer_types):\n k = int(k)\n elif isinstance(k, slice):\n k = as_integer_slice(k)\n else:\n raise TypeError(\n \"unexpected indexer type for {}: {!r}\".format(\n type(self).__name__, k\n )\n )\n new_key.append(k)\n\n super().__init__(new_key)"},{"col":4,"comment":"encode one variable","endLoc":238,"header":"def encode_variable(self, v)","id":109,"name":"encode_variable","nodeType":"Function","startLoc":236,"text":"def encode_variable(self, v):\n \"\"\"encode one variable\"\"\"\n return v"},{"col":4,"comment":"encode one attribute","endLoc":242,"header":"def encode_attribute(self, a)","id":110,"name":"encode_attribute","nodeType":"Function","startLoc":240,"text":"def encode_attribute(self, a):\n \"\"\"encode one attribute\"\"\"\n return a"},{"col":4,"comment":"null","endLoc":245,"header":"def set_dimension(self, d, l)","id":111,"name":"set_dimension","nodeType":"Function","startLoc":244,"text":"def set_dimension(self, d, l): # pragma: no cover\n raise NotImplementedError()"},{"col":4,"comment":"null","endLoc":248,"header":"def set_attribute(self, k, v)","id":112,"name":"set_attribute","nodeType":"Function","startLoc":247,"text":"def set_attribute(self, k, v): # pragma: no cover\n raise NotImplementedError()"},{"col":4,"comment":"null","endLoc":251,"header":"def set_variable(self, k, v)","id":113,"name":"set_variable","nodeType":"Function","startLoc":250,"text":"def set_variable(self, k, v): # pragma: no cover\n raise NotImplementedError()"},{"col":4,"comment":"\n in stores, variables are all variables AND coordinates\n in xarray.Dataset variables are variables NOT coordinates,\n so here we pass the whole dataset in instead of doing\n dataset.variables\n ","endLoc":260,"header":"def store_dataset(self, dataset)","id":114,"name":"store_dataset","nodeType":"Function","startLoc":253,"text":"def store_dataset(self, dataset):\n \"\"\"\n in stores, variables are all variables AND coordinates\n in xarray.Dataset variables are variables NOT coordinates,\n so here we pass the whole dataset in instead of doing\n dataset.variables\n \"\"\"\n self.store(dataset, dataset.attrs)"},{"col":4,"comment":"\n Top level 
method for putting data on this store, this method:\n - encodes variables/attributes\n - sets dimensions\n - sets variables\n\n Parameters\n ----------\n variables : dict-like\n Dictionary of key/value (variable name / xr.Variable) pairs\n attributes : dict-like\n Dictionary of key/value (attribute name / attribute) pairs\n check_encoding_set : list-like\n List of variables that should be checked for invalid encoding\n values\n writer : ArrayWriter\n unlimited_dims : list-like\n List of dimension names that should be treated as unlimited\n dimensions.\n ","endLoc":299,"header":"def store(\n self,\n variables,\n attributes,\n check_encoding_set=frozenset(),\n writer=None,\n unlimited_dims=None,\n )","id":115,"name":"store","nodeType":"Function","startLoc":262,"text":"def store(\n self,\n variables,\n attributes,\n check_encoding_set=frozenset(),\n writer=None,\n unlimited_dims=None,\n ):\n \"\"\"\n Top level method for putting data on this store, this method:\n - encodes variables/attributes\n - sets dimensions\n - sets variables\n\n Parameters\n ----------\n variables : dict-like\n Dictionary of key/value (variable name / xr.Variable) pairs\n attributes : dict-like\n Dictionary of key/value (attribute name / attribute) pairs\n check_encoding_set : list-like\n List of variables that should be checked for invalid encoding\n values\n writer : ArrayWriter\n unlimited_dims : list-like\n List of dimension names that should be treated as unlimited\n dimensions.\n \"\"\"\n if writer is None:\n writer = ArrayWriter()\n\n variables, attributes = self.encode(variables, attributes)\n\n self.set_attributes(attributes)\n self.set_dimensions(variables, unlimited_dims=unlimited_dims)\n self.set_variables(\n variables, check_encoding_set, writer, unlimited_dims=unlimited_dims\n )"},{"col":0,"comment":"Prepare and wrap data to put in a Variable.\n\n - If data does not have the necessary attributes, convert it to ndarray.\n - If data has dtype=datetime64, ensure that it has ns precision. If it's a\n pandas.Timestamp, convert it to datetime64.\n - If data is already a pandas or xarray object (other than an Index), just\n use the values.\n\n Finally, wrap it up with an adapter if necessary.\n ","endLoc":233,"header":"def as_compatible_data(data, fastpath=False)","id":117,"name":"as_compatible_data","nodeType":"Function","startLoc":166,"text":"def as_compatible_data(data, fastpath=False):\n \"\"\"Prepare and wrap data to put in a Variable.\n\n - If data does not have the necessary attributes, convert it to ndarray.\n - If data has dtype=datetime64, ensure that it has ns precision. 
If it's a\n pandas.Timestamp, convert it to datetime64.\n - If data is already a pandas or xarray object (other than an Index), just\n use the values.\n\n Finally, wrap it up with an adapter if necessary.\n \"\"\"\n if fastpath and getattr(data, \"ndim\", 0) > 0:\n # can't use fastpath (yet) for scalars\n return _maybe_wrap_data(data)\n\n if isinstance(data, Variable):\n return data.data\n\n if isinstance(data, NON_NUMPY_SUPPORTED_ARRAY_TYPES):\n return _maybe_wrap_data(data)\n\n if isinstance(data, tuple):\n data = utils.to_0d_object_array(data)\n\n if isinstance(data, pd.Timestamp):\n # TODO: convert, handle datetime objects, too\n data = np.datetime64(data.value, \"ns\")\n\n if isinstance(data, timedelta):\n data = np.timedelta64(getattr(data, \"value\", data), \"ns\")\n\n # we don't want nested self-described arrays\n data = getattr(data, \"values\", data)\n\n if isinstance(data, np.ma.MaskedArray):\n mask = np.ma.getmaskarray(data)\n if mask.any():\n dtype, fill_value = dtypes.maybe_promote(data.dtype)\n data = np.asarray(data, dtype=dtype)\n data[mask] = fill_value\n else:\n data = np.asarray(data)\n\n if not isinstance(data, np.ndarray):\n if hasattr(data, \"__array_function__\"):\n if IS_NEP18_ACTIVE:\n return data\n else:\n raise TypeError(\n \"Got an NumPy-like array type providing the \"\n \"__array_function__ protocol but NEP18 is not enabled. \"\n \"Check that numpy >= v1.16 and that the environment \"\n 'variable \"NUMPY_EXPERIMENTAL_ARRAY_FUNCTION\" is set to '\n '\"1\"'\n )\n\n # validate whether the data is valid data types\n data = np.asarray(data)\n\n if isinstance(data, np.ndarray):\n if data.dtype.kind == \"O\":\n data = _possibly_convert_objects(data)\n elif data.dtype.kind == \"M\":\n data = np.asarray(data, \"datetime64[ns]\")\n elif data.dtype.kind == \"m\":\n data = np.asarray(data, \"timedelta64[ns]\")\n\n return _maybe_wrap_data(data)"},{"col":0,"comment":"null","endLoc":340,"header":"def as_integer_slice(value)","id":118,"name":"as_integer_slice","nodeType":"Function","startLoc":336,"text":"def as_integer_slice(value):\n start = as_integer_or_none(value.start)\n stop = as_integer_or_none(value.stop)\n step = as_integer_or_none(value.step)\n return slice(start, stop, step)"},{"col":0,"comment":"null","endLoc":333,"header":"def as_integer_or_none(value)","id":119,"name":"as_integer_or_none","nodeType":"Function","startLoc":332,"text":"def as_integer_or_none(value):\n return None if value is None else operator.index(value)"},{"col":0,"comment":"\n Put pandas.Index and numpy.ndarray arguments in adapter objects to ensure\n they can be indexed properly.\n\n NumpyArrayAdapter, PandasIndexAdapter and LazilyOuterIndexedArray should\n all pass through unmodified.\n ","endLoc":156,"header":"def _maybe_wrap_data(data)","id":120,"name":"_maybe_wrap_data","nodeType":"Function","startLoc":146,"text":"def _maybe_wrap_data(data):\n \"\"\"\n Put pandas.Index and numpy.ndarray arguments in adapter objects to ensure\n they can be indexed properly.\n\n NumpyArrayAdapter, PandasIndexAdapter and LazilyOuterIndexedArray should\n all pass through unmodified.\n \"\"\"\n if isinstance(data, pd.Index):\n return PandasIndexAdapter(data)\n return data"},{"col":4,"comment":"\n This provides a centralized method to set the dataset attributes on the\n data store.\n\n Parameters\n ----------\n attributes : dict-like\n Dictionary of key/value (attribute name / attribute) pairs\n ","endLoc":312,"header":"def set_attributes(self, 
attributes)","id":121,"name":"set_attributes","nodeType":"Function","startLoc":301,"text":"def set_attributes(self, attributes):\n \"\"\"\n This provides a centralized method to set the dataset attributes on the\n data store.\n\n Parameters\n ----------\n attributes : dict-like\n Dictionary of key/value (attribute name / attribute) pairs\n \"\"\"\n for k, v in attributes.items():\n self.set_attribute(k, v)"},{"col":4,"comment":"\n This provides a centralized method to set the dimensions on the data\n store.\n\n Parameters\n ----------\n variables : dict-like\n Dictionary of key/value (variable name / xr.Variable) pairs\n unlimited_dims : list-like\n List of dimension names that should be treated as unlimited\n dimensions.\n ","endLoc":373,"header":"def set_dimensions(self, variables, unlimited_dims=None)","id":122,"name":"set_dimensions","nodeType":"Function","startLoc":341,"text":"def set_dimensions(self, variables, unlimited_dims=None):\n \"\"\"\n This provides a centralized method to set the dimensions on the data\n store.\n\n Parameters\n ----------\n variables : dict-like\n Dictionary of key/value (variable name / xr.Variable) pairs\n unlimited_dims : list-like\n List of dimension names that should be treated as unlimited\n dimensions.\n \"\"\"\n if unlimited_dims is None:\n unlimited_dims = set()\n\n existing_dims = self.get_dimensions()\n\n dims = {}\n for v in unlimited_dims: # put unlimited_dims first\n dims[v] = None\n for v in variables.values():\n dims.update(dict(zip(v.dims, v.shape)))\n\n for dim, length in dims.items():\n if dim in existing_dims and length != existing_dims[dim]:\n raise ValueError(\n \"Unable to update size for existing dimension\"\n \"%r (%d != %d)\" % (dim, length, existing_dims[dim])\n )\n elif dim not in existing_dims:\n is_unlimited = dim in unlimited_dims\n self.set_dimension(dim, length, is_unlimited)"},{"col":4,"comment":"null","endLoc":1353,"header":"def __init__(self, array: Any, dtype: DTypeLike = None)","id":123,"name":"__init__","nodeType":"Function","startLoc":1339,"text":"def __init__(self, array: Any, dtype: DTypeLike = None):\n self.array = utils.safe_cast_to_index(array)\n if dtype is None:\n if isinstance(array, pd.PeriodIndex):\n dtype = np.dtype(\"O\")\n elif hasattr(array, \"categories\"):\n # category isn't a real numpy dtype\n dtype = array.categories.dtype\n elif not utils.is_valid_numpy_dtype(array.dtype):\n dtype = np.dtype(\"O\")\n else:\n dtype = array.dtype\n else:\n dtype = np.dtype(dtype)\n self._dtype = dtype"},{"col":0,"comment":"Given an array, safely cast it to a pandas.Index.\n\n If it is already a pandas.Index, return it unchanged.\n\n Unlike pandas.Index, if the array has dtype=object or dtype=timedelta64,\n this function will not attempt to do automatic type conversion but will\n always return an index with dtype=object.\n ","endLoc":102,"header":"def safe_cast_to_index(array: Any) -> pd.Index","id":124,"name":"safe_cast_to_index","nodeType":"Function","startLoc":84,"text":"def safe_cast_to_index(array: Any) -> pd.Index:\n \"\"\"Given an array, safely cast it to a pandas.Index.\n\n If it is already a pandas.Index, return it unchanged.\n\n Unlike pandas.Index, if the array has dtype=object or dtype=timedelta64,\n this function will not attempt to do automatic type conversion but will\n always return an index with dtype=object.\n \"\"\"\n if isinstance(array, pd.Index):\n index = array\n elif hasattr(array, \"to_index\"):\n index = array.to_index()\n else:\n kwargs = {}\n if hasattr(array, \"dtype\") and array.dtype.kind 
== \"O\":\n kwargs[\"dtype\"] = object\n index = pd.Index(np.asarray(array), **kwargs)\n return _maybe_cast_to_cftimeindex(index)"},{"col":4,"comment":"null","endLoc":322,"header":"def __init__(self, key)","id":125,"name":"__init__","nodeType":"Function","startLoc":319,"text":"def __init__(self, key):\n if type(self) is ExplicitIndexer:\n raise TypeError(\"cannot instantiate base ExplicitIndexer objects\")\n self._key = tuple(key)"},{"col":0,"comment":"null","endLoc":75,"header":"def _maybe_cast_to_cftimeindex(index: pd.Index) -> pd.Index","id":126,"name":"_maybe_cast_to_cftimeindex","nodeType":"Function","startLoc":66,"text":"def _maybe_cast_to_cftimeindex(index: pd.Index) -> pd.Index:\n from ..coding.cftimeindex import CFTimeIndex\n\n if len(index) > 0 and index.dtype == \"O\":\n try:\n return CFTimeIndex(index)\n except (ImportError, TypeError):\n return index\n else:\n return index"},{"attributeType":"null","col":4,"comment":"null","endLoc":70,"id":127,"name":"__slots__","nodeType":"Attribute","startLoc":70,"text":"__slots__"},{"col":0,"comment":"null","endLoc":114,"header":"def maybe_default_fill_value(var)","id":128,"name":"maybe_default_fill_value","nodeType":"Function","startLoc":106,"text":"def maybe_default_fill_value(var):\n # make NaN the fill value for float types:\n if (\n \"_FillValue\" not in var.attrs\n and \"_FillValue\" not in var.encoding\n and np.issubdtype(var.dtype, np.floating)\n ):\n var.attrs[\"_FillValue\"] = var.dtype.type(np.nan)\n return var"},{"col":4,"comment":"\n This provides a centralized method to set the variables on the data\n store.\n\n Parameters\n ----------\n variables : dict-like\n Dictionary of key/value (variable name / xr.Variable) pairs\n check_encoding_set : list-like\n List of variables that should be checked for invalid encoding\n values\n writer : ArrayWriter\n unlimited_dims : list-like\n List of dimension names that should be treated as unlimited\n dimensions.\n ","endLoc":339,"header":"def set_variables(self, variables, check_encoding_set, writer, unlimited_dims=None)","id":129,"name":"set_variables","nodeType":"Function","startLoc":314,"text":"def set_variables(self, variables, check_encoding_set, writer, unlimited_dims=None):\n \"\"\"\n This provides a centralized method to set the variables on the data\n store.\n\n Parameters\n ----------\n variables : dict-like\n Dictionary of key/value (variable name / xr.Variable) pairs\n check_encoding_set : list-like\n List of variables that should be checked for invalid encoding\n values\n writer : ArrayWriter\n unlimited_dims : list-like\n List of dimension names that should be treated as unlimited\n dimensions.\n \"\"\"\n\n for vn, v in variables.items():\n name = _encode_variable_name(vn)\n check = vn in check_encoding_set\n target, source = self.prepare_variable(\n name, v, check, unlimited_dims=unlimited_dims\n )\n\n writer.add(source, target)"},{"className":"CachingFileManager","col":0,"comment":"Wrapper for automatically opening and closing file objects.\n\n Unlike files, CachingFileManager objects can be safely pickled and passed\n between processes. They should be explicitly closed to release resources,\n but a per-process least-recently-used cache for open files ensures that you\n can safely create arbitrarily large numbers of FileManager objects.\n\n Don't directly close files acquired from a FileManager. 
Instead, call\n FileManager.close(), which ensures that closed files are removed from the\n cache as well.\n\n Example usage:\n\n manager = FileManager(open, 'example.txt', mode='w')\n f = manager.acquire()\n f.write(...)\n manager.close() # ensures file is closed\n\n Note that as long as previous files are still cached, acquiring a file\n multiple times from the same FileManager is essentially free:\n\n f1 = manager.acquire()\n f2 = manager.acquire()\n assert f1 is f2\n\n ","endLoc":273,"id":130,"nodeType":"Class","startLoc":50,"text":"class CachingFileManager(FileManager):\n \"\"\"Wrapper for automatically opening and closing file objects.\n\n Unlike files, CachingFileManager objects can be safely pickled and passed\n between processes. They should be explicitly closed to release resources,\n but a per-process least-recently-used cache for open files ensures that you\n can safely create arbitrarily large numbers of FileManager objects.\n\n Don't directly close files acquired from a FileManager. Instead, call\n FileManager.close(), which ensures that closed files are removed from the\n cache as well.\n\n Example usage:\n\n manager = FileManager(open, 'example.txt', mode='w')\n f = manager.acquire()\n f.write(...)\n manager.close() # ensures file is closed\n\n Note that as long as previous files are still cached, acquiring a file\n multiple times from the same FileManager is essentially free:\n\n f1 = manager.acquire()\n f2 = manager.acquire()\n assert f1 is f2\n\n \"\"\"\n\n def __init__(\n self,\n opener,\n *args,\n mode=_DEFAULT_MODE,\n kwargs=None,\n lock=None,\n cache=None,\n ref_counts=None\n ):\n \"\"\"Initialize a FileManager.\n\n The cache and ref_counts arguments exist solely to facilitate\n dependency injection, and should only be set for tests.\n\n Parameters\n ----------\n opener : callable\n Function that when called like ``opener(*args, **kwargs)`` returns\n an open file object. The file object must implement a ``close()``\n method.\n *args\n Positional arguments for opener. A ``mode`` argument should be\n provided as a keyword argument (see below). All arguments must be\n hashable.\n mode : optional\n If provided, passed as a keyword argument to ``opener`` along with\n ``**kwargs``. ``mode='w' `` has special treatment: after the first\n call it is replaced by ``mode='a'`` in all subsequent function to\n avoid overriding the newly created file.\n kwargs : dict, optional\n Keyword arguments for opener, excluding ``mode``. All values must\n be hashable.\n lock : duck-compatible threading.Lock, optional\n Lock to use when modifying the cache inside acquire() and close().\n By default, uses a new threading.Lock() object. If set, this object\n should be pickleable.\n cache : MutableMapping, optional\n Mapping to use as a cache for open files. By default, uses xarray's\n global LRU file cache. 
Because ``cache`` typically points to a\n global variable and contains non-picklable file objects, an\n unpickled FileManager objects will be restored with the default\n cache.\n ref_counts : dict, optional\n Optional dict to use for keeping track the number of references to\n the same file.\n \"\"\"\n self._opener = opener\n self._args = args\n self._mode = mode\n self._kwargs = {} if kwargs is None else dict(kwargs)\n\n self._default_lock = lock is None or lock is False\n self._lock = threading.Lock() if self._default_lock else lock\n\n # cache[self._key] stores the file associated with this object.\n if cache is None:\n cache = FILE_CACHE\n self._cache = cache\n self._key = self._make_key()\n\n # ref_counts[self._key] stores the number of CachingFileManager objects\n # in memory referencing this same file. We use this to know if we can\n # close a file when the manager is deallocated.\n if ref_counts is None:\n ref_counts = REF_COUNTS\n self._ref_counter = _RefCounter(ref_counts)\n self._ref_counter.increment(self._key)\n\n def _make_key(self):\n \"\"\"Make a key for caching files in the LRU cache.\"\"\"\n value = (\n self._opener,\n self._args,\n \"a\" if self._mode == \"w\" else self._mode,\n tuple(sorted(self._kwargs.items())),\n )\n return _HashedSequence(value)\n\n @contextlib.contextmanager\n def _optional_lock(self, needs_lock):\n \"\"\"Context manager for optionally acquiring a lock.\"\"\"\n if needs_lock:\n with self._lock:\n yield\n else:\n yield\n\n def acquire(self, needs_lock=True):\n \"\"\"Acquire a file object from the manager.\n\n A new file is only opened if it has expired from the\n least-recently-used cache.\n\n This method uses a lock, which ensures that it is thread-safe. You can\n safely acquire a file in multiple threads at the same time, as long as\n the underlying file object is thread-safe.\n\n Returns\n -------\n An open file object, as returned by ``opener(*args, **kwargs)``.\n \"\"\"\n file, _ = self._acquire_with_cache_info(needs_lock)\n return file\n\n @contextlib.contextmanager\n def acquire_context(self, needs_lock=True):\n \"\"\"Context manager for acquiring a file.\"\"\"\n file, cached = self._acquire_with_cache_info(needs_lock)\n try:\n yield file\n except Exception:\n if not cached:\n self.close(needs_lock)\n raise\n\n def _acquire_with_cache_info(self, needs_lock=True):\n \"\"\"Acquire a file, returning the file and whether it was cached.\"\"\"\n with self._optional_lock(needs_lock):\n try:\n file = self._cache[self._key]\n except KeyError:\n kwargs = self._kwargs\n if self._mode is not _DEFAULT_MODE:\n kwargs = kwargs.copy()\n kwargs[\"mode\"] = self._mode\n file = self._opener(*self._args, **kwargs)\n if self._mode == \"w\":\n # ensure file doesn't get overriden when opened again\n self._mode = \"a\"\n self._cache[self._key] = file\n return file, False\n else:\n return file, True\n\n def close(self, needs_lock=True):\n \"\"\"Explicitly close any associated file object (if necessary).\"\"\"\n # TODO: remove needs_lock if/when we have a reentrant lock in\n # dask.distributed: https://github.com/dask/dask/issues/3832\n with self._optional_lock(needs_lock):\n default = None\n file = self._cache.pop(self._key, default)\n if file is not None:\n file.close()\n\n def __del__(self):\n # If we're the only CachingFileManger referencing a unclosed file, we\n # should remove it from the cache upon garbage collection.\n #\n # Keeping our own count of file references might seem like overkill,\n # but it's actually pretty common to reopen files with the same\n # 
variable name in a notebook or command line environment, e.g., to\n # fix the parameters used when opening a file:\n # >>> ds = xarray.open_dataset('myfile.nc')\n # >>> ds = xarray.open_dataset('myfile.nc', decode_times=False)\n # This second assignment to \"ds\" drops CPython's ref-count on the first\n # \"ds\" argument to zero, which can trigger garbage collections. So if\n # we didn't check whether another object is referencing 'myfile.nc',\n # the newly opened file would actually be immediately closed!\n ref_count = self._ref_counter.decrement(self._key)\n\n if not ref_count and self._key in self._cache:\n if acquire(self._lock, blocking=False):\n # Only close files if we can do so immediately.\n try:\n self.close(needs_lock=False)\n finally:\n self._lock.release()\n\n if OPTIONS[\"warn_for_unclosed_files\"]:\n warnings.warn(\n \"deallocating {}, but file is not already closed. \"\n \"This may indicate a bug.\".format(self),\n RuntimeWarning,\n stacklevel=2,\n )\n\n def __getstate__(self):\n \"\"\"State for pickling.\"\"\"\n # cache and ref_counts are intentionally omitted: we don't want to try\n # to serialize these global objects.\n lock = None if self._default_lock else self._lock\n return (self._opener, self._args, self._mode, self._kwargs, lock)\n\n def __setstate__(self, state):\n \"\"\"Restore from a pickle.\"\"\"\n opener, args, mode, kwargs, lock = state\n self.__init__(opener, *args, mode=mode, kwargs=kwargs, lock=lock)\n\n def __repr__(self):\n args_string = \", \".join(map(repr, self._args))\n if self._mode is not _DEFAULT_MODE:\n args_string += \", mode={!r}\".format(self._mode)\n return \"{}({!r}, {}, kwargs={})\".format(\n type(self).__name__, self._opener, args_string, self._kwargs\n )"},{"col":0,"comment":"null","endLoc":127,"header":"def maybe_encode_bools(var)","id":131,"name":"maybe_encode_bools","nodeType":"Function","startLoc":117,"text":"def maybe_encode_bools(var):\n if (\n (var.dtype == np.bool)\n and (\"dtype\" not in var.encoding)\n and (\"dtype\" not in var.attrs)\n ):\n dims, data, attrs, encoding = _var_as_tuple(var)\n attrs[\"dtype\"] = \"bool\"\n data = data.astype(dtype=\"i1\", copy=True)\n var = Variable(dims, data, attrs, encoding)\n return var"},{"col":0,"comment":"null","endLoc":24,"header":"def _encode_variable_name(name)","id":132,"name":"_encode_variable_name","nodeType":"Function","startLoc":21,"text":"def _encode_variable_name(name):\n if name is None:\n name = NONE_VAR_NAME\n return name"},{"attributeType":"null","col":4,"comment":"null","endLoc":213,"id":133,"name":"__slots__","nodeType":"Attribute","startLoc":213,"text":"__slots__"},{"className":"InMemoryDataStore","col":0,"comment":"\n Stores dimensions, variables and attributes in ordered dictionaries, making\n this store fast compared to stores which save to disk.\n\n This store exists purely for internal testing purposes.\n ","endLoc":45,"id":134,"nodeType":"Class","startLoc":9,"text":"class InMemoryDataStore(AbstractWritableDataStore):\n \"\"\"\n Stores dimensions, variables and attributes in ordered dictionaries, making\n this store fast compared to stores which save to disk.\n\n This store exists purely for internal testing purposes.\n \"\"\"\n\n def __init__(self, variables=None, attributes=None):\n self._variables = {} if variables is None else variables\n self._attributes = {} if attributes is None else attributes\n\n def get_attrs(self):\n return self._attributes\n\n def get_variables(self):\n return self._variables\n\n def get_dimensions(self):\n dims = {}\n for v in 
self._variables.values():\n for d, s in v.dims.items():\n dims[d] = s\n return dims\n\n def prepare_variable(self, k, v, *args, **kwargs):\n new_var = Variable(v.dims, np.empty_like(v), v.attrs)\n self._variables[k] = new_var\n return new_var, v.data\n\n def set_attribute(self, k, v):\n # copy to imitate writing to disk.\n self._attributes[k] = copy.deepcopy(v)\n\n def set_dimension(self, d, l, unlimited_dims=None):\n # in this model, dimensions are accounted for in the variables\n pass"},{"col":4,"comment":"null","endLoc":19,"header":"def __init__(self, variables=None, attributes=None)","id":135,"name":"__init__","nodeType":"Function","startLoc":17,"text":"def __init__(self, variables=None, attributes=None):\n self._variables = {} if variables is None else variables\n self._attributes = {} if attributes is None else attributes"},{"col":0,"comment":"null","endLoc":218,"header":"def ensure_dtype_not_object(var, name=None)","id":136,"name":"ensure_dtype_not_object","nodeType":"Function","startLoc":176,"text":"def ensure_dtype_not_object(var, name=None):\n # TODO: move this from conventions to backends? (it's not CF related)\n if var.dtype.kind == \"O\":\n dims, data, attrs, encoding = _var_as_tuple(var)\n\n if isinstance(data, dask_array_type):\n warnings.warn(\n \"variable {} has data in the form of a dask array with \"\n \"dtype=object, which means it is being loaded into memory \"\n \"to determine a data type that can be safely stored on disk. \"\n \"To avoid this, coerce this variable to a fixed-size dtype \"\n \"with astype() before saving it.\".format(name),\n SerializationWarning,\n )\n data = data.compute()\n\n missing = pd.isnull(data)\n if missing.any():\n # nb. this will fail for dask.array data\n non_missing_values = data[~missing]\n inferred_dtype = _infer_dtype(non_missing_values, name)\n\n # There is no safe bit-pattern for NA in typical binary string\n # formats, we so can't set a fill_value. 
Unfortunately, this means\n # we can't distinguish between missing values and empty strings.\n if strings.is_bytes_dtype(inferred_dtype):\n fill_value = b\"\"\n elif strings.is_unicode_dtype(inferred_dtype):\n fill_value = \"\"\n else:\n # insist on using float for numeric values\n if not np.issubdtype(inferred_dtype, np.floating):\n inferred_dtype = np.dtype(float)\n fill_value = inferred_dtype.type(np.nan)\n\n data = _copy_with_dtype(data, dtype=inferred_dtype)\n data[missing] = fill_value\n else:\n data = _copy_with_dtype(data, dtype=_infer_dtype(data, name))\n\n assert data.dtype.kind != \"O\" or data.dtype.metadata\n var = Variable(dims, data, attrs, encoding)\n return var"},{"col":4,"comment":"null","endLoc":22,"header":"def get_attrs(self)","id":137,"name":"get_attrs","nodeType":"Function","startLoc":21,"text":"def get_attrs(self):\n return self._attributes"},{"col":4,"comment":"null","endLoc":25,"header":"def get_variables(self)","id":138,"name":"get_variables","nodeType":"Function","startLoc":24,"text":"def get_variables(self):\n return self._variables"},{"col":4,"comment":"null","endLoc":32,"header":"def get_dimensions(self)","id":139,"name":"get_dimensions","nodeType":"Function","startLoc":27,"text":"def get_dimensions(self):\n dims = {}\n for v in self._variables.values():\n for d, s in v.dims.items():\n dims[d] = s\n return dims"},{"className":"FileManager","col":0,"comment":"Manager for acquiring and closing a file object.\n\n Use FileManager subclasses (CachingFileManager in particular) on backend\n storage classes to automatically handle issues related to keeping track of\n many open files and transferring them between multiple processes.\n ","endLoc":47,"id":140,"nodeType":"Class","startLoc":24,"text":"class FileManager:\n \"\"\"Manager for acquiring and closing a file object.\n\n Use FileManager subclasses (CachingFileManager in particular) on backend\n storage classes to automatically handle issues related to keeping track of\n many open files and transferring them between multiple processes.\n \"\"\"\n\n def acquire(self, needs_lock=True):\n \"\"\"Acquire the file object from this manager.\"\"\"\n raise NotImplementedError()\n\n def acquire_context(self, needs_lock=True):\n \"\"\"Context manager for acquiring a file. Yields a file object.\n\n The context manager unwinds any actions taken as part of acquisition\n (i.e., removes it from any cache) if an exception is raised from the\n context. It *does not* automatically close the file.\n \"\"\"\n raise NotImplementedError()\n\n def close(self, needs_lock=True):\n \"\"\"Close the file object associated with this manager, if needed.\"\"\"\n raise NotImplementedError()"},{"col":4,"comment":"Acquire the file object from this manager.","endLoc":34,"header":"def acquire(self, needs_lock=True)","id":141,"name":"acquire","nodeType":"Function","startLoc":32,"text":"def acquire(self, needs_lock=True):\n \"\"\"Acquire the file object from this manager.\"\"\"\n raise NotImplementedError()"},{"col":4,"comment":"Context manager for acquiring a file. Yields a file object.\n\n The context manager unwinds any actions taken as part of acquisition\n (i.e., removes it from any cache) if an exception is raised from the\n context. It *does not* automatically close the file.\n ","endLoc":43,"header":"def acquire_context(self, needs_lock=True)","id":142,"name":"acquire_context","nodeType":"Function","startLoc":36,"text":"def acquire_context(self, needs_lock=True):\n \"\"\"Context manager for acquiring a file. 
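A hedged sketch of the object-dtype coercion performed by ensure_dtype_not_object above (the xarray.conventions import path and the example variable are assumptions): string data with missing values is filled with the empty string and given a variable-length string dtype, so no plain Python objects are left to serialize.

import numpy as np
import xarray as xr
from xarray.conventions import ensure_dtype_not_object  # assumed import path

var = xr.Variable(("x",), np.array(["a", np.nan, "b"], dtype=object))
fixed = ensure_dtype_not_object(var, name="example")
# the result carries a variable-length string dtype (object dtype with metadata)
print(fixed.values)   # ['a' '' 'b'] -- NaN became the empty-string fill value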
Yields a file object.\n\n The context manager unwinds any actions taken as part of acquisition\n (i.e., removes it from any cache) if an exception is raised from the\n context. It *does not* automatically close the file.\n \"\"\"\n raise NotImplementedError()"},{"col":4,"comment":"Close the file object associated with this manager, if needed.","endLoc":47,"header":"def close(self, needs_lock=True)","id":143,"name":"close","nodeType":"Function","startLoc":45,"text":"def close(self, needs_lock=True):\n \"\"\"Close the file object associated with this manager, if needed.\"\"\"\n raise NotImplementedError()"},{"col":4,"comment":"Initialize a FileManager.\n\n The cache and ref_counts arguments exist solely to facilitate\n dependency injection, and should only be set for tests.\n\n Parameters\n ----------\n opener : callable\n Function that when called like ``opener(*args, **kwargs)`` returns\n an open file object. The file object must implement a ``close()``\n method.\n *args\n Positional arguments for opener. A ``mode`` argument should be\n provided as a keyword argument (see below). All arguments must be\n hashable.\n mode : optional\n If provided, passed as a keyword argument to ``opener`` along with\n ``**kwargs``. ``mode='w' `` has special treatment: after the first\n call it is replaced by ``mode='a'`` in all subsequent function to\n avoid overriding the newly created file.\n kwargs : dict, optional\n Keyword arguments for opener, excluding ``mode``. All values must\n be hashable.\n lock : duck-compatible threading.Lock, optional\n Lock to use when modifying the cache inside acquire() and close().\n By default, uses a new threading.Lock() object. If set, this object\n should be pickleable.\n cache : MutableMapping, optional\n Mapping to use as a cache for open files. By default, uses xarray's\n global LRU file cache. Because ``cache`` typically points to a\n global variable and contains non-picklable file objects, an\n unpickled FileManager objects will be restored with the default\n cache.\n ref_counts : dict, optional\n Optional dict to use for keeping track the number of references to\n the same file.\n ","endLoc":145,"header":"def __init__(\n self,\n opener,\n *args,\n mode=_DEFAULT_MODE,\n kwargs=None,\n lock=None,\n cache=None,\n ref_counts=None\n )","id":144,"name":"__init__","nodeType":"Function","startLoc":78,"text":"def __init__(\n self,\n opener,\n *args,\n mode=_DEFAULT_MODE,\n kwargs=None,\n lock=None,\n cache=None,\n ref_counts=None\n ):\n \"\"\"Initialize a FileManager.\n\n The cache and ref_counts arguments exist solely to facilitate\n dependency injection, and should only be set for tests.\n\n Parameters\n ----------\n opener : callable\n Function that when called like ``opener(*args, **kwargs)`` returns\n an open file object. The file object must implement a ``close()``\n method.\n *args\n Positional arguments for opener. A ``mode`` argument should be\n provided as a keyword argument (see below). All arguments must be\n hashable.\n mode : optional\n If provided, passed as a keyword argument to ``opener`` along with\n ``**kwargs``. ``mode='w' `` has special treatment: after the first\n call it is replaced by ``mode='a'`` in all subsequent function to\n avoid overriding the newly created file.\n kwargs : dict, optional\n Keyword arguments for opener, excluding ``mode``. All values must\n be hashable.\n lock : duck-compatible threading.Lock, optional\n Lock to use when modifying the cache inside acquire() and close().\n By default, uses a new threading.Lock() object. 
If set, this object\n should be pickleable.\n cache : MutableMapping, optional\n Mapping to use as a cache for open files. By default, uses xarray's\n global LRU file cache. Because ``cache`` typically points to a\n global variable and contains non-picklable file objects, an\n unpickled FileManager objects will be restored with the default\n cache.\n ref_counts : dict, optional\n Optional dict to use for keeping track the number of references to\n the same file.\n \"\"\"\n self._opener = opener\n self._args = args\n self._mode = mode\n self._kwargs = {} if kwargs is None else dict(kwargs)\n\n self._default_lock = lock is None or lock is False\n self._lock = threading.Lock() if self._default_lock else lock\n\n # cache[self._key] stores the file associated with this object.\n if cache is None:\n cache = FILE_CACHE\n self._cache = cache\n self._key = self._make_key()\n\n # ref_counts[self._key] stores the number of CachingFileManager objects\n # in memory referencing this same file. We use this to know if we can\n # close a file when the manager is deallocated.\n if ref_counts is None:\n ref_counts = REF_COUNTS\n self._ref_counter = _RefCounter(ref_counts)\n self._ref_counter.increment(self._key)"},{"col":4,"comment":"null","endLoc":253,"header":"def __new__(cls, data, name=None)","id":145,"name":"__new__","nodeType":"Function","startLoc":245,"text":"def __new__(cls, data, name=None):\n assert_all_valid_date_type(data)\n if name is None and hasattr(data, \"name\"):\n name = data.name\n\n result = object.__new__(cls)\n result._data = np.array(data, dtype=\"O\")\n result.name = name\n return result"},{"col":0,"comment":"Given an object array with no missing values, infer its dtype from its\n first element\n ","endLoc":151,"header":"def _infer_dtype(array, name=None)","id":146,"name":"_infer_dtype","nodeType":"Function","startLoc":130,"text":"def _infer_dtype(array, name=None):\n \"\"\"Given an object array with no missing values, infer its dtype from its\n first element\n \"\"\"\n if array.dtype.kind != \"O\":\n raise TypeError(\"infer_type must be called on a dtype=object array\")\n\n if array.size == 0:\n return np.dtype(float)\n\n element = array[(0,) * array.ndim]\n if isinstance(element, (bytes, str)):\n return strings.create_vlen_dtype(type(element))\n\n dtype = np.array(element).dtype\n if dtype.kind != \"O\":\n return dtype\n\n raise ValueError(\n \"unable to infer dtype on variable {!r}; xarray \"\n \"cannot serialize arbitrary Python objects\".format(name)\n )"},{"col":4,"comment":"Make a key for caching files in the LRU cache.","endLoc":155,"header":"def _make_key(self)","id":147,"name":"_make_key","nodeType":"Function","startLoc":147,"text":"def _make_key(self):\n \"\"\"Make a key for caching files in the LRU cache.\"\"\"\n value = (\n self._opener,\n self._args,\n \"a\" if self._mode == \"w\" else self._mode,\n tuple(sorted(self._kwargs.items())),\n )\n return _HashedSequence(value)"},{"col":0,"comment":"null","endLoc":212,"header":"def assert_all_valid_date_type(data)","id":148,"name":"assert_all_valid_date_type","nodeType":"Function","startLoc":197,"text":"def assert_all_valid_date_type(data):\n import cftime\n\n if len(data) > 0:\n sample = data[0]\n date_type = type(sample)\n if not isinstance(sample, cftime.datetime):\n raise TypeError(\n \"CFTimeIndex requires cftime.datetime \"\n \"objects. 
Got object of {}.\".format(date_type)\n )\n if not all(isinstance(value, date_type) for value in data):\n raise TypeError(\n \"CFTimeIndex requires using datetime \"\n \"objects of all the same type. Got\\n{}.\".format(data)\n )"},{"col":0,"comment":"null","endLoc":21,"header":"def create_vlen_dtype(element_type)","id":149,"name":"create_vlen_dtype","nodeType":"Function","startLoc":19,"text":"def create_vlen_dtype(element_type):\n # based on h5py.special_dtype\n return np.dtype(\"O\", metadata={\"element_type\": element_type})"},{"col":4,"comment":"null","endLoc":309,"header":"def __init__(self, tuple_value)","id":150,"name":"__init__","nodeType":"Function","startLoc":307,"text":"def __init__(self, tuple_value):\n self[:] = tuple_value\n self.hashvalue = hash(tuple_value)"},{"col":4,"comment":"null","endLoc":281,"header":"def __init__(self, counts)","id":151,"name":"__init__","nodeType":"Function","startLoc":279,"text":"def __init__(self, counts):\n self._counts = counts\n self._lock = threading.Lock()"},{"col":0,"comment":"null","endLoc":294,"header":"def is_valid_numpy_dtype(dtype: Any) -> bool","id":152,"name":"is_valid_numpy_dtype","nodeType":"Function","startLoc":288,"text":"def is_valid_numpy_dtype(dtype: Any) -> bool:\n try:\n np.dtype(dtype)\n except (TypeError, ValueError):\n return False\n else:\n return True"},{"col":4,"comment":"null","endLoc":209,"header":"def sync(self, compute=True)","id":153,"name":"sync","nodeType":"Function","startLoc":190,"text":"def sync(self, compute=True):\n if self.sources:\n import dask.array as da\n\n # TODO: consider wrapping targets with dask.delayed, if this makes\n # for any discernable difference in perforance, e.g.,\n # targets = [dask.delayed(t) for t in self.targets]\n\n delayed_store = da.store(\n self.sources,\n self.targets,\n lock=self.lock,\n compute=compute,\n flush=True,\n regions=self.regions,\n )\n self.sources = []\n self.targets = []\n self.regions = []\n return delayed_store"},{"col":4,"comment":"Context manager for optionally acquiring a lock.","endLoc":164,"header":"@contextlib.contextmanager\n def _optional_lock(self, needs_lock)","id":154,"name":"_optional_lock","nodeType":"Function","startLoc":157,"text":"@contextlib.contextmanager\n def _optional_lock(self, needs_lock):\n \"\"\"Context manager for optionally acquiring a lock.\"\"\"\n if needs_lock:\n with self._lock:\n yield\n else:\n yield"},{"col":4,"comment":"Acquire a file object from the manager.\n\n A new file is only opened if it has expired from the\n least-recently-used cache.\n\n This method uses a lock, which ensures that it is thread-safe. You can\n safely acquire a file in multiple threads at the same time, as long as\n the underlying file object is thread-safe.\n\n Returns\n -------\n An open file object, as returned by ``opener(*args, **kwargs)``.\n ","endLoc":181,"header":"def acquire(self, needs_lock=True)","id":155,"name":"acquire","nodeType":"Function","startLoc":166,"text":"def acquire(self, needs_lock=True):\n \"\"\"Acquire a file object from the manager.\n\n A new file is only opened if it has expired from the\n least-recently-used cache.\n\n This method uses a lock, which ensures that it is thread-safe. 
You can\n safely acquire a file in multiple threads at the same time, as long as\n the underlying file object is thread-safe.\n\n Returns\n -------\n An open file object, as returned by ``opener(*args, **kwargs)``.\n \"\"\"\n file, _ = self._acquire_with_cache_info(needs_lock)\n return file"},{"attributeType":"null","col":4,"comment":"null","endLoc":171,"id":156,"name":"__slots__","nodeType":"Attribute","startLoc":171,"text":"__slots__"},{"col":4,"comment":"Acquire a file, returning the file and whether it was cached.","endLoc":211,"header":"def _acquire_with_cache_info(self, needs_lock=True)","id":157,"name":"_acquire_with_cache_info","nodeType":"Function","startLoc":194,"text":"def _acquire_with_cache_info(self, needs_lock=True):\n \"\"\"Acquire a file, returning the file and whether it was cached.\"\"\"\n with self._optional_lock(needs_lock):\n try:\n file = self._cache[self._key]\n except KeyError:\n kwargs = self._kwargs\n if self._mode is not _DEFAULT_MODE:\n kwargs = kwargs.copy()\n kwargs[\"mode\"] = self._mode\n file = self._opener(*self._args, **kwargs)\n if self._mode == \"w\":\n # ensure file doesn't get overriden when opened again\n self._mode = \"a\"\n self._cache[self._key] = file\n return file, False\n else:\n return file, True"},{"col":0,"comment":"Given a value, wrap it in a 0-D numpy.ndarray with dtype=object.\n ","endLoc":302,"header":"def to_0d_object_array(value: Any) -> np.ndarray","id":158,"name":"to_0d_object_array","nodeType":"Function","startLoc":297,"text":"def to_0d_object_array(value: Any) -> np.ndarray:\n \"\"\"Given a value, wrap it in a 0-D numpy.ndarray with dtype=object.\n \"\"\"\n result = np.empty((), dtype=object)\n result[()] = value\n return result"},{"attributeType":"null","col":8,"comment":"null","endLoc":176,"id":159,"name":"regions","nodeType":"Attribute","startLoc":176,"text":"self.regions"},{"attributeType":"null","col":8,"comment":"null","endLoc":174,"id":160,"name":"sources","nodeType":"Attribute","startLoc":174,"text":"self.sources"},{"attributeType":"null","col":8,"comment":"null","endLoc":177,"id":161,"name":"lock","nodeType":"Attribute","startLoc":177,"text":"self.lock"},{"col":0,"comment":"null","endLoc":36,"header":"def is_bytes_dtype(dtype)","id":162,"name":"is_bytes_dtype","nodeType":"Function","startLoc":35,"text":"def is_bytes_dtype(dtype):\n return dtype.kind == \"S\" or check_vlen_dtype(dtype) == bytes"},{"col":0,"comment":"null","endLoc":28,"header":"def check_vlen_dtype(dtype)","id":163,"name":"check_vlen_dtype","nodeType":"Function","startLoc":24,"text":"def check_vlen_dtype(dtype):\n if dtype.kind != \"O\" or dtype.metadata is None:\n return None\n else:\n return dtype.metadata.get(\"element_type\")"},{"attributeType":"null","col":8,"comment":"null","endLoc":175,"id":164,"name":"targets","nodeType":"Attribute","startLoc":175,"text":"self.targets"},{"col":0,"comment":"Find the root and group name of a netCDF4/h5netcdf dataset.","endLoc":40,"header":"def find_root_and_group(ds)","id":165,"name":"find_root_and_group","nodeType":"Function","startLoc":33,"text":"def find_root_and_group(ds):\n \"\"\"Find the root and group name of a netCDF4/h5netcdf dataset.\"\"\"\n hierarchy = ()\n while ds.parent is not None:\n hierarchy = (ds.name,) + hierarchy\n ds = ds.parent\n group = \"/\" + \"/\".join(hierarchy)\n return ds, group"},{"col":0,"comment":"\n Robustly index an array, using retry logic with exponential backoff if any\n of the errors ``catch`` are raised. 
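The caching behaviour documented above can be sketched with the builtin open as the opener (a usage sketch only: "example.txt" is a hypothetical file and the xarray.backends re-export is assumed): the first acquire() opens and caches the file under the hashed key, repeated acquires return the same handle, and close() evicts and closes it.

from xarray.backends import CachingFileManager  # assumed to be re-exported here

manager = CachingFileManager(open, "example.txt", mode="r")  # hypothetical existing file
f1 = manager.acquire()            # opens the file and stores it in the global LRU cache
f2 = manager.acquire()            # cache hit: the very same file object comes back
assert f1 is f2
with manager.acquire_context() as f:
    f.readline()                  # an exception here would evict a freshly opened file
manager.close()                   # pops the handle from the cache and closes it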
The initial_delay is measured in ms.\n\n With the default settings, the maximum delay will be in the range of 32-64\n seconds.\n ","endLoc":66,"header":"def robust_getitem(array, key, catch=Exception, max_retries=6, initial_delay=500)","id":166,"name":"robust_getitem","nodeType":"Function","startLoc":43,"text":"def robust_getitem(array, key, catch=Exception, max_retries=6, initial_delay=500):\n \"\"\"\n Robustly index an array, using retry logic with exponential backoff if any\n of the errors ``catch`` are raised. The initial_delay is measured in ms.\n\n With the default settings, the maximum delay will be in the range of 32-64\n seconds.\n \"\"\"\n assert max_retries >= 0\n for n in range(max_retries + 1):\n try:\n return array[key]\n except catch:\n if n == max_retries:\n raise\n base_delay = initial_delay * 2 ** n\n next_delay = base_delay + np.random.randint(base_delay)\n msg = (\n \"getitem failed, waiting %s ms before trying again \"\n \"(%s tries remaining). Full traceback: %s\"\n % (next_delay, max_retries - n, traceback.format_exc())\n )\n logger.debug(msg)\n time.sleep(1e-3 * next_delay)"},{"col":0,"comment":"null","endLoc":32,"header":"def is_unicode_dtype(dtype)","id":167,"name":"is_unicode_dtype","nodeType":"Function","startLoc":31,"text":"def is_unicode_dtype(dtype):\n return dtype.kind == \"U\" or check_vlen_dtype(dtype) == str"},{"attributeType":"null","col":0,"comment":"null","endLoc":15,"id":168,"name":"logger","nodeType":"Attribute","startLoc":15,"text":"logger"},{"attributeType":"null","col":0,"comment":"null","endLoc":18,"id":169,"name":"NONE_VAR_NAME","nodeType":"Attribute","startLoc":18,"text":"NONE_VAR_NAME"},{"col":0,"comment":"Create a copy of an array with the given dtype.\n\n We use this instead of np.array() to ensure that custom object dtypes end\n up on the resulting array.\n ","endLoc":173,"header":"def _copy_with_dtype(data, dtype)","id":170,"name":"_copy_with_dtype","nodeType":"Function","startLoc":165,"text":"def _copy_with_dtype(data, dtype):\n \"\"\"Create a copy of an array with the given dtype.\n\n We use this instead of np.array() to ensure that custom object dtypes end\n up on the resulting array.\n \"\"\"\n result = np.empty(data.shape, dtype)\n result[...] 
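A small worked example of the backoff arithmetic in robust_getitem above: before retry n the helper sleeps base_delay = initial_delay * 2**n milliseconds plus a random jitter of up to base_delay again, so with the defaults (initial_delay=500, max_retries=6) the six sleeps total between 31.5 and 63 seconds, which is the 32-64 second range quoted in its docstring. The snippet below only reproduces the arithmetic; it performs no I/O.

initial_delay, max_retries = 500, 6
base_delays = [initial_delay * 2 ** n for n in range(max_retries)]  # sleeps happen before retries 1..6
print(base_delays)        # [500, 1000, 2000, 4000, 8000, 16000]
print(sum(base_delays))   # 31500 ms minimum total wait; jitter can double each term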
= data\n return result"},{"col":0,"comment":"","endLoc":1,"header":"common.py#","id":171,"name":"","nodeType":"Function","startLoc":1,"text":"logger = logging.getLogger(__name__)\n\nNONE_VAR_NAME = \"__values__\""},{"col":4,"comment":"null","endLoc":37,"header":"def prepare_variable(self, k, v, *args, **kwargs)","id":172,"name":"prepare_variable","nodeType":"Function","startLoc":34,"text":"def prepare_variable(self, k, v, *args, **kwargs):\n new_var = Variable(v.dims, np.empty_like(v), v.attrs)\n self._variables[k] = new_var\n return new_var, v.data"},{"col":4,"comment":"Context manager for acquiring a file.","endLoc":192,"header":"@contextlib.contextmanager\n def acquire_context(self, needs_lock=True)","id":173,"name":"acquire_context","nodeType":"Function","startLoc":183,"text":"@contextlib.contextmanager\n def acquire_context(self, needs_lock=True):\n \"\"\"Context manager for acquiring a file.\"\"\"\n file, cached = self._acquire_with_cache_info(needs_lock)\n try:\n yield file\n except Exception:\n if not cached:\n self.close(needs_lock)\n raise"},{"col":0,"comment":"Convert arrays of datetime.datetime and datetime.timedelta objects into\n datetime64 and timedelta64, according to the pandas convention.\n ","endLoc":163,"header":"def _possibly_convert_objects(values)","id":174,"name":"_possibly_convert_objects","nodeType":"Function","startLoc":159,"text":"def _possibly_convert_objects(values):\n \"\"\"Convert arrays of datetime.datetime and datetime.timedelta objects into\n datetime64 and timedelta64, according to the pandas convention.\n \"\"\"\n return np.asarray(pd.Series(values.ravel())).reshape(values.shape)"},{"col":0,"comment":" In house nanargmin, nanargmax for object arrays. Always return integer\n type\n ","endLoc":58,"header":"def _nan_argminmax_object(func, fill_value, value, axis=None, **kwargs)","id":175,"name":"_nan_argminmax_object","nodeType":"Function","startLoc":46,"text":"def _nan_argminmax_object(func, fill_value, value, axis=None, **kwargs):\n \"\"\" In house nanargmin, nanargmax for object arrays. 
Always return integer\n type\n \"\"\"\n valid_count = count(value, axis=axis)\n value = fillna(value, fill_value)\n data = _dask_or_eager_func(func)(value, axis=axis, **kwargs)\n\n # TODO This will evaluate dask arrays and might be costly.\n if (valid_count == 0).any():\n raise ValueError(\"All-NaN slice encountered\")\n\n return data"},{"col":0,"comment":"null","endLoc":23,"header":"def pytest_runtest_setup(item)","id":176,"name":"pytest_runtest_setup","nodeType":"Function","startLoc":16,"text":"def pytest_runtest_setup(item):\n # based on https://stackoverflow.com/questions/47559524\n if \"flaky\" in item.keywords and not item.config.getoption(\"--run-flaky\"):\n pytest.skip(\"set --run-flaky option to run flaky tests\")\n if \"network\" in item.keywords and not item.config.getoption(\"--run-network-tests\"):\n pytest.skip(\n \"set --run-network-tests to run test requiring an \" \"internet connection\"\n )"},{"col":4,"comment":"Explicitly close any associated file object (if necessary).","endLoc":221,"header":"def close(self, needs_lock=True)","id":177,"name":"close","nodeType":"Function","startLoc":213,"text":"def close(self, needs_lock=True):\n \"\"\"Explicitly close any associated file object (if necessary).\"\"\"\n # TODO: remove needs_lock if/when we have a reentrant lock in\n # dask.distributed: https://github.com/dask/dask/issues/3832\n with self._optional_lock(needs_lock):\n default = None\n file = self._cache.pop(self._key, default)\n if file is not None:\n file.close()"},{"col":4,"comment":"null","endLoc":41,"header":"def set_attribute(self, k, v)","id":178,"name":"set_attribute","nodeType":"Function","startLoc":39,"text":"def set_attribute(self, k, v):\n # copy to imitate writing to disk.\n self._attributes[k] = copy.deepcopy(v)"},{"col":4,"comment":"null","endLoc":253,"header":"def __del__(self)","id":179,"name":"__del__","nodeType":"Function","startLoc":223,"text":"def __del__(self):\n # If we're the only CachingFileManger referencing a unclosed file, we\n # should remove it from the cache upon garbage collection.\n #\n # Keeping our own count of file references might seem like overkill,\n # but it's actually pretty common to reopen files with the same\n # variable name in a notebook or command line environment, e.g., to\n # fix the parameters used when opening a file:\n # >>> ds = xarray.open_dataset('myfile.nc')\n # >>> ds = xarray.open_dataset('myfile.nc', decode_times=False)\n # This second assignment to \"ds\" drops CPython's ref-count on the first\n # \"ds\" argument to zero, which can trigger garbage collections. So if\n # we didn't check whether another object is referencing 'myfile.nc',\n # the newly opened file would actually be immediately closed!\n ref_count = self._ref_counter.decrement(self._key)\n\n if not ref_count and self._key in self._cache:\n if acquire(self._lock, blocking=False):\n # Only close files if we can do so immediately.\n try:\n self.close(needs_lock=False)\n finally:\n self._lock.release()\n\n if OPTIONS[\"warn_for_unclosed_files\"]:\n warnings.warn(\n \"deallocating {}, but file is not already closed. 
\"\n \"This may indicate a bug.\".format(self),\n RuntimeWarning,\n stacklevel=2,\n )"},{"attributeType":"null","col":4,"comment":"null","endLoc":377,"id":180,"name":"__slots__","nodeType":"Attribute","startLoc":377,"text":"__slots__"},{"col":0,"comment":" In house nanmin and nanmax for object array ","endLoc":71,"header":"def _nan_minmax_object(func, fill_value, value, axis=None, **kwargs)","id":181,"name":"_nan_minmax_object","nodeType":"Function","startLoc":61,"text":"def _nan_minmax_object(func, fill_value, value, axis=None, **kwargs):\n \"\"\" In house nanmin and nanmax for object array \"\"\"\n valid_count = count(value, axis=axis)\n filled_value = fillna(value, fill_value)\n data = getattr(np, func)(filled_value, axis=axis, **kwargs)\n if not hasattr(data, \"dtype\"): # scalar case\n data = fill_value if valid_count == 0 else data\n # we've computed a single min, max value of type object.\n # don't let np.array turn a tuple back into an array\n return utils.to_0d_object_array(data)\n return where_method(data, valid_count != 0)"},{"col":0,"comment":"","endLoc":1,"header":"conftest.py#","id":182,"name":"","nodeType":"Function","startLoc":1,"text":"\"\"\"Configuration for pytest.\"\"\""},{"attributeType":"null","col":0,"comment":"null","endLoc":20,"id":183,"name":"HDF5_LOCK","nodeType":"Attribute","startLoc":20,"text":"HDF5_LOCK"},{"col":0,"comment":"Combine a sequence of locks into a single lock.","endLoc":199,"header":"def combine_locks(locks)","id":184,"name":"combine_locks","nodeType":"Function","startLoc":184,"text":"def combine_locks(locks):\n \"\"\"Combine a sequence of locks into a single lock.\"\"\"\n all_locks = []\n for lock in locks:\n if isinstance(lock, CombinedLock):\n all_locks.extend(lock.locks)\n elif lock is not None:\n all_locks.append(lock)\n\n num_locks = len(all_locks)\n if num_locks > 1:\n return CombinedLock(all_locks)\n elif num_locks == 1:\n return all_locks[0]\n else:\n return DummyLock()"},{"className":"DummyLock","col":0,"comment":"DummyLock provides the lock API without any actual locking.","endLoc":181,"id":185,"nodeType":"Class","startLoc":165,"text":"class DummyLock:\n \"\"\"DummyLock provides the lock API without any actual locking.\"\"\"\n\n def acquire(self, blocking=True):\n pass\n\n def release(self):\n pass\n\n def __enter__(self):\n pass\n\n def __exit__(self, *args):\n pass\n\n def locked(self):\n return False"},{"col":4,"comment":"null","endLoc":169,"header":"def acquire(self, blocking=True)","id":186,"name":"acquire","nodeType":"Function","startLoc":168,"text":"def acquire(self, blocking=True):\n pass"},{"col":4,"comment":"null","endLoc":172,"header":"def release(self)","id":187,"name":"release","nodeType":"Function","startLoc":171,"text":"def release(self):\n pass"},{"col":4,"comment":"null","endLoc":175,"header":"def __enter__(self)","id":188,"name":"__enter__","nodeType":"Function","startLoc":174,"text":"def __enter__(self):\n pass"},{"col":4,"comment":"null","endLoc":178,"header":"def __exit__(self, *args)","id":189,"name":"__exit__","nodeType":"Function","startLoc":177,"text":"def __exit__(self, *args):\n pass"},{"col":4,"comment":"null","endLoc":181,"header":"def locked(self)","id":190,"name":"locked","nodeType":"Function","startLoc":180,"text":"def locked(self):\n return False"},{"col":0,"comment":"null","endLoc":32,"header":"def _get_threaded_lock(key)","id":191,"name":"_get_threaded_lock","nodeType":"Function","startLoc":27,"text":"def _get_threaded_lock(key):\n try:\n lock = _FILE_LOCKS[key]\n except KeyError:\n lock = 
_FILE_LOCKS[key] = threading.Lock()\n return lock"},{"col":4,"comment":"null","endLoc":45,"header":"def set_dimension(self, d, l, unlimited_dims=None)","id":192,"name":"set_dimension","nodeType":"Function","startLoc":43,"text":"def set_dimension(self, d, l, unlimited_dims=None):\n # in this model, dimensions are accounted for in the variables\n pass"},{"attributeType":"null","col":8,"comment":"null","endLoc":19,"id":193,"name":"_attributes","nodeType":"Attribute","startLoc":19,"text":"self._attributes"},{"col":0,"comment":"null","endLoc":39,"header":"def _get_multiprocessing_lock(key)","id":194,"name":"_get_multiprocessing_lock","nodeType":"Function","startLoc":35,"text":"def _get_multiprocessing_lock(key):\n # TODO: make use of the key -- maybe use locket.py?\n # https://github.com/mwilliamson/locket.py\n del key # unused\n return multiprocessing.Lock()"},{"col":0,"comment":"Returns an appropriate function for creating resource locks.\n\n Parameters\n ----------\n scheduler : str or None\n Dask scheduler being used.\n\n See Also\n --------\n dask.utils.get_scheduler_lock\n ","endLoc":62,"header":"def _get_lock_maker(scheduler=None)","id":195,"name":"_get_lock_maker","nodeType":"Function","startLoc":50,"text":"def _get_lock_maker(scheduler=None):\n \"\"\"Returns an appropriate function for creating resource locks.\n\n Parameters\n ----------\n scheduler : str or None\n Dask scheduler being used.\n\n See Also\n --------\n dask.utils.get_scheduler_lock\n \"\"\"\n return _LOCK_MAKERS[scheduler]"},{"col":0,"comment":"Determine the dask scheduler that is being used.\n\n None is returned if no dask scheduler is active.\n\n See also\n --------\n dask.base.get_scheduler\n ","endLoc":92,"header":"def _get_scheduler(get=None, collection=None)","id":196,"name":"_get_scheduler","nodeType":"Function","startLoc":65,"text":"def _get_scheduler(get=None, collection=None):\n \"\"\"Determine the dask scheduler that is being used.\n\n None is returned if no dask scheduler is active.\n\n See also\n --------\n dask.base.get_scheduler\n \"\"\"\n try:\n import dask # noqa: F401\n except ImportError:\n return None\n\n actual_get = dask.base.get_scheduler(get, collection)\n\n try:\n from dask.distributed import Client\n\n if isinstance(actual_get.__self__, Client):\n return \"distributed\"\n except (ImportError, AttributeError):\n pass\n\n if actual_get is dask.multiprocessing.get:\n return \"multiprocessing\"\n else:\n return \"threaded\""},{"col":0,"comment":"null","endLoc":79,"header":"def nanmin(a, axis=None, out=None)","id":197,"name":"nanmin","nodeType":"Function","startLoc":74,"text":"def nanmin(a, axis=None, out=None):\n if a.dtype.kind == \"O\":\n return _nan_minmax_object(\"min\", dtypes.get_pos_infinity(a.dtype), a, axis)\n\n module = dask_array if isinstance(a, dask_array_type) else nputils\n return module.nanmin(a, axis=axis)"},{"col":0,"comment":"Get a scheduler appropriate lock for writing to the given resource.\n\n Parameters\n ----------\n key : str\n Name of the resource for which to acquire a lock. Typically a filename.\n\n Returns\n -------\n Lock object that can be used like a threading.Lock object.\n ","endLoc":109,"header":"def get_write_lock(key)","id":198,"name":"get_write_lock","nodeType":"Function","startLoc":95,"text":"def get_write_lock(key):\n \"\"\"Get a scheduler appropriate lock for writing to the given resource.\n\n Parameters\n ----------\n key : str\n Name of the resource for which to acquire a lock. 
Typically a filename.\n\n Returns\n -------\n Lock object that can be used like a threading.Lock object.\n \"\"\"\n scheduler = _get_scheduler()\n lock_maker = _get_lock_maker(scheduler)\n return lock_maker(key)"},{"col":0,"comment":"Whether to treat a value as a scalar.\n\n Any non-iterable, string, or 0-D array\n ","endLoc":285,"header":"def is_scalar(value: Any, include_0d: bool = True) -> bool","id":200,"name":"is_scalar","nodeType":"Function","startLoc":269,"text":"def is_scalar(value: Any, include_0d: bool = True) -> bool:\n \"\"\"Whether to treat a value as a scalar.\n\n Any non-iterable, string, or 0-D array\n \"\"\"\n from .variable import NON_NUMPY_SUPPORTED_ARRAY_TYPES\n\n if include_0d:\n include_0d = getattr(value, \"ndim\", None) == 0\n return (\n include_0d\n or isinstance(value, (str, bytes))\n or not (\n isinstance(value, (Iterable,) + NON_NUMPY_SUPPORTED_ARRAY_TYPES)\n or hasattr(value, \"__array_function__\")\n )\n )"},{"col":0,"comment":"Ensure that the given object is a lock.","endLoc":206,"header":"def ensure_lock(lock)","id":201,"name":"ensure_lock","nodeType":"Function","startLoc":202,"text":"def ensure_lock(lock):\n \"\"\"Ensure that the given object is a lock.\"\"\"\n if lock is None or lock is False:\n return DummyLock()\n return lock"},{"col":4,"comment":"State for pickling.","endLoc":260,"header":"def __getstate__(self)","id":202,"name":"__getstate__","nodeType":"Function","startLoc":255,"text":"def __getstate__(self):\n \"\"\"State for pickling.\"\"\"\n # cache and ref_counts are intentionally omitted: we don't want to try\n # to serialize these global objects.\n lock = None if self._default_lock else self._lock\n return (self._opener, self._args, self._mode, self._kwargs, lock)"},{"col":4,"comment":"Restore from a pickle.","endLoc":265,"header":"def __setstate__(self, state)","id":203,"name":"__setstate__","nodeType":"Function","startLoc":262,"text":"def __setstate__(self, state):\n \"\"\"Restore from a pickle.\"\"\"\n opener, args, mode, kwargs, lock = state\n self.__init__(opener, *args, mode=mode, kwargs=kwargs, lock=lock)"},{"attributeType":"null","col":8,"comment":"null","endLoc":18,"id":204,"name":"_variables","nodeType":"Attribute","startLoc":18,"text":"self._variables"},{"className":"BaseNetCDF4Array","col":0,"comment":"null","endLoc":55,"id":205,"nodeType":"Class","startLoc":29,"text":"class BaseNetCDF4Array(BackendArray):\n __slots__ = (\"datastore\", \"dtype\", \"shape\", \"variable_name\")\n\n def __init__(self, variable_name, datastore):\n self.datastore = datastore\n self.variable_name = variable_name\n\n array = self.get_array()\n self.shape = array.shape\n\n dtype = array.dtype\n if dtype is str:\n # use object dtype because that's the only way in numpy to\n # represent variable length strings; it also prevents automatic\n # string concatenation via conventions.decode_cf_variable\n dtype = np.dtype(\"O\")\n self.dtype = dtype\n\n def __setitem__(self, key, value):\n with self.datastore.lock:\n data = self.get_array(needs_lock=False)\n data[key] = value\n if self.datastore.autoclose:\n self.datastore.close(needs_lock=False)\n\n def get_array(self, needs_lock=True):\n raise NotImplementedError(\"Virtual Method\")"},{"attributeType":"null","col":16,"comment":"null","endLoc":3,"id":206,"name":"np","nodeType":"Attribute","startLoc":3,"text":"np"},{"col":4,"comment":"null","endLoc":273,"header":"def __repr__(self)","id":207,"name":"__repr__","nodeType":"Function","startLoc":267,"text":"def __repr__(self):\n args_string = \", \".join(map(repr, 
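A brief sketch of how the lock helpers in locks.py compose (only behaviour visible in the functions above is exercised; the import path mirrors the backends/locks.py file shown): combine_locks drops None entries, returns a single remaining lock unchanged, and falls back to a no-op DummyLock when there is nothing to lock, while ensure_lock normalizes None/False to a DummyLock.

import threading
from xarray.backends.locks import DummyLock, combine_locks, ensure_lock

a = threading.Lock()
assert combine_locks([a, None]) is a              # None is dropped; a single lock is returned as-is
assert isinstance(combine_locks([]), DummyLock)   # nothing to lock -> no-op lock
assert isinstance(ensure_lock(False), DummyLock)  # ensure_lock normalizes None/False
assert ensure_lock(a) is a

dummy = DummyLock()
dummy.acquire()
dummy.release()                                   # both are no-ops
assert dummy.locked() is False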
self._args))\n if self._mode is not _DEFAULT_MODE:\n args_string += \", mode={!r}\".format(self._mode)\n return \"{}({!r}, {}, kwargs={})\".format(\n type(self).__name__, self._opener, args_string, self._kwargs\n )"},{"attributeType":"null","col":4,"comment":"null","endLoc":10,"id":208,"name":"SerializableLock","nodeType":"Attribute","startLoc":10,"text":"SerializableLock"},{"attributeType":"null","col":8,"comment":"null","endLoc":130,"id":209,"name":"_default_lock","nodeType":"Attribute","startLoc":130,"text":"self._default_lock"},{"attributeType":"null","col":41,"comment":"null","endLoc":13,"id":210,"name":"DistributedLock","nodeType":"Attribute","startLoc":13,"text":"DistributedLock"},{"attributeType":"None","col":4,"comment":"null","endLoc":15,"id":211,"name":"DistributedLock","nodeType":"Attribute","startLoc":15,"text":"DistributedLock"},{"attributeType":"null","col":8,"comment":"null","endLoc":126,"id":212,"name":"_args","nodeType":"Attribute","startLoc":126,"text":"self._args"},{"attributeType":"null","col":0,"comment":"null","endLoc":21,"id":213,"name":"NETCDFC_LOCK","nodeType":"Attribute","startLoc":21,"text":"NETCDFC_LOCK"},{"attributeType":"null","col":8,"comment":"null","endLoc":125,"id":214,"name":"_opener","nodeType":"Attribute","startLoc":125,"text":"self._opener"},{"attributeType":"null","col":8,"comment":"null","endLoc":131,"id":215,"name":"_lock","nodeType":"Attribute","startLoc":131,"text":"self._lock"},{"attributeType":"null","col":8,"comment":"null","endLoc":128,"id":216,"name":"_kwargs","nodeType":"Attribute","startLoc":128,"text":"self._kwargs"},{"attributeType":"null","col":8,"comment":"null","endLoc":144,"id":217,"name":"_ref_counter","nodeType":"Attribute","startLoc":144,"text":"self._ref_counter"},{"attributeType":"null","col":8,"comment":"null","endLoc":137,"id":218,"name":"_key","nodeType":"Attribute","startLoc":137,"text":"self._key"},{"attributeType":"null","col":0,"comment":"null","endLoc":24,"id":219,"name":"_FILE_LOCKS","nodeType":"Attribute","startLoc":24,"text":"_FILE_LOCKS"},{"attributeType":"null","col":8,"comment":"null","endLoc":136,"id":220,"name":"_cache","nodeType":"Attribute","startLoc":136,"text":"self._cache"},{"attributeType":"null","col":8,"comment":"null","endLoc":127,"id":221,"name":"_mode","nodeType":"Attribute","startLoc":127,"text":"self._mode"},{"attributeType":"null","col":0,"comment":"null","endLoc":42,"id":222,"name":"_LOCK_MAKERS","nodeType":"Attribute","startLoc":42,"text":"_LOCK_MAKERS"},{"col":0,"comment":"","endLoc":1,"header":"locks.py#","id":223,"name":"","nodeType":"Function","startLoc":1,"text":"try:\n from dask.utils import SerializableLock\nexcept ImportError:\n # no need to worry about serializing the lock\n SerializableLock = threading.Lock\n\ntry:\n from dask.distributed import Lock as DistributedLock\nexcept ImportError:\n DistributedLock = None\n\nHDF5_LOCK = SerializableLock()\n\nNETCDFC_LOCK = SerializableLock()\n\n_FILE_LOCKS: MutableMapping[Any, threading.Lock] = weakref.WeakValueDictionary()\n\n_LOCK_MAKERS = {\n None: _get_threaded_lock,\n \"threaded\": _get_threaded_lock,\n \"multiprocessing\": _get_multiprocessing_lock,\n \"distributed\": DistributedLock,\n}"},{"attributeType":"null","col":0,"comment":"null","endLoc":5,"id":224,"name":"K","nodeType":"Attribute","startLoc":5,"text":"K"},{"id":225,"name":"xarray/util","nodeType":"Package"},{"fileName":"__init__.py","filePath":"xarray/util","id":226,"nodeType":"File","text":""},{"col":0,"comment":"Return an appropriate positive infinity for this dtype.\n\n 
Parameters\n ----------\n dtype : np.dtype\n\n Returns\n -------\n fill_value : positive infinity value corresponding to this dtype.\n ","endLoc":116,"header":"def get_pos_infinity(dtype)","id":227,"name":"get_pos_infinity","nodeType":"Function","startLoc":99,"text":"def get_pos_infinity(dtype):\n \"\"\"Return an appropriate positive infinity for this dtype.\n\n Parameters\n ----------\n dtype : np.dtype\n\n Returns\n -------\n fill_value : positive infinity value corresponding to this dtype.\n \"\"\"\n if issubclass(dtype.type, (np.floating, np.integer)):\n return np.inf\n\n if issubclass(dtype.type, np.complexfloating):\n return np.inf + 1j * np.inf\n\n return INF"},{"className":"NioArrayWrapper","col":0,"comment":"null","endLoc":40,"id":228,"nodeType":"Class","startLoc":16,"text":"class NioArrayWrapper(BackendArray):\n def __init__(self, variable_name, datastore):\n self.datastore = datastore\n self.variable_name = variable_name\n array = self.get_array()\n self.shape = array.shape\n self.dtype = np.dtype(array.typecode())\n\n def get_array(self, needs_lock=True):\n ds = self.datastore._manager.acquire(needs_lock)\n return ds.variables[self.variable_name]\n\n def __getitem__(self, key):\n return indexing.explicit_indexing_adapter(\n key, self.shape, indexing.IndexingSupport.BASIC, self._getitem\n )\n\n def _getitem(self, key):\n with self.datastore.lock:\n array = self.get_array(needs_lock=False)\n\n if key == () and self.ndim == 0:\n return array.get_value()\n\n return array[key]"},{"attributeType":"null","col":0,"comment":"null","endLoc":6,"id":229,"name":"V","nodeType":"Attribute","startLoc":6,"text":"V"},{"col":0,"comment":"null","endLoc":87,"header":"def nanmax(a, axis=None, out=None)","id":230,"name":"nanmax","nodeType":"Function","startLoc":82,"text":"def nanmax(a, axis=None, out=None):\n if a.dtype.kind == \"O\":\n return _nan_minmax_object(\"max\", dtypes.get_neg_infinity(a.dtype), a, axis)\n\n module = dask_array if isinstance(a, dask_array_type) else nputils\n return module.nanmax(a, axis=axis)"},{"col":4,"comment":"null","endLoc":45,"header":"def __init__(self, variable_name, datastore)","id":231,"name":"__init__","nodeType":"Function","startLoc":32,"text":"def __init__(self, variable_name, datastore):\n self.datastore = datastore\n self.variable_name = variable_name\n\n array = self.get_array()\n self.shape = array.shape\n\n dtype = array.dtype\n if dtype is str:\n # use object dtype because that's the only way in numpy to\n # represent variable length strings; it also prevents automatic\n # string concatenation via conventions.decode_cf_variable\n dtype = np.dtype(\"O\")\n self.dtype = dtype"},{"col":0,"comment":"","endLoc":1,"header":"lru_cache.py#","id":232,"name":"","nodeType":"Function","startLoc":1,"text":"K = TypeVar(\"K\")\n\nV = TypeVar(\"V\")"},{"col":0,"comment":"Return an appropriate positive infinity for this dtype.\n\n Parameters\n ----------\n dtype : np.dtype\n\n Returns\n -------\n fill_value : positive infinity value corresponding to this dtype.\n ","endLoc":136,"header":"def get_neg_infinity(dtype)","id":233,"name":"get_neg_infinity","nodeType":"Function","startLoc":119,"text":"def get_neg_infinity(dtype):\n \"\"\"Return an appropriate positive infinity for this dtype.\n\n Parameters\n ----------\n dtype : np.dtype\n\n Returns\n -------\n fill_value : positive infinity value corresponding to this dtype.\n \"\"\"\n if issubclass(dtype.type, (np.floating, np.integer)):\n return -np.inf\n\n if issubclass(dtype.type, np.complexfloating):\n return -np.inf - 1j 
* np.inf\n\n return NINF"},{"col":4,"comment":"null","endLoc":22,"header":"def __init__(self, variable_name, datastore)","id":234,"name":"__init__","nodeType":"Function","startLoc":17,"text":"def __init__(self, variable_name, datastore):\n self.datastore = datastore\n self.variable_name = variable_name\n array = self.get_array()\n self.shape = array.shape\n self.dtype = np.dtype(array.typecode())"},{"fileName":"netcdf3.py","filePath":"xarray/backends","id":235,"nodeType":"File","text":"import unicodedata\n\nimport numpy as np\n\nfrom .. import Variable, coding\n\n# Special characters that are permitted in netCDF names except in the\n# 0th position of the string\n_specialchars = '_.@+- !\"#$%&\\\\()*,:;<=>?[]^`{|}~'\n\n# The following are reserved names in CDL and may not be used as names of\n# variables, dimension, attributes\n_reserved_names = {\n \"byte\",\n \"char\",\n \"short\",\n \"ushort\",\n \"int\",\n \"uint\",\n \"int64\",\n \"uint64\",\n \"float\" \"real\",\n \"double\",\n \"bool\",\n \"string\",\n}\n\n# These data-types aren't supported by netCDF3, so they are automatically\n# coerced instead as indicated by the \"coerce_nc3_dtype\" function\n_nc3_dtype_coercions = {\"int64\": \"int32\", \"bool\": \"int8\"}\n\n# encode all strings as UTF-8\nSTRING_ENCODING = \"utf-8\"\n\n\ndef coerce_nc3_dtype(arr):\n \"\"\"Coerce an array to a data type that can be stored in a netCDF-3 file\n\n This function performs the following dtype conversions:\n int64 -> int32\n bool -> int8\n\n Data is checked for equality, or equivalence (non-NaN values) with\n `np.allclose` with the default keyword arguments.\n \"\"\"\n dtype = str(arr.dtype)\n if dtype in _nc3_dtype_coercions:\n new_dtype = _nc3_dtype_coercions[dtype]\n # TODO: raise a warning whenever casting the data-type instead?\n cast_arr = arr.astype(new_dtype)\n if not (cast_arr == arr).all():\n raise ValueError(\n \"could not safely cast array from dtype %s to %s\" % (dtype, new_dtype)\n )\n arr = cast_arr\n return arr\n\n\ndef encode_nc3_attr_value(value):\n if isinstance(value, bytes):\n pass\n elif isinstance(value, str):\n value = value.encode(STRING_ENCODING)\n else:\n value = coerce_nc3_dtype(np.atleast_1d(value))\n if value.ndim > 1:\n raise ValueError(\"netCDF attributes must be 1-dimensional\")\n return value\n\n\ndef encode_nc3_attrs(attrs):\n return {k: encode_nc3_attr_value(v) for k, v in attrs.items()}\n\n\ndef encode_nc3_variable(var):\n for coder in [\n coding.strings.EncodedStringCoder(allows_unicode=False),\n coding.strings.CharacterArrayCoder(),\n ]:\n var = coder.encode(var)\n data = coerce_nc3_dtype(var.data)\n attrs = encode_nc3_attrs(var.attrs)\n return Variable(var.dims, data, attrs, var.encoding)\n\n\ndef _isalnumMUTF8(c):\n \"\"\"Return True if the given UTF-8 encoded character is alphanumeric\n or multibyte.\n\n Input is not checked!\n \"\"\"\n return c.isalnum() or (len(c.encode(\"utf-8\")) > 1)\n\n\ndef is_valid_nc3_name(s):\n \"\"\"Test whether an object can be validly converted to a netCDF-3\n dimension, variable or attribute name\n\n Earlier versions of the netCDF C-library reference implementation\n enforced a more restricted set of characters in creating new names,\n but permitted reading names containing arbitrary bytes. This\n specification extends the permitted characters in names to include\n multi-byte UTF-8 encoded Unicode and additional printing characters\n from the US-ASCII alphabet. 
The first character of a name must be\n alphanumeric, a multi-byte UTF-8 character, or '_' (reserved for\n special names with meaning to implementations, such as the\n \"_FillValue\" attribute). Subsequent characters may also include\n printing special characters, except for '/' which is not allowed in\n names. Names that have trailing space characters are also not\n permitted.\n \"\"\"\n if not isinstance(s, str):\n return False\n if not isinstance(s, str):\n s = s.decode(\"utf-8\")\n num_bytes = len(s.encode(\"utf-8\"))\n return (\n (unicodedata.normalize(\"NFC\", s) == s)\n and (s not in _reserved_names)\n and (num_bytes >= 0)\n and (\"/\" not in s)\n and (s[-1] != \" \")\n and (_isalnumMUTF8(s[0]) or (s[0] == \"_\"))\n and all(_isalnumMUTF8(c) or c in _specialchars for c in s)\n )\n"},{"col":0,"comment":"null","endLoc":96,"header":"def nanargmin(a, axis=None)","id":236,"name":"nanargmin","nodeType":"Function","startLoc":90,"text":"def nanargmin(a, axis=None):\n if a.dtype.kind == \"O\":\n fill_value = dtypes.get_pos_infinity(a.dtype)\n return _nan_argminmax_object(\"argmin\", fill_value, a, axis=axis)\n\n module = dask_array if isinstance(a, dask_array_type) else nputils\n return module.nanargmin(a, axis=axis)"},{"col":4,"comment":"null","endLoc":55,"header":"def get_array(self, needs_lock=True)","id":237,"name":"get_array","nodeType":"Function","startLoc":54,"text":"def get_array(self, needs_lock=True):\n raise NotImplementedError(\"Virtual Method\")"},{"col":4,"comment":"null","endLoc":52,"header":"def __setitem__(self, key, value)","id":238,"name":"__setitem__","nodeType":"Function","startLoc":47,"text":"def __setitem__(self, key, value):\n with self.datastore.lock:\n data = self.get_array(needs_lock=False)\n data[key] = value\n if self.datastore.autoclose:\n self.datastore.close(needs_lock=False)"},{"col":0,"comment":"Check for uniqueness of MultiIndex level names in all given\n variables.\n\n Not public API. Used for checking consistency of DataArray and Dataset\n objects.\n ","endLoc":2268,"header":"def assert_unique_multiindex_level_names(variables)","id":239,"name":"assert_unique_multiindex_level_names","nodeType":"Function","startLoc":2235,"text":"def assert_unique_multiindex_level_names(variables):\n \"\"\"Check for uniqueness of MultiIndex level names in all given\n variables.\n\n Not public API. Used for checking consistency of DataArray and Dataset\n objects.\n \"\"\"\n level_names = defaultdict(list)\n all_level_names = set()\n for var_name, var in variables.items():\n if isinstance(var._data, PandasIndexAdapter):\n idx_level_names = var.to_index_variable().level_names\n if idx_level_names is not None:\n for n in idx_level_names:\n level_names[n].append(\"%r (%s)\" % (n, var_name))\n if idx_level_names:\n all_level_names.update(idx_level_names)\n\n for k, v in level_names.items():\n if k in variables:\n v.append(\"(%s)\" % k)\n\n duplicate_names = [v for v in level_names.values() if len(v) > 1]\n if duplicate_names:\n conflict_str = \"\\n\".join([\", \".join(v) for v in duplicate_names])\n raise ValueError(\"conflicting MultiIndex level name(s):\\n%s\" % conflict_str)\n # Check confliction between level names and dimensions GH:2299\n for k, v in variables.items():\n for d in v.dims:\n if d in all_level_names:\n raise ValueError(\n \"conflicting level / dimension names. 
{} \"\n \"already exists as a level name.\".format(d)\n )"},{"col":0,"comment":"null","endLoc":105,"header":"def nanargmax(a, axis=None)","id":240,"name":"nanargmax","nodeType":"Function","startLoc":99,"text":"def nanargmax(a, axis=None):\n if a.dtype.kind == \"O\":\n fill_value = dtypes.get_neg_infinity(a.dtype)\n return _nan_argminmax_object(\"argmax\", fill_value, a, axis=axis)\n\n module = dask_array if isinstance(a, dask_array_type) else nputils\n return module.nanargmax(a, axis=axis)"},{"attributeType":"null","col":4,"comment":"null","endLoc":30,"id":241,"name":"__slots__","nodeType":"Attribute","startLoc":30,"text":"__slots__"},{"attributeType":"null","col":8,"comment":"null","endLoc":37,"id":242,"name":"shape","nodeType":"Attribute","startLoc":37,"text":"self.shape"},{"col":0,"comment":"Coerce an array to a data type that can be stored in a netCDF-3 file\n\n This function performs the following dtype conversions:\n int64 -> int32\n bool -> int8\n\n Data is checked for equality, or equivalence (non-NaN values) with\n `np.allclose` with the default keyword arguments.\n ","endLoc":56,"header":"def coerce_nc3_dtype(arr)","id":243,"name":"coerce_nc3_dtype","nodeType":"Function","startLoc":36,"text":"def coerce_nc3_dtype(arr):\n \"\"\"Coerce an array to a data type that can be stored in a netCDF-3 file\n\n This function performs the following dtype conversions:\n int64 -> int32\n bool -> int8\n\n Data is checked for equality, or equivalence (non-NaN values) with\n `np.allclose` with the default keyword arguments.\n \"\"\"\n dtype = str(arr.dtype)\n if dtype in _nc3_dtype_coercions:\n new_dtype = _nc3_dtype_coercions[dtype]\n # TODO: raise a warning whenever casting the data-type instead?\n cast_arr = arr.astype(new_dtype)\n if not (cast_arr == arr).all():\n raise ValueError(\n \"could not safely cast array from dtype %s to %s\" % (dtype, new_dtype)\n )\n arr = cast_arr\n return arr"},{"attributeType":"null","col":8,"comment":"null","endLoc":33,"id":244,"name":"datastore","nodeType":"Attribute","startLoc":33,"text":"self.datastore"},{"attributeType":"null","col":8,"comment":"null","endLoc":45,"id":245,"name":"dtype","nodeType":"Attribute","startLoc":45,"text":"self.dtype"},{"attributeType":"null","col":8,"comment":"null","endLoc":34,"id":246,"name":"variable_name","nodeType":"Attribute","startLoc":34,"text":"self.variable_name"},{"col":4,"comment":"null","endLoc":26,"header":"def get_array(self, needs_lock=True)","id":247,"name":"get_array","nodeType":"Function","startLoc":24,"text":"def get_array(self, needs_lock=True):\n ds = self.datastore._manager.acquire(needs_lock)\n return ds.variables[self.variable_name]"},{"col":4,"comment":"null","endLoc":31,"header":"def __getitem__(self, key)","id":248,"name":"__getitem__","nodeType":"Function","startLoc":28,"text":"def __getitem__(self, key):\n return indexing.explicit_indexing_adapter(\n key, self.shape, indexing.IndexingSupport.BASIC, self._getitem\n )"},{"col":0,"comment":"null","endLoc":114,"header":"def nansum(a, axis=None, dtype=None, out=None, min_count=None)","id":249,"name":"nansum","nodeType":"Function","startLoc":108,"text":"def nansum(a, axis=None, dtype=None, out=None, min_count=None):\n a, mask = _replace_nan(a, 0)\n result = _dask_or_eager_func(\"sum\")(a, axis=axis, dtype=dtype)\n if min_count is not None:\n return _maybe_null_out(result, axis, mask, min_count)\n else:\n return result"},{"col":0,"comment":"null","endLoc":104,"header":"def 
_encode_nc4_variable(var)","id":250,"name":"_encode_nc4_variable","nodeType":"Function","startLoc":98,"text":"def _encode_nc4_variable(var):\n for coder in [\n coding.strings.EncodedStringCoder(allows_unicode=True),\n coding.strings.CharacterArrayCoder(),\n ]:\n var = coder.encode(var)\n return var"},{"col":0,"comment":"Support explicit indexing by delegating to a raw indexing method.\n\n Outer and/or vectorized indexers are supported by indexing a second time\n with a NumPy array.\n\n Parameters\n ----------\n key : ExplicitIndexer\n Explicit indexing object.\n shape : Tuple[int, ...]\n Shape of the indexed array.\n indexing_support : IndexingSupport enum\n Form of indexing supported by raw_indexing_method.\n raw_indexing_method: callable\n Function (like ndarray.__getitem__) that when called with indexing key\n in the form of a tuple returns an indexed array.\n\n Returns\n -------\n Indexing result, in the form of a duck numpy-array.\n ","endLoc":840,"header":"def explicit_indexing_adapter(\n key: ExplicitIndexer,\n shape: Tuple[int, ...],\n indexing_support: IndexingSupport,\n raw_indexing_method: Callable,\n) -> Any","id":251,"name":"explicit_indexing_adapter","nodeType":"Function","startLoc":808,"text":"def explicit_indexing_adapter(\n key: ExplicitIndexer,\n shape: Tuple[int, ...],\n indexing_support: IndexingSupport,\n raw_indexing_method: Callable,\n) -> Any:\n \"\"\"Support explicit indexing by delegating to a raw indexing method.\n\n Outer and/or vectorized indexers are supported by indexing a second time\n with a NumPy array.\n\n Parameters\n ----------\n key : ExplicitIndexer\n Explicit indexing object.\n shape : Tuple[int, ...]\n Shape of the indexed array.\n indexing_support : IndexingSupport enum\n Form of indexing supported by raw_indexing_method.\n raw_indexing_method: callable\n Function (like ndarray.__getitem__) that when called with indexing key\n in the form of a tuple returns an indexed array.\n\n Returns\n -------\n Indexing result, in the form of a duck numpy-array.\n \"\"\"\n raw_key, numpy_indices = decompose_indexer(key, shape, indexing_support)\n result = raw_indexing_method(raw_key.tuple)\n if numpy_indices.tuple:\n # index the loaded np.ndarray\n result = NumpyIndexingAdapter(np.asarray(result))[numpy_indices]\n return result"},{"col":0,"comment":"null","endLoc":850,"header":"def decompose_indexer(\n indexer: ExplicitIndexer, shape: Tuple[int, ...], indexing_support: IndexingSupport\n) -> Tuple[ExplicitIndexer, ExplicitIndexer]","id":252,"name":"decompose_indexer","nodeType":"Function","startLoc":843,"text":"def decompose_indexer(\n indexer: ExplicitIndexer, shape: Tuple[int, ...], indexing_support: IndexingSupport\n) -> Tuple[ExplicitIndexer, ExplicitIndexer]:\n if isinstance(indexer, VectorizedIndexer):\n return _decompose_vectorized_indexer(indexer, shape, indexing_support)\n if isinstance(indexer, (BasicIndexer, OuterIndexer)):\n return _decompose_outer_indexer(indexer, shape, indexing_support)\n raise TypeError(\"unexpected key type: {}\".format(indexer))"},{"col":0,"comment":"\n Decompose vectorized indexer to the successive two indexers, where the\n first indexer will be used to index backend arrays, while the second one\n is used to index loaded on-memory np.ndarray.\n\n Parameters\n ----------\n indexer: VectorizedIndexer\n indexing_support: one of IndexerSupport entries\n\n Returns\n -------\n backend_indexer: OuterIndexer or BasicIndexer\n np_indexers: an ExplicitIndexer (VectorizedIndexer / BasicIndexer)\n\n Notes\n -----\n This function is used 
to realize the vectorized indexing for the backend\n arrays that only support basic or outer indexing.\n\n As an example, let us consider to index a few elements from a backend array\n with a vectorized indexer ([0, 3, 1], [2, 3, 2]).\n Even if the backend array only supports outer indexing, it is more\n efficient to load a subslice of the array than loading the entire array,\n\n >>> backend_indexer = OuterIndexer([0, 1, 3], [2, 3])\n >>> array = array[backend_indexer] # load subslice of the array\n >>> np_indexer = VectorizedIndexer([0, 2, 1], [0, 1, 0])\n >>> array[np_indexer] # vectorized indexing for on-memory np.ndarray.\n ","endLoc":944,"header":"def _decompose_vectorized_indexer(\n indexer: VectorizedIndexer,\n shape: Tuple[int, ...],\n indexing_support: IndexingSupport,\n) -> Tuple[ExplicitIndexer, ExplicitIndexer]","id":253,"name":"_decompose_vectorized_indexer","nodeType":"Function","startLoc":869,"text":"def _decompose_vectorized_indexer(\n indexer: VectorizedIndexer,\n shape: Tuple[int, ...],\n indexing_support: IndexingSupport,\n) -> Tuple[ExplicitIndexer, ExplicitIndexer]:\n \"\"\"\n Decompose vectorized indexer to the successive two indexers, where the\n first indexer will be used to index backend arrays, while the second one\n is used to index loaded on-memory np.ndarray.\n\n Parameters\n ----------\n indexer: VectorizedIndexer\n indexing_support: one of IndexerSupport entries\n\n Returns\n -------\n backend_indexer: OuterIndexer or BasicIndexer\n np_indexers: an ExplicitIndexer (VectorizedIndexer / BasicIndexer)\n\n Notes\n -----\n This function is used to realize the vectorized indexing for the backend\n arrays that only support basic or outer indexing.\n\n As an example, let us consider to index a few elements from a backend array\n with a vectorized indexer ([0, 3, 1], [2, 3, 2]).\n Even if the backend array only supports outer indexing, it is more\n efficient to load a subslice of the array than loading the entire array,\n\n >>> backend_indexer = OuterIndexer([0, 1, 3], [2, 3])\n >>> array = array[backend_indexer] # load subslice of the array\n >>> np_indexer = VectorizedIndexer([0, 2, 1], [0, 1, 0])\n >>> array[np_indexer] # vectorized indexing for on-memory np.ndarray.\n \"\"\"\n assert isinstance(indexer, VectorizedIndexer)\n\n if indexing_support is IndexingSupport.VECTORIZED:\n return indexer, BasicIndexer(())\n\n backend_indexer_elems = []\n np_indexer_elems = []\n # convert negative indices\n indexer_elems = [\n np.where(k < 0, k + s, k) if isinstance(k, np.ndarray) else k\n for k, s in zip(indexer.tuple, shape)\n ]\n\n for k, s in zip(indexer_elems, shape):\n if isinstance(k, slice):\n # If it is a slice, then we will slice it as-is\n # (but make its step positive) in the backend,\n # and then use all of it (slice(None)) for the in-memory portion.\n bk_slice, np_slice = _decompose_slice(k, s)\n backend_indexer_elems.append(bk_slice)\n np_indexer_elems.append(np_slice)\n else:\n # If it is a (multidimensional) np.ndarray, just pickup the used\n # keys without duplication and store them as a 1d-np.ndarray.\n oind, vind = np.unique(k, return_inverse=True)\n backend_indexer_elems.append(oind)\n np_indexer_elems.append(vind.reshape(*k.shape))\n\n backend_indexer = OuterIndexer(tuple(backend_indexer_elems))\n np_indexer = VectorizedIndexer(tuple(np_indexer_elems))\n\n if indexing_support is IndexingSupport.OUTER:\n return backend_indexer, np_indexer\n\n # If the backend does not support outer indexing,\n # backend_indexer (OuterIndexer) is also decomposed.\n 
backend_indexer1, np_indexer1 = _decompose_outer_indexer(\n backend_indexer, shape, indexing_support\n )\n np_indexer = _combine_indexers(np_indexer1, shape, np_indexer)\n return backend_indexer1, np_indexer"},{"col":0,"comment":" In house nanmean. ddof argument will be used in _nanvar method ","endLoc":130,"header":"def _nanmean_ddof_object(ddof, value, axis=None, dtype=None, **kwargs)","id":254,"name":"_nanmean_ddof_object","nodeType":"Function","startLoc":117,"text":"def _nanmean_ddof_object(ddof, value, axis=None, dtype=None, **kwargs):\n \"\"\" In house nanmean. ddof argument will be used in _nanvar method \"\"\"\n from .duck_array_ops import count, fillna, _dask_or_eager_func, where_method\n\n valid_count = count(value, axis=axis)\n value = fillna(value, 0)\n # As dtype inference is impossible for object dtype, we assume float\n # https://github.com/dask/dask/issues/3162\n if dtype is None and value.dtype.kind == \"O\":\n dtype = value.dtype if value.dtype.kind in [\"cf\"] else float\n\n data = _dask_or_eager_func(\"sum\")(value, axis=axis, dtype=dtype, **kwargs)\n data = data / (valid_count - ddof)\n return where_method(data, valid_count != 0)"},{"col":0,"comment":"null","endLoc":68,"header":"def encode_nc3_attr_value(value)","id":255,"name":"encode_nc3_attr_value","nodeType":"Function","startLoc":59,"text":"def encode_nc3_attr_value(value):\n if isinstance(value, bytes):\n pass\n elif isinstance(value, str):\n value = value.encode(STRING_ENCODING)\n else:\n value = coerce_nc3_dtype(np.atleast_1d(value))\n if value.ndim > 1:\n raise ValueError(\"netCDF attributes must be 1-dimensional\")\n return value"},{"col":0,"comment":" convert a slice to successive two slices. The first slice always has\n a positive step.\n ","endLoc":866,"header":"def _decompose_slice(key, size)","id":256,"name":"_decompose_slice","nodeType":"Function","startLoc":853,"text":"def _decompose_slice(key, size):\n \"\"\" convert a slice to successive two slices. The first slice always has\n a positive step.\n \"\"\"\n start, stop, step = key.indices(size)\n if step > 0:\n # If key already has a positive step, use it as is in the backend\n return key, slice(None)\n else:\n # determine stop precisely for step > 1 case\n # e.g. [98:2:-2] -> [98:3:-2]\n stop = start + int((stop - start - 1) / step) * step + 1\n start, stop = stop + 1, start + 1\n return slice(start, stop, -step), slice(None, None, -1)"},{"id":257,"name":"LICENSE","nodeType":"TextFile","path":"","text":"Apache License\nVersion 2.0, January 2004\nhttp://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. 
Definitions.\n\n\"License\" shall mean the terms and conditions for use, reproduction, and\ndistribution as defined by Sections 1 through 9 of this document.\n\n\"Licensor\" shall mean the copyright owner or entity authorized by the copyright\nowner that is granting the License.\n\n\"Legal Entity\" shall mean the union of the acting entity and all other entities\nthat control, are controlled by, or are under common control with that entity.\nFor the purposes of this definition, \"control\" means (i) the power, direct or\nindirect, to cause the direction or management of such entity, whether by\ncontract or otherwise, or (ii) ownership of fifty percent (50%) or more of the\noutstanding shares, or (iii) beneficial ownership of such entity.\n\n\"You\" (or \"Your\") shall mean an individual or Legal Entity exercising\npermissions granted by this License.\n\n\"Source\" form shall mean the preferred form for making modifications, including\nbut not limited to software source code, documentation source, and configuration\nfiles.\n\n\"Object\" form shall mean any form resulting from mechanical transformation or\ntranslation of a Source form, including but not limited to compiled object code,\ngenerated documentation, and conversions to other media types.\n\n\"Work\" shall mean the work of authorship, whether in Source or Object form, made\navailable under the License, as indicated by a copyright notice that is included\nin or attached to the work (an example is provided in the Appendix below).\n\n\"Derivative Works\" shall mean any work, whether in Source or Object form, that\nis based on (or derived from) the Work and for which the editorial revisions,\nannotations, elaborations, or other modifications represent, as a whole, an\noriginal work of authorship. For the purposes of this License, Derivative Works\nshall not include works that remain separable from, or merely link (or bind by\nname) to the interfaces of, the Work and Derivative Works thereof.\n\n\"Contribution\" shall mean any work of authorship, including the original version\nof the Work and any modifications or additions to that Work or Derivative Works\nthereof, that is intentionally submitted to Licensor for inclusion in the Work\nby the copyright owner or by an individual or Legal Entity authorized to submit\non behalf of the copyright owner. For the purposes of this definition,\n\"submitted\" means any form of electronic, verbal, or written communication sent\nto the Licensor or its representatives, including but not limited to\ncommunication on electronic mailing lists, source code control systems, and\nissue tracking systems that are managed by, or on behalf of, the Licensor for\nthe purpose of discussing and improving the Work, but excluding communication\nthat is conspicuously marked or otherwise designated in writing by the copyright\nowner as \"Not a Contribution.\"\n\n\"Contributor\" shall mean Licensor and any individual or Legal Entity on behalf\nof whom a Contribution has been received by Licensor and subsequently\nincorporated within the Work.\n\n2. Grant of Copyright License.\n\nSubject to the terms and conditions of this License, each Contributor hereby\ngrants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,\nirrevocable copyright license to reproduce, prepare Derivative Works of,\npublicly display, publicly perform, sublicense, and distribute the Work and such\nDerivative Works in Source or Object form.\n\n3. 
Grant of Patent License.\n\nSubject to the terms and conditions of this License, each Contributor hereby\ngrants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,\nirrevocable (except as stated in this section) patent license to make, have\nmade, use, offer to sell, sell, import, and otherwise transfer the Work, where\nsuch license applies only to those patent claims licensable by such Contributor\nthat are necessarily infringed by their Contribution(s) alone or by combination\nof their Contribution(s) with the Work to which such Contribution(s) was\nsubmitted. If You institute patent litigation against any entity (including a\ncross-claim or counterclaim in a lawsuit) alleging that the Work or a\nContribution incorporated within the Work constitutes direct or contributory\npatent infringement, then any patent licenses granted to You under this License\nfor that Work shall terminate as of the date such litigation is filed.\n\n4. Redistribution.\n\nYou may reproduce and distribute copies of the Work or Derivative Works thereof\nin any medium, with or without modifications, and in Source or Object form,\nprovided that You meet the following conditions:\n\nYou must give any other recipients of the Work or Derivative Works a copy of\nthis License; and\nYou must cause any modified files to carry prominent notices stating that You\nchanged the files; and\nYou must retain, in the Source form of any Derivative Works that You distribute,\nall copyright, patent, trademark, and attribution notices from the Source form\nof the Work, excluding those notices that do not pertain to any part of the\nDerivative Works; and\nIf the Work includes a \"NOTICE\" text file as part of its distribution, then any\nDerivative Works that You distribute must include a readable copy of the\nattribution notices contained within such NOTICE file, excluding those notices\nthat do not pertain to any part of the Derivative Works, in at least one of the\nfollowing places: within a NOTICE text file distributed as part of the\nDerivative Works; within the Source form or documentation, if provided along\nwith the Derivative Works; or, within a display generated by the Derivative\nWorks, if and wherever such third-party notices normally appear. The contents of\nthe NOTICE file are for informational purposes only and do not modify the\nLicense. You may add Your own attribution notices within Derivative Works that\nYou distribute, alongside or as an addendum to the NOTICE text from the Work,\nprovided that such additional attribution notices cannot be construed as\nmodifying the License.\nYou may add Your own copyright statement to Your modifications and may provide\nadditional or different license terms and conditions for use, reproduction, or\ndistribution of Your modifications, or for any such Derivative Works as a whole,\nprovided Your use, reproduction, and distribution of the Work otherwise complies\nwith the conditions stated in this License.\n\n5. Submission of Contributions.\n\nUnless You explicitly state otherwise, any Contribution intentionally submitted\nfor inclusion in the Work by You to the Licensor shall be under the terms and\nconditions of this License, without any additional terms or conditions.\nNotwithstanding the above, nothing herein shall supersede or modify the terms of\nany separate license agreement you may have executed with Licensor regarding\nsuch Contributions.\n\n6. 
Trademarks.\n\nThis License does not grant permission to use the trade names, trademarks,\nservice marks, or product names of the Licensor, except as required for\nreasonable and customary use in describing the origin of the Work and\nreproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty.\n\nUnless required by applicable law or agreed to in writing, Licensor provides the\nWork (and each Contributor provides its Contributions) on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,\nincluding, without limitation, any warranties or conditions of TITLE,\nNON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are\nsolely responsible for determining the appropriateness of using or\nredistributing the Work and assume any risks associated with Your exercise of\npermissions under this License.\n\n8. Limitation of Liability.\n\nIn no event and under no legal theory, whether in tort (including negligence),\ncontract, or otherwise, unless required by applicable law (such as deliberate\nand grossly negligent acts) or agreed to in writing, shall any Contributor be\nliable to You for damages, including any direct, indirect, special, incidental,\nor consequential damages of any character arising as a result of this License or\nout of the use or inability to use the Work (including but not limited to\ndamages for loss of goodwill, work stoppage, computer failure or malfunction, or\nany and all other commercial damages or losses), even if such Contributor has\nbeen advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability.\n\nWhile redistributing the Work or Derivative Works thereof, You may choose to\noffer, and charge a fee for, acceptance of support, warranty, indemnity, or\nother liability obligations and/or rights consistent with this License. However,\nin accepting such obligations, You may act only on Your own behalf and on Your\nsole responsibility, not on behalf of any other Contributor, and only if You\nagree to indemnify, defend, and hold each Contributor harmless for any liability\nincurred by, or claims asserted against, such Contributor by reason of your\naccepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work\n\nTo apply the Apache License to your work, attach the following boilerplate\nnotice, with the fields enclosed by brackets \"[]\" replaced with your own\nidentifying information. (Don't include the brackets!) The text should be\nenclosed in the appropriate comment syntax for the file format. 
We also\nrecommend that a file or class name and description of purpose be included on\nthe same \"printed page\" as the copyright notice for easier identification within\nthird-party archives.\n\n Copyright [yyyy] [name of copyright owner]\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n"},{"id":258,"name":"doc/_templates","nodeType":"Package"},{"id":259,"name":"layout.html","nodeType":"TextFile","path":"doc/_templates","text":"{% extends \"!layout.html\" %}\n{% set css_files = css_files + [\"_static/style.css\"] %}\n"},{"id":260,"name":"internals.rst","nodeType":"TextFile","path":"doc","text":".. _internals:\n\nxarray Internals\n================\n\n.. currentmodule:: xarray\n\nxarray builds upon two of the foundational libraries of the scientific Python\nstack, NumPy and pandas. It is written in pure Python (no C or Cython\nextensions), which makes it easy to develop and extend. Instead, we push\ncompiled code to :ref:`optional dependencies`.\n\nVariable objects\n----------------\n\nThe core internal data structure in xarray is the :py:class:`~xarray.Variable`,\nwhich is used as the basic building block behind xarray's\n:py:class:`~xarray.Dataset` and :py:class:`~xarray.DataArray` types. A\n``Variable`` consists of:\n\n- ``dims``: A tuple of dimension names.\n- ``data``: The N-dimensional array (typically, a NumPy or Dask array) storing\n the Variable's data. It must have the same number of dimensions as the length\n of ``dims``.\n- ``attrs``: An ordered dictionary of metadata associated with this array. By\n convention, xarray's built-in operations never use this metadata.\n- ``encoding``: Another ordered dictionary used to store information about how\n these variable's data is represented on disk. See :ref:`io.encoding` for more\n details.\n\n``Variable`` has an interface similar to NumPy arrays, but extended to make use\nof named dimensions. For example, it uses ``dim`` in preference to an ``axis``\nargument for methods like ``mean``, and supports :ref:`compute.broadcasting`.\n\nHowever, unlike ``Dataset`` and ``DataArray``, the basic ``Variable`` does not\ninclude coordinate labels along each axis.\n\n``Variable`` is public API, but because of its incomplete support for labeled\ndata, it is mostly intended for advanced uses, such as in xarray itself or for\nwriting new backends. You can access the variable objects that correspond to\nxarray objects via the (readonly) :py:attr:`Dataset.variables\n` and\n:py:attr:`DataArray.variable ` attributes.\n\nExtending xarray\n----------------\n\n.. ipython:: python\n :suppress:\n\n import numpy as np\n import pandas as pd\n import xarray as xr\n np.random.seed(123456)\n\nxarray is designed as a general purpose library, and hence tries to avoid\nincluding overly domain specific functionality. But inevitably, the need for more\ndomain specific logic arises.\n\nOne standard solution to this problem is to subclass Dataset and/or DataArray to\nadd domain specific functionality. However, inheritance is not very robust. 
It's\neasy to inadvertently use internal APIs when subclassing, which means that your\ncode may break when xarray upgrades. Furthermore, many builtin methods will\nonly return native xarray objects.\n\nThe standard advice is to use `composition over inheritance`__, but\nreimplementing an API as large as xarray's on your own objects can be an onerous\ntask, even if most methods are only forwarding to xarray implementations.\n\n__ https://github.com/pydata/xarray/issues/706\n\nIf you simply want the ability to call a function with the syntax of a\nmethod call, then the builtin :py:meth:`~xarray.DataArray.pipe` method (copied\nfrom pandas) may suffice.\n\nTo resolve this issue for more complex cases, xarray has the\n:py:func:`~xarray.register_dataset_accessor` and\n:py:func:`~xarray.register_dataarray_accessor` decorators for adding custom\n\"accessors\" on xarray objects. Here's how you might use these decorators to\nwrite a custom \"geo\" accessor implementing a geography specific extension to\nxarray:\n\n.. literalinclude:: examples/_code/accessor_example.py\n\nThis achieves the same result as if the ``Dataset`` class had a cached property\ndefined that returns an instance of your class:\n\n.. code-block:: python\n\n class Dataset:\n ...\n @property\n def geo(self)\n return GeoAccessor(self)\n\nHowever, using the register accessor decorators is preferable to simply adding\nyour own ad-hoc property (i.e., ``Dataset.geo = property(...)``), for several\nreasons:\n\n1. It ensures that the name of your property does not accidentally conflict with\n any other attributes or methods (including other accessors).\n2. Instances of accessor object will be cached on the xarray object that creates\n them. This means you can save state on them (e.g., to cache computed\n properties).\n3. Using an accessor provides an implicit namespace for your custom\n functionality that clearly identifies it as separate from built-in xarray\n methods.\n\n.. note::\n\n Accessors are created once per DataArray and Dataset instance. New\n instances, like those created from arithmetic operations or when accessing\n a DataArray from a Dataset (ex. ``ds[var_name]``), will have new\n accessors created.\n\nBack in an interactive IPython session, we can use these properties:\n\n.. ipython:: python\n :suppress:\n\n exec(open(\"examples/_code/accessor_example.py\").read())\n\n.. ipython:: python\n\n ds = xr.Dataset({'longitude': np.linspace(0, 10),\n 'latitude': np.linspace(0, 20)})\n ds.geo.center\n ds.geo.plot()\n\nThe intent here is that libraries that extend xarray could add such an accessor\nto implement subclass specific functionality rather than using actual subclasses\nor patching in a large number of domain specific methods. For further reading\non ways to write new accessors and the philosophy behind the approach, see\n:issue:`1080`.\n\nTo help users keep things straight, please `let us know\n`_ if you plan to write a new accessor\nfor an open source library. 
In the future, we will maintain a list of accessors\nand the libraries that implement them on this page.\n"},{"className":"Coordinates","col":0,"comment":"null","endLoc":181,"id":261,"nodeType":"Class","startLoc":33,"text":"class Coordinates(Mapping[Hashable, \"DataArray\"]):\n __slots__ = ()\n\n def __getitem__(self, key: Hashable) -> \"DataArray\":\n raise NotImplementedError()\n\n def __setitem__(self, key: Hashable, value: Any) -> None:\n self.update({key: value})\n\n @property\n def _names(self) -> Set[Hashable]:\n raise NotImplementedError()\n\n @property\n def dims(self) -> Union[Mapping[Hashable, int], Tuple[Hashable, ...]]:\n raise NotImplementedError()\n\n @property\n def indexes(self) -> Indexes:\n return self._data.indexes # type: ignore\n\n @property\n def variables(self):\n raise NotImplementedError()\n\n def _update_coords(self, coords, indexes):\n raise NotImplementedError()\n\n def __iter__(self) -> Iterator[\"Hashable\"]:\n # needs to be in the same order as the dataset variables\n for k in self.variables:\n if k in self._names:\n yield k\n\n def __len__(self) -> int:\n return len(self._names)\n\n def __contains__(self, key: Hashable) -> bool:\n return key in self._names\n\n def __repr__(self) -> str:\n return formatting.coords_repr(self)\n\n def to_dataset(self) -> \"Dataset\":\n raise NotImplementedError()\n\n def to_index(self, ordered_dims: Sequence[Hashable] = None) -> pd.Index:\n \"\"\"Convert all index coordinates into a :py:class:`pandas.Index`.\n\n Parameters\n ----------\n ordered_dims : sequence of hashable, optional\n Possibly reordered version of this object's dimensions indicating\n the order in which dimensions should appear on the result.\n\n Returns\n -------\n pandas.Index\n Index subclass corresponding to the outer-product of all dimension\n coordinates. 
This will be a MultiIndex if this object is has more\n than more dimension.\n \"\"\"\n if ordered_dims is None:\n ordered_dims = list(self.dims)\n elif set(ordered_dims) != set(self.dims):\n raise ValueError(\n \"ordered_dims must match dims, but does not: \"\n \"{} vs {}\".format(ordered_dims, self.dims)\n )\n\n if len(ordered_dims) == 0:\n raise ValueError(\"no valid index for a 0-dimensional object\")\n elif len(ordered_dims) == 1:\n (dim,) = ordered_dims\n return self._data.get_index(dim) # type: ignore\n else:\n indexes = [self._data.get_index(k) for k in ordered_dims] # type: ignore\n names = list(ordered_dims)\n return pd.MultiIndex.from_product(indexes, names=names)\n\n def update(self, other: Mapping[Hashable, Any]) -> None:\n other_vars = getattr(other, \"variables\", other)\n coords, indexes = merge_coords(\n [self.variables, other_vars], priority_arg=1, indexes=self.indexes\n )\n self._update_coords(coords, indexes)\n\n def _merge_raw(self, other):\n \"\"\"For use with binary arithmetic.\"\"\"\n if other is None:\n variables = dict(self.variables)\n indexes = dict(self.indexes)\n else:\n variables, indexes = merge_coordinates_without_align([self, other])\n return variables, indexes\n\n @contextmanager\n def _merge_inplace(self, other):\n \"\"\"For use with in-place binary arithmetic.\"\"\"\n if other is None:\n yield\n else:\n # don't include indexes in prioritized, because we didn't align\n # first and we want indexes to be checked\n prioritized = {\n k: (v, None) for k, v in self.variables.items() if k not in self.indexes\n }\n variables, indexes = merge_coordinates_without_align(\n [self, other], prioritized\n )\n yield\n self._update_coords(variables, indexes)\n\n def merge(self, other: \"Coordinates\") -> \"Dataset\":\n \"\"\"Merge two sets of coordinates to create a new Dataset\n\n The method implements the logic used for joining coordinates in the\n result of a binary operation performed on xarray objects:\n\n - If two index coordinates conflict (are not equal), an exception is\n raised. 
You must align your data before passing it to this method.\n - If an index coordinate and a non-index coordinate conflict, the non-\n index coordinate is dropped.\n - If two non-index coordinates conflict, both are dropped.\n\n Parameters\n ----------\n other : DatasetCoordinates or DataArrayCoordinates\n The coordinates from another dataset or data array.\n\n Returns\n -------\n merged : Dataset\n A new Dataset with merged coordinates.\n \"\"\"\n from .dataset import Dataset\n\n if other is None:\n return self.to_dataset()\n\n if not isinstance(other, Coordinates):\n other = Dataset(coords=other).coords\n\n coords, indexes = merge_coordinates_without_align([self, other])\n coord_names = set(coords)\n merged = Dataset._construct_direct(\n variables=coords, coord_names=coord_names, indexes=indexes\n )\n return merged"},{"col":0,"comment":"null","endLoc":266,"header":"def _extract_nc4_variable_encoding(\n variable,\n raise_on_invalid=False,\n lsd_okay=True,\n h5py_okay=False,\n backend=\"netCDF4\",\n unlimited_dims=None,\n)","id":262,"name":"_extract_nc4_variable_encoding","nodeType":"Function","startLoc":200,"text":"def _extract_nc4_variable_encoding(\n variable,\n raise_on_invalid=False,\n lsd_okay=True,\n h5py_okay=False,\n backend=\"netCDF4\",\n unlimited_dims=None,\n):\n if unlimited_dims is None:\n unlimited_dims = ()\n\n encoding = variable.encoding.copy()\n\n safe_to_drop = {\"source\", \"original_shape\"}\n valid_encodings = {\n \"zlib\",\n \"complevel\",\n \"fletcher32\",\n \"contiguous\",\n \"chunksizes\",\n \"shuffle\",\n \"_FillValue\",\n \"dtype\",\n }\n if lsd_okay:\n valid_encodings.add(\"least_significant_digit\")\n if h5py_okay:\n valid_encodings.add(\"compression\")\n valid_encodings.add(\"compression_opts\")\n\n if not raise_on_invalid and encoding.get(\"chunksizes\") is not None:\n # It's possible to get encoded chunksizes larger than a dimension size\n # if the original file had an unlimited dimension. This is problematic\n # if the new file no longer has an unlimited dimension.\n chunksizes = encoding[\"chunksizes\"]\n chunks_too_big = any(\n c > d and dim not in unlimited_dims\n for c, d, dim in zip(chunksizes, variable.shape, variable.dims)\n )\n has_original_shape = \"original_shape\" in encoding\n changed_shape = (\n has_original_shape and encoding.get(\"original_shape\") != variable.shape\n )\n if chunks_too_big or changed_shape:\n del encoding[\"chunksizes\"]\n\n var_has_unlim_dim = any(dim in unlimited_dims for dim in variable.dims)\n if not raise_on_invalid and var_has_unlim_dim and \"contiguous\" in encoding.keys():\n del encoding[\"contiguous\"]\n\n for k in safe_to_drop:\n if k in encoding:\n del encoding[k]\n\n if raise_on_invalid:\n invalid = [k for k in encoding if k not in valid_encodings]\n if invalid:\n raise ValueError(\n \"unexpected encoding parameters for %r backend: %r. Valid \"\n \"encodings are: %r\" % (backend, invalid, valid_encodings)\n )\n else:\n for k in list(encoding):\n if k not in valid_encodings:\n del encoding[k]\n\n return encoding"},{"fileName":"concat.py","filePath":"xarray/core","id":263,"nodeType":"File","text":"import pandas as pd\n\nfrom . 
import dtypes, utils\nfrom .alignment import align\nfrom .merge import _VALID_COMPAT, unique_variable\nfrom .variable import IndexVariable, Variable, as_variable\nfrom .variable import concat as concat_vars\n\n\ndef concat(\n objs,\n dim,\n data_vars=\"all\",\n coords=\"different\",\n compat=\"equals\",\n positions=None,\n fill_value=dtypes.NA,\n join=\"outer\",\n):\n \"\"\"Concatenate xarray objects along a new or existing dimension.\n\n Parameters\n ----------\n objs : sequence of Dataset and DataArray objects\n xarray objects to concatenate together. Each object is expected to\n consist of variables and coordinates with matching shapes except for\n along the concatenated dimension.\n dim : str or DataArray or pandas.Index\n Name of the dimension to concatenate along. This can either be a new\n dimension name, in which case it is added along axis=0, or an existing\n dimension name, in which case the location of the dimension is\n unchanged. If dimension is provided as a DataArray or Index, its name\n is used as the dimension to concatenate along and the values are added\n as a coordinate.\n data_vars : {'minimal', 'different', 'all' or list of str}, optional\n These data variables will be concatenated together:\n * 'minimal': Only data variables in which the dimension already\n appears are included.\n * 'different': Data variables which are not equal (ignoring\n attributes) across all datasets are also concatenated (as well as\n all for which dimension already appears). Beware: this option may\n load the data payload of data variables into memory if they are not\n already loaded.\n * 'all': All data variables will be concatenated.\n * list of str: The listed data variables will be concatenated, in\n addition to the 'minimal' data variables.\n If objects are DataArrays, data_vars must be 'all'.\n coords : {'minimal', 'different', 'all' or list of str}, optional\n These coordinate variables will be concatenated together:\n * 'minimal': Only coordinates in which the dimension already appears\n are included.\n * 'different': Coordinates which are not equal (ignoring attributes)\n across all datasets are also concatenated (as well as all for which\n dimension already appears). Beware: this option may load the data\n payload of coordinate variables into memory if they are not already\n loaded.\n * 'all': All coordinate variables will be concatenated, except\n those corresponding to other dimensions.\n * list of str: The listed coordinate variables will be concatenated,\n in addition to the 'minimal' coordinates.\n compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional\n String indicating how to compare non-concatenated variables of the same name for\n potential conflicts. This is passed down to merge.\n\n - 'broadcast_equals': all values must be equal when variables are\n broadcast against each other to ensure common dimensions.\n - 'equals': all values and dimensions must be the same.\n - 'identical': all values, dimensions and attributes must be the\n same.\n - 'no_conflicts': only values which are not null in both datasets\n must be equal. The returned dataset then contains the combination\n of all non-null values.\n - 'override': skip comparing and pick variable from first dataset\n positions : None or list of integer arrays, optional\n List of integer arrays which specifies the integer positions to which\n to assign each dataset along the concatenated dimension. 
If not\n supplied, objects are concatenated in the provided order.\n fill_value : scalar, optional\n Value to use for newly missing values\n join : {'outer', 'inner', 'left', 'right', 'exact'}, optional\n String indicating how to combine differing indexes\n (excluding dim) in objects\n\n - 'outer': use the union of object indexes\n - 'inner': use the intersection of object indexes\n - 'left': use indexes from the first object with each dimension\n - 'right': use indexes from the last object with each dimension\n - 'exact': instead of aligning, raise `ValueError` when indexes to be\n aligned are not equal\n - 'override': if indexes are of same size, rewrite indexes to be\n those of the first object with that dimension. Indexes for the same\n dimension must have the same size in all objects.\n\n indexers, mode, concat_over : deprecated\n\n Returns\n -------\n concatenated : type of objs\n\n See also\n --------\n merge\n auto_combine\n \"\"\"\n # TODO: add ignore_index arguments copied from pandas.concat\n # TODO: support concatenating scalar coordinates even if the concatenated\n # dimension already exists\n from .dataset import Dataset\n from .dataarray import DataArray\n\n try:\n first_obj, objs = utils.peek_at(objs)\n except StopIteration:\n raise ValueError(\"must supply at least one object to concatenate\")\n\n if compat not in _VALID_COMPAT:\n raise ValueError(\n \"compat=%r invalid: must be 'broadcast_equals', 'equals', 'identical', 'no_conflicts' or 'override'\"\n % compat\n )\n\n if isinstance(first_obj, DataArray):\n f = _dataarray_concat\n elif isinstance(first_obj, Dataset):\n f = _dataset_concat\n else:\n raise TypeError(\n \"can only concatenate xarray Dataset and DataArray \"\n \"objects, got %s\" % type(first_obj)\n )\n return f(objs, dim, data_vars, coords, compat, positions, fill_value, join)\n\n\ndef _calc_concat_dim_coord(dim):\n \"\"\"\n Infer the dimension name and 1d coordinate variable (if appropriate)\n for concatenating along the new dimension.\n \"\"\"\n from .dataarray import DataArray\n\n if isinstance(dim, str):\n coord = None\n elif not isinstance(dim, (DataArray, Variable)):\n dim_name = getattr(dim, \"name\", None)\n if dim_name is None:\n dim_name = \"concat_dim\"\n coord = IndexVariable(dim_name, dim)\n dim = dim_name\n elif not isinstance(dim, DataArray):\n coord = as_variable(dim).to_index_variable()\n dim, = coord.dims\n else:\n coord = dim\n dim, = coord.dims\n return dim, coord\n\n\ndef _calc_concat_over(datasets, dim, dim_names, data_vars, coords, compat):\n \"\"\"\n Determine which dataset variables need to be concatenated in the result,\n \"\"\"\n # Return values\n concat_over = set()\n equals = {}\n\n if dim in dim_names:\n concat_over_existing_dim = True\n concat_over.add(dim)\n else:\n concat_over_existing_dim = False\n\n concat_dim_lengths = []\n for ds in datasets:\n if concat_over_existing_dim:\n if dim not in ds.dims:\n if dim in ds:\n ds = ds.set_coords(dim)\n concat_over.update(k for k, v in ds.variables.items() if dim in v.dims)\n concat_dim_lengths.append(ds.dims.get(dim, 1))\n\n def process_subset_opt(opt, subset):\n if isinstance(opt, str):\n if opt == \"different\":\n if compat == \"override\":\n raise ValueError(\n \"Cannot specify both %s='different' and compat='override'.\"\n % subset\n )\n # all nonindexes that are not the same in each dataset\n for k in getattr(datasets[0], subset):\n if k not in concat_over:\n # Compare the variable of all datasets vs. the one\n # of the first dataset. 
Perform the minimum amount of\n # loads in order to avoid multiple loads from disk\n # while keeping the RAM footprint low.\n v_lhs = datasets[0].variables[k].load()\n # We'll need to know later on if variables are equal.\n computed = []\n for ds_rhs in datasets[1:]:\n v_rhs = ds_rhs.variables[k].compute()\n computed.append(v_rhs)\n if not getattr(v_lhs, compat)(v_rhs):\n concat_over.add(k)\n equals[k] = False\n # computed variables are not to be re-computed\n # again in the future\n for ds, v in zip(datasets[1:], computed):\n ds.variables[k].data = v.data\n break\n else:\n equals[k] = True\n\n elif opt == \"all\":\n concat_over.update(\n set(getattr(datasets[0], subset)) - set(datasets[0].dims)\n )\n elif opt == \"minimal\":\n pass\n else:\n raise ValueError(\"unexpected value for %s: %s\" % (subset, opt))\n else:\n invalid_vars = [k for k in opt if k not in getattr(datasets[0], subset)]\n if invalid_vars:\n if subset == \"coords\":\n raise ValueError(\n \"some variables in coords are not coordinates on \"\n \"the first dataset: %s\" % (invalid_vars,)\n )\n else:\n raise ValueError(\n \"some variables in data_vars are not data variables \"\n \"on the first dataset: %s\" % (invalid_vars,)\n )\n concat_over.update(opt)\n\n process_subset_opt(data_vars, \"data_vars\")\n process_subset_opt(coords, \"coords\")\n return concat_over, equals, concat_dim_lengths\n\n\n# determine dimensional coordinate names and a dict mapping name to DataArray\ndef _parse_datasets(datasets):\n\n dims = set()\n all_coord_names = set()\n data_vars = set() # list of data_vars\n dim_coords = {} # maps dim name to variable\n dims_sizes = {} # shared dimension sizes to expand variables\n\n for ds in datasets:\n dims_sizes.update(ds.dims)\n all_coord_names.update(ds.coords)\n data_vars.update(ds.data_vars)\n\n for dim in set(ds.dims) - dims:\n if dim not in dim_coords:\n dim_coords[dim] = ds.coords[dim].variable\n dims = dims | set(ds.dims)\n\n return dim_coords, dims_sizes, all_coord_names, data_vars\n\n\ndef _dataset_concat(\n datasets,\n dim,\n data_vars,\n coords,\n compat,\n positions,\n fill_value=dtypes.NA,\n join=\"outer\",\n):\n \"\"\"\n Concatenate a sequence of datasets along a new or existing dimension\n \"\"\"\n from .dataset import Dataset\n\n dim, coord = _calc_concat_dim_coord(dim)\n # Make sure we're working on a copy (we'll be loading variables)\n datasets = [ds.copy() for ds in datasets]\n datasets = align(\n *datasets, join=join, copy=False, exclude=[dim], fill_value=fill_value\n )\n\n dim_coords, dims_sizes, coord_names, data_names = _parse_datasets(datasets)\n dim_names = set(dim_coords)\n unlabeled_dims = dim_names - coord_names\n\n both_data_and_coords = coord_names & data_names\n if both_data_and_coords:\n raise ValueError(\n \"%r is a coordinate in some datasets but not others.\" % both_data_and_coords\n )\n # we don't want the concat dimension in the result dataset yet\n dim_coords.pop(dim, None)\n dims_sizes.pop(dim, None)\n\n # case where concat dimension is a coordinate or data_var but not a dimension\n if (dim in coord_names or dim in data_names) and dim not in dim_names:\n datasets = [ds.expand_dims(dim) for ds in datasets]\n\n # determine which variables to concatentate\n concat_over, equals, concat_dim_lengths = _calc_concat_over(\n datasets, dim, dim_names, data_vars, coords, compat\n )\n\n # determine which variables to merge, and then merge them according to compat\n variables_to_merge = (coord_names | data_names) - concat_over - dim_names\n\n result_vars = {}\n if 
variables_to_merge:\n to_merge = {var: [] for var in variables_to_merge}\n\n for ds in datasets:\n absent_merge_vars = variables_to_merge - set(ds.variables)\n if absent_merge_vars:\n raise ValueError(\n \"variables %r are present in some datasets but not others. \"\n % absent_merge_vars\n )\n\n for var in variables_to_merge:\n to_merge[var].append(ds.variables[var])\n\n for var in variables_to_merge:\n result_vars[var] = unique_variable(\n var, to_merge[var], compat=compat, equals=equals.get(var, None)\n )\n else:\n result_vars = {}\n result_vars.update(dim_coords)\n\n # assign attrs and encoding from first dataset\n result_attrs = datasets[0].attrs\n result_encoding = datasets[0].encoding\n\n # check that global attributes are fixed across all datasets if necessary\n for ds in datasets[1:]:\n if compat == \"identical\" and not utils.dict_equiv(ds.attrs, result_attrs):\n raise ValueError(\"Dataset global attributes not equal.\")\n\n # we've already verified everything is consistent; now, calculate\n # shared dimension sizes so we can expand the necessary variables\n def ensure_common_dims(vars):\n # ensure each variable with the given name shares the same\n # dimensions and the same shape for all of them except along the\n # concat dimension\n common_dims = tuple(pd.unique([d for v in vars for d in v.dims]))\n if dim not in common_dims:\n common_dims = (dim,) + common_dims\n for var, dim_len in zip(vars, concat_dim_lengths):\n if var.dims != common_dims:\n common_shape = tuple(dims_sizes.get(d, dim_len) for d in common_dims)\n var = var.set_dims(common_dims, common_shape)\n yield var\n\n # stack up each variable to fill-out the dataset (in order)\n # n.b. this loop preserves variable order, needed for groupby.\n for k in datasets[0].variables:\n if k in concat_over:\n try:\n vars = ensure_common_dims([ds.variables[k] for ds in datasets])\n except KeyError:\n raise ValueError(\"%r is not present in all datasets.\" % k)\n combined = concat_vars(vars, dim, positions)\n assert isinstance(combined, Variable)\n result_vars[k] = combined\n\n result = Dataset(result_vars, attrs=result_attrs)\n absent_coord_names = coord_names - set(result.variables)\n if absent_coord_names:\n raise ValueError(\n \"Variables %r are coordinates in some datasets but not others.\"\n % absent_coord_names\n )\n result = result.set_coords(coord_names)\n result.encoding = result_encoding\n\n result = result.drop(unlabeled_dims, errors=\"ignore\")\n\n if coord is not None:\n # add concat dimension last to ensure that its in the final Dataset\n result[coord.name] = coord\n\n return result\n\n\ndef _dataarray_concat(\n arrays,\n dim,\n data_vars,\n coords,\n compat,\n positions,\n fill_value=dtypes.NA,\n join=\"outer\",\n):\n arrays = list(arrays)\n\n if data_vars != \"all\":\n raise ValueError(\n \"data_vars is not a valid argument when concatenating DataArray objects\"\n )\n\n datasets = []\n for n, arr in enumerate(arrays):\n if n == 0:\n name = arr.name\n elif name != arr.name:\n if compat == \"identical\":\n raise ValueError(\"array names not identical\")\n else:\n arr = arr.rename(name)\n datasets.append(arr._to_temp_dataset())\n\n ds = _dataset_concat(\n datasets,\n dim,\n data_vars,\n coords,\n compat,\n positions,\n fill_value=fill_value,\n join=join,\n )\n return arrays[0]._from_temp_dataset(ds, name)\n"},{"col":0,"comment":"null","endLoc":72,"header":"def encode_nc3_attrs(attrs)","id":264,"name":"encode_nc3_attrs","nodeType":"Function","startLoc":71,"text":"def encode_nc3_attrs(attrs):\n return {k: 
encode_nc3_attr_value(v) for k, v in attrs.items()}"},{"col":0,"comment":"null","endLoc":140,"header":"def nanmean(a, axis=None, dtype=None, out=None)","id":265,"name":"nanmean","nodeType":"Function","startLoc":133,"text":"def nanmean(a, axis=None, dtype=None, out=None):\n if a.dtype.kind == \"O\":\n return _nanmean_ddof_object(0, a, axis=axis, dtype=dtype)\n\n if isinstance(a, dask_array_type):\n return dask_array.nanmean(a, axis=axis, dtype=dtype)\n\n return np.nanmean(a, axis=axis, dtype=dtype)"},{"col":0,"comment":"null","endLoc":83,"header":"def encode_nc3_variable(var)","id":266,"name":"encode_nc3_variable","nodeType":"Function","startLoc":75,"text":"def encode_nc3_variable(var):\n for coder in [\n coding.strings.EncodedStringCoder(allows_unicode=False),\n coding.strings.CharacterArrayCoder(),\n ]:\n var = coder.encode(var)\n data = coerce_nc3_dtype(var.data)\n attrs = encode_nc3_attrs(var.attrs)\n return Variable(var.dims, data, attrs, var.encoding)"},{"col":0,"comment":"null","endLoc":144,"header":"def nanmedian(a, axis=None, out=None)","id":267,"name":"nanmedian","nodeType":"Function","startLoc":143,"text":"def nanmedian(a, axis=None, out=None):\n return _dask_or_eager_func(\"nanmedian\", eager_module=nputils)(a, axis=axis)"},{"col":0,"comment":"null","endLoc":152,"header":"def _nanvar_object(value, axis=None, ddof=0, keepdims=False, **kwargs)","id":268,"name":"_nanvar_object","nodeType":"Function","startLoc":147,"text":"def _nanvar_object(value, axis=None, ddof=0, keepdims=False, **kwargs):\n value_mean = _nanmean_ddof_object(\n ddof=0, value=value, axis=axis, keepdims=True, **kwargs\n )\n squared = (value.astype(value_mean.dtype) - value_mean) ** 2\n return _nanmean_ddof_object(ddof, squared, axis=axis, keepdims=keepdims, **kwargs)"},{"col":4,"comment":"null","endLoc":415,"header":"def __init__(self, key)","id":269,"name":"__init__","nodeType":"Function","startLoc":385,"text":"def __init__(self, key):\n if not isinstance(key, tuple):\n raise TypeError(\"key must be a tuple: {!r}\".format(key))\n\n new_key = []\n for k in key:\n if isinstance(k, integer_types):\n k = int(k)\n elif isinstance(k, slice):\n k = as_integer_slice(k)\n elif isinstance(k, np.ndarray):\n if not np.issubdtype(k.dtype, np.integer):\n raise TypeError(\n \"invalid indexer array, does not have \"\n \"integer dtype: {!r}\".format(k)\n )\n if k.ndim != 1:\n raise TypeError(\n \"invalid indexer array for {}, must have \"\n \"exactly 1 dimension: \".format(type(self).__name__, k)\n )\n k = np.asarray(k, dtype=np.int64)\n else:\n raise TypeError(\n \"unexpected indexer type for {}: {!r}\".format(\n type(self).__name__, k\n )\n )\n new_key.append(k)\n\n super().__init__(new_key)"},{"col":0,"comment":"\n Given any number of Dataset and/or DataArray objects, returns new\n objects with aligned indexes and dimension sizes.\n\n Array from the aligned objects are suitable as input to mathematical\n operators, because along each dimension they have the same index and size.\n\n Missing values (if ``join != 'inner'``) are filled with ``fill_value``.\n The default fill value is NaN.\n\n Parameters\n ----------\n *objects : Dataset or DataArray\n Objects to align.\n join : {'outer', 'inner', 'left', 'right', 'exact', 'override'}, optional\n Method for joining the indexes of the passed objects along each\n dimension:\n\n - 'outer': use the union of object indexes\n - 'inner': use the intersection of object indexes\n - 'left': use indexes from the first object with each dimension\n - 'right': use indexes from the last object 
with each dimension\n - 'exact': instead of aligning, raise `ValueError` when indexes to be\n aligned are not equal\n - 'override': if indexes are of same size, rewrite indexes to be\n those of the first object with that dimension. Indexes for the same\n dimension must have the same size in all objects.\n copy : bool, optional\n If ``copy=True``, data in the return values is always copied. If\n ``copy=False`` and reindexing is unnecessary, or can be performed with\n only slice operations, then the output may share memory with the input.\n In either case, new xarray objects are always returned.\n indexes : dict-like, optional\n Any indexes explicitly provided with the `indexes` argument should be\n used in preference to the aligned indexes.\n exclude : sequence of str, optional\n Dimensions that must be excluded from alignment\n fill_value : scalar, optional\n Value to use for newly missing values\n\n Returns\n -------\n aligned : same as *objects\n Tuple of objects with aligned coordinates.\n\n Raises\n ------\n ValueError\n If any dimensions without labels on the arguments have different sizes,\n or a different size than the size of the aligned dimension labels.\n\n Examples\n --------\n\n >>> import xarray as xr\n >>> x = xr.DataArray([[25, 35], [10, 24]], dims=('lat', 'lon'),\n ... coords={'lat': [35., 40.], 'lon': [100., 120.]})\n >>> y = xr.DataArray([[20, 5], [7, 13]], dims=('lat', 'lon'),\n ... coords={'lat': [35., 42.], 'lon': [100., 120.]})\n\n >>> x\n \n array([[25, 35],\n [10, 24]])\n Coordinates:\n * lat (lat) float64 35.0 40.0\n * lon (lon) float64 100.0 120.0\n\n >>> y\n \n array([[20, 5],\n [ 7, 13]])\n Coordinates:\n * lat (lat) float64 35.0 42.0\n * lon (lon) float64 100.0 120.0\n\n >>> a, b = xr.align(x, y)\n >>> a\n \n array([[25, 35]])\n Coordinates:\n * lat (lat) float64 35.0\n * lon (lon) float64 100.0 120.0\n >>> b\n \n array([[20, 5]])\n Coordinates:\n * lat (lat) float64 35.0\n * lon (lon) float64 100.0 120.0\n\n >>> a, b = xr.align(x, y, join='outer')\n >>> a\n \n array([[25., 35.],\n [10., 24.],\n [nan, nan]])\n Coordinates:\n * lat (lat) float64 35.0 40.0 42.0\n * lon (lon) float64 100.0 120.0\n >>> b\n \n array([[20., 5.],\n [nan, nan],\n [ 7., 13.]])\n Coordinates:\n * lat (lat) float64 35.0 40.0 42.0\n * lon (lon) float64 100.0 120.0\n\n >>> a, b = xr.align(x, y, join='outer', fill_value=-999)\n >>> a\n \n array([[ 25, 35],\n [ 10, 24],\n [-999, -999]])\n Coordinates:\n * lat (lat) float64 35.0 40.0 42.0\n * lon (lon) float64 100.0 120.0\n >>> b\n \n array([[ 20, 5],\n [-999, -999],\n [ 7, 13]])\n Coordinates:\n * lat (lat) float64 35.0 40.0 42.0\n * lon (lon) float64 100.0 120.0\n\n >>> a, b = xr.align(x, y, join='left')\n >>> a\n \n array([[25, 35],\n [10, 24]])\n Coordinates:\n * lat (lat) float64 35.0 40.0\n * lon (lon) float64 100.0 120.0\n >>> b\n \n array([[20., 5.],\n [nan, nan]])\n Coordinates:\n * lat (lat) float64 35.0 40.0\n * lon (lon) float64 100.0 120.0\n\n >>> a, b = xr.align(x, y, join='right')\n >>> a\n \n array([[25., 35.],\n [nan, nan]])\n Coordinates:\n * lat (lat) float64 35.0 42.0\n * lon (lon) float64 100.0 120.0\n >>> b\n \n array([[20, 5],\n [ 7, 13]])\n Coordinates:\n * lat (lat) float64 35.0 42.0\n * lon (lon) float64 100.0 120.0\n\n >>> a, b = xr.align(x, y, join='exact')\n Traceback (most recent call last):\n ...\n \"indexes along dimension {!r} are not equal\".format(dim)\n ValueError: indexes along dimension 'lat' are not equal\n\n >>> a, b = xr.align(x, y, join='override')\n >>> a\n \n array([[25, 35],\n [10, 24]])\n 
Coordinates:\n * lat (lat) float64 35.0 40.0\n * lon (lon) float64 100.0 120.0\n >>> b\n \n array([[20, 5],\n [ 7, 13]])\n Coordinates:\n * lat (lat) float64 35.0 40.0\n * lon (lon) float64 100.0 120.0\n\n ","endLoc":337,"header":"def align(\n *objects,\n join=\"inner\",\n copy=True,\n indexes=None,\n exclude=frozenset(),\n fill_value=dtypes.NA\n)","id":270,"name":"align","nodeType":"Function","startLoc":61,"text":"def align(\n *objects,\n join=\"inner\",\n copy=True,\n indexes=None,\n exclude=frozenset(),\n fill_value=dtypes.NA\n):\n \"\"\"\n Given any number of Dataset and/or DataArray objects, returns new\n objects with aligned indexes and dimension sizes.\n\n Array from the aligned objects are suitable as input to mathematical\n operators, because along each dimension they have the same index and size.\n\n Missing values (if ``join != 'inner'``) are filled with ``fill_value``.\n The default fill value is NaN.\n\n Parameters\n ----------\n *objects : Dataset or DataArray\n Objects to align.\n join : {'outer', 'inner', 'left', 'right', 'exact', 'override'}, optional\n Method for joining the indexes of the passed objects along each\n dimension:\n\n - 'outer': use the union of object indexes\n - 'inner': use the intersection of object indexes\n - 'left': use indexes from the first object with each dimension\n - 'right': use indexes from the last object with each dimension\n - 'exact': instead of aligning, raise `ValueError` when indexes to be\n aligned are not equal\n - 'override': if indexes are of same size, rewrite indexes to be\n those of the first object with that dimension. Indexes for the same\n dimension must have the same size in all objects.\n copy : bool, optional\n If ``copy=True``, data in the return values is always copied. If\n ``copy=False`` and reindexing is unnecessary, or can be performed with\n only slice operations, then the output may share memory with the input.\n In either case, new xarray objects are always returned.\n indexes : dict-like, optional\n Any indexes explicitly provided with the `indexes` argument should be\n used in preference to the aligned indexes.\n exclude : sequence of str, optional\n Dimensions that must be excluded from alignment\n fill_value : scalar, optional\n Value to use for newly missing values\n\n Returns\n -------\n aligned : same as *objects\n Tuple of objects with aligned coordinates.\n\n Raises\n ------\n ValueError\n If any dimensions without labels on the arguments have different sizes,\n or a different size than the size of the aligned dimension labels.\n\n Examples\n --------\n\n >>> import xarray as xr\n >>> x = xr.DataArray([[25, 35], [10, 24]], dims=('lat', 'lon'),\n ... coords={'lat': [35., 40.], 'lon': [100., 120.]})\n >>> y = xr.DataArray([[20, 5], [7, 13]], dims=('lat', 'lon'),\n ... 
coords={'lat': [35., 42.], 'lon': [100., 120.]})\n\n >>> x\n \n array([[25, 35],\n [10, 24]])\n Coordinates:\n * lat (lat) float64 35.0 40.0\n * lon (lon) float64 100.0 120.0\n\n >>> y\n \n array([[20, 5],\n [ 7, 13]])\n Coordinates:\n * lat (lat) float64 35.0 42.0\n * lon (lon) float64 100.0 120.0\n\n >>> a, b = xr.align(x, y)\n >>> a\n \n array([[25, 35]])\n Coordinates:\n * lat (lat) float64 35.0\n * lon (lon) float64 100.0 120.0\n >>> b\n \n array([[20, 5]])\n Coordinates:\n * lat (lat) float64 35.0\n * lon (lon) float64 100.0 120.0\n\n >>> a, b = xr.align(x, y, join='outer')\n >>> a\n \n array([[25., 35.],\n [10., 24.],\n [nan, nan]])\n Coordinates:\n * lat (lat) float64 35.0 40.0 42.0\n * lon (lon) float64 100.0 120.0\n >>> b\n \n array([[20., 5.],\n [nan, nan],\n [ 7., 13.]])\n Coordinates:\n * lat (lat) float64 35.0 40.0 42.0\n * lon (lon) float64 100.0 120.0\n\n >>> a, b = xr.align(x, y, join='outer', fill_value=-999)\n >>> a\n \n array([[ 25, 35],\n [ 10, 24],\n [-999, -999]])\n Coordinates:\n * lat (lat) float64 35.0 40.0 42.0\n * lon (lon) float64 100.0 120.0\n >>> b\n \n array([[ 20, 5],\n [-999, -999],\n [ 7, 13]])\n Coordinates:\n * lat (lat) float64 35.0 40.0 42.0\n * lon (lon) float64 100.0 120.0\n\n >>> a, b = xr.align(x, y, join='left')\n >>> a\n \n array([[25, 35],\n [10, 24]])\n Coordinates:\n * lat (lat) float64 35.0 40.0\n * lon (lon) float64 100.0 120.0\n >>> b\n \n array([[20., 5.],\n [nan, nan]])\n Coordinates:\n * lat (lat) float64 35.0 40.0\n * lon (lon) float64 100.0 120.0\n\n >>> a, b = xr.align(x, y, join='right')\n >>> a\n \n array([[25., 35.],\n [nan, nan]])\n Coordinates:\n * lat (lat) float64 35.0 42.0\n * lon (lon) float64 100.0 120.0\n >>> b\n \n array([[20, 5],\n [ 7, 13]])\n Coordinates:\n * lat (lat) float64 35.0 42.0\n * lon (lon) float64 100.0 120.0\n\n >>> a, b = xr.align(x, y, join='exact')\n Traceback (most recent call last):\n ...\n \"indexes along dimension {!r} are not equal\".format(dim)\n ValueError: indexes along dimension 'lat' are not equal\n\n >>> a, b = xr.align(x, y, join='override')\n >>> a\n \n array([[25, 35],\n [10, 24]])\n Coordinates:\n * lat (lat) float64 35.0 40.0\n * lon (lon) float64 100.0 120.0\n >>> b\n \n array([[20, 5],\n [ 7, 13]])\n Coordinates:\n * lat (lat) float64 35.0 40.0\n * lon (lon) float64 100.0 120.0\n\n \"\"\"\n if indexes is None:\n indexes = {}\n\n if not indexes and len(objects) == 1:\n # fast path for the trivial case\n obj, = objects\n return (obj.copy(deep=copy),)\n\n all_indexes = defaultdict(list)\n unlabeled_dim_sizes = defaultdict(set)\n for obj in objects:\n for dim in obj.dims:\n if dim not in exclude:\n try:\n index = obj.indexes[dim]\n except KeyError:\n unlabeled_dim_sizes[dim].add(obj.sizes[dim])\n else:\n all_indexes[dim].append(index)\n\n if join == \"override\":\n objects = _override_indexes(objects, all_indexes, exclude)\n\n # We don't reindex over dimensions with all equal indexes for two reasons:\n # - It's faster for the usual case (already aligned objects).\n # - It ensures it's possible to do operations that don't require alignment\n # on indexes with duplicate values (which cannot be reindexed with\n # pandas). 
This is useful, e.g., for overwriting such duplicate indexes.\n joiner = _get_joiner(join)\n joined_indexes = {}\n for dim, matching_indexes in all_indexes.items():\n if dim in indexes:\n index = utils.safe_cast_to_index(indexes[dim])\n if (\n any(not index.equals(other) for other in matching_indexes)\n or dim in unlabeled_dim_sizes\n ):\n joined_indexes[dim] = index\n else:\n if (\n any(\n not matching_indexes[0].equals(other)\n for other in matching_indexes[1:]\n )\n or dim in unlabeled_dim_sizes\n ):\n if join == \"exact\":\n raise ValueError(\n \"indexes along dimension {!r} are not equal\".format(dim)\n )\n index = joiner(matching_indexes)\n joined_indexes[dim] = index\n else:\n index = matching_indexes[0]\n\n if dim in unlabeled_dim_sizes:\n unlabeled_sizes = unlabeled_dim_sizes[dim]\n labeled_size = index.size\n if len(unlabeled_sizes | {labeled_size}) > 1:\n raise ValueError(\n \"arguments without labels along dimension %r cannot be \"\n \"aligned because they have different dimension size(s) %r \"\n \"than the size of the aligned dimension labels: %r\"\n % (dim, unlabeled_sizes, labeled_size)\n )\n\n for dim in unlabeled_dim_sizes:\n if dim not in all_indexes:\n sizes = unlabeled_dim_sizes[dim]\n if len(sizes) > 1:\n raise ValueError(\n \"arguments without labels along dimension %r cannot be \"\n \"aligned because they have different dimension sizes: %r\"\n % (dim, sizes)\n )\n\n result = []\n for obj in objects:\n valid_indexers = {k: v for k, v in joined_indexes.items() if k in obj.dims}\n if not valid_indexers:\n # fast path for no reindexing necessary\n new_obj = obj.copy(deep=copy)\n else:\n new_obj = obj.reindex(copy=copy, fill_value=fill_value, **valid_indexers)\n new_obj.encoding = obj.encoding\n result.append(new_obj)\n\n return tuple(result)"},{"col":0,"comment":"null","endLoc":128,"header":"def _get_datatype(var, nc_format=\"NETCDF4\", raise_on_invalid_encoding=False)","id":271,"name":"_get_datatype","nodeType":"Function","startLoc":115,"text":"def _get_datatype(var, nc_format=\"NETCDF4\", raise_on_invalid_encoding=False):\n if nc_format == \"NETCDF4\":\n datatype = _nc4_dtype(var)\n else:\n if \"dtype\" in var.encoding:\n encoded_dtype = var.encoding[\"dtype\"]\n _check_encoding_dtype_is_vlen_string(encoded_dtype)\n if raise_on_invalid_encoding:\n raise ValueError(\n \"encoding dtype=str for vlen strings is only supported \"\n \"with format='NETCDF4'.\"\n )\n datatype = var.dtype\n return datatype"},{"col":0,"comment":"null","endLoc":141,"header":"def _nc4_dtype(var)","id":272,"name":"_nc4_dtype","nodeType":"Function","startLoc":131,"text":"def _nc4_dtype(var):\n if \"dtype\" in var.encoding:\n dtype = var.encoding.pop(\"dtype\")\n _check_encoding_dtype_is_vlen_string(dtype)\n elif coding.strings.is_unicode_dtype(var.dtype):\n dtype = str\n elif var.dtype.kind in [\"i\", \"u\", \"f\", \"c\", \"S\"]:\n dtype = var.dtype\n else:\n raise ValueError(\"unsupported dtype for netCDF4 variable: {}\".format(var.dtype))\n return dtype"},{"col":0,"comment":"null","endLoc":112,"header":"def _check_encoding_dtype_is_vlen_string(dtype)","id":273,"name":"_check_encoding_dtype_is_vlen_string","nodeType":"Function","startLoc":107,"text":"def _check_encoding_dtype_is_vlen_string(dtype):\n if dtype is not str:\n raise AssertionError( # pragma: no cover\n \"unexpected dtype encoding %r. 
This shouldn't happen: please \"\n \"file a bug report at github.com/pydata/xarray\" % dtype\n )"},{"col":4,"comment":"null","endLoc":37,"header":"def __getitem__(self, key: Hashable) -> \"DataArray\"","id":274,"name":"__getitem__","nodeType":"Function","startLoc":36,"text":"def __getitem__(self, key: Hashable) -> \"DataArray\":\n raise NotImplementedError()"},{"col":4,"comment":"null","endLoc":40,"header":"def __setitem__(self, key: Hashable, value: Any) -> None","id":275,"name":"__setitem__","nodeType":"Function","startLoc":39,"text":"def __setitem__(self, key: Hashable, value: Any) -> None:\n self.update({key: value})"},{"col":0,"comment":"Return True if the given UTF-8 encoded character is alphanumeric\n or multibyte.\n\n Input is not checked!\n ","endLoc":92,"header":"def _isalnumMUTF8(c)","id":276,"name":"_isalnumMUTF8","nodeType":"Function","startLoc":86,"text":"def _isalnumMUTF8(c):\n \"\"\"Return True if the given UTF-8 encoded character is alphanumeric\n or multibyte.\n\n Input is not checked!\n \"\"\"\n return c.isalnum() or (len(c.encode(\"utf-8\")) > 1)"},{"col":0,"comment":"Test whether an object can be validly converted to a netCDF-3\n dimension, variable or attribute name\n\n Earlier versions of the netCDF C-library reference implementation\n enforced a more restricted set of characters in creating new names,\n but permitted reading names containing arbitrary bytes. This\n specification extends the permitted characters in names to include\n multi-byte UTF-8 encoded Unicode and additional printing characters\n from the US-ASCII alphabet. The first character of a name must be\n alphanumeric, a multi-byte UTF-8 character, or '_' (reserved for\n special names with meaning to implementations, such as the\n \"_FillValue\" attribute). Subsequent characters may also include\n printing special characters, except for '/' which is not allowed in\n names. Names that have trailing space characters are also not\n permitted.\n ","endLoc":125,"header":"def is_valid_nc3_name(s)","id":277,"name":"is_valid_nc3_name","nodeType":"Function","startLoc":95,"text":"def is_valid_nc3_name(s):\n \"\"\"Test whether an object can be validly converted to a netCDF-3\n dimension, variable or attribute name\n\n Earlier versions of the netCDF C-library reference implementation\n enforced a more restricted set of characters in creating new names,\n but permitted reading names containing arbitrary bytes. This\n specification extends the permitted characters in names to include\n multi-byte UTF-8 encoded Unicode and additional printing characters\n from the US-ASCII alphabet. The first character of a name must be\n alphanumeric, a multi-byte UTF-8 character, or '_' (reserved for\n special names with meaning to implementations, such as the\n \"_FillValue\" attribute). Subsequent characters may also include\n printing special characters, except for '/' which is not allowed in\n names. 
Names that have trailing space characters are also not\n permitted.\n \"\"\"\n if not isinstance(s, str):\n return False\n if not isinstance(s, str):\n s = s.decode(\"utf-8\")\n num_bytes = len(s.encode(\"utf-8\"))\n return (\n (unicodedata.normalize(\"NFC\", s) == s)\n and (s not in _reserved_names)\n and (num_bytes >= 0)\n and (\"/\" not in s)\n and (s[-1] != \" \")\n and (_isalnumMUTF8(s[0]) or (s[0] == \"_\"))\n and all(_isalnumMUTF8(c) or c in _specialchars for c in s)\n )"},{"col":0,"comment":"null","endLoc":167,"header":"def _nc4_require_group(ds, group, mode, create_group=_netcdf4_create_group)","id":278,"name":"_nc4_require_group","nodeType":"Function","startLoc":148,"text":"def _nc4_require_group(ds, group, mode, create_group=_netcdf4_create_group):\n if group in {None, \"\", \"/\"}:\n # use the root group\n return ds\n else:\n # make sure it's a string\n if not isinstance(group, str):\n raise ValueError(\"group must be a string or None\")\n # support path-like syntax\n path = group.strip(\"/\").split(\"/\")\n for key in path:\n try:\n ds = ds.groups[key]\n except KeyError as e:\n if mode != \"r\":\n ds = create_group(ds, key)\n else:\n # wrap error to provide slightly more helpful message\n raise OSError(\"group not found: %s\" % key, e)\n return ds"},{"className":"H5NetCDFArrayWrapper","col":0,"comment":"null","endLoc":37,"id":279,"nodeType":"Class","startLoc":20,"text":"class H5NetCDFArrayWrapper(BaseNetCDF4Array):\n def get_array(self, needs_lock=True):\n ds = self.datastore._acquire(needs_lock)\n variable = ds.variables[self.variable_name]\n return variable\n\n def __getitem__(self, key):\n return indexing.explicit_indexing_adapter(\n key, self.shape, indexing.IndexingSupport.OUTER_1VECTOR, self._getitem\n )\n\n def _getitem(self, key):\n # h5py requires using lists for fancy indexing:\n # https://github.com/h5py/h5py/issues/992\n key = tuple(list(k) if isinstance(k, np.ndarray) else k for k in key)\n with self.datastore.lock:\n array = self.get_array(needs_lock=False)\n return array[key]"},{"col":4,"comment":"null","endLoc":462,"header":"def __init__(self, key)","id":280,"name":"__init__","nodeType":"Function","startLoc":430,"text":"def __init__(self, key):\n if not isinstance(key, tuple):\n raise TypeError(\"key must be a tuple: {!r}\".format(key))\n\n new_key = []\n ndim = None\n for k in key:\n if isinstance(k, slice):\n k = as_integer_slice(k)\n elif isinstance(k, np.ndarray):\n if not np.issubdtype(k.dtype, np.integer):\n raise TypeError(\n \"invalid indexer array, does not have \"\n \"integer dtype: {!r}\".format(k)\n )\n if ndim is None:\n ndim = k.ndim\n elif ndim != k.ndim:\n ndims = [k.ndim for k in key if isinstance(k, np.ndarray)]\n raise ValueError(\n \"invalid indexer key: ndarray arguments \"\n \"have different numbers of dimensions: {}\".format(ndims)\n )\n k = np.asarray(k, dtype=np.int64)\n else:\n raise TypeError(\n \"unexpected indexer type for {}: {!r}\".format(\n type(self).__name__, k\n )\n )\n new_key.append(k)\n\n super().__init__(new_key)"},{"col":4,"comment":"null","endLoc":24,"header":"def get_array(self, needs_lock=True)","id":281,"name":"get_array","nodeType":"Function","startLoc":21,"text":"def get_array(self, needs_lock=True):\n ds = self.datastore._acquire(needs_lock)\n variable = ds.variables[self.variable_name]\n return variable"},{"col":0,"comment":"null","endLoc":161,"header":"def nanvar(a, axis=None, dtype=None, out=None, ddof=0)","id":282,"name":"nanvar","nodeType":"Function","startLoc":155,"text":"def nanvar(a, axis=None, dtype=None, 
out=None, ddof=0):\n if a.dtype.kind == \"O\":\n return _nanvar_object(a, axis=axis, dtype=dtype, ddof=ddof)\n\n return _dask_or_eager_func(\"nanvar\", eager_module=nputils)(\n a, axis=axis, dtype=dtype, ddof=ddof\n )"},{"col":0,"comment":"null","endLoc":58,"header":"def _override_indexes(objects, all_indexes, exclude)","id":283,"name":"_override_indexes","nodeType":"Function","startLoc":40,"text":"def _override_indexes(objects, all_indexes, exclude):\n for dim, dim_indexes in all_indexes.items():\n if dim not in exclude:\n lengths = {index.size for index in dim_indexes}\n if len(lengths) != 1:\n raise ValueError(\n \"Indexes along dimension %r don't have the same length.\"\n \" Cannot use join='override'.\" % dim\n )\n\n objects = list(objects)\n for idx, obj in enumerate(objects[1:]):\n new_indexes = {}\n for dim in obj.dims:\n if dim not in exclude:\n new_indexes[dim] = all_indexes[dim][0]\n objects[idx + 1] = obj._overwrite_indexes(new_indexes)\n\n return objects"},{"attributeType":"null","col":16,"comment":"null","endLoc":3,"id":284,"name":"np","nodeType":"Attribute","startLoc":3,"text":"np"},{"attributeType":"null","col":0,"comment":"null","endLoc":9,"id":285,"name":"_specialchars","nodeType":"Attribute","startLoc":9,"text":"_specialchars"},{"attributeType":"null","col":0,"comment":"null","endLoc":13,"id":286,"name":"_reserved_names","nodeType":"Attribute","startLoc":13,"text":"_reserved_names"},{"attributeType":"null","col":0,"comment":"null","endLoc":30,"id":287,"name":"_nc3_dtype_coercions","nodeType":"Attribute","startLoc":30,"text":"_nc3_dtype_coercions"},{"attributeType":"null","col":0,"comment":"null","endLoc":33,"id":288,"name":"STRING_ENCODING","nodeType":"Attribute","startLoc":33,"text":"STRING_ENCODING"},{"col":0,"comment":"null","endLoc":167,"header":"def nanstd(a, axis=None, dtype=None, out=None, ddof=0)","id":289,"name":"nanstd","nodeType":"Function","startLoc":164,"text":"def nanstd(a, axis=None, dtype=None, out=None, ddof=0):\n return _dask_or_eager_func(\"nanstd\", eager_module=nputils)(\n a, axis=axis, dtype=dtype, ddof=ddof\n )"},{"col":0,"comment":"","endLoc":1,"header":"netcdf3.py#","id":290,"name":"","nodeType":"Function","startLoc":1,"text":"_specialchars = '_.@+- !\"#$%&\\\\()*,:;<=>?[]^`{|}~'\n\n_reserved_names = {\n \"byte\",\n \"char\",\n \"short\",\n \"ushort\",\n \"int\",\n \"uint\",\n \"int64\",\n \"uint64\",\n \"float\" \"real\",\n \"double\",\n \"bool\",\n \"string\",\n}\n\n_nc3_dtype_coercions = {\"int64\": \"int32\", \"bool\": \"int8\"}\n\nSTRING_ENCODING = \"utf-8\""},{"col":0,"comment":"null","endLoc":37,"header":"def _get_joiner(join)","id":291,"name":"_get_joiner","nodeType":"Function","startLoc":20,"text":"def _get_joiner(join):\n if join == \"outer\":\n return functools.partial(functools.reduce, operator.or_)\n elif join == \"inner\":\n return functools.partial(functools.reduce, operator.and_)\n elif join == \"left\":\n return operator.itemgetter(0)\n elif join == \"right\":\n return operator.itemgetter(-1)\n elif join == \"exact\":\n # We cannot return a function to \"align\" in this case, because it needs\n # access to the dimension name to give a good error message.\n return None\n elif join == \"override\":\n # We rewrite all indexes and then use join='left'\n return operator.itemgetter(0)\n else:\n raise ValueError(\"invalid value for join: %s\" % join)"},{"col":4,"comment":"null","endLoc":118,"header":"def update(self, other: Mapping[Hashable, Any]) -> None","id":292,"name":"update","nodeType":"Function","startLoc":113,"text":"def 
update(self, other: Mapping[Hashable, Any]) -> None:\n other_vars = getattr(other, \"variables\", other)\n coords, indexes = merge_coords(\n [self.variables, other_vars], priority_arg=1, indexes=self.indexes\n )\n self._update_coords(coords, indexes)"},{"col":4,"comment":"null","endLoc":29,"header":"def __getitem__(self, key)","id":293,"name":"__getitem__","nodeType":"Function","startLoc":26,"text":"def __getitem__(self, key):\n return indexing.explicit_indexing_adapter(\n key, self.shape, indexing.IndexingSupport.OUTER_1VECTOR, self._getitem\n )"},{"col":0,"comment":"null","endLoc":176,"header":"def nanprod(a, axis=None, dtype=None, out=None, min_count=None)","id":294,"name":"nanprod","nodeType":"Function","startLoc":170,"text":"def nanprod(a, axis=None, dtype=None, out=None, min_count=None):\n a, mask = _replace_nan(a, 1)\n result = _dask_or_eager_func(\"nanprod\")(a, axis=axis, dtype=dtype, out=out)\n if min_count is not None:\n return _maybe_null_out(result, axis, mask, min_count)\n else:\n return result"},{"col":0,"comment":"Merge coordinate variables.\n\n See merge_core below for argument descriptions. This works similarly to\n merge_core, except everything we don't worry about whether variables are\n coordinates or not.\n ","endLoc":447,"header":"def merge_coords(\n objects: Iterable[\"CoercibleMapping\"],\n compat: str = \"minimal\",\n join: str = \"outer\",\n priority_arg: Optional[int] = None,\n indexes: Optional[Mapping[Hashable, pd.Index]] = None,\n fill_value: object = dtypes.NA,\n) -> Tuple[Dict[Hashable, Variable], Dict[Hashable, pd.Index]]","id":295,"name":"merge_coords","nodeType":"Function","startLoc":424,"text":"def merge_coords(\n objects: Iterable[\"CoercibleMapping\"],\n compat: str = \"minimal\",\n join: str = \"outer\",\n priority_arg: Optional[int] = None,\n indexes: Optional[Mapping[Hashable, pd.Index]] = None,\n fill_value: object = dtypes.NA,\n) -> Tuple[Dict[Hashable, Variable], Dict[Hashable, pd.Index]]:\n \"\"\"Merge coordinate variables.\n\n See merge_core below for argument descriptions. This works similarly to\n merge_core, except everything we don't worry about whether variables are\n coordinates or not.\n \"\"\"\n _assert_compat_valid(compat)\n coerced = coerce_pandas_values(objects)\n aligned = deep_align(\n coerced, join=join, copy=False, indexes=indexes, fill_value=fill_value\n )\n collected = collect_variables_and_indexes(aligned)\n prioritized = _get_priority_vars_and_indexes(aligned, priority_arg, compat=compat)\n variables, out_indexes = merge_collected(collected, prioritized, compat=compat)\n assert_unique_multiindex_level_names(variables)\n return variables, out_indexes"},{"id":296,"name":".github","nodeType":"Package"},{"id":297,"name":"PULL_REQUEST_TEMPLATE.md","nodeType":"TextFile","path":".github","text":"\n\n - [ ] Closes #xxxx\n - [ ] Tests added\n - [ ] Passes `black . && mypy . && flake8`\n - [ ] Fully documented, including `whats-new.rst` for all changes and `api.rst` for new API\n"},{"col":4,"comment":"null","endLoc":37,"header":"def _getitem(self, key)","id":298,"name":"_getitem","nodeType":"Function","startLoc":31,"text":"def _getitem(self, key):\n # h5py requires using lists for fancy indexing:\n # https://github.com/h5py/h5py/issues/992\n key = tuple(list(k) if isinstance(k, np.ndarray) else k for k in key)\n with self.datastore.lock:\n array = self.get_array(needs_lock=False)\n return array[key]"},{"id":299,"name":"combining.rst","nodeType":"TextFile","path":"doc","text":".. _combining data:\n\nCombining data\n--------------\n\n.. 
ipython:: python\n :suppress:\n\n import numpy as np\n import pandas as pd\n import xarray as xr\n np.random.seed(123456)\n\n* For combining datasets or data arrays along a single dimension, see concatenate_.\n* For combining datasets with different variables, see merge_.\n* For combining datasets or data arrays with different indexes or missing values, see combine_.\n* For combining datasets or data arrays along multiple dimensions see combining.multi_.\n\n.. _concatenate:\n\nConcatenate\n~~~~~~~~~~~\n\nTo combine arrays along existing or new dimension into a larger array, you\ncan use :py:func:`~xarray.concat`. ``concat`` takes an iterable of ``DataArray``\nor ``Dataset`` objects, as well as a dimension name, and concatenates along\nthat dimension:\n\n.. ipython:: python\n\n arr = xr.DataArray(np.random.randn(2, 3),\n [('x', ['a', 'b']), ('y', [10, 20, 30])])\n arr[:, :1]\n # this resembles how you would use np.concatenate\n xr.concat([arr[:, :1], arr[:, 1:]], dim='y')\n\nIn addition to combining along an existing dimension, ``concat`` can create a\nnew dimension by stacking lower dimensional arrays together:\n\n.. ipython:: python\n\n arr[0]\n # to combine these 1d arrays into a 2d array in numpy, you would use np.array\n xr.concat([arr[0], arr[1]], 'x')\n\nIf the second argument to ``concat`` is a new dimension name, the arrays will\nbe concatenated along that new dimension, which is always inserted as the first\ndimension:\n\n.. ipython:: python\n\n xr.concat([arr[0], arr[1]], 'new_dim')\n\nThe second argument to ``concat`` can also be an :py:class:`~pandas.Index` or\n:py:class:`~xarray.DataArray` object as well as a string, in which case it is\nused to label the values along the new dimension:\n\n.. ipython:: python\n\n xr.concat([arr[0], arr[1]], pd.Index([-90, -100], name='new_dim'))\n\nOf course, ``concat`` also works on ``Dataset`` objects:\n\n.. ipython:: python\n\n ds = arr.to_dataset(name='foo')\n xr.concat([ds.sel(x='a'), ds.sel(x='b')], 'x')\n\n:py:func:`~xarray.concat` has a number of options which provide deeper control\nover which variables are concatenated and how it handles conflicting variables\nbetween datasets. With the default parameters, xarray will load some coordinate\nvariables into memory to compare them between datasets. This may be prohibitively\nexpensive if you are manipulating your dataset lazily using :ref:`dask`.\n\n.. _merge:\n\nMerge\n~~~~~\n\nTo combine variables and coordinates between multiple ``DataArray`` and/or\n``Dataset`` objects, use :py:func:`~xarray.merge`. It can merge a list of\n``Dataset``, ``DataArray`` or dictionaries of objects convertible to\n``DataArray`` objects:\n\n.. ipython:: python\n\n xr.merge([ds, ds.rename({'foo': 'bar'})])\n xr.merge([xr.DataArray(n, name='var%d' % n) for n in range(5)])\n\nIf you merge another dataset (or a dictionary including data array objects), by\ndefault the resulting dataset will be aligned on the **union** of all index\ncoordinates:\n\n.. ipython:: python\n\n other = xr.Dataset({'bar': ('x', [1, 2, 3, 4]), 'x': list('abcd')})\n xr.merge([ds, other])\n\nThis ensures that ``merge`` is non-destructive. ``xarray.MergeError`` is raised\nif you attempt to merge two variables with the same name but different values:\n\n.. 
ipython::\n\n @verbatim\n In [1]: xr.merge([ds, ds + 1])\n MergeError: conflicting values for variable 'foo' on objects to be combined:\n first value: \n array([[ 0.4691123 , -0.28286334, -1.5090585 ],\n [-1.13563237, 1.21211203, -0.17321465]])\n second value: \n array([[ 1.4691123 , 0.71713666, -0.5090585 ],\n [-0.13563237, 2.21211203, 0.82678535]])\n\nThe same non-destructive merging between ``DataArray`` index coordinates is\nused in the :py:class:`~xarray.Dataset` constructor:\n\n.. ipython:: python\n\n xr.Dataset({'a': arr[:-1], 'b': arr[1:]})\n\n.. _combine:\n\nCombine\n~~~~~~~\n\nThe instance method :py:meth:`~xarray.DataArray.combine_first` combines two\ndatasets/data arrays and defaults to non-null values in the calling object,\nusing values from the called object to fill holes. The resulting coordinates\nare the union of coordinate labels. Vacant cells as a result of the outer-join\nare filled with ``NaN``. For example:\n\n.. ipython:: python\n\n ar0 = xr.DataArray([[0, 0], [0, 0]], [('x', ['a', 'b']), ('y', [-1, 0])])\n ar1 = xr.DataArray([[1, 1], [1, 1]], [('x', ['b', 'c']), ('y', [0, 1])])\n ar0.combine_first(ar1)\n ar1.combine_first(ar0)\n\nFor datasets, ``ds0.combine_first(ds1)`` works similarly to\n``xr.merge([ds0, ds1])``, except that ``xr.merge`` raises ``MergeError`` when\nthere are conflicting values in variables to be merged, whereas\n``.combine_first`` defaults to the calling object's values.\n\n.. _update:\n\nUpdate\n~~~~~~\n\nIn contrast to ``merge``, :py:meth:`~xarray.Dataset.update` modifies a dataset\nin-place without checking for conflicts, and will overwrite any existing\nvariables with new values:\n\n.. ipython:: python\n\n ds.update({'space': ('space', [10.2, 9.4, 3.9])})\n\nHowever, dimensions are still required to be consistent between different\nDataset variables, so you cannot change the size of a dimension unless you\nreplace all dataset variables that use it.\n\n``update`` also performs automatic alignment if necessary. Unlike ``merge``, it\nmaintains the alignment of the original array instead of merging indexes:\n\n.. ipython:: python\n\n ds.update(other)\n\nThe exact same alignment logic when setting a variable with ``__setitem__``\nsyntax:\n\n.. ipython:: python\n\n ds['baz'] = xr.DataArray([9, 9, 9, 9, 9], coords=[('x', list('abcde'))])\n ds.baz\n\nEquals and identical\n~~~~~~~~~~~~~~~~~~~~\n\nxarray objects can be compared by using the :py:meth:`~xarray.Dataset.equals`,\n:py:meth:`~xarray.Dataset.identical` and\n:py:meth:`~xarray.Dataset.broadcast_equals` methods. These methods are used by\nthe optional ``compat`` argument on ``concat`` and ``merge``.\n\n:py:attr:`~xarray.Dataset.equals` checks dimension names, indexes and array\nvalues:\n\n.. ipython:: python\n\n arr.equals(arr.copy())\n\n:py:attr:`~xarray.Dataset.identical` also checks attributes, and the name of each\nobject:\n\n.. ipython:: python\n\n arr.identical(arr.rename('bar'))\n\n:py:attr:`~xarray.Dataset.broadcast_equals` does a more relaxed form of equality\ncheck that allows variables to have different dimensions, as long as values\nare constant along those new dimensions:\n\n.. ipython:: python\n\n left = xr.Dataset(coords={'x': 0})\n right = xr.Dataset({'x': [0, 0, 0]})\n left.broadcast_equals(right)\n\nLike pandas objects, two xarray objects are still equal or identical if they have\nmissing values marked by ``NaN`` in the same locations.\n\nIn contrast, the ``==`` operation performs element-wise comparison (like\nnumpy):\n\n.. 
ipython:: python\n\n arr == arr.copy()\n\nNote that ``NaN`` does not compare equal to ``NaN`` in element-wise comparison;\nyou may need to deal with missing values explicitly.\n\n.. _combining.no_conflicts:\n\nMerging with 'no_conflicts'\n~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nThe ``compat`` argument ``'no_conflicts'`` is only available when\ncombining xarray objects with ``merge``. In addition to the above comparison\nmethods it allows the merging of xarray objects with locations where *either*\nhave ``NaN`` values. This can be used to combine data with overlapping\ncoordinates as long as any non-missing values agree or are disjoint:\n\n.. ipython:: python\n\n ds1 = xr.Dataset({'a': ('x', [10, 20, 30, np.nan])}, {'x': [1, 2, 3, 4]})\n ds2 = xr.Dataset({'a': ('x', [np.nan, 30, 40, 50])}, {'x': [2, 3, 4, 5]})\n xr.merge([ds1, ds2], compat='no_conflicts')\n\nNote that due to the underlying representation of missing values as floating\npoint numbers (``NaN``), variable data type is not always preserved when merging\nin this manner.\n\n.. _combining.multi:\n\nCombining along multiple dimensions\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. note::\n\n There are currently three combining functions with similar names:\n :py:func:`~xarray.auto_combine`, :py:func:`~xarray.combine_by_coords`, and\n :py:func:`~xarray.combine_nested`. This is because\n ``auto_combine`` is in the process of being deprecated in favour of the other\n two functions, which are more general. If your code currently relies on\n ``auto_combine``, then you will be able to get similar functionality by using\n ``combine_nested``.\n\nFor combining many objects along multiple dimensions xarray provides\n:py:func:`~xarray.combine_nested`` and :py:func:`~xarray.combine_by_coords`. These\nfunctions use a combination of ``concat`` and ``merge`` across different\nvariables to combine many objects into one.\n\n:py:func:`~xarray.combine_nested`` requires specifying the order in which the\nobjects should be combined, while :py:func:`~xarray.combine_by_coords` attempts to\ninfer this ordering automatically from the coordinates in the data.\n\n:py:func:`~xarray.combine_nested` is useful when you know the spatial\nrelationship between each object in advance. The datasets must be provided in\nthe form of a nested list, which specifies their relative position and\nordering. A common task is collecting data from a parallelized simulation where\neach processor wrote out data to a separate file. A domain which was decomposed\ninto 4 parts, 2 each along both the x and y axes, requires organising the\ndatasets into a doubly-nested list, e.g:\n\n.. ipython:: python\n\n arr = xr.DataArray(name='temperature', data=np.random.randint(5, size=(2, 2)), dims=['x', 'y'])\n arr\n ds_grid = [[arr, arr], [arr, arr]]\n xr.combine_nested(ds_grid, concat_dim=['x', 'y'])\n\n:py:func:`~xarray.combine_nested` can also be used to explicitly merge datasets\nwith different variables. For example if we have 4 datasets, which are divided\nalong two times, and contain two different variables, we can pass ``None``\nto ``'concat_dim'`` to specify the dimension of the nested list over which\nwe wish to use ``merge`` instead of ``concat``:\n\n.. 
ipython:: python\n\n temp = xr.DataArray(name='temperature', data=np.random.randn(2), dims=['t'])\n precip = xr.DataArray(name='precipitation', data=np.random.randn(2), dims=['t'])\n ds_grid = [[temp, precip], [temp, precip]]\n xr.combine_nested(ds_grid, concat_dim=['t', None])\n\n:py:func:`~xarray.combine_by_coords` is for combining objects which have dimension\ncoordinates which specify their relationship to and order relative to one\nanother, for example a linearly-increasing 'time' dimension coordinate.\n\nHere we combine two datasets using their common dimension coordinates. Notice\nthey are concatenated in order based on the values in their dimension\ncoordinates, not on their position in the list passed to ``combine_by_coords``.\n\n.. ipython:: python\n :okwarning:\n\n x1 = xr.DataArray(name='foo', data=np.random.randn(3), coords=[('x', [0, 1, 2])])\n x2 = xr.DataArray(name='foo', data=np.random.randn(3), coords=[('x', [3, 4, 5])])\n xr.combine_by_coords([x2, x1])\n\nThese functions can be used by :py:func:`~xarray.open_mfdataset` to open many\nfiles as one dataset. The particular function used is specified by setting the\nargument ``'combine'`` to ``'by_coords'`` or ``'nested'``. This is useful for\nsituations where your data is split across many files in multiple locations,\nwhich have some known relationship between one another."},{"id":300,"name":"doc/gallery","nodeType":"Package"},{"fileName":"plot_rasterio.py","filePath":"doc/gallery","id":301,"nodeType":"File","text":"# -*- coding: utf-8 -*-\n\"\"\"\n.. _recipes.rasterio:\n\n=================================\nParsing rasterio's geocoordinates\n=================================\n\n\nConverting a projection's cartesian coordinates into 2D longitudes and\nlatitudes.\n\nThese new coordinates might be handy for plotting and indexing, but it should\nbe kept in mind that a grid which is regular in projection coordinates will\nlikely be irregular in lon/lat. 
It is often recommended to work in the data's\noriginal map projection (see :ref:`recipes.rasterio_rgb`).\n\"\"\"\n\nimport cartopy.crs as ccrs\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom rasterio.warp import transform\n\nimport xarray as xr\n\n# Read the data\nurl = \"https://github.com/mapbox/rasterio/raw/master/tests/data/RGB.byte.tif\"\nda = xr.open_rasterio(url)\n\n# Compute the lon/lat coordinates with rasterio.warp.transform\nny, nx = len(da[\"y\"]), len(da[\"x\"])\nx, y = np.meshgrid(da[\"x\"], da[\"y\"])\n\n# Rasterio works with 1D arrays\nlon, lat = transform(da.crs, {\"init\": \"EPSG:4326\"}, x.flatten(), y.flatten())\nlon = np.asarray(lon).reshape((ny, nx))\nlat = np.asarray(lat).reshape((ny, nx))\nda.coords[\"lon\"] = ((\"y\", \"x\"), lon)\nda.coords[\"lat\"] = ((\"y\", \"x\"), lat)\n\n# Compute a greyscale out of the rgb image\ngreyscale = da.mean(dim=\"band\")\n\n# Plot on a map\nax = plt.subplot(projection=ccrs.PlateCarree())\ngreyscale.plot(\n ax=ax,\n x=\"lon\",\n y=\"lat\",\n transform=ccrs.PlateCarree(),\n cmap=\"Greys_r\",\n add_colorbar=False,\n)\nax.coastlines(\"10m\", color=\"r\")\nplt.show()\n"},{"col":0,"comment":"null","endLoc":147,"header":"def _assert_compat_valid(compat)","id":302,"name":"_assert_compat_valid","nodeType":"Function","startLoc":145,"text":"def _assert_compat_valid(compat):\n if compat not in _VALID_COMPAT:\n raise ValueError(\"compat=%r invalid: must be %s\" % (compat, set(_VALID_COMPAT)))"},{"col":0,"comment":"Convert pandas values found in a list of labeled objects.\n\n Parameters\n ----------\n objects : list of Dataset or mappings\n The mappings may contain any sort of objects coercible to\n xarray.Variables as keys, including pandas objects.\n\n Returns\n -------\n List of Dataset or dictionary objects. Any inputs or values in the inputs\n that were pandas objects have been converted into native xarray objects.\n ","endLoc":388,"header":"def coerce_pandas_values(objects: Iterable[\"CoercibleMapping\"]) -> List[\"DatasetLike\"]","id":303,"name":"coerce_pandas_values","nodeType":"Function","startLoc":358,"text":"def coerce_pandas_values(objects: Iterable[\"CoercibleMapping\"]) -> List[\"DatasetLike\"]:\n \"\"\"Convert pandas values found in a list of labeled objects.\n\n Parameters\n ----------\n objects : list of Dataset or mappings\n The mappings may contain any sort of objects coercible to\n xarray.Variables as keys, including pandas objects.\n\n Returns\n -------\n List of Dataset or dictionary objects. 
Any inputs or values in the inputs\n that were pandas objects have been converted into native xarray objects.\n \"\"\"\n from .dataarray import DataArray\n from .dataset import Dataset\n\n out = []\n for obj in objects:\n if isinstance(obj, Dataset):\n variables: \"DatasetLike\" = obj\n else:\n variables = {}\n if isinstance(obj, PANDAS_TYPES):\n obj = dict(obj.iteritems())\n for k, v in obj.items():\n if isinstance(v, PANDAS_TYPES):\n v = DataArray(v)\n variables[k] = v\n out.append(variables)\n return out"},{"col":0,"comment":"\n Decompose outer indexer to the successive two indexers, where the\n first indexer will be used to index backend arrays, while the second one\n is used to index the loaded on-memory np.ndarray.\n\n Parameters\n ----------\n indexer: OuterIndexer or BasicIndexer\n indexing_support: One of the entries of IndexingSupport\n\n Returns\n -------\n backend_indexer: OuterIndexer or BasicIndexer\n np_indexers: an ExplicitIndexer (OuterIndexer / BasicIndexer)\n\n Notes\n -----\n This function is used to realize the vectorized indexing for the backend\n arrays that only support basic or outer indexing.\n\n As an example, let us consider to index a few elements from a backend array\n with a orthogonal indexer ([0, 3, 1], [2, 3, 2]).\n Even if the backend array only supports basic indexing, it is more\n efficient to load a subslice of the array than loading the entire array,\n\n >>> backend_indexer = BasicIndexer(slice(0, 3), slice(2, 3))\n >>> array = array[backend_indexer] # load subslice of the array\n >>> np_indexer = OuterIndexer([0, 2, 1], [0, 1, 0])\n >>> array[np_indexer] # outer indexing for on-memory np.ndarray.\n ","endLoc":1066,"header":"def _decompose_outer_indexer(\n indexer: Union[BasicIndexer, OuterIndexer],\n shape: Tuple[int, ...],\n indexing_support: IndexingSupport,\n) -> Tuple[ExplicitIndexer, ExplicitIndexer]","id":304,"name":"_decompose_outer_indexer","nodeType":"Function","startLoc":947,"text":"def _decompose_outer_indexer(\n indexer: Union[BasicIndexer, OuterIndexer],\n shape: Tuple[int, ...],\n indexing_support: IndexingSupport,\n) -> Tuple[ExplicitIndexer, ExplicitIndexer]:\n \"\"\"\n Decompose outer indexer to the successive two indexers, where the\n first indexer will be used to index backend arrays, while the second one\n is used to index the loaded on-memory np.ndarray.\n\n Parameters\n ----------\n indexer: OuterIndexer or BasicIndexer\n indexing_support: One of the entries of IndexingSupport\n\n Returns\n -------\n backend_indexer: OuterIndexer or BasicIndexer\n np_indexers: an ExplicitIndexer (OuterIndexer / BasicIndexer)\n\n Notes\n -----\n This function is used to realize the vectorized indexing for the backend\n arrays that only support basic or outer indexing.\n\n As an example, let us consider to index a few elements from a backend array\n with a orthogonal indexer ([0, 3, 1], [2, 3, 2]).\n Even if the backend array only supports basic indexing, it is more\n efficient to load a subslice of the array than loading the entire array,\n\n >>> backend_indexer = BasicIndexer(slice(0, 3), slice(2, 3))\n >>> array = array[backend_indexer] # load subslice of the array\n >>> np_indexer = OuterIndexer([0, 2, 1], [0, 1, 0])\n >>> array[np_indexer] # outer indexing for on-memory np.ndarray.\n \"\"\"\n if indexing_support == IndexingSupport.VECTORIZED:\n return indexer, BasicIndexer(())\n assert isinstance(indexer, (OuterIndexer, BasicIndexer))\n\n backend_indexer = []\n np_indexer = []\n # make indexer positive\n pos_indexer = []\n for k, s in 
zip(indexer.tuple, shape):\n if isinstance(k, np.ndarray):\n pos_indexer.append(np.where(k < 0, k + s, k))\n elif isinstance(k, integer_types) and k < 0:\n pos_indexer.append(k + s)\n else:\n pos_indexer.append(k)\n indexer_elems = pos_indexer\n\n if indexing_support is IndexingSupport.OUTER_1VECTOR:\n # some backends such as h5py supports only 1 vector in indexers\n # We choose the most efficient axis\n gains = [\n (np.max(k) - np.min(k) + 1.0) / len(np.unique(k))\n if isinstance(k, np.ndarray)\n else 0\n for k in indexer_elems\n ]\n array_index = np.argmax(np.array(gains)) if len(gains) > 0 else None\n\n for i, (k, s) in enumerate(zip(indexer_elems, shape)):\n if isinstance(k, np.ndarray) and i != array_index:\n # np.ndarray key is converted to slice that covers the entire\n # entries of this key.\n backend_indexer.append(slice(np.min(k), np.max(k) + 1))\n np_indexer.append(k - np.min(k))\n elif isinstance(k, np.ndarray):\n # Remove duplicates and sort them in the increasing order\n pkey, ekey = np.unique(k, return_inverse=True)\n backend_indexer.append(pkey)\n np_indexer.append(ekey)\n elif isinstance(k, integer_types):\n backend_indexer.append(k)\n else: # slice: convert positive step slice for backend\n bk_slice, np_slice = _decompose_slice(k, s)\n backend_indexer.append(bk_slice)\n np_indexer.append(np_slice)\n\n return (OuterIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer)))\n\n if indexing_support == IndexingSupport.OUTER:\n for k, s in zip(indexer_elems, shape):\n if isinstance(k, slice):\n # slice: convert positive step slice for backend\n bk_slice, np_slice = _decompose_slice(k, s)\n backend_indexer.append(bk_slice)\n np_indexer.append(np_slice)\n elif isinstance(k, integer_types):\n backend_indexer.append(k)\n elif isinstance(k, np.ndarray) and (np.diff(k) >= 0).all():\n backend_indexer.append(k)\n np_indexer.append(slice(None))\n else:\n # Remove duplicates and sort them in the increasing order\n oind, vind = np.unique(k, return_inverse=True)\n backend_indexer.append(oind)\n np_indexer.append(vind.reshape(*k.shape))\n\n return (OuterIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer)))\n\n # basic indexer\n assert indexing_support == IndexingSupport.BASIC\n\n for k, s in zip(indexer_elems, shape):\n if isinstance(k, np.ndarray):\n # np.ndarray key is converted to slice that covers the entire\n # entries of this key.\n backend_indexer.append(slice(np.min(k), np.max(k) + 1))\n np_indexer.append(k - np.min(k))\n elif isinstance(k, integer_types):\n backend_indexer.append(k)\n else: # slice: convert positive step slice for backend\n bk_slice, np_slice = _decompose_slice(k, s)\n backend_indexer.append(bk_slice)\n np_indexer.append(np_slice)\n\n return (BasicIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer)))"},{"className":"H5NetCDFStore","col":0,"comment":"Store for reading and writing data via h5netcdf\n ","endLoc":264,"id":305,"nodeType":"Class","startLoc":68,"text":"class H5NetCDFStore(WritableCFDataStore):\n \"\"\"Store for reading and writing data via h5netcdf\n \"\"\"\n\n def __init__(\n self,\n filename,\n mode=\"r\",\n format=None,\n group=None,\n lock=None,\n autoclose=False,\n invalid_netcdf=None,\n ):\n import h5netcdf\n\n if format not in [None, \"NETCDF4\"]:\n raise ValueError(\"invalid format for h5netcdf backend\")\n\n kwargs = {\"invalid_netcdf\": invalid_netcdf}\n\n self._manager = CachingFileManager(\n h5netcdf.File, filename, mode=mode, kwargs=kwargs\n )\n\n if lock is None:\n if mode == \"r\":\n lock = HDF5_LOCK\n 
else:\n lock = combine_locks([HDF5_LOCK, get_write_lock(filename)])\n\n self._group = group\n self.format = format\n self._filename = filename\n self._mode = mode\n self.lock = ensure_lock(lock)\n self.autoclose = autoclose\n\n def _acquire(self, needs_lock=True):\n with self._manager.acquire_context(needs_lock) as root:\n ds = _nc4_require_group(\n root, self._group, self._mode, create_group=_h5netcdf_create_group\n )\n return ds\n\n @property\n def ds(self):\n return self._acquire()\n\n def open_store_variable(self, name, var):\n import h5py\n\n dimensions = var.dimensions\n data = indexing.LazilyOuterIndexedArray(H5NetCDFArrayWrapper(name, self))\n attrs = _read_attributes(var)\n\n # netCDF4 specific encoding\n encoding = {\n \"chunksizes\": var.chunks,\n \"fletcher32\": var.fletcher32,\n \"shuffle\": var.shuffle,\n }\n # Convert h5py-style compression options to NetCDF4-Python\n # style, if possible\n if var.compression == \"gzip\":\n encoding[\"zlib\"] = True\n encoding[\"complevel\"] = var.compression_opts\n elif var.compression is not None:\n encoding[\"compression\"] = var.compression\n encoding[\"compression_opts\"] = var.compression_opts\n\n # save source so __repr__ can detect if it's local or not\n encoding[\"source\"] = self._filename\n encoding[\"original_shape\"] = var.shape\n\n vlen_dtype = h5py.check_dtype(vlen=var.dtype)\n if vlen_dtype is str:\n encoding[\"dtype\"] = str\n elif vlen_dtype is not None: # pragma: no cover\n # xarray doesn't support writing arbitrary vlen dtypes yet.\n pass\n else:\n encoding[\"dtype\"] = var.dtype\n\n return Variable(dimensions, data, attrs, encoding)\n\n def get_variables(self):\n return FrozenDict(\n (k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()\n )\n\n def get_attrs(self):\n return FrozenDict(_read_attributes(self.ds))\n\n def get_dimensions(self):\n return self.ds.dimensions\n\n def get_encoding(self):\n encoding = {}\n encoding[\"unlimited_dims\"] = {\n k for k, v in self.ds.dimensions.items() if v is None\n }\n return encoding\n\n def set_dimension(self, name, length, is_unlimited=False):\n if is_unlimited:\n self.ds.dimensions[name] = None\n self.ds.resize_dimension(name, length)\n else:\n self.ds.dimensions[name] = length\n\n def set_attribute(self, key, value):\n self.ds.attrs[key] = value\n\n def encode_variable(self, variable):\n return _encode_nc4_variable(variable)\n\n def prepare_variable(\n self, name, variable, check_encoding=False, unlimited_dims=None\n ):\n import h5py\n\n attrs = variable.attrs.copy()\n dtype = _get_datatype(variable, raise_on_invalid_encoding=check_encoding)\n\n fillvalue = attrs.pop(\"_FillValue\", None)\n if dtype is str and fillvalue is not None:\n raise NotImplementedError(\n \"h5netcdf does not yet support setting a fill value for \"\n \"variable-length strings \"\n \"(https://github.com/shoyer/h5netcdf/issues/37). 
\"\n \"Either remove '_FillValue' from encoding on variable %r \"\n \"or set {'dtype': 'S1'} in encoding to use the fixed width \"\n \"NC_CHAR type.\" % name\n )\n\n if dtype is str:\n dtype = h5py.special_dtype(vlen=str)\n\n encoding = _extract_h5nc_encoding(variable, raise_on_invalid=check_encoding)\n kwargs = {}\n\n # Convert from NetCDF4-Python style compression settings to h5py style\n # If both styles are used together, h5py takes precedence\n # If set_encoding=True, raise ValueError in case of mismatch\n if encoding.pop(\"zlib\", False):\n if check_encoding and encoding.get(\"compression\") not in (None, \"gzip\"):\n raise ValueError(\"'zlib' and 'compression' encodings mismatch\")\n encoding.setdefault(\"compression\", \"gzip\")\n\n if (\n check_encoding\n and \"complevel\" in encoding\n and \"compression_opts\" in encoding\n and encoding[\"complevel\"] != encoding[\"compression_opts\"]\n ):\n raise ValueError(\"'complevel' and 'compression_opts' encodings \" \"mismatch\")\n complevel = encoding.pop(\"complevel\", 0)\n if complevel != 0:\n encoding.setdefault(\"compression_opts\", complevel)\n\n encoding[\"chunks\"] = encoding.pop(\"chunksizes\", None)\n\n # Do not apply compression, filters or chunking to scalars.\n if variable.shape:\n for key in [\n \"compression\",\n \"compression_opts\",\n \"shuffle\",\n \"chunks\",\n \"fletcher32\",\n ]:\n if key in encoding:\n kwargs[key] = encoding[key]\n if name not in self.ds:\n nc4_var = self.ds.create_variable(\n name,\n dtype=dtype,\n dimensions=variable.dims,\n fillvalue=fillvalue,\n **kwargs\n )\n else:\n nc4_var = self.ds[name]\n\n for k, v in attrs.items():\n nc4_var.attrs[k] = v\n\n target = H5NetCDFArrayWrapper(name, self)\n\n return target, variable.data\n\n def sync(self):\n self.ds.sync()\n\n def close(self, **kwargs):\n self._manager.close(**kwargs)"},{"col":4,"comment":"null","endLoc":104,"header":"def __init__(\n self,\n filename,\n mode=\"r\",\n format=None,\n group=None,\n lock=None,\n autoclose=False,\n invalid_netcdf=None,\n )","id":306,"name":"__init__","nodeType":"Function","startLoc":72,"text":"def __init__(\n self,\n filename,\n mode=\"r\",\n format=None,\n group=None,\n lock=None,\n autoclose=False,\n invalid_netcdf=None,\n ):\n import h5netcdf\n\n if format not in [None, \"NETCDF4\"]:\n raise ValueError(\"invalid format for h5netcdf backend\")\n\n kwargs = {\"invalid_netcdf\": invalid_netcdf}\n\n self._manager = CachingFileManager(\n h5netcdf.File, filename, mode=mode, kwargs=kwargs\n )\n\n if lock is None:\n if mode == \"r\":\n lock = HDF5_LOCK\n else:\n lock = combine_locks([HDF5_LOCK, get_write_lock(filename)])\n\n self._group = group\n self.format = format\n self._filename = filename\n self._mode = mode\n self.lock = ensure_lock(lock)\n self.autoclose = autoclose"},{"col":0,"comment":"null","endLoc":182,"header":"def nancumsum(a, axis=None, dtype=None, out=None)","id":307,"name":"nancumsum","nodeType":"Function","startLoc":179,"text":"def nancumsum(a, axis=None, dtype=None, out=None):\n return _dask_or_eager_func(\"nancumsum\", eager_module=nputils)(\n a, axis=axis, dtype=dtype\n )"},{"attributeType":"null","col":0,"comment":"null","endLoc":46,"id":308,"name":"_VALID_COMPAT","nodeType":"Attribute","startLoc":46,"text":"_VALID_COMPAT"},{"attributeType":"null","col":22,"comment":"null","endLoc":19,"id":309,"name":"ccrs","nodeType":"Attribute","startLoc":19,"text":"ccrs"},{"col":0,"comment":"Return the unique variable from a list of variables or raise MergeError.\n\n Parameters\n ----------\n name : hashable\n 
Name for this variable.\n variables : list of xarray.Variable\n List of Variable objects, all of which go by the same name in different\n inputs.\n compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional\n Type of equality check to use.\n equals: None or bool,\n corresponding to result of compat test\n\n Returns\n -------\n Variable to use in the result.\n\n Raises\n ------\n MergeError: if any of the variables are not equal.\n ","endLoc":142,"header":"def unique_variable(\n name: Hashable,\n variables: List[Variable],\n compat: str = \"broadcast_equals\",\n equals: bool = None,\n) -> Variable","id":310,"name":"unique_variable","nodeType":"Function","startLoc":80,"text":"def unique_variable(\n name: Hashable,\n variables: List[Variable],\n compat: str = \"broadcast_equals\",\n equals: bool = None,\n) -> Variable:\n \"\"\"Return the unique variable from a list of variables or raise MergeError.\n\n Parameters\n ----------\n name : hashable\n Name for this variable.\n variables : list of xarray.Variable\n List of Variable objects, all of which go by the same name in different\n inputs.\n compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional\n Type of equality check to use.\n equals: None or bool,\n corresponding to result of compat test\n\n Returns\n -------\n Variable to use in the result.\n\n Raises\n ------\n MergeError: if any of the variables are not equal.\n \"\"\"\n out = variables[0]\n\n if len(variables) == 1 or compat == \"override\":\n return out\n\n combine_method = None\n\n if compat == \"minimal\":\n compat = \"broadcast_equals\"\n\n if compat == \"broadcast_equals\":\n dim_lengths = broadcast_dimension_size(variables)\n out = out.set_dims(dim_lengths)\n\n if compat == \"no_conflicts\":\n combine_method = \"fillna\"\n\n if equals is None:\n out = out.compute()\n for var in variables[1:]:\n equals = getattr(out, compat)(var)\n if not equals:\n break\n\n if not equals:\n raise MergeError(\n \"conflicting values for variable {!r} on objects to be combined. 
\"\n \"You can skip this check by specifying compat='override'.\".format(name)\n )\n\n if combine_method:\n for var in variables[1:]:\n out = getattr(out, combine_method)(var)\n\n return out"},{"col":4,"comment":"null","endLoc":111,"header":"def _acquire(self, needs_lock=True)","id":311,"name":"_acquire","nodeType":"Function","startLoc":106,"text":"def _acquire(self, needs_lock=True):\n with self._manager.acquire_context(needs_lock) as root:\n ds = _nc4_require_group(\n root, self._group, self._mode, create_group=_h5netcdf_create_group\n )\n return ds"},{"col":0,"comment":"Extract dimension sizes from a dictionary of variables.\n\n Raises ValueError if any dimensions have different sizes.\n ","endLoc":69,"header":"def broadcast_dimension_size(variables: List[Variable],) -> Dict[Hashable, int]","id":312,"name":"broadcast_dimension_size","nodeType":"Function","startLoc":58,"text":"def broadcast_dimension_size(variables: List[Variable],) -> Dict[Hashable, int]:\n \"\"\"Extract dimension sizes from a dictionary of variables.\n\n Raises ValueError if any dimensions have different sizes.\n \"\"\"\n dims: Dict[Hashable, int] = {}\n for var in variables:\n for dim, size in zip(var.dims, var.shape):\n if dim in dims and size != dims[dim]:\n raise ValueError(\"index %r not aligned\" % dim)\n dims[dim] = size\n return dims"},{"col":0,"comment":"null","endLoc":188,"header":"def nancumprod(a, axis=None, dtype=None, out=None)","id":313,"name":"nancumprod","nodeType":"Function","startLoc":185,"text":"def nancumprod(a, axis=None, dtype=None, out=None):\n return _dask_or_eager_func(\"nancumprod\", eager_module=nputils)(\n a, axis=axis, dtype=dtype\n )"},{"attributeType":"null","col":28,"comment":"null","endLoc":20,"id":314,"name":"plt","nodeType":"Attribute","startLoc":20,"text":"plt"},{"attributeType":"null","col":16,"comment":"null","endLoc":21,"id":315,"name":"np","nodeType":"Attribute","startLoc":21,"text":"np"},{"attributeType":"null","col":17,"comment":"null","endLoc":24,"id":316,"name":"xr","nodeType":"Attribute","startLoc":24,"text":"xr"},{"attributeType":"null","col":0,"comment":"null","endLoc":27,"id":317,"name":"url","nodeType":"Attribute","startLoc":27,"text":"url"},{"attributeType":"DataArray","col":0,"comment":"null","endLoc":28,"id":318,"name":"da","nodeType":"Attribute","startLoc":28,"text":"da"},{"className":"IndexVariable","col":0,"comment":"Wrapper for accommodating a pandas.Index in an xarray.Variable.\n\n IndexVariable preserve loaded values in the form of a pandas.Index instead\n of a NumPy array. Hence, their values are immutable and must always be one-\n dimensional.\n\n They also have a name property, which is the name of their sole dimension\n unless another name is given.\n ","endLoc":2127,"id":319,"nodeType":"Class","startLoc":1942,"text":"class IndexVariable(Variable):\n \"\"\"Wrapper for accommodating a pandas.Index in an xarray.Variable.\n\n IndexVariable preserve loaded values in the form of a pandas.Index instead\n of a NumPy array. 
Hence, their values are immutable and must always be one-\n dimensional.\n\n They also have a name property, which is the name of their sole dimension\n unless another name is given.\n \"\"\"\n\n __slots__ = ()\n\n def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):\n super().__init__(dims, data, attrs, encoding, fastpath)\n if self.ndim != 1:\n raise ValueError(\"%s objects must be 1-dimensional\" % type(self).__name__)\n\n # Unlike in Variable, always eagerly load values into memory\n if not isinstance(self._data, PandasIndexAdapter):\n self._data = PandasIndexAdapter(self._data)\n\n def load(self):\n # data is already loaded into memory for IndexVariable\n return self\n\n # https://github.com/python/mypy/issues/1465\n @Variable.data.setter # type: ignore\n def data(self, data):\n Variable.data.fset(self, data)\n if not isinstance(self._data, PandasIndexAdapter):\n self._data = PandasIndexAdapter(self._data)\n\n def chunk(self, chunks=None, name=None, lock=False):\n # Dummy - do not chunk. This method is invoked e.g. by Dataset.chunk()\n return self.copy(deep=False)\n\n def _finalize_indexing_result(self, dims, data):\n if getattr(data, \"ndim\", 0) != 1:\n # returns Variable rather than IndexVariable if multi-dimensional\n return Variable(dims, data, self._attrs, self._encoding)\n else:\n return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)\n\n def __setitem__(self, key, value):\n raise TypeError(\"%s values cannot be modified\" % type(self).__name__)\n\n @classmethod\n def concat(cls, variables, dim=\"concat_dim\", positions=None, shortcut=False):\n \"\"\"Specialized version of Variable.concat for IndexVariable objects.\n\n This exists because we want to avoid converting Index objects to NumPy\n arrays, if possible.\n \"\"\"\n if not isinstance(dim, str):\n dim, = dim.dims\n\n variables = list(variables)\n first_var = variables[0]\n\n if any(not isinstance(v, cls) for v in variables):\n raise TypeError(\n \"IndexVariable.concat requires that all input \"\n \"variables be IndexVariable objects\"\n )\n\n indexes = [v._data.array for v in variables]\n\n if not indexes:\n data = []\n else:\n data = indexes[0].append(indexes[1:])\n\n if positions is not None:\n indices = nputils.inverse_permutation(np.concatenate(positions))\n data = data.take(indices)\n\n attrs = dict(first_var.attrs)\n if not shortcut:\n for var in variables:\n if var.dims != first_var.dims:\n raise ValueError(\"inconsistent dimensions\")\n utils.remove_incompatible_items(attrs, var.attrs)\n\n return cls(first_var.dims, data, attrs)\n\n def copy(self, deep=True, data=None):\n \"\"\"Returns a copy of this object.\n\n `deep` is ignored since data is stored in the form of\n pandas.Index, which is already immutable. Dimensions, attributes\n and encodings are always copied.\n\n Use `data` to create a new object with the same structure as\n original but entirely new data.\n\n Parameters\n ----------\n deep : bool, optional\n Deep is ignored when data is given. Whether the data array is\n loaded into memory and copied onto the new object. Default is True.\n data : array_like, optional\n Data to use in the new object. 
Must have same shape as original.\n\n Returns\n -------\n object : Variable\n New object with dimensions, attributes, encodings, and optionally\n data copied from original.\n \"\"\"\n if data is None:\n data = self._data.copy(deep=deep)\n else:\n data = as_compatible_data(data)\n if self.shape != data.shape:\n raise ValueError(\n \"Data shape {} must match shape of object {}\".format(\n data.shape, self.shape\n )\n )\n return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True)\n\n def equals(self, other, equiv=None):\n # if equiv is specified, super up\n if equiv is not None:\n return super().equals(other, equiv)\n\n # otherwise use the native index equals, rather than looking at _data\n other = getattr(other, \"variable\", other)\n try:\n return self.dims == other.dims and self._data_equals(other)\n except (TypeError, AttributeError):\n return False\n\n def _data_equals(self, other):\n return self.to_index().equals(other.to_index())\n\n def to_index_variable(self):\n \"\"\"Return this variable as an xarray.IndexVariable\"\"\"\n return self\n\n to_coord = utils.alias(to_index_variable, \"to_coord\")\n\n def to_index(self):\n \"\"\"Convert this variable to a pandas.Index\"\"\"\n # n.b. creating a new pandas.Index from an old pandas.Index is\n # basically free as pandas.Index objects are immutable\n assert self.ndim == 1\n index = self._data.array\n if isinstance(index, pd.MultiIndex):\n # set default names for multi-index unnamed levels so that\n # we can safely rename dimension / coordinate later\n valid_level_names = [\n name or \"{}_level_{}\".format(self.dims[0], i)\n for i, name in enumerate(index.names)\n ]\n index = index.set_names(valid_level_names)\n else:\n index = index.set_names(self.name)\n return index\n\n @property\n def level_names(self):\n \"\"\"Return MultiIndex level names or None if this IndexVariable has no\n MultiIndex.\n \"\"\"\n index = self.to_index()\n if isinstance(index, pd.MultiIndex):\n return index.names\n else:\n return None\n\n def get_level_variable(self, level):\n \"\"\"Return a new IndexVariable from a given MultiIndex level.\"\"\"\n if self.level_names is None:\n raise ValueError(\"IndexVariable %r has no MultiIndex\" % self.name)\n index = self.to_index()\n return type(self)(self.dims, index.get_level_values(level))\n\n @property\n def name(self):\n return self.dims[0]\n\n @name.setter\n def name(self, value):\n raise AttributeError(\"cannot modify name of IndexVariable in-place\")"},{"attributeType":"null","col":16,"comment":"null","endLoc":1,"id":320,"name":"np","nodeType":"Attribute","startLoc":1,"text":"np"},{"attributeType":"null","col":25,"comment":"null","endLoc":8,"id":321,"name":"dask_array","nodeType":"Attribute","startLoc":8,"text":"dask_array"},{"attributeType":"None","col":4,"comment":"null","endLoc":10,"id":322,"name":"dask_array","nodeType":"Attribute","startLoc":10,"text":"dask_array"},{"col":0,"comment":"","endLoc":1,"header":"nanops.py#","id":323,"name":"","nodeType":"Function","startLoc":1,"text":"try:\n import dask.array as dask_array\nexcept ImportError:\n dask_array = None"},{"col":4,"comment":"null","endLoc":115,"header":"@property\n def ds(self)","id":324,"name":"ds","nodeType":"Function","startLoc":113,"text":"@property\n def ds(self):\n return self._acquire()"},{"id":325,"name":"Makefile","nodeType":"TextFile","path":"doc","text":"# Makefile for Sphinx documentation\n#\n\n# You can set these variables from the command line.\nSPHINXOPTS =\nSPHINXBUILD = sphinx-build\nSPHINXATUOBUILD = sphinx-autobuild\nPAPER 
=\nBUILDDIR = _build\n\n# Internal variables.\nPAPEROPT_a4 = -D latex_paper_size=a4\nPAPEROPT_letter = -D latex_paper_size=letter\nALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .\n# the i18n builder cannot share the environment and doctrees with the others\nI18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .\n\n.PHONY: help\nhelp:\n\t@echo \"Please use \\`make ' where is one of\"\n\t@echo \" html to make standalone HTML files\"\n\t@echo \" livehtml Make standalone HTML files and rebuild the documentation when a change is detected. Also includes a livereload enabled web server\"\n\t@echo \" dirhtml to make HTML files named index.html in directories\"\n\t@echo \" singlehtml to make a single large HTML file\"\n\t@echo \" pickle to make pickle files\"\n\t@echo \" json to make JSON files\"\n\t@echo \" htmlhelp to make HTML files and a HTML help project\"\n\t@echo \" qthelp to make HTML files and a qthelp project\"\n\t@echo \" applehelp to make an Apple Help Book\"\n\t@echo \" devhelp to make HTML files and a Devhelp project\"\n\t@echo \" epub to make an epub\"\n\t@echo \" epub3 to make an epub3\"\n\t@echo \" latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter\"\n\t@echo \" latexpdf to make LaTeX files and run them through pdflatex\"\n\t@echo \" latexpdfja to make LaTeX files and run them through platex/dvipdfmx\"\n\t@echo \" text to make text files\"\n\t@echo \" man to make manual pages\"\n\t@echo \" texinfo to make Texinfo files\"\n\t@echo \" info to make Texinfo files and run them through makeinfo\"\n\t@echo \" gettext to make PO message catalogs\"\n\t@echo \" changes to make an overview of all changed/added/deprecated items\"\n\t@echo \" xml to make Docutils-native XML files\"\n\t@echo \" pseudoxml to make pseudoxml-XML files for display purposes\"\n\t@echo \" linkcheck to check all external links for integrity\"\n\t@echo \" doctest to run all doctests embedded in the documentation (if enabled)\"\n\t@echo \" coverage to run coverage check of the documentation (if enabled)\"\n\t@echo \" dummy to check syntax errors of document sources\"\n\n.PHONY: clean\nclean:\n\trm -rf $(BUILDDIR)/*\n\trm -rf generated/*\n\trm -rf auto_gallery/\n\n.PHONY: html\nhtml:\n\t$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html\n\t@echo\n\t@echo \"Build finished. The HTML pages are in $(BUILDDIR)/html.\"\n\n.PHONY: livehtml\nlivehtml:\n\t# @echo \"$(SPHINXATUOBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html\"\n\t$(SPHINXATUOBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html\n\n.PHONY: dirhtml\ndirhtml:\n\t$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml\n\t@echo\n\t@echo \"Build finished. The HTML pages are in $(BUILDDIR)/dirhtml.\"\n\n.PHONY: singlehtml\nsinglehtml:\n\t$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml\n\t@echo\n\t@echo \"Build finished. The HTML page is in $(BUILDDIR)/singlehtml.\"\n\n.PHONY: html-noplot\nhtml-noplot:\n\t$(SPHINXBUILD) -D plot_gallery=0 -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html\n\t@echo\n\t@echo \"Build finished. 
The HTML pages are in $(BUILDDIR)/html.\"\n\n.PHONY: pickle\npickle:\n\t$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle\n\t@echo\n\t@echo \"Build finished; now you can process the pickle files.\"\n\n.PHONY: json\njson:\n\t$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json\n\t@echo\n\t@echo \"Build finished; now you can process the JSON files.\"\n\n.PHONY: htmlhelp\nhtmlhelp:\n\t$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp\n\t@echo\n\t@echo \"Build finished; now you can run HTML Help Workshop with the\" \\\n\t \".hhp project file in $(BUILDDIR)/htmlhelp.\"\n\n.PHONY: qthelp\nqthelp:\n\t$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp\n\t@echo\n\t@echo \"Build finished; now you can run \"qcollectiongenerator\" with the\" \\\n\t \".qhcp project file in $(BUILDDIR)/qthelp, like this:\"\n\t@echo \"# qcollectiongenerator $(BUILDDIR)/qthelp/xarray.qhcp\"\n\t@echo \"To view the help file:\"\n\t@echo \"# assistant -collectionFile $(BUILDDIR)/qthelp/xarray.qhc\"\n\n.PHONY: applehelp\napplehelp:\n\t$(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp\n\t@echo\n\t@echo \"Build finished. The help book is in $(BUILDDIR)/applehelp.\"\n\t@echo \"N.B. You won't be able to view it unless you put it in\" \\\n\t \"~/Library/Documentation/Help or install it in your application\" \\\n\t \"bundle.\"\n\n.PHONY: devhelp\ndevhelp:\n\t$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp\n\t@echo\n\t@echo \"Build finished.\"\n\t@echo \"To view the help file:\"\n\t@echo \"# mkdir -p $$HOME/.local/share/devhelp/xarray\"\n\t@echo \"# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/xarray\"\n\t@echo \"# devhelp\"\n\n.PHONY: epub\nepub:\n\t$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub\n\t@echo\n\t@echo \"Build finished. The epub file is in $(BUILDDIR)/epub.\"\n\n.PHONY: epub3\nepub3:\n\t$(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3\n\t@echo\n\t@echo \"Build finished. The epub3 file is in $(BUILDDIR)/epub3.\"\n\n.PHONY: latex\nlatex:\n\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex\n\t@echo\n\t@echo \"Build finished; the LaTeX files are in $(BUILDDIR)/latex.\"\n\t@echo \"Run \\`make' in that directory to run these through (pdf)latex\" \\\n\t \"(use \\`make latexpdf' here to do that automatically).\"\n\n.PHONY: latexpdf\nlatexpdf:\n\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex\n\t@echo \"Running LaTeX files through pdflatex...\"\n\t$(MAKE) -C $(BUILDDIR)/latex all-pdf\n\t@echo \"pdflatex finished; the PDF files are in $(BUILDDIR)/latex.\"\n\n.PHONY: latexpdfja\nlatexpdfja:\n\t$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex\n\t@echo \"Running LaTeX files through platex and dvipdfmx...\"\n\t$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja\n\t@echo \"pdflatex finished; the PDF files are in $(BUILDDIR)/latex.\"\n\n.PHONY: text\ntext:\n\t$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text\n\t@echo\n\t@echo \"Build finished. The text files are in $(BUILDDIR)/text.\"\n\n.PHONY: man\nman:\n\t$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man\n\t@echo\n\t@echo \"Build finished. The manual pages are in $(BUILDDIR)/man.\"\n\n.PHONY: texinfo\ntexinfo:\n\t$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo\n\t@echo\n\t@echo \"Build finished. 
The Texinfo files are in $(BUILDDIR)/texinfo.\"\n\t@echo \"Run \\`make' in that directory to run these through makeinfo\" \\\n\t \"(use \\`make info' here to do that automatically).\"\n\n.PHONY: info\ninfo:\n\t$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo\n\t@echo \"Running Texinfo files through makeinfo...\"\n\tmake -C $(BUILDDIR)/texinfo info\n\t@echo \"makeinfo finished; the Info files are in $(BUILDDIR)/texinfo.\"\n\n.PHONY: gettext\ngettext:\n\t$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale\n\t@echo\n\t@echo \"Build finished. The message catalogs are in $(BUILDDIR)/locale.\"\n\n.PHONY: changes\nchanges:\n\t$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes\n\t@echo\n\t@echo \"The overview file is in $(BUILDDIR)/changes.\"\n\n.PHONY: linkcheck\nlinkcheck:\n\t$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck\n\t@echo\n\t@echo \"Link check complete; look for any errors in the above output \" \\\n\t \"or in $(BUILDDIR)/linkcheck/output.txt.\"\n\n.PHONY: doctest\ndoctest:\n\t$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest\n\t@echo \"Testing of doctests in the sources finished, look at the \" \\\n\t \"results in $(BUILDDIR)/doctest/output.txt.\"\n\n.PHONY: coverage\ncoverage:\n\t$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage\n\t@echo \"Testing of coverage in the sources finished, look at the \" \\\n\t \"results in $(BUILDDIR)/coverage/python.txt.\"\n\n.PHONY: xml\nxml:\n\t$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml\n\t@echo\n\t@echo \"Build finished. The XML files are in $(BUILDDIR)/xml.\"\n\n.PHONY: pseudoxml\npseudoxml:\n\t$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml\n\t@echo\n\t@echo \"Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml.\"\n\n.PHONY: dummy\ndummy:\n\t$(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy\n\t@echo\n\t@echo \"Build finished. Dummy builder generates no files.\"\n"},{"fileName":"dask_array_ops.py","filePath":"xarray/core","id":326,"nodeType":"File","text":"import numpy as np\n\nfrom . import dtypes, nputils\n\n\ndef dask_rolling_wrapper(moving_func, a, window, min_count=None, axis=-1):\n \"\"\"Wrapper to apply bottleneck moving window funcs on dask arrays\n \"\"\"\n import dask.array as da\n\n dtype, fill_value = dtypes.maybe_promote(a.dtype)\n a = a.astype(dtype)\n # inputs for overlap\n if axis < 0:\n axis = a.ndim + axis\n depth = {d: 0 for d in range(a.ndim)}\n depth[axis] = (window + 1) // 2\n boundary = {d: fill_value for d in range(a.ndim)}\n # Create overlap array.\n ag = da.overlap.overlap(a, depth=depth, boundary=boundary)\n # apply rolling func\n out = ag.map_blocks(\n moving_func, window, min_count=min_count, axis=axis, dtype=a.dtype\n )\n # trim array\n result = da.overlap.trim_internal(out, depth)\n return result\n\n\ndef rolling_window(a, axis, window, center, fill_value):\n \"\"\"Dask's equivalence to np.utils.rolling_window\n \"\"\"\n import dask.array as da\n\n orig_shape = a.shape\n if axis < 0:\n axis = a.ndim + axis\n depth = {d: 0 for d in range(a.ndim)}\n depth[axis] = int(window / 2)\n # For evenly sized window, we need to crop the first point of each block.\n offset = 1 if window % 2 == 0 else 0\n\n if depth[axis] > min(a.chunks[axis]):\n raise ValueError(\n \"For window size %d, every chunk should be larger than %d, \"\n \"but the smallest chunk size is %d. 
Rechunk your array\\n\"\n \"with a larger chunk size or a chunk size that\\n\"\n \"more evenly divides the shape of your array.\"\n % (window, depth[axis], min(a.chunks[axis]))\n )\n\n # Although da.overlap pads values to boundaries of the array,\n # the size of the generated array is smaller than what we want\n # if center == False.\n if center:\n start = int(window / 2) # 10 -> 5, 9 -> 4\n end = window - 1 - start\n else:\n start, end = window - 1, 0\n pad_size = max(start, end) + offset - depth[axis]\n drop_size = 0\n # pad_size becomes more than 0 when the overlapped array is smaller than\n # needed. In this case, we need to enlarge the original array by padding\n # before overlapping.\n if pad_size > 0:\n if pad_size < depth[axis]:\n # overlapping requires each chunk larger than depth. If pad_size is\n # smaller than the depth, we enlarge this and truncate it later.\n drop_size = depth[axis] - pad_size\n pad_size = depth[axis]\n shape = list(a.shape)\n shape[axis] = pad_size\n chunks = list(a.chunks)\n chunks[axis] = (pad_size,)\n fill_array = da.full(shape, fill_value, dtype=a.dtype, chunks=chunks)\n a = da.concatenate([fill_array, a], axis=axis)\n\n boundary = {d: fill_value for d in range(a.ndim)}\n\n # create overlap arrays\n ag = da.overlap.overlap(a, depth=depth, boundary=boundary)\n\n # apply rolling func\n def func(x, window, axis=-1):\n x = np.asarray(x)\n rolling = nputils._rolling_window(x, window, axis)\n return rolling[(slice(None),) * axis + (slice(offset, None),)]\n\n chunks = list(a.chunks)\n chunks.append(window)\n out = ag.map_blocks(\n func, dtype=a.dtype, new_axis=a.ndim, chunks=chunks, window=window, axis=axis\n )\n\n # crop boundary.\n index = (slice(None),) * axis + (slice(drop_size, drop_size + orig_shape[axis]),)\n return out[index]\n"},{"col":4,"comment":"null","endLoc":1962,"header":"def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False)","id":327,"name":"__init__","nodeType":"Function","startLoc":1955,"text":"def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):\n super().__init__(dims, data, attrs, encoding, fastpath)\n if self.ndim != 1:\n raise ValueError(\"%s objects must be 1-dimensional\" % type(self).__name__)\n\n # Unlike in Variable, always eagerly load values into memory\n if not isinstance(self._data, PandasIndexAdapter):\n self._data = PandasIndexAdapter(self._data)"},{"col":0,"comment":"Wrapper to apply bottleneck moving window funcs on dask arrays\n ","endLoc":27,"header":"def dask_rolling_wrapper(moving_func, a, window, min_count=None, axis=-1)","id":328,"name":"dask_rolling_wrapper","nodeType":"Function","startLoc":6,"text":"def dask_rolling_wrapper(moving_func, a, window, min_count=None, axis=-1):\n \"\"\"Wrapper to apply bottleneck moving window funcs on dask arrays\n \"\"\"\n import dask.array as da\n\n dtype, fill_value = dtypes.maybe_promote(a.dtype)\n a = a.astype(dtype)\n # inputs for overlap\n if axis < 0:\n axis = a.ndim + axis\n depth = {d: 0 for d in range(a.ndim)}\n depth[axis] = (window + 1) // 2\n boundary = {d: fill_value for d in range(a.ndim)}\n # Create overlap array.\n ag = da.overlap.overlap(a, depth=depth, boundary=boundary)\n # apply rolling func\n out = ag.map_blocks(\n moving_func, window, min_count=min_count, axis=axis, dtype=a.dtype\n )\n # trim array\n result = da.overlap.trim_internal(out, depth)\n return result"},{"col":4,"comment":"null","endLoc":152,"header":"def open_store_variable(self, name, 
var)","id":329,"name":"open_store_variable","nodeType":"Function","startLoc":117,"text":"def open_store_variable(self, name, var):\n import h5py\n\n dimensions = var.dimensions\n data = indexing.LazilyOuterIndexedArray(H5NetCDFArrayWrapper(name, self))\n attrs = _read_attributes(var)\n\n # netCDF4 specific encoding\n encoding = {\n \"chunksizes\": var.chunks,\n \"fletcher32\": var.fletcher32,\n \"shuffle\": var.shuffle,\n }\n # Convert h5py-style compression options to NetCDF4-Python\n # style, if possible\n if var.compression == \"gzip\":\n encoding[\"zlib\"] = True\n encoding[\"complevel\"] = var.compression_opts\n elif var.compression is not None:\n encoding[\"compression\"] = var.compression\n encoding[\"compression_opts\"] = var.compression_opts\n\n # save source so __repr__ can detect if it's local or not\n encoding[\"source\"] = self._filename\n encoding[\"original_shape\"] = var.shape\n\n vlen_dtype = h5py.check_dtype(vlen=var.dtype)\n if vlen_dtype is str:\n encoding[\"dtype\"] = str\n elif vlen_dtype is not None: # pragma: no cover\n # xarray doesn't support writing arbitrary vlen dtypes yet.\n pass\n else:\n encoding[\"dtype\"] = var.dtype\n\n return Variable(dimensions, data, attrs, encoding)"},{"attributeType":"null","col":0,"comment":"null","endLoc":31,"id":330,"name":"ny","nodeType":"Attribute","startLoc":31,"text":"ny"},{"attributeType":"null","col":4,"comment":"null","endLoc":31,"id":331,"name":"nx","nodeType":"Attribute","startLoc":31,"text":"nx"},{"attributeType":"null","col":0,"comment":"null","endLoc":32,"id":332,"name":"x","nodeType":"Attribute","startLoc":32,"text":"x"},{"attributeType":"null","col":3,"comment":"null","endLoc":32,"id":333,"name":"y","nodeType":"Attribute","startLoc":32,"text":"y"},{"attributeType":"null","col":0,"comment":"null","endLoc":35,"id":334,"name":"lon","nodeType":"Attribute","startLoc":35,"text":"lon"},{"attributeType":"null","col":5,"comment":"null","endLoc":35,"id":335,"name":"lat","nodeType":"Attribute","startLoc":35,"text":"lat"},{"attributeType":"null","col":0,"comment":"null","endLoc":36,"id":336,"name":"lon","nodeType":"Attribute","startLoc":36,"text":"lon"},{"attributeType":"null","col":0,"comment":"null","endLoc":37,"id":337,"name":"lat","nodeType":"Attribute","startLoc":37,"text":"lat"},{"col":0,"comment":"Dask's equivalence to np.utils.rolling_window\n ","endLoc":97,"header":"def rolling_window(a, axis, window, center, fill_value)","id":338,"name":"rolling_window","nodeType":"Function","startLoc":30,"text":"def rolling_window(a, axis, window, center, fill_value):\n \"\"\"Dask's equivalence to np.utils.rolling_window\n \"\"\"\n import dask.array as da\n\n orig_shape = a.shape\n if axis < 0:\n axis = a.ndim + axis\n depth = {d: 0 for d in range(a.ndim)}\n depth[axis] = int(window / 2)\n # For evenly sized window, we need to crop the first point of each block.\n offset = 1 if window % 2 == 0 else 0\n\n if depth[axis] > min(a.chunks[axis]):\n raise ValueError(\n \"For window size %d, every chunk should be larger than %d, \"\n \"but the smallest chunk size is %d. 
Rechunk your array\\n\"\n \"with a larger chunk size or a chunk size that\\n\"\n \"more evenly divides the shape of your array.\"\n % (window, depth[axis], min(a.chunks[axis]))\n )\n\n # Although da.overlap pads values to boundaries of the array,\n # the size of the generated array is smaller than what we want\n # if center == False.\n if center:\n start = int(window / 2) # 10 -> 5, 9 -> 4\n end = window - 1 - start\n else:\n start, end = window - 1, 0\n pad_size = max(start, end) + offset - depth[axis]\n drop_size = 0\n # pad_size becomes more than 0 when the overlapped array is smaller than\n # needed. In this case, we need to enlarge the original array by padding\n # before overlapping.\n if pad_size > 0:\n if pad_size < depth[axis]:\n # overlapping requires each chunk larger than depth. If pad_size is\n # smaller than the depth, we enlarge this and truncate it later.\n drop_size = depth[axis] - pad_size\n pad_size = depth[axis]\n shape = list(a.shape)\n shape[axis] = pad_size\n chunks = list(a.chunks)\n chunks[axis] = (pad_size,)\n fill_array = da.full(shape, fill_value, dtype=a.dtype, chunks=chunks)\n a = da.concatenate([fill_array, a], axis=axis)\n\n boundary = {d: fill_value for d in range(a.ndim)}\n\n # create overlap arrays\n ag = da.overlap.overlap(a, depth=depth, boundary=boundary)\n\n # apply rolling func\n def func(x, window, axis=-1):\n x = np.asarray(x)\n rolling = nputils._rolling_window(x, window, axis)\n return rolling[(slice(None),) * axis + (slice(offset, None),)]\n\n chunks = list(a.chunks)\n chunks.append(window)\n out = ag.map_blocks(\n func, dtype=a.dtype, new_axis=a.ndim, chunks=chunks, window=window, axis=axis\n )\n\n # crop boundary.\n index = (slice(None),) * axis + (slice(drop_size, drop_size + orig_shape[axis]),)\n return out[index]"},{"attributeType":"null","col":0,"comment":"null","endLoc":42,"id":339,"name":"greyscale","nodeType":"Attribute","startLoc":42,"text":"greyscale"},{"col":4,"comment":"null","endLoc":1966,"header":"def load(self)","id":340,"name":"load","nodeType":"Function","startLoc":1964,"text":"def load(self):\n # data is already loaded into memory for IndexVariable\n return self"},{"col":4,"comment":"null","endLoc":1973,"header":"@Variable.data.setter # type: ignore\n def data(self, data)","id":341,"name":"data","nodeType":"Function","startLoc":1969,"text":"@Variable.data.setter # type: ignore\n def data(self, data):\n Variable.data.fset(self, data)\n if not isinstance(self._data, PandasIndexAdapter):\n self._data = PandasIndexAdapter(self._data)"},{"col":0,"comment":"\n Make an ndarray with a rolling window along axis.\n\n Parameters\n ----------\n a : array_like\n Array to add rolling window to\n axis: int\n axis position along which rolling window will be applied.\n window : int\n Size of rolling window\n\n Returns\n -------\n Array that is a view of the original array with a added dimension\n of size w.\n\n Examples\n --------\n >>> x=np.arange(10).reshape((2,5))\n >>> np.rolling_window(x, 3, axis=-1)\n array([[[0, 1, 2], [1, 2, 3], [2, 3, 4]],\n [[5, 6, 7], [6, 7, 8], [7, 8, 9]]])\n\n Calculate rolling mean of last dimension:\n >>> np.mean(np.rolling_window(x, 3, axis=-1), -1)\n array([[ 1., 2., 3.],\n [ 6., 7., 8.]])\n\n This function is taken from https://github.com/numpy/numpy/pull/31\n but slightly modified to accept axis option.\n ","endLoc":202,"header":"def _rolling_window(a, window, axis=-1)","id":342,"name":"_rolling_window","nodeType":"Function","startLoc":156,"text":"def _rolling_window(a, window, axis=-1):\n \"\"\"\n Make an 
ndarray with a rolling window along axis.\n\n Parameters\n ----------\n a : array_like\n Array to add rolling window to\n axis: int\n axis position along which rolling window will be applied.\n window : int\n Size of rolling window\n\n Returns\n -------\n Array that is a view of the original array with a added dimension\n of size w.\n\n Examples\n --------\n >>> x=np.arange(10).reshape((2,5))\n >>> np.rolling_window(x, 3, axis=-1)\n array([[[0, 1, 2], [1, 2, 3], [2, 3, 4]],\n [[5, 6, 7], [6, 7, 8], [7, 8, 9]]])\n\n Calculate rolling mean of last dimension:\n >>> np.mean(np.rolling_window(x, 3, axis=-1), -1)\n array([[ 1., 2., 3.],\n [ 6., 7., 8.]])\n\n This function is taken from https://github.com/numpy/numpy/pull/31\n but slightly modified to accept axis option.\n \"\"\"\n axis = _validate_axis(a, axis)\n a = np.swapaxes(a, axis, -1)\n\n if window < 1:\n raise ValueError(\"`window` must be at least 1. Given : {}\".format(window))\n if window > a.shape[-1]:\n raise ValueError(\"`window` is too long. Given : {}\".format(window))\n\n shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)\n strides = a.strides + (a.strides[-1],)\n rolling = np.lib.stride_tricks.as_strided(\n a, shape=shape, strides=strides, writeable=False\n )\n return np.swapaxes(rolling, -2, axis)"},{"col":4,"comment":"null","endLoc":1977,"header":"def chunk(self, chunks=None, name=None, lock=False)","id":343,"name":"chunk","nodeType":"Function","startLoc":1975,"text":"def chunk(self, chunks=None, name=None, lock=False):\n # Dummy - do not chunk. This method is invoked e.g. by Dataset.chunk()\n return self.copy(deep=False)"},{"col":4,"comment":"Returns a copy of this object.\n\n `deep` is ignored since data is stored in the form of\n pandas.Index, which is already immutable. Dimensions, attributes\n and encodings are always copied.\n\n Use `data` to create a new object with the same structure as\n original but entirely new data.\n\n Parameters\n ----------\n deep : bool, optional\n Deep is ignored when data is given. Whether the data array is\n loaded into memory and copied onto the new object. Default is True.\n data : array_like, optional\n Data to use in the new object. Must have same shape as original.\n\n Returns\n -------\n object : Variable\n New object with dimensions, attributes, encodings, and optionally\n data copied from original.\n ","endLoc":2062,"header":"def copy(self, deep=True, data=None)","id":344,"name":"copy","nodeType":"Function","startLoc":2028,"text":"def copy(self, deep=True, data=None):\n \"\"\"Returns a copy of this object.\n\n `deep` is ignored since data is stored in the form of\n pandas.Index, which is already immutable. Dimensions, attributes\n and encodings are always copied.\n\n Use `data` to create a new object with the same structure as\n original but entirely new data.\n\n Parameters\n ----------\n deep : bool, optional\n Deep is ignored when data is given. Whether the data array is\n loaded into memory and copied onto the new object. Default is True.\n data : array_like, optional\n Data to use in the new object. 
Must have same shape as original.\n\n Returns\n -------\n object : Variable\n New object with dimensions, attributes, encodings, and optionally\n data copied from original.\n \"\"\"\n if data is None:\n data = self._data.copy(deep=deep)\n else:\n data = as_compatible_data(data)\n if self.shape != data.shape:\n raise ValueError(\n \"Data shape {} must match shape of object {}\".format(\n data.shape, self.shape\n )\n )\n return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True)"},{"col":0,"comment":" Combine two indexers.\n\n Parameters\n ----------\n old_key: ExplicitIndexer\n The first indexer for the original array\n shape: tuple of ints\n Shape of the original array to be indexed by old_key\n new_key:\n The second indexer for indexing original[old_key]\n ","endLoc":793,"header":"def _combine_indexers(old_key, shape, new_key)","id":345,"name":"_combine_indexers","nodeType":"Function","startLoc":768,"text":"def _combine_indexers(old_key, shape, new_key):\n \"\"\" Combine two indexers.\n\n Parameters\n ----------\n old_key: ExplicitIndexer\n The first indexer for the original array\n shape: tuple of ints\n Shape of the original array to be indexed by old_key\n new_key:\n The second indexer for indexing original[old_key]\n \"\"\"\n if not isinstance(old_key, VectorizedIndexer):\n old_key = _outer_to_vectorized_indexer(old_key, shape)\n if len(old_key.tuple) == 0:\n return new_key\n\n new_shape = np.broadcast(*old_key.tuple).shape\n if isinstance(new_key, VectorizedIndexer):\n new_key = _arrayize_vectorized_indexer(new_key, new_shape)\n else:\n new_key = _outer_to_vectorized_indexer(new_key, new_shape)\n\n return VectorizedIndexer(\n tuple(o[new_key.tuple] for o in np.broadcast_arrays(*old_key.tuple))\n )"},{"col":4,"comment":"\n Parameters\n ----------\n array : array_like\n Array like object to index.\n key : ExplicitIndexer, optional\n Array indexer. If provided, it is assumed to already be in\n canonical expanded form.\n ","endLoc":528,"header":"def __init__(self, array, key=None)","id":346,"name":"__init__","nodeType":"Function","startLoc":509,"text":"def __init__(self, array, key=None):\n \"\"\"\n Parameters\n ----------\n array : array_like\n Array like object to index.\n key : ExplicitIndexer, optional\n Array indexer. 
If provided, it is assumed to already be in\n canonical expanded form.\n \"\"\"\n if isinstance(array, type(self)) and key is None:\n # unwrap\n key = array.key\n array = array.array\n\n if key is None:\n key = BasicIndexer((slice(None),) * array.ndim)\n\n self.array = as_indexable(array)\n self.key = key"},{"col":0,"comment":"null","endLoc":22,"header":"def _validate_axis(data, axis)","id":347,"name":"_validate_axis","nodeType":"Function","startLoc":16,"text":"def _validate_axis(data, axis):\n ndim = data.ndim\n if not -ndim <= axis < ndim:\n raise IndexError(\"axis %r out of bounds [-%r, %r)\" % (axis, ndim, ndim))\n if axis < 0:\n axis += ndim\n return axis"},{"col":0,"comment":"Convert an OuterIndexer into an vectorized indexer.\n\n Parameters\n ----------\n key : Outer/Basic Indexer\n An indexer to convert.\n shape : tuple\n Shape of the array subject to the indexing.\n\n Returns\n -------\n VectorizedIndexer\n Tuple suitable for use to index a NumPy array with vectorized indexing.\n Each element is an array: broadcasting them together gives the shape\n of the result.\n ","endLoc":741,"header":"def _outer_to_vectorized_indexer(key, shape)","id":348,"name":"_outer_to_vectorized_indexer","nodeType":"Function","startLoc":709,"text":"def _outer_to_vectorized_indexer(key, shape):\n \"\"\"Convert an OuterIndexer into an vectorized indexer.\n\n Parameters\n ----------\n key : Outer/Basic Indexer\n An indexer to convert.\n shape : tuple\n Shape of the array subject to the indexing.\n\n Returns\n -------\n VectorizedIndexer\n Tuple suitable for use to index a NumPy array with vectorized indexing.\n Each element is an array: broadcasting them together gives the shape\n of the result.\n \"\"\"\n key = key.tuple\n\n n_dim = len([k for k in key if not isinstance(k, integer_types)])\n i_dim = 0\n new_key = []\n for k, size in zip(key, shape):\n if isinstance(k, integer_types):\n new_key.append(np.array(k).reshape((1,) * n_dim))\n else: # np.ndarray or slice\n if isinstance(k, slice):\n k = np.arange(*k.indices(size))\n assert k.dtype.kind in {\"i\", \"u\"}\n shape = [(1,) * i_dim + (k.size,) + (1,) * (n_dim - i_dim - 1)]\n new_key.append(k.reshape(*shape))\n i_dim += 1\n return VectorizedIndexer(tuple(new_key))"},{"attributeType":"null","col":16,"comment":"null","endLoc":1,"id":349,"name":"np","nodeType":"Attribute","startLoc":1,"text":"np"},{"id":350,"name":"doc/examples/_code","nodeType":"Package"},{"fileName":"weather_data_setup.py","filePath":"doc/examples/_code","id":351,"nodeType":"File","text":"import numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nimport xarray as xr\n\nnp.random.seed(123)\n\ntimes = pd.date_range(\"2000-01-01\", \"2001-12-31\", name=\"time\")\nannual_cycle = np.sin(2 * np.pi * (times.dayofyear.values / 365.25 - 0.28))\n\nbase = 10 + 15 * annual_cycle.reshape(-1, 1)\ntmin_values = base + 3 * np.random.randn(annual_cycle.size, 3)\ntmax_values = base + 10 + 3 * np.random.randn(annual_cycle.size, 3)\n\nds = xr.Dataset(\n {\n \"tmin\": ((\"time\", \"location\"), tmin_values),\n \"tmax\": ((\"time\", \"location\"), tmax_values),\n },\n {\"time\": times, \"location\": [\"IA\", \"IN\", \"IL\"]},\n)\n"},{"attributeType":"null","col":16,"comment":"null","endLoc":1,"id":352,"name":"np","nodeType":"Attribute","startLoc":1,"text":"np"},{"col":0,"comment":"\n This function always returns a ExplicitlyIndexed subclass,\n so that the vectorized indexing is always possible with the returned\n object.\n ","endLoc":706,"header":"def 
as_indexable(array)","id":353,"name":"as_indexable","nodeType":"Function","startLoc":689,"text":"def as_indexable(array):\n \"\"\"\n This function always returns a ExplicitlyIndexed subclass,\n so that the vectorized indexing is always possible with the returned\n object.\n \"\"\"\n if isinstance(array, ExplicitlyIndexed):\n return array\n if isinstance(array, np.ndarray):\n return NumpyIndexingAdapter(array)\n if isinstance(array, pd.Index):\n return PandasIndexAdapter(array)\n if isinstance(array, dask_array_type):\n return DaskIndexingAdapter(array)\n if hasattr(array, \"__array_function__\"):\n return NdArrayLikeIndexingAdapter(array)\n\n raise TypeError(\"Invalid array type: {}\".format(type(array)))"},{"attributeType":"null","col":0,"comment":"null","endLoc":45,"id":354,"name":"ax","nodeType":"Attribute","startLoc":45,"text":"ax"},{"attributeType":"null","col":17,"comment":"null","endLoc":2,"id":355,"name":"pd","nodeType":"Attribute","startLoc":2,"text":"pd"},{"attributeType":"null","col":18,"comment":"null","endLoc":3,"id":356,"name":"sns","nodeType":"Attribute","startLoc":3,"text":"sns"},{"col":0,"comment":"","endLoc":17,"header":"plot_rasterio.py#","id":357,"name":"","nodeType":"Function","startLoc":2,"text":"\"\"\"\n.. _recipes.rasterio:\n\n=================================\nParsing rasterio's geocoordinates\n=================================\n\n\nConverting a projection's cartesian coordinates into 2D longitudes and\nlatitudes.\n\nThese new coordinates might be handy for plotting and indexing, but it should\nbe kept in mind that a grid which is regular in projection coordinates will\nlikely be irregular in lon/lat. It is often recommended to work in the data's\noriginal map projection (see :ref:`recipes.rasterio_rgb`).\n\"\"\"\n\nurl = \"https://github.com/mapbox/rasterio/raw/master/tests/data/RGB.byte.tif\"\n\nda = xr.open_rasterio(url)\n\nny, nx = len(da[\"y\"]), len(da[\"x\"])\n\nx, y = np.meshgrid(da[\"x\"], da[\"y\"])\n\nlon, lat = transform(da.crs, {\"init\": \"EPSG:4326\"}, x.flatten(), y.flatten())\n\nlon = np.asarray(lon).reshape((ny, nx))\n\nlat = np.asarray(lat).reshape((ny, nx))\n\nda.coords[\"lon\"] = ((\"y\", \"x\"), lon)\n\nda.coords[\"lat\"] = ((\"y\", \"x\"), lat)\n\ngreyscale = da.mean(dim=\"band\")\n\nax = plt.subplot(projection=ccrs.PlateCarree())\n\ngreyscale.plot(\n ax=ax,\n x=\"lon\",\n y=\"lat\",\n transform=ccrs.PlateCarree(),\n cmap=\"Greys_r\",\n add_colorbar=False,\n)\n\nax.coastlines(\"10m\", color=\"r\")\n\nplt.show()"},{"attributeType":"null","col":17,"comment":"null","endLoc":5,"id":358,"name":"xr","nodeType":"Attribute","startLoc":5,"text":"xr"},{"col":0,"comment":"Open a file with rasterio (experimental).\n\n This should work with any file that rasterio can open (most often:\n geoTIFF). The x and y coordinates are generated automatically from the\n file's geoinformation, shifted to the center of each pixel (see\n `\"PixelIsArea\" Raster Space\n `_\n for more information).\n\n You can generate 2D coordinates from the file's attributes with::\n\n from affine import Affine\n da = xr.open_rasterio('path_to_file.tif')\n transform = Affine.from_gdal(*da.attrs['transform'])\n nx, ny = da.sizes['x'], da.sizes['y']\n x, y = np.meshgrid(np.arange(nx)+0.5, np.arange(ny)+0.5) * transform\n\n\n Parameters\n ----------\n filename : str, rasterio.DatasetReader, or rasterio.WarpedVRT\n Path to the file to open. 
Or already open rasterio dataset.\n parse_coordinates : bool, optional\n Whether to parse the x and y coordinates out of the file's\n ``transform`` attribute or not. The default is to automatically\n parse the coordinates only if they are rectilinear (1D).\n It can be useful to set ``parse_coordinates=False``\n if your files are very large or if you don't need the coordinates.\n chunks : int, tuple or dict, optional\n Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or\n ``{'x': 5, 'y': 5}``. If chunks is provided, it used to load the new\n DataArray into a dask array.\n cache : bool, optional\n If True, cache data loaded from the underlying datastore in memory as\n NumPy arrays when accessed to avoid reading from the underlying data-\n store multiple times. Defaults to True unless you specify the `chunks`\n argument to use dask, in which case it defaults to False.\n lock : False, True or threading.Lock, optional\n If chunks is provided, this argument is passed on to\n :py:func:`dask.array.from_array`. By default, a global lock is\n used to avoid issues with concurrent access to the same file when using\n dask's multithreaded backend.\n\n Returns\n -------\n data : DataArray\n The newly created DataArray.\n ","endLoc":363,"header":"def open_rasterio(filename, parse_coordinates=None, chunks=None, cache=None, lock=None)","id":359,"name":"open_rasterio","nodeType":"Function","startLoc":165,"text":"def open_rasterio(filename, parse_coordinates=None, chunks=None, cache=None, lock=None):\n \"\"\"Open a file with rasterio (experimental).\n\n This should work with any file that rasterio can open (most often:\n geoTIFF). The x and y coordinates are generated automatically from the\n file's geoinformation, shifted to the center of each pixel (see\n `\"PixelIsArea\" Raster Space\n `_\n for more information).\n\n You can generate 2D coordinates from the file's attributes with::\n\n from affine import Affine\n da = xr.open_rasterio('path_to_file.tif')\n transform = Affine.from_gdal(*da.attrs['transform'])\n nx, ny = da.sizes['x'], da.sizes['y']\n x, y = np.meshgrid(np.arange(nx)+0.5, np.arange(ny)+0.5) * transform\n\n\n Parameters\n ----------\n filename : str, rasterio.DatasetReader, or rasterio.WarpedVRT\n Path to the file to open. Or already open rasterio dataset.\n parse_coordinates : bool, optional\n Whether to parse the x and y coordinates out of the file's\n ``transform`` attribute or not. The default is to automatically\n parse the coordinates only if they are rectilinear (1D).\n It can be useful to set ``parse_coordinates=False``\n if your files are very large or if you don't need the coordinates.\n chunks : int, tuple or dict, optional\n Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or\n ``{'x': 5, 'y': 5}``. If chunks is provided, it used to load the new\n DataArray into a dask array.\n cache : bool, optional\n If True, cache data loaded from the underlying datastore in memory as\n NumPy arrays when accessed to avoid reading from the underlying data-\n store multiple times. Defaults to True unless you specify the `chunks`\n argument to use dask, in which case it defaults to False.\n lock : False, True or threading.Lock, optional\n If chunks is provided, this argument is passed on to\n :py:func:`dask.array.from_array`. 
By default, a global lock is\n used to avoid issues with concurrent access to the same file when using\n dask's multithreaded backend.\n\n Returns\n -------\n data : DataArray\n The newly created DataArray.\n \"\"\"\n import rasterio\n from rasterio.vrt import WarpedVRT\n\n vrt_params = None\n if isinstance(filename, rasterio.io.DatasetReader):\n filename = filename.name\n elif isinstance(filename, rasterio.vrt.WarpedVRT):\n vrt = filename\n filename = vrt.src_dataset.name\n vrt_params = dict(\n crs=vrt.crs.to_string(),\n resampling=vrt.resampling,\n src_nodata=vrt.src_nodata,\n dst_nodata=vrt.dst_nodata,\n tolerance=vrt.tolerance,\n transform=vrt.transform,\n width=vrt.width,\n height=vrt.height,\n warp_extras=vrt.warp_extras,\n )\n\n if lock is None:\n lock = RASTERIO_LOCK\n\n manager = CachingFileManager(rasterio.open, filename, lock=lock, mode=\"r\")\n riods = manager.acquire()\n if vrt_params is not None:\n riods = WarpedVRT(riods, **vrt_params)\n\n if cache is None:\n cache = chunks is None\n\n coords = {}\n\n # Get bands\n if riods.count < 1:\n raise ValueError(\"Unknown dims\")\n coords[\"band\"] = np.asarray(riods.indexes)\n\n # Get coordinates\n if riods.transform.is_rectilinear:\n # 1d coordinates\n parse = True if parse_coordinates is None else parse_coordinates\n if parse:\n nx, ny = riods.width, riods.height\n # xarray coordinates are pixel centered\n x, _ = (np.arange(nx) + 0.5, np.zeros(nx) + 0.5) * riods.transform\n _, y = (np.zeros(ny) + 0.5, np.arange(ny) + 0.5) * riods.transform\n coords[\"y\"] = y\n coords[\"x\"] = x\n else:\n # 2d coordinates\n parse = False if (parse_coordinates is None) else parse_coordinates\n if parse:\n warnings.warn(\n \"The file coordinates' transformation isn't \"\n \"rectilinear: xarray won't parse the coordinates \"\n \"in this case. Set `parse_coordinates=False` to \"\n \"suppress this warning.\",\n RuntimeWarning,\n stacklevel=3,\n )\n\n # Attributes\n attrs = {}\n # Affine transformation matrix (always available)\n # This describes coefficients mapping pixel coordinates to CRS\n # For serialization store as tuple of 6 floats, the last row being\n # always (0, 0, 1) per definition (see\n # https://github.com/sgillies/affine)\n attrs[\"transform\"] = tuple(riods.transform)[:6]\n if hasattr(riods, \"crs\") and riods.crs:\n # CRS is a dict-like object specific to rasterio\n # If CRS is not None, we convert it back to a PROJ4 string using\n # rasterio itself\n try:\n attrs[\"crs\"] = riods.crs.to_proj4()\n except AttributeError:\n attrs[\"crs\"] = riods.crs.to_string()\n if hasattr(riods, \"res\"):\n # (width, height) tuple of pixels in units of CRS\n attrs[\"res\"] = riods.res\n if hasattr(riods, \"is_tiled\"):\n # Is the TIF tiled? 
(bool)\n # We cast it to an int for netCDF compatibility\n attrs[\"is_tiled\"] = np.uint8(riods.is_tiled)\n if hasattr(riods, \"nodatavals\"):\n # The nodata values for the raster bands\n attrs[\"nodatavals\"] = tuple(\n np.nan if nodataval is None else nodataval for nodataval in riods.nodatavals\n )\n if hasattr(riods, \"scales\"):\n # The scale values for the raster bands\n attrs[\"scales\"] = riods.scales\n if hasattr(riods, \"offsets\"):\n # The offset values for the raster bands\n attrs[\"offsets\"] = riods.offsets\n if hasattr(riods, \"descriptions\") and any(riods.descriptions):\n # Descriptions for each dataset band\n attrs[\"descriptions\"] = riods.descriptions\n if hasattr(riods, \"units\") and any(riods.units):\n # A list of units string for each dataset band\n attrs[\"units\"] = riods.units\n\n # Parse extra metadata from tags, if supported\n parsers = {\"ENVI\": _parse_envi, \"GTiff\": lambda m: m}\n\n driver = riods.driver\n if driver in parsers:\n if driver == \"GTiff\":\n meta = parsers[driver](riods.tags())\n else:\n meta = parsers[driver](riods.tags(ns=driver))\n\n for k, v in meta.items():\n # Add values as coordinates if they match the band count,\n # as attributes otherwise\n if isinstance(v, (list, np.ndarray)) and len(v) == riods.count:\n coords[k] = (\"band\", np.asarray(v))\n else:\n attrs[k] = v\n\n data = indexing.LazilyOuterIndexedArray(\n RasterioArrayWrapper(manager, lock, vrt_params)\n )\n\n # this lets you write arrays loaded with rasterio\n data = indexing.CopyOnWriteArray(data)\n if cache and chunks is None:\n data = indexing.MemoryCachedArray(data)\n\n result = DataArray(data=data, dims=(\"band\", \"y\", \"x\"), coords=coords, attrs=attrs)\n\n if chunks is not None:\n from dask.base import tokenize\n\n # augment the token with the file modification time\n try:\n mtime = os.path.getmtime(filename)\n except OSError:\n # the filename is probably an s3 bucket rather than a regular file\n mtime = None\n token = tokenize(filename, mtime, chunks)\n name_prefix = \"open_rasterio-%s\" % token\n result = result.chunk(chunks, name_prefix=name_prefix, token=token)\n\n # Make the file closeable\n result._file_obj = manager\n\n return result"},{"attributeType":"null","col":0,"comment":"null","endLoc":9,"id":360,"name":"times","nodeType":"Attribute","startLoc":9,"text":"times"},{"attributeType":"null","col":0,"comment":"null","endLoc":10,"id":361,"name":"annual_cycle","nodeType":"Attribute","startLoc":10,"text":"annual_cycle"},{"attributeType":"null","col":0,"comment":"null","endLoc":12,"id":362,"name":"base","nodeType":"Attribute","startLoc":12,"text":"base"},{"attributeType":"null","col":0,"comment":"null","endLoc":13,"id":363,"name":"tmin_values","nodeType":"Attribute","startLoc":13,"text":"tmin_values"},{"col":4,"comment":"null","endLoc":1984,"header":"def _finalize_indexing_result(self, dims, data)","id":364,"name":"_finalize_indexing_result","nodeType":"Function","startLoc":1979,"text":"def _finalize_indexing_result(self, dims, data):\n if getattr(data, \"ndim\", 0) != 1:\n # returns Variable rather than IndexVariable if multi-dimensional\n return Variable(dims, data, self._attrs, self._encoding)\n else:\n return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)"},{"attributeType":"null","col":0,"comment":"null","endLoc":14,"id":365,"name":"tmax_values","nodeType":"Attribute","startLoc":14,"text":"tmax_values"},{"col":4,"comment":"null","endLoc":1237,"header":"def __init__(self, 
array)","id":366,"name":"__init__","nodeType":"Function","startLoc":1230,"text":"def __init__(self, array):\n # In NumpyIndexingAdapter we only allow to store bare np.ndarray\n if not isinstance(array, np.ndarray):\n raise TypeError(\n \"NumpyIndexingAdapter only wraps np.ndarray. \"\n \"Trying to wrap {}\".format(type(array))\n )\n self.array = array"},{"attributeType":"Dataset","col":0,"comment":"null","endLoc":16,"id":367,"name":"ds","nodeType":"Attribute","startLoc":16,"text":"ds"},{"col":0,"comment":" Return an identical vindex but slices are replaced by arrays ","endLoc":1086,"header":"def _arrayize_vectorized_indexer(indexer, shape)","id":368,"name":"_arrayize_vectorized_indexer","nodeType":"Function","startLoc":1069,"text":"def _arrayize_vectorized_indexer(indexer, shape):\n \"\"\" Return an identical vindex but slices are replaced by arrays \"\"\"\n slices = [v for v in indexer.tuple if isinstance(v, slice)]\n if len(slices) == 0:\n return indexer\n\n arrays = [v for v in indexer.tuple if isinstance(v, np.ndarray)]\n n_dim = arrays[0].ndim if len(arrays) > 0 else 0\n i_dim = 0\n new_key = []\n for v, size in zip(indexer.tuple, shape):\n if isinstance(v, np.ndarray):\n new_key.append(np.reshape(v, v.shape + (1,) * len(slices)))\n else: # slice\n shape = (1,) * (n_dim + i_dim) + (-1,) + (1,) * (len(slices) - i_dim - 1)\n new_key.append(np.arange(*v.indices(size)).reshape(shape))\n i_dim += 1\n return VectorizedIndexer(tuple(new_key))"},{"col":4,"comment":"null","endLoc":1987,"header":"def __setitem__(self, key, value)","id":369,"name":"__setitem__","nodeType":"Function","startLoc":1986,"text":"def __setitem__(self, key, value):\n raise TypeError(\"%s values cannot be modified\" % type(self).__name__)"},{"col":4,"comment":"Specialized version of Variable.concat for IndexVariable objects.\n\n This exists because we want to avoid converting Index objects to NumPy\n arrays, if possible.\n ","endLoc":2026,"header":"@classmethod\n def concat(cls, variables, dim=\"concat_dim\", positions=None, shortcut=False)","id":370,"name":"concat","nodeType":"Function","startLoc":1989,"text":"@classmethod\n def concat(cls, variables, dim=\"concat_dim\", positions=None, shortcut=False):\n \"\"\"Specialized version of Variable.concat for IndexVariable objects.\n\n This exists because we want to avoid converting Index objects to NumPy\n arrays, if possible.\n \"\"\"\n if not isinstance(dim, str):\n dim, = dim.dims\n\n variables = list(variables)\n first_var = variables[0]\n\n if any(not isinstance(v, cls) for v in variables):\n raise TypeError(\n \"IndexVariable.concat requires that all input \"\n \"variables be IndexVariable objects\"\n )\n\n indexes = [v._data.array for v in variables]\n\n if not indexes:\n data = []\n else:\n data = indexes[0].append(indexes[1:])\n\n if positions is not None:\n indices = nputils.inverse_permutation(np.concatenate(positions))\n data = data.take(indices)\n\n attrs = dict(first_var.attrs)\n if not shortcut:\n for var in variables:\n if var.dims != first_var.dims:\n raise ValueError(\"inconsistent dimensions\")\n utils.remove_incompatible_items(attrs, var.attrs)\n\n return cls(first_var.dims, data, attrs)"},{"col":4,"comment":" This adapter is created in Variable.__getitem__ in\n Variable._broadcast_indexes.\n ","endLoc":1300,"header":"def __init__(self, array)","id":371,"name":"__init__","nodeType":"Function","startLoc":1296,"text":"def __init__(self, array):\n \"\"\" This adapter is created in Variable.__getitem__ in\n Variable._broadcast_indexes.\n \"\"\"\n self.array = 
array"},{"col":4,"comment":"null","endLoc":1288,"header":"def __init__(self, array)","id":372,"name":"__init__","nodeType":"Function","startLoc":1282,"text":"def __init__(self, array):\n if not hasattr(array, \"__array_function__\"):\n raise TypeError(\n \"NdArrayLikeIndexingAdapter must wrap an object that \"\n \"implements the __array_function__ protocol\"\n )\n self.array = array"},{"col":0,"comment":"Return indices for an inverse permutation.\n\n Parameters\n ----------\n indices : 1D np.ndarray with dtype=int\n Integer positions to assign elements to.\n\n Returns\n -------\n inverse_permutation : 1D np.ndarray with dtype=int\n Integer indices to take from the original array to create the\n permutation.\n ","endLoc":61,"header":"def inverse_permutation(indices)","id":373,"name":"inverse_permutation","nodeType":"Function","startLoc":44,"text":"def inverse_permutation(indices):\n \"\"\"Return indices for an inverse permutation.\n\n Parameters\n ----------\n indices : 1D np.ndarray with dtype=int\n Integer positions to assign elements to.\n\n Returns\n -------\n inverse_permutation : 1D np.ndarray with dtype=int\n Integer indices to take from the original array to create the\n permutation.\n \"\"\"\n # use intp instead of int64 because of windows :(\n inverse_permutation = np.empty(len(indices), dtype=np.intp)\n inverse_permutation[indices] = np.arange(len(indices), dtype=np.intp)\n return inverse_permutation"},{"col":0,"comment":"null","endLoc":56,"header":"def _read_attributes(h5netcdf_var)","id":374,"name":"_read_attributes","nodeType":"Function","startLoc":47,"text":"def _read_attributes(h5netcdf_var):\n # GH451\n # to ensure conventions decoding works properly on Python 3, decode all\n # bytes attributes to strings\n attrs = {}\n for k, v in h5netcdf_var.attrs.items():\n if k not in [\"_FillValue\", \"missing_value\"]:\n v = maybe_decode_bytes(v)\n attrs[k] = v\n return attrs"},{"col":0,"comment":"null","endLoc":44,"header":"def maybe_decode_bytes(txt)","id":375,"name":"maybe_decode_bytes","nodeType":"Function","startLoc":40,"text":"def maybe_decode_bytes(txt):\n if isinstance(txt, bytes):\n return txt.decode(\"utf-8\")\n else:\n return txt"},{"col":4,"comment":"\n Parameters\n ----------\n data : array_like\n Values for this array. Must be an ``numpy.ndarray``, ndarray like,\n or castable to an ``ndarray``. If a self-described xarray or pandas\n object, attempts are made to use this array's metadata to fill in\n other unspecified arguments. A view of the array's data is used\n instead of a copy if possible.\n coords : sequence or dict of array_like objects, optional\n Coordinates (tick labels) to use for indexing along each dimension.\n The following notations are accepted:\n\n - mapping {dimension name: array-like}\n - sequence of tuples that are valid arguments for xarray.Variable()\n - (dims, data)\n - (dims, data, attrs)\n - (dims, data, attrs, encoding)\n\n Additionally, it is possible to define a coord whose name\n does not match the dimension name, or a coord based on multiple\n dimensions, with one of the following notations:\n\n - mapping {coord name: DataArray}\n - mapping {coord name: Variable}\n - mapping {coord name: (dimension name, array-like)}\n - mapping {coord name: (tuple of dimension names, array-like)}\n\n dims : hashable or sequence of hashable, optional\n Name(s) of the data dimension(s). Must be either a hashable (only\n for 1D data) or a sequence of hashables with length equal to the\n number of dimensions. 
If this argument is omitted, dimension names\n are taken from ``coords`` (if possible) and otherwise default to\n ``['dim_0', ... 'dim_n']``.\n name : str or None, optional\n Name of this array.\n attrs : dict_like or None, optional\n Attributes to assign to the new instance. By default, an empty\n attribute dictionary is initialized.\n ","endLoc":384,"header":"def __init__(\n self,\n data: Any = dtypes.NA,\n coords: Union[Sequence[Tuple], Mapping[Hashable, Any], None] = None,\n dims: Union[Hashable, Sequence[Hashable], None] = None,\n name: Hashable = None,\n attrs: Mapping = None,\n # deprecated parameters\n encoding=None,\n # internal parameters\n indexes: Dict[Hashable, pd.Index] = None,\n fastpath: bool = False,\n )","id":376,"name":"__init__","nodeType":"Function","startLoc":277,"text":"def __init__(\n self,\n data: Any = dtypes.NA,\n coords: Union[Sequence[Tuple], Mapping[Hashable, Any], None] = None,\n dims: Union[Hashable, Sequence[Hashable], None] = None,\n name: Hashable = None,\n attrs: Mapping = None,\n # deprecated parameters\n encoding=None,\n # internal parameters\n indexes: Dict[Hashable, pd.Index] = None,\n fastpath: bool = False,\n ):\n \"\"\"\n Parameters\n ----------\n data : array_like\n Values for this array. Must be an ``numpy.ndarray``, ndarray like,\n or castable to an ``ndarray``. If a self-described xarray or pandas\n object, attempts are made to use this array's metadata to fill in\n other unspecified arguments. A view of the array's data is used\n instead of a copy if possible.\n coords : sequence or dict of array_like objects, optional\n Coordinates (tick labels) to use for indexing along each dimension.\n The following notations are accepted:\n\n - mapping {dimension name: array-like}\n - sequence of tuples that are valid arguments for xarray.Variable()\n - (dims, data)\n - (dims, data, attrs)\n - (dims, data, attrs, encoding)\n\n Additionally, it is possible to define a coord whose name\n does not match the dimension name, or a coord based on multiple\n dimensions, with one of the following notations:\n\n - mapping {coord name: DataArray}\n - mapping {coord name: Variable}\n - mapping {coord name: (dimension name, array-like)}\n - mapping {coord name: (tuple of dimension names, array-like)}\n\n dims : hashable or sequence of hashable, optional\n Name(s) of the data dimension(s). Must be either a hashable (only\n for 1D data) or a sequence of hashables with length equal to the\n number of dimensions. If this argument is omitted, dimension names\n are taken from ``coords`` (if possible) and otherwise default to\n ``['dim_0', ... 'dim_n']``.\n name : str or None, optional\n Name of this array.\n attrs : dict_like or None, optional\n Attributes to assign to the new instance. By default, an empty\n attribute dictionary is initialized.\n \"\"\"\n if encoding is not None:\n warnings.warn(\n \"The `encoding` argument to `DataArray` is deprecated, and . \"\n \"will be removed in 0.15. 
\"\n \"Instead, specify the encoding when writing to disk or \"\n \"set the `encoding` attribute directly.\",\n FutureWarning,\n stacklevel=2,\n )\n if fastpath:\n variable = data\n assert dims is None\n assert attrs is None\n assert encoding is None\n else:\n # try to fill in arguments from data if they weren't supplied\n if coords is None:\n\n if isinstance(data, DataArray):\n coords = data.coords\n elif isinstance(data, pd.Series):\n coords = [data.index]\n elif isinstance(data, pd.DataFrame):\n coords = [data.index, data.columns]\n elif isinstance(data, (pd.Index, IndexVariable)):\n coords = [data]\n elif isinstance(data, pdcompat.Panel):\n coords = [data.items, data.major_axis, data.minor_axis]\n\n if dims is None:\n dims = getattr(data, \"dims\", getattr(coords, \"dims\", None))\n if name is None:\n name = getattr(data, \"name\", None)\n if attrs is None:\n attrs = getattr(data, \"attrs\", None)\n if encoding is None:\n encoding = getattr(data, \"encoding\", None)\n\n data = _check_data_shape(data, coords, dims)\n data = as_compatible_data(data)\n coords, dims = _infer_coords_and_dims(data.shape, coords, dims)\n variable = Variable(dims, data, attrs, encoding, fastpath=True)\n\n # These fully describe a DataArray\n self._variable = variable\n assert isinstance(coords, dict)\n self._coords = coords\n self._name = name\n self._accessors = None\n\n # TODO(shoyer): document this argument, once it becomes part of the\n # public interface.\n self._indexes = indexes\n\n self._file_obj = None"},{"col":0,"comment":"Remove incompatible items from the first dictionary in-place.\n\n Items are retained if their keys are found in both dictionaries and the\n values are compatible.\n\n Parameters\n ----------\n first_dict, second_dict : dict-like\n Mappings to merge.\n compat : function, optional\n Binary operator to determine if two values are compatible. By default,\n checks for equivalence.\n ","endLoc":232,"header":"def remove_incompatible_items(\n first_dict: MutableMapping[K, V],\n second_dict: Mapping[K, V],\n compat: Callable[[V, V], bool] = equivalent,\n) -> None","id":377,"name":"remove_incompatible_items","nodeType":"Function","startLoc":212,"text":"def remove_incompatible_items(\n first_dict: MutableMapping[K, V],\n second_dict: Mapping[K, V],\n compat: Callable[[V, V], bool] = equivalent,\n) -> None:\n \"\"\"Remove incompatible items from the first dictionary in-place.\n\n Items are retained if their keys are found in both dictionaries and the\n values are compatible.\n\n Parameters\n ----------\n first_dict, second_dict : dict-like\n Mappings to merge.\n compat : function, optional\n Binary operator to determine if two values are compatible. 
By default,\n checks for equivalence.\n \"\"\"\n for k in list(first_dict):\n if k not in second_dict or not compat(first_dict[k], second_dict[k]):\n del first_dict[k]"},{"col":4,"comment":"null","endLoc":40,"header":"def _getitem(self, key)","id":378,"name":"_getitem","nodeType":"Function","startLoc":33,"text":"def _getitem(self, key):\n with self.datastore.lock:\n array = self.get_array(needs_lock=False)\n\n if key == () and self.ndim == 0:\n return array.get_value()\n\n return array[key]"},{"col":4,"comment":"null","endLoc":2074,"header":"def equals(self, other, equiv=None)","id":379,"name":"equals","nodeType":"Function","startLoc":2064,"text":"def equals(self, other, equiv=None):\n # if equiv is specified, super up\n if equiv is not None:\n return super().equals(other, equiv)\n\n # otherwise use the native index equals, rather than looking at _data\n other = getattr(other, \"variable\", other)\n try:\n return self.dims == other.dims and self._data_equals(other)\n except (TypeError, AttributeError):\n return False"},{"col":4,"comment":"null","endLoc":157,"header":"def get_variables(self)","id":381,"name":"get_variables","nodeType":"Function","startLoc":154,"text":"def get_variables(self):\n return FrozenDict(\n (k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()\n )"},{"col":0,"comment":"","endLoc":1,"header":"weather_data_setup.py#","id":382,"name":"","nodeType":"Function","startLoc":1,"text":"np.random.seed(123)\n\ntimes = pd.date_range(\"2000-01-01\", \"2001-12-31\", name=\"time\")\n\nannual_cycle = np.sin(2 * np.pi * (times.dayofyear.values / 365.25 - 0.28))\n\nbase = 10 + 15 * annual_cycle.reshape(-1, 1)\n\ntmin_values = base + 3 * np.random.randn(annual_cycle.size, 3)\n\ntmax_values = base + 10 + 3 * np.random.randn(annual_cycle.size, 3)\n\nds = xr.Dataset(\n {\n \"tmin\": ((\"time\", \"location\"), tmin_values),\n \"tmax\": ((\"time\", \"location\"), tmax_values),\n },\n {\"time\": times, \"location\": [\"IA\", \"IN\", \"IL\"]},\n)"},{"col":4,"comment":"To load data from a file or file-like object, use the `open_dataset`\n function.\n\n Parameters\n ----------\n data_vars : dict-like, optional\n A mapping from variable names to :py:class:`~xarray.DataArray`\n objects, :py:class:`~xarray.Variable` objects or to tuples of the\n form ``(dims, data[, attrs])`` which can be used as arguments to\n create a new ``Variable``. 
Each dimension must have the same length\n in all variables in which it appears.\n\n The following notations are accepted:\n\n - mapping {var name: DataArray}\n - mapping {var name: Variable}\n - mapping {var name: (dimension name, array-like)}\n - mapping {var name: (tuple of dimension names, array-like)}\n - mapping {dimension name: array-like}\n (it will be automatically moved to coords, see below)\n\n Each dimension must have the same length in all variables in which\n it appears.\n coords : dict-like, optional\n Another mapping in similar form as the `data_vars` argument,\n except the each item is saved on the dataset as a \"coordinate\".\n These variables have an associated meaning: they describe\n constant/fixed/independent quantities, unlike the\n varying/measured/dependent quantities that belong in `variables`.\n Coordinates values may be given by 1-dimensional arrays or scalars,\n in which case `dims` do not need to be supplied: 1D arrays will be\n assumed to give index values along the dimension with the same\n name.\n\n The following notations are accepted:\n\n - mapping {coord name: DataArray}\n - mapping {coord name: Variable}\n - mapping {coord name: (dimension name, array-like)}\n - mapping {coord name: (tuple of dimension names, array-like)}\n - mapping {dimension name: array-like}\n (the dimension name is implicitly set to be the same as the coord name)\n\n The last notation implies that the coord name is the same as the\n dimension name.\n\n attrs : dict-like, optional\n Global attributes to save on this dataset.\n compat : deprecated\n ","endLoc":534,"header":"def __init__(\n self,\n # could make a VariableArgs to use more generally, and refine these\n # categories\n data_vars: Mapping[Hashable, Any] = None,\n coords: Mapping[Hashable, Any] = None,\n attrs: Mapping[Hashable, Any] = None,\n compat=None,\n )","id":383,"name":"__init__","nodeType":"Function","startLoc":436,"text":"def __init__(\n self,\n # could make a VariableArgs to use more generally, and refine these\n # categories\n data_vars: Mapping[Hashable, Any] = None,\n coords: Mapping[Hashable, Any] = None,\n attrs: Mapping[Hashable, Any] = None,\n compat=None,\n ):\n \"\"\"To load data from a file or file-like object, use the `open_dataset`\n function.\n\n Parameters\n ----------\n data_vars : dict-like, optional\n A mapping from variable names to :py:class:`~xarray.DataArray`\n objects, :py:class:`~xarray.Variable` objects or to tuples of the\n form ``(dims, data[, attrs])`` which can be used as arguments to\n create a new ``Variable``. 
Each dimension must have the same length\n in all variables in which it appears.\n\n The following notations are accepted:\n\n - mapping {var name: DataArray}\n - mapping {var name: Variable}\n - mapping {var name: (dimension name, array-like)}\n - mapping {var name: (tuple of dimension names, array-like)}\n - mapping {dimension name: array-like}\n (it will be automatically moved to coords, see below)\n\n Each dimension must have the same length in all variables in which\n it appears.\n coords : dict-like, optional\n Another mapping in similar form as the `data_vars` argument,\n except the each item is saved on the dataset as a \"coordinate\".\n These variables have an associated meaning: they describe\n constant/fixed/independent quantities, unlike the\n varying/measured/dependent quantities that belong in `variables`.\n Coordinates values may be given by 1-dimensional arrays or scalars,\n in which case `dims` do not need to be supplied: 1D arrays will be\n assumed to give index values along the dimension with the same\n name.\n\n The following notations are accepted:\n\n - mapping {coord name: DataArray}\n - mapping {coord name: Variable}\n - mapping {coord name: (dimension name, array-like)}\n - mapping {coord name: (tuple of dimension names, array-like)}\n - mapping {dimension name: array-like}\n (the dimension name is implicitly set to be the same as the coord name)\n\n The last notation implies that the coord name is the same as the\n dimension name.\n\n attrs : dict-like, optional\n Global attributes to save on this dataset.\n compat : deprecated\n \"\"\"\n if compat is not None:\n warnings.warn(\n \"The `compat` argument to Dataset is deprecated and will be \"\n \"removed in 0.15.\"\n \"Instead, use `merge` to control how variables are combined\",\n FutureWarning,\n stacklevel=2,\n )\n else:\n compat = \"broadcast_equals\"\n\n # TODO(shoyer): expose indexes as a public argument in __init__\n\n if data_vars is None:\n data_vars = {}\n if coords is None:\n coords = {}\n\n both_data_and_coords = set(data_vars) & set(coords)\n if both_data_and_coords:\n raise ValueError(\n \"variables %r are found in both data_vars and coords\"\n % both_data_and_coords\n )\n\n if isinstance(coords, Dataset):\n coords = coords.variables\n\n variables, coord_names, dims, indexes = merge_data_and_coords(\n data_vars, coords, compat=compat\n )\n\n self._accessors = None\n self._attrs = dict(attrs) if attrs is not None else None\n self._file_obj = None\n self._encoding = None\n self._variables = variables\n self._coord_names = coord_names\n self._dims = dims\n self._indexes = indexes"},{"col":4,"comment":"null","endLoc":2077,"header":"def _data_equals(self, other)","id":384,"name":"_data_equals","nodeType":"Function","startLoc":2076,"text":"def _data_equals(self, other):\n return self.to_index().equals(other.to_index())"},{"col":4,"comment":"Convert this variable to a pandas.Index","endLoc":2101,"header":"def to_index(self)","id":385,"name":"to_index","nodeType":"Function","startLoc":2085,"text":"def to_index(self):\n \"\"\"Convert this variable to a pandas.Index\"\"\"\n # n.b. 
creating a new pandas.Index from an old pandas.Index is\n # basically free as pandas.Index objects are immutable\n assert self.ndim == 1\n index = self._data.array\n if isinstance(index, pd.MultiIndex):\n # set default names for multi-index unnamed levels so that\n # we can safely rename dimension / coordinate later\n valid_level_names = [\n name or \"{}_level_{}\".format(self.dims[0], i)\n for i, name in enumerate(index.names)\n ]\n index = index.set_names(valid_level_names)\n else:\n index = index.set_names(self.name)\n return index"},{"attributeType":"null","col":8,"comment":"null","endLoc":21,"id":386,"name":"shape","nodeType":"Attribute","startLoc":21,"text":"self.shape"},{"col":4,"comment":"Return this variable as an xarray.IndexVariable","endLoc":2081,"header":"def to_index_variable(self)","id":387,"name":"to_index_variable","nodeType":"Function","startLoc":2079,"text":"def to_index_variable(self):\n \"\"\"Return this variable as an xarray.IndexVariable\"\"\"\n return self"},{"col":4,"comment":"Return MultiIndex level names or None if this IndexVariable has no\n MultiIndex.\n ","endLoc":2112,"header":"@property\n def level_names(self)","id":388,"name":"level_names","nodeType":"Function","startLoc":2103,"text":"@property\n def level_names(self):\n \"\"\"Return MultiIndex level names or None if this IndexVariable has no\n MultiIndex.\n \"\"\"\n index = self.to_index()\n if isinstance(index, pd.MultiIndex):\n return index.names\n else:\n return None"},{"col":45,"endLoc":319,"id":389,"nodeType":"Lambda","startLoc":319,"text":"lambda m: m"},{"attributeType":"null","col":8,"comment":"null","endLoc":18,"id":390,"name":"datastore","nodeType":"Attribute","startLoc":18,"text":"self.datastore"},{"attributeType":"null","col":8,"comment":"null","endLoc":22,"id":391,"name":"dtype","nodeType":"Attribute","startLoc":22,"text":"self.dtype"},{"col":4,"comment":"Return a new IndexVariable from a given MultiIndex level.","endLoc":2119,"header":"def get_level_variable(self, level)","id":392,"name":"get_level_variable","nodeType":"Function","startLoc":2114,"text":"def get_level_variable(self, level):\n \"\"\"Return a new IndexVariable from a given MultiIndex level.\"\"\"\n if self.level_names is None:\n raise ValueError(\"IndexVariable %r has no MultiIndex\" % self.name)\n index = self.to_index()\n return type(self)(self.dims, index.get_level_values(level))"},{"attributeType":"null","col":8,"comment":"null","endLoc":19,"id":393,"name":"variable_name","nodeType":"Attribute","startLoc":19,"text":"self.variable_name"},{"className":"NioDataStore","col":0,"comment":"Store for accessing datasets via PyNIO\n ","endLoc":85,"id":394,"nodeType":"Class","startLoc":43,"text":"class NioDataStore(AbstractDataStore):\n \"\"\"Store for accessing datasets via PyNIO\n \"\"\"\n\n def __init__(self, filename, mode=\"r\", lock=None, **kwargs):\n import Nio\n\n if lock is None:\n lock = PYNIO_LOCK\n self.lock = ensure_lock(lock)\n self._manager = CachingFileManager(\n Nio.open_file, filename, lock=lock, mode=mode, kwargs=kwargs\n )\n # xarray provides its own support for FillValue,\n # so turn off PyNIO's support for the same.\n self.ds.set_option(\"MaskedArrayMode\", \"MaskedNever\")\n\n @property\n def ds(self):\n return self._manager.acquire()\n\n def open_store_variable(self, name, var):\n data = indexing.LazilyOuterIndexedArray(NioArrayWrapper(name, self))\n return Variable(var.dimensions, data, var.attributes)\n\n def get_variables(self):\n return FrozenDict(\n (k, self.open_store_variable(k, v)) for k, v in 
self.ds.variables.items()\n )\n\n def get_attrs(self):\n return Frozen(self.ds.attributes)\n\n def get_dimensions(self):\n return Frozen(self.ds.dimensions)\n\n def get_encoding(self):\n return {\n \"unlimited_dims\": {k for k in self.ds.dimensions if self.ds.unlimited(k)}\n }\n\n def close(self):\n self._manager.close()"},{"col":4,"comment":"null","endLoc":58,"header":"def __init__(self, filename, mode=\"r\", lock=None, **kwargs)","id":395,"name":"__init__","nodeType":"Function","startLoc":47,"text":"def __init__(self, filename, mode=\"r\", lock=None, **kwargs):\n import Nio\n\n if lock is None:\n lock = PYNIO_LOCK\n self.lock = ensure_lock(lock)\n self._manager = CachingFileManager(\n Nio.open_file, filename, lock=lock, mode=mode, kwargs=kwargs\n )\n # xarray provides its own support for FillValue,\n # so turn off PyNIO's support for the same.\n self.ds.set_option(\"MaskedArrayMode\", \"MaskedNever\")"},{"col":4,"comment":"null","endLoc":2123,"header":"@property\n def name(self)","id":396,"name":"name","nodeType":"Function","startLoc":2121,"text":"@property\n def name(self):\n return self.dims[0]"},{"col":4,"comment":"null","endLoc":2127,"header":"@name.setter\n def name(self, value)","id":397,"name":"name","nodeType":"Function","startLoc":2125,"text":"@name.setter\n def name(self, value):\n raise AttributeError(\"cannot modify name of IndexVariable in-place\")"},{"attributeType":"null","col":4,"comment":"null","endLoc":1953,"id":398,"name":"__slots__","nodeType":"Attribute","startLoc":1953,"text":"__slots__"},{"attributeType":"null","col":4,"comment":"null","endLoc":2083,"id":399,"name":"to_coord","nodeType":"Attribute","startLoc":2083,"text":"to_coord"},{"col":4,"comment":"null","endLoc":160,"header":"def get_attrs(self)","id":400,"name":"get_attrs","nodeType":"Function","startLoc":159,"text":"def get_attrs(self):\n return FrozenDict(_read_attributes(self.ds))"},{"attributeType":"null","col":12,"comment":"null","endLoc":1962,"id":401,"name":"_data","nodeType":"Attribute","startLoc":1962,"text":"self._data"},{"col":0,"comment":"Concatenate variables along a new or existing dimension.\n\n Parameters\n ----------\n variables : iterable of Array\n Arrays to stack together. Each variable is expected to have\n matching dimensions and shape except for along the stacked\n dimension.\n dim : str or DataArray, optional\n Name of the dimension to stack along. This can either be a new\n dimension name, in which case it is added along axis=0, or an\n existing dimension name, in which case the location of the\n dimension is unchanged. Where to insert the new dimension is\n determined by the first variable.\n positions : None or list of integer arrays, optional\n List of integer arrays which specifies the integer positions to which\n to assign each dataset along the concatenated dimension. 
If not\n supplied, objects are concatenated in the provided order.\n shortcut : bool, optional\n This option is used internally to speed-up groupby operations.\n If `shortcut` is True, some checks of internal consistency between\n arrays to concatenate are skipped.\n\n Returns\n -------\n stacked : Variable\n Concatenated Variable formed by stacking all the supplied variables\n along the given dimension.\n ","endLoc":2232,"header":"def concat(variables, dim=\"concat_dim\", positions=None, shortcut=False)","id":402,"name":"concat","nodeType":"Function","startLoc":2198,"text":"def concat(variables, dim=\"concat_dim\", positions=None, shortcut=False):\n \"\"\"Concatenate variables along a new or existing dimension.\n\n Parameters\n ----------\n variables : iterable of Array\n Arrays to stack together. Each variable is expected to have\n matching dimensions and shape except for along the stacked\n dimension.\n dim : str or DataArray, optional\n Name of the dimension to stack along. This can either be a new\n dimension name, in which case it is added along axis=0, or an\n existing dimension name, in which case the location of the\n dimension is unchanged. Where to insert the new dimension is\n determined by the first variable.\n positions : None or list of integer arrays, optional\n List of integer arrays which specifies the integer positions to which\n to assign each dataset along the concatenated dimension. If not\n supplied, objects are concatenated in the provided order.\n shortcut : bool, optional\n This option is used internally to speed-up groupby operations.\n If `shortcut` is True, some checks of internal consistency between\n arrays to concatenate are skipped.\n\n Returns\n -------\n stacked : Variable\n Concatenated Variable formed by stacking all the supplied variables\n along the given dimension.\n \"\"\"\n variables = list(variables)\n if all(isinstance(v, IndexVariable) for v in variables):\n return IndexVariable.concat(variables, dim, positions, shortcut)\n else:\n return Variable.concat(variables, dim, positions, shortcut)"},{"col":4,"comment":"null","endLoc":163,"header":"def get_dimensions(self)","id":403,"name":"get_dimensions","nodeType":"Function","startLoc":162,"text":"def get_dimensions(self):\n return self.ds.dimensions"},{"col":4,"comment":"null","endLoc":170,"header":"def get_encoding(self)","id":404,"name":"get_encoding","nodeType":"Function","startLoc":165,"text":"def get_encoding(self):\n encoding = {}\n encoding[\"unlimited_dims\"] = {\n k for k, v in self.ds.dimensions.items() if v is None\n }\n return encoding"},{"col":4,"comment":"null","endLoc":42,"header":"def __init__(self, manager, lock, vrt_params=None)","id":406,"name":"__init__","nodeType":"Function","startLoc":26,"text":"def __init__(self, manager, lock, vrt_params=None):\n from rasterio.vrt import WarpedVRT\n\n self.manager = manager\n self.lock = lock\n\n # cannot save riods as an attribute: this would break pickleability\n riods = manager.acquire()\n if vrt_params is not None:\n riods = WarpedVRT(riods, **vrt_params)\n self.vrt_params = vrt_params\n self._shape = (riods.count, riods.height, riods.width)\n\n dtypes = riods.dtypes\n if not np.all(np.asarray(dtypes) == dtypes[0]):\n raise ValueError(\"All bands should have the same dtype\")\n self._dtype = np.dtype(dtypes[0])"},{"col":4,"comment":"null","endLoc":177,"header":"def set_dimension(self, name, length, is_unlimited=False)","id":407,"name":"set_dimension","nodeType":"Function","startLoc":172,"text":"def set_dimension(self, name, length, 
is_unlimited=False):\n if is_unlimited:\n self.ds.dimensions[name] = None\n self.ds.resize_dimension(name, length)\n else:\n self.ds.dimensions[name] = length"},{"col":4,"comment":"null","endLoc":180,"header":"def set_attribute(self, key, value)","id":408,"name":"set_attribute","nodeType":"Function","startLoc":179,"text":"def set_attribute(self, key, value):\n self.ds.attrs[key] = value"},{"col":4,"comment":"null","endLoc":183,"header":"def encode_variable(self, variable)","id":409,"name":"encode_variable","nodeType":"Function","startLoc":182,"text":"def encode_variable(self, variable):\n return _encode_nc4_variable(variable)"},{"col":4,"comment":"null","endLoc":62,"header":"@property\n def ds(self)","id":410,"name":"ds","nodeType":"Function","startLoc":60,"text":"@property\n def ds(self):\n return self._manager.acquire()"},{"col":4,"comment":"null","endLoc":66,"header":"def open_store_variable(self, name, var)","id":411,"name":"open_store_variable","nodeType":"Function","startLoc":64,"text":"def open_store_variable(self, name, var):\n data = indexing.LazilyOuterIndexedArray(NioArrayWrapper(name, self))\n return Variable(var.dimensions, data, var.attributes)"},{"col":0,"comment":"concatenate() with better dtype promotion rules.","endLoc":237,"header":"def concatenate(arrays, axis=0)","id":412,"name":"concatenate","nodeType":"Function","startLoc":235,"text":"def concatenate(arrays, axis=0):\n \"\"\"concatenate() with better dtype promotion rules.\"\"\"\n return _concatenate(as_shared_dtype(arrays), axis=axis)"},{"col":4,"comment":"null","endLoc":258,"header":"def prepare_variable(\n self, name, variable, check_encoding=False, unlimited_dims=None\n )","id":413,"name":"prepare_variable","nodeType":"Function","startLoc":185,"text":"def prepare_variable(\n self, name, variable, check_encoding=False, unlimited_dims=None\n ):\n import h5py\n\n attrs = variable.attrs.copy()\n dtype = _get_datatype(variable, raise_on_invalid_encoding=check_encoding)\n\n fillvalue = attrs.pop(\"_FillValue\", None)\n if dtype is str and fillvalue is not None:\n raise NotImplementedError(\n \"h5netcdf does not yet support setting a fill value for \"\n \"variable-length strings \"\n \"(https://github.com/shoyer/h5netcdf/issues/37). 
\"\n \"Either remove '_FillValue' from encoding on variable %r \"\n \"or set {'dtype': 'S1'} in encoding to use the fixed width \"\n \"NC_CHAR type.\" % name\n )\n\n if dtype is str:\n dtype = h5py.special_dtype(vlen=str)\n\n encoding = _extract_h5nc_encoding(variable, raise_on_invalid=check_encoding)\n kwargs = {}\n\n # Convert from NetCDF4-Python style compression settings to h5py style\n # If both styles are used together, h5py takes precedence\n # If set_encoding=True, raise ValueError in case of mismatch\n if encoding.pop(\"zlib\", False):\n if check_encoding and encoding.get(\"compression\") not in (None, \"gzip\"):\n raise ValueError(\"'zlib' and 'compression' encodings mismatch\")\n encoding.setdefault(\"compression\", \"gzip\")\n\n if (\n check_encoding\n and \"complevel\" in encoding\n and \"compression_opts\" in encoding\n and encoding[\"complevel\"] != encoding[\"compression_opts\"]\n ):\n raise ValueError(\"'complevel' and 'compression_opts' encodings \" \"mismatch\")\n complevel = encoding.pop(\"complevel\", 0)\n if complevel != 0:\n encoding.setdefault(\"compression_opts\", complevel)\n\n encoding[\"chunks\"] = encoding.pop(\"chunksizes\", None)\n\n # Do not apply compression, filters or chunking to scalars.\n if variable.shape:\n for key in [\n \"compression\",\n \"compression_opts\",\n \"shuffle\",\n \"chunks\",\n \"fletcher32\",\n ]:\n if key in encoding:\n kwargs[key] = encoding[key]\n if name not in self.ds:\n nc4_var = self.ds.create_variable(\n name,\n dtype=dtype,\n dimensions=variable.dims,\n fillvalue=fillvalue,\n **kwargs\n )\n else:\n nc4_var = self.ds[name]\n\n for k, v in attrs.items():\n nc4_var.attrs[k] = v\n\n target = H5NetCDFArrayWrapper(name, self)\n\n return target, variable.data"},{"col":0,"comment":"stack() with better dtype promotion rules.","endLoc":242,"header":"def stack(arrays, axis=0)","id":414,"name":"stack","nodeType":"Function","startLoc":240,"text":"def stack(arrays, axis=0):\n \"\"\"stack() with better dtype promotion rules.\"\"\"\n return _stack(as_shared_dtype(arrays), axis=axis)"},{"col":4,"comment":"null","endLoc":644,"header":"def __init__(self, array)","id":415,"name":"__init__","nodeType":"Function","startLoc":642,"text":"def __init__(self, array):\n self.array = as_indexable(array)\n self._copied = False"},{"col":4,"comment":"null","endLoc":71,"header":"def get_variables(self)","id":416,"name":"get_variables","nodeType":"Function","startLoc":68,"text":"def get_variables(self):\n return FrozenDict(\n (k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()\n )"},{"col":4,"comment":"null","endLoc":669,"header":"def __init__(self, array)","id":417,"name":"__init__","nodeType":"Function","startLoc":668,"text":"def __init__(self, array):\n self.array = _wrap_numpy_scalars(as_indexable(array))"},{"col":0,"comment":"Concatenate xarray objects along a new or existing dimension.\n\n Parameters\n ----------\n objs : sequence of Dataset and DataArray objects\n xarray objects to concatenate together. Each object is expected to\n consist of variables and coordinates with matching shapes except for\n along the concatenated dimension.\n dim : str or DataArray or pandas.Index\n Name of the dimension to concatenate along. This can either be a new\n dimension name, in which case it is added along axis=0, or an existing\n dimension name, in which case the location of the dimension is\n unchanged. 
If dimension is provided as a DataArray or Index, its name\n is used as the dimension to concatenate along and the values are added\n as a coordinate.\n data_vars : {'minimal', 'different', 'all' or list of str}, optional\n These data variables will be concatenated together:\n * 'minimal': Only data variables in which the dimension already\n appears are included.\n * 'different': Data variables which are not equal (ignoring\n attributes) across all datasets are also concatenated (as well as\n all for which dimension already appears). Beware: this option may\n load the data payload of data variables into memory if they are not\n already loaded.\n * 'all': All data variables will be concatenated.\n * list of str: The listed data variables will be concatenated, in\n addition to the 'minimal' data variables.\n If objects are DataArrays, data_vars must be 'all'.\n coords : {'minimal', 'different', 'all' or list of str}, optional\n These coordinate variables will be concatenated together:\n * 'minimal': Only coordinates in which the dimension already appears\n are included.\n * 'different': Coordinates which are not equal (ignoring attributes)\n across all datasets are also concatenated (as well as all for which\n dimension already appears). Beware: this option may load the data\n payload of coordinate variables into memory if they are not already\n loaded.\n * 'all': All coordinate variables will be concatenated, except\n those corresponding to other dimensions.\n * list of str: The listed coordinate variables will be concatenated,\n in addition to the 'minimal' coordinates.\n compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional\n String indicating how to compare non-concatenated variables of the same name for\n potential conflicts. This is passed down to merge.\n\n - 'broadcast_equals': all values must be equal when variables are\n broadcast against each other to ensure common dimensions.\n - 'equals': all values and dimensions must be the same.\n - 'identical': all values, dimensions and attributes must be the\n same.\n - 'no_conflicts': only values which are not null in both datasets\n must be equal. The returned dataset then contains the combination\n of all non-null values.\n - 'override': skip comparing and pick variable from first dataset\n positions : None or list of integer arrays, optional\n List of integer arrays which specifies the integer positions to which\n to assign each dataset along the concatenated dimension. If not\n supplied, objects are concatenated in the provided order.\n fill_value : scalar, optional\n Value to use for newly missing values\n join : {'outer', 'inner', 'left', 'right', 'exact'}, optional\n String indicating how to combine differing indexes\n (excluding dim) in objects\n\n - 'outer': use the union of object indexes\n - 'inner': use the intersection of object indexes\n - 'left': use indexes from the first object with each dimension\n - 'right': use indexes from the last object with each dimension\n - 'exact': instead of aligning, raise `ValueError` when indexes to be\n aligned are not equal\n - 'override': if indexes are of same size, rewrite indexes to be\n those of the first object with that dimension. 
Indexes for the same\n dimension must have the same size in all objects.\n\n indexers, mode, concat_over : deprecated\n\n Returns\n -------\n concatenated : type of objs\n\n See also\n --------\n merge\n auto_combine\n ","endLoc":131,"header":"def concat(\n objs,\n dim,\n data_vars=\"all\",\n coords=\"different\",\n compat=\"equals\",\n positions=None,\n fill_value=dtypes.NA,\n join=\"outer\",\n)","id":418,"name":"concat","nodeType":"Function","startLoc":10,"text":"def concat(\n objs,\n dim,\n data_vars=\"all\",\n coords=\"different\",\n compat=\"equals\",\n positions=None,\n fill_value=dtypes.NA,\n join=\"outer\",\n):\n \"\"\"Concatenate xarray objects along a new or existing dimension.\n\n Parameters\n ----------\n objs : sequence of Dataset and DataArray objects\n xarray objects to concatenate together. Each object is expected to\n consist of variables and coordinates with matching shapes except for\n along the concatenated dimension.\n dim : str or DataArray or pandas.Index\n Name of the dimension to concatenate along. This can either be a new\n dimension name, in which case it is added along axis=0, or an existing\n dimension name, in which case the location of the dimension is\n unchanged. If dimension is provided as a DataArray or Index, its name\n is used as the dimension to concatenate along and the values are added\n as a coordinate.\n data_vars : {'minimal', 'different', 'all' or list of str}, optional\n These data variables will be concatenated together:\n * 'minimal': Only data variables in which the dimension already\n appears are included.\n * 'different': Data variables which are not equal (ignoring\n attributes) across all datasets are also concatenated (as well as\n all for which dimension already appears). Beware: this option may\n load the data payload of data variables into memory if they are not\n already loaded.\n * 'all': All data variables will be concatenated.\n * list of str: The listed data variables will be concatenated, in\n addition to the 'minimal' data variables.\n If objects are DataArrays, data_vars must be 'all'.\n coords : {'minimal', 'different', 'all' or list of str}, optional\n These coordinate variables will be concatenated together:\n * 'minimal': Only coordinates in which the dimension already appears\n are included.\n * 'different': Coordinates which are not equal (ignoring attributes)\n across all datasets are also concatenated (as well as all for which\n dimension already appears). Beware: this option may load the data\n payload of coordinate variables into memory if they are not already\n loaded.\n * 'all': All coordinate variables will be concatenated, except\n those corresponding to other dimensions.\n * list of str: The listed coordinate variables will be concatenated,\n in addition to the 'minimal' coordinates.\n compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional\n String indicating how to compare non-concatenated variables of the same name for\n potential conflicts. This is passed down to merge.\n\n - 'broadcast_equals': all values must be equal when variables are\n broadcast against each other to ensure common dimensions.\n - 'equals': all values and dimensions must be the same.\n - 'identical': all values, dimensions and attributes must be the\n same.\n - 'no_conflicts': only values which are not null in both datasets\n must be equal. 
The returned dataset then contains the combination\n of all non-null values.\n - 'override': skip comparing and pick variable from first dataset\n positions : None or list of integer arrays, optional\n List of integer arrays which specifies the integer positions to which\n to assign each dataset along the concatenated dimension. If not\n supplied, objects are concatenated in the provided order.\n fill_value : scalar, optional\n Value to use for newly missing values\n join : {'outer', 'inner', 'left', 'right', 'exact'}, optional\n String indicating how to combine differing indexes\n (excluding dim) in objects\n\n - 'outer': use the union of object indexes\n - 'inner': use the intersection of object indexes\n - 'left': use indexes from the first object with each dimension\n - 'right': use indexes from the last object with each dimension\n - 'exact': instead of aligning, raise `ValueError` when indexes to be\n aligned are not equal\n - 'override': if indexes are of same size, rewrite indexes to be\n those of the first object with that dimension. Indexes for the same\n dimension must have the same size in all objects.\n\n indexers, mode, concat_over : deprecated\n\n Returns\n -------\n concatenated : type of objs\n\n See also\n --------\n merge\n auto_combine\n \"\"\"\n # TODO: add ignore_index arguments copied from pandas.concat\n # TODO: support concatenating scalar coordinates even if the concatenated\n # dimension already exists\n from .dataset import Dataset\n from .dataarray import DataArray\n\n try:\n first_obj, objs = utils.peek_at(objs)\n except StopIteration:\n raise ValueError(\"must supply at least one object to concatenate\")\n\n if compat not in _VALID_COMPAT:\n raise ValueError(\n \"compat=%r invalid: must be 'broadcast_equals', 'equals', 'identical', 'no_conflicts' or 'override'\"\n % compat\n )\n\n if isinstance(first_obj, DataArray):\n f = _dataarray_concat\n elif isinstance(first_obj, Dataset):\n f = _dataset_concat\n else:\n raise TypeError(\n \"can only concatenate xarray Dataset and DataArray \"\n \"objects, got %s\" % type(first_obj)\n )\n return f(objs, dim, data_vars, coords, compat, positions, fill_value, join)"},{"col":0,"comment":"null","endLoc":182,"header":"def _check_data_shape(data, coords, dims)","id":419,"name":"_check_data_shape","nodeType":"Function","startLoc":167,"text":"def _check_data_shape(data, coords, dims):\n if data is dtypes.NA:\n data = np.nan\n if coords is not None and utils.is_scalar(data, include_0d=False):\n if utils.is_dict_like(coords):\n if dims is None:\n return data\n else:\n data_shape = tuple(\n as_variable(coords[k], k).size if k in coords.keys() else 1\n for k in dims\n )\n else:\n data_shape = tuple(as_variable(coord, \"foo\").size for coord in coords)\n data = np.full(data_shape, data)\n return data"},{"col":0,"comment":"Wrap NumPy scalars in 0d arrays.","endLoc":636,"header":"def _wrap_numpy_scalars(array)","id":420,"name":"_wrap_numpy_scalars","nodeType":"Function","startLoc":631,"text":"def _wrap_numpy_scalars(array):\n \"\"\"Wrap NumPy scalars in 0d arrays.\"\"\"\n if np.isscalar(array):\n return np.array(array)\n else:\n return array"},{"col":0,"comment":"Returns the first value from iterable, as well as a new iterator with\n the same content as the original iterable\n ","endLoc":181,"header":"def peek_at(iterable: Iterable[T]) -> Tuple[T, Iterator[T]]","id":421,"name":"peek_at","nodeType":"Function","startLoc":175,"text":"def peek_at(iterable: Iterable[T]) -> Tuple[T, Iterator[T]]:\n \"\"\"Returns the first value from 
iterable, as well as a new iterator with\n the same content as the original iterable\n \"\"\"\n gen = iter(iterable)\n peek = next(gen)\n return peek, itertools.chain([peek], gen)"},{"col":4,"comment":"null","endLoc":74,"header":"def get_attrs(self)","id":422,"name":"get_attrs","nodeType":"Function","startLoc":73,"text":"def get_attrs(self):\n return Frozen(self.ds.attributes)"},{"col":4,"comment":"null","endLoc":77,"header":"def get_dimensions(self)","id":423,"name":"get_dimensions","nodeType":"Function","startLoc":76,"text":"def get_dimensions(self):\n return Frozen(self.ds.dimensions)"},{"col":4,"comment":"null","endLoc":82,"header":"def get_encoding(self)","id":424,"name":"get_encoding","nodeType":"Function","startLoc":79,"text":"def get_encoding(self):\n return {\n \"unlimited_dims\": {k for k in self.ds.dimensions if self.ds.unlimited(k)}\n }"},{"col":0,"comment":"null","endLoc":426,"header":"def _dataarray_concat(\n arrays,\n dim,\n data_vars,\n coords,\n compat,\n positions,\n fill_value=dtypes.NA,\n join=\"outer\",\n)","id":425,"name":"_dataarray_concat","nodeType":"Function","startLoc":388,"text":"def _dataarray_concat(\n arrays,\n dim,\n data_vars,\n coords,\n compat,\n positions,\n fill_value=dtypes.NA,\n join=\"outer\",\n):\n arrays = list(arrays)\n\n if data_vars != \"all\":\n raise ValueError(\n \"data_vars is not a valid argument when concatenating DataArray objects\"\n )\n\n datasets = []\n for n, arr in enumerate(arrays):\n if n == 0:\n name = arr.name\n elif name != arr.name:\n if compat == \"identical\":\n raise ValueError(\"array names not identical\")\n else:\n arr = arr.rename(name)\n datasets.append(arr._to_temp_dataset())\n\n ds = _dataset_concat(\n datasets,\n dim,\n data_vars,\n coords,\n compat,\n positions,\n fill_value=fill_value,\n join=join,\n )\n return arrays[0]._from_temp_dataset(ds, name)"},{"col":4,"comment":"null","endLoc":85,"header":"def close(self)","id":426,"name":"close","nodeType":"Function","startLoc":84,"text":"def close(self):\n self._manager.close()"},{"attributeType":"CachingFileManager","col":8,"comment":"null","endLoc":53,"id":427,"name":"_manager","nodeType":"Attribute","startLoc":53,"text":"self._manager"},{"attributeType":"null","col":8,"comment":"null","endLoc":52,"id":428,"name":"lock","nodeType":"Attribute","startLoc":52,"text":"self.lock"},{"col":4,"comment":"null","endLoc":261,"header":"def sync(self)","id":429,"name":"sync","nodeType":"Function","startLoc":260,"text":"def sync(self):\n self.ds.sync()"},{"col":4,"comment":"null","endLoc":264,"header":"def close(self, **kwargs)","id":430,"name":"close","nodeType":"Function","startLoc":263,"text":"def close(self, **kwargs):\n self._manager.close(**kwargs)"},{"attributeType":"null","col":16,"comment":"null","endLoc":1,"id":431,"name":"np","nodeType":"Attribute","startLoc":1,"text":"np"},{"attributeType":"null","col":0,"comment":"null","endLoc":12,"id":432,"name":"NCL_LOCK","nodeType":"Attribute","startLoc":12,"text":"NCL_LOCK"},{"attributeType":"null","col":8,"comment":"null","endLoc":104,"id":433,"name":"autoclose","nodeType":"Attribute","startLoc":104,"text":"self.autoclose"},{"attributeType":"null","col":0,"comment":"null","endLoc":13,"id":434,"name":"PYNIO_LOCK","nodeType":"Attribute","startLoc":13,"text":"PYNIO_LOCK"},{"col":0,"comment":"","endLoc":1,"header":"pynio_.py#","id":435,"name":"","nodeType":"Function","startLoc":1,"text":"NCL_LOCK = SerializableLock()\n\nPYNIO_LOCK = combine_locks([HDF5_LOCK, NETCDFC_LOCK, 
NCL_LOCK])"},{"fileName":"plot_lines_from_2d.py","filePath":"doc/gallery","id":436,"nodeType":"File","text":"# -*- coding: utf-8 -*-\n\"\"\"\n==================================\nMultiple lines from a 2d DataArray\n==================================\n\n\nUse :py:func:`xarray.plot.line` on a 2d DataArray to plot selections as\nmultiple lines.\n\nSee :ref:`plotting.multiplelines` for more details.\n\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\nimport xarray as xr\n\n# Load the data\nds = xr.tutorial.load_dataset(\"air_temperature\")\nair = ds.air - 273.15 # to celsius\n\n# Prepare the figure\nf, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharey=True)\n\n# Selected latitude indices\nisel_lats = [10, 15, 20]\n\n# Temperature vs longitude plot - illustrates the \"hue\" kwarg\nair.isel(time=0, lat=isel_lats).plot.line(ax=ax1, hue=\"lat\")\nax1.set_ylabel(\"°C\")\n\n# Temperature vs time plot - illustrates the \"x\" and \"add_legend\" kwargs\nair.isel(lon=30, lat=isel_lats).plot.line(ax=ax2, x=\"time\", add_legend=False)\nax2.set_ylabel(\"\")\n\n# Show\nplt.tight_layout()\nplt.show()\n"},{"attributeType":"null","col":28,"comment":"null","endLoc":15,"id":437,"name":"plt","nodeType":"Attribute","startLoc":15,"text":"plt"},{"attributeType":"CachingFileManager","col":8,"comment":"null","endLoc":89,"id":438,"name":"_manager","nodeType":"Attribute","startLoc":89,"text":"self._manager"},{"attributeType":"null","col":8,"comment":"null","endLoc":100,"id":439,"name":"format","nodeType":"Attribute","startLoc":100,"text":"self.format"},{"attributeType":"null","col":17,"comment":"null","endLoc":17,"id":440,"name":"xr","nodeType":"Attribute","startLoc":17,"text":"xr"},{"attributeType":"null","col":0,"comment":"null","endLoc":20,"id":441,"name":"ds","nodeType":"Attribute","startLoc":20,"text":"ds"},{"attributeType":"null","col":8,"comment":"null","endLoc":103,"id":442,"name":"lock","nodeType":"Attribute","startLoc":103,"text":"self.lock"},{"col":0,"comment":"All the logic for creating a new DataArray","endLoc":164,"header":"def _infer_coords_and_dims(\n shape, coords, dims\n) -> \"Tuple[Dict[Any, Variable], Tuple[Hashable, ...]]\"","id":443,"name":"_infer_coords_and_dims","nodeType":"Function","startLoc":82,"text":"def _infer_coords_and_dims(\n shape, coords, dims\n) -> \"Tuple[Dict[Any, Variable], Tuple[Hashable, ...]]\":\n \"\"\"All the logic for creating a new DataArray\"\"\"\n\n if (\n coords is not None\n and not utils.is_dict_like(coords)\n and len(coords) != len(shape)\n ):\n raise ValueError(\n \"coords is not dict-like, but it has %s items, \"\n \"which does not match the %s dimensions of the \"\n \"data\" % (len(coords), len(shape))\n )\n\n if isinstance(dims, str):\n dims = (dims,)\n\n if dims is None:\n dims = [\"dim_%s\" % n for n in range(len(shape))]\n if coords is not None and len(coords) == len(shape):\n # try to infer dimensions from coords\n if utils.is_dict_like(coords):\n # deprecated in GH993, removed in GH1539\n raise ValueError(\n \"inferring DataArray dimensions from \"\n \"dictionary like ``coords`` is no longer \"\n \"supported. 
Use an explicit list of \"\n \"``dims`` instead.\"\n )\n for n, (dim, coord) in enumerate(zip(dims, coords)):\n coord = as_variable(coord, name=dims[n]).to_index_variable()\n dims[n] = coord.name\n dims = tuple(dims)\n elif len(dims) != len(shape):\n raise ValueError(\n \"different number of dimensions on data \"\n \"and dims: %s vs %s\" % (len(shape), len(dims))\n )\n else:\n for d in dims:\n if not isinstance(d, str):\n raise TypeError(\"dimension %s is not a string\" % d)\n\n new_coords: Dict[Any, Variable] = {}\n\n if utils.is_dict_like(coords):\n for k, v in coords.items():\n new_coords[k] = as_variable(v, name=k)\n elif coords is not None:\n for dim, coord in zip(dims, coords):\n var = as_variable(coord, name=dim)\n var.dims = (dim,)\n new_coords[dim] = var.to_index_variable()\n\n sizes = dict(zip(dims, shape))\n for k, v in new_coords.items():\n if any(d not in dims for d in v.dims):\n raise ValueError(\n \"coordinate %s has dimensions %s, but these \"\n \"are not a subset of the DataArray \"\n \"dimensions %s\" % (k, v.dims, dims)\n )\n\n for d, s in zip(v.dims, v.shape):\n if s != sizes[d]:\n raise ValueError(\n \"conflicting sizes for dimension %r: \"\n \"length %s on the data but length %s on \"\n \"coordinate %r\" % (d, sizes[d], s, k)\n )\n\n if k in sizes and v.shape != (sizes[k],):\n raise ValueError(\n \"coordinate %r is a DataArray dimension, but \"\n \"it has shape %r rather than expected shape %r \"\n \"matching the dimension size\" % (k, v.shape, (sizes[k],))\n )\n\n assert_unique_multiindex_level_names(new_coords)\n\n return new_coords, dims"},{"attributeType":"null","col":8,"comment":"null","endLoc":99,"id":444,"name":"_group","nodeType":"Attribute","startLoc":99,"text":"self._group"},{"attributeType":"null","col":0,"comment":"null","endLoc":21,"id":445,"name":"air","nodeType":"Attribute","startLoc":21,"text":"air"},{"attributeType":"null","col":0,"comment":"null","endLoc":24,"id":446,"name":"f","nodeType":"Attribute","startLoc":24,"text":"f"},{"attributeType":"null","col":4,"comment":"null","endLoc":24,"id":447,"name":"ax1","nodeType":"Attribute","startLoc":24,"text":"ax1"},{"attributeType":"null","col":9,"comment":"null","endLoc":24,"id":448,"name":"ax2","nodeType":"Attribute","startLoc":24,"text":"ax2"},{"attributeType":"null","col":0,"comment":"null","endLoc":27,"id":449,"name":"isel_lats","nodeType":"Attribute","startLoc":27,"text":"isel_lats"},{"col":0,"comment":"","endLoc":13,"header":"plot_lines_from_2d.py#","id":450,"name":"","nodeType":"Function","startLoc":2,"text":"\"\"\"\n==================================\nMultiple lines from a 2d DataArray\n==================================\n\n\nUse :py:func:`xarray.plot.line` on a 2d DataArray to plot selections as\nmultiple lines.\n\nSee :ref:`plotting.multiplelines` for more details.\n\n\"\"\"\n\nds = xr.tutorial.load_dataset(\"air_temperature\")\n\nair = ds.air - 273.15 # to celsius\n\nf, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharey=True)\n\nisel_lats = [10, 15, 20]\n\nair.isel(time=0, lat=isel_lats).plot.line(ax=ax1, hue=\"lat\")\n\nax1.set_ylabel(\"°C\")\n\nair.isel(lon=30, lat=isel_lats).plot.line(ax=ax2, x=\"time\", add_legend=False)\n\nax2.set_ylabel(\"\")\n\nplt.tight_layout()\n\nplt.show()"},{"col":0,"comment":"\n Open, load into memory, and close a dataset from the online repository\n (requires internet).\n\n See Also\n --------\n open_dataset\n ","endLoc":110,"header":"def load_dataset(*args, **kwargs)","id":451,"name":"load_dataset","nodeType":"Function","startLoc":100,"text":"def 
load_dataset(*args, **kwargs):\n \"\"\"\n Open, load into memory, and close a dataset from the online repository\n (requires internet).\n\n See Also\n --------\n open_dataset\n \"\"\"\n with open_dataset(*args, **kwargs) as ds:\n return ds.load()"},{"attributeType":"null","col":8,"comment":"null","endLoc":101,"id":452,"name":"_filename","nodeType":"Attribute","startLoc":101,"text":"self._filename"},{"attributeType":"null","col":8,"comment":"null","endLoc":102,"id":453,"name":"_mode","nodeType":"Attribute","startLoc":102,"text":"self._mode"},{"col":0,"comment":"null","endLoc":65,"header":"def _h5netcdf_create_group(dataset, name)","id":454,"name":"_h5netcdf_create_group","nodeType":"Function","startLoc":64,"text":"def _h5netcdf_create_group(dataset, name):\n return dataset.create_group(name)"},{"attributeType":"null","col":16,"comment":"null","endLoc":3,"id":455,"name":"np","nodeType":"Attribute","startLoc":3,"text":"np"},{"attributeType":"null","col":0,"comment":"null","endLoc":59,"id":456,"name":"_extract_h5nc_encoding","nodeType":"Attribute","startLoc":59,"text":"_extract_h5nc_encoding"},{"col":0,"comment":"","endLoc":1,"header":"h5netcdf_.py#","id":457,"name":"","nodeType":"Function","startLoc":1,"text":"_extract_h5nc_encoding = functools.partial(\n _extract_nc4_variable_encoding, lsd_okay=False, h5py_okay=True, backend=\"h5netcdf\"\n)"},{"fileName":"extensions.py","filePath":"xarray/core","id":458,"nodeType":"File","text":"import warnings\n\nfrom .dataarray import DataArray\nfrom .dataset import Dataset\n\n\nclass AccessorRegistrationWarning(Warning):\n \"\"\"Warning for conflicts in accessor registration.\"\"\"\n\n\nclass _CachedAccessor:\n \"\"\"Custom property-like object (descriptor) for caching accessors.\"\"\"\n\n def __init__(self, name, accessor):\n self._name = name\n self._accessor = accessor\n\n def __get__(self, obj, cls):\n if obj is None:\n # we're accessing the attribute of the class, i.e., Dataset.geo\n return self._accessor\n\n try:\n return obj._accessors[self._name]\n except TypeError:\n obj._accessors = {}\n except KeyError:\n pass\n\n try:\n accessor_obj = self._accessor(obj)\n except AttributeError:\n # __getattr__ on data object will swallow any AttributeErrors\n # raised when initializing the accessor, so we need to raise as\n # something else (GH933):\n raise RuntimeError(\"error initializing %r accessor.\" % self._name)\n\n obj._accessors[self._name] = accessor_obj\n return accessor_obj\n\n\ndef _register_accessor(name, cls):\n def decorator(accessor):\n if hasattr(cls, name):\n warnings.warn(\n \"registration of accessor %r under name %r for type %r is \"\n \"overriding a preexisting attribute with the same name.\"\n % (accessor, name, cls),\n AccessorRegistrationWarning,\n stacklevel=2,\n )\n setattr(cls, name, _CachedAccessor(name, accessor))\n return accessor\n\n return decorator\n\n\ndef register_dataarray_accessor(name):\n \"\"\"Register a custom accessor on xarray.DataArray objects.\n\n Parameters\n ----------\n name : str\n Name under which the accessor should be registered. A warning is issued\n if this name conflicts with a preexisting attribute.\n\n See also\n --------\n register_dataset_accessor\n \"\"\"\n return _register_accessor(name, DataArray)\n\n\ndef register_dataset_accessor(name):\n \"\"\"Register a custom property on xarray.Dataset objects.\n\n Parameters\n ----------\n name : str\n Name under which the accessor should be registered. 
A warning is issued\n if this name conflicts with a preexisting attribute.\n\n Examples\n --------\n\n In your library code::\n\n import xarray as xr\n\n @xr.register_dataset_accessor('geo')\n class GeoAccessor:\n def __init__(self, xarray_obj):\n self._obj = xarray_obj\n\n @property\n def center(self):\n # return the geographic center point of this dataset\n lon = self._obj.latitude\n lat = self._obj.longitude\n return (float(lon.mean()), float(lat.mean()))\n\n def plot(self):\n # plot this array's data on a map, e.g., using Cartopy\n pass\n\n Back in an interactive IPython session:\n\n >>> ds = xarray.Dataset({'longitude': np.linspace(0, 10),\n ... 'latitude': np.linspace(0, 20)})\n >>> ds.geo.center\n (5.0, 10.0)\n >>> ds.geo.plot()\n # plots data on a map\n\n See also\n --------\n register_dataarray_accessor\n \"\"\"\n return _register_accessor(name, Dataset)\n"},{"fileName":"rolling_exp.py","filePath":"xarray/core","id":459,"nodeType":"File","text":"import numpy as np\n\nfrom .pdcompat import count_not_none\nfrom .pycompat import dask_array_type\n\n\ndef _get_alpha(com=None, span=None, halflife=None, alpha=None):\n # pandas defines in terms of com (converting to alpha in the algo)\n # so use its function to get a com and then convert to alpha\n\n com = _get_center_of_mass(com, span, halflife, alpha)\n return 1 / (1 + com)\n\n\ndef move_exp_nanmean(array, *, axis, alpha):\n if isinstance(array, dask_array_type):\n raise TypeError(\"rolling_exp is not currently support for dask arrays\")\n import numbagg\n\n if axis == ():\n return array.astype(np.float64)\n else:\n return numbagg.move_exp_nanmean(array, axis=axis, alpha=alpha)\n\n\ndef _get_center_of_mass(comass, span, halflife, alpha):\n \"\"\"\n Vendored from pandas.core.window.common._get_center_of_mass\n\n See licenses/PANDAS_LICENSE for the function's license\n \"\"\"\n valid_count = count_not_none(comass, span, halflife, alpha)\n if valid_count > 1:\n raise ValueError(\"comass, span, halflife, and alpha \" \"are mutually exclusive\")\n\n # Convert to center of mass; domain checks ensure 0 < alpha <= 1\n if comass is not None:\n if comass < 0:\n raise ValueError(\"comass must satisfy: comass >= 0\")\n elif span is not None:\n if span < 1:\n raise ValueError(\"span must satisfy: span >= 1\")\n comass = (span - 1) / 2.0\n elif halflife is not None:\n if halflife <= 0:\n raise ValueError(\"halflife must satisfy: halflife > 0\")\n decay = 1 - np.exp(np.log(0.5) / halflife)\n comass = 1 / decay - 1\n elif alpha is not None:\n if alpha <= 0 or alpha > 1:\n raise ValueError(\"alpha must satisfy: 0 < alpha <= 1\")\n comass = (1.0 - alpha) / alpha\n else:\n raise ValueError(\"Must pass one of comass, span, halflife, or alpha\")\n\n return float(comass)\n\n\nclass RollingExp:\n \"\"\"\n Exponentially-weighted moving window object.\n Similar to EWM in pandas\n\n Parameters\n ----------\n obj : Dataset or DataArray\n Object to window.\n windows : A single mapping from a single dimension name to window value\n dim : str\n Name of the dimension to create the rolling exponential window\n along (e.g., `time`).\n window : int\n Size of the moving window. The type of this is specified in\n `window_type`\n window_type : str, one of ['span', 'com', 'halflife', 'alpha'], default 'span'\n The format of the previously supplied window. Each is a simple\n numerical transformation of the others. 
Described in detail:\n https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.ewm.html\n\n Returns\n -------\n RollingExp : type of input argument\n \"\"\"\n\n def __init__(self, obj, windows, window_type=\"span\"):\n self.obj = obj\n dim, window = next(iter(windows.items()))\n self.dim = dim\n self.alpha = _get_alpha(**{window_type: window})\n\n def mean(self):\n \"\"\"\n Exponentially weighted moving average\n\n Examples\n --------\n >>> da = xr.DataArray([1,1,2,2,2], dims='x')\n >>> da.rolling_exp(x=2, window_type='span').mean()\n \n array([1. , 1. , 1.692308, 1.9 , 1.966942])\n Dimensions without coordinates: x\n \"\"\"\n\n return self.obj.reduce(move_exp_nanmean, dim=self.dim, alpha=self.alpha)\n"},{"className":"DataArray","col":0,"comment":"N-dimensional array with labeled coordinates and dimensions.\n\n DataArray provides a wrapper around numpy ndarrays that uses labeled\n dimensions and coordinates to support metadata aware operations. The API is\n similar to that for the pandas Series or DataFrame, but DataArray objects\n can have any number of dimensions, and their contents have fixed data\n types.\n\n Additional features over raw numpy arrays:\n\n - Apply operations over dimensions by name: ``x.sum('time')``.\n - Select or assign values by integer location (like numpy): ``x[:10]``\n or by label (like pandas): ``x.loc['2014-01-01']`` or\n ``x.sel(time='2014-01-01')``.\n - Mathematical operations (e.g., ``x - y``) vectorize across multiple\n dimensions (known in numpy as \"broadcasting\") based on dimension names,\n regardless of their original order.\n - Keep track of arbitrary metadata in the form of a Python dictionary:\n ``x.attrs``\n - Convert to a pandas Series: ``x.to_series()``.\n\n Getting items from or doing mathematical operations with a DataArray\n always returns another DataArray.\n\n Attributes\n ----------\n dims : tuple\n Dimension names associated with this array.\n values : np.ndarray\n Access or modify DataArray values as a numpy array.\n coords : dict-like\n Dictionary of DataArray objects that label values along each dimension.\n name : str or None\n Name of this array.\n attrs : dict\n Dictionary for holding arbitrary metadata.\n ","endLoc":3121,"id":460,"nodeType":"Class","startLoc":213,"text":"class DataArray(AbstractArray, DataWithCoords):\n \"\"\"N-dimensional array with labeled coordinates and dimensions.\n\n DataArray provides a wrapper around numpy ndarrays that uses labeled\n dimensions and coordinates to support metadata aware operations. 
The API is\n similar to that for the pandas Series or DataFrame, but DataArray objects\n can have any number of dimensions, and their contents have fixed data\n types.\n\n Additional features over raw numpy arrays:\n\n - Apply operations over dimensions by name: ``x.sum('time')``.\n - Select or assign values by integer location (like numpy): ``x[:10]``\n or by label (like pandas): ``x.loc['2014-01-01']`` or\n ``x.sel(time='2014-01-01')``.\n - Mathematical operations (e.g., ``x - y``) vectorize across multiple\n dimensions (known in numpy as \"broadcasting\") based on dimension names,\n regardless of their original order.\n - Keep track of arbitrary metadata in the form of a Python dictionary:\n ``x.attrs``\n - Convert to a pandas Series: ``x.to_series()``.\n\n Getting items from or doing mathematical operations with a DataArray\n always returns another DataArray.\n\n Attributes\n ----------\n dims : tuple\n Dimension names associated with this array.\n values : np.ndarray\n Access or modify DataArray values as a numpy array.\n coords : dict-like\n Dictionary of DataArray objects that label values along each dimension.\n name : str or None\n Name of this array.\n attrs : dict\n Dictionary for holding arbitrary metadata.\n \"\"\"\n\n _accessors: Optional[Dict[str, Any]]\n _coords: Dict[Any, Variable]\n _indexes: Optional[Dict[Hashable, pd.Index]]\n _name: Optional[Hashable]\n _variable: Variable\n\n __slots__ = (\n \"_accessors\",\n \"_coords\",\n \"_file_obj\",\n \"_indexes\",\n \"_name\",\n \"_variable\",\n \"__weakref__\",\n )\n\n _groupby_cls = groupby.DataArrayGroupBy\n _rolling_cls = rolling.DataArrayRolling\n _coarsen_cls = rolling.DataArrayCoarsen\n _resample_cls = resample.DataArrayResample\n\n __default = ReprObject(\"\")\n\n dt = property(DatetimeAccessor)\n\n def __init__(\n self,\n data: Any = dtypes.NA,\n coords: Union[Sequence[Tuple], Mapping[Hashable, Any], None] = None,\n dims: Union[Hashable, Sequence[Hashable], None] = None,\n name: Hashable = None,\n attrs: Mapping = None,\n # deprecated parameters\n encoding=None,\n # internal parameters\n indexes: Dict[Hashable, pd.Index] = None,\n fastpath: bool = False,\n ):\n \"\"\"\n Parameters\n ----------\n data : array_like\n Values for this array. Must be an ``numpy.ndarray``, ndarray like,\n or castable to an ``ndarray``. If a self-described xarray or pandas\n object, attempts are made to use this array's metadata to fill in\n other unspecified arguments. A view of the array's data is used\n instead of a copy if possible.\n coords : sequence or dict of array_like objects, optional\n Coordinates (tick labels) to use for indexing along each dimension.\n The following notations are accepted:\n\n - mapping {dimension name: array-like}\n - sequence of tuples that are valid arguments for xarray.Variable()\n - (dims, data)\n - (dims, data, attrs)\n - (dims, data, attrs, encoding)\n\n Additionally, it is possible to define a coord whose name\n does not match the dimension name, or a coord based on multiple\n dimensions, with one of the following notations:\n\n - mapping {coord name: DataArray}\n - mapping {coord name: Variable}\n - mapping {coord name: (dimension name, array-like)}\n - mapping {coord name: (tuple of dimension names, array-like)}\n\n dims : hashable or sequence of hashable, optional\n Name(s) of the data dimension(s). Must be either a hashable (only\n for 1D data) or a sequence of hashables with length equal to the\n number of dimensions. 
If this argument is omitted, dimension names\n are taken from ``coords`` (if possible) and otherwise default to\n ``['dim_0', ... 'dim_n']``.\n name : str or None, optional\n Name of this array.\n attrs : dict_like or None, optional\n Attributes to assign to the new instance. By default, an empty\n attribute dictionary is initialized.\n \"\"\"\n if encoding is not None:\n warnings.warn(\n \"The `encoding` argument to `DataArray` is deprecated, and . \"\n \"will be removed in 0.15. \"\n \"Instead, specify the encoding when writing to disk or \"\n \"set the `encoding` attribute directly.\",\n FutureWarning,\n stacklevel=2,\n )\n if fastpath:\n variable = data\n assert dims is None\n assert attrs is None\n assert encoding is None\n else:\n # try to fill in arguments from data if they weren't supplied\n if coords is None:\n\n if isinstance(data, DataArray):\n coords = data.coords\n elif isinstance(data, pd.Series):\n coords = [data.index]\n elif isinstance(data, pd.DataFrame):\n coords = [data.index, data.columns]\n elif isinstance(data, (pd.Index, IndexVariable)):\n coords = [data]\n elif isinstance(data, pdcompat.Panel):\n coords = [data.items, data.major_axis, data.minor_axis]\n\n if dims is None:\n dims = getattr(data, \"dims\", getattr(coords, \"dims\", None))\n if name is None:\n name = getattr(data, \"name\", None)\n if attrs is None:\n attrs = getattr(data, \"attrs\", None)\n if encoding is None:\n encoding = getattr(data, \"encoding\", None)\n\n data = _check_data_shape(data, coords, dims)\n data = as_compatible_data(data)\n coords, dims = _infer_coords_and_dims(data.shape, coords, dims)\n variable = Variable(dims, data, attrs, encoding, fastpath=True)\n\n # These fully describe a DataArray\n self._variable = variable\n assert isinstance(coords, dict)\n self._coords = coords\n self._name = name\n self._accessors = None\n\n # TODO(shoyer): document this argument, once it becomes part of the\n # public interface.\n self._indexes = indexes\n\n self._file_obj = None\n\n def _replace(\n self,\n variable: Variable = None,\n coords=None,\n name: Optional[Hashable] = __default,\n ) -> \"DataArray\":\n if variable is None:\n variable = self.variable\n if coords is None:\n coords = self._coords\n if name is self.__default:\n name = self.name\n return type(self)(variable, coords, name=name, fastpath=True)\n\n def _replace_maybe_drop_dims(\n self, variable: Variable, name: Optional[Hashable] = __default\n ) -> \"DataArray\":\n if variable.dims == self.dims and variable.shape == self.shape:\n coords = self._coords.copy()\n elif variable.dims == self.dims:\n # Shape has changed (e.g. 
from reduce(..., keepdims=True)\n new_sizes = dict(zip(self.dims, variable.shape))\n coords = {\n k: v\n for k, v in self._coords.items()\n if v.shape == tuple(new_sizes[d] for d in v.dims)\n }\n else:\n allowed_dims = set(variable.dims)\n coords = {\n k: v for k, v in self._coords.items() if set(v.dims) <= allowed_dims\n }\n return self._replace(variable, coords, name)\n\n def _overwrite_indexes(self, indexes: Mapping[Hashable, Any]) -> \"DataArray\":\n if not len(indexes):\n return self\n coords = self._coords.copy()\n for name, idx in indexes.items():\n coords[name] = IndexVariable(name, idx)\n obj = self._replace(coords=coords)\n\n # switch from dimension to level names, if necessary\n dim_names: Dict[Any, str] = {}\n for dim, idx in indexes.items():\n if not isinstance(idx, pd.MultiIndex) and idx.name != dim:\n dim_names[dim] = idx.name\n if dim_names:\n obj = obj.rename(dim_names)\n return obj\n\n def _to_temp_dataset(self) -> Dataset:\n return self._to_dataset_whole(name=_THIS_ARRAY, shallow_copy=False)\n\n def _from_temp_dataset(\n self, dataset: Dataset, name: Hashable = __default\n ) -> \"DataArray\":\n variable = dataset._variables.pop(_THIS_ARRAY)\n coords = dataset._variables\n return self._replace(variable, coords, name)\n\n def _to_dataset_split(self, dim: Hashable) -> Dataset:\n def subset(dim, label):\n array = self.loc[{dim: label}]\n if dim in array.coords:\n del array.coords[dim]\n array.attrs = {}\n return array\n\n variables = {label: subset(dim, label) for label in self.get_index(dim)}\n\n coords = self.coords.to_dataset()\n if dim in coords:\n del coords[dim]\n return Dataset(variables, coords, self.attrs)\n\n def _to_dataset_whole(\n self, name: Hashable = None, shallow_copy: bool = True\n ) -> Dataset:\n if name is None:\n name = self.name\n if name is None:\n raise ValueError(\n \"unable to convert unnamed DataArray to a \"\n \"Dataset without providing an explicit name\"\n )\n if name in self.coords:\n raise ValueError(\n \"cannot create a Dataset from a DataArray with \"\n \"the same name as one of its coordinates\"\n )\n # use private APIs for speed: this is called by _to_temp_dataset(),\n # which is used in the guts of a lot of operations (e.g., reindex)\n variables = self._coords.copy()\n variables[name] = self.variable\n if shallow_copy:\n for k in variables:\n variables[k] = variables[k].copy(deep=False)\n coord_names = set(self._coords)\n dataset = Dataset._from_vars_and_coord_names(variables, coord_names)\n return dataset\n\n def to_dataset(self, dim: Hashable = None, *, name: Hashable = None) -> Dataset:\n \"\"\"Convert a DataArray to a Dataset.\n\n Parameters\n ----------\n dim : hashable, optional\n Name of the dimension on this array along which to split this array\n into separate variables. If not provided, this array is converted\n into a Dataset of one variable.\n name : hashable, optional\n Name to substitute for this array's name. Only valid if ``dim`` is\n not provided.\n\n Returns\n -------\n dataset : Dataset\n \"\"\"\n if dim is not None and dim not in self.dims:\n raise TypeError(\n \"{} is not a dim. 
If supplying a ``name``, pass as a kwarg.\".format(dim)\n )\n\n if dim is not None:\n if name is not None:\n raise TypeError(\"cannot supply both dim and name arguments\")\n return self._to_dataset_split(dim)\n else:\n return self._to_dataset_whole(name)\n\n @property\n def name(self) -> Optional[Hashable]:\n \"\"\"The name of this array.\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, value: Optional[Hashable]) -> None:\n self._name = value\n\n @property\n def variable(self) -> Variable:\n \"\"\"Low level interface to the Variable object for this DataArray.\"\"\"\n return self._variable\n\n @property\n def dtype(self) -> np.dtype:\n return self.variable.dtype\n\n @property\n def shape(self) -> Tuple[int, ...]:\n return self.variable.shape\n\n @property\n def size(self) -> int:\n return self.variable.size\n\n @property\n def nbytes(self) -> int:\n return self.variable.nbytes\n\n @property\n def ndim(self) -> int:\n return self.variable.ndim\n\n def __len__(self) -> int:\n return len(self.variable)\n\n @property\n def data(self) -> Any:\n \"\"\"The array's data as a dask or numpy array\n \"\"\"\n return self.variable.data\n\n @data.setter\n def data(self, value: Any) -> None:\n self.variable.data = value\n\n @property\n def values(self) -> np.ndarray:\n \"\"\"The array's data as a numpy.ndarray\"\"\"\n return self.variable.values\n\n @values.setter\n def values(self, value: Any) -> None:\n self.variable.values = value\n\n @property\n def _in_memory(self) -> bool:\n return self.variable._in_memory\n\n def to_index(self) -> pd.Index:\n \"\"\"Convert this variable to a pandas.Index. Only possible for 1D\n arrays.\n \"\"\"\n return self.variable.to_index()\n\n @property\n def dims(self) -> Tuple[Hashable, ...]:\n \"\"\"Tuple of dimension names associated with this array.\n\n Note that the type of this property is inconsistent with\n `Dataset.dims`. See `Dataset.sizes` and `DataArray.sizes` for\n consistently named properties.\n \"\"\"\n return self.variable.dims\n\n @dims.setter\n def dims(self, value):\n raise AttributeError(\n \"you cannot assign dims on a DataArray. 
Use \"\n \".rename() or .swap_dims() instead.\"\n )\n\n def _item_key_to_dict(self, key: Any) -> Mapping[Hashable, Any]:\n if utils.is_dict_like(key):\n return key\n else:\n key = indexing.expanded_indexer(key, self.ndim)\n return dict(zip(self.dims, key))\n\n @property\n def _level_coords(self) -> Dict[Hashable, Hashable]:\n \"\"\"Return a mapping of all MultiIndex levels and their corresponding\n coordinate name.\n \"\"\"\n level_coords: Dict[Hashable, Hashable] = {}\n\n for cname, var in self._coords.items():\n if var.ndim == 1 and isinstance(var, IndexVariable):\n level_names = var.level_names\n if level_names is not None:\n dim, = var.dims\n level_coords.update({lname: dim for lname in level_names})\n return level_coords\n\n def _getitem_coord(self, key):\n from .dataset import _get_virtual_variable\n\n try:\n var = self._coords[key]\n except KeyError:\n dim_sizes = dict(zip(self.dims, self.shape))\n _, key, var = _get_virtual_variable(\n self._coords, key, self._level_coords, dim_sizes\n )\n\n return self._replace_maybe_drop_dims(var, name=key)\n\n def __getitem__(self, key: Any) -> \"DataArray\":\n if isinstance(key, str):\n return self._getitem_coord(key)\n else:\n # xarray-style array indexing\n return self.isel(indexers=self._item_key_to_dict(key))\n\n def __setitem__(self, key: Any, value: Any) -> None:\n if isinstance(key, str):\n self.coords[key] = value\n else:\n # Coordinates in key, value and self[key] should be consistent.\n # TODO Coordinate consistency in key is checked here, but it\n # causes unnecessary indexing. It should be optimized.\n obj = self[key]\n if isinstance(value, DataArray):\n assert_coordinate_consistent(value, obj.coords.variables)\n # DataArray key -> Variable key\n key = {\n k: v.variable if isinstance(v, DataArray) else v\n for k, v in self._item_key_to_dict(key).items()\n }\n self.variable[key] = value\n\n def __delitem__(self, key: Any) -> None:\n del self.coords[key]\n\n @property\n def _attr_sources(self) -> List[Mapping[Hashable, Any]]:\n \"\"\"List of places to look-up items for attribute-style access\n \"\"\"\n return self._item_sources + [self.attrs]\n\n @property\n def _item_sources(self) -> List[Mapping[Hashable, Any]]:\n \"\"\"List of places to look-up items for key-completion\n \"\"\"\n return [\n self.coords,\n {d: self.coords[d] for d in self.dims},\n LevelCoordinatesSource(self),\n ]\n\n def __contains__(self, key: Any) -> bool:\n return key in self.data\n\n @property\n def loc(self) -> _LocIndexer:\n \"\"\"Attribute for location based indexing like pandas.\n \"\"\"\n return _LocIndexer(self)\n\n @property\n def attrs(self) -> Dict[Hashable, Any]:\n \"\"\"Dictionary storing arbitrary metadata with this array.\"\"\"\n return self.variable.attrs\n\n @attrs.setter\n def attrs(self, value: Mapping[Hashable, Any]) -> None:\n # Disable type checking to work around mypy bug - see mypy#4167\n self.variable.attrs = value # type: ignore\n\n @property\n def encoding(self) -> Dict[Hashable, Any]:\n \"\"\"Dictionary of format-specific settings for how this array should be\n serialized.\"\"\"\n return self.variable.encoding\n\n @encoding.setter\n def encoding(self, value: Mapping[Hashable, Any]) -> None:\n self.variable.encoding = value\n\n @property\n def indexes(self) -> Indexes:\n \"\"\"Mapping of pandas.Index objects used for label based indexing\n \"\"\"\n if self._indexes is None:\n self._indexes = default_indexes(self._coords, self.dims)\n return Indexes(self._indexes)\n\n @property\n def coords(self) -> DataArrayCoordinates:\n 
\"\"\"Dictionary-like container of coordinate arrays.\n \"\"\"\n return DataArrayCoordinates(self)\n\n def reset_coords(\n self,\n names: Union[Iterable[Hashable], Hashable, None] = None,\n drop: bool = False,\n inplace: bool = None,\n ) -> Union[None, \"DataArray\", Dataset]:\n \"\"\"Given names of coordinates, reset them to become variables.\n\n Parameters\n ----------\n names : hashable or iterable of hashables, optional\n Name(s) of non-index coordinates in this dataset to reset into\n variables. By default, all non-index coordinates are reset.\n drop : bool, optional\n If True, remove coordinates instead of converting them into\n variables.\n\n Returns\n -------\n Dataset, or DataArray if ``drop == True``\n \"\"\"\n _check_inplace(inplace)\n if names is None:\n names = set(self.coords) - set(self.dims)\n dataset = self.coords.to_dataset().reset_coords(names, drop)\n if drop:\n return self._replace(coords=dataset._variables)\n else:\n if self.name is None:\n raise ValueError(\n \"cannot reset_coords with drop=False on an unnamed DataArrray\"\n )\n dataset[self.name] = self.variable\n return dataset\n\n def __dask_graph__(self):\n return self._to_temp_dataset().__dask_graph__()\n\n def __dask_keys__(self):\n return self._to_temp_dataset().__dask_keys__()\n\n def __dask_layers__(self):\n return self._to_temp_dataset().__dask_layers__()\n\n @property\n def __dask_optimize__(self):\n return self._to_temp_dataset().__dask_optimize__\n\n @property\n def __dask_scheduler__(self):\n return self._to_temp_dataset().__dask_scheduler__\n\n def __dask_postcompute__(self):\n func, args = self._to_temp_dataset().__dask_postcompute__()\n return self._dask_finalize, (func, args, self.name)\n\n def __dask_postpersist__(self):\n func, args = self._to_temp_dataset().__dask_postpersist__()\n return self._dask_finalize, (func, args, self.name)\n\n @staticmethod\n def _dask_finalize(results, func, args, name):\n ds = func(results, *args)\n variable = ds._variables.pop(_THIS_ARRAY)\n coords = ds._variables\n return DataArray(variable, coords, name=name, fastpath=True)\n\n def load(self, **kwargs) -> \"DataArray\":\n \"\"\"Manually trigger loading of this array's data from disk or a\n remote source into memory and return this array.\n\n Normally, it should not be necessary to call this method in user code,\n because all xarray functions should either work on deferred data or\n load data automatically. However, this method can be necessary when\n working with many file objects on disk.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.array.compute``.\n\n See Also\n --------\n dask.array.compute\n \"\"\"\n ds = self._to_temp_dataset().load(**kwargs)\n new = self._from_temp_dataset(ds)\n self._variable = new._variable\n self._coords = new._coords\n return self\n\n def compute(self, **kwargs) -> \"DataArray\":\n \"\"\"Manually trigger loading of this array's data from disk or a\n remote source into memory and return a new array. The original is\n left unaltered.\n\n Normally, it should not be necessary to call this method in user code,\n because all xarray functions should either work on deferred data or\n load data automatically. 
However, this method can be necessary when\n working with many file objects on disk.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.array.compute``.\n\n See Also\n --------\n dask.array.compute\n \"\"\"\n new = self.copy(deep=False)\n return new.load(**kwargs)\n\n def persist(self, **kwargs) -> \"DataArray\":\n \"\"\" Trigger computation in constituent dask arrays\n\n This keeps them as dask arrays but encourages them to keep data in\n memory. This is particularly useful when on a distributed machine.\n When on a single machine consider using ``.compute()`` instead.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.persist``.\n\n See Also\n --------\n dask.persist\n \"\"\"\n ds = self._to_temp_dataset().persist(**kwargs)\n return self._from_temp_dataset(ds)\n\n def copy(self, deep: bool = True, data: Any = None) -> \"DataArray\":\n \"\"\"Returns a copy of this array.\n\n If `deep=True`, a deep copy is made of the data array.\n Otherwise, a shallow copy is made, so each variable in the new\n array's dataset is also a variable in this array's dataset.\n\n Use `data` to create a new object with the same structure as\n original but entirely new data.\n\n Parameters\n ----------\n deep : bool, optional\n Whether the data array and its coordinates are loaded into memory\n and copied onto the new object. Default is True.\n data : array_like, optional\n Data to use in the new object. Must have same shape as original.\n When `data` is used, `deep` is ignored for all data variables,\n and only used for coords.\n\n Returns\n -------\n object : DataArray\n New object with dimensions, attributes, coordinates, name,\n encoding, and optionally data copied from original.\n\n Examples\n --------\n\n Shallow versus deep copy\n\n >>> array = xr.DataArray([1, 2, 3], dims='x',\n ... coords={'x': ['a', 'b', 'c']})\n >>> array.copy()\n \n array([1, 2, 3])\n Coordinates:\n * x (x) >> array_0 = array.copy(deep=False)\n >>> array_0[0] = 7\n >>> array_0\n \n array([7, 2, 3])\n Coordinates:\n * x (x) >> array\n \n array([7, 2, 3])\n Coordinates:\n * x (x) >> array.copy(data=[0.1, 0.2, 0.3])\n \n array([ 0.1, 0.2, 0.3])\n Coordinates:\n * x (x) >> array\n \n array([1, 2, 3])\n Coordinates:\n * x (x) \"DataArray\":\n return self.copy(deep=False)\n\n def __deepcopy__(self, memo=None) -> \"DataArray\":\n # memo does nothing but is required for compatibility with\n # copy.deepcopy\n return self.copy(deep=True)\n\n # mutable objects should not be hashable\n # https://github.com/python/mypy/issues/4266\n __hash__ = None # type: ignore\n\n @property\n def chunks(self) -> Optional[Tuple[Tuple[int, ...], ...]]:\n \"\"\"Block dimensions for this array's data or None if it's not a dask\n array.\n \"\"\"\n return self.variable.chunks\n\n def chunk(\n self,\n chunks: Union[\n None,\n Number,\n Tuple[Number, ...],\n Tuple[Tuple[Number, ...], ...],\n Mapping[Hashable, Union[None, Number, Tuple[Number, ...]]],\n ] = None,\n name_prefix: str = \"xarray-\",\n token: str = None,\n lock: bool = False,\n ) -> \"DataArray\":\n \"\"\"Coerce this array's data into a dask arrays with the given chunks.\n\n If this variable is a non-dask array, it will be converted to dask\n array. 
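For example, the following converts a NumPy-backed array into a dask-backed one with blocks of size 2 (a minimal sketch, assuming dask is installed)::

    >>> da = xr.DataArray(np.arange(6), dims='x')
    >>> da.chunk({'x': 2}).chunks
    ((2, 2, 2),)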
If it's a dask array, it will be rechunked to the given chunk\n sizes.\n\n If neither chunks is not provided for one or more dimensions, chunk\n sizes along that dimension will not be updated; non-dask arrays will be\n converted into dask arrays with a single block.\n\n Parameters\n ----------\n chunks : int, tuple or mapping, optional\n Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or\n ``{'x': 5, 'y': 5}``.\n name_prefix : str, optional\n Prefix for the name of the new dask array.\n token : str, optional\n Token uniquely identifying this array.\n lock : optional\n Passed on to :py:func:`dask.array.from_array`, if the array is not\n already as dask array.\n\n Returns\n -------\n chunked : xarray.DataArray\n \"\"\"\n if isinstance(chunks, (tuple, list)):\n chunks = dict(zip(self.dims, chunks))\n\n ds = self._to_temp_dataset().chunk(\n chunks, name_prefix=name_prefix, token=token, lock=lock\n )\n return self._from_temp_dataset(ds)\n\n def isel(\n self,\n indexers: Mapping[Hashable, Any] = None,\n drop: bool = False,\n **indexers_kwargs: Any\n ) -> \"DataArray\":\n \"\"\"Return a new DataArray whose data is given by integer indexing\n along the specified dimension(s).\n\n See Also\n --------\n Dataset.isel\n DataArray.sel\n \"\"\"\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"isel\")\n ds = self._to_temp_dataset().isel(drop=drop, indexers=indexers)\n return self._from_temp_dataset(ds)\n\n def sel(\n self,\n indexers: Mapping[Hashable, Any] = None,\n method: str = None,\n tolerance=None,\n drop: bool = False,\n **indexers_kwargs: Any\n ) -> \"DataArray\":\n \"\"\"Return a new DataArray whose data is given by selecting index\n labels along the specified dimension(s).\n\n .. warning::\n\n Do not try to assign values when using any of the indexing methods\n ``isel`` or ``sel``::\n\n da = xr.DataArray([0, 1, 2, 3], dims=['x'])\n # DO NOT do this\n da.isel(x=[0, 1, 2])[1] = -1\n\n Assigning values with the chained indexing using ``.sel`` or\n ``.isel`` fails silently.\n\n See Also\n --------\n Dataset.sel\n DataArray.isel\n\n \"\"\"\n ds = self._to_temp_dataset().sel(\n indexers=indexers,\n drop=drop,\n method=method,\n tolerance=tolerance,\n **indexers_kwargs\n )\n return self._from_temp_dataset(ds)\n\n def head(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any\n ) -> \"DataArray\":\n \"\"\"Return a new DataArray whose data is given by the the first `n`\n values along the specified dimension(s). Default `n` = 5\n\n See Also\n --------\n Dataset.head\n DataArray.tail\n DataArray.thin\n \"\"\"\n ds = self._to_temp_dataset().head(indexers, **indexers_kwargs)\n return self._from_temp_dataset(ds)\n\n def tail(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any\n ) -> \"DataArray\":\n \"\"\"Return a new DataArray whose data is given by the the last `n`\n values along the specified dimension(s). Default `n` = 5\n\n See Also\n --------\n Dataset.tail\n DataArray.head\n DataArray.thin\n \"\"\"\n ds = self._to_temp_dataset().tail(indexers, **indexers_kwargs)\n return self._from_temp_dataset(ds)\n\n def thin(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any\n ) -> \"DataArray\":\n \"\"\"Return a new DataArray whose data is given by each `n` value\n along the specified dimension(s). 
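For example, ``da.thin(x=2)`` keeps every second value along ``x``.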
Default `n` = 5\n\n See Also\n --------\n Dataset.thin\n DataArray.head\n DataArray.tail\n \"\"\"\n ds = self._to_temp_dataset().thin(indexers, **indexers_kwargs)\n return self._from_temp_dataset(ds)\n\n def broadcast_like(\n self, other: Union[\"DataArray\", Dataset], exclude: Iterable[Hashable] = None\n ) -> \"DataArray\":\n \"\"\"Broadcast this DataArray against another Dataset or DataArray.\n\n This is equivalent to xr.broadcast(other, self)[1]\n\n xarray objects are broadcast against each other in arithmetic\n operations, so this method is not be necessary for most uses.\n\n If no change is needed, the input data is returned to the output\n without being copied.\n\n If new coords are added by the broadcast, their values are\n NaN filled.\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object against which to broadcast this array.\n exclude : iterable of hashable, optional\n Dimensions that must not be broadcasted\n\n Returns\n -------\n new_da: xr.DataArray\n\n Examples\n --------\n\n >>> arr1\n \n array([[0.840235, 0.215216, 0.77917 ],\n [0.726351, 0.543824, 0.875115]])\n Coordinates:\n * x (x) >> arr2\n \n array([[0.612611, 0.125753],\n [0.853181, 0.948818],\n [0.180885, 0.33363 ]])\n Coordinates:\n * x (x) >> arr1.broadcast_like(arr2)\n \n array([[0.840235, 0.215216, 0.77917 ],\n [0.726351, 0.543824, 0.875115],\n [ nan, nan, nan]])\n Coordinates:\n * x (x) object 'a' 'b' 'c'\n * y (y) object 'a' 'b' 'c'\n \"\"\"\n if exclude is None:\n exclude = set()\n else:\n exclude = set(exclude)\n args = align(other, self, join=\"outer\", copy=False, exclude=exclude)\n\n dims_map, common_coords = _get_broadcast_dims_map_common_coords(args, exclude)\n\n return _broadcast_helper(args[1], exclude, dims_map, common_coords)\n\n def reindex_like(\n self,\n other: Union[\"DataArray\", Dataset],\n method: str = None,\n tolerance=None,\n copy: bool = True,\n fill_value=dtypes.NA,\n ) -> \"DataArray\":\n \"\"\"Conform this object onto the indexes of another object, filling in\n missing values with ``fill_value``. The default fill value is NaN.\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object with an 'indexes' attribute giving a mapping from dimension\n names to pandas.Index objects, which provides coordinates upon\n which to index the variables in this dataset. The indexes on this\n other object need not be the same as the indexes on this\n dataset. Any mis-matched index values will be filled in with\n NaN, and any mis-matched dimension names will simply be ignored.\n method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional\n Method to use for filling index values from other not found on this\n data array:\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n copy : bool, optional\n If ``copy=True``, data in the return value is always copied. If\n ``copy=False`` and reindexing is unnecessary, or can be performed\n with only slice operations, then the output may share memory with\n the input. 
In either case, a new xarray object is always returned.\n fill_value : scalar, optional\n Value to use for newly missing values\n\n Returns\n -------\n reindexed : DataArray\n Another dataset array, with this array's data but coordinates from\n the other object.\n\n See Also\n --------\n DataArray.reindex\n align\n \"\"\"\n indexers = reindex_like_indexers(self, other)\n return self.reindex(\n indexers=indexers,\n method=method,\n tolerance=tolerance,\n copy=copy,\n fill_value=fill_value,\n )\n\n def reindex(\n self,\n indexers: Mapping[Hashable, Any] = None,\n method: str = None,\n tolerance=None,\n copy: bool = True,\n fill_value=dtypes.NA,\n **indexers_kwargs: Any\n ) -> \"DataArray\":\n \"\"\"Conform this object onto the indexes of another object, filling in\n missing values with ``fill_value``. The default fill value is NaN.\n\n Parameters\n ----------\n indexers : dict, optional\n Dictionary with keys given by dimension names and values given by\n arrays of coordinates tick labels. Any mis-matched coordinate\n values will be filled in with NaN, and any mis-matched dimension\n names will simply be ignored.\n One of indexers or indexers_kwargs must be provided.\n copy : bool, optional\n If ``copy=True``, data in the return value is always copied. If\n ``copy=False`` and reindexing is unnecessary, or can be performed\n with only slice operations, then the output may share memory with\n the input. In either case, a new xarray object is always returned.\n method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional\n Method to use for filling index values in ``indexers`` not found on\n this data array:\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n fill_value : scalar, optional\n Value to use for newly missing values\n **indexers_kwarg : {dim: indexer, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n Returns\n -------\n reindexed : DataArray\n Another dataset array, with this array's data but replaced\n coordinates.\n\n See Also\n --------\n DataArray.reindex_like\n align\n \"\"\"\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"reindex\")\n ds = self._to_temp_dataset().reindex(\n indexers=indexers,\n method=method,\n tolerance=tolerance,\n copy=copy,\n fill_value=fill_value,\n )\n return self._from_temp_dataset(ds)\n\n def interp(\n self,\n coords: Mapping[Hashable, Any] = None,\n method: str = \"linear\",\n assume_sorted: bool = False,\n kwargs: Mapping[str, Any] = None,\n **coords_kwargs: Any\n ) -> \"DataArray\":\n \"\"\" Multidimensional interpolation of variables.\n\n coords : dict, optional\n Mapping from dimension names to the new coordinates.\n new coordinate can be an scalar, array-like or DataArray.\n If DataArrays are passed as new coordates, their dimensions are\n used for the broadcasting.\n method: {'linear', 'nearest'} for multidimensional array,\n {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}\n for 1-dimensional array.\n assume_sorted: boolean, optional\n If False, values of x can be in any order and they are sorted\n first. 
If True, x has to be an array of monotonically increasing\n values.\n kwargs: dictionary\n Additional keyword passed to scipy's interpolator.\n **coords_kwarg : {dim: coordinate, ...}, optional\n The keyword arguments form of ``coords``.\n One of coords or coords_kwargs must be provided.\n\n Returns\n -------\n interpolated: xr.DataArray\n New dataarray on the new coordinates.\n\n Notes\n -----\n scipy is required.\n\n See Also\n --------\n scipy.interpolate.interp1d\n scipy.interpolate.interpn\n\n Examples\n --------\n >>> da = xr.DataArray([1, 3], [('x', np.arange(2))])\n >>> da.interp(x=0.5)\n \n array(2.0)\n Coordinates:\n x float64 0.5\n \"\"\"\n if self.dtype.kind not in \"uifc\":\n raise TypeError(\n \"interp only works for a numeric type array. \"\n \"Given {}.\".format(self.dtype)\n )\n ds = self._to_temp_dataset().interp(\n coords,\n method=method,\n kwargs=kwargs,\n assume_sorted=assume_sorted,\n **coords_kwargs\n )\n return self._from_temp_dataset(ds)\n\n def interp_like(\n self,\n other: Union[\"DataArray\", Dataset],\n method: str = \"linear\",\n assume_sorted: bool = False,\n kwargs: Mapping[str, Any] = None,\n ) -> \"DataArray\":\n \"\"\"Interpolate this object onto the coordinates of another object,\n filling out of range values with NaN.\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object with an 'indexes' attribute giving a mapping from dimension\n names to an 1d array-like, which provides coordinates upon\n which to index the variables in this dataset.\n method: string, optional.\n {'linear', 'nearest'} for multidimensional array,\n {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}\n for 1-dimensional array. 'linear' is used by default.\n assume_sorted: boolean, optional\n If False, values of coordinates that are interpolated over can be\n in any order and they are sorted first. If True, interpolated\n coordinates are assumed to be an array of monotonically increasing\n values.\n kwargs: dictionary, optional\n Additional keyword passed to scipy's interpolator.\n\n Returns\n -------\n interpolated: xr.DataArray\n Another dataarray by interpolating this dataarray's data along the\n coordinates of the other object.\n\n Notes\n -----\n scipy is required.\n If the dataarray has object-type coordinates, reindex is used for these\n coordinates instead of the interpolation.\n\n See Also\n --------\n DataArray.interp\n DataArray.reindex_like\n \"\"\"\n if self.dtype.kind not in \"uifc\":\n raise TypeError(\n \"interp only works for a numeric type array. \"\n \"Given {}.\".format(self.dtype)\n )\n ds = self._to_temp_dataset().interp_like(\n other, method=method, kwargs=kwargs, assume_sorted=assume_sorted\n )\n return self._from_temp_dataset(ds)\n\n def rename(\n self,\n new_name_or_name_dict: Union[Hashable, Mapping[Hashable, Hashable]] = None,\n **names: Hashable\n ) -> \"DataArray\":\n \"\"\"Returns a new DataArray with renamed coordinates or a new name.\n\n Parameters\n ----------\n new_name_or_name_dict : str or dict-like, optional\n If the argument is dict-like, it used as a mapping from old\n names to new names for coordinates. 
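For example, ``arr.rename({'x': 'lon'})`` renames the coordinate ``x`` to ``lon``.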
Otherwise, use the argument\n as the new name for this array.\n **names: hashable, optional\n The keyword arguments form of a mapping from old names to\n new names for coordinates.\n One of new_name_or_name_dict or names must be provided.\n\n Returns\n -------\n renamed : DataArray\n Renamed array or array with renamed coordinates.\n\n See Also\n --------\n Dataset.rename\n DataArray.swap_dims\n \"\"\"\n if names or utils.is_dict_like(new_name_or_name_dict):\n new_name_or_name_dict = cast(\n Mapping[Hashable, Hashable], new_name_or_name_dict\n )\n name_dict = either_dict_or_kwargs(new_name_or_name_dict, names, \"rename\")\n dataset = self._to_temp_dataset().rename(name_dict)\n return self._from_temp_dataset(dataset)\n else:\n new_name_or_name_dict = cast(Hashable, new_name_or_name_dict)\n return self._replace(name=new_name_or_name_dict)\n\n def swap_dims(self, dims_dict: Mapping[Hashable, Hashable]) -> \"DataArray\":\n \"\"\"Returns a new DataArray with swapped dimensions.\n\n Parameters\n ----------\n dims_dict : dict-like\n Dictionary whose keys are current dimension names and whose values\n are new names. Each value must already be a coordinate on this\n array.\n\n Returns\n -------\n swapped : DataArray\n DataArray with swapped dimensions.\n\n Examples\n --------\n >>> arr = xr.DataArray(data=[0, 1], dims=\"x\",\n coords={\"x\": [\"a\", \"b\"], \"y\": (\"x\", [0, 1])})\n >>> arr\n \n array([0, 1])\n Coordinates:\n * x (x) >> arr.swap_dims({\"x\": \"y\"})\n \n array([0, 1])\n Coordinates:\n x (y) \"DataArray\":\n \"\"\"Return a new object with an additional axis (or axes) inserted at\n the corresponding position in the array shape. The new object is a\n view into the underlying array, not a copy.\n\n\n If dim is already a scalar coordinate, it will be promoted to a 1D\n coordinate consisting of a single value.\n\n Parameters\n ----------\n dim : hashable, sequence of hashable, dict, or None\n Dimensions to include on the new variable.\n If provided as str or sequence of str, then dimensions are inserted\n with length 1. If provided as a dict, then the keys are the new\n dimensions and the values are either integers (giving the length of\n the new dimensions) or sequence/ndarray (giving the coordinates of\n the new dimensions).\n axis : integer, list (or tuple) of integers, or None\n Axis position(s) where new axis is to be inserted (position(s) on\n the result array). If a list (or tuple) of integers is passed,\n multiple axes are inserted. In this case, dim arguments should be\n same length list. If axis=None is passed, all the axes will be\n inserted to the start of the result array.\n **dim_kwargs : int or sequence/ndarray\n The keywords are arbitrary dimensions being inserted and the values\n are either the lengths of the new dims (if int is given), or their\n coordinates. 
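For example, ``da.expand_dims(time=3)`` inserts a new leading ``time`` dimension of length 3.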
Note, this is an alternative to passing a dict to the\n dim kwarg and will only be used if dim is None.\n\n Returns\n -------\n expanded : same type as caller\n This object, but with an additional dimension(s).\n \"\"\"\n if isinstance(dim, int):\n raise TypeError(\"dim should be hashable or sequence/mapping of hashables\")\n elif isinstance(dim, Sequence) and not isinstance(dim, str):\n if len(dim) != len(set(dim)):\n raise ValueError(\"dims should not contain duplicate values.\")\n dim = dict.fromkeys(dim, 1)\n elif dim is not None and not isinstance(dim, Mapping):\n dim = {cast(Hashable, dim): 1}\n\n dim = either_dict_or_kwargs(dim, dim_kwargs, \"expand_dims\")\n ds = self._to_temp_dataset().expand_dims(dim, axis)\n return self._from_temp_dataset(ds)\n\n def set_index(\n self,\n indexes: Mapping[Hashable, Union[Hashable, Sequence[Hashable]]] = None,\n append: bool = False,\n inplace: bool = None,\n **indexes_kwargs: Union[Hashable, Sequence[Hashable]]\n ) -> Optional[\"DataArray\"]:\n \"\"\"Set DataArray (multi-)indexes using one or more existing\n coordinates.\n\n Parameters\n ----------\n indexes : {dim: index, ...}\n Mapping from names matching dimensions and values given\n by (lists of) the names of existing coordinates or variables to set\n as new (multi-)index.\n append : bool, optional\n If True, append the supplied index(es) to the existing index(es).\n Otherwise replace the existing index(es) (default).\n **indexes_kwargs: optional\n The keyword arguments form of ``indexes``.\n One of indexes or indexes_kwargs must be provided.\n\n Returns\n -------\n obj : DataArray\n Another DataArray, with this data but replaced coordinates.\n\n Examples\n --------\n >>> arr = xr.DataArray(data=np.ones((2, 3)),\n ... dims=['x', 'y'],\n ... coords={'x':\n ... range(2), 'y':\n ... range(3), 'a': ('x', [3, 4])\n ... 
})\n >>> arr\n \n array([[1., 1., 1.],\n [1., 1., 1.]])\n Coordinates:\n * x (x) int64 0 1\n * y (y) int64 0 1 2\n a (x) int64 3 4\n >>> arr.set_index(x='a')\n \n array([[1., 1., 1.],\n [1., 1., 1.]])\n Coordinates:\n * x (x) int64 3 4\n * y (y) int64 0 1 2\n\n See Also\n --------\n DataArray.reset_index\n \"\"\"\n _check_inplace(inplace)\n indexes = either_dict_or_kwargs(indexes, indexes_kwargs, \"set_index\")\n coords, _ = merge_indexes(indexes, self._coords, set(), append=append)\n return self._replace(coords=coords)\n\n def reset_index(\n self,\n dims_or_levels: Union[Hashable, Sequence[Hashable]],\n drop: bool = False,\n inplace: bool = None,\n ) -> Optional[\"DataArray\"]:\n \"\"\"Reset the specified index(es) or multi-index level(s).\n\n Parameters\n ----------\n dims_or_levels : hashable or sequence of hashables\n Name(s) of the dimension(s) and/or multi-index level(s) that will\n be reset.\n drop : bool, optional\n If True, remove the specified indexes and/or multi-index levels\n instead of extracting them as new coordinates (default: False).\n\n Returns\n -------\n obj : DataArray\n Another dataarray, with this dataarray's data but replaced\n coordinates.\n\n See Also\n --------\n DataArray.set_index\n \"\"\"\n _check_inplace(inplace)\n coords, _ = split_indexes(\n dims_or_levels, self._coords, set(), self._level_coords, drop=drop\n )\n return self._replace(coords=coords)\n\n def reorder_levels(\n self,\n dim_order: Mapping[Hashable, Sequence[int]] = None,\n inplace: bool = None,\n **dim_order_kwargs: Sequence[int]\n ) -> \"DataArray\":\n \"\"\"Rearrange index levels using input order.\n\n Parameters\n ----------\n dim_order : optional\n Mapping from names matching dimensions and values given\n by lists representing new level orders. Every given dimension\n must have a multi-index.\n **dim_order_kwargs: optional\n The keyword arguments form of ``dim_order``.\n One of dim_order or dim_order_kwargs must be provided.\n\n Returns\n -------\n obj : DataArray\n Another dataarray, with this dataarray's data but replaced\n coordinates.\n \"\"\"\n _check_inplace(inplace)\n dim_order = either_dict_or_kwargs(dim_order, dim_order_kwargs, \"reorder_levels\")\n replace_coords = {}\n for dim, order in dim_order.items():\n coord = self._coords[dim]\n index = coord.to_index()\n if not isinstance(index, pd.MultiIndex):\n raise ValueError(\"coordinate %r has no MultiIndex\" % dim)\n replace_coords[dim] = IndexVariable(coord.dims, index.reorder_levels(order))\n coords = self._coords.copy()\n coords.update(replace_coords)\n return self._replace(coords=coords)\n\n def stack(\n self,\n dimensions: Mapping[Hashable, Sequence[Hashable]] = None,\n **dimensions_kwargs: Sequence[Hashable]\n ) -> \"DataArray\":\n \"\"\"\n Stack any number of existing dimensions into a single new dimension.\n\n New dimensions will be added at the end, and the corresponding\n coordinate variables will be combined into a MultiIndex.\n\n Parameters\n ----------\n dimensions : Mapping of the form new_name=(dim1, dim2, ...)\n Names of new dimensions, and the existing dimensions that they\n replace.\n **dimensions_kwargs:\n The keyword arguments form of ``dimensions``.\n One of dimensions or dimensions_kwargs must be provided.\n\n Returns\n -------\n stacked : DataArray\n DataArray with stacked data.\n\n Examples\n --------\n\n >>> arr = DataArray(np.arange(6).reshape(2, 3),\n ... 
coords=[('x', ['a', 'b']), ('y', [0, 1, 2])])\n >>> arr\n \n array([[0, 1, 2],\n [3, 4, 5]])\n Coordinates:\n * x (x) |S1 'a' 'b'\n * y (y) int64 0 1 2\n >>> stacked = arr.stack(z=('x', 'y'))\n >>> stacked.indexes['z']\n MultiIndex(levels=[['a', 'b'], [0, 1, 2]],\n codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],\n names=['x', 'y'])\n\n See also\n --------\n DataArray.unstack\n \"\"\"\n ds = self._to_temp_dataset().stack(dimensions, **dimensions_kwargs)\n return self._from_temp_dataset(ds)\n\n def unstack(\n self, dim: Union[Hashable, Sequence[Hashable], None] = None\n ) -> \"DataArray\":\n \"\"\"\n Unstack existing dimensions corresponding to MultiIndexes into\n multiple new dimensions.\n\n New dimensions will be added at the end.\n\n Parameters\n ----------\n dim : hashable or sequence of hashable, optional\n Dimension(s) over which to unstack. By default unstacks all\n MultiIndexes.\n\n Returns\n -------\n unstacked : DataArray\n Array with unstacked data.\n\n Examples\n --------\n\n >>> arr = DataArray(np.arange(6).reshape(2, 3),\n ... coords=[('x', ['a', 'b']), ('y', [0, 1, 2])])\n >>> arr\n \n array([[0, 1, 2],\n [3, 4, 5]])\n Coordinates:\n * x (x) |S1 'a' 'b'\n * y (y) int64 0 1 2\n >>> stacked = arr.stack(z=('x', 'y'))\n >>> stacked.indexes['z']\n MultiIndex(levels=[['a', 'b'], [0, 1, 2]],\n codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],\n names=['x', 'y'])\n >>> roundtripped = stacked.unstack()\n >>> arr.identical(roundtripped)\n True\n\n See also\n --------\n DataArray.stack\n \"\"\"\n ds = self._to_temp_dataset().unstack(dim)\n return self._from_temp_dataset(ds)\n\n def to_unstacked_dataset(self, dim, level=0):\n \"\"\"Unstack DataArray expanding to Dataset along a given level of a\n stacked coordinate.\n\n This is the inverse operation of Dataset.to_stacked_array.\n\n Parameters\n ----------\n dim : str\n Name of existing dimension to unstack\n level : int or str\n The MultiIndex level to expand to a dataset along. Can either be\n the integer index of the level or its name.\n label : int, default 0\n Label of the level to expand dataset along. Overrides the label\n argument if given.\n\n Returns\n -------\n unstacked: Dataset\n\n Examples\n --------\n >>> import xarray as xr\n >>> arr = DataArray(np.arange(6).reshape(2, 3),\n ... coords=[('x', ['a', 'b']), ('y', [0, 1, 2])])\n >>> data = xr.Dataset({'a': arr, 'b': arr.isel(y=0)})\n >>> data\n \n Dimensions: (x: 2, y: 3)\n Coordinates:\n * x (x) >> stacked = data.to_stacked_array(\"z\", ['y'])\n >>> stacked.indexes['z']\n MultiIndex(levels=[['a', 'b'], [0, 1, 2]],\n labels=[[0, 0, 0, 1], [0, 1, 2, -1]],\n names=['variable', 'y'])\n >>> roundtripped = stacked.to_unstacked_dataset(dim='z')\n >>> data.identical(roundtripped)\n True\n\n See Also\n --------\n Dataset.to_stacked_array\n \"\"\"\n\n idx = self.indexes[dim]\n if not isinstance(idx, pd.MultiIndex):\n raise ValueError(\"'{}' is not a stacked coordinate\".format(dim))\n\n level_number = idx._get_level_number(level)\n variables = idx.levels[level_number]\n variable_dim = idx.names[level_number]\n\n # pull variables out of datarray\n data_dict = {}\n for k in variables:\n data_dict[k] = self.sel({variable_dim: k}).squeeze(drop=True)\n\n # unstacked dataset\n return Dataset(data_dict)\n\n def transpose(self, *dims: Hashable, transpose_coords: bool = None) -> \"DataArray\":\n \"\"\"Return a new DataArray object with transposed dimensions.\n\n Parameters\n ----------\n *dims : hashable, optional\n By default, reverse the dimensions. 
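For example, ``arr.transpose()`` on an array with dims ``('x', 'y', 'z')`` returns an array with dims ``('z', 'y', 'x')``.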
Otherwise, reorder the\n dimensions to this order.\n transpose_coords : boolean, optional\n If True, also transpose the coordinates of this DataArray.\n\n Returns\n -------\n transposed : DataArray\n The returned DataArray's array is transposed.\n\n Notes\n -----\n This operation returns a view of this array's data. It is\n lazy for dask-backed DataArrays but not for numpy-backed DataArrays\n -- the data will be fully loaded.\n\n See Also\n --------\n numpy.transpose\n Dataset.transpose\n \"\"\"\n if dims:\n if set(dims) ^ set(self.dims):\n raise ValueError(\n \"arguments to transpose (%s) must be \"\n \"permuted array dimensions (%s)\" % (dims, tuple(self.dims))\n )\n\n variable = self.variable.transpose(*dims)\n if transpose_coords:\n coords: Dict[Hashable, Variable] = {}\n for name, coord in self.coords.items():\n coord_dims = tuple(dim for dim in dims if dim in coord.dims)\n coords[name] = coord.variable.transpose(*coord_dims)\n return self._replace(variable, coords)\n else:\n if transpose_coords is None and any(self[c].ndim > 1 for c in self.coords):\n warnings.warn(\n \"This DataArray contains multi-dimensional \"\n \"coordinates. In the future, these coordinates \"\n \"will be transposed as well unless you specify \"\n \"transpose_coords=False.\",\n FutureWarning,\n stacklevel=2,\n )\n return self._replace(variable)\n\n @property\n def T(self) -> \"DataArray\":\n return self.transpose()\n\n # Drop coords\n @overload\n def drop(\n self, labels: Union[Hashable, Iterable[Hashable]], *, errors: str = \"raise\"\n ) -> \"DataArray\":\n ...\n\n # Drop index labels along dimension\n @overload # noqa: F811\n def drop(\n self, labels: Any, dim: Hashable, *, errors: str = \"raise\" # array-like\n ) -> \"DataArray\":\n ...\n\n def drop(self, labels, dim=None, *, errors=\"raise\"): # noqa: F811\n \"\"\"Drop coordinates or index labels from this DataArray.\n\n Parameters\n ----------\n labels : hashable or sequence of hashables\n Name(s) of coordinates or index labels to drop.\n If dim is not None, labels can be any array-like.\n dim : hashable, optional\n Dimension along which to drop index labels. By default (if\n ``dim is None``), drops coordinates rather than index labels.\n errors: {'raise', 'ignore'}, optional\n If 'raise' (default), raises a ValueError error if\n any of the coordinates or index labels passed are not\n in the array. If 'ignore', any given labels that are in the\n array are dropped and no error is raised.\n Returns\n -------\n dropped : DataArray\n \"\"\"\n ds = self._to_temp_dataset().drop(labels, dim, errors=errors)\n return self._from_temp_dataset(ds)\n\n def dropna(\n self, dim: Hashable, how: str = \"any\", thresh: int = None\n ) -> \"DataArray\":\n \"\"\"Returns a new array with dropped labels for missing values along\n the provided dimension.\n\n Parameters\n ----------\n dim : hashable\n Dimension along which to drop missing values. 
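For example, ``da.dropna('time')`` drops every ``time`` label for which any value is NaN (under the default ``how='any'``).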
Dropping along\n multiple dimensions simultaneously is not yet supported.\n how : {'any', 'all'}, optional\n * any : if any NA values are present, drop that label\n * all : if all values are NA, drop that label\n thresh : int, default None\n If supplied, require this many non-NA values.\n\n Returns\n -------\n DataArray\n \"\"\"\n ds = self._to_temp_dataset().dropna(dim, how=how, thresh=thresh)\n return self._from_temp_dataset(ds)\n\n def fillna(self, value: Any) -> \"DataArray\":\n \"\"\"Fill missing values in this object.\n\n This operation follows the normal broadcasting and alignment rules that\n xarray uses for binary arithmetic, except the result is aligned to this\n object (``join='left'``) instead of aligned to the intersection of\n index coordinates (``join='inner'``).\n\n Parameters\n ----------\n value : scalar, ndarray or DataArray\n Used to fill all matching missing values in this array. If the\n argument is a DataArray, it is first aligned with (reindexed to)\n this array.\n\n Returns\n -------\n DataArray\n \"\"\"\n if utils.is_dict_like(value):\n raise TypeError(\n \"cannot provide fill value as a dictionary with \"\n \"fillna on a DataArray\"\n )\n out = ops.fillna(self, value)\n return out\n\n def interpolate_na(\n self,\n dim=None,\n method: str = \"linear\",\n limit: int = None,\n use_coordinate: Union[bool, str] = True,\n **kwargs: Any\n ) -> \"DataArray\":\n \"\"\"Interpolate values according to different methods.\n\n Parameters\n ----------\n dim : str\n Specifies the dimension along which to interpolate.\n method : {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',\n 'polynomial', 'barycentric', 'krog', 'pchip',\n 'spline', 'akima'}, optional\n String indicating which method to use for interpolation:\n\n - 'linear': linear interpolation (Default). Additional keyword\n arguments are passed to ``numpy.interp``\n - 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',\n 'polynomial': are passed to ``scipy.interpolate.interp1d``. If\n method=='polynomial', the ``order`` keyword argument must also be\n provided.\n - 'barycentric', 'krog', 'pchip', 'spline', and `akima`: use their\n respective``scipy.interpolate`` classes.\n use_coordinate : boolean or str, default True\n Specifies which index to use as the x values in the interpolation\n formulated as `y = f(x)`. If False, values are treated as if\n eqaully-spaced along `dim`. If True, the IndexVariable `dim` is\n used. If use_coordinate is a string, it specifies the name of a\n coordinate variariable to use as the index.\n limit : int, default None\n Maximum number of consecutive NaNs to fill. Must be greater than 0\n or None for no limit.\n\n Returns\n -------\n DataArray\n\n See also\n --------\n numpy.interp\n scipy.interpolate\n \"\"\"\n from .missing import interp_na\n\n return interp_na(\n self,\n dim=dim,\n method=method,\n limit=limit,\n use_coordinate=use_coordinate,\n **kwargs\n )\n\n def ffill(self, dim: Hashable, limit: int = None) -> \"DataArray\":\n \"\"\"Fill NaN values by propogating values forward\n\n *Requires bottleneck.*\n\n Parameters\n ----------\n dim : hashable\n Specifies the dimension along which to propagate values when\n filling.\n limit : int, default None\n The maximum number of consecutive NaN values to forward fill. In\n other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. 
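For example, with ``limit=2`` a gap of five consecutive NaNs has only its first two values forward filled.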
Must be greater\n than 0 or None for no limit.\n\n Returns\n -------\n DataArray\n \"\"\"\n from .missing import ffill\n\n return ffill(self, dim, limit=limit)\n\n def bfill(self, dim: Hashable, limit: int = None) -> \"DataArray\":\n \"\"\"Fill NaN values by propogating values backward\n\n *Requires bottleneck.*\n\n Parameters\n ----------\n dim : str\n Specifies the dimension along which to propagate values when\n filling.\n limit : int, default None\n The maximum number of consecutive NaN values to backward fill. In\n other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. Must be greater\n than 0 or None for no limit.\n\n Returns\n -------\n DataArray\n \"\"\"\n from .missing import bfill\n\n return bfill(self, dim, limit=limit)\n\n def combine_first(self, other: \"DataArray\") -> \"DataArray\":\n \"\"\"Combine two DataArray objects, with union of coordinates.\n\n This operation follows the normal broadcasting and alignment rules of\n ``join='outer'``. Default to non-null values of array calling the\n method. Use np.nan to fill in vacant cells after alignment.\n\n Parameters\n ----------\n other : DataArray\n Used to fill all matching missing values in this array.\n\n Returns\n -------\n DataArray\n \"\"\"\n return ops.fillna(self, other, join=\"outer\")\n\n def reduce(\n self,\n func: Callable[..., Any],\n dim: Union[None, Hashable, Sequence[Hashable]] = None,\n axis: Union[None, int, Sequence[int]] = None,\n keep_attrs: bool = None,\n keepdims: bool = False,\n **kwargs: Any\n ) -> \"DataArray\":\n \"\"\"Reduce this array by applying `func` along some dimension(s).\n\n Parameters\n ----------\n func : function\n Function which can be called in the form\n `f(x, axis=axis, **kwargs)` to return the result of reducing an\n np.ndarray over an integer valued axis.\n dim : hashable or sequence of hashables, optional\n Dimension(s) over which to apply `func`.\n axis : int or sequence of int, optional\n Axis(es) over which to repeatedly apply `func`. Only one of the\n 'dim' and 'axis' arguments can be supplied. If neither are\n supplied, then the reduction is calculated over the flattened array\n (by calling `f(x)` without an axis argument).\n keep_attrs : bool, optional\n If True, the variable's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n keepdims : bool, default False\n If True, the dimensions which are reduced are left in the result\n as dimensions of size one. 
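For example, ``da.reduce(np.mean, dim='x', keepdims=True)`` retains ``x`` as a dimension of length 1.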
Coordinates that use these dimensions\n are removed.\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : DataArray\n DataArray with this object's array replaced with an array with\n summarized data and the indicated dimension(s) removed.\n \"\"\"\n\n var = self.variable.reduce(func, dim, axis, keep_attrs, keepdims, **kwargs)\n return self._replace_maybe_drop_dims(var)\n\n def to_pandas(self) -> Union[\"DataArray\", pd.Series, pd.DataFrame]:\n \"\"\"Convert this array into a pandas object with the same shape.\n\n The type of the returned object depends on the number of DataArray\n dimensions:\n\n * 0D -> `xarray.DataArray`\n * 1D -> `pandas.Series`\n * 2D -> `pandas.DataFrame`\n * 3D -> `pandas.Panel` *(deprecated)*\n\n Only works for arrays with 3 or fewer dimensions.\n\n The DataArray constructor performs the inverse transformation.\n \"\"\"\n # TODO: consolidate the info about pandas constructors and the\n # attributes that correspond to their indexes into a separate module?\n constructors = {\n 0: lambda x: x,\n 1: pd.Series,\n 2: pd.DataFrame,\n 3: pdcompat.Panel,\n }\n try:\n constructor = constructors[self.ndim]\n except KeyError:\n raise ValueError(\n \"cannot convert arrays with %s dimensions into \"\n \"pandas objects\" % self.ndim\n )\n indexes = [self.get_index(dim) for dim in self.dims]\n return constructor(self.values, *indexes)\n\n def to_dataframe(self, name: Hashable = None) -> pd.DataFrame:\n \"\"\"Convert this array and its coordinates into a tidy pandas.DataFrame.\n\n The DataFrame is indexed by the Cartesian product of index coordinates\n (in the form of a :py:class:`pandas.MultiIndex`).\n\n Other coordinates are included as columns in the DataFrame.\n \"\"\"\n if name is None:\n name = self.name\n if name is None:\n raise ValueError(\n \"cannot convert an unnamed DataArray to a \"\n \"DataFrame: use the ``name`` parameter\"\n )\n\n dims = dict(zip(self.dims, self.shape))\n # By using a unique name, we can convert a DataArray into a DataFrame\n # even if it shares a name with one of its coordinates.\n # I would normally use unique_name = object() but that results in a\n # dataframe with columns in the wrong order, for reasons I have not\n # been able to debug (possibly a pandas bug?).\n unique_name = \"__unique_name_identifier_z98xfz98xugfg73ho__\"\n ds = self._to_dataset_whole(name=unique_name)\n df = ds._to_dataframe(dims)\n df.columns = [name if c == unique_name else c for c in df.columns]\n return df\n\n def to_series(self) -> pd.Series:\n \"\"\"Convert this array into a pandas.Series.\n\n The Series is indexed by the Cartesian product of index coordinates\n (in the form of a :py:class:`pandas.MultiIndex`).\n \"\"\"\n index = self.coords.to_index()\n return pd.Series(self.values.reshape(-1), index=index, name=self.name)\n\n def to_masked_array(self, copy: bool = True) -> np.ma.MaskedArray:\n \"\"\"Convert this array into a numpy.ma.MaskedArray\n\n Parameters\n ----------\n copy : bool\n If True (default) make a copy of the array in the result. 
If False,\n a MaskedArray view of DataArray.values is returned.\n\n Returns\n -------\n result : MaskedArray\n Masked where invalid values (nan or inf) occur.\n \"\"\"\n values = self.values # only compute lazy arrays once\n isnull = pd.isnull(values)\n return np.ma.MaskedArray(data=values, mask=isnull, copy=copy)\n\n def to_netcdf(self, *args, **kwargs) -> Union[bytes, \"Delayed\", None]:\n \"\"\"Write DataArray contents to a netCDF file.\n\n All parameters are passed directly to `xarray.Dataset.to_netcdf`.\n\n Notes\n -----\n Only xarray.Dataset objects can be written to netCDF files, so\n the xarray.DataArray is converted to a xarray.Dataset object\n containing a single variable. If the DataArray has no name, or if the\n name is the same as a co-ordinate name, then it is given the name\n '__xarray_dataarray_variable__'.\n \"\"\"\n from ..backends.api import DATAARRAY_NAME, DATAARRAY_VARIABLE\n\n if self.name is None:\n # If no name is set then use a generic xarray name\n dataset = self.to_dataset(name=DATAARRAY_VARIABLE)\n elif self.name in self.coords or self.name in self.dims:\n # The name is the same as one of the coords names, which netCDF\n # doesn't support, so rename it but keep track of the old name\n dataset = self.to_dataset(name=DATAARRAY_VARIABLE)\n dataset.attrs[DATAARRAY_NAME] = self.name\n else:\n # No problems with the name - so we're fine!\n dataset = self.to_dataset()\n\n return dataset.to_netcdf(*args, **kwargs)\n\n def to_dict(self, data: bool = True) -> dict:\n \"\"\"\n Convert this xarray.DataArray into a dictionary following xarray\n naming conventions.\n\n Converts all variables and attributes to native Python objects.\n Useful for coverting to json. To avoid datetime incompatibility\n use decode_times=False kwarg in xarrray.open_dataset.\n\n Parameters\n ----------\n data : bool, optional\n Whether to include the actual data in the dictionary. 
When set to\n False, returns just the schema.\n\n See also\n --------\n DataArray.from_dict\n \"\"\"\n d = self.variable.to_dict(data=data)\n d.update({\"coords\": {}, \"name\": self.name})\n for k in self.coords:\n d[\"coords\"][k] = self.coords[k].variable.to_dict(data=data)\n return d\n\n @classmethod\n def from_dict(cls, d: dict) -> \"DataArray\":\n \"\"\"\n Convert a dictionary into an xarray.DataArray\n\n Input dict can take several forms::\n\n d = {'dims': ('t'), 'data': x}\n\n d = {'coords': {'t': {'dims': 't', 'data': t,\n 'attrs': {'units':'s'}}},\n 'attrs': {'title': 'air temperature'},\n 'dims': 't',\n 'data': x,\n 'name': 'a'}\n\n where 't' is the name of the dimesion, 'a' is the name of the array,\n and x and t are lists, numpy.arrays, or pandas objects.\n\n Parameters\n ----------\n d : dict, with a minimum structure of {'dims': [..], 'data': [..]}\n\n Returns\n -------\n obj : xarray.DataArray\n\n See also\n --------\n DataArray.to_dict\n Dataset.from_dict\n \"\"\"\n coords = None\n if \"coords\" in d:\n try:\n coords = {\n k: (v[\"dims\"], v[\"data\"], v.get(\"attrs\"))\n for k, v in d[\"coords\"].items()\n }\n except KeyError as e:\n raise ValueError(\n \"cannot convert dict when coords are missing the key \"\n \"'{dims_data}'\".format(dims_data=str(e.args[0]))\n )\n try:\n data = d[\"data\"]\n except KeyError:\n raise ValueError(\"cannot convert dict without the key 'data''\")\n else:\n obj = cls(data, coords, d.get(\"dims\"), d.get(\"name\"), d.get(\"attrs\"))\n return obj\n\n @classmethod\n def from_series(cls, series: pd.Series, sparse: bool = False) -> \"DataArray\":\n \"\"\"Convert a pandas.Series into an xarray.DataArray.\n\n If the series's index is a MultiIndex, it will be expanded into a\n tensor product of one-dimensional coordinates (filling in missing\n values with NaN). 
Thus this operation should be the inverse of the\n `to_series` method.\n\n If sparse=True, creates a sparse array instead of a dense NumPy array.\n Requires the pydata/sparse package.\n\n See also\n --------\n xarray.Dataset.from_dataframe\n \"\"\"\n temp_name = \"__temporary_name\"\n df = pd.DataFrame({temp_name: series})\n ds = Dataset.from_dataframe(df, sparse=sparse)\n result = cast(DataArray, ds[temp_name])\n result.name = series.name\n return result\n\n def to_cdms2(self) -> \"cdms2_Variable\":\n \"\"\"Convert this array into a cdms2.Variable\n \"\"\"\n from ..convert import to_cdms2\n\n return to_cdms2(self)\n\n @classmethod\n def from_cdms2(cls, variable: \"cdms2_Variable\") -> \"DataArray\":\n \"\"\"Convert a cdms2.Variable into an xarray.DataArray\n \"\"\"\n from ..convert import from_cdms2\n\n return from_cdms2(variable)\n\n def to_iris(self) -> \"iris_Cube\":\n \"\"\"Convert this array into a iris.cube.Cube\n \"\"\"\n from ..convert import to_iris\n\n return to_iris(self)\n\n @classmethod\n def from_iris(cls, cube: \"iris_Cube\") -> \"DataArray\":\n \"\"\"Convert a iris.cube.Cube into an xarray.DataArray\n \"\"\"\n from ..convert import from_iris\n\n return from_iris(cube)\n\n def _all_compat(self, other: \"DataArray\", compat_str: str) -> bool:\n \"\"\"Helper function for equals, broadcast_equals, and identical\n \"\"\"\n\n def compat(x, y):\n return getattr(x.variable, compat_str)(y.variable)\n\n return utils.dict_equiv(self.coords, other.coords, compat=compat) and compat(\n self, other\n )\n\n def broadcast_equals(self, other: \"DataArray\") -> bool:\n \"\"\"Two DataArrays are broadcast equal if they are equal after\n broadcasting them against each other such that they have the same\n dimensions.\n\n See Also\n --------\n DataArray.equals\n DataArray.identical\n \"\"\"\n try:\n return self._all_compat(other, \"broadcast_equals\")\n except (TypeError, AttributeError):\n return False\n\n def equals(self, other: \"DataArray\") -> bool:\n \"\"\"True if two DataArrays have the same dimensions, coordinates and\n values; otherwise False.\n\n DataArrays can still be equal (like pandas objects) if they have NaN\n values in the same locations.\n\n This method is necessary because `v1 == v2` for ``DataArray``\n does element-wise comparisons (like numpy.ndarrays).\n\n See Also\n --------\n DataArray.broadcast_equals\n DataArray.identical\n \"\"\"\n try:\n return self._all_compat(other, \"equals\")\n except (TypeError, AttributeError):\n return False\n\n def identical(self, other: \"DataArray\") -> bool:\n \"\"\"Like equals, but also checks the array name and attributes, and\n attributes on all coordinates.\n\n See Also\n --------\n DataArray.broadcast_equals\n DataArray.equal\n \"\"\"\n try:\n return self.name == other.name and self._all_compat(other, \"identical\")\n except (TypeError, AttributeError):\n return False\n\n __default_name = object()\n\n def _result_name(self, other: Any = None) -> Optional[Hashable]:\n # use the same naming heuristics as pandas:\n # https://github.com/ContinuumIO/blaze/issues/458#issuecomment-51936356\n other_name = getattr(other, \"name\", self.__default_name)\n if other_name is self.__default_name or other_name == self.name:\n return self.name\n else:\n return None\n\n def __array_wrap__(self, obj, context=None) -> \"DataArray\":\n new_var = self.variable.__array_wrap__(obj, context)\n return self._replace(new_var)\n\n def __matmul__(self, obj):\n return self.dot(obj)\n\n def __rmatmul__(self, other):\n # currently somewhat duplicative, as only other 
DataArrays are\n # compatible with matmul\n return computation.dot(other, self)\n\n @staticmethod\n def _unary_op(f: Callable[..., Any]) -> Callable[..., \"DataArray\"]:\n @functools.wraps(f)\n def func(self, *args, **kwargs):\n with np.errstate(all=\"ignore\"):\n return self.__array_wrap__(f(self.variable.data, *args, **kwargs))\n\n return func\n\n @staticmethod\n def _binary_op(\n f: Callable[..., Any],\n reflexive: bool = False,\n join: str = None, # see xarray.align\n **ignored_kwargs\n ) -> Callable[..., \"DataArray\"]:\n @functools.wraps(f)\n def func(self, other):\n if isinstance(other, (Dataset, groupby.GroupBy)):\n return NotImplemented\n if isinstance(other, DataArray):\n align_type = OPTIONS[\"arithmetic_join\"] if join is None else join\n self, other = align(self, other, join=align_type, copy=False)\n other_variable = getattr(other, \"variable\", other)\n other_coords = getattr(other, \"coords\", None)\n\n variable = (\n f(self.variable, other_variable)\n if not reflexive\n else f(other_variable, self.variable)\n )\n coords, indexes = self.coords._merge_raw(other_coords)\n name = self._result_name(other)\n\n return self._replace(variable, coords, name)\n\n return func\n\n @staticmethod\n def _inplace_binary_op(f: Callable) -> Callable[..., \"DataArray\"]:\n @functools.wraps(f)\n def func(self, other):\n if isinstance(other, groupby.GroupBy):\n raise TypeError(\n \"in-place operations between a DataArray and \"\n \"a grouped object are not permitted\"\n )\n # n.b. we can't align other to self (with other.reindex_like(self))\n # because `other` may be converted into floats, which would cause\n # in-place arithmetic to fail unpredictably. Instead, we simply\n # don't support automatic alignment with in-place arithmetic.\n other_coords = getattr(other, \"coords\", None)\n other_variable = getattr(other, \"variable\", other)\n with self.coords._merge_inplace(other_coords):\n f(self.variable, other_variable)\n return self\n\n return func\n\n def _copy_attrs_from(self, other: Union[\"DataArray\", Dataset, Variable]) -> None:\n self.attrs = other.attrs\n\n @property\n def plot(self) -> _PlotMethods:\n \"\"\"\n Access plotting functions\n\n >>> d = DataArray([[1, 2], [3, 4]])\n\n For convenience just call this directly\n\n >>> d.plot()\n\n Or use it as a namespace to use xarray.plot functions as\n DataArray methods\n\n >>> d.plot.imshow() # equivalent to xarray.plot.imshow(d)\n\n \"\"\"\n return _PlotMethods(self)\n\n def _title_for_slice(self, truncate: int = 50) -> str:\n \"\"\"\n If the dataarray has 1 dimensional coordinates or comes from a slice\n we can show that info in the title\n\n Parameters\n ----------\n truncate : integer\n maximum number of characters for title\n\n Returns\n -------\n title : string\n Can be used for plot titles\n\n \"\"\"\n one_dims = []\n for dim, coord in self.coords.items():\n if coord.size == 1:\n one_dims.append(\n \"{dim} = {v}\".format(dim=dim, v=format_item(coord.values))\n )\n\n title = \", \".join(one_dims)\n if len(title) > truncate:\n title = title[: (truncate - 3)] + \"...\"\n\n return title\n\n def diff(self, dim: Hashable, n: int = 1, label: Hashable = \"upper\") -> \"DataArray\":\n \"\"\"Calculate the n-th order discrete difference along given axis.\n\n Parameters\n ----------\n dim : hashable, optional\n Dimension over which to calculate the finite difference.\n n : int, optional\n The number of times values are differenced.\n label : hashable, optional\n The new coordinate in dimension ``dim`` will have the\n values of either the 
minuend's or subtrahend's coordinate\n for values 'upper' and 'lower', respectively. Other\n values are not supported.\n\n Returns\n -------\n difference : same type as caller\n The n-th order finite difference of this object.\n\n Examples\n --------\n >>> arr = xr.DataArray([5, 5, 6, 6], [[1, 2, 3, 4]], ['x'])\n >>> arr.diff('x')\n \n array([0, 1, 0])\n Coordinates:\n * x (x) int64 2 3 4\n >>> arr.diff('x', 2)\n \n array([ 1, -1])\n Coordinates:\n * x (x) int64 3 4\n\n See Also\n --------\n DataArray.differentiate\n \"\"\"\n ds = self._to_temp_dataset().diff(n=n, dim=dim, label=label)\n return self._from_temp_dataset(ds)\n\n def shift(\n self,\n shifts: Mapping[Hashable, int] = None,\n fill_value: Any = dtypes.NA,\n **shifts_kwargs: int\n ) -> \"DataArray\":\n \"\"\"Shift this array by an offset along one or more dimensions.\n\n Only the data is moved; coordinates stay in place. Values shifted from\n beyond array bounds are replaced by NaN. This is consistent with the\n behavior of ``shift`` in pandas.\n\n Parameters\n ----------\n shifts : Mapping with the form of {dim: offset}\n Integer offset to shift along each of the given dimensions.\n Positive offsets shift to the right; negative offsets shift to the\n left.\n fill_value: scalar, optional\n Value to use for newly missing values\n **shifts_kwargs:\n The keyword arguments form of ``shifts``.\n One of shifts or shifts_kwarg must be provided.\n\n Returns\n -------\n shifted : DataArray\n DataArray with the same coordinates and attributes but shifted\n data.\n\n See also\n --------\n roll\n\n Examples\n --------\n\n >>> arr = xr.DataArray([5, 6, 7], dims='x')\n >>> arr.shift(x=1)\n \n array([ nan, 5., 6.])\n Coordinates:\n * x (x) int64 0 1 2\n \"\"\"\n variable = self.variable.shift(\n shifts=shifts, fill_value=fill_value, **shifts_kwargs\n )\n return self._replace(variable=variable)\n\n def roll(\n self,\n shifts: Mapping[Hashable, int] = None,\n roll_coords: bool = None,\n **shifts_kwargs: int\n ) -> \"DataArray\":\n \"\"\"Roll this array by an offset along one or more dimensions.\n\n Unlike shift, roll may rotate all variables, including coordinates\n if specified. 
The direction of rotation is consistent with\n :py:func:`numpy.roll`.\n\n Parameters\n ----------\n shifts : Mapping with the form of {dim: offset}\n Integer offset to rotate each of the given dimensions.\n Positive offsets roll to the right; negative offsets roll to the\n left.\n roll_coords : bool\n Indicates whether to roll the coordinates by the offset\n The current default of roll_coords (None, equivalent to True) is\n deprecated and will change to False in a future version.\n Explicitly pass roll_coords to silence the warning.\n **shifts_kwargs : The keyword arguments form of ``shifts``.\n One of shifts or shifts_kwarg must be provided.\n\n Returns\n -------\n rolled : DataArray\n DataArray with the same attributes but rolled data and coordinates.\n\n See also\n --------\n shift\n\n Examples\n --------\n\n >>> arr = xr.DataArray([5, 6, 7], dims='x')\n >>> arr.roll(x=1)\n \n array([7, 5, 6])\n Coordinates:\n * x (x) int64 2 0 1\n \"\"\"\n ds = self._to_temp_dataset().roll(\n shifts=shifts, roll_coords=roll_coords, **shifts_kwargs\n )\n return self._from_temp_dataset(ds)\n\n @property\n def real(self) -> \"DataArray\":\n return self._replace(self.variable.real)\n\n @property\n def imag(self) -> \"DataArray\":\n return self._replace(self.variable.imag)\n\n def dot(\n self, other: \"DataArray\", dims: Union[Hashable, Sequence[Hashable], None] = None\n ) -> \"DataArray\":\n \"\"\"Perform dot product of two DataArrays along their shared dims.\n\n Equivalent to taking taking tensordot over all shared dims.\n\n Parameters\n ----------\n other : DataArray\n The other array with which the dot product is performed.\n dims: hashable or sequence of hashables, optional\n Along which dimensions to be summed over. Default all the common\n dimensions are summed over.\n\n Returns\n -------\n result : DataArray\n Array resulting from the dot product over all shared dimensions.\n\n See also\n --------\n dot\n numpy.tensordot\n\n Examples\n --------\n\n >>> da_vals = np.arange(6 * 5 * 4).reshape((6, 5, 4))\n >>> da = DataArray(da_vals, dims=['x', 'y', 'z'])\n >>> dm_vals = np.arange(4)\n >>> dm = DataArray(dm_vals, dims=['z'])\n\n >>> dm.dims\n ('z')\n >>> da.dims\n ('x', 'y', 'z')\n\n >>> dot_result = da.dot(dm)\n >>> dot_result.dims\n ('x', 'y')\n \"\"\"\n if isinstance(other, Dataset):\n raise NotImplementedError(\n \"dot products are not yet supported with Dataset objects.\"\n )\n if not isinstance(other, DataArray):\n raise TypeError(\"dot only operates on DataArrays.\")\n\n return computation.dot(self, other, dims=dims)\n\n def sortby(\n self,\n variables: Union[Hashable, \"DataArray\", Sequence[Union[Hashable, \"DataArray\"]]],\n ascending: bool = True,\n ) -> \"DataArray\":\n \"\"\"Sort object by labels or values (along an axis).\n\n Sorts the dataarray, either along specified dimensions,\n or according to values of 1-D dataarrays that share dimension\n with calling object.\n\n If the input variables are dataarrays, then the dataarrays are aligned\n (via left-join) to the calling object prior to sorting by cell values.\n NaNs are sorted to the end, following Numpy convention.\n\n If multiple sorts along the same dimension is\n given, numpy's lexsort is performed along that dimension:\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.lexsort.html\n and the FIRST key in the sequence is used as the primary sort key,\n followed by the 2nd key, etc.\n\n Parameters\n ----------\n variables: hashable, DataArray, or sequence of either\n 1D DataArray objects or name(s) of 1D variable(s) in\n 
coords whose values are used to sort this array.\n ascending: boolean, optional\n Whether to sort by ascending or descending order.\n\n Returns\n -------\n sorted: DataArray\n A new dataarray where all the specified dims are sorted by dim\n labels.\n\n Examples\n --------\n\n >>> da = xr.DataArray(np.random.rand(5),\n ... coords=[pd.date_range('1/1/2000', periods=5)],\n ... dims='time')\n >>> da\n \n array([ 0.965471, 0.615637, 0.26532 , 0.270962, 0.552878])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...\n\n >>> da.sortby(da)\n \n array([ 0.26532 , 0.270962, 0.552878, 0.615637, 0.965471])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-03 2000-01-04 2000-01-05 ...\n \"\"\"\n ds = self._to_temp_dataset().sortby(variables, ascending=ascending)\n return self._from_temp_dataset(ds)\n\n def quantile(\n self,\n q: Any,\n dim: Union[Hashable, Sequence[Hashable], None] = None,\n interpolation: str = \"linear\",\n keep_attrs: bool = None,\n ) -> \"DataArray\":\n \"\"\"Compute the qth quantile of the data along the specified dimension.\n\n Returns the qth quantiles(s) of the array elements.\n\n Parameters\n ----------\n q : float in range of [0,1] or array-like of floats\n Quantile to compute, which must be between 0 and 1 inclusive.\n dim : hashable or sequence of hashable, optional\n Dimension(s) over which to apply quantile.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to\n use when the desired quantile lies between two data points\n ``i < j``:\n\n - linear: ``i + (j - i) * fraction``, where ``fraction`` is\n the fractional part of the index surrounded by ``i`` and\n ``j``.\n - lower: ``i``.\n - higher: ``j``.\n - nearest: ``i`` or ``j``, whichever is nearest.\n - midpoint: ``(i + j) / 2``.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n\n Returns\n -------\n quantiles : DataArray\n If `q` is a single quantile, then the result\n is a scalar. If multiple percentiles are given, first axis of\n the result corresponds to the quantile and a quantile dimension\n is added to the return array. The other dimensions are the\n dimensions that remain after the reduction of the array.\n\n See Also\n --------\n numpy.nanpercentile, pandas.Series.quantile, Dataset.quantile\n \"\"\"\n\n ds = self._to_temp_dataset().quantile(\n q, dim=dim, keep_attrs=keep_attrs, interpolation=interpolation\n )\n return self._from_temp_dataset(ds)\n\n def rank(\n self, dim: Hashable, pct: bool = False, keep_attrs: bool = None\n ) -> \"DataArray\":\n \"\"\"Ranks the data.\n\n Equal values are assigned a rank that is the average of the ranks that\n would have been otherwise assigned to all of the values within that\n set. Ranks begin at 1, not 0. If pct, computes percentage ranks.\n\n NaNs in the input array are returned as NaNs.\n\n The `bottleneck` library is required.\n\n Parameters\n ----------\n dim : hashable\n Dimension over which to compute rank.\n pct : bool, optional\n If True, compute percentage ranks, otherwise compute integer ranks.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. 
If False (default), the new\n object will be returned without attributes.\n\n Returns\n -------\n ranked : DataArray\n DataArray with the same coordinates and dtype 'float64'.\n\n Examples\n --------\n\n >>> arr = xr.DataArray([5, 6, 7], dims='x')\n >>> arr.rank('x')\n \n array([ 1., 2., 3.])\n Dimensions without coordinates: x\n \"\"\"\n\n ds = self._to_temp_dataset().rank(dim, pct=pct, keep_attrs=keep_attrs)\n return self._from_temp_dataset(ds)\n\n def differentiate(\n self, coord: Hashable, edge_order: int = 1, datetime_unit: str = None\n ) -> \"DataArray\":\n \"\"\" Differentiate the array with the second order accurate central\n differences.\n\n .. note::\n This feature is limited to simple cartesian geometry, i.e. coord\n must be one dimensional.\n\n Parameters\n ----------\n coord: hashable\n The coordinate to be used to compute the gradient.\n edge_order: 1 or 2. Default 1\n N-th order accurate differences at the boundaries.\n datetime_unit: None or any of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',\n 'us', 'ns', 'ps', 'fs', 'as'}\n Unit to compute gradient. Only valid for datetime coordinate.\n\n Returns\n -------\n differentiated: DataArray\n\n See also\n --------\n numpy.gradient: corresponding numpy function\n\n Examples\n --------\n\n >>> da = xr.DataArray(np.arange(12).reshape(4, 3), dims=['x', 'y'],\n ... coords={'x': [0, 0.1, 1.1, 1.2]})\n >>> da\n \n array([[ 0, 1, 2],\n [ 3, 4, 5],\n [ 6, 7, 8],\n [ 9, 10, 11]])\n Coordinates:\n * x (x) float64 0.0 0.1 1.1 1.2\n Dimensions without coordinates: y\n >>>\n >>> da.differentiate('x')\n \n array([[30. , 30. , 30. ],\n [27.545455, 27.545455, 27.545455],\n [27.545455, 27.545455, 27.545455],\n [30. , 30. , 30. ]])\n Coordinates:\n * x (x) float64 0.0 0.1 1.1 1.2\n Dimensions without coordinates: y\n \"\"\"\n ds = self._to_temp_dataset().differentiate(coord, edge_order, datetime_unit)\n return self._from_temp_dataset(ds)\n\n def integrate(\n self, dim: Union[Hashable, Sequence[Hashable]], datetime_unit: str = None\n ) -> \"DataArray\":\n \"\"\" integrate the array with the trapezoidal rule.\n\n .. note::\n This feature is limited to simple cartesian geometry, i.e. coord\n must be one dimensional.\n\n Parameters\n ----------\n dim: hashable, or a sequence of hashable\n Coordinate(s) used for the integration.\n datetime_unit: str, optional\n Can be used to specify the unit if datetime coordinate is used.\n One of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns',\n 'ps', 'fs', 'as'}\n\n Returns\n -------\n integrated: DataArray\n\n See also\n --------\n numpy.trapz: corresponding numpy function\n\n Examples\n --------\n\n >>> da = xr.DataArray(np.arange(12).reshape(4, 3), dims=['x', 'y'],\n ... 
coords={'x': [0, 0.1, 1.1, 1.2]})\n >>> da\n \n array([[ 0, 1, 2],\n [ 3, 4, 5],\n [ 6, 7, 8],\n [ 9, 10, 11]])\n Coordinates:\n * x (x) float64 0.0 0.1 1.1 1.2\n Dimensions without coordinates: y\n >>>\n >>> da.integrate('x')\n \n array([5.4, 6.6, 7.8])\n Dimensions without coordinates: y\n \"\"\"\n ds = self._to_temp_dataset().integrate(dim, datetime_unit)\n return self._from_temp_dataset(ds)\n\n def unify_chunks(self) -> \"DataArray\":\n \"\"\" Unify chunk size along all chunked dimensions of this DataArray.\n\n Returns\n -------\n\n DataArray with consistent chunk sizes for all dask-array variables\n\n See Also\n --------\n\n dask.array.core.unify_chunks\n \"\"\"\n ds = self._to_temp_dataset().unify_chunks()\n return self._from_temp_dataset(ds)\n\n def map_blocks(\n self,\n func: \"Callable[..., T_DSorDA]\",\n args: Sequence[Any] = (),\n kwargs: Mapping[str, Any] = None,\n ) -> \"T_DSorDA\":\n \"\"\"\n Apply a function to each chunk of this DataArray. This method is experimental\n and its signature may change.\n\n Parameters\n ----------\n func: callable\n User-provided function that accepts a DataArray as its first parameter. The\n function will receive a subset of this DataArray, corresponding to one chunk\n along each chunked dimension. ``func`` will be executed as\n ``func(obj_subset, *args, **kwargs)``.\n\n The function will be first run on mocked-up data, that looks like this array\n but has sizes 0, to determine properties of the returned object such as\n dtype, variable names, new dimensions and new indexes (if any).\n\n This function must return either a single DataArray or a single Dataset.\n\n This function cannot change size of existing dimensions, or add new chunked\n dimensions.\n args: Sequence\n Passed verbatim to func after unpacking, after the sliced DataArray. xarray\n objects, if any, will not be split by chunks. Passing dask collections is\n not allowed.\n kwargs: Mapping\n Passed verbatim to func after unpacking. xarray objects, if any, will not be\n split by chunks. Passing dask collections is not allowed.\n\n Returns\n -------\n A single DataArray or Dataset with dask backend, reassembled from the outputs of\n the function.\n\n Notes\n -----\n This method is designed for when one needs to manipulate a whole xarray object\n within each chunk. 
In the more common case where one can work on numpy arrays,\n it is recommended to use apply_ufunc.\n\n If none of the variables in this DataArray is backed by dask, calling this\n method is equivalent to calling ``func(self, *args, **kwargs)``.\n\n See Also\n --------\n dask.array.map_blocks, xarray.apply_ufunc, xarray.map_blocks,\n xarray.Dataset.map_blocks\n \"\"\"\n from .parallel import map_blocks\n\n return map_blocks(func, self, args, kwargs)\n\n # this needs to be at the end, or mypy will confuse with `str`\n # https://mypy.readthedocs.io/en/latest/common_issues.html#dealing-with-conflicting-names\n str = property(StringAccessor)"},{"col":0,"comment":"Compute the number of non-None arguments.\n\n Copied from pandas.core.common.count_not_none (not part of the public API)\n ","endLoc":58,"header":"def count_not_none(*args) -> int","id":461,"name":"count_not_none","nodeType":"Function","startLoc":53,"text":"def count_not_none(*args) -> int:\n \"\"\"Compute the number of non-None arguments.\n\n Copied from pandas.core.common.count_not_none (not part of the public API)\n \"\"\"\n return sum([arg is not None for arg in args])"},{"className":"RollingExp","col":0,"comment":"\n Exponentially-weighted moving window object.\n Similar to EWM in pandas\n\n Parameters\n ----------\n obj : Dataset or DataArray\n Object to window.\n windows : A single mapping from a single dimension name to window value\n dim : str\n Name of the dimension to create the rolling exponential window\n along (e.g., `time`).\n window : int\n Size of the moving window. The type of this is specified in\n `window_type`\n window_type : str, one of ['span', 'com', 'halflife', 'alpha'], default 'span'\n The format of the previously supplied window. Each is a simple\n numerical transformation of the others. Described in detail:\n https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.ewm.html\n\n Returns\n -------\n RollingExp : type of input argument\n ","endLoc":104,"id":462,"nodeType":"Class","startLoc":59,"text":"class RollingExp:\n \"\"\"\n Exponentially-weighted moving window object.\n Similar to EWM in pandas\n\n Parameters\n ----------\n obj : Dataset or DataArray\n Object to window.\n windows : A single mapping from a single dimension name to window value\n dim : str\n Name of the dimension to create the rolling exponential window\n along (e.g., `time`).\n window : int\n Size of the moving window. The type of this is specified in\n `window_type`\n window_type : str, one of ['span', 'com', 'halflife', 'alpha'], default 'span'\n The format of the previously supplied window. Each is a simple\n numerical transformation of the others. Described in detail:\n https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.ewm.html\n\n Returns\n -------\n RollingExp : type of input argument\n \"\"\"\n\n def __init__(self, obj, windows, window_type=\"span\"):\n self.obj = obj\n dim, window = next(iter(windows.items()))\n self.dim = dim\n self.alpha = _get_alpha(**{window_type: window})\n\n def mean(self):\n \"\"\"\n Exponentially weighted moving average\n\n Examples\n --------\n >>> da = xr.DataArray([1,1,2,2,2], dims='x')\n >>> da.rolling_exp(x=2, window_type='span').mean()\n \n array([1. , 1. 
, 1.692308, 1.9 , 1.966942])\n Dimensions without coordinates: x\n \"\"\"\n\n return self.obj.reduce(move_exp_nanmean, dim=self.dim, alpha=self.alpha)"},{"col":4,"comment":"null","endLoc":89,"header":"def __init__(self, obj, windows, window_type=\"span\")","id":463,"name":"__init__","nodeType":"Function","startLoc":85,"text":"def __init__(self, obj, windows, window_type=\"span\"):\n self.obj = obj\n dim, window = next(iter(windows.items()))\n self.dim = dim\n self.alpha = _get_alpha(**{window_type: window})"},{"col":0,"comment":"\n Open a dataset from the online repository (requires internet).\n\n If a local copy is found then always use that to avoid network traffic.\n\n Parameters\n ----------\n name : str\n Name of the netcdf file containing the dataset\n ie. 'air_temperature'\n cache_dir : string, optional\n The directory in which to search for and write cached data.\n cache : boolean, optional\n If True, then cache data locally for use on subsequent calls\n github_url : string\n Github repository where the data is stored\n branch : string\n The git branch to download from\n kws : dict, optional\n Passed to xarray.open_dataset\n\n See Also\n --------\n xarray.open_dataset\n\n ","endLoc":97,"header":"def open_dataset(\n name,\n cache=True,\n cache_dir=_default_cache_dir,\n github_url=\"https://github.com/pydata/xarray-data\",\n branch=\"master\",\n **kws\n)","id":464,"name":"open_dataset","nodeType":"Function","startLoc":29,"text":"def open_dataset(\n name,\n cache=True,\n cache_dir=_default_cache_dir,\n github_url=\"https://github.com/pydata/xarray-data\",\n branch=\"master\",\n **kws\n):\n \"\"\"\n Open a dataset from the online repository (requires internet).\n\n If a local copy is found then always use that to avoid network traffic.\n\n Parameters\n ----------\n name : str\n Name of the netcdf file containing the dataset\n ie. 
'air_temperature'\n cache_dir : string, optional\n The directory in which to search for and write cached data.\n cache : boolean, optional\n If True, then cache data locally for use on subsequent calls\n github_url : string\n Github repository where the data is stored\n branch : string\n The git branch to download from\n kws : dict, optional\n Passed to xarray.open_dataset\n\n See Also\n --------\n xarray.open_dataset\n\n \"\"\"\n longdir = _os.path.expanduser(cache_dir)\n fullname = name + \".nc\"\n localfile = _os.sep.join((longdir, fullname))\n md5name = name + \".md5\"\n md5file = _os.sep.join((longdir, md5name))\n\n if not _os.path.exists(localfile):\n\n # This will always leave this directory on disk.\n # May want to add an option to remove it.\n if not _os.path.isdir(longdir):\n _os.mkdir(longdir)\n\n url = \"/\".join((github_url, \"raw\", branch, fullname))\n urlretrieve(url, localfile)\n url = \"/\".join((github_url, \"raw\", branch, md5name))\n urlretrieve(url, md5file)\n\n localmd5 = file_md5_checksum(localfile)\n with open(md5file, \"r\") as f:\n remotemd5 = f.read()\n if localmd5 != remotemd5:\n _os.remove(localfile)\n msg = \"\"\"\n MD5 checksum does not match, try downloading dataset again.\n \"\"\"\n raise OSError(msg)\n\n ds = _open_dataset(localfile, **kws)\n\n if not cache:\n ds = ds.load()\n _os.remove(localfile)\n\n return ds"},{"col":0,"comment":"null","endLoc":12,"header":"def _get_alpha(com=None, span=None, halflife=None, alpha=None)","id":465,"name":"_get_alpha","nodeType":"Function","startLoc":7,"text":"def _get_alpha(com=None, span=None, halflife=None, alpha=None):\n # pandas defines in terms of com (converting to alpha in the algo)\n # so use its function to get a com and then convert to alpha\n\n com = _get_center_of_mass(com, span, halflife, alpha)\n return 1 / (1 + com)"},{"col":0,"comment":"\n Concatenate a sequence of datasets along a new or existing dimension\n ","endLoc":385,"header":"def _dataset_concat(\n datasets,\n dim,\n data_vars,\n coords,\n compat,\n positions,\n fill_value=dtypes.NA,\n join=\"outer\",\n)","id":466,"name":"_dataset_concat","nodeType":"Function","startLoc":263,"text":"def _dataset_concat(\n datasets,\n dim,\n data_vars,\n coords,\n compat,\n positions,\n fill_value=dtypes.NA,\n join=\"outer\",\n):\n \"\"\"\n Concatenate a sequence of datasets along a new or existing dimension\n \"\"\"\n from .dataset import Dataset\n\n dim, coord = _calc_concat_dim_coord(dim)\n # Make sure we're working on a copy (we'll be loading variables)\n datasets = [ds.copy() for ds in datasets]\n datasets = align(\n *datasets, join=join, copy=False, exclude=[dim], fill_value=fill_value\n )\n\n dim_coords, dims_sizes, coord_names, data_names = _parse_datasets(datasets)\n dim_names = set(dim_coords)\n unlabeled_dims = dim_names - coord_names\n\n both_data_and_coords = coord_names & data_names\n if both_data_and_coords:\n raise ValueError(\n \"%r is a coordinate in some datasets but not others.\" % both_data_and_coords\n )\n # we don't want the concat dimension in the result dataset yet\n dim_coords.pop(dim, None)\n dims_sizes.pop(dim, None)\n\n # case where concat dimension is a coordinate or data_var but not a dimension\n if (dim in coord_names or dim in data_names) and dim not in dim_names:\n datasets = [ds.expand_dims(dim) for ds in datasets]\n\n # determine which variables to concatentate\n concat_over, equals, concat_dim_lengths = _calc_concat_over(\n datasets, dim, dim_names, data_vars, coords, compat\n )\n\n # determine which variables to merge, and 
then merge them according to compat\n variables_to_merge = (coord_names | data_names) - concat_over - dim_names\n\n result_vars = {}\n if variables_to_merge:\n to_merge = {var: [] for var in variables_to_merge}\n\n for ds in datasets:\n absent_merge_vars = variables_to_merge - set(ds.variables)\n if absent_merge_vars:\n raise ValueError(\n \"variables %r are present in some datasets but not others. \"\n % absent_merge_vars\n )\n\n for var in variables_to_merge:\n to_merge[var].append(ds.variables[var])\n\n for var in variables_to_merge:\n result_vars[var] = unique_variable(\n var, to_merge[var], compat=compat, equals=equals.get(var, None)\n )\n else:\n result_vars = {}\n result_vars.update(dim_coords)\n\n # assign attrs and encoding from first dataset\n result_attrs = datasets[0].attrs\n result_encoding = datasets[0].encoding\n\n # check that global attributes are fixed across all datasets if necessary\n for ds in datasets[1:]:\n if compat == \"identical\" and not utils.dict_equiv(ds.attrs, result_attrs):\n raise ValueError(\"Dataset global attributes not equal.\")\n\n # we've already verified everything is consistent; now, calculate\n # shared dimension sizes so we can expand the necessary variables\n def ensure_common_dims(vars):\n # ensure each variable with the given name shares the same\n # dimensions and the same shape for all of them except along the\n # concat dimension\n common_dims = tuple(pd.unique([d for v in vars for d in v.dims]))\n if dim not in common_dims:\n common_dims = (dim,) + common_dims\n for var, dim_len in zip(vars, concat_dim_lengths):\n if var.dims != common_dims:\n common_shape = tuple(dims_sizes.get(d, dim_len) for d in common_dims)\n var = var.set_dims(common_dims, common_shape)\n yield var\n\n # stack up each variable to fill-out the dataset (in order)\n # n.b. 
this loop preserves variable order, needed for groupby.\n for k in datasets[0].variables:\n if k in concat_over:\n try:\n vars = ensure_common_dims([ds.variables[k] for ds in datasets])\n except KeyError:\n raise ValueError(\"%r is not present in all datasets.\" % k)\n combined = concat_vars(vars, dim, positions)\n assert isinstance(combined, Variable)\n result_vars[k] = combined\n\n result = Dataset(result_vars, attrs=result_attrs)\n absent_coord_names = coord_names - set(result.variables)\n if absent_coord_names:\n raise ValueError(\n \"Variables %r are coordinates in some datasets but not others.\"\n % absent_coord_names\n )\n result = result.set_coords(coord_names)\n result.encoding = result_encoding\n\n result = result.drop(unlabeled_dims, errors=\"ignore\")\n\n if coord is not None:\n # add concat dimension last to ensure that its in the final Dataset\n result[coord.name] = coord\n\n return result"},{"col":0,"comment":"\n Infer the dimension name and 1d coordinate variable (if appropriate)\n for concatenating along the new dimension.\n ","endLoc":155,"header":"def _calc_concat_dim_coord(dim)","id":467,"name":"_calc_concat_dim_coord","nodeType":"Function","startLoc":134,"text":"def _calc_concat_dim_coord(dim):\n \"\"\"\n Infer the dimension name and 1d coordinate variable (if appropriate)\n for concatenating along the new dimension.\n \"\"\"\n from .dataarray import DataArray\n\n if isinstance(dim, str):\n coord = None\n elif not isinstance(dim, (DataArray, Variable)):\n dim_name = getattr(dim, \"name\", None)\n if dim_name is None:\n dim_name = \"concat_dim\"\n coord = IndexVariable(dim_name, dim)\n dim = dim_name\n elif not isinstance(dim, DataArray):\n coord = as_variable(dim).to_index_variable()\n dim, = coord.dims\n else:\n coord = dim\n dim, = coord.dims\n return dim, coord"},{"col":0,"comment":"\n Vendored from pandas.core.window.common._get_center_of_mass\n\n See licenses/PANDAS_LICENSE for the function's license\n ","endLoc":56,"header":"def _get_center_of_mass(comass, span, halflife, alpha)","id":468,"name":"_get_center_of_mass","nodeType":"Function","startLoc":26,"text":"def _get_center_of_mass(comass, span, halflife, alpha):\n \"\"\"\n Vendored from pandas.core.window.common._get_center_of_mass\n\n See licenses/PANDAS_LICENSE for the function's license\n \"\"\"\n valid_count = count_not_none(comass, span, halflife, alpha)\n if valid_count > 1:\n raise ValueError(\"comass, span, halflife, and alpha \" \"are mutually exclusive\")\n\n # Convert to center of mass; domain checks ensure 0 < alpha <= 1\n if comass is not None:\n if comass < 0:\n raise ValueError(\"comass must satisfy: comass >= 0\")\n elif span is not None:\n if span < 1:\n raise ValueError(\"span must satisfy: span >= 1\")\n comass = (span - 1) / 2.0\n elif halflife is not None:\n if halflife <= 0:\n raise ValueError(\"halflife must satisfy: halflife > 0\")\n decay = 1 - np.exp(np.log(0.5) / halflife)\n comass = 1 / decay - 1\n elif alpha is not None:\n if alpha <= 0 or alpha > 1:\n raise ValueError(\"alpha must satisfy: 0 < alpha <= 1\")\n comass = (1.0 - alpha) / alpha\n else:\n raise ValueError(\"Must pass one of comass, span, halflife, or alpha\")\n\n return float(comass)"},{"col":4,"comment":"\n Exponentially weighted moving average\n\n Examples\n --------\n >>> da = xr.DataArray([1,1,2,2,2], dims='x')\n >>> da.rolling_exp(x=2, window_type='span').mean()\n \n array([1. , 1. 
, 1.692308, 1.9 , 1.966942])\n Dimensions without coordinates: x\n ","endLoc":104,"header":"def mean(self)","id":469,"name":"mean","nodeType":"Function","startLoc":91,"text":"def mean(self):\n \"\"\"\n Exponentially weighted moving average\n\n Examples\n --------\n >>> da = xr.DataArray([1,1,2,2,2], dims='x')\n >>> da.rolling_exp(x=2, window_type='span').mean()\n \n array([1. , 1. , 1.692308, 1.9 , 1.966942])\n Dimensions without coordinates: x\n \"\"\"\n\n return self.obj.reduce(move_exp_nanmean, dim=self.dim, alpha=self.alpha)"},{"className":"DataWithCoords","col":0,"comment":"Shared base class for Dataset and DataArray.","endLoc":1197,"id":470,"nodeType":"Class","startLoc":329,"text":"class DataWithCoords(SupportsArithmetic, AttrAccessMixin):\n \"\"\"Shared base class for Dataset and DataArray.\"\"\"\n\n __slots__ = ()\n\n _rolling_exp_cls = RollingExp\n\n def squeeze(\n self,\n dim: Union[Hashable, Iterable[Hashable], None] = None,\n drop: bool = False,\n axis: Union[int, Iterable[int], None] = None,\n ):\n \"\"\"Return a new object with squeezed data.\n\n Parameters\n ----------\n dim : None or Hashable or iterable of Hashable, optional\n Selects a subset of the length one dimensions. If a dimension is\n selected with length greater than one, an error is raised. If\n None, all length one dimensions are squeezed.\n drop : bool, optional\n If ``drop=True``, drop squeezed coordinates instead of making them\n scalar.\n axis : None or int or iterable of int, optional\n Like dim, but positional.\n\n Returns\n -------\n squeezed : same type as caller\n This object, but with with all or a subset of the dimensions of\n length 1 removed.\n\n See Also\n --------\n numpy.squeeze\n \"\"\"\n dims = get_squeeze_dims(self, dim, axis)\n return self.isel(drop=drop, **{d: 0 for d in dims})\n\n def get_index(self, key: Hashable) -> pd.Index:\n \"\"\"Get an index for a dimension, with fall-back to a default RangeIndex\n \"\"\"\n if key not in self.dims:\n raise KeyError(key)\n\n try:\n return self.indexes[key]\n except KeyError:\n # need to ensure dtype=int64 in case range is empty on Python 2\n return pd.Index(range(self.sizes[key]), name=key, dtype=np.int64)\n\n def _calc_assign_results(\n self: C, kwargs: Mapping[Hashable, Union[T, Callable[[C], T]]]\n ) -> Dict[Hashable, T]:\n return {k: v(self) if callable(v) else v for k, v in kwargs.items()}\n\n def assign_coords(self, coords=None, **coords_kwargs):\n \"\"\"Assign new coordinates to this object.\n\n Returns a new object with all the original data in addition to the new\n coordinates.\n\n Parameters\n ----------\n coords : dict, optional\n A dict with keys which are variables names. If the values are\n callable, they are computed on this object and assigned to new\n coordinate variables. If the values are not callable,\n (e.g. a ``DataArray``, scalar, or array), they are simply assigned.\n\n **coords_kwargs : keyword, value pairs, optional\n The keyword arguments form of ``coords``.\n One of ``coords`` or ``coords_kwargs`` must be provided.\n\n Returns\n -------\n assigned : same type as caller\n A new object with the new coordinates in addition to the existing\n data.\n\n Examples\n --------\n Convert longitude coordinates from 0-359 to -180-179:\n\n >>> da = xr.DataArray(np.random.rand(4),\n ... coords=[np.array([358, 359, 0, 1])],\n ... 
dims='lon')\n >>> da\n \n array([0.28298 , 0.667347, 0.657938, 0.177683])\n Coordinates:\n * lon (lon) int64 358 359 0 1\n >>> da.assign_coords(lon=(((da.lon + 180) % 360) - 180))\n \n array([0.28298 , 0.667347, 0.657938, 0.177683])\n Coordinates:\n * lon (lon) int64 -2 -1 0 1\n\n The function also accepts dictionary arguments:\n\n >>> da.assign_coords({'lon': (((da.lon + 180) % 360) - 180)})\n \n array([0.28298 , 0.667347, 0.657938, 0.177683])\n Coordinates:\n * lon (lon) int64 -2 -1 0 1\n\n Notes\n -----\n Since ``coords_kwargs`` is a dictionary, the order of your arguments may\n not be preserved, and so the order of the new variables is not well\n defined. Assigning multiple variables within the same ``assign_coords``\n is possible, but you cannot reference other variables created within\n the same ``assign_coords`` call.\n\n See also\n --------\n Dataset.assign\n Dataset.swap_dims\n \"\"\"\n coords_kwargs = either_dict_or_kwargs(coords, coords_kwargs, \"assign_coords\")\n data = self.copy(deep=False)\n results = self._calc_assign_results(coords_kwargs)\n data.coords.update(results)\n return data\n\n def assign_attrs(self, *args, **kwargs):\n \"\"\"Assign new attrs to this object.\n\n Returns a new object equivalent to self.attrs.update(*args, **kwargs).\n\n Parameters\n ----------\n args : positional arguments passed into ``attrs.update``.\n kwargs : keyword arguments passed into ``attrs.update``.\n\n Returns\n -------\n assigned : same type as caller\n A new object with the new attrs in addition to the existing data.\n\n See also\n --------\n Dataset.assign\n \"\"\"\n out = self.copy(deep=False)\n out.attrs.update(*args, **kwargs)\n return out\n\n def pipe(\n self,\n func: Union[Callable[..., T], Tuple[Callable[..., T], str]],\n *args,\n **kwargs\n ) -> T:\n \"\"\"\n Apply func(self, *args, **kwargs)\n\n This method replicates the pandas method of the same name.\n\n Parameters\n ----------\n func : function\n function to apply to this xarray object (Dataset/DataArray).\n ``args``, and ``kwargs`` are passed into ``func``.\n Alternatively a ``(callable, data_keyword)`` tuple where\n ``data_keyword`` is a string indicating the keyword of\n ``callable`` that expects the xarray object.\n args : positional arguments passed into ``func``.\n kwargs : a dictionary of keyword arguments passed into ``func``.\n\n Returns\n -------\n object : the return type of ``func``.\n\n Notes\n -----\n\n Use ``.pipe`` when chaining together functions that expect\n xarray or pandas objects, e.g., instead of writing\n\n >>> f(g(h(ds), arg1=a), arg2=b, arg3=c)\n\n You can write\n\n >>> (ds.pipe(h)\n ... .pipe(g, arg1=a)\n ... .pipe(f, arg2=b, arg3=c)\n ... )\n\n If you have a function that takes the data as (say) the second\n argument, pass a tuple indicating which keyword expects the\n data. For example, suppose ``f`` takes its data as ``arg2``:\n\n >>> (ds.pipe(h)\n ... .pipe(g, arg1=a)\n ... .pipe((f, 'arg2'), arg1=a, arg3=c)\n ... )\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> x = xr.Dataset(\n ... {\n ... \"temperature_c\": ((\"lat\", \"lon\"), 20 * np.random.rand(4).reshape(2, 2)),\n ... \"precipitation\": ((\"lat\", \"lon\"), np.random.rand(4).reshape(2, 2)),\n ... },\n ... coords={\"lat\": [10, 20], \"lon\": [150, 160]},\n ... 
)\n >>> x\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 14.53 11.85 19.27 16.37\n precipitation (lat, lon) float64 0.7315 0.7189 0.8481 0.4671\n\n >>> def adder(data, arg):\n ... return data + arg\n ...\n >>> def div(data, arg):\n ... return data / arg\n ...\n >>> def sub_mult(data, sub_arg, mult_arg):\n ... return (data * mult_arg) - sub_arg\n ...\n >>> x.pipe(adder, 2)\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lon (lon) int64 150 160\n * lat (lat) int64 10 20\n Data variables:\n temperature_c (lat, lon) float64 16.53 13.85 21.27 18.37\n precipitation (lat, lon) float64 2.731 2.719 2.848 2.467\n\n >>> x.pipe(adder, arg=2)\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lon (lon) int64 150 160\n * lat (lat) int64 10 20\n Data variables:\n temperature_c (lat, lon) float64 16.53 13.85 21.27 18.37\n precipitation (lat, lon) float64 2.731 2.719 2.848 2.467\n\n >>> (\n ... x\n ... .pipe(adder, arg=2)\n ... .pipe(div, arg=2)\n ... .pipe(sub_mult, sub_arg=2, mult_arg=2)\n ... )\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lon (lon) int64 150 160\n * lat (lat) int64 10 20\n Data variables:\n temperature_c (lat, lon) float64 14.53 11.85 19.27 16.37\n precipitation (lat, lon) float64 0.7315 0.7189 0.8481 0.4671\n\n See Also\n --------\n pandas.DataFrame.pipe\n \"\"\"\n if isinstance(func, tuple):\n func, target = func\n if target in kwargs:\n raise ValueError(\n \"%s is both the pipe target and a keyword \" \"argument\" % target\n )\n kwargs[target] = self\n return func(*args, **kwargs)\n else:\n return func(self, *args, **kwargs)\n\n def groupby(self, group, squeeze: bool = True, restore_coord_dims: bool = None):\n \"\"\"Returns a GroupBy object for performing grouped operations.\n\n Parameters\n ----------\n group : str, DataArray or IndexVariable\n Array whose unique values should be used to group this array. If a\n string, must be the name of a variable contained in this dataset.\n squeeze : boolean, optional\n If \"group\" is a dimension of any arrays in this dataset, `squeeze`\n controls whether the subarrays have a dimension of length 1 along\n that dimension or if the dimension is squeezed out.\n restore_coord_dims : bool, optional\n If True, also restore the dimension order of multi-dimensional\n coordinates.\n\n Returns\n -------\n grouped : GroupBy\n A `GroupBy` object patterned after `pandas.GroupBy` that can be\n iterated over in the form of `(unique_value, grouped_array)` pairs.\n\n Examples\n --------\n Calculate daily anomalies for daily data:\n\n >>> da = xr.DataArray(np.linspace(0, 1826, num=1827),\n ... coords=[pd.date_range('1/1/2000', '31/12/2004',\n ... freq='D')],\n ... 
dims='time')\n >>> da\n \n array([0.000e+00, 1.000e+00, 2.000e+00, ..., 1.824e+03, 1.825e+03, 1.826e+03])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...\n >>> da.groupby('time.dayofyear') - da.groupby('time.dayofyear').mean('time')\n \n array([-730.8, -730.8, -730.8, ..., 730.2, 730.2, 730.5])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...\n dayofyear (time) int64 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 ...\n\n See Also\n --------\n core.groupby.DataArrayGroupBy\n core.groupby.DatasetGroupBy\n \"\"\"\n return self._groupby_cls(\n self, group, squeeze=squeeze, restore_coord_dims=restore_coord_dims\n )\n\n def groupby_bins(\n self,\n group,\n bins,\n right: bool = True,\n labels=None,\n precision: int = 3,\n include_lowest: bool = False,\n squeeze: bool = True,\n restore_coord_dims: bool = None,\n ):\n \"\"\"Returns a GroupBy object for performing grouped operations.\n\n Rather than using all unique values of `group`, the values are discretized\n first by applying `pandas.cut` [1]_ to `group`.\n\n Parameters\n ----------\n group : str, DataArray or IndexVariable\n Array whose binned values should be used to group this array. If a\n string, must be the name of a variable contained in this dataset.\n bins : int or array of scalars\n If bins is an int, it defines the number of equal-width bins in the\n range of x. However, in this case, the range of x is extended by .1%\n on each side to include the min or max values of x. If bins is a\n sequence it defines the bin edges allowing for non-uniform bin\n width. No extension of the range of x is done in this case.\n right : boolean, optional\n Indicates whether the bins include the rightmost edge or not. If\n right == True (the default), then the bins [1,2,3,4] indicate\n (1,2], (2,3], (3,4].\n labels : array or boolean, default None\n Used as labels for the resulting bins. Must be of the same length as\n the resulting bins. If False, string bin labels are assigned by\n `pandas.cut`.\n precision : int\n The precision at which to store and display the bins labels.\n include_lowest : bool\n Whether the first interval should be left-inclusive or not.\n squeeze : boolean, optional\n If \"group\" is a dimension of any arrays in this dataset, `squeeze`\n controls whether the subarrays have a dimension of length 1 along\n that dimension or if the dimension is squeezed out.\n restore_coord_dims : bool, optional\n If True, also restore the dimension order of multi-dimensional\n coordinates.\n\n Returns\n -------\n grouped : GroupBy\n A `GroupBy` object patterned after `pandas.GroupBy` that can be\n iterated over in the form of `(unique_value, grouped_array)` pairs.\n The name of the group has the added suffix `_bins` in order to\n distinguish it from the original variable.\n\n References\n ----------\n .. [1] http://pandas.pydata.org/pandas-docs/stable/generated/pandas.cut.html\n \"\"\"\n return self._groupby_cls(\n self,\n group,\n squeeze=squeeze,\n bins=bins,\n restore_coord_dims=restore_coord_dims,\n cut_kwargs={\n \"right\": right,\n \"labels\": labels,\n \"precision\": precision,\n \"include_lowest\": include_lowest,\n },\n )\n\n def rolling(\n self,\n dim: Mapping[Hashable, int] = None,\n min_periods: int = None,\n center: bool = False,\n **window_kwargs: int\n ):\n \"\"\"\n Rolling window object.\n\n Parameters\n ----------\n dim: dict, optional\n Mapping from the dimension name to create the rolling iterator\n along (e.g. 
`time`) to its moving window size.\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA). The default, None, is equivalent to\n setting min_periods equal to the size of the window.\n center : boolean, default False\n Set the labels at the center of the window.\n **window_kwargs : optional\n The keyword arguments form of ``dim``.\n One of dim or window_kwargs must be provided.\n\n Returns\n -------\n Rolling object (core.rolling.DataArrayRolling for DataArray,\n core.rolling.DatasetRolling for Dataset.)\n\n Examples\n --------\n Create rolling seasonal average of monthly data e.g. DJF, JFM, ..., SON:\n\n >>> da = xr.DataArray(np.linspace(0, 11, num=12),\n ... coords=[pd.date_range('15/12/1999',\n ... periods=12, freq=pd.DateOffset(months=1))],\n ... dims='time')\n >>> da\n \n array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ...\n >>> da.rolling(time=3, center=True).mean()\n \n array([nan, 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., nan])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ...\n\n Remove the NaNs using ``dropna()``:\n\n >>> da.rolling(time=3, center=True).mean().dropna('time')\n \n array([ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-15 2000-02-15 2000-03-15 ...\n\n See Also\n --------\n core.rolling.DataArrayRolling\n core.rolling.DatasetRolling\n \"\"\"\n dim = either_dict_or_kwargs(dim, window_kwargs, \"rolling\")\n return self._rolling_cls(self, dim, min_periods=min_periods, center=center)\n\n def rolling_exp(\n self,\n window: Mapping[Hashable, int] = None,\n window_type: str = \"span\",\n **window_kwargs\n ):\n \"\"\"\n Exponentially-weighted moving window.\n Similar to EWM in pandas\n\n Requires the optional Numbagg dependency.\n\n Parameters\n ----------\n window : A single mapping from a dimension name to window value,\n optional\n dim : str\n Name of the dimension to create the rolling exponential window\n along (e.g., `time`).\n window : int\n Size of the moving window. The type of this is specified in\n `window_type`\n window_type : str, one of ['span', 'com', 'halflife', 'alpha'],\n default 'span'\n The format of the previously supplied window. Each is a simple\n numerical transformation of the others. Described in detail:\n https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.ewm.html\n **window_kwargs : optional\n The keyword arguments form of ``window``.\n One of window or window_kwargs must be provided.\n\n See Also\n --------\n core.rolling_exp.RollingExp\n \"\"\"\n window = either_dict_or_kwargs(window, window_kwargs, \"rolling_exp\")\n\n return self._rolling_exp_cls(self, window, window_type)\n\n def coarsen(\n self,\n dim: Mapping[Hashable, int] = None,\n boundary: str = \"exact\",\n side: Union[str, Mapping[Hashable, str]] = \"left\",\n coord_func: str = \"mean\",\n **window_kwargs: int\n ):\n \"\"\"\n Coarsen object.\n\n Parameters\n ----------\n dim: dict, optional\n Mapping from the dimension name to the window size.\n dim : str\n Name of the dimension to create the rolling iterator\n along (e.g., `time`).\n window : int\n Size of the moving window.\n boundary : 'exact' | 'trim' | 'pad'\n If 'exact', a ValueError will be raised if dimension size is not a\n multiple of the window size. If 'trim', the excess entries are\n dropped. 
If 'pad', NA will be padded.\n side : 'left' or 'right' or mapping from dimension to 'left' or 'right'\n coord_func: function (name) that is applied to the coordintes,\n or a mapping from coordinate name to function (name).\n\n Returns\n -------\n Coarsen object (core.rolling.DataArrayCoarsen for DataArray,\n core.rolling.DatasetCoarsen for Dataset.)\n\n Examples\n --------\n Coarsen the long time series by averaging over every four days.\n\n >>> da = xr.DataArray(np.linspace(0, 364, num=364),\n ... dims='time',\n ... coords={'time': pd.date_range(\n ... '15/12/1999', periods=364)})\n >>> da\n \n array([ 0. , 1.002755, 2.00551 , ..., 361.99449 , 362.997245,\n 364. ])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 1999-12-16 ... 2000-12-12\n >>>\n >>> da.coarsen(time=3, boundary='trim').mean()\n \n array([ 1.002755, 4.011019, 7.019284, ..., 358.986226,\n 361.99449 ])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-16 1999-12-19 ... 2000-12-10\n >>>\n\n See Also\n --------\n core.rolling.DataArrayCoarsen\n core.rolling.DatasetCoarsen\n \"\"\"\n dim = either_dict_or_kwargs(dim, window_kwargs, \"coarsen\")\n return self._coarsen_cls(\n self, dim, boundary=boundary, side=side, coord_func=coord_func\n )\n\n def resample(\n self,\n indexer: Mapping[Hashable, str] = None,\n skipna=None,\n closed: str = None,\n label: str = None,\n base: int = 0,\n keep_attrs: bool = None,\n loffset=None,\n restore_coord_dims: bool = None,\n **indexer_kwargs: str\n ):\n \"\"\"Returns a Resample object for performing resampling operations.\n\n Handles both downsampling and upsampling. If any intervals contain no\n values from the original object, they will be given the value ``NaN``.\n\n Parameters\n ----------\n indexer : {dim: freq}, optional\n Mapping from the dimension name to resample frequency.\n skipna : bool, optional\n Whether to skip missing values when aggregating in downsampling.\n closed : 'left' or 'right', optional\n Side of each interval to treat as closed.\n label : 'left or 'right', optional\n Side of each interval to use for labeling.\n base : int, optional\n For frequencies that evenly subdivide 1 day, the \"origin\" of the\n aggregated intervals. For example, for '24H' frequency, base could\n range from 0 through 23.\n loffset : timedelta or str, optional\n Offset used to adjust the resampled time labels. Some pandas date\n offset strings are supported.\n keep_attrs : bool, optional\n If True, the object's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n restore_coord_dims : bool, optional\n If True, also restore the dimension order of multi-dimensional\n coordinates.\n **indexer_kwargs : {dim: freq}\n The keyword arguments form of ``indexer``.\n One of indexer or indexer_kwargs must be provided.\n\n Returns\n -------\n resampled : same type as caller\n This object resampled.\n\n Examples\n --------\n Downsample monthly time-series data to seasonal data:\n\n >>> da = xr.DataArray(np.linspace(0, 11, num=12),\n ... coords=[pd.date_range('15/12/1999',\n ... periods=12, freq=pd.DateOffset(months=1))],\n ... 
dims='time')\n >>> da\n \n array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ...\n >>> da.resample(time=\"QS-DEC\").mean()\n \n array([ 1., 4., 7., 10.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-01 2000-03-01 2000-06-01 2000-09-01\n\n Upsample monthly time-series data to daily data:\n\n >>> da.resample(time='1D').interpolate('linear')\n \n array([ 0. , 0.032258, 0.064516, ..., 10.935484, 10.967742, 11. ])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 1999-12-16 1999-12-17 ...\n\n Limit scope of upsampling method\n >>> da.resample(time='1D').nearest(tolerance='1D')\n \n array([ 0., 0., nan, ..., nan, 11., 11.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 1999-12-16 ... 2000-11-15\n\n References\n ----------\n\n .. [1] http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases\n \"\"\"\n # TODO support non-string indexer after removing the old API.\n\n from .dataarray import DataArray\n from .resample import RESAMPLE_DIM\n from ..coding.cftimeindex import CFTimeIndex\n\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n\n # note: the second argument (now 'skipna') use to be 'dim'\n if (\n (skipna is not None and not isinstance(skipna, bool))\n or (\"how\" in indexer_kwargs and \"how\" not in self.dims)\n or (\"dim\" in indexer_kwargs and \"dim\" not in self.dims)\n ):\n raise TypeError(\n \"resample() no longer supports the `how` or \"\n \"`dim` arguments. Instead call methods on resample \"\n \"objects, e.g., data.resample(time='1D').mean()\"\n )\n\n indexer = either_dict_or_kwargs(indexer, indexer_kwargs, \"resample\")\n if len(indexer) != 1:\n raise ValueError(\"Resampling only supported along single dimensions.\")\n dim, freq = next(iter(indexer.items()))\n\n dim_name = dim\n dim_coord = self[dim]\n\n if isinstance(self.indexes[dim_name], CFTimeIndex):\n from .resample_cftime import CFTimeGrouper\n\n grouper = CFTimeGrouper(freq, closed, label, base, loffset)\n else:\n grouper = pd.Grouper(\n freq=freq, closed=closed, label=label, base=base, loffset=loffset\n )\n group = DataArray(\n dim_coord, coords=dim_coord.coords, dims=dim_coord.dims, name=RESAMPLE_DIM\n )\n resampler = self._resample_cls(\n self,\n group=group,\n dim=dim_name,\n grouper=grouper,\n resample_dim=RESAMPLE_DIM,\n restore_coord_dims=restore_coord_dims,\n )\n\n return resampler\n\n def where(self, cond, other=dtypes.NA, drop: bool = False):\n \"\"\"Filter elements from this object according to a condition.\n\n This operation follows the normal broadcasting and alignment rules that\n xarray uses for binary arithmetic.\n\n Parameters\n ----------\n cond : DataArray or Dataset with boolean dtype\n Locations at which to preserve this object's values.\n other : scalar, DataArray or Dataset, optional\n Value to use for locations in this object where ``cond`` is False.\n By default, these locations filled with NA.\n drop : boolean, optional\n If True, coordinate labels that only correspond to False values of\n the condition are dropped from the result. 
Mutually exclusive with\n ``other``.\n\n Returns\n -------\n Same type as caller.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> a = xr.DataArray(np.arange(25).reshape(5, 5), dims=('x', 'y'))\n >>> a.where(a.x + a.y < 4)\n \n array([[ 0., 1., 2., 3., nan],\n [ 5., 6., 7., nan, nan],\n [ 10., 11., nan, nan, nan],\n [ 15., nan, nan, nan, nan],\n [ nan, nan, nan, nan, nan]])\n Dimensions without coordinates: x, y\n >>> a.where(a.x + a.y < 5, -1)\n \n array([[ 0, 1, 2, 3, 4],\n [ 5, 6, 7, 8, -1],\n [10, 11, 12, -1, -1],\n [15, 16, -1, -1, -1],\n [20, -1, -1, -1, -1]])\n Dimensions without coordinates: x, y\n >>> a.where(a.x + a.y < 4, drop=True)\n \n array([[ 0., 1., 2., 3.],\n [ 5., 6., 7., nan],\n [ 10., 11., nan, nan],\n [ 15., nan, nan, nan]])\n Dimensions without coordinates: x, y\n\n See also\n --------\n numpy.where : corresponding numpy function\n where : equivalent function\n \"\"\"\n from .alignment import align\n from .dataarray import DataArray\n from .dataset import Dataset\n\n if drop:\n if other is not dtypes.NA:\n raise ValueError(\"cannot set `other` if drop=True\")\n\n if not isinstance(cond, (Dataset, DataArray)):\n raise TypeError(\n \"cond argument is %r but must be a %r or %r\"\n % (cond, Dataset, DataArray)\n )\n\n # align so we can use integer indexing\n self, cond = align(self, cond)\n\n # get cond with the minimal size needed for the Dataset\n if isinstance(cond, Dataset):\n clipcond = cond.to_array().any(\"variable\")\n else:\n clipcond = cond\n\n # clip the data corresponding to coordinate dims that are not used\n nonzeros = zip(clipcond.dims, np.nonzero(clipcond.values))\n indexers = {k: np.unique(v) for k, v in nonzeros}\n\n self = self.isel(**indexers)\n cond = cond.isel(**indexers)\n\n return ops.where_method(self, cond, other)\n\n def close(self: Any) -> None:\n \"\"\"Close any files linked to this object\n \"\"\"\n if self._file_obj is not None:\n self._file_obj.close()\n self._file_obj = None\n\n def isin(self, test_elements):\n \"\"\"Tests each value in the array for whether it is in test elements.\n\n Parameters\n ----------\n test_elements : array_like\n The values against which to test each value of `element`.\n This argument is flattened if an array or array_like.\n See numpy notes for behavior with non-array-like parameters.\n\n Returns\n -------\n isin : same as object, bool\n Has the same shape as this object.\n\n Examples\n --------\n\n >>> array = xr.DataArray([1, 2, 3], dims='x')\n >>> array.isin([1, 3])\n \n array([ True, False, True])\n Dimensions without coordinates: x\n\n See also\n --------\n numpy.isin\n \"\"\"\n from .computation import apply_ufunc\n from .dataset import Dataset\n from .dataarray import DataArray\n from .variable import Variable\n\n if isinstance(test_elements, Dataset):\n raise TypeError(\n \"isin() argument must be convertible to an array: {}\".format(\n test_elements\n )\n )\n elif isinstance(test_elements, (Variable, DataArray)):\n # need to explicitly pull out data to support dask arrays as the\n # second argument\n test_elements = test_elements.data\n\n return apply_ufunc(\n duck_array_ops.isin,\n self,\n kwargs=dict(test_elements=test_elements),\n dask=\"allowed\",\n )\n\n def __enter__(self: T) -> T:\n return self\n\n def __exit__(self, exc_type, exc_value, traceback) -> None:\n self.close()\n\n def __getitem__(self, value):\n # implementations of this class should implement this method\n raise NotImplementedError()"},{"col":0,"comment":"Collect variables and indexes from list of mappings of xarray 
objects.\n\n Mappings must either be Dataset objects, or have values of one of the\n following types:\n - an xarray.Variable\n - a tuple `(dims, data[, attrs[, encoding]])` that can be converted in\n an xarray.Variable\n - or an xarray.DataArray\n ","endLoc":274,"header":"def collect_variables_and_indexes(\n list_of_mappings: \"List[DatasetLike]\",\n) -> Dict[Hashable, List[MergeElement]]","id":471,"name":"collect_variables_and_indexes","nodeType":"Function","startLoc":227,"text":"def collect_variables_and_indexes(\n list_of_mappings: \"List[DatasetLike]\",\n) -> Dict[Hashable, List[MergeElement]]:\n \"\"\"Collect variables and indexes from list of mappings of xarray objects.\n\n Mappings must either be Dataset objects, or have values of one of the\n following types:\n - an xarray.Variable\n - a tuple `(dims, data[, attrs[, encoding]])` that can be converted in\n an xarray.Variable\n - or an xarray.DataArray\n \"\"\"\n from .dataarray import DataArray\n from .dataset import Dataset\n\n grouped: Dict[Hashable, List[Tuple[Variable, pd.Index]]] = {}\n\n def append(name, variable, index):\n values = grouped.setdefault(name, [])\n values.append((variable, index))\n\n def append_all(variables, indexes):\n for name, variable in variables.items():\n append(name, variable, indexes.get(name))\n\n for mapping in list_of_mappings:\n if isinstance(mapping, Dataset):\n append_all(mapping.variables, mapping.indexes)\n continue\n\n for name, variable in mapping.items():\n if isinstance(variable, DataArray):\n coords = variable._coords.copy() # use private API for speed\n indexes = dict(variable.indexes)\n # explicitly overwritten variables should take precedence\n coords.pop(name, None)\n indexes.pop(name, None)\n append_all(coords, indexes)\n\n variable = as_variable(variable, name=name)\n if variable.dims == (name,):\n variable = variable.to_index_variable()\n index = variable.to_index()\n else:\n index = None\n append(name, variable, index)\n\n return grouped"},{"attributeType":"null","col":8,"comment":"null","endLoc":86,"id":472,"name":"obj","nodeType":"Attribute","startLoc":86,"text":"self.obj"},{"attributeType":"null","col":8,"comment":"null","endLoc":89,"id":473,"name":"alpha","nodeType":"Attribute","startLoc":89,"text":"self.alpha"},{"col":0,"comment":"null","endLoc":260,"header":"def _parse_datasets(datasets)","id":474,"name":"_parse_datasets","nodeType":"Function","startLoc":242,"text":"def _parse_datasets(datasets):\n\n dims = set()\n all_coord_names = set()\n data_vars = set() # list of data_vars\n dim_coords = {} # maps dim name to variable\n dims_sizes = {} # shared dimension sizes to expand variables\n\n for ds in datasets:\n dims_sizes.update(ds.dims)\n all_coord_names.update(ds.coords)\n data_vars.update(ds.data_vars)\n\n for dim in set(ds.dims) - dims:\n if dim not in dim_coords:\n dim_coords[dim] = ds.coords[dim].variable\n dims = dims | set(ds.dims)\n\n return dim_coords, dims_sizes, all_coord_names, data_vars"},{"attributeType":"null","col":8,"comment":"null","endLoc":88,"id":477,"name":"dim","nodeType":"Attribute","startLoc":88,"text":"self.dim"},{"col":0,"comment":"null","endLoc":23,"header":"def move_exp_nanmean(array, *, axis, alpha)","id":478,"name":"move_exp_nanmean","nodeType":"Function","startLoc":15,"text":"def move_exp_nanmean(array, *, axis, alpha):\n if isinstance(array, dask_array_type):\n raise TypeError(\"rolling_exp is not currently support for dask arrays\")\n import numbagg\n\n if axis == ():\n return array.astype(np.float64)\n else:\n return 
numbagg.move_exp_nanmean(array, axis=axis, alpha=alpha)"},{"col":0,"comment":"null","endLoc":25,"header":"def file_md5_checksum(fname)","id":479,"name":"file_md5_checksum","nodeType":"Function","startLoc":21,"text":"def file_md5_checksum(fname):\n hash_md5 = hashlib.md5()\n with open(fname, \"rb\") as f:\n hash_md5.update(f.read())\n return hash_md5.hexdigest()"},{"attributeType":"null","col":16,"comment":"null","endLoc":1,"id":480,"name":"np","nodeType":"Attribute","startLoc":1,"text":"np"},{"className":"SupportsArithmetic","col":0,"comment":"Base class for xarray types that support arithmetic.\n\n Used by Dataset, DataArray, Variable and GroupBy.\n ","endLoc":104,"id":481,"nodeType":"Class","startLoc":11,"text":"class SupportsArithmetic:\n \"\"\"Base class for xarray types that support arithmetic.\n\n Used by Dataset, DataArray, Variable and GroupBy.\n \"\"\"\n\n __slots__ = ()\n\n # TODO: implement special methods for arithmetic here rather than injecting\n # them in xarray/core/ops.py. Ideally, do so by inheriting from\n # numpy.lib.mixins.NDArrayOperatorsMixin.\n\n # TODO: allow extending this with some sort of registration system\n _HANDLED_TYPES = (\n np.ndarray,\n np.generic,\n numbers.Number,\n bytes,\n str,\n ) + dask_array_type\n\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n from .computation import apply_ufunc\n\n # See the docstring example for numpy.lib.mixins.NDArrayOperatorsMixin.\n out = kwargs.get(\"out\", ())\n for x in inputs + out:\n if not isinstance(x, self._HANDLED_TYPES + (SupportsArithmetic,)):\n return NotImplemented\n\n if ufunc.signature is not None:\n raise NotImplementedError(\n \"{} not supported: xarray objects do not directly implement \"\n \"generalized ufuncs. Instead, use xarray.apply_ufunc or \"\n \"explicitly convert to xarray objects to NumPy arrays \"\n \"(e.g., with `.values`).\".format(ufunc)\n )\n\n if method != \"__call__\":\n # TODO: support other methods, e.g., reduce and accumulate.\n raise NotImplementedError(\n \"{} method for ufunc {} is not implemented on xarray objects, \"\n \"which currently only support the __call__ method. As an \"\n \"alternative, consider explicitly converting xarray objects \"\n \"to NumPy arrays (e.g., with `.values`).\".format(method, ufunc)\n )\n\n if any(isinstance(o, SupportsArithmetic) for o in out):\n # TODO: implement this with logic like _inplace_binary_op. This\n # will be necessary to use NDArrayOperatorsMixin.\n raise NotImplementedError(\n \"xarray objects are not yet supported in the `out` argument \"\n \"for ufuncs. 
As an alternative, consider explicitly \"\n \"converting xarray objects to NumPy arrays (e.g., with \"\n \"`.values`).\"\n )\n\n join = dataset_join = OPTIONS[\"arithmetic_join\"]\n\n return apply_ufunc(\n ufunc,\n *inputs,\n input_core_dims=((),) * ufunc.nin,\n output_core_dims=((),) * ufunc.nout,\n join=join,\n dataset_join=dataset_join,\n dataset_fill_value=np.nan,\n kwargs=kwargs,\n dask=\"allowed\"\n )\n\n # this has no runtime function - these are listed so IDEs know these\n # methods are defined and don't warn on these operations\n __lt__ = (\n __le__\n ) = (\n __ge__\n ) = (\n __gt__\n ) = (\n __add__\n ) = (\n __sub__\n ) = (\n __mul__\n ) = (\n __truediv__\n ) = (\n __floordiv__\n ) = (\n __mod__\n ) = (\n __pow__\n ) = __and__ = __xor__ = __or__ = __div__ = __eq__ = __ne__ = not_implemented"},{"col":4,"comment":"null","endLoc":80,"header":"def __array_ufunc__(self, ufunc, method, *inputs, **kwargs)","id":482,"name":"__array_ufunc__","nodeType":"Function","startLoc":32,"text":"def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n from .computation import apply_ufunc\n\n # See the docstring example for numpy.lib.mixins.NDArrayOperatorsMixin.\n out = kwargs.get(\"out\", ())\n for x in inputs + out:\n if not isinstance(x, self._HANDLED_TYPES + (SupportsArithmetic,)):\n return NotImplemented\n\n if ufunc.signature is not None:\n raise NotImplementedError(\n \"{} not supported: xarray objects do not directly implement \"\n \"generalized ufuncs. Instead, use xarray.apply_ufunc or \"\n \"explicitly convert to xarray objects to NumPy arrays \"\n \"(e.g., with `.values`).\".format(ufunc)\n )\n\n if method != \"__call__\":\n # TODO: support other methods, e.g., reduce and accumulate.\n raise NotImplementedError(\n \"{} method for ufunc {} is not implemented on xarray objects, \"\n \"which currently only support the __call__ method. As an \"\n \"alternative, consider explicitly converting xarray objects \"\n \"to NumPy arrays (e.g., with `.values`).\".format(method, ufunc)\n )\n\n if any(isinstance(o, SupportsArithmetic) for o in out):\n # TODO: implement this with logic like _inplace_binary_op. This\n # will be necessary to use NDArrayOperatorsMixin.\n raise NotImplementedError(\n \"xarray objects are not yet supported in the `out` argument \"\n \"for ufuncs. 
As an alternative, consider explicitly \"\n \"converting xarray objects to NumPy arrays (e.g., with \"\n \"`.values`).\"\n )\n\n join = dataset_join = OPTIONS[\"arithmetic_join\"]\n\n return apply_ufunc(\n ufunc,\n *inputs,\n input_core_dims=((),) * ufunc.nin,\n output_core_dims=((),) * ufunc.nout,\n join=join,\n dataset_join=dataset_join,\n dataset_fill_value=np.nan,\n kwargs=kwargs,\n dask=\"allowed\"\n )"},{"col":0,"comment":"\n Determine which dataset variables need to be concatenated in the result,\n ","endLoc":238,"header":"def _calc_concat_over(datasets, dim, dim_names, data_vars, coords, compat)","id":483,"name":"_calc_concat_over","nodeType":"Function","startLoc":158,"text":"def _calc_concat_over(datasets, dim, dim_names, data_vars, coords, compat):\n \"\"\"\n Determine which dataset variables need to be concatenated in the result,\n \"\"\"\n # Return values\n concat_over = set()\n equals = {}\n\n if dim in dim_names:\n concat_over_existing_dim = True\n concat_over.add(dim)\n else:\n concat_over_existing_dim = False\n\n concat_dim_lengths = []\n for ds in datasets:\n if concat_over_existing_dim:\n if dim not in ds.dims:\n if dim in ds:\n ds = ds.set_coords(dim)\n concat_over.update(k for k, v in ds.variables.items() if dim in v.dims)\n concat_dim_lengths.append(ds.dims.get(dim, 1))\n\n def process_subset_opt(opt, subset):\n if isinstance(opt, str):\n if opt == \"different\":\n if compat == \"override\":\n raise ValueError(\n \"Cannot specify both %s='different' and compat='override'.\"\n % subset\n )\n # all nonindexes that are not the same in each dataset\n for k in getattr(datasets[0], subset):\n if k not in concat_over:\n # Compare the variable of all datasets vs. the one\n # of the first dataset. Perform the minimum amount of\n # loads in order to avoid multiple loads from disk\n # while keeping the RAM footprint low.\n v_lhs = datasets[0].variables[k].load()\n # We'll need to know later on if variables are equal.\n computed = []\n for ds_rhs in datasets[1:]:\n v_rhs = ds_rhs.variables[k].compute()\n computed.append(v_rhs)\n if not getattr(v_lhs, compat)(v_rhs):\n concat_over.add(k)\n equals[k] = False\n # computed variables are not to be re-computed\n # again in the future\n for ds, v in zip(datasets[1:], computed):\n ds.variables[k].data = v.data\n break\n else:\n equals[k] = True\n\n elif opt == \"all\":\n concat_over.update(\n set(getattr(datasets[0], subset)) - set(datasets[0].dims)\n )\n elif opt == \"minimal\":\n pass\n else:\n raise ValueError(\"unexpected value for %s: %s\" % (subset, opt))\n else:\n invalid_vars = [k for k in opt if k not in getattr(datasets[0], subset)]\n if invalid_vars:\n if subset == \"coords\":\n raise ValueError(\n \"some variables in coords are not coordinates on \"\n \"the first dataset: %s\" % (invalid_vars,)\n )\n else:\n raise ValueError(\n \"some variables in data_vars are not data variables \"\n \"on the first dataset: %s\" % (invalid_vars,)\n )\n concat_over.update(opt)\n\n process_subset_opt(data_vars, \"data_vars\")\n process_subset_opt(coords, \"coords\")\n return concat_over, equals, concat_dim_lengths"},{"id":484,"name":"ci/azure","nodeType":"Package"},{"id":485,"name":"install.yml","nodeType":"TextFile","path":"ci/azure","text":"parameters:\n env_file: ci/requirements/$CONDA_ENV.yml\n\nsteps:\n\n- template: add-conda-to-path.yml\n\n- bash: |\n conda env create -n xarray-tests --file ${{ parameters.env_file }}\n displayName: Install conda dependencies\n\n- bash: |\n source activate xarray-tests\n pip install -f 
https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com \\\n --no-deps \\\n --pre \\\n --upgrade \\\n numpy \\\n matplotlib \\\n pandas \\\n scipy\n pip install \\\n --no-deps \\\n --upgrade \\\n git+https://github.com/dask/dask \\\n git+https://github.com/dask/distributed \\\n git+https://github.com/zarr-developers/zarr\n condition: eq(variables['UPSTREAM_DEV'], 'true')\n displayName: Install upstream dev dependencies\n\n- bash: |\n source activate xarray-tests\n pip install --no-deps -e .\n displayName: Install xarray\n\n- bash: |\n source activate xarray-tests\n conda info -a\n conda list\n python xarray/util/print_versions.py\n displayName: Version info\n"},{"col":0,"comment":"Open and decode a dataset from a file or file-like object.\n\n Parameters\n ----------\n filename_or_obj : str, Path, file or xarray.backends.*DataStore\n Strings and Path objects are interpreted as a path to a netCDF file\n or an OpenDAP URL and opened with python-netCDF4, unless the filename\n ends with .gz, in which case the file is gunzipped and opened with\n scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like\n objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).\n group : str, optional\n Path to the netCDF4 group in the given file to open (only works for\n netCDF4 files).\n decode_cf : bool, optional\n Whether to decode these variables, assuming they were saved according\n to CF conventions.\n mask_and_scale : bool, optional\n If True, replace array values equal to `_FillValue` with NA and scale\n values according to the formula `original_values * scale_factor +\n add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are\n taken from variable attributes (if they exist). If the `_FillValue` or\n `missing_value` attribute contains multiple values a warning will be\n issued and all array values matching one of the multiple values will\n be replaced by NA. mask_and_scale defaults to True except for the\n pseudonetcdf backend.\n decode_times : bool, optional\n If True, decode times encoded in the standard NetCDF datetime format\n into datetime objects. Otherwise, leave them encoded as numbers.\n autoclose : bool, optional\n If True, automatically close files to avoid OS Error of too many files\n being open. However, this option doesn't work with streams, e.g.,\n BytesIO.\n concat_characters : bool, optional\n If True, concatenate along the last dimension of character arrays to\n form string arrays. Dimensions will only be concatenated over (and\n removed) if they have no corresponding variable and if they are only\n used as the last dimension of character arrays.\n decode_coords : bool, optional\n If True, decode the 'coordinates' attribute to identify coordinates in\n the resulting dataset.\n engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib', \n 'pseudonetcdf'}, optional\n Engine to use when reading files. If not provided, the default engine\n is chosen based on available dependencies, with a preference for\n 'netcdf4'.\n chunks : int or dict, optional\n If chunks is provided, it used to load the new dataset into dask\n arrays. ``chunks={}`` loads the dataset with dask using a single\n chunk for all arrays.\n lock : False or duck threading.Lock, optional\n Resource lock to use when reading data from disk. Only relevant when\n using dask or another form of parallelism. 
By default, appropriate\n locks are chosen to safely read and write files with the currently\n active dask scheduler.\n cache : bool, optional\n If True, cache data loaded from the underlying datastore in memory as\n NumPy arrays when accessed to avoid reading from the underlying data-\n store multiple times. Defaults to True unless you specify the `chunks`\n argument to use dask, in which case it defaults to False. Does not\n change the behavior of coordinates corresponding to dimensions, which\n always load their data from disk into a ``pandas.Index``.\n drop_variables: string or iterable, optional\n A variable or list of variables to exclude from being parsed from the\n dataset. This may be useful to drop variables with problems or\n inconsistent values.\n backend_kwargs: dictionary, optional\n A dictionary of keyword arguments to pass on to the backend. This\n may be useful when backend options would improve performance or\n allow user control of dataset processing.\n use_cftime: bool, optional\n Only relevant if encoded dates come from a standard calendar\n (e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not\n specified). If None (default), attempt to decode times to\n ``np.datetime64[ns]`` objects; if this is not possible, decode times to\n ``cftime.datetime`` objects. If True, always decode times to\n ``cftime.datetime`` objects, regardless of whether or not they can be\n represented using ``np.datetime64[ns]`` objects. If False, always\n decode times to ``np.datetime64[ns]`` objects; if this is not possible\n raise an error.\n\n Returns\n -------\n dataset : Dataset\n The newly created dataset.\n\n Notes\n -----\n ``open_dataset`` opens the file with read-only access. When you modify\n values of a Dataset, even one linked to files on disk, only the in-memory\n copy you are manipulating in xarray is modified: the original file on disk\n is never touched.\n\n See Also\n --------\n open_mfdataset\n ","endLoc":542,"header":"def open_dataset(\n filename_or_obj,\n group=None,\n decode_cf=True,\n mask_and_scale=None,\n decode_times=True,\n autoclose=None,\n concat_characters=True,\n decode_coords=True,\n engine=None,\n chunks=None,\n lock=None,\n cache=None,\n drop_variables=None,\n backend_kwargs=None,\n use_cftime=None,\n)","id":486,"name":"open_dataset","nodeType":"Function","startLoc":287,"text":"def open_dataset(\n filename_or_obj,\n group=None,\n decode_cf=True,\n mask_and_scale=None,\n decode_times=True,\n autoclose=None,\n concat_characters=True,\n decode_coords=True,\n engine=None,\n chunks=None,\n lock=None,\n cache=None,\n drop_variables=None,\n backend_kwargs=None,\n use_cftime=None,\n):\n \"\"\"Open and decode a dataset from a file or file-like object.\n\n Parameters\n ----------\n filename_or_obj : str, Path, file or xarray.backends.*DataStore\n Strings and Path objects are interpreted as a path to a netCDF file\n or an OpenDAP URL and opened with python-netCDF4, unless the filename\n ends with .gz, in which case the file is gunzipped and opened with\n scipy.io.netcdf (only netCDF3 supported). 
Byte-strings or file-like\n objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).\n group : str, optional\n Path to the netCDF4 group in the given file to open (only works for\n netCDF4 files).\n decode_cf : bool, optional\n Whether to decode these variables, assuming they were saved according\n to CF conventions.\n mask_and_scale : bool, optional\n If True, replace array values equal to `_FillValue` with NA and scale\n values according to the formula `original_values * scale_factor +\n add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are\n taken from variable attributes (if they exist). If the `_FillValue` or\n `missing_value` attribute contains multiple values a warning will be\n issued and all array values matching one of the multiple values will\n be replaced by NA. mask_and_scale defaults to True except for the\n pseudonetcdf backend.\n decode_times : bool, optional\n If True, decode times encoded in the standard NetCDF datetime format\n into datetime objects. Otherwise, leave them encoded as numbers.\n autoclose : bool, optional\n If True, automatically close files to avoid OS Error of too many files\n being open. However, this option doesn't work with streams, e.g.,\n BytesIO.\n concat_characters : bool, optional\n If True, concatenate along the last dimension of character arrays to\n form string arrays. Dimensions will only be concatenated over (and\n removed) if they have no corresponding variable and if they are only\n used as the last dimension of character arrays.\n decode_coords : bool, optional\n If True, decode the 'coordinates' attribute to identify coordinates in\n the resulting dataset.\n engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib', \\\n 'pseudonetcdf'}, optional\n Engine to use when reading files. If not provided, the default engine\n is chosen based on available dependencies, with a preference for\n 'netcdf4'.\n chunks : int or dict, optional\n If chunks is provided, it used to load the new dataset into dask\n arrays. ``chunks={}`` loads the dataset with dask using a single\n chunk for all arrays.\n lock : False or duck threading.Lock, optional\n Resource lock to use when reading data from disk. Only relevant when\n using dask or another form of parallelism. By default, appropriate\n locks are chosen to safely read and write files with the currently\n active dask scheduler.\n cache : bool, optional\n If True, cache data loaded from the underlying datastore in memory as\n NumPy arrays when accessed to avoid reading from the underlying data-\n store multiple times. Defaults to True unless you specify the `chunks`\n argument to use dask, in which case it defaults to False. Does not\n change the behavior of coordinates corresponding to dimensions, which\n always load their data from disk into a ``pandas.Index``.\n drop_variables: string or iterable, optional\n A variable or list of variables to exclude from being parsed from the\n dataset. This may be useful to drop variables with problems or\n inconsistent values.\n backend_kwargs: dictionary, optional\n A dictionary of keyword arguments to pass on to the backend. This\n may be useful when backend options would improve performance or\n allow user control of dataset processing.\n use_cftime: bool, optional\n Only relevant if encoded dates come from a standard calendar\n (e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not\n specified). 
If None (default), attempt to decode times to\n ``np.datetime64[ns]`` objects; if this is not possible, decode times to\n ``cftime.datetime`` objects. If True, always decode times to\n ``cftime.datetime`` objects, regardless of whether or not they can be\n represented using ``np.datetime64[ns]`` objects. If False, always\n decode times to ``np.datetime64[ns]`` objects; if this is not possible\n raise an error.\n\n Returns\n -------\n dataset : Dataset\n The newly created dataset.\n\n Notes\n -----\n ``open_dataset`` opens the file with read-only access. When you modify\n values of a Dataset, even one linked to files on disk, only the in-memory\n copy you are manipulating in xarray is modified: the original file on disk\n is never touched.\n\n See Also\n --------\n open_mfdataset\n \"\"\"\n engines = [\n None,\n \"netcdf4\",\n \"scipy\",\n \"pydap\",\n \"h5netcdf\",\n \"pynio\",\n \"cfgrib\",\n \"pseudonetcdf\",\n ]\n if engine not in engines:\n raise ValueError(\n \"unrecognized engine for open_dataset: {}\\n\"\n \"must be one of: {}\".format(engine, engines)\n )\n\n if autoclose is not None:\n warnings.warn(\n \"The autoclose argument is no longer used by \"\n \"xarray.open_dataset() and is now ignored; it will be removed in \"\n \"a future version of xarray. If necessary, you can control the \"\n \"maximum number of simultaneous open files with \"\n \"xarray.set_options(file_cache_maxsize=...).\",\n FutureWarning,\n stacklevel=2,\n )\n\n if mask_and_scale is None:\n mask_and_scale = not engine == \"pseudonetcdf\"\n\n if not decode_cf:\n mask_and_scale = False\n decode_times = False\n concat_characters = False\n decode_coords = False\n\n if cache is None:\n cache = chunks is None\n\n if backend_kwargs is None:\n backend_kwargs = {}\n\n def maybe_decode_store(store, lock=False):\n ds = conventions.decode_cf(\n store,\n mask_and_scale=mask_and_scale,\n decode_times=decode_times,\n concat_characters=concat_characters,\n decode_coords=decode_coords,\n drop_variables=drop_variables,\n use_cftime=use_cftime,\n )\n\n _protect_dataset_variables_inplace(ds, cache)\n\n if chunks is not None:\n from dask.base import tokenize\n\n # if passed an actual file path, augment the token with\n # the file modification time\n if isinstance(filename_or_obj, str) and not is_remote_uri(filename_or_obj):\n mtime = os.path.getmtime(filename_or_obj)\n else:\n mtime = None\n token = tokenize(\n filename_or_obj,\n mtime,\n group,\n decode_cf,\n mask_and_scale,\n decode_times,\n concat_characters,\n decode_coords,\n engine,\n chunks,\n drop_variables,\n use_cftime,\n )\n name_prefix = \"open_dataset-%s\" % token\n ds2 = ds.chunk(chunks, name_prefix=name_prefix, token=token)\n ds2._file_obj = ds._file_obj\n else:\n ds2 = ds\n\n return ds2\n\n if isinstance(filename_or_obj, Path):\n filename_or_obj = str(filename_or_obj)\n\n if isinstance(filename_or_obj, AbstractDataStore):\n store = filename_or_obj\n\n elif isinstance(filename_or_obj, str):\n filename_or_obj = _normalize_path(filename_or_obj)\n\n if engine is None:\n engine = _get_default_engine(filename_or_obj, allow_remote=True)\n if engine == \"netcdf4\":\n store = backends.NetCDF4DataStore.open(\n filename_or_obj, group=group, lock=lock, **backend_kwargs\n )\n elif engine == \"scipy\":\n store = backends.ScipyDataStore(filename_or_obj, **backend_kwargs)\n elif engine == \"pydap\":\n store = backends.PydapDataStore.open(filename_or_obj, **backend_kwargs)\n elif engine == \"h5netcdf\":\n store = backends.H5NetCDFStore(\n filename_or_obj, group=group, lock=lock, 
**backend_kwargs\n )\n elif engine == \"pynio\":\n store = backends.NioDataStore(filename_or_obj, lock=lock, **backend_kwargs)\n elif engine == \"pseudonetcdf\":\n store = backends.PseudoNetCDFDataStore.open(\n filename_or_obj, lock=lock, **backend_kwargs\n )\n elif engine == \"cfgrib\":\n store = backends.CfGribDataStore(\n filename_or_obj, lock=lock, **backend_kwargs\n )\n\n else:\n if engine not in [None, \"scipy\", \"h5netcdf\"]:\n raise ValueError(\n \"can only read bytes or file-like objects \"\n \"with engine='scipy' or 'h5netcdf'\"\n )\n engine = _get_engine_from_magic_number(filename_or_obj)\n if engine == \"scipy\":\n store = backends.ScipyDataStore(filename_or_obj, **backend_kwargs)\n elif engine == \"h5netcdf\":\n store = backends.H5NetCDFStore(\n filename_or_obj, group=group, lock=lock, **backend_kwargs\n )\n\n with close_on_error(store):\n ds = maybe_decode_store(store)\n\n # Ensure source filename always stored in dataset object (GH issue #2550)\n if \"source\" not in ds.encoding:\n if isinstance(filename_or_obj, str):\n ds.encoding[\"source\"] = filename_or_obj\n\n return ds"},{"fileName":"formatting.py","filePath":"xarray/core","id":487,"nodeType":"File","text":"\"\"\"String formatting routines for __repr__.\n\"\"\"\nimport contextlib\nimport functools\nfrom datetime import datetime, timedelta\nfrom itertools import zip_longest\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.errors import OutOfBoundsDatetime\n\nfrom .duck_array_ops import array_equiv\nfrom .options import OPTIONS\nfrom .pycompat import dask_array_type, sparse_array_type\n\n\ndef pretty_print(x, numchars):\n \"\"\"Given an object `x`, call `str(x)` and format the returned string so\n that it is numchars long, padding with trailing spaces or truncating with\n ellipses as necessary\n \"\"\"\n s = maybe_truncate(x, numchars)\n return s + \" \" * max(numchars - len(s), 0)\n\n\ndef maybe_truncate(obj, maxlen=500):\n s = str(obj)\n if len(s) > maxlen:\n s = s[: (maxlen - 3)] + \"...\"\n return s\n\n\ndef wrap_indent(text, start=\"\", length=None):\n if length is None:\n length = len(start)\n indent = \"\\n\" + \" \" * length\n return start + indent.join(x for x in text.splitlines())\n\n\ndef _get_indexer_at_least_n_items(shape, n_desired, from_end):\n assert 0 < n_desired <= np.prod(shape)\n cum_items = np.cumprod(shape[::-1])\n n_steps = np.argmax(cum_items >= n_desired)\n stop = int(np.ceil(float(n_desired) / np.r_[1, cum_items][n_steps]))\n indexer = (\n ((-1 if from_end else 0),) * (len(shape) - 1 - n_steps)\n + ((slice(-stop, None) if from_end else slice(stop)),)\n + (slice(None),) * n_steps\n )\n return indexer\n\n\ndef first_n_items(array, n_desired):\n \"\"\"Returns the first n_desired items of an array\"\"\"\n # Unfortunately, we can't just do array.flat[:n_desired] here because it\n # might not be a numpy.ndarray. Moreover, access to elements of the array\n # could be very expensive (e.g. 
if it's only available over DAP), so go out\n # of our way to get them in a single call to __getitem__ using only slices.\n if n_desired < 1:\n raise ValueError(\"must request at least one item\")\n\n if array.size == 0:\n # work around for https://github.com/numpy/numpy/issues/5195\n return []\n\n if n_desired < array.size:\n indexer = _get_indexer_at_least_n_items(array.shape, n_desired, from_end=False)\n array = array[indexer]\n return np.asarray(array).flat[:n_desired]\n\n\ndef last_n_items(array, n_desired):\n \"\"\"Returns the last n_desired items of an array\"\"\"\n # Unfortunately, we can't just do array.flat[-n_desired:] here because it\n # might not be a numpy.ndarray. Moreover, access to elements of the array\n # could be very expensive (e.g. if it's only available over DAP), so go out\n # of our way to get them in a single call to __getitem__ using only slices.\n if (n_desired == 0) or (array.size == 0):\n return []\n\n if n_desired < array.size:\n indexer = _get_indexer_at_least_n_items(array.shape, n_desired, from_end=True)\n array = array[indexer]\n return np.asarray(array).flat[-n_desired:]\n\n\ndef last_item(array):\n \"\"\"Returns the last item of an array in a list or an empty list.\"\"\"\n if array.size == 0:\n # work around for https://github.com/numpy/numpy/issues/5195\n return []\n\n indexer = (slice(-1, None),) * array.ndim\n return np.ravel(np.asarray(array[indexer])).tolist()\n\n\ndef format_timestamp(t):\n \"\"\"Cast given object to a Timestamp and return a nicely formatted string\"\"\"\n # Timestamp is only valid for 1678 to 2262\n try:\n datetime_str = str(pd.Timestamp(t))\n except OutOfBoundsDatetime:\n datetime_str = str(t)\n\n try:\n date_str, time_str = datetime_str.split()\n except ValueError:\n # catch NaT and others that don't split nicely\n return datetime_str\n else:\n if time_str == \"00:00:00\":\n return date_str\n else:\n return \"{}T{}\".format(date_str, time_str)\n\n\ndef format_timedelta(t, timedelta_format=None):\n \"\"\"Cast given object to a Timestamp and return a nicely formatted string\"\"\"\n timedelta_str = str(pd.Timedelta(t))\n try:\n days_str, time_str = timedelta_str.split(\" days \")\n except ValueError:\n # catch NaT and others that don't split nicely\n return timedelta_str\n else:\n if timedelta_format == \"date\":\n return days_str + \" days\"\n elif timedelta_format == \"time\":\n return time_str\n else:\n return timedelta_str\n\n\ndef format_item(x, timedelta_format=None, quote_strings=True):\n \"\"\"Returns a succinct summary of an object as a string\"\"\"\n if isinstance(x, (np.datetime64, datetime)):\n return format_timestamp(x)\n if isinstance(x, (np.timedelta64, timedelta)):\n return format_timedelta(x, timedelta_format=timedelta_format)\n elif isinstance(x, (str, bytes)):\n return repr(x) if quote_strings else x\n elif isinstance(x, (float, np.float)):\n return \"{:.4}\".format(x)\n else:\n return str(x)\n\n\ndef format_items(x):\n \"\"\"Returns a succinct summaries of all items in a sequence as strings\"\"\"\n x = np.asarray(x)\n timedelta_format = \"datetime\"\n if np.issubdtype(x.dtype, np.timedelta64):\n x = np.asarray(x, dtype=\"timedelta64[ns]\")\n day_part = x[~pd.isnull(x)].astype(\"timedelta64[D]\").astype(\"timedelta64[ns]\")\n time_needed = x[~pd.isnull(x)] != day_part\n day_needed = day_part != np.timedelta64(0, \"ns\")\n if np.logical_not(day_needed).all():\n timedelta_format = \"time\"\n elif np.logical_not(time_needed).all():\n timedelta_format = \"date\"\n\n formatted = [format_item(xi, timedelta_format) for 
xi in x]\n return formatted\n\n\ndef format_array_flat(array, max_width):\n \"\"\"Return a formatted string for as many items in the flattened version of\n array that will fit within max_width characters.\n \"\"\"\n # every item will take up at least two characters, but we always want to\n # print at least first and last items\n max_possibly_relevant = min(\n max(array.size, 1), max(int(np.ceil(max_width / 2.0)), 2)\n )\n relevant_front_items = format_items(\n first_n_items(array, (max_possibly_relevant + 1) // 2)\n )\n relevant_back_items = format_items(last_n_items(array, max_possibly_relevant // 2))\n # interleave relevant front and back items:\n # [a, b, c] and [y, z] -> [a, z, b, y, c]\n relevant_items = sum(\n zip_longest(relevant_front_items, reversed(relevant_back_items)), ()\n )[:max_possibly_relevant]\n\n cum_len = np.cumsum([len(s) + 1 for s in relevant_items]) - 1\n if (array.size > 2) and (\n (max_possibly_relevant < array.size) or (cum_len > max_width).any()\n ):\n padding = \" ... \"\n count = min(\n array.size, max(np.argmax(cum_len + len(padding) - 1 > max_width), 2)\n )\n else:\n count = array.size\n padding = \"\" if (count <= 1) else \" \"\n\n num_front = (count + 1) // 2\n num_back = count - num_front\n # note that num_back is 0 <--> array.size is 0 or 1\n # <--> relevant_back_items is []\n pprint_str = (\n \" \".join(relevant_front_items[:num_front])\n + padding\n + \" \".join(relevant_back_items[-num_back:])\n )\n return pprint_str\n\n\n_KNOWN_TYPE_REPRS = {np.ndarray: \"np.ndarray\"}\nwith contextlib.suppress(ImportError):\n import sparse\n\n _KNOWN_TYPE_REPRS[sparse.COO] = \"sparse.COO\"\n\n\ndef inline_dask_repr(array):\n \"\"\"Similar to dask.array.DataArray.__repr__, but without\n redundant information that's already printed by the repr\n function of the xarray wrapper.\n \"\"\"\n assert isinstance(array, dask_array_type), array\n\n chunksize = tuple(c[0] for c in array.chunks)\n\n if hasattr(array, \"_meta\"):\n meta = array._meta\n if type(meta) in _KNOWN_TYPE_REPRS:\n meta_repr = _KNOWN_TYPE_REPRS[type(meta)]\n else:\n meta_repr = type(meta).__name__\n meta_string = \", meta={}\".format(meta_repr)\n else:\n meta_string = \"\"\n\n return \"dask.array\".format(chunksize, meta_string)\n\n\ndef inline_sparse_repr(array):\n \"\"\"Similar to sparse.COO.__repr__, but without the redundant shape/dtype.\"\"\"\n assert isinstance(array, sparse_array_type), array\n return \"<{}: nnz={:d}, fill_value={!s}>\".format(\n type(array).__name__, array.nnz, array.fill_value\n )\n\n\ndef inline_variable_array_repr(var, max_width):\n \"\"\"Build a one-line summary of a variable's data.\"\"\"\n if var._in_memory:\n return format_array_flat(var, max_width)\n elif isinstance(var._data, dask_array_type):\n return inline_dask_repr(var.data)\n elif isinstance(var._data, sparse_array_type):\n return inline_sparse_repr(var.data)\n elif hasattr(var._data, \"__array_function__\"):\n return maybe_truncate(repr(var._data).replace(\"\\n\", \" \"), max_width)\n else:\n # internal xarray array type\n return \"...\"\n\n\ndef summarize_variable(name, var, col_width, marker=\" \", max_width=None):\n \"\"\"Summarize a variable in one line, e.g., for the Dataset.__repr__.\"\"\"\n if max_width is None:\n max_width = OPTIONS[\"display_width\"]\n first_col = pretty_print(\" {} {} \".format(marker, name), col_width)\n if var.dims:\n dims_str = \"({}) \".format(\", \".join(map(str, var.dims)))\n else:\n dims_str = \"\"\n front_str = \"{}{}{} \".format(first_col, dims_str, var.dtype)\n\n values_width = 
max_width - len(front_str)\n values_str = inline_variable_array_repr(var, values_width)\n\n return front_str + values_str\n\n\ndef _summarize_coord_multiindex(coord, col_width, marker):\n first_col = pretty_print(\" {} {} \".format(marker, coord.name), col_width)\n return \"{}({}) MultiIndex\".format(first_col, str(coord.dims[0]))\n\n\ndef _summarize_coord_levels(coord, col_width, marker=\"-\"):\n return \"\\n\".join(\n [\n summarize_variable(\n lname, coord.get_level_variable(lname), col_width, marker=marker\n )\n for lname in coord.level_names\n ]\n )\n\n\ndef summarize_datavar(name, var, col_width):\n return summarize_variable(name, var.variable, col_width)\n\n\ndef summarize_coord(name, var, col_width):\n is_index = name in var.dims\n marker = \"*\" if is_index else \" \"\n if is_index:\n coord = var.variable.to_index_variable()\n if coord.level_names is not None:\n return \"\\n\".join(\n [\n _summarize_coord_multiindex(coord, col_width, marker),\n _summarize_coord_levels(coord, col_width),\n ]\n )\n return summarize_variable(name, var.variable, col_width, marker)\n\n\ndef summarize_attr(key, value, col_width=None):\n \"\"\"Summary for __repr__ - use ``X.attrs[key]`` for full value.\"\"\"\n # Indent key and add ':', then right-pad if col_width is not None\n k_str = \" {}:\".format(key)\n if col_width is not None:\n k_str = pretty_print(k_str, col_width)\n # Replace tabs and newlines, so we print on one line in known width\n v_str = str(value).replace(\"\\t\", \"\\\\t\").replace(\"\\n\", \"\\\\n\")\n # Finally, truncate to the desired display width\n return maybe_truncate(\"{} {}\".format(k_str, v_str), OPTIONS[\"display_width\"])\n\n\nEMPTY_REPR = \" *empty*\"\n\n\ndef _get_col_items(mapping):\n \"\"\"Get all column items to format, including both keys of `mapping`\n and MultiIndex levels if any.\n \"\"\"\n from .variable import IndexVariable\n\n col_items = []\n for k, v in mapping.items():\n col_items.append(k)\n var = getattr(v, \"variable\", v)\n if isinstance(var, IndexVariable):\n level_names = var.to_index_variable().level_names\n if level_names is not None:\n col_items += list(level_names)\n return col_items\n\n\ndef _calculate_col_width(col_items):\n max_name_length = max(len(str(s)) for s in col_items) if col_items else 0\n col_width = max(max_name_length, 7) + 6\n return col_width\n\n\ndef _mapping_repr(mapping, title, summarizer, col_width=None):\n if col_width is None:\n col_width = _calculate_col_width(mapping)\n summary = [\"{}:\".format(title)]\n if mapping:\n summary += [summarizer(k, v, col_width) for k, v in mapping.items()]\n else:\n summary += [EMPTY_REPR]\n return \"\\n\".join(summary)\n\n\ndata_vars_repr = functools.partial(\n _mapping_repr, title=\"Data variables\", summarizer=summarize_datavar\n)\n\n\nattrs_repr = functools.partial(\n _mapping_repr, title=\"Attributes\", summarizer=summarize_attr\n)\n\n\ndef coords_repr(coords, col_width=None):\n if col_width is None:\n col_width = _calculate_col_width(_get_col_items(coords))\n return _mapping_repr(\n coords, title=\"Coordinates\", summarizer=summarize_coord, col_width=col_width\n )\n\n\ndef indexes_repr(indexes):\n summary = []\n for k, v in indexes.items():\n summary.append(wrap_indent(repr(v), \"{}: \".format(k)))\n return \"\\n\".join(summary)\n\n\ndef dim_summary(obj):\n elements = [\"{}: {}\".format(k, v) for k, v in obj.sizes.items()]\n return \", \".join(elements)\n\n\ndef unindexed_dims_repr(dims, coords):\n unindexed_dims = [d for d in dims if d not in coords]\n if unindexed_dims:\n dims_str = \", 
\".join(\"{}\".format(d) for d in unindexed_dims)\n return \"Dimensions without coordinates: \" + dims_str\n else:\n return None\n\n\n@contextlib.contextmanager\ndef set_numpy_options(*args, **kwargs):\n original = np.get_printoptions()\n np.set_printoptions(*args, **kwargs)\n try:\n yield\n finally:\n np.set_printoptions(**original)\n\n\ndef short_numpy_repr(array):\n array = np.asarray(array)\n\n # default to lower precision so a full (abbreviated) line can fit on\n # one line with the default display_width\n options = {\"precision\": 6, \"linewidth\": OPTIONS[\"display_width\"], \"threshold\": 200}\n if array.ndim < 3:\n edgeitems = 3\n elif array.ndim == 3:\n edgeitems = 2\n else:\n edgeitems = 1\n options[\"edgeitems\"] = edgeitems\n with set_numpy_options(**options):\n return repr(array)\n\n\ndef short_data_repr(array):\n \"\"\"Format \"data\" for DataArray and Variable.\"\"\"\n internal_data = getattr(array, \"variable\", array)._data\n if isinstance(array, np.ndarray):\n return short_numpy_repr(array)\n elif hasattr(internal_data, \"__array_function__\") or isinstance(\n internal_data, dask_array_type\n ):\n return repr(array.data)\n elif array._in_memory or array.size < 1e5:\n return short_numpy_repr(array)\n else:\n # internal xarray array type\n return \"[{} values with dtype={}]\".format(array.size, array.dtype)\n\n\ndef array_repr(arr):\n # used for DataArray, Variable and IndexVariable\n if hasattr(arr, \"name\") and arr.name is not None:\n name_str = \"{!r} \".format(arr.name)\n else:\n name_str = \"\"\n\n summary = [\n \"\".format(type(arr).__name__, name_str, dim_summary(arr)),\n short_data_repr(arr),\n ]\n\n if hasattr(arr, \"coords\"):\n if arr.coords:\n summary.append(repr(arr.coords))\n\n unindexed_dims_str = unindexed_dims_repr(arr.dims, arr.coords)\n if unindexed_dims_str:\n summary.append(unindexed_dims_str)\n\n if arr.attrs:\n summary.append(attrs_repr(arr.attrs))\n\n return \"\\n\".join(summary)\n\n\ndef dataset_repr(ds):\n summary = [\"\".format(type(ds).__name__)]\n\n col_width = _calculate_col_width(_get_col_items(ds.variables))\n\n dims_start = pretty_print(\"Dimensions:\", col_width)\n summary.append(\"{}({})\".format(dims_start, dim_summary(ds)))\n\n if ds.coords:\n summary.append(coords_repr(ds.coords, col_width=col_width))\n\n unindexed_dims_str = unindexed_dims_repr(ds.dims, ds.coords)\n if unindexed_dims_str:\n summary.append(unindexed_dims_str)\n\n summary.append(data_vars_repr(ds.data_vars, col_width=col_width))\n\n if ds.attrs:\n summary.append(attrs_repr(ds.attrs))\n\n return \"\\n\".join(summary)\n\n\ndef diff_dim_summary(a, b):\n if a.dims != b.dims:\n return \"Differing dimensions:\\n ({}) != ({})\".format(\n dim_summary(a), dim_summary(b)\n )\n else:\n return \"\"\n\n\ndef _diff_mapping_repr(a_mapping, b_mapping, compat, title, summarizer, col_width=None):\n def extra_items_repr(extra_keys, mapping, ab_side):\n extra_repr = [summarizer(k, mapping[k], col_width) for k in extra_keys]\n if extra_repr:\n header = \"{} only on the {} object:\".format(title, ab_side)\n return [header] + extra_repr\n else:\n return []\n\n a_keys = set(a_mapping)\n b_keys = set(b_mapping)\n\n summary = []\n\n diff_items = []\n\n for k in a_keys & b_keys:\n try:\n # compare xarray variable\n compatible = getattr(a_mapping[k], compat)(b_mapping[k])\n is_variable = True\n except AttributeError:\n # compare attribute value\n compatible = a_mapping[k] == b_mapping[k]\n is_variable = False\n\n if not compatible:\n temp = [\n summarizer(k, vars[k], col_width) for vars in 
(a_mapping, b_mapping)\n ]\n\n if compat == \"identical\" and is_variable:\n attrs_summary = []\n\n for m in (a_mapping, b_mapping):\n attr_s = \"\\n\".join(\n [summarize_attr(ak, av) for ak, av in m[k].attrs.items()]\n )\n attrs_summary.append(attr_s)\n\n temp = [\n \"\\n\".join([var_s, attr_s]) if attr_s else var_s\n for var_s, attr_s in zip(temp, attrs_summary)\n ]\n\n diff_items += [ab_side + s[1:] for ab_side, s in zip((\"L\", \"R\"), temp)]\n\n if diff_items:\n summary += [\"Differing {}:\".format(title.lower())] + diff_items\n\n summary += extra_items_repr(a_keys - b_keys, a_mapping, \"left\")\n summary += extra_items_repr(b_keys - a_keys, b_mapping, \"right\")\n\n return \"\\n\".join(summary)\n\n\ndiff_coords_repr = functools.partial(\n _diff_mapping_repr, title=\"Coordinates\", summarizer=summarize_coord\n)\n\n\ndiff_data_vars_repr = functools.partial(\n _diff_mapping_repr, title=\"Data variables\", summarizer=summarize_datavar\n)\n\n\ndiff_attrs_repr = functools.partial(\n _diff_mapping_repr, title=\"Attributes\", summarizer=summarize_attr\n)\n\n\ndef _compat_to_str(compat):\n if compat == \"equals\":\n return \"equal\"\n else:\n return compat\n\n\ndef diff_array_repr(a, b, compat):\n # used for DataArray, Variable and IndexVariable\n summary = [\n \"Left and right {} objects are not {}\".format(\n type(a).__name__, _compat_to_str(compat)\n )\n ]\n\n summary.append(diff_dim_summary(a, b))\n\n if not array_equiv(a.data, b.data):\n temp = [wrap_indent(short_numpy_repr(obj), start=\" \") for obj in (a, b)]\n diff_data_repr = [\n ab_side + \"\\n\" + ab_data_repr\n for ab_side, ab_data_repr in zip((\"L\", \"R\"), temp)\n ]\n summary += [\"Differing values:\"] + diff_data_repr\n\n if hasattr(a, \"coords\"):\n col_width = _calculate_col_width(set(a.coords) | set(b.coords))\n summary.append(\n diff_coords_repr(a.coords, b.coords, compat, col_width=col_width)\n )\n\n if compat == \"identical\":\n summary.append(diff_attrs_repr(a.attrs, b.attrs, compat))\n\n return \"\\n\".join(summary)\n\n\ndef diff_dataset_repr(a, b, compat):\n summary = [\n \"Left and right {} objects are not {}\".format(\n type(a).__name__, _compat_to_str(compat)\n )\n ]\n\n col_width = _calculate_col_width(\n set(_get_col_items(a.variables) + _get_col_items(b.variables))\n )\n\n summary.append(diff_dim_summary(a, b))\n summary.append(diff_coords_repr(a.coords, b.coords, compat, col_width=col_width))\n summary.append(\n diff_data_vars_repr(a.data_vars, b.data_vars, compat, col_width=col_width)\n )\n\n if compat == \"identical\":\n summary.append(diff_attrs_repr(a.attrs, b.attrs, compat))\n\n return \"\\n\".join(summary)\n"},{"col":0,"comment":"Extract the priority variable from a list of mappings.\n\n We need this method because in some cases the priority argument itself\n might have conflicting values (e.g., if it is a dict with two DataArray\n values with conflicting coordinate values).\n\n Parameters\n ----------\n objects : list of dictionaries of variables\n Dictionaries in which to find the priority variables.\n priority_arg : int or None\n Integer object whose variable should take priority.\n compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts'}, optional\n Compatibility checks to use when merging variables.\n\n Returns\n -------\n A dictionary of variables and associated indexes (if any) to prioritize.\n ","endLoc":421,"header":"def _get_priority_vars_and_indexes(\n objects: List[\"DatasetLike\"], priority_arg: Optional[int], compat: str = \"equals\"\n) -> Dict[Hashable, 
MergeElement]","id":489,"name":"_get_priority_vars_and_indexes","nodeType":"Function","startLoc":391,"text":"def _get_priority_vars_and_indexes(\n objects: List[\"DatasetLike\"], priority_arg: Optional[int], compat: str = \"equals\"\n) -> Dict[Hashable, MergeElement]:\n \"\"\"Extract the priority variable from a list of mappings.\n\n We need this method because in some cases the priority argument itself\n might have conflicting values (e.g., if it is a dict with two DataArray\n values with conflicting coordinate values).\n\n Parameters\n ----------\n objects : list of dictionaries of variables\n Dictionaries in which to find the priority variables.\n priority_arg : int or None\n Integer object whose variable should take priority.\n compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts'}, optional\n Compatibility checks to use when merging variables.\n\n Returns\n -------\n A dictionary of variables and associated indexes (if any) to prioritize.\n \"\"\"\n if priority_arg is None:\n return {}\n\n collected = collect_variables_and_indexes([objects[priority_arg]])\n variables, indexes = merge_collected(collected, compat=compat)\n grouped: Dict[Hashable, MergeElement] = {}\n for name, variable in variables.items():\n grouped[name] = (variable, indexes.get(name))\n return grouped"},{"col":0,"comment":"Merge dicts of variables, while resolving conflicts appropriately.\n\n Parameters\n ----------\n\n Type of equality check to use when checking for conflicts.\n\n Returns\n -------\n Dict with keys taken by the union of keys on list_of_mappings,\n and Variable values corresponding to those that should be found on the\n merged result.\n ","endLoc":224,"header":"def merge_collected(\n grouped: Dict[Hashable, List[MergeElement]],\n prioritized: Mapping[Hashable, MergeElement] = None,\n compat: str = \"minimal\",\n) -> Tuple[Dict[Hashable, Variable], Dict[Hashable, pd.Index]]","id":490,"name":"merge_collected","nodeType":"Function","startLoc":153,"text":"def merge_collected(\n grouped: Dict[Hashable, List[MergeElement]],\n prioritized: Mapping[Hashable, MergeElement] = None,\n compat: str = \"minimal\",\n) -> Tuple[Dict[Hashable, Variable], Dict[Hashable, pd.Index]]:\n \"\"\"Merge dicts of variables, while resolving conflicts appropriately.\n\n Parameters\n ----------\n\n Type of equality check to use when checking for conflicts.\n\n Returns\n -------\n Dict with keys taken by the union of keys on list_of_mappings,\n and Variable values corresponding to those that should be found on the\n merged result.\n \"\"\"\n if prioritized is None:\n prioritized = {}\n\n _assert_compat_valid(compat)\n\n merged_vars: Dict[Hashable, Variable] = {}\n merged_indexes: Dict[Hashable, pd.Index] = {}\n\n for name, elements_list in grouped.items():\n if name in prioritized:\n variable, index = prioritized[name]\n merged_vars[name] = variable\n if index is not None:\n merged_indexes[name] = index\n else:\n indexed_elements = [\n (variable, index)\n for variable, index in elements_list\n if index is not None\n ]\n\n if indexed_elements:\n # TODO(shoyer): consider adjusting this logic. 
Are we really\n # OK throwing away variable without an index in favor of\n # indexed variables, without even checking if values match?\n variable, index = indexed_elements[0]\n for _, other_index in indexed_elements[1:]:\n if not index.equals(other_index):\n raise MergeError(\n \"conflicting values for index %r on objects to be \"\n \"combined:\\nfirst value: %r\\nsecond value: %r\"\n % (name, index, other_index)\n )\n if compat == \"identical\":\n for other_variable, _ in indexed_elements[1:]:\n if not dict_equiv(variable.attrs, other_variable.attrs):\n raise MergeError(\n \"conflicting attribute values on combined \"\n \"variable %r:\\nfirst value: %r\\nsecond value: %r\"\n % (name, variable.attrs, other_variable.attrs)\n )\n merged_vars[name] = variable\n merged_indexes[name] = index\n else:\n variables = [variable for variable, _ in elements_list]\n try:\n merged_vars[name] = unique_variable(name, variables, compat)\n except MergeError:\n if compat != \"minimal\":\n # we need more than \"minimal\" compatibility (for which\n # we drop conflicting coordinates)\n raise\n\n return merged_vars, merged_indexes"},{"col":0,"comment":"Apply a vectorized function for unlabeled arrays on xarray objects.\n\n The function will be mapped over the data variable(s) of the input\n arguments using xarray's standard rules for labeled computation, including\n alignment, broadcasting, looping over GroupBy/Dataset variables, and\n merging of coordinates.\n\n Parameters\n ----------\n func : callable\n Function to call like ``func(*args, **kwargs)`` on unlabeled arrays\n (``.data``) that returns an array or tuple of arrays. If multiple\n arguments with non-matching dimensions are supplied, this function is\n expected to vectorize (broadcast) over axes of positional arguments in\n the style of NumPy universal functions [1]_ (if this is not the case,\n set ``vectorize=True``). If this function returns multiple outputs, you\n must set ``output_core_dims`` as well.\n *args : Dataset, DataArray, GroupBy, Variable, numpy/dask arrays or scalars\n Mix of labeled and/or unlabeled arrays to which to apply the function.\n input_core_dims : Sequence[Sequence], optional\n List of the same length as ``args`` giving the list of core dimensions\n on each input argument that should not be broadcast. By default, we\n assume there are no core dimensions on any input arguments.\n\n For example, ``input_core_dims=[[], ['time']]`` indicates that all\n dimensions on the first argument and all dimensions other than 'time'\n on the second argument should be broadcast.\n\n Core dimensions are automatically moved to the last axes of input\n variables before applying ``func``, which facilitates using NumPy style\n generalized ufuncs [2]_.\n output_core_dims : List[tuple], optional\n List of the same length as the number of output arguments from\n ``func``, giving the list of core dimensions on each output that were\n not broadcast on the inputs. By default, we assume that ``func``\n outputs exactly one array, with axes corresponding to each broadcast\n dimension.\n\n Core dimensions are assumed to appear as the last dimensions of each\n output in the provided order.\n exclude_dims : set, optional\n Core dimensions on the inputs to exclude from alignment and\n broadcasting entirely. Any input coordinates along these dimensions\n will be dropped. Each excluded dimension must also appear in\n ``input_core_dims`` for at least one argument. 
Only dimensions listed\n here are allowed to change size between input and output objects.\n vectorize : bool, optional\n If True, then assume ``func`` only takes arrays defined over core\n dimensions as input and vectorize it automatically with\n :py:func:`numpy.vectorize`. This option exists for convenience, but is\n almost always slower than supplying a pre-vectorized function.\n Using this option requires NumPy version 1.12 or newer.\n join : {'outer', 'inner', 'left', 'right', 'exact'}, optional\n Method for joining the indexes of the passed objects along each\n dimension, and the variables of Dataset objects with mismatched\n data variables:\n\n - 'outer': use the union of object indexes\n - 'inner': use the intersection of object indexes\n - 'left': use indexes from the first object with each dimension\n - 'right': use indexes from the last object with each dimension\n - 'exact': raise `ValueError` instead of aligning when indexes to be\n aligned are not equal\n dataset_join : {'outer', 'inner', 'left', 'right', 'exact'}, optional\n Method for joining variables of Dataset objects with mismatched\n data variables.\n\n - 'outer': take variables from both Dataset objects\n - 'inner': take only overlapped variables\n - 'left': take only variables from the first object\n - 'right': take only variables from the last object\n - 'exact': data variables on all Dataset objects must match exactly\n dataset_fill_value : optional\n Value used in place of missing variables on Dataset inputs when the\n datasets do not share the exact same ``data_vars``. Required if\n ``dataset_join not in {'inner', 'exact'}``, otherwise ignored.\n keep_attrs: boolean, Optional\n Whether to copy attributes from the first argument to the output.\n kwargs: dict, optional\n Optional keyword arguments passed directly on to call ``func``.\n dask: 'forbidden', 'allowed' or 'parallelized', optional\n How to handle applying to objects containing lazy data in the form of\n dask arrays:\n\n - 'forbidden' (default): raise an error if a dask array is encountered.\n - 'allowed': pass dask arrays directly on to ``func``.\n - 'parallelized': automatically parallelize ``func`` if any of the\n inputs are a dask array. If used, the ``output_dtypes`` argument must\n also be provided. Multiple output arguments are not yet supported.\n output_dtypes : list of dtypes, optional\n Optional list of output dtypes. Only used if dask='parallelized'.\n output_sizes : dict, optional\n Optional mapping from dimension names to sizes for outputs. Only used\n if dask='parallelized' and new dimensions (not found on inputs) appear\n on outputs.\n\n Returns\n -------\n Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or\n numpy.ndarray, the first type on that list to appear on an input.\n\n Examples\n --------\n\n Calculate the vector magnitude of two arguments:\n\n >>> def magnitude(a, b):\n ... func = lambda x, y: np.sqrt(x ** 2 + y ** 2)\n ... 
return xr.apply_ufunc(func, a, b)\n\n You can now apply ``magnitude()`` to ``xr.DataArray`` and ``xr.Dataset``\n objects, with automatically preserved dimensions and coordinates, e.g.,\n\n >>> array = xr.DataArray([1, 2, 3], coords=[('x', [0.1, 0.2, 0.3])])\n >>> magnitude(array, -array)\n \n array([1.414214, 2.828427, 4.242641])\n Coordinates:\n * x (x) float64 0.1 0.2 0.3\n\n Plain scalars, numpy arrays and a mix of these with xarray objects is also\n supported:\n\n >>> magnitude(4, 5)\n 5.0\n >>> magnitude(3, np.array([0, 4]))\n array([3., 5.])\n >>> magnitude(array, 0)\n \n array([1., 2., 3.])\n Coordinates:\n * x (x) float64 0.1 0.2 0.3\n\n Other examples of how you could use ``apply_ufunc`` to write functions to\n (very nearly) replicate existing xarray functionality:\n\n Compute the mean (``.mean``) over one dimension::\n\n def mean(obj, dim):\n # note: apply always moves core dimensions to the end\n return apply_ufunc(np.mean, obj,\n input_core_dims=[[dim]],\n kwargs={'axis': -1})\n\n Inner product over a specific dimension (like ``xr.dot``)::\n\n def _inner(x, y):\n result = np.matmul(x[..., np.newaxis, :], y[..., :, np.newaxis])\n return result[..., 0, 0]\n\n def inner_product(a, b, dim):\n return apply_ufunc(_inner, a, b, input_core_dims=[[dim], [dim]])\n\n Stack objects along a new dimension (like ``xr.concat``)::\n\n def stack(objects, dim, new_coord):\n # note: this version does not stack coordinates\n func = lambda *x: np.stack(x, axis=-1)\n result = apply_ufunc(func, *objects,\n output_core_dims=[[dim]],\n join='outer',\n dataset_fill_value=np.nan)\n result[dim] = new_coord\n return result\n\n If your function is not vectorized but can be applied only to core\n dimensions, you can use ``vectorize=True`` to turn into a vectorized\n function. This wraps :py:func:`numpy.vectorize`, so the operation isn't\n terribly fast. Here we'll use it to calculate the distance between\n empirical samples from two probability distributions, using a scipy\n function that needs to be applied to vectors::\n\n import scipy.stats\n\n def earth_mover_distance(first_samples,\n second_samples,\n dim='ensemble'):\n return apply_ufunc(scipy.stats.wasserstein_distance,\n first_samples, second_samples,\n input_core_dims=[[dim], [dim]],\n vectorize=True)\n\n Most of NumPy's builtin functions already broadcast their inputs\n appropriately for use in `apply`. You may find helper functions such as\n numpy.broadcast_arrays helpful in writing your function. `apply_ufunc` also\n works well with numba's vectorize and guvectorize. Further explanation with\n examples are provided in the xarray documentation [3].\n\n See also\n --------\n numpy.broadcast_arrays\n numba.vectorize\n numba.guvectorize\n\n References\n ----------\n .. [1] http://docs.scipy.org/doc/numpy/reference/ufuncs.html\n .. [2] http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html\n .. 
[3] http://xarray.pydata.org/en/stable/computation.html#wrapping-custom-computation\n ","endLoc":1049,"header":"def apply_ufunc(\n func: Callable,\n *args: Any,\n input_core_dims: Sequence[Sequence] = None,\n output_core_dims: Optional[Sequence[Sequence]] = ((),),\n exclude_dims","id":491,"name":"apply_ufunc","nodeType":"Function","startLoc":750,"text":"def apply_ufunc(\n func: Callable,\n *args: Any,\n input_core_dims: Sequence[Sequence] = None,\n output_core_dims: Optional[Sequence[Sequence]] = ((),),\n exclude_dims: AbstractSet = frozenset(),\n vectorize: bool = False,\n join: str = \"exact\",\n dataset_join: str = \"exact\",\n dataset_fill_value: object = _NO_FILL_VALUE,\n keep_attrs: bool = False,\n kwargs: Mapping = None,\n dask: str = \"forbidden\",\n output_dtypes: Sequence = None,\n output_sizes: Mapping[Any, int] = None\n) -> Any:\n \"\"\"Apply a vectorized function for unlabeled arrays on xarray objects.\n\n The function will be mapped over the data variable(s) of the input\n arguments using xarray's standard rules for labeled computation, including\n alignment, broadcasting, looping over GroupBy/Dataset variables, and\n merging of coordinates.\n\n Parameters\n ----------\n func : callable\n Function to call like ``func(*args, **kwargs)`` on unlabeled arrays\n (``.data``) that returns an array or tuple of arrays. If multiple\n arguments with non-matching dimensions are supplied, this function is\n expected to vectorize (broadcast) over axes of positional arguments in\n the style of NumPy universal functions [1]_ (if this is not the case,\n set ``vectorize=True``). If this function returns multiple outputs, you\n must set ``output_core_dims`` as well.\n *args : Dataset, DataArray, GroupBy, Variable, numpy/dask arrays or scalars\n Mix of labeled and/or unlabeled arrays to which to apply the function.\n input_core_dims : Sequence[Sequence], optional\n List of the same length as ``args`` giving the list of core dimensions\n on each input argument that should not be broadcast. By default, we\n assume there are no core dimensions on any input arguments.\n\n For example, ``input_core_dims=[[], ['time']]`` indicates that all\n dimensions on the first argument and all dimensions other than 'time'\n on the second argument should be broadcast.\n\n Core dimensions are automatically moved to the last axes of input\n variables before applying ``func``, which facilitates using NumPy style\n generalized ufuncs [2]_.\n output_core_dims : List[tuple], optional\n List of the same length as the number of output arguments from\n ``func``, giving the list of core dimensions on each output that were\n not broadcast on the inputs. By default, we assume that ``func``\n outputs exactly one array, with axes corresponding to each broadcast\n dimension.\n\n Core dimensions are assumed to appear as the last dimensions of each\n output in the provided order.\n exclude_dims : set, optional\n Core dimensions on the inputs to exclude from alignment and\n broadcasting entirely. Any input coordinates along these dimensions\n will be dropped. Each excluded dimension must also appear in\n ``input_core_dims`` for at least one argument. Only dimensions listed\n here are allowed to change size between input and output objects.\n vectorize : bool, optional\n If True, then assume ``func`` only takes arrays defined over core\n dimensions as input and vectorize it automatically with\n :py:func:`numpy.vectorize`. 
This option exists for convenience, but is\n almost always slower than supplying a pre-vectorized function.\n Using this option requires NumPy version 1.12 or newer.\n join : {'outer', 'inner', 'left', 'right', 'exact'}, optional\n Method for joining the indexes of the passed objects along each\n dimension, and the variables of Dataset objects with mismatched\n data variables:\n\n - 'outer': use the union of object indexes\n - 'inner': use the intersection of object indexes\n - 'left': use indexes from the first object with each dimension\n - 'right': use indexes from the last object with each dimension\n - 'exact': raise `ValueError` instead of aligning when indexes to be\n aligned are not equal\n dataset_join : {'outer', 'inner', 'left', 'right', 'exact'}, optional\n Method for joining variables of Dataset objects with mismatched\n data variables.\n\n - 'outer': take variables from both Dataset objects\n - 'inner': take only overlapped variables\n - 'left': take only variables from the first object\n - 'right': take only variables from the last object\n - 'exact': data variables on all Dataset objects must match exactly\n dataset_fill_value : optional\n Value used in place of missing variables on Dataset inputs when the\n datasets do not share the exact same ``data_vars``. Required if\n ``dataset_join not in {'inner', 'exact'}``, otherwise ignored.\n keep_attrs: boolean, Optional\n Whether to copy attributes from the first argument to the output.\n kwargs: dict, optional\n Optional keyword arguments passed directly on to call ``func``.\n dask: 'forbidden', 'allowed' or 'parallelized', optional\n How to handle applying to objects containing lazy data in the form of\n dask arrays:\n\n - 'forbidden' (default): raise an error if a dask array is encountered.\n - 'allowed': pass dask arrays directly on to ``func``.\n - 'parallelized': automatically parallelize ``func`` if any of the\n inputs are a dask array. If used, the ``output_dtypes`` argument must\n also be provided. Multiple output arguments are not yet supported.\n output_dtypes : list of dtypes, optional\n Optional list of output dtypes. Only used if dask='parallelized'.\n output_sizes : dict, optional\n Optional mapping from dimension names to sizes for outputs. Only used\n if dask='parallelized' and new dimensions (not found on inputs) appear\n on outputs.\n\n Returns\n -------\n Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or\n numpy.ndarray, the first type on that list to appear on an input.\n\n Examples\n --------\n\n Calculate the vector magnitude of two arguments:\n\n >>> def magnitude(a, b):\n ... func = lambda x, y: np.sqrt(x ** 2 + y ** 2)\n ... 
return xr.apply_ufunc(func, a, b)\n\n You can now apply ``magnitude()`` to ``xr.DataArray`` and ``xr.Dataset``\n objects, with automatically preserved dimensions and coordinates, e.g.,\n\n >>> array = xr.DataArray([1, 2, 3], coords=[('x', [0.1, 0.2, 0.3])])\n >>> magnitude(array, -array)\n \n array([1.414214, 2.828427, 4.242641])\n Coordinates:\n * x (x) float64 0.1 0.2 0.3\n\n Plain scalars, numpy arrays and a mix of these with xarray objects is also\n supported:\n\n >>> magnitude(4, 5)\n 5.0\n >>> magnitude(3, np.array([0, 4]))\n array([3., 5.])\n >>> magnitude(array, 0)\n \n array([1., 2., 3.])\n Coordinates:\n * x (x) float64 0.1 0.2 0.3\n\n Other examples of how you could use ``apply_ufunc`` to write functions to\n (very nearly) replicate existing xarray functionality:\n\n Compute the mean (``.mean``) over one dimension::\n\n def mean(obj, dim):\n # note: apply always moves core dimensions to the end\n return apply_ufunc(np.mean, obj,\n input_core_dims=[[dim]],\n kwargs={'axis': -1})\n\n Inner product over a specific dimension (like ``xr.dot``)::\n\n def _inner(x, y):\n result = np.matmul(x[..., np.newaxis, :], y[..., :, np.newaxis])\n return result[..., 0, 0]\n\n def inner_product(a, b, dim):\n return apply_ufunc(_inner, a, b, input_core_dims=[[dim], [dim]])\n\n Stack objects along a new dimension (like ``xr.concat``)::\n\n def stack(objects, dim, new_coord):\n # note: this version does not stack coordinates\n func = lambda *x: np.stack(x, axis=-1)\n result = apply_ufunc(func, *objects,\n output_core_dims=[[dim]],\n join='outer',\n dataset_fill_value=np.nan)\n result[dim] = new_coord\n return result\n\n If your function is not vectorized but can be applied only to core\n dimensions, you can use ``vectorize=True`` to turn into a vectorized\n function. This wraps :py:func:`numpy.vectorize`, so the operation isn't\n terribly fast. Here we'll use it to calculate the distance between\n empirical samples from two probability distributions, using a scipy\n function that needs to be applied to vectors::\n\n import scipy.stats\n\n def earth_mover_distance(first_samples,\n second_samples,\n dim='ensemble'):\n return apply_ufunc(scipy.stats.wasserstein_distance,\n first_samples, second_samples,\n input_core_dims=[[dim], [dim]],\n vectorize=True)\n\n Most of NumPy's builtin functions already broadcast their inputs\n appropriately for use in `apply`. You may find helper functions such as\n numpy.broadcast_arrays helpful in writing your function. `apply_ufunc` also\n works well with numba's vectorize and guvectorize. Further explanation with\n examples are provided in the xarray documentation [3].\n\n See also\n --------\n numpy.broadcast_arrays\n numba.vectorize\n numba.guvectorize\n\n References\n ----------\n .. [1] http://docs.scipy.org/doc/numpy/reference/ufuncs.html\n .. [2] http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html\n .. [3] http://xarray.pydata.org/en/stable/computation.html#wrapping-custom-computation\n \"\"\"\n from .groupby import GroupBy\n from .dataarray import DataArray\n from .variable import Variable\n\n if input_core_dims is None:\n input_core_dims = ((),) * (len(args))\n elif len(input_core_dims) != len(args):\n raise ValueError(\n \"input_core_dims must be None or a tuple with the length same to \"\n \"the number of arguments. 
Given input_core_dims: {}, \"\n \"number of args: {}.\".format(input_core_dims, len(args))\n )\n\n if kwargs is None:\n kwargs = {}\n\n signature = _UFuncSignature(input_core_dims, output_core_dims)\n\n if exclude_dims and not exclude_dims <= signature.all_core_dims:\n raise ValueError(\n \"each dimension in `exclude_dims` must also be a \"\n \"core dimension in the function signature\"\n )\n\n if kwargs:\n func = functools.partial(func, **kwargs)\n\n if vectorize:\n if signature.all_core_dims:\n func = np.vectorize(\n func, otypes=output_dtypes, signature=signature.to_gufunc_string()\n )\n else:\n func = np.vectorize(func, otypes=output_dtypes)\n\n variables_vfunc = functools.partial(\n apply_variable_ufunc,\n func,\n signature=signature,\n exclude_dims=exclude_dims,\n keep_attrs=keep_attrs,\n dask=dask,\n output_dtypes=output_dtypes,\n output_sizes=output_sizes,\n )\n\n if any(isinstance(a, GroupBy) for a in args):\n this_apply = functools.partial(\n apply_ufunc,\n func,\n input_core_dims=input_core_dims,\n output_core_dims=output_core_dims,\n exclude_dims=exclude_dims,\n join=join,\n dataset_join=dataset_join,\n dataset_fill_value=dataset_fill_value,\n keep_attrs=keep_attrs,\n dask=dask,\n )\n return apply_groupby_func(this_apply, *args)\n elif any(is_dict_like(a) for a in args):\n return apply_dataset_vfunc(\n variables_vfunc,\n *args,\n signature=signature,\n join=join,\n exclude_dims=exclude_dims,\n dataset_join=dataset_join,\n fill_value=dataset_fill_value,\n keep_attrs=keep_attrs\n )\n elif any(isinstance(a, DataArray) for a in args):\n return apply_dataarray_vfunc(\n variables_vfunc,\n *args,\n signature=signature,\n join=join,\n exclude_dims=exclude_dims,\n keep_attrs=keep_attrs\n )\n elif any(isinstance(a, Variable) for a in args):\n return variables_vfunc(*args)\n else:\n return apply_array_ufunc(func, *args, dask=dask)"},{"col":0,"comment":"Like np.array_equal, but also allows values to be NaN in both arrays\n ","endLoc":197,"header":"def array_equiv(arr1, arr2)","id":493,"name":"array_equiv","nodeType":"Function","startLoc":187,"text":"def array_equiv(arr1, arr2):\n \"\"\"Like np.array_equal, but also allows values to be NaN in both arrays\n \"\"\"\n arr1 = asarray(arr1)\n arr2 = asarray(arr2)\n if arr1.shape != arr2.shape:\n return False\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \"In the future, 'NAT == x'\")\n flag_array = (arr1 == arr2) | (isnull(arr1) & isnull(arr2))\n return bool(flag_array.all())"},{"col":0,"comment":"Decode the given Dataset or Datastore according to CF conventions into\n a new Dataset.\n\n Parameters\n ----------\n obj : Dataset or DataStore\n Object to decode.\n concat_characters : bool, optional\n Should character arrays be concatenated to strings, for\n example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'\n mask_and_scale: bool, optional\n Lazily scale (using scale_factor and add_offset) and mask\n (using _FillValue).\n decode_times : bool, optional\n Decode cf times (e.g., integers since 'hours since 2000-01-01') to\n np.datetime64.\n decode_coords : bool, optional\n Use the 'coordinates' attribute on variable (or the dataset itself) to\n identify coordinates.\n drop_variables: string or iterable, optional\n A variable or list of variables to exclude from being parsed from the\n dataset. This may be useful to drop variables with problems or\n inconsistent values.\n use_cftime: bool, optional\n Only relevant if encoded dates come from a standard calendar\n (e.g. 
'gregorian', 'proleptic_gregorian', 'standard', or not\n specified). If None (default), attempt to decode times to\n ``np.datetime64[ns]`` objects; if this is not possible, decode times to\n ``cftime.datetime`` objects. If True, always decode times to\n ``cftime.datetime`` objects, regardless of whether or not they can be\n represented using ``np.datetime64[ns]`` objects. If False, always\n decode times to ``np.datetime64[ns]`` objects; if this is not possible\n raise an error.\n\n Returns\n -------\n decoded : Dataset\n ","endLoc":592,"header":"def decode_cf(\n obj,\n concat_characters=True,\n mask_and_scale=True,\n decode_times=True,\n decode_coords=True,\n drop_variables=None,\n use_cftime=None,\n)","id":494,"name":"decode_cf","nodeType":"Function","startLoc":513,"text":"def decode_cf(\n obj,\n concat_characters=True,\n mask_and_scale=True,\n decode_times=True,\n decode_coords=True,\n drop_variables=None,\n use_cftime=None,\n):\n \"\"\"Decode the given Dataset or Datastore according to CF conventions into\n a new Dataset.\n\n Parameters\n ----------\n obj : Dataset or DataStore\n Object to decode.\n concat_characters : bool, optional\n Should character arrays be concatenated to strings, for\n example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'\n mask_and_scale: bool, optional\n Lazily scale (using scale_factor and add_offset) and mask\n (using _FillValue).\n decode_times : bool, optional\n Decode cf times (e.g., integers since 'hours since 2000-01-01') to\n np.datetime64.\n decode_coords : bool, optional\n Use the 'coordinates' attribute on variable (or the dataset itself) to\n identify coordinates.\n drop_variables: string or iterable, optional\n A variable or list of variables to exclude from being parsed from the\n dataset. This may be useful to drop variables with problems or\n inconsistent values.\n use_cftime: bool, optional\n Only relevant if encoded dates come from a standard calendar\n (e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not\n specified). If None (default), attempt to decode times to\n ``np.datetime64[ns]`` objects; if this is not possible, decode times to\n ``cftime.datetime`` objects. If True, always decode times to\n ``cftime.datetime`` objects, regardless of whether or not they can be\n represented using ``np.datetime64[ns]`` objects. 
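As a hedged illustration of the ``decode_cf`` entry point documented above, the sketch below decodes a dataset that was opened undecoded; ``"raw.nc"`` is a placeholder path, not a file referenced by this document::

    import xarray as xr

    raw = xr.open_dataset("raw.nc", decode_cf=False)   # keep packed values and raw time units
    decoded = xr.decode_cf(
        raw,
        mask_and_scale=True,   # apply scale_factor/add_offset and _FillValue
        decode_times=True,     # turn "hours since ..." integers into datetime64
        decode_coords=True,    # promote variables named in "coordinates" attrs
    )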
If False, always\n decode times to ``np.datetime64[ns]`` objects; if this is not possible\n raise an error.\n\n Returns\n -------\n decoded : Dataset\n \"\"\"\n from .core.dataset import Dataset\n from .backends.common import AbstractDataStore\n\n if isinstance(obj, Dataset):\n vars = obj._variables\n attrs = obj.attrs\n extra_coords = set(obj.coords)\n file_obj = obj._file_obj\n encoding = obj.encoding\n elif isinstance(obj, AbstractDataStore):\n vars, attrs = obj.load()\n extra_coords = set()\n file_obj = obj\n encoding = obj.get_encoding()\n else:\n raise TypeError(\"can only decode Dataset or DataStore objects\")\n\n vars, attrs, coord_names = decode_cf_variables(\n vars,\n attrs,\n concat_characters,\n mask_and_scale,\n decode_times,\n decode_coords,\n drop_variables=drop_variables,\n use_cftime=use_cftime,\n )\n ds = Dataset(vars, attrs=attrs)\n ds = ds.set_coords(coord_names.union(extra_coords).intersection(vars))\n ds._file_obj = file_obj\n ds.encoding = encoding\n\n return ds"},{"col":0,"comment":"\n Decode several CF encoded variables.\n\n See: decode_cf_variable\n ","endLoc":510,"header":"def decode_cf_variables(\n variables,\n attributes,\n concat_characters=True,\n mask_and_scale=True,\n decode_times=True,\n decode_coords=True,\n drop_variables=None,\n use_cftime=None,\n)","id":495,"name":"decode_cf_variables","nodeType":"Function","startLoc":436,"text":"def decode_cf_variables(\n variables,\n attributes,\n concat_characters=True,\n mask_and_scale=True,\n decode_times=True,\n decode_coords=True,\n drop_variables=None,\n use_cftime=None,\n):\n \"\"\"\n Decode several CF encoded variables.\n\n See: decode_cf_variable\n \"\"\"\n dimensions_used_by = defaultdict(list)\n for v in variables.values():\n for d in v.dims:\n dimensions_used_by[d].append(v)\n\n def stackable(dim):\n # figure out if a dimension can be concatenated over\n if dim in variables:\n return False\n for v in dimensions_used_by[dim]:\n if v.dtype.kind != \"S\" or dim != v.dims[-1]:\n return False\n return True\n\n coord_names = set()\n\n if isinstance(drop_variables, str):\n drop_variables = [drop_variables]\n elif drop_variables is None:\n drop_variables = []\n drop_variables = set(drop_variables)\n\n # Time bounds coordinates might miss the decoding attributes\n if decode_times:\n _update_bounds_attributes(variables)\n\n new_vars = {}\n for k, v in variables.items():\n if k in drop_variables:\n continue\n stack_char_dim = (\n concat_characters\n and v.dtype == \"S1\"\n and v.ndim > 0\n and stackable(v.dims[-1])\n )\n new_vars[k] = decode_cf_variable(\n k,\n v,\n concat_characters=concat_characters,\n mask_and_scale=mask_and_scale,\n decode_times=decode_times,\n stack_char_dim=stack_char_dim,\n use_cftime=use_cftime,\n )\n if decode_coords:\n var_attrs = new_vars[k].attrs\n if \"coordinates\" in var_attrs:\n coord_str = var_attrs[\"coordinates\"]\n var_coord_names = coord_str.split()\n if all(k in variables for k in var_coord_names):\n new_vars[k].encoding[\"coordinates\"] = coord_str\n del var_attrs[\"coordinates\"]\n coord_names.update(var_coord_names)\n\n if decode_coords and \"coordinates\" in attributes:\n attributes = dict(attributes)\n coord_names.update(attributes.pop(\"coordinates\").split())\n\n return new_vars, attributes, coord_names"},{"attributeType":"null","col":0,"comment":"null","endLoc":13,"id":496,"name":"OPTIONS","nodeType":"Attribute","startLoc":13,"text":"OPTIONS"},{"col":4,"comment":"null","endLoc":59,"header":"def _update_coords(self, coords, 
indexes)","id":497,"name":"_update_coords","nodeType":"Function","startLoc":58,"text":"def _update_coords(self, coords, indexes):\n raise NotImplementedError()"},{"col":4,"comment":"null","endLoc":44,"header":"@property\n def _names(self) -> Set[Hashable]","id":498,"name":"_names","nodeType":"Function","startLoc":42,"text":"@property\n def _names(self) -> Set[Hashable]:\n raise NotImplementedError()"},{"col":4,"comment":"null","endLoc":48,"header":"@property\n def dims(self) -> Union[Mapping[Hashable, int], Tuple[Hashable, ...]]","id":499,"name":"dims","nodeType":"Function","startLoc":46,"text":"@property\n def dims(self) -> Union[Mapping[Hashable, int], Tuple[Hashable, ...]]:\n raise NotImplementedError()"},{"col":0,"comment":"Used in Dataset.__init__.","endLoc":457,"header":"def merge_data_and_coords(data, coords, compat=\"broadcast_equals\", join=\"outer\")","id":500,"name":"merge_data_and_coords","nodeType":"Function","startLoc":450,"text":"def merge_data_and_coords(data, coords, compat=\"broadcast_equals\", join=\"outer\"):\n \"\"\"Used in Dataset.__init__.\"\"\"\n objects = [data, coords]\n explicit_coords = coords.keys()\n indexes = dict(_extract_indexes_from_coords(coords))\n return merge_core(\n objects, compat, join, explicit_coords=explicit_coords, indexes=indexes\n )"},{"col":4,"comment":"null","endLoc":52,"header":"@property\n def indexes(self) -> Indexes","id":501,"name":"indexes","nodeType":"Function","startLoc":50,"text":"@property\n def indexes(self) -> Indexes:\n return self._data.indexes # type: ignore"},{"col":4,"comment":"null","endLoc":56,"header":"@property\n def variables(self)","id":502,"name":"variables","nodeType":"Function","startLoc":54,"text":"@property\n def variables(self):\n raise NotImplementedError()"},{"col":4,"comment":"null","endLoc":65,"header":"def __iter__(self) -> Iterator[\"Hashable\"]","id":503,"name":"__iter__","nodeType":"Function","startLoc":61,"text":"def __iter__(self) -> Iterator[\"Hashable\"]:\n # needs to be in the same order as the dataset variables\n for k in self.variables:\n if k in self._names:\n yield k"},{"col":4,"comment":"null","endLoc":68,"header":"def __len__(self) -> int","id":504,"name":"__len__","nodeType":"Function","startLoc":67,"text":"def __len__(self) -> int:\n return len(self._names)"},{"col":0,"comment":"Yields the name & index of valid indexes from a mapping of coords","endLoc":465,"header":"def _extract_indexes_from_coords(coords)","id":505,"name":"_extract_indexes_from_coords","nodeType":"Function","startLoc":460,"text":"def _extract_indexes_from_coords(coords):\n \"\"\"Yields the name & index of valid indexes from a mapping of coords\"\"\"\n for name, variable in coords.items():\n variable = as_variable(variable, name=name)\n if variable.dims == (name,):\n yield name, variable.to_index()"},{"col":4,"comment":"null","endLoc":71,"header":"def __contains__(self, key: Hashable) -> bool","id":506,"name":"__contains__","nodeType":"Function","startLoc":70,"text":"def __contains__(self, key: Hashable) -> bool:\n return key in self._names"},{"col":4,"comment":"null","endLoc":74,"header":"def __repr__(self) -> str","id":507,"name":"__repr__","nodeType":"Function","startLoc":73,"text":"def __repr__(self) -> str:\n return formatting.coords_repr(self)"},{"attributeType":"null","col":4,"comment":"null","endLoc":17,"id":508,"name":"sparse_array_type","nodeType":"Attribute","startLoc":17,"text":"sparse_array_type"},{"col":0,"comment":"Adds time attributes to time bounds variables.\n\n Variables handling time bounds (\"Cell 
boundaries\" in the CF\n conventions) do not necessarily carry the necessary attributes to be\n decoded. This copies the attributes from the time variable to the\n associated boundaries.\n\n See Also:\n\n http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/\n cf-conventions.html#cell-boundaries\n\n https://github.com/pydata/xarray/issues/2565\n ","endLoc":383,"header":"def _update_bounds_attributes(variables)","id":509,"name":"_update_bounds_attributes","nodeType":"Function","startLoc":358,"text":"def _update_bounds_attributes(variables):\n \"\"\"Adds time attributes to time bounds variables.\n\n Variables handling time bounds (\"Cell boundaries\" in the CF\n conventions) do not necessarily carry the necessary attributes to be\n decoded. This copies the attributes from the time variable to the\n associated boundaries.\n\n See Also:\n\n http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/\n cf-conventions.html#cell-boundaries\n\n https://github.com/pydata/xarray/issues/2565\n \"\"\"\n\n # For all time variables with bounds\n for v in variables.values():\n attrs = v.attrs\n has_date_units = \"units\" in attrs and \"since\" in attrs[\"units\"]\n if has_date_units and \"bounds\" in attrs:\n if attrs[\"bounds\"] in variables:\n bounds_attrs = variables[attrs[\"bounds\"]].attrs\n bounds_attrs.setdefault(\"units\", attrs[\"units\"])\n if \"calendar\" in attrs:\n bounds_attrs.setdefault(\"calendar\", attrs[\"calendar\"])"},{"col":0,"comment":"null","endLoc":377,"header":"def coords_repr(coords, col_width=None)","id":510,"name":"coords_repr","nodeType":"Function","startLoc":372,"text":"def coords_repr(coords, col_width=None):\n if col_width is None:\n col_width = _calculate_col_width(_get_col_items(coords))\n return _mapping_repr(\n coords, title=\"Coordinates\", summarizer=summarize_coord, col_width=col_width\n )"},{"col":0,"comment":"Given an object `x`, call `str(x)` and format the returned string so\n that it is numchars long, padding with trailing spaces or truncating with\n ellipses as necessary\n ","endLoc":23,"header":"def pretty_print(x, numchars)","id":511,"name":"pretty_print","nodeType":"Function","startLoc":17,"text":"def pretty_print(x, numchars):\n \"\"\"Given an object `x`, call `str(x)` and format the returned string so\n that it is numchars long, padding with trailing spaces or truncating with\n ellipses as necessary\n \"\"\"\n s = maybe_truncate(x, numchars)\n return s + \" \" * max(numchars - len(s), 0)"},{"col":0,"comment":"Core logic for merging labeled objects.\n\n This is not public API.\n\n Parameters\n ----------\n objects : list of mappings\n All values must be convertable to labeled arrays.\n compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional\n Compatibility checks to use when merging variables.\n join : {'outer', 'inner', 'left', 'right'}, optional\n How to combine objects with different indexes.\n priority_arg : integer, optional\n Optional argument in `objects` that takes precedence over the others.\n explicit_coords : set, optional\n An explicit list of variables from `objects` that are coordinates.\n indexes : dict, optional\n Dictionary with values given by pandas.Index objects.\n fill_value : scalar, optional\n Value to use for newly missing values\n\n Returns\n -------\n variables : dict\n Dictionary of Variable objects.\n coord_names : set\n Set of coordinate names.\n dims : dict\n Dictionary mapping from dimension names to sizes.\n\n Raises\n ------\n MergeError if the merge cannot be done 
successfully.\n ","endLoc":563,"header":"def merge_core(\n objects: Iterable[\"CoercibleMapping\"],\n compat: str = \"broadcast_equals\",\n join: str = \"outer\",\n priority_arg: Optional[int] = None,\n explicit_coords: Optional[Sequence] = None,\n indexes: Optional[Mapping[Hashable, pd.Index]] = None,\n fill_value: object = dtypes.NA,\n) -> _MergeResult","id":512,"name":"merge_core","nodeType":"Function","startLoc":490,"text":"def merge_core(\n objects: Iterable[\"CoercibleMapping\"],\n compat: str = \"broadcast_equals\",\n join: str = \"outer\",\n priority_arg: Optional[int] = None,\n explicit_coords: Optional[Sequence] = None,\n indexes: Optional[Mapping[Hashable, pd.Index]] = None,\n fill_value: object = dtypes.NA,\n) -> _MergeResult:\n \"\"\"Core logic for merging labeled objects.\n\n This is not public API.\n\n Parameters\n ----------\n objects : list of mappings\n All values must be convertable to labeled arrays.\n compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional\n Compatibility checks to use when merging variables.\n join : {'outer', 'inner', 'left', 'right'}, optional\n How to combine objects with different indexes.\n priority_arg : integer, optional\n Optional argument in `objects` that takes precedence over the others.\n explicit_coords : set, optional\n An explicit list of variables from `objects` that are coordinates.\n indexes : dict, optional\n Dictionary with values given by pandas.Index objects.\n fill_value : scalar, optional\n Value to use for newly missing values\n\n Returns\n -------\n variables : dict\n Dictionary of Variable objects.\n coord_names : set\n Set of coordinate names.\n dims : dict\n Dictionary mapping from dimension names to sizes.\n\n Raises\n ------\n MergeError if the merge cannot be done successfully.\n \"\"\"\n from .dataset import calculate_dimensions\n\n _assert_compat_valid(compat)\n\n coerced = coerce_pandas_values(objects)\n aligned = deep_align(\n coerced, join=join, copy=False, indexes=indexes, fill_value=fill_value\n )\n collected = collect_variables_and_indexes(aligned)\n\n prioritized = _get_priority_vars_and_indexes(aligned, priority_arg, compat=compat)\n variables, out_indexes = merge_collected(collected, prioritized, compat=compat)\n assert_unique_multiindex_level_names(variables)\n\n dims = calculate_dimensions(variables)\n\n coord_names, noncoord_names = determine_coords(coerced)\n if explicit_coords is not None:\n assert_valid_explicit_coords(variables, dims, explicit_coords)\n coord_names.update(explicit_coords)\n for dim, size in dims.items():\n if dim in variables:\n coord_names.add(dim)\n ambiguous_coords = coord_names.intersection(noncoord_names)\n if ambiguous_coords:\n raise MergeError(\n \"unable to determine if these variables should be \"\n \"coordinates or not in the merged result: %s\" % ambiguous_coords\n )\n\n return _MergeResult(variables, coord_names, dims, out_indexes)"},{"col":0,"comment":"\n Decodes a variable which may hold CF encoded information.\n\n This includes variables that have been masked and scaled, which\n hold CF style time variables (this is almost always the case if\n the dataset has been serialized) and which have strings encoded\n as character arrays.\n\n Parameters\n ----------\n name: str\n Name of the variable. 
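``merge_core`` above is internal API; the sketch below exercises the public ``xarray.merge`` wrapper, which is backed by the same ``compat``/``join`` logic. The datasets are illustrative only::

    import xarray as xr

    ds1 = xr.Dataset({"a": ("x", [1, 2])}, coords={"x": [0, 1]})
    ds2 = xr.Dataset({"b": ("x", [3, 4])}, coords={"x": [1, 2]})

    merged = xr.merge([ds1, ds2], join="outer", compat="no_conflicts")
    # join="outer" aligns x to [0, 1, 2]; positions missing in one input become NaN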
Used for better error messages.\n var : Variable\n A variable holding potentially CF encoded information.\n concat_characters : bool\n Should character arrays be concatenated to strings, for\n example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'\n mask_and_scale: bool\n Lazily scale (using scale_factor and add_offset) and mask\n (using _FillValue). If the _Unsigned attribute is present\n treat integer arrays as unsigned.\n decode_times : bool\n Decode cf times ('hours since 2000-01-01') to np.datetime64.\n decode_endianness : bool\n Decode arrays from non-native to native endianness.\n stack_char_dim : bool\n Whether to stack characters into bytes along the last dimension of this\n array. Passed as an argument because we need to look at the full\n dataset to figure out if this is appropriate.\n use_cftime: bool, optional\n Only relevant if encoded dates come from a standard calendar\n (e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not\n specified). If None (default), attempt to decode times to\n ``np.datetime64[ns]`` objects; if this is not possible, decode times to\n ``cftime.datetime`` objects. If True, always decode times to\n ``cftime.datetime`` objects, regardless of whether or not they can be\n represented using ``np.datetime64[ns]`` objects. If False, always\n decode times to ``np.datetime64[ns]`` objects; if this is not possible\n raise an error.\n\n Returns\n -------\n out : Variable\n A variable holding the decoded equivalent of var.\n ","endLoc":355,"header":"def decode_cf_variable(\n name,\n var,\n concat_characters=True,\n mask_and_scale=True,\n decode_times=True,\n decode_endianness=True,\n stack_char_dim=True,\n use_cftime=None,\n)","id":514,"name":"decode_cf_variable","nodeType":"Function","startLoc":260,"text":"def decode_cf_variable(\n name,\n var,\n concat_characters=True,\n mask_and_scale=True,\n decode_times=True,\n decode_endianness=True,\n stack_char_dim=True,\n use_cftime=None,\n):\n \"\"\"\n Decodes a variable which may hold CF encoded information.\n\n This includes variables that have been masked and scaled, which\n hold CF style time variables (this is almost always the case if\n the dataset has been serialized) and which have strings encoded\n as character arrays.\n\n Parameters\n ----------\n name: str\n Name of the variable. Used for better error messages.\n var : Variable\n A variable holding potentially CF encoded information.\n concat_characters : bool\n Should character arrays be concatenated to strings, for\n example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'\n mask_and_scale: bool\n Lazily scale (using scale_factor and add_offset) and mask\n (using _FillValue). If the _Unsigned attribute is present\n treat integer arrays as unsigned.\n decode_times : bool\n Decode cf times ('hours since 2000-01-01') to np.datetime64.\n decode_endianness : bool\n Decode arrays from non-native to native endianness.\n stack_char_dim : bool\n Whether to stack characters into bytes along the last dimension of this\n array. Passed as an argument because we need to look at the full\n dataset to figure out if this is appropriate.\n use_cftime: bool, optional\n Only relevant if encoded dates come from a standard calendar\n (e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not\n specified). If None (default), attempt to decode times to\n ``np.datetime64[ns]`` objects; if this is not possible, decode times to\n ``cftime.datetime`` objects. 
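The ``use_cftime`` option described above can also be exercised directly through ``open_dataset``; ``"climate.nc"`` is a placeholder path and the ``True`` branch assumes the optional ``cftime`` package is installed::

    import xarray as xr

    ds_np = xr.open_dataset("climate.nc", use_cftime=False)  # np.datetime64[ns], or an error
    ds_cf = xr.open_dataset("climate.nc", use_cftime=True)   # always cftime.datetime objects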
If True, always decode times to\n ``cftime.datetime`` objects, regardless of whether or not they can be\n represented using ``np.datetime64[ns]`` objects. If False, always\n decode times to ``np.datetime64[ns]`` objects; if this is not possible\n raise an error.\n\n Returns\n -------\n out : Variable\n A variable holding the decoded equivalent of var.\n \"\"\"\n var = as_variable(var)\n original_dtype = var.dtype\n\n if concat_characters:\n if stack_char_dim:\n var = strings.CharacterArrayCoder().decode(var, name=name)\n var = strings.EncodedStringCoder().decode(var)\n\n if mask_and_scale:\n for coder in [\n variables.UnsignedIntegerCoder(),\n variables.CFMaskCoder(),\n variables.CFScaleOffsetCoder(),\n ]:\n var = coder.decode(var, name=name)\n\n if decode_times:\n for coder in [\n times.CFTimedeltaCoder(),\n times.CFDatetimeCoder(use_cftime=use_cftime),\n ]:\n var = coder.decode(var, name=name)\n\n dimensions, data, attributes, encoding = variables.unpack_for_decoding(var)\n # TODO(shoyer): convert everything below to use coders\n\n if decode_endianness and not data.dtype.isnative:\n # do this last, so it's only done if we didn't already unmask/scale\n data = NativeEndiannessArray(data)\n original_dtype = data.dtype\n\n encoding.setdefault(\"dtype\", original_dtype)\n\n if \"dtype\" in attributes and attributes[\"dtype\"] == \"bool\":\n del attributes[\"dtype\"]\n data = BoolTypeArray(data)\n\n if not isinstance(data, dask_array_type):\n data = indexing.LazilyOuterIndexedArray(data)\n\n return Variable(dimensions, data, attributes, encoding=encoding)"},{"col":0,"comment":"Get all column items to format, including both keys of `mapping`\n and MultiIndex levels if any.\n ","endLoc":342,"header":"def _get_col_items(mapping)","id":515,"name":"_get_col_items","nodeType":"Function","startLoc":328,"text":"def _get_col_items(mapping):\n \"\"\"Get all column items to format, including both keys of `mapping`\n and MultiIndex levels if any.\n \"\"\"\n from .variable import IndexVariable\n\n col_items = []\n for k, v in mapping.items():\n col_items.append(k)\n var = getattr(v, \"variable\", v)\n if isinstance(var, IndexVariable):\n level_names = var.to_index_variable().level_names\n if level_names is not None:\n col_items += list(level_names)\n return col_items"},{"col":0,"comment":"null","endLoc":30,"header":"def maybe_truncate(obj, maxlen=500)","id":516,"name":"maybe_truncate","nodeType":"Function","startLoc":26,"text":"def maybe_truncate(obj, maxlen=500):\n s = str(obj)\n if len(s) > maxlen:\n s = s[: (maxlen - 3)] + \"...\"\n return s"},{"col":0,"comment":"null","endLoc":37,"header":"def wrap_indent(text, start=\"\", length=None)","id":517,"name":"wrap_indent","nodeType":"Function","startLoc":33,"text":"def wrap_indent(text, start=\"\", length=None):\n if length is None:\n length = len(start)\n indent = \"\\n\" + \" \" * length\n return start + indent.join(x for x in text.splitlines())"},{"col":4,"comment":"null","endLoc":68,"header":"def __init__(self, input_core_dims, output_core_dims=((),))","id":518,"name":"__init__","nodeType":"Function","startLoc":63,"text":"def __init__(self, input_core_dims, output_core_dims=((),)):\n self.input_core_dims = tuple(tuple(a) for a in input_core_dims)\n self.output_core_dims = tuple(tuple(a) for a in output_core_dims)\n self._all_input_core_dims = None\n self._all_output_core_dims = None\n self._all_core_dims = None"},{"col":4,"comment":"null","endLoc":125,"header":"def decode(self, variable, 
name=None)","id":519,"name":"decode","nodeType":"Function","startLoc":118,"text":"def decode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_decoding(variable)\n\n if data.dtype == \"S1\" and dims:\n encoding[\"char_dim_name\"] = dims[-1]\n dims = dims[:-1]\n data = char_to_bytes(data)\n return Variable(dims, data, attrs, encoding)"},{"col":0,"comment":"null","endLoc":348,"header":"def _calculate_col_width(col_items)","id":520,"name":"_calculate_col_width","nodeType":"Function","startLoc":345,"text":"def _calculate_col_width(col_items):\n max_name_length = max(len(str(s)) for s in col_items) if col_items else 0\n col_width = max(max_name_length, 7) + 6\n return col_width"},{"col":0,"comment":"null","endLoc":111,"header":"def unpack_for_decoding(var)","id":521,"name":"unpack_for_decoding","nodeType":"Function","startLoc":110,"text":"def unpack_for_decoding(var):\n return var.dims, var._data, var.attrs.copy(), var.encoding.copy()"},{"col":0,"comment":"null","endLoc":359,"header":"def _mapping_repr(mapping, title, summarizer, col_width=None)","id":522,"name":"_mapping_repr","nodeType":"Function","startLoc":351,"text":"def _mapping_repr(mapping, title, summarizer, col_width=None):\n if col_width is None:\n col_width = _calculate_col_width(mapping)\n summary = [\"{}:\".format(title)]\n if mapping:\n summary += [summarizer(k, v, col_width) for k, v in mapping.items()]\n else:\n summary += [EMPTY_REPR]\n return \"\\n\".join(summary)"},{"col":0,"comment":"Calculate the dimensions corresponding to a set of variables.\n\n Returns dictionary mapping from dimension names to sizes. Raises ValueError\n if any of the dimension sizes conflict.\n ","endLoc":188,"header":"def calculate_dimensions(variables: Mapping[Hashable, Variable]) -> Dict[Hashable, int]","id":523,"name":"calculate_dimensions","nodeType":"Function","startLoc":164,"text":"def calculate_dimensions(variables: Mapping[Hashable, Variable]) -> Dict[Hashable, int]:\n \"\"\"Calculate the dimensions corresponding to a set of variables.\n\n Returns dictionary mapping from dimension names to sizes. Raises ValueError\n if any of the dimension sizes conflict.\n \"\"\"\n dims: Dict[Hashable, int] = {}\n last_used = {}\n scalar_vars = {k for k, v in variables.items() if not v.dims}\n for k, var in variables.items():\n for dim, size in zip(var.dims, var.shape):\n if dim in scalar_vars:\n raise ValueError(\n \"dimension %r already exists as a scalar \" \"variable\" % dim\n )\n if dim not in dims:\n dims[dim] = size\n last_used[dim] = k\n elif dims[dim] != size:\n raise ValueError(\n \"conflicting sizes for dimension %r: \"\n \"length %s on %r and length %s on %r\"\n % (dim, size, k, dims[dim], last_used[dim])\n )\n return dims"},{"col":4,"comment":"null","endLoc":77,"header":"def to_dataset(self) -> \"Dataset\"","id":524,"name":"to_dataset","nodeType":"Function","startLoc":76,"text":"def to_dataset(self) -> \"Dataset\":\n raise NotImplementedError()"},{"col":4,"comment":"Convert all index coordinates into a :py:class:`pandas.Index`.\n\n Parameters\n ----------\n ordered_dims : sequence of hashable, optional\n Possibly reordered version of this object's dimensions indicating\n the order in which dimensions should appear on the result.\n\n Returns\n -------\n pandas.Index\n Index subclass corresponding to the outer-product of all dimension\n coordinates. 
This will be a MultiIndex if this object is has more\n than more dimension.\n ","endLoc":111,"header":"def to_index(self, ordered_dims: Sequence[Hashable] = None) -> pd.Index","id":525,"name":"to_index","nodeType":"Function","startLoc":79,"text":"def to_index(self, ordered_dims: Sequence[Hashable] = None) -> pd.Index:\n \"\"\"Convert all index coordinates into a :py:class:`pandas.Index`.\n\n Parameters\n ----------\n ordered_dims : sequence of hashable, optional\n Possibly reordered version of this object's dimensions indicating\n the order in which dimensions should appear on the result.\n\n Returns\n -------\n pandas.Index\n Index subclass corresponding to the outer-product of all dimension\n coordinates. This will be a MultiIndex if this object is has more\n than more dimension.\n \"\"\"\n if ordered_dims is None:\n ordered_dims = list(self.dims)\n elif set(ordered_dims) != set(self.dims):\n raise ValueError(\n \"ordered_dims must match dims, but does not: \"\n \"{} vs {}\".format(ordered_dims, self.dims)\n )\n\n if len(ordered_dims) == 0:\n raise ValueError(\"no valid index for a 0-dimensional object\")\n elif len(ordered_dims) == 1:\n (dim,) = ordered_dims\n return self._data.get_index(dim) # type: ignore\n else:\n indexes = [self._data.get_index(k) for k in ordered_dims] # type: ignore\n names = list(ordered_dims)\n return pd.MultiIndex.from_product(indexes, names=names)"},{"col":0,"comment":"Convert numpy/dask arrays from characters to fixed width bytes.","endLoc":188,"header":"def char_to_bytes(arr)","id":526,"name":"char_to_bytes","nodeType":"Function","startLoc":155,"text":"def char_to_bytes(arr):\n \"\"\"Convert numpy/dask arrays from characters to fixed width bytes.\"\"\"\n if arr.dtype != \"S1\":\n raise ValueError(\"argument must have dtype='S1'\")\n\n if not arr.ndim:\n # no dimension to concatenate along\n return arr\n\n size = arr.shape[-1]\n\n if not size:\n # can't make an S0 dtype\n return np.zeros(arr.shape[:-1], dtype=np.string_)\n\n if isinstance(arr, dask_array_type):\n import dask.array as da\n\n if len(arr.chunks[-1]) > 1:\n raise ValueError(\n \"cannot stacked dask character array with \"\n \"multiple chunks in the last dimension: {}\".format(arr)\n )\n\n dtype = np.dtype(\"S\" + str(arr.shape[-1]))\n return da.map_blocks(\n _numpy_char_to_bytes,\n arr,\n dtype=dtype,\n chunks=arr.chunks[:-1],\n drop_axis=[arr.ndim - 1],\n )\n else:\n return StackedBytesArray(arr)"},{"col":4,"comment":"For use with binary arithmetic.","endLoc":127,"header":"def _merge_raw(self, other)","id":527,"name":"_merge_raw","nodeType":"Function","startLoc":120,"text":"def _merge_raw(self, other):\n \"\"\"For use with binary arithmetic.\"\"\"\n if other is None:\n variables = dict(self.variables)\n indexes = dict(self.indexes)\n else:\n variables, indexes = merge_coordinates_without_align([self, other])\n return variables, indexes"},{"col":0,"comment":"Given a list of dicts with xarray object values, identify coordinates.\n\n Parameters\n ----------\n list_of_mappings : list of dict or Dataset objects\n Of the same form as the arguments to expand_variable_dicts.\n\n Returns\n -------\n coord_names : set of variable names\n noncoord_names : set of variable names\n All variable found in the input should appear in either the set of\n coordinate or non-coordinate names.\n ","endLoc":355,"header":"def determine_coords(\n list_of_mappings: Iterable[\"DatasetLike\"]\n) -> Tuple[Set[Hashable], Set[Hashable]]","id":528,"name":"determine_coords","nodeType":"Function","startLoc":320,"text":"def 
determine_coords(\n list_of_mappings: Iterable[\"DatasetLike\"]\n) -> Tuple[Set[Hashable], Set[Hashable]]:\n \"\"\"Given a list of dicts with xarray object values, identify coordinates.\n\n Parameters\n ----------\n list_of_mappings : list of dict or Dataset objects\n Of the same form as the arguments to expand_variable_dicts.\n\n Returns\n -------\n coord_names : set of variable names\n noncoord_names : set of variable names\n All variable found in the input should appear in either the set of\n coordinate or non-coordinate names.\n \"\"\"\n from .dataarray import DataArray\n from .dataset import Dataset\n\n coord_names: Set[Hashable] = set()\n noncoord_names: Set[Hashable] = set()\n\n for mapping in list_of_mappings:\n if isinstance(mapping, Dataset):\n coord_names.update(mapping.coords)\n noncoord_names.update(mapping.data_vars)\n else:\n for name, var in mapping.items():\n if isinstance(var, DataArray):\n coords = set(var._coords) # use private API for speed\n # explicitly overwritten variables should take precedence\n coords.discard(name)\n coord_names.update(coords)\n\n return coord_names, noncoord_names"},{"col":0,"comment":"Merge variables/indexes from coordinates without automatic alignments.\n\n This function is used for merging coordinate from pre-existing xarray\n objects.\n ","endLoc":317,"header":"def merge_coordinates_without_align(\n objects: \"List[Coordinates]\",\n prioritized: Mapping[Hashable, MergeElement] = None,\n exclude_dims: AbstractSet = frozenset(),\n) -> Tuple[Dict[Hashable, Variable], Dict[Hashable, pd.Index]]","id":529,"name":"merge_coordinates_without_align","nodeType":"Function","startLoc":292,"text":"def merge_coordinates_without_align(\n objects: \"List[Coordinates]\",\n prioritized: Mapping[Hashable, MergeElement] = None,\n exclude_dims: AbstractSet = frozenset(),\n) -> Tuple[Dict[Hashable, Variable], Dict[Hashable, pd.Index]]:\n \"\"\"Merge variables/indexes from coordinates without automatic alignments.\n\n This function is used for merging coordinate from pre-existing xarray\n objects.\n \"\"\"\n collected = collect_from_coordinates(objects)\n\n if exclude_dims:\n filtered: Dict[Hashable, List[MergeElement]] = {}\n for name, elements in collected.items():\n new_elements = [\n (variable, index)\n for variable, index in elements\n if exclude_dims.isdisjoint(variable.dims)\n ]\n if new_elements:\n filtered[name] = new_elements\n else:\n filtered = collected\n\n return merge_collected(filtered, prioritized)"},{"col":4,"comment":"\n Parameters\n ----------\n array : array-like\n Original array of values to wrap.\n ","endLoc":220,"header":"def __init__(self, array)","id":530,"name":"__init__","nodeType":"Function","startLoc":209,"text":"def __init__(self, array):\n \"\"\"\n Parameters\n ----------\n array : array-like\n Original array of values to wrap.\n \"\"\"\n if array.dtype != \"S1\":\n raise ValueError(\n \"can only use StackedBytesArray if argument has dtype='S1'\"\n )\n self.array = indexing.as_indexable(array)"},{"col":0,"comment":"Apply a dataset or datarray level function over GroupBy, Dataset,\n DataArray, Variable and/or ndarray objects.\n ","endLoc":458,"header":"def apply_groupby_func(func, *args)","id":531,"name":"apply_groupby_func","nodeType":"Function","startLoc":415,"text":"def apply_groupby_func(func, *args):\n \"\"\"Apply a dataset or datarray level function over GroupBy, Dataset,\n DataArray, Variable and/or ndarray objects.\n \"\"\"\n from .groupby import GroupBy, peek_at\n from .variable import Variable\n\n groupbys = [arg for arg in 
args if isinstance(arg, GroupBy)]\n assert groupbys, \"must have at least one groupby to iterate over\"\n first_groupby = groupbys[0]\n if any(not first_groupby._group.equals(gb._group) for gb in groupbys[1:]):\n raise ValueError(\n \"apply_ufunc can only perform operations over \"\n \"multiple GroupBy objets at once if they are all \"\n \"grouped the same way\"\n )\n\n grouped_dim = first_groupby._group.name\n unique_values = first_groupby._unique_coord.values\n\n iterators = []\n for arg in args:\n if isinstance(arg, GroupBy):\n iterator = (value for _, value in arg)\n elif hasattr(arg, \"dims\") and grouped_dim in arg.dims:\n if isinstance(arg, Variable):\n raise ValueError(\n \"groupby operations cannot be performed with \"\n \"xarray.Variable objects that share a dimension with \"\n \"the grouped dimension\"\n )\n iterator = _iter_over_selections(arg, grouped_dim, unique_values)\n else:\n iterator = itertools.repeat(arg)\n iterators.append(iterator)\n\n applied = (func(*zipped_args) for zipped_args in zip(*iterators))\n applied_example, applied = peek_at(applied)\n combine = first_groupby._combine\n if isinstance(applied_example, tuple):\n combined = tuple(combine(output) for output in zip(*applied))\n else:\n combined = combine(applied)\n return combined"},{"col":0,"comment":"Validate explicit coordinate names/dims.\n\n Raise a MergeError if an explicit coord shares a name with a dimension\n but is comprised of arbitrary dimensions.\n ","endLoc":480,"header":"def assert_valid_explicit_coords(variables, dims, explicit_coords)","id":532,"name":"assert_valid_explicit_coords","nodeType":"Function","startLoc":468,"text":"def assert_valid_explicit_coords(variables, dims, explicit_coords):\n \"\"\"Validate explicit coordinate names/dims.\n\n Raise a MergeError if an explicit coord shares a name with a dimension\n but is comprised of arbitrary dimensions.\n \"\"\"\n for coord_name in explicit_coords:\n if coord_name in dims and variables[coord_name].dims != (coord_name,):\n raise MergeError(\n \"coordinate %s shares a name with a dataset dimension, but is \"\n \"not a 1D variable along that dimension. 
This is disallowed \"\n \"by the xarray data model.\" % coord_name\n )"},{"col":0,"comment":"Iterate over selections of an xarray object in the provided order.","endLoc":412,"header":"def _iter_over_selections(obj, dim, values)","id":533,"name":"_iter_over_selections","nodeType":"Function","startLoc":400,"text":"def _iter_over_selections(obj, dim, values):\n \"\"\"Iterate over selections of an xarray object in the provided order.\"\"\"\n from .groupby import _dummy_copy\n\n dummy = None\n for value in values:\n try:\n obj_sel = obj.sel(**{dim: value})\n except (KeyError, IndexError):\n if dummy is None:\n dummy = _dummy_copy(obj)\n obj_sel = dummy\n yield obj_sel"},{"col":4,"comment":"Given names of one or more variables, set them as coordinates\n\n Parameters\n ----------\n names : hashable or iterable of hashables\n Name(s) of variables in this dataset to convert into coordinates.\n\n Returns\n -------\n Dataset\n\n See also\n --------\n Dataset.swap_dims\n ","endLoc":1390,"header":"def set_coords(\n self, names: \"Union[Hashable, Iterable[Hashable]]\", inplace: bool = None\n ) -> \"Dataset\"","id":534,"name":"set_coords","nodeType":"Function","startLoc":1360,"text":"def set_coords(\n self, names: \"Union[Hashable, Iterable[Hashable]]\", inplace: bool = None\n ) -> \"Dataset\":\n \"\"\"Given names of one or more variables, set them as coordinates\n\n Parameters\n ----------\n names : hashable or iterable of hashables\n Name(s) of variables in this dataset to convert into coordinates.\n\n Returns\n -------\n Dataset\n\n See also\n --------\n Dataset.swap_dims\n \"\"\"\n # TODO: allow inserting new coordinates with this method, like\n # DataFrame.set_index?\n # nb. check in self._variables, not self.data_vars to insure that the\n # operation is idempotent\n _check_inplace(inplace)\n if isinstance(names, str) or not isinstance(names, Iterable):\n names = [names]\n else:\n names = list(names)\n self._assert_all_in_dataset(names)\n obj = self.copy()\n obj._coord_names.update(names)\n return obj"},{"col":0,"comment":"null","endLoc":83,"header":"def _dummy_copy(xarray_obj)","id":535,"name":"_dummy_copy","nodeType":"Function","startLoc":52,"text":"def _dummy_copy(xarray_obj):\n from .dataset import Dataset\n from .dataarray import DataArray\n\n if isinstance(xarray_obj, Dataset):\n res = Dataset(\n {\n k: dtypes.get_fill_value(v.dtype)\n for k, v in xarray_obj.data_vars.items()\n },\n {\n k: dtypes.get_fill_value(v.dtype)\n for k, v in xarray_obj.coords.items()\n if k not in xarray_obj.dims\n },\n xarray_obj.attrs,\n )\n elif isinstance(xarray_obj, DataArray):\n res = DataArray(\n dtypes.get_fill_value(xarray_obj.dtype),\n {\n k: dtypes.get_fill_value(v.dtype)\n for k, v in xarray_obj.coords.items()\n if k not in xarray_obj.dims\n },\n dims=[],\n name=xarray_obj.name,\n attrs=xarray_obj.attrs,\n )\n else: # pragma: no cover\n raise AssertionError\n return res"},{"col":4,"comment":"null","endLoc":43,"header":"def __init__(self, allows_unicode=True)","id":536,"name":"__init__","nodeType":"Function","startLoc":42,"text":"def __init__(self, allows_unicode=True):\n self.allows_unicode = allows_unicode"},{"col":0,"comment":"null","endLoc":41,"header":"def _check_inplace(inplace: Optional[bool]) -> None","id":537,"name":"_check_inplace","nodeType":"Function","startLoc":36,"text":"def _check_inplace(inplace: Optional[bool]) -> None:\n if inplace is not None:\n raise TypeError(\n \"The `inplace` argument has been removed from xarray. 
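A minimal sketch of the ``set_coords`` method shown above, promoting an ordinary data variable to a non-index coordinate; the variable names are illustrative::

    import xarray as xr

    ds = xr.Dataset({"temperature": ("x", [11.2, 12.5]),
                     "station_name": ("x", ["A", "B"])})
    ds = ds.set_coords("station_name")  # now listed under Coordinates, not Data variables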
\"\n \"You can achieve an identical effect with python's standard assignment.\"\n )"},{"col":0,"comment":"Collect variables and indexes to be merged from Coordinate objects.","endLoc":289,"header":"def collect_from_coordinates(\n list_of_coords: \"List[Coordinates]\"\n) -> Dict[Hashable, List[MergeElement]]","id":538,"name":"collect_from_coordinates","nodeType":"Function","startLoc":277,"text":"def collect_from_coordinates(\n list_of_coords: \"List[Coordinates]\"\n) -> Dict[Hashable, List[MergeElement]]:\n \"\"\"Collect variables and indexes to be merged from Coordinate objects.\"\"\"\n grouped: Dict[Hashable, List[Tuple[Variable, pd.Index]]] = {}\n\n for coords in list_of_coords:\n variables = coords.variables\n indexes = coords.indexes\n for name, variable in variables.items():\n value = grouped.setdefault(name, [])\n value.append((variable, indexes.get(name)))\n return grouped"},{"col":4,"comment":"null","endLoc":77,"header":"def decode(self, variable, name=None)","id":539,"name":"decode","nodeType":"Function","startLoc":69,"text":"def decode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_decoding(variable)\n\n if \"_Encoding\" in attrs:\n string_encoding = pop_to(attrs, encoding, \"_Encoding\")\n func = partial(decode_bytes_array, encoding=string_encoding)\n data = lazy_elemwise_func(data, func, np.dtype(object))\n\n return Variable(dims, data, attrs, encoding)"},{"col":0,"comment":"\n A convenience function which pops a key k from source to dest.\n None values are not passed on. If k already exists in dest an\n error is raised.\n ","endLoc":135,"header":"def pop_to(source, dest, key, name=None)","id":540,"name":"pop_to","nodeType":"Function","startLoc":126,"text":"def pop_to(source, dest, key, name=None):\n \"\"\"\n A convenience function which pops a key k from source to dest.\n None values are not passed on. If k already exists in dest an\n error is raised.\n \"\"\"\n value = source.pop(key, None)\n if value is not None:\n safe_setitem(dest, key, value, name=name)\n return value"},{"col":0,"comment":"null","endLoc":123,"header":"def safe_setitem(dest, key, value, name=None)","id":541,"name":"safe_setitem","nodeType":"Function","startLoc":114,"text":"def safe_setitem(dest, key, value, name=None):\n if key in dest:\n var_str = \" on variable {!r}\".format(name) if name else \"\"\n raise ValueError(\n \"failed to prevent overwriting existing key {} in attrs{}. \"\n \"This is probably an encoding field used by xarray to describe \"\n \"how a variable is serialized. To proceed, remove this key from \"\n \"the variable's attributes manually.\".format(key, var_str)\n )\n dest[key] = value"},{"col":4,"comment":"null","endLoc":3518,"header":"def _assert_all_in_dataset(\n self, names: Iterable[Hashable], virtual_okay: bool = False\n ) -> None","id":542,"name":"_assert_all_in_dataset","nodeType":"Function","startLoc":3508,"text":"def _assert_all_in_dataset(\n self, names: Iterable[Hashable], virtual_okay: bool = False\n ) -> None:\n bad_names = set(names) - set(self._variables)\n if virtual_okay:\n bad_names -= self.virtual_variables\n if bad_names:\n raise ValueError(\n \"One or more of the specified variables \"\n \"cannot be found in this dataset\"\n )"},{"col":0,"comment":"Lazily apply an element-wise function to an array.\n\n Parameters\n ----------\n array : any valid value of Variable._data\n func : callable\n Function to apply to indexed slices of an array. 
For use with dask,\n this should be a pickle-able object.\n dtype : coercible to np.dtype\n Dtype for the result of this function.\n\n Returns\n -------\n Either a dask.array.Array or _ElementwiseFunctionArray.\n ","endLoc":103,"header":"def lazy_elemwise_func(array, func, dtype)","id":543,"name":"lazy_elemwise_func","nodeType":"Function","startLoc":84,"text":"def lazy_elemwise_func(array, func, dtype):\n \"\"\"Lazily apply an element-wise function to an array.\n\n Parameters\n ----------\n array : any valid value of Variable._data\n func : callable\n Function to apply to indexed slices of an array. For use with dask,\n this should be a pickle-able object.\n dtype : coercible to np.dtype\n Dtype for the result of this function.\n\n Returns\n -------\n Either a dask.array.Array or _ElementwiseFunctionArray.\n \"\"\"\n if isinstance(array, dask_array_type):\n return array.map_blocks(func, dtype=dtype)\n else:\n return _ElementwiseFunctionArray(array, func, dtype)"},{"col":4,"comment":"Returns a copy of this dataset.\n\n If `deep=True`, a deep copy is made of each of the component variables.\n Otherwise, a shallow copy of each of the component variable is made, so\n that the underlying memory region of the new dataset is the same as in\n the original dataset.\n\n Use `data` to create a new object with the same structure as\n original but entirely new data.\n\n Parameters\n ----------\n deep : bool, optional\n Whether each component variable is loaded into memory and copied onto\n the new object. Default is False.\n data : dict-like, optional\n Data to use in the new object. Each item in `data` must have same\n shape as corresponding data variable in original. When `data` is\n used, `deep` is ignored for the data variables and only used for\n coords.\n\n Returns\n -------\n object : Dataset\n New object with dimensions, attributes, coordinates, name, encoding,\n and optionally data copied from original.\n\n Examples\n --------\n\n Shallow copy versus deep copy\n\n >>> da = xr.DataArray(np.random.randn(2, 3))\n >>> ds = xr.Dataset({'foo': da, 'bar': ('x', [-1, 2])},\n coords={'x': ['one', 'two']})\n >>> ds.copy()\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) >> ds_0 = ds.copy(deep=False)\n >>> ds_0['foo'][0, 0] = 7\n >>> ds_0\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) >> ds\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) >> ds.copy(data={'foo': np.arange(6).reshape(2, 3), 'bar': ['a', 'b']})\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) >> ds\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) \"Dataset\"","id":544,"name":"copy","nodeType":"Function","startLoc":970,"text":"def copy(self, deep: bool = False, data: Mapping = None) -> \"Dataset\":\n \"\"\"Returns a copy of this dataset.\n\n If `deep=True`, a deep copy is made of each of the component variables.\n Otherwise, a shallow copy of each of the component variable is made, so\n that the underlying memory region of the new dataset is the same as in\n the original dataset.\n\n Use `data` to create a new object with the same structure as\n original but entirely new data.\n\n Parameters\n ----------\n deep : bool, optional\n Whether each component variable is loaded into memory and copied onto\n the new object. Default is False.\n data : dict-like, optional\n Data to use in the new object. Each item in `data` must have same\n shape as corresponding data variable in original. 
When `data` is\n used, `deep` is ignored for the data variables and only used for\n coords.\n\n Returns\n -------\n object : Dataset\n New object with dimensions, attributes, coordinates, name, encoding,\n and optionally data copied from original.\n\n Examples\n --------\n\n Shallow copy versus deep copy\n\n >>> da = xr.DataArray(np.random.randn(2, 3))\n >>> ds = xr.Dataset({'foo': da, 'bar': ('x', [-1, 2])},\n coords={'x': ['one', 'two']})\n >>> ds.copy()\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) >> ds_0 = ds.copy(deep=False)\n >>> ds_0['foo'][0, 0] = 7\n >>> ds_0\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) >> ds\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) >> ds.copy(data={'foo': np.arange(6).reshape(2, 3), 'bar': ['a', 'b']})\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) >> ds\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) \"Dataset\"","id":547,"name":"merge","nodeType":"Function","startLoc":146,"text":"def merge(self, other: \"Coordinates\") -> \"Dataset\":\n \"\"\"Merge two sets of coordinates to create a new Dataset\n\n The method implements the logic used for joining coordinates in the\n result of a binary operation performed on xarray objects:\n\n - If two index coordinates conflict (are not equal), an exception is\n raised. You must align your data before passing it to this method.\n - If an index coordinate and a non-index coordinate conflict, the non-\n index coordinate is dropped.\n - If two non-index coordinates conflict, both are dropped.\n\n Parameters\n ----------\n other : DatasetCoordinates or DataArrayCoordinates\n The coordinates from another dataset or data array.\n\n Returns\n -------\n merged : Dataset\n A new Dataset with merged coordinates.\n \"\"\"\n from .dataset import Dataset\n\n if other is None:\n return self.to_dataset()\n\n if not isinstance(other, Coordinates):\n other = Dataset(coords=other).coords\n\n coords, indexes = merge_coordinates_without_align([self, other])\n coord_names = set(coords)\n merged = Dataset._construct_direct(\n variables=coords, coord_names=coord_names, indexes=indexes\n )\n return merged"},{"id":548,"name":"xarray","nodeType":"Package"},{"fileName":"ufuncs.py","filePath":"xarray","id":549,"nodeType":"File","text":"\"\"\"xarray specific universal functions\n\nHandles unary and binary operations for the following types, in ascending\npriority order:\n- scalars\n- numpy.ndarray\n- dask.array.Array\n- xarray.Variable\n- xarray.DataArray\n- xarray.Dataset\n- xarray.core.groupby.GroupBy\n\nOnce NumPy 1.10 comes out with support for overriding ufuncs, this module will\nhopefully no longer be necessary.\n\"\"\"\nimport warnings as _warnings\n\nimport numpy as _np\n\nfrom .core.dataarray import DataArray as _DataArray\nfrom .core.dataset import Dataset as _Dataset\nfrom .core.duck_array_ops import _dask_or_eager_func\nfrom .core.groupby import GroupBy as _GroupBy\nfrom .core.pycompat import dask_array_type as _dask_array_type\nfrom .core.variable import Variable as _Variable\n\n_xarray_types = (_Variable, _DataArray, _Dataset, _GroupBy)\n_dispatch_order = (_np.ndarray, _dask_array_type) + _xarray_types\n\n\ndef _dispatch_priority(obj):\n for priority, cls in enumerate(_dispatch_order):\n if isinstance(obj, cls):\n return priority\n return -1\n\n\nclass _UFuncDispatcher:\n \"\"\"Wrapper for dispatching ufuncs.\"\"\"\n\n def __init__(self, name):\n self._name = name\n\n def __call__(self, *args, **kwargs):\n if self._name 
not in [\"angle\", \"iscomplex\"]:\n _warnings.warn(\n \"xarray.ufuncs will be deprecated when xarray no longer \"\n \"supports versions of numpy older than v1.17. Instead, use \"\n \"numpy ufuncs directly.\",\n PendingDeprecationWarning,\n stacklevel=2,\n )\n\n new_args = args\n f = _dask_or_eager_func(self._name, array_args=slice(len(args)))\n if len(args) > 2 or len(args) == 0:\n raise TypeError(\n \"cannot handle %s arguments for %r\" % (len(args), self._name)\n )\n elif len(args) == 1:\n if isinstance(args[0], _xarray_types):\n f = args[0]._unary_op(self)\n else: # len(args) = 2\n p1, p2 = map(_dispatch_priority, args)\n if p1 >= p2:\n if isinstance(args[0], _xarray_types):\n f = args[0]._binary_op(self)\n else:\n if isinstance(args[1], _xarray_types):\n f = args[1]._binary_op(self, reflexive=True)\n new_args = tuple(reversed(args))\n res = f(*new_args, **kwargs)\n if res is NotImplemented:\n raise TypeError(\n \"%r not implemented for types (%r, %r)\"\n % (self._name, type(args[0]), type(args[1]))\n )\n return res\n\n\ndef _create_op(name):\n func = _UFuncDispatcher(name)\n func.__name__ = name\n doc = getattr(_np, name).__doc__\n func.__doc__ = (\n \"xarray specific variant of numpy.%s. Handles \"\n \"xarray.Dataset, xarray.DataArray, xarray.Variable, \"\n \"numpy.ndarray and dask.array.Array objects with \"\n \"automatic dispatching.\\n\\n\"\n \"Documentation from numpy:\\n\\n%s\" % (name, doc)\n )\n return func\n\n\n__all__ = \"\"\"logaddexp logaddexp2 conj exp log log2 log10 log1p expm1 sqrt\n square sin cos tan arcsin arccos arctan arctan2 hypot sinh cosh\n tanh arcsinh arccosh arctanh deg2rad rad2deg logical_and\n logical_or logical_xor logical_not maximum minimum fmax fmin\n isreal iscomplex isfinite isinf isnan signbit copysign nextafter\n ldexp fmod floor ceil trunc degrees radians rint fix angle real\n imag fabs sign frexp fmod\n \"\"\".split()\n\nfor name in __all__:\n globals()[name] = _create_op(name)\n"},{"className":"Dataset","col":0,"comment":"A multi-dimensional, in memory, array database.\n\n A dataset resembles an in-memory representation of a NetCDF file, and\n consists of variables, coordinates and attributes which together form a\n self describing dataset.\n\n Dataset implements the mapping interface with keys given by variable names\n and values given by DataArray objects for each variable name.\n\n One dimensional variables with name equal to their dimension are index\n coordinates used for label based indexing.\n ","endLoc":5447,"id":550,"nodeType":"Class","startLoc":397,"text":"class Dataset(Mapping, ImplementsDatasetReduce, DataWithCoords):\n \"\"\"A multi-dimensional, in memory, array database.\n\n A dataset resembles an in-memory representation of a NetCDF file, and\n consists of variables, coordinates and attributes which together form a\n self describing dataset.\n\n Dataset implements the mapping interface with keys given by variable names\n and values given by DataArray objects for each variable name.\n\n One dimensional variables with name equal to their dimension are index\n coordinates used for label based indexing.\n \"\"\"\n\n _accessors: Optional[Dict[str, Any]]\n _attrs: Optional[Dict[Hashable, Any]]\n _coord_names: Set[Hashable]\n _dims: Dict[Hashable, int]\n _encoding: Optional[Dict[Hashable, Any]]\n _indexes: Optional[Dict[Hashable, pd.Index]]\n _variables: Dict[Hashable, Variable]\n\n __slots__ = (\n \"_accessors\",\n \"_attrs\",\n \"_coord_names\",\n \"_dims\",\n \"_encoding\",\n \"_file_obj\",\n \"_indexes\",\n \"_variables\",\n 
\"__weakref__\",\n )\n\n _groupby_cls = groupby.DatasetGroupBy\n _rolling_cls = rolling.DatasetRolling\n _coarsen_cls = rolling.DatasetCoarsen\n _resample_cls = resample.DatasetResample\n\n def __init__(\n self,\n # could make a VariableArgs to use more generally, and refine these\n # categories\n data_vars: Mapping[Hashable, Any] = None,\n coords: Mapping[Hashable, Any] = None,\n attrs: Mapping[Hashable, Any] = None,\n compat=None,\n ):\n \"\"\"To load data from a file or file-like object, use the `open_dataset`\n function.\n\n Parameters\n ----------\n data_vars : dict-like, optional\n A mapping from variable names to :py:class:`~xarray.DataArray`\n objects, :py:class:`~xarray.Variable` objects or to tuples of the\n form ``(dims, data[, attrs])`` which can be used as arguments to\n create a new ``Variable``. Each dimension must have the same length\n in all variables in which it appears.\n\n The following notations are accepted:\n\n - mapping {var name: DataArray}\n - mapping {var name: Variable}\n - mapping {var name: (dimension name, array-like)}\n - mapping {var name: (tuple of dimension names, array-like)}\n - mapping {dimension name: array-like}\n (it will be automatically moved to coords, see below)\n\n Each dimension must have the same length in all variables in which\n it appears.\n coords : dict-like, optional\n Another mapping in similar form as the `data_vars` argument,\n except the each item is saved on the dataset as a \"coordinate\".\n These variables have an associated meaning: they describe\n constant/fixed/independent quantities, unlike the\n varying/measured/dependent quantities that belong in `variables`.\n Coordinates values may be given by 1-dimensional arrays or scalars,\n in which case `dims` do not need to be supplied: 1D arrays will be\n assumed to give index values along the dimension with the same\n name.\n\n The following notations are accepted:\n\n - mapping {coord name: DataArray}\n - mapping {coord name: Variable}\n - mapping {coord name: (dimension name, array-like)}\n - mapping {coord name: (tuple of dimension names, array-like)}\n - mapping {dimension name: array-like}\n (the dimension name is implicitly set to be the same as the coord name)\n\n The last notation implies that the coord name is the same as the\n dimension name.\n\n attrs : dict-like, optional\n Global attributes to save on this dataset.\n compat : deprecated\n \"\"\"\n if compat is not None:\n warnings.warn(\n \"The `compat` argument to Dataset is deprecated and will be \"\n \"removed in 0.15.\"\n \"Instead, use `merge` to control how variables are combined\",\n FutureWarning,\n stacklevel=2,\n )\n else:\n compat = \"broadcast_equals\"\n\n # TODO(shoyer): expose indexes as a public argument in __init__\n\n if data_vars is None:\n data_vars = {}\n if coords is None:\n coords = {}\n\n both_data_and_coords = set(data_vars) & set(coords)\n if both_data_and_coords:\n raise ValueError(\n \"variables %r are found in both data_vars and coords\"\n % both_data_and_coords\n )\n\n if isinstance(coords, Dataset):\n coords = coords.variables\n\n variables, coord_names, dims, indexes = merge_data_and_coords(\n data_vars, coords, compat=compat\n )\n\n self._accessors = None\n self._attrs = dict(attrs) if attrs is not None else None\n self._file_obj = None\n self._encoding = None\n self._variables = variables\n self._coord_names = coord_names\n self._dims = dims\n self._indexes = indexes\n\n @classmethod\n def load_store(cls, store, decoder=None) -> \"Dataset\":\n \"\"\"Create a new dataset from the 
contents of a backends.*DataStore\n object\n \"\"\"\n variables, attributes = store.load()\n if decoder:\n variables, attributes = decoder(variables, attributes)\n obj = cls(variables, attrs=attributes)\n obj._file_obj = store\n return obj\n\n @property\n def variables(self) -> Mapping[Hashable, Variable]:\n \"\"\"Low level interface to Dataset contents as dict of Variable objects.\n\n This ordered dictionary is frozen to prevent mutation that could\n violate Dataset invariants. It contains all variable objects\n constituting the Dataset, including both data variables and\n coordinates.\n \"\"\"\n return Frozen(self._variables)\n\n @property\n def attrs(self) -> Dict[Hashable, Any]:\n \"\"\"Dictionary of global attributes on this dataset\n \"\"\"\n if self._attrs is None:\n self._attrs = {}\n return self._attrs\n\n @attrs.setter\n def attrs(self, value: Mapping[Hashable, Any]) -> None:\n self._attrs = dict(value)\n\n @property\n def encoding(self) -> Dict:\n \"\"\"Dictionary of global encoding attributes on this dataset\n \"\"\"\n if self._encoding is None:\n self._encoding = {}\n return self._encoding\n\n @encoding.setter\n def encoding(self, value: Mapping) -> None:\n self._encoding = dict(value)\n\n @property\n def dims(self) -> Mapping[Hashable, int]:\n \"\"\"Mapping from dimension names to lengths.\n\n Cannot be modified directly, but is updated when adding new variables.\n\n Note that type of this object differs from `DataArray.dims`.\n See `Dataset.sizes` and `DataArray.sizes` for consistently named\n properties.\n \"\"\"\n return Frozen(SortedKeysDict(self._dims))\n\n @property\n def sizes(self) -> Mapping[Hashable, int]:\n \"\"\"Mapping from dimension names to lengths.\n\n Cannot be modified directly, but is updated when adding new variables.\n\n This is an alias for `Dataset.dims` provided for the benefit of\n consistency with `DataArray.sizes`.\n\n See also\n --------\n DataArray.sizes\n \"\"\"\n return self.dims\n\n def load(self, **kwargs) -> \"Dataset\":\n \"\"\"Manually trigger loading and/or computation of this dataset's data\n from disk or a remote source into memory and return this dataset.\n Unlike compute, the original dataset is modified and returned.\n\n Normally, it should not be necessary to call this method in user code,\n because all xarray functions should either work on deferred data or\n load data automatically. 
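A short sketch of the ``load`` workflow described above; ``"big.nc"`` is a placeholder path and the chunked open assumes dask is installed::

    import xarray as xr

    ds = xr.open_dataset("big.nc", chunks={"time": 365})  # lazy, dask-backed variables
    ds.load()  # computes in place and returns the same, now in-memory, dataset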
However, this method can be necessary when\n working with many file objects on disk.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.array.compute``.\n\n See Also\n --------\n dask.array.compute\n \"\"\"\n # access .data to coerce everything to numpy or dask arrays\n lazy_data = {\n k: v._data\n for k, v in self.variables.items()\n if isinstance(v._data, dask_array_type)\n }\n if lazy_data:\n import dask.array as da\n\n # evaluate all the dask arrays simultaneously\n evaluated_data = da.compute(*lazy_data.values(), **kwargs)\n\n for k, data in zip(lazy_data, evaluated_data):\n self.variables[k].data = data\n\n # load everything else sequentially\n for k, v in self.variables.items():\n if k not in lazy_data:\n v.load()\n\n return self\n\n def __dask_graph__(self):\n graphs = {k: v.__dask_graph__() for k, v in self.variables.items()}\n graphs = {k: v for k, v in graphs.items() if v is not None}\n if not graphs:\n return None\n else:\n try:\n from dask.highlevelgraph import HighLevelGraph\n\n return HighLevelGraph.merge(*graphs.values())\n except ImportError:\n from dask import sharedict\n\n return sharedict.merge(*graphs.values())\n\n def __dask_keys__(self):\n import dask\n\n return [\n v.__dask_keys__()\n for v in self.variables.values()\n if dask.is_dask_collection(v)\n ]\n\n def __dask_layers__(self):\n import dask\n\n return sum(\n [\n v.__dask_layers__()\n for v in self.variables.values()\n if dask.is_dask_collection(v)\n ],\n (),\n )\n\n @property\n def __dask_optimize__(self):\n import dask.array as da\n\n return da.Array.__dask_optimize__\n\n @property\n def __dask_scheduler__(self):\n import dask.array as da\n\n return da.Array.__dask_scheduler__\n\n def __dask_postcompute__(self):\n import dask\n\n info = [\n (True, k, v.__dask_postcompute__())\n if dask.is_dask_collection(v)\n else (False, k, v)\n for k, v in self._variables.items()\n ]\n args = (\n info,\n self._coord_names,\n self._dims,\n self._attrs,\n self._indexes,\n self._encoding,\n self._file_obj,\n )\n return self._dask_postcompute, args\n\n def __dask_postpersist__(self):\n import dask\n\n info = [\n (True, k, v.__dask_postpersist__())\n if dask.is_dask_collection(v)\n else (False, k, v)\n for k, v in self._variables.items()\n ]\n args = (\n info,\n self._coord_names,\n self._dims,\n self._attrs,\n self._indexes,\n self._encoding,\n self._file_obj,\n )\n return self._dask_postpersist, args\n\n @staticmethod\n def _dask_postcompute(results, info, *args):\n variables = {}\n results2 = list(results[::-1])\n for is_dask, k, v in info:\n if is_dask:\n func, args2 = v\n r = results2.pop()\n result = func(r, *args2)\n else:\n result = v\n variables[k] = result\n\n final = Dataset._construct_direct(variables, *args)\n return final\n\n @staticmethod\n def _dask_postpersist(dsk, info, *args):\n variables = {}\n for is_dask, k, v in info:\n if is_dask:\n func, args2 = v\n result = func(dsk, *args2)\n else:\n result = v\n variables[k] = result\n\n return Dataset._construct_direct(variables, *args)\n\n def compute(self, **kwargs) -> \"Dataset\":\n \"\"\"Manually trigger loading and/or computation of this dataset's data\n from disk or a remote source into memory and return a new dataset.\n Unlike load, the original dataset is left unaltered.\n\n Normally, it should not be necessary to call this method in user code,\n because all xarray functions should either work on deferred data or\n load data automatically. 
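A minimal sketch of how this differs from ``load`` (requires dask; ``ds`` is assumed to be a small in-memory Dataset):\n\n >>> lazy = ds.chunk() # dask-backed copy of ``ds``\n >>> eager = lazy.compute() # new Dataset holding the computed numpy arrays\n >>> # ``lazy`` itself still holds dask arrays, unlike after ``lazy.load()``\n\n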
However, this method can be necessary when\n working with many file objects on disk.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.array.compute``.\n\n See Also\n --------\n dask.array.compute\n \"\"\"\n new = self.copy(deep=False)\n return new.load(**kwargs)\n\n def _persist_inplace(self, **kwargs) -> \"Dataset\":\n \"\"\"Persist all Dask arrays in memory\n \"\"\"\n # access .data to coerce everything to numpy or dask arrays\n lazy_data = {\n k: v._data\n for k, v in self.variables.items()\n if isinstance(v._data, dask_array_type)\n }\n if lazy_data:\n import dask\n\n # evaluate all the dask arrays simultaneously\n evaluated_data = dask.persist(*lazy_data.values(), **kwargs)\n\n for k, data in zip(lazy_data, evaluated_data):\n self.variables[k].data = data\n\n return self\n\n def persist(self, **kwargs) -> \"Dataset\":\n \"\"\" Trigger computation, keeping data as dask arrays\n\n This operation can be used to trigger computation on underlying dask\n arrays, similar to ``.compute()`` or ``.load()``. However this\n operation keeps the data as dask arrays. This is particularly useful\n when using the dask.distributed scheduler and you want to load a large\n amount of data into distributed memory.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.persist``.\n\n See Also\n --------\n dask.persist\n \"\"\"\n new = self.copy(deep=False)\n return new._persist_inplace(**kwargs)\n\n @classmethod\n def _construct_direct(\n cls,\n variables,\n coord_names,\n dims=None,\n attrs=None,\n indexes=None,\n encoding=None,\n file_obj=None,\n ):\n \"\"\"Shortcut around __init__ for internal use when we want to skip\n costly validation\n \"\"\"\n if dims is None:\n dims = calculate_dimensions(variables)\n obj = object.__new__(cls)\n obj._variables = variables\n obj._coord_names = coord_names\n obj._dims = dims\n obj._indexes = indexes\n obj._attrs = attrs\n obj._file_obj = file_obj\n obj._encoding = encoding\n obj._accessors = None\n return obj\n\n __default = object()\n\n @classmethod\n def _from_vars_and_coord_names(cls, variables, coord_names, attrs=None):\n return cls._construct_direct(variables, coord_names, attrs=attrs)\n\n # TODO(shoyer): renable type checking on this signature when pytype has a\n # good way to handle defaulting arguments to a sentinel value:\n # https://github.com/python/mypy/issues/1803\n def _replace( # type: ignore\n self,\n variables: Dict[Hashable, Variable] = None,\n coord_names: Set[Hashable] = None,\n dims: Dict[Any, int] = None,\n attrs: Optional[Dict[Hashable, Any]] = __default,\n indexes: Optional[Dict[Any, pd.Index]] = __default,\n encoding: Optional[dict] = __default,\n inplace: bool = False,\n ) -> \"Dataset\":\n \"\"\"Fastpath constructor for internal use.\n\n Returns an object with optionally with replaced attributes.\n\n Explicitly passed arguments are *not* copied when placed on the new\n dataset. 
It is up to the caller to ensure that they have the right type\n and are not used elsewhere.\n \"\"\"\n if inplace:\n if variables is not None:\n self._variables = variables\n if coord_names is not None:\n self._coord_names = coord_names\n if dims is not None:\n self._dims = dims\n if attrs is not self.__default:\n self._attrs = attrs\n if indexes is not self.__default:\n self._indexes = indexes\n if encoding is not self.__default:\n self._encoding = encoding\n obj = self\n else:\n if variables is None:\n variables = self._variables.copy()\n if coord_names is None:\n coord_names = self._coord_names.copy()\n if dims is None:\n dims = self._dims.copy()\n if attrs is self.__default:\n attrs = copy.copy(self._attrs)\n if indexes is self.__default:\n indexes = copy.copy(self._indexes)\n if encoding is self.__default:\n encoding = copy.copy(self._encoding)\n obj = self._construct_direct(\n variables, coord_names, dims, attrs, indexes, encoding\n )\n return obj\n\n def _replace_with_new_dims( # type: ignore\n self,\n variables: Dict[Hashable, Variable],\n coord_names: set = None,\n attrs: Optional[Dict[Hashable, Any]] = __default,\n indexes: Dict[Hashable, pd.Index] = __default,\n inplace: bool = False,\n ) -> \"Dataset\":\n \"\"\"Replace variables with recalculated dimensions.\"\"\"\n dims = calculate_dimensions(variables)\n return self._replace(\n variables, coord_names, dims, attrs, indexes, inplace=inplace\n )\n\n def _replace_vars_and_dims( # type: ignore\n self,\n variables: Dict[Hashable, Variable],\n coord_names: set = None,\n dims: Dict[Hashable, int] = None,\n attrs: Dict[Hashable, Any] = __default,\n inplace: bool = False,\n ) -> \"Dataset\":\n \"\"\"Deprecated version of _replace_with_new_dims().\n\n Unlike _replace_with_new_dims(), this method always recalculates\n indexes from variables.\n \"\"\"\n if dims is None:\n dims = calculate_dimensions(variables)\n return self._replace(\n variables, coord_names, dims, attrs, indexes=None, inplace=inplace\n )\n\n def _overwrite_indexes(self, indexes: Mapping[Any, pd.Index]) -> \"Dataset\":\n if not indexes:\n return self\n\n variables = self._variables.copy()\n new_indexes = dict(self.indexes)\n for name, idx in indexes.items():\n variables[name] = IndexVariable(name, idx)\n new_indexes[name] = idx\n obj = self._replace(variables, indexes=new_indexes)\n\n # switch from dimension to level names, if necessary\n dim_names: Dict[Hashable, str] = {}\n for dim, idx in indexes.items():\n if not isinstance(idx, pd.MultiIndex) and idx.name != dim:\n dim_names[dim] = idx.name\n if dim_names:\n obj = obj.rename(dim_names)\n return obj\n\n def copy(self, deep: bool = False, data: Mapping = None) -> \"Dataset\":\n \"\"\"Returns a copy of this dataset.\n\n If `deep=True`, a deep copy is made of each of the component variables.\n Otherwise, a shallow copy of each of the component variable is made, so\n that the underlying memory region of the new dataset is the same as in\n the original dataset.\n\n Use `data` to create a new object with the same structure as\n original but entirely new data.\n\n Parameters\n ----------\n deep : bool, optional\n Whether each component variable is loaded into memory and copied onto\n the new object. Default is False.\n data : dict-like, optional\n Data to use in the new object. Each item in `data` must have same\n shape as corresponding data variable in original. 
When `data` is\n used, `deep` is ignored for the data variables and only used for\n coords.\n\n Returns\n -------\n object : Dataset\n New object with dimensions, attributes, coordinates, name, encoding,\n and optionally data copied from original.\n\n Examples\n --------\n\n Shallow copy versus deep copy\n\n >>> da = xr.DataArray(np.random.randn(2, 3))\n >>> ds = xr.Dataset({'foo': da, 'bar': ('x', [-1, 2])},\n coords={'x': ['one', 'two']})\n >>> ds.copy()\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) >> ds_0 = ds.copy(deep=False)\n >>> ds_0['foo'][0, 0] = 7\n >>> ds_0\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) >> ds\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) >> ds.copy(data={'foo': np.arange(6).reshape(2, 3), 'bar': ['a', 'b']})\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) >> ds\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) Dict[str, Hashable]:\n \"\"\"Return a mapping of all MultiIndex levels and their corresponding\n coordinate name.\n \"\"\"\n level_coords: Dict[str, Hashable] = {}\n for name, index in self.indexes.items():\n if isinstance(index, pd.MultiIndex):\n level_names = index.names\n (dim,) = self.variables[name].dims\n level_coords.update({lname: dim for lname in level_names})\n return level_coords\n\n def _copy_listed(self, names: Iterable[Hashable]) -> \"Dataset\":\n \"\"\"Create a new Dataset with the listed variables from this dataset and\n the all relevant coordinates. Skips all validation.\n \"\"\"\n variables: Dict[Hashable, Variable] = {}\n coord_names = set()\n indexes: Dict[Hashable, pd.Index] = {}\n\n for name in names:\n try:\n variables[name] = self._variables[name]\n except KeyError:\n ref_name, var_name, var = _get_virtual_variable(\n self._variables, name, self._level_coords, self.dims\n )\n variables[var_name] = var\n if ref_name in self._coord_names or ref_name in self.dims:\n coord_names.add(var_name)\n if (var_name,) == var.dims:\n indexes[var_name] = var.to_index()\n\n needed_dims: Set[Hashable] = set()\n for v in variables.values():\n needed_dims.update(v.dims)\n\n dims = {k: self.dims[k] for k in needed_dims}\n\n for k in self._coord_names:\n if set(self.variables[k].dims) <= needed_dims:\n variables[k] = self._variables[k]\n coord_names.add(k)\n if k in self.indexes:\n indexes[k] = self.indexes[k]\n\n return self._replace(variables, coord_names, dims, indexes=indexes)\n\n def _construct_dataarray(self, name: Hashable) -> \"DataArray\":\n \"\"\"Construct a DataArray by indexing this dataset\n \"\"\"\n from .dataarray import DataArray\n\n try:\n variable = self._variables[name]\n except KeyError:\n _, name, variable = _get_virtual_variable(\n self._variables, name, self._level_coords, self.dims\n )\n\n needed_dims = set(variable.dims)\n\n coords: Dict[Hashable, Variable] = {}\n for k in self.coords:\n if set(self.variables[k].dims) <= needed_dims:\n coords[k] = self.variables[k]\n\n if self._indexes is None:\n indexes = None\n else:\n indexes = {k: v for k, v in self._indexes.items() if k in coords}\n\n return DataArray(variable, coords, name=name, indexes=indexes, fastpath=True)\n\n def __copy__(self) -> \"Dataset\":\n return self.copy(deep=False)\n\n def __deepcopy__(self, memo=None) -> \"Dataset\":\n # memo does nothing but is required for compatibility with\n # copy.deepcopy\n return self.copy(deep=True)\n\n @property\n def _attr_sources(self) -> List[Mapping[Hashable, Any]]:\n \"\"\"List of places to look-up items for 
attribute-style access\n \"\"\"\n return self._item_sources + [self.attrs]\n\n @property\n def _item_sources(self) -> List[Mapping[Hashable, Any]]:\n \"\"\"List of places to look-up items for key-completion\n \"\"\"\n return [\n self.data_vars,\n self.coords,\n {d: self[d] for d in self.dims},\n LevelCoordinatesSource(self),\n ]\n\n def __contains__(self, key: object) -> bool:\n \"\"\"The 'in' operator will return true or false depending on whether\n 'key' is an array in the dataset or not.\n \"\"\"\n return key in self._variables\n\n def __len__(self) -> int:\n return len(self.data_vars)\n\n def __bool__(self) -> bool:\n return bool(self.data_vars)\n\n def __iter__(self) -> Iterator[Hashable]:\n return iter(self.data_vars)\n\n def __array__(self, dtype=None):\n raise TypeError(\n \"cannot directly convert an xarray.Dataset into a \"\n \"numpy array. Instead, create an xarray.DataArray \"\n \"first, either with indexing on the Dataset or by \"\n \"invoking the `to_array()` method.\"\n )\n\n @property\n def nbytes(self) -> int:\n return sum(v.nbytes for v in self.variables.values())\n\n @property\n def loc(self) -> _LocIndexer:\n \"\"\"Attribute for location based indexing. Only supports __getitem__,\n and only when the key is a dict of the form {dim: labels}.\n \"\"\"\n return _LocIndexer(self)\n\n def __getitem__(self, key: Any) -> \"Union[DataArray, Dataset]\":\n \"\"\"Access variables or coordinates this dataset as a\n :py:class:`~xarray.DataArray`.\n\n Indexing with a list of names will return a new ``Dataset`` object.\n \"\"\"\n # TODO(shoyer): type this properly: https://github.com/python/mypy/issues/7328\n if utils.is_dict_like(key):\n return self.isel(**cast(Mapping, key))\n\n if hashable(key):\n return self._construct_dataarray(key)\n else:\n return self._copy_listed(np.asarray(key))\n\n def __setitem__(self, key: Hashable, value) -> None:\n \"\"\"Add an array to this dataset.\n\n If value is a `DataArray`, call its `select_vars()` method, rename it\n to `key` and merge the contents of the resulting dataset into this\n dataset.\n\n If value is an `Variable` object (or tuple of form\n ``(dims, data[, attrs])``), add it to this dataset as a new\n variable.\n \"\"\"\n if utils.is_dict_like(key):\n raise NotImplementedError(\n \"cannot yet use a dictionary as a key \" \"to set Dataset values\"\n )\n\n self.update({key: value})\n\n def __delitem__(self, key: Hashable) -> None:\n \"\"\"Remove a variable from this dataset.\n \"\"\"\n del self._variables[key]\n self._coord_names.discard(key)\n if key in self.indexes:\n assert self._indexes is not None\n del self._indexes[key]\n self._dims = calculate_dimensions(self._variables)\n\n # mutable objects should not be hashable\n # https://github.com/python/mypy/issues/4266\n __hash__ = None # type: ignore\n\n def _all_compat(self, other: \"Dataset\", compat_str: str) -> bool:\n \"\"\"Helper function for equals and identical\n \"\"\"\n\n # some stores (e.g., scipy) do not seem to preserve order, so don't\n # require matching order for equality\n def compat(x: Variable, y: Variable) -> bool:\n return getattr(x, compat_str)(y)\n\n return self._coord_names == other._coord_names and utils.dict_equiv(\n self._variables, other._variables, compat=compat\n )\n\n def broadcast_equals(self, other: \"Dataset\") -> bool:\n \"\"\"Two Datasets are broadcast equal if they are equal after\n broadcasting all variables against each other.\n\n For example, variables that are scalar in one dataset but non-scalar in\n the other dataset can still be broadcast equal 
if the the non-scalar\n variable is a constant.\n\n See Also\n --------\n Dataset.equals\n Dataset.identical\n \"\"\"\n try:\n return self._all_compat(other, \"broadcast_equals\")\n except (TypeError, AttributeError):\n return False\n\n def equals(self, other: \"Dataset\") -> bool:\n \"\"\"Two Datasets are equal if they have matching variables and\n coordinates, all of which are equal.\n\n Datasets can still be equal (like pandas objects) if they have NaN\n values in the same locations.\n\n This method is necessary because `v1 == v2` for ``Dataset``\n does element-wise comparisons (like numpy.ndarrays).\n\n See Also\n --------\n Dataset.broadcast_equals\n Dataset.identical\n \"\"\"\n try:\n return self._all_compat(other, \"equals\")\n except (TypeError, AttributeError):\n return False\n\n def identical(self, other: \"Dataset\") -> bool:\n \"\"\"Like equals, but also checks all dataset attributes and the\n attributes on all variables and coordinates.\n\n See Also\n --------\n Dataset.broadcast_equals\n Dataset.equals\n \"\"\"\n try:\n return utils.dict_equiv(self.attrs, other.attrs) and self._all_compat(\n other, \"identical\"\n )\n except (TypeError, AttributeError):\n return False\n\n @property\n def indexes(self) -> Indexes:\n \"\"\"Mapping of pandas.Index objects used for label based indexing\n \"\"\"\n if self._indexes is None:\n self._indexes = default_indexes(self._variables, self._dims)\n return Indexes(self._indexes)\n\n @property\n def coords(self) -> DatasetCoordinates:\n \"\"\"Dictionary of xarray.DataArray objects corresponding to coordinate\n variables\n \"\"\"\n return DatasetCoordinates(self)\n\n @property\n def data_vars(self) -> DataVariables:\n \"\"\"Dictionary of DataArray objects corresponding to data variables\n \"\"\"\n return DataVariables(self)\n\n def set_coords(\n self, names: \"Union[Hashable, Iterable[Hashable]]\", inplace: bool = None\n ) -> \"Dataset\":\n \"\"\"Given names of one or more variables, set them as coordinates\n\n Parameters\n ----------\n names : hashable or iterable of hashables\n Name(s) of variables in this dataset to convert into coordinates.\n\n Returns\n -------\n Dataset\n\n See also\n --------\n Dataset.swap_dims\n \"\"\"\n # TODO: allow inserting new coordinates with this method, like\n # DataFrame.set_index?\n # nb. check in self._variables, not self.data_vars to insure that the\n # operation is idempotent\n _check_inplace(inplace)\n if isinstance(names, str) or not isinstance(names, Iterable):\n names = [names]\n else:\n names = list(names)\n self._assert_all_in_dataset(names)\n obj = self.copy()\n obj._coord_names.update(names)\n return obj\n\n def reset_coords(\n self,\n names: \"Union[Hashable, Iterable[Hashable], None]\" = None,\n drop: bool = False,\n inplace: bool = None,\n ) -> \"Dataset\":\n \"\"\"Given names of coordinates, reset them to become variables\n\n Parameters\n ----------\n names : hashable or iterable of hashables, optional\n Name(s) of non-index coordinates in this dataset to reset into\n variables. 
By default, all non-index coordinates are reset.\n drop : bool, optional\n If True, remove coordinates instead of converting them into\n variables.\n\n Returns\n -------\n Dataset\n \"\"\"\n _check_inplace(inplace)\n if names is None:\n names = self._coord_names - set(self.dims)\n else:\n if isinstance(names, str) or not isinstance(names, Iterable):\n names = [names]\n else:\n names = list(names)\n self._assert_all_in_dataset(names)\n bad_coords = set(names) & set(self.dims)\n if bad_coords:\n raise ValueError(\n \"cannot remove index coordinates with reset_coords: %s\" % bad_coords\n )\n obj = self.copy()\n obj._coord_names.difference_update(names)\n if drop:\n for name in names:\n del obj._variables[name]\n return obj\n\n def dump_to_store(self, store: \"AbstractDataStore\", **kwargs) -> None:\n \"\"\"Store dataset contents to a backends.*DataStore object.\n \"\"\"\n from ..backends.api import dump_to_store\n\n # TODO: rename and/or cleanup this method to make it more consistent\n # with to_netcdf()\n dump_to_store(self, store, **kwargs)\n\n def to_netcdf(\n self,\n path=None,\n mode: str = \"w\",\n format: str = None,\n group: str = None,\n engine: str = None,\n encoding: Mapping = None,\n unlimited_dims: Iterable[Hashable] = None,\n compute: bool = True,\n invalid_netcdf: bool = False,\n ) -> Union[bytes, \"Delayed\", None]:\n \"\"\"Write dataset contents to a netCDF file.\n\n Parameters\n ----------\n path : str, Path or file-like object, optional\n Path to which to save this dataset. File-like objects are only\n supported by the scipy engine. If no path is provided, this\n function returns the resulting netCDF file as bytes; in this case,\n we need to use scipy, which does not support netCDF version 4 (the\n default format becomes NETCDF3_64BIT).\n mode : {'w', 'a'}, optional\n Write ('w') or append ('a') mode. If mode='w', any existing file at\n this location will be overwritten. If mode='a', existing variables\n will be overwritten.\n format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT',\n 'NETCDF3_CLASSIC'}, optional\n File format for the resulting netCDF file:\n\n * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API\n features.\n * NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only\n netCDF 3 compatible API features.\n * NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,\n which fully supports 2+ GB files, but is only compatible with\n clients linked against netCDF version 3.6.0 or later.\n * NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not\n handle 2+ GB files very well.\n\n All formats are supported by the netCDF4-python library.\n scipy.io.netcdf only supports the last two formats.\n\n The default format is NETCDF4 if you are saving a file to disk and\n have the netCDF4-python library available. Otherwise, xarray falls\n back to using scipy to write netCDF files and defaults to the\n NETCDF3_64BIT format (scipy does not support netCDF4).\n group : str, optional\n Path to the netCDF4 group in the given file to open (only works for\n format='NETCDF4'). The group(s) will be created if necessary.\n engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional\n Engine to use when writing netCDF files. 
If not provided, the\n default engine is chosen based on available dependencies, with a\n preference for 'netcdf4' if writing to a file on disk.\n encoding : dict, optional\n Nested dictionary with variable names as keys and dictionaries of\n variable specific encodings as values, e.g.,\n ``{'my_variable': {'dtype': 'int16', 'scale_factor': 0.1,\n 'zlib': True}, ...}``\n\n The `h5netcdf` engine supports both the NetCDF4-style compression\n encoding parameters ``{'zlib': True, 'complevel': 9}`` and the h5py\n ones ``{'compression': 'gzip', 'compression_opts': 9}``.\n This allows using any compression plugin installed in the HDF5\n library, e.g. LZF.\n\n unlimited_dims : iterable of hashable, optional\n Dimension(s) that should be serialized as unlimited dimensions.\n By default, no dimensions are treated as unlimited dimensions.\n Note that unlimited_dims may also be set via\n ``dataset.encoding['unlimited_dims']``.\n compute: boolean\n If true compute immediately, otherwise return a\n ``dask.delayed.Delayed`` object that can be computed later.\n invalid_netcdf: boolean\n Only valid along with engine='h5netcdf'. If True, allow writing\n hdf5 files which are valid netcdf as described in\n https://github.com/shoyer/h5netcdf. Default: False.\n \"\"\"\n if encoding is None:\n encoding = {}\n from ..backends.api import to_netcdf\n\n return to_netcdf(\n self,\n path,\n mode,\n format=format,\n group=group,\n engine=engine,\n encoding=encoding,\n unlimited_dims=unlimited_dims,\n compute=compute,\n invalid_netcdf=invalid_netcdf,\n )\n\n def to_zarr(\n self,\n store: Union[MutableMapping, str, Path] = None,\n mode: str = None,\n synchronizer=None,\n group: str = None,\n encoding: Mapping = None,\n compute: bool = True,\n consolidated: bool = False,\n append_dim: Hashable = None,\n ) -> \"ZarrStore\":\n \"\"\"Write dataset contents to a zarr group.\n\n .. note:: Experimental\n The Zarr backend is new and experimental. Please report any\n unexpected behavior via github issues.\n\n Parameters\n ----------\n store : MutableMapping, str or Path, optional\n Store or path to directory in file system.\n mode : {'w', 'w-', 'a', None}\n Persistence mode: 'w' means create (overwrite if exists);\n 'w-' means create (fail if exists);\n 'a' means append (create if does not exist).\n If ``append_dim`` is set, ``mode`` can be omitted as it is\n internally set to ``'a'``. Otherwise, ``mode`` will default to\n `w-` if not set.\n synchronizer : object, optional\n Array synchronizer\n group : str, optional\n Group path. (a.k.a. 
`path` in zarr terminology.)\n encoding : dict, optional\n Nested dictionary with variable names as keys and dictionaries of\n variable specific encodings as values, e.g.,\n ``{'my_variable': {'dtype': 'int16', 'scale_factor': 0.1,}, ...}``\n compute: bool, optional\n If True compute immediately, otherwise return a\n ``dask.delayed.Delayed`` object that can be computed later.\n consolidated: bool, optional\n If True, apply zarr's `consolidate_metadata` function to the store\n after writing.\n append_dim: hashable, optional\n If set, the dimension on which the data will be appended.\n\n References\n ----------\n https://zarr.readthedocs.io/\n \"\"\"\n if encoding is None:\n encoding = {}\n if (mode == \"a\") or (append_dim is not None):\n if mode is None:\n mode = \"a\"\n elif mode != \"a\":\n raise ValueError(\n \"append_dim was set along with mode='{}', either set \"\n \"mode='a' or don't set it.\".format(mode)\n )\n elif mode is None:\n mode = \"w-\"\n if mode not in [\"w\", \"w-\", \"a\"]:\n # TODO: figure out how to handle 'r+'\n raise ValueError(\n \"The only supported options for mode are 'w',\" \"'w-' and 'a'.\"\n )\n from ..backends.api import to_zarr\n\n return to_zarr(\n self,\n store=store,\n mode=mode,\n synchronizer=synchronizer,\n group=group,\n encoding=encoding,\n compute=compute,\n consolidated=consolidated,\n append_dim=append_dim,\n )\n\n def __repr__(self) -> str:\n return formatting.dataset_repr(self)\n\n def info(self, buf=None) -> None:\n \"\"\"\n Concise summary of a Dataset variables and attributes.\n\n Parameters\n ----------\n buf : writable buffer, defaults to sys.stdout\n\n See Also\n --------\n pandas.DataFrame.assign\n ncdump: netCDF's ncdump\n \"\"\"\n if buf is None: # pragma: no cover\n buf = sys.stdout\n\n lines = []\n lines.append(\"xarray.Dataset {\")\n lines.append(\"dimensions:\")\n for name, size in self.dims.items():\n lines.append(\"\\t{name} = {size} ;\".format(name=name, size=size))\n lines.append(\"\\nvariables:\")\n for name, da in self.variables.items():\n dims = \", \".join(da.dims)\n lines.append(\n \"\\t{type} {name}({dims}) ;\".format(type=da.dtype, name=name, dims=dims)\n )\n for k, v in da.attrs.items():\n lines.append(\"\\t\\t{name}:{k} = {v} ;\".format(name=name, k=k, v=v))\n lines.append(\"\\n// global attributes:\")\n for k, v in self.attrs.items():\n lines.append(\"\\t:{k} = {v} ;\".format(k=k, v=v))\n lines.append(\"}\")\n\n buf.write(\"\\n\".join(lines))\n\n @property\n def chunks(self) -> Mapping[Hashable, Tuple[int, ...]]:\n \"\"\"Block dimensions for this dataset's data or None if it's not a dask\n array.\n \"\"\"\n chunks: Dict[Hashable, Tuple[int, ...]] = {}\n for v in self.variables.values():\n if v.chunks is not None:\n for dim, c in zip(v.dims, v.chunks):\n if dim in chunks and c != chunks[dim]:\n raise ValueError(\n f\"Object has inconsistent chunks along dimension {dim}. \"\n \"This can be fixed by calling unify_chunks().\"\n )\n chunks[dim] = c\n return Frozen(SortedKeysDict(chunks))\n\n def chunk(\n self,\n chunks: Union[\n None, Number, Mapping[Hashable, Union[None, Number, Tuple[Number, ...]]]\n ] = None,\n name_prefix: str = \"xarray-\",\n token: str = None,\n lock: bool = False,\n ) -> \"Dataset\":\n \"\"\"Coerce all arrays in this dataset into dask arrays with the given\n chunks.\n\n Non-dask arrays in this dataset will be converted to dask arrays. 
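For example, a minimal sketch (requires dask; the dimension name ``'x'`` is only an assumption about ``ds``):\n\n >>> chunked = ds.chunk(100) # blocks of at most 100 elements along every dimension\n >>> rechunked = chunked.chunk({'x': 10}) # rechunk only along ``'x'``\n\n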
Dask\n arrays will be rechunked to the given chunk sizes.\n\n If neither chunks is not provided for one or more dimensions, chunk\n sizes along that dimension will not be updated; non-dask arrays will be\n converted into dask arrays with a single block.\n\n Parameters\n ----------\n chunks : int or mapping, optional\n Chunk sizes along each dimension, e.g., ``5`` or\n ``{'x': 5, 'y': 5}``.\n name_prefix : str, optional\n Prefix for the name of any new dask arrays.\n token : str, optional\n Token uniquely identifying this dataset.\n lock : optional\n Passed on to :py:func:`dask.array.from_array`, if the array is not\n already as dask array.\n\n Returns\n -------\n chunked : xarray.Dataset\n \"\"\"\n from dask.base import tokenize\n\n if isinstance(chunks, Number):\n chunks = dict.fromkeys(self.dims, chunks)\n\n if chunks is not None:\n bad_dims = chunks.keys() - self.dims.keys()\n if bad_dims:\n raise ValueError(\n \"some chunks keys are not dimensions on this \"\n \"object: %s\" % bad_dims\n )\n\n def selkeys(dict_, keys):\n if dict_ is None:\n return None\n return {d: dict_[d] for d in keys if d in dict_}\n\n def maybe_chunk(name, var, chunks):\n chunks = selkeys(chunks, var.dims)\n if not chunks:\n chunks = None\n if var.ndim > 0:\n token2 = tokenize(name, token if token else var._data)\n name2 = \"%s%s-%s\" % (name_prefix, name, token2)\n return var.chunk(chunks, name=name2, lock=lock)\n else:\n return var\n\n variables = {k: maybe_chunk(k, v, chunks) for k, v in self.variables.items()}\n return self._replace(variables)\n\n def _validate_indexers(\n self, indexers: Mapping[Hashable, Any]\n ) -> Iterator[Tuple[Hashable, Union[int, slice, np.ndarray, Variable]]]:\n \"\"\" Here we make sure\n + indexer has a valid keys\n + indexer is in a valid data type\n + string indexers are cast to the appropriate date type if the\n associated index is a DatetimeIndex or CFTimeIndex\n \"\"\"\n from .dataarray import DataArray\n\n invalid = indexers.keys() - self.dims.keys()\n if invalid:\n raise ValueError(\"dimensions %r do not exist\" % invalid)\n\n # all indexers should be int, slice, np.ndarrays, or Variable\n for k, v in indexers.items():\n if isinstance(v, (int, slice, Variable)):\n yield k, v\n elif isinstance(v, DataArray):\n yield k, v.variable\n elif isinstance(v, tuple):\n yield k, as_variable(v)\n elif isinstance(v, Dataset):\n raise TypeError(\"cannot use a Dataset as an indexer\")\n elif isinstance(v, Sequence) and len(v) == 0:\n yield k, np.empty((0,), dtype=\"int64\")\n else:\n v = np.asarray(v)\n\n if v.dtype.kind in \"US\":\n index = self.indexes[k]\n if isinstance(index, pd.DatetimeIndex):\n v = v.astype(\"datetime64[ns]\")\n elif isinstance(index, xr.CFTimeIndex):\n v = _parse_array_of_cftime_strings(v, index.date_type)\n\n if v.ndim > 1:\n raise IndexError(\n \"Unlabeled multi-dimensional array cannot be \"\n \"used for indexing: {}\".format(k)\n )\n yield k, v\n\n def _validate_interp_indexers(\n self, indexers: Mapping[Hashable, Any]\n ) -> Iterator[Tuple[Hashable, Variable]]:\n \"\"\"Variant of _validate_indexers to be used for interpolation\n \"\"\"\n for k, v in self._validate_indexers(indexers):\n if isinstance(v, Variable):\n if v.ndim == 1:\n yield k, v.to_index_variable()\n else:\n yield k, v\n elif isinstance(v, int):\n yield k, Variable((), v)\n elif isinstance(v, np.ndarray):\n if v.ndim == 0:\n yield k, Variable((), v)\n elif v.ndim == 1:\n yield k, IndexVariable((k,), v)\n else:\n raise AssertionError() # Already tested by _validate_indexers\n else:\n raise 
TypeError(type(v))\n\n def _get_indexers_coords_and_indexes(self, indexers):\n \"\"\"Extract coordinates and indexes from indexers.\n\n Only coordinate with a name different from any of self.variables will\n be attached.\n \"\"\"\n from .dataarray import DataArray\n\n coords_list = []\n for k, v in indexers.items():\n if isinstance(v, DataArray):\n if v.dtype.kind == \"b\":\n if v.ndim != 1: # we only support 1-d boolean array\n raise ValueError(\n \"{:d}d-boolean array is used for indexing along \"\n \"dimension {!r}, but only 1d boolean arrays are \"\n \"supported.\".format(v.ndim, k)\n )\n # Make sure in case of boolean DataArray, its\n # coordinate also should be indexed.\n v_coords = v[v.values.nonzero()[0]].coords\n else:\n v_coords = v.coords\n coords_list.append(v_coords)\n\n # we don't need to call align() explicitly or check indexes for\n # alignment, because merge_variables already checks for exact alignment\n # between dimension coordinates\n coords, indexes = merge_coordinates_without_align(coords_list)\n assert_coordinate_consistent(self, coords)\n\n # silently drop the conflicted variables.\n attached_coords = {k: v for k, v in coords.items() if k not in self._variables}\n attached_indexes = {\n k: v for k, v in indexes.items() if k not in self._variables\n }\n return attached_coords, attached_indexes\n\n def isel(\n self,\n indexers: Mapping[Hashable, Any] = None,\n drop: bool = False,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Returns a new dataset with each array indexed along the specified\n dimension(s).\n\n This method selects values from each array using its `__getitem__`\n method, except this method does not require knowing the order of\n each array's dimensions.\n\n Parameters\n ----------\n indexers : dict, optional\n A dict with keys matching dimensions and values given\n by integers, slice objects or arrays.\n indexer can be a integer, slice, array-like or DataArray.\n If DataArrays are passed as indexers, xarray-style indexing will be\n carried out. 
See :ref:`indexing` for the details.\n One of indexers or indexers_kwargs must be provided.\n drop : bool, optional\n If ``drop=True``, drop coordinates variables indexed by integers\n instead of making them scalar.\n **indexers_kwarg : {dim: indexer, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n Returns\n -------\n obj : Dataset\n A new Dataset with the same contents as this dataset, except each\n array and dimension is indexed by the appropriate indexers.\n If indexer DataArrays have coordinates that do not conflict with\n this object, then these coordinates will be attached.\n In general, each array's data will be a view of the array's data\n in this dataset, unless vectorized indexing was triggered by using\n an array indexer, in which case the data will be a copy.\n\n See Also\n --------\n Dataset.sel\n DataArray.isel\n \"\"\"\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"isel\")\n # Note: we need to preserve the original indexers variable in order to merge the\n # coords below\n indexers_list = list(self._validate_indexers(indexers))\n\n variables: Dict[Hashable, Variable] = {}\n indexes: Dict[Hashable, pd.Index] = {}\n\n for name, var in self.variables.items():\n var_indexers = {k: v for k, v in indexers_list if k in var.dims}\n if drop and name in var_indexers:\n continue # drop this variable\n\n if name in self.indexes:\n new_var, new_index = isel_variable_and_index(\n name, var, self.indexes[name], var_indexers\n )\n if new_index is not None:\n indexes[name] = new_index\n elif var_indexers:\n new_var = var.isel(indexers=var_indexers)\n else:\n new_var = var.copy(deep=False)\n\n variables[name] = new_var\n\n coord_names = self._coord_names & variables.keys()\n selected = self._replace_with_new_dims(variables, coord_names, indexes)\n\n # Extract coordinates from indexers\n coord_vars, new_indexes = selected._get_indexers_coords_and_indexes(indexers)\n variables.update(coord_vars)\n indexes.update(new_indexes)\n coord_names = self._coord_names & variables.keys() | coord_vars.keys()\n return self._replace_with_new_dims(variables, coord_names, indexes=indexes)\n\n def sel(\n self,\n indexers: Mapping[Hashable, Any] = None,\n method: str = None,\n tolerance: Number = None,\n drop: bool = False,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Returns a new dataset with each array indexed by tick labels\n along the specified dimension(s).\n\n In contrast to `Dataset.isel`, indexers for this method should use\n labels instead of integers.\n\n Under the hood, this method is powered by using pandas's powerful Index\n objects. This makes label based indexing essentially just as fast as\n using integer indexing.\n\n It also means this method uses pandas's (well documented) logic for\n indexing. This means you can use string shortcuts for datetime indexes\n (e.g., '2000-01' to select all values in January 2000). It also means\n that slices are treated as inclusive of both the start and stop values,\n unlike normal Python indexing.\n\n Parameters\n ----------\n indexers : dict, optional\n A dict with keys matching dimensions and values given\n by scalars, slices or arrays of tick labels. For dimensions with\n multi-index, the indexer may also be a dict-like object with keys\n matching index level names.\n If DataArrays are passed as indexers, xarray-style indexing will be\n carried out. 
See :ref:`indexing` for the details.\n One of indexers or indexers_kwargs must be provided.\n method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional\n Method to use for inexact matches:\n\n * None (default): only exact matches\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n drop : bool, optional\n If ``drop=True``, drop coordinates variables in `indexers` instead\n of making them scalar.\n **indexers_kwarg : {dim: indexer, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n Returns\n -------\n obj : Dataset\n A new Dataset with the same contents as this dataset, except each\n variable and dimension is indexed by the appropriate indexers.\n If indexer DataArrays have coordinates that do not conflict with\n this object, then these coordinates will be attached.\n In general, each array's data will be a view of the array's data\n in this dataset, unless vectorized indexing was triggered by using\n an array indexer, in which case the data will be a copy.\n\n\n See Also\n --------\n Dataset.isel\n DataArray.sel\n \"\"\"\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"sel\")\n pos_indexers, new_indexes = remap_label_indexers(\n self, indexers=indexers, method=method, tolerance=tolerance\n )\n result = self.isel(indexers=pos_indexers, drop=drop)\n return result._overwrite_indexes(new_indexes)\n\n def head(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Returns a new dataset with the first `n` values of each array\n for the specified dimension(s).\n\n Parameters\n ----------\n indexers : dict or int, default: 5\n A dict with keys matching dimensions and integer values `n`\n or a single integer `n` applied over all dimensions.\n One of indexers or indexers_kwargs must be provided.\n **indexers_kwargs : {dim: n, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n\n See Also\n --------\n Dataset.tail\n Dataset.thin\n DataArray.head\n \"\"\"\n if not indexers_kwargs:\n if indexers is None:\n indexers = 5\n if not isinstance(indexers, int) and not is_dict_like(indexers):\n raise TypeError(\"indexers must be either dict-like or a single integer\")\n if isinstance(indexers, int):\n indexers = {dim: indexers for dim in self.dims}\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"head\")\n for k, v in indexers.items():\n if not isinstance(v, int):\n raise TypeError(\n \"expected integer type indexer for \"\n \"dimension %r, found %r\" % (k, type(v))\n )\n elif v < 0:\n raise ValueError(\n \"expected positive integer as indexer \"\n \"for dimension %r, found %s\" % (k, v)\n )\n indexers_slices = {k: slice(val) for k, val in indexers.items()}\n return self.isel(indexers_slices)\n\n def tail(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Returns a new dataset with the last `n` values of each array\n for the specified dimension(s).\n\n Parameters\n ----------\n indexers : dict or int, default: 5\n A dict with keys matching dimensions and integer values `n`\n 
or a single integer `n` applied over all dimensions.\n One of indexers or indexers_kwargs must be provided.\n **indexers_kwargs : {dim: n, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n\n See Also\n --------\n Dataset.head\n Dataset.thin\n DataArray.tail\n \"\"\"\n if not indexers_kwargs:\n if indexers is None:\n indexers = 5\n if not isinstance(indexers, int) and not is_dict_like(indexers):\n raise TypeError(\"indexers must be either dict-like or a single integer\")\n if isinstance(indexers, int):\n indexers = {dim: indexers for dim in self.dims}\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"tail\")\n for k, v in indexers.items():\n if not isinstance(v, int):\n raise TypeError(\n \"expected integer type indexer for \"\n \"dimension %r, found %r\" % (k, type(v))\n )\n elif v < 0:\n raise ValueError(\n \"expected positive integer as indexer \"\n \"for dimension %r, found %s\" % (k, v)\n )\n indexers_slices = {\n k: slice(-val, None) if val != 0 else slice(val)\n for k, val in indexers.items()\n }\n return self.isel(indexers_slices)\n\n def thin(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Returns a new dataset with each array indexed along every `n`th\n value for the specified dimension(s)\n\n Parameters\n ----------\n indexers : dict or int, default: 5\n A dict with keys matching dimensions and integer values `n`\n or a single integer `n` applied over all dimensions.\n One of indexers or indexers_kwargs must be provided.\n **indexers_kwargs : {dim: n, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n\n See Also\n --------\n Dataset.head\n Dataset.tail\n DataArray.thin\n \"\"\"\n if (\n not indexers_kwargs\n and not isinstance(indexers, int)\n and not is_dict_like(indexers)\n ):\n raise TypeError(\"indexers must be either dict-like or a single integer\")\n if isinstance(indexers, int):\n indexers = {dim: indexers for dim in self.dims}\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"thin\")\n for k, v in indexers.items():\n if not isinstance(v, int):\n raise TypeError(\n \"expected integer type indexer for \"\n \"dimension %r, found %r\" % (k, type(v))\n )\n elif v < 0:\n raise ValueError(\n \"expected positive integer as indexer \"\n \"for dimension %r, found %s\" % (k, v)\n )\n elif v == 0:\n raise ValueError(\"step cannot be zero\")\n indexers_slices = {k: slice(None, None, val) for k, val in indexers.items()}\n return self.isel(indexers_slices)\n\n def broadcast_like(\n self, other: Union[\"Dataset\", \"DataArray\"], exclude: Iterable[Hashable] = None\n ) -> \"Dataset\":\n \"\"\"Broadcast this DataArray against another Dataset or DataArray.\n This is equivalent to xr.broadcast(other, self)[1]\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object against which to broadcast this array.\n exclude : iterable of hashable, optional\n Dimensions that must not be broadcasted\n\n \"\"\"\n if exclude is None:\n exclude = set()\n else:\n exclude = set(exclude)\n args = align(other, self, join=\"outer\", copy=False, exclude=exclude)\n\n dims_map, common_coords = _get_broadcast_dims_map_common_coords(args, exclude)\n\n return _broadcast_helper(args[1], exclude, dims_map, common_coords)\n\n def reindex_like(\n self,\n other: Union[\"Dataset\", \"DataArray\"],\n method: str = None,\n tolerance: Number = None,\n copy: bool = True,\n fill_value: 
Any = dtypes.NA,\n ) -> \"Dataset\":\n \"\"\"Conform this object onto the indexes of another object, filling in\n missing values with ``fill_value``. The default fill value is NaN.\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object with an 'indexes' attribute giving a mapping from dimension\n names to pandas.Index objects, which provides coordinates upon\n which to index the variables in this dataset. The indexes on this\n other object need not be the same as the indexes on this\n dataset. Any mis-matched index values will be filled in with\n NaN, and any mis-matched dimension names will simply be ignored.\n method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional\n Method to use for filling index values from other not found in this\n dataset:\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n copy : bool, optional\n If ``copy=True``, data in the return value is always copied. If\n ``copy=False`` and reindexing is unnecessary, or can be performed\n with only slice operations, then the output may share memory with\n the input. In either case, a new xarray object is always returned.\n fill_value : scalar, optional\n Value to use for newly missing values\n\n Returns\n -------\n reindexed : Dataset\n Another dataset, with this dataset's data but coordinates from the\n other object.\n\n See Also\n --------\n Dataset.reindex\n align\n \"\"\"\n indexers = alignment.reindex_like_indexers(self, other)\n return self.reindex(\n indexers=indexers,\n method=method,\n copy=copy,\n fill_value=fill_value,\n tolerance=tolerance,\n )\n\n def reindex(\n self,\n indexers: Mapping[Hashable, Any] = None,\n method: str = None,\n tolerance: Number = None,\n copy: bool = True,\n fill_value: Any = dtypes.NA,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Conform this object onto a new set of indexes, filling in\n missing values with ``fill_value``. The default fill value is NaN.\n\n Parameters\n ----------\n indexers : dict. optional\n Dictionary with keys given by dimension names and values given by\n arrays of coordinates tick labels. Any mis-matched coordinate\n values will be filled in with NaN, and any mis-matched dimension\n names will simply be ignored.\n One of indexers or indexers_kwargs must be provided.\n method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional\n Method to use for filling index values in ``indexers`` not found in\n this dataset:\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n copy : bool, optional\n If ``copy=True``, data in the return value is always copied. If\n ``copy=False`` and reindexing is unnecessary, or can be performed\n with only slice operations, then the output may share memory with\n the input. 
In either case, a new xarray object is always returned.\n fill_value : scalar, optional\n Value to use for newly missing values\n **indexers_kwarg : {dim: indexer, ...}, optional\n Keyword arguments in the same form as ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n Returns\n -------\n reindexed : Dataset\n Another dataset, with this dataset's data but replaced coordinates.\n\n See Also\n --------\n Dataset.reindex_like\n align\n pandas.Index.get_indexer\n\n Examples\n --------\n\n Create a dataset with some fictional data.\n\n >>> import xarray as xr\n >>> import pandas as pd\n >>> x = xr.Dataset(\n ... {\n ... \"temperature\": (\"station\", 20 * np.random.rand(4)),\n ... \"pressure\": (\"station\", 500 * np.random.rand(4))\n ... },\n ... coords={\"station\": [\"boston\", \"nyc\", \"seattle\", \"denver\"]})\n >>> x\n \n Dimensions: (station: 4)\n Coordinates:\n * station (station) >> x.indexes\n station: Index(['boston', 'nyc', 'seattle', 'denver'], dtype='object', name='station')\n\n Create a new index and reindex the dataset. By default values in the new index that\n do not have corresponding records in the dataset are assigned `NaN`.\n\n >>> new_index = ['boston', 'austin', 'seattle', 'lincoln']\n >>> x.reindex({'station': new_index})\n \n Dimensions: (station: 4)\n Coordinates:\n * station (station) object 'boston' 'austin' 'seattle' 'lincoln'\n Data variables:\n temperature (station) float64 18.84 nan 19.22 nan\n pressure (station) float64 324.1 nan 122.8 nan\n\n We can fill in the missing values by passing a value to the keyword `fill_value`.\n\n >>> x.reindex({'station': new_index}, fill_value=0)\n \n Dimensions: (station: 4)\n Coordinates:\n * station (station) object 'boston' 'austin' 'seattle' 'lincoln'\n Data variables:\n temperature (station) float64 18.84 0.0 19.22 0.0\n pressure (station) float64 324.1 0.0 122.8 0.0\n\n Because the index is not monotonically increasing or decreasing, we cannot use arguments\n to the keyword method to fill the `NaN` values.\n\n >>> x.reindex({'station': new_index}, method='nearest')\n Traceback (most recent call last):\n ...\n raise ValueError('index must be monotonic increasing or decreasing')\n ValueError: index must be monotonic increasing or decreasing\n\n To further illustrate the filling functionality in reindex, we will create a\n dataset with a monotonically increasing index (for example, a sequence of dates).\n\n >>> x2 = xr.Dataset(\n ... {\n ... \"temperature\": (\"time\", [15.57, 12.77, np.nan, 0.3081, 16.59, 15.12]),\n ... \"pressure\": (\"time\", 500 * np.random.rand(6))\n ... },\n ... coords={\"time\": pd.date_range('01/01/2019', periods=6, freq='D')})\n >>> x2\n \n Dimensions: (time: 6)\n Coordinates:\n * time (time) datetime64[ns] 2019-01-01 2019-01-02 ... 2019-01-06\n Data variables:\n temperature (time) float64 15.57 12.77 nan 0.3081 16.59 15.12\n pressure (time) float64 103.4 122.7 452.0 444.0 399.2 486.0\n\n Suppose we decide to expand the dataset to cover a wider date range.\n\n >>> time_index2 = pd.date_range('12/29/2018', periods=10, freq='D')\n >>> x2.reindex({'time': time_index2})\n \n Dimensions: (time: 10)\n Coordinates:\n * time (time) datetime64[ns] 2018-12-29 2018-12-30 ... 2019-01-07\n Data variables:\n temperature (time) float64 nan nan nan 15.57 ... 0.3081 16.59 15.12 nan\n pressure (time) float64 nan nan nan 103.4 ... 444.0 399.2 486.0 nan\n\n The index entries that did not have a value in the original data frame (for example, `2018-12-29`)\n are by default filled with NaN. 
If desired, we can fill in the missing values using one of several options.\n\n For example, to back-propagate the last valid value to fill the `NaN` values,\n pass `bfill` as an argument to the `method` keyword.\n\n >>> x3 = x2.reindex({'time': time_index2}, method='bfill')\n >>> x3\n \n Dimensions: (time: 10)\n Coordinates:\n * time (time) datetime64[ns] 2018-12-29 2018-12-30 ... 2019-01-07\n Data variables:\n temperature (time) float64 15.57 15.57 15.57 15.57 ... 16.59 15.12 nan\n pressure (time) float64 103.4 103.4 103.4 103.4 ... 399.2 486.0 nan\n\n Please note that the `NaN` value present in the original dataset (at index value `2019-01-03`)\n will not be filled by any of the value propagation schemes.\n\n >>> x2.where(x2.temperature.isnull(), drop=True)\n \n Dimensions: (time: 1)\n Coordinates:\n * time (time) datetime64[ns] 2019-01-03\n Data variables:\n temperature (time) float64 nan\n pressure (time) float64 452.0\n >>> x3.where(x3.temperature.isnull(), drop=True)\n \n Dimensions: (time: 2)\n Coordinates:\n * time (time) datetime64[ns] 2019-01-03 2019-01-07\n Data variables:\n temperature (time) float64 nan nan\n pressure (time) float64 452.0 nan\n\n This is because filling while reindexing does not look at dataset values, but only compares\n the original and desired indexes. If you do want to fill in the `NaN` values present in the\n original dataset, use the :py:meth:`~Dataset.fillna()` method.\n\n \"\"\"\n indexers = utils.either_dict_or_kwargs(indexers, indexers_kwargs, \"reindex\")\n\n bad_dims = [d for d in indexers if d not in self.dims]\n if bad_dims:\n raise ValueError(\"invalid reindex dimensions: %s\" % bad_dims)\n\n variables, indexes = alignment.reindex_variables(\n self.variables,\n self.sizes,\n self.indexes,\n indexers,\n method,\n tolerance,\n copy=copy,\n fill_value=fill_value,\n )\n coord_names = set(self._coord_names)\n coord_names.update(indexers)\n return self._replace_with_new_dims(variables, coord_names, indexes=indexes)\n\n def interp(\n self,\n coords: Mapping[Hashable, Any] = None,\n method: str = \"linear\",\n assume_sorted: bool = False,\n kwargs: Mapping[str, Any] = None,\n **coords_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\" Multidimensional interpolation of Dataset.\n\n Parameters\n ----------\n coords : dict, optional\n Mapping from dimension names to the new coordinates.\n New coordinate can be a scalar, array-like or DataArray.\n If DataArrays are passed as new coordates, their dimensions are\n used for the broadcasting.\n method: string, optional.\n {'linear', 'nearest'} for multidimensional array,\n {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}\n for 1-dimensional array. 'linear' is used by default.\n assume_sorted: boolean, optional\n If False, values of coordinates that are interpolated over can be\n in any order and they are sorted first. If True, interpolated\n coordinates are assumed to be an array of monotonically increasing\n values.\n kwargs: dictionary, optional\n Additional keyword passed to scipy's interpolator.\n **coords_kwarg : {dim: coordinate, ...}, optional\n The keyword arguments form of ``coords``.\n One of coords or coords_kwargs must be provided.\n\n Returns\n -------\n interpolated: xr.Dataset\n New dataset on the new coordinates.\n\n Notes\n -----\n scipy is required.\n\n See Also\n --------\n scipy.interpolate.interp1d\n scipy.interpolate.interpn\n \"\"\"\n from . 
import missing\n\n if kwargs is None:\n kwargs = {}\n\n coords = either_dict_or_kwargs(coords, coords_kwargs, \"interp\")\n indexers = dict(self._validate_interp_indexers(coords))\n\n obj = self if assume_sorted else self.sortby([k for k in coords])\n\n def maybe_variable(obj, k):\n # workaround to get variable for dimension without coordinate.\n try:\n return obj._variables[k]\n except KeyError:\n return as_variable((k, range(obj.dims[k])))\n\n def _validate_interp_indexer(x, new_x):\n # In the case of datetimes, the restrictions placed on indexers\n # used with interp are stronger than those which are placed on\n # isel, so we need an additional check after _validate_indexers.\n if _contains_datetime_like_objects(\n x\n ) and not _contains_datetime_like_objects(new_x):\n raise TypeError(\n \"When interpolating over a datetime-like \"\n \"coordinate, the coordinates to \"\n \"interpolate to must be either datetime \"\n \"strings or datetimes. \"\n \"Instead got\\n{}\".format(new_x)\n )\n return x, new_x\n\n variables: Dict[Hashable, Variable] = {}\n for name, var in obj._variables.items():\n if name in indexers:\n continue\n\n if var.dtype.kind in \"uifc\":\n var_indexers = {\n k: _validate_interp_indexer(maybe_variable(obj, k), v)\n for k, v in indexers.items()\n if k in var.dims\n }\n variables[name] = missing.interp(var, var_indexers, method, **kwargs)\n elif all(d not in indexers for d in var.dims):\n # keep unrelated object array\n variables[name] = var\n\n coord_names = obj._coord_names & variables.keys()\n indexes = {k: v for k, v in obj.indexes.items() if k not in indexers}\n selected = self._replace_with_new_dims(\n variables.copy(), coord_names, indexes=indexes\n )\n\n # attach indexer as coordinate\n variables.update(indexers)\n for k, v in indexers.items():\n assert isinstance(v, Variable)\n if v.dims == (k,):\n indexes[k] = v.to_index()\n\n # Extract coordinates from indexers\n coord_vars, new_indexes = selected._get_indexers_coords_and_indexes(coords)\n variables.update(coord_vars)\n indexes.update(new_indexes)\n\n coord_names = obj._coord_names & variables.keys() | coord_vars.keys()\n return self._replace_with_new_dims(variables, coord_names, indexes=indexes)\n\n def interp_like(\n self,\n other: Union[\"Dataset\", \"DataArray\"],\n method: str = \"linear\",\n assume_sorted: bool = False,\n kwargs: Mapping[str, Any] = None,\n ) -> \"Dataset\":\n \"\"\"Interpolate this object onto the coordinates of another object,\n filling the out of range values with NaN.\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object with an 'indexes' attribute giving a mapping from dimension\n names to an 1d array-like, which provides coordinates upon\n which to index the variables in this dataset.\n method: string, optional.\n {'linear', 'nearest'} for multidimensional array,\n {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}\n for 1-dimensional array. 'linear' is used by default.\n assume_sorted: boolean, optional\n If False, values of coordinates that are interpolated over can be\n in any order and they are sorted first. 
If True, interpolated\n coordinates are assumed to be an array of monotonically increasing\n values.\n kwargs: dictionary, optional\n Additional keyword passed to scipy's interpolator.\n\n Returns\n -------\n interpolated: xr.Dataset\n Another dataset by interpolating this dataset's data along the\n coordinates of the other object.\n\n Notes\n -----\n scipy is required.\n If the dataset has object-type coordinates, reindex is used for these\n coordinates instead of the interpolation.\n\n See Also\n --------\n Dataset.interp\n Dataset.reindex_like\n \"\"\"\n if kwargs is None:\n kwargs = {}\n coords = alignment.reindex_like_indexers(self, other)\n\n numeric_coords: Dict[Hashable, pd.Index] = {}\n object_coords: Dict[Hashable, pd.Index] = {}\n for k, v in coords.items():\n if v.dtype.kind in \"uifcMm\":\n numeric_coords[k] = v\n else:\n object_coords[k] = v\n\n ds = self\n if object_coords:\n # We do not support interpolation along object coordinate.\n # reindex instead.\n ds = self.reindex(object_coords)\n return ds.interp(numeric_coords, method, assume_sorted, kwargs)\n\n # Helper methods for rename()\n def _rename_vars(self, name_dict, dims_dict):\n variables = {}\n coord_names = set()\n for k, v in self.variables.items():\n var = v.copy(deep=False)\n var.dims = tuple(dims_dict.get(dim, dim) for dim in v.dims)\n name = name_dict.get(k, k)\n if name in variables:\n raise ValueError(\"the new name %r conflicts\" % (name,))\n variables[name] = var\n if k in self._coord_names:\n coord_names.add(name)\n return variables, coord_names\n\n def _rename_dims(self, name_dict):\n return {name_dict.get(k, k): v for k, v in self.dims.items()}\n\n def _rename_indexes(self, name_dict, dims_set):\n if self._indexes is None:\n return None\n indexes = {}\n for k, v in self.indexes.items():\n new_name = name_dict.get(k, k)\n if new_name not in dims_set:\n continue\n if isinstance(v, pd.MultiIndex):\n new_names = [name_dict.get(k, k) for k in v.names]\n index = pd.MultiIndex(\n v.levels,\n v.labels,\n v.sortorder,\n names=new_names,\n verify_integrity=False,\n )\n else:\n index = pd.Index(v, name=new_name)\n indexes[new_name] = index\n return indexes\n\n def _rename_all(self, name_dict, dims_dict):\n variables, coord_names = self._rename_vars(name_dict, dims_dict)\n dims = self._rename_dims(dims_dict)\n indexes = self._rename_indexes(name_dict, dims.keys())\n return variables, coord_names, dims, indexes\n\n def rename(\n self,\n name_dict: Mapping[Hashable, Hashable] = None,\n inplace: bool = None,\n **names: Hashable,\n ) -> \"Dataset\":\n \"\"\"Returns a new object with renamed variables and dimensions.\n\n Parameters\n ----------\n name_dict : dict-like, optional\n Dictionary whose keys are current variable or dimension names and\n whose values are the desired names.\n **names, optional\n Keyword form of ``name_dict``.\n One of name_dict or names must be provided.\n\n Returns\n -------\n renamed : Dataset\n Dataset with renamed variables and dimensions.\n\n See Also\n --------\n Dataset.swap_dims\n Dataset.rename_vars\n Dataset.rename_dims\n DataArray.rename\n \"\"\"\n _check_inplace(inplace)\n name_dict = either_dict_or_kwargs(name_dict, names, \"rename\")\n for k in name_dict.keys():\n if k not in self and k not in self.dims:\n raise ValueError(\n \"cannot rename %r because it is not a \"\n \"variable or dimension in this dataset\" % k\n )\n\n variables, coord_names, dims, indexes = self._rename_all(\n name_dict=name_dict, dims_dict=name_dict\n )\n return self._replace(variables, coord_names, dims=dims, 
indexes=indexes)\n\n def rename_dims(\n self, dims_dict: Mapping[Hashable, Hashable] = None, **dims: Hashable\n ) -> \"Dataset\":\n \"\"\"Returns a new object with renamed dimensions only.\n\n Parameters\n ----------\n dims_dict : dict-like, optional\n Dictionary whose keys are current dimension names and\n whose values are the desired names.\n **dims, optional\n Keyword form of ``dims_dict``.\n One of dims_dict or dims must be provided.\n\n Returns\n -------\n renamed : Dataset\n Dataset with renamed dimensions.\n\n See Also\n --------\n Dataset.swap_dims\n Dataset.rename\n Dataset.rename_vars\n DataArray.rename\n \"\"\"\n dims_dict = either_dict_or_kwargs(dims_dict, dims, \"rename_dims\")\n for k in dims_dict:\n if k not in self.dims:\n raise ValueError(\n \"cannot rename %r because it is not a \"\n \"dimension in this dataset\" % k\n )\n\n variables, coord_names, sizes, indexes = self._rename_all(\n name_dict={}, dims_dict=dims_dict\n )\n return self._replace(variables, coord_names, dims=sizes, indexes=indexes)\n\n def rename_vars(\n self, name_dict: Mapping[Hashable, Hashable] = None, **names: Hashable\n ) -> \"Dataset\":\n \"\"\"Returns a new object with renamed variables including coordinates\n\n Parameters\n ----------\n name_dict : dict-like, optional\n Dictionary whose keys are current variable or coordinate names and\n whose values are the desired names.\n **names, optional\n Keyword form of ``name_dict``.\n One of name_dict or names must be provided.\n\n Returns\n -------\n renamed : Dataset\n Dataset with renamed variables including coordinates\n\n See Also\n --------\n Dataset.swap_dims\n Dataset.rename\n Dataset.rename_dims\n DataArray.rename\n \"\"\"\n name_dict = either_dict_or_kwargs(name_dict, names, \"rename_vars\")\n for k in name_dict:\n if k not in self:\n raise ValueError(\n \"cannot rename %r because it is not a \"\n \"variable or coordinate in this dataset\" % k\n )\n variables, coord_names, dims, indexes = self._rename_all(\n name_dict=name_dict, dims_dict={}\n )\n return self._replace(variables, coord_names, dims=dims, indexes=indexes)\n\n def swap_dims(\n self, dims_dict: Mapping[Hashable, Hashable], inplace: bool = None\n ) -> \"Dataset\":\n \"\"\"Returns a new object with swapped dimensions.\n\n Parameters\n ----------\n dims_dict : dict-like\n Dictionary whose keys are current dimension names and whose values\n are new names. Each value must already be a variable in the\n dataset.\n\n Returns\n -------\n swapped : Dataset\n Dataset with swapped dimensions.\n\n Examples\n --------\n >>> ds = xr.Dataset(data_vars={\"a\": (\"x\", [5, 7]), \"b\": (\"x\", [0.1, 2.4])},\n coords={\"x\": [\"a\", \"b\"], \"y\": (\"x\", [0, 1])})\n >>> ds\n \n Dimensions: (x: 2)\n Coordinates:\n * x (x) >> ds.swap_dims({\"x\": \"y\"})\n \n Dimensions: (y: 2)\n Coordinates:\n x (y) \"Dataset\":\n \"\"\"Return a new object with an additional axis (or axes) inserted at\n the corresponding position in the array shape. The new object is a\n view into the underlying array, not a copy.\n\n If dim is already a scalar coordinate, it will be promoted to a 1D\n coordinate consisting of a single value.\n\n Parameters\n ----------\n dim : hashable, sequence of hashable, mapping, or None\n Dimensions to include on the new variable. If provided as hashable\n or sequence of hashable, then dimensions are inserted with length\n 1. 
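# ----------------------------------------------------------------------
# Illustrative usage sketch for the rename family and swap_dims (not part
# of this module; the dataset below is hypothetical).
import xarray as xr

ds_example = xr.Dataset(
    {"a": ("x", [5, 7])}, coords={"x": ["p", "q"], "y": ("x", [0, 1])}
)
renamed = ds_example.rename({"a": "b"})         # rename a data variable
redimmed = ds_example.rename_dims({"x": "x2"})  # rename only the dimension
swapped = ds_example.swap_dims({"x": "y"})      # make coordinate "y" the dimension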
If provided as a mapping, then the keys are the new dimensions\n and the values are either integers (giving the length of the new\n dimensions) or array-like (giving the coordinates of the new\n dimensions).\n axis : integer, sequence of integers, or None\n Axis position(s) where new axis is to be inserted (position(s) on\n the result array). If a list (or tuple) of integers is passed,\n multiple axes are inserted. In this case, dim arguments should be\n same length list. If axis=None is passed, all the axes will be\n inserted to the start of the result array.\n **dim_kwargs : int or sequence/ndarray\n The keywords are arbitrary dimensions being inserted and the values\n are either the lengths of the new dims (if int is given), or their\n coordinates. Note, this is an alternative to passing a dict to the\n dim kwarg and will only be used if dim is None.\n\n Returns\n -------\n expanded : same type as caller\n This object, but with an additional dimension(s).\n \"\"\"\n if dim is None:\n pass\n elif isinstance(dim, Mapping):\n # We're later going to modify dim in place; don't tamper with\n # the input\n dim = dict(dim)\n elif isinstance(dim, int):\n raise TypeError(\n \"dim should be hashable or sequence of hashables or mapping\"\n )\n elif isinstance(dim, str) or not isinstance(dim, Sequence):\n dim = {dim: 1}\n elif isinstance(dim, Sequence):\n if len(dim) != len(set(dim)):\n raise ValueError(\"dims should not contain duplicate values.\")\n dim = {d: 1 for d in dim}\n\n dim = either_dict_or_kwargs(dim, dim_kwargs, \"expand_dims\")\n assert isinstance(dim, MutableMapping)\n\n if axis is None:\n axis = list(range(len(dim)))\n elif not isinstance(axis, Sequence):\n axis = [axis]\n\n if len(dim) != len(axis):\n raise ValueError(\"lengths of dim and axis should be identical.\")\n for d in dim:\n if d in self.dims:\n raise ValueError(\"Dimension {dim} already exists.\".format(dim=d))\n if d in self._variables and not utils.is_scalar(self._variables[d]):\n raise ValueError(\n \"{dim} already exists as coordinate or\"\n \" variable name.\".format(dim=d)\n )\n\n variables: Dict[Hashable, Variable] = {}\n coord_names = self._coord_names.copy()\n # If dim is a dict, then ensure that the values are either integers\n # or iterables.\n for k, v in dim.items():\n if hasattr(v, \"__iter__\"):\n # If the value for the new dimension is an iterable, then\n # save the coordinates to the variables dict, and set the\n # value within the dim dict to the length of the iterable\n # for later use.\n variables[k] = xr.IndexVariable((k,), v)\n coord_names.add(k)\n dim[k] = variables[k].size\n elif isinstance(v, int):\n pass # Do nothing if the dimensions value is just an int\n else:\n raise TypeError(\n \"The value of new dimension {k} must be \"\n \"an iterable or an int\".format(k=k)\n )\n\n for k, v in self._variables.items():\n if k not in dim:\n if k in coord_names: # Do not change coordinates\n variables[k] = v\n else:\n result_ndim = len(v.dims) + len(axis)\n for a in axis:\n if a < -result_ndim or result_ndim - 1 < a:\n raise IndexError(\n \"Axis {a} is out of bounds of the expanded\"\n \" dimension size {dim}.\".format(\n a=a, v=k, dim=result_ndim\n )\n )\n\n axis_pos = [a if a >= 0 else result_ndim + a for a in axis]\n if len(axis_pos) != len(set(axis_pos)):\n raise ValueError(\"axis should not contain duplicate\" \" values.\")\n # We need to sort them to make sure `axis` equals to the\n # axis positions of the result array.\n zip_axis_dim = sorted(zip(axis_pos, dim.items()))\n\n all_dims = 
list(zip(v.dims, v.shape))\n for d, c in zip_axis_dim:\n all_dims.insert(d, c)\n variables[k] = v.set_dims(dict(all_dims))\n else:\n # If dims includes a label of a non-dimension coordinate,\n # it will be promoted to a 1D coordinate with a single value.\n variables[k] = v.set_dims(k).to_index_variable()\n\n new_dims = self._dims.copy()\n new_dims.update(dim)\n\n return self._replace_vars_and_dims(\n variables, dims=new_dims, coord_names=coord_names\n )\n\n def set_index(\n self,\n indexes: Mapping[Hashable, Union[Hashable, Sequence[Hashable]]] = None,\n append: bool = False,\n inplace: bool = None,\n **indexes_kwargs: Union[Hashable, Sequence[Hashable]],\n ) -> \"Dataset\":\n \"\"\"Set Dataset (multi-)indexes using one or more existing coordinates\n or variables.\n\n Parameters\n ----------\n indexes : {dim: index, ...}\n Mapping from names matching dimensions and values given\n by (lists of) the names of existing coordinates or variables to set\n as new (multi-)index.\n append : bool, optional\n If True, append the supplied index(es) to the existing index(es).\n Otherwise replace the existing index(es) (default).\n **indexes_kwargs: optional\n The keyword arguments form of ``indexes``.\n One of indexes or indexes_kwargs must be provided.\n\n Returns\n -------\n obj : Dataset\n Another dataset, with this dataset's data but replaced coordinates.\n\n Examples\n --------\n >>> arr = xr.DataArray(data=np.ones((2, 3)),\n ... dims=['x', 'y'],\n ... coords={'x':\n ... range(2), 'y':\n ... range(3), 'a': ('x', [3, 4])\n ... })\n >>> ds = xr.Dataset({'v': arr})\n >>> ds\n \n Dimensions: (x: 2, y: 3)\n Coordinates:\n * x (x) int64 0 1\n * y (y) int64 0 1 2\n a (x) int64 3 4\n Data variables:\n v (x, y) float64 1.0 1.0 1.0 1.0 1.0 1.0\n >>> ds.set_index(x='a')\n \n Dimensions: (x: 2, y: 3)\n Coordinates:\n * x (x) int64 3 4\n * y (y) int64 0 1 2\n Data variables:\n v (x, y) float64 1.0 1.0 1.0 1.0 1.0 1.0\n\n See Also\n --------\n Dataset.reset_index\n Dataset.swap_dims\n \"\"\"\n _check_inplace(inplace)\n indexes = either_dict_or_kwargs(indexes, indexes_kwargs, \"set_index\")\n variables, coord_names = merge_indexes(\n indexes, self._variables, self._coord_names, append=append\n )\n return self._replace_vars_and_dims(variables, coord_names=coord_names)\n\n def reset_index(\n self,\n dims_or_levels: Union[Hashable, Sequence[Hashable]],\n drop: bool = False,\n inplace: bool = None,\n ) -> \"Dataset\":\n \"\"\"Reset the specified index(es) or multi-index level(s).\n\n Parameters\n ----------\n dims_or_levels : str or list\n Name(s) of the dimension(s) and/or multi-index level(s) that will\n be reset.\n drop : bool, optional\n If True, remove the specified indexes and/or multi-index levels\n instead of extracting them as new coordinates (default: False).\n\n Returns\n -------\n obj : Dataset\n Another dataset, with this dataset's data but replaced coordinates.\n\n See Also\n --------\n Dataset.set_index\n \"\"\"\n _check_inplace(inplace)\n variables, coord_names = split_indexes(\n dims_or_levels,\n self._variables,\n self._coord_names,\n cast(Mapping[Hashable, Hashable], self._level_coords),\n drop=drop,\n )\n return self._replace_vars_and_dims(variables, coord_names=coord_names)\n\n def reorder_levels(\n self,\n dim_order: Mapping[Hashable, Sequence[int]] = None,\n inplace: bool = None,\n **dim_order_kwargs: Sequence[int],\n ) -> \"Dataset\":\n \"\"\"Rearrange index levels using input order.\n\n Parameters\n ----------\n dim_order : optional\n Mapping from names matching dimensions and values given\n 
by lists representing new level orders. Every given dimension\n must have a multi-index.\n **dim_order_kwargs: optional\n The keyword arguments form of ``dim_order``.\n One of dim_order or dim_order_kwargs must be provided.\n\n Returns\n -------\n obj : Dataset\n Another dataset, with this dataset's data but replaced\n coordinates.\n \"\"\"\n _check_inplace(inplace)\n dim_order = either_dict_or_kwargs(dim_order, dim_order_kwargs, \"reorder_levels\")\n variables = self._variables.copy()\n indexes = dict(self.indexes)\n for dim, order in dim_order.items():\n coord = self._variables[dim]\n index = self.indexes[dim]\n if not isinstance(index, pd.MultiIndex):\n raise ValueError(\"coordinate %r has no MultiIndex\" % dim)\n new_index = index.reorder_levels(order)\n variables[dim] = IndexVariable(coord.dims, new_index)\n indexes[dim] = new_index\n\n return self._replace(variables, indexes=indexes)\n\n def _stack_once(self, dims, new_dim):\n variables = {}\n for name, var in self.variables.items():\n if name not in dims:\n if any(d in var.dims for d in dims):\n add_dims = [d for d in dims if d not in var.dims]\n vdims = list(var.dims) + add_dims\n shape = [self.dims[d] for d in vdims]\n exp_var = var.set_dims(vdims, shape)\n stacked_var = exp_var.stack(**{new_dim: dims})\n variables[name] = stacked_var\n else:\n variables[name] = var.copy(deep=False)\n\n # consider dropping levels that are unused?\n levels = [self.get_index(dim) for dim in dims]\n idx = utils.multiindex_from_product_levels(levels, names=dims)\n variables[new_dim] = IndexVariable(new_dim, idx)\n\n coord_names = set(self._coord_names) - set(dims) | {new_dim}\n\n indexes = {k: v for k, v in self.indexes.items() if k not in dims}\n indexes[new_dim] = idx\n\n return self._replace_with_new_dims(\n variables, coord_names=coord_names, indexes=indexes\n )\n\n def stack(\n self,\n dimensions: Mapping[Hashable, Sequence[Hashable]] = None,\n **dimensions_kwargs: Sequence[Hashable],\n ) -> \"Dataset\":\n \"\"\"\n Stack any number of existing dimensions into a single new dimension.\n\n New dimensions will be added at the end, and the corresponding\n coordinate variables will be combined into a MultiIndex.\n\n Parameters\n ----------\n dimensions : Mapping of the form new_name=(dim1, dim2, ...)\n Names of new dimensions, and the existing dimensions that they\n replace.\n **dimensions_kwargs:\n The keyword arguments form of ``dimensions``.\n One of dimensions or dimensions_kwargs must be provided.\n\n Returns\n -------\n stacked : Dataset\n Dataset with stacked data.\n\n See also\n --------\n Dataset.unstack\n \"\"\"\n dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, \"stack\")\n result = self\n for new_dim, dims in dimensions.items():\n result = result._stack_once(dims, new_dim)\n return result\n\n def to_stacked_array(\n self,\n new_dim: Hashable,\n sample_dims: Sequence[Hashable],\n variable_dim: str = \"variable\",\n name: Hashable = None,\n ) -> \"DataArray\":\n \"\"\"Combine variables of differing dimensionality into a DataArray\n without broadcasting.\n\n This method is similar to Dataset.to_array but does not broadcast the\n variables.\n\n Parameters\n ----------\n new_dim : Hashable\n Name of the new stacked coordinate\n sample_dims : Sequence[Hashable]\n Dimensions that **will not** be stacked. Each array in the dataset\n must share these dimensions. 
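# ----------------------------------------------------------------------
# Illustrative usage sketch for set_index and stack/unstack (not part of
# this module; the dataset below is hypothetical).
import numpy as np
import xarray as xr

ds_example = xr.Dataset(
    {"v": (("x", "y"), np.ones((2, 3)))},
    coords={"x": [0, 1], "y": [10, 20, 30], "a": ("x", [3, 4])},
)
indexed = ds_example.set_index(x="a")     # index dimension "x" by coordinate "a"
stacked = ds_example.stack(z=("x", "y"))  # combine "x" and "y" into MultiIndex "z"
unstacked = stacked.unstack("z")          # invert the stacking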
For machine learning applications,\n these define the dimensions over which samples are drawn.\n variable_dim : str, optional\n Name of the level in the stacked coordinate which corresponds to\n the variables.\n name : str, optional\n Name of the new data array.\n\n Returns\n -------\n stacked : DataArray\n DataArray with the specified dimensions and data variables\n stacked together. The stacked coordinate is named ``new_dim``\n and represented by a MultiIndex object with a level containing the\n data variable names. The name of this level is controlled using\n the ``variable_dim`` argument.\n\n See Also\n --------\n Dataset.to_array\n Dataset.stack\n DataArray.to_unstacked_dataset\n\n Examples\n --------\n >>> data = Dataset(\n ... data_vars={'a': (('x', 'y'), [[0, 1, 2], [3, 4, 5]]),\n ... 'b': ('x', [6, 7])},\n ... coords={'y': ['u', 'v', 'w']}\n ... )\n\n >>> data\n \n Dimensions: (x: 2, y: 3)\n Coordinates:\n * y (y) >> data.to_stacked_array(\"z\", sample_dims=['x'])\n \n array([[0, 1, 2, 6],\n [3, 4, 5, 7]])\n Coordinates:\n * z (z) MultiIndex\n - variable (z) object 'a' 'a' 'a' 'b'\n - y (z) object 'u' 'v' 'w' nan\n Dimensions without coordinates: x\n\n \"\"\"\n stacking_dims = tuple(dim for dim in self.dims if dim not in sample_dims)\n\n for variable in self:\n dims = self[variable].dims\n dims_include_sample_dims = set(sample_dims) <= set(dims)\n if not dims_include_sample_dims:\n raise ValueError(\n \"All variables in the dataset must contain the \"\n \"dimensions {}.\".format(dims)\n )\n\n def ensure_stackable(val):\n assign_coords = {variable_dim: val.name}\n for dim in stacking_dims:\n if dim not in val.dims:\n assign_coords[dim] = None\n\n expand_dims = set(stacking_dims).difference(set(val.dims))\n expand_dims.add(variable_dim)\n # must be list for .expand_dims\n expand_dims = list(expand_dims)\n\n return (\n val.assign_coords(**assign_coords)\n .expand_dims(expand_dims)\n .stack({new_dim: (variable_dim,) + stacking_dims})\n )\n\n # concatenate the arrays\n stackable_vars = [ensure_stackable(self[key]) for key in self.data_vars]\n data_array = xr.concat(stackable_vars, dim=new_dim)\n\n # coerce the levels of the MultiIndex to have the same type as the\n # input dimensions. 
This code is messy, so it might be better to just\n # input a dummy value for the singleton dimension.\n idx = data_array.indexes[new_dim]\n levels = [idx.levels[0]] + [\n level.astype(self[level.name].dtype) for level in idx.levels[1:]\n ]\n new_idx = idx.set_levels(levels)\n data_array[new_dim] = IndexVariable(new_dim, new_idx)\n\n if name is not None:\n data_array.name = name\n\n return data_array\n\n def _unstack_once(self, dim: Hashable) -> \"Dataset\":\n index = self.get_index(dim)\n index = index.remove_unused_levels()\n full_idx = pd.MultiIndex.from_product(index.levels, names=index.names)\n\n # take a shortcut in case the MultiIndex was not modified.\n if index.equals(full_idx):\n obj = self\n else:\n obj = self.reindex({dim: full_idx}, copy=False)\n\n new_dim_names = index.names\n new_dim_sizes = [lev.size for lev in index.levels]\n\n variables: Dict[Hashable, Variable] = {}\n indexes = {k: v for k, v in self.indexes.items() if k != dim}\n\n for name, var in obj.variables.items():\n if name != dim:\n if dim in var.dims:\n new_dims = dict(zip(new_dim_names, new_dim_sizes))\n variables[name] = var.unstack({dim: new_dims})\n else:\n variables[name] = var\n\n for name, lev in zip(new_dim_names, index.levels):\n variables[name] = IndexVariable(name, lev)\n indexes[name] = lev\n\n coord_names = set(self._coord_names) - {dim} | set(new_dim_names)\n\n return self._replace_with_new_dims(\n variables, coord_names=coord_names, indexes=indexes\n )\n\n def unstack(self, dim: Union[Hashable, Iterable[Hashable]] = None) -> \"Dataset\":\n \"\"\"\n Unstack existing dimensions corresponding to MultiIndexes into\n multiple new dimensions.\n\n New dimensions will be added at the end.\n\n Parameters\n ----------\n dim : Hashable or iterable of Hashable, optional\n Dimension(s) over which to unstack. By default unstacks all\n MultiIndexes.\n\n Returns\n -------\n unstacked : Dataset\n Dataset with unstacked data.\n\n See also\n --------\n Dataset.stack\n \"\"\"\n if dim is None:\n dims = [\n d for d in self.dims if isinstance(self.get_index(d), pd.MultiIndex)\n ]\n else:\n if isinstance(dim, str) or not isinstance(dim, Iterable):\n dims = [dim]\n else:\n dims = list(dim)\n\n missing_dims = [d for d in dims if d not in self.dims]\n if missing_dims:\n raise ValueError(\n \"Dataset does not contain the dimensions: %s\" % missing_dims\n )\n\n non_multi_dims = [\n d for d in dims if not isinstance(self.get_index(d), pd.MultiIndex)\n ]\n if non_multi_dims:\n raise ValueError(\n \"cannot unstack dimensions that do not \"\n \"have a MultiIndex: %s\" % non_multi_dims\n )\n\n result = self.copy(deep=False)\n for dim in dims:\n result = result._unstack_once(dim)\n return result\n\n def update(self, other: \"CoercibleMapping\", inplace: bool = None) -> \"Dataset\":\n \"\"\"Update this dataset's variables with those from another dataset.\n\n Parameters\n ----------\n other : Dataset or castable to Dataset\n Variables with which to update this dataset. 
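# ----------------------------------------------------------------------
# Illustrative usage sketch for to_stacked_array, mirroring the docstring
# example above (not part of this module).
import xarray as xr

data = xr.Dataset(
    {"a": (("x", "y"), [[0, 1, 2], [3, 4, 5]]), "b": ("x", [6, 7])},
    coords={"y": ["u", "v", "w"]},
)
arr = data.to_stacked_array("z", sample_dims=["x"])  # shape (2, 4): 3 columns from "a", 1 from "b"
roundtrip = arr.to_unstacked_dataset("z")            # recover a Dataset with "a" and "b"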
One of:\n\n - Dataset\n - mapping {var name: DataArray}\n - mapping {var name: Variable}\n - mapping {var name: (dimension name, array-like)}\n - mapping {var name: (tuple of dimension names, array-like)}\n\n\n Returns\n -------\n updated : Dataset\n Updated dataset.\n\n Raises\n ------\n ValueError\n If any dimensions would have inconsistent sizes in the updated\n dataset.\n \"\"\"\n _check_inplace(inplace)\n merge_result = dataset_update_method(self, other)\n return self._replace(inplace=True, **merge_result._asdict())\n\n def merge(\n self,\n other: \"CoercibleMapping\",\n inplace: bool = None,\n overwrite_vars: Union[Hashable, Iterable[Hashable]] = frozenset(),\n compat: str = \"no_conflicts\",\n join: str = \"outer\",\n fill_value: Any = dtypes.NA,\n ) -> \"Dataset\":\n \"\"\"Merge the arrays of two datasets into a single dataset.\n\n This method generally does not allow for overriding data, with the\n exception of attributes, which are ignored on the second dataset.\n Variables with the same name are checked for conflicts via the equals\n or identical methods.\n\n Parameters\n ----------\n other : Dataset or castable to Dataset\n Dataset or variables to merge with this dataset.\n overwrite_vars : Hashable or iterable of Hashable, optional\n If provided, update variables of these name(s) without checking for\n conflicts in this dataset.\n compat : {'broadcast_equals', 'equals', 'identical',\n 'no_conflicts'}, optional\n String indicating how to compare variables of the same name for\n potential conflicts:\n - 'broadcast_equals': all values must be equal when variables are\n broadcast against each other to ensure common dimensions.\n - 'equals': all values and dimensions must be the same.\n - 'identical': all values, dimensions and attributes must be the\n same.\n - 'no_conflicts': only values which are not null in both datasets\n must be equal. 
The returned dataset then contains the combination\n of all non-null values.\n join : {'outer', 'inner', 'left', 'right', 'exact'}, optional\n Method for joining ``self`` and ``other`` along shared dimensions:\n\n - 'outer': use the union of the indexes\n - 'inner': use the intersection of the indexes\n - 'left': use indexes from ``self``\n - 'right': use indexes from ``other``\n - 'exact': error instead of aligning non-equal indexes\n fill_value: scalar, optional\n Value to use for newly missing values\n\n Returns\n -------\n merged : Dataset\n Merged dataset.\n\n Raises\n ------\n MergeError\n If any variables conflict (see ``compat``).\n \"\"\"\n _check_inplace(inplace)\n merge_result = dataset_merge_method(\n self,\n other,\n overwrite_vars=overwrite_vars,\n compat=compat,\n join=join,\n fill_value=fill_value,\n )\n return self._replace(**merge_result._asdict())\n\n def _assert_all_in_dataset(\n self, names: Iterable[Hashable], virtual_okay: bool = False\n ) -> None:\n bad_names = set(names) - set(self._variables)\n if virtual_okay:\n bad_names -= self.virtual_variables\n if bad_names:\n raise ValueError(\n \"One or more of the specified variables \"\n \"cannot be found in this dataset\"\n )\n\n # Drop variables\n @overload # noqa: F811\n def drop(\n self, labels: Union[Hashable, Iterable[Hashable]], *, errors: str = \"raise\"\n ) -> \"Dataset\":\n ...\n\n # Drop index labels along dimension\n @overload # noqa: F811\n def drop(\n self, labels: Any, dim: Hashable, *, errors: str = \"raise\" # array-like\n ) -> \"Dataset\":\n ...\n\n def drop( # noqa: F811\n self, labels=None, dim=None, *, errors=\"raise\", **labels_kwargs\n ):\n \"\"\"Drop variables or index labels from this dataset.\n\n Parameters\n ----------\n labels : hashable or iterable of hashables\n Name(s) of variables or index labels to drop.\n If dim is not None, labels can be any array-like.\n dim : None or hashable, optional\n Dimension along which to drop index labels. By default (if\n ``dim is None``), drops variables rather than index labels.\n errors: {'raise', 'ignore'}, optional\n If 'raise' (default), raises a ValueError error if\n any of the variable or index labels passed are not\n in the dataset. 
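# ----------------------------------------------------------------------
# Illustrative usage sketch for merge and update (not part of this module;
# both datasets below are hypothetical and share the "x" index).
import xarray as xr

ds1 = xr.Dataset({"a": ("x", [1, 2])}, coords={"x": [0, 1]})
ds2 = xr.Dataset({"b": ("x", [3, 4])}, coords={"x": [0, 1]})
merged = ds1.merge(ds2)   # new dataset containing both "a" and "b"
ds3 = ds1.copy()
ds3.update(ds2)           # adds/overwrites variables of ds3 in place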
If 'ignore', any given labels that are in the\n dataset are dropped and no error is raised.\n **labels_kwargs : {dim: label, ...}, optional\n The keyword arguments form of ``dim`` and ``labels``.\n\n Returns\n -------\n dropped : Dataset\n\n Examples\n --------\n >>> data = np.random.randn(2, 3)\n >>> labels = ['a', 'b', 'c']\n >>> ds = xr.Dataset({'A': (['x', 'y'], data), 'y': labels})\n >>> ds.drop(y=['a', 'c'])\n \n Dimensions: (x: 2, y: 1)\n Coordinates:\n * y (y) >> ds.drop(y='b')\n \n Dimensions: (x: 2, y: 2)\n Coordinates:\n * y (y) \"Dataset\":\n if errors == \"raise\":\n self._assert_all_in_dataset(names)\n\n variables = {k: v for k, v in self._variables.items() if k not in names}\n coord_names = {k for k in self._coord_names if k in variables}\n indexes = {k: v for k, v in self.indexes.items() if k not in names}\n return self._replace_with_new_dims(\n variables, coord_names=coord_names, indexes=indexes\n )\n\n def drop_dims(\n self, drop_dims: Union[Hashable, Iterable[Hashable]], *, errors: str = \"raise\"\n ) -> \"Dataset\":\n \"\"\"Drop dimensions and associated variables from this dataset.\n\n Parameters\n ----------\n drop_dims : hashable or iterable of hashable\n Dimension or dimensions to drop.\n errors: {'raise', 'ignore'}, optional\n If 'raise' (default), raises a ValueError error if any of the\n dimensions passed are not in the dataset. If 'ignore', any given\n labels that are in the dataset are dropped and no error is raised.\n\n Returns\n -------\n obj : Dataset\n The dataset without the given dimensions (or any variables\n containing those dimensions)\n errors: {'raise', 'ignore'}, optional\n If 'raise' (default), raises a ValueError error if\n any of the dimensions passed are not\n in the dataset. If 'ignore', any given dimensions that are in the\n dataset are dropped and no error is raised.\n \"\"\"\n if errors not in [\"raise\", \"ignore\"]:\n raise ValueError('errors must be either \"raise\" or \"ignore\"')\n\n if isinstance(drop_dims, str) or not isinstance(drop_dims, Iterable):\n drop_dims = {drop_dims}\n else:\n drop_dims = set(drop_dims)\n\n if errors == \"raise\":\n missing_dims = drop_dims - set(self.dims)\n if missing_dims:\n raise ValueError(\n \"Dataset does not contain the dimensions: %s\" % missing_dims\n )\n\n drop_vars = {k for k, v in self._variables.items() if set(v.dims) & drop_dims}\n return self._drop_vars(drop_vars)\n\n def transpose(self, *dims: Hashable) -> \"Dataset\":\n \"\"\"Return a new Dataset object with all array dimensions transposed.\n\n Although the order of dimensions on each array will change, the dataset\n dimensions themselves will remain in fixed (sorted) order.\n\n Parameters\n ----------\n *dims : Hashable, optional\n By default, reverse the dimensions on each array. Otherwise,\n reorder the dimensions to this order.\n\n Returns\n -------\n transposed : Dataset\n Each array in the dataset (including) coordinates will be\n transposed to the given order.\n\n Notes\n -----\n This operation returns a view of each array's data. 
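# ----------------------------------------------------------------------
# Illustrative usage sketch for drop, drop_dims and transpose (not part of
# this module; the dataset below is hypothetical).
import numpy as np
import xarray as xr

ds_example = xr.Dataset(
    {"A": (("x", "y"), np.random.randn(2, 3))}, coords={"y": ["a", "b", "c"]}
)
without_var = ds_example.drop("A")                     # drop a data variable
without_labels = ds_example.drop(["a", "c"], dim="y")  # drop index labels along "y"
without_dim = ds_example.drop_dims("y")                # drop "y" and every variable using it
flipped = ds_example.transpose("y", "x")               # reorder dimensions on each array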
It is\n lazy for dask-backed DataArrays but not for numpy-backed DataArrays\n -- the data will be fully loaded into memory.\n\n See Also\n --------\n numpy.transpose\n DataArray.transpose\n \"\"\"\n if dims:\n if set(dims) ^ set(self.dims):\n raise ValueError(\n \"arguments to transpose (%s) must be \"\n \"permuted dataset dimensions (%s)\" % (dims, tuple(self.dims))\n )\n ds = self.copy()\n for name, var in self._variables.items():\n var_dims = tuple(dim for dim in dims if dim in var.dims)\n ds._variables[name] = var.transpose(*var_dims)\n return ds\n\n def dropna(\n self,\n dim: Hashable,\n how: str = \"any\",\n thresh: int = None,\n subset: Iterable[Hashable] = None,\n ):\n \"\"\"Returns a new dataset with dropped labels for missing values along\n the provided dimension.\n\n Parameters\n ----------\n dim : Hashable\n Dimension along which to drop missing values. Dropping along\n multiple dimensions simultaneously is not yet supported.\n how : {'any', 'all'}, optional\n * any : if any NA values are present, drop that label\n * all : if all values are NA, drop that label\n thresh : int, default None\n If supplied, require this many non-NA values.\n subset : iterable of hashable, optional\n Which variables to check for missing values. By default, all\n variables in the dataset are checked.\n\n Returns\n -------\n Dataset\n \"\"\"\n # TODO: consider supporting multiple dimensions? Or not, given that\n # there are some ugly edge cases, e.g., pandas's dropna differs\n # depending on the order of the supplied axes.\n\n if dim not in self.dims:\n raise ValueError(\"%s must be a single dataset dimension\" % dim)\n\n if subset is None:\n subset = iter(self.data_vars)\n\n count = np.zeros(self.dims[dim], dtype=np.int64)\n size = 0\n\n for k in subset:\n array = self._variables[k]\n if dim in array.dims:\n dims = [d for d in array.dims if d != dim]\n count += np.asarray(array.count(dims)) # type: ignore\n size += np.prod([self.dims[d] for d in dims])\n\n if thresh is not None:\n mask = count >= thresh\n elif how == \"any\":\n mask = count == size\n elif how == \"all\":\n mask = count > 0\n elif how is not None:\n raise ValueError(\"invalid how option: %s\" % how)\n else:\n raise TypeError(\"must specify how or thresh\")\n\n return self.isel({dim: mask})\n\n def fillna(self, value: Any) -> \"Dataset\":\n \"\"\"Fill missing values in this object.\n\n This operation follows the normal broadcasting and alignment rules that\n xarray uses for binary arithmetic, except the result is aligned to this\n object (``join='left'``) instead of aligned to the intersection of\n index coordinates (``join='inner'``).\n\n Parameters\n ----------\n value : scalar, ndarray, DataArray, dict or Dataset\n Used to fill all matching missing values in this dataset's data\n variables. Scalars, ndarrays or DataArrays arguments are used to\n fill all data with aligned coordinates (for DataArrays).\n Dictionaries or datasets match data variables and then align\n coordinates if necessary.\n\n Returns\n -------\n Dataset\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> ds = xr.Dataset(\n ... {\n ... \"A\": (\"x\", [np.nan, 2, np.nan, 0]),\n ... \"B\": (\"x\", [3, 4, np.nan, 1]),\n ... \"C\": (\"x\", [np.nan, np.nan, np.nan, 5]),\n ... \"D\": (\"x\", [np.nan, 3, np.nan, 4])\n ... },\n ... 
coords={\"x\": [0, 1, 2, 3]})\n >>> ds\n \n Dimensions: (x: 4)\n Coordinates:\n * x (x) int64 0 1 2 3\n Data variables:\n A (x) float64 nan 2.0 nan 0.0\n B (x) float64 3.0 4.0 nan 1.0\n C (x) float64 nan nan nan 5.0\n D (x) float64 nan 3.0 nan 4.0\n\n Replace all `NaN` values with 0s.\n\n >>> ds.fillna(0)\n \n Dimensions: (x: 4)\n Coordinates:\n * x (x) int64 0 1 2 3\n Data variables:\n A (x) float64 0.0 2.0 0.0 0.0\n B (x) float64 3.0 4.0 0.0 1.0\n C (x) float64 0.0 0.0 0.0 5.0\n D (x) float64 0.0 3.0 0.0 4.0\n\n Replace all `NaN` elements in column ‘A’, ‘B’, ‘C’, and ‘D’, with 0, 1, 2, and 3 respectively.\n\n >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}\n >>> ds.fillna(value=values)\n \n Dimensions: (x: 4)\n Coordinates:\n * x (x) int64 0 1 2 3\n Data variables:\n A (x) float64 0.0 2.0 0.0 0.0\n B (x) float64 3.0 4.0 1.0 1.0\n C (x) float64 2.0 2.0 2.0 5.0\n D (x) float64 3.0 3.0 3.0 4.0\n \"\"\"\n if utils.is_dict_like(value):\n value_keys = getattr(value, \"data_vars\", value).keys()\n if not set(value_keys) <= set(self.data_vars.keys()):\n raise ValueError(\n \"all variables in the argument to `fillna` \"\n \"must be contained in the original dataset\"\n )\n out = ops.fillna(self, value)\n return out\n\n def interpolate_na(\n self,\n dim: Hashable = None,\n method: str = \"linear\",\n limit: int = None,\n use_coordinate: Union[bool, Hashable] = True,\n **kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Interpolate values according to different methods.\n\n Parameters\n ----------\n dim : Hashable\n Specifies the dimension along which to interpolate.\n method : {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',\n 'polynomial', 'barycentric', 'krog', 'pchip',\n 'spline'}, optional\n String indicating which method to use for interpolation:\n\n - 'linear': linear interpolation (Default). Additional keyword\n arguments are passed to ``numpy.interp``\n - 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',\n 'polynomial': are passed to ``scipy.interpolate.interp1d``. If\n method=='polynomial', the ``order`` keyword argument must also be\n provided.\n - 'barycentric', 'krog', 'pchip', 'spline': use their respective\n ``scipy.interpolate`` classes.\n use_coordinate : boolean or str, default True\n Specifies which index to use as the x values in the interpolation\n formulated as `y = f(x)`. If False, values are treated as if\n eqaully-spaced along `dim`. If True, the IndexVariable `dim` is\n used. If use_coordinate is a string, it specifies the name of a\n coordinate variariable to use as the index.\n limit : int, default None\n Maximum number of consecutive NaNs to fill. Must be greater than 0\n or None for no limit.\n kwargs : any\n parameters passed verbatim to the underlying interplation function\n\n Returns\n -------\n Dataset\n\n See also\n --------\n numpy.interp\n scipy.interpolate\n \"\"\"\n from .missing import interp_na, _apply_over_vars_with_dim\n\n new = _apply_over_vars_with_dim(\n interp_na,\n self,\n dim=dim,\n method=method,\n limit=limit,\n use_coordinate=use_coordinate,\n **kwargs,\n )\n return new\n\n def ffill(self, dim: Hashable, limit: int = None) -> \"Dataset\":\n \"\"\"Fill NaN values by propogating values forward\n\n *Requires bottleneck.*\n\n Parameters\n ----------\n dim : Hashable\n Specifies the dimension along which to propagate values when\n filling.\n limit : int, default None\n The maximum number of consecutive NaN values to forward fill. In\n other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. 
Must be greater\n than 0 or None for no limit.\n\n Returns\n -------\n Dataset\n \"\"\"\n from .missing import ffill, _apply_over_vars_with_dim\n\n new = _apply_over_vars_with_dim(ffill, self, dim=dim, limit=limit)\n return new\n\n def bfill(self, dim: Hashable, limit: int = None) -> \"Dataset\":\n \"\"\"Fill NaN values by propogating values backward\n\n *Requires bottleneck.*\n\n Parameters\n ----------\n dim : str\n Specifies the dimension along which to propagate values when\n filling.\n limit : int, default None\n The maximum number of consecutive NaN values to backward fill. In\n other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. Must be greater\n than 0 or None for no limit.\n\n Returns\n -------\n Dataset\n \"\"\"\n from .missing import bfill, _apply_over_vars_with_dim\n\n new = _apply_over_vars_with_dim(bfill, self, dim=dim, limit=limit)\n return new\n\n def combine_first(self, other: \"Dataset\") -> \"Dataset\":\n \"\"\"Combine two Datasets, default to data_vars of self.\n\n The new coordinates follow the normal broadcasting and alignment rules\n of ``join='outer'``. Vacant cells in the expanded coordinates are\n filled with np.nan.\n\n Parameters\n ----------\n other : Dataset\n Used to fill all matching missing values in this array.\n\n Returns\n -------\n DataArray\n \"\"\"\n out = ops.fillna(self, other, join=\"outer\", dataset_join=\"outer\")\n return out\n\n def reduce(\n self,\n func: Callable,\n dim: Union[Hashable, Iterable[Hashable]] = None,\n keep_attrs: bool = None,\n keepdims: bool = False,\n numeric_only: bool = False,\n allow_lazy: bool = False,\n **kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Reduce this dataset by applying `func` along some dimension(s).\n\n Parameters\n ----------\n func : callable\n Function which can be called in the form\n `f(x, axis=axis, **kwargs)` to return the result of reducing an\n np.ndarray over an integer valued axis.\n dim : str or sequence of str, optional\n Dimension(s) over which to apply `func`. By default `func` is\n applied over all dimensions.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n keepdims : bool, default False\n If True, the dimensions which are reduced are left in the result\n as dimensions of size one. 
Coordinates that use these dimensions\n are removed.\n numeric_only : bool, optional\n If True, only apply ``func`` to variables with a numeric dtype.\n **kwargs : Any\n Additional keyword arguments passed on to ``func``.\n\n Returns\n -------\n reduced : Dataset\n Dataset with this object's DataArrays replaced with new DataArrays\n of summarized data and the indicated dimension(s) removed.\n \"\"\"\n if dim is None or dim is ALL_DIMS:\n dims = set(self.dims)\n elif isinstance(dim, str) or not isinstance(dim, Iterable):\n dims = {dim}\n else:\n dims = set(dim)\n\n missing_dimensions = [d for d in dims if d not in self.dims]\n if missing_dimensions:\n raise ValueError(\n \"Dataset does not contain the dimensions: %s\" % missing_dimensions\n )\n\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n\n variables: Dict[Hashable, Variable] = {}\n for name, var in self._variables.items():\n reduce_dims = [d for d in var.dims if d in dims]\n if name in self.coords:\n if not reduce_dims:\n variables[name] = var\n else:\n if (\n not numeric_only\n or np.issubdtype(var.dtype, np.number)\n or (var.dtype == np.bool_)\n ):\n if len(reduce_dims) == 1:\n # unpack dimensions for the benefit of functions\n # like np.argmin which can't handle tuple arguments\n reduce_dims, = reduce_dims\n elif len(reduce_dims) == var.ndim:\n # prefer to aggregate over axis=None rather than\n # axis=(0, 1) if they will be equivalent, because\n # the former is often more efficient\n reduce_dims = None # type: ignore\n variables[name] = var.reduce(\n func,\n dim=reduce_dims,\n keep_attrs=keep_attrs,\n keepdims=keepdims,\n allow_lazy=allow_lazy,\n **kwargs,\n )\n\n coord_names = {k for k in self.coords if k in variables}\n indexes = {k: v for k, v in self.indexes.items() if k in variables}\n attrs = self.attrs if keep_attrs else None\n return self._replace_with_new_dims(\n variables, coord_names=coord_names, attrs=attrs, indexes=indexes\n )\n\n def apply(\n self,\n func: Callable,\n keep_attrs: bool = None,\n args: Iterable[Any] = (),\n **kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Apply a function over the data variables in this dataset.\n\n Parameters\n ----------\n func : callable\n Function which can be called in the form `func(x, *args, **kwargs)`\n to transform each DataArray `x` in this dataset into another\n DataArray.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. 
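# ----------------------------------------------------------------------
# Illustrative usage sketch for reduce (not part of this module; the
# dataset below is hypothetical).
import numpy as np
import xarray as xr

ds_example = xr.Dataset({"foo": (("x", "y"), np.arange(6.0).reshape(2, 3))})
mean_y = ds_example.reduce(np.mean, dim="y")  # equivalent to ds_example.mean("y")
total = ds_example.reduce(np.sum)             # reduce over all dimensions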
If False, the new object will\n be returned without attributes.\n args : tuple, optional\n Positional arguments passed on to `func`.\n **kwargs : Any\n Keyword arguments passed on to `func`.\n\n Returns\n -------\n applied : Dataset\n Resulting dataset from applying ``func`` over each data variable.\n\n Examples\n --------\n >>> da = xr.DataArray(np.random.randn(2, 3))\n >>> ds = xr.Dataset({'foo': da, 'bar': ('x', [-1, 2])})\n >>> ds\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Dimensions without coordinates: dim_0, dim_1, x\n Data variables:\n foo (dim_0, dim_1) float64 -0.3751 -1.951 -1.945 0.2948 0.711 -0.3948\n bar (x) int64 -1 2\n >>> ds.apply(np.fabs)\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Dimensions without coordinates: dim_0, dim_1, x\n Data variables:\n foo (dim_0, dim_1) float64 0.3751 1.951 1.945 0.2948 0.711 0.3948\n bar (x) float64 1.0 2.0\n \"\"\"\n variables = {\n k: maybe_wrap_array(v, func(v, *args, **kwargs))\n for k, v in self.data_vars.items()\n }\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n attrs = self.attrs if keep_attrs else None\n return type(self)(variables, attrs=attrs)\n\n def assign(\n self, variables: Mapping[Hashable, Any] = None, **variables_kwargs: Hashable\n ) -> \"Dataset\":\n \"\"\"Assign new data variables to a Dataset, returning a new object\n with all the original variables in addition to the new ones.\n\n Parameters\n ----------\n variables : mapping, value pairs\n Mapping from variables names to the new values. If the new values\n are callable, they are computed on the Dataset and assigned to new\n data variables. If the values are not callable, (e.g. a DataArray,\n scalar, or array), they are simply assigned.\n **variables_kwargs:\n The keyword arguments form of ``variables``.\n One of variables or variables_kwargs must be provided.\n\n Returns\n -------\n ds : Dataset\n A new Dataset with the new variables in addition to all the\n existing variables.\n\n Notes\n -----\n Since ``kwargs`` is a dictionary, the order of your arguments may not\n be preserved, and so the order of the new variables is not well\n defined. Assigning multiple variables within the same ``assign`` is\n possible, but you cannot reference other variables created within the\n same ``assign`` call.\n\n See Also\n --------\n pandas.DataFrame.assign\n\n Examples\n --------\n >>> import numpy as np\n >>> import xarray as xr\n >>> x = xr.Dataset(\n ... {\n ... \"temperature_c\": ((\"lat\", \"lon\"), 20 * np.random.rand(4).reshape(2, 2)),\n ... \"precipitation\": ((\"lat\", \"lon\"), np.random.rand(4).reshape(2, 2)),\n ... },\n ... coords={\"lat\": [10, 20], \"lon\": [150, 160]},\n ... 
)\n >>> x\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 18.04 12.51 17.64 9.313\n precipitation (lat, lon) float64 0.4751 0.6827 0.3697 0.03524\n\n Where the value is a callable, evaluated on dataset:\n\n >>> x.assign(temperature_f = lambda x: x.temperature_c * 9 / 5 + 32)\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 18.04 12.51 17.64 9.313\n precipitation (lat, lon) float64 0.4751 0.6827 0.3697 0.03524\n temperature_f (lat, lon) float64 64.47 54.51 63.75 48.76\n\n Alternatively, the same behavior can be achieved by directly referencing an existing dataarray:\n\n >>> x.assign(temperature_f=x[\"temperature_c\"] * 9 / 5 + 32)\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 18.04 12.51 17.64 9.313\n precipitation (lat, lon) float64 0.4751 0.6827 0.3697 0.03524\n temperature_f (lat, lon) float64 64.47 54.51 63.75 48.76\n\n \"\"\"\n variables = either_dict_or_kwargs(variables, variables_kwargs, \"assign\")\n data = self.copy()\n # do all calculations first...\n results = data._calc_assign_results(variables)\n # ... and then assign\n data.update(results)\n return data\n\n def to_array(self, dim=\"variable\", name=None):\n \"\"\"Convert this dataset into an xarray.DataArray\n\n The data variables of this dataset will be broadcast against each other\n and stacked along the first axis of the new array. All coordinates of\n this dataset will remain coordinates.\n\n Parameters\n ----------\n dim : str, optional\n Name of the new dimension.\n name : str, optional\n Name of the new data array.\n\n Returns\n -------\n array : xarray.DataArray\n \"\"\"\n from .dataarray import DataArray\n\n data_vars = [self.variables[k] for k in self.data_vars]\n broadcast_vars = broadcast_variables(*data_vars)\n data = duck_array_ops.stack([b.data for b in broadcast_vars], axis=0)\n\n coords = dict(self.coords)\n coords[dim] = list(self.data_vars)\n\n dims = (dim,) + broadcast_vars[0].dims\n\n return DataArray(data, coords, dims, attrs=self.attrs, name=name)\n\n def _to_dataframe(self, ordered_dims):\n columns = [k for k in self.variables if k not in self.dims]\n data = [\n self._variables[k].set_dims(ordered_dims).values.reshape(-1)\n for k in columns\n ]\n index = self.coords.to_index(ordered_dims)\n return pd.DataFrame(dict(zip(columns, data)), index=index)\n\n def to_dataframe(self):\n \"\"\"Convert this dataset into a pandas.DataFrame.\n\n Non-index variables in this dataset form the columns of the\n DataFrame. 
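# ----------------------------------------------------------------------
# Illustrative usage sketch for apply, assign and to_array (not part of
# this module; the dataset below is hypothetical).
import numpy as np
import xarray as xr

ds_example = xr.Dataset(
    {"foo": (("x", "y"), np.random.randn(2, 3)), "bar": ("x", [-1, 2])}
)
absolute = ds_example.apply(np.fabs)                      # elementwise abs of each data variable
augmented = ds_example.assign(baz=lambda d: d.bar * 10)   # add a derived variable
arr = ds_example[["foo"]].to_array(dim="variable")        # stack data variables into a DataArray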
The DataFrame is be indexed by the Cartesian product of\n this dataset's indices.\n \"\"\"\n return self._to_dataframe(self.dims)\n\n def _set_sparse_data_from_dataframe(\n self, dataframe: pd.DataFrame, dims: tuple, shape: Tuple[int, ...]\n ) -> None:\n from sparse import COO\n\n idx = dataframe.index\n if isinstance(idx, pd.MultiIndex):\n try:\n codes = idx.codes\n except AttributeError:\n # deprecated since pandas 0.24\n codes = idx.labels\n coords = np.stack([np.asarray(code) for code in codes], axis=0)\n is_sorted = idx.is_lexsorted\n else:\n coords = np.arange(idx.size).reshape(1, -1)\n is_sorted = True\n\n for name, series in dataframe.items():\n # Cast to a NumPy array first, in case the Series is a pandas\n # Extension array (which doesn't have a valid NumPy dtype)\n values = np.asarray(series)\n\n # In virtually all real use cases, the sparse array will now have\n # missing values and needs a fill_value. For consistency, don't\n # special case the rare exceptions (e.g., dtype=int without a\n # MultiIndex).\n dtype, fill_value = dtypes.maybe_promote(values.dtype)\n values = np.asarray(values, dtype=dtype)\n\n data = COO(\n coords,\n values,\n shape,\n has_duplicates=False,\n sorted=is_sorted,\n fill_value=fill_value,\n )\n self[name] = (dims, data)\n\n def _set_numpy_data_from_dataframe(\n self, dataframe: pd.DataFrame, dims: tuple, shape: Tuple[int, ...]\n ) -> None:\n idx = dataframe.index\n if isinstance(idx, pd.MultiIndex):\n # expand the DataFrame to include the product of all levels\n full_idx = pd.MultiIndex.from_product(idx.levels, names=idx.names)\n dataframe = dataframe.reindex(full_idx)\n\n for name, series in dataframe.items():\n data = np.asarray(series).reshape(shape)\n self[name] = (dims, data)\n\n @classmethod\n def from_dataframe(cls, dataframe: pd.DataFrame, sparse: bool = False) -> \"Dataset\":\n \"\"\"Convert a pandas.DataFrame into an xarray.Dataset\n\n Each column will be converted into an independent variable in the\n Dataset. If the dataframe's index is a MultiIndex, it will be expanded\n into a tensor product of one-dimensional indices (filling in missing\n values with NaN). This method will produce a Dataset very similar to\n that on which the 'to_dataframe' method was called, except with\n possibly redundant dimensions (since all dataset variables will have\n the same dimensionality)\n\n Parameters\n ----------\n dataframe : pandas.DataFrame\n DataFrame from which to copy data and indices.\n sparse : bool\n If true, create a sparse arrays instead of dense numpy arrays. This\n can potentially save a large amount of memory if the DataFrame has\n a MultiIndex. 
Requires the sparse package (sparse.pydata.org).\n\n Returns\n -------\n New Dataset.\n\n See also\n --------\n xarray.DataArray.from_series\n \"\"\"\n # TODO: Add an option to remove dimensions along which the variables\n # are constant, to enable consistent serialization to/from a dataframe,\n # even if some variables have different dimensionality.\n\n if not dataframe.columns.is_unique:\n raise ValueError(\"cannot convert DataFrame with non-unique columns\")\n\n idx = dataframe.index\n obj = cls()\n\n if isinstance(idx, pd.MultiIndex):\n dims = tuple(\n name if name is not None else \"level_%i\" % n\n for n, name in enumerate(idx.names)\n )\n for dim, lev in zip(dims, idx.levels):\n obj[dim] = (dim, lev)\n shape = tuple(lev.size for lev in idx.levels)\n else:\n index_name = idx.name if idx.name is not None else \"index\"\n dims = (index_name,)\n obj[index_name] = (dims, idx)\n shape = (idx.size,)\n\n if sparse:\n obj._set_sparse_data_from_dataframe(dataframe, dims, shape)\n else:\n obj._set_numpy_data_from_dataframe(dataframe, dims, shape)\n return obj\n\n def to_dask_dataframe(self, dim_order=None, set_index=False):\n \"\"\"\n Convert this dataset into a dask.dataframe.DataFrame.\n\n The dimensions, coordinates and data variables in this dataset form\n the columns of the DataFrame.\n\n Parameters\n ----------\n dim_order : list, optional\n Hierarchical dimension order for the resulting dataframe. All\n arrays are transposed to this order and then written out as flat\n vectors in contiguous order, so the last dimension in this list\n will be contiguous in the resulting DataFrame. This has a major\n influence on which operations are efficient on the resulting dask\n dataframe.\n\n If provided, must include all dimensions on this dataset. By\n default, dimensions are sorted alphabetically.\n set_index : bool, optional\n If set_index=True, the dask DataFrame is indexed by this dataset's\n coordinate. 
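# ----------------------------------------------------------------------
# Illustrative usage sketch for the pandas round trip (not part of this
# module; the DataFrame is hypothetical, and sparse=True would additionally
# require the sparse package).
import pandas as pd
import xarray as xr

df = pd.DataFrame({"value": [1.0, 2.0]}, index=pd.Index([10, 20], name="x"))
ds_from_df = xr.Dataset.from_dataframe(df)  # variable "value" along dimension "x"
back = ds_from_df.to_dataframe()            # indexed by the dataset's index coordinates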
Since dask DataFrames to not support multi-indexes,\n set_index only works if the dataset only contains one dimension.\n\n Returns\n -------\n dask.dataframe.DataFrame\n \"\"\"\n\n import dask.array as da\n import dask.dataframe as dd\n\n if dim_order is None:\n dim_order = list(self.dims)\n elif set(dim_order) != set(self.dims):\n raise ValueError(\n \"dim_order {} does not match the set of dimensions on this \"\n \"Dataset: {}\".format(dim_order, list(self.dims))\n )\n\n ordered_dims = {k: self.dims[k] for k in dim_order}\n\n columns = list(ordered_dims)\n columns.extend(k for k in self.coords if k not in self.dims)\n columns.extend(self.data_vars)\n\n series_list = []\n for name in columns:\n try:\n var = self.variables[name]\n except KeyError:\n # dimension without a matching coordinate\n size = self.dims[name]\n data = da.arange(size, chunks=size, dtype=np.int64)\n var = Variable((name,), data)\n\n # IndexVariable objects have a dummy .chunk() method\n if isinstance(var, IndexVariable):\n var = var.to_base_variable()\n\n dask_array = var.set_dims(ordered_dims).chunk(self.chunks).data\n series = dd.from_array(dask_array.reshape(-1), columns=[name])\n series_list.append(series)\n\n df = dd.concat(series_list, axis=1)\n\n if set_index:\n if len(dim_order) == 1:\n (dim,) = dim_order\n df = df.set_index(dim)\n else:\n # triggers an error about multi-indexes, even if only one\n # dimension is passed\n df = df.set_index(dim_order)\n\n return df\n\n def to_dict(self, data=True):\n \"\"\"\n Convert this dataset to a dictionary following xarray naming\n conventions.\n\n Converts all variables and attributes to native Python objects\n Useful for coverting to json. To avoid datetime incompatibility\n use decode_times=False kwarg in xarrray.open_dataset.\n\n Parameters\n ----------\n data : bool, optional\n Whether to include the actual data in the dictionary. 
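# ----------------------------------------------------------------------
# Illustrative usage sketch for to_dict / from_dict (not part of this
# module; the dataset below is hypothetical).
import xarray as xr

ds_example = xr.Dataset({"a": ("t", [1, 2, 3])}, coords={"t": [0, 1, 2]})
d = ds_example.to_dict()            # nested dict with coords, data_vars, dims, attrs
restored = xr.Dataset.from_dict(d)  # rebuilds an equivalent Dataset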
When set to\n False, returns just the schema.\n\n See also\n --------\n Dataset.from_dict\n \"\"\"\n d = {\n \"coords\": {},\n \"attrs\": decode_numpy_dict_values(self.attrs),\n \"dims\": dict(self.dims),\n \"data_vars\": {},\n }\n for k in self.coords:\n d[\"coords\"].update({k: self[k].variable.to_dict(data=data)})\n for k in self.data_vars:\n d[\"data_vars\"].update({k: self[k].variable.to_dict(data=data)})\n return d\n\n @classmethod\n def from_dict(cls, d):\n \"\"\"\n Convert a dictionary into an xarray.Dataset.\n\n Input dict can take several forms::\n\n d = {'t': {'dims': ('t'), 'data': t},\n 'a': {'dims': ('t'), 'data': x},\n 'b': {'dims': ('t'), 'data': y}}\n\n d = {'coords': {'t': {'dims': 't', 'data': t,\n 'attrs': {'units':'s'}}},\n 'attrs': {'title': 'air temperature'},\n 'dims': 't',\n 'data_vars': {'a': {'dims': 't', 'data': x, },\n 'b': {'dims': 't', 'data': y}}}\n\n where 't' is the name of the dimesion, 'a' and 'b' are names of data\n variables and t, x, and y are lists, numpy.arrays or pandas objects.\n\n Parameters\n ----------\n d : dict, with a minimum structure of {'var_0': {'dims': [..], \\\n 'data': [..]}, \\\n ...}\n\n Returns\n -------\n obj : xarray.Dataset\n\n See also\n --------\n Dataset.to_dict\n DataArray.from_dict\n \"\"\"\n\n if not {\"coords\", \"data_vars\"}.issubset(set(d)):\n variables = d.items()\n else:\n import itertools\n\n variables = itertools.chain(\n d.get(\"coords\", {}).items(), d.get(\"data_vars\", {}).items()\n )\n try:\n variable_dict = {\n k: (v[\"dims\"], v[\"data\"], v.get(\"attrs\")) for k, v in variables\n }\n except KeyError as e:\n raise ValueError(\n \"cannot convert dict without the key \"\n \"'{dims_data}'\".format(dims_data=str(e.args[0]))\n )\n obj = cls(variable_dict)\n\n # what if coords aren't dims?\n coords = set(d.get(\"coords\", {})) - set(d.get(\"dims\", {}))\n obj = obj.set_coords(coords)\n\n obj.attrs.update(d.get(\"attrs\", {}))\n\n return obj\n\n @staticmethod\n def _unary_op(f, keep_attrs=False):\n @functools.wraps(f)\n def func(self, *args, **kwargs):\n variables = {}\n for k, v in self._variables.items():\n if k in self._coord_names:\n variables[k] = v\n else:\n variables[k] = f(v, *args, **kwargs)\n attrs = self._attrs if keep_attrs else None\n return self._replace_with_new_dims(variables, attrs=attrs)\n\n return func\n\n @staticmethod\n def _binary_op(f, reflexive=False, join=None):\n @functools.wraps(f)\n def func(self, other):\n from .dataarray import DataArray\n\n if isinstance(other, groupby.GroupBy):\n return NotImplemented\n align_type = OPTIONS[\"arithmetic_join\"] if join is None else join\n if isinstance(other, (DataArray, Dataset)):\n self, other = align(self, other, join=align_type, copy=False)\n g = f if not reflexive else lambda x, y: f(y, x)\n ds = self._calculate_binary_op(g, other, join=align_type)\n return ds\n\n return func\n\n @staticmethod\n def _inplace_binary_op(f):\n @functools.wraps(f)\n def func(self, other):\n from .dataarray import DataArray\n\n if isinstance(other, groupby.GroupBy):\n raise TypeError(\n \"in-place operations between a Dataset and \"\n \"a grouped object are not permitted\"\n )\n # we don't actually modify arrays in-place with in-place Dataset\n # arithmetic -- this lets us automatically align things\n if isinstance(other, (DataArray, Dataset)):\n other = other.reindex_like(self, copy=False)\n g = ops.inplace_to_noninplace_op(f)\n ds = self._calculate_binary_op(g, other, inplace=True)\n self._replace_with_new_dims(\n ds._variables,\n ds._coord_names,\n 
attrs=ds._attrs,\n indexes=ds._indexes,\n inplace=True,\n )\n return self\n\n return func\n\n def _calculate_binary_op(self, f, other, join=\"inner\", inplace=False):\n def apply_over_both(lhs_data_vars, rhs_data_vars, lhs_vars, rhs_vars):\n if inplace and set(lhs_data_vars) != set(rhs_data_vars):\n raise ValueError(\n \"datasets must have the same data variables \"\n \"for in-place arithmetic operations: %s, %s\"\n % (list(lhs_data_vars), list(rhs_data_vars))\n )\n\n dest_vars = {}\n\n for k in lhs_data_vars:\n if k in rhs_data_vars:\n dest_vars[k] = f(lhs_vars[k], rhs_vars[k])\n elif join in [\"left\", \"outer\"]:\n dest_vars[k] = f(lhs_vars[k], np.nan)\n for k in rhs_data_vars:\n if k not in dest_vars and join in [\"right\", \"outer\"]:\n dest_vars[k] = f(rhs_vars[k], np.nan)\n return dest_vars\n\n if utils.is_dict_like(other) and not isinstance(other, Dataset):\n # can't use our shortcut of doing the binary operation with\n # Variable objects, so apply over our data vars instead.\n new_data_vars = apply_over_both(\n self.data_vars, other, self.data_vars, other\n )\n return Dataset(new_data_vars)\n\n other_coords = getattr(other, \"coords\", None)\n ds = self.coords.merge(other_coords)\n\n if isinstance(other, Dataset):\n new_vars = apply_over_both(\n self.data_vars, other.data_vars, self.variables, other.variables\n )\n else:\n other_variable = getattr(other, \"variable\", other)\n new_vars = {k: f(self.variables[k], other_variable) for k in self.data_vars}\n ds._variables.update(new_vars)\n ds._dims = calculate_dimensions(ds._variables)\n return ds\n\n def _copy_attrs_from(self, other):\n self.attrs = other.attrs\n for v in other.variables:\n if v in self.variables:\n self.variables[v].attrs = other.variables[v].attrs\n\n def diff(self, dim, n=1, label=\"upper\"):\n \"\"\"Calculate the n-th order discrete difference along given axis.\n\n Parameters\n ----------\n dim : str, optional\n Dimension over which to calculate the finite difference.\n n : int, optional\n The number of times values are differenced.\n label : str, optional\n The new coordinate in dimension ``dim`` will have the\n values of either the minuend's or subtrahend's coordinate\n for values 'upper' and 'lower', respectively. 
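A small sketch of the user-visible effect of the alignment done in _binary_op/_calculate_binary_op above, assuming the default arithmetic_join option of "inner"; the two toy datasets are illustrative:

import xarray as xr

a = xr.Dataset({"v": ("x", [1, 2, 3])}, coords={"x": [0, 1, 2]})
b = xr.Dataset({"v": ("x", [10, 20, 30])}, coords={"x": [1, 2, 3]})

print((a + b)["x"].values)           # inner join of indexes -> [1 2]
with xr.set_options(arithmetic_join="outer"):
    print((a + b)["x"].values)       # outer join -> [0 1 2 3], v padded with NaN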
Other\n values are not supported.\n\n Returns\n -------\n difference : same type as caller\n The n-th order finite difference of this object.\n\n Examples\n --------\n >>> ds = xr.Dataset({'foo': ('x', [5, 5, 6, 6])})\n >>> ds.diff('x')\n \n Dimensions: (x: 3)\n Coordinates:\n * x (x) int64 1 2 3\n Data variables:\n foo (x) int64 0 1 0\n >>> ds.diff('x', 2)\n \n Dimensions: (x: 2)\n Coordinates:\n * x (x) int64 2 3\n Data variables:\n foo (x) int64 1 -1\n\n See Also\n --------\n Dataset.differentiate\n \"\"\"\n if n == 0:\n return self\n if n < 0:\n raise ValueError(\"order `n` must be non-negative but got {}\".format(n))\n\n # prepare slices\n kwargs_start = {dim: slice(None, -1)}\n kwargs_end = {dim: slice(1, None)}\n\n # prepare new coordinate\n if label == \"upper\":\n kwargs_new = kwargs_end\n elif label == \"lower\":\n kwargs_new = kwargs_start\n else:\n raise ValueError(\n \"The 'label' argument has to be either \" \"'upper' or 'lower'\"\n )\n\n variables = {}\n\n for name, var in self.variables.items():\n if dim in var.dims:\n if name in self.data_vars:\n variables[name] = var.isel(**kwargs_end) - var.isel(**kwargs_start)\n else:\n variables[name] = var.isel(**kwargs_new)\n else:\n variables[name] = var\n\n indexes = dict(self.indexes)\n if dim in indexes:\n indexes[dim] = indexes[dim][kwargs_new[dim]]\n\n difference = self._replace_with_new_dims(variables, indexes=indexes)\n\n if n > 1:\n return difference.diff(dim, n - 1)\n else:\n return difference\n\n def shift(self, shifts=None, fill_value=dtypes.NA, **shifts_kwargs):\n \"\"\"Shift this dataset by an offset along one or more dimensions.\n\n Only data variables are moved; coordinates stay in place. This is\n consistent with the behavior of ``shift`` in pandas.\n\n Parameters\n ----------\n shifts : Mapping with the form of {dim: offset}\n Integer offset to shift along each of the given dimensions.\n Positive offsets shift to the right; negative offsets shift to the\n left.\n fill_value: scalar, optional\n Value to use for newly missing values\n **shifts_kwargs:\n The keyword arguments form of ``shifts``.\n One of shifts or shifts_kwarg must be provided.\n\n Returns\n -------\n shifted : Dataset\n Dataset with the same coordinates and attributes but shifted data\n variables.\n\n See also\n --------\n roll\n\n Examples\n --------\n\n >>> ds = xr.Dataset({'foo': ('x', list('abcde'))})\n >>> ds.shift(x=2)\n \n Dimensions: (x: 5)\n Coordinates:\n * x (x) int64 0 1 2 3 4\n Data variables:\n foo (x) object nan nan 'a' 'b' 'c'\n \"\"\"\n shifts = either_dict_or_kwargs(shifts, shifts_kwargs, \"shift\")\n invalid = [k for k in shifts if k not in self.dims]\n if invalid:\n raise ValueError(\"dimensions %r do not exist\" % invalid)\n\n variables = {}\n for name, var in self.variables.items():\n if name in self.data_vars:\n var_shifts = {k: v for k, v in shifts.items() if k in var.dims}\n variables[name] = var.shift(fill_value=fill_value, shifts=var_shifts)\n else:\n variables[name] = var\n\n return self._replace(variables)\n\n def roll(self, shifts=None, roll_coords=None, **shifts_kwargs):\n \"\"\"Roll this dataset by an offset along one or more dimensions.\n\n Unlike shift, roll may rotate all variables, including coordinates\n if specified. The direction of rotation is consistent with\n :py:func:`numpy.roll`.\n\n Parameters\n ----------\n\n shifts : dict, optional\n A dict with keys matching dimensions and values given\n by integers to rotate each of the given dimensions. 
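A short sketch of the fill_value branch of shift shown above, using an illustrative one-variable dataset:

import xarray as xr

ds = xr.Dataset({"foo": ("x", [1, 2, 3, 4, 5])})
print(ds.shift(x=2)["foo"].values)                # [nan nan 1. 2. 3.] (dtype promoted)
print(ds.shift(x=2, fill_value=0)["foo"].values)  # [0 0 1 2 3] (integer dtype kept)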
Positive\n offsets roll to the right; negative offsets roll to the left.\n roll_coords : bool\n Indicates whether to roll the coordinates by the offset\n The current default of roll_coords (None, equivalent to True) is\n deprecated and will change to False in a future version.\n Explicitly pass roll_coords to silence the warning.\n **shifts_kwargs : {dim: offset, ...}, optional\n The keyword arguments form of ``shifts``.\n One of shifts or shifts_kwargs must be provided.\n Returns\n -------\n rolled : Dataset\n Dataset with the same coordinates and attributes but rolled\n variables.\n\n See also\n --------\n shift\n\n Examples\n --------\n\n >>> ds = xr.Dataset({'foo': ('x', list('abcde'))})\n >>> ds.roll(x=2)\n \n Dimensions: (x: 5)\n Coordinates:\n * x (x) int64 3 4 0 1 2\n Data variables:\n foo (x) object 'd' 'e' 'a' 'b' 'c'\n \"\"\"\n shifts = either_dict_or_kwargs(shifts, shifts_kwargs, \"roll\")\n invalid = [k for k in shifts if k not in self.dims]\n if invalid:\n raise ValueError(\"dimensions %r do not exist\" % invalid)\n\n if roll_coords is None:\n warnings.warn(\n \"roll_coords will be set to False in the future.\"\n \" Explicitly set roll_coords to silence warning.\",\n FutureWarning,\n stacklevel=2,\n )\n roll_coords = True\n\n unrolled_vars = () if roll_coords else self.coords\n\n variables = {}\n for k, v in self.variables.items():\n if k not in unrolled_vars:\n variables[k] = v.roll(\n **{k: s for k, s in shifts.items() if k in v.dims}\n )\n else:\n variables[k] = v\n\n if roll_coords:\n indexes = {}\n for k, v in self.indexes.items():\n (dim,) = self.variables[k].dims\n if dim in shifts:\n indexes[k] = roll_index(v, shifts[dim])\n else:\n indexes = dict(self.indexes)\n\n return self._replace(variables, indexes=indexes)\n\n def sortby(self, variables, ascending=True):\n \"\"\"\n Sort object by labels or values (along an axis).\n\n Sorts the dataset, either along specified dimensions,\n or according to values of 1-D dataarrays that share dimension\n with calling object.\n\n If the input variables are dataarrays, then the dataarrays are aligned\n (via left-join) to the calling object prior to sorting by cell values.\n NaNs are sorted to the end, following Numpy convention.\n\n If multiple sorts along the same dimension is\n given, numpy's lexsort is performed along that dimension:\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.lexsort.html\n and the FIRST key in the sequence is used as the primary sort key,\n followed by the 2nd key, etc.\n\n Parameters\n ----------\n variables: str, DataArray, or list of either\n 1D DataArray objects or name(s) of 1D variable(s) in\n coords/data_vars whose values are used to sort the dataset.\n ascending: boolean, optional\n Whether to sort by ascending or descending order.\n\n Returns\n -------\n sorted: Dataset\n A new dataset where all the specified dims are sorted by dim\n labels.\n \"\"\"\n from .dataarray import DataArray\n\n if not isinstance(variables, list):\n variables = [variables]\n else:\n variables = variables\n variables = [v if isinstance(v, DataArray) else self[v] for v in variables]\n aligned_vars = align(self, *variables, join=\"left\")\n aligned_self = aligned_vars[0]\n aligned_other_vars = aligned_vars[1:]\n vars_by_dim = defaultdict(list)\n for data_array in aligned_other_vars:\n if data_array.ndim != 1:\n raise ValueError(\"Input DataArray is not 1-D.\")\n (key,) = data_array.dims\n vars_by_dim[key].append(data_array)\n\n indices = {}\n for key, arrays in vars_by_dim.items():\n order = 
np.lexsort(tuple(reversed(arrays)))\n indices[key] = order if ascending else order[::-1]\n return aligned_self.isel(**indices)\n\n def quantile(\n self, q, dim=None, interpolation=\"linear\", numeric_only=False, keep_attrs=None\n ):\n \"\"\"Compute the qth quantile of the data along the specified dimension.\n\n Returns the qth quantiles(s) of the array elements for each variable\n in the Dataset.\n\n Parameters\n ----------\n q : float in range of [0,1] or array-like of floats\n Quantile to compute, which must be between 0 and 1 inclusive.\n dim : str or sequence of str, optional\n Dimension(s) over which to apply quantile.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to\n use when the desired quantile lies between two data points\n ``i < j``:\n\n * linear: ``i + (j - i) * fraction``, where ``fraction`` is\n the fractional part of the index surrounded by ``i`` and\n ``j``.\n * lower: ``i``.\n * higher: ``j``.\n * nearest: ``i`` or ``j``, whichever is nearest.\n * midpoint: ``(i + j) / 2``.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n numeric_only : bool, optional\n If True, only apply ``func`` to variables with a numeric dtype.\n\n Returns\n -------\n quantiles : Dataset\n If `q` is a single quantile, then the result is a scalar for each\n variable in data_vars. If multiple percentiles are given, first\n axis of the result corresponds to the quantile and a quantile\n dimension is added to the return Dataset. The other dimensions are\n the dimensions that remain after the reduction of the array.\n\n See Also\n --------\n numpy.nanpercentile, pandas.Series.quantile, DataArray.quantile\n \"\"\"\n\n if isinstance(dim, str):\n dims = {dim}\n elif dim is None or dim is ALL_DIMS:\n dims = set(self.dims)\n else:\n dims = set(dim)\n\n _assert_empty(\n [d for d in dims if d not in self.dims],\n \"Dataset does not contain the dimensions: %s\",\n )\n\n q = np.asarray(q, dtype=np.float64)\n\n variables = {}\n for name, var in self.variables.items():\n reduce_dims = [d for d in var.dims if d in dims]\n if reduce_dims or not var.dims:\n if name not in self.coords:\n if (\n not numeric_only\n or np.issubdtype(var.dtype, np.number)\n or var.dtype == np.bool_\n ):\n if len(reduce_dims) == var.ndim:\n # prefer to aggregate over axis=None rather than\n # axis=(0, 1) if they will be equivalent, because\n # the former is often more efficient\n reduce_dims = None\n variables[name] = var.quantile(\n q,\n dim=reduce_dims,\n interpolation=interpolation,\n keep_attrs=keep_attrs,\n )\n\n else:\n variables[name] = var\n\n # construct the new dataset\n coord_names = {k for k in self.coords if k in variables}\n indexes = {k: v for k, v in self.indexes.items() if k in variables}\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n attrs = self.attrs if keep_attrs else None\n new = self._replace_with_new_dims(\n variables, coord_names=coord_names, attrs=attrs, indexes=indexes\n )\n if \"quantile\" in new.dims:\n new.coords[\"quantile\"] = Variable(\"quantile\", q)\n else:\n new.coords[\"quantile\"] = q\n return new\n\n def rank(self, dim, pct=False, keep_attrs=None):\n \"\"\"Ranks the data.\n\n Equal values are assigned a rank that is the average of the ranks that\n would have been otherwise assigned to all of the values within\n that set.\n Ranks begin 
at 1, not 0. If pct is True, computes percentage ranks.\n\n NaNs in the input array are returned as NaNs.\n\n The `bottleneck` library is required.\n\n Parameters\n ----------\n dim : str\n Dimension over which to compute rank.\n pct : bool, optional\n If True, compute percentage ranks, otherwise compute integer ranks.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n\n Returns\n -------\n ranked : Dataset\n Variables that do not depend on `dim` are dropped.\n \"\"\"\n if dim not in self.dims:\n raise ValueError(\"Dataset does not contain the dimension: %s\" % dim)\n\n variables = {}\n for name, var in self.variables.items():\n if name in self.data_vars:\n if dim in var.dims:\n variables[name] = var.rank(dim, pct=pct)\n else:\n variables[name] = var\n\n coord_names = set(self.coords)\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n attrs = self.attrs if keep_attrs else None\n return self._replace(variables, coord_names, attrs=attrs)\n\n def differentiate(self, coord, edge_order=1, datetime_unit=None):\n \"\"\" Differentiate with the second order accurate central\n differences.\n\n .. note::\n This feature is limited to simple cartesian geometry, i.e. coord\n must be one dimensional.\n\n Parameters\n ----------\n coord: str\n The coordinate to be used to compute the gradient.\n edge_order: 1 or 2. Default 1\n N-th order accurate differences at the boundaries.\n datetime_unit: None or any of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',\n 'us', 'ns', 'ps', 'fs', 'as'}\n Unit to compute gradient. Only valid for datetime coordinate.\n\n Returns\n -------\n differentiated: Dataset\n\n See also\n --------\n numpy.gradient: corresponding numpy function\n \"\"\"\n from .variable import Variable\n\n if coord not in self.variables and coord not in self.dims:\n raise ValueError(\"Coordinate {} does not exist.\".format(coord))\n\n coord_var = self[coord].variable\n if coord_var.ndim != 1:\n raise ValueError(\n \"Coordinate {} must be 1 dimensional but is {}\"\n \" dimensional\".format(coord, coord_var.ndim)\n )\n\n dim = coord_var.dims[0]\n if _contains_datetime_like_objects(coord_var):\n if coord_var.dtype.kind in \"mM\" and datetime_unit is None:\n datetime_unit, _ = np.datetime_data(coord_var.dtype)\n elif datetime_unit is None:\n datetime_unit = \"s\" # Default to seconds for cftime objects\n coord_var = coord_var._to_numeric(datetime_unit=datetime_unit)\n\n variables = {}\n for k, v in self.variables.items():\n if k in self.data_vars and dim in v.dims and k not in self.coords:\n if _contains_datetime_like_objects(v):\n v = v._to_numeric(datetime_unit=datetime_unit)\n grad = duck_array_ops.gradient(\n v.data, coord_var, edge_order=edge_order, axis=v.get_axis_num(dim)\n )\n variables[k] = Variable(v.dims, grad)\n else:\n variables[k] = v\n return self._replace(variables)\n\n def integrate(self, coord, datetime_unit=None):\n \"\"\" integrate the array with the trapezoidal rule.\n\n .. note::\n This feature is limited to simple cartesian geometry, i.e. coord\n must be one dimensional.\n\n Parameters\n ----------\n dim: str, or a sequence of str\n Coordinate(s) used for the integration.\n datetime_unit\n Can be specify the unit if datetime coordinate is used. 
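A sketch of differentiate in use; the unevenly spaced coordinate below is illustrative, and the comparison against numpy.gradient reflects the duck_array_ops.gradient call above:

import numpy as np
import xarray as xr

x = np.array([0.0, 0.1, 1.1, 1.2])               # unevenly spaced coordinate
ds = xr.Dataset({"y": ("x", x ** 2)}, coords={"x": x})

print(ds.differentiate("x")["y"].values)          # second-order central differences
print(np.gradient(x ** 2, x))                     # same numbers straight from numpy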
One of\n {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', 'ps', 'fs',\n 'as'}\n\n Returns\n -------\n integrated: Dataset\n\n See also\n --------\n DataArray.integrate\n numpy.trapz: corresponding numpy function\n \"\"\"\n if not isinstance(coord, (list, tuple)):\n coord = (coord,)\n result = self\n for c in coord:\n result = result._integrate_one(c, datetime_unit=datetime_unit)\n return result\n\n def _integrate_one(self, coord, datetime_unit=None):\n from .variable import Variable\n\n if coord not in self.variables and coord not in self.dims:\n raise ValueError(\"Coordinate {} does not exist.\".format(coord))\n\n coord_var = self[coord].variable\n if coord_var.ndim != 1:\n raise ValueError(\n \"Coordinate {} must be 1 dimensional but is {}\"\n \" dimensional\".format(coord, coord_var.ndim)\n )\n\n dim = coord_var.dims[0]\n if _contains_datetime_like_objects(coord_var):\n if coord_var.dtype.kind in \"mM\" and datetime_unit is None:\n datetime_unit, _ = np.datetime_data(coord_var.dtype)\n elif datetime_unit is None:\n datetime_unit = \"s\" # Default to seconds for cftime objects\n coord_var = datetime_to_numeric(coord_var, datetime_unit=datetime_unit)\n\n variables = {}\n coord_names = set()\n for k, v in self.variables.items():\n if k in self.coords:\n if dim not in v.dims:\n variables[k] = v\n coord_names.add(k)\n else:\n if k in self.data_vars and dim in v.dims:\n if _contains_datetime_like_objects(v):\n v = datetime_to_numeric(v, datetime_unit=datetime_unit)\n integ = duck_array_ops.trapz(\n v.data, coord_var.data, axis=v.get_axis_num(dim)\n )\n v_dims = list(v.dims)\n v_dims.remove(dim)\n variables[k] = Variable(v_dims, integ)\n else:\n variables[k] = v\n indexes = {k: v for k, v in self.indexes.items() if k in variables}\n return self._replace_with_new_dims(\n variables, coord_names=coord_names, indexes=indexes\n )\n\n @property\n def real(self):\n return self._unary_op(lambda x: x.real, keep_attrs=True)(self)\n\n @property\n def imag(self):\n return self._unary_op(lambda x: x.imag, keep_attrs=True)(self)\n\n @property\n def plot(self):\n \"\"\"\n Access plotting functions. Use it as a namespace to use\n xarray.plot functions as Dataset methods\n\n >>> ds.plot.scatter(...) # equivalent to xarray.plot.scatter(ds,...)\n\n \"\"\"\n return _Dataset_PlotMethods(self)\n\n def filter_by_attrs(self, **kwargs):\n \"\"\"Returns a ``Dataset`` with variables that match specific conditions.\n\n Can pass in ``key=value`` or ``key=callable``. A Dataset is returned\n containing only the variables for which all the filter tests pass.\n These tests are either ``key=value`` for which the attribute ``key``\n has the exact value ``value`` or the callable passed into\n ``key=callable`` returns True. 
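A sketch of integrate in use, with an illustrative sine curve and numpy.trapz as the reference that the trapezoidal-rule implementation above wraps:

import numpy as np
import xarray as xr

x = np.linspace(0.0, np.pi, 101)
ds = xr.Dataset({"y": ("x", np.sin(x))}, coords={"x": x})

print(float(ds.integrate("x")["y"]))   # ~2.0: trapezoidal integral of sin over [0, pi]
print(np.trapz(np.sin(x), x))          # reference value computed directly with numpy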
The callable will be passed a single\n value, either the value of the attribute ``key`` or ``None`` if the\n DataArray does not have an attribute with the name ``key``.\n\n Parameters\n ----------\n **kwargs : key=value\n key : str\n Attribute name.\n value : callable or obj\n If value is a callable, it should return a boolean in the form\n of bool = func(attr) where attr is da.attrs[key].\n Otherwise, value will be compared to the each\n DataArray's attrs[key].\n\n Returns\n -------\n new : Dataset\n New dataset with variables filtered by attribute.\n\n Examples\n --------\n >>> # Create an example dataset:\n >>> import numpy as np\n >>> import pandas as pd\n >>> import xarray as xr\n >>> temp = 15 + 8 * np.random.randn(2, 2, 3)\n >>> precip = 10 * np.random.rand(2, 2, 3)\n >>> lon = [[-99.83, -99.32], [-99.79, -99.23]]\n >>> lat = [[42.25, 42.21], [42.63, 42.59]]\n >>> dims = ['x', 'y', 'time']\n >>> temp_attr = dict(standard_name='air_potential_temperature')\n >>> precip_attr = dict(standard_name='convective_precipitation_flux')\n >>> ds = xr.Dataset({\n ... 'temperature': (dims, temp, temp_attr),\n ... 'precipitation': (dims, precip, precip_attr)},\n ... coords={\n ... 'lon': (['x', 'y'], lon),\n ... 'lat': (['x', 'y'], lat),\n ... 'time': pd.date_range('2014-09-06', periods=3),\n ... 'reference_time': pd.Timestamp('2014-09-05')})\n >>> # Get variables matching a specific standard_name.\n >>> ds.filter_by_attrs(standard_name='convective_precipitation_flux')\n \n Dimensions: (time: 3, x: 2, y: 2)\n Coordinates:\n * x (x) int64 0 1\n * time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08\n lat (x, y) float64 42.25 42.21 42.63 42.59\n * y (y) int64 0 1\n reference_time datetime64[ns] 2014-09-05\n lon (x, y) float64 -99.83 -99.32 -99.79 -99.23\n Data variables:\n precipitation (x, y, time) float64 4.178 2.307 6.041 6.046 0.06648 ...\n >>> # Get all variables that have a standard_name attribute.\n >>> standard_name = lambda v: v is not None\n >>> ds.filter_by_attrs(standard_name=standard_name)\n \n Dimensions: (time: 3, x: 2, y: 2)\n Coordinates:\n lon (x, y) float64 -99.83 -99.32 -99.79 -99.23\n lat (x, y) float64 42.25 42.21 42.63 42.59\n * x (x) int64 0 1\n * y (y) int64 0 1\n * time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08\n reference_time datetime64[ns] 2014-09-05\n Data variables:\n temperature (x, y, time) float64 25.86 20.82 6.954 23.13 10.25 11.68 ...\n precipitation (x, y, time) float64 5.702 0.9422 2.075 1.178 3.284 ...\n\n \"\"\"\n selection = []\n for var_name, variable in self.variables.items():\n has_value_flag = False\n for attr_name, pattern in kwargs.items():\n attr_value = variable.attrs.get(attr_name)\n if (callable(pattern) and pattern(attr_value)) or attr_value == pattern:\n has_value_flag = True\n else:\n has_value_flag = False\n break\n if has_value_flag is True:\n selection.append(var_name)\n return self[selection]\n\n def unify_chunks(self) -> \"Dataset\":\n \"\"\" Unify chunk size along all chunked dimensions of this Dataset.\n\n Returns\n -------\n\n Dataset with consistent chunk sizes for all dask-array variables\n\n See Also\n --------\n\n dask.array.core.unify_chunks\n \"\"\"\n\n try:\n self.chunks\n except ValueError: # \"inconsistent chunks\"\n pass\n else:\n # No variables with dask backend, or all chunks are already aligned\n return self.copy()\n\n # import dask is placed after the quick exit test above to allow\n # running this method if dask isn't installed and there are no chunks\n import dask.array\n\n ds = self.copy()\n\n 
dims_pos_map = {dim: index for index, dim in enumerate(ds.dims)}\n\n dask_array_names = []\n dask_unify_args = []\n for name, variable in ds.variables.items():\n if isinstance(variable.data, dask.array.Array):\n dims_tuple = [dims_pos_map[dim] for dim in variable.dims]\n dask_array_names.append(name)\n dask_unify_args.append(variable.data)\n dask_unify_args.append(dims_tuple)\n\n _, rechunked_arrays = dask.array.core.unify_chunks(*dask_unify_args)\n\n for name, new_array in zip(dask_array_names, rechunked_arrays):\n ds.variables[name]._data = new_array\n\n return ds\n\n def map_blocks(\n self,\n func: \"Callable[..., T_DSorDA]\",\n args: Sequence[Any] = (),\n kwargs: Mapping[str, Any] = None,\n ) -> \"T_DSorDA\":\n \"\"\"\n Apply a function to each chunk of this Dataset. This method is experimental and\n its signature may change.\n\n Parameters\n ----------\n func: callable\n User-provided function that accepts a Dataset as its first parameter. The\n function will receive a subset of this Dataset, corresponding to one chunk\n along each chunked dimension. ``func`` will be executed as\n ``func(obj_subset, *args, **kwargs)``.\n\n The function will be first run on mocked-up data, that looks like this\n Dataset but has sizes 0, to determine properties of the returned object such\n as dtype, variable names, new dimensions and new indexes (if any).\n\n This function must return either a single DataArray or a single Dataset.\n\n This function cannot change size of existing dimensions, or add new chunked\n dimensions.\n args: Sequence\n Passed verbatim to func after unpacking, after the sliced DataArray. xarray\n objects, if any, will not be split by chunks. Passing dask collections is\n not allowed.\n kwargs: Mapping\n Passed verbatim to func after unpacking. xarray objects, if any, will not be\n split by chunks. Passing dask collections is not allowed.\n\n Returns\n -------\n A single DataArray or Dataset with dask backend, reassembled from the outputs of\n the function.\n\n Notes\n -----\n This method is designed for when one needs to manipulate a whole xarray object\n within each chunk. 
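A hedged sketch of the experimental map_blocks API described above; the chunking, the demean helper and the sample data are illustrative, and dask must be installed:

import numpy as np
import xarray as xr

ds = xr.Dataset({"a": ("x", np.arange(10.0))}).chunk({"x": 5})

def demean(chunk):
    # receives one chunk as a Dataset and must return a compatible Dataset
    return chunk - chunk.mean()

result = ds.map_blocks(demean)   # lazy; evaluated chunk-by-chunk
print(result.compute())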
In the more common case where one can work on numpy arrays,\n it is recommended to use apply_ufunc.\n\n If none of the variables in this Dataset is backed by dask, calling this method\n is equivalent to calling ``func(self, *args, **kwargs)``.\n\n See Also\n --------\n dask.array.map_blocks, xarray.apply_ufunc, xarray.map_blocks,\n xarray.DataArray.map_blocks\n \"\"\"\n from .parallel import map_blocks\n\n return map_blocks(func, self, args, kwargs)"},{"col":4,"comment":"Shortcut around __init__ for internal use when we want to skip\n costly validation\n ","endLoc":856,"header":"@classmethod\n def _construct_direct(\n cls,\n variables,\n coord_names,\n dims=None,\n attrs=None,\n indexes=None,\n encoding=None,\n file_obj=None,\n )","id":551,"name":"_construct_direct","nodeType":"Function","startLoc":831,"text":"@classmethod\n def _construct_direct(\n cls,\n variables,\n coord_names,\n dims=None,\n attrs=None,\n indexes=None,\n encoding=None,\n file_obj=None,\n ):\n \"\"\"Shortcut around __init__ for internal use when we want to skip\n costly validation\n \"\"\"\n if dims is None:\n dims = calculate_dimensions(variables)\n obj = object.__new__(cls)\n obj._variables = variables\n obj._coord_names = coord_names\n obj._dims = dims\n obj._indexes = indexes\n obj._attrs = attrs\n obj._file_obj = file_obj\n obj._encoding = encoding\n obj._accessors = None\n return obj"},{"attributeType":"null","col":4,"comment":"null","endLoc":34,"id":552,"name":"__slots__","nodeType":"Attribute","startLoc":34,"text":"__slots__"},{"className":"ImplementsDatasetReduce","col":0,"comment":"null","endLoc":110,"id":553,"nodeType":"Class","startLoc":76,"text":"class ImplementsDatasetReduce:\n __slots__ = ()\n\n @classmethod\n def _reduce_method(cls, func: Callable, include_skipna: bool, numeric_only: bool):\n if include_skipna:\n\n def wrapped_func(self, dim=None, skipna=None, **kwargs):\n return self.reduce(\n func,\n dim,\n skipna=skipna,\n numeric_only=numeric_only,\n allow_lazy=True,\n **kwargs\n )\n\n else:\n\n def wrapped_func(self, dim=None, **kwargs): # type: ignore\n return self.reduce(\n func, dim, numeric_only=numeric_only, allow_lazy=True, **kwargs\n )\n\n return wrapped_func\n\n _reduce_extra_args_docstring = \"\"\"dim : str or sequence of str, optional\n Dimension(s) over which to apply `{name}`. By default `{name}` is\n applied over all dimensions.\"\"\"\n\n _cum_extra_args_docstring = \"\"\"dim : str or sequence of str, optional\n Dimension over which to apply `{name}`.\n axis : int or sequence of int, optional\n Axis over which to apply `{name}`. 
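A sketch of what the wrapped_func generated by _reduce_method ultimately does: the injected ds.mean reduction and an explicit ds.reduce call give the same answer (sample data illustrative):

import numpy as np
import xarray as xr

ds = xr.Dataset({"a": ("x", [1.0, np.nan, 3.0])})

print(float(ds.mean("x", skipna=True)["a"]))        # 2.0, via the injected reduction
print(float(ds.reduce(np.nanmean, dim="x")["a"]))   # same result, spelled explicitly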
Only one of the 'dim'\n and 'axis' arguments can be supplied.\"\"\""},{"col":4,"comment":"null","endLoc":100,"header":"@classmethod\n def _reduce_method(cls, func: Callable, include_skipna: bool, numeric_only: bool)","id":554,"name":"_reduce_method","nodeType":"Function","startLoc":79,"text":"@classmethod\n def _reduce_method(cls, func: Callable, include_skipna: bool, numeric_only: bool):\n if include_skipna:\n\n def wrapped_func(self, dim=None, skipna=None, **kwargs):\n return self.reduce(\n func,\n dim,\n skipna=skipna,\n numeric_only=numeric_only,\n allow_lazy=True,\n **kwargs\n )\n\n else:\n\n def wrapped_func(self, dim=None, **kwargs): # type: ignore\n return self.reduce(\n func, dim, numeric_only=numeric_only, allow_lazy=True, **kwargs\n )\n\n return wrapped_func"},{"col":4,"comment":"null","endLoc":37,"header":"def __init__(self, array)","id":555,"name":"__init__","nodeType":"Function","startLoc":36,"text":"def __init__(self, array):\n self.array = indexing.as_indexable(array)"},{"className":"MergeError","col":0,"comment":"Error class for merge failures due to incompatible arguments.\n ","endLoc":77,"id":556,"nodeType":"Class","startLoc":72,"text":"class MergeError(ValueError):\n \"\"\"Error class for merge failures due to incompatible arguments.\n \"\"\"\n\n # inherits from ValueError for backward compatibility\n # TODO: move this to an xarray.exceptions module?"},{"className":"_MergeResult","col":0,"comment":"null","endLoc":487,"id":557,"nodeType":"Class","startLoc":483,"text":"class _MergeResult(NamedTuple):\n variables: Dict[Hashable, Variable]\n coord_names: Set[Hashable]\n dims: Dict[Hashable, int]\n indexes: Dict[Hashable, pd.Index]"},{"col":4,"comment":"Fastpath constructor for internal use.\n\n Returns an object with optionally with replaced attributes.\n\n Explicitly passed arguments are *not* copied when placed on the new\n dataset. It is up to the caller to ensure that they have the right type\n and are not used elsewhere.\n ","endLoc":915,"header":"def _replace( # type: ignore\n self,\n variables: Dict[Hashable, Variable] = None,\n coord_names: Set[Hashable] = None,\n dims: Dict[Any, int] = None,\n attrs: Optional[Dict[Hashable, Any]] = __default,\n indexes: Optional[Dict[Any, pd.Index]] = __default,\n encoding: Optional[dict] = __default,\n inplace: bool = False,\n ) -> \"Dataset\"","id":558,"name":"_replace","nodeType":"Function","startLoc":867,"text":"def _replace( # type: ignore\n self,\n variables: Dict[Hashable, Variable] = None,\n coord_names: Set[Hashable] = None,\n dims: Dict[Any, int] = None,\n attrs: Optional[Dict[Hashable, Any]] = __default,\n indexes: Optional[Dict[Any, pd.Index]] = __default,\n encoding: Optional[dict] = __default,\n inplace: bool = False,\n ) -> \"Dataset\":\n \"\"\"Fastpath constructor for internal use.\n\n Returns an object with optionally with replaced attributes.\n\n Explicitly passed arguments are *not* copied when placed on the new\n dataset. 
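An illustrative sketch of MergeError, defined above, being raised for conflicting same-named variables under the default compat="no_conflicts":

import xarray as xr

a = xr.Dataset({"v": ("x", [1, 2, 3])})
b = xr.Dataset({"v": ("x", [9, 9, 9])})

try:
    xr.merge([a, b])              # non-null values disagree, so merging fails
except xr.MergeError as err:
    print("merge failed:", err)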
It is up to the caller to ensure that they have the right type\n and are not used elsewhere.\n \"\"\"\n if inplace:\n if variables is not None:\n self._variables = variables\n if coord_names is not None:\n self._coord_names = coord_names\n if dims is not None:\n self._dims = dims\n if attrs is not self.__default:\n self._attrs = attrs\n if indexes is not self.__default:\n self._indexes = indexes\n if encoding is not self.__default:\n self._encoding = encoding\n obj = self\n else:\n if variables is None:\n variables = self._variables.copy()\n if coord_names is None:\n coord_names = self._coord_names.copy()\n if dims is None:\n dims = self._dims.copy()\n if attrs is self.__default:\n attrs = copy.copy(self._attrs)\n if indexes is self.__default:\n indexes = copy.copy(self._indexes)\n if encoding is self.__default:\n encoding = copy.copy(self._encoding)\n obj = self._construct_direct(\n variables, coord_names, dims, attrs, indexes, encoding\n )\n return obj"},{"attributeType":"null","col":4,"comment":"null","endLoc":484,"id":559,"name":"variables","nodeType":"Attribute","startLoc":484,"text":"variables"},{"attributeType":"null","col":4,"comment":"null","endLoc":485,"id":560,"name":"coord_names","nodeType":"Attribute","startLoc":485,"text":"coord_names"},{"attributeType":"null","col":4,"comment":"null","endLoc":486,"id":561,"name":"dims","nodeType":"Attribute","startLoc":486,"text":"dims"},{"attributeType":"null","col":4,"comment":"null","endLoc":487,"id":562,"name":"indexes","nodeType":"Attribute","startLoc":487,"text":"indexes"},{"col":4,"comment":"null","endLoc":68,"header":"def __init__(self, array)","id":563,"name":"__init__","nodeType":"Function","startLoc":67,"text":"def __init__(self, array):\n self.array = indexing.as_indexable(array)"},{"col":0,"comment":"Merge any number of xarray objects into a single Dataset as variables.\n\n Parameters\n ----------\n objects : Iterable[Union[xarray.Dataset, xarray.DataArray, dict]]\n Merge together all variables from these objects. If any of them are\n DataArray objects, they must have a name.\n compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional\n String indicating how to compare variables of the same name for\n potential conflicts:\n\n - 'broadcast_equals': all values must be equal when variables are\n broadcast against each other to ensure common dimensions.\n - 'equals': all values and dimensions must be the same.\n - 'identical': all values, dimensions and attributes must be the\n same.\n - 'no_conflicts': only values which are not null in both datasets\n must be equal. The returned dataset then contains the combination\n of all non-null values.\n - 'override': skip comparing and pick variable from first dataset\n join : {'outer', 'inner', 'left', 'right', 'exact'}, optional\n String indicating how to combine differing indexes in objects.\n\n - 'outer': use the union of object indexes\n - 'inner': use the intersection of object indexes\n - 'left': use indexes from the first object with each dimension\n - 'right': use indexes from the last object with each dimension\n - 'exact': instead of aligning, raise `ValueError` when indexes to be\n aligned are not equal\n - 'override': if indexes are of same size, rewrite indexes to be\n those of the first object with that dimension. 
Indexes for the same\n dimension must have the same size in all objects.\n fill_value : scalar, optional\n Value to use for newly missing values\n\n Returns\n -------\n Dataset\n Dataset with combined variables from each object.\n\n Examples\n --------\n >>> import xarray as xr\n >>> x = xr.DataArray(\n ... [[1.0, 2.0], [3.0, 5.0]],\n ... dims=(\"lat\", \"lon\"),\n ... coords={\"lat\": [35.0, 40.0], \"lon\": [100.0, 120.0]},\n ... name=\"var1\",\n ... )\n >>> y = xr.DataArray(\n ... [[5.0, 6.0], [7.0, 8.0]],\n ... dims=(\"lat\", \"lon\"),\n ... coords={\"lat\": [35.0, 42.0], \"lon\": [100.0, 150.0]},\n ... name=\"var2\",\n ... )\n >>> z = xr.DataArray(\n ... [[0.0, 3.0], [4.0, 9.0]],\n ... dims=(\"time\", \"lon\"),\n ... coords={\"time\": [30.0, 60.0], \"lon\": [100.0, 150.0]},\n ... name=\"var3\",\n ... )\n\n >>> x\n \n array([[1., 2.],\n [3., 5.]])\n Coordinates:\n * lat (lat) float64 35.0 40.0\n * lon (lon) float64 100.0 120.0\n\n >>> y\n \n array([[5., 6.],\n [7., 8.]])\n Coordinates:\n * lat (lat) float64 35.0 42.0\n * lon (lon) float64 100.0 150.0\n\n >>> z\n \n array([[0., 3.],\n [4., 9.]])\n Coordinates:\n * time (time) float64 30.0 60.0\n * lon (lon) float64 100.0 150.0\n\n >>> xr.merge([x, y, z])\n \n Dimensions: (lat: 3, lon: 3, time: 2)\n Coordinates:\n * lat (lat) float64 35.0 40.0 42.0\n * lon (lon) float64 100.0 120.0 150.0\n * time (time) float64 30.0 60.0\n Data variables:\n var1 (lat, lon) float64 1.0 2.0 nan 3.0 5.0 nan nan nan nan\n var2 (lat, lon) float64 5.0 nan 6.0 nan nan nan 7.0 nan 8.0\n var3 (time, lon) float64 0.0 nan 3.0 4.0 nan 9.0\n\n >>> xr.merge([x, y, z], compat='identical')\n \n Dimensions: (lat: 3, lon: 3, time: 2)\n Coordinates:\n * lat (lat) float64 35.0 40.0 42.0\n * lon (lon) float64 100.0 120.0 150.0\n * time (time) float64 30.0 60.0\n Data variables:\n var1 (lat, lon) float64 1.0 2.0 nan 3.0 5.0 nan nan nan nan\n var2 (lat, lon) float64 5.0 nan 6.0 nan nan nan 7.0 nan 8.0\n var3 (time, lon) float64 0.0 nan 3.0 4.0 nan 9.0\n\n >>> xr.merge([x, y, z], compat='equals')\n \n Dimensions: (lat: 3, lon: 3, time: 2)\n Coordinates:\n * lat (lat) float64 35.0 40.0 42.0\n * lon (lon) float64 100.0 120.0 150.0\n * time (time) float64 30.0 60.0\n Data variables:\n var1 (lat, lon) float64 1.0 2.0 nan 3.0 5.0 nan nan nan nan\n var2 (lat, lon) float64 5.0 nan 6.0 nan nan nan 7.0 nan 8.0\n var3 (time, lon) float64 0.0 nan 3.0 4.0 nan 9.0\n\n >>> xr.merge([x, y, z], compat='equals', fill_value=-999.)\n \n Dimensions: (lat: 3, lon: 3, time: 2)\n Coordinates:\n * lat (lat) float64 35.0 40.0 42.0\n * lon (lon) float64 100.0 120.0 150.0\n * time (time) float64 30.0 60.0\n Data variables:\n var1 (lat, lon) float64 1.0 2.0 -999.0 3.0 ... -999.0 -999.0 -999.0\n var2 (lat, lon) float64 5.0 -999.0 6.0 -999.0 ... 
-999.0 7.0 -999.0 8.0\n var3 (time, lon) float64 0.0 -999.0 3.0 4.0 -999.0 9.0\n\n >>> xr.merge([x, y, z], join='override')\n \n Dimensions: (lat: 2, lon: 2, time: 2)\n Coordinates:\n * lat (lat) float64 35.0 40.0\n * lon (lon) float64 100.0 120.0\n * time (time) float64 30.0 60.0\n Data variables:\n var1 (lat, lon) float64 1.0 2.0 3.0 5.0\n var2 (lat, lon) float64 5.0 6.0 7.0 8.0\n var3 (time, lon) float64 0.0 3.0 4.0 9.0\n\n >>> xr.merge([x, y, z], join='inner')\n \n Dimensions: (lat: 1, lon: 1, time: 2)\n Coordinates:\n * lat (lat) float64 35.0\n * lon (lon) float64 100.0\n * time (time) float64 30.0 60.0\n Data variables:\n var1 (lat, lon) float64 1.0\n var2 (lat, lon) float64 5.0\n var3 (time, lon) float64 0.0 4.0\n\n >>> xr.merge([x, y, z], compat='identical', join='inner')\n \n Dimensions: (lat: 1, lon: 1, time: 2)\n Coordinates:\n * lat (lat) float64 35.0\n * lon (lon) float64 100.0\n * time (time) float64 30.0 60.0\n Data variables:\n var1 (lat, lon) float64 1.0\n var2 (lat, lon) float64 5.0\n var3 (time, lon) float64 0.0 4.0\n\n >>> xr.merge([x, y, z], compat='broadcast_equals', join='outer')\n \n Dimensions: (lat: 3, lon: 3, time: 2)\n Coordinates:\n * lat (lat) float64 35.0 40.0 42.0\n * lon (lon) float64 100.0 120.0 150.0\n * time (time) float64 30.0 60.0\n Data variables:\n var1 (lat, lon) float64 1.0 2.0 nan 3.0 5.0 nan nan nan nan\n var2 (lat, lon) float64 5.0 nan 6.0 nan nan nan 7.0 nan 8.0\n var3 (time, lon) float64 0.0 nan 3.0 4.0 nan 9.0\n\n >>> xr.merge([x, y, z], join='exact')\n Traceback (most recent call last):\n ...\n ValueError: indexes along dimension 'lat' are not equal\n\n Raises\n ------\n xarray.MergeError\n If any variables with the same name have conflicting values.\n\n See also\n --------\n concat\n ","endLoc":784,"header":"def merge(\n objects: Iterable[Union[\"DataArray\", \"CoercibleMapping\"]],\n compat: str = \"no_conflicts\",\n join: str = \"outer\",\n fill_value: object = dtypes.NA,\n) -> \"Dataset\"","id":564,"name":"merge","nodeType":"Function","startLoc":566,"text":"def merge(\n objects: Iterable[Union[\"DataArray\", \"CoercibleMapping\"]],\n compat: str = \"no_conflicts\",\n join: str = \"outer\",\n fill_value: object = dtypes.NA,\n) -> \"Dataset\":\n \"\"\"Merge any number of xarray objects into a single Dataset as variables.\n\n Parameters\n ----------\n objects : Iterable[Union[xarray.Dataset, xarray.DataArray, dict]]\n Merge together all variables from these objects. If any of them are\n DataArray objects, they must have a name.\n compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional\n String indicating how to compare variables of the same name for\n potential conflicts:\n\n - 'broadcast_equals': all values must be equal when variables are\n broadcast against each other to ensure common dimensions.\n - 'equals': all values and dimensions must be the same.\n - 'identical': all values, dimensions and attributes must be the\n same.\n - 'no_conflicts': only values which are not null in both datasets\n must be equal. 
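A brief sketch, under the signature documented above, of passing a plain dict of variables to merge alongside a Dataset (names and values illustrative):

import xarray as xr

ds = xr.Dataset({"a": ("x", [1, 2, 3])}, coords={"x": [0, 1, 2]})
extra = {"b": ("x", [10, 20, 30])}   # plain dict of name -> (dims, data)
print(xr.merge([ds, extra]))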
The returned dataset then contains the combination\n of all non-null values.\n - 'override': skip comparing and pick variable from first dataset\n join : {'outer', 'inner', 'left', 'right', 'exact'}, optional\n String indicating how to combine differing indexes in objects.\n\n - 'outer': use the union of object indexes\n - 'inner': use the intersection of object indexes\n - 'left': use indexes from the first object with each dimension\n - 'right': use indexes from the last object with each dimension\n - 'exact': instead of aligning, raise `ValueError` when indexes to be\n aligned are not equal\n - 'override': if indexes are of same size, rewrite indexes to be\n those of the first object with that dimension. Indexes for the same\n dimension must have the same size in all objects.\n fill_value : scalar, optional\n Value to use for newly missing values\n\n Returns\n -------\n Dataset\n Dataset with combined variables from each object.\n\n Examples\n --------\n >>> import xarray as xr\n >>> x = xr.DataArray(\n ... [[1.0, 2.0], [3.0, 5.0]],\n ... dims=(\"lat\", \"lon\"),\n ... coords={\"lat\": [35.0, 40.0], \"lon\": [100.0, 120.0]},\n ... name=\"var1\",\n ... )\n >>> y = xr.DataArray(\n ... [[5.0, 6.0], [7.0, 8.0]],\n ... dims=(\"lat\", \"lon\"),\n ... coords={\"lat\": [35.0, 42.0], \"lon\": [100.0, 150.0]},\n ... name=\"var2\",\n ... )\n >>> z = xr.DataArray(\n ... [[0.0, 3.0], [4.0, 9.0]],\n ... dims=(\"time\", \"lon\"),\n ... coords={\"time\": [30.0, 60.0], \"lon\": [100.0, 150.0]},\n ... name=\"var3\",\n ... )\n\n >>> x\n \n array([[1., 2.],\n [3., 5.]])\n Coordinates:\n * lat (lat) float64 35.0 40.0\n * lon (lon) float64 100.0 120.0\n\n >>> y\n \n array([[5., 6.],\n [7., 8.]])\n Coordinates:\n * lat (lat) float64 35.0 42.0\n * lon (lon) float64 100.0 150.0\n\n >>> z\n \n array([[0., 3.],\n [4., 9.]])\n Coordinates:\n * time (time) float64 30.0 60.0\n * lon (lon) float64 100.0 150.0\n\n >>> xr.merge([x, y, z])\n \n Dimensions: (lat: 3, lon: 3, time: 2)\n Coordinates:\n * lat (lat) float64 35.0 40.0 42.0\n * lon (lon) float64 100.0 120.0 150.0\n * time (time) float64 30.0 60.0\n Data variables:\n var1 (lat, lon) float64 1.0 2.0 nan 3.0 5.0 nan nan nan nan\n var2 (lat, lon) float64 5.0 nan 6.0 nan nan nan 7.0 nan 8.0\n var3 (time, lon) float64 0.0 nan 3.0 4.0 nan 9.0\n\n >>> xr.merge([x, y, z], compat='identical')\n \n Dimensions: (lat: 3, lon: 3, time: 2)\n Coordinates:\n * lat (lat) float64 35.0 40.0 42.0\n * lon (lon) float64 100.0 120.0 150.0\n * time (time) float64 30.0 60.0\n Data variables:\n var1 (lat, lon) float64 1.0 2.0 nan 3.0 5.0 nan nan nan nan\n var2 (lat, lon) float64 5.0 nan 6.0 nan nan nan 7.0 nan 8.0\n var3 (time, lon) float64 0.0 nan 3.0 4.0 nan 9.0\n\n >>> xr.merge([x, y, z], compat='equals')\n \n Dimensions: (lat: 3, lon: 3, time: 2)\n Coordinates:\n * lat (lat) float64 35.0 40.0 42.0\n * lon (lon) float64 100.0 120.0 150.0\n * time (time) float64 30.0 60.0\n Data variables:\n var1 (lat, lon) float64 1.0 2.0 nan 3.0 5.0 nan nan nan nan\n var2 (lat, lon) float64 5.0 nan 6.0 nan nan nan 7.0 nan 8.0\n var3 (time, lon) float64 0.0 nan 3.0 4.0 nan 9.0\n\n >>> xr.merge([x, y, z], compat='equals', fill_value=-999.)\n \n Dimensions: (lat: 3, lon: 3, time: 2)\n Coordinates:\n * lat (lat) float64 35.0 40.0 42.0\n * lon (lon) float64 100.0 120.0 150.0\n * time (time) float64 30.0 60.0\n Data variables:\n var1 (lat, lon) float64 1.0 2.0 -999.0 3.0 ... -999.0 -999.0 -999.0\n var2 (lat, lon) float64 5.0 -999.0 6.0 -999.0 ... 
-999.0 7.0 -999.0 8.0\n var3 (time, lon) float64 0.0 -999.0 3.0 4.0 -999.0 9.0\n\n >>> xr.merge([x, y, z], join='override')\n \n Dimensions: (lat: 2, lon: 2, time: 2)\n Coordinates:\n * lat (lat) float64 35.0 40.0\n * lon (lon) float64 100.0 120.0\n * time (time) float64 30.0 60.0\n Data variables:\n var1 (lat, lon) float64 1.0 2.0 3.0 5.0\n var2 (lat, lon) float64 5.0 6.0 7.0 8.0\n var3 (time, lon) float64 0.0 3.0 4.0 9.0\n\n >>> xr.merge([x, y, z], join='inner')\n \n Dimensions: (lat: 1, lon: 1, time: 2)\n Coordinates:\n * lat (lat) float64 35.0\n * lon (lon) float64 100.0\n * time (time) float64 30.0 60.0\n Data variables:\n var1 (lat, lon) float64 1.0\n var2 (lat, lon) float64 5.0\n var3 (time, lon) float64 0.0 4.0\n\n >>> xr.merge([x, y, z], compat='identical', join='inner')\n \n Dimensions: (lat: 1, lon: 1, time: 2)\n Coordinates:\n * lat (lat) float64 35.0\n * lon (lon) float64 100.0\n * time (time) float64 30.0 60.0\n Data variables:\n var1 (lat, lon) float64 1.0\n var2 (lat, lon) float64 5.0\n var3 (time, lon) float64 0.0 4.0\n\n >>> xr.merge([x, y, z], compat='broadcast_equals', join='outer')\n \n Dimensions: (lat: 3, lon: 3, time: 2)\n Coordinates:\n * lat (lat) float64 35.0 40.0 42.0\n * lon (lon) float64 100.0 120.0 150.0\n * time (time) float64 30.0 60.0\n Data variables:\n var1 (lat, lon) float64 1.0 2.0 nan 3.0 5.0 nan nan nan nan\n var2 (lat, lon) float64 5.0 nan 6.0 nan nan nan 7.0 nan 8.0\n var3 (time, lon) float64 0.0 nan 3.0 4.0 nan 9.0\n\n >>> xr.merge([x, y, z], join='exact')\n Traceback (most recent call last):\n ...\n ValueError: indexes along dimension 'lat' are not equal\n\n Raises\n ------\n xarray.MergeError\n If any variables with the same name have conflicting values.\n\n See also\n --------\n concat\n \"\"\"\n from .dataarray import DataArray\n from .dataset import Dataset\n\n dict_like_objects = list()\n for obj in objects:\n if not isinstance(obj, (DataArray, Dataset, dict)):\n raise TypeError(\n \"objects must be an iterable containing only \"\n \"Dataset(s), DataArray(s), and dictionaries.\"\n )\n\n obj = obj.to_dataset() if isinstance(obj, DataArray) else obj\n dict_like_objects.append(obj)\n\n merge_result = merge_core(dict_like_objects, compat, join, fill_value=fill_value)\n merged = Dataset._construct_direct(**merge_result._asdict())\n return merged"},{"col":4,"comment":"Convert a DataArray to a Dataset.\n\n Parameters\n ----------\n dim : hashable, optional\n Name of the dimension on this array along which to split this array\n into separate variables. If not provided, this array is converted\n into a Dataset of one variable.\n name : hashable, optional\n Name to substitute for this array's name. Only valid if ``dim`` is\n not provided.\n\n Returns\n -------\n dataset : Dataset\n ","endLoc":515,"header":"def to_dataset(self, dim: Hashable = None, *, name: Hashable = None) -> Dataset","id":565,"name":"to_dataset","nodeType":"Function","startLoc":488,"text":"def to_dataset(self, dim: Hashable = None, *, name: Hashable = None) -> Dataset:\n \"\"\"Convert a DataArray to a Dataset.\n\n Parameters\n ----------\n dim : hashable, optional\n Name of the dimension on this array along which to split this array\n into separate variables. If not provided, this array is converted\n into a Dataset of one variable.\n name : hashable, optional\n Name to substitute for this array's name. Only valid if ``dim`` is\n not provided.\n\n Returns\n -------\n dataset : Dataset\n \"\"\"\n if dim is not None and dim not in self.dims:\n raise TypeError(\n \"{} is not a dim. 
If supplying a ``name``, pass as a kwarg.\".format(dim)\n )\n\n if dim is not None:\n if name is not None:\n raise TypeError(\"cannot supply both dim and name arguments\")\n return self._to_dataset_split(dim)\n else:\n return self._to_dataset_whole(name)"},{"col":4,"comment":"null","endLoc":460,"header":"def _to_dataset_split(self, dim: Hashable) -> Dataset","id":566,"name":"_to_dataset_split","nodeType":"Function","startLoc":447,"text":"def _to_dataset_split(self, dim: Hashable) -> Dataset:\n def subset(dim, label):\n array = self.loc[{dim: label}]\n if dim in array.coords:\n del array.coords[dim]\n array.attrs = {}\n return array\n\n variables = {label: subset(dim, label) for label in self.get_index(dim)}\n\n coords = self.coords.to_dataset()\n if dim in coords:\n del coords[dim]\n return Dataset(variables, coords, self.attrs)"},{"col":0,"comment":"Apply a variable level function over Dataset, dict of DataArray,\n DataArray, Variable and/or ndarray objects.\n ","endLoc":397,"header":"def apply_dataset_vfunc(\n func,\n *args,\n signature,\n join=\"inner\",\n dataset_join=\"exact\",\n fill_value=_NO_FILL_VALUE,\n exclude_dims=frozenset(),\n keep_attrs=False\n)","id":567,"name":"apply_dataset_vfunc","nodeType":"Function","startLoc":350,"text":"def apply_dataset_vfunc(\n func,\n *args,\n signature,\n join=\"inner\",\n dataset_join=\"exact\",\n fill_value=_NO_FILL_VALUE,\n exclude_dims=frozenset(),\n keep_attrs=False\n):\n \"\"\"Apply a variable level function over Dataset, dict of DataArray,\n DataArray, Variable and/or ndarray objects.\n \"\"\"\n from .dataset import Dataset\n\n first_obj = args[0] # we'll copy attrs from this in case keep_attrs=True\n\n if dataset_join not in _JOINS_WITHOUT_FILL_VALUES and fill_value is _NO_FILL_VALUE:\n raise TypeError(\n \"to apply an operation to datasets with different \"\n \"data variables with apply_ufunc, you must supply the \"\n \"dataset_fill_value argument.\"\n )\n\n if len(args) > 1:\n args = deep_align(\n args, join=join, copy=False, exclude=exclude_dims, raise_on_invalid=False\n )\n\n list_of_coords = build_output_coords(args, signature, exclude_dims)\n args = [getattr(arg, \"data_vars\", arg) for arg in args]\n\n result_vars = apply_dict_of_variables_vfunc(\n func, *args, signature=signature, join=dataset_join, fill_value=fill_value\n )\n\n if signature.num_outputs > 1:\n out = tuple(_fast_dataset(*args) for args in zip(result_vars, list_of_coords))\n else:\n coord_vars, = list_of_coords\n out = _fast_dataset(result_vars, coord_vars)\n\n if keep_attrs and isinstance(first_obj, Dataset):\n if isinstance(out, tuple):\n out = tuple(ds._copy_attrs_from(first_obj) for ds in out)\n else:\n out._copy_attrs_from(first_obj)\n return out"},{"col":4,"comment":"Get an index for a dimension, with fall-back to a default RangeIndex\n ","endLoc":379,"header":"def get_index(self, key: Hashable) -> pd.Index","id":569,"name":"get_index","nodeType":"Function","startLoc":369,"text":"def get_index(self, key: Hashable) -> pd.Index:\n \"\"\"Get an index for a dimension, with fall-back to a default RangeIndex\n \"\"\"\n if key not in self.dims:\n raise KeyError(key)\n\n try:\n return self.indexes[key]\n except KeyError:\n # need to ensure dtype=int64 in case range is empty on Python 2\n return pd.Index(range(self.sizes[key]), name=key, dtype=np.int64)"},{"col":0,"comment":"Build output coordinates for an operation.\n\n Parameters\n ----------\n args : list\n List of raw operation arguments. 
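A sketch of DataArray.to_dataset, whose dim/name branches appear in the methods above; the band/reflectance names are illustrative:

import xarray as xr

da = xr.DataArray(
    [[1, 2], [3, 4]],
    dims=("band", "x"),
    coords={"band": ["red", "nir"]},
    name="reflectance",
)

print(da.to_dataset(dim="band"))   # one data variable per label along "band"
print(da.to_dataset(name="refl"))  # the whole array as a single named variable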
Any valid types for xarray operations\n are OK, e.g., scalars, Variable, DataArray, Dataset.\n signature : _UfuncSignature\n Core dimensions signature for the operation.\n exclude_dims : optional set\n Dimensions excluded from the operation. Coordinates along these\n dimensions are dropped.\n\n Returns\n -------\n Dictionary of Variable objects with merged coordinates.\n ","endLoc":211,"header":"def build_output_coords(\n args: list, signature: _UFuncSignature, exclude_dims: AbstractSet = frozenset()\n) -> \"List[Dict[Any, Variable]]\"","id":570,"name":"build_output_coords","nodeType":"Function","startLoc":168,"text":"def build_output_coords(\n args: list, signature: _UFuncSignature, exclude_dims: AbstractSet = frozenset()\n) -> \"List[Dict[Any, Variable]]\":\n \"\"\"Build output coordinates for an operation.\n\n Parameters\n ----------\n args : list\n List of raw operation arguments. Any valid types for xarray operations\n are OK, e.g., scalars, Variable, DataArray, Dataset.\n signature : _UfuncSignature\n Core dimensions signature for the operation.\n exclude_dims : optional set\n Dimensions excluded from the operation. Coordinates along these\n dimensions are dropped.\n\n Returns\n -------\n Dictionary of Variable objects with merged coordinates.\n \"\"\"\n coords_list = _get_coords_list(args)\n\n if len(coords_list) == 1 and not exclude_dims:\n # we can skip the expensive merge\n unpacked_coords, = coords_list\n merged_vars = dict(unpacked_coords.variables)\n else:\n # TODO: save these merged indexes, instead of re-computing them later\n merged_vars, unused_indexes = merge_coordinates_without_align(\n coords_list, exclude_dims=exclude_dims\n )\n\n output_coords = []\n for output_dims in signature.output_core_dims:\n dropped_dims = signature.all_input_core_dims - set(output_dims)\n if dropped_dims:\n filtered = {\n k: v for k, v in merged_vars.items() if dropped_dims.isdisjoint(v.dims)\n }\n else:\n filtered = merged_vars\n output_coords.append(filtered)\n\n return output_coords"},{"col":0,"comment":"null","endLoc":165,"header":"def _get_coords_list(args) -> List[\"Coordinates\"]","id":571,"name":"_get_coords_list","nodeType":"Function","startLoc":156,"text":"def _get_coords_list(args) -> List[\"Coordinates\"]:\n coords_list = []\n for arg in args:\n try:\n coords = arg.coords\n except AttributeError:\n pass # skip this argument\n else:\n coords_list.append(coords)\n return coords_list"},{"col":0,"comment":"null","endLoc":50,"header":"def _get_indexer_at_least_n_items(shape, n_desired, from_end)","id":572,"name":"_get_indexer_at_least_n_items","nodeType":"Function","startLoc":40,"text":"def _get_indexer_at_least_n_items(shape, n_desired, from_end):\n assert 0 < n_desired <= np.prod(shape)\n cum_items = np.cumprod(shape[::-1])\n n_steps = np.argmax(cum_items >= n_desired)\n stop = int(np.ceil(float(n_desired) / np.r_[1, cum_items][n_steps]))\n indexer = (\n ((-1 if from_end else 0),) * (len(shape) - 1 - n_steps)\n + ((slice(-stop, None) if from_end else slice(stop)),)\n + (slice(None),) * n_steps\n )\n return indexer"},{"col":0,"comment":"Returns the first n_desired items of an array","endLoc":69,"header":"def first_n_items(array, n_desired)","id":573,"name":"first_n_items","nodeType":"Function","startLoc":53,"text":"def first_n_items(array, n_desired):\n \"\"\"Returns the first n_desired items of an array\"\"\"\n # Unfortunately, we can't just do array.flat[:n_desired] here because it\n # might not be a numpy.ndarray. 
Moreover, access to elements of the array\n # could be very expensive (e.g. if it's only available over DAP), so go out\n # of our way to get them in a single call to __getitem__ using only slices.\n if n_desired < 1:\n raise ValueError(\"must request at least one item\")\n\n if array.size == 0:\n # work around for https://github.com/numpy/numpy/issues/5195\n return []\n\n if n_desired < array.size:\n indexer = _get_indexer_at_least_n_items(array.shape, n_desired, from_end=False)\n array = array[indexer]\n return np.asarray(array).flat[:n_desired]"},{"col":4,"comment":"null","endLoc":3525,"header":"@overload # noqa: F811\n def drop(\n self, labels: Union[Hashable, Iterable[Hashable]], *, errors: str = \"raise\"\n ) -> \"Dataset\"","id":574,"name":"drop","nodeType":"Function","startLoc":3521,"text":"@overload # noqa: F811\n def drop(\n self, labels: Union[Hashable, Iterable[Hashable]], *, errors: str = \"raise\"\n ) -> \"Dataset\":\n ..."},{"col":4,"comment":"null","endLoc":3532,"header":"@overload # noqa: F811\n def drop(\n self, labels: Any, dim: Hashable, *, errors: str = \"raise\" # array-like\n ) -> \"Dataset\"","id":575,"name":"drop","nodeType":"Function","startLoc":3528,"text":"@overload # noqa: F811\n def drop(\n self, labels: Any, dim: Hashable, *, errors: str = \"raise\" # array-like\n ) -> \"Dataset\":\n ..."},{"col":0,"comment":"Returns the last n_desired items of an array","endLoc":84,"header":"def last_n_items(array, n_desired)","id":576,"name":"last_n_items","nodeType":"Function","startLoc":72,"text":"def last_n_items(array, n_desired):\n \"\"\"Returns the last n_desired items of an array\"\"\"\n # Unfortunately, we can't just do array.flat[-n_desired:] here because it\n # might not be a numpy.ndarray. Moreover, access to elements of the array\n # could be very expensive (e.g. 
if it's only available over DAP), so go out\n # of our way to get them in a single call to __getitem__ using only slices.\n if (n_desired == 0) or (array.size == 0):\n return []\n\n if n_desired < array.size:\n indexer = _get_indexer_at_least_n_items(array.shape, n_desired, from_end=True)\n array = array[indexer]\n return np.asarray(array).flat[-n_desired:]"},{"attributeType":"null","col":17,"comment":"null","endLoc":1,"id":577,"name":"pd","nodeType":"Attribute","startLoc":1,"text":"pd"},{"attributeType":"null","col":32,"comment":"null","endLoc":7,"id":578,"name":"concat_vars","nodeType":"Attribute","startLoc":7,"text":"concat_vars"},{"col":0,"comment":"Apply a variable level function over dicts of DataArray, DataArray,\n Variable and ndarray objects.\n ","endLoc":333,"header":"def apply_dict_of_variables_vfunc(\n func, *args, signature, join=\"inner\", fill_value=None\n)","id":579,"name":"apply_dict_of_variables_vfunc","nodeType":"Function","startLoc":316,"text":"def apply_dict_of_variables_vfunc(\n func, *args, signature, join=\"inner\", fill_value=None\n):\n \"\"\"Apply a variable level function over dicts of DataArray, DataArray,\n Variable and ndarray objects.\n \"\"\"\n args = [_as_variables_or_variable(arg) for arg in args]\n names = join_dict_keys(args, how=join)\n grouped_by_name = collect_dict_values(args, names, fill_value)\n\n result_vars = {}\n for name, variable_args in zip(names, grouped_by_name):\n result_vars[name] = func(*variable_args)\n\n if signature.num_outputs > 1:\n return _unpack_dict_tuples(result_vars, signature.num_outputs)\n else:\n return result_vars"},{"col":0,"comment":"null","endLoc":303,"header":"def _as_variables_or_variable(arg)","id":580,"name":"_as_variables_or_variable","nodeType":"Function","startLoc":296,"text":"def _as_variables_or_variable(arg):\n try:\n return arg.variables\n except AttributeError:\n try:\n return arg.variable\n except AttributeError:\n return arg"},{"col":0,"comment":"null","endLoc":284,"header":"def join_dict_keys(\n objects: Iterable[Union[Mapping, Any]], how: str = \"inner\"\n) -> Iterable","id":581,"name":"join_dict_keys","nodeType":"Function","startLoc":279,"text":"def join_dict_keys(\n objects: Iterable[Union[Mapping, Any]], how: str = \"inner\"\n) -> Iterable:\n joiner = _JOINERS[how]\n all_keys = [obj.keys() for obj in objects if hasattr(obj, \"keys\")]\n return joiner(all_keys)"},{"col":0,"comment":"Returns the last item of an array in a list or an empty list.","endLoc":94,"header":"def last_item(array)","id":582,"name":"last_item","nodeType":"Function","startLoc":87,"text":"def last_item(array):\n \"\"\"Returns the last item of an array in a list or an empty list.\"\"\"\n if array.size == 0:\n # work around for https://github.com/numpy/numpy/issues/5195\n return []\n\n indexer = (slice(-1, None),) * array.ndim\n return np.ravel(np.asarray(array[indexer])).tolist()"},{"col":0,"comment":"null","endLoc":293,"header":"def collect_dict_values(\n objects: Iterable[Union[Mapping, Any]], keys: Iterable, fill_value: object = None\n) -> List[list]","id":583,"name":"collect_dict_values","nodeType":"Function","startLoc":287,"text":"def collect_dict_values(\n objects: Iterable[Union[Mapping, Any]], keys: Iterable, fill_value: object = None\n) -> List[list]:\n return [\n [obj.get(key, fill_value) if is_dict_like(obj) else obj for obj in objects]\n for key in keys\n ]"},{"col":0,"comment":"Cast given object to a Timestamp and return a nicely formatted string","endLoc":114,"header":"def 
format_timestamp(t)","id":584,"name":"format_timestamp","nodeType":"Function","startLoc":97,"text":"def format_timestamp(t):\n \"\"\"Cast given object to a Timestamp and return a nicely formatted string\"\"\"\n # Timestamp is only valid for 1678 to 2262\n try:\n datetime_str = str(pd.Timestamp(t))\n except OutOfBoundsDatetime:\n datetime_str = str(t)\n\n try:\n date_str, time_str = datetime_str.split()\n except ValueError:\n # catch NaT and others that don't split nicely\n return datetime_str\n else:\n if time_str == \"00:00:00\":\n return date_str\n else:\n return \"{}T{}\".format(date_str, time_str)"},{"id":585,"name":"ci","nodeType":"Package"},{"fileName":"min_deps_check.py","filePath":"ci","id":586,"nodeType":"File","text":"\"\"\"Fetch from conda database all available versions of the xarray dependencies and their\npublication date. Compare it against requirements/py36-min-all-deps.yml to verify the\npolicy on obsolete dependencies is being followed. Print a pretty report :)\n\"\"\"\nimport subprocess\nimport sys\nfrom concurrent.futures import ThreadPoolExecutor\nfrom datetime import datetime, timedelta\nfrom typing import Dict, Iterator, Tuple\n\nimport yaml\n\nIGNORE_DEPS = {\n \"black\",\n \"coveralls\",\n \"flake8\",\n \"hypothesis\",\n \"mypy\",\n \"pip\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-env\",\n}\n\nPOLICY_MONTHS = {\"python\": 42, \"numpy\": 24, \"pandas\": 12, \"scipy\": 12}\nPOLICY_MONTHS_DEFAULT = 6\n\nhas_errors = False\n\n\ndef error(msg: str) -> None:\n global has_errors\n has_errors = True\n print(\"ERROR:\", msg)\n\n\ndef parse_requirements(fname) -> Iterator[Tuple[str, int, int]]:\n \"\"\"Load requirements/py36-min-all-deps.yml\n\n Yield (package name, major version, minor version)\n \"\"\"\n global has_errors\n\n with open(fname) as fh:\n contents = yaml.safe_load(fh)\n for row in contents[\"dependencies\"]:\n if isinstance(row, dict) and list(row) == [\"pip\"]:\n continue\n pkg, eq, version = row.partition(\"=\")\n if pkg.rstrip(\"<>\") in IGNORE_DEPS:\n continue\n if pkg.endswith(\"<\") or pkg.endswith(\">\") or eq != \"=\":\n error(\"package should be pinned with exact version: \" + row)\n continue\n try:\n major, minor = version.split(\".\")\n except ValueError:\n error(\"expected major.minor (without patch): \" + row)\n continue\n try:\n yield pkg, int(major), int(minor)\n except ValueError:\n error(\"failed to parse version: \" + row)\n\n\ndef query_conda(pkg: str) -> Dict[Tuple[int, int], datetime]:\n \"\"\"Query the conda repository for a specific package\n\n Return map of {(major version, minor version): publication date}\n \"\"\"\n stdout = subprocess.check_output(\n [\"conda\", \"search\", pkg, \"--info\", \"-c\", \"defaults\", \"-c\", \"conda-forge\"]\n )\n out = {} # type: Dict[Tuple[int, int], datetime]\n major = None\n minor = None\n\n for row in stdout.decode(\"utf-8\").splitlines():\n label, _, value = row.partition(\":\")\n label = label.strip()\n if label == \"file name\":\n value = value.strip()[len(pkg) :]\n major, minor = value.split(\"-\")[1].split(\".\")[:2]\n major = int(major)\n minor = int(minor)\n if label == \"timestamp\":\n assert major is not None\n assert minor is not None\n ts = datetime.strptime(value.split()[0].strip(), \"%Y-%m-%d\")\n\n if (major, minor) in out:\n out[major, minor] = min(out[major, minor], ts)\n else:\n out[major, minor] = ts\n\n # Hardcoded fix to work around incorrect dates in conda\n if pkg == \"python\":\n out.update(\n {\n (2, 7): datetime(2010, 6, 3),\n (3, 5): datetime(2015, 9, 13),\n (3, 6): 
datetime(2016, 12, 23),\n (3, 7): datetime(2018, 6, 27),\n (3, 8): datetime(2019, 10, 14),\n }\n )\n\n return out\n\n\ndef process_pkg(\n pkg: str, req_major: int, req_minor: int\n) -> Tuple[str, int, int, str, int, int, str, str]:\n \"\"\"Compare package version from requirements file to available versions in conda.\n Return row to build pandas dataframe:\n\n - package name\n - major version in requirements file\n - minor version in requirements file\n - publication date of version in requirements file (YYYY-MM-DD)\n - major version suggested by policy\n - minor version suggested by policy\n - publication date of version suggested by policy (YYYY-MM-DD)\n - status (\"<\", \"=\", \"> (!)\")\n \"\"\"\n print(\"Analyzing %s...\" % pkg)\n versions = query_conda(pkg)\n\n try:\n req_published = versions[req_major, req_minor]\n except KeyError:\n error(\"not found in conda: \" + pkg)\n return pkg, req_major, req_minor, \"-\", 0, 0, \"-\", \"(!)\"\n\n policy_months = POLICY_MONTHS.get(pkg, POLICY_MONTHS_DEFAULT)\n policy_published = datetime.now() - timedelta(days=policy_months * 30)\n\n policy_major = req_major\n policy_minor = req_minor\n policy_published_actual = req_published\n for (major, minor), published in reversed(sorted(versions.items())):\n if published < policy_published:\n break\n policy_major = major\n policy_minor = minor\n policy_published_actual = published\n\n if (req_major, req_minor) < (policy_major, policy_minor):\n status = \"<\"\n elif (req_major, req_minor) > (policy_major, policy_minor):\n status = \"> (!)\"\n error(\"Package is too new: \" + pkg)\n else:\n status = \"=\"\n\n return (\n pkg,\n req_major,\n req_minor,\n req_published.strftime(\"%Y-%m-%d\"),\n policy_major,\n policy_minor,\n policy_published_actual.strftime(\"%Y-%m-%d\"),\n status,\n )\n\n\ndef main() -> None:\n fname = sys.argv[1]\n with ThreadPoolExecutor(8) as ex:\n futures = [\n ex.submit(process_pkg, pkg, major, minor)\n for pkg, major, minor in parse_requirements(fname)\n ]\n rows = [f.result() for f in futures]\n\n print(\"Package Required Policy Status\")\n print(\"------------- ----------------- ----------------- ------\")\n fmt = \"{:13} {:>1d}.{:<2d} ({:10}) {:>1d}.{:<2d} ({:10}) {}\"\n for row in rows:\n print(fmt.format(*row))\n\n assert not has_errors\n\n\nif __name__ == \"__main__\":\n main()\n"},{"col":0,"comment":"Cast given object to a Timestamp and return a nicely formatted string","endLoc":131,"header":"def format_timedelta(t, timedelta_format=None)","id":587,"name":"format_timedelta","nodeType":"Function","startLoc":117,"text":"def format_timedelta(t, timedelta_format=None):\n \"\"\"Cast given object to a Timestamp and return a nicely formatted string\"\"\"\n timedelta_str = str(pd.Timedelta(t))\n try:\n days_str, time_str = timedelta_str.split(\" days \")\n except ValueError:\n # catch NaT and others that don't split nicely\n return timedelta_str\n else:\n if timedelta_format == \"date\":\n return days_str + \" days\"\n elif timedelta_format == \"time\":\n return time_str\n else:\n return timedelta_str"},{"col":4,"comment":"null","endLoc":486,"header":"def _to_dataset_whole(\n self, name: Hashable = None, shallow_copy: bool = True\n ) -> Dataset","id":588,"name":"_to_dataset_whole","nodeType":"Function","startLoc":462,"text":"def _to_dataset_whole(\n self, name: Hashable = None, shallow_copy: bool = True\n ) -> Dataset:\n if name is None:\n name = self.name\n if name is None:\n raise ValueError(\n \"unable to convert unnamed DataArray to a \"\n \"Dataset without providing an explicit 
name\"\n )\n if name in self.coords:\n raise ValueError(\n \"cannot create a Dataset from a DataArray with \"\n \"the same name as one of its coordinates\"\n )\n # use private APIs for speed: this is called by _to_temp_dataset(),\n # which is used in the guts of a lot of operations (e.g., reindex)\n variables = self._coords.copy()\n variables[name] = self.variable\n if shallow_copy:\n for k in variables:\n variables[k] = variables[k].copy(deep=False)\n coord_names = set(self._coords)\n dataset = Dataset._from_vars_and_coord_names(variables, coord_names)\n return dataset"},{"col":0,"comment":"Returns a succinct summary of an object as a string","endLoc":145,"header":"def format_item(x, timedelta_format=None, quote_strings=True)","id":589,"name":"format_item","nodeType":"Function","startLoc":134,"text":"def format_item(x, timedelta_format=None, quote_strings=True):\n \"\"\"Returns a succinct summary of an object as a string\"\"\"\n if isinstance(x, (np.datetime64, datetime)):\n return format_timestamp(x)\n if isinstance(x, (np.timedelta64, timedelta)):\n return format_timedelta(x, timedelta_format=timedelta_format)\n elif isinstance(x, (str, bytes)):\n return repr(x) if quote_strings else x\n elif isinstance(x, (float, np.float)):\n return \"{:.4}\".format(x)\n else:\n return str(x)"},{"col":0,"comment":"null","endLoc":313,"header":"def _unpack_dict_tuples(\n result_vars: Mapping[Hashable, Tuple[Variable, ...]], num_outputs: int\n) -> Tuple[Dict[Hashable, Variable], ...]","id":590,"name":"_unpack_dict_tuples","nodeType":"Function","startLoc":306,"text":"def _unpack_dict_tuples(\n result_vars: Mapping[Hashable, Tuple[Variable, ...]], num_outputs: int\n) -> Tuple[Dict[Hashable, Variable], ...]:\n out = tuple({} for _ in range(num_outputs)) # type: ignore\n for name, values in result_vars.items():\n for value, results_dict in zip(values, out):\n results_dict[name] = value\n return out"},{"col":0,"comment":"Returns a succinct summaries of all items in a sequence as strings","endLoc":163,"header":"def format_items(x)","id":591,"name":"format_items","nodeType":"Function","startLoc":148,"text":"def format_items(x):\n \"\"\"Returns a succinct summaries of all items in a sequence as strings\"\"\"\n x = np.asarray(x)\n timedelta_format = \"datetime\"\n if np.issubdtype(x.dtype, np.timedelta64):\n x = np.asarray(x, dtype=\"timedelta64[ns]\")\n day_part = x[~pd.isnull(x)].astype(\"timedelta64[D]\").astype(\"timedelta64[ns]\")\n time_needed = x[~pd.isnull(x)] != day_part\n day_needed = day_part != np.timedelta64(0, \"ns\")\n if np.logical_not(day_needed).all():\n timedelta_format = \"time\"\n elif np.logical_not(time_needed).all():\n timedelta_format = \"date\"\n\n formatted = [format_item(xi, timedelta_format) for xi in x]\n return formatted"},{"col":0,"comment":"Return a formatted string for as many items in the flattened version of\n array that will fit within max_width characters.\n ","endLoc":206,"header":"def format_array_flat(array, max_width)","id":592,"name":"format_array_flat","nodeType":"Function","startLoc":166,"text":"def format_array_flat(array, max_width):\n \"\"\"Return a formatted string for as many items in the flattened version of\n array that will fit within max_width characters.\n \"\"\"\n # every item will take up at least two characters, but we always want to\n # print at least first and last items\n max_possibly_relevant = min(\n max(array.size, 1), max(int(np.ceil(max_width / 2.0)), 2)\n )\n relevant_front_items = format_items(\n first_n_items(array, (max_possibly_relevant + 1) 
// 2)\n )\n relevant_back_items = format_items(last_n_items(array, max_possibly_relevant // 2))\n # interleave relevant front and back items:\n # [a, b, c] and [y, z] -> [a, z, b, y, c]\n relevant_items = sum(\n zip_longest(relevant_front_items, reversed(relevant_back_items)), ()\n )[:max_possibly_relevant]\n\n cum_len = np.cumsum([len(s) + 1 for s in relevant_items]) - 1\n if (array.size > 2) and (\n (max_possibly_relevant < array.size) or (cum_len > max_width).any()\n ):\n padding = \" ... \"\n count = min(\n array.size, max(np.argmax(cum_len + len(padding) - 1 > max_width), 2)\n )\n else:\n count = array.size\n padding = \"\" if (count <= 1) else \" \"\n\n num_front = (count + 1) // 2\n num_back = count - num_front\n # note that num_back is 0 <--> array.size is 0 or 1\n # <--> relevant_back_items is []\n pprint_str = (\n \" \".join(relevant_front_items[:num_front])\n + padding\n + \" \".join(relevant_back_items[-num_back:])\n )\n return pprint_str"},{"col":0,"comment":"Create a dataset as quickly as possible.\n\n Beware: the `variables` dict is modified INPLACE.\n ","endLoc":347,"header":"def _fast_dataset(\n variables: Dict[Hashable, Variable], coord_variables: Mapping[Hashable, Variable]\n) -> \"Dataset\"","id":593,"name":"_fast_dataset","nodeType":"Function","startLoc":336,"text":"def _fast_dataset(\n variables: Dict[Hashable, Variable], coord_variables: Mapping[Hashable, Variable]\n) -> \"Dataset\":\n \"\"\"Create a dataset as quickly as possible.\n\n Beware: the `variables` dict is modified INPLACE.\n \"\"\"\n from .dataset import Dataset\n\n variables.update(coord_variables)\n coord_names = set(coord_variables)\n return Dataset._from_vars_and_coord_names(variables, coord_names)"},{"col":4,"comment":"null","endLoc":862,"header":"@classmethod\n def _from_vars_and_coord_names(cls, variables, coord_names, attrs=None)","id":594,"name":"_from_vars_and_coord_names","nodeType":"Function","startLoc":860,"text":"@classmethod\n def _from_vars_and_coord_names(cls, variables, coord_names, attrs=None):\n return cls._construct_direct(variables, coord_names, attrs=attrs)"},{"col":0,"comment":"null","endLoc":226,"header":"def _protect_dataset_variables_inplace(dataset, cache)","id":595,"name":"_protect_dataset_variables_inplace","nodeType":"Function","startLoc":219,"text":"def _protect_dataset_variables_inplace(dataset, cache):\n for name, variable in dataset.variables.items():\n if name not in variable.dims:\n # no need to protect IndexVariable objects\n data = indexing.CopyOnWriteArray(variable._data)\n if cache:\n data = indexing.MemoryCachedArray(data)\n variable.data = data"},{"col":0,"comment":"Similar to dask.array.DataArray.__repr__, but without\n redundant information that's already printed by the repr\n function of the xarray wrapper.\n ","endLoc":235,"header":"def inline_dask_repr(array)","id":596,"name":"inline_dask_repr","nodeType":"Function","startLoc":216,"text":"def inline_dask_repr(array):\n \"\"\"Similar to dask.array.DataArray.__repr__, but without\n redundant information that's already printed by the repr\n function of the xarray wrapper.\n \"\"\"\n assert isinstance(array, dask_array_type), array\n\n chunksize = tuple(c[0] for c in array.chunks)\n\n if hasattr(array, \"_meta\"):\n meta = array._meta\n if type(meta) in _KNOWN_TYPE_REPRS:\n meta_repr = _KNOWN_TYPE_REPRS[type(meta)]\n else:\n meta_repr = type(meta).__name__\n meta_string = \", meta={}\".format(meta_repr)\n else:\n meta_string = \"\"\n\n return \"dask.array<chunksize={}{}>\".format(chunksize, 
meta_string)"},{"col":0,"comment":"Similar to sparse.COO.__repr__, but without the redundant shape/dtype.","endLoc":243,"header":"def inline_sparse_repr(array)","id":597,"name":"inline_sparse_repr","nodeType":"Function","startLoc":238,"text":"def inline_sparse_repr(array):\n \"\"\"Similar to sparse.COO.__repr__, but without the redundant shape/dtype.\"\"\"\n assert isinstance(array, sparse_array_type), array\n return \"<{}: nnz={:d}, fill_value={!s}>\".format(\n type(array).__name__, array.nnz, array.fill_value\n )"},{"col":0,"comment":"Build a one-line summary of a variable's data.","endLoc":258,"header":"def inline_variable_array_repr(var, max_width)","id":598,"name":"inline_variable_array_repr","nodeType":"Function","startLoc":246,"text":"def inline_variable_array_repr(var, max_width):\n \"\"\"Build a one-line summary of a variable's data.\"\"\"\n if var._in_memory:\n return format_array_flat(var, max_width)\n elif isinstance(var._data, dask_array_type):\n return inline_dask_repr(var.data)\n elif isinstance(var._data, sparse_array_type):\n return inline_sparse_repr(var.data)\n elif hasattr(var._data, \"__array_function__\"):\n return maybe_truncate(repr(var._data).replace(\"\\n\", \" \"), max_width)\n else:\n # internal xarray array type\n return \"...\""},{"col":0,"comment":"null","endLoc":34,"header":"def error(msg: str) -> None","id":599,"name":"error","nodeType":"Function","startLoc":31,"text":"def error(msg: str) -> None:\n global has_errors\n has_errors = True\n print(\"ERROR:\", msg)"},{"col":0,"comment":"Load requirements/py36-min-all-deps.yml\n\n Yield (package name, major version, minor version)\n ","endLoc":63,"header":"def parse_requirements(fname) -> Iterator[Tuple[str, int, int]]","id":600,"name":"parse_requirements","nodeType":"Function","startLoc":37,"text":"def parse_requirements(fname) -> Iterator[Tuple[str, int, int]]:\n \"\"\"Load requirements/py36-min-all-deps.yml\n\n Yield (package name, major version, minor version)\n \"\"\"\n global has_errors\n\n with open(fname) as fh:\n contents = yaml.safe_load(fh)\n for row in contents[\"dependencies\"]:\n if isinstance(row, dict) and list(row) == [\"pip\"]:\n continue\n pkg, eq, version = row.partition(\"=\")\n if pkg.rstrip(\"<>\") in IGNORE_DEPS:\n continue\n if pkg.endswith(\"<\") or pkg.endswith(\">\") or eq != \"=\":\n error(\"package should be pinned with exact version: \" + row)\n continue\n try:\n major, minor = version.split(\".\")\n except ValueError:\n error(\"expected major.minor (without patch): \" + row)\n continue\n try:\n yield pkg, int(major), int(minor)\n except ValueError:\n error(\"failed to parse version: \" + row)"},{"col":0,"comment":"null","endLoc":564,"header":"def is_remote_uri(path: str) -> bool","id":601,"name":"is_remote_uri","nodeType":"Function","startLoc":563,"text":"def is_remote_uri(path: str) -> bool:\n return bool(re.search(r\"^https?\\://\", path))"},{"col":0,"comment":"null","endLoc":158,"header":"def _normalize_path(path)","id":602,"name":"_normalize_path","nodeType":"Function","startLoc":154,"text":"def _normalize_path(path):\n if is_remote_uri(path):\n return path\n else:\n return os.path.abspath(os.path.expanduser(path))"},{"col":0,"comment":"Query the conda repository for a specific package\n\n Return map of {(major version, minor version): publication date}\n ","endLoc":108,"header":"def query_conda(pkg: str) -> Dict[Tuple[int, int], datetime]","id":603,"name":"query_conda","nodeType":"Function","startLoc":66,"text":"def query_conda(pkg: str) -> Dict[Tuple[int, int], datetime]:\n 
\"\"\"Query the conda repository for a specific package\n\n Return map of {(major version, minor version): publication date}\n \"\"\"\n stdout = subprocess.check_output(\n [\"conda\", \"search\", pkg, \"--info\", \"-c\", \"defaults\", \"-c\", \"conda-forge\"]\n )\n out = {} # type: Dict[Tuple[int, int], datetime]\n major = None\n minor = None\n\n for row in stdout.decode(\"utf-8\").splitlines():\n label, _, value = row.partition(\":\")\n label = label.strip()\n if label == \"file name\":\n value = value.strip()[len(pkg) :]\n major, minor = value.split(\"-\")[1].split(\".\")[:2]\n major = int(major)\n minor = int(minor)\n if label == \"timestamp\":\n assert major is not None\n assert minor is not None\n ts = datetime.strptime(value.split()[0].strip(), \"%Y-%m-%d\")\n\n if (major, minor) in out:\n out[major, minor] = min(out[major, minor], ts)\n else:\n out[major, minor] = ts\n\n # Hardcoded fix to work around incorrect dates in conda\n if pkg == \"python\":\n out.update(\n {\n (2, 7): datetime(2010, 6, 3),\n (3, 5): datetime(2015, 9, 13),\n (3, 6): datetime(2016, 12, 23),\n (3, 7): datetime(2018, 6, 27),\n (3, 8): datetime(2019, 10, 14),\n }\n )\n\n return out"},{"col":0,"comment":"null","endLoc":151,"header":"def _get_default_engine(path, allow_remote=False)","id":604,"name":"_get_default_engine","nodeType":"Function","startLoc":142,"text":"def _get_default_engine(path, allow_remote=False):\n if allow_remote and is_remote_uri(path):\n engine = _get_default_engine_remote_uri()\n elif is_grib_path(path):\n engine = _get_default_engine_grib()\n elif path.endswith(\".gz\"):\n engine = _get_default_engine_gz()\n else:\n engine = _get_default_engine_netcdf()\n return engine"},{"col":0,"comment":"null","endLoc":58,"header":"def _get_default_engine_remote_uri()","id":605,"name":"_get_default_engine_remote_uri","nodeType":"Function","startLoc":43,"text":"def _get_default_engine_remote_uri():\n try:\n import netCDF4 # noqa: F401\n\n engine = \"netcdf4\"\n except ImportError: # pragma: no cover\n try:\n import pydap # noqa: F401\n\n engine = \"pydap\"\n except ImportError:\n raise ValueError(\n \"netCDF4 or pydap is required for accessing \"\n \"remote datasets via OPeNDAP\"\n )\n return engine"},{"col":0,"comment":"null","endLoc":569,"header":"def is_grib_path(path: str) -> bool","id":606,"name":"is_grib_path","nodeType":"Function","startLoc":567,"text":"def is_grib_path(path: str) -> bool:\n _, ext = os.path.splitext(path)\n return ext in [\".grib\", \".grb\", \".grib2\", \".grb2\"]"},{"col":0,"comment":"null","endLoc":78,"header":"def _get_default_engine_grib()","id":607,"name":"_get_default_engine_grib","nodeType":"Function","startLoc":61,"text":"def _get_default_engine_grib():\n msgs = []\n try:\n import Nio # noqa: F401\n\n msgs += [\"set engine='pynio' to access GRIB files with PyNIO\"]\n except ImportError: # pragma: no cover\n pass\n try:\n import cfgrib # noqa: F401\n\n msgs += [\"set engine='cfgrib' to access GRIB files with cfgrib\"]\n except ImportError: # pragma: no cover\n pass\n if msgs:\n raise ValueError(\" or\\n\".join(msgs))\n else:\n raise ValueError(\"PyNIO or cfgrib is required for accessing \" \"GRIB files\")"},{"col":0,"comment":"null","endLoc":88,"header":"def _get_default_engine_gz()","id":608,"name":"_get_default_engine_gz","nodeType":"Function","startLoc":81,"text":"def _get_default_engine_gz():\n try:\n import scipy # noqa: F401\n\n engine = \"scipy\"\n except ImportError: # pragma: no cover\n raise ValueError(\"scipy is required for accessing .gz files\")\n return 
engine"},{"attributeType":"null","col":4,"comment":"null","endLoc":77,"id":609,"name":"__slots__","nodeType":"Attribute","startLoc":77,"text":"__slots__"},{"col":0,"comment":"null","endLoc":106,"header":"def _get_default_engine_netcdf()","id":610,"name":"_get_default_engine_netcdf","nodeType":"Function","startLoc":91,"text":"def _get_default_engine_netcdf():\n try:\n import netCDF4 # noqa: F401\n\n engine = \"netcdf4\"\n except ImportError: # pragma: no cover\n try:\n import scipy.io.netcdf # noqa: F401\n\n engine = \"scipy\"\n except ImportError:\n raise ValueError(\n \"cannot read or write netCDF files without \"\n \"netCDF4-python or scipy installed\"\n )\n return engine"},{"attributeType":"null","col":4,"comment":"null","endLoc":102,"id":611,"name":"_reduce_extra_args_docstring","nodeType":"Attribute","startLoc":102,"text":"_reduce_extra_args_docstring"},{"col":0,"comment":"Summarize a variable in one line, e.g., for the Dataset.__repr__.","endLoc":275,"header":"def summarize_variable(name, var, col_width, marker=\" \", max_width=None)","id":612,"name":"summarize_variable","nodeType":"Function","startLoc":261,"text":"def summarize_variable(name, var, col_width, marker=\" \", max_width=None):\n \"\"\"Summarize a variable in one line, e.g., for the Dataset.__repr__.\"\"\"\n if max_width is None:\n max_width = OPTIONS[\"display_width\"]\n first_col = pretty_print(\" {} {} \".format(marker, name), col_width)\n if var.dims:\n dims_str = \"({}) \".format(\", \".join(map(str, var.dims)))\n else:\n dims_str = \"\"\n front_str = \"{}{}{} \".format(first_col, dims_str, var.dtype)\n\n values_width = max_width - len(front_str)\n values_str = inline_variable_array_repr(var, values_width)\n\n return front_str + values_str"},{"attributeType":"null","col":4,"comment":"null","endLoc":106,"id":613,"name":"_cum_extra_args_docstring","nodeType":"Attribute","startLoc":106,"text":"_cum_extra_args_docstring"},{"col":4,"comment":"Create a new dataset from the contents of a backends.*DataStore\n object\n ","endLoc":546,"header":"@classmethod\n def load_store(cls, store, decoder=None) -> \"Dataset\"","id":614,"name":"load_store","nodeType":"Function","startLoc":536,"text":"@classmethod\n def load_store(cls, store, decoder=None) -> \"Dataset\":\n \"\"\"Create a new dataset from the contents of a backends.*DataStore\n object\n \"\"\"\n variables, attributes = store.load()\n if decoder:\n variables, attributes = decoder(variables, attributes)\n obj = cls(variables, attrs=attributes)\n obj._file_obj = store\n return obj"},{"col":0,"comment":"null","endLoc":280,"header":"def _summarize_coord_multiindex(coord, col_width, marker)","id":615,"name":"_summarize_coord_multiindex","nodeType":"Function","startLoc":278,"text":"def _summarize_coord_multiindex(coord, col_width, marker):\n first_col = pretty_print(\" {} {} \".format(marker, coord.name), col_width)\n return \"{}({}) MultiIndex\".format(first_col, str(coord.dims[0]))"},{"col":0,"comment":"null","endLoc":291,"header":"def _summarize_coord_levels(coord, col_width, marker=\"-\")","id":616,"name":"_summarize_coord_levels","nodeType":"Function","startLoc":283,"text":"def _summarize_coord_levels(coord, col_width, marker=\"-\"):\n return \"\\n\".join(\n [\n summarize_variable(\n lname, coord.get_level_variable(lname), col_width, marker=marker\n )\n for lname in coord.level_names\n ]\n )"},{"col":0,"comment":"Apply a variable level function over DataArray, Variable and/or ndarray\n objects.\n ","endLoc":245,"header":"def apply_dataarray_vfunc(\n func, *args, signature, 
join=\"inner\", exclude_dims=frozenset(), keep_attrs=False\n)","id":617,"name":"apply_dataarray_vfunc","nodeType":"Function","startLoc":214,"text":"def apply_dataarray_vfunc(\n func, *args, signature, join=\"inner\", exclude_dims=frozenset(), keep_attrs=False\n):\n \"\"\"Apply a variable level function over DataArray, Variable and/or ndarray\n objects.\n \"\"\"\n from .dataarray import DataArray\n\n if len(args) > 1:\n args = deep_align(\n args, join=join, copy=False, exclude=exclude_dims, raise_on_invalid=False\n )\n\n if keep_attrs and hasattr(args[0], \"name\"):\n name = args[0].name\n else:\n name = result_name(args)\n result_coords = build_output_coords(args, signature, exclude_dims)\n\n data_vars = [getattr(a, \"variable\", a) for a in args]\n result_var = func(*data_vars)\n\n if signature.num_outputs > 1:\n out = tuple(\n DataArray(variable, coords, name=name, fastpath=True)\n for variable, coords in zip(result_var, result_coords)\n )\n else:\n coords, = result_coords\n out = DataArray(result_var, coords, name=name, fastpath=True)\n\n return out"},{"col":0,"comment":"null","endLoc":295,"header":"def summarize_datavar(name, var, col_width)","id":618,"name":"summarize_datavar","nodeType":"Function","startLoc":294,"text":"def summarize_datavar(name, var, col_width):\n return summarize_variable(name, var.variable, col_width)"},{"col":0,"comment":"null","endLoc":310,"header":"def summarize_coord(name, var, col_width)","id":619,"name":"summarize_coord","nodeType":"Function","startLoc":298,"text":"def summarize_coord(name, var, col_width):\n is_index = name in var.dims\n marker = \"*\" if is_index else \" \"\n if is_index:\n coord = var.variable.to_index_variable()\n if coord.level_names is not None:\n return \"\\n\".join(\n [\n _summarize_coord_multiindex(coord, col_width, marker),\n _summarize_coord_levels(coord, col_width),\n ]\n )\n return summarize_variable(name, var.variable, col_width, marker)"},{"col":0,"comment":"Compare package version from requirements file to available versions in conda.\n Return row to build pandas dataframe:\n\n - package name\n - major version in requirements file\n - minor version in requirements file\n - publication date of version in requirements file (YYYY-MM-DD)\n - major version suggested by policy\n - minor version suggested by policy\n - publication date of version suggested by policy (YYYY-MM-DD)\n - status (\"<\", \"=\", \"> (!)\")\n ","endLoc":165,"header":"def process_pkg(\n pkg: str, req_major: int, req_minor: int\n) -> Tuple[str, int, int, str, int, int, str, str]","id":620,"name":"process_pkg","nodeType":"Function","startLoc":111,"text":"def process_pkg(\n pkg: str, req_major: int, req_minor: int\n) -> Tuple[str, int, int, str, int, int, str, str]:\n \"\"\"Compare package version from requirements file to available versions in conda.\n Return row to build pandas dataframe:\n\n - package name\n - major version in requirements file\n - minor version in requirements file\n - publication date of version in requirements file (YYYY-MM-DD)\n - major version suggested by policy\n - minor version suggested by policy\n - publication date of version suggested by policy (YYYY-MM-DD)\n - status (\"<\", \"=\", \"> (!)\")\n \"\"\"\n print(\"Analyzing %s...\" % pkg)\n versions = query_conda(pkg)\n\n try:\n req_published = versions[req_major, req_minor]\n except KeyError:\n error(\"not found in conda: \" + pkg)\n return pkg, req_major, req_minor, \"-\", 0, 0, \"-\", \"(!)\"\n\n policy_months = POLICY_MONTHS.get(pkg, POLICY_MONTHS_DEFAULT)\n policy_published = 
datetime.now() - timedelta(days=policy_months * 30)\n\n policy_major = req_major\n policy_minor = req_minor\n policy_published_actual = req_published\n for (major, minor), published in reversed(sorted(versions.items())):\n if published < policy_published:\n break\n policy_major = major\n policy_minor = minor\n policy_published_actual = published\n\n if (req_major, req_minor) < (policy_major, policy_minor):\n status = \"<\"\n elif (req_major, req_minor) > (policy_major, policy_minor):\n status = \"> (!)\"\n error(\"Package is too new: \" + pkg)\n else:\n status = \"=\"\n\n return (\n pkg,\n req_major,\n req_minor,\n req_published.strftime(\"%Y-%m-%d\"),\n policy_major,\n policy_minor,\n policy_published_actual.strftime(\"%Y-%m-%d\"),\n status,\n )"},{"col":0,"comment":"Summary for __repr__ - use ``X.attrs[key]`` for full value.","endLoc":322,"header":"def summarize_attr(key, value, col_width=None)","id":621,"name":"summarize_attr","nodeType":"Function","startLoc":313,"text":"def summarize_attr(key, value, col_width=None):\n \"\"\"Summary for __repr__ - use ``X.attrs[key]`` for full value.\"\"\"\n # Indent key and add ':', then right-pad if col_width is not None\n k_str = \" {}:\".format(key)\n if col_width is not None:\n k_str = pretty_print(k_str, col_width)\n # Replace tabs and newlines, so we print on one line in known width\n v_str = str(value).replace(\"\\t\", \"\\\\t\").replace(\"\\n\", \"\\\\n\")\n # Finally, truncate to the desired display width\n return maybe_truncate(\"{} {}\".format(k_str, v_str), OPTIONS[\"display_width\"])"},{"col":4,"comment":"Low level interface to Dataset contents as dict of Variable objects.\n\n This ordered dictionary is frozen to prevent mutation that could\n violate Dataset invariants. It contains all variable objects\n constituting the Dataset, including both data variables and\n coordinates.\n ","endLoc":557,"header":"@property\n def variables(self) -> Mapping[Hashable, Variable]","id":622,"name":"variables","nodeType":"Function","startLoc":548,"text":"@property\n def variables(self) -> Mapping[Hashable, Variable]:\n \"\"\"Low level interface to Dataset contents as dict of Variable objects.\n\n This ordered dictionary is frozen to prevent mutation that could\n violate Dataset invariants. 
It contains all variable objects\n constituting the Dataset, including both data variables and\n coordinates.\n \"\"\"\n return Frozen(self._variables)"},{"col":0,"comment":"null","endLoc":384,"header":"def indexes_repr(indexes)","id":623,"name":"indexes_repr","nodeType":"Function","startLoc":380,"text":"def indexes_repr(indexes):\n summary = []\n for k, v in indexes.items():\n summary.append(wrap_indent(repr(v), \"{}: \".format(k)))\n return \"\\n\".join(summary)"},{"col":0,"comment":"Guts of the Dataset.merge method.\n ","endLoc":825,"header":"def dataset_merge_method(\n dataset: \"Dataset\",\n other: \"CoercibleMapping\",\n overwrite_vars: Union[Hashable, Iterable[Hashable]],\n compat: str,\n join: str,\n fill_value: Any,\n) -> _MergeResult","id":624,"name":"dataset_merge_method","nodeType":"Function","startLoc":787,"text":"def dataset_merge_method(\n dataset: \"Dataset\",\n other: \"CoercibleMapping\",\n overwrite_vars: Union[Hashable, Iterable[Hashable]],\n compat: str,\n join: str,\n fill_value: Any,\n) -> _MergeResult:\n \"\"\"Guts of the Dataset.merge method.\n \"\"\"\n # we are locked into supporting overwrite_vars for the Dataset.merge\n # method due for backwards compatibility\n # TODO: consider deprecating it?\n\n if isinstance(overwrite_vars, Iterable) and not isinstance(overwrite_vars, str):\n overwrite_vars = set(overwrite_vars)\n else:\n overwrite_vars = {overwrite_vars}\n\n if not overwrite_vars:\n objs = [dataset, other]\n priority_arg = None\n elif overwrite_vars == set(other):\n objs = [dataset, other]\n priority_arg = 1\n else:\n other_overwrite: Dict[Hashable, CoercibleValue] = {}\n other_no_overwrite: Dict[Hashable, CoercibleValue] = {}\n for k, v in other.items():\n if k in overwrite_vars:\n other_overwrite[k] = v\n else:\n other_no_overwrite[k] = v\n objs = [dataset, other_no_overwrite, other_overwrite]\n priority_arg = 2\n\n return merge_core(\n objs, compat, join, priority_arg=priority_arg, fill_value=fill_value\n )"},{"col":4,"comment":"Dictionary of global attributes on this dataset\n ","endLoc":565,"header":"@property\n def attrs(self) -> Dict[Hashable, Any]","id":625,"name":"attrs","nodeType":"Function","startLoc":559,"text":"@property\n def attrs(self) -> Dict[Hashable, Any]:\n \"\"\"Dictionary of global attributes on this dataset\n \"\"\"\n if self._attrs is None:\n self._attrs = {}\n return self._attrs"},{"col":4,"comment":"null","endLoc":569,"header":"@attrs.setter\n def attrs(self, value: Mapping[Hashable, Any]) -> None","id":626,"name":"attrs","nodeType":"Function","startLoc":567,"text":"@attrs.setter\n def attrs(self, value: Mapping[Hashable, Any]) -> None:\n self._attrs = dict(value)"},{"col":4,"comment":"Dictionary of global encoding attributes on this dataset\n ","endLoc":577,"header":"@property\n def encoding(self) -> Dict","id":627,"name":"encoding","nodeType":"Function","startLoc":571,"text":"@property\n def encoding(self) -> Dict:\n \"\"\"Dictionary of global encoding attributes on this dataset\n \"\"\"\n if self._encoding is None:\n self._encoding = {}\n return self._encoding"},{"col":4,"comment":"null","endLoc":581,"header":"@encoding.setter\n def encoding(self, value: Mapping) -> None","id":628,"name":"encoding","nodeType":"Function","startLoc":579,"text":"@encoding.setter\n def encoding(self, value: Mapping) -> None:\n self._encoding = dict(value)"},{"col":4,"comment":"Mapping from dimension names to lengths.\n\n Cannot be modified directly, but is updated when adding new variables.\n\n Note that type of this object differs from 
`DataArray.dims`.\n See `Dataset.sizes` and `DataArray.sizes` for consistently named\n properties.\n ","endLoc":593,"header":"@property\n def dims(self) -> Mapping[Hashable, int]","id":629,"name":"dims","nodeType":"Function","startLoc":583,"text":"@property\n def dims(self) -> Mapping[Hashable, int]:\n \"\"\"Mapping from dimension names to lengths.\n\n Cannot be modified directly, but is updated when adding new variables.\n\n Note that type of this object differs from `DataArray.dims`.\n See `Dataset.sizes` and `DataArray.sizes` for consistently named\n properties.\n \"\"\"\n return Frozen(SortedKeysDict(self._dims))"},{"col":0,"comment":"Guts of the Dataset.update method.\n\n This drops a duplicated coordinates from `other` if `other` is not an\n `xarray.Dataset`, e.g., if it's a dict with DataArray values (GH2068,\n GH2180).\n ","endLoc":853,"header":"def dataset_update_method(\n dataset: \"Dataset\", other: \"CoercibleMapping\"\n) -> _MergeResult","id":630,"name":"dataset_update_method","nodeType":"Function","startLoc":828,"text":"def dataset_update_method(\n dataset: \"Dataset\", other: \"CoercibleMapping\"\n) -> _MergeResult:\n \"\"\"Guts of the Dataset.update method.\n\n This drops a duplicated coordinates from `other` if `other` is not an\n `xarray.Dataset`, e.g., if it's a dict with DataArray values (GH2068,\n GH2180).\n \"\"\"\n from .dataarray import DataArray\n from .dataset import Dataset\n\n if not isinstance(other, Dataset):\n other = dict(other)\n for key, value in other.items():\n if isinstance(value, DataArray):\n # drop conflicting coordinates\n coord_names = [\n c\n for c in value.coords\n if c not in value.dims and c in dataset.coords\n ]\n if coord_names:\n other[key] = value.drop(coord_names)\n\n return merge_core([dataset, other], priority_arg=1, indexes=dataset.indexes)"},{"col":4,"comment":"null","endLoc":357,"header":"@classmethod\n def open(\n cls,\n filename,\n mode=\"r\",\n format=\"NETCDF4\",\n group=None,\n clobber=True,\n diskless=False,\n persist=False,\n lock=None,\n lock_maker=None,\n autoclose=False,\n )","id":631,"name":"open","nodeType":"Function","startLoc":319,"text":"@classmethod\n def open(\n cls,\n filename,\n mode=\"r\",\n format=\"NETCDF4\",\n group=None,\n clobber=True,\n diskless=False,\n persist=False,\n lock=None,\n lock_maker=None,\n autoclose=False,\n ):\n import netCDF4\n\n if format is None:\n format = \"NETCDF4\"\n\n if lock is None:\n if mode == \"r\":\n if is_remote_uri(filename):\n lock = NETCDFC_LOCK\n else:\n lock = NETCDF4_PYTHON_LOCK\n else:\n if format is None or format.startswith(\"NETCDF4\"):\n base_lock = NETCDF4_PYTHON_LOCK\n else:\n base_lock = NETCDFC_LOCK\n lock = combine_locks([base_lock, get_write_lock(filename)])\n\n kwargs = dict(\n clobber=clobber, diskless=diskless, persist=persist, format=format\n )\n manager = CachingFileManager(\n netCDF4.Dataset, filename, mode=mode, kwargs=kwargs\n )\n return cls(manager, group=group, mode=mode, lock=lock, autoclose=autoclose)"},{"col":0,"comment":"null","endLoc":153,"header":"def result_name(objects: list) -> Any","id":632,"name":"result_name","nodeType":"Function","startLoc":144,"text":"def result_name(objects: list) -> Any:\n # use the same naming heuristics as pandas:\n # https://github.com/blaze/blaze/issues/458#issuecomment-51936356\n names = {getattr(obj, \"name\", _DEFAULT_NAME) for obj in objects}\n names.discard(_DEFAULT_NAME)\n if len(names) == 1:\n name, = names\n else:\n name = None\n return name"},{"col":4,"comment":"null","endLoc":412,"header":"def __init__(self, 
mapping: MutableMapping[K, V] = None)","id":633,"name":"__init__","nodeType":"Function","startLoc":411,"text":"def __init__(self, mapping: MutableMapping[K, V] = None):\n self.mapping = {} if mapping is None else mapping"},{"col":0,"comment":"Apply a ndarray level function over ndarray objects.","endLoc":747,"header":"def apply_array_ufunc(func, *args, dask=\"forbidden\")","id":634,"name":"apply_array_ufunc","nodeType":"Function","startLoc":727,"text":"def apply_array_ufunc(func, *args, dask=\"forbidden\"):\n \"\"\"Apply a ndarray level function over ndarray objects.\"\"\"\n if any(isinstance(arg, dask_array_type) for arg in args):\n if dask == \"forbidden\":\n raise ValueError(\n \"apply_ufunc encountered a dask array on an \"\n \"argument, but handling for dask arrays has not \"\n \"been enabled. Either set the ``dask`` argument \"\n \"or load your data into memory first with \"\n \"``.load()`` or ``.compute()``\"\n )\n elif dask == \"parallelized\":\n raise ValueError(\n \"cannot use dask='parallelized' for apply_ufunc \"\n \"unless at least one input is an xarray object\"\n )\n elif dask == \"allowed\":\n pass\n else:\n raise ValueError(\"unknown setting for dask array handling: {}\".format(dask))\n return func(*args)"},{"col":4,"comment":"null","endLoc":317,"header":"def __init__(\n self, manager, group=None, mode=None, lock=NETCDF4_PYTHON_LOCK, autoclose=False\n )","id":635,"name":"__init__","nodeType":"Function","startLoc":293,"text":"def __init__(\n self, manager, group=None, mode=None, lock=NETCDF4_PYTHON_LOCK, autoclose=False\n ):\n import netCDF4\n\n if isinstance(manager, netCDF4.Dataset):\n if group is None:\n root, group = find_root_and_group(manager)\n else:\n if not type(manager) is netCDF4.Dataset:\n raise ValueError(\n \"must supply a root netCDF4.Dataset if the group \"\n \"argument is provided\"\n )\n root = manager\n manager = DummyFileManager(root)\n\n self._manager = manager\n self._group = group\n self._mode = mode\n self.format = self.ds.data_model\n self._filename = self.ds.filepath()\n self.is_remote = is_remote_uri(self._filename)\n self.lock = ensure_lock(lock)\n self.autoclose = autoclose"},{"col":0,"comment":"null","endLoc":183,"header":"def main() -> None","id":636,"name":"main","nodeType":"Function","startLoc":168,"text":"def main() -> None:\n fname = sys.argv[1]\n with ThreadPoolExecutor(8) as ex:\n futures = [\n ex.submit(process_pkg, pkg, major, minor)\n for pkg, major, minor in parse_requirements(fname)\n ]\n rows = [f.result() for f in futures]\n\n print(\"Package Required Policy Status\")\n print(\"------------- ----------------- ----------------- ------\")\n fmt = \"{:13} {:>1d}.{:<2d} ({:10}) {:>1d}.{:<2d} ({:10}) {}\"\n for row in rows:\n print(fmt.format(*row))\n\n assert not has_errors"},{"attributeType":"null","col":4,"comment":"null","endLoc":17,"id":637,"name":"__slots__","nodeType":"Attribute","startLoc":17,"text":"__slots__"},{"col":4,"comment":"Mapping from dimension names to lengths.\n\n Cannot be modified directly, but is updated when adding new variables.\n\n This is an alias for `Dataset.dims` provided for the benefit of\n consistency with `DataArray.sizes`.\n\n See also\n --------\n DataArray.sizes\n ","endLoc":608,"header":"@property\n def sizes(self) -> Mapping[Hashable, int]","id":638,"name":"sizes","nodeType":"Function","startLoc":595,"text":"@property\n def sizes(self) -> Mapping[Hashable, int]:\n \"\"\"Mapping from dimension names to lengths.\n\n Cannot be modified directly, but is updated when adding new variables.\n\n This is 
an alias for `Dataset.dims` provided for the benefit of\n consistency with `DataArray.sizes`.\n\n See also\n --------\n DataArray.sizes\n \"\"\"\n return self.dims"},{"attributeType":"null","col":4,"comment":"null","endLoc":24,"id":639,"name":"_HANDLED_TYPES","nodeType":"Attribute","startLoc":24,"text":"_HANDLED_TYPES"},{"attributeType":"null","col":4,"comment":"null","endLoc":84,"id":640,"name":"__lt__","nodeType":"Attribute","startLoc":84,"text":"__lt__"},{"col":4,"comment":"Manually trigger loading and/or computation of this dataset's data\n from disk or a remote source into memory and return this dataset.\n Unlike compute, the original dataset is modified and returned.\n\n Normally, it should not be necessary to call this method in user code,\n because all xarray functions should either work on deferred data or\n load data automatically. However, this method can be necessary when\n working with many file objects on disk.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.array.compute``.\n\n See Also\n --------\n dask.array.compute\n ","endLoc":649,"header":"def load(self, **kwargs) -> \"Dataset\"","id":641,"name":"load","nodeType":"Function","startLoc":610,"text":"def load(self, **kwargs) -> \"Dataset\":\n \"\"\"Manually trigger loading and/or computation of this dataset's data\n from disk or a remote source into memory and return this dataset.\n Unlike compute, the original dataset is modified and returned.\n\n Normally, it should not be necessary to call this method in user code,\n because all xarray functions should either work on deferred data or\n load data automatically. However, this method can be necessary when\n working with many file objects on disk.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.array.compute``.\n\n See Also\n --------\n dask.array.compute\n \"\"\"\n # access .data to coerce everything to numpy or dask arrays\n lazy_data = {\n k: v._data\n for k, v in self.variables.items()\n if isinstance(v._data, dask_array_type)\n }\n if lazy_data:\n import dask.array as da\n\n # evaluate all the dask arrays simultaneously\n evaluated_data = da.compute(*lazy_data.values(), **kwargs)\n\n for k, data in zip(lazy_data, evaluated_data):\n self.variables[k].data = data\n\n # load everything else sequentially\n for k, v in self.variables.items():\n if k not in lazy_data:\n v.load()\n\n return self"},{"attributeType":"null","col":8,"comment":"null","endLoc":85,"id":642,"name":"__le__","nodeType":"Attribute","startLoc":85,"text":"__le__"},{"attributeType":"null","col":8,"comment":"null","endLoc":87,"id":643,"name":"__ge__","nodeType":"Attribute","startLoc":87,"text":"__ge__"},{"attributeType":"null","col":8,"comment":"null","endLoc":89,"id":644,"name":"__gt__","nodeType":"Attribute","startLoc":89,"text":"__gt__"},{"attributeType":"null","col":8,"comment":"null","endLoc":91,"id":645,"name":"__add__","nodeType":"Attribute","startLoc":91,"text":"__add__"},{"attributeType":"null","col":8,"comment":"null","endLoc":93,"id":646,"name":"__sub__","nodeType":"Attribute","startLoc":93,"text":"__sub__"},{"col":4,"comment":"null","endLoc":320,"header":"def __init__(self, value)","id":647,"name":"__init__","nodeType":"Function","startLoc":319,"text":"def __init__(self, value):\n self._value = 
value"},{"attributeType":"null","col":8,"comment":"null","endLoc":95,"id":648,"name":"__mul__","nodeType":"Attribute","startLoc":95,"text":"__mul__"},{"attributeType":"null","col":8,"comment":"null","endLoc":97,"id":649,"name":"__truediv__","nodeType":"Attribute","startLoc":97,"text":"__truediv__"},{"attributeType":"null","col":8,"comment":"null","endLoc":99,"id":650,"name":"__floordiv__","nodeType":"Attribute","startLoc":99,"text":"__floordiv__"},{"attributeType":"null","col":8,"comment":"null","endLoc":101,"id":651,"name":"__mod__","nodeType":"Attribute","startLoc":101,"text":"__mod__"},{"attributeType":"null","col":8,"comment":"null","endLoc":103,"id":652,"name":"__pow__","nodeType":"Attribute","startLoc":103,"text":"__pow__"},{"attributeType":"null","col":8,"comment":"null","endLoc":104,"id":653,"name":"__and__","nodeType":"Attribute","startLoc":104,"text":"__and__"},{"attributeType":"null","col":0,"comment":"null","endLoc":13,"id":654,"name":"IGNORE_DEPS","nodeType":"Attribute","startLoc":13,"text":"IGNORE_DEPS"},{"attributeType":"null","col":18,"comment":"null","endLoc":104,"id":655,"name":"__xor__","nodeType":"Attribute","startLoc":104,"text":"__xor__"},{"attributeType":"null","col":0,"comment":"null","endLoc":25,"id":656,"name":"POLICY_MONTHS","nodeType":"Attribute","startLoc":25,"text":"POLICY_MONTHS"},{"attributeType":"null","col":0,"comment":"null","endLoc":26,"id":657,"name":"POLICY_MONTHS_DEFAULT","nodeType":"Attribute","startLoc":26,"text":"POLICY_MONTHS_DEFAULT"},{"attributeType":"null","col":28,"comment":"null","endLoc":104,"id":658,"name":"__or__","nodeType":"Attribute","startLoc":104,"text":"__or__"},{"attributeType":"null","col":0,"comment":"null","endLoc":28,"id":659,"name":"has_errors","nodeType":"Attribute","startLoc":28,"text":"has_errors"},{"col":0,"comment":"","endLoc":4,"header":"min_deps_check.py#","id":660,"name":"","nodeType":"Function","startLoc":1,"text":"\"\"\"Fetch from conda database all available versions of the xarray dependencies and their\npublication date. Compare it against requirements/py36-min-all-deps.yml to verify the\npolicy on obsolete dependencies is being followed. Print a pretty report :)\n\"\"\"\n\nIGNORE_DEPS = {\n \"black\",\n \"coveralls\",\n \"flake8\",\n \"hypothesis\",\n \"mypy\",\n \"pip\",\n \"pytest\",\n \"pytest-cov\",\n \"pytest-env\",\n}\n\nPOLICY_MONTHS = {\"python\": 42, \"numpy\": 24, \"pandas\": 12, \"scipy\": 12}\n\nPOLICY_MONTHS_DEFAULT = 6\n\nhas_errors = False\n\nif __name__ == \"__main__\":\n main()"},{"attributeType":"null","col":37,"comment":"null","endLoc":104,"id":661,"name":"__div__","nodeType":"Attribute","startLoc":104,"text":"__div__"},{"attributeType":"null","col":47,"comment":"null","endLoc":104,"id":662,"name":"__eq__","nodeType":"Attribute","startLoc":104,"text":"__eq__"},{"attributeType":"null","col":56,"comment":"null","endLoc":104,"id":663,"name":"__ne__","nodeType":"Attribute","startLoc":104,"text":"__ne__"},{"className":"AttrAccessMixin","col":0,"comment":"Mixin class that allows getting keys with attribute access\n ","endLoc":292,"id":664,"nodeType":"Class","startLoc":185,"text":"class AttrAccessMixin:\n \"\"\"Mixin class that allows getting keys with attribute access\n \"\"\"\n\n __slots__ = ()\n\n def __init_subclass__(cls):\n \"\"\"Verify that all subclasses explicitly define ``__slots__``. 
If they don't,\n raise error in the core xarray module and a FutureWarning in third-party\n extensions.\n \"\"\"\n if not hasattr(object.__new__(cls), \"__dict__\"):\n pass\n elif cls.__module__.startswith(\"xarray.\"):\n raise AttributeError(\"%s must explicitly define __slots__\" % cls.__name__)\n else:\n cls.__setattr__ = cls._setattr_dict\n warnings.warn(\n \"xarray subclass %s should explicitly define __slots__\" % cls.__name__,\n FutureWarning,\n stacklevel=2,\n )\n\n @property\n def _attr_sources(self) -> List[Mapping[Hashable, Any]]:\n \"\"\"List of places to look-up items for attribute-style access\n \"\"\"\n return []\n\n @property\n def _item_sources(self) -> List[Mapping[Hashable, Any]]:\n \"\"\"List of places to look-up items for key-autocompletion\n \"\"\"\n return []\n\n def __getattr__(self, name: str) -> Any:\n if name not in {\"__dict__\", \"__setstate__\"}:\n # this avoids an infinite loop when pickle looks for the\n # __setstate__ attribute before the xarray object is initialized\n for source in self._attr_sources:\n with suppress(KeyError):\n return source[name]\n raise AttributeError(\n \"%r object has no attribute %r\" % (type(self).__name__, name)\n )\n\n # This complicated two-method design boosts overall performance of simple operations\n # - particularly DataArray methods that perform a _to_temp_dataset() round-trip - by\n # a whopping 8% compared to a single method that checks hasattr(self, \"__dict__\") at\n # runtime before every single assignment. All of this is just temporary until the\n # FutureWarning can be changed into a hard crash.\n def _setattr_dict(self, name: str, value: Any) -> None:\n \"\"\"Deprecated third party subclass (see ``__init_subclass__`` above)\n \"\"\"\n object.__setattr__(self, name, value)\n if name in self.__dict__:\n # Custom, non-slotted attr, or improperly assigned variable?\n warnings.warn(\n \"Setting attribute %r on a %r object. Explicitly define __slots__ \"\n \"to suppress this warning for legitimate custom attributes and \"\n \"raise an error when attempting variables assignments.\"\n % (name, type(self).__name__),\n FutureWarning,\n stacklevel=2,\n )\n\n def __setattr__(self, name: str, value: Any) -> None:\n \"\"\"Objects with ``__slots__`` raise AttributeError if you try setting an\n undeclared attribute. This is desirable, but the error message could use some\n improvement.\n \"\"\"\n try:\n object.__setattr__(self, name, value)\n except AttributeError as e:\n # Don't accidentally shadow custom AttributeErrors, e.g.\n # DataArray.dims.setter\n if str(e) != \"%r object has no attribute %r\" % (type(self).__name__, name):\n raise\n raise AttributeError(\n \"cannot set attribute %r on a %r object. Use __setitem__ style\"\n \"assignment (e.g., `ds['name'] = ...`) instead of assigning variables.\"\n % (name, type(self).__name__)\n ) from e\n\n def __dir__(self) -> List[str]:\n \"\"\"Provide method name lookup and completion. 
Only provide 'public'\n methods.\n \"\"\"\n extra_attrs = [\n item\n for sublist in self._attr_sources\n for item in sublist\n if isinstance(item, str)\n ]\n return sorted(set(dir(type(self)) + extra_attrs))\n\n def _ipython_key_completions_(self) -> List[str]:\n \"\"\"Provide method for the key-autocompletions in IPython.\n See http://ipython.readthedocs.io/en/stable/config/integrating.html#tab-completion\n For the details.\n \"\"\"\n item_lists = [\n item\n for sublist in self._item_sources\n for item in sublist\n if isinstance(item, str)\n ]\n return list(set(item_lists))"},{"col":4,"comment":"Verify that all subclasses explicitly define ``__slots__``. If they don't,\n raise error in the core xarray module and a FutureWarning in third-party\n extensions.\n ","endLoc":206,"header":"def __init_subclass__(cls)","id":665,"name":"__init_subclass__","nodeType":"Function","startLoc":191,"text":"def __init_subclass__(cls):\n \"\"\"Verify that all subclasses explicitly define ``__slots__``. If they don't,\n raise error in the core xarray module and a FutureWarning in third-party\n extensions.\n \"\"\"\n if not hasattr(object.__new__(cls), \"__dict__\"):\n pass\n elif cls.__module__.startswith(\"xarray.\"):\n raise AttributeError(\"%s must explicitly define __slots__\" % cls.__name__)\n else:\n cls.__setattr__ = cls._setattr_dict\n warnings.warn(\n \"xarray subclass %s should explicitly define __slots__\" % cls.__name__,\n FutureWarning,\n stacklevel=2,\n )"},{"fileName":"resample.py","filePath":"xarray/core","id":666,"nodeType":"File","text":"from . import ops\nfrom .groupby import DataArrayGroupBy, DatasetGroupBy\n\nRESAMPLE_DIM = \"__resample_dim__\"\n\n\nclass Resample:\n \"\"\"An object that extends the `GroupBy` object with additional logic\n for handling specialized re-sampling operations.\n\n You should create a `Resample` object by using the `DataArray.resample` or\n `Dataset.resample` methods. 
The dimension along re-sampling\n\n See Also\n --------\n DataArray.resample\n Dataset.resample\n\n \"\"\"\n\n def _upsample(self, method, *args, **kwargs):\n \"\"\"Dispatch function to call appropriate up-sampling methods on\n data.\n\n This method should not be called directly; instead, use one of the\n wrapper functions supplied by `Resample`.\n\n Parameters\n ----------\n method : str {'asfreq', 'pad', 'ffill', 'backfill', 'bfill', 'nearest',\n 'interpolate'}\n Method to use for up-sampling\n\n See Also\n --------\n Resample.asfreq\n Resample.pad\n Resample.backfill\n Resample.interpolate\n\n \"\"\"\n\n upsampled_index = self._full_index\n\n # Drop non-dimension coordinates along the resampled dimension\n for k, v in self._obj.coords.items():\n if k == self._dim:\n continue\n if self._dim in v.dims:\n self._obj = self._obj.drop(k)\n\n if method == \"asfreq\":\n return self.mean(self._dim)\n\n elif method in [\"pad\", \"ffill\", \"backfill\", \"bfill\", \"nearest\"]:\n kwargs = kwargs.copy()\n kwargs.update(**{self._dim: upsampled_index})\n return self._obj.reindex(method=method, *args, **kwargs)\n\n elif method == \"interpolate\":\n return self._interpolate(*args, **kwargs)\n\n else:\n raise ValueError(\n 'Specified method was \"{}\" but must be one of'\n '\"asfreq\", \"ffill\", \"bfill\", or \"interpolate\"'.format(method)\n )\n\n def asfreq(self):\n \"\"\"Return values of original object at the new up-sampling frequency;\n essentially a re-index with new times set to NaN.\n \"\"\"\n return self._upsample(\"asfreq\")\n\n def pad(self, tolerance=None):\n \"\"\"Forward fill new values at up-sampled frequency.\n\n Parameters\n ----------\n tolerance : optional\n Maximum distance between original and new labels to limit\n the up-sampling method.\n Up-sampled data with indices that satisfy the equation\n ``abs(index[indexer] - target) <= tolerance`` are filled by\n new values. Data with indices that are outside the given\n tolerance are filled with ``NaN`` s\n \"\"\"\n return self._upsample(\"pad\", tolerance=tolerance)\n\n ffill = pad\n\n def backfill(self, tolerance=None):\n \"\"\"Backward fill new values at up-sampled frequency.\n\n Parameters\n ----------\n tolerance : optional\n Maximum distance between original and new labels to limit\n the up-sampling method.\n Up-sampled data with indices that satisfy the equation\n ``abs(index[indexer] - target) <= tolerance`` are filled by\n new values. Data with indices that are outside the given\n tolerance are filled with ``NaN`` s\n \"\"\"\n return self._upsample(\"backfill\", tolerance=tolerance)\n\n bfill = backfill\n\n def nearest(self, tolerance=None):\n \"\"\"Take new values from nearest original coordinate to up-sampled\n frequency coordinates.\n\n Parameters\n ----------\n tolerance : optional\n Maximum distance between original and new labels to limit\n the up-sampling method.\n Up-sampled data with indices that satisfy the equation\n ``abs(index[indexer] - target) <= tolerance`` are filled by\n new values. 
Data with indices that are outside the given\n tolerance are filled with ``NaN`` s\n \"\"\"\n return self._upsample(\"nearest\", tolerance=tolerance)\n\n def interpolate(self, kind=\"linear\"):\n \"\"\"Interpolate up-sampled data using the original data\n as knots.\n\n Parameters\n ----------\n kind : str {'linear', 'nearest', 'zero', 'slinear',\n 'quadratic', 'cubic'}\n Interpolation scheme to use\n\n See Also\n --------\n scipy.interpolate.interp1d\n\n \"\"\"\n return self._interpolate(kind=kind)\n\n def _interpolate(self, kind=\"linear\"):\n \"\"\"Apply scipy.interpolate.interp1d along resampling dimension.\"\"\"\n # drop any existing non-dimension coordinates along the resampling\n # dimension\n dummy = self._obj.copy()\n for k, v in self._obj.coords.items():\n if k != self._dim and self._dim in v.dims:\n dummy = dummy.drop(k)\n return dummy.interp(\n assume_sorted=True,\n method=kind,\n kwargs={\"bounds_error\": False},\n **{self._dim: self._full_index}\n )\n\n\nclass DataArrayResample(DataArrayGroupBy, Resample):\n \"\"\"DataArrayGroupBy object specialized to time resampling operations over a\n specified dimension\n \"\"\"\n\n def __init__(self, *args, dim=None, resample_dim=None, **kwargs):\n\n if dim == resample_dim:\n raise ValueError(\n \"Proxy resampling dimension ('{}') \"\n \"cannot have the same name as actual dimension \"\n \"('{}')! \".format(resample_dim, dim)\n )\n self._dim = dim\n self._resample_dim = resample_dim\n\n super().__init__(*args, **kwargs)\n\n def apply(self, func, shortcut=False, args=(), **kwargs):\n \"\"\"Apply a function over each array in the group and concatenate them\n together into a new array.\n\n `func` is called like `func(ar, *args, **kwargs)` for each array `ar`\n in this group.\n\n Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how\n to stack together the array. The rule is:\n 1. If the dimension along which the group coordinate is defined is\n still in the first grouped array after applying `func`, then stack\n over this dimension.\n 2. Otherwise, stack over the new dimension given by name of this\n grouping (the argument to the `groupby` function).\n\n Parameters\n ----------\n func : function\n Callable to apply to each array.\n shortcut : bool, optional\n Whether or not to shortcut evaluation under the assumptions that:\n (1) The action of `func` does not depend on any of the array\n metadata (attributes or coordinates) but only on the data and\n dimensions.\n (2) The action of `func` creates arrays with homogeneous metadata,\n that is, with the same dimensions and attributes.\n If these conditions are satisfied `shortcut` provides significant\n speedup. 
This should be the case for many common groupby operations\n (e.g., applying numpy ufuncs).\n args : tuple, optional\n Positional arguments passed on to `func`.\n **kwargs\n Used to call `func(ar, **kwargs)` for each array `ar`.\n\n Returns\n -------\n applied : DataArray or DataArray\n The result of splitting, applying and combining this array.\n \"\"\"\n combined = super().apply(func, shortcut=shortcut, args=args, **kwargs)\n\n # If the aggregation function didn't drop the original resampling\n # dimension, then we need to do so before we can rename the proxy\n # dimension we used.\n if self._dim in combined.coords:\n combined = combined.drop(self._dim)\n\n if self._resample_dim in combined.dims:\n combined = combined.rename({self._resample_dim: self._dim})\n\n return combined\n\n\nops.inject_reduce_methods(DataArrayResample)\nops.inject_binary_ops(DataArrayResample)\n\n\nclass DatasetResample(DatasetGroupBy, Resample):\n \"\"\"DatasetGroupBy object specialized to resampling a specified dimension\n \"\"\"\n\n def __init__(self, *args, dim=None, resample_dim=None, **kwargs):\n\n if dim == resample_dim:\n raise ValueError(\n \"Proxy resampling dimension ('{}') \"\n \"cannot have the same name as actual dimension \"\n \"('{}')! \".format(resample_dim, dim)\n )\n self._dim = dim\n self._resample_dim = resample_dim\n\n super().__init__(*args, **kwargs)\n\n def apply(self, func, args=(), shortcut=None, **kwargs):\n \"\"\"Apply a function over each Dataset in the groups generated for\n resampling and concatenate them together into a new Dataset.\n\n `func` is called like `func(ds, *args, **kwargs)` for each dataset `ds`\n in this group.\n\n Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how\n to stack together the datasets. The rule is:\n 1. If the dimension along which the group coordinate is defined is\n still in the first grouped item after applying `func`, then stack\n over this dimension.\n 2. Otherwise, stack over the new dimension given by name of this\n grouping (the argument to the `groupby` function).\n\n Parameters\n ----------\n func : function\n Callable to apply to each sub-dataset.\n args : tuple, optional\n Positional arguments passed on to `func`.\n **kwargs\n Used to call `func(ds, **kwargs)` for each sub-dataset `ar`.\n\n Returns\n -------\n applied : Dataset or DataArray\n The result of splitting, applying and combining this dataset.\n \"\"\"\n # ignore shortcut if set (for now)\n applied = (func(ds, *args, **kwargs) for ds in self._iter_grouped())\n combined = self._combine(applied)\n\n return combined.rename({self._resample_dim: self._dim})\n\n def reduce(self, func, dim=None, keep_attrs=None, **kwargs):\n \"\"\"Reduce the items in this group by applying `func` along the\n pre-defined resampling dimension.\n\n Parameters\n ----------\n func : function\n Function which can be called in the form\n `func(x, axis=axis, **kwargs)` to return the result of collapsing\n an np.ndarray over an integer valued axis.\n dim : str or sequence of str, optional\n Dimension(s) over which to apply `func`.\n keep_attrs : bool, optional\n If True, the datasets's attributes (`attrs`) will be copied from\n the original object to the new one. 
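# Sketch, not from the quoted source: down-sampling a Dataset through the
# apply()/reduce() methods documented above (later xarray releases rename
# apply() to map(); the quoted code still uses apply()). The random data is an
# assumption.
import numpy as np
import pandas as pd
import xarray as xr

ds = xr.Dataset(
    {"t": ("time", np.random.rand(365))},
    coords={"time": pd.date_range("2000-01-01", periods=365)},
)
monthly = ds.resample(time="1M")
monthly.reduce(np.mean, dim="time")           # collapse each month with a numpy function
monthly.apply(lambda g: g.max() - g.min())    # arbitrary per-month computation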
If False (default), the new\n object will be returned without attributes.\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : Array\n Array with summarized data and the indicated dimension(s)\n removed.\n \"\"\"\n return super().reduce(func, dim, keep_attrs, **kwargs)\n\n\nops.inject_reduce_methods(DatasetResample)\nops.inject_binary_ops(DatasetResample)\n"},{"col":4,"comment":"List of places to look-up items for attribute-style access\n ","endLoc":212,"header":"@property\n def _attr_sources(self) -> List[Mapping[Hashable, Any]]","id":667,"name":"_attr_sources","nodeType":"Function","startLoc":208,"text":"@property\n def _attr_sources(self) -> List[Mapping[Hashable, Any]]:\n \"\"\"List of places to look-up items for attribute-style access\n \"\"\"\n return []"},{"col":4,"comment":"List of places to look-up items for key-autocompletion\n ","endLoc":218,"header":"@property\n def _item_sources(self) -> List[Mapping[Hashable, Any]]","id":668,"name":"_item_sources","nodeType":"Function","startLoc":214,"text":"@property\n def _item_sources(self) -> List[Mapping[Hashable, Any]]:\n \"\"\"List of places to look-up items for key-autocompletion\n \"\"\"\n return []"},{"col":4,"comment":"null","endLoc":229,"header":"def __getattr__(self, name: str) -> Any","id":669,"name":"__getattr__","nodeType":"Function","startLoc":220,"text":"def __getattr__(self, name: str) -> Any:\n if name not in {\"__dict__\", \"__setstate__\"}:\n # this avoids an infinite loop when pickle looks for the\n # __setstate__ attribute before the xarray object is initialized\n for source in self._attr_sources:\n with suppress(KeyError):\n return source[name]\n raise AttributeError(\n \"%r object has no attribute %r\" % (type(self).__name__, name)\n )"},{"col":4,"comment":"Deprecated third party subclass (see ``__init_subclass__`` above)\n ","endLoc":249,"header":"def _setattr_dict(self, name: str, value: Any) -> None","id":670,"name":"_setattr_dict","nodeType":"Function","startLoc":236,"text":"def _setattr_dict(self, name: str, value: Any) -> None:\n \"\"\"Deprecated third party subclass (see ``__init_subclass__`` above)\n \"\"\"\n object.__setattr__(self, name, value)\n if name in self.__dict__:\n # Custom, non-slotted attr, or improperly assigned variable?\n warnings.warn(\n \"Setting attribute %r on a %r object. Explicitly define __slots__ \"\n \"to suppress this warning for legitimate custom attributes and \"\n \"raise an error when attempting variables assignments.\"\n % (name, type(self).__name__),\n FutureWarning,\n stacklevel=2,\n )"},{"className":"DataArrayGroupBy","col":0,"comment":"GroupBy object specialized to grouping DataArray objects\n ","endLoc":806,"id":671,"nodeType":"Class","startLoc":602,"text":"class DataArrayGroupBy(GroupBy, ImplementsArrayReduce):\n \"\"\"GroupBy object specialized to grouping DataArray objects\n \"\"\"\n\n def _iter_grouped_shortcut(self):\n \"\"\"Fast version of `_iter_grouped` that yields Variables without\n metadata\n \"\"\"\n var = self._obj.variable\n for indices in self._group_indices:\n yield var[{self._group_dim: indices}]\n\n def _concat_shortcut(self, applied, dim, positions=None):\n # nb. 
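# Sketch, not from the quoted source: the _attr_sources/__getattr__ machinery
# above is what provides attribute-style access on xarray objects. The toy
# Dataset is an assumption for illustration.
import xarray as xr

ds = xr.Dataset({"temperature": ("x", [1.0, 2.0])}, attrs={"title": "demo"})
ds.temperature        # resolved through __getattr__, same as ds["temperature"]
ds.title              # object attrs are also searched as an attribute source
# ds.temperatre       # a misspelling raises AttributeError (no silent fallback)
# ds.temperature = 0  # discouraged: __setattr__ directs you to ds["name"] = ... instead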
don't worry too much about maintaining this method -- it does\n # speed things up, but it's not very interpretable and there are much\n # faster alternatives (e.g., doing the grouped aggregation in a\n # compiled language)\n stacked = Variable.concat(applied, dim, shortcut=True)\n reordered = _maybe_reorder(stacked, dim, positions)\n result = self._obj._replace_maybe_drop_dims(reordered)\n return result\n\n def _restore_dim_order(self, stacked):\n def lookup_order(dimension):\n if dimension == self._group.name:\n dimension, = self._group.dims\n if dimension in self._obj.dims:\n axis = self._obj.get_axis_num(dimension)\n else:\n axis = 1e6 # some arbitrarily high value\n return axis\n\n new_order = sorted(stacked.dims, key=lookup_order)\n return stacked.transpose(*new_order, transpose_coords=self._restore_coord_dims)\n\n def apply(self, func, shortcut=False, args=(), **kwargs):\n \"\"\"Apply a function over each array in the group and concatenate them\n together into a new array.\n\n `func` is called like `func(ar, *args, **kwargs)` for each array `ar`\n in this group.\n\n Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how\n to stack together the array. The rule is:\n\n 1. If the dimension along which the group coordinate is defined is\n still in the first grouped array after applying `func`, then stack\n over this dimension.\n 2. Otherwise, stack over the new dimension given by name of this\n grouping (the argument to the `groupby` function).\n\n Parameters\n ----------\n func : function\n Callable to apply to each array.\n shortcut : bool, optional\n Whether or not to shortcut evaluation under the assumptions that:\n (1) The action of `func` does not depend on any of the array\n metadata (attributes or coordinates) but only on the data and\n dimensions.\n (2) The action of `func` creates arrays with homogeneous metadata,\n that is, with the same dimensions and attributes.\n If these conditions are satisfied `shortcut` provides significant\n speedup. 
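# Sketch, not from the quoted source: the two stacking rules described in the
# apply() docstring above, shown on a tiny assumed DataArray.
import numpy as np
import xarray as xr

da = xr.DataArray(
    np.arange(6.0),
    dims="x",
    coords={"x": np.arange(6), "letter": ("x", list("aabbcc"))},
)
g = da.groupby("letter")
g.apply(lambda a: a - a.mean())  # keeps dim "x": results are stacked back along "x"
g.apply(lambda a: a.sum())       # drops "x": results are stacked along a new "letter" dim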
This should be the case for many common groupby operations\n (e.g., applying numpy ufuncs).\n args : tuple, optional\n Positional arguments passed to `func`.\n **kwargs\n Used to call `func(ar, **kwargs)` for each array `ar`.\n\n Returns\n -------\n applied : DataArray or DataArray\n The result of splitting, applying and combining this array.\n \"\"\"\n if shortcut:\n grouped = self._iter_grouped_shortcut()\n else:\n grouped = self._iter_grouped()\n applied = (maybe_wrap_array(arr, func(arr, *args, **kwargs)) for arr in grouped)\n return self._combine(applied, shortcut=shortcut)\n\n def _combine(self, applied, restore_coord_dims=False, shortcut=False):\n \"\"\"Recombine the applied objects like the original.\"\"\"\n applied_example, applied = peek_at(applied)\n coord, dim, positions = self._infer_concat_args(applied_example)\n if shortcut:\n combined = self._concat_shortcut(applied, dim, positions)\n else:\n combined = concat(applied, dim)\n combined = _maybe_reorder(combined, dim, positions)\n\n if isinstance(combined, type(self._obj)):\n # only restore dimension order for arrays\n combined = self._restore_dim_order(combined)\n if coord is not None:\n if shortcut:\n combined._coords[coord.name] = as_variable(coord)\n else:\n combined.coords[coord.name] = coord\n combined = self._maybe_restore_empty_groups(combined)\n combined = self._maybe_unstack(combined)\n return combined\n\n def quantile(self, q, dim=None, interpolation=\"linear\", keep_attrs=None):\n \"\"\"Compute the qth quantile over each array in the groups and\n concatenate them together into a new array.\n\n Parameters\n ----------\n q : float in range of [0,1] (or sequence of floats)\n Quantile to compute, which must be between 0 and 1\n inclusive.\n dim : xarray.ALL_DIMS, str or sequence of str, optional\n Dimension(s) over which to apply quantile.\n Defaults to the grouped dimension.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to\n use when the desired quantile lies between two data points\n ``i < j``:\n * linear: ``i + (j - i) * fraction``, where ``fraction`` is\n the fractional part of the index surrounded by ``i`` and\n ``j``.\n * lower: ``i``.\n * higher: ``j``.\n * nearest: ``i`` or ``j``, whichever is nearest.\n * midpoint: ``(i + j) / 2``.\n\n Returns\n -------\n quantiles : Variable\n If `q` is a single quantile, then the result\n is a scalar. If multiple percentiles are given, first axis of\n the result corresponds to the quantile and a quantile dimension\n is added to the return array. 
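# Sketch, not from the quoted source: per-group quantiles as documented above.
# A scalar q drops the "quantile" coordinate; a sequence adds a "quantile"
# dimension. The random data is an assumption.
import numpy as np
import xarray as xr

da = xr.DataArray(
    np.random.rand(100),
    dims="x",
    coords={"group": ("x", np.repeat(["a", "b"], 50))},
)
da.groupby("group").quantile(0.5)              # one median per group
da.groupby("group").quantile([0.1, 0.5, 0.9])  # adds a "quantile" dimension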
The other dimensions are the\n dimensions that remain after the reduction of the array.\n\n See Also\n --------\n numpy.nanpercentile, pandas.Series.quantile, Dataset.quantile,\n DataArray.quantile\n \"\"\"\n if dim is None:\n dim = self._group_dim\n\n out = self.apply(\n self._obj.__class__.quantile,\n shortcut=False,\n q=q,\n dim=dim,\n interpolation=interpolation,\n keep_attrs=keep_attrs,\n )\n\n if np.asarray(q, dtype=np.float64).ndim == 0:\n out = out.drop(\"quantile\")\n return out\n\n def reduce(\n self, func, dim=None, axis=None, keep_attrs=None, shortcut=True, **kwargs\n ):\n \"\"\"Reduce the items in this group by applying `func` along some\n dimension(s).\n\n Parameters\n ----------\n func : function\n Function which can be called in the form\n `func(x, axis=axis, **kwargs)` to return the result of collapsing\n an np.ndarray over an integer valued axis.\n dim : xarray.ALL_DIMS, str or sequence of str, optional\n Dimension(s) over which to apply `func`.\n axis : int or sequence of int, optional\n Axis(es) over which to apply `func`. Only one of the 'dimension'\n and 'axis' arguments can be supplied. If neither are supplied, then\n `func` is calculated over all dimension for each group item.\n keep_attrs : bool, optional\n If True, the datasets's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : Array\n Array with summarized data and the indicated dimension(s)\n removed.\n \"\"\"\n if dim is None:\n dim = self._group_dim\n\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n\n if dim is not ALL_DIMS and dim not in self.dims:\n raise ValueError(\n \"cannot reduce over dimension %r. expected either xarray.ALL_DIMS to reduce over all dimensions or one or more of %r.\"\n % (dim, self.dims)\n )\n\n def reduce_array(ar):\n return ar.reduce(func, dim, axis, keep_attrs=keep_attrs, **kwargs)\n\n return self.apply(reduce_array, shortcut=shortcut)"},{"col":4,"comment":"Objects with ``__slots__`` raise AttributeError if you try setting an\n undeclared attribute. This is desirable, but the error message could use some\n improvement.\n ","endLoc":267,"header":"def __setattr__(self, name: str, value: Any) -> None","id":672,"name":"__setattr__","nodeType":"Function","startLoc":251,"text":"def __setattr__(self, name: str, value: Any) -> None:\n \"\"\"Objects with ``__slots__`` raise AttributeError if you try setting an\n undeclared attribute. This is desirable, but the error message could use some\n improvement.\n \"\"\"\n try:\n object.__setattr__(self, name, value)\n except AttributeError as e:\n # Don't accidentally shadow custom AttributeErrors, e.g.\n # DataArray.dims.setter\n if str(e) != \"%r object has no attribute %r\" % (type(self).__name__, name):\n raise\n raise AttributeError(\n \"cannot set attribute %r on a %r object. Use __setitem__ style\"\n \"assignment (e.g., `ds['name'] = ...`) instead of assigning variables.\"\n % (name, type(self).__name__)\n ) from e"},{"className":"GroupBy","col":0,"comment":"A object that implements the split-apply-combine pattern.\n\n Modeled after `pandas.GroupBy`. The `GroupBy` object can be iterated over\n (unique_value, grouped_array) pairs, but the main way to interact with a\n groupby object are with the `apply` or `reduce` methods. 
You can also\n directly call numpy methods like `mean` or `std`.\n\n You should create a GroupBy object by using the `DataArray.groupby` or\n `Dataset.groupby` methods.\n\n See Also\n --------\n Dataset.groupby\n DataArray.groupby\n ","endLoc":590,"id":673,"nodeType":"Class","startLoc":223,"text":"class GroupBy(SupportsArithmetic):\n \"\"\"A object that implements the split-apply-combine pattern.\n\n Modeled after `pandas.GroupBy`. The `GroupBy` object can be iterated over\n (unique_value, grouped_array) pairs, but the main way to interact with a\n groupby object are with the `apply` or `reduce` methods. You can also\n directly call numpy methods like `mean` or `std`.\n\n You should create a GroupBy object by using the `DataArray.groupby` or\n `Dataset.groupby` methods.\n\n See Also\n --------\n Dataset.groupby\n DataArray.groupby\n \"\"\"\n\n __slots__ = (\n \"_full_index\",\n \"_inserted_dims\",\n \"_group\",\n \"_group_dim\",\n \"_group_indices\",\n \"_groups\",\n \"_obj\",\n \"_restore_coord_dims\",\n \"_stacked_dim\",\n \"_unique_coord\",\n \"_dims\",\n )\n\n def __init__(\n self,\n obj,\n group,\n squeeze=False,\n grouper=None,\n bins=None,\n restore_coord_dims=None,\n cut_kwargs={},\n ):\n \"\"\"Create a GroupBy object\n\n Parameters\n ----------\n obj : Dataset or DataArray\n Object to group.\n group : DataArray\n Array with the group values.\n squeeze : boolean, optional\n If \"group\" is a coordinate of object, `squeeze` controls whether\n the subarrays have a dimension of length 1 along that coordinate or\n if the dimension is squeezed out.\n grouper : pd.Grouper, optional\n Used for grouping values along the `group` array.\n bins : array-like, optional\n If `bins` is specified, the groups will be discretized into the\n specified bins by `pandas.cut`.\n restore_coord_dims : bool, optional\n If True, also restore the dimension order of multi-dimensional\n coordinates.\n cut_kwargs : dict, optional\n Extra keyword arguments to pass to `pandas.cut`\n\n \"\"\"\n from .dataarray import DataArray\n\n if grouper is not None and bins is not None:\n raise TypeError(\"can't specify both `grouper` and `bins`\")\n\n if not isinstance(group, (DataArray, IndexVariable)):\n if not hashable(group):\n raise TypeError(\n \"`group` must be an xarray.DataArray or the \"\n \"name of an xarray variable or dimension\"\n )\n group = obj[group]\n if len(group) == 0:\n raise ValueError(\"{} must not be empty\".format(group.name))\n\n if group.name not in obj.coords and group.name in obj.dims:\n # DummyGroups should not appear on groupby results\n group = _DummyGroup(obj, group.name, group.coords)\n\n if getattr(group, \"name\", None) is None:\n raise ValueError(\"`group` must have a name\")\n\n group, obj, stacked_dim, inserted_dims = _ensure_1d(group, obj)\n group_dim, = group.dims\n\n expected_size = obj.sizes[group_dim]\n if group.size != expected_size:\n raise ValueError(\n \"the group variable's length does not \"\n \"match the length of this variable along its \"\n \"dimension\"\n )\n\n full_index = None\n\n if bins is not None:\n if np.isnan(bins).all():\n raise ValueError(\"All bin edges are NaN.\")\n binned = pd.cut(group.values, bins, **cut_kwargs)\n new_dim_name = group.name + \"_bins\"\n group = DataArray(binned, group.coords, name=new_dim_name)\n full_index = binned.categories\n\n if grouper is not None:\n index = safe_cast_to_index(group)\n if not index.is_monotonic:\n # TODO: sort instead of raising an error\n raise ValueError(\"index must be monotonic for resampling\")\n full_index, 
first_items = self._get_index_and_items(index, grouper)\n sbins = first_items.values.astype(np.int64)\n group_indices = [slice(i, j) for i, j in zip(sbins[:-1], sbins[1:])] + [\n slice(sbins[-1], None)\n ]\n unique_coord = IndexVariable(group.name, first_items.index)\n elif group.dims == (group.name,) and _unique_and_monotonic(group):\n # no need to factorize\n group_indices = np.arange(group.size)\n if not squeeze:\n # use slices to do views instead of fancy indexing\n # equivalent to: group_indices = group_indices.reshape(-1, 1)\n group_indices = [slice(i, i + 1) for i in group_indices]\n unique_coord = group\n else:\n # look through group to find the unique values\n unique_values, group_indices = unique_value_groups(\n safe_cast_to_index(group), sort=(bins is None)\n )\n unique_coord = IndexVariable(group.name, unique_values)\n\n if len(group_indices) == 0:\n if bins is not None:\n raise ValueError(\n \"None of the data falls within bins with edges %r\" % bins\n )\n else:\n raise ValueError(\n \"Failed to group data. Are you grouping by a variable that is all NaN?\"\n )\n\n if (\n isinstance(obj, DataArray)\n and restore_coord_dims is None\n and any(obj[c].ndim > 1 for c in obj.coords)\n ):\n warnings.warn(\n \"This DataArray contains multi-dimensional \"\n \"coordinates. In the future, the dimension order \"\n \"of these coordinates will be restored as well \"\n \"unless you specify restore_coord_dims=False.\",\n FutureWarning,\n stacklevel=2,\n )\n restore_coord_dims = False\n\n # specification for the groupby operation\n self._obj = obj\n self._group = group\n self._group_dim = group_dim\n self._group_indices = group_indices\n self._unique_coord = unique_coord\n self._stacked_dim = stacked_dim\n self._inserted_dims = inserted_dims\n self._full_index = full_index\n self._restore_coord_dims = restore_coord_dims\n\n # cached attributes\n self._groups = None\n self._dims = None\n\n @property\n def dims(self):\n if self._dims is None:\n self._dims = self._obj.isel(\n **{self._group_dim: self._group_indices[0]}\n ).dims\n\n return self._dims\n\n @property\n def groups(self):\n # provided to mimic pandas.groupby\n if self._groups is None:\n self._groups = dict(zip(self._unique_coord.values, self._group_indices))\n return self._groups\n\n def __len__(self):\n return self._unique_coord.size\n\n def __iter__(self):\n return zip(self._unique_coord.values, self._iter_grouped())\n\n def __repr__(self):\n return \"%s, grouped over %r \\n%r groups with labels %s.\" % (\n self.__class__.__name__,\n self._unique_coord.name,\n self._unique_coord.size,\n \", \".join(format_array_flat(self._unique_coord, 30).split()),\n )\n\n def _get_index_and_items(self, index, grouper):\n from .resample_cftime import CFTimeGrouper\n\n s = pd.Series(np.arange(index.size), index)\n if isinstance(grouper, CFTimeGrouper):\n first_items = grouper.first_items(index)\n else:\n first_items = s.groupby(grouper).first()\n _apply_loffset(grouper, first_items)\n full_index = first_items.index\n if first_items.isnull().any():\n first_items = first_items.dropna()\n return full_index, first_items\n\n def _iter_grouped(self):\n \"\"\"Iterate over each element in this group\"\"\"\n for indices in self._group_indices:\n yield self._obj.isel(**{self._group_dim: indices})\n\n def _infer_concat_args(self, applied_example):\n if self._group_dim in applied_example.dims:\n coord = self._group\n positions = self._group_indices\n else:\n coord = self._unique_coord\n positions = None\n dim, = coord.dims\n if isinstance(coord, _DummyGroup):\n 
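# Sketch, not from the quoted source: __len__/__iter__/groups above make a
# GroupBy behave much like its pandas counterpart.
import xarray as xr

da = xr.DataArray(
    [1.0, 2.0, 3.0, 4.0],
    dims="x",
    coords={"letter": ("x", ["a", "a", "b", "b"])},
)
grouped = da.groupby("letter")
len(grouped)      # number of unique groups -> 2
grouped.groups    # mapping of group labels to the positions they cover
for label, arr in grouped:   # (unique_value, grouped_array) pairs
    print(label, arr.values)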
coord = None\n return coord, dim, positions\n\n @staticmethod\n def _binary_op(f, reflexive=False, **ignored_kwargs):\n @functools.wraps(f)\n def func(self, other):\n g = f if not reflexive else lambda x, y: f(y, x)\n applied = self._yield_binary_applied(g, other)\n combined = self._combine(applied)\n return combined\n\n return func\n\n def _yield_binary_applied(self, func, other):\n dummy = None\n\n for group_value, obj in self:\n try:\n other_sel = other.sel(**{self._group.name: group_value})\n except AttributeError:\n raise TypeError(\n \"GroupBy objects only support binary ops \"\n \"when the other argument is a Dataset or \"\n \"DataArray\"\n )\n except (KeyError, ValueError):\n if self._group.name not in other.dims:\n raise ValueError(\n \"incompatible dimensions for a grouped \"\n \"binary operation: the group variable %r \"\n \"is not a dimension on the other argument\" % self._group.name\n )\n if dummy is None:\n dummy = _dummy_copy(other)\n other_sel = dummy\n\n result = func(obj, other_sel)\n yield result\n\n def _maybe_restore_empty_groups(self, combined):\n \"\"\"Our index contained empty groups (e.g., from a resampling). If we\n reduced on that dimension, we want to restore the full index.\n \"\"\"\n if self._full_index is not None and self._group.name in combined.dims:\n indexers = {self._group.name: self._full_index}\n combined = combined.reindex(**indexers)\n return combined\n\n def _maybe_unstack(self, obj):\n \"\"\"This gets called if we are applying on an array with a\n multidimensional group.\"\"\"\n if self._stacked_dim is not None and self._stacked_dim in obj.dims:\n obj = obj.unstack(self._stacked_dim)\n for dim in self._inserted_dims:\n if dim in obj.coords:\n del obj.coords[dim]\n return obj\n\n def fillna(self, value):\n \"\"\"Fill missing values in this object by group.\n\n This operation follows the normal broadcasting and alignment rules that\n xarray uses for binary arithmetic, except the result is aligned to this\n object (``join='left'``) instead of aligned to the intersection of\n index coordinates (``join='inner'``).\n\n Parameters\n ----------\n value : valid type for the grouped object's fillna method\n Used to fill all matching missing values by group.\n\n Returns\n -------\n same type as the grouped object\n\n See also\n --------\n Dataset.fillna\n DataArray.fillna\n \"\"\"\n out = ops.fillna(self, value)\n return out\n\n def where(self, cond, other=dtypes.NA):\n \"\"\"Return elements from `self` or `other` depending on `cond`.\n\n Parameters\n ----------\n cond : DataArray or Dataset with boolean dtype\n Locations at which to preserve this objects values.\n other : scalar, DataArray or Dataset, optional\n Value to use for locations in this object where ``cond`` is False.\n By default, inserts missing values.\n\n Returns\n -------\n same type as the grouped object\n\n See also\n --------\n Dataset.where\n \"\"\"\n return ops.where_method(self, cond, other)\n\n def _first_or_last(self, op, skipna, keep_attrs):\n if isinstance(self._group_indices[0], integer_types):\n # NB. 
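# Sketch, not from the quoted source: grouped binary operations and fillna()
# as described above -- the other operand is selected group-by-group via
# _yield_binary_applied(). A classic use is removing a per-group mean.
import numpy as np
import xarray as xr

da = xr.DataArray(
    np.arange(6.0),
    dims="time",
    coords={"month": ("time", [1, 1, 2, 2, 3, 3])},
)
climatology = da.groupby("month").mean()        # one value per month, dim "month"
anomalies = da.groupby("month") - climatology   # grouped binary op (see _binary_op)
filled = da.where(da > 2).groupby("month").fillna(0.0)  # fill NaNs group-wise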
this is currently only used for reductions along an existing\n # dimension\n return self._obj\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=True)\n return self.reduce(\n op, self._group_dim, skipna=skipna, keep_attrs=keep_attrs, allow_lazy=True\n )\n\n def first(self, skipna=None, keep_attrs=None):\n \"\"\"Return the first element of each group along the group dimension\n \"\"\"\n return self._first_or_last(duck_array_ops.first, skipna, keep_attrs)\n\n def last(self, skipna=None, keep_attrs=None):\n \"\"\"Return the last element of each group along the group dimension\n \"\"\"\n return self._first_or_last(duck_array_ops.last, skipna, keep_attrs)\n\n def assign_coords(self, coords=None, **coords_kwargs):\n \"\"\"Assign coordinates by group.\n\n See also\n --------\n Dataset.assign_coords\n Dataset.swap_dims\n \"\"\"\n coords_kwargs = either_dict_or_kwargs(coords, coords_kwargs, \"assign_coords\")\n return self.apply(lambda ds: ds.assign_coords(**coords_kwargs))"},{"col":4,"comment":"null","endLoc":142,"header":"def __init__(\n self, filename_or_obj, mode=\"r\", format=None, group=None, mmap=None, lock=None\n )","id":674,"name":"__init__","nodeType":"Function","startLoc":108,"text":"def __init__(\n self, filename_or_obj, mode=\"r\", format=None, group=None, mmap=None, lock=None\n ):\n if group is not None:\n raise ValueError(\n \"cannot save to a group with the \" \"scipy.io.netcdf backend\"\n )\n\n if format is None or format == \"NETCDF3_64BIT\":\n version = 2\n elif format == \"NETCDF3_CLASSIC\":\n version = 1\n else:\n raise ValueError(\"invalid format for scipy.io.netcdf backend: %r\" % format)\n\n if lock is None and mode != \"r\" and isinstance(filename_or_obj, str):\n lock = get_write_lock(filename_or_obj)\n\n self.lock = ensure_lock(lock)\n\n if isinstance(filename_or_obj, str):\n manager = CachingFileManager(\n _open_scipy_netcdf,\n filename_or_obj,\n mode=mode,\n lock=lock,\n kwargs=dict(mmap=mmap, version=version),\n )\n else:\n scipy_dataset = _open_scipy_netcdf(\n filename_or_obj, mode=mode, mmap=mmap, version=version\n )\n manager = DummyFileManager(scipy_dataset)\n\n self._manager = manager"},{"col":4,"comment":"Provide method name lookup and completion. Only provide 'public'\n methods.\n ","endLoc":279,"header":"def __dir__(self) -> List[str]","id":675,"name":"__dir__","nodeType":"Function","startLoc":269,"text":"def __dir__(self) -> List[str]:\n \"\"\"Provide method name lookup and completion. 
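# Sketch with assumptions: "example.nc" and "copy.nc" are hypothetical local
# files. The ScipyDataStore constructor above backs the "scipy" engine, which
# only handles the netCDF3 formats named in its format check.
import xarray as xr

ds = xr.open_dataset("example.nc", engine="scipy")
ds.to_netcdf("copy.nc", engine="scipy", format="NETCDF3_64BIT")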
Only provide 'public'\n methods.\n \"\"\"\n extra_attrs = [\n item\n for sublist in self._attr_sources\n for item in sublist\n if isinstance(item, str)\n ]\n return sorted(set(dir(type(self)) + extra_attrs))"},{"col":0,"comment":"null","endLoc":389,"header":"def dim_summary(obj)","id":676,"name":"dim_summary","nodeType":"Function","startLoc":387,"text":"def dim_summary(obj):\n elements = [\"{}: {}\".format(k, v) for k, v in obj.sizes.items()]\n return \", \".join(elements)"},{"col":4,"comment":"null","endLoc":664,"header":"def __dask_graph__(self)","id":677,"name":"__dask_graph__","nodeType":"Function","startLoc":651,"text":"def __dask_graph__(self):\n graphs = {k: v.__dask_graph__() for k, v in self.variables.items()}\n graphs = {k: v for k, v in graphs.items() if v is not None}\n if not graphs:\n return None\n else:\n try:\n from dask.highlevelgraph import HighLevelGraph\n\n return HighLevelGraph.merge(*graphs.values())\n except ImportError:\n from dask import sharedict\n\n return sharedict.merge(*graphs.values())"},{"col":4,"comment":"Provide method for the key-autocompletions in IPython.\n See http://ipython.readthedocs.io/en/stable/config/integrating.html#tab-completion\n For the details.\n ","endLoc":292,"header":"def _ipython_key_completions_(self) -> List[str]","id":678,"name":"_ipython_key_completions_","nodeType":"Function","startLoc":281,"text":"def _ipython_key_completions_(self) -> List[str]:\n \"\"\"Provide method for the key-autocompletions in IPython.\n See http://ipython.readthedocs.io/en/stable/config/integrating.html#tab-completion\n For the details.\n \"\"\"\n item_lists = [\n item\n for sublist in self._item_sources\n for item in sublist\n if isinstance(item, str)\n ]\n return list(set(item_lists))"},{"attributeType":"null","col":4,"comment":"null","endLoc":189,"id":679,"name":"__slots__","nodeType":"Attribute","startLoc":189,"text":"__slots__"},{"attributeType":"function","col":12,"comment":"null","endLoc":201,"id":680,"name":"__setattr__","nodeType":"Attribute","startLoc":201,"text":"cls.__setattr__"},{"col":4,"comment":"Return a new object with squeezed data.\n\n Parameters\n ----------\n dim : None or Hashable or iterable of Hashable, optional\n Selects a subset of the length one dimensions. If a dimension is\n selected with length greater than one, an error is raised. If\n None, all length one dimensions are squeezed.\n drop : bool, optional\n If ``drop=True``, drop squeezed coordinates instead of making them\n scalar.\n axis : None or int or iterable of int, optional\n Like dim, but positional.\n\n Returns\n -------\n squeezed : same type as caller\n This object, but with with all or a subset of the dimensions of\n length 1 removed.\n\n See Also\n --------\n numpy.squeeze\n ","endLoc":367,"header":"def squeeze(\n self,\n dim: Union[Hashable, Iterable[Hashable], None] = None,\n drop: bool = False,\n axis: Union[int, Iterable[int], None] = None,\n )","id":681,"name":"squeeze","nodeType":"Function","startLoc":336,"text":"def squeeze(\n self,\n dim: Union[Hashable, Iterable[Hashable], None] = None,\n drop: bool = False,\n axis: Union[int, Iterable[int], None] = None,\n ):\n \"\"\"Return a new object with squeezed data.\n\n Parameters\n ----------\n dim : None or Hashable or iterable of Hashable, optional\n Selects a subset of the length one dimensions. If a dimension is\n selected with length greater than one, an error is raised. 
If\n None, all length one dimensions are squeezed.\n drop : bool, optional\n If ``drop=True``, drop squeezed coordinates instead of making them\n scalar.\n axis : None or int or iterable of int, optional\n Like dim, but positional.\n\n Returns\n -------\n squeezed : same type as caller\n This object, but with with all or a subset of the dimensions of\n length 1 removed.\n\n See Also\n --------\n numpy.squeeze\n \"\"\"\n dims = get_squeeze_dims(self, dim, axis)\n return self.isel(drop=drop, **{d: 0 for d in dims})"},{"col":4,"comment":"Create a GroupBy object\n\n Parameters\n ----------\n obj : Dataset or DataArray\n Object to group.\n group : DataArray\n Array with the group values.\n squeeze : boolean, optional\n If \"group\" is a coordinate of object, `squeeze` controls whether\n the subarrays have a dimension of length 1 along that coordinate or\n if the dimension is squeezed out.\n grouper : pd.Grouper, optional\n Used for grouping values along the `group` array.\n bins : array-like, optional\n If `bins` is specified, the groups will be discretized into the\n specified bins by `pandas.cut`.\n restore_coord_dims : bool, optional\n If True, also restore the dimension order of multi-dimensional\n coordinates.\n cut_kwargs : dict, optional\n Extra keyword arguments to pass to `pandas.cut`\n\n ","endLoc":395,"header":"def __init__(\n self,\n obj,\n group,\n squeeze=False,\n grouper=None,\n bins=None,\n restore_coord_dims=None,\n cut_kwargs={},\n )","id":682,"name":"__init__","nodeType":"Function","startLoc":254,"text":"def __init__(\n self,\n obj,\n group,\n squeeze=False,\n grouper=None,\n bins=None,\n restore_coord_dims=None,\n cut_kwargs={},\n ):\n \"\"\"Create a GroupBy object\n\n Parameters\n ----------\n obj : Dataset or DataArray\n Object to group.\n group : DataArray\n Array with the group values.\n squeeze : boolean, optional\n If \"group\" is a coordinate of object, `squeeze` controls whether\n the subarrays have a dimension of length 1 along that coordinate or\n if the dimension is squeezed out.\n grouper : pd.Grouper, optional\n Used for grouping values along the `group` array.\n bins : array-like, optional\n If `bins` is specified, the groups will be discretized into the\n specified bins by `pandas.cut`.\n restore_coord_dims : bool, optional\n If True, also restore the dimension order of multi-dimensional\n coordinates.\n cut_kwargs : dict, optional\n Extra keyword arguments to pass to `pandas.cut`\n\n \"\"\"\n from .dataarray import DataArray\n\n if grouper is not None and bins is not None:\n raise TypeError(\"can't specify both `grouper` and `bins`\")\n\n if not isinstance(group, (DataArray, IndexVariable)):\n if not hashable(group):\n raise TypeError(\n \"`group` must be an xarray.DataArray or the \"\n \"name of an xarray variable or dimension\"\n )\n group = obj[group]\n if len(group) == 0:\n raise ValueError(\"{} must not be empty\".format(group.name))\n\n if group.name not in obj.coords and group.name in obj.dims:\n # DummyGroups should not appear on groupby results\n group = _DummyGroup(obj, group.name, group.coords)\n\n if getattr(group, \"name\", None) is None:\n raise ValueError(\"`group` must have a name\")\n\n group, obj, stacked_dim, inserted_dims = _ensure_1d(group, obj)\n group_dim, = group.dims\n\n expected_size = obj.sizes[group_dim]\n if group.size != expected_size:\n raise ValueError(\n \"the group variable's length does not \"\n \"match the length of this variable along its \"\n \"dimension\"\n )\n\n full_index = None\n\n if bins is not None:\n if 
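# Sketch, not from the quoted source: get_squeeze_dims()/squeeze() above on a
# toy array with two length-1 dimensions.
import numpy as np
import xarray as xr

da = xr.DataArray(np.zeros((1, 3, 1)), dims=("x", "y", "z"))
da.squeeze()        # drops every length-1 dimension -> dims ("y",)
da.squeeze("x")     # squeeze only "x"
da.squeeze(axis=2)  # positional form -> drops "z"
# da.squeeze("y")   # "y" has length 3, so this raises ValueError as documented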
np.isnan(bins).all():\n raise ValueError(\"All bin edges are NaN.\")\n binned = pd.cut(group.values, bins, **cut_kwargs)\n new_dim_name = group.name + \"_bins\"\n group = DataArray(binned, group.coords, name=new_dim_name)\n full_index = binned.categories\n\n if grouper is not None:\n index = safe_cast_to_index(group)\n if not index.is_monotonic:\n # TODO: sort instead of raising an error\n raise ValueError(\"index must be monotonic for resampling\")\n full_index, first_items = self._get_index_and_items(index, grouper)\n sbins = first_items.values.astype(np.int64)\n group_indices = [slice(i, j) for i, j in zip(sbins[:-1], sbins[1:])] + [\n slice(sbins[-1], None)\n ]\n unique_coord = IndexVariable(group.name, first_items.index)\n elif group.dims == (group.name,) and _unique_and_monotonic(group):\n # no need to factorize\n group_indices = np.arange(group.size)\n if not squeeze:\n # use slices to do views instead of fancy indexing\n # equivalent to: group_indices = group_indices.reshape(-1, 1)\n group_indices = [slice(i, i + 1) for i in group_indices]\n unique_coord = group\n else:\n # look through group to find the unique values\n unique_values, group_indices = unique_value_groups(\n safe_cast_to_index(group), sort=(bins is None)\n )\n unique_coord = IndexVariable(group.name, unique_values)\n\n if len(group_indices) == 0:\n if bins is not None:\n raise ValueError(\n \"None of the data falls within bins with edges %r\" % bins\n )\n else:\n raise ValueError(\n \"Failed to group data. Are you grouping by a variable that is all NaN?\"\n )\n\n if (\n isinstance(obj, DataArray)\n and restore_coord_dims is None\n and any(obj[c].ndim > 1 for c in obj.coords)\n ):\n warnings.warn(\n \"This DataArray contains multi-dimensional \"\n \"coordinates. In the future, the dimension order \"\n \"of these coordinates will be restored as well \"\n \"unless you specify restore_coord_dims=False.\",\n FutureWarning,\n stacklevel=2,\n )\n restore_coord_dims = False\n\n # specification for the groupby operation\n self._obj = obj\n self._group = group\n self._group_dim = group_dim\n self._group_indices = group_indices\n self._unique_coord = unique_coord\n self._stacked_dim = stacked_dim\n self._inserted_dims = inserted_dims\n self._full_index = full_index\n self._restore_coord_dims = restore_coord_dims\n\n # cached attributes\n self._groups = None\n self._dims = None"},{"col":0,"comment":"Get a list of dimensions to squeeze out.\n ","endLoc":326,"header":"def get_squeeze_dims(\n xarray_obj,\n dim: Union[Hashable, Iterable[Hashable], None] = None,\n axis: Union[int, Iterable[int], None] = None,\n) -> List[Hashable]","id":683,"name":"get_squeeze_dims","nodeType":"Function","startLoc":295,"text":"def get_squeeze_dims(\n xarray_obj,\n dim: Union[Hashable, Iterable[Hashable], None] = None,\n axis: Union[int, Iterable[int], None] = None,\n) -> List[Hashable]:\n \"\"\"Get a list of dimensions to squeeze out.\n \"\"\"\n if dim is not None and axis is not None:\n raise ValueError(\"cannot use both parameters `axis` and `dim`\")\n if dim is None and axis is None:\n return [d for d, s in xarray_obj.sizes.items() if s == 1]\n\n if isinstance(dim, Iterable) and not isinstance(dim, str):\n dim = list(dim)\n elif dim is not None:\n dim = [dim]\n else:\n assert axis is not None\n if isinstance(axis, int):\n axis = [axis]\n axis = list(axis)\n if any(not isinstance(a, int) for a in axis):\n raise TypeError(\"parameter `axis` must be int or iterable of int.\")\n alldims = list(xarray_obj.sizes.keys())\n dim = [alldims[a] for a in 
axis]\n\n if any(xarray_obj.sizes[k] > 1 for k in dim):\n raise ValueError(\n \"cannot select a dimension to squeeze out \"\n \"which has length greater than one\"\n )\n return dim"},{"col":0,"comment":"Determine whether `v` can be hashed.\n ","endLoc":594,"header":"def hashable(v: Any) -> bool","id":684,"name":"hashable","nodeType":"Function","startLoc":587,"text":"def hashable(v: Any) -> bool:\n \"\"\"Determine whether `v` can be hashed.\n \"\"\"\n try:\n hash(v)\n except TypeError:\n return False\n return True"},{"col":4,"comment":"null","endLoc":148,"header":"def __init__(self, obj, name, coords)","id":685,"name":"__init__","nodeType":"Function","startLoc":145,"text":"def __init__(self, obj, name, coords):\n self.name = name\n self.coords = coords\n self.size = obj.sizes[name]"},{"col":4,"comment":"null","endLoc":384,"header":"def _calc_assign_results(\n self: C, kwargs: Mapping[Hashable, Union[T, Callable[[C], T]]]\n ) -> Dict[Hashable, T]","id":686,"name":"_calc_assign_results","nodeType":"Function","startLoc":381,"text":"def _calc_assign_results(\n self: C, kwargs: Mapping[Hashable, Union[T, Callable[[C], T]]]\n ) -> Dict[Hashable, T]:\n return {k: v(self) if callable(v) else v for k, v in kwargs.items()}"},{"col":0,"comment":"null","endLoc":186,"header":"def _ensure_1d(group, obj)","id":687,"name":"_ensure_1d","nodeType":"Function","startLoc":172,"text":"def _ensure_1d(group, obj):\n if group.ndim != 1:\n # try to stack the dims of the group into a single dim\n orig_dims = group.dims\n stacked_dim = \"stacked_\" + \"_\".join(orig_dims)\n # these dimensions get created by the stack operation\n inserted_dims = [dim for dim in group.dims if dim not in group.coords]\n # the copy is necessary here, otherwise read only array raises error\n # in pandas: https://github.com/pydata/pandas/issues/12813\n group = group.stack(**{stacked_dim: orig_dims}).copy()\n obj = obj.stack(**{stacked_dim: orig_dims})\n else:\n stacked_dim = None\n inserted_dims = []\n return group, obj, stacked_dim, inserted_dims"},{"col":0,"comment":"null","endLoc":398,"header":"def unindexed_dims_repr(dims, coords)","id":688,"name":"unindexed_dims_repr","nodeType":"Function","startLoc":392,"text":"def unindexed_dims_repr(dims, coords):\n unindexed_dims = [d for d in dims if d not in coords]\n if unindexed_dims:\n dims_str = \", \".join(\"{}\".format(d) for d in unindexed_dims)\n return \"Dimensions without coordinates: \" + dims_str\n else:\n return None"},{"col":0,"comment":"null","endLoc":408,"header":"@contextlib.contextmanager\ndef set_numpy_options(*args, **kwargs)","id":689,"name":"set_numpy_options","nodeType":"Function","startLoc":401,"text":"@contextlib.contextmanager\ndef set_numpy_options(*args, **kwargs):\n original = np.get_printoptions()\n np.set_printoptions(*args, **kwargs)\n try:\n yield\n finally:\n np.set_printoptions(**original)"},{"col":0,"comment":"null","endLoc":425,"header":"def short_numpy_repr(array)","id":690,"name":"short_numpy_repr","nodeType":"Function","startLoc":411,"text":"def short_numpy_repr(array):\n array = np.asarray(array)\n\n # default to lower precision so a full (abbreviated) line can fit on\n # one line with the default display_width\n options = {\"precision\": 6, \"linewidth\": OPTIONS[\"display_width\"], \"threshold\": 200}\n if array.ndim < 3:\n edgeitems = 3\n elif array.ndim == 3:\n edgeitems = 2\n else:\n edgeitems = 1\n options[\"edgeitems\"] = edgeitems\n with set_numpy_options(**options):\n return 
repr(array)"},{"col":4,"comment":"null","endLoc":1900,"header":"@overload\n def drop(\n self, labels: Union[Hashable, Iterable[Hashable]], *, errors: str = \"raise\"\n ) -> \"DataArray\"","id":691,"name":"drop","nodeType":"Function","startLoc":1896,"text":"@overload\n def drop(\n self, labels: Union[Hashable, Iterable[Hashable]], *, errors: str = \"raise\"\n ) -> \"DataArray\":\n ..."},{"col":4,"comment":"null","endLoc":1907,"header":"@overload # noqa: F811\n def drop(\n self, labels: Any, dim: Hashable, *, errors: str = \"raise\" # array-like\n ) -> \"DataArray\"","id":692,"name":"drop","nodeType":"Function","startLoc":1903,"text":"@overload # noqa: F811\n def drop(\n self, labels: Any, dim: Hashable, *, errors: str = \"raise\" # array-like\n ) -> \"DataArray\":\n ..."},{"col":0,"comment":"null","endLoc":96,"header":"def _open_scipy_netcdf(filename, mode, mmap, version)","id":693,"name":"_open_scipy_netcdf","nodeType":"Function","startLoc":59,"text":"def _open_scipy_netcdf(filename, mode, mmap, version):\n import scipy.io\n import gzip\n\n # if the string ends with .gz, then gunzip and open as netcdf file\n if isinstance(filename, str) and filename.endswith(\".gz\"):\n try:\n return scipy.io.netcdf_file(\n gzip.open(filename), mode=mode, mmap=mmap, version=version\n )\n except TypeError as e:\n # TODO: gzipped loading only works with NetCDF3 files.\n if \"is not a valid NetCDF 3 file\" in e.message:\n raise ValueError(\n \"gzipped file loading only supports \" \"NetCDF 3 files.\"\n )\n else:\n raise\n\n if isinstance(filename, bytes) and filename.startswith(b\"CDF\"):\n # it's a NetCDF3 bytestring\n filename = BytesIO(filename)\n\n try:\n return scipy.io.netcdf_file(filename, mode=mode, mmap=mmap, version=version)\n except TypeError as e: # netcdf3 message is obscure in this case\n errmsg = e.args[0]\n if \"is not a valid NetCDF 3 file\" in errmsg:\n msg = \"\"\"\n If this is a NetCDF4 file, you may need to install the\n netcdf4 library, e.g.,\n\n $ pip install netcdf4\n \"\"\"\n errmsg += msg\n raise TypeError(errmsg)\n else:\n raise"},{"attributeType":"null","col":17,"comment":"null","endLoc":18,"id":694,"name":"pd","nodeType":"Attribute","startLoc":18,"text":"pd"},{"attributeType":"null","col":4,"comment":"null","endLoc":30,"id":695,"name":"DimsLike","nodeType":"Attribute","startLoc":30,"text":"DimsLike"},{"col":4,"comment":"null","endLoc":673,"header":"def __dask_keys__(self)","id":696,"name":"__dask_keys__","nodeType":"Function","startLoc":666,"text":"def __dask_keys__(self):\n import dask\n\n return [\n v.__dask_keys__()\n for v in self.variables.values()\n if dask.is_dask_collection(v)\n ]"},{"col":4,"comment":"Assign new coordinates to this object.\n\n Returns a new object with all the original data in addition to the new\n coordinates.\n\n Parameters\n ----------\n coords : dict, optional\n A dict with keys which are variables names. If the values are\n callable, they are computed on this object and assigned to new\n coordinate variables. If the values are not callable,\n (e.g. a ``DataArray``, scalar, or array), they are simply assigned.\n\n **coords_kwargs : keyword, value pairs, optional\n The keyword arguments form of ``coords``.\n One of ``coords`` or ``coords_kwargs`` must be provided.\n\n Returns\n -------\n assigned : same type as caller\n A new object with the new coordinates in addition to the existing\n data.\n\n Examples\n --------\n Convert longitude coordinates from 0-359 to -180-179:\n\n >>> da = xr.DataArray(np.random.rand(4),\n ... 
coords=[np.array([358, 359, 0, 1])],\n ... dims='lon')\n >>> da\n \n array([0.28298 , 0.667347, 0.657938, 0.177683])\n Coordinates:\n * lon (lon) int64 358 359 0 1\n >>> da.assign_coords(lon=(((da.lon + 180) % 360) - 180))\n \n array([0.28298 , 0.667347, 0.657938, 0.177683])\n Coordinates:\n * lon (lon) int64 -2 -1 0 1\n\n The function also accepts dictionary arguments:\n\n >>> da.assign_coords({'lon': (((da.lon + 180) % 360) - 180)})\n \n array([0.28298 , 0.667347, 0.657938, 0.177683])\n Coordinates:\n * lon (lon) int64 -2 -1 0 1\n\n Notes\n -----\n Since ``coords_kwargs`` is a dictionary, the order of your arguments may\n not be preserved, and so the order of the new variables is not well\n defined. Assigning multiple variables within the same ``assign_coords``\n is possible, but you cannot reference other variables created within\n the same ``assign_coords`` call.\n\n See also\n --------\n Dataset.assign\n Dataset.swap_dims\n ","endLoc":453,"header":"def assign_coords(self, coords=None, **coords_kwargs)","id":697,"name":"assign_coords","nodeType":"Function","startLoc":386,"text":"def assign_coords(self, coords=None, **coords_kwargs):\n \"\"\"Assign new coordinates to this object.\n\n Returns a new object with all the original data in addition to the new\n coordinates.\n\n Parameters\n ----------\n coords : dict, optional\n A dict with keys which are variables names. If the values are\n callable, they are computed on this object and assigned to new\n coordinate variables. If the values are not callable,\n (e.g. a ``DataArray``, scalar, or array), they are simply assigned.\n\n **coords_kwargs : keyword, value pairs, optional\n The keyword arguments form of ``coords``.\n One of ``coords`` or ``coords_kwargs`` must be provided.\n\n Returns\n -------\n assigned : same type as caller\n A new object with the new coordinates in addition to the existing\n data.\n\n Examples\n --------\n Convert longitude coordinates from 0-359 to -180-179:\n\n >>> da = xr.DataArray(np.random.rand(4),\n ... coords=[np.array([358, 359, 0, 1])],\n ... dims='lon')\n >>> da\n \n array([0.28298 , 0.667347, 0.657938, 0.177683])\n Coordinates:\n * lon (lon) int64 358 359 0 1\n >>> da.assign_coords(lon=(((da.lon + 180) % 360) - 180))\n \n array([0.28298 , 0.667347, 0.657938, 0.177683])\n Coordinates:\n * lon (lon) int64 -2 -1 0 1\n\n The function also accepts dictionary arguments:\n\n >>> da.assign_coords({'lon': (((da.lon + 180) % 360) - 180)})\n \n array([0.28298 , 0.667347, 0.657938, 0.177683])\n Coordinates:\n * lon (lon) int64 -2 -1 0 1\n\n Notes\n -----\n Since ``coords_kwargs`` is a dictionary, the order of your arguments may\n not be preserved, and so the order of the new variables is not well\n defined. 
Assigning multiple variables within the same ``assign_coords``\n is possible, but you cannot reference other variables created within\n the same ``assign_coords`` call.\n\n See also\n --------\n Dataset.assign\n Dataset.swap_dims\n \"\"\"\n coords_kwargs = either_dict_or_kwargs(coords, coords_kwargs, \"assign_coords\")\n data = self.copy(deep=False)\n results = self._calc_assign_results(coords_kwargs)\n data.coords.update(results)\n return data"},{"col":0,"comment":"null","endLoc":266,"header":"def either_dict_or_kwargs(\n pos_kwargs: Optional[Mapping[Hashable, T]],\n kw_kwargs: Mapping[str, T],\n func_name: str,\n) -> Mapping[Hashable, T]","id":698,"name":"either_dict_or_kwargs","nodeType":"Function","startLoc":247,"text":"def either_dict_or_kwargs(\n pos_kwargs: Optional[Mapping[Hashable, T]],\n kw_kwargs: Mapping[str, T],\n func_name: str,\n) -> Mapping[Hashable, T]:\n if pos_kwargs is not None:\n if not is_dict_like(pos_kwargs):\n raise ValueError(\n \"the first argument to .%s must be a dictionary\" % func_name\n )\n if kw_kwargs:\n raise ValueError(\n \"cannot specify both keyword and positional \"\n \"arguments to .%s\" % func_name\n )\n return pos_kwargs\n else:\n # Need an explicit cast to appease mypy due to invariance; see\n # https://github.com/python/mypy/issues/6228\n return cast(Mapping[Hashable, T], kw_kwargs)"},{"attributeType":"null","col":4,"comment":"null","endLoc":31,"id":699,"name":"ArrayLike","nodeType":"Attribute","startLoc":31,"text":"ArrayLike"},{"attributeType":"null","col":4,"comment":"null","endLoc":32,"id":700,"name":"VariableLike","nodeType":"Attribute","startLoc":32,"text":"VariableLike"},{"attributeType":"null","col":4,"comment":"null","endLoc":38,"id":701,"name":"XarrayValue","nodeType":"Attribute","startLoc":38,"text":"XarrayValue"},{"attributeType":"null","col":4,"comment":"null","endLoc":39,"id":702,"name":"DatasetLike","nodeType":"Attribute","startLoc":39,"text":"DatasetLike"},{"attributeType":"null","col":4,"comment":"null","endLoc":40,"id":703,"name":"CoercibleValue","nodeType":"Attribute","startLoc":40,"text":"CoercibleValue"},{"col":0,"comment":"Format \"data\" for DataArray and Variable.","endLoc":441,"header":"def short_data_repr(array)","id":704,"name":"short_data_repr","nodeType":"Function","startLoc":428,"text":"def short_data_repr(array):\n \"\"\"Format \"data\" for DataArray and Variable.\"\"\"\n internal_data = getattr(array, \"variable\", array)._data\n if isinstance(array, np.ndarray):\n return short_numpy_repr(array)\n elif hasattr(internal_data, \"__array_function__\") or isinstance(\n internal_data, dask_array_type\n ):\n return repr(array.data)\n elif array._in_memory or array.size < 1e5:\n return short_numpy_repr(array)\n else:\n # internal xarray array type\n return \"[{} values with dtype={}]\".format(array.size, array.dtype)"},{"attributeType":"null","col":4,"comment":"null","endLoc":41,"id":705,"name":"CoercibleMapping","nodeType":"Attribute","startLoc":41,"text":"CoercibleMapping"},{"attributeType":"null","col":0,"comment":"null","endLoc":44,"id":706,"name":"PANDAS_TYPES","nodeType":"Attribute","startLoc":44,"text":"PANDAS_TYPES"},{"attributeType":"null","col":0,"comment":"null","endLoc":150,"id":707,"name":"MergeElement","nodeType":"Attribute","startLoc":150,"text":"MergeElement"},{"col":0,"comment":"","endLoc":16,"header":"merge.py#","id":708,"name":"","nodeType":"Function","startLoc":1,"text":"if TYPE_CHECKING:\n from .coordinates import Coordinates\n from .dataarray import DataArray\n from .dataset import Dataset\n\n 
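# Sketch, not from the quoted source: either_dict_or_kwargs() above is why
# assign_coords() (and similar methods) accept either a dict or keyword
# arguments, but not both at once.
import xarray as xr

da = xr.DataArray([1, 2, 3], dims="x")
da.assign_coords(x=[10, 20, 30])        # keyword form
da.assign_coords({"x": [10, 20, 30]})   # dict form, handy for non-string keys
# da.assign_coords({"x": [10, 20, 30]}, x=[1, 2, 3])  # ValueError: both forms given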
DimsLike = Union[Hashable, Sequence[Hashable]]\n ArrayLike = Any\n VariableLike = Union[\n ArrayLike,\n Tuple[DimsLike, ArrayLike],\n Tuple[DimsLike, ArrayLike, Mapping],\n Tuple[DimsLike, ArrayLike, Mapping, Mapping],\n ]\n XarrayValue = Union[DataArray, Variable, VariableLike]\n DatasetLike = Union[Dataset, Mapping[Hashable, XarrayValue]]\n CoercibleValue = Union[XarrayValue, pd.Series, pd.DataFrame]\n CoercibleMapping = Union[Dataset, Mapping[Hashable, CoercibleValue]]\n\nPANDAS_TYPES = (pd.Series, pd.DataFrame, pdcompat.Panel)\n\n_VALID_COMPAT = Frozen(\n {\n \"identical\": 0,\n \"equals\": 1,\n \"broadcast_equals\": 2,\n \"minimal\": 3,\n \"no_conflicts\": 4,\n \"override\": 5,\n }\n)\n\nMergeElement = Tuple[Variable, Optional[pd.Index]]"},{"col":0,"comment":"null","endLoc":467,"header":"def array_repr(arr)","id":709,"name":"array_repr","nodeType":"Function","startLoc":444,"text":"def array_repr(arr):\n # used for DataArray, Variable and IndexVariable\n if hasattr(arr, \"name\") and arr.name is not None:\n name_str = \"{!r} \".format(arr.name)\n else:\n name_str = \"\"\n\n summary = [\n \"\".format(type(arr).__name__, name_str, dim_summary(arr)),\n short_data_repr(arr),\n ]\n\n if hasattr(arr, \"coords\"):\n if arr.coords:\n summary.append(repr(arr.coords))\n\n unindexed_dims_str = unindexed_dims_repr(arr.dims, arr.coords)\n if unindexed_dims_str:\n summary.append(unindexed_dims_str)\n\n if arr.attrs:\n summary.append(attrs_repr(arr.attrs))\n\n return \"\\n\".join(summary)"},{"col":4,"comment":"null","endLoc":685,"header":"def __dask_layers__(self)","id":710,"name":"__dask_layers__","nodeType":"Function","startLoc":675,"text":"def __dask_layers__(self):\n import dask\n\n return sum(\n [\n v.__dask_layers__()\n for v in self.variables.values()\n if dask.is_dask_collection(v)\n ],\n (),\n )"},{"id":711,"name":"ci/requirements","nodeType":"Package"},{"id":712,"name":"py36-min-all-deps.yml","nodeType":"TextFile","path":"ci/requirements","text":"name: xarray-tests\nchannels:\n - conda-forge\ndependencies:\n # MINIMUM VERSIONS POLICY: see doc/installing.rst\n # Run ci/min_deps_check.py to verify that this file respects the policy.\n # When upgrading python, numpy, or pandas, must also change\n # doc/installing.rst and setup.py.\n - python=3.6\n - black\n - boto3=1.9\n - bottleneck=1.2\n - cartopy=0.17\n - cdms2=3.1\n - cfgrib=0.9\n - cftime=1.0\n - coveralls\n - dask=1.2\n - distributed=1.27\n - flake8\n - h5netcdf=0.7\n - h5py=2.9 # Policy allows for 2.10, but it's a conflict-fest\n - hdf5=1.10\n - hypothesis\n - iris=2.2\n - lxml=4.4 # Optional dep of pydap\n - matplotlib=3.1\n - mypy=0.730 # Must match .pre-commit-config.yaml\n - nc-time-axis=1.2\n - netcdf4=1.4\n - numba=0.44\n - numpy=1.14\n - pandas=0.24\n - pip\n - pseudonetcdf=3.0\n - pydap=3.2\n - pynio=1.5\n - pytest\n - pytest-cov\n - pytest-env\n - rasterio=1.0\n - scipy=1.0 # Policy allows for 1.2, but scipy>=1.1 breaks numpy=1.14\n - seaborn=0.9\n # - sparse # See py36-min-nep18.yml\n - toolz=0.10\n - zarr=2.3\n - pip:\n - numbagg==0.1\n"},{"id":713,"name":"terminology.rst","nodeType":"TextFile","path":"doc","text":".. _terminology:\n\nTerminology\n===========\n\n*Xarray terminology differs slightly from CF, mathematical conventions, and pandas; and therefore using xarray, understanding the documentation, and parsing error messages is easier once key terminology is defined. This glossary was designed so that more fundamental concepts come first. Thus for new users, this page is best read top-to-bottom. 
Throughout the glossary,* ``arr`` *will refer to an xarray* :py:class:`DataArray` *in any small examples. For more complete examples, please consult the relevant documentation.*\n\n----\n\n**DataArray:** A multi-dimensional array with labeled or named dimensions. ``DataArray`` objects add metadata such as dimension names, coordinates, and attributes (defined below) to underlying \"unlabeled\" data structures such as numpy and Dask arrays. If its optional ``name`` property is set, it is a *named DataArray*.\n\n----\n\n**Dataset:** A dict-like collection of ``DataArray`` objects with aligned dimensions. Thus, most operations that can be performed on the dimensions of a single ``DataArray`` can be performed on a dataset. Datasets have data variables (see **Variable** below), dimensions, coordinates, and attributes.\n\n----\n\n**Variable:** A `NetCDF-like variable `_ consisting of dimensions, data, and attributes which describe a single array. The main functional difference between variables and numpy arrays is that numerical operations on variables implement array broadcasting by dimension name. Each ``DataArray`` has an underlying variable that can be accessed via ``arr.variable``. However, a variable is not fully described outside of either a ``Dataset`` or a ``DataArray``.\n\n.. note::\n\n The :py:class:`Variable` class is low-level interface and can typically be ignored. However, the word \"variable\" appears often enough in the code and documentation that is useful to understand.\n\n----\n\n**Dimension:** In mathematics, the *dimension* of data is loosely the number of degrees of freedom for it. A *dimension axis* is a set of all points in which all but one of these degrees of freedom is fixed. We can think of each dimension axis as having a name, for example the \"x dimension\". In xarray, a ``DataArray`` object's *dimensions* are its named dimension axes, and the name of the ``i``-th dimension is ``arr.dims[i]``. If an array is created without dimensions, the default dimension names are ``dim_0``, ``dim_1``, and so forth.\n\n----\n\n**Coordinate:** An array that labels a dimension of another ``DataArray``. Loosely, the coordinate array's values can be thought of as tick labels along a dimension. There are two types of coordinate arrays: *dimension coordinates* and *non-dimension coordinates* (see below). A coordinate named ``x`` can be retrieved from ``arr.coords[x]``. A ``DataArray`` can have more coordinates than dimensions because a single dimension can be assigned multiple coordinate arrays. However, only one coordinate array can be a assigned as a particular dimension's dimension coordinate array. As a consequence, ``len(arr.dims) <= len(arr.coords)`` in general.\n\n----\n\n**Dimension coordinate:** A coordinate array assigned to ``arr`` with both a name and dimension name in ``arr.dims``. Dimension coordinates are used for label-based indexing and alignment, like the index found on a :py:class:`pandas.DataFrame` or :py:class:`pandas.Series`. In fact, dimension coordinates use :py:class:`pandas.Index` objects under the hood for efficient computation. Dimension coordinates are marked by ``*`` when printing a ``DataArray`` or ``Dataset``.\n\n----\n\n**Non-dimension coordinate:** A coordinate array assigned to ``arr`` with a name in ``arr.dims`` but a dimension name *not* in ``arr.dims``. These coordinate arrays are useful for auxiliary labeling. However, non-dimension coordinates are not indexed, and any operation on non-dimension coordinates that leverages indexing will fail. 
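# Sketch, not from the quoted source: the dimension vs non-dimension
# coordinate distinction described in the terminology text above.
import xarray as xr

arr = xr.DataArray(
    [10, 20, 30],
    dims="x",
    coords={"x": [0, 1, 2], "label": ("x", ["a", "b", "c"])},
)
arr.coords["x"]      # dimension coordinate (marked with * when printed)
arr.coords["label"]  # non-dimension coordinate: auxiliary labels, no index
arr.indexes["x"]     # the underlying pandas.Index used for selection
arr.sel(x=1)         # label-based indexing works on the dimension coordinate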
Printing ``arr.coords`` will print all of ``arr``'s coordinate names, with the assigned dimensions in parentheses. For example, ``coord_name (dim_name) 1 2 3 ...``.\n\n----\n\n**Index:** An *index* is a data structure optimized for efficient selecting and slicing of an associated array. Xarray creates indexes for dimension coordinates so that operations along dimensions are fast, while non-dimension coordinates are not indexed. Under the hood, indexes are implemented as :py:class:`pandas.Index` objects. The index associated with dimension name ``x`` can be retrieved by ``arr.indexes[x]``. By construction, ``len(arr.dims) == len(arr.indexes)``"},{"col":4,"comment":"null","endLoc":79,"header":"@classmethod\n def open(cls, url, session=None)","id":714,"name":"open","nodeType":"Function","startLoc":74,"text":"@classmethod\n def open(cls, url, session=None):\n import pydap.client\n\n ds = pydap.client.open_url(url, session=session)\n return cls(ds)"},{"col":4,"comment":"Assign new attrs to this object.\n\n Returns a new object equivalent to self.attrs.update(*args, **kwargs).\n\n Parameters\n ----------\n args : positional arguments passed into ``attrs.update``.\n kwargs : keyword arguments passed into ``attrs.update``.\n\n Returns\n -------\n assigned : same type as caller\n A new object with the new attrs in addition to the existing data.\n\n See also\n --------\n Dataset.assign\n ","endLoc":476,"header":"def assign_attrs(self, *args, **kwargs)","id":715,"name":"assign_attrs","nodeType":"Function","startLoc":455,"text":"def assign_attrs(self, *args, **kwargs):\n \"\"\"Assign new attrs to this object.\n\n Returns a new object equivalent to self.attrs.update(*args, **kwargs).\n\n Parameters\n ----------\n args : positional arguments passed into ``attrs.update``.\n kwargs : keyword arguments passed into ``attrs.update``.\n\n Returns\n -------\n assigned : same type as caller\n A new object with the new attrs in addition to the existing data.\n\n See also\n --------\n Dataset.assign\n \"\"\"\n out = self.copy(deep=False)\n out.attrs.update(*args, **kwargs)\n return out"},{"col":4,"comment":"null","endLoc":691,"header":"@property\n def __dask_optimize__(self)","id":716,"name":"__dask_optimize__","nodeType":"Function","startLoc":687,"text":"@property\n def __dask_optimize__(self):\n import dask.array as da\n\n return da.Array.__dask_optimize__"},{"col":4,"comment":"null","endLoc":697,"header":"@property\n def __dask_scheduler__(self)","id":717,"name":"__dask_scheduler__","nodeType":"Function","startLoc":693,"text":"@property\n def __dask_scheduler__(self):\n import dask.array as da\n\n return da.Array.__dask_scheduler__"},{"col":4,"comment":"null","endLoc":717,"header":"def __dask_postcompute__(self)","id":718,"name":"__dask_postcompute__","nodeType":"Function","startLoc":699,"text":"def __dask_postcompute__(self):\n import dask\n\n info = [\n (True, k, v.__dask_postcompute__())\n if dask.is_dask_collection(v)\n else (False, k, v)\n for k, v in self._variables.items()\n ]\n args = (\n info,\n self._coord_names,\n self._dims,\n self._attrs,\n self._indexes,\n self._encoding,\n self._file_obj,\n )\n return self._dask_postcompute, args"},{"col":4,"comment":"\n Apply func(self, *args, **kwargs)\n\n This method replicates the pandas method of the same name.\n\n Parameters\n ----------\n func : function\n function to apply to this xarray object (Dataset/DataArray).\n ``args``, and ``kwargs`` are passed into ``func``.\n Alternatively a ``(callable, data_keyword)`` tuple where\n ``data_keyword`` is a 
string indicating the keyword of\n ``callable`` that expects the xarray object.\n args : positional arguments passed into ``func``.\n kwargs : a dictionary of keyword arguments passed into ``func``.\n\n Returns\n -------\n object : the return type of ``func``.\n\n Notes\n -----\n\n Use ``.pipe`` when chaining together functions that expect\n xarray or pandas objects, e.g., instead of writing\n\n >>> f(g(h(ds), arg1=a), arg2=b, arg3=c)\n\n You can write\n\n >>> (ds.pipe(h)\n ... .pipe(g, arg1=a)\n ... .pipe(f, arg2=b, arg3=c)\n ... )\n\n If you have a function that takes the data as (say) the second\n argument, pass a tuple indicating which keyword expects the\n data. For example, suppose ``f`` takes its data as ``arg2``:\n\n >>> (ds.pipe(h)\n ... .pipe(g, arg1=a)\n ... .pipe((f, 'arg2'), arg1=a, arg3=c)\n ... )\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> x = xr.Dataset(\n ... {\n ... \"temperature_c\": ((\"lat\", \"lon\"), 20 * np.random.rand(4).reshape(2, 2)),\n ... \"precipitation\": ((\"lat\", \"lon\"), np.random.rand(4).reshape(2, 2)),\n ... },\n ... coords={\"lat\": [10, 20], \"lon\": [150, 160]},\n ... )\n >>> x\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 14.53 11.85 19.27 16.37\n precipitation (lat, lon) float64 0.7315 0.7189 0.8481 0.4671\n\n >>> def adder(data, arg):\n ... return data + arg\n ...\n >>> def div(data, arg):\n ... return data / arg\n ...\n >>> def sub_mult(data, sub_arg, mult_arg):\n ... return (data * mult_arg) - sub_arg\n ...\n >>> x.pipe(adder, 2)\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lon (lon) int64 150 160\n * lat (lat) int64 10 20\n Data variables:\n temperature_c (lat, lon) float64 16.53 13.85 21.27 18.37\n precipitation (lat, lon) float64 2.731 2.719 2.848 2.467\n\n >>> x.pipe(adder, arg=2)\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lon (lon) int64 150 160\n * lat (lat) int64 10 20\n Data variables:\n temperature_c (lat, lon) float64 16.53 13.85 21.27 18.37\n precipitation (lat, lon) float64 2.731 2.719 2.848 2.467\n\n >>> (\n ... x\n ... .pipe(adder, arg=2)\n ... .pipe(div, arg=2)\n ... .pipe(sub_mult, sub_arg=2, mult_arg=2)\n ... 
)\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lon (lon) int64 150 160\n * lat (lat) int64 10 20\n Data variables:\n temperature_c (lat, lon) float64 14.53 11.85 19.27 16.37\n precipitation (lat, lon) float64 0.7315 0.7189 0.8481 0.4671\n\n See Also\n --------\n pandas.DataFrame.pipe\n ","endLoc":607,"header":"def pipe(\n self,\n func: Union[Callable[..., T], Tuple[Callable[..., T], str]],\n *args,\n **kwargs\n ) -> T","id":719,"name":"pipe","nodeType":"Function","startLoc":478,"text":"def pipe(\n self,\n func: Union[Callable[..., T], Tuple[Callable[..., T], str]],\n *args,\n **kwargs\n ) -> T:\n \"\"\"\n Apply func(self, *args, **kwargs)\n\n This method replicates the pandas method of the same name.\n\n Parameters\n ----------\n func : function\n function to apply to this xarray object (Dataset/DataArray).\n ``args``, and ``kwargs`` are passed into ``func``.\n Alternatively a ``(callable, data_keyword)`` tuple where\n ``data_keyword`` is a string indicating the keyword of\n ``callable`` that expects the xarray object.\n args : positional arguments passed into ``func``.\n kwargs : a dictionary of keyword arguments passed into ``func``.\n\n Returns\n -------\n object : the return type of ``func``.\n\n Notes\n -----\n\n Use ``.pipe`` when chaining together functions that expect\n xarray or pandas objects, e.g., instead of writing\n\n >>> f(g(h(ds), arg1=a), arg2=b, arg3=c)\n\n You can write\n\n >>> (ds.pipe(h)\n ... .pipe(g, arg1=a)\n ... .pipe(f, arg2=b, arg3=c)\n ... )\n\n If you have a function that takes the data as (say) the second\n argument, pass a tuple indicating which keyword expects the\n data. For example, suppose ``f`` takes its data as ``arg2``:\n\n >>> (ds.pipe(h)\n ... .pipe(g, arg1=a)\n ... .pipe((f, 'arg2'), arg1=a, arg3=c)\n ... )\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> x = xr.Dataset(\n ... {\n ... \"temperature_c\": ((\"lat\", \"lon\"), 20 * np.random.rand(4).reshape(2, 2)),\n ... \"precipitation\": ((\"lat\", \"lon\"), np.random.rand(4).reshape(2, 2)),\n ... },\n ... coords={\"lat\": [10, 20], \"lon\": [150, 160]},\n ... )\n >>> x\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 14.53 11.85 19.27 16.37\n precipitation (lat, lon) float64 0.7315 0.7189 0.8481 0.4671\n\n >>> def adder(data, arg):\n ... return data + arg\n ...\n >>> def div(data, arg):\n ... return data / arg\n ...\n >>> def sub_mult(data, sub_arg, mult_arg):\n ... return (data * mult_arg) - sub_arg\n ...\n >>> x.pipe(adder, 2)\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lon (lon) int64 150 160\n * lat (lat) int64 10 20\n Data variables:\n temperature_c (lat, lon) float64 16.53 13.85 21.27 18.37\n precipitation (lat, lon) float64 2.731 2.719 2.848 2.467\n\n >>> x.pipe(adder, arg=2)\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lon (lon) int64 150 160\n * lat (lat) int64 10 20\n Data variables:\n temperature_c (lat, lon) float64 16.53 13.85 21.27 18.37\n precipitation (lat, lon) float64 2.731 2.719 2.848 2.467\n\n >>> (\n ... x\n ... .pipe(adder, arg=2)\n ... .pipe(div, arg=2)\n ... .pipe(sub_mult, sub_arg=2, mult_arg=2)\n ... 
)\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lon (lon) int64 150 160\n * lat (lat) int64 10 20\n Data variables:\n temperature_c (lat, lon) float64 14.53 11.85 19.27 16.37\n precipitation (lat, lon) float64 0.7315 0.7189 0.8481 0.4671\n\n See Also\n --------\n pandas.DataFrame.pipe\n \"\"\"\n if isinstance(func, tuple):\n func, target = func\n if target in kwargs:\n raise ValueError(\n \"%s is both the pipe target and a keyword \" \"argument\" % target\n )\n kwargs[target] = self\n return func(*args, **kwargs)\n else:\n return func(self, *args, **kwargs)"},{"id":720,"name":"py36.yml","nodeType":"TextFile","path":"ci/requirements","text":"name: xarray-tests\nchannels:\n - conda-forge\ndependencies:\n - python=3.6\n - black\n - boto3\n - bottleneck\n - cartopy\n - cdms2\n - cfgrib\n - cftime\n - coveralls\n - dask\n - distributed\n - flake8\n - h5netcdf\n - h5py\n - hdf5\n - hypothesis\n - iris\n - lxml # optional dep of pydap\n - matplotlib\n - mypy=0.730 # Must match .pre-commit-config.yaml\n - nc-time-axis\n - netcdf4\n - numba\n - numpy\n - pandas\n - pip\n - pseudonetcdf\n - pydap\n - pynio\n - pytest\n - pytest-cov\n - pytest-env\n - rasterio\n - scipy\n - seaborn\n - sparse\n - toolz\n - zarr\n - pip:\n - numbagg\n"},{"col":4,"comment":"\n Parameters\n ----------\n ds : pydap DatasetType\n ","endLoc":72,"header":"def __init__(self, ds)","id":721,"name":"__init__","nodeType":"Function","startLoc":66,"text":"def __init__(self, ds):\n \"\"\"\n Parameters\n ----------\n ds : pydap DatasetType\n \"\"\"\n self.ds = ds"},{"col":4,"comment":"Returns a GroupBy object for performing grouped operations.\n\n Parameters\n ----------\n group : str, DataArray or IndexVariable\n Array whose unique values should be used to group this array. If a\n string, must be the name of a variable contained in this dataset.\n squeeze : boolean, optional\n If \"group\" is a dimension of any arrays in this dataset, `squeeze`\n controls whether the subarrays have a dimension of length 1 along\n that dimension or if the dimension is squeezed out.\n restore_coord_dims : bool, optional\n If True, also restore the dimension order of multi-dimensional\n coordinates.\n\n Returns\n -------\n grouped : GroupBy\n A `GroupBy` object patterned after `pandas.GroupBy` that can be\n iterated over in the form of `(unique_value, grouped_array)` pairs.\n\n Examples\n --------\n Calculate daily anomalies for daily data:\n\n >>> da = xr.DataArray(np.linspace(0, 1826, num=1827),\n ... coords=[pd.date_range('1/1/2000', '31/12/2004',\n ... freq='D')],\n ... 
dims='time')\n >>> da\n \n array([0.000e+00, 1.000e+00, 2.000e+00, ..., 1.824e+03, 1.825e+03, 1.826e+03])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...\n >>> da.groupby('time.dayofyear') - da.groupby('time.dayofyear').mean('time')\n \n array([-730.8, -730.8, -730.8, ..., 730.2, 730.2, 730.5])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...\n dayofyear (time) int64 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 ...\n\n See Also\n --------\n core.groupby.DataArrayGroupBy\n core.groupby.DatasetGroupBy\n ","endLoc":658,"header":"def groupby(self, group, squeeze: bool = True, restore_coord_dims: bool = None)","id":722,"name":"groupby","nodeType":"Function","startLoc":609,"text":"def groupby(self, group, squeeze: bool = True, restore_coord_dims: bool = None):\n \"\"\"Returns a GroupBy object for performing grouped operations.\n\n Parameters\n ----------\n group : str, DataArray or IndexVariable\n Array whose unique values should be used to group this array. If a\n string, must be the name of a variable contained in this dataset.\n squeeze : boolean, optional\n If \"group\" is a dimension of any arrays in this dataset, `squeeze`\n controls whether the subarrays have a dimension of length 1 along\n that dimension or if the dimension is squeezed out.\n restore_coord_dims : bool, optional\n If True, also restore the dimension order of multi-dimensional\n coordinates.\n\n Returns\n -------\n grouped : GroupBy\n A `GroupBy` object patterned after `pandas.GroupBy` that can be\n iterated over in the form of `(unique_value, grouped_array)` pairs.\n\n Examples\n --------\n Calculate daily anomalies for daily data:\n\n >>> da = xr.DataArray(np.linspace(0, 1826, num=1827),\n ... coords=[pd.date_range('1/1/2000', '31/12/2004',\n ... freq='D')],\n ... dims='time')\n >>> da\n \n array([0.000e+00, 1.000e+00, 2.000e+00, ..., 1.824e+03, 1.825e+03, 1.826e+03])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...\n >>> da.groupby('time.dayofyear') - da.groupby('time.dayofyear').mean('time')\n \n array([-730.8, -730.8, -730.8, ..., 730.2, 730.2, 730.5])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...\n dayofyear (time) int64 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 ...\n\n See Also\n --------\n core.groupby.DataArrayGroupBy\n core.groupby.DatasetGroupBy\n \"\"\"\n return self._groupby_cls(\n self, group, squeeze=squeeze, restore_coord_dims=restore_coord_dims\n )"},{"id":723,"name":"doc.yml","nodeType":"TextFile","path":"ci/requirements","text":"name: xarray-docs\nchannels:\n # Don't change to pkgs/main, as it causes random timeouts in readthedocs\n - conda-forge\ndependencies:\n - python=3.7\n - bottleneck\n - cartopy\n - h5netcdf\n - ipython\n - iris\n - netcdf4\n - numpy\n - numpydoc\n - pandas<0.25 # Hack around https://github.com/pydata/xarray/issues/3369\n - rasterio\n - seaborn\n - sphinx\n - sphinx-gallery\n - sphinx_rtd_theme\n - zarr\n"},{"col":4,"comment":"Returns a GroupBy object for performing grouped operations.\n\n Rather than using all unique values of `group`, the values are discretized\n first by applying `pandas.cut` [1]_ to `group`.\n\n Parameters\n ----------\n group : str, DataArray or IndexVariable\n Array whose binned values should be used to group this array. If a\n string, must be the name of a variable contained in this dataset.\n bins : int or array of scalars\n If bins is an int, it defines the number of equal-width bins in the\n range of x. 
However, in this case, the range of x is extended by .1%\n on each side to include the min or max values of x. If bins is a\n sequence it defines the bin edges allowing for non-uniform bin\n width. No extension of the range of x is done in this case.\n right : boolean, optional\n Indicates whether the bins include the rightmost edge or not. If\n right == True (the default), then the bins [1,2,3,4] indicate\n (1,2], (2,3], (3,4].\n labels : array or boolean, default None\n Used as labels for the resulting bins. Must be of the same length as\n the resulting bins. If False, string bin labels are assigned by\n `pandas.cut`.\n precision : int\n The precision at which to store and display the bins labels.\n include_lowest : bool\n Whether the first interval should be left-inclusive or not.\n squeeze : boolean, optional\n If \"group\" is a dimension of any arrays in this dataset, `squeeze`\n controls whether the subarrays have a dimension of length 1 along\n that dimension or if the dimension is squeezed out.\n restore_coord_dims : bool, optional\n If True, also restore the dimension order of multi-dimensional\n coordinates.\n\n Returns\n -------\n grouped : GroupBy\n A `GroupBy` object patterned after `pandas.GroupBy` that can be\n iterated over in the form of `(unique_value, grouped_array)` pairs.\n The name of the group has the added suffix `_bins` in order to\n distinguish it from the original variable.\n\n References\n ----------\n .. [1] http://pandas.pydata.org/pandas-docs/stable/generated/pandas.cut.html\n ","endLoc":731,"header":"def groupby_bins(\n self,\n group,\n bins,\n right: bool = True,\n labels=None,\n precision: int = 3,\n include_lowest: bool = False,\n squeeze: bool = True,\n restore_coord_dims: bool = None,\n )","id":724,"name":"groupby_bins","nodeType":"Function","startLoc":660,"text":"def groupby_bins(\n self,\n group,\n bins,\n right: bool = True,\n labels=None,\n precision: int = 3,\n include_lowest: bool = False,\n squeeze: bool = True,\n restore_coord_dims: bool = None,\n ):\n \"\"\"Returns a GroupBy object for performing grouped operations.\n\n Rather than using all unique values of `group`, the values are discretized\n first by applying `pandas.cut` [1]_ to `group`.\n\n Parameters\n ----------\n group : str, DataArray or IndexVariable\n Array whose binned values should be used to group this array. If a\n string, must be the name of a variable contained in this dataset.\n bins : int or array of scalars\n If bins is an int, it defines the number of equal-width bins in the\n range of x. However, in this case, the range of x is extended by .1%\n on each side to include the min or max values of x. If bins is a\n sequence it defines the bin edges allowing for non-uniform bin\n width. No extension of the range of x is done in this case.\n right : boolean, optional\n Indicates whether the bins include the rightmost edge or not. If\n right == True (the default), then the bins [1,2,3,4] indicate\n (1,2], (2,3], (3,4].\n labels : array or boolean, default None\n Used as labels for the resulting bins. Must be of the same length as\n the resulting bins. 
If False, string bin labels are assigned by\n `pandas.cut`.\n precision : int\n The precision at which to store and display the bins labels.\n include_lowest : bool\n Whether the first interval should be left-inclusive or not.\n squeeze : boolean, optional\n If \"group\" is a dimension of any arrays in this dataset, `squeeze`\n controls whether the subarrays have a dimension of length 1 along\n that dimension or if the dimension is squeezed out.\n restore_coord_dims : bool, optional\n If True, also restore the dimension order of multi-dimensional\n coordinates.\n\n Returns\n -------\n grouped : GroupBy\n A `GroupBy` object patterned after `pandas.GroupBy` that can be\n iterated over in the form of `(unique_value, grouped_array)` pairs.\n The name of the group has the added suffix `_bins` in order to\n distinguish it from the original variable.\n\n References\n ----------\n .. [1] http://pandas.pydata.org/pandas-docs/stable/generated/pandas.cut.html\n \"\"\"\n return self._groupby_cls(\n self,\n group,\n squeeze=squeeze,\n bins=bins,\n restore_coord_dims=restore_coord_dims,\n cut_kwargs={\n \"right\": right,\n \"labels\": labels,\n \"precision\": precision,\n \"include_lowest\": include_lowest,\n },\n )"},{"col":4,"comment":"\n Rolling window object.\n\n Parameters\n ----------\n dim: dict, optional\n Mapping from the dimension name to create the rolling iterator\n along (e.g. `time`) to its moving window size.\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA). The default, None, is equivalent to\n setting min_periods equal to the size of the window.\n center : boolean, default False\n Set the labels at the center of the window.\n **window_kwargs : optional\n The keyword arguments form of ``dim``.\n One of dim or window_kwargs must be provided.\n\n Returns\n -------\n Rolling object (core.rolling.DataArrayRolling for DataArray,\n core.rolling.DatasetRolling for Dataset.)\n\n Examples\n --------\n Create rolling seasonal average of monthly data e.g. DJF, JFM, ..., SON:\n\n >>> da = xr.DataArray(np.linspace(0, 11, num=12),\n ... coords=[pd.date_range('15/12/1999',\n ... periods=12, freq=pd.DateOffset(months=1))],\n ... dims='time')\n >>> da\n \n array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ...\n >>> da.rolling(time=3, center=True).mean()\n \n array([nan, 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., nan])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ...\n\n Remove the NaNs using ``dropna()``:\n\n >>> da.rolling(time=3, center=True).mean().dropna('time')\n \n array([ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-15 2000-02-15 2000-03-15 ...\n\n See Also\n --------\n core.rolling.DataArrayRolling\n core.rolling.DatasetRolling\n ","endLoc":796,"header":"def rolling(\n self,\n dim: Mapping[Hashable, int] = None,\n min_periods: int = None,\n center: bool = False,\n **window_kwargs: int\n )","id":725,"name":"rolling","nodeType":"Function","startLoc":733,"text":"def rolling(\n self,\n dim: Mapping[Hashable, int] = None,\n min_periods: int = None,\n center: bool = False,\n **window_kwargs: int\n ):\n \"\"\"\n Rolling window object.\n\n Parameters\n ----------\n dim: dict, optional\n Mapping from the dimension name to create the rolling iterator\n along (e.g. 
`time`) to its moving window size.\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA). The default, None, is equivalent to\n setting min_periods equal to the size of the window.\n center : boolean, default False\n Set the labels at the center of the window.\n **window_kwargs : optional\n The keyword arguments form of ``dim``.\n One of dim or window_kwargs must be provided.\n\n Returns\n -------\n Rolling object (core.rolling.DataArrayRolling for DataArray,\n core.rolling.DatasetRolling for Dataset.)\n\n Examples\n --------\n Create rolling seasonal average of monthly data e.g. DJF, JFM, ..., SON:\n\n >>> da = xr.DataArray(np.linspace(0, 11, num=12),\n ... coords=[pd.date_range('15/12/1999',\n ... periods=12, freq=pd.DateOffset(months=1))],\n ... dims='time')\n >>> da\n \n array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ...\n >>> da.rolling(time=3, center=True).mean()\n \n array([nan, 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., nan])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ...\n\n Remove the NaNs using ``dropna()``:\n\n >>> da.rolling(time=3, center=True).mean().dropna('time')\n \n array([ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-15 2000-02-15 2000-03-15 ...\n\n See Also\n --------\n core.rolling.DataArrayRolling\n core.rolling.DatasetRolling\n \"\"\"\n dim = either_dict_or_kwargs(dim, window_kwargs, \"rolling\")\n return self._rolling_cls(self, dim, min_periods=min_periods, center=center)"},{"col":4,"comment":"null","endLoc":737,"header":"def __dask_postpersist__(self)","id":726,"name":"__dask_postpersist__","nodeType":"Function","startLoc":719,"text":"def __dask_postpersist__(self):\n import dask\n\n info = [\n (True, k, v.__dask_postpersist__())\n if dask.is_dask_collection(v)\n else (False, k, v)\n for k, v in self._variables.items()\n ]\n args = (\n info,\n self._coord_names,\n self._dims,\n self._attrs,\n self._indexes,\n self._encoding,\n self._file_obj,\n )\n return self._dask_postpersist, args"},{"id":727,"name":"py37.yml","nodeType":"TextFile","path":"ci/requirements","text":"name: xarray-tests\nchannels:\n - conda-forge\ndependencies:\n - python=3.7\n - black\n - boto3\n - bottleneck\n - cartopy\n - cdms2\n - cfgrib\n - cftime\n - coveralls\n - dask\n - distributed\n - flake8\n - h5netcdf\n - h5py\n - hdf5\n - hypothesis\n - iris\n - lxml # Optional dep of pydap\n - matplotlib\n - mypy=0.730 # Must match .pre-commit-config.yaml\n - nc-time-axis\n - netcdf4\n - numba\n - numpy\n - pandas\n - pip\n - pseudonetcdf\n - pydap\n - pynio\n - pytest\n - pytest-cov\n - pytest-env\n - rasterio\n - scipy\n - seaborn\n - sparse\n - toolz\n - zarr\n - pip:\n - numbagg\n"},{"col":4,"comment":"\n Exponentially-weighted moving window.\n Similar to EWM in pandas\n\n Requires the optional Numbagg dependency.\n\n Parameters\n ----------\n window : A single mapping from a dimension name to window value,\n optional\n dim : str\n Name of the dimension to create the rolling exponential window\n along (e.g., `time`).\n window : int\n Size of the moving window. The type of this is specified in\n `window_type`\n window_type : str, one of ['span', 'com', 'halflife', 'alpha'],\n default 'span'\n The format of the previously supplied window. Each is a simple\n numerical transformation of the others. 
Described in detail:\n https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.ewm.html\n **window_kwargs : optional\n The keyword arguments form of ``window``.\n One of window or window_kwargs must be provided.\n\n See Also\n --------\n core.rolling_exp.RollingExp\n ","endLoc":835,"header":"def rolling_exp(\n self,\n window: Mapping[Hashable, int] = None,\n window_type: str = \"span\",\n **window_kwargs\n )","id":728,"name":"rolling_exp","nodeType":"Function","startLoc":798,"text":"def rolling_exp(\n self,\n window: Mapping[Hashable, int] = None,\n window_type: str = \"span\",\n **window_kwargs\n ):\n \"\"\"\n Exponentially-weighted moving window.\n Similar to EWM in pandas\n\n Requires the optional Numbagg dependency.\n\n Parameters\n ----------\n window : A single mapping from a dimension name to window value,\n optional\n dim : str\n Name of the dimension to create the rolling exponential window\n along (e.g., `time`).\n window : int\n Size of the moving window. The type of this is specified in\n `window_type`\n window_type : str, one of ['span', 'com', 'halflife', 'alpha'],\n default 'span'\n The format of the previously supplied window. Each is a simple\n numerical transformation of the others. Described in detail:\n https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.ewm.html\n **window_kwargs : optional\n The keyword arguments form of ``window``.\n One of window or window_kwargs must be provided.\n\n See Also\n --------\n core.rolling_exp.RollingExp\n \"\"\"\n window = either_dict_or_kwargs(window, window_kwargs, \"rolling_exp\")\n\n return self._rolling_exp_cls(self, window, window_type)"},{"fileName":"scipy_.py","filePath":"xarray/backends","id":729,"nodeType":"File","text":"from io import BytesIO\n\nimport numpy as np\n\nfrom .. import Variable\nfrom ..core.indexing import NumpyIndexingAdapter\nfrom ..core.utils import Frozen, FrozenDict\nfrom .common import BackendArray, WritableCFDataStore\nfrom .file_manager import CachingFileManager, DummyFileManager\nfrom .locks import ensure_lock, get_write_lock\nfrom .netcdf3 import encode_nc3_attr_value, encode_nc3_variable, is_valid_nc3_name\n\n\ndef _decode_string(s):\n if isinstance(s, bytes):\n return s.decode(\"utf-8\", \"replace\")\n return s\n\n\ndef _decode_attrs(d):\n # don't decode _FillValue from bytes -> unicode, because we want to ensure\n # that its type matches the data exactly\n return {k: v if k == \"_FillValue\" else _decode_string(v) for (k, v) in d.items()}\n\n\nclass ScipyArrayWrapper(BackendArray):\n def __init__(self, variable_name, datastore):\n self.datastore = datastore\n self.variable_name = variable_name\n array = self.get_variable().data\n self.shape = array.shape\n self.dtype = np.dtype(array.dtype.kind + str(array.dtype.itemsize))\n\n def get_variable(self, needs_lock=True):\n ds = self.datastore._manager.acquire(needs_lock)\n return ds.variables[self.variable_name]\n\n def __getitem__(self, key):\n data = NumpyIndexingAdapter(self.get_variable().data)[key]\n # Copy data if the source file is mmapped. 
This makes things consistent\n # with the netCDF4 library by ensuring we can safely read arrays even\n # after closing associated files.\n copy = self.datastore.ds.use_mmap\n return np.array(data, dtype=self.dtype, copy=copy)\n\n def __setitem__(self, key, value):\n with self.datastore.lock:\n data = self.get_variable(needs_lock=False)\n try:\n data[key] = value\n except TypeError:\n if key is Ellipsis:\n # workaround for GH: scipy/scipy#6880\n data[:] = value\n else:\n raise\n\n\ndef _open_scipy_netcdf(filename, mode, mmap, version):\n import scipy.io\n import gzip\n\n # if the string ends with .gz, then gunzip and open as netcdf file\n if isinstance(filename, str) and filename.endswith(\".gz\"):\n try:\n return scipy.io.netcdf_file(\n gzip.open(filename), mode=mode, mmap=mmap, version=version\n )\n except TypeError as e:\n # TODO: gzipped loading only works with NetCDF3 files.\n if \"is not a valid NetCDF 3 file\" in e.message:\n raise ValueError(\n \"gzipped file loading only supports \" \"NetCDF 3 files.\"\n )\n else:\n raise\n\n if isinstance(filename, bytes) and filename.startswith(b\"CDF\"):\n # it's a NetCDF3 bytestring\n filename = BytesIO(filename)\n\n try:\n return scipy.io.netcdf_file(filename, mode=mode, mmap=mmap, version=version)\n except TypeError as e: # netcdf3 message is obscure in this case\n errmsg = e.args[0]\n if \"is not a valid NetCDF 3 file\" in errmsg:\n msg = \"\"\"\n If this is a NetCDF4 file, you may need to install the\n netcdf4 library, e.g.,\n\n $ pip install netcdf4\n \"\"\"\n errmsg += msg\n raise TypeError(errmsg)\n else:\n raise\n\n\nclass ScipyDataStore(WritableCFDataStore):\n \"\"\"Store for reading and writing data via scipy.io.netcdf.\n\n This store has the advantage of being able to be initialized with a\n StringIO object, allow for serialization without writing to disk.\n\n It only supports the NetCDF3 file-format.\n \"\"\"\n\n def __init__(\n self, filename_or_obj, mode=\"r\", format=None, group=None, mmap=None, lock=None\n ):\n if group is not None:\n raise ValueError(\n \"cannot save to a group with the \" \"scipy.io.netcdf backend\"\n )\n\n if format is None or format == \"NETCDF3_64BIT\":\n version = 2\n elif format == \"NETCDF3_CLASSIC\":\n version = 1\n else:\n raise ValueError(\"invalid format for scipy.io.netcdf backend: %r\" % format)\n\n if lock is None and mode != \"r\" and isinstance(filename_or_obj, str):\n lock = get_write_lock(filename_or_obj)\n\n self.lock = ensure_lock(lock)\n\n if isinstance(filename_or_obj, str):\n manager = CachingFileManager(\n _open_scipy_netcdf,\n filename_or_obj,\n mode=mode,\n lock=lock,\n kwargs=dict(mmap=mmap, version=version),\n )\n else:\n scipy_dataset = _open_scipy_netcdf(\n filename_or_obj, mode=mode, mmap=mmap, version=version\n )\n manager = DummyFileManager(scipy_dataset)\n\n self._manager = manager\n\n @property\n def ds(self):\n return self._manager.acquire()\n\n def open_store_variable(self, name, var):\n return Variable(\n var.dimensions,\n ScipyArrayWrapper(name, self),\n _decode_attrs(var._attributes),\n )\n\n def get_variables(self):\n return FrozenDict(\n (k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()\n )\n\n def get_attrs(self):\n return Frozen(_decode_attrs(self.ds._attributes))\n\n def get_dimensions(self):\n return Frozen(self.ds.dimensions)\n\n def get_encoding(self):\n encoding = {}\n encoding[\"unlimited_dims\"] = {\n k for k, v in self.ds.dimensions.items() if v is None\n }\n return encoding\n\n def set_dimension(self, name, length, 
is_unlimited=False):\n if name in self.ds.dimensions:\n raise ValueError(\n \"%s does not support modifying dimensions\" % type(self).__name__\n )\n dim_length = length if not is_unlimited else None\n self.ds.createDimension(name, dim_length)\n\n def _validate_attr_key(self, key):\n if not is_valid_nc3_name(key):\n raise ValueError(\"Not a valid attribute name\")\n\n def set_attribute(self, key, value):\n self._validate_attr_key(key)\n value = encode_nc3_attr_value(value)\n setattr(self.ds, key, value)\n\n def encode_variable(self, variable):\n variable = encode_nc3_variable(variable)\n return variable\n\n def prepare_variable(\n self, name, variable, check_encoding=False, unlimited_dims=None\n ):\n if check_encoding and variable.encoding:\n if variable.encoding != {\"_FillValue\": None}:\n raise ValueError(\n \"unexpected encoding for scipy backend: %r\"\n % list(variable.encoding)\n )\n\n data = variable.data\n # nb. this still creates a numpy array in all memory, even though we\n # don't write the data yet; scipy.io.netcdf does not not support\n # incremental writes.\n if name not in self.ds.variables:\n self.ds.createVariable(name, data.dtype, variable.dims)\n scipy_var = self.ds.variables[name]\n for k, v in variable.attrs.items():\n self._validate_attr_key(k)\n setattr(scipy_var, k, v)\n\n target = ScipyArrayWrapper(name, self)\n\n return target, data\n\n def sync(self):\n self.ds.sync()\n\n def close(self):\n self._manager.close()\n"},{"className":"NumpyIndexingAdapter","col":0,"comment":"Wrap a NumPy array to use explicit indexing.","endLoc":1276,"id":730,"nodeType":"Class","startLoc":1225,"text":"class NumpyIndexingAdapter(ExplicitlyIndexedNDArrayMixin):\n \"\"\"Wrap a NumPy array to use explicit indexing.\"\"\"\n\n __slots__ = (\"array\",)\n\n def __init__(self, array):\n # In NumpyIndexingAdapter we only allow to store bare np.ndarray\n if not isinstance(array, np.ndarray):\n raise TypeError(\n \"NumpyIndexingAdapter only wraps np.ndarray. \"\n \"Trying to wrap {}\".format(type(array))\n )\n self.array = array\n\n def _indexing_array_and_key(self, key):\n if isinstance(key, OuterIndexer):\n array = self.array\n key = _outer_to_numpy_indexer(key, self.array.shape)\n elif isinstance(key, VectorizedIndexer):\n array = nputils.NumpyVIndexAdapter(self.array)\n key = key.tuple\n elif isinstance(key, BasicIndexer):\n array = self.array\n # We want 0d slices rather than scalars. This is achieved by\n # appending an ellipsis (see\n # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#detailed-notes).\n key = key.tuple + (Ellipsis,)\n else:\n raise TypeError(\"unexpected key type: {}\".format(type(key)))\n\n return array, key\n\n def transpose(self, order):\n return self.array.transpose(order)\n\n def __getitem__(self, key):\n array, key = self._indexing_array_and_key(key)\n return array[key]\n\n def __setitem__(self, key, value):\n array, key = self._indexing_array_and_key(key)\n try:\n array[key] = value\n except ValueError:\n # More informative exception if read-only view\n if not array.flags.writeable and not array.flags.owndata:\n raise ValueError(\n \"Assignment destination is a view. 
\"\n \"Do you want to .copy() array first?\"\n )\n else:\n raise"},{"className":"ExplicitlyIndexedNDArrayMixin","col":0,"comment":"null","endLoc":477,"id":731,"nodeType":"Class","startLoc":472,"text":"class ExplicitlyIndexedNDArrayMixin(utils.NDArrayMixin, ExplicitlyIndexed):\n __slots__ = ()\n\n def __array__(self, dtype=None):\n key = BasicIndexer((slice(None),) * self.ndim)\n return np.asarray(self[key], dtype=dtype)"},{"col":0,"comment":"null","endLoc":490,"header":"def dataset_repr(ds)","id":732,"name":"dataset_repr","nodeType":"Function","startLoc":470,"text":"def dataset_repr(ds):\n summary = [\"\".format(type(ds).__name__)]\n\n col_width = _calculate_col_width(_get_col_items(ds.variables))\n\n dims_start = pretty_print(\"Dimensions:\", col_width)\n summary.append(\"{}({})\".format(dims_start, dim_summary(ds)))\n\n if ds.coords:\n summary.append(coords_repr(ds.coords, col_width=col_width))\n\n unindexed_dims_str = unindexed_dims_repr(ds.dims, ds.coords)\n if unindexed_dims_str:\n summary.append(unindexed_dims_str)\n\n summary.append(data_vars_repr(ds.data_vars, col_width=col_width))\n\n if ds.attrs:\n summary.append(attrs_repr(ds.attrs))\n\n return \"\\n\".join(summary)"},{"col":4,"comment":"\n Coarsen object.\n\n Parameters\n ----------\n dim: dict, optional\n Mapping from the dimension name to the window size.\n dim : str\n Name of the dimension to create the rolling iterator\n along (e.g., `time`).\n window : int\n Size of the moving window.\n boundary : 'exact' | 'trim' | 'pad'\n If 'exact', a ValueError will be raised if dimension size is not a\n multiple of the window size. If 'trim', the excess entries are\n dropped. If 'pad', NA will be padded.\n side : 'left' or 'right' or mapping from dimension to 'left' or 'right'\n coord_func: function (name) that is applied to the coordintes,\n or a mapping from coordinate name to function (name).\n\n Returns\n -------\n Coarsen object (core.rolling.DataArrayCoarsen for DataArray,\n core.rolling.DatasetCoarsen for Dataset.)\n\n Examples\n --------\n Coarsen the long time series by averaging over every four days.\n\n >>> da = xr.DataArray(np.linspace(0, 364, num=364),\n ... dims='time',\n ... coords={'time': pd.date_range(\n ... '15/12/1999', periods=364)})\n >>> da\n \n array([ 0. , 1.002755, 2.00551 , ..., 361.99449 , 362.997245,\n 364. ])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 1999-12-16 ... 2000-12-12\n >>>\n >>> da.coarsen(time=3, boundary='trim').mean()\n \n array([ 1.002755, 4.011019, 7.019284, ..., 358.986226,\n 361.99449 ])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-16 1999-12-19 ... 
2000-12-10\n >>>\n\n See Also\n --------\n core.rolling.DataArrayCoarsen\n core.rolling.DatasetCoarsen\n ","endLoc":901,"header":"def coarsen(\n self,\n dim: Mapping[Hashable, int] = None,\n boundary: str = \"exact\",\n side: Union[str, Mapping[Hashable, str]] = \"left\",\n coord_func: str = \"mean\",\n **window_kwargs: int\n )","id":733,"name":"coarsen","nodeType":"Function","startLoc":837,"text":"def coarsen(\n self,\n dim: Mapping[Hashable, int] = None,\n boundary: str = \"exact\",\n side: Union[str, Mapping[Hashable, str]] = \"left\",\n coord_func: str = \"mean\",\n **window_kwargs: int\n ):\n \"\"\"\n Coarsen object.\n\n Parameters\n ----------\n dim: dict, optional\n Mapping from the dimension name to the window size.\n dim : str\n Name of the dimension to create the rolling iterator\n along (e.g., `time`).\n window : int\n Size of the moving window.\n boundary : 'exact' | 'trim' | 'pad'\n If 'exact', a ValueError will be raised if dimension size is not a\n multiple of the window size. If 'trim', the excess entries are\n dropped. If 'pad', NA will be padded.\n side : 'left' or 'right' or mapping from dimension to 'left' or 'right'\n coord_func: function (name) that is applied to the coordintes,\n or a mapping from coordinate name to function (name).\n\n Returns\n -------\n Coarsen object (core.rolling.DataArrayCoarsen for DataArray,\n core.rolling.DatasetCoarsen for Dataset.)\n\n Examples\n --------\n Coarsen the long time series by averaging over every four days.\n\n >>> da = xr.DataArray(np.linspace(0, 364, num=364),\n ... dims='time',\n ... coords={'time': pd.date_range(\n ... '15/12/1999', periods=364)})\n >>> da\n \n array([ 0. , 1.002755, 2.00551 , ..., 361.99449 , 362.997245,\n 364. ])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 1999-12-16 ... 2000-12-12\n >>>\n >>> da.coarsen(time=3, boundary='trim').mean()\n \n array([ 1.002755, 4.011019, 7.019284, ..., 358.986226,\n 361.99449 ])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-16 1999-12-19 ... 
2000-12-10\n >>>\n\n See Also\n --------\n core.rolling.DataArrayCoarsen\n core.rolling.DatasetCoarsen\n \"\"\"\n dim = either_dict_or_kwargs(dim, window_kwargs, \"coarsen\")\n return self._coarsen_cls(\n self, dim, boundary=boundary, side=side, coord_func=coord_func\n )"},{"className":"NDArrayMixin","col":0,"comment":"Mixin class for making wrappers of N-dimensional arrays that conform to\n the ndarray interface required for the data argument to Variable objects.\n\n A subclass should set the `array` property and override one or more of\n `dtype`, `shape` and `__getitem__`.\n ","endLoc":527,"id":734,"nodeType":"Class","startLoc":505,"text":"class NDArrayMixin(NdimSizeLenMixin):\n \"\"\"Mixin class for making wrappers of N-dimensional arrays that conform to\n the ndarray interface required for the data argument to Variable objects.\n\n A subclass should set the `array` property and override one or more of\n `dtype`, `shape` and `__getitem__`.\n \"\"\"\n\n __slots__ = ()\n\n @property\n def dtype(self: Any) -> np.dtype:\n return self.array.dtype\n\n @property\n def shape(self: Any) -> Tuple[int]:\n return self.array.shape\n\n def __getitem__(self: Any, key):\n return self.array[key]\n\n def __repr__(self: Any) -> str:\n return \"%s(array=%r)\" % (type(self).__name__, self.array)"},{"col":4,"comment":"null","endLoc":517,"header":"@property\n def dtype(self: Any) -> np.dtype","id":735,"name":"dtype","nodeType":"Function","startLoc":515,"text":"@property\n def dtype(self: Any) -> np.dtype:\n return self.array.dtype"},{"col":4,"comment":"null","endLoc":521,"header":"@property\n def shape(self: Any) -> Tuple[int]","id":736,"name":"shape","nodeType":"Function","startLoc":519,"text":"@property\n def shape(self: Any) -> Tuple[int]:\n return self.array.shape"},{"col":4,"comment":"null","endLoc":524,"header":"def __getitem__(self: Any, key)","id":737,"name":"__getitem__","nodeType":"Function","startLoc":523,"text":"def __getitem__(self: Any, key):\n return self.array[key]"},{"col":4,"comment":"null","endLoc":527,"header":"def __repr__(self: Any) -> str","id":738,"name":"__repr__","nodeType":"Function","startLoc":526,"text":"def __repr__(self: Any) -> str:\n return \"%s(array=%r)\" % (type(self).__name__, self.array)"},{"attributeType":"null","col":4,"comment":"null","endLoc":513,"id":739,"name":"__slots__","nodeType":"Attribute","startLoc":513,"text":"__slots__"},{"col":4,"comment":"null","endLoc":753,"header":"@staticmethod\n def _dask_postcompute(results, info, *args)","id":740,"name":"_dask_postcompute","nodeType":"Function","startLoc":739,"text":"@staticmethod\n def _dask_postcompute(results, info, *args):\n variables = {}\n results2 = list(results[::-1])\n for is_dask, k, v in info:\n if is_dask:\n func, args2 = v\n r = results2.pop()\n result = func(r, *args2)\n else:\n result = v\n variables[k] = result\n\n final = Dataset._construct_direct(variables, *args)\n return final"},{"col":4,"comment":"null","endLoc":477,"header":"def __array__(self, dtype=None)","id":741,"name":"__array__","nodeType":"Function","startLoc":475,"text":"def __array__(self, dtype=None):\n key = BasicIndexer((slice(None),) * self.ndim)\n return np.asarray(self[key], dtype=dtype)"},{"col":4,"comment":"Returns a Resample object for performing resampling operations.\n\n Handles both downsampling and upsampling. 
If any intervals contain no\n values from the original object, they will be given the value ``NaN``.\n\n Parameters\n ----------\n indexer : {dim: freq}, optional\n Mapping from the dimension name to resample frequency.\n skipna : bool, optional\n Whether to skip missing values when aggregating in downsampling.\n closed : 'left' or 'right', optional\n Side of each interval to treat as closed.\n label : 'left or 'right', optional\n Side of each interval to use for labeling.\n base : int, optional\n For frequencies that evenly subdivide 1 day, the \"origin\" of the\n aggregated intervals. For example, for '24H' frequency, base could\n range from 0 through 23.\n loffset : timedelta or str, optional\n Offset used to adjust the resampled time labels. Some pandas date\n offset strings are supported.\n keep_attrs : bool, optional\n If True, the object's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n restore_coord_dims : bool, optional\n If True, also restore the dimension order of multi-dimensional\n coordinates.\n **indexer_kwargs : {dim: freq}\n The keyword arguments form of ``indexer``.\n One of indexer or indexer_kwargs must be provided.\n\n Returns\n -------\n resampled : same type as caller\n This object resampled.\n\n Examples\n --------\n Downsample monthly time-series data to seasonal data:\n\n >>> da = xr.DataArray(np.linspace(0, 11, num=12),\n ... coords=[pd.date_range('15/12/1999',\n ... periods=12, freq=pd.DateOffset(months=1))],\n ... dims='time')\n >>> da\n \n array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ...\n >>> da.resample(time=\"QS-DEC\").mean()\n \n array([ 1., 4., 7., 10.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-01 2000-03-01 2000-06-01 2000-09-01\n\n Upsample monthly time-series data to daily data:\n\n >>> da.resample(time='1D').interpolate('linear')\n \n array([ 0. , 0.032258, 0.064516, ..., 10.935484, 10.967742, 11. ])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 1999-12-16 1999-12-17 ...\n\n Limit scope of upsampling method\n >>> da.resample(time='1D').nearest(tolerance='1D')\n \n array([ 0., 0., nan, ..., nan, 11., 11.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 1999-12-16 ... 2000-11-15\n\n References\n ----------\n\n .. [1] http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases\n ","endLoc":1041,"header":"def resample(\n self,\n indexer: Mapping[Hashable, str] = None,\n skipna=None,\n closed: str = None,\n label: str = None,\n base: int = 0,\n keep_attrs: bool = None,\n loffset=None,\n restore_coord_dims: bool = None,\n **indexer_kwargs: str\n )","id":742,"name":"resample","nodeType":"Function","startLoc":903,"text":"def resample(\n self,\n indexer: Mapping[Hashable, str] = None,\n skipna=None,\n closed: str = None,\n label: str = None,\n base: int = 0,\n keep_attrs: bool = None,\n loffset=None,\n restore_coord_dims: bool = None,\n **indexer_kwargs: str\n ):\n \"\"\"Returns a Resample object for performing resampling operations.\n\n Handles both downsampling and upsampling. 
If any intervals contain no\n values from the original object, they will be given the value ``NaN``.\n\n Parameters\n ----------\n indexer : {dim: freq}, optional\n Mapping from the dimension name to resample frequency.\n skipna : bool, optional\n Whether to skip missing values when aggregating in downsampling.\n closed : 'left' or 'right', optional\n Side of each interval to treat as closed.\n label : 'left or 'right', optional\n Side of each interval to use for labeling.\n base : int, optional\n For frequencies that evenly subdivide 1 day, the \"origin\" of the\n aggregated intervals. For example, for '24H' frequency, base could\n range from 0 through 23.\n loffset : timedelta or str, optional\n Offset used to adjust the resampled time labels. Some pandas date\n offset strings are supported.\n keep_attrs : bool, optional\n If True, the object's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n restore_coord_dims : bool, optional\n If True, also restore the dimension order of multi-dimensional\n coordinates.\n **indexer_kwargs : {dim: freq}\n The keyword arguments form of ``indexer``.\n One of indexer or indexer_kwargs must be provided.\n\n Returns\n -------\n resampled : same type as caller\n This object resampled.\n\n Examples\n --------\n Downsample monthly time-series data to seasonal data:\n\n >>> da = xr.DataArray(np.linspace(0, 11, num=12),\n ... coords=[pd.date_range('15/12/1999',\n ... periods=12, freq=pd.DateOffset(months=1))],\n ... dims='time')\n >>> da\n \n array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ...\n >>> da.resample(time=\"QS-DEC\").mean()\n \n array([ 1., 4., 7., 10.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-01 2000-03-01 2000-06-01 2000-09-01\n\n Upsample monthly time-series data to daily data:\n\n >>> da.resample(time='1D').interpolate('linear')\n \n array([ 0. , 0.032258, 0.064516, ..., 10.935484, 10.967742, 11. ])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 1999-12-16 1999-12-17 ...\n\n Limit scope of upsampling method\n >>> da.resample(time='1D').nearest(tolerance='1D')\n \n array([ 0., 0., nan, ..., nan, 11., 11.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 1999-12-16 ... 2000-11-15\n\n References\n ----------\n\n .. [1] http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases\n \"\"\"\n # TODO support non-string indexer after removing the old API.\n\n from .dataarray import DataArray\n from .resample import RESAMPLE_DIM\n from ..coding.cftimeindex import CFTimeIndex\n\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n\n # note: the second argument (now 'skipna') use to be 'dim'\n if (\n (skipna is not None and not isinstance(skipna, bool))\n or (\"how\" in indexer_kwargs and \"how\" not in self.dims)\n or (\"dim\" in indexer_kwargs and \"dim\" not in self.dims)\n ):\n raise TypeError(\n \"resample() no longer supports the `how` or \"\n \"`dim` arguments. 
Instead call methods on resample \"\n \"objects, e.g., data.resample(time='1D').mean()\"\n )\n\n indexer = either_dict_or_kwargs(indexer, indexer_kwargs, \"resample\")\n if len(indexer) != 1:\n raise ValueError(\"Resampling only supported along single dimensions.\")\n dim, freq = next(iter(indexer.items()))\n\n dim_name = dim\n dim_coord = self[dim]\n\n if isinstance(self.indexes[dim_name], CFTimeIndex):\n from .resample_cftime import CFTimeGrouper\n\n grouper = CFTimeGrouper(freq, closed, label, base, loffset)\n else:\n grouper = pd.Grouper(\n freq=freq, closed=closed, label=label, base=base, loffset=loffset\n )\n group = DataArray(\n dim_coord, coords=dim_coord.coords, dims=dim_coord.dims, name=RESAMPLE_DIM\n )\n resampler = self._resample_cls(\n self,\n group=group,\n dim=dim_name,\n grouper=grouper,\n resample_dim=RESAMPLE_DIM,\n restore_coord_dims=restore_coord_dims,\n )\n\n return resampler"},{"col":0,"comment":"null","endLoc":71,"header":"def _get_keep_attrs(default)","id":743,"name":"_get_keep_attrs","nodeType":"Function","startLoc":61,"text":"def _get_keep_attrs(default):\n global_choice = OPTIONS[\"keep_attrs\"]\n\n if global_choice == \"default\":\n return default\n elif global_choice in [True, False]:\n return global_choice\n else:\n raise ValueError(\n \"The global option keep_attrs must be one of\" \" True, False or 'default'.\"\n )"},{"col":4,"comment":"null","endLoc":54,"header":"@classmethod\n def open(cls, filename, lock=None, mode=None, **format_kwargs)","id":744,"name":"open","nodeType":"Function","startLoc":41,"text":"@classmethod\n def open(cls, filename, lock=None, mode=None, **format_kwargs):\n from PseudoNetCDF import pncopen\n\n keywords = {\"kwargs\": format_kwargs}\n # only include mode if explicitly passed\n if mode is not None:\n keywords[\"mode\"] = mode\n\n if lock is None:\n lock = PNETCDF_LOCK\n\n manager = CachingFileManager(pncopen, filename, lock=lock, **keywords)\n return cls(manager, lock)"},{"col":4,"comment":"null","endLoc":766,"header":"@staticmethod\n def _dask_postpersist(dsk, info, *args)","id":745,"name":"_dask_postpersist","nodeType":"Function","startLoc":755,"text":"@staticmethod\n def _dask_postpersist(dsk, info, *args):\n variables = {}\n for is_dask, k, v in info:\n if is_dask:\n func, args2 = v\n result = func(dsk, *args2)\n else:\n result = v\n variables[k] = result\n\n return Dataset._construct_direct(variables, *args)"},{"col":4,"comment":"Manually trigger loading and/or computation of this dataset's data\n from disk or a remote source into memory and return a new dataset.\n Unlike load, the original dataset is left unaltered.\n\n Normally, it should not be necessary to call this method in user code,\n because all xarray functions should either work on deferred data or\n load data automatically. 
However, this method can be necessary when\n working with many file objects on disk.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.array.compute``.\n\n See Also\n --------\n dask.array.compute\n ","endLoc":788,"header":"def compute(self, **kwargs) -> \"Dataset\"","id":746,"name":"compute","nodeType":"Function","startLoc":768,"text":"def compute(self, **kwargs) -> \"Dataset\":\n \"\"\"Manually trigger loading and/or computation of this dataset's data\n from disk or a remote source into memory and return a new dataset.\n Unlike load, the original dataset is left unaltered.\n\n Normally, it should not be necessary to call this method in user code,\n because all xarray functions should either work on deferred data or\n load data automatically. However, this method can be necessary when\n working with many file objects on disk.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.array.compute``.\n\n See Also\n --------\n dask.array.compute\n \"\"\"\n new = self.copy(deep=False)\n return new.load(**kwargs)"},{"attributeType":"null","col":4,"comment":"null","endLoc":473,"id":747,"name":"__slots__","nodeType":"Attribute","startLoc":473,"text":"__slots__"},{"col":4,"comment":"null","endLoc":1255,"header":"def _indexing_array_and_key(self, key)","id":748,"name":"_indexing_array_and_key","nodeType":"Function","startLoc":1239,"text":"def _indexing_array_and_key(self, key):\n if isinstance(key, OuterIndexer):\n array = self.array\n key = _outer_to_numpy_indexer(key, self.array.shape)\n elif isinstance(key, VectorizedIndexer):\n array = nputils.NumpyVIndexAdapter(self.array)\n key = key.tuple\n elif isinstance(key, BasicIndexer):\n array = self.array\n # We want 0d slices rather than scalars. 
This is achieved by\n # appending an ellipsis (see\n # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#detailed-notes).\n key = key.tuple + (Ellipsis,)\n else:\n raise TypeError(\"unexpected key type: {}\".format(type(key)))\n\n return array, key"},{"col":0,"comment":"Convert an OuterIndexer into an indexer for NumPy.\n\n Parameters\n ----------\n key : Basic/OuterIndexer\n An indexer to convert.\n shape : tuple\n Shape of the array subject to the indexing.\n\n Returns\n -------\n tuple\n Tuple suitable for use to index a NumPy array.\n ","endLoc":765,"header":"def _outer_to_numpy_indexer(key, shape)","id":749,"name":"_outer_to_numpy_indexer","nodeType":"Function","startLoc":744,"text":"def _outer_to_numpy_indexer(key, shape):\n \"\"\"Convert an OuterIndexer into an indexer for NumPy.\n\n Parameters\n ----------\n key : Basic/OuterIndexer\n An indexer to convert.\n shape : tuple\n Shape of the array subject to the indexing.\n\n Returns\n -------\n tuple\n Tuple suitable for use to index a NumPy array.\n \"\"\"\n if len([k for k in key.tuple if not isinstance(k, slice)]) <= 1:\n # If there is only one vector and all others are slice,\n # it can be safely used in mixed basic/advanced indexing.\n # Boolean index should already be converted to integer array.\n return key.tuple\n else:\n return _outer_to_vectorized_indexer(key, shape).tuple"},{"col":4,"comment":"Persist all Dask arrays in memory\n ","endLoc":808,"header":"def _persist_inplace(self, **kwargs) -> \"Dataset\"","id":750,"name":"_persist_inplace","nodeType":"Function","startLoc":790,"text":"def _persist_inplace(self, **kwargs) -> \"Dataset\":\n \"\"\"Persist all Dask arrays in memory\n \"\"\"\n # access .data to coerce everything to numpy or dask arrays\n lazy_data = {\n k: v._data\n for k, v in self.variables.items()\n if isinstance(v._data, dask_array_type)\n }\n if lazy_data:\n import dask\n\n # evaluate all the dask arrays simultaneously\n evaluated_data = dask.persist(*lazy_data.values(), **kwargs)\n\n for k, data in zip(lazy_data, evaluated_data):\n self.variables[k].data = data\n\n return self"},{"col":4,"comment":"null","endLoc":131,"header":"def __init__(self, array)","id":751,"name":"__init__","nodeType":"Function","startLoc":130,"text":"def __init__(self, array):\n self._array = array"},{"col":4,"comment":" Trigger computation, keeping data as dask arrays\n\n This operation can be used to trigger computation on underlying dask\n arrays, similar to ``.compute()`` or ``.load()``. However this\n operation keeps the data as dask arrays. This is particularly useful\n when using the dask.distributed scheduler and you want to load a large\n amount of data into distributed memory.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.persist``.\n\n See Also\n --------\n dask.persist\n ","endLoc":829,"header":"def persist(self, **kwargs) -> \"Dataset\"","id":752,"name":"persist","nodeType":"Function","startLoc":810,"text":"def persist(self, **kwargs) -> \"Dataset\":\n \"\"\" Trigger computation, keeping data as dask arrays\n\n This operation can be used to trigger computation on underlying dask\n arrays, similar to ``.compute()`` or ``.load()``. However this\n operation keeps the data as dask arrays. 
This is particularly useful\n when using the dask.distributed scheduler and you want to load a large\n amount of data into distributed memory.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.persist``.\n\n See Also\n --------\n dask.persist\n \"\"\"\n new = self.copy(deep=False)\n return new._persist_inplace(**kwargs)"},{"col":4,"comment":"null","endLoc":1258,"header":"def transpose(self, order)","id":753,"name":"transpose","nodeType":"Function","startLoc":1257,"text":"def transpose(self, order):\n return self.array.transpose(order)"},{"col":4,"comment":"Replace variables with recalculated dimensions.","endLoc":929,"header":"def _replace_with_new_dims( # type: ignore\n self,\n variables: Dict[Hashable, Variable],\n coord_names: set = None,\n attrs: Optional[Dict[Hashable, Any]] = __default,\n indexes: Dict[Hashable, pd.Index] = __default,\n inplace: bool = False,\n ) -> \"Dataset\"","id":754,"name":"_replace_with_new_dims","nodeType":"Function","startLoc":917,"text":"def _replace_with_new_dims( # type: ignore\n self,\n variables: Dict[Hashable, Variable],\n coord_names: set = None,\n attrs: Optional[Dict[Hashable, Any]] = __default,\n indexes: Dict[Hashable, pd.Index] = __default,\n inplace: bool = False,\n ) -> \"Dataset\":\n \"\"\"Replace variables with recalculated dimensions.\"\"\"\n dims = calculate_dimensions(variables)\n return self._replace(\n variables, coord_names, dims, attrs, indexes, inplace=inplace\n )"},{"col":4,"comment":"null","endLoc":1262,"header":"def __getitem__(self, key)","id":755,"name":"__getitem__","nodeType":"Function","startLoc":1260,"text":"def __getitem__(self, key):\n array, key = self._indexing_array_and_key(key)\n return array[key]"},{"col":0,"comment":"null","endLoc":499,"header":"def diff_dim_summary(a, b)","id":756,"name":"diff_dim_summary","nodeType":"Function","startLoc":493,"text":"def diff_dim_summary(a, b):\n if a.dims != b.dims:\n return \"Differing dimensions:\\n ({}) != ({})\".format(\n dim_summary(a), dim_summary(b)\n )\n else:\n return \"\""},{"col":4,"comment":"null","endLoc":1276,"header":"def __setitem__(self, key, value)","id":757,"name":"__setitem__","nodeType":"Function","startLoc":1264,"text":"def __setitem__(self, key, value):\n array, key = self._indexing_array_and_key(key)\n try:\n array[key] = value\n except ValueError:\n # More informative exception if read-only view\n if not array.flags.writeable and not array.flags.owndata:\n raise ValueError(\n \"Assignment destination is a view. 
\"\n \"Do you want to .copy() array first?\"\n )\n else:\n raise"},{"col":0,"comment":"null","endLoc":555,"header":"def _diff_mapping_repr(a_mapping, b_mapping, compat, title, summarizer, col_width=None)","id":758,"name":"_diff_mapping_repr","nodeType":"Function","startLoc":502,"text":"def _diff_mapping_repr(a_mapping, b_mapping, compat, title, summarizer, col_width=None):\n def extra_items_repr(extra_keys, mapping, ab_side):\n extra_repr = [summarizer(k, mapping[k], col_width) for k in extra_keys]\n if extra_repr:\n header = \"{} only on the {} object:\".format(title, ab_side)\n return [header] + extra_repr\n else:\n return []\n\n a_keys = set(a_mapping)\n b_keys = set(b_mapping)\n\n summary = []\n\n diff_items = []\n\n for k in a_keys & b_keys:\n try:\n # compare xarray variable\n compatible = getattr(a_mapping[k], compat)(b_mapping[k])\n is_variable = True\n except AttributeError:\n # compare attribute value\n compatible = a_mapping[k] == b_mapping[k]\n is_variable = False\n\n if not compatible:\n temp = [\n summarizer(k, vars[k], col_width) for vars in (a_mapping, b_mapping)\n ]\n\n if compat == \"identical\" and is_variable:\n attrs_summary = []\n\n for m in (a_mapping, b_mapping):\n attr_s = \"\\n\".join(\n [summarize_attr(ak, av) for ak, av in m[k].attrs.items()]\n )\n attrs_summary.append(attr_s)\n\n temp = [\n \"\\n\".join([var_s, attr_s]) if attr_s else var_s\n for var_s, attr_s in zip(temp, attrs_summary)\n ]\n\n diff_items += [ab_side + s[1:] for ab_side, s in zip((\"L\", \"R\"), temp)]\n\n if diff_items:\n summary += [\"Differing {}:\".format(title.lower())] + diff_items\n\n summary += extra_items_repr(a_keys - b_keys, a_mapping, \"left\")\n summary += extra_items_repr(b_keys - a_keys, b_mapping, \"right\")\n\n return \"\\n\".join(summary)"},{"col":4,"comment":"Deprecated version of _replace_with_new_dims().\n\n Unlike _replace_with_new_dims(), this method always recalculates\n indexes from variables.\n ","endLoc":948,"header":"def _replace_vars_and_dims( # type: ignore\n self,\n variables: Dict[Hashable, Variable],\n coord_names: set = None,\n dims: Dict[Hashable, int] = None,\n attrs: Dict[Hashable, Any] = __default,\n inplace: bool = False,\n ) -> \"Dataset\"","id":759,"name":"_replace_vars_and_dims","nodeType":"Function","startLoc":931,"text":"def _replace_vars_and_dims( # type: ignore\n self,\n variables: Dict[Hashable, Variable],\n coord_names: set = None,\n dims: Dict[Hashable, int] = None,\n attrs: Dict[Hashable, Any] = __default,\n inplace: bool = False,\n ) -> \"Dataset\":\n \"\"\"Deprecated version of _replace_with_new_dims().\n\n Unlike _replace_with_new_dims(), this method always recalculates\n indexes from variables.\n \"\"\"\n if dims is None:\n dims = calculate_dimensions(variables)\n return self._replace(\n variables, coord_names, dims, attrs, indexes=None, inplace=inplace\n )"},{"col":4,"comment":"null","endLoc":58,"header":"def __init__(self, manager, lock=None)","id":760,"name":"__init__","nodeType":"Function","startLoc":56,"text":"def __init__(self, manager, lock=None):\n self._manager = manager\n self.lock = ensure_lock(lock)"},{"attributeType":"null","col":4,"comment":"null","endLoc":1228,"id":761,"name":"__slots__","nodeType":"Attribute","startLoc":1228,"text":"__slots__"},{"attributeType":"null","col":8,"comment":"null","endLoc":1237,"id":762,"name":"array","nodeType":"Attribute","startLoc":1237,"text":"self.array"},{"className":"DummyFileManager","col":0,"comment":"FileManager that simply wraps an open file in the FileManager interface.\n 
","endLoc":333,"id":763,"nodeType":"Class","startLoc":315,"text":"class DummyFileManager(FileManager):\n \"\"\"FileManager that simply wraps an open file in the FileManager interface.\n \"\"\"\n\n def __init__(self, value):\n self._value = value\n\n def acquire(self, needs_lock=True):\n del needs_lock # ignored\n return self._value\n\n @contextlib.contextmanager\n def acquire_context(self, needs_lock=True):\n del needs_lock\n yield self._value\n\n def close(self, needs_lock=True):\n del needs_lock # ignored\n self._value.close()"},{"col":4,"comment":"null","endLoc":324,"header":"def acquire(self, needs_lock=True)","id":764,"name":"acquire","nodeType":"Function","startLoc":322,"text":"def acquire(self, needs_lock=True):\n del needs_lock # ignored\n return self._value"},{"col":4,"comment":"null","endLoc":329,"header":"@contextlib.contextmanager\n def acquire_context(self, needs_lock=True)","id":765,"name":"acquire_context","nodeType":"Function","startLoc":326,"text":"@contextlib.contextmanager\n def acquire_context(self, needs_lock=True):\n del needs_lock\n yield self._value"},{"col":4,"comment":"null","endLoc":333,"header":"def close(self, needs_lock=True)","id":766,"name":"close","nodeType":"Function","startLoc":331,"text":"def close(self, needs_lock=True):\n del needs_lock # ignored\n self._value.close()"},{"col":4,"comment":"null","endLoc":78,"header":"def __init__(self, freq, closed=None, label=None, base=0, loffset=None)","id":767,"name":"__init__","nodeType":"Function","startLoc":62,"text":"def __init__(self, freq, closed=None, label=None, base=0, loffset=None):\n self.freq = to_offset(freq)\n self.closed = closed\n self.label = label\n self.base = base\n self.loffset = loffset\n\n if isinstance(self.freq, (MonthEnd, QuarterEnd, YearEnd)):\n if self.closed is None:\n self.closed = \"right\"\n if self.label is None:\n self.label = \"right\"\n else:\n if self.closed is None:\n self.closed = \"left\"\n if self.label is None:\n self.label = \"left\""},{"attributeType":"null","col":8,"comment":"null","endLoc":320,"id":768,"name":"_value","nodeType":"Attribute","startLoc":320,"text":"self._value"},{"className":"ScipyArrayWrapper","col":0,"comment":"null","endLoc":56,"id":769,"nodeType":"Class","startLoc":26,"text":"class ScipyArrayWrapper(BackendArray):\n def __init__(self, variable_name, datastore):\n self.datastore = datastore\n self.variable_name = variable_name\n array = self.get_variable().data\n self.shape = array.shape\n self.dtype = np.dtype(array.dtype.kind + str(array.dtype.itemsize))\n\n def get_variable(self, needs_lock=True):\n ds = self.datastore._manager.acquire(needs_lock)\n return ds.variables[self.variable_name]\n\n def __getitem__(self, key):\n data = NumpyIndexingAdapter(self.get_variable().data)[key]\n # Copy data if the source file is mmapped. 
This makes things consistent\n # with the netCDF4 library by ensuring we can safely read arrays even\n # after closing associated files.\n copy = self.datastore.ds.use_mmap\n return np.array(data, dtype=self.dtype, copy=copy)\n\n def __setitem__(self, key, value):\n with self.datastore.lock:\n data = self.get_variable(needs_lock=False)\n try:\n data[key] = value\n except TypeError:\n if key is Ellipsis:\n # workaround for GH: scipy/scipy#6880\n data[:] = value\n else:\n raise"},{"col":4,"comment":"null","endLoc":32,"header":"def __init__(self, variable_name, datastore)","id":770,"name":"__init__","nodeType":"Function","startLoc":27,"text":"def __init__(self, variable_name, datastore):\n self.datastore = datastore\n self.variable_name = variable_name\n array = self.get_variable().data\n self.shape = array.shape\n self.dtype = np.dtype(array.dtype.kind + str(array.dtype.itemsize))"},{"col":4,"comment":"null","endLoc":43,"header":"def __init__(self, filename, lock=None, **backend_kwargs)","id":771,"name":"__init__","nodeType":"Function","startLoc":37,"text":"def __init__(self, filename, lock=None, **backend_kwargs):\n import cfgrib\n\n if lock is None:\n lock = ECCODES_LOCK\n self.lock = ensure_lock(lock)\n self.ds = cfgrib.open_file(filename, **backend_kwargs)"},{"col":4,"comment":"null","endLoc":968,"header":"def _overwrite_indexes(self, indexes: Mapping[Any, pd.Index]) -> \"Dataset\"","id":772,"name":"_overwrite_indexes","nodeType":"Function","startLoc":950,"text":"def _overwrite_indexes(self, indexes: Mapping[Any, pd.Index]) -> \"Dataset\":\n if not indexes:\n return self\n\n variables = self._variables.copy()\n new_indexes = dict(self.indexes)\n for name, idx in indexes.items():\n variables[name] = IndexVariable(name, idx)\n new_indexes[name] = idx\n obj = self._replace(variables, indexes=new_indexes)\n\n # switch from dimension to level names, if necessary\n dim_names: Dict[Hashable, str] = {}\n for dim, idx in indexes.items():\n if not isinstance(idx, pd.MultiIndex) and idx.name != dim:\n dim_names[dim] = idx.name\n if dim_names:\n obj = obj.rename(dim_names)\n return obj"},{"col":4,"comment":"null","endLoc":36,"header":"def get_variable(self, needs_lock=True)","id":773,"name":"get_variable","nodeType":"Function","startLoc":34,"text":"def get_variable(self, needs_lock=True):\n ds = self.datastore._manager.acquire(needs_lock)\n return ds.variables[self.variable_name]"},{"col":4,"comment":"null","endLoc":44,"header":"def __getitem__(self, key)","id":774,"name":"__getitem__","nodeType":"Function","startLoc":38,"text":"def __getitem__(self, key):\n data = NumpyIndexingAdapter(self.get_variable().data)[key]\n # Copy data if the source file is mmapped. 
This makes things consistent\n # with the netCDF4 library by ensuring we can safely read arrays even\n # after closing associated files.\n copy = self.datastore.ds.use_mmap\n return np.array(data, dtype=self.dtype, copy=copy)"},{"col":0,"comment":"null","endLoc":139,"header":"def _get_engine_from_magic_number(filename_or_obj)","id":775,"name":"_get_engine_from_magic_number","nodeType":"Function","startLoc":109,"text":"def _get_engine_from_magic_number(filename_or_obj):\n # check byte header to determine file type\n if isinstance(filename_or_obj, bytes):\n magic_number = filename_or_obj[:8]\n else:\n if filename_or_obj.tell() != 0:\n raise ValueError(\n \"file-like object read/write pointer not at zero \"\n \"please close and reopen, or use a context \"\n \"manager\"\n )\n magic_number = filename_or_obj.read(8)\n filename_or_obj.seek(0)\n\n if magic_number.startswith(b\"CDF\"):\n engine = \"scipy\"\n elif magic_number.startswith(b\"\\211HDF\\r\\n\\032\\n\"):\n engine = \"h5netcdf\"\n if isinstance(filename_or_obj, bytes):\n raise ValueError(\n \"can't open netCDF4/HDF5 as bytes \"\n \"try passing a path or file-like object\"\n )\n else:\n if isinstance(filename_or_obj, bytes) and len(filename_or_obj) > 80:\n filename_or_obj = filename_or_obj[:80] + b\"...\"\n raise ValueError(\n \"{} is not a valid netCDF file \"\n \"did you mean to pass a string for a path instead?\".format(filename_or_obj)\n )\n return engine"},{"col":4,"comment":"null","endLoc":56,"header":"def __setitem__(self, key, value)","id":776,"name":"__setitem__","nodeType":"Function","startLoc":46,"text":"def __setitem__(self, key, value):\n with self.datastore.lock:\n data = self.get_variable(needs_lock=False)\n try:\n data[key] = value\n except TypeError:\n if key is Ellipsis:\n # workaround for GH: scipy/scipy#6880\n data[:] = value\n else:\n raise"},{"col":4,"comment":"null","endLoc":439,"header":"def _get_index_and_items(self, index, grouper)","id":777,"name":"_get_index_and_items","nodeType":"Function","startLoc":427,"text":"def _get_index_and_items(self, index, grouper):\n from .resample_cftime import CFTimeGrouper\n\n s = pd.Series(np.arange(index.size), index)\n if isinstance(grouper, CFTimeGrouper):\n first_items = grouper.first_items(index)\n else:\n first_items = s.groupby(grouper).first()\n _apply_loffset(grouper, first_items)\n full_index = first_items.index\n if first_items.isnull().any():\n first_items = first_items.dropna()\n return full_index, first_items"},{"attributeType":"null","col":8,"comment":"null","endLoc":31,"id":778,"name":"shape","nodeType":"Attribute","startLoc":31,"text":"self.shape"},{"col":0,"comment":"Convert a frequency string to the appropriate subclass of\n BaseCFTimeOffset.","endLoc":667,"header":"def to_offset(freq)","id":779,"name":"to_offset","nodeType":"Function","startLoc":649,"text":"def to_offset(freq):\n \"\"\"Convert a frequency string to the appropriate subclass of\n BaseCFTimeOffset.\"\"\"\n if isinstance(freq, BaseCFTimeOffset):\n return freq\n else:\n try:\n freq_data = re.match(_PATTERN, freq).groupdict()\n except AttributeError:\n raise ValueError(\"Invalid frequency string provided\")\n\n freq = freq_data[\"freq\"]\n multiples = freq_data[\"multiple\"]\n if multiples is None:\n multiples = 1\n else:\n multiples = int(multiples)\n\n return 
_FREQUENCIES[freq](n=multiples)"},{"attributeType":"null","col":8,"comment":"null","endLoc":28,"id":780,"name":"datastore","nodeType":"Attribute","startLoc":28,"text":"self.datastore"},{"attributeType":"null","col":8,"comment":"null","endLoc":32,"id":781,"name":"dtype","nodeType":"Attribute","startLoc":32,"text":"self.dtype"},{"attributeType":"null","col":8,"comment":"null","endLoc":29,"id":782,"name":"variable_name","nodeType":"Attribute","startLoc":29,"text":"self.variable_name"},{"col":0,"comment":"\n (copied from pandas)\n if loffset is set, offset the result index\n\n This is NOT an idempotent routine, it will be applied\n exactly once to the result.\n\n Parameters\n ----------\n result : Series or DataFrame\n the result of resample\n ","endLoc":220,"header":"def _apply_loffset(grouper, result)","id":783,"name":"_apply_loffset","nodeType":"Function","startLoc":197,"text":"def _apply_loffset(grouper, result):\n \"\"\"\n (copied from pandas)\n if loffset is set, offset the result index\n\n This is NOT an idempotent routine, it will be applied\n exactly once to the result.\n\n Parameters\n ----------\n result : Series or DataFrame\n the result of resample\n \"\"\"\n\n needs_offset = (\n isinstance(grouper.loffset, (pd.DateOffset, datetime.timedelta))\n and isinstance(result.index, pd.DatetimeIndex)\n and len(result.index) > 0\n )\n\n if needs_offset:\n result.index = result.index + grouper.loffset\n\n grouper.loffset = None"},{"className":"ScipyDataStore","col":0,"comment":"Store for reading and writing data via scipy.io.netcdf.\n\n This store has the advantage of being able to be initialized with a\n StringIO object, allow for serialization without writing to disk.\n\n It only supports the NetCDF3 file-format.\n ","endLoc":223,"id":784,"nodeType":"Class","startLoc":99,"text":"class ScipyDataStore(WritableCFDataStore):\n \"\"\"Store for reading and writing data via scipy.io.netcdf.\n\n This store has the advantage of being able to be initialized with a\n StringIO object, allow for serialization without writing to disk.\n\n It only supports the NetCDF3 file-format.\n \"\"\"\n\n def __init__(\n self, filename_or_obj, mode=\"r\", format=None, group=None, mmap=None, lock=None\n ):\n if group is not None:\n raise ValueError(\n \"cannot save to a group with the \" \"scipy.io.netcdf backend\"\n )\n\n if format is None or format == \"NETCDF3_64BIT\":\n version = 2\n elif format == \"NETCDF3_CLASSIC\":\n version = 1\n else:\n raise ValueError(\"invalid format for scipy.io.netcdf backend: %r\" % format)\n\n if lock is None and mode != \"r\" and isinstance(filename_or_obj, str):\n lock = get_write_lock(filename_or_obj)\n\n self.lock = ensure_lock(lock)\n\n if isinstance(filename_or_obj, str):\n manager = CachingFileManager(\n _open_scipy_netcdf,\n filename_or_obj,\n mode=mode,\n lock=lock,\n kwargs=dict(mmap=mmap, version=version),\n )\n else:\n scipy_dataset = _open_scipy_netcdf(\n filename_or_obj, mode=mode, mmap=mmap, version=version\n )\n manager = DummyFileManager(scipy_dataset)\n\n self._manager = manager\n\n @property\n def ds(self):\n return self._manager.acquire()\n\n def open_store_variable(self, name, var):\n return Variable(\n var.dimensions,\n ScipyArrayWrapper(name, self),\n _decode_attrs(var._attributes),\n )\n\n def get_variables(self):\n return FrozenDict(\n (k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()\n )\n\n def get_attrs(self):\n return Frozen(_decode_attrs(self.ds._attributes))\n\n def get_dimensions(self):\n return Frozen(self.ds.dimensions)\n\n 
def get_encoding(self):\n encoding = {}\n encoding[\"unlimited_dims\"] = {\n k for k, v in self.ds.dimensions.items() if v is None\n }\n return encoding\n\n def set_dimension(self, name, length, is_unlimited=False):\n if name in self.ds.dimensions:\n raise ValueError(\n \"%s does not support modifying dimensions\" % type(self).__name__\n )\n dim_length = length if not is_unlimited else None\n self.ds.createDimension(name, dim_length)\n\n def _validate_attr_key(self, key):\n if not is_valid_nc3_name(key):\n raise ValueError(\"Not a valid attribute name\")\n\n def set_attribute(self, key, value):\n self._validate_attr_key(key)\n value = encode_nc3_attr_value(value)\n setattr(self.ds, key, value)\n\n def encode_variable(self, variable):\n variable = encode_nc3_variable(variable)\n return variable\n\n def prepare_variable(\n self, name, variable, check_encoding=False, unlimited_dims=None\n ):\n if check_encoding and variable.encoding:\n if variable.encoding != {\"_FillValue\": None}:\n raise ValueError(\n \"unexpected encoding for scipy backend: %r\"\n % list(variable.encoding)\n )\n\n data = variable.data\n # nb. this still creates a numpy array in all memory, even though we\n # don't write the data yet; scipy.io.netcdf does not not support\n # incremental writes.\n if name not in self.ds.variables:\n self.ds.createVariable(name, data.dtype, variable.dims)\n scipy_var = self.ds.variables[name]\n for k, v in variable.attrs.items():\n self._validate_attr_key(k)\n setattr(scipy_var, k, v)\n\n target = ScipyArrayWrapper(name, self)\n\n return target, data\n\n def sync(self):\n self.ds.sync()\n\n def close(self):\n self._manager.close()"},{"col":4,"comment":"null","endLoc":146,"header":"@property\n def ds(self)","id":785,"name":"ds","nodeType":"Function","startLoc":144,"text":"@property\n def ds(self):\n return self._manager.acquire()"},{"col":0,"comment":"null","endLoc":577,"header":"def _compat_to_str(compat)","id":787,"name":"_compat_to_str","nodeType":"Function","startLoc":573,"text":"def _compat_to_str(compat):\n if compat == \"equals\":\n return \"equal\"\n else:\n return compat"},{"col":0,"comment":"null","endLoc":607,"header":"def diff_array_repr(a, b, compat)","id":788,"name":"diff_array_repr","nodeType":"Function","startLoc":580,"text":"def diff_array_repr(a, b, compat):\n # used for DataArray, Variable and IndexVariable\n summary = [\n \"Left and right {} objects are not {}\".format(\n type(a).__name__, _compat_to_str(compat)\n )\n ]\n\n summary.append(diff_dim_summary(a, b))\n\n if not array_equiv(a.data, b.data):\n temp = [wrap_indent(short_numpy_repr(obj), start=\" \") for obj in (a, b)]\n diff_data_repr = [\n ab_side + \"\\n\" + ab_data_repr\n for ab_side, ab_data_repr in zip((\"L\", \"R\"), temp)\n ]\n summary += [\"Differing values:\"] + diff_data_repr\n\n if hasattr(a, \"coords\"):\n col_width = _calculate_col_width(set(a.coords) | set(b.coords))\n summary.append(\n diff_coords_repr(a.coords, b.coords, compat, col_width=col_width)\n )\n\n if compat == \"identical\":\n summary.append(diff_attrs_repr(a.attrs, b.attrs, compat))\n\n return \"\\n\".join(summary)"},{"col":0,"comment":"Context manager to ensure that a file opened by xarray is closed if an\n exception is raised before the user sees the file object.\n ","endLoc":560,"header":"@contextlib.contextmanager\ndef close_on_error(f)","id":789,"name":"close_on_error","nodeType":"Function","startLoc":551,"text":"@contextlib.contextmanager\ndef close_on_error(f):\n \"\"\"Context manager to ensure that a file opened by xarray is 
closed if an\n exception is raised before the user sees the file object.\n \"\"\"\n try:\n yield\n except Exception:\n f.close()\n raise"},{"col":0,"comment":"null","endLoc":194,"header":"def _unique_and_monotonic(group)","id":791,"name":"_unique_and_monotonic","nodeType":"Function","startLoc":189,"text":"def _unique_and_monotonic(group):\n if isinstance(group, _DummyGroup):\n return True\n else:\n index = safe_cast_to_index(group)\n return index.is_unique and index.is_monotonic"},{"id":792,"name":"xarray/plot","nodeType":"Package"},{"fileName":"dataset_plot.py","filePath":"xarray/plot","id":793,"nodeType":"File","text":"import functools\n\nimport numpy as np\nimport pandas as pd\n\nfrom ..core.alignment import broadcast\nfrom .facetgrid import _easy_facetgrid\nfrom .utils import (\n _add_colorbar,\n _is_numeric,\n _process_cmap_cbar_kwargs,\n get_axis,\n label_from_attrs,\n)\n\n# copied from seaborn\n_MARKERSIZE_RANGE = np.array([18.0, 72.0])\n\n\ndef _infer_meta_data(ds, x, y, hue, hue_style, add_guide):\n dvars = set(ds.variables.keys())\n error_msg = \" must be one of ({0:s})\".format(\", \".join(dvars))\n\n if x not in dvars:\n raise ValueError(\"x\" + error_msg)\n\n if y not in dvars:\n raise ValueError(\"y\" + error_msg)\n\n if hue is not None and hue not in dvars:\n raise ValueError(\"hue\" + error_msg)\n\n if hue:\n hue_is_numeric = _is_numeric(ds[hue].values)\n\n if hue_style is None:\n hue_style = \"continuous\" if hue_is_numeric else \"discrete\"\n\n if not hue_is_numeric and (hue_style == \"continuous\"):\n raise ValueError(\n \"Cannot create a colorbar for a non numeric\" \" coordinate: \" + hue\n )\n\n if add_guide is None or add_guide is True:\n add_colorbar = True if hue_style == \"continuous\" else False\n add_legend = True if hue_style == \"discrete\" else False\n else:\n add_colorbar = False\n add_legend = False\n else:\n if add_guide is True:\n raise ValueError(\"Cannot set add_guide when hue is None.\")\n add_legend = False\n add_colorbar = False\n\n if hue_style is not None and hue_style not in [\"discrete\", \"continuous\"]:\n raise ValueError(\n \"hue_style must be either None, 'discrete' \" \"or 'continuous'.\"\n )\n\n if hue:\n hue_label = label_from_attrs(ds[hue])\n hue = ds[hue]\n else:\n hue_label = None\n hue = None\n\n return {\n \"add_colorbar\": add_colorbar,\n \"add_legend\": add_legend,\n \"hue_label\": hue_label,\n \"hue_style\": hue_style,\n \"xlabel\": label_from_attrs(ds[x]),\n \"ylabel\": label_from_attrs(ds[y]),\n \"hue\": hue,\n }\n\n\ndef _infer_scatter_data(ds, x, y, hue, markersize, size_norm, size_mapping=None):\n\n broadcast_keys = [\"x\", \"y\"]\n to_broadcast = [ds[x], ds[y]]\n if hue:\n to_broadcast.append(ds[hue])\n broadcast_keys.append(\"hue\")\n if markersize:\n to_broadcast.append(ds[markersize])\n broadcast_keys.append(\"size\")\n\n broadcasted = dict(zip(broadcast_keys, broadcast(*to_broadcast)))\n\n data = {\"x\": broadcasted[\"x\"], \"y\": broadcasted[\"y\"], \"hue\": None, \"sizes\": None}\n\n if hue:\n data[\"hue\"] = broadcasted[\"hue\"]\n\n if markersize:\n size = broadcasted[\"size\"]\n\n if size_mapping is None:\n size_mapping = _parse_size(size, size_norm)\n\n data[\"sizes\"] = size.copy(\n data=np.reshape(size_mapping.loc[size.values.ravel()].values, size.shape)\n )\n\n return data\n\n\n# copied from seaborn\ndef _parse_size(data, norm):\n\n import matplotlib as mpl\n\n if data is None:\n return None\n\n data = data.values.flatten()\n\n if not _is_numeric(data):\n levels = np.unique(data)\n numbers = np.arange(1, 1 + 
len(levels))[::-1]\n else:\n levels = numbers = np.sort(np.unique(data))\n\n min_width, max_width = _MARKERSIZE_RANGE\n # width_range = min_width, max_width\n\n if norm is None:\n norm = mpl.colors.Normalize()\n elif isinstance(norm, tuple):\n norm = mpl.colors.Normalize(*norm)\n elif not isinstance(norm, mpl.colors.Normalize):\n err = \"``size_norm`` must be None, tuple, \" \"or Normalize object.\"\n raise ValueError(err)\n\n norm.clip = True\n if not norm.scaled():\n norm(np.asarray(numbers))\n # limits = norm.vmin, norm.vmax\n\n scl = norm(numbers)\n widths = np.asarray(min_width + scl * (max_width - min_width))\n if scl.mask.any():\n widths[scl.mask] = 0\n sizes = dict(zip(levels, widths))\n\n return pd.Series(sizes)\n\n\nclass _Dataset_PlotMethods(object):\n \"\"\"\n Enables use of xarray.plot functions as attributes on a Dataset.\n For example, Dataset.plot.scatter\n \"\"\"\n\n def __init__(self, dataset):\n self._ds = dataset\n\n def __call__(self, *args, **kwargs):\n raise ValueError(\n \"Dataset.plot cannot be called directly. Use \"\n \"an explicit plot method, e.g. ds.plot.scatter(...)\"\n )\n\n\ndef _dsplot(plotfunc):\n commondoc = \"\"\"\n Parameters\n ----------\n\n ds : Dataset\n x, y : string\n Variable names for x, y axis.\n hue: str, optional\n Variable by which to color scattered points\n hue_style: str, optional\n Can be either 'discrete' (legend) or 'continuous' (color bar).\n markersize: str, optional (scatter only)\n Variably by which to vary size of scattered points\n size_norm: optional\n Either None or 'Norm' instance to normalize the 'markersize' variable.\n add_guide: bool, optional\n Add a guide that depends on hue_style\n - for \"discrete\", build a legend.\n This is the default for non-numeric `hue` variables.\n - for \"continuous\", build a colorbar\n row : string, optional\n If passed, make row faceted plots on this dimension name\n col : string, optional\n If passed, make column faceted plots on this dimension name\n col_wrap : integer, optional\n Use together with ``col`` to wrap faceted plots\n ax : matplotlib axes, optional\n If None, uses the current axis. Not applicable when using facets.\n subplot_kws : dict, optional\n Dictionary of keyword arguments for matplotlib subplots. Only applies\n to FacetGrid plotting.\n aspect : scalar, optional\n Aspect ratio of plot, so that ``aspect * size`` gives the width in\n inches. Only used if a ``size`` is provided.\n size : scalar, optional\n If provided, create a new figure for the plot with the given size.\n Height (in inches) of each plot. See also: ``aspect``.\n norm : ``matplotlib.colors.Normalize`` instance, optional\n If the ``norm`` has vmin or vmax specified, the corresponding kwarg\n must be None.\n vmin, vmax : floats, optional\n Values to anchor the colormap, otherwise they are inferred from the\n data and other keyword arguments. When a diverging dataset is inferred,\n setting one of these values will fix the other by symmetry around\n ``center``. Setting both values prevents use of a diverging colormap.\n If discrete levels are provided as an explicit list, both of these\n values are ignored.\n cmap : matplotlib colormap name or object, optional\n The mapping from data values to color space. If not provided, this\n will be either be ``viridis`` (if the function infers a sequential\n dataset) or ``RdBu_r`` (if the function infers a diverging dataset).\n When `Seaborn` is installed, ``cmap`` may also be a `seaborn`\n color palette. 
If ``cmap`` is seaborn color palette and the plot type\n is not ``contour`` or ``contourf``, ``levels`` must also be specified.\n colors : discrete colors to plot, optional\n A single color or a list of colors. If the plot type is not ``contour``\n or ``contourf``, the ``levels`` argument is required.\n center : float, optional\n The value at which to center the colormap. Passing this value implies\n use of a diverging colormap. Setting it to ``False`` prevents use of a\n diverging colormap.\n robust : bool, optional\n If True and ``vmin`` or ``vmax`` are absent, the colormap range is\n computed with 2nd and 98th percentiles instead of the extreme values.\n extend : {'neither', 'both', 'min', 'max'}, optional\n How to draw arrows extending the colorbar beyond its limits. If not\n provided, extend is inferred from vmin, vmax and the data limits.\n levels : int or list-like object, optional\n Split the colormap (cmap) into discrete color intervals. If an integer\n is provided, \"nice\" levels are chosen based on the data range: this can\n imply that the final number of levels is not exactly the expected one.\n Setting ``vmin`` and/or ``vmax`` with ``levels=N`` is equivalent to\n setting ``levels=np.linspace(vmin, vmax, N)``.\n **kwargs : optional\n Additional keyword arguments to matplotlib\n \"\"\"\n\n # Build on the original docstring\n plotfunc.__doc__ = \"%s\\n%s\" % (plotfunc.__doc__, commondoc)\n\n @functools.wraps(plotfunc)\n def newplotfunc(\n ds,\n x=None,\n y=None,\n hue=None,\n hue_style=None,\n col=None,\n row=None,\n ax=None,\n figsize=None,\n size=None,\n col_wrap=None,\n sharex=True,\n sharey=True,\n aspect=None,\n subplot_kws=None,\n add_guide=None,\n cbar_kwargs=None,\n cbar_ax=None,\n vmin=None,\n vmax=None,\n norm=None,\n infer_intervals=None,\n center=None,\n levels=None,\n robust=None,\n colors=None,\n extend=None,\n cmap=None,\n **kwargs\n ):\n\n _is_facetgrid = kwargs.pop(\"_is_facetgrid\", False)\n if _is_facetgrid: # facetgrid call\n meta_data = kwargs.pop(\"meta_data\")\n else:\n meta_data = _infer_meta_data(ds, x, y, hue, hue_style, add_guide)\n\n hue_style = meta_data[\"hue_style\"]\n\n # handle facetgrids first\n if col or row:\n allargs = locals().copy()\n allargs[\"plotfunc\"] = globals()[plotfunc.__name__]\n allargs[\"data\"] = ds\n # TODO dcherian: why do I need to remove kwargs?\n for arg in [\"meta_data\", \"kwargs\", \"ds\"]:\n del allargs[arg]\n\n return _easy_facetgrid(kind=\"dataset\", **allargs, **kwargs)\n\n figsize = kwargs.pop(\"figsize\", None)\n ax = get_axis(figsize, size, aspect, ax)\n\n if hue_style == \"continuous\" and hue is not None:\n if _is_facetgrid:\n cbar_kwargs = meta_data[\"cbar_kwargs\"]\n cmap_params = meta_data[\"cmap_params\"]\n else:\n cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs(\n plotfunc, ds[hue].values, **locals()\n )\n\n # subset that can be passed to scatter, hist2d\n cmap_params_subset = dict(\n (vv, cmap_params[vv]) for vv in [\"vmin\", \"vmax\", \"norm\", \"cmap\"]\n )\n\n else:\n cmap_params_subset = {}\n\n primitive = plotfunc(\n ds=ds,\n x=x,\n y=y,\n hue=hue,\n hue_style=hue_style,\n ax=ax,\n cmap_params=cmap_params_subset,\n **kwargs\n )\n\n if _is_facetgrid: # if this was called from Facetgrid.map_dataset,\n return primitive # finish here. 
Else, make labels\n\n if meta_data.get(\"xlabel\", None):\n ax.set_xlabel(meta_data.get(\"xlabel\"))\n if meta_data.get(\"ylabel\", None):\n ax.set_ylabel(meta_data.get(\"ylabel\"))\n\n if meta_data[\"add_legend\"]:\n ax.legend(\n handles=primitive,\n labels=list(meta_data[\"hue\"].values),\n title=meta_data.get(\"hue_label\", None),\n )\n if meta_data[\"add_colorbar\"]:\n cbar_kwargs = {} if cbar_kwargs is None else cbar_kwargs\n if \"label\" not in cbar_kwargs:\n cbar_kwargs[\"label\"] = meta_data.get(\"hue_label\", None)\n _add_colorbar(primitive, ax, cbar_ax, cbar_kwargs, cmap_params)\n\n return primitive\n\n @functools.wraps(newplotfunc)\n def plotmethod(\n _PlotMethods_obj,\n x=None,\n y=None,\n hue=None,\n hue_style=None,\n col=None,\n row=None,\n ax=None,\n figsize=None,\n col_wrap=None,\n sharex=True,\n sharey=True,\n aspect=None,\n size=None,\n subplot_kws=None,\n add_guide=None,\n cbar_kwargs=None,\n cbar_ax=None,\n vmin=None,\n vmax=None,\n norm=None,\n infer_intervals=None,\n center=None,\n levels=None,\n robust=None,\n colors=None,\n extend=None,\n cmap=None,\n **kwargs\n ):\n \"\"\"\n The method should have the same signature as the function.\n\n This just makes the method work on Plotmethods objects,\n and passes all the other arguments straight through.\n \"\"\"\n allargs = locals()\n allargs[\"ds\"] = _PlotMethods_obj._ds\n allargs.update(kwargs)\n for arg in [\"_PlotMethods_obj\", \"newplotfunc\", \"kwargs\"]:\n del allargs[arg]\n return newplotfunc(**allargs)\n\n # Add to class _PlotMethods\n setattr(_Dataset_PlotMethods, plotmethod.__name__, plotmethod)\n\n return newplotfunc\n\n\n@_dsplot\ndef scatter(ds, x, y, ax, **kwargs):\n \"\"\"\n Scatter Dataset data variables against each other.\n \"\"\"\n\n if \"add_colorbar\" in kwargs or \"add_legend\" in kwargs:\n raise ValueError(\n \"Dataset.plot.scatter does not accept \"\n \"'add_colorbar' or 'add_legend'. 
\"\n \"Use 'add_guide' instead.\"\n )\n\n cmap_params = kwargs.pop(\"cmap_params\")\n hue = kwargs.pop(\"hue\")\n hue_style = kwargs.pop(\"hue_style\")\n markersize = kwargs.pop(\"markersize\", None)\n size_norm = kwargs.pop(\"size_norm\", None)\n size_mapping = kwargs.pop(\"size_mapping\", None) # set by facetgrid\n\n # need to infer size_mapping with full dataset\n data = _infer_scatter_data(ds, x, y, hue, markersize, size_norm, size_mapping)\n\n if hue_style == \"discrete\":\n primitive = []\n for label in np.unique(data[\"hue\"].values):\n mask = data[\"hue\"] == label\n if data[\"sizes\"] is not None:\n kwargs.update(s=data[\"sizes\"].where(mask, drop=True).values.flatten())\n\n primitive.append(\n ax.scatter(\n data[\"x\"].where(mask, drop=True).values.flatten(),\n data[\"y\"].where(mask, drop=True).values.flatten(),\n label=label,\n **kwargs\n )\n )\n\n elif hue is None or hue_style == \"continuous\":\n if data[\"sizes\"] is not None:\n kwargs.update(s=data[\"sizes\"].values.ravel())\n if data[\"hue\"] is not None:\n kwargs.update(c=data[\"hue\"].values.ravel())\n\n primitive = ax.scatter(\n data[\"x\"].values.ravel(), data[\"y\"].values.ravel(), **cmap_params, **kwargs\n )\n\n return primitive\n"},{"col":4,"comment":"Return a mapping of all MultiIndex levels and their corresponding\n coordinate name.\n ","endLoc":1102,"header":"@property\n def _level_coords(self) -> Dict[str, Hashable]","id":794,"name":"_level_coords","nodeType":"Function","startLoc":1091,"text":"@property\n def _level_coords(self) -> Dict[str, Hashable]:\n \"\"\"Return a mapping of all MultiIndex levels and their corresponding\n coordinate name.\n \"\"\"\n level_coords: Dict[str, Hashable] = {}\n for name, index in self.indexes.items():\n if isinstance(index, pd.MultiIndex):\n level_names = index.names\n (dim,) = self.variables[name].dims\n level_coords.update({lname: dim for lname in level_names})\n return level_coords"},{"col":0,"comment":"Group an array by its unique values.\n\n Parameters\n ----------\n ar : array-like\n Input array. This will be flattened if it is not already 1-D.\n sort : boolean, optional\n Whether or not to sort unique values.\n\n Returns\n -------\n values : np.ndarray\n Sorted, unique values as returned by `np.unique`.\n indices : list of lists of int\n Each element provides the integer indices in `ar` with values given by\n the corresponding value in `unique_values`.\n ","endLoc":49,"header":"def unique_value_groups(ar, sort=True)","id":795,"name":"unique_value_groups","nodeType":"Function","startLoc":25,"text":"def unique_value_groups(ar, sort=True):\n \"\"\"Group an array by its unique values.\n\n Parameters\n ----------\n ar : array-like\n Input array. 
This will be flattened if it is not already 1-D.\n sort : boolean, optional\n Whether or not to sort unique values.\n\n Returns\n -------\n values : np.ndarray\n Sorted, unique values as returned by `np.unique`.\n indices : list of lists of int\n Each element provides the integer indices in `ar` with values given by\n the corresponding value in `unique_values`.\n \"\"\"\n inverse, values = pd.factorize(ar, sort=sort)\n groups = [[] for _ in range(len(values))]\n for n, g in enumerate(inverse):\n if g >= 0:\n # pandas uses -1 to mark NaN, but doesn't include them in values\n groups[g].append(n)\n return values, groups"},{"col":4,"comment":"null","endLoc":153,"header":"def open_store_variable(self, name, var)","id":796,"name":"open_store_variable","nodeType":"Function","startLoc":148,"text":"def open_store_variable(self, name, var):\n return Variable(\n var.dimensions,\n ScipyArrayWrapper(name, self),\n _decode_attrs(var._attributes),\n )"},{"col":0,"comment":"null","endLoc":23,"header":"def _decode_attrs(d)","id":797,"name":"_decode_attrs","nodeType":"Function","startLoc":20,"text":"def _decode_attrs(d):\n # don't decode _FillValue from bytes -> unicode, because we want to ensure\n # that its type matches the data exactly\n return {k: v if k == \"_FillValue\" else _decode_string(v) for (k, v) in d.items()}"},{"col":0,"comment":"null","endLoc":17,"header":"def _decode_string(s)","id":798,"name":"_decode_string","nodeType":"Function","startLoc":14,"text":"def _decode_string(s):\n if isinstance(s, bytes):\n return s.decode(\"utf-8\", \"replace\")\n return s"},{"col":4,"comment":"Create a new Dataset with the listed variables from this dataset and\n the all relevant coordinates. Skips all validation.\n ","endLoc":1138,"header":"def _copy_listed(self, names: Iterable[Hashable]) -> \"Dataset\"","id":799,"name":"_copy_listed","nodeType":"Function","startLoc":1104,"text":"def _copy_listed(self, names: Iterable[Hashable]) -> \"Dataset\":\n \"\"\"Create a new Dataset with the listed variables from this dataset and\n the all relevant coordinates. 
Skips all validation.\n \"\"\"\n variables: Dict[Hashable, Variable] = {}\n coord_names = set()\n indexes: Dict[Hashable, pd.Index] = {}\n\n for name in names:\n try:\n variables[name] = self._variables[name]\n except KeyError:\n ref_name, var_name, var = _get_virtual_variable(\n self._variables, name, self._level_coords, self.dims\n )\n variables[var_name] = var\n if ref_name in self._coord_names or ref_name in self.dims:\n coord_names.add(var_name)\n if (var_name,) == var.dims:\n indexes[var_name] = var.to_index()\n\n needed_dims: Set[Hashable] = set()\n for v in variables.values():\n needed_dims.update(v.dims)\n\n dims = {k: self.dims[k] for k in needed_dims}\n\n for k in self._coord_names:\n if set(self.variables[k].dims) <= needed_dims:\n variables[k] = self._variables[k]\n coord_names.add(k)\n if k in self.indexes:\n indexes[k] = self.indexes[k]\n\n return self._replace(variables, coord_names, dims, indexes=indexes)"},{"col":0,"comment":"null","endLoc":630,"header":"def diff_dataset_repr(a, b, compat)","id":800,"name":"diff_dataset_repr","nodeType":"Function","startLoc":610,"text":"def diff_dataset_repr(a, b, compat):\n summary = [\n \"Left and right {} objects are not {}\".format(\n type(a).__name__, _compat_to_str(compat)\n )\n ]\n\n col_width = _calculate_col_width(\n set(_get_col_items(a.variables) + _get_col_items(b.variables))\n )\n\n summary.append(diff_dim_summary(a, b))\n summary.append(diff_coords_repr(a.coords, b.coords, compat, col_width=col_width))\n summary.append(\n diff_data_vars_repr(a.data_vars, b.data_vars, compat, col_width=col_width)\n )\n\n if compat == \"identical\":\n summary.append(diff_attrs_repr(a.attrs, b.attrs, compat))\n\n return \"\\n\".join(summary)"},{"col":4,"comment":"null","endLoc":404,"header":"@property\n def dims(self)","id":801,"name":"dims","nodeType":"Function","startLoc":397,"text":"@property\n def dims(self):\n if self._dims is None:\n self._dims = self._obj.isel(\n **{self._group_dim: self._group_indices[0]}\n ).dims\n\n return self._dims"},{"col":0,"comment":"Get a virtual variable (e.g., 'time.year' or a MultiIndex level)\n from a dict of xarray.Variable objects (if possible)\n ","endLoc":161,"header":"def _get_virtual_variable(\n variables, key: Hashable, level_vars: Mapping = None, dim_sizes: Mapping = None\n) -> Tuple[Hashable, Hashable, Variable]","id":802,"name":"_get_virtual_variable","nodeType":"Function","startLoc":116,"text":"def _get_virtual_variable(\n variables, key: Hashable, level_vars: Mapping = None, dim_sizes: Mapping = None\n) -> Tuple[Hashable, Hashable, Variable]:\n \"\"\"Get a virtual variable (e.g., 'time.year' or a MultiIndex level)\n from a dict of xarray.Variable objects (if possible)\n \"\"\"\n if level_vars is None:\n level_vars = {}\n if dim_sizes is None:\n dim_sizes = {}\n\n if key in dim_sizes:\n data = pd.Index(range(dim_sizes[key]), name=key)\n variable = IndexVariable((key,), data)\n return key, key, variable\n\n if not isinstance(key, str):\n raise KeyError(key)\n\n split_key = key.split(\".\", 1)\n var_name: Optional[str]\n if len(split_key) == 2:\n ref_name, var_name = split_key\n elif len(split_key) == 1:\n ref_name, var_name = key, None\n else:\n raise KeyError(key)\n\n if ref_name in level_vars:\n dim_var = variables[level_vars[ref_name]]\n ref_var = dim_var.to_index_variable().get_level_variable(ref_name)\n else:\n ref_var = variables[ref_name]\n\n if var_name is None:\n virtual_var = ref_var\n var_name = key\n else:\n if _contains_datetime_like_objects(ref_var):\n ref_var = 
xr.DataArray(ref_var)\n data = getattr(ref_var.dt, var_name).data\n else:\n data = getattr(ref_var, var_name).data\n virtual_var = Variable(ref_var.dims, data)\n\n return ref_name, var_name, virtual_var"},{"col":4,"comment":"null","endLoc":411,"header":"@property\n def groups(self)","id":803,"name":"groups","nodeType":"Function","startLoc":406,"text":"@property\n def groups(self):\n # provided to mimic pandas.groupby\n if self._groups is None:\n self._groups = dict(zip(self._unique_coord.values, self._group_indices))\n return self._groups"},{"col":4,"comment":"null","endLoc":414,"header":"def __len__(self)","id":804,"name":"__len__","nodeType":"Function","startLoc":413,"text":"def __len__(self):\n return self._unique_coord.size"},{"col":4,"comment":"null","endLoc":417,"header":"def __iter__(self)","id":805,"name":"__iter__","nodeType":"Function","startLoc":416,"text":"def __iter__(self):\n return zip(self._unique_coord.values, self._iter_grouped())"},{"col":4,"comment":"Iterate over each element in this group","endLoc":444,"header":"def _iter_grouped(self)","id":806,"name":"_iter_grouped","nodeType":"Function","startLoc":441,"text":"def _iter_grouped(self):\n \"\"\"Iterate over each element in this group\"\"\"\n for indices in self._group_indices:\n yield self._obj.isel(**{self._group_dim: indices})"},{"col":0,"comment":"Explicitly broadcast any number of DataArray or Dataset objects against\n one another.\n\n xarray objects automatically broadcast against each other in arithmetic\n operations, so this function should not be necessary for normal use.\n\n If no change is needed, the input data is returned to the output without\n being copied.\n\n Parameters\n ----------\n *args : DataArray or Dataset objects\n Arrays to broadcast against each other.\n exclude : sequence of str, optional\n Dimensions that must not be broadcasted\n\n Returns\n -------\n broadcast : tuple of xarray objects\n The same data as the input arrays, but with additional dimensions\n inserted so that all data arrays have the same dimensions and shape.\n\n Examples\n --------\n\n Broadcast two data arrays against one another to fill out their dimensions:\n\n >>> a = xr.DataArray([1, 2, 3], dims='x')\n >>> b = xr.DataArray([5, 6], dims='y')\n >>> a\n \n array([1, 2, 3])\n Coordinates:\n * x (x) int64 0 1 2\n >>> b\n \n array([5, 6])\n Coordinates:\n * y (y) int64 0 1\n >>> a2, b2 = xr.broadcast(a, b)\n >>> a2\n \n array([[1, 1],\n [2, 2],\n [3, 3]])\n Coordinates:\n * x (x) int64 0 1 2\n * y (y) int64 0 1\n >>> b2\n \n array([[5, 6],\n [5, 6],\n [5, 6]])\n Coordinates:\n * y (y) int64 0 1\n * x (x) int64 0 1 2\n\n Fill out the dimensions of all data variables in a dataset:\n\n >>> ds = xr.Dataset({'a': a, 'b': b})\n >>> ds2, = xr.broadcast(ds) # use tuple unpacking to extract one dataset\n >>> ds2\n \n Dimensions: (x: 3, y: 2)\n Coordinates:\n * x (x) int64 0 1 2\n * y (y) int64 0 1\n Data variables:\n a (x, y) int64 1 1 2 2 3 3\n b (x, y) int64 5 6 5 6 5 6\n ","endLoc":728,"header":"def broadcast(*args, exclude=None)","id":807,"name":"broadcast","nodeType":"Function","startLoc":646,"text":"def broadcast(*args, exclude=None):\n \"\"\"Explicitly broadcast any number of DataArray or Dataset objects against\n one another.\n\n xarray objects automatically broadcast against each other in arithmetic\n operations, so this function should not be necessary for normal use.\n\n If no change is needed, the input data is returned to the output without\n being copied.\n\n Parameters\n ----------\n *args : DataArray or Dataset 
objects\n Arrays to broadcast against each other.\n exclude : sequence of str, optional\n Dimensions that must not be broadcasted\n\n Returns\n -------\n broadcast : tuple of xarray objects\n The same data as the input arrays, but with additional dimensions\n inserted so that all data arrays have the same dimensions and shape.\n\n Examples\n --------\n\n Broadcast two data arrays against one another to fill out their dimensions:\n\n >>> a = xr.DataArray([1, 2, 3], dims='x')\n >>> b = xr.DataArray([5, 6], dims='y')\n >>> a\n \n array([1, 2, 3])\n Coordinates:\n * x (x) int64 0 1 2\n >>> b\n \n array([5, 6])\n Coordinates:\n * y (y) int64 0 1\n >>> a2, b2 = xr.broadcast(a, b)\n >>> a2\n \n array([[1, 1],\n [2, 2],\n [3, 3]])\n Coordinates:\n * x (x) int64 0 1 2\n * y (y) int64 0 1\n >>> b2\n \n array([[5, 6],\n [5, 6],\n [5, 6]])\n Coordinates:\n * y (y) int64 0 1\n * x (x) int64 0 1 2\n\n Fill out the dimensions of all data variables in a dataset:\n\n >>> ds = xr.Dataset({'a': a, 'b': b})\n >>> ds2, = xr.broadcast(ds) # use tuple unpacking to extract one dataset\n >>> ds2\n \n Dimensions: (x: 3, y: 2)\n Coordinates:\n * x (x) int64 0 1 2\n * y (y) int64 0 1\n Data variables:\n a (x, y) int64 1 1 2 2 3 3\n b (x, y) int64 5 6 5 6 5 6\n \"\"\"\n\n if exclude is None:\n exclude = set()\n args = align(*args, join=\"outer\", copy=False, exclude=exclude)\n\n dims_map, common_coords = _get_broadcast_dims_map_common_coords(args, exclude)\n result = []\n for arg in args:\n result.append(_broadcast_helper(arg, exclude, dims_map, common_coords))\n\n return tuple(result)"},{"col":4,"comment":"null","endLoc":425,"header":"def __repr__(self)","id":808,"name":"__repr__","nodeType":"Function","startLoc":419,"text":"def __repr__(self):\n return \"%s, grouped over %r \\n%r groups with labels %s.\" % (\n self.__class__.__name__,\n self._unique_coord.name,\n self._unique_coord.size,\n \", \".join(format_array_flat(self._unique_coord, 30).split()),\n )"},{"col":4,"comment":"null","endLoc":456,"header":"def _infer_concat_args(self, applied_example)","id":809,"name":"_infer_concat_args","nodeType":"Function","startLoc":446,"text":"def _infer_concat_args(self, applied_example):\n if self._group_dim in applied_example.dims:\n coord = self._group\n positions = self._group_indices\n else:\n coord = self._unique_coord\n positions = None\n dim, = coord.dims\n if isinstance(coord, _DummyGroup):\n coord = None\n return coord, dim, positions"},{"col":4,"comment":"null","endLoc":467,"header":"@staticmethod\n def _binary_op(f, reflexive=False, **ignored_kwargs)","id":810,"name":"_binary_op","nodeType":"Function","startLoc":458,"text":"@staticmethod\n def _binary_op(f, reflexive=False, **ignored_kwargs):\n @functools.wraps(f)\n def func(self, other):\n g = f if not reflexive else lambda x, y: f(y, x)\n applied = self._yield_binary_applied(g, other)\n combined = self._combine(applied)\n return combined\n\n return func"},{"col":40,"endLoc":462,"id":811,"nodeType":"Lambda","startLoc":462,"text":"lambda x, y: f(y, x)"},{"col":4,"comment":"Filter elements from this object according to a condition.\n\n This operation follows the normal broadcasting and alignment rules that\n xarray uses for binary arithmetic.\n\n Parameters\n ----------\n cond : DataArray or Dataset with boolean dtype\n Locations at which to preserve this object's values.\n other : scalar, DataArray or Dataset, optional\n Value to use for locations in this object where ``cond`` is False.\n By default, these locations filled with NA.\n drop : boolean, optional\n If 
True, coordinate labels that only correspond to False values of\n the condition are dropped from the result. Mutually exclusive with\n ``other``.\n\n Returns\n -------\n Same type as caller.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> a = xr.DataArray(np.arange(25).reshape(5, 5), dims=('x', 'y'))\n >>> a.where(a.x + a.y < 4)\n \n array([[ 0., 1., 2., 3., nan],\n [ 5., 6., 7., nan, nan],\n [ 10., 11., nan, nan, nan],\n [ 15., nan, nan, nan, nan],\n [ nan, nan, nan, nan, nan]])\n Dimensions without coordinates: x, y\n >>> a.where(a.x + a.y < 5, -1)\n \n array([[ 0, 1, 2, 3, 4],\n [ 5, 6, 7, 8, -1],\n [10, 11, 12, -1, -1],\n [15, 16, -1, -1, -1],\n [20, -1, -1, -1, -1]])\n Dimensions without coordinates: x, y\n >>> a.where(a.x + a.y < 4, drop=True)\n \n array([[ 0., 1., 2., 3.],\n [ 5., 6., 7., nan],\n [ 10., 11., nan, nan],\n [ 15., nan, nan, nan]])\n Dimensions without coordinates: x, y\n\n See also\n --------\n numpy.where : corresponding numpy function\n where : equivalent function\n ","endLoc":1129,"header":"def where(self, cond, other=dtypes.NA, drop: bool = False)","id":812,"name":"where","nodeType":"Function","startLoc":1043,"text":"def where(self, cond, other=dtypes.NA, drop: bool = False):\n \"\"\"Filter elements from this object according to a condition.\n\n This operation follows the normal broadcasting and alignment rules that\n xarray uses for binary arithmetic.\n\n Parameters\n ----------\n cond : DataArray or Dataset with boolean dtype\n Locations at which to preserve this object's values.\n other : scalar, DataArray or Dataset, optional\n Value to use for locations in this object where ``cond`` is False.\n By default, these locations filled with NA.\n drop : boolean, optional\n If True, coordinate labels that only correspond to False values of\n the condition are dropped from the result. 
Mutually exclusive with\n ``other``.\n\n Returns\n -------\n Same type as caller.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> a = xr.DataArray(np.arange(25).reshape(5, 5), dims=('x', 'y'))\n >>> a.where(a.x + a.y < 4)\n \n array([[ 0., 1., 2., 3., nan],\n [ 5., 6., 7., nan, nan],\n [ 10., 11., nan, nan, nan],\n [ 15., nan, nan, nan, nan],\n [ nan, nan, nan, nan, nan]])\n Dimensions without coordinates: x, y\n >>> a.where(a.x + a.y < 5, -1)\n \n array([[ 0, 1, 2, 3, 4],\n [ 5, 6, 7, 8, -1],\n [10, 11, 12, -1, -1],\n [15, 16, -1, -1, -1],\n [20, -1, -1, -1, -1]])\n Dimensions without coordinates: x, y\n >>> a.where(a.x + a.y < 4, drop=True)\n \n array([[ 0., 1., 2., 3.],\n [ 5., 6., 7., nan],\n [ 10., 11., nan, nan],\n [ 15., nan, nan, nan]])\n Dimensions without coordinates: x, y\n\n See also\n --------\n numpy.where : corresponding numpy function\n where : equivalent function\n \"\"\"\n from .alignment import align\n from .dataarray import DataArray\n from .dataset import Dataset\n\n if drop:\n if other is not dtypes.NA:\n raise ValueError(\"cannot set `other` if drop=True\")\n\n if not isinstance(cond, (Dataset, DataArray)):\n raise TypeError(\n \"cond argument is %r but must be a %r or %r\"\n % (cond, Dataset, DataArray)\n )\n\n # align so we can use integer indexing\n self, cond = align(self, cond)\n\n # get cond with the minimal size needed for the Dataset\n if isinstance(cond, Dataset):\n clipcond = cond.to_array().any(\"variable\")\n else:\n clipcond = cond\n\n # clip the data corresponding to coordinate dims that are not used\n nonzeros = zip(clipcond.dims, np.nonzero(clipcond.values))\n indexers = {k: np.unique(v) for k, v in nonzeros}\n\n self = self.isel(**indexers)\n cond = cond.isel(**indexers)\n\n return ops.where_method(self, cond, other)"},{"col":4,"comment":"null","endLoc":493,"header":"def _yield_binary_applied(self, func, other)","id":813,"name":"_yield_binary_applied","nodeType":"Function","startLoc":469,"text":"def _yield_binary_applied(self, func, other):\n dummy = None\n\n for group_value, obj in self:\n try:\n other_sel = other.sel(**{self._group.name: group_value})\n except AttributeError:\n raise TypeError(\n \"GroupBy objects only support binary ops \"\n \"when the other argument is a Dataset or \"\n \"DataArray\"\n )\n except (KeyError, ValueError):\n if self._group.name not in other.dims:\n raise ValueError(\n \"incompatible dimensions for a grouped \"\n \"binary operation: the group variable %r \"\n \"is not a dimension on the other argument\" % self._group.name\n )\n if dummy is None:\n dummy = _dummy_copy(other)\n other_sel = dummy\n\n result = func(obj, other_sel)\n yield result"},{"col":4,"comment":"null","endLoc":158,"header":"def get_variables(self)","id":814,"name":"get_variables","nodeType":"Function","startLoc":155,"text":"def get_variables(self):\n return FrozenDict(\n (k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()\n )"},{"col":4,"comment":"Our index contained empty groups (e.g., from a resampling). If we\n reduced on that dimension, we want to restore the full index.\n ","endLoc":502,"header":"def _maybe_restore_empty_groups(self, combined)","id":815,"name":"_maybe_restore_empty_groups","nodeType":"Function","startLoc":495,"text":"def _maybe_restore_empty_groups(self, combined):\n \"\"\"Our index contained empty groups (e.g., from a resampling). 
If we\n reduced on that dimension, we want to restore the full index.\n \"\"\"\n if self._full_index is not None and self._group.name in combined.dims:\n indexers = {self._group.name: self._full_index}\n combined = combined.reindex(**indexers)\n return combined"},{"col":4,"comment":"This gets called if we are applying on an array with a\n multidimensional group.","endLoc":512,"header":"def _maybe_unstack(self, obj)","id":816,"name":"_maybe_unstack","nodeType":"Function","startLoc":504,"text":"def _maybe_unstack(self, obj):\n \"\"\"This gets called if we are applying on an array with a\n multidimensional group.\"\"\"\n if self._stacked_dim is not None and self._stacked_dim in obj.dims:\n obj = obj.unstack(self._stacked_dim)\n for dim in self._inserted_dims:\n if dim in obj.coords:\n del obj.coords[dim]\n return obj"},{"col":0,"comment":"null","endLoc":608,"header":"def _get_broadcast_dims_map_common_coords(args, exclude)","id":817,"name":"_get_broadcast_dims_map_common_coords","nodeType":"Function","startLoc":597,"text":"def _get_broadcast_dims_map_common_coords(args, exclude):\n\n common_coords = {}\n dims_map = {}\n for arg in args:\n for dim in arg.dims:\n if dim not in common_coords and dim not in exclude:\n dims_map[dim] = arg.sizes[dim]\n if dim in arg.coords:\n common_coords[dim] = arg.coords[dim].variable\n\n return dims_map, common_coords"},{"col":0,"comment":"null","endLoc":643,"header":"def _broadcast_helper(arg, exclude, dims_map, common_coords)","id":818,"name":"_broadcast_helper","nodeType":"Function","startLoc":611,"text":"def _broadcast_helper(arg, exclude, dims_map, common_coords):\n\n from .dataarray import DataArray\n from .dataset import Dataset\n\n def _set_dims(var):\n # Add excluded dims to a copy of dims_map\n var_dims_map = dims_map.copy()\n for dim in exclude:\n with suppress(ValueError):\n # ignore dim not in var.dims\n var_dims_map[dim] = var.shape[var.dims.index(dim)]\n\n return var.set_dims(var_dims_map)\n\n def _broadcast_array(array):\n data = _set_dims(array.variable)\n coords = dict(array.coords)\n coords.update(common_coords)\n return DataArray(data, coords, data.dims, name=array.name, attrs=array.attrs)\n\n def _broadcast_dataset(ds):\n data_vars = {k: _set_dims(ds.variables[k]) for k in ds.data_vars}\n coords = dict(ds.coords)\n coords.update(common_coords)\n return Dataset(data_vars, coords, ds.attrs)\n\n if isinstance(arg, DataArray):\n return _broadcast_array(arg)\n elif isinstance(arg, Dataset):\n return _broadcast_dataset(arg)\n else:\n raise ValueError(\"all input must be Dataset or DataArray objects\")"},{"col":0,"comment":"Check if a variable contains datetime like objects (either\n np.datetime64, np.timedelta64, or cftime.datetime)\n ","endLoc":1461,"header":"def _contains_datetime_like_objects(var) -> bool","id":819,"name":"_contains_datetime_like_objects","nodeType":"Function","startLoc":1457,"text":"def _contains_datetime_like_objects(var) -> bool:\n \"\"\"Check if a variable contains datetime like objects (either\n np.datetime64, np.timedelta64, or cftime.datetime)\n \"\"\"\n return is_np_datetime_like(var.dtype) or contains_cftime_datetimes(var)"},{"col":0,"comment":"Check if a dtype is a subclass of the numpy datetime types\n ","endLoc":1429,"header":"def is_np_datetime_like(dtype: DTypeLike) -> bool","id":820,"name":"is_np_datetime_like","nodeType":"Function","startLoc":1426,"text":"def is_np_datetime_like(dtype: DTypeLike) -> bool:\n \"\"\"Check if a dtype is a subclass of the numpy datetime types\n \"\"\"\n return np.issubdtype(dtype, 
np.datetime64) or np.issubdtype(dtype, np.timedelta64)"},{"attributeType":"null","col":16,"comment":"null","endLoc":8,"id":821,"name":"np","nodeType":"Attribute","startLoc":8,"text":"np"},{"attributeType":"null","col":17,"comment":"null","endLoc":9,"id":822,"name":"pd","nodeType":"Attribute","startLoc":9,"text":"pd"},{"attributeType":"null","col":0,"comment":"null","endLoc":209,"id":823,"name":"_KNOWN_TYPE_REPRS","nodeType":"Attribute","startLoc":209,"text":"_KNOWN_TYPE_REPRS"},{"attributeType":"null","col":0,"comment":"null","endLoc":325,"id":824,"name":"EMPTY_REPR","nodeType":"Attribute","startLoc":325,"text":"EMPTY_REPR"},{"attributeType":"null","col":0,"comment":"null","endLoc":362,"id":825,"name":"data_vars_repr","nodeType":"Attribute","startLoc":362,"text":"data_vars_repr"},{"attributeType":"null","col":0,"comment":"null","endLoc":367,"id":826,"name":"attrs_repr","nodeType":"Attribute","startLoc":367,"text":"attrs_repr"},{"attributeType":"null","col":0,"comment":"null","endLoc":558,"id":827,"name":"diff_coords_repr","nodeType":"Attribute","startLoc":558,"text":"diff_coords_repr"},{"attributeType":"null","col":0,"comment":"null","endLoc":563,"id":828,"name":"diff_data_vars_repr","nodeType":"Attribute","startLoc":563,"text":"diff_data_vars_repr"},{"attributeType":"null","col":0,"comment":"null","endLoc":568,"id":829,"name":"diff_attrs_repr","nodeType":"Attribute","startLoc":568,"text":"diff_attrs_repr"},{"col":0,"comment":"","endLoc":2,"header":"formatting.py#","id":830,"name":"","nodeType":"Function","startLoc":1,"text":"\"\"\"String formatting routines for __repr__.\n\"\"\"\n\n_KNOWN_TYPE_REPRS = {np.ndarray: \"np.ndarray\"}\n\nwith contextlib.suppress(ImportError):\n import sparse\n\n _KNOWN_TYPE_REPRS[sparse.COO] = \"sparse.COO\"\n\nEMPTY_REPR = \" *empty*\"\n\ndata_vars_repr = functools.partial(\n _mapping_repr, title=\"Data variables\", summarizer=summarize_datavar\n)\n\nattrs_repr = functools.partial(\n _mapping_repr, title=\"Attributes\", summarizer=summarize_attr\n)\n\ndiff_coords_repr = functools.partial(\n _diff_mapping_repr, title=\"Coordinates\", summarizer=summarize_coord\n)\n\ndiff_data_vars_repr = functools.partial(\n _diff_mapping_repr, title=\"Data variables\", summarizer=summarize_datavar\n)\n\ndiff_attrs_repr = functools.partial(\n _diff_mapping_repr, title=\"Attributes\", summarizer=summarize_attr\n)"},{"col":4,"comment":"Fill missing values in this object by group.\n\n This operation follows the normal broadcasting and alignment rules that\n xarray uses for binary arithmetic, except the result is aligned to this\n object (``join='left'``) instead of aligned to the intersection of\n index coordinates (``join='inner'``).\n\n Parameters\n ----------\n value : valid type for the grouped object's fillna method\n Used to fill all matching missing values by group.\n\n Returns\n -------\n same type as the grouped object\n\n See also\n --------\n Dataset.fillna\n DataArray.fillna\n ","endLoc":537,"header":"def fillna(self, value)","id":832,"name":"fillna","nodeType":"Function","startLoc":514,"text":"def fillna(self, value):\n \"\"\"Fill missing values in this object by group.\n\n This operation follows the normal broadcasting and alignment rules that\n xarray uses for binary arithmetic, except the result is aligned to this\n object (``join='left'``) instead of aligned to the intersection of\n index coordinates (``join='inner'``).\n\n Parameters\n ----------\n value : valid type for the grouped object's fillna method\n Used to fill all matching missing values by group.\n\n 
Returns\n -------\n same type as the grouped object\n\n See also\n --------\n Dataset.fillna\n DataArray.fillna\n \"\"\"\n out = ops.fillna(self, value)\n return out"},{"col":4,"comment":"null","endLoc":161,"header":"def get_attrs(self)","id":833,"name":"get_attrs","nodeType":"Function","startLoc":160,"text":"def get_attrs(self):\n return Frozen(_decode_attrs(self.ds._attributes))"},{"col":0,"comment":"Fill missing values in this object with data from the other object.\n Follows normal broadcasting and alignment rules.\n\n Parameters\n ----------\n join : {'outer', 'inner', 'left', 'right'}, optional\n Method for joining the indexes of the passed objects along each\n dimension\n - 'outer': use the union of object indexes\n - 'inner': use the intersection of object indexes\n - 'left': use indexes from the first object with each dimension\n - 'right': use indexes from the last object with each dimension\n - 'exact': raise `ValueError` instead of aligning when indexes to be\n aligned are not equal\n dataset_join : {'outer', 'inner', 'left', 'right'}, optional\n Method for joining variables of Dataset objects with mismatched\n data variables.\n - 'outer': take variables from both Dataset objects\n - 'inner': take only overlapped variables\n - 'left': take only variables from the first object\n - 'right': take only variables from the last object\n ","endLoc":171,"header":"def fillna(data, other, join=\"left\", dataset_join=\"left\")","id":834,"name":"fillna","nodeType":"Function","startLoc":137,"text":"def fillna(data, other, join=\"left\", dataset_join=\"left\"):\n \"\"\"Fill missing values in this object with data from the other object.\n Follows normal broadcasting and alignment rules.\n\n Parameters\n ----------\n join : {'outer', 'inner', 'left', 'right'}, optional\n Method for joining the indexes of the passed objects along each\n dimension\n - 'outer': use the union of object indexes\n - 'inner': use the intersection of object indexes\n - 'left': use indexes from the first object with each dimension\n - 'right': use indexes from the last object with each dimension\n - 'exact': raise `ValueError` instead of aligning when indexes to be\n aligned are not equal\n dataset_join : {'outer', 'inner', 'left', 'right'}, optional\n Method for joining variables of Dataset objects with mismatched\n data variables.\n - 'outer': take variables from both Dataset objects\n - 'inner': take only overlapped variables\n - 'left': take only variables from the first object\n - 'right': take only variables from the last object\n \"\"\"\n from .computation import apply_ufunc\n\n return apply_ufunc(\n duck_array_ops.fillna,\n data,\n other,\n join=join,\n dask=\"allowed\",\n dataset_join=dataset_join,\n dataset_fill_value=np.nan,\n keep_attrs=True,\n )"},{"col":4,"comment":"null","endLoc":164,"header":"def get_dimensions(self)","id":835,"name":"get_dimensions","nodeType":"Function","startLoc":163,"text":"def get_dimensions(self):\n return Frozen(self.ds.dimensions)"},{"col":4,"comment":"null","endLoc":171,"header":"def get_encoding(self)","id":836,"name":"get_encoding","nodeType":"Function","startLoc":166,"text":"def get_encoding(self):\n encoding = {}\n encoding[\"unlimited_dims\"] = {\n k for k, v in self.ds.dimensions.items() if v is None\n }\n return encoding"},{"col":4,"comment":"null","endLoc":179,"header":"def set_dimension(self, name, length, is_unlimited=False)","id":837,"name":"set_dimension","nodeType":"Function","startLoc":173,"text":"def set_dimension(self, name, length, is_unlimited=False):\n if 
name in self.ds.dimensions:\n raise ValueError(\n \"%s does not support modifying dimensions\" % type(self).__name__\n )\n dim_length = length if not is_unlimited else None\n self.ds.createDimension(name, dim_length)"},{"col":4,"comment":"null","endLoc":183,"header":"def _validate_attr_key(self, key)","id":838,"name":"_validate_attr_key","nodeType":"Function","startLoc":181,"text":"def _validate_attr_key(self, key):\n if not is_valid_nc3_name(key):\n raise ValueError(\"Not a valid attribute name\")"},{"col":4,"comment":"Return elements from `self` or `other` depending on `cond`.\n\n Parameters\n ----------\n cond : DataArray or Dataset with boolean dtype\n Locations at which to preserve this objects values.\n other : scalar, DataArray or Dataset, optional\n Value to use for locations in this object where ``cond`` is False.\n By default, inserts missing values.\n\n Returns\n -------\n same type as the grouped object\n\n See also\n --------\n Dataset.where\n ","endLoc":558,"header":"def where(self, cond, other=dtypes.NA)","id":839,"name":"where","nodeType":"Function","startLoc":539,"text":"def where(self, cond, other=dtypes.NA):\n \"\"\"Return elements from `self` or `other` depending on `cond`.\n\n Parameters\n ----------\n cond : DataArray or Dataset with boolean dtype\n Locations at which to preserve this objects values.\n other : scalar, DataArray or Dataset, optional\n Value to use for locations in this object where ``cond`` is False.\n By default, inserts missing values.\n\n Returns\n -------\n same type as the grouped object\n\n See also\n --------\n Dataset.where\n \"\"\"\n return ops.where_method(self, cond, other)"},{"fileName":"netCDF4_.py","filePath":"xarray/backends","id":840,"nodeType":"File","text":"import functools\nimport operator\nfrom contextlib import suppress\n\nimport numpy as np\n\nfrom .. 
import Variable, coding\nfrom ..coding.variables import pop_to\nfrom ..core import indexing\nfrom ..core.utils import FrozenDict, is_remote_uri\nfrom .common import (\n BackendArray,\n WritableCFDataStore,\n find_root_and_group,\n robust_getitem,\n)\nfrom .file_manager import CachingFileManager, DummyFileManager\nfrom .locks import HDF5_LOCK, NETCDFC_LOCK, combine_locks, ensure_lock, get_write_lock\nfrom .netcdf3 import encode_nc3_attr_value, encode_nc3_variable\n\n# This lookup table maps from dtype.byteorder to a readable endian\n# string used by netCDF4.\n_endian_lookup = {\"=\": \"native\", \">\": \"big\", \"<\": \"little\", \"|\": \"native\"}\n\n\nNETCDF4_PYTHON_LOCK = combine_locks([NETCDFC_LOCK, HDF5_LOCK])\n\n\nclass BaseNetCDF4Array(BackendArray):\n __slots__ = (\"datastore\", \"dtype\", \"shape\", \"variable_name\")\n\n def __init__(self, variable_name, datastore):\n self.datastore = datastore\n self.variable_name = variable_name\n\n array = self.get_array()\n self.shape = array.shape\n\n dtype = array.dtype\n if dtype is str:\n # use object dtype because that's the only way in numpy to\n # represent variable length strings; it also prevents automatic\n # string concatenation via conventions.decode_cf_variable\n dtype = np.dtype(\"O\")\n self.dtype = dtype\n\n def __setitem__(self, key, value):\n with self.datastore.lock:\n data = self.get_array(needs_lock=False)\n data[key] = value\n if self.datastore.autoclose:\n self.datastore.close(needs_lock=False)\n\n def get_array(self, needs_lock=True):\n raise NotImplementedError(\"Virtual Method\")\n\n\nclass NetCDF4ArrayWrapper(BaseNetCDF4Array):\n __slots__ = ()\n\n def get_array(self, needs_lock=True):\n ds = self.datastore._acquire(needs_lock)\n variable = ds.variables[self.variable_name]\n variable.set_auto_maskandscale(False)\n # only added in netCDF4-python v1.2.8\n with suppress(AttributeError):\n variable.set_auto_chartostring(False)\n return variable\n\n def __getitem__(self, key):\n return indexing.explicit_indexing_adapter(\n key, self.shape, indexing.IndexingSupport.OUTER, self._getitem\n )\n\n def _getitem(self, key):\n if self.datastore.is_remote: # pragma: no cover\n getitem = functools.partial(robust_getitem, catch=RuntimeError)\n else:\n getitem = operator.getitem\n\n try:\n with self.datastore.lock:\n original_array = self.get_array(needs_lock=False)\n array = getitem(original_array, key)\n except IndexError:\n # Catch IndexError in netCDF4 and return a more informative\n # error message. This is most often called when an unsorted\n # indexer is used before the data is loaded from disk.\n msg = (\n \"The indexing operation you are attempting to perform \"\n \"is not valid on netCDF4.Variable object. Try loading \"\n \"your data into memory first by calling .load().\"\n )\n raise IndexError(msg)\n return array\n\n\ndef _encode_nc4_variable(var):\n for coder in [\n coding.strings.EncodedStringCoder(allows_unicode=True),\n coding.strings.CharacterArrayCoder(),\n ]:\n var = coder.encode(var)\n return var\n\n\ndef _check_encoding_dtype_is_vlen_string(dtype):\n if dtype is not str:\n raise AssertionError( # pragma: no cover\n \"unexpected dtype encoding %r. 
This shouldn't happen: please \"\n \"file a bug report at github.com/pydata/xarray\" % dtype\n )\n\n\ndef _get_datatype(var, nc_format=\"NETCDF4\", raise_on_invalid_encoding=False):\n if nc_format == \"NETCDF4\":\n datatype = _nc4_dtype(var)\n else:\n if \"dtype\" in var.encoding:\n encoded_dtype = var.encoding[\"dtype\"]\n _check_encoding_dtype_is_vlen_string(encoded_dtype)\n if raise_on_invalid_encoding:\n raise ValueError(\n \"encoding dtype=str for vlen strings is only supported \"\n \"with format='NETCDF4'.\"\n )\n datatype = var.dtype\n return datatype\n\n\ndef _nc4_dtype(var):\n if \"dtype\" in var.encoding:\n dtype = var.encoding.pop(\"dtype\")\n _check_encoding_dtype_is_vlen_string(dtype)\n elif coding.strings.is_unicode_dtype(var.dtype):\n dtype = str\n elif var.dtype.kind in [\"i\", \"u\", \"f\", \"c\", \"S\"]:\n dtype = var.dtype\n else:\n raise ValueError(\"unsupported dtype for netCDF4 variable: {}\".format(var.dtype))\n return dtype\n\n\ndef _netcdf4_create_group(dataset, name):\n return dataset.createGroup(name)\n\n\ndef _nc4_require_group(ds, group, mode, create_group=_netcdf4_create_group):\n if group in {None, \"\", \"/\"}:\n # use the root group\n return ds\n else:\n # make sure it's a string\n if not isinstance(group, str):\n raise ValueError(\"group must be a string or None\")\n # support path-like syntax\n path = group.strip(\"/\").split(\"/\")\n for key in path:\n try:\n ds = ds.groups[key]\n except KeyError as e:\n if mode != \"r\":\n ds = create_group(ds, key)\n else:\n # wrap error to provide slightly more helpful message\n raise OSError(\"group not found: %s\" % key, e)\n return ds\n\n\ndef _ensure_fill_value_valid(data, attributes):\n # work around for netCDF4/scipy issue where _FillValue has the wrong type:\n # https://github.com/Unidata/netcdf4-python/issues/271\n if data.dtype.kind == \"S\" and \"_FillValue\" in attributes:\n attributes[\"_FillValue\"] = np.string_(attributes[\"_FillValue\"])\n\n\ndef _force_native_endianness(var):\n # possible values for byteorder are:\n # = native\n # < little-endian\n # > big-endian\n # | not applicable\n # Below we check if the data type is not native or NA\n if var.dtype.byteorder not in [\"=\", \"|\"]:\n # if endianness is specified explicitly, convert to the native type\n data = var.data.astype(var.dtype.newbyteorder(\"=\"))\n var = Variable(var.dims, data, var.attrs, var.encoding)\n # if endian exists, remove it from the encoding.\n var.encoding.pop(\"endian\", None)\n # check to see if encoding has a value for endian its 'native'\n if not var.encoding.get(\"endian\", \"native\") == \"native\":\n raise NotImplementedError(\n \"Attempt to write non-native endian type, \"\n \"this is not supported by the netCDF4 \"\n \"python library.\"\n )\n return var\n\n\ndef _extract_nc4_variable_encoding(\n variable,\n raise_on_invalid=False,\n lsd_okay=True,\n h5py_okay=False,\n backend=\"netCDF4\",\n unlimited_dims=None,\n):\n if unlimited_dims is None:\n unlimited_dims = ()\n\n encoding = variable.encoding.copy()\n\n safe_to_drop = {\"source\", \"original_shape\"}\n valid_encodings = {\n \"zlib\",\n \"complevel\",\n \"fletcher32\",\n \"contiguous\",\n \"chunksizes\",\n \"shuffle\",\n \"_FillValue\",\n \"dtype\",\n }\n if lsd_okay:\n valid_encodings.add(\"least_significant_digit\")\n if h5py_okay:\n valid_encodings.add(\"compression\")\n valid_encodings.add(\"compression_opts\")\n\n if not raise_on_invalid and encoding.get(\"chunksizes\") is not None:\n # It's possible to get encoded chunksizes larger than a dimension size\n # 
if the original file had an unlimited dimension. This is problematic\n # if the new file no longer has an unlimited dimension.\n chunksizes = encoding[\"chunksizes\"]\n chunks_too_big = any(\n c > d and dim not in unlimited_dims\n for c, d, dim in zip(chunksizes, variable.shape, variable.dims)\n )\n has_original_shape = \"original_shape\" in encoding\n changed_shape = (\n has_original_shape and encoding.get(\"original_shape\") != variable.shape\n )\n if chunks_too_big or changed_shape:\n del encoding[\"chunksizes\"]\n\n var_has_unlim_dim = any(dim in unlimited_dims for dim in variable.dims)\n if not raise_on_invalid and var_has_unlim_dim and \"contiguous\" in encoding.keys():\n del encoding[\"contiguous\"]\n\n for k in safe_to_drop:\n if k in encoding:\n del encoding[k]\n\n if raise_on_invalid:\n invalid = [k for k in encoding if k not in valid_encodings]\n if invalid:\n raise ValueError(\n \"unexpected encoding parameters for %r backend: %r. Valid \"\n \"encodings are: %r\" % (backend, invalid, valid_encodings)\n )\n else:\n for k in list(encoding):\n if k not in valid_encodings:\n del encoding[k]\n\n return encoding\n\n\ndef _is_list_of_strings(value):\n if np.asarray(value).dtype.kind in [\"U\", \"S\"] and np.asarray(value).size > 1:\n return True\n else:\n return False\n\n\nclass NetCDF4DataStore(WritableCFDataStore):\n \"\"\"Store for reading and writing data via the Python-NetCDF4 library.\n\n This store supports NetCDF3, NetCDF4 and OpenDAP datasets.\n \"\"\"\n\n __slots__ = (\n \"autoclose\",\n \"format\",\n \"is_remote\",\n \"lock\",\n \"_filename\",\n \"_group\",\n \"_manager\",\n \"_mode\",\n )\n\n def __init__(\n self, manager, group=None, mode=None, lock=NETCDF4_PYTHON_LOCK, autoclose=False\n ):\n import netCDF4\n\n if isinstance(manager, netCDF4.Dataset):\n if group is None:\n root, group = find_root_and_group(manager)\n else:\n if not type(manager) is netCDF4.Dataset:\n raise ValueError(\n \"must supply a root netCDF4.Dataset if the group \"\n \"argument is provided\"\n )\n root = manager\n manager = DummyFileManager(root)\n\n self._manager = manager\n self._group = group\n self._mode = mode\n self.format = self.ds.data_model\n self._filename = self.ds.filepath()\n self.is_remote = is_remote_uri(self._filename)\n self.lock = ensure_lock(lock)\n self.autoclose = autoclose\n\n @classmethod\n def open(\n cls,\n filename,\n mode=\"r\",\n format=\"NETCDF4\",\n group=None,\n clobber=True,\n diskless=False,\n persist=False,\n lock=None,\n lock_maker=None,\n autoclose=False,\n ):\n import netCDF4\n\n if format is None:\n format = \"NETCDF4\"\n\n if lock is None:\n if mode == \"r\":\n if is_remote_uri(filename):\n lock = NETCDFC_LOCK\n else:\n lock = NETCDF4_PYTHON_LOCK\n else:\n if format is None or format.startswith(\"NETCDF4\"):\n base_lock = NETCDF4_PYTHON_LOCK\n else:\n base_lock = NETCDFC_LOCK\n lock = combine_locks([base_lock, get_write_lock(filename)])\n\n kwargs = dict(\n clobber=clobber, diskless=diskless, persist=persist, format=format\n )\n manager = CachingFileManager(\n netCDF4.Dataset, filename, mode=mode, kwargs=kwargs\n )\n return cls(manager, group=group, mode=mode, lock=lock, autoclose=autoclose)\n\n def _acquire(self, needs_lock=True):\n with self._manager.acquire_context(needs_lock) as root:\n ds = _nc4_require_group(root, self._group, self._mode)\n return ds\n\n @property\n def ds(self):\n return self._acquire()\n\n def open_store_variable(self, name, var):\n dimensions = var.dimensions\n data = indexing.LazilyOuterIndexedArray(NetCDF4ArrayWrapper(name, self))\n 
attributes = {k: var.getncattr(k) for k in var.ncattrs()}\n _ensure_fill_value_valid(data, attributes)\n # netCDF4 specific encoding; save _FillValue for later\n encoding = {}\n filters = var.filters()\n if filters is not None:\n encoding.update(filters)\n chunking = var.chunking()\n if chunking is not None:\n if chunking == \"contiguous\":\n encoding[\"contiguous\"] = True\n encoding[\"chunksizes\"] = None\n else:\n encoding[\"contiguous\"] = False\n encoding[\"chunksizes\"] = tuple(chunking)\n # TODO: figure out how to round-trip \"endian-ness\" without raising\n # warnings from netCDF4\n # encoding['endian'] = var.endian()\n pop_to(attributes, encoding, \"least_significant_digit\")\n # save source so __repr__ can detect if it's local or not\n encoding[\"source\"] = self._filename\n encoding[\"original_shape\"] = var.shape\n encoding[\"dtype\"] = var.dtype\n\n return Variable(dimensions, data, attributes, encoding)\n\n def get_variables(self):\n dsvars = FrozenDict(\n (k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()\n )\n return dsvars\n\n def get_attrs(self):\n attrs = FrozenDict((k, self.ds.getncattr(k)) for k in self.ds.ncattrs())\n return attrs\n\n def get_dimensions(self):\n dims = FrozenDict((k, len(v)) for k, v in self.ds.dimensions.items())\n return dims\n\n def get_encoding(self):\n encoding = {}\n encoding[\"unlimited_dims\"] = {\n k for k, v in self.ds.dimensions.items() if v.isunlimited()\n }\n return encoding\n\n def set_dimension(self, name, length, is_unlimited=False):\n dim_length = length if not is_unlimited else None\n self.ds.createDimension(name, size=dim_length)\n\n def set_attribute(self, key, value):\n if self.format != \"NETCDF4\":\n value = encode_nc3_attr_value(value)\n if _is_list_of_strings(value):\n # encode as NC_STRING if attr is list of strings\n self.ds.setncattr_string(key, value)\n else:\n self.ds.setncattr(key, value)\n\n def encode_variable(self, variable):\n variable = _force_native_endianness(variable)\n if self.format == \"NETCDF4\":\n variable = _encode_nc4_variable(variable)\n else:\n variable = encode_nc3_variable(variable)\n return variable\n\n def prepare_variable(\n self, name, variable, check_encoding=False, unlimited_dims=None\n ):\n datatype = _get_datatype(\n variable, self.format, raise_on_invalid_encoding=check_encoding\n )\n attrs = variable.attrs.copy()\n\n fill_value = attrs.pop(\"_FillValue\", None)\n\n if datatype is str and fill_value is not None:\n raise NotImplementedError(\n \"netCDF4 does not yet support setting a fill value for \"\n \"variable-length strings \"\n \"(https://github.com/Unidata/netcdf4-python/issues/730). 
\"\n \"Either remove '_FillValue' from encoding on variable %r \"\n \"or set {'dtype': 'S1'} in encoding to use the fixed width \"\n \"NC_CHAR type.\" % name\n )\n\n encoding = _extract_nc4_variable_encoding(\n variable, raise_on_invalid=check_encoding, unlimited_dims=unlimited_dims\n )\n\n if name in self.ds.variables:\n nc4_var = self.ds.variables[name]\n else:\n nc4_var = self.ds.createVariable(\n varname=name,\n datatype=datatype,\n dimensions=variable.dims,\n zlib=encoding.get(\"zlib\", False),\n complevel=encoding.get(\"complevel\", 4),\n shuffle=encoding.get(\"shuffle\", True),\n fletcher32=encoding.get(\"fletcher32\", False),\n contiguous=encoding.get(\"contiguous\", False),\n chunksizes=encoding.get(\"chunksizes\"),\n endian=\"native\",\n least_significant_digit=encoding.get(\"least_significant_digit\"),\n fill_value=fill_value,\n )\n\n nc4_var.setncatts(attrs)\n\n target = NetCDF4ArrayWrapper(name, self)\n\n return target, variable.data\n\n def sync(self):\n self.ds.sync()\n\n def close(self, **kwargs):\n self._manager.close(**kwargs)\n"},{"col":4,"comment":"null","endLoc":188,"header":"def set_attribute(self, key, value)","id":841,"name":"set_attribute","nodeType":"Function","startLoc":185,"text":"def set_attribute(self, key, value):\n self._validate_attr_key(key)\n value = encode_nc3_attr_value(value)\n setattr(self.ds, key, value)"},{"col":0,"comment":"Return elements from `self` or `other` depending on `cond`.\n\n Parameters\n ----------\n cond : DataArray or Dataset with boolean dtype\n Locations at which to preserve this objects values.\n other : scalar, DataArray or Dataset, optional\n Value to use for locations in this object where ``cond`` is False.\n By default, inserts missing values.\n\n Returns\n -------\n Same type as caller.\n ","endLoc":202,"header":"def where_method(self, cond, other=dtypes.NA)","id":842,"name":"where_method","nodeType":"Function","startLoc":174,"text":"def where_method(self, cond, other=dtypes.NA):\n \"\"\"Return elements from `self` or `other` depending on `cond`.\n\n Parameters\n ----------\n cond : DataArray or Dataset with boolean dtype\n Locations at which to preserve this objects values.\n other : scalar, DataArray or Dataset, optional\n Value to use for locations in this object where ``cond`` is False.\n By default, inserts missing values.\n\n Returns\n -------\n Same type as caller.\n \"\"\"\n from .computation import apply_ufunc\n\n # alignment for three arguments is complicated, so don't support it yet\n join = \"inner\" if other is dtypes.NA else \"exact\"\n return apply_ufunc(\n duck_array_ops.where_method,\n self,\n cond,\n other,\n join=join,\n dataset_join=join,\n dask=\"allowed\",\n keep_attrs=True,\n )"},{"col":4,"comment":"null","endLoc":569,"header":"def _first_or_last(self, op, skipna, keep_attrs)","id":843,"name":"_first_or_last","nodeType":"Function","startLoc":560,"text":"def _first_or_last(self, op, skipna, keep_attrs):\n if isinstance(self._group_indices[0], integer_types):\n # NB. 
this is currently only used for reductions along an existing\n # dimension\n return self._obj\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=True)\n return self.reduce(\n op, self._group_dim, skipna=skipna, keep_attrs=keep_attrs, allow_lazy=True\n )"},{"className":"NetCDF4ArrayWrapper","col":0,"comment":"null","endLoc":95,"id":844,"nodeType":"Class","startLoc":58,"text":"class NetCDF4ArrayWrapper(BaseNetCDF4Array):\n __slots__ = ()\n\n def get_array(self, needs_lock=True):\n ds = self.datastore._acquire(needs_lock)\n variable = ds.variables[self.variable_name]\n variable.set_auto_maskandscale(False)\n # only added in netCDF4-python v1.2.8\n with suppress(AttributeError):\n variable.set_auto_chartostring(False)\n return variable\n\n def __getitem__(self, key):\n return indexing.explicit_indexing_adapter(\n key, self.shape, indexing.IndexingSupport.OUTER, self._getitem\n )\n\n def _getitem(self, key):\n if self.datastore.is_remote: # pragma: no cover\n getitem = functools.partial(robust_getitem, catch=RuntimeError)\n else:\n getitem = operator.getitem\n\n try:\n with self.datastore.lock:\n original_array = self.get_array(needs_lock=False)\n array = getitem(original_array, key)\n except IndexError:\n # Catch IndexError in netCDF4 and return a more informative\n # error message. This is most often called when an unsorted\n # indexer is used before the data is loaded from disk.\n msg = (\n \"The indexing operation you are attempting to perform \"\n \"is not valid on netCDF4.Variable object. Try loading \"\n \"your data into memory first by calling .load().\"\n )\n raise IndexError(msg)\n return array"},{"col":4,"comment":"null","endLoc":68,"header":"def get_array(self, needs_lock=True)","id":845,"name":"get_array","nodeType":"Function","startLoc":61,"text":"def get_array(self, needs_lock=True):\n ds = self.datastore._acquire(needs_lock)\n variable = ds.variables[self.variable_name]\n variable.set_auto_maskandscale(False)\n # only added in netCDF4-python v1.2.8\n with suppress(AttributeError):\n variable.set_auto_chartostring(False)\n return variable"},{"col":4,"comment":"Close any files linked to this object\n ","endLoc":1136,"header":"def close(self: Any) -> None","id":846,"name":"close","nodeType":"Function","startLoc":1131,"text":"def close(self: Any) -> None:\n \"\"\"Close any files linked to this object\n \"\"\"\n if self._file_obj is not None:\n self._file_obj.close()\n self._file_obj = None"},{"col":4,"comment":"Tests each value in the array for whether it is in test elements.\n\n Parameters\n ----------\n test_elements : array_like\n The values against which to test each value of `element`.\n This argument is flattened if an array or array_like.\n See numpy notes for behavior with non-array-like parameters.\n\n Returns\n -------\n isin : same as object, bool\n Has the same shape as this object.\n\n Examples\n --------\n\n >>> array = xr.DataArray([1, 2, 3], dims='x')\n >>> array.isin([1, 3])\n \n array([ True, False, True])\n Dimensions without coordinates: x\n\n See also\n --------\n numpy.isin\n ","endLoc":1187,"header":"def isin(self, test_elements)","id":847,"name":"isin","nodeType":"Function","startLoc":1138,"text":"def isin(self, test_elements):\n \"\"\"Tests each value in the array for whether it is in test elements.\n\n Parameters\n ----------\n test_elements : array_like\n The values against which to test each value of `element`.\n This argument is flattened if an array or array_like.\n See numpy notes for behavior with non-array-like parameters.\n\n Returns\n 
-------\n isin : same as object, bool\n Has the same shape as this object.\n\n Examples\n --------\n\n >>> array = xr.DataArray([1, 2, 3], dims='x')\n >>> array.isin([1, 3])\n \n array([ True, False, True])\n Dimensions without coordinates: x\n\n See also\n --------\n numpy.isin\n \"\"\"\n from .computation import apply_ufunc\n from .dataset import Dataset\n from .dataarray import DataArray\n from .variable import Variable\n\n if isinstance(test_elements, Dataset):\n raise TypeError(\n \"isin() argument must be convertible to an array: {}\".format(\n test_elements\n )\n )\n elif isinstance(test_elements, (Variable, DataArray)):\n # need to explicitly pull out data to support dask arrays as the\n # second argument\n test_elements = test_elements.data\n\n return apply_ufunc(\n duck_array_ops.isin,\n self,\n kwargs=dict(test_elements=test_elements),\n dask=\"allowed\",\n )"},{"col":4,"comment":"null","endLoc":192,"header":"def encode_variable(self, variable)","id":848,"name":"encode_variable","nodeType":"Function","startLoc":190,"text":"def encode_variable(self, variable):\n variable = encode_nc3_variable(variable)\n return variable"},{"col":4,"comment":"null","endLoc":217,"header":"def prepare_variable(\n self, name, variable, check_encoding=False, unlimited_dims=None\n )","id":849,"name":"prepare_variable","nodeType":"Function","startLoc":194,"text":"def prepare_variable(\n self, name, variable, check_encoding=False, unlimited_dims=None\n ):\n if check_encoding and variable.encoding:\n if variable.encoding != {\"_FillValue\": None}:\n raise ValueError(\n \"unexpected encoding for scipy backend: %r\"\n % list(variable.encoding)\n )\n\n data = variable.data\n # nb. this still creates a numpy array in all memory, even though we\n # don't write the data yet; scipy.io.netcdf does not not support\n # incremental writes.\n if name not in self.ds.variables:\n self.ds.createVariable(name, data.dtype, variable.dims)\n scipy_var = self.ds.variables[name]\n for k, v in variable.attrs.items():\n self._validate_attr_key(k)\n setattr(scipy_var, k, v)\n\n target = ScipyArrayWrapper(name, self)\n\n return target, data"},{"col":4,"comment":"Return the first element of each group along the group dimension\n ","endLoc":574,"header":"def first(self, skipna=None, keep_attrs=None)","id":850,"name":"first","nodeType":"Function","startLoc":571,"text":"def first(self, skipna=None, keep_attrs=None):\n \"\"\"Return the first element of each group along the group dimension\n \"\"\"\n return self._first_or_last(duck_array_ops.first, skipna, keep_attrs)"},{"col":4,"comment":"null","endLoc":73,"header":"def __getitem__(self, key)","id":851,"name":"__getitem__","nodeType":"Function","startLoc":70,"text":"def __getitem__(self, key):\n return indexing.explicit_indexing_adapter(\n key, self.shape, indexing.IndexingSupport.OUTER, self._getitem\n )"},{"col":4,"comment":"Return the last element of each group along the group dimension\n ","endLoc":579,"header":"def last(self, skipna=None, keep_attrs=None)","id":852,"name":"last","nodeType":"Function","startLoc":576,"text":"def last(self, skipna=None, keep_attrs=None):\n \"\"\"Return the last element of each group along the group dimension\n \"\"\"\n return self._first_or_last(duck_array_ops.last, skipna, keep_attrs)"},{"col":4,"comment":"null","endLoc":95,"header":"def _getitem(self, key)","id":853,"name":"_getitem","nodeType":"Function","startLoc":75,"text":"def _getitem(self, key):\n if self.datastore.is_remote: # pragma: no cover\n getitem = functools.partial(robust_getitem, 
catch=RuntimeError)\n else:\n getitem = operator.getitem\n\n try:\n with self.datastore.lock:\n original_array = self.get_array(needs_lock=False)\n array = getitem(original_array, key)\n except IndexError:\n # Catch IndexError in netCDF4 and return a more informative\n # error message. This is most often called when an unsorted\n # indexer is used before the data is loaded from disk.\n msg = (\n \"The indexing operation you are attempting to perform \"\n \"is not valid on netCDF4.Variable object. Try loading \"\n \"your data into memory first by calling .load().\"\n )\n raise IndexError(msg)\n return array"},{"col":4,"comment":"Assign coordinates by group.\n\n See also\n --------\n Dataset.assign_coords\n Dataset.swap_dims\n ","endLoc":590,"header":"def assign_coords(self, coords=None, **coords_kwargs)","id":854,"name":"assign_coords","nodeType":"Function","startLoc":581,"text":"def assign_coords(self, coords=None, **coords_kwargs):\n \"\"\"Assign coordinates by group.\n\n See also\n --------\n Dataset.assign_coords\n Dataset.swap_dims\n \"\"\"\n coords_kwargs = either_dict_or_kwargs(coords, coords_kwargs, \"assign_coords\")\n return self.apply(lambda ds: ds.assign_coords(**coords_kwargs))"},{"col":4,"comment":"null","endLoc":220,"header":"def sync(self)","id":855,"name":"sync","nodeType":"Function","startLoc":219,"text":"def sync(self):\n self.ds.sync()"},{"col":26,"endLoc":590,"id":856,"nodeType":"Lambda","startLoc":590,"text":"lambda ds: ds.assign_coords(**coords_kwargs)"},{"col":4,"comment":"null","endLoc":223,"header":"def close(self)","id":857,"name":"close","nodeType":"Function","startLoc":222,"text":"def close(self):\n self._manager.close()"},{"attributeType":"null","col":4,"comment":"null","endLoc":240,"id":858,"name":"__slots__","nodeType":"Attribute","startLoc":240,"text":"__slots__"},{"attributeType":"null","col":8,"comment":"null","endLoc":395,"id":859,"name":"_dims","nodeType":"Attribute","startLoc":395,"text":"self._dims"},{"attributeType":"null","col":8,"comment":"null","endLoc":383,"id":860,"name":"_obj","nodeType":"Attribute","startLoc":383,"text":"self._obj"},{"attributeType":"null","col":8,"comment":"null","endLoc":388,"id":861,"name":"_stacked_dim","nodeType":"Attribute","startLoc":388,"text":"self._stacked_dim"},{"attributeType":"null","col":8,"comment":"null","endLoc":385,"id":862,"name":"_group_dim","nodeType":"Attribute","startLoc":385,"text":"self._group_dim"},{"attributeType":"CachingFileManager | DummyFileManager","col":8,"comment":"null","endLoc":142,"id":863,"name":"_manager","nodeType":"Attribute","startLoc":142,"text":"self._manager"},{"attributeType":"null","col":8,"comment":"null","endLoc":386,"id":864,"name":"_group_indices","nodeType":"Attribute","startLoc":386,"text":"self._group_indices"},{"col":4,"comment":"null","endLoc":1190,"header":"def __enter__(self: T) -> T","id":865,"name":"__enter__","nodeType":"Function","startLoc":1189,"text":"def __enter__(self: T) -> T:\n return self"},{"col":4,"comment":"null","endLoc":1193,"header":"def __exit__(self, exc_type, exc_value, traceback) -> None","id":866,"name":"__exit__","nodeType":"Function","startLoc":1192,"text":"def __exit__(self, exc_type, exc_value, traceback) -> None:\n 
self.close()"},{"attributeType":"null","col":8,"comment":"null","endLoc":126,"id":867,"name":"lock","nodeType":"Attribute","startLoc":126,"text":"self.lock"},{"attributeType":"null","col":8,"comment":"null","endLoc":384,"id":868,"name":"_group","nodeType":"Attribute","startLoc":384,"text":"self._group"},{"attributeType":"null","col":8,"comment":"null","endLoc":387,"id":869,"name":"_unique_coord","nodeType":"Attribute","startLoc":387,"text":"self._unique_coord"},{"attributeType":"null","col":8,"comment":"null","endLoc":390,"id":870,"name":"_full_index","nodeType":"Attribute","startLoc":390,"text":"self._full_index"},{"attributeType":"null","col":8,"comment":"null","endLoc":394,"id":871,"name":"_groups","nodeType":"Attribute","startLoc":394,"text":"self._groups"},{"col":4,"comment":"null","endLoc":1197,"header":"def __getitem__(self, value)","id":872,"name":"__getitem__","nodeType":"Function","startLoc":1195,"text":"def __getitem__(self, value):\n # implementations of this class should implement this method\n raise NotImplementedError()"},{"attributeType":"null","col":8,"comment":"null","endLoc":389,"id":873,"name":"_inserted_dims","nodeType":"Attribute","startLoc":389,"text":"self._inserted_dims"},{"attributeType":"null","col":4,"comment":"null","endLoc":332,"id":874,"name":"__slots__","nodeType":"Attribute","startLoc":332,"text":"__slots__"},{"attributeType":"null","col":4,"comment":"null","endLoc":334,"id":875,"name":"_rolling_exp_cls","nodeType":"Attribute","startLoc":334,"text":"_rolling_exp_cls"},{"attributeType":"null","col":8,"comment":"null","endLoc":1136,"id":876,"name":"_file_obj","nodeType":"Attribute","startLoc":1136,"text":"self._file_obj"},{"col":4,"comment":"null","endLoc":398,"header":"def _replace(\n self,\n variable: Variable = None,\n coords=None,\n name: Optional[Hashable] = __default,\n ) -> \"DataArray\"","id":877,"name":"_replace","nodeType":"Function","startLoc":386,"text":"def _replace(\n self,\n variable: Variable = None,\n coords=None,\n name: Optional[Hashable] = __default,\n ) -> \"DataArray\":\n if variable is None:\n variable = self.variable\n if coords is None:\n coords = self._coords\n if name is self.__default:\n name = self.name\n return type(self)(variable, coords, name=name, fastpath=True)"},{"attributeType":"null","col":8,"comment":"null","endLoc":391,"id":878,"name":"_restore_coord_dims","nodeType":"Attribute","startLoc":391,"text":"self._restore_coord_dims"},{"col":4,"comment":"null","endLoc":418,"header":"def _replace_maybe_drop_dims(\n self, variable: Variable, name: Optional[Hashable] = __default\n ) -> \"DataArray\"","id":879,"name":"_replace_maybe_drop_dims","nodeType":"Function","startLoc":400,"text":"def _replace_maybe_drop_dims(\n self, variable: Variable, name: Optional[Hashable] = __default\n ) -> \"DataArray\":\n if variable.dims == self.dims and variable.shape == self.shape:\n coords = self._coords.copy()\n elif variable.dims == self.dims:\n # Shape has changed (e.g. 
from reduce(..., keepdims=True)\n new_sizes = dict(zip(self.dims, variable.shape))\n coords = {\n k: v\n for k, v in self._coords.items()\n if v.shape == tuple(new_sizes[d] for d in v.dims)\n }\n else:\n allowed_dims = set(variable.dims)\n coords = {\n k: v for k, v in self._coords.items() if set(v.dims) <= allowed_dims\n }\n return self._replace(variable, coords, name)"},{"id":880,"name":"readthedocs.yml","nodeType":"TextFile","path":"","text":"build:\n image: latest\nconda:\n file: ci/requirements/doc.yml\npython:\n version: 3.7\n setup_py_install: true\nformats: []\n"},{"fileName":"plot_cartopy_facetgrid.py","filePath":"doc/gallery","id":881,"nodeType":"File","text":"# -*- coding: utf-8 -*-\n\"\"\"\n==================================\nMultiple plots and map projections\n==================================\n\nControl the map projection parameters on multiple axes\n\nThis example illustrates how to plot multiple maps and control their extent\nand aspect ratio.\n\nFor more details see `this discussion`_ on github.\n\n.. _this discussion: https://github.com/pydata/xarray/issues/1397#issuecomment-299190567\n\"\"\"\n\n\nimport cartopy.crs as ccrs\nimport matplotlib.pyplot as plt\n\nimport xarray as xr\n\n# Load the data\nds = xr.tutorial.load_dataset(\"air_temperature\")\nair = ds.air.isel(time=[0, 724]) - 273.15\n\n# This is the map projection we want to plot *onto*\nmap_proj = ccrs.LambertConformal(central_longitude=-95, central_latitude=45)\n\np = air.plot(\n transform=ccrs.PlateCarree(), # the data's projection\n col=\"time\",\n col_wrap=1, # multiplot settings\n aspect=ds.dims[\"lon\"] / ds.dims[\"lat\"], # for a sensible figsize\n subplot_kws={\"projection\": map_proj}, # the plot's projection\n)\n\n# We have to set the map's options on all four axes\nfor ax in p.axes.flat:\n ax.coastlines()\n ax.set_extent([-160, -30, 5, 75])\n # Without this aspect attributes the maps will look chaotic and the\n # \"extent\" attribute above will be ignored\n ax.set_aspect(\"equal\")\n\nplt.show()\n"},{"attributeType":"null","col":4,"comment":"null","endLoc":59,"id":882,"name":"__slots__","nodeType":"Attribute","startLoc":59,"text":"__slots__"},{"className":"NetCDF4DataStore","col":0,"comment":"Store for reading and writing data via the Python-NetCDF4 library.\n\n This store supports NetCDF3, NetCDF4 and OpenDAP datasets.\n ","endLoc":491,"id":883,"nodeType":"Class","startLoc":276,"text":"class NetCDF4DataStore(WritableCFDataStore):\n \"\"\"Store for reading and writing data via the Python-NetCDF4 library.\n\n This store supports NetCDF3, NetCDF4 and OpenDAP datasets.\n \"\"\"\n\n __slots__ = (\n \"autoclose\",\n \"format\",\n \"is_remote\",\n \"lock\",\n \"_filename\",\n \"_group\",\n \"_manager\",\n \"_mode\",\n )\n\n def __init__(\n self, manager, group=None, mode=None, lock=NETCDF4_PYTHON_LOCK, autoclose=False\n ):\n import netCDF4\n\n if isinstance(manager, netCDF4.Dataset):\n if group is None:\n root, group = find_root_and_group(manager)\n else:\n if not type(manager) is netCDF4.Dataset:\n raise ValueError(\n \"must supply a root netCDF4.Dataset if the group \"\n \"argument is provided\"\n )\n root = manager\n manager = DummyFileManager(root)\n\n self._manager = manager\n self._group = group\n self._mode = mode\n self.format = self.ds.data_model\n self._filename = self.ds.filepath()\n self.is_remote = is_remote_uri(self._filename)\n self.lock = ensure_lock(lock)\n self.autoclose = autoclose\n\n @classmethod\n def open(\n cls,\n filename,\n mode=\"r\",\n format=\"NETCDF4\",\n group=None,\n 
clobber=True,\n diskless=False,\n persist=False,\n lock=None,\n lock_maker=None,\n autoclose=False,\n ):\n import netCDF4\n\n if format is None:\n format = \"NETCDF4\"\n\n if lock is None:\n if mode == \"r\":\n if is_remote_uri(filename):\n lock = NETCDFC_LOCK\n else:\n lock = NETCDF4_PYTHON_LOCK\n else:\n if format is None or format.startswith(\"NETCDF4\"):\n base_lock = NETCDF4_PYTHON_LOCK\n else:\n base_lock = NETCDFC_LOCK\n lock = combine_locks([base_lock, get_write_lock(filename)])\n\n kwargs = dict(\n clobber=clobber, diskless=diskless, persist=persist, format=format\n )\n manager = CachingFileManager(\n netCDF4.Dataset, filename, mode=mode, kwargs=kwargs\n )\n return cls(manager, group=group, mode=mode, lock=lock, autoclose=autoclose)\n\n def _acquire(self, needs_lock=True):\n with self._manager.acquire_context(needs_lock) as root:\n ds = _nc4_require_group(root, self._group, self._mode)\n return ds\n\n @property\n def ds(self):\n return self._acquire()\n\n def open_store_variable(self, name, var):\n dimensions = var.dimensions\n data = indexing.LazilyOuterIndexedArray(NetCDF4ArrayWrapper(name, self))\n attributes = {k: var.getncattr(k) for k in var.ncattrs()}\n _ensure_fill_value_valid(data, attributes)\n # netCDF4 specific encoding; save _FillValue for later\n encoding = {}\n filters = var.filters()\n if filters is not None:\n encoding.update(filters)\n chunking = var.chunking()\n if chunking is not None:\n if chunking == \"contiguous\":\n encoding[\"contiguous\"] = True\n encoding[\"chunksizes\"] = None\n else:\n encoding[\"contiguous\"] = False\n encoding[\"chunksizes\"] = tuple(chunking)\n # TODO: figure out how to round-trip \"endian-ness\" without raising\n # warnings from netCDF4\n # encoding['endian'] = var.endian()\n pop_to(attributes, encoding, \"least_significant_digit\")\n # save source so __repr__ can detect if it's local or not\n encoding[\"source\"] = self._filename\n encoding[\"original_shape\"] = var.shape\n encoding[\"dtype\"] = var.dtype\n\n return Variable(dimensions, data, attributes, encoding)\n\n def get_variables(self):\n dsvars = FrozenDict(\n (k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()\n )\n return dsvars\n\n def get_attrs(self):\n attrs = FrozenDict((k, self.ds.getncattr(k)) for k in self.ds.ncattrs())\n return attrs\n\n def get_dimensions(self):\n dims = FrozenDict((k, len(v)) for k, v in self.ds.dimensions.items())\n return dims\n\n def get_encoding(self):\n encoding = {}\n encoding[\"unlimited_dims\"] = {\n k for k, v in self.ds.dimensions.items() if v.isunlimited()\n }\n return encoding\n\n def set_dimension(self, name, length, is_unlimited=False):\n dim_length = length if not is_unlimited else None\n self.ds.createDimension(name, size=dim_length)\n\n def set_attribute(self, key, value):\n if self.format != \"NETCDF4\":\n value = encode_nc3_attr_value(value)\n if _is_list_of_strings(value):\n # encode as NC_STRING if attr is list of strings\n self.ds.setncattr_string(key, value)\n else:\n self.ds.setncattr(key, value)\n\n def encode_variable(self, variable):\n variable = _force_native_endianness(variable)\n if self.format == \"NETCDF4\":\n variable = _encode_nc4_variable(variable)\n else:\n variable = encode_nc3_variable(variable)\n return variable\n\n def prepare_variable(\n self, name, variable, check_encoding=False, unlimited_dims=None\n ):\n datatype = _get_datatype(\n variable, self.format, raise_on_invalid_encoding=check_encoding\n )\n attrs = variable.attrs.copy()\n\n fill_value = attrs.pop(\"_FillValue\", 
None)\n\n if datatype is str and fill_value is not None:\n raise NotImplementedError(\n \"netCDF4 does not yet support setting a fill value for \"\n \"variable-length strings \"\n \"(https://github.com/Unidata/netcdf4-python/issues/730). \"\n \"Either remove '_FillValue' from encoding on variable %r \"\n \"or set {'dtype': 'S1'} in encoding to use the fixed width \"\n \"NC_CHAR type.\" % name\n )\n\n encoding = _extract_nc4_variable_encoding(\n variable, raise_on_invalid=check_encoding, unlimited_dims=unlimited_dims\n )\n\n if name in self.ds.variables:\n nc4_var = self.ds.variables[name]\n else:\n nc4_var = self.ds.createVariable(\n varname=name,\n datatype=datatype,\n dimensions=variable.dims,\n zlib=encoding.get(\"zlib\", False),\n complevel=encoding.get(\"complevel\", 4),\n shuffle=encoding.get(\"shuffle\", True),\n fletcher32=encoding.get(\"fletcher32\", False),\n contiguous=encoding.get(\"contiguous\", False),\n chunksizes=encoding.get(\"chunksizes\"),\n endian=\"native\",\n least_significant_digit=encoding.get(\"least_significant_digit\"),\n fill_value=fill_value,\n )\n\n nc4_var.setncatts(attrs)\n\n target = NetCDF4ArrayWrapper(name, self)\n\n return target, variable.data\n\n def sync(self):\n self.ds.sync()\n\n def close(self, **kwargs):\n self._manager.close(**kwargs)"},{"attributeType":"null","col":22,"comment":"null","endLoc":18,"id":884,"name":"ccrs","nodeType":"Attribute","startLoc":18,"text":"ccrs"},{"col":4,"comment":"null","endLoc":362,"header":"def _acquire(self, needs_lock=True)","id":885,"name":"_acquire","nodeType":"Function","startLoc":359,"text":"def _acquire(self, needs_lock=True):\n with self._manager.acquire_context(needs_lock) as root:\n ds = _nc4_require_group(root, self._group, self._mode)\n return ds"},{"col":4,"comment":"Fast version of `_iter_grouped` that yields Variables without\n metadata\n ","endLoc":612,"header":"def _iter_grouped_shortcut(self)","id":886,"name":"_iter_grouped_shortcut","nodeType":"Function","startLoc":606,"text":"def _iter_grouped_shortcut(self):\n \"\"\"Fast version of `_iter_grouped` that yields Variables without\n metadata\n \"\"\"\n var = self._obj.variable\n for indices in self._group_indices:\n yield var[{self._group_dim: indices}]"},{"attributeType":"null","col":28,"comment":"null","endLoc":19,"id":887,"name":"plt","nodeType":"Attribute","startLoc":19,"text":"plt"},{"col":4,"comment":"null","endLoc":622,"header":"def _concat_shortcut(self, applied, dim, positions=None)","id":888,"name":"_concat_shortcut","nodeType":"Function","startLoc":614,"text":"def _concat_shortcut(self, applied, dim, positions=None):\n # nb. 
don't worry too much about maintaining this method -- it does\n # speed things up, but it's not very interpretable and there are much\n # faster alternatives (e.g., doing the grouped aggregation in a\n # compiled language)\n stacked = Variable.concat(applied, dim, shortcut=True)\n reordered = _maybe_reorder(stacked, dim, positions)\n result = self._obj._replace_maybe_drop_dims(reordered)\n return result"},{"attributeType":"null","col":17,"comment":"null","endLoc":21,"id":889,"name":"xr","nodeType":"Attribute","startLoc":21,"text":"xr"},{"attributeType":"null","col":0,"comment":"null","endLoc":24,"id":890,"name":"ds","nodeType":"Attribute","startLoc":24,"text":"ds"},{"col":0,"comment":"null","endLoc":599,"header":"def _maybe_reorder(xarray_obj, dim, positions)","id":891,"name":"_maybe_reorder","nodeType":"Function","startLoc":593,"text":"def _maybe_reorder(xarray_obj, dim, positions):\n order = _inverse_permutation_indices(positions)\n\n if order is None:\n return xarray_obj\n else:\n return xarray_obj[{dim: order}]"},{"col":0,"comment":"Like inverse_permutation, but also handles slices.\n\n Parameters\n ----------\n positions : list of np.ndarray or slice objects.\n If slice objects, all are assumed to be slices.\n\n Returns\n -------\n np.ndarray of indices or None, if no permutation is necessary.\n ","endLoc":134,"header":"def _inverse_permutation_indices(positions)","id":892,"name":"_inverse_permutation_indices","nodeType":"Function","startLoc":112,"text":"def _inverse_permutation_indices(positions):\n \"\"\"Like inverse_permutation, but also handles slices.\n\n Parameters\n ----------\n positions : list of np.ndarray or slice objects.\n If slice objects, all are assumed to be slices.\n\n Returns\n -------\n np.ndarray of indices or None, if no permutation is necessary.\n \"\"\"\n if not positions:\n return None\n\n if isinstance(positions[0], slice):\n positions = _consolidate_slices(positions)\n if positions == slice(None):\n return None\n positions = [np.arange(sl.start, sl.stop, sl.step) for sl in positions]\n\n indices = nputils.inverse_permutation(np.concatenate(positions))\n return indices"},{"col":0,"comment":"Consolidate adjacent slices in a list of slices.\n ","endLoc":109,"header":"def _consolidate_slices(slices)","id":893,"name":"_consolidate_slices","nodeType":"Function","startLoc":90,"text":"def _consolidate_slices(slices):\n \"\"\"Consolidate adjacent slices in a list of slices.\n \"\"\"\n result = []\n last_slice = slice(None)\n for slice_ in slices:\n if not isinstance(slice_, slice):\n raise ValueError(\"list element is not a slice: %r\" % slice_)\n if (\n result\n and last_slice.stop == slice_.start\n and _is_one_or_none(last_slice.step)\n and _is_one_or_none(slice_.step)\n ):\n last_slice = slice(last_slice.start, slice_.stop, slice_.step)\n result[-1] = last_slice\n else:\n result.append(slice_)\n last_slice = slice_\n return result"},{"col":0,"comment":"null","endLoc":87,"header":"def _is_one_or_none(obj)","id":894,"name":"_is_one_or_none","nodeType":"Function","startLoc":86,"text":"def _is_one_or_none(obj):\n return obj == 1 or obj is None"},{"attributeType":"null","col":0,"comment":"null","endLoc":25,"id":897,"name":"air","nodeType":"Attribute","startLoc":25,"text":"air"},{"col":4,"comment":"Construct a DataArray by indexing this dataset\n ","endLoc":1164,"header":"def _construct_dataarray(self, name: Hashable) -> \"DataArray\"","id":898,"name":"_construct_dataarray","nodeType":"Function","startLoc":1140,"text":"def _construct_dataarray(self, name: Hashable) -> 
\"DataArray\":\n \"\"\"Construct a DataArray by indexing this dataset\n \"\"\"\n from .dataarray import DataArray\n\n try:\n variable = self._variables[name]\n except KeyError:\n _, name, variable = _get_virtual_variable(\n self._variables, name, self._level_coords, self.dims\n )\n\n needed_dims = set(variable.dims)\n\n coords: Dict[Hashable, Variable] = {}\n for k in self.coords:\n if set(self.variables[k].dims) <= needed_dims:\n coords[k] = self.variables[k]\n\n if self._indexes is None:\n indexes = None\n else:\n indexes = {k: v for k, v in self._indexes.items() if k in coords}\n\n return DataArray(variable, coords, name=name, indexes=indexes, fastpath=True)"},{"attributeType":"null","col":0,"comment":"null","endLoc":28,"id":899,"name":"map_proj","nodeType":"Attribute","startLoc":28,"text":"map_proj"},{"col":4,"comment":"null","endLoc":366,"header":"@property\n def ds(self)","id":900,"name":"ds","nodeType":"Function","startLoc":364,"text":"@property\n def ds(self):\n return self._acquire()"},{"col":0,"comment":"\n Convenience method to call xarray.plot.FacetGrid from 2d plotting methods\n\n kwargs are the arguments to 2d plotting method\n ","endLoc":629,"header":"def _easy_facetgrid(\n data,\n plotfunc,\n kind,\n x=None,\n y=None,\n row=None,\n col=None,\n col_wrap=None,\n sharex=True,\n sharey=True,\n aspect=None,\n size=None,\n subplot_kws=None,\n ax=None,\n figsize=None,\n **kwargs\n)","id":901,"name":"_easy_facetgrid","nodeType":"Function","startLoc":577,"text":"def _easy_facetgrid(\n data,\n plotfunc,\n kind,\n x=None,\n y=None,\n row=None,\n col=None,\n col_wrap=None,\n sharex=True,\n sharey=True,\n aspect=None,\n size=None,\n subplot_kws=None,\n ax=None,\n figsize=None,\n **kwargs\n):\n \"\"\"\n Convenience method to call xarray.plot.FacetGrid from 2d plotting methods\n\n kwargs are the arguments to 2d plotting method\n \"\"\"\n if ax is not None:\n raise ValueError(\"Can't use axes when making faceted plots.\")\n if aspect is None:\n aspect = 1\n if size is None:\n size = 3\n elif figsize is not None:\n raise ValueError(\"cannot provide both `figsize` and `size` arguments\")\n\n g = FacetGrid(\n data=data,\n col=col,\n row=row,\n col_wrap=col_wrap,\n sharex=sharex,\n sharey=sharey,\n figsize=figsize,\n aspect=aspect,\n size=size,\n subplot_kws=subplot_kws,\n )\n\n if kind == \"line\":\n return g.map_dataarray_line(plotfunc, x, y, **kwargs)\n\n if kind == \"dataarray\":\n return g.map_dataarray(plotfunc, x, y, **kwargs)\n\n if kind == \"dataset\":\n return g.map_dataset(plotfunc, x, y, **kwargs)"},{"col":4,"comment":"null","endLoc":435,"header":"def _overwrite_indexes(self, indexes: Mapping[Hashable, Any]) -> \"DataArray\"","id":902,"name":"_overwrite_indexes","nodeType":"Function","startLoc":420,"text":"def _overwrite_indexes(self, indexes: Mapping[Hashable, Any]) -> \"DataArray\":\n if not len(indexes):\n return self\n coords = self._coords.copy()\n for name, idx in indexes.items():\n coords[name] = IndexVariable(name, idx)\n obj = self._replace(coords=coords)\n\n # switch from dimension to level names, if necessary\n dim_names: Dict[Any, str] = {}\n for dim, idx in indexes.items():\n if not isinstance(idx, pd.MultiIndex) and idx.name != dim:\n dim_names[dim] = idx.name\n if dim_names:\n obj = obj.rename(dim_names)\n return obj"},{"col":4,"comment":"null","endLoc":635,"header":"def _restore_dim_order(self, stacked)","id":903,"name":"_restore_dim_order","nodeType":"Function","startLoc":624,"text":"def _restore_dim_order(self, stacked):\n def lookup_order(dimension):\n if 
dimension == self._group.name:\n dimension, = self._group.dims\n if dimension in self._obj.dims:\n axis = self._obj.get_axis_num(dimension)\n else:\n axis = 1e6 # some arbitrarily high value\n return axis\n\n new_order = sorted(stacked.dims, key=lookup_order)\n return stacked.transpose(*new_order, transpose_coords=self._restore_coord_dims)"},{"attributeType":"null","col":0,"comment":"null","endLoc":30,"id":904,"name":"p","nodeType":"Attribute","startLoc":30,"text":"p"},{"col":4,"comment":"Apply a function over each array in the group and concatenate them\n together into a new array.\n\n `func` is called like `func(ar, *args, **kwargs)` for each array `ar`\n in this group.\n\n Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how\n to stack together the array. The rule is:\n\n 1. If the dimension along which the group coordinate is defined is\n still in the first grouped array after applying `func`, then stack\n over this dimension.\n 2. Otherwise, stack over the new dimension given by name of this\n grouping (the argument to the `groupby` function).\n\n Parameters\n ----------\n func : function\n Callable to apply to each array.\n shortcut : bool, optional\n Whether or not to shortcut evaluation under the assumptions that:\n (1) The action of `func` does not depend on any of the array\n metadata (attributes or coordinates) but only on the data and\n dimensions.\n (2) The action of `func` creates arrays with homogeneous metadata,\n that is, with the same dimensions and attributes.\n If these conditions are satisfied `shortcut` provides significant\n speedup. This should be the case for many common groupby operations\n (e.g., applying numpy ufuncs).\n args : tuple, optional\n Positional arguments passed to `func`.\n **kwargs\n Used to call `func(ar, **kwargs)` for each array `ar`.\n\n Returns\n -------\n applied : DataArray or DataArray\n The result of splitting, applying and combining this array.\n ","endLoc":682,"header":"def apply(self, func, shortcut=False, args=(), **kwargs)","id":905,"name":"apply","nodeType":"Function","startLoc":637,"text":"def apply(self, func, shortcut=False, args=(), **kwargs):\n \"\"\"Apply a function over each array in the group and concatenate them\n together into a new array.\n\n `func` is called like `func(ar, *args, **kwargs)` for each array `ar`\n in this group.\n\n Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how\n to stack together the array. The rule is:\n\n 1. If the dimension along which the group coordinate is defined is\n still in the first grouped array after applying `func`, then stack\n over this dimension.\n 2. Otherwise, stack over the new dimension given by name of this\n grouping (the argument to the `groupby` function).\n\n Parameters\n ----------\n func : function\n Callable to apply to each array.\n shortcut : bool, optional\n Whether or not to shortcut evaluation under the assumptions that:\n (1) The action of `func` does not depend on any of the array\n metadata (attributes or coordinates) but only on the data and\n dimensions.\n (2) The action of `func` creates arrays with homogeneous metadata,\n that is, with the same dimensions and attributes.\n If these conditions are satisfied `shortcut` provides significant\n speedup. 
This should be the case for many common groupby operations\n (e.g., applying numpy ufuncs).\n args : tuple, optional\n Positional arguments passed to `func`.\n **kwargs\n Used to call `func(ar, **kwargs)` for each array `ar`.\n\n Returns\n -------\n applied : DataArray or DataArray\n The result of splitting, applying and combining this array.\n \"\"\"\n if shortcut:\n grouped = self._iter_grouped_shortcut()\n else:\n grouped = self._iter_grouped()\n applied = (maybe_wrap_array(arr, func(arr, *args, **kwargs)) for arr in grouped)\n return self._combine(applied, shortcut=shortcut)"},{"attributeType":"null","col":4,"comment":"null","endLoc":39,"id":906,"name":"ax","nodeType":"Attribute","startLoc":39,"text":"ax"},{"col":0,"comment":"","endLoc":15,"header":"plot_cartopy_facetgrid.py#","id":907,"name":"","nodeType":"Function","startLoc":2,"text":"\"\"\"\n==================================\nMultiple plots and map projections\n==================================\n\nControl the map projection parameters on multiple axes\n\nThis example illustrates how to plot multiple maps and control their extent\nand aspect ratio.\n\nFor more details see `this discussion`_ on github.\n\n.. _this discussion: https://github.com/pydata/xarray/issues/1397#issuecomment-299190567\n\"\"\"\n\nds = xr.tutorial.load_dataset(\"air_temperature\")\n\nair = ds.air.isel(time=[0, 724]) - 273.15\n\nmap_proj = ccrs.LambertConformal(central_longitude=-95, central_latitude=45)\n\np = air.plot(\n transform=ccrs.PlateCarree(), # the data's projection\n col=\"time\",\n col_wrap=1, # multiplot settings\n aspect=ds.dims[\"lon\"] / ds.dims[\"lat\"], # for a sensible figsize\n subplot_kws={\"projection\": map_proj}, # the plot's projection\n)\n\nfor ax in p.axes.flat:\n ax.coastlines()\n ax.set_extent([-160, -30, 5, 75])\n # Without this aspect attributes the maps will look chaotic and the\n # \"extent\" attribute above will be ignored\n ax.set_aspect(\"equal\")\n\nplt.show()"},{"col":0,"comment":"Wrap a transformed array with __array_wrap__ is it can be done safely.\n\n This lets us treat arbitrary functions that take and return ndarray objects\n like ufuncs, as long as they return an array with the same shape.\n ","endLoc":142,"header":"def maybe_wrap_array(original, new_array)","id":908,"name":"maybe_wrap_array","nodeType":"Function","startLoc":132,"text":"def maybe_wrap_array(original, new_array):\n \"\"\"Wrap a transformed array with __array_wrap__ is it can be done safely.\n\n This lets us treat arbitrary functions that take and return ndarray objects\n like ufuncs, as long as they return an array with the same shape.\n \"\"\"\n # in case func lost array's metadata\n if isinstance(new_array, np.ndarray) and new_array.shape == original.shape:\n return original.__array_wrap__(new_array)\n else:\n return new_array"},{"col":4,"comment":"Recombine the applied objects like the original.","endLoc":704,"header":"def _combine(self, applied, restore_coord_dims=False, shortcut=False)","id":909,"name":"_combine","nodeType":"Function","startLoc":684,"text":"def _combine(self, applied, restore_coord_dims=False, shortcut=False):\n \"\"\"Recombine the applied objects like the original.\"\"\"\n applied_example, applied = peek_at(applied)\n coord, dim, positions = self._infer_concat_args(applied_example)\n if shortcut:\n combined = self._concat_shortcut(applied, dim, positions)\n else:\n combined = concat(applied, dim)\n combined = _maybe_reorder(combined, dim, positions)\n\n if isinstance(combined, type(self._obj)):\n # only restore dimension order 
for arrays\n combined = self._restore_dim_order(combined)\n if coord is not None:\n if shortcut:\n combined._coords[coord.name] = as_variable(coord)\n else:\n combined.coords[coord.name] = coord\n combined = self._maybe_restore_empty_groups(combined)\n combined = self._maybe_unstack(combined)\n return combined"},{"fileName":"groupby.py","filePath":"xarray/core","id":910,"nodeType":"File","text":"import datetime\nimport functools\nimport warnings\n\nimport numpy as np\nimport pandas as pd\n\nfrom . import dtypes, duck_array_ops, nputils, ops\nfrom .arithmetic import SupportsArithmetic\nfrom .common import ALL_DIMS, ImplementsArrayReduce, ImplementsDatasetReduce\nfrom .concat import concat\nfrom .formatting import format_array_flat\nfrom .options import _get_keep_attrs\nfrom .pycompat import integer_types\nfrom .utils import (\n either_dict_or_kwargs,\n hashable,\n maybe_wrap_array,\n peek_at,\n safe_cast_to_index,\n)\nfrom .variable import IndexVariable, Variable, as_variable\n\n\ndef unique_value_groups(ar, sort=True):\n \"\"\"Group an array by its unique values.\n\n Parameters\n ----------\n ar : array-like\n Input array. This will be flattened if it is not already 1-D.\n sort : boolean, optional\n Whether or not to sort unique values.\n\n Returns\n -------\n values : np.ndarray\n Sorted, unique values as returned by `np.unique`.\n indices : list of lists of int\n Each element provides the integer indices in `ar` with values given by\n the corresponding value in `unique_values`.\n \"\"\"\n inverse, values = pd.factorize(ar, sort=sort)\n groups = [[] for _ in range(len(values))]\n for n, g in enumerate(inverse):\n if g >= 0:\n # pandas uses -1 to mark NaN, but doesn't include them in values\n groups[g].append(n)\n return values, groups\n\n\ndef _dummy_copy(xarray_obj):\n from .dataset import Dataset\n from .dataarray import DataArray\n\n if isinstance(xarray_obj, Dataset):\n res = Dataset(\n {\n k: dtypes.get_fill_value(v.dtype)\n for k, v in xarray_obj.data_vars.items()\n },\n {\n k: dtypes.get_fill_value(v.dtype)\n for k, v in xarray_obj.coords.items()\n if k not in xarray_obj.dims\n },\n xarray_obj.attrs,\n )\n elif isinstance(xarray_obj, DataArray):\n res = DataArray(\n dtypes.get_fill_value(xarray_obj.dtype),\n {\n k: dtypes.get_fill_value(v.dtype)\n for k, v in xarray_obj.coords.items()\n if k not in xarray_obj.dims\n },\n dims=[],\n name=xarray_obj.name,\n attrs=xarray_obj.attrs,\n )\n else: # pragma: no cover\n raise AssertionError\n return res\n\n\ndef _is_one_or_none(obj):\n return obj == 1 or obj is None\n\n\ndef _consolidate_slices(slices):\n \"\"\"Consolidate adjacent slices in a list of slices.\n \"\"\"\n result = []\n last_slice = slice(None)\n for slice_ in slices:\n if not isinstance(slice_, slice):\n raise ValueError(\"list element is not a slice: %r\" % slice_)\n if (\n result\n and last_slice.stop == slice_.start\n and _is_one_or_none(last_slice.step)\n and _is_one_or_none(slice_.step)\n ):\n last_slice = slice(last_slice.start, slice_.stop, slice_.step)\n result[-1] = last_slice\n else:\n result.append(slice_)\n last_slice = slice_\n return result\n\n\ndef _inverse_permutation_indices(positions):\n \"\"\"Like inverse_permutation, but also handles slices.\n\n Parameters\n ----------\n positions : list of np.ndarray or slice objects.\n If slice objects, all are assumed to be slices.\n\n Returns\n -------\n np.ndarray of indices or None, if no permutation is necessary.\n \"\"\"\n if not positions:\n return None\n\n if isinstance(positions[0], slice):\n positions = 
_consolidate_slices(positions)\n if positions == slice(None):\n return None\n positions = [np.arange(sl.start, sl.stop, sl.step) for sl in positions]\n\n indices = nputils.inverse_permutation(np.concatenate(positions))\n return indices\n\n\nclass _DummyGroup:\n \"\"\"Class for keeping track of grouped dimensions without coordinates.\n\n Should not be user visible.\n \"\"\"\n\n __slots__ = (\"name\", \"coords\", \"size\")\n\n def __init__(self, obj, name, coords):\n self.name = name\n self.coords = coords\n self.size = obj.sizes[name]\n\n @property\n def dims(self):\n return (self.name,)\n\n @property\n def ndim(self):\n return 1\n\n @property\n def values(self):\n return range(self.size)\n\n @property\n def shape(self):\n return (self.size,)\n\n def __getitem__(self, key):\n if isinstance(key, tuple):\n key = key[0]\n return self.values[key]\n\n\ndef _ensure_1d(group, obj):\n if group.ndim != 1:\n # try to stack the dims of the group into a single dim\n orig_dims = group.dims\n stacked_dim = \"stacked_\" + \"_\".join(orig_dims)\n # these dimensions get created by the stack operation\n inserted_dims = [dim for dim in group.dims if dim not in group.coords]\n # the copy is necessary here, otherwise read only array raises error\n # in pandas: https://github.com/pydata/pandas/issues/12813\n group = group.stack(**{stacked_dim: orig_dims}).copy()\n obj = obj.stack(**{stacked_dim: orig_dims})\n else:\n stacked_dim = None\n inserted_dims = []\n return group, obj, stacked_dim, inserted_dims\n\n\ndef _unique_and_monotonic(group):\n if isinstance(group, _DummyGroup):\n return True\n else:\n index = safe_cast_to_index(group)\n return index.is_unique and index.is_monotonic\n\n\ndef _apply_loffset(grouper, result):\n \"\"\"\n (copied from pandas)\n if loffset is set, offset the result index\n\n This is NOT an idempotent routine, it will be applied\n exactly once to the result.\n\n Parameters\n ----------\n result : Series or DataFrame\n the result of resample\n \"\"\"\n\n needs_offset = (\n isinstance(grouper.loffset, (pd.DateOffset, datetime.timedelta))\n and isinstance(result.index, pd.DatetimeIndex)\n and len(result.index) > 0\n )\n\n if needs_offset:\n result.index = result.index + grouper.loffset\n\n grouper.loffset = None\n\n\nclass GroupBy(SupportsArithmetic):\n \"\"\"A object that implements the split-apply-combine pattern.\n\n Modeled after `pandas.GroupBy`. The `GroupBy` object can be iterated over\n (unique_value, grouped_array) pairs, but the main way to interact with a\n groupby object are with the `apply` or `reduce` methods. 
You can also\n directly call numpy methods like `mean` or `std`.\n\n You should create a GroupBy object by using the `DataArray.groupby` or\n `Dataset.groupby` methods.\n\n See Also\n --------\n Dataset.groupby\n DataArray.groupby\n \"\"\"\n\n __slots__ = (\n \"_full_index\",\n \"_inserted_dims\",\n \"_group\",\n \"_group_dim\",\n \"_group_indices\",\n \"_groups\",\n \"_obj\",\n \"_restore_coord_dims\",\n \"_stacked_dim\",\n \"_unique_coord\",\n \"_dims\",\n )\n\n def __init__(\n self,\n obj,\n group,\n squeeze=False,\n grouper=None,\n bins=None,\n restore_coord_dims=None,\n cut_kwargs={},\n ):\n \"\"\"Create a GroupBy object\n\n Parameters\n ----------\n obj : Dataset or DataArray\n Object to group.\n group : DataArray\n Array with the group values.\n squeeze : boolean, optional\n If \"group\" is a coordinate of object, `squeeze` controls whether\n the subarrays have a dimension of length 1 along that coordinate or\n if the dimension is squeezed out.\n grouper : pd.Grouper, optional\n Used for grouping values along the `group` array.\n bins : array-like, optional\n If `bins` is specified, the groups will be discretized into the\n specified bins by `pandas.cut`.\n restore_coord_dims : bool, optional\n If True, also restore the dimension order of multi-dimensional\n coordinates.\n cut_kwargs : dict, optional\n Extra keyword arguments to pass to `pandas.cut`\n\n \"\"\"\n from .dataarray import DataArray\n\n if grouper is not None and bins is not None:\n raise TypeError(\"can't specify both `grouper` and `bins`\")\n\n if not isinstance(group, (DataArray, IndexVariable)):\n if not hashable(group):\n raise TypeError(\n \"`group` must be an xarray.DataArray or the \"\n \"name of an xarray variable or dimension\"\n )\n group = obj[group]\n if len(group) == 0:\n raise ValueError(\"{} must not be empty\".format(group.name))\n\n if group.name not in obj.coords and group.name in obj.dims:\n # DummyGroups should not appear on groupby results\n group = _DummyGroup(obj, group.name, group.coords)\n\n if getattr(group, \"name\", None) is None:\n raise ValueError(\"`group` must have a name\")\n\n group, obj, stacked_dim, inserted_dims = _ensure_1d(group, obj)\n group_dim, = group.dims\n\n expected_size = obj.sizes[group_dim]\n if group.size != expected_size:\n raise ValueError(\n \"the group variable's length does not \"\n \"match the length of this variable along its \"\n \"dimension\"\n )\n\n full_index = None\n\n if bins is not None:\n if np.isnan(bins).all():\n raise ValueError(\"All bin edges are NaN.\")\n binned = pd.cut(group.values, bins, **cut_kwargs)\n new_dim_name = group.name + \"_bins\"\n group = DataArray(binned, group.coords, name=new_dim_name)\n full_index = binned.categories\n\n if grouper is not None:\n index = safe_cast_to_index(group)\n if not index.is_monotonic:\n # TODO: sort instead of raising an error\n raise ValueError(\"index must be monotonic for resampling\")\n full_index, first_items = self._get_index_and_items(index, grouper)\n sbins = first_items.values.astype(np.int64)\n group_indices = [slice(i, j) for i, j in zip(sbins[:-1], sbins[1:])] + [\n slice(sbins[-1], None)\n ]\n unique_coord = IndexVariable(group.name, first_items.index)\n elif group.dims == (group.name,) and _unique_and_monotonic(group):\n # no need to factorize\n group_indices = np.arange(group.size)\n if not squeeze:\n # use slices to do views instead of fancy indexing\n # equivalent to: group_indices = group_indices.reshape(-1, 1)\n group_indices = [slice(i, i + 1) for i in group_indices]\n unique_coord = 
group\n else:\n # look through group to find the unique values\n unique_values, group_indices = unique_value_groups(\n safe_cast_to_index(group), sort=(bins is None)\n )\n unique_coord = IndexVariable(group.name, unique_values)\n\n if len(group_indices) == 0:\n if bins is not None:\n raise ValueError(\n \"None of the data falls within bins with edges %r\" % bins\n )\n else:\n raise ValueError(\n \"Failed to group data. Are you grouping by a variable that is all NaN?\"\n )\n\n if (\n isinstance(obj, DataArray)\n and restore_coord_dims is None\n and any(obj[c].ndim > 1 for c in obj.coords)\n ):\n warnings.warn(\n \"This DataArray contains multi-dimensional \"\n \"coordinates. In the future, the dimension order \"\n \"of these coordinates will be restored as well \"\n \"unless you specify restore_coord_dims=False.\",\n FutureWarning,\n stacklevel=2,\n )\n restore_coord_dims = False\n\n # specification for the groupby operation\n self._obj = obj\n self._group = group\n self._group_dim = group_dim\n self._group_indices = group_indices\n self._unique_coord = unique_coord\n self._stacked_dim = stacked_dim\n self._inserted_dims = inserted_dims\n self._full_index = full_index\n self._restore_coord_dims = restore_coord_dims\n\n # cached attributes\n self._groups = None\n self._dims = None\n\n @property\n def dims(self):\n if self._dims is None:\n self._dims = self._obj.isel(\n **{self._group_dim: self._group_indices[0]}\n ).dims\n\n return self._dims\n\n @property\n def groups(self):\n # provided to mimic pandas.groupby\n if self._groups is None:\n self._groups = dict(zip(self._unique_coord.values, self._group_indices))\n return self._groups\n\n def __len__(self):\n return self._unique_coord.size\n\n def __iter__(self):\n return zip(self._unique_coord.values, self._iter_grouped())\n\n def __repr__(self):\n return \"%s, grouped over %r \\n%r groups with labels %s.\" % (\n self.__class__.__name__,\n self._unique_coord.name,\n self._unique_coord.size,\n \", \".join(format_array_flat(self._unique_coord, 30).split()),\n )\n\n def _get_index_and_items(self, index, grouper):\n from .resample_cftime import CFTimeGrouper\n\n s = pd.Series(np.arange(index.size), index)\n if isinstance(grouper, CFTimeGrouper):\n first_items = grouper.first_items(index)\n else:\n first_items = s.groupby(grouper).first()\n _apply_loffset(grouper, first_items)\n full_index = first_items.index\n if first_items.isnull().any():\n first_items = first_items.dropna()\n return full_index, first_items\n\n def _iter_grouped(self):\n \"\"\"Iterate over each element in this group\"\"\"\n for indices in self._group_indices:\n yield self._obj.isel(**{self._group_dim: indices})\n\n def _infer_concat_args(self, applied_example):\n if self._group_dim in applied_example.dims:\n coord = self._group\n positions = self._group_indices\n else:\n coord = self._unique_coord\n positions = None\n dim, = coord.dims\n if isinstance(coord, _DummyGroup):\n coord = None\n return coord, dim, positions\n\n @staticmethod\n def _binary_op(f, reflexive=False, **ignored_kwargs):\n @functools.wraps(f)\n def func(self, other):\n g = f if not reflexive else lambda x, y: f(y, x)\n applied = self._yield_binary_applied(g, other)\n combined = self._combine(applied)\n return combined\n\n return func\n\n def _yield_binary_applied(self, func, other):\n dummy = None\n\n for group_value, obj in self:\n try:\n other_sel = other.sel(**{self._group.name: group_value})\n except AttributeError:\n raise TypeError(\n \"GroupBy objects only support binary ops \"\n \"when the other 
argument is a Dataset or \"\n \"DataArray\"\n )\n except (KeyError, ValueError):\n if self._group.name not in other.dims:\n raise ValueError(\n \"incompatible dimensions for a grouped \"\n \"binary operation: the group variable %r \"\n \"is not a dimension on the other argument\" % self._group.name\n )\n if dummy is None:\n dummy = _dummy_copy(other)\n other_sel = dummy\n\n result = func(obj, other_sel)\n yield result\n\n def _maybe_restore_empty_groups(self, combined):\n \"\"\"Our index contained empty groups (e.g., from a resampling). If we\n reduced on that dimension, we want to restore the full index.\n \"\"\"\n if self._full_index is not None and self._group.name in combined.dims:\n indexers = {self._group.name: self._full_index}\n combined = combined.reindex(**indexers)\n return combined\n\n def _maybe_unstack(self, obj):\n \"\"\"This gets called if we are applying on an array with a\n multidimensional group.\"\"\"\n if self._stacked_dim is not None and self._stacked_dim in obj.dims:\n obj = obj.unstack(self._stacked_dim)\n for dim in self._inserted_dims:\n if dim in obj.coords:\n del obj.coords[dim]\n return obj\n\n def fillna(self, value):\n \"\"\"Fill missing values in this object by group.\n\n This operation follows the normal broadcasting and alignment rules that\n xarray uses for binary arithmetic, except the result is aligned to this\n object (``join='left'``) instead of aligned to the intersection of\n index coordinates (``join='inner'``).\n\n Parameters\n ----------\n value : valid type for the grouped object's fillna method\n Used to fill all matching missing values by group.\n\n Returns\n -------\n same type as the grouped object\n\n See also\n --------\n Dataset.fillna\n DataArray.fillna\n \"\"\"\n out = ops.fillna(self, value)\n return out\n\n def where(self, cond, other=dtypes.NA):\n \"\"\"Return elements from `self` or `other` depending on `cond`.\n\n Parameters\n ----------\n cond : DataArray or Dataset with boolean dtype\n Locations at which to preserve this objects values.\n other : scalar, DataArray or Dataset, optional\n Value to use for locations in this object where ``cond`` is False.\n By default, inserts missing values.\n\n Returns\n -------\n same type as the grouped object\n\n See also\n --------\n Dataset.where\n \"\"\"\n return ops.where_method(self, cond, other)\n\n def _first_or_last(self, op, skipna, keep_attrs):\n if isinstance(self._group_indices[0], integer_types):\n # NB. 
this is currently only used for reductions along an existing\n # dimension\n return self._obj\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=True)\n return self.reduce(\n op, self._group_dim, skipna=skipna, keep_attrs=keep_attrs, allow_lazy=True\n )\n\n def first(self, skipna=None, keep_attrs=None):\n \"\"\"Return the first element of each group along the group dimension\n \"\"\"\n return self._first_or_last(duck_array_ops.first, skipna, keep_attrs)\n\n def last(self, skipna=None, keep_attrs=None):\n \"\"\"Return the last element of each group along the group dimension\n \"\"\"\n return self._first_or_last(duck_array_ops.last, skipna, keep_attrs)\n\n def assign_coords(self, coords=None, **coords_kwargs):\n \"\"\"Assign coordinates by group.\n\n See also\n --------\n Dataset.assign_coords\n Dataset.swap_dims\n \"\"\"\n coords_kwargs = either_dict_or_kwargs(coords, coords_kwargs, \"assign_coords\")\n return self.apply(lambda ds: ds.assign_coords(**coords_kwargs))\n\n\ndef _maybe_reorder(xarray_obj, dim, positions):\n order = _inverse_permutation_indices(positions)\n\n if order is None:\n return xarray_obj\n else:\n return xarray_obj[{dim: order}]\n\n\nclass DataArrayGroupBy(GroupBy, ImplementsArrayReduce):\n \"\"\"GroupBy object specialized to grouping DataArray objects\n \"\"\"\n\n def _iter_grouped_shortcut(self):\n \"\"\"Fast version of `_iter_grouped` that yields Variables without\n metadata\n \"\"\"\n var = self._obj.variable\n for indices in self._group_indices:\n yield var[{self._group_dim: indices}]\n\n def _concat_shortcut(self, applied, dim, positions=None):\n # nb. don't worry too much about maintaining this method -- it does\n # speed things up, but it's not very interpretable and there are much\n # faster alternatives (e.g., doing the grouped aggregation in a\n # compiled language)\n stacked = Variable.concat(applied, dim, shortcut=True)\n reordered = _maybe_reorder(stacked, dim, positions)\n result = self._obj._replace_maybe_drop_dims(reordered)\n return result\n\n def _restore_dim_order(self, stacked):\n def lookup_order(dimension):\n if dimension == self._group.name:\n dimension, = self._group.dims\n if dimension in self._obj.dims:\n axis = self._obj.get_axis_num(dimension)\n else:\n axis = 1e6 # some arbitrarily high value\n return axis\n\n new_order = sorted(stacked.dims, key=lookup_order)\n return stacked.transpose(*new_order, transpose_coords=self._restore_coord_dims)\n\n def apply(self, func, shortcut=False, args=(), **kwargs):\n \"\"\"Apply a function over each array in the group and concatenate them\n together into a new array.\n\n `func` is called like `func(ar, *args, **kwargs)` for each array `ar`\n in this group.\n\n Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how\n to stack together the array. The rule is:\n\n 1. If the dimension along which the group coordinate is defined is\n still in the first grouped array after applying `func`, then stack\n over this dimension.\n 2. 
Otherwise, stack over the new dimension given by name of this\n grouping (the argument to the `groupby` function).\n\n Parameters\n ----------\n func : function\n Callable to apply to each array.\n shortcut : bool, optional\n Whether or not to shortcut evaluation under the assumptions that:\n (1) The action of `func` does not depend on any of the array\n metadata (attributes or coordinates) but only on the data and\n dimensions.\n (2) The action of `func` creates arrays with homogeneous metadata,\n that is, with the same dimensions and attributes.\n If these conditions are satisfied `shortcut` provides significant\n speedup. This should be the case for many common groupby operations\n (e.g., applying numpy ufuncs).\n args : tuple, optional\n Positional arguments passed to `func`.\n **kwargs\n Used to call `func(ar, **kwargs)` for each array `ar`.\n\n Returns\n -------\n applied : DataArray or DataArray\n The result of splitting, applying and combining this array.\n \"\"\"\n if shortcut:\n grouped = self._iter_grouped_shortcut()\n else:\n grouped = self._iter_grouped()\n applied = (maybe_wrap_array(arr, func(arr, *args, **kwargs)) for arr in grouped)\n return self._combine(applied, shortcut=shortcut)\n\n def _combine(self, applied, restore_coord_dims=False, shortcut=False):\n \"\"\"Recombine the applied objects like the original.\"\"\"\n applied_example, applied = peek_at(applied)\n coord, dim, positions = self._infer_concat_args(applied_example)\n if shortcut:\n combined = self._concat_shortcut(applied, dim, positions)\n else:\n combined = concat(applied, dim)\n combined = _maybe_reorder(combined, dim, positions)\n\n if isinstance(combined, type(self._obj)):\n # only restore dimension order for arrays\n combined = self._restore_dim_order(combined)\n if coord is not None:\n if shortcut:\n combined._coords[coord.name] = as_variable(coord)\n else:\n combined.coords[coord.name] = coord\n combined = self._maybe_restore_empty_groups(combined)\n combined = self._maybe_unstack(combined)\n return combined\n\n def quantile(self, q, dim=None, interpolation=\"linear\", keep_attrs=None):\n \"\"\"Compute the qth quantile over each array in the groups and\n concatenate them together into a new array.\n\n Parameters\n ----------\n q : float in range of [0,1] (or sequence of floats)\n Quantile to compute, which must be between 0 and 1\n inclusive.\n dim : xarray.ALL_DIMS, str or sequence of str, optional\n Dimension(s) over which to apply quantile.\n Defaults to the grouped dimension.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to\n use when the desired quantile lies between two data points\n ``i < j``:\n * linear: ``i + (j - i) * fraction``, where ``fraction`` is\n the fractional part of the index surrounded by ``i`` and\n ``j``.\n * lower: ``i``.\n * higher: ``j``.\n * nearest: ``i`` or ``j``, whichever is nearest.\n * midpoint: ``(i + j) / 2``.\n\n Returns\n -------\n quantiles : Variable\n If `q` is a single quantile, then the result\n is a scalar. If multiple percentiles are given, first axis of\n the result corresponds to the quantile and a quantile dimension\n is added to the return array. 
The other dimensions are the\n dimensions that remain after the reduction of the array.\n\n See Also\n --------\n numpy.nanpercentile, pandas.Series.quantile, Dataset.quantile,\n DataArray.quantile\n \"\"\"\n if dim is None:\n dim = self._group_dim\n\n out = self.apply(\n self._obj.__class__.quantile,\n shortcut=False,\n q=q,\n dim=dim,\n interpolation=interpolation,\n keep_attrs=keep_attrs,\n )\n\n if np.asarray(q, dtype=np.float64).ndim == 0:\n out = out.drop(\"quantile\")\n return out\n\n def reduce(\n self, func, dim=None, axis=None, keep_attrs=None, shortcut=True, **kwargs\n ):\n \"\"\"Reduce the items in this group by applying `func` along some\n dimension(s).\n\n Parameters\n ----------\n func : function\n Function which can be called in the form\n `func(x, axis=axis, **kwargs)` to return the result of collapsing\n an np.ndarray over an integer valued axis.\n dim : xarray.ALL_DIMS, str or sequence of str, optional\n Dimension(s) over which to apply `func`.\n axis : int or sequence of int, optional\n Axis(es) over which to apply `func`. Only one of the 'dimension'\n and 'axis' arguments can be supplied. If neither are supplied, then\n `func` is calculated over all dimension for each group item.\n keep_attrs : bool, optional\n If True, the datasets's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : Array\n Array with summarized data and the indicated dimension(s)\n removed.\n \"\"\"\n if dim is None:\n dim = self._group_dim\n\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n\n if dim is not ALL_DIMS and dim not in self.dims:\n raise ValueError(\n \"cannot reduce over dimension %r. expected either xarray.ALL_DIMS to reduce over all dimensions or one or more of %r.\"\n % (dim, self.dims)\n )\n\n def reduce_array(ar):\n return ar.reduce(func, dim, axis, keep_attrs=keep_attrs, **kwargs)\n\n return self.apply(reduce_array, shortcut=shortcut)\n\n\nops.inject_reduce_methods(DataArrayGroupBy)\nops.inject_binary_ops(DataArrayGroupBy)\n\n\nclass DatasetGroupBy(GroupBy, ImplementsDatasetReduce):\n def apply(self, func, args=(), shortcut=None, **kwargs):\n \"\"\"Apply a function over each Dataset in the group and concatenate them\n together into a new Dataset.\n\n `func` is called like `func(ds, *args, **kwargs)` for each dataset `ds`\n in this group.\n\n Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how\n to stack together the datasets. The rule is:\n\n 1. If the dimension along which the group coordinate is defined is\n still in the first grouped item after applying `func`, then stack\n over this dimension.\n 2. 
Otherwise, stack over the new dimension given by name of this\n grouping (the argument to the `groupby` function).\n\n Parameters\n ----------\n func : function\n Callable to apply to each sub-dataset.\n args : tuple, optional\n Positional arguments to pass to `func`.\n **kwargs\n Used to call `func(ds, **kwargs)` for each sub-dataset `ar`.\n\n Returns\n -------\n applied : Dataset or DataArray\n The result of splitting, applying and combining this dataset.\n \"\"\"\n # ignore shortcut if set (for now)\n applied = (func(ds, *args, **kwargs) for ds in self._iter_grouped())\n return self._combine(applied)\n\n def _combine(self, applied):\n \"\"\"Recombine the applied objects like the original.\"\"\"\n applied_example, applied = peek_at(applied)\n coord, dim, positions = self._infer_concat_args(applied_example)\n combined = concat(applied, dim)\n combined = _maybe_reorder(combined, dim, positions)\n if coord is not None:\n combined[coord.name] = coord\n combined = self._maybe_restore_empty_groups(combined)\n combined = self._maybe_unstack(combined)\n return combined\n\n def reduce(self, func, dim=None, keep_attrs=None, **kwargs):\n \"\"\"Reduce the items in this group by applying `func` along some\n dimension(s).\n\n Parameters\n ----------\n func : function\n Function which can be called in the form\n `func(x, axis=axis, **kwargs)` to return the result of collapsing\n an np.ndarray over an integer valued axis.\n dim : xarray.ALL_DIMS, str or sequence of str, optional\n Dimension(s) over which to apply `func`.\n axis : int or sequence of int, optional\n Axis(es) over which to apply `func`. Only one of the 'dimension'\n and 'axis' arguments can be supplied. If neither are supplied, then\n `func` is calculated over all dimension for each group item.\n keep_attrs : bool, optional\n If True, the datasets's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : Array\n Array with summarized data and the indicated dimension(s)\n removed.\n \"\"\"\n if dim is None:\n dim = self._group_dim\n\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n\n def reduce_dataset(ds):\n return ds.reduce(func, dim, keep_attrs, **kwargs)\n\n if dim is not ALL_DIMS and dim not in self.dims:\n raise ValueError(\n \"cannot reduce over dimension %r. 
expected either xarray.ALL_DIMS to reduce over all dimensions or one or more of %r.\"\n % (dim, self.dims)\n )\n\n return self.apply(reduce_dataset)\n\n def assign(self, **kwargs):\n \"\"\"Assign data variables by group.\n\n See also\n --------\n Dataset.assign\n \"\"\"\n return self.apply(lambda ds: ds.assign(**kwargs))\n\n\nops.inject_reduce_methods(DatasetGroupBy)\nops.inject_binary_ops(DatasetGroupBy)\n"},{"attributeType":"null","col":0,"comment":"null","endLoc":30,"id":911,"name":"ALL_DIMS","nodeType":"Attribute","startLoc":30,"text":"ALL_DIMS"},{"col":4,"comment":"Compute the qth quantile over each array in the groups and\n concatenate them together into a new array.\n\n Parameters\n ----------\n q : float in range of [0,1] (or sequence of floats)\n Quantile to compute, which must be between 0 and 1\n inclusive.\n dim : xarray.ALL_DIMS, str or sequence of str, optional\n Dimension(s) over which to apply quantile.\n Defaults to the grouped dimension.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to\n use when the desired quantile lies between two data points\n ``i < j``:\n * linear: ``i + (j - i) * fraction``, where ``fraction`` is\n the fractional part of the index surrounded by ``i`` and\n ``j``.\n * lower: ``i``.\n * higher: ``j``.\n * nearest: ``i`` or ``j``, whichever is nearest.\n * midpoint: ``(i + j) / 2``.\n\n Returns\n -------\n quantiles : Variable\n If `q` is a single quantile, then the result\n is a scalar. If multiple percentiles are given, first axis of\n the result corresponds to the quantile and a quantile dimension\n is added to the return array. The other dimensions are the\n dimensions that remain after the reduction of the array.\n\n See Also\n --------\n numpy.nanpercentile, pandas.Series.quantile, Dataset.quantile,\n DataArray.quantile\n ","endLoc":758,"header":"def quantile(self, q, dim=None, interpolation=\"linear\", keep_attrs=None)","id":912,"name":"quantile","nodeType":"Function","startLoc":706,"text":"def quantile(self, q, dim=None, interpolation=\"linear\", keep_attrs=None):\n \"\"\"Compute the qth quantile over each array in the groups and\n concatenate them together into a new array.\n\n Parameters\n ----------\n q : float in range of [0,1] (or sequence of floats)\n Quantile to compute, which must be between 0 and 1\n inclusive.\n dim : xarray.ALL_DIMS, str or sequence of str, optional\n Dimension(s) over which to apply quantile.\n Defaults to the grouped dimension.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to\n use when the desired quantile lies between two data points\n ``i < j``:\n * linear: ``i + (j - i) * fraction``, where ``fraction`` is\n the fractional part of the index surrounded by ``i`` and\n ``j``.\n * lower: ``i``.\n * higher: ``j``.\n * nearest: ``i`` or ``j``, whichever is nearest.\n * midpoint: ``(i + j) / 2``.\n\n Returns\n -------\n quantiles : Variable\n If `q` is a single quantile, then the result\n is a scalar. If multiple percentiles are given, first axis of\n the result corresponds to the quantile and a quantile dimension\n is added to the return array. 
The other dimensions are the\n dimensions that remain after the reduction of the array.\n\n See Also\n --------\n numpy.nanpercentile, pandas.Series.quantile, Dataset.quantile,\n DataArray.quantile\n \"\"\"\n if dim is None:\n dim = self._group_dim\n\n out = self.apply(\n self._obj.__class__.quantile,\n shortcut=False,\n q=q,\n dim=dim,\n interpolation=interpolation,\n keep_attrs=keep_attrs,\n )\n\n if np.asarray(q, dtype=np.float64).ndim == 0:\n out = out.drop(\"quantile\")\n return out"},{"attributeType":"null","col":0,"comment":"null","endLoc":3,"id":913,"name":"integer_types","nodeType":"Attribute","startLoc":3,"text":"integer_types"},{"className":"_DummyGroup","col":0,"comment":"Class for keeping track of grouped dimensions without coordinates.\n\n Should not be user visible.\n ","endLoc":169,"id":914,"nodeType":"Class","startLoc":137,"text":"class _DummyGroup:\n \"\"\"Class for keeping track of grouped dimensions without coordinates.\n\n Should not be user visible.\n \"\"\"\n\n __slots__ = (\"name\", \"coords\", \"size\")\n\n def __init__(self, obj, name, coords):\n self.name = name\n self.coords = coords\n self.size = obj.sizes[name]\n\n @property\n def dims(self):\n return (self.name,)\n\n @property\n def ndim(self):\n return 1\n\n @property\n def values(self):\n return range(self.size)\n\n @property\n def shape(self):\n return (self.size,)\n\n def __getitem__(self, key):\n if isinstance(key, tuple):\n key = key[0]\n return self.values[key]"},{"col":4,"comment":"null","endLoc":152,"header":"@property\n def dims(self)","id":915,"name":"dims","nodeType":"Function","startLoc":150,"text":"@property\n def dims(self):\n return (self.name,)"},{"col":4,"comment":"null","endLoc":156,"header":"@property\n def ndim(self)","id":916,"name":"ndim","nodeType":"Function","startLoc":154,"text":"@property\n def ndim(self):\n return 1"},{"col":4,"comment":"null","endLoc":160,"header":"@property\n def values(self)","id":917,"name":"values","nodeType":"Function","startLoc":158,"text":"@property\n def values(self):\n return range(self.size)"},{"col":4,"comment":"null","endLoc":164,"header":"@property\n def shape(self)","id":918,"name":"shape","nodeType":"Function","startLoc":162,"text":"@property\n def shape(self):\n return (self.size,)"},{"col":4,"comment":"null","endLoc":169,"header":"def __getitem__(self, key)","id":919,"name":"__getitem__","nodeType":"Function","startLoc":166,"text":"def __getitem__(self, key):\n if isinstance(key, tuple):\n key = key[0]\n return self.values[key]"},{"attributeType":"null","col":4,"comment":"null","endLoc":143,"id":920,"name":"__slots__","nodeType":"Attribute","startLoc":143,"text":"__slots__"},{"attributeType":"null","col":8,"comment":"null","endLoc":148,"id":921,"name":"size","nodeType":"Attribute","startLoc":148,"text":"self.size"},{"col":4,"comment":"null","endLoc":395,"header":"def open_store_variable(self, name, var)","id":922,"name":"open_store_variable","nodeType":"Function","startLoc":368,"text":"def open_store_variable(self, name, var):\n dimensions = var.dimensions\n data = indexing.LazilyOuterIndexedArray(NetCDF4ArrayWrapper(name, self))\n attributes = {k: var.getncattr(k) for k in var.ncattrs()}\n _ensure_fill_value_valid(data, attributes)\n # netCDF4 specific encoding; save _FillValue for later\n encoding = {}\n filters = var.filters()\n if filters is not None:\n encoding.update(filters)\n chunking = var.chunking()\n if chunking is not None:\n if chunking == \"contiguous\":\n encoding[\"contiguous\"] = True\n encoding[\"chunksizes\"] = None\n else:\n 
encoding[\"contiguous\"] = False\n encoding[\"chunksizes\"] = tuple(chunking)\n # TODO: figure out how to round-trip \"endian-ness\" without raising\n # warnings from netCDF4\n # encoding['endian'] = var.endian()\n pop_to(attributes, encoding, \"least_significant_digit\")\n # save source so __repr__ can detect if it's local or not\n encoding[\"source\"] = self._filename\n encoding[\"original_shape\"] = var.shape\n encoding[\"dtype\"] = var.dtype\n\n return Variable(dimensions, data, attributes, encoding)"},{"col":4,"comment":"Reduce the items in this group by applying `func` along some\n dimension(s).\n\n Parameters\n ----------\n func : function\n Function which can be called in the form\n `func(x, axis=axis, **kwargs)` to return the result of collapsing\n an np.ndarray over an integer valued axis.\n dim : xarray.ALL_DIMS, str or sequence of str, optional\n Dimension(s) over which to apply `func`.\n axis : int or sequence of int, optional\n Axis(es) over which to apply `func`. Only one of the 'dimension'\n and 'axis' arguments can be supplied. If neither are supplied, then\n `func` is calculated over all dimension for each group item.\n keep_attrs : bool, optional\n If True, the datasets's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : Array\n Array with summarized data and the indicated dimension(s)\n removed.\n ","endLoc":806,"header":"def reduce(\n self, func, dim=None, axis=None, keep_attrs=None, shortcut=True, **kwargs\n )","id":923,"name":"reduce","nodeType":"Function","startLoc":760,"text":"def reduce(\n self, func, dim=None, axis=None, keep_attrs=None, shortcut=True, **kwargs\n ):\n \"\"\"Reduce the items in this group by applying `func` along some\n dimension(s).\n\n Parameters\n ----------\n func : function\n Function which can be called in the form\n `func(x, axis=axis, **kwargs)` to return the result of collapsing\n an np.ndarray over an integer valued axis.\n dim : xarray.ALL_DIMS, str or sequence of str, optional\n Dimension(s) over which to apply `func`.\n axis : int or sequence of int, optional\n Axis(es) over which to apply `func`. Only one of the 'dimension'\n and 'axis' arguments can be supplied. If neither are supplied, then\n `func` is calculated over all dimension for each group item.\n keep_attrs : bool, optional\n If True, the datasets's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : Array\n Array with summarized data and the indicated dimension(s)\n removed.\n \"\"\"\n if dim is None:\n dim = self._group_dim\n\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n\n if dim is not ALL_DIMS and dim not in self.dims:\n raise ValueError(\n \"cannot reduce over dimension %r. 
expected either xarray.ALL_DIMS to reduce over all dimensions or one or more of %r.\"\n % (dim, self.dims)\n )\n\n def reduce_array(ar):\n return ar.reduce(func, dim, axis, keep_attrs=keep_attrs, **kwargs)\n\n return self.apply(reduce_array, shortcut=shortcut)"},{"attributeType":"null","col":8,"comment":"null","endLoc":146,"id":924,"name":"name","nodeType":"Attribute","startLoc":146,"text":"self.name"},{"col":4,"comment":"null","endLoc":1167,"header":"def __copy__(self) -> \"Dataset\"","id":925,"name":"__copy__","nodeType":"Function","startLoc":1166,"text":"def __copy__(self) -> \"Dataset\":\n return self.copy(deep=False)"},{"className":"DatasetGroupBy","col":0,"comment":"null","endLoc":913,"id":926,"nodeType":"Class","startLoc":813,"text":"class DatasetGroupBy(GroupBy, ImplementsDatasetReduce):\n def apply(self, func, args=(), shortcut=None, **kwargs):\n \"\"\"Apply a function over each Dataset in the group and concatenate them\n together into a new Dataset.\n\n `func` is called like `func(ds, *args, **kwargs)` for each dataset `ds`\n in this group.\n\n Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how\n to stack together the datasets. The rule is:\n\n 1. If the dimension along which the group coordinate is defined is\n still in the first grouped item after applying `func`, then stack\n over this dimension.\n 2. Otherwise, stack over the new dimension given by name of this\n grouping (the argument to the `groupby` function).\n\n Parameters\n ----------\n func : function\n Callable to apply to each sub-dataset.\n args : tuple, optional\n Positional arguments to pass to `func`.\n **kwargs\n Used to call `func(ds, **kwargs)` for each sub-dataset `ar`.\n\n Returns\n -------\n applied : Dataset or DataArray\n The result of splitting, applying and combining this dataset.\n \"\"\"\n # ignore shortcut if set (for now)\n applied = (func(ds, *args, **kwargs) for ds in self._iter_grouped())\n return self._combine(applied)\n\n def _combine(self, applied):\n \"\"\"Recombine the applied objects like the original.\"\"\"\n applied_example, applied = peek_at(applied)\n coord, dim, positions = self._infer_concat_args(applied_example)\n combined = concat(applied, dim)\n combined = _maybe_reorder(combined, dim, positions)\n if coord is not None:\n combined[coord.name] = coord\n combined = self._maybe_restore_empty_groups(combined)\n combined = self._maybe_unstack(combined)\n return combined\n\n def reduce(self, func, dim=None, keep_attrs=None, **kwargs):\n \"\"\"Reduce the items in this group by applying `func` along some\n dimension(s).\n\n Parameters\n ----------\n func : function\n Function which can be called in the form\n `func(x, axis=axis, **kwargs)` to return the result of collapsing\n an np.ndarray over an integer valued axis.\n dim : xarray.ALL_DIMS, str or sequence of str, optional\n Dimension(s) over which to apply `func`.\n axis : int or sequence of int, optional\n Axis(es) over which to apply `func`. Only one of the 'dimension'\n and 'axis' arguments can be supplied. If neither are supplied, then\n `func` is calculated over all dimension for each group item.\n keep_attrs : bool, optional\n If True, the datasets's attributes (`attrs`) will be copied from\n the original object to the new one. 
If False (default), the new\n object will be returned without attributes.\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : Array\n Array with summarized data and the indicated dimension(s)\n removed.\n \"\"\"\n if dim is None:\n dim = self._group_dim\n\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n\n def reduce_dataset(ds):\n return ds.reduce(func, dim, keep_attrs, **kwargs)\n\n if dim is not ALL_DIMS and dim not in self.dims:\n raise ValueError(\n \"cannot reduce over dimension %r. expected either xarray.ALL_DIMS to reduce over all dimensions or one or more of %r.\"\n % (dim, self.dims)\n )\n\n return self.apply(reduce_dataset)\n\n def assign(self, **kwargs):\n \"\"\"Assign data variables by group.\n\n See also\n --------\n Dataset.assign\n \"\"\"\n return self.apply(lambda ds: ds.assign(**kwargs))"},{"attributeType":"null","col":8,"comment":"null","endLoc":147,"id":927,"name":"coords","nodeType":"Attribute","startLoc":147,"text":"self.coords"},{"col":0,"comment":"","endLoc":1,"header":"groupby.py#","id":928,"name":"","nodeType":"Function","startLoc":1,"text":"ops.inject_reduce_methods(DataArrayGroupBy)\n\nops.inject_binary_ops(DataArrayGroupBy)\n\nops.inject_reduce_methods(DatasetGroupBy)\n\nops.inject_binary_ops(DatasetGroupBy)"},{"col":4,"comment":"null","endLoc":1172,"header":"def __deepcopy__(self, memo=None) -> \"Dataset\"","id":929,"name":"__deepcopy__","nodeType":"Function","startLoc":1169,"text":"def __deepcopy__(self, memo=None) -> \"Dataset\":\n # memo does nothing but is required for compatibility with\n # copy.deepcopy\n return self.copy(deep=True)"},{"col":0,"comment":"null","endLoc":275,"header":"def inject_reduce_methods(cls)","id":930,"name":"inject_reduce_methods","nodeType":"Function","startLoc":253,"text":"def inject_reduce_methods(cls):\n methods = (\n [\n (name, getattr(duck_array_ops, \"array_%s\" % name), False)\n for name in REDUCE_METHODS\n ]\n + [(name, getattr(duck_array_ops, name), True) for name in NAN_REDUCE_METHODS]\n + [(\"count\", duck_array_ops.count, False)]\n )\n for name, f, include_skipna in methods:\n numeric_only = getattr(f, \"numeric_only\", False)\n available_min_count = getattr(f, \"available_min_count\", False)\n min_count_docs = _MINCOUNT_DOCSTRING if available_min_count else \"\"\n\n func = cls._reduce_method(f, include_skipna, numeric_only)\n func.__name__ = name\n func.__doc__ = _REDUCE_DOCSTRING_TEMPLATE.format(\n name=name,\n cls=cls.__name__,\n extra_args=cls._reduce_extra_args_docstring.format(name=name),\n min_count_docs=min_count_docs,\n )\n setattr(cls, name, func)"},{"col":4,"comment":"Apply a function over each Dataset in the group and concatenate them\n together into a new Dataset.\n\n `func` is called like `func(ds, *args, **kwargs)` for each dataset `ds`\n in this group.\n\n Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how\n to stack together the datasets. The rule is:\n\n 1. If the dimension along which the group coordinate is defined is\n still in the first grouped item after applying `func`, then stack\n over this dimension.\n 2. 
Otherwise, stack over the new dimension given by name of this\n grouping (the argument to the `groupby` function).\n\n Parameters\n ----------\n func : function\n Callable to apply to each sub-dataset.\n args : tuple, optional\n Positional arguments to pass to `func`.\n **kwargs\n Used to call `func(ds, **kwargs)` for each sub-dataset `ar`.\n\n Returns\n -------\n applied : Dataset or DataArray\n The result of splitting, applying and combining this dataset.\n ","endLoc":846,"header":"def apply(self, func, args=(), shortcut=None, **kwargs)","id":931,"name":"apply","nodeType":"Function","startLoc":814,"text":"def apply(self, func, args=(), shortcut=None, **kwargs):\n \"\"\"Apply a function over each Dataset in the group and concatenate them\n together into a new Dataset.\n\n `func` is called like `func(ds, *args, **kwargs)` for each dataset `ds`\n in this group.\n\n Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how\n to stack together the datasets. The rule is:\n\n 1. If the dimension along which the group coordinate is defined is\n still in the first grouped item after applying `func`, then stack\n over this dimension.\n 2. Otherwise, stack over the new dimension given by name of this\n grouping (the argument to the `groupby` function).\n\n Parameters\n ----------\n func : function\n Callable to apply to each sub-dataset.\n args : tuple, optional\n Positional arguments to pass to `func`.\n **kwargs\n Used to call `func(ds, **kwargs)` for each sub-dataset `ar`.\n\n Returns\n -------\n applied : Dataset or DataArray\n The result of splitting, applying and combining this dataset.\n \"\"\"\n # ignore shortcut if set (for now)\n applied = (func(ds, *args, **kwargs) for ds in self._iter_grouped())\n return self._combine(applied)"},{"col":0,"comment":"null","endLoc":174,"header":"def _ensure_fill_value_valid(data, attributes)","id":932,"name":"_ensure_fill_value_valid","nodeType":"Function","startLoc":170,"text":"def _ensure_fill_value_valid(data, attributes):\n # work around for netCDF4/scipy issue where _FillValue has the wrong type:\n # https://github.com/Unidata/netcdf4-python/issues/271\n if data.dtype.kind == \"S\" and \"_FillValue\" in attributes:\n attributes[\"_FillValue\"] = np.string_(attributes[\"_FillValue\"])"},{"col":4,"comment":"List of places to look-up items for attribute-style access\n ","endLoc":1178,"header":"@property\n def _attr_sources(self) -> List[Mapping[Hashable, Any]]","id":933,"name":"_attr_sources","nodeType":"Function","startLoc":1174,"text":"@property\n def _attr_sources(self) -> List[Mapping[Hashable, Any]]:\n \"\"\"List of places to look-up items for attribute-style access\n \"\"\"\n return self._item_sources + [self.attrs]"},{"col":4,"comment":"List of places to look-up items for key-completion\n ","endLoc":1189,"header":"@property\n def _item_sources(self) -> List[Mapping[Hashable, Any]]","id":934,"name":"_item_sources","nodeType":"Function","startLoc":1180,"text":"@property\n def _item_sources(self) -> List[Mapping[Hashable, Any]]:\n \"\"\"List of places to look-up items for key-completion\n \"\"\"\n return [\n self.data_vars,\n self.coords,\n {d: self[d] for d in self.dims},\n LevelCoordinatesSource(self),\n ]"},{"col":4,"comment":"\n Parameters\n ----------\n data : DataArray\n xarray DataArray to be plotted\n row, col : strings\n Dimesion names that define subsets of the data, which will be drawn\n on separate facets in the grid.\n col_wrap : int, optional\n \"Wrap\" the column variable at this width, so that the column facets\n sharex : 
bool, optional\n If true, the facets will share x axes\n sharey : bool, optional\n If true, the facets will share y axes\n figsize : tuple, optional\n A tuple (width, height) of the figure in inches.\n If set, overrides ``size`` and ``aspect``.\n aspect : scalar, optional\n Aspect ratio of each facet, so that ``aspect * size`` gives the\n width of each facet in inches\n size : scalar, optional\n Height (in inches) of each facet. See also: ``aspect``\n subplot_kws : dict, optional\n Dictionary of keyword arguments for matplotlib subplots\n\n ","endLoc":207,"header":"def __init__(\n self,\n data,\n col=None,\n row=None,\n col_wrap=None,\n sharex=True,\n sharey=True,\n figsize=None,\n aspect=1,\n size=3,\n subplot_kws=None,\n )","id":935,"name":"__init__","nodeType":"Function","startLoc":72,"text":"def __init__(\n self,\n data,\n col=None,\n row=None,\n col_wrap=None,\n sharex=True,\n sharey=True,\n figsize=None,\n aspect=1,\n size=3,\n subplot_kws=None,\n ):\n \"\"\"\n Parameters\n ----------\n data : DataArray\n xarray DataArray to be plotted\n row, col : strings\n Dimesion names that define subsets of the data, which will be drawn\n on separate facets in the grid.\n col_wrap : int, optional\n \"Wrap\" the column variable at this width, so that the column facets\n sharex : bool, optional\n If true, the facets will share x axes\n sharey : bool, optional\n If true, the facets will share y axes\n figsize : tuple, optional\n A tuple (width, height) of the figure in inches.\n If set, overrides ``size`` and ``aspect``.\n aspect : scalar, optional\n Aspect ratio of each facet, so that ``aspect * size`` gives the\n width of each facet in inches\n size : scalar, optional\n Height (in inches) of each facet. See also: ``aspect``\n subplot_kws : dict, optional\n Dictionary of keyword arguments for matplotlib subplots\n\n \"\"\"\n\n plt = import_matplotlib_pyplot()\n\n # Handle corner case of nonunique coordinates\n rep_col = col is not None and not data[col].to_index().is_unique\n rep_row = row is not None and not data[row].to_index().is_unique\n if rep_col or rep_row:\n raise ValueError(\n \"Coordinates used for faceting cannot \"\n \"contain repeated (nonunique) values.\"\n )\n\n # single_group is the grouping variable, if there is exactly one\n if col and row:\n single_group = False\n nrow = len(data[row])\n ncol = len(data[col])\n nfacet = nrow * ncol\n if col_wrap is not None:\n warnings.warn(\"Ignoring col_wrap since both col and row \" \"were passed\")\n elif row and not col:\n single_group = row\n elif not row and col:\n single_group = col\n else:\n raise ValueError(\"Pass a coordinate name as an argument for row or col\")\n\n # Compute grid shape\n if single_group:\n nfacet = len(data[single_group])\n if col:\n # idea - could add heuristic for nice shapes like 3x4\n ncol = nfacet\n if row:\n ncol = 1\n if col_wrap is not None:\n # Overrides previous settings\n ncol = col_wrap\n nrow = int(np.ceil(nfacet / ncol))\n\n # Set the subplot kwargs\n subplot_kws = {} if subplot_kws is None else subplot_kws\n\n if figsize is None:\n # Calculate the base figure size with extra horizontal space for a\n # colorbar\n cbar_space = 1\n figsize = (ncol * size * aspect + cbar_space, nrow * size)\n\n fig, axes = plt.subplots(\n nrow,\n ncol,\n sharex=sharex,\n sharey=sharey,\n squeeze=False,\n figsize=figsize,\n subplot_kw=subplot_kws,\n )\n\n # Set up the lists of names for the row and column facet variables\n col_names = list(data[col].values) if col else []\n row_names = list(data[row].values) if row else 
[]\n\n if single_group:\n full = [{single_group: x} for x in data[single_group].values]\n empty = [None for x in range(nrow * ncol - len(full))]\n name_dicts = full + empty\n else:\n rowcols = itertools.product(row_names, col_names)\n name_dicts = [{row: r, col: c} for r, c in rowcols]\n\n name_dicts = np.array(name_dicts).reshape(nrow, ncol)\n\n # Set up the class attributes\n # ---------------------------\n\n # First the public API\n self.data = data\n self.name_dicts = name_dicts\n self.fig = fig\n self.axes = axes\n self.row_names = row_names\n self.col_names = col_names\n self.figlegend = None\n\n # Next the private variables\n self._single_group = single_group\n self._nrow = nrow\n self._row_var = row\n self._ncol = ncol\n self._col_var = col\n self._col_wrap = col_wrap\n self._x_var = None\n self._y_var = None\n self._cmap_extend = None\n self._mappables = []\n self._finalized = False"},{"col":0,"comment":"Import pyplot as register appropriate converters.","endLoc":60,"header":"def import_matplotlib_pyplot()","id":936,"name":"import_matplotlib_pyplot","nodeType":"Function","startLoc":55,"text":"def import_matplotlib_pyplot():\n \"\"\"Import pyplot as register appropriate converters.\"\"\"\n register_pandas_datetime_converter_if_needed()\n import matplotlib.pyplot as plt\n\n return plt"},{"col":4,"comment":"Recombine the applied objects like the original.","endLoc":858,"header":"def _combine(self, applied)","id":937,"name":"_combine","nodeType":"Function","startLoc":848,"text":"def _combine(self, applied):\n \"\"\"Recombine the applied objects like the original.\"\"\"\n applied_example, applied = peek_at(applied)\n coord, dim, positions = self._infer_concat_args(applied_example)\n combined = concat(applied, dim)\n combined = _maybe_reorder(combined, dim, positions)\n if coord is not None:\n combined[coord.name] = coord\n combined = self._maybe_restore_empty_groups(combined)\n combined = self._maybe_unstack(combined)\n return combined"},{"col":4,"comment":"null","endLoc":438,"header":"def _to_temp_dataset(self) -> Dataset","id":938,"name":"_to_temp_dataset","nodeType":"Function","startLoc":437,"text":"def _to_temp_dataset(self) -> Dataset:\n return self._to_dataset_whole(name=_THIS_ARRAY, shallow_copy=False)"},{"col":4,"comment":"null","endLoc":445,"header":"def _from_temp_dataset(\n self, dataset: Dataset, name: Hashable = __default\n ) -> \"DataArray\"","id":939,"name":"_from_temp_dataset","nodeType":"Function","startLoc":440,"text":"def _from_temp_dataset(\n self, dataset: Dataset, name: Hashable = __default\n ) -> \"DataArray\":\n variable = dataset._variables.pop(_THIS_ARRAY)\n coords = dataset._variables\n return self._replace(variable, coords, name)"},{"col":0,"comment":"null","endLoc":318,"header":"def inject_binary_ops(cls, inplace=False)","id":940,"name":"inject_binary_ops","nodeType":"Function","startLoc":307,"text":"def inject_binary_ops(cls, inplace=False):\n for name in CMP_BINARY_OPS + NUM_BINARY_OPS:\n setattr(cls, op_str(name), cls._binary_op(get_op(name)))\n\n for name, f in [(\"eq\", array_eq), (\"ne\", array_ne)]:\n setattr(cls, op_str(name), cls._binary_op(f))\n\n for name in NUM_BINARY_OPS:\n # only numeric operations have in-place and reflexive variants\n setattr(cls, op_str(\"r\" + name), cls._binary_op(get_op(name), reflexive=True))\n if inplace:\n setattr(cls, op_str(\"i\" + name), cls._inplace_binary_op(get_op(\"i\" + name)))"},{"col":0,"comment":"null","endLoc":293,"header":"def 
op_str(name)","id":941,"name":"op_str","nodeType":"Function","startLoc":292,"text":"def op_str(name):\n return \"__%s__\" % name"},{"col":0,"comment":"null","endLoc":297,"header":"def get_op(name)","id":942,"name":"get_op","nodeType":"Function","startLoc":296,"text":"def get_op(name):\n return getattr(operator, op_str(name))"},{"col":4,"comment":"Reduce the items in this group by applying `func` along some\n dimension(s).\n\n Parameters\n ----------\n func : function\n Function which can be called in the form\n `func(x, axis=axis, **kwargs)` to return the result of collapsing\n an np.ndarray over an integer valued axis.\n dim : xarray.ALL_DIMS, str or sequence of str, optional\n Dimension(s) over which to apply `func`.\n axis : int or sequence of int, optional\n Axis(es) over which to apply `func`. Only one of the 'dimension'\n and 'axis' arguments can be supplied. If neither are supplied, then\n `func` is calculated over all dimension for each group item.\n keep_attrs : bool, optional\n If True, the datasets's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : Array\n Array with summarized data and the indicated dimension(s)\n removed.\n ","endLoc":904,"header":"def reduce(self, func, dim=None, keep_attrs=None, **kwargs)","id":943,"name":"reduce","nodeType":"Function","startLoc":860,"text":"def reduce(self, func, dim=None, keep_attrs=None, **kwargs):\n \"\"\"Reduce the items in this group by applying `func` along some\n dimension(s).\n\n Parameters\n ----------\n func : function\n Function which can be called in the form\n `func(x, axis=axis, **kwargs)` to return the result of collapsing\n an np.ndarray over an integer valued axis.\n dim : xarray.ALL_DIMS, str or sequence of str, optional\n Dimension(s) over which to apply `func`.\n axis : int or sequence of int, optional\n Axis(es) over which to apply `func`. Only one of the 'dimension'\n and 'axis' arguments can be supplied. If neither are supplied, then\n `func` is calculated over all dimension for each group item.\n keep_attrs : bool, optional\n If True, the datasets's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : Array\n Array with summarized data and the indicated dimension(s)\n removed.\n \"\"\"\n if dim is None:\n dim = self._group_dim\n\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n\n def reduce_dataset(ds):\n return ds.reduce(func, dim, keep_attrs, **kwargs)\n\n if dim is not ALL_DIMS and dim not in self.dims:\n raise ValueError(\n \"cannot reduce over dimension %r. 
expected either xarray.ALL_DIMS to reduce over all dimensions or one or more of %r.\"\n % (dim, self.dims)\n )\n\n return self.apply(reduce_dataset)"},{"col":4,"comment":"The name of this array.\n ","endLoc":521,"header":"@property\n def name(self) -> Optional[Hashable]","id":944,"name":"name","nodeType":"Function","startLoc":517,"text":"@property\n def name(self) -> Optional[Hashable]:\n \"\"\"The name of this array.\n \"\"\"\n return self._name"},{"col":4,"comment":"null","endLoc":525,"header":"@name.setter\n def name(self, value: Optional[Hashable]) -> None","id":945,"name":"name","nodeType":"Function","startLoc":523,"text":"@name.setter\n def name(self, value: Optional[Hashable]) -> None:\n self._name = value"},{"col":4,"comment":"Low level interface to the Variable object for this DataArray.","endLoc":530,"header":"@property\n def variable(self) -> Variable","id":946,"name":"variable","nodeType":"Function","startLoc":527,"text":"@property\n def variable(self) -> Variable:\n \"\"\"Low level interface to the Variable object for this DataArray.\"\"\"\n return self._variable"},{"col":4,"comment":"null","endLoc":534,"header":"@property\n def dtype(self) -> np.dtype","id":947,"name":"dtype","nodeType":"Function","startLoc":532,"text":"@property\n def dtype(self) -> np.dtype:\n return self.variable.dtype"},{"col":4,"comment":"null","endLoc":538,"header":"@property\n def shape(self) -> Tuple[int, ...]","id":948,"name":"shape","nodeType":"Function","startLoc":536,"text":"@property\n def shape(self) -> Tuple[int, ...]:\n return self.variable.shape"},{"col":4,"comment":"null","endLoc":542,"header":"@property\n def size(self) -> int","id":949,"name":"size","nodeType":"Function","startLoc":540,"text":"@property\n def size(self) -> int:\n return self.variable.size"},{"col":4,"comment":"null","endLoc":546,"header":"@property\n def nbytes(self) -> int","id":950,"name":"nbytes","nodeType":"Function","startLoc":544,"text":"@property\n def nbytes(self) -> int:\n return self.variable.nbytes"},{"col":4,"comment":"null","endLoc":550,"header":"@property\n def ndim(self) -> int","id":951,"name":"ndim","nodeType":"Function","startLoc":548,"text":"@property\n def ndim(self) -> int:\n return self.variable.ndim"},{"col":4,"comment":"null","endLoc":553,"header":"def __len__(self) -> int","id":952,"name":"__len__","nodeType":"Function","startLoc":552,"text":"def __len__(self) -> int:\n return len(self.variable)"},{"col":4,"comment":"The array's data as a dask or numpy array\n ","endLoc":559,"header":"@property\n def data(self) -> Any","id":953,"name":"data","nodeType":"Function","startLoc":555,"text":"@property\n def data(self) -> Any:\n \"\"\"The array's data as a dask or numpy array\n \"\"\"\n return self.variable.data"},{"col":4,"comment":"null","endLoc":563,"header":"@data.setter\n def data(self, value: Any) -> None","id":954,"name":"data","nodeType":"Function","startLoc":561,"text":"@data.setter\n def data(self, value: Any) -> None:\n self.variable.data = value"},{"col":4,"comment":"The array's data as a numpy.ndarray","endLoc":568,"header":"@property\n def values(self) -> np.ndarray","id":955,"name":"values","nodeType":"Function","startLoc":565,"text":"@property\n def values(self) -> np.ndarray:\n \"\"\"The array's data as a numpy.ndarray\"\"\"\n return self.variable.values"},{"col":4,"comment":"null","endLoc":572,"header":"@values.setter\n def values(self, value: Any) -> None","id":956,"name":"values","nodeType":"Function","startLoc":570,"text":"@values.setter\n def values(self, value: Any) -> None:\n 
self.variable.values = value"},{"col":4,"comment":"null","endLoc":576,"header":"@property\n def _in_memory(self) -> bool","id":957,"name":"_in_memory","nodeType":"Function","startLoc":574,"text":"@property\n def _in_memory(self) -> bool:\n return self.variable._in_memory"},{"col":4,"comment":"Convert this variable to a pandas.Index. Only possible for 1D\n arrays.\n ","endLoc":582,"header":"def to_index(self) -> pd.Index","id":958,"name":"to_index","nodeType":"Function","startLoc":578,"text":"def to_index(self) -> pd.Index:\n \"\"\"Convert this variable to a pandas.Index. Only possible for 1D\n arrays.\n \"\"\"\n return self.variable.to_index()"},{"col":4,"comment":"null","endLoc":332,"header":"def __init__(self, data_object: \"Union[DataArray, Dataset]\")","id":959,"name":"__init__","nodeType":"Function","startLoc":331,"text":"def __init__(self, data_object: \"Union[DataArray, Dataset]\"):\n self._data = data_object"},{"col":4,"comment":"The 'in' operator will return true or false depending on whether\n 'key' is an array in the dataset or not.\n ","endLoc":1195,"header":"def __contains__(self, key: object) -> bool","id":960,"name":"__contains__","nodeType":"Function","startLoc":1191,"text":"def __contains__(self, key: object) -> bool:\n \"\"\"The 'in' operator will return true or false depending on whether\n 'key' is an array in the dataset or not.\n \"\"\"\n return key in self._variables"},{"col":4,"comment":"null","endLoc":1198,"header":"def __len__(self) -> int","id":961,"name":"__len__","nodeType":"Function","startLoc":1197,"text":"def __len__(self) -> int:\n return len(self.data_vars)"},{"col":4,"comment":"Assign data variables by group.\n\n See also\n --------\n Dataset.assign\n ","endLoc":913,"header":"def assign(self, **kwargs)","id":962,"name":"assign","nodeType":"Function","startLoc":906,"text":"def assign(self, **kwargs):\n \"\"\"Assign data variables by group.\n\n See also\n --------\n Dataset.assign\n \"\"\"\n return self.apply(lambda ds: ds.assign(**kwargs))"},{"col":4,"comment":"Tuple of dimension names associated with this array.\n\n Note that the type of this property is inconsistent with\n `Dataset.dims`. See `Dataset.sizes` and `DataArray.sizes` for\n consistently named properties.\n ","endLoc":592,"header":"@property\n def dims(self) -> Tuple[Hashable, ...]","id":963,"name":"dims","nodeType":"Function","startLoc":584,"text":"@property\n def dims(self) -> Tuple[Hashable, ...]:\n \"\"\"Tuple of dimension names associated with this array.\n\n Note that the type of this property is inconsistent with\n `Dataset.dims`. See `Dataset.sizes` and `DataArray.sizes` for\n consistently named properties.\n \"\"\"\n return self.variable.dims"},{"col":4,"comment":"null","endLoc":599,"header":"@dims.setter\n def dims(self, value)","id":964,"name":"dims","nodeType":"Function","startLoc":594,"text":"@dims.setter\n def dims(self, value):\n raise AttributeError(\n \"you cannot assign dims on a DataArray. 
Use \"\n \".rename() or .swap_dims() instead.\"\n )"},{"col":4,"comment":"null","endLoc":606,"header":"def _item_key_to_dict(self, key: Any) -> Mapping[Hashable, Any]","id":965,"name":"_item_key_to_dict","nodeType":"Function","startLoc":601,"text":"def _item_key_to_dict(self, key: Any) -> Mapping[Hashable, Any]:\n if utils.is_dict_like(key):\n return key\n else:\n key = indexing.expanded_indexer(key, self.ndim)\n return dict(zip(self.dims, key))"},{"col":4,"comment":"null","endLoc":1201,"header":"def __bool__(self) -> bool","id":966,"name":"__bool__","nodeType":"Function","startLoc":1200,"text":"def __bool__(self) -> bool:\n return bool(self.data_vars)"},{"col":4,"comment":"null","endLoc":401,"header":"def get_variables(self)","id":967,"name":"get_variables","nodeType":"Function","startLoc":397,"text":"def get_variables(self):\n dsvars = FrozenDict(\n (k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()\n )\n return dsvars"},{"col":26,"endLoc":913,"id":968,"nodeType":"Lambda","startLoc":913,"text":"lambda ds: ds.assign(**kwargs)"},{"col":0,"comment":"null","endLoc":52,"header":"def register_pandas_datetime_converter_if_needed()","id":969,"name":"register_pandas_datetime_converter_if_needed","nodeType":"Function","startLoc":47,"text":"def register_pandas_datetime_converter_if_needed():\n # based on https://github.com/pandas-dev/pandas/pull/17710\n global _registered\n if not _registered:\n pd.plotting.register_matplotlib_converters()\n _registered = True"},{"col":0,"comment":"Given a key for indexing an ndarray, return an equivalent key which is a\n tuple with length equal to the number of dimensions.\n\n The expansion is done by replacing all `Ellipsis` items with the right\n number of full slices and then padding the key with full slices so that it\n reaches the appropriate dimensionality.\n ","endLoc":45,"header":"def expanded_indexer(key, ndim)","id":970,"name":"expanded_indexer","nodeType":"Function","startLoc":18,"text":"def expanded_indexer(key, ndim):\n \"\"\"Given a key for indexing an ndarray, return an equivalent key which is a\n tuple with length equal to the number of dimensions.\n\n The expansion is done by replacing all `Ellipsis` items with the right\n number of full slices and then padding the key with full slices so that it\n reaches the appropriate dimensionality.\n \"\"\"\n if not isinstance(key, tuple):\n # numpy treats non-tuple keys equivalent to tuples of length 1\n key = (key,)\n new_key = []\n # handling Ellipsis right is a little tricky, see:\n # http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing\n found_ellipsis = False\n for k in key:\n if k is Ellipsis:\n if not found_ellipsis:\n new_key.extend((ndim + 1 - len(key)) * [slice(None)])\n found_ellipsis = True\n else:\n new_key.append(slice(None))\n else:\n new_key.append(k)\n if len(new_key) > ndim:\n raise IndexError(\"too many indices\")\n new_key.extend((ndim - len(new_key)) * [slice(None)])\n return tuple(new_key)"},{"col":4,"comment":"null","endLoc":1204,"header":"def __iter__(self) -> Iterator[Hashable]","id":971,"name":"__iter__","nodeType":"Function","startLoc":1203,"text":"def __iter__(self) -> Iterator[Hashable]:\n return iter(self.data_vars)"},{"col":4,"comment":"null","endLoc":1212,"header":"def __array__(self, dtype=None)","id":972,"name":"__array__","nodeType":"Function","startLoc":1206,"text":"def __array__(self, dtype=None):\n raise TypeError(\n \"cannot directly convert an xarray.Dataset into a \"\n \"numpy array. 
Instead, create an xarray.DataArray \"\n \"first, either with indexing on the Dataset or by \"\n \"invoking the `to_array()` method.\"\n )"},{"className":"Resample","col":0,"comment":"An object that extends the `GroupBy` object with additional logic\n for handling specialized re-sampling operations.\n\n You should create a `Resample` object by using the `DataArray.resample` or\n `Dataset.resample` methods. The dimension along re-sampling\n\n See Also\n --------\n DataArray.resample\n Dataset.resample\n\n ","endLoc":155,"id":973,"nodeType":"Class","startLoc":7,"text":"class Resample:\n \"\"\"An object that extends the `GroupBy` object with additional logic\n for handling specialized re-sampling operations.\n\n You should create a `Resample` object by using the `DataArray.resample` or\n `Dataset.resample` methods. The dimension along re-sampling\n\n See Also\n --------\n DataArray.resample\n Dataset.resample\n\n \"\"\"\n\n def _upsample(self, method, *args, **kwargs):\n \"\"\"Dispatch function to call appropriate up-sampling methods on\n data.\n\n This method should not be called directly; instead, use one of the\n wrapper functions supplied by `Resample`.\n\n Parameters\n ----------\n method : str {'asfreq', 'pad', 'ffill', 'backfill', 'bfill', 'nearest',\n 'interpolate'}\n Method to use for up-sampling\n\n See Also\n --------\n Resample.asfreq\n Resample.pad\n Resample.backfill\n Resample.interpolate\n\n \"\"\"\n\n upsampled_index = self._full_index\n\n # Drop non-dimension coordinates along the resampled dimension\n for k, v in self._obj.coords.items():\n if k == self._dim:\n continue\n if self._dim in v.dims:\n self._obj = self._obj.drop(k)\n\n if method == \"asfreq\":\n return self.mean(self._dim)\n\n elif method in [\"pad\", \"ffill\", \"backfill\", \"bfill\", \"nearest\"]:\n kwargs = kwargs.copy()\n kwargs.update(**{self._dim: upsampled_index})\n return self._obj.reindex(method=method, *args, **kwargs)\n\n elif method == \"interpolate\":\n return self._interpolate(*args, **kwargs)\n\n else:\n raise ValueError(\n 'Specified method was \"{}\" but must be one of'\n '\"asfreq\", \"ffill\", \"bfill\", or \"interpolate\"'.format(method)\n )\n\n def asfreq(self):\n \"\"\"Return values of original object at the new up-sampling frequency;\n essentially a re-index with new times set to NaN.\n \"\"\"\n return self._upsample(\"asfreq\")\n\n def pad(self, tolerance=None):\n \"\"\"Forward fill new values at up-sampled frequency.\n\n Parameters\n ----------\n tolerance : optional\n Maximum distance between original and new labels to limit\n the up-sampling method.\n Up-sampled data with indices that satisfy the equation\n ``abs(index[indexer] - target) <= tolerance`` are filled by\n new values. Data with indices that are outside the given\n tolerance are filled with ``NaN`` s\n \"\"\"\n return self._upsample(\"pad\", tolerance=tolerance)\n\n ffill = pad\n\n def backfill(self, tolerance=None):\n \"\"\"Backward fill new values at up-sampled frequency.\n\n Parameters\n ----------\n tolerance : optional\n Maximum distance between original and new labels to limit\n the up-sampling method.\n Up-sampled data with indices that satisfy the equation\n ``abs(index[indexer] - target) <= tolerance`` are filled by\n new values. 
Data with indices that are outside the given\n tolerance are filled with ``NaN`` s\n \"\"\"\n return self._upsample(\"backfill\", tolerance=tolerance)\n\n bfill = backfill\n\n def nearest(self, tolerance=None):\n \"\"\"Take new values from nearest original coordinate to up-sampled\n frequency coordinates.\n\n Parameters\n ----------\n tolerance : optional\n Maximum distance between original and new labels to limit\n the up-sampling method.\n Up-sampled data with indices that satisfy the equation\n ``abs(index[indexer] - target) <= tolerance`` are filled by\n new values. Data with indices that are outside the given\n tolerance are filled with ``NaN`` s\n \"\"\"\n return self._upsample(\"nearest\", tolerance=tolerance)\n\n def interpolate(self, kind=\"linear\"):\n \"\"\"Interpolate up-sampled data using the original data\n as knots.\n\n Parameters\n ----------\n kind : str {'linear', 'nearest', 'zero', 'slinear',\n 'quadratic', 'cubic'}\n Interpolation scheme to use\n\n See Also\n --------\n scipy.interpolate.interp1d\n\n \"\"\"\n return self._interpolate(kind=kind)\n\n def _interpolate(self, kind=\"linear\"):\n \"\"\"Apply scipy.interpolate.interp1d along resampling dimension.\"\"\"\n # drop any existing non-dimension coordinates along the resampling\n # dimension\n dummy = self._obj.copy()\n for k, v in self._obj.coords.items():\n if k != self._dim and self._dim in v.dims:\n dummy = dummy.drop(k)\n return dummy.interp(\n assume_sorted=True,\n method=kind,\n kwargs={\"bounds_error\": False},\n **{self._dim: self._full_index}\n )"},{"col":4,"comment":"null","endLoc":1216,"header":"@property\n def nbytes(self) -> int","id":974,"name":"nbytes","nodeType":"Function","startLoc":1214,"text":"@property\n def nbytes(self) -> int:\n return sum(v.nbytes for v in self.variables.values())"},{"col":4,"comment":"Dispatch function to call appropriate up-sampling methods on\n data.\n\n This method should not be called directly; instead, use one of the\n wrapper functions supplied by `Resample`.\n\n Parameters\n ----------\n method : str {'asfreq', 'pad', 'ffill', 'backfill', 'bfill', 'nearest',\n 'interpolate'}\n Method to use for up-sampling\n\n See Also\n --------\n Resample.asfreq\n Resample.pad\n Resample.backfill\n Resample.interpolate\n\n ","endLoc":67,"header":"def _upsample(self, method, *args, **kwargs)","id":975,"name":"_upsample","nodeType":"Function","startLoc":21,"text":"def _upsample(self, method, *args, **kwargs):\n \"\"\"Dispatch function to call appropriate up-sampling methods on\n data.\n\n This method should not be called directly; instead, use one of the\n wrapper functions supplied by `Resample`.\n\n Parameters\n ----------\n method : str {'asfreq', 'pad', 'ffill', 'backfill', 'bfill', 'nearest',\n 'interpolate'}\n Method to use for up-sampling\n\n See Also\n --------\n Resample.asfreq\n Resample.pad\n Resample.backfill\n Resample.interpolate\n\n \"\"\"\n\n upsampled_index = self._full_index\n\n # Drop non-dimension coordinates along the resampled dimension\n for k, v in self._obj.coords.items():\n if k == self._dim:\n continue\n if self._dim in v.dims:\n self._obj = self._obj.drop(k)\n\n if method == \"asfreq\":\n return self.mean(self._dim)\n\n elif method in [\"pad\", \"ffill\", \"backfill\", \"bfill\", \"nearest\"]:\n kwargs = kwargs.copy()\n kwargs.update(**{self._dim: upsampled_index})\n return self._obj.reindex(method=method, *args, **kwargs)\n\n elif method == \"interpolate\":\n return self._interpolate(*args, **kwargs)\n\n else:\n raise ValueError(\n 'Specified method was 
\"{}\" but must be one of'\n '\"asfreq\", \"ffill\", \"bfill\", or \"interpolate\"'.format(method)\n )"},{"col":4,"comment":"Attribute for location based indexing. Only supports __getitem__,\n and only when the key is a dict of the form {dim: labels}.\n ","endLoc":1223,"header":"@property\n def loc(self) -> _LocIndexer","id":976,"name":"loc","nodeType":"Function","startLoc":1218,"text":"@property\n def loc(self) -> _LocIndexer:\n \"\"\"Attribute for location based indexing. Only supports __getitem__,\n and only when the key is a dict of the form {dim: labels}.\n \"\"\"\n return _LocIndexer(self)"},{"col":4,"comment":"null","endLoc":405,"header":"def get_attrs(self)","id":977,"name":"get_attrs","nodeType":"Function","startLoc":403,"text":"def get_attrs(self):\n attrs = FrozenDict((k, self.ds.getncattr(k)) for k in self.ds.ncattrs())\n return attrs"},{"col":4,"comment":"null","endLoc":409,"header":"def get_dimensions(self)","id":978,"name":"get_dimensions","nodeType":"Function","startLoc":407,"text":"def get_dimensions(self):\n dims = FrozenDict((k, len(v)) for k, v in self.ds.dimensions.items())\n return dims"},{"col":4,"comment":"Return a mapping of all MultiIndex levels and their corresponding\n coordinate name.\n ","endLoc":621,"header":"@property\n def _level_coords(self) -> Dict[Hashable, Hashable]","id":979,"name":"_level_coords","nodeType":"Function","startLoc":608,"text":"@property\n def _level_coords(self) -> Dict[Hashable, Hashable]:\n \"\"\"Return a mapping of all MultiIndex levels and their corresponding\n coordinate name.\n \"\"\"\n level_coords: Dict[Hashable, Hashable] = {}\n\n for cname, var in self._coords.items():\n if var.ndim == 1 and isinstance(var, IndexVariable):\n level_names = var.level_names\n if level_names is not None:\n dim, = var.dims\n level_coords.update({lname: dim for lname in level_names})\n return level_coords"},{"col":4,"comment":"null","endLoc":389,"header":"def __init__(self, dataset: \"Dataset\")","id":980,"name":"__init__","nodeType":"Function","startLoc":388,"text":"def __init__(self, dataset: \"Dataset\"):\n self.dataset = dataset"},{"col":4,"comment":"Access variables or coordinates this dataset as a\n :py:class:`~xarray.DataArray`.\n\n Indexing with a list of names will return a new ``Dataset`` object.\n ","endLoc":1238,"header":"def __getitem__(self, key: Any) -> \"Union[DataArray, Dataset]\"","id":981,"name":"__getitem__","nodeType":"Function","startLoc":1225,"text":"def __getitem__(self, key: Any) -> \"Union[DataArray, Dataset]\":\n \"\"\"Access variables or coordinates this dataset as a\n :py:class:`~xarray.DataArray`.\n\n Indexing with a list of names will return a new ``Dataset`` object.\n \"\"\"\n # TODO(shoyer): type this properly: https://github.com/python/mypy/issues/7328\n if utils.is_dict_like(key):\n return self.isel(**cast(Mapping, key))\n\n if hashable(key):\n return self._construct_dataarray(key)\n else:\n return self._copy_listed(np.asarray(key))"},{"col":4,"comment":"null","endLoc":416,"header":"def get_encoding(self)","id":982,"name":"get_encoding","nodeType":"Function","startLoc":411,"text":"def get_encoding(self):\n encoding = {}\n encoding[\"unlimited_dims\"] = {\n k for k, v in self.ds.dimensions.items() if v.isunlimited()\n }\n return encoding"},{"col":4,"comment":"null","endLoc":420,"header":"def set_dimension(self, name, length, is_unlimited=False)","id":983,"name":"set_dimension","nodeType":"Function","startLoc":418,"text":"def set_dimension(self, name, length, is_unlimited=False):\n dim_length = length if not 
is_unlimited else None\n self.ds.createDimension(name, size=dim_length)"},{"col":4,"comment":"null","endLoc":429,"header":"def set_attribute(self, key, value)","id":984,"name":"set_attribute","nodeType":"Function","startLoc":422,"text":"def set_attribute(self, key, value):\n if self.format != \"NETCDF4\":\n value = encode_nc3_attr_value(value)\n if _is_list_of_strings(value):\n # encode as NC_STRING if attr is list of strings\n self.ds.setncattr_string(key, value)\n else:\n self.ds.setncattr(key, value)"},{"col":0,"comment":"null","endLoc":273,"header":"def _is_list_of_strings(value)","id":985,"name":"_is_list_of_strings","nodeType":"Function","startLoc":269,"text":"def _is_list_of_strings(value):\n if np.asarray(value).dtype.kind in [\"U\", \"S\"] and np.asarray(value).size > 1:\n return True\n else:\n return False"},{"col":4,"comment":"Returns a new dataset with each array indexed along the specified\n dimension(s).\n\n This method selects values from each array using its `__getitem__`\n method, except this method does not require knowing the order of\n each array's dimensions.\n\n Parameters\n ----------\n indexers : dict, optional\n A dict with keys matching dimensions and values given\n by integers, slice objects or arrays.\n indexer can be a integer, slice, array-like or DataArray.\n If DataArrays are passed as indexers, xarray-style indexing will be\n carried out. See :ref:`indexing` for the details.\n One of indexers or indexers_kwargs must be provided.\n drop : bool, optional\n If ``drop=True``, drop coordinates variables indexed by integers\n instead of making them scalar.\n **indexers_kwarg : {dim: indexer, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n Returns\n -------\n obj : Dataset\n A new Dataset with the same contents as this dataset, except each\n array and dimension is indexed by the appropriate indexers.\n If indexer DataArrays have coordinates that do not conflict with\n this object, then these coordinates will be attached.\n In general, each array's data will be a view of the array's data\n in this dataset, unless vectorized indexing was triggered by using\n an array indexer, in which case the data will be a copy.\n\n See Also\n --------\n Dataset.sel\n DataArray.isel\n ","endLoc":1927,"header":"def isel(\n self,\n indexers: Mapping[Hashable, Any] = None,\n drop: bool = False,\n **indexers_kwargs: Any,\n ) -> \"Dataset\"","id":986,"name":"isel","nodeType":"Function","startLoc":1848,"text":"def isel(\n self,\n indexers: Mapping[Hashable, Any] = None,\n drop: bool = False,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Returns a new dataset with each array indexed along the specified\n dimension(s).\n\n This method selects values from each array using its `__getitem__`\n method, except this method does not require knowing the order of\n each array's dimensions.\n\n Parameters\n ----------\n indexers : dict, optional\n A dict with keys matching dimensions and values given\n by integers, slice objects or arrays.\n indexer can be a integer, slice, array-like or DataArray.\n If DataArrays are passed as indexers, xarray-style indexing will be\n carried out. 
See :ref:`indexing` for the details.\n One of indexers or indexers_kwargs must be provided.\n drop : bool, optional\n If ``drop=True``, drop coordinates variables indexed by integers\n instead of making them scalar.\n **indexers_kwarg : {dim: indexer, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n Returns\n -------\n obj : Dataset\n A new Dataset with the same contents as this dataset, except each\n array and dimension is indexed by the appropriate indexers.\n If indexer DataArrays have coordinates that do not conflict with\n this object, then these coordinates will be attached.\n In general, each array's data will be a view of the array's data\n in this dataset, unless vectorized indexing was triggered by using\n an array indexer, in which case the data will be a copy.\n\n See Also\n --------\n Dataset.sel\n DataArray.isel\n \"\"\"\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"isel\")\n # Note: we need to preserve the original indexers variable in order to merge the\n # coords below\n indexers_list = list(self._validate_indexers(indexers))\n\n variables: Dict[Hashable, Variable] = {}\n indexes: Dict[Hashable, pd.Index] = {}\n\n for name, var in self.variables.items():\n var_indexers = {k: v for k, v in indexers_list if k in var.dims}\n if drop and name in var_indexers:\n continue # drop this variable\n\n if name in self.indexes:\n new_var, new_index = isel_variable_and_index(\n name, var, self.indexes[name], var_indexers\n )\n if new_index is not None:\n indexes[name] = new_index\n elif var_indexers:\n new_var = var.isel(indexers=var_indexers)\n else:\n new_var = var.copy(deep=False)\n\n variables[name] = new_var\n\n coord_names = self._coord_names & variables.keys()\n selected = self._replace_with_new_dims(variables, coord_names, indexes)\n\n # Extract coordinates from indexers\n coord_vars, new_indexes = selected._get_indexers_coords_and_indexes(indexers)\n variables.update(coord_vars)\n indexes.update(new_indexes)\n coord_names = self._coord_names & variables.keys() | coord_vars.keys()\n return self._replace_with_new_dims(variables, coord_names, indexes=indexes)"},{"col":4,"comment":"null","endLoc":634,"header":"def _getitem_coord(self, key)","id":987,"name":"_getitem_coord","nodeType":"Function","startLoc":623,"text":"def _getitem_coord(self, key):\n from .dataset import _get_virtual_variable\n\n try:\n var = self._coords[key]\n except KeyError:\n dim_sizes = dict(zip(self.dims, self.shape))\n _, key, var = _get_virtual_variable(\n self._coords, key, self._level_coords, dim_sizes\n )\n\n return self._replace_maybe_drop_dims(var, name=key)"},{"col":4,"comment":"null","endLoc":641,"header":"def __getitem__(self, key: Any) -> \"DataArray\"","id":988,"name":"__getitem__","nodeType":"Function","startLoc":636,"text":"def __getitem__(self, key: Any) -> \"DataArray\":\n if isinstance(key, str):\n return self._getitem_coord(key)\n else:\n # xarray-style array indexing\n return self.isel(indexers=self._item_key_to_dict(key))"},{"col":4,"comment":" Here we make sure\n + indexer has a valid keys\n + indexer is in a valid data type\n + string indexers are cast to the appropriate date type if the\n associated index is a DatetimeIndex or CFTimeIndex\n ","endLoc":1785,"header":"def _validate_indexers(\n self, indexers: Mapping[Hashable, Any]\n ) -> Iterator[Tuple[Hashable, Union[int, slice, np.ndarray, Variable]]]","id":989,"name":"_validate_indexers","nodeType":"Function","startLoc":1743,"text":"def 
_validate_indexers(\n self, indexers: Mapping[Hashable, Any]\n ) -> Iterator[Tuple[Hashable, Union[int, slice, np.ndarray, Variable]]]:\n \"\"\" Here we make sure\n + indexer has a valid keys\n + indexer is in a valid data type\n + string indexers are cast to the appropriate date type if the\n associated index is a DatetimeIndex or CFTimeIndex\n \"\"\"\n from .dataarray import DataArray\n\n invalid = indexers.keys() - self.dims.keys()\n if invalid:\n raise ValueError(\"dimensions %r do not exist\" % invalid)\n\n # all indexers should be int, slice, np.ndarrays, or Variable\n for k, v in indexers.items():\n if isinstance(v, (int, slice, Variable)):\n yield k, v\n elif isinstance(v, DataArray):\n yield k, v.variable\n elif isinstance(v, tuple):\n yield k, as_variable(v)\n elif isinstance(v, Dataset):\n raise TypeError(\"cannot use a Dataset as an indexer\")\n elif isinstance(v, Sequence) and len(v) == 0:\n yield k, np.empty((0,), dtype=\"int64\")\n else:\n v = np.asarray(v)\n\n if v.dtype.kind in \"US\":\n index = self.indexes[k]\n if isinstance(index, pd.DatetimeIndex):\n v = v.astype(\"datetime64[ns]\")\n elif isinstance(index, xr.CFTimeIndex):\n v = _parse_array_of_cftime_strings(v, index.date_type)\n\n if v.ndim > 1:\n raise IndexError(\n \"Unlabeled multi-dimensional array cannot be \"\n \"used for indexing: {}\".format(k)\n )\n yield k, v"},{"col":0,"comment":"null","endLoc":545,"header":"def _add_colorbar(primitive, ax, cbar_ax, cbar_kwargs, cmap_params)","id":990,"name":"_add_colorbar","nodeType":"Function","startLoc":535,"text":"def _add_colorbar(primitive, ax, cbar_ax, cbar_kwargs, cmap_params):\n plt = import_matplotlib_pyplot()\n cbar_kwargs.setdefault(\"extend\", cmap_params[\"extend\"])\n if cbar_ax is None:\n cbar_kwargs.setdefault(\"ax\", ax)\n else:\n cbar_kwargs.setdefault(\"cax\", cbar_ax)\n\n cbar = plt.colorbar(primitive, **cbar_kwargs)\n\n return cbar"},{"col":4,"comment":"null","endLoc":437,"header":"def encode_variable(self, variable)","id":991,"name":"encode_variable","nodeType":"Function","startLoc":431,"text":"def encode_variable(self, variable):\n variable = _force_native_endianness(variable)\n if self.format == \"NETCDF4\":\n variable = _encode_nc4_variable(variable)\n else:\n variable = encode_nc3_variable(variable)\n return variable"},{"col":0,"comment":"null","endLoc":197,"header":"def _force_native_endianness(var)","id":992,"name":"_force_native_endianness","nodeType":"Function","startLoc":177,"text":"def _force_native_endianness(var):\n # possible values for byteorder are:\n # = native\n # < little-endian\n # > big-endian\n # | not applicable\n # Below we check if the data type is not native or NA\n if var.dtype.byteorder not in [\"=\", \"|\"]:\n # if endianness is specified explicitly, convert to the native type\n data = var.data.astype(var.dtype.newbyteorder(\"=\"))\n var = Variable(var.dims, data, var.attrs, var.encoding)\n # if endian exists, remove it from the encoding.\n var.encoding.pop(\"endian\", None)\n # check to see if encoding has a value for endian its 'native'\n if not var.encoding.get(\"endian\", \"native\") == \"native\":\n raise NotImplementedError(\n \"Attempt to write non-native endian type, \"\n \"this is not supported by the netCDF4 \"\n \"python library.\"\n )\n return var"},{"col":4,"comment":"Return a new DataArray whose data is given by integer indexing\n along the specified dimension(s).\n\n See Also\n --------\n Dataset.isel\n DataArray.sel\n ","endLoc":1011,"header":"def isel(\n self,\n indexers: Mapping[Hashable, Any] = None,\n 
drop: bool = False,\n **indexers_kwargs: Any\n ) -> \"DataArray\"","id":993,"name":"isel","nodeType":"Function","startLoc":995,"text":"def isel(\n self,\n indexers: Mapping[Hashable, Any] = None,\n drop: bool = False,\n **indexers_kwargs: Any\n ) -> \"DataArray\":\n \"\"\"Return a new DataArray whose data is given by integer indexing\n along the specified dimension(s).\n\n See Also\n --------\n Dataset.isel\n DataArray.sel\n \"\"\"\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"isel\")\n ds = self._to_temp_dataset().isel(drop=drop, indexers=indexers)\n return self._from_temp_dataset(ds)"},{"fileName":"utils.py","filePath":"xarray/core","id":994,"nodeType":"File","text":"\"\"\"Internal utilties; not for external use\n\"\"\"\nimport contextlib\nimport functools\nimport itertools\nimport os.path\nimport re\nimport warnings\nfrom typing import (\n AbstractSet,\n Any,\n Callable,\n Container,\n Dict,\n Hashable,\n Iterable,\n Iterator,\n Mapping,\n MutableMapping,\n MutableSet,\n Optional,\n Sequence,\n Tuple,\n TypeVar,\n cast,\n)\n\nimport numpy as np\nimport pandas as pd\n\nK = TypeVar(\"K\")\nV = TypeVar(\"V\")\nT = TypeVar(\"T\")\n\n\ndef _check_inplace(inplace: Optional[bool]) -> None:\n if inplace is not None:\n raise TypeError(\n \"The `inplace` argument has been removed from xarray. \"\n \"You can achieve an identical effect with python's standard assignment.\"\n )\n\n\ndef alias_message(old_name: str, new_name: str) -> str:\n return \"%s has been deprecated. Use %s instead.\" % (old_name, new_name)\n\n\ndef alias_warning(old_name: str, new_name: str, stacklevel: int = 3) -> None:\n warnings.warn(\n alias_message(old_name, new_name), FutureWarning, stacklevel=stacklevel\n )\n\n\ndef alias(obj: Callable[..., T], old_name: str) -> Callable[..., T]:\n assert isinstance(old_name, str)\n\n @functools.wraps(obj)\n def wrapper(*args, **kwargs):\n alias_warning(old_name, obj.__name__)\n return obj(*args, **kwargs)\n\n wrapper.__doc__ = alias_message(old_name, obj.__name__)\n return wrapper\n\n\ndef _maybe_cast_to_cftimeindex(index: pd.Index) -> pd.Index:\n from ..coding.cftimeindex import CFTimeIndex\n\n if len(index) > 0 and index.dtype == \"O\":\n try:\n return CFTimeIndex(index)\n except (ImportError, TypeError):\n return index\n else:\n return index\n\n\ndef maybe_cast_to_coords_dtype(label, coords_dtype):\n if coords_dtype.kind == \"f\" and not isinstance(label, slice):\n label = np.asarray(label, dtype=coords_dtype)\n return label\n\n\ndef safe_cast_to_index(array: Any) -> pd.Index:\n \"\"\"Given an array, safely cast it to a pandas.Index.\n\n If it is already a pandas.Index, return it unchanged.\n\n Unlike pandas.Index, if the array has dtype=object or dtype=timedelta64,\n this function will not attempt to do automatic type conversion but will\n always return an index with dtype=object.\n \"\"\"\n if isinstance(array, pd.Index):\n index = array\n elif hasattr(array, \"to_index\"):\n index = array.to_index()\n else:\n kwargs = {}\n if hasattr(array, \"dtype\") and array.dtype.kind == \"O\":\n kwargs[\"dtype\"] = object\n index = pd.Index(np.asarray(array), **kwargs)\n return _maybe_cast_to_cftimeindex(index)\n\n\ndef multiindex_from_product_levels(\n levels: Sequence[pd.Index], names: Sequence[str] = None\n) -> pd.MultiIndex:\n \"\"\"Creating a MultiIndex from a product without refactorizing levels.\n\n Keeping levels the same gives back the original labels when we unstack.\n\n Parameters\n ----------\n levels : sequence of pd.Index\n Values for each MultiIndex level.\n 
names : optional sequence of objects\n Names for each level.\n\n Returns\n -------\n pandas.MultiIndex\n \"\"\"\n if any(not isinstance(lev, pd.Index) for lev in levels):\n raise TypeError(\"levels must be a list of pd.Index objects\")\n\n split_labels, levels = zip(*[lev.factorize() for lev in levels])\n labels_mesh = np.meshgrid(*split_labels, indexing=\"ij\")\n labels = [x.ravel() for x in labels_mesh]\n return pd.MultiIndex(levels, labels, sortorder=0, names=names)\n\n\ndef maybe_wrap_array(original, new_array):\n \"\"\"Wrap a transformed array with __array_wrap__ is it can be done safely.\n\n This lets us treat arbitrary functions that take and return ndarray objects\n like ufuncs, as long as they return an array with the same shape.\n \"\"\"\n # in case func lost array's metadata\n if isinstance(new_array, np.ndarray) and new_array.shape == original.shape:\n return original.__array_wrap__(new_array)\n else:\n return new_array\n\n\ndef equivalent(first: T, second: T) -> bool:\n \"\"\"Compare two objects for equivalence (identity or equality), using\n array_equiv if either object is an ndarray. If both objects are lists,\n equivalent is sequentially called on all the elements.\n \"\"\"\n # TODO: refactor to avoid circular import\n from . import duck_array_ops\n\n if isinstance(first, np.ndarray) or isinstance(second, np.ndarray):\n return duck_array_ops.array_equiv(first, second)\n elif isinstance(first, list) or isinstance(second, list):\n return list_equiv(first, second)\n else:\n return (\n (first is second)\n or (first == second)\n or (pd.isnull(first) and pd.isnull(second))\n )\n\n\ndef list_equiv(first, second):\n equiv = True\n if len(first) != len(second):\n return False\n else:\n for f, s in zip(first, second):\n equiv = equiv and equivalent(f, s)\n return equiv\n\n\ndef peek_at(iterable: Iterable[T]) -> Tuple[T, Iterator[T]]:\n \"\"\"Returns the first value from iterable, as well as a new iterator with\n the same content as the original iterable\n \"\"\"\n gen = iter(iterable)\n peek = next(gen)\n return peek, itertools.chain([peek], gen)\n\n\ndef update_safety_check(\n first_dict: MutableMapping[K, V],\n second_dict: Mapping[K, V],\n compat: Callable[[V, V], bool] = equivalent,\n) -> None:\n \"\"\"Check the safety of updating one dictionary with another.\n\n Raises ValueError if dictionaries have non-compatible values for any key,\n where compatibility is determined by identity (they are the same item) or\n the `compat` function.\n\n Parameters\n ----------\n first_dict, second_dict : dict-like\n All items in the second dictionary are checked against for conflicts\n against items in the first dictionary.\n compat : function, optional\n Binary operator to determine if two values are compatible. By default,\n checks for equivalence.\n \"\"\"\n for k, v in second_dict.items():\n if k in first_dict and not compat(v, first_dict[k]):\n raise ValueError(\n \"unsafe to merge dictionaries without \"\n \"overriding values; conflicting key %r\" % k\n )\n\n\ndef remove_incompatible_items(\n first_dict: MutableMapping[K, V],\n second_dict: Mapping[K, V],\n compat: Callable[[V, V], bool] = equivalent,\n) -> None:\n \"\"\"Remove incompatible items from the first dictionary in-place.\n\n Items are retained if their keys are found in both dictionaries and the\n values are compatible.\n\n Parameters\n ----------\n first_dict, second_dict : dict-like\n Mappings to merge.\n compat : function, optional\n Binary operator to determine if two values are compatible. 
By default,\n checks for equivalence.\n \"\"\"\n for k in list(first_dict):\n if k not in second_dict or not compat(first_dict[k], second_dict[k]):\n del first_dict[k]\n\n\ndef is_dict_like(value: Any) -> bool:\n return hasattr(value, \"keys\") and hasattr(value, \"__getitem__\")\n\n\ndef is_full_slice(value: Any) -> bool:\n return isinstance(value, slice) and value == slice(None)\n\n\ndef is_list_like(value: Any) -> bool:\n return isinstance(value, list) or isinstance(value, tuple)\n\n\ndef either_dict_or_kwargs(\n pos_kwargs: Optional[Mapping[Hashable, T]],\n kw_kwargs: Mapping[str, T],\n func_name: str,\n) -> Mapping[Hashable, T]:\n if pos_kwargs is not None:\n if not is_dict_like(pos_kwargs):\n raise ValueError(\n \"the first argument to .%s must be a dictionary\" % func_name\n )\n if kw_kwargs:\n raise ValueError(\n \"cannot specify both keyword and positional \"\n \"arguments to .%s\" % func_name\n )\n return pos_kwargs\n else:\n # Need an explicit cast to appease mypy due to invariance; see\n # https://github.com/python/mypy/issues/6228\n return cast(Mapping[Hashable, T], kw_kwargs)\n\n\ndef is_scalar(value: Any, include_0d: bool = True) -> bool:\n \"\"\"Whether to treat a value as a scalar.\n\n Any non-iterable, string, or 0-D array\n \"\"\"\n from .variable import NON_NUMPY_SUPPORTED_ARRAY_TYPES\n\n if include_0d:\n include_0d = getattr(value, \"ndim\", None) == 0\n return (\n include_0d\n or isinstance(value, (str, bytes))\n or not (\n isinstance(value, (Iterable,) + NON_NUMPY_SUPPORTED_ARRAY_TYPES)\n or hasattr(value, \"__array_function__\")\n )\n )\n\n\ndef is_valid_numpy_dtype(dtype: Any) -> bool:\n try:\n np.dtype(dtype)\n except (TypeError, ValueError):\n return False\n else:\n return True\n\n\ndef to_0d_object_array(value: Any) -> np.ndarray:\n \"\"\"Given a value, wrap it in a 0-D numpy.ndarray with dtype=object.\n \"\"\"\n result = np.empty((), dtype=object)\n result[()] = value\n return result\n\n\ndef to_0d_array(value: Any) -> np.ndarray:\n \"\"\"Given a value, wrap it in a 0-D numpy.ndarray.\n \"\"\"\n if np.isscalar(value) or (isinstance(value, np.ndarray) and value.ndim == 0):\n return np.array(value)\n else:\n return to_0d_object_array(value)\n\n\ndef dict_equiv(\n first: Mapping[K, V],\n second: Mapping[K, V],\n compat: Callable[[V, V], bool] = equivalent,\n) -> bool:\n \"\"\"Test equivalence of two dict-like objects. If any of the values are\n numpy arrays, compare them correctly.\n\n Parameters\n ----------\n first, second : dict-like\n Dictionaries to compare for equality\n compat : function, optional\n Binary operator to determine if two values are compatible. By default,\n checks for equivalence.\n\n Returns\n -------\n equals : bool\n True if the dictionaries are equal\n \"\"\"\n for k in first:\n if k not in second or not compat(first[k], second[k]):\n return False\n for k in second:\n if k not in first:\n return False\n return True\n\n\ndef ordered_dict_intersection(\n first_dict: Mapping[K, V],\n second_dict: Mapping[K, V],\n compat: Callable[[V, V], bool] = equivalent,\n) -> MutableMapping[K, V]:\n \"\"\"Return the intersection of two dictionaries as a new dictionary.\n\n Items are retained if their keys are found in both dictionaries and the\n values are compatible.\n\n Parameters\n ----------\n first_dict, second_dict : dict-like\n Mappings to merge.\n compat : function, optional\n Binary operator to determine if two values are compatible. 
By default,\n checks for equivalence.\n\n Returns\n -------\n intersection : dict\n Intersection of the contents.\n \"\"\"\n new_dict = dict(first_dict)\n remove_incompatible_items(new_dict, second_dict, compat)\n return new_dict\n\n\nclass Frozen(Mapping[K, V]):\n \"\"\"Wrapper around an object implementing the mapping interface to make it\n immutable. If you really want to modify the mapping, the mutable version is\n saved under the `mapping` attribute.\n \"\"\"\n\n __slots__ = (\"mapping\",)\n\n def __init__(self, mapping: Mapping[K, V]):\n self.mapping = mapping\n\n def __getitem__(self, key: K) -> V:\n return self.mapping[key]\n\n def __iter__(self) -> Iterator[K]:\n return iter(self.mapping)\n\n def __len__(self) -> int:\n return len(self.mapping)\n\n def __contains__(self, key: object) -> bool:\n return key in self.mapping\n\n def __repr__(self) -> str:\n return \"%s(%r)\" % (type(self).__name__, self.mapping)\n\n\ndef FrozenDict(*args, **kwargs) -> Frozen:\n return Frozen(dict(*args, **kwargs))\n\n\nclass SortedKeysDict(MutableMapping[K, V]):\n \"\"\"An wrapper for dictionary-like objects that always iterates over its\n items in sorted order by key but is otherwise equivalent to the underlying\n mapping.\n \"\"\"\n\n __slots__ = (\"mapping\",)\n\n def __init__(self, mapping: MutableMapping[K, V] = None):\n self.mapping = {} if mapping is None else mapping\n\n def __getitem__(self, key: K) -> V:\n return self.mapping[key]\n\n def __setitem__(self, key: K, value: V) -> None:\n self.mapping[key] = value\n\n def __delitem__(self, key: K) -> None:\n del self.mapping[key]\n\n def __iter__(self) -> Iterator[K]:\n return iter(sorted(self.mapping))\n\n def __len__(self) -> int:\n return len(self.mapping)\n\n def __contains__(self, key: object) -> bool:\n return key in self.mapping\n\n def __repr__(self) -> str:\n return \"%s(%r)\" % (type(self).__name__, self.mapping)\n\n\nclass OrderedSet(MutableSet[T]):\n \"\"\"A simple ordered set.\n\n The API matches the builtin set, but it preserves insertion order of elements, like\n a dict. Note that, unlike in an OrderedDict, equality tests are not order-sensitive.\n \"\"\"\n\n _d: Dict[T, None]\n\n __slots__ = (\"_d\",)\n\n def __init__(self, values: AbstractSet[T] = None):\n self._d = {}\n if values is not None:\n # Disable type checking - both mypy and PyCharm believe that\n # we're altering the type of self in place (see signature of\n # MutableSet.__ior__)\n self |= values # type: ignore\n\n # Required methods for MutableSet\n\n def __contains__(self, value: object) -> bool:\n return value in self._d\n\n def __iter__(self) -> Iterator[T]:\n return iter(self._d)\n\n def __len__(self) -> int:\n return len(self._d)\n\n def add(self, value: T) -> None:\n self._d[value] = None\n\n def discard(self, value: T) -> None:\n del self._d[value]\n\n # Additional methods\n\n def update(self, values: AbstractSet[T]) -> None:\n # See comment on __init__ re. 
type checking\n self |= values # type: ignore\n\n def __repr__(self) -> str:\n return \"%s(%r)\" % (type(self).__name__, list(self))\n\n\nclass NdimSizeLenMixin:\n \"\"\"Mixin class that extends a class that defines a ``shape`` property to\n one that also defines ``ndim``, ``size`` and ``__len__``.\n \"\"\"\n\n __slots__ = ()\n\n @property\n def ndim(self: Any) -> int:\n return len(self.shape)\n\n @property\n def size(self: Any) -> int:\n # cast to int so that shape = () gives size = 1\n return int(np.prod(self.shape))\n\n def __len__(self: Any) -> int:\n try:\n return self.shape[0]\n except IndexError:\n raise TypeError(\"len() of unsized object\")\n\n\nclass NDArrayMixin(NdimSizeLenMixin):\n \"\"\"Mixin class for making wrappers of N-dimensional arrays that conform to\n the ndarray interface required for the data argument to Variable objects.\n\n A subclass should set the `array` property and override one or more of\n `dtype`, `shape` and `__getitem__`.\n \"\"\"\n\n __slots__ = ()\n\n @property\n def dtype(self: Any) -> np.dtype:\n return self.array.dtype\n\n @property\n def shape(self: Any) -> Tuple[int]:\n return self.array.shape\n\n def __getitem__(self: Any, key):\n return self.array[key]\n\n def __repr__(self: Any) -> str:\n return \"%s(array=%r)\" % (type(self).__name__, self.array)\n\n\nclass ReprObject:\n \"\"\"Object that prints as the given value, for use with sentinel values.\n \"\"\"\n\n __slots__ = (\"_value\",)\n\n def __init__(self, value: str):\n self._value = value\n\n def __repr__(self) -> str:\n return self._value\n\n def __eq__(self, other) -> bool:\n if isinstance(other, ReprObject):\n return self._value == other._value\n return False\n\n def __hash__(self) -> int:\n return hash((ReprObject, self._value))\n\n\n@contextlib.contextmanager\ndef close_on_error(f):\n \"\"\"Context manager to ensure that a file opened by xarray is closed if an\n exception is raised before the user sees the file object.\n \"\"\"\n try:\n yield\n except Exception:\n f.close()\n raise\n\n\ndef is_remote_uri(path: str) -> bool:\n return bool(re.search(r\"^https?\\://\", path))\n\n\ndef is_grib_path(path: str) -> bool:\n _, ext = os.path.splitext(path)\n return ext in [\".grib\", \".grb\", \".grib2\", \".grb2\"]\n\n\ndef is_uniform_spaced(arr, **kwargs) -> bool:\n \"\"\"Return True if values of an array are uniformly spaced and sorted.\n\n >>> is_uniform_spaced(range(5))\n True\n >>> is_uniform_spaced([-4, 0, 100])\n False\n\n kwargs are additional arguments to ``np.isclose``\n \"\"\"\n arr = np.array(arr, dtype=float)\n diffs = np.diff(arr)\n return bool(np.isclose(diffs.min(), diffs.max(), **kwargs))\n\n\ndef hashable(v: Any) -> bool:\n \"\"\"Determine whether `v` can be hashed.\n \"\"\"\n try:\n hash(v)\n except TypeError:\n return False\n return True\n\n\ndef not_implemented(*args, **kwargs):\n return NotImplemented\n\n\ndef decode_numpy_dict_values(attrs: Mapping[K, V]) -> Dict[K, V]:\n \"\"\"Convert attribute values from numpy objects to native Python objects,\n for use in to_dict\n \"\"\"\n attrs = dict(attrs)\n for k, v in attrs.items():\n if isinstance(v, np.ndarray):\n attrs[k] = v.tolist()\n elif isinstance(v, np.generic):\n attrs[k] = v.item()\n return attrs\n\n\ndef ensure_us_time_resolution(val):\n \"\"\"Convert val out of numpy time, for use in to_dict.\n Needed because of numpy bug GH#7619\"\"\"\n if np.issubdtype(val.dtype, np.datetime64):\n val = val.astype(\"datetime64[us]\")\n elif np.issubdtype(val.dtype, np.timedelta64):\n val = val.astype(\"timedelta64[us]\")\n return 
val\n\n\nclass HiddenKeyDict(MutableMapping[K, V]):\n \"\"\"Acts like a normal dictionary, but hides certain keys.\n \"\"\"\n\n __slots__ = (\"_data\", \"_hidden_keys\")\n\n # ``__init__`` method required to create instance from class.\n\n def __init__(self, data: MutableMapping[K, V], hidden_keys: Iterable[K]):\n self._data = data\n self._hidden_keys = frozenset(hidden_keys)\n\n def _raise_if_hidden(self, key: K) -> None:\n if key in self._hidden_keys:\n raise KeyError(\"Key `%r` is hidden.\" % key)\n\n # The next five methods are requirements of the ABC.\n def __setitem__(self, key: K, value: V) -> None:\n self._raise_if_hidden(key)\n self._data[key] = value\n\n def __getitem__(self, key: K) -> V:\n self._raise_if_hidden(key)\n return self._data[key]\n\n def __delitem__(self, key: K) -> None:\n self._raise_if_hidden(key)\n del self._data[key]\n\n def __iter__(self) -> Iterator[K]:\n for k in self._data:\n if k not in self._hidden_keys:\n yield k\n\n def __len__(self) -> int:\n num_hidden = len(self._hidden_keys & self._data.keys())\n return len(self._data) - num_hidden\n\n\ndef get_temp_dimname(dims: Container[Hashable], new_dim: Hashable) -> Hashable:\n \"\"\" Get an new dimension name based on new_dim, that is not used in dims.\n If the same name exists, we add an underscore(s) in the head.\n\n Example1:\n dims: ['a', 'b', 'c']\n new_dim: ['_rolling']\n -> ['_rolling']\n Example2:\n dims: ['a', 'b', 'c', '_rolling']\n new_dim: ['_rolling']\n -> ['__rolling']\n \"\"\"\n while new_dim in dims:\n new_dim = \"_\" + str(new_dim)\n return new_dim\n"},{"className":"SortedKeysDict","col":0,"comment":"An wrapper for dictionary-like objects that always iterates over its\n items in sorted order by key but is otherwise equivalent to the underlying\n mapping.\n ","endLoc":433,"id":995,"nodeType":"Class","startLoc":403,"text":"class SortedKeysDict(MutableMapping[K, V]):\n \"\"\"An wrapper for dictionary-like objects that always iterates over its\n items in sorted order by key but is otherwise equivalent to the underlying\n mapping.\n \"\"\"\n\n __slots__ = (\"mapping\",)\n\n def __init__(self, mapping: MutableMapping[K, V] = None):\n self.mapping = {} if mapping is None else mapping\n\n def __getitem__(self, key: K) -> V:\n return self.mapping[key]\n\n def __setitem__(self, key: K, value: V) -> None:\n self.mapping[key] = value\n\n def __delitem__(self, key: K) -> None:\n del self.mapping[key]\n\n def __iter__(self) -> Iterator[K]:\n return iter(sorted(self.mapping))\n\n def __len__(self) -> int:\n return len(self.mapping)\n\n def __contains__(self, key: object) -> bool:\n return key in self.mapping\n\n def __repr__(self) -> str:\n return \"%s(%r)\" % (type(self).__name__, self.mapping)"},{"col":4,"comment":"null","endLoc":415,"header":"def __getitem__(self, key: K) -> V","id":996,"name":"__getitem__","nodeType":"Function","startLoc":414,"text":"def __getitem__(self, key: K) -> V:\n return self.mapping[key]"},{"col":4,"comment":"null","endLoc":418,"header":"def __setitem__(self, key: K, value: V) -> None","id":997,"name":"__setitem__","nodeType":"Function","startLoc":417,"text":"def __setitem__(self, key: K, value: V) -> None:\n self.mapping[key] = value"},{"col":4,"comment":"null","endLoc":421,"header":"def __delitem__(self, key: K) -> None","id":998,"name":"__delitem__","nodeType":"Function","startLoc":420,"text":"def __delitem__(self, key: K) -> None:\n del self.mapping[key]"},{"col":4,"comment":"null","endLoc":424,"header":"def __iter__(self) -> 
Iterator[K]","id":999,"name":"__iter__","nodeType":"Function","startLoc":423,"text":"def __iter__(self) -> Iterator[K]:\n return iter(sorted(self.mapping))"},{"col":4,"comment":"null","endLoc":427,"header":"def __len__(self) -> int","id":1000,"name":"__len__","nodeType":"Function","startLoc":426,"text":"def __len__(self) -> int:\n return len(self.mapping)"},{"col":4,"comment":"null","endLoc":430,"header":"def __contains__(self, key: object) -> bool","id":1001,"name":"__contains__","nodeType":"Function","startLoc":429,"text":"def __contains__(self, key: object) -> bool:\n return key in self.mapping"},{"col":4,"comment":"null","endLoc":433,"header":"def __repr__(self) -> str","id":1002,"name":"__repr__","nodeType":"Function","startLoc":432,"text":"def __repr__(self) -> str:\n return \"%s(%r)\" % (type(self).__name__, self.mapping)"},{"attributeType":"null","col":4,"comment":"null","endLoc":409,"id":1003,"name":"__slots__","nodeType":"Attribute","startLoc":409,"text":"__slots__"},{"attributeType":"None","col":8,"comment":"null","endLoc":412,"id":1004,"name":"mapping","nodeType":"Attribute","startLoc":412,"text":"self.mapping"},{"col":4,"comment":"null","endLoc":658,"header":"def __setitem__(self, key: Any, value: Any) -> None","id":1005,"name":"__setitem__","nodeType":"Function","startLoc":643,"text":"def __setitem__(self, key: Any, value: Any) -> None:\n if isinstance(key, str):\n self.coords[key] = value\n else:\n # Coordinates in key, value and self[key] should be consistent.\n # TODO Coordinate consistency in key is checked here, but it\n # causes unnecessary indexing. It should be optimized.\n obj = self[key]\n if isinstance(value, DataArray):\n assert_coordinate_consistent(value, obj.coords.variables)\n # DataArray key -> Variable key\n key = {\n k: v.variable if isinstance(v, DataArray) else v\n for k, v in self._item_key_to_dict(key).items()\n }\n self.variable[key] = value"},{"col":4,"comment":"null","endLoc":485,"header":"def prepare_variable(\n self, name, variable, check_encoding=False, unlimited_dims=None\n )","id":1006,"name":"prepare_variable","nodeType":"Function","startLoc":439,"text":"def prepare_variable(\n self, name, variable, check_encoding=False, unlimited_dims=None\n ):\n datatype = _get_datatype(\n variable, self.format, raise_on_invalid_encoding=check_encoding\n )\n attrs = variable.attrs.copy()\n\n fill_value = attrs.pop(\"_FillValue\", None)\n\n if datatype is str and fill_value is not None:\n raise NotImplementedError(\n \"netCDF4 does not yet support setting a fill value for \"\n \"variable-length strings \"\n \"(https://github.com/Unidata/netcdf4-python/issues/730). 
\"\n \"Either remove '_FillValue' from encoding on variable %r \"\n \"or set {'dtype': 'S1'} in encoding to use the fixed width \"\n \"NC_CHAR type.\" % name\n )\n\n encoding = _extract_nc4_variable_encoding(\n variable, raise_on_invalid=check_encoding, unlimited_dims=unlimited_dims\n )\n\n if name in self.ds.variables:\n nc4_var = self.ds.variables[name]\n else:\n nc4_var = self.ds.createVariable(\n varname=name,\n datatype=datatype,\n dimensions=variable.dims,\n zlib=encoding.get(\"zlib\", False),\n complevel=encoding.get(\"complevel\", 4),\n shuffle=encoding.get(\"shuffle\", True),\n fletcher32=encoding.get(\"fletcher32\", False),\n contiguous=encoding.get(\"contiguous\", False),\n chunksizes=encoding.get(\"chunksizes\"),\n endian=\"native\",\n least_significant_digit=encoding.get(\"least_significant_digit\"),\n fill_value=fill_value,\n )\n\n nc4_var.setncatts(attrs)\n\n target = NetCDF4ArrayWrapper(name, self)\n\n return target, variable.data"},{"className":"OrderedSet","col":0,"comment":"A simple ordered set.\n\n The API matches the builtin set, but it preserves insertion order of elements, like\n a dict. Note that, unlike in an OrderedDict, equality tests are not order-sensitive.\n ","endLoc":479,"id":1007,"nodeType":"Class","startLoc":436,"text":"class OrderedSet(MutableSet[T]):\n \"\"\"A simple ordered set.\n\n The API matches the builtin set, but it preserves insertion order of elements, like\n a dict. Note that, unlike in an OrderedDict, equality tests are not order-sensitive.\n \"\"\"\n\n _d: Dict[T, None]\n\n __slots__ = (\"_d\",)\n\n def __init__(self, values: AbstractSet[T] = None):\n self._d = {}\n if values is not None:\n # Disable type checking - both mypy and PyCharm believe that\n # we're altering the type of self in place (see signature of\n # MutableSet.__ior__)\n self |= values # type: ignore\n\n # Required methods for MutableSet\n\n def __contains__(self, value: object) -> bool:\n return value in self._d\n\n def __iter__(self) -> Iterator[T]:\n return iter(self._d)\n\n def __len__(self) -> int:\n return len(self._d)\n\n def add(self, value: T) -> None:\n self._d[value] = None\n\n def discard(self, value: T) -> None:\n del self._d[value]\n\n # Additional methods\n\n def update(self, values: AbstractSet[T]) -> None:\n # See comment on __init__ re. 
type checking\n self |= values # type: ignore\n\n def __repr__(self) -> str:\n return \"%s(%r)\" % (type(self).__name__, list(self))"},{"col":4,"comment":"null","endLoc":453,"header":"def __init__(self, values: AbstractSet[T] = None)","id":1008,"name":"__init__","nodeType":"Function","startLoc":447,"text":"def __init__(self, values: AbstractSet[T] = None):\n self._d = {}\n if values is not None:\n # Disable type checking - both mypy and PyCharm believe that\n # we're altering the type of self in place (see signature of\n # MutableSet.__ior__)\n self |= values # type: ignore"},{"col":4,"comment":"null","endLoc":458,"header":"def __contains__(self, value: object) -> bool","id":1009,"name":"__contains__","nodeType":"Function","startLoc":457,"text":"def __contains__(self, value: object) -> bool:\n return value in self._d"},{"col":4,"comment":"null","endLoc":461,"header":"def __iter__(self) -> Iterator[T]","id":1010,"name":"__iter__","nodeType":"Function","startLoc":460,"text":"def __iter__(self) -> Iterator[T]:\n return iter(self._d)"},{"col":4,"comment":"null","endLoc":464,"header":"def __len__(self) -> int","id":1011,"name":"__len__","nodeType":"Function","startLoc":463,"text":"def __len__(self) -> int:\n return len(self._d)"},{"col":4,"comment":"null","endLoc":467,"header":"def add(self, value: T) -> None","id":1012,"name":"add","nodeType":"Function","startLoc":466,"text":"def add(self, value: T) -> None:\n self._d[value] = None"},{"col":4,"comment":"null","endLoc":470,"header":"def discard(self, value: T) -> None","id":1013,"name":"discard","nodeType":"Function","startLoc":469,"text":"def discard(self, value: T) -> None:\n del self._d[value]"},{"col":4,"comment":"null","endLoc":476,"header":"def update(self, values: AbstractSet[T]) -> None","id":1014,"name":"update","nodeType":"Function","startLoc":474,"text":"def update(self, values: AbstractSet[T]) -> None:\n # See comment on __init__ re. 
type checking\n self |= values # type: ignore"},{"col":4,"comment":"null","endLoc":479,"header":"def __repr__(self) -> str","id":1015,"name":"__repr__","nodeType":"Function","startLoc":478,"text":"def __repr__(self) -> str:\n return \"%s(%r)\" % (type(self).__name__, list(self))"},{"col":0,"comment":"Make sure the dimension coordinate of obj is consistent with coords.\n\n obj: DataArray or Dataset\n coords: Dict-like of variables\n ","endLoc":362,"header":"def assert_coordinate_consistent(\n obj: Union[\"DataArray\", \"Dataset\"], coords: Mapping[Hashable, Variable]\n) -> None","id":1016,"name":"assert_coordinate_consistent","nodeType":"Function","startLoc":345,"text":"def assert_coordinate_consistent(\n obj: Union[\"DataArray\", \"Dataset\"], coords: Mapping[Hashable, Variable]\n) -> None:\n \"\"\"Make sure the dimension coordinate of obj is consistent with coords.\n\n obj: DataArray or Dataset\n coords: Dict-like of variables\n \"\"\"\n for k in obj.dims:\n # make sure there are no conflict in dimension coordinates\n if k in coords and k in obj.coords:\n if not coords[k].equals(obj[k].variable):\n raise IndexError(\n \"dimension coordinate {!r} conflicts between \"\n \"indexed and indexing objects:\\n{}\\nvs.\\n{}\".format(\n k, obj[k], coords[k]\n )\n )"},{"attributeType":"null","col":4,"comment":"null","endLoc":443,"id":1017,"name":"_d","nodeType":"Attribute","startLoc":443,"text":"_d"},{"col":0,"comment":"null","endLoc":532,"header":"def _is_numeric(arr)","id":1018,"name":"_is_numeric","nodeType":"Function","startLoc":530,"text":"def _is_numeric(arr):\n numpy_types = [np.floating, np.integer]\n return _valid_numpy_subdtype(arr, numpy_types)"},{"col":0,"comment":"\n Is any dtype from numpy_types superior to the dtype of x?\n ","endLoc":489,"header":"def _valid_numpy_subdtype(x, numpy_types)","id":1019,"name":"_valid_numpy_subdtype","nodeType":"Function","startLoc":480,"text":"def _valid_numpy_subdtype(x, numpy_types):\n \"\"\"\n Is any dtype from numpy_types superior to the dtype of x?\n \"\"\"\n # If any of the types given in numpy_types is understood as numpy.generic,\n # all possible x will be considered valid. 
This is probably unwanted.\n for t in numpy_types:\n assert not np.issubdtype(np.generic, t)\n\n return any(np.issubdtype(x.dtype, t) for t in numpy_types)"},{"col":0,"comment":"\n Parameters\n ==========\n func : plotting function\n data : ndarray,\n Data values\n\n Returns\n =======\n cmap_params\n\n cbar_kwargs\n ","endLoc":744,"header":"def _process_cmap_cbar_kwargs(\n func,\n data,\n cmap=None,\n colors=None,\n cbar_kwargs: Union[Iterable[Tuple[str, Any]], Mapping[str, Any]] = None,\n levels=None,\n **kwargs\n)","id":1020,"name":"_process_cmap_cbar_kwargs","nodeType":"Function","startLoc":689,"text":"def _process_cmap_cbar_kwargs(\n func,\n data,\n cmap=None,\n colors=None,\n cbar_kwargs: Union[Iterable[Tuple[str, Any]], Mapping[str, Any]] = None,\n levels=None,\n **kwargs\n):\n \"\"\"\n Parameters\n ==========\n func : plotting function\n data : ndarray,\n Data values\n\n Returns\n =======\n cmap_params\n\n cbar_kwargs\n \"\"\"\n cbar_kwargs = {} if cbar_kwargs is None else dict(cbar_kwargs)\n\n if \"contour\" in func.__name__ and levels is None:\n levels = 7 # this is the matplotlib default\n\n # colors is mutually exclusive with cmap\n if cmap and colors:\n raise ValueError(\"Can't specify both cmap and colors.\")\n\n # colors is only valid when levels is supplied or the plot is of type\n # contour or contourf\n if colors and ((\"contour\" not in func.__name__) and (levels is None)):\n raise ValueError(\"Can only specify colors with contour or levels\")\n\n # we should not be getting a list of colors in cmap anymore\n # is there a better way to do this test?\n if isinstance(cmap, (list, tuple)):\n raise ValueError(\n \"Specifying a list of colors in cmap is deprecated. \"\n \"Use colors keyword instead.\"\n )\n\n cmap_kwargs = {\n \"plot_data\": data,\n \"levels\": levels,\n \"cmap\": colors if colors else cmap,\n \"filled\": func.__name__ != \"contour\",\n }\n\n cmap_args = getfullargspec(_determine_cmap_params).args\n cmap_kwargs.update((a, kwargs[a]) for a in cmap_args if a in kwargs)\n cmap_params = _determine_cmap_params(**cmap_kwargs)\n\n return cmap_params, cbar_kwargs"},{"attributeType":"null","col":4,"comment":"null","endLoc":445,"id":1021,"name":"__slots__","nodeType":"Attribute","startLoc":445,"text":"__slots__"},{"attributeType":"null","col":8,"comment":"null","endLoc":448,"id":1022,"name":"_d","nodeType":"Attribute","startLoc":448,"text":"self._d"},{"className":"ReprObject","col":0,"comment":"Object that prints as the given value, for use with sentinel values.\n ","endLoc":548,"id":1023,"nodeType":"Class","startLoc":530,"text":"class ReprObject:\n \"\"\"Object that prints as the given value, for use with sentinel values.\n \"\"\"\n\n __slots__ = (\"_value\",)\n\n def __init__(self, value: str):\n self._value = value\n\n def __repr__(self) -> str:\n return self._value\n\n def __eq__(self, other) -> bool:\n if isinstance(other, ReprObject):\n return self._value == other._value\n return False\n\n def __hash__(self) -> int:\n return hash((ReprObject, self._value))"},{"col":4,"comment":"null","endLoc":537,"header":"def __init__(self, value: str)","id":1024,"name":"__init__","nodeType":"Function","startLoc":536,"text":"def __init__(self, value: str):\n self._value = value"},{"col":4,"comment":"null","endLoc":540,"header":"def __repr__(self) -> str","id":1025,"name":"__repr__","nodeType":"Function","startLoc":539,"text":"def __repr__(self) -> str:\n return self._value"},{"col":4,"comment":"null","endLoc":545,"header":"def __eq__(self, other) -> 
bool","id":1026,"name":"__eq__","nodeType":"Function","startLoc":542,"text":"def __eq__(self, other) -> bool:\n if isinstance(other, ReprObject):\n return self._value == other._value\n return False"},{"col":4,"comment":"null","endLoc":548,"header":"def __hash__(self) -> int","id":1027,"name":"__hash__","nodeType":"Function","startLoc":547,"text":"def __hash__(self) -> int:\n return hash((ReprObject, self._value))"},{"attributeType":"null","col":4,"comment":"null","endLoc":534,"id":1028,"name":"__slots__","nodeType":"Attribute","startLoc":534,"text":"__slots__"},{"attributeType":"null","col":8,"comment":"null","endLoc":537,"id":1029,"name":"_value","nodeType":"Attribute","startLoc":537,"text":"self._value"},{"className":"HiddenKeyDict","col":0,"comment":"Acts like a normal dictionary, but hides certain keys.\n ","endLoc":660,"id":1030,"nodeType":"Class","startLoc":624,"text":"class HiddenKeyDict(MutableMapping[K, V]):\n \"\"\"Acts like a normal dictionary, but hides certain keys.\n \"\"\"\n\n __slots__ = (\"_data\", \"_hidden_keys\")\n\n # ``__init__`` method required to create instance from class.\n\n def __init__(self, data: MutableMapping[K, V], hidden_keys: Iterable[K]):\n self._data = data\n self._hidden_keys = frozenset(hidden_keys)\n\n def _raise_if_hidden(self, key: K) -> None:\n if key in self._hidden_keys:\n raise KeyError(\"Key `%r` is hidden.\" % key)\n\n # The next five methods are requirements of the ABC.\n def __setitem__(self, key: K, value: V) -> None:\n self._raise_if_hidden(key)\n self._data[key] = value\n\n def __getitem__(self, key: K) -> V:\n self._raise_if_hidden(key)\n return self._data[key]\n\n def __delitem__(self, key: K) -> None:\n self._raise_if_hidden(key)\n del self._data[key]\n\n def __iter__(self) -> Iterator[K]:\n for k in self._data:\n if k not in self._hidden_keys:\n yield k\n\n def __len__(self) -> int:\n num_hidden = len(self._hidden_keys & self._data.keys())\n return len(self._data) - num_hidden"},{"col":0,"comment":"\n Use some heuristics to set good defaults for colorbar and range.\n\n Parameters\n ==========\n plot_data: Numpy array\n Doesn't handle xarray objects\n\n Returns\n =======\n cmap_params : dict\n Use depends on the type of the plotting function\n ","endLoc":283,"header":"def _determine_cmap_params(\n plot_data,\n vmin=None,\n vmax=None,\n cmap=None,\n center=None,\n robust=False,\n extend=None,\n levels=None,\n filled=True,\n norm=None,\n)","id":1031,"name":"_determine_cmap_params","nodeType":"Function","startLoc":141,"text":"def _determine_cmap_params(\n plot_data,\n vmin=None,\n vmax=None,\n cmap=None,\n center=None,\n robust=False,\n extend=None,\n levels=None,\n filled=True,\n norm=None,\n):\n \"\"\"\n Use some heuristics to set good defaults for colorbar and range.\n\n Parameters\n ==========\n plot_data: Numpy array\n Doesn't handle xarray objects\n\n Returns\n =======\n cmap_params : dict\n Use depends on the type of the plotting function\n \"\"\"\n import matplotlib as mpl\n\n calc_data = np.ravel(plot_data[np.isfinite(plot_data)])\n\n # Handle all-NaN input data gracefully\n if calc_data.size == 0:\n # Arbitrary default for when all values are NaN\n calc_data = np.array(0.0)\n\n # Setting center=False prevents a divergent cmap\n possibly_divergent = center is not False\n\n # Set center to 0 so math below makes sense but remember its state\n center_is_none = False\n if center is None:\n center = 0\n center_is_none = True\n\n # Setting both vmin and vmax prevents a divergent cmap\n if (vmin is not None) and (vmax is not None):\n 
possibly_divergent = False\n\n # Setting vmin or vmax implies linspaced levels\n user_minmax = (vmin is not None) or (vmax is not None)\n\n # vlim might be computed below\n vlim = None\n\n # save state; needed later\n vmin_was_none = vmin is None\n vmax_was_none = vmax is None\n\n if vmin is None:\n if robust:\n vmin = np.percentile(calc_data, ROBUST_PERCENTILE)\n else:\n vmin = calc_data.min()\n elif possibly_divergent:\n vlim = abs(vmin - center)\n\n if vmax is None:\n if robust:\n vmax = np.percentile(calc_data, 100 - ROBUST_PERCENTILE)\n else:\n vmax = calc_data.max()\n elif possibly_divergent:\n vlim = abs(vmax - center)\n\n if possibly_divergent:\n # kwargs not specific about divergent or not: infer defaults from data\n divergent = ((vmin < 0) and (vmax > 0)) or not center_is_none\n else:\n divergent = False\n\n # A divergent map should be symmetric around the center value\n if divergent:\n if vlim is None:\n vlim = max(abs(vmin - center), abs(vmax - center))\n vmin, vmax = -vlim, vlim\n\n # Now add in the centering value and set the limits\n vmin += center\n vmax += center\n\n # now check norm and harmonize with vmin, vmax\n if norm is not None:\n if norm.vmin is None:\n norm.vmin = vmin\n else:\n if not vmin_was_none and vmin != norm.vmin:\n raise ValueError(\n \"Cannot supply vmin and a norm\" + \" with a different vmin.\"\n )\n vmin = norm.vmin\n\n if norm.vmax is None:\n norm.vmax = vmax\n else:\n if not vmax_was_none and vmax != norm.vmax:\n raise ValueError(\n \"Cannot supply vmax and a norm\" + \" with a different vmax.\"\n )\n vmax = norm.vmax\n\n # if BoundaryNorm, then set levels\n if isinstance(norm, mpl.colors.BoundaryNorm):\n levels = norm.boundaries\n\n # Choose default colormaps if not provided\n if cmap is None:\n if divergent:\n cmap = OPTIONS[\"cmap_divergent\"]\n else:\n cmap = OPTIONS[\"cmap_sequential\"]\n\n # Handle discrete levels\n if levels is not None and norm is None:\n if is_scalar(levels):\n if user_minmax:\n levels = np.linspace(vmin, vmax, levels)\n elif levels == 1:\n levels = np.asarray([(vmin + vmax) / 2])\n else:\n # N in MaxNLocator refers to bins, not ticks\n ticker = mpl.ticker.MaxNLocator(levels - 1)\n levels = ticker.tick_values(vmin, vmax)\n vmin, vmax = levels[0], levels[-1]\n\n if extend is None:\n extend = _determine_extend(calc_data, vmin, vmax)\n\n if levels is not None or isinstance(norm, mpl.colors.BoundaryNorm):\n cmap, newnorm = _build_discrete_cmap(cmap, levels, extend, filled)\n norm = newnorm if norm is None else norm\n\n return dict(\n vmin=vmin, vmax=vmax, cmap=cmap, extend=extend, levels=levels, norm=norm\n )"},{"col":4,"comment":"null","endLoc":634,"header":"def __init__(self, data: MutableMapping[K, V], hidden_keys: Iterable[K])","id":1032,"name":"__init__","nodeType":"Function","startLoc":632,"text":"def __init__(self, data: MutableMapping[K, V], hidden_keys: Iterable[K]):\n self._data = data\n self._hidden_keys = frozenset(hidden_keys)"},{"col":4,"comment":"null","endLoc":638,"header":"def _raise_if_hidden(self, key: K) -> None","id":1033,"name":"_raise_if_hidden","nodeType":"Function","startLoc":636,"text":"def _raise_if_hidden(self, key: K) -> None:\n if key in self._hidden_keys:\n raise KeyError(\"Key `%r` is hidden.\" % key)"},{"col":4,"comment":"null","endLoc":643,"header":"def __setitem__(self, key: K, value: V) -> None","id":1034,"name":"__setitem__","nodeType":"Function","startLoc":641,"text":"def __setitem__(self, key: K, value: V) -> None:\n self._raise_if_hidden(key)\n self._data[key] = 
value"},{"col":4,"comment":"null","endLoc":647,"header":"def __getitem__(self, key: K) -> V","id":1035,"name":"__getitem__","nodeType":"Function","startLoc":645,"text":"def __getitem__(self, key: K) -> V:\n self._raise_if_hidden(key)\n return self._data[key]"},{"col":4,"comment":"null","endLoc":661,"header":"def __delitem__(self, key: Any) -> None","id":1036,"name":"__delitem__","nodeType":"Function","startLoc":660,"text":"def __delitem__(self, key: Any) -> None:\n del self.coords[key]"},{"col":4,"comment":"List of places to look-up items for attribute-style access\n ","endLoc":667,"header":"@property\n def _attr_sources(self) -> List[Mapping[Hashable, Any]]","id":1037,"name":"_attr_sources","nodeType":"Function","startLoc":663,"text":"@property\n def _attr_sources(self) -> List[Mapping[Hashable, Any]]:\n \"\"\"List of places to look-up items for attribute-style access\n \"\"\"\n return self._item_sources + [self.attrs]"},{"col":4,"comment":"List of places to look-up items for key-completion\n ","endLoc":677,"header":"@property\n def _item_sources(self) -> List[Mapping[Hashable, Any]]","id":1038,"name":"_item_sources","nodeType":"Function","startLoc":669,"text":"@property\n def _item_sources(self) -> List[Mapping[Hashable, Any]]:\n \"\"\"List of places to look-up items for key-completion\n \"\"\"\n return [\n self.coords,\n {d: self.coords[d] for d in self.dims},\n LevelCoordinatesSource(self),\n ]"},{"col":4,"comment":"null","endLoc":651,"header":"def __delitem__(self, key: K) -> None","id":1039,"name":"__delitem__","nodeType":"Function","startLoc":649,"text":"def __delitem__(self, key: K) -> None:\n self._raise_if_hidden(key)\n del self._data[key]"},{"col":4,"comment":"null","endLoc":656,"header":"def __iter__(self) -> Iterator[K]","id":1040,"name":"__iter__","nodeType":"Function","startLoc":653,"text":"def __iter__(self) -> Iterator[K]:\n for k in self._data:\n if k not in self._hidden_keys:\n yield k"},{"col":4,"comment":"null","endLoc":660,"header":"def __len__(self) -> int","id":1041,"name":"__len__","nodeType":"Function","startLoc":658,"text":"def __len__(self) -> int:\n num_hidden = len(self._hidden_keys & self._data.keys())\n return len(self._data) - num_hidden"},{"col":4,"comment":"null","endLoc":680,"header":"def __contains__(self, key: Any) -> bool","id":1042,"name":"__contains__","nodeType":"Function","startLoc":679,"text":"def __contains__(self, key: Any) -> bool:\n return key in self.data"},{"col":4,"comment":"Apply scipy.interpolate.interp1d along resampling dimension.","endLoc":155,"header":"def _interpolate(self, kind=\"linear\")","id":1043,"name":"_interpolate","nodeType":"Function","startLoc":142,"text":"def _interpolate(self, kind=\"linear\"):\n \"\"\"Apply scipy.interpolate.interp1d along resampling dimension.\"\"\"\n # drop any existing non-dimension coordinates along the resampling\n # dimension\n dummy = self._obj.copy()\n for k, v in self._obj.coords.items():\n if k != self._dim and self._dim in v.dims:\n dummy = dummy.drop(k)\n return dummy.interp(\n assume_sorted=True,\n method=kind,\n kwargs={\"bounds_error\": False},\n **{self._dim: self._full_index}\n )"},{"col":4,"comment":"Attribute for location based indexing like pandas.\n ","endLoc":686,"header":"@property\n def loc(self) -> _LocIndexer","id":1044,"name":"loc","nodeType":"Function","startLoc":682,"text":"@property\n def loc(self) -> _LocIndexer:\n \"\"\"Attribute for location based indexing like pandas.\n \"\"\"\n return 
_LocIndexer(self)"},{"attributeType":"null","col":4,"comment":"null","endLoc":628,"id":1045,"name":"__slots__","nodeType":"Attribute","startLoc":628,"text":"__slots__"},{"col":4,"comment":"Return values of original object at the new up-sampling frequency;\n essentially a re-index with new times set to NaN.\n ","endLoc":73,"header":"def asfreq(self)","id":1046,"name":"asfreq","nodeType":"Function","startLoc":69,"text":"def asfreq(self):\n \"\"\"Return values of original object at the new up-sampling frequency;\n essentially a re-index with new times set to NaN.\n \"\"\"\n return self._upsample(\"asfreq\")"},{"attributeType":"null","col":8,"comment":"null","endLoc":633,"id":1047,"name":"_data","nodeType":"Attribute","startLoc":633,"text":"self._data"},{"attributeType":"null","col":8,"comment":"null","endLoc":634,"id":1048,"name":"_hidden_keys","nodeType":"Attribute","startLoc":634,"text":"self._hidden_keys"},{"col":0,"comment":"null","endLoc":45,"header":"def alias_message(old_name: str, new_name: str) -> str","id":1049,"name":"alias_message","nodeType":"Function","startLoc":44,"text":"def alias_message(old_name: str, new_name: str) -> str:\n return \"%s has been deprecated. Use %s instead.\" % (old_name, new_name)"},{"col":0,"comment":"null","endLoc":51,"header":"def alias_warning(old_name: str, new_name: str, stacklevel: int = 3) -> None","id":1050,"name":"alias_warning","nodeType":"Function","startLoc":48,"text":"def alias_warning(old_name: str, new_name: str, stacklevel: int = 3) -> None:\n warnings.warn(\n alias_message(old_name, new_name), FutureWarning, stacklevel=stacklevel\n )"},{"col":0,"comment":"null","endLoc":63,"header":"def alias(obj: Callable[..., T], old_name: str) -> Callable[..., T]","id":1051,"name":"alias","nodeType":"Function","startLoc":54,"text":"def alias(obj: Callable[..., T], old_name: str) -> Callable[..., T]:\n assert isinstance(old_name, str)\n\n @functools.wraps(obj)\n def wrapper(*args, **kwargs):\n alias_warning(old_name, obj.__name__)\n return obj(*args, **kwargs)\n\n wrapper.__doc__ = alias_message(old_name, obj.__name__)\n return wrapper"},{"col":4,"comment":"null","endLoc":189,"header":"def __init__(self, data_array: \"DataArray\")","id":1052,"name":"__init__","nodeType":"Function","startLoc":188,"text":"def __init__(self, data_array: \"DataArray\"):\n self.data_array = data_array"},{"col":4,"comment":"Forward fill new values at up-sampled frequency.\n\n Parameters\n ----------\n tolerance : optional\n Maximum distance between original and new labels to limit\n the up-sampling method.\n Up-sampled data with indices that satisfy the equation\n ``abs(index[indexer] - target) <= tolerance`` are filled by\n new values. Data with indices that are outside the given\n tolerance are filled with ``NaN`` s\n ","endLoc":88,"header":"def pad(self, tolerance=None)","id":1053,"name":"pad","nodeType":"Function","startLoc":75,"text":"def pad(self, tolerance=None):\n \"\"\"Forward fill new values at up-sampled frequency.\n\n Parameters\n ----------\n tolerance : optional\n Maximum distance between original and new labels to limit\n the up-sampling method.\n Up-sampled data with indices that satisfy the equation\n ``abs(index[indexer] - target) <= tolerance`` are filled by\n new values. 
Data with indices that are outside the given\n tolerance are filled with ``NaN`` s\n \"\"\"\n return self._upsample(\"pad\", tolerance=tolerance)"},{"col":4,"comment":"Dictionary storing arbitrary metadata with this array.","endLoc":691,"header":"@property\n def attrs(self) -> Dict[Hashable, Any]","id":1054,"name":"attrs","nodeType":"Function","startLoc":688,"text":"@property\n def attrs(self) -> Dict[Hashable, Any]:\n \"\"\"Dictionary storing arbitrary metadata with this array.\"\"\"\n return self.variable.attrs"},{"col":4,"comment":"null","endLoc":696,"header":"@attrs.setter\n def attrs(self, value: Mapping[Hashable, Any]) -> None","id":1055,"name":"attrs","nodeType":"Function","startLoc":693,"text":"@attrs.setter\n def attrs(self, value: Mapping[Hashable, Any]) -> None:\n # Disable type checking to work around mypy bug - see mypy#4167\n self.variable.attrs = value # type: ignore"},{"col":4,"comment":"Dictionary of format-specific settings for how this array should be\n serialized.","endLoc":702,"header":"@property\n def encoding(self) -> Dict[Hashable, Any]","id":1056,"name":"encoding","nodeType":"Function","startLoc":698,"text":"@property\n def encoding(self) -> Dict[Hashable, Any]:\n \"\"\"Dictionary of format-specific settings for how this array should be\n serialized.\"\"\"\n return self.variable.encoding"},{"col":4,"comment":"null","endLoc":706,"header":"@encoding.setter\n def encoding(self, value: Mapping[Hashable, Any]) -> None","id":1057,"name":"encoding","nodeType":"Function","startLoc":704,"text":"@encoding.setter\n def encoding(self, value: Mapping[Hashable, Any]) -> None:\n self.variable.encoding = value"},{"col":4,"comment":"Mapping of pandas.Index objects used for label based indexing\n ","endLoc":714,"header":"@property\n def indexes(self) -> Indexes","id":1058,"name":"indexes","nodeType":"Function","startLoc":708,"text":"@property\n def indexes(self) -> Indexes:\n \"\"\"Mapping of pandas.Index objects used for label based indexing\n \"\"\"\n if self._indexes is None:\n self._indexes = default_indexes(self._coords, self.dims)\n return Indexes(self._indexes)"},{"col":0,"comment":"null","endLoc":74,"header":"def _determine_extend(calc_data, vmin, vmax)","id":1059,"name":"_determine_extend","nodeType":"Function","startLoc":63,"text":"def _determine_extend(calc_data, vmin, vmax):\n extend_min = calc_data.min() < vmin\n extend_max = calc_data.max() > vmax\n if extend_min and extend_max:\n extend = \"both\"\n elif extend_min:\n extend = \"min\"\n elif extend_max:\n extend = \"max\"\n else:\n extend = \"neither\"\n return extend"},{"col":4,"comment":"Backward fill new values at up-sampled frequency.\n\n Parameters\n ----------\n tolerance : optional\n Maximum distance between original and new labels to limit\n the up-sampling method.\n Up-sampled data with indices that satisfy the equation\n ``abs(index[indexer] - target) <= tolerance`` are filled by\n new values. Data with indices that are outside the given\n tolerance are filled with ``NaN`` s\n ","endLoc":105,"header":"def backfill(self, tolerance=None)","id":1060,"name":"backfill","nodeType":"Function","startLoc":92,"text":"def backfill(self, tolerance=None):\n \"\"\"Backward fill new values at up-sampled frequency.\n\n Parameters\n ----------\n tolerance : optional\n Maximum distance between original and new labels to limit\n the up-sampling method.\n Up-sampled data with indices that satisfy the equation\n ``abs(index[indexer] - target) <= tolerance`` are filled by\n new values. 
Data with indices that are outside the given\n tolerance are filled with ``NaN`` s\n \"\"\"\n return self._upsample(\"backfill\", tolerance=tolerance)"},{"col":0,"comment":"\n Build a discrete colormap and normalization of the data.\n ","endLoc":101,"header":"def _build_discrete_cmap(cmap, levels, extend, filled)","id":1061,"name":"_build_discrete_cmap","nodeType":"Function","startLoc":77,"text":"def _build_discrete_cmap(cmap, levels, extend, filled):\n \"\"\"\n Build a discrete colormap and normalization of the data.\n \"\"\"\n import matplotlib as mpl\n\n if not filled:\n # non-filled contour plots\n extend = \"max\"\n\n if extend == \"both\":\n ext_n = 2\n elif extend in [\"min\", \"max\"]:\n ext_n = 1\n else:\n ext_n = 0\n\n n_colors = len(levels) + ext_n - 1\n pal = _color_palette(cmap, n_colors)\n\n new_cmap, cnorm = mpl.colors.from_levels_and_colors(levels, pal, extend=extend)\n # copy the old cmap name, for easier testing\n new_cmap.name = getattr(cmap, \"name\", cmap)\n\n return new_cmap, cnorm"},{"col":0,"comment":"null","endLoc":81,"header":"def maybe_cast_to_coords_dtype(label, coords_dtype)","id":1062,"name":"maybe_cast_to_coords_dtype","nodeType":"Function","startLoc":78,"text":"def maybe_cast_to_coords_dtype(label, coords_dtype):\n if coords_dtype.kind == \"f\" and not isinstance(label, slice):\n label = np.asarray(label, dtype=coords_dtype)\n return label"},{"col":4,"comment":"Take new values from nearest original coordinate to up-sampled\n frequency coordinates.\n\n Parameters\n ----------\n tolerance : optional\n Maximum distance between original and new labels to limit\n the up-sampling method.\n Up-sampled data with indices that satisfy the equation\n ``abs(index[indexer] - target) <= tolerance`` are filled by\n new values. Data with indices that are outside the given\n tolerance are filled with ``NaN`` s\n ","endLoc":123,"header":"def nearest(self, tolerance=None)","id":1063,"name":"nearest","nodeType":"Function","startLoc":109,"text":"def nearest(self, tolerance=None):\n \"\"\"Take new values from nearest original coordinate to up-sampled\n frequency coordinates.\n\n Parameters\n ----------\n tolerance : optional\n Maximum distance between original and new labels to limit\n the up-sampling method.\n Up-sampled data with indices that satisfy the equation\n ``abs(index[indexer] - target) <= tolerance`` are filled by\n new values. 
Data with indices that are outside the given\n tolerance are filled with ``NaN`` s\n \"\"\"\n return self._upsample(\"nearest\", tolerance=tolerance)"},{"col":0,"comment":"Default indexes for a Dataset/DataArray.\n\n Parameters\n ----------\n coords : Mapping[Any, xarray.Variable]\n Coordinate variables from which to draw default indexes.\n dims : iterable\n Iterable of dimension names.\n\n Returns\n -------\n Mapping from indexing keys (levels/dimension names) to indexes used for\n indexing along that dimension.\n ","endLoc":59,"header":"def default_indexes(\n coords: Mapping[Any, Variable], dims: Iterable\n) -> Dict[Hashable, pd.Index]","id":1064,"name":"default_indexes","nodeType":"Function","startLoc":42,"text":"def default_indexes(\n coords: Mapping[Any, Variable], dims: Iterable\n) -> Dict[Hashable, pd.Index]:\n \"\"\"Default indexes for a Dataset/DataArray.\n\n Parameters\n ----------\n coords : Mapping[Any, xarray.Variable]\n Coordinate variables from which to draw default indexes.\n dims : iterable\n Iterable of dimension names.\n\n Returns\n -------\n Mapping from indexing keys (levels/dimension names) to indexes used for\n indexing along that dimension.\n \"\"\"\n return {key: coords[key].to_index() for key in dims if key in coords}"},{"col":0,"comment":"null","endLoc":133,"header":"def _color_palette(cmap, n_colors)","id":1065,"name":"_color_palette","nodeType":"Function","startLoc":104,"text":"def _color_palette(cmap, n_colors):\n import matplotlib.pyplot as plt\n from matplotlib.colors import ListedColormap\n\n colors_i = np.linspace(0, 1.0, n_colors)\n if isinstance(cmap, (list, tuple)):\n # we have a list of colors\n cmap = ListedColormap(cmap, N=n_colors)\n pal = cmap(colors_i)\n elif isinstance(cmap, str):\n # we have some sort of named palette\n try:\n # is this a matplotlib cmap?\n cmap = plt.get_cmap(cmap)\n pal = cmap(colors_i)\n except ValueError:\n # ValueError happens when mpl doesn't like a colormap, try seaborn\n try:\n from seaborn.apionly import color_palette\n\n pal = color_palette(cmap, n_colors=n_colors)\n except (ValueError, ImportError):\n # or maybe we just got a single color as a string\n cmap = ListedColormap([cmap], N=n_colors)\n pal = cmap(colors_i)\n else:\n # cmap better be a LinearSegmentedColormap (e.g. 
viridis)\n pal = cmap(colors_i)\n\n return pal"},{"col":4,"comment":"Interpolate up-sampled data using the original data\n as knots.\n\n Parameters\n ----------\n kind : str {'linear', 'nearest', 'zero', 'slinear',\n 'quadratic', 'cubic'}\n Interpolation scheme to use\n\n See Also\n --------\n scipy.interpolate.interp1d\n\n ","endLoc":140,"header":"def interpolate(self, kind=\"linear\")","id":1066,"name":"interpolate","nodeType":"Function","startLoc":125,"text":"def interpolate(self, kind=\"linear\"):\n \"\"\"Interpolate up-sampled data using the original data\n as knots.\n\n Parameters\n ----------\n kind : str {'linear', 'nearest', 'zero', 'slinear',\n 'quadratic', 'cubic'}\n Interpolation scheme to use\n\n See Also\n --------\n scipy.interpolate.interp1d\n\n \"\"\"\n return self._interpolate(kind=kind)"},{"attributeType":"function","col":4,"comment":"null","endLoc":90,"id":1067,"name":"ffill","nodeType":"Attribute","startLoc":90,"text":"ffill"},{"col":0,"comment":"Creating a MultiIndex from a product without refactorizing levels.\n\n Keeping levels the same gives back the original labels when we unstack.\n\n Parameters\n ----------\n levels : sequence of pd.Index\n Values for each MultiIndex level.\n names : optional sequence of objects\n Names for each level.\n\n Returns\n -------\n pandas.MultiIndex\n ","endLoc":129,"header":"def multiindex_from_product_levels(\n levels: Sequence[pd.Index], names: Sequence[str] = None\n) -> pd.MultiIndex","id":1068,"name":"multiindex_from_product_levels","nodeType":"Function","startLoc":105,"text":"def multiindex_from_product_levels(\n levels: Sequence[pd.Index], names: Sequence[str] = None\n) -> pd.MultiIndex:\n \"\"\"Creating a MultiIndex from a product without refactorizing levels.\n\n Keeping levels the same gives back the original labels when we unstack.\n\n Parameters\n ----------\n levels : sequence of pd.Index\n Values for each MultiIndex level.\n names : optional sequence of objects\n Names for each level.\n\n Returns\n -------\n pandas.MultiIndex\n \"\"\"\n if any(not isinstance(lev, pd.Index) for lev in levels):\n raise TypeError(\"levels must be a list of pd.Index objects\")\n\n split_labels, levels = zip(*[lev.factorize() for lev in levels])\n labels_mesh = np.meshgrid(*split_labels, indexing=\"ij\")\n labels = [x.ravel() for x in labels_mesh]\n return pd.MultiIndex(levels, labels, sortorder=0, names=names)"},{"col":0,"comment":"Create a numpy array from an array of strings.\n\n For use in generating dates from strings for use with interp. Assumes the\n array is either 0-dimensional or 1-dimensional.\n\n Parameters\n ----------\n strings : array of strings\n Strings to convert to dates\n date_type : cftime.datetime type\n Calendar type to use for dates\n\n Returns\n -------\n np.array\n ","endLoc":547,"header":"def _parse_array_of_cftime_strings(strings, date_type)","id":1069,"name":"_parse_array_of_cftime_strings","nodeType":"Function","startLoc":528,"text":"def _parse_array_of_cftime_strings(strings, date_type):\n \"\"\"Create a numpy array from an array of strings.\n\n For use in generating dates from strings for use with interp. 
Assumes the\n array is either 0-dimensional or 1-dimensional.\n\n Parameters\n ----------\n strings : array of strings\n Strings to convert to dates\n date_type : cftime.datetime type\n Calendar type to use for dates\n\n Returns\n -------\n np.array\n \"\"\"\n return np.array(\n [_parse_iso8601_without_reso(date_type, s) for s in strings.ravel()]\n ).reshape(strings.shape)"},{"attributeType":"function","col":4,"comment":"null","endLoc":107,"id":1070,"name":"bfill","nodeType":"Attribute","startLoc":107,"text":"bfill"},{"col":0,"comment":"null","endLoc":525,"header":"def _parse_iso8601_without_reso(date_type, datetime_str)","id":1071,"name":"_parse_iso8601_without_reso","nodeType":"Function","startLoc":523,"text":"def _parse_iso8601_without_reso(date_type, datetime_str):\n date, _ = _parse_iso8601_with_reso(date_type, datetime_str)\n return date"},{"attributeType":"null","col":16,"comment":"null","endLoc":50,"id":1072,"name":"_obj","nodeType":"Attribute","startLoc":50,"text":"self._obj"},{"className":"DataArrayResample","col":0,"comment":"DataArrayGroupBy object specialized to time resampling operations over a\n specified dimension\n ","endLoc":226,"id":1073,"nodeType":"Class","startLoc":158,"text":"class DataArrayResample(DataArrayGroupBy, Resample):\n \"\"\"DataArrayGroupBy object specialized to time resampling operations over a\n specified dimension\n \"\"\"\n\n def __init__(self, *args, dim=None, resample_dim=None, **kwargs):\n\n if dim == resample_dim:\n raise ValueError(\n \"Proxy resampling dimension ('{}') \"\n \"cannot have the same name as actual dimension \"\n \"('{}')! \".format(resample_dim, dim)\n )\n self._dim = dim\n self._resample_dim = resample_dim\n\n super().__init__(*args, **kwargs)\n\n def apply(self, func, shortcut=False, args=(), **kwargs):\n \"\"\"Apply a function over each array in the group and concatenate them\n together into a new array.\n\n `func` is called like `func(ar, *args, **kwargs)` for each array `ar`\n in this group.\n\n Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how\n to stack together the array. The rule is:\n 1. If the dimension along which the group coordinate is defined is\n still in the first grouped array after applying `func`, then stack\n over this dimension.\n 2. Otherwise, stack over the new dimension given by name of this\n grouping (the argument to the `groupby` function).\n\n Parameters\n ----------\n func : function\n Callable to apply to each array.\n shortcut : bool, optional\n Whether or not to shortcut evaluation under the assumptions that:\n (1) The action of `func` does not depend on any of the array\n metadata (attributes or coordinates) but only on the data and\n dimensions.\n (2) The action of `func` creates arrays with homogeneous metadata,\n that is, with the same dimensions and attributes.\n If these conditions are satisfied `shortcut` provides significant\n speedup. 
This should be the case for many common groupby operations\n (e.g., applying numpy ufuncs).\n args : tuple, optional\n Positional arguments passed on to `func`.\n **kwargs\n Used to call `func(ar, **kwargs)` for each array `ar`.\n\n Returns\n -------\n applied : DataArray or DataArray\n The result of splitting, applying and combining this array.\n \"\"\"\n combined = super().apply(func, shortcut=shortcut, args=args, **kwargs)\n\n # If the aggregation function didn't drop the original resampling\n # dimension, then we need to do so before we can rename the proxy\n # dimension we used.\n if self._dim in combined.coords:\n combined = combined.drop(self._dim)\n\n if self._resample_dim in combined.dims:\n combined = combined.rename({self._resample_dim: self._dim})\n\n return combined"},{"col":0,"comment":"null","endLoc":116,"header":"def _parse_iso8601_with_reso(date_type, timestr)","id":1074,"name":"_parse_iso8601_with_reso","nodeType":"Function","startLoc":98,"text":"def _parse_iso8601_with_reso(date_type, timestr):\n default = date_type(1, 1, 1)\n result = parse_iso8601(timestr)\n replace = {}\n\n for attr in [\"year\", \"month\", \"day\", \"hour\", \"minute\", \"second\"]:\n value = result.get(attr, None)\n if value is not None:\n # Note ISO8601 conventions allow for fractional seconds.\n # TODO: Consider adding support for sub-second resolution?\n replace[attr] = int(value)\n resolution = attr\n\n # dayofwk=-1 is required to update the dayofwk and dayofyr attributes of\n # the returned date object in versions of cftime between 1.0.2 and\n # 1.0.3.4. It can be removed for versions of cftime greater than\n # 1.0.3.4.\n replace[\"dayofwk\"] = -1\n return default.replace(**replace), resolution"},{"col":4,"comment":"null","endLoc":174,"header":"def __init__(self, *args, dim=None, resample_dim=None, **kwargs)","id":1075,"name":"__init__","nodeType":"Function","startLoc":163,"text":"def __init__(self, *args, dim=None, resample_dim=None, **kwargs):\n\n if dim == resample_dim:\n raise ValueError(\n \"Proxy resampling dimension ('{}') \"\n \"cannot have the same name as actual dimension \"\n \"('{}')! \".format(resample_dim, dim)\n )\n self._dim = dim\n self._resample_dim = resample_dim\n\n super().__init__(*args, **kwargs)"},{"col":0,"comment":"null","endLoc":95,"header":"def parse_iso8601(datetime_string)","id":1076,"name":"parse_iso8601","nodeType":"Function","startLoc":90,"text":"def parse_iso8601(datetime_string):\n for pattern in _PATTERNS:\n match = re.match(pattern, datetime_string)\n if match:\n return match.groupdict()\n raise ValueError(\"no ISO-8601 match for string: %s\" % datetime_string)"},{"col":4,"comment":"Apply a function over each array in the group and concatenate them\n together into a new array.\n\n `func` is called like `func(ar, *args, **kwargs)` for each array `ar`\n in this group.\n\n Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how\n to stack together the array. The rule is:\n 1. If the dimension along which the group coordinate is defined is\n still in the first grouped array after applying `func`, then stack\n over this dimension.\n 2. 
Otherwise, stack over the new dimension given by name of this\n grouping (the argument to the `groupby` function).\n\n Parameters\n ----------\n func : function\n Callable to apply to each array.\n shortcut : bool, optional\n Whether or not to shortcut evaluation under the assumptions that:\n (1) The action of `func` does not depend on any of the array\n metadata (attributes or coordinates) but only on the data and\n dimensions.\n (2) The action of `func` creates arrays with homogeneous metadata,\n that is, with the same dimensions and attributes.\n If these conditions are satisfied `shortcut` provides significant\n speedup. This should be the case for many common groupby operations\n (e.g., applying numpy ufuncs).\n args : tuple, optional\n Positional arguments passed on to `func`.\n **kwargs\n Used to call `func(ar, **kwargs)` for each array `ar`.\n\n Returns\n -------\n applied : DataArray or DataArray\n The result of splitting, applying and combining this array.\n ","endLoc":226,"header":"def apply(self, func, shortcut=False, args=(), **kwargs)","id":1077,"name":"apply","nodeType":"Function","startLoc":176,"text":"def apply(self, func, shortcut=False, args=(), **kwargs):\n \"\"\"Apply a function over each array in the group and concatenate them\n together into a new array.\n\n `func` is called like `func(ar, *args, **kwargs)` for each array `ar`\n in this group.\n\n Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how\n to stack together the array. The rule is:\n 1. If the dimension along which the group coordinate is defined is\n still in the first grouped array after applying `func`, then stack\n over this dimension.\n 2. Otherwise, stack over the new dimension given by name of this\n grouping (the argument to the `groupby` function).\n\n Parameters\n ----------\n func : function\n Callable to apply to each array.\n shortcut : bool, optional\n Whether or not to shortcut evaluation under the assumptions that:\n (1) The action of `func` does not depend on any of the array\n metadata (attributes or coordinates) but only on the data and\n dimensions.\n (2) The action of `func` creates arrays with homogeneous metadata,\n that is, with the same dimensions and attributes.\n If these conditions are satisfied `shortcut` provides significant\n speedup. This should be the case for many common groupby operations\n (e.g., applying numpy ufuncs).\n args : tuple, optional\n Positional arguments passed on to `func`.\n **kwargs\n Used to call `func(ar, **kwargs)` for each array `ar`.\n\n Returns\n -------\n applied : DataArray or DataArray\n The result of splitting, applying and combining this array.\n \"\"\"\n combined = super().apply(func, shortcut=shortcut, args=args, **kwargs)\n\n # If the aggregation function didn't drop the original resampling\n # dimension, then we need to do so before we can rename the proxy\n # dimension we used.\n if self._dim in combined.coords:\n combined = combined.drop(self._dim)\n\n if self._resample_dim in combined.dims:\n combined = combined.rename({self._resample_dim: self._dim})\n\n return combined"},{"col":0,"comment":"Compare two objects for equivalence (identity or equality), using\n array_equiv if either object is an ndarray. 
If both objects are lists,\n equivalent is sequentially called on all the elements.\n ","endLoc":162,"header":"def equivalent(first: T, second: T) -> bool","id":1078,"name":"equivalent","nodeType":"Function","startLoc":145,"text":"def equivalent(first: T, second: T) -> bool:\n \"\"\"Compare two objects for equivalence (identity or equality), using\n array_equiv if either object is an ndarray. If both objects are lists,\n equivalent is sequentially called on all the elements.\n \"\"\"\n # TODO: refactor to avoid circular import\n from . import duck_array_ops\n\n if isinstance(first, np.ndarray) or isinstance(second, np.ndarray):\n return duck_array_ops.array_equiv(first, second)\n elif isinstance(first, list) or isinstance(second, list):\n return list_equiv(first, second)\n else:\n return (\n (first is second)\n or (first == second)\n or (pd.isnull(first) and pd.isnull(second))\n )"},{"col":0,"comment":"null","endLoc":395,"header":"def get_axis(figsize, size, aspect, ax)","id":1079,"name":"get_axis","nodeType":"Function","startLoc":371,"text":"def get_axis(figsize, size, aspect, ax):\n import matplotlib as mpl\n import matplotlib.pyplot as plt\n\n if figsize is not None:\n if ax is not None:\n raise ValueError(\"cannot provide both `figsize` and \" \"`ax` arguments\")\n if size is not None:\n raise ValueError(\"cannot provide both `figsize` and \" \"`size` arguments\")\n _, ax = plt.subplots(figsize=figsize)\n elif size is not None:\n if ax is not None:\n raise ValueError(\"cannot provide both `size` and `ax` arguments\")\n if aspect is None:\n width, height = mpl.rcParams[\"figure.figsize\"]\n aspect = width / height\n figsize = (size * aspect, size)\n _, ax = plt.subplots(figsize=figsize)\n elif aspect is not None:\n raise ValueError(\"cannot provide `aspect` argument without `size`\")\n\n if ax is None:\n ax = plt.gca()\n\n return ax"},{"attributeType":"{__eq__}","col":8,"comment":"null","endLoc":171,"id":1080,"name":"_dim","nodeType":"Attribute","startLoc":171,"text":"self._dim"},{"col":0,"comment":" Makes informative labels if variable metadata (attrs) follows\n CF conventions. ","endLoc":416,"header":"def label_from_attrs(da, extra=\"\")","id":1081,"name":"label_from_attrs","nodeType":"Function","startLoc":398,"text":"def label_from_attrs(da, extra=\"\"):\n \"\"\" Makes informative labels if variable metadata (attrs) follows\n CF conventions. 
\"\"\"\n\n if da.attrs.get(\"long_name\"):\n name = da.attrs[\"long_name\"]\n elif da.attrs.get(\"standard_name\"):\n name = da.attrs[\"standard_name\"]\n elif da.name is not None:\n name = da.name\n else:\n name = \"\"\n\n if da.attrs.get(\"units\"):\n units = \" [{}]\".format(da.attrs[\"units\"])\n else:\n units = \"\"\n\n return \"\\n\".join(textwrap.wrap(name + extra + units, 30))"},{"col":0,"comment":"null","endLoc":172,"header":"def list_equiv(first, second)","id":1082,"name":"list_equiv","nodeType":"Function","startLoc":165,"text":"def list_equiv(first, second):\n equiv = True\n if len(first) != len(second):\n return False\n else:\n for f, s in zip(first, second):\n equiv = equiv and equivalent(f, s)\n return equiv"},{"col":0,"comment":"Index a Variable and pandas.Index together.","endLoc":90,"header":"def isel_variable_and_index(\n name: Hashable,\n variable: Variable,\n index: pd.Index,\n indexers: Mapping[Hashable, Union[int, slice, np.ndarray, Variable]],\n) -> Tuple[Variable, Optional[pd.Index]]","id":1083,"name":"isel_variable_and_index","nodeType":"Function","startLoc":62,"text":"def isel_variable_and_index(\n name: Hashable,\n variable: Variable,\n index: pd.Index,\n indexers: Mapping[Hashable, Union[int, slice, np.ndarray, Variable]],\n) -> Tuple[Variable, Optional[pd.Index]]:\n \"\"\"Index a Variable and pandas.Index together.\"\"\"\n if not indexers:\n # nothing to index\n return variable.copy(deep=False), index\n\n if len(variable.dims) > 1:\n raise NotImplementedError(\n \"indexing multi-dimensional variable with indexes is not \" \"supported yet\"\n )\n\n new_variable = variable.isel(indexers)\n\n if new_variable.dims != (name,):\n # can't preserve a index if result has new dimensions\n return new_variable, None\n\n # we need to compute the new index\n (dim,) = variable.dims\n indexer = indexers[dim]\n if isinstance(indexer, Variable):\n indexer = indexer.data\n new_index = index[indexer]\n return new_variable, new_index"},{"className":"_Dataset_PlotMethods","col":0,"comment":"\n Enables use of xarray.plot functions as attributes on a Dataset.\n For example, Dataset.plot.scatter\n ","endLoc":164,"id":1084,"nodeType":"Class","startLoc":151,"text":"class _Dataset_PlotMethods(object):\n \"\"\"\n Enables use of xarray.plot functions as attributes on a Dataset.\n For example, Dataset.plot.scatter\n \"\"\"\n\n def __init__(self, dataset):\n self._ds = dataset\n\n def __call__(self, *args, **kwargs):\n raise ValueError(\n \"Dataset.plot cannot be called directly. Use \"\n \"an explicit plot method, e.g. ds.plot.scatter(...)\"\n )"},{"attributeType":"null","col":8,"comment":"null","endLoc":172,"id":1085,"name":"_resample_dim","nodeType":"Attribute","startLoc":172,"text":"self._resample_dim"},{"col":4,"comment":"null","endLoc":158,"header":"def __init__(self, dataset)","id":1086,"name":"__init__","nodeType":"Function","startLoc":157,"text":"def __init__(self, dataset):\n self._ds = dataset"},{"col":4,"comment":"null","endLoc":164,"header":"def __call__(self, *args, **kwargs)","id":1087,"name":"__call__","nodeType":"Function","startLoc":160,"text":"def __call__(self, *args, **kwargs):\n raise ValueError(\n \"Dataset.plot cannot be called directly. Use \"\n \"an explicit plot method, e.g. 
ds.plot.scatter(...)\"\n )"},{"attributeType":"null","col":8,"comment":"null","endLoc":158,"id":1089,"name":"_ds","nodeType":"Attribute","startLoc":158,"text":"self._ds"},{"col":4,"comment":"Not for public consumption.\n\n Parameters\n ----------\n indexes : Dict[Any, pandas.Index]\n Indexes held by this object.\n ","endLoc":24,"header":"def __init__(self, indexes)","id":1090,"name":"__init__","nodeType":"Function","startLoc":16,"text":"def __init__(self, indexes):\n \"\"\"Not for public consumption.\n\n Parameters\n ----------\n indexes : Dict[Any, pandas.Index]\n Indexes held by this object.\n \"\"\"\n self._indexes = indexes"},{"col":4,"comment":"Dictionary-like container of coordinate arrays.\n ","endLoc":720,"header":"@property\n def coords(self) -> DataArrayCoordinates","id":1091,"name":"coords","nodeType":"Function","startLoc":716,"text":"@property\n def coords(self) -> DataArrayCoordinates:\n \"\"\"Dictionary-like container of coordinate arrays.\n \"\"\"\n return DataArrayCoordinates(self)"},{"col":0,"comment":"null","endLoc":76,"header":"def _infer_meta_data(ds, x, y, hue, hue_style, add_guide)","id":1092,"name":"_infer_meta_data","nodeType":"Function","startLoc":20,"text":"def _infer_meta_data(ds, x, y, hue, hue_style, add_guide):\n dvars = set(ds.variables.keys())\n error_msg = \" must be one of ({0:s})\".format(\", \".join(dvars))\n\n if x not in dvars:\n raise ValueError(\"x\" + error_msg)\n\n if y not in dvars:\n raise ValueError(\"y\" + error_msg)\n\n if hue is not None and hue not in dvars:\n raise ValueError(\"hue\" + error_msg)\n\n if hue:\n hue_is_numeric = _is_numeric(ds[hue].values)\n\n if hue_style is None:\n hue_style = \"continuous\" if hue_is_numeric else \"discrete\"\n\n if not hue_is_numeric and (hue_style == \"continuous\"):\n raise ValueError(\n \"Cannot create a colorbar for a non numeric\" \" coordinate: \" + hue\n )\n\n if add_guide is None or add_guide is True:\n add_colorbar = True if hue_style == \"continuous\" else False\n add_legend = True if hue_style == \"discrete\" else False\n else:\n add_colorbar = False\n add_legend = False\n else:\n if add_guide is True:\n raise ValueError(\"Cannot set add_guide when hue is None.\")\n add_legend = False\n add_colorbar = False\n\n if hue_style is not None and hue_style not in [\"discrete\", \"continuous\"]:\n raise ValueError(\n \"hue_style must be either None, 'discrete' \" \"or 'continuous'.\"\n )\n\n if hue:\n hue_label = label_from_attrs(ds[hue])\n hue = ds[hue]\n else:\n hue_label = None\n hue = None\n\n return {\n \"add_colorbar\": add_colorbar,\n \"add_legend\": add_legend,\n \"hue_label\": hue_label,\n \"hue_style\": hue_style,\n \"xlabel\": label_from_attrs(ds[x]),\n \"ylabel\": label_from_attrs(ds[y]),\n \"hue\": hue,\n }"},{"className":"DatasetResample","col":0,"comment":"DatasetGroupBy object specialized to resampling a specified dimension\n ","endLoc":310,"id":1093,"nodeType":"Class","startLoc":233,"text":"class DatasetResample(DatasetGroupBy, Resample):\n \"\"\"DatasetGroupBy object specialized to resampling a specified dimension\n \"\"\"\n\n def __init__(self, *args, dim=None, resample_dim=None, **kwargs):\n\n if dim == resample_dim:\n raise ValueError(\n \"Proxy resampling dimension ('{}') \"\n \"cannot have the same name as actual dimension \"\n \"('{}')! 
\".format(resample_dim, dim)\n )\n self._dim = dim\n self._resample_dim = resample_dim\n\n super().__init__(*args, **kwargs)\n\n def apply(self, func, args=(), shortcut=None, **kwargs):\n \"\"\"Apply a function over each Dataset in the groups generated for\n resampling and concatenate them together into a new Dataset.\n\n `func` is called like `func(ds, *args, **kwargs)` for each dataset `ds`\n in this group.\n\n Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how\n to stack together the datasets. The rule is:\n 1. If the dimension along which the group coordinate is defined is\n still in the first grouped item after applying `func`, then stack\n over this dimension.\n 2. Otherwise, stack over the new dimension given by name of this\n grouping (the argument to the `groupby` function).\n\n Parameters\n ----------\n func : function\n Callable to apply to each sub-dataset.\n args : tuple, optional\n Positional arguments passed on to `func`.\n **kwargs\n Used to call `func(ds, **kwargs)` for each sub-dataset `ar`.\n\n Returns\n -------\n applied : Dataset or DataArray\n The result of splitting, applying and combining this dataset.\n \"\"\"\n # ignore shortcut if set (for now)\n applied = (func(ds, *args, **kwargs) for ds in self._iter_grouped())\n combined = self._combine(applied)\n\n return combined.rename({self._resample_dim: self._dim})\n\n def reduce(self, func, dim=None, keep_attrs=None, **kwargs):\n \"\"\"Reduce the items in this group by applying `func` along the\n pre-defined resampling dimension.\n\n Parameters\n ----------\n func : function\n Function which can be called in the form\n `func(x, axis=axis, **kwargs)` to return the result of collapsing\n an np.ndarray over an integer valued axis.\n dim : str or sequence of str, optional\n Dimension(s) over which to apply `func`.\n keep_attrs : bool, optional\n If True, the datasets's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : Array\n Array with summarized data and the indicated dimension(s)\n removed.\n \"\"\"\n return super().reduce(func, dim, keep_attrs, **kwargs)"},{"col":4,"comment":"null","endLoc":248,"header":"def __init__(self, *args, dim=None, resample_dim=None, **kwargs)","id":1094,"name":"__init__","nodeType":"Function","startLoc":237,"text":"def __init__(self, *args, dim=None, resample_dim=None, **kwargs):\n\n if dim == resample_dim:\n raise ValueError(\n \"Proxy resampling dimension ('{}') \"\n \"cannot have the same name as actual dimension \"\n \"('{}')! \".format(resample_dim, dim)\n )\n self._dim = dim\n self._resample_dim = resample_dim\n\n super().__init__(*args, **kwargs)"},{"col":0,"comment":"Check the safety of updating one dictionary with another.\n\n Raises ValueError if dictionaries have non-compatible values for any key,\n where compatibility is determined by identity (they are the same item) or\n the `compat` function.\n\n Parameters\n ----------\n first_dict, second_dict : dict-like\n All items in the second dictionary are checked against for conflicts\n against items in the first dictionary.\n compat : function, optional\n Binary operator to determine if two values are compatible. 
By default,\n checks for equivalence.\n ","endLoc":209,"header":"def update_safety_check(\n first_dict: MutableMapping[K, V],\n second_dict: Mapping[K, V],\n compat: Callable[[V, V], bool] = equivalent,\n) -> None","id":1095,"name":"update_safety_check","nodeType":"Function","startLoc":184,"text":"def update_safety_check(\n first_dict: MutableMapping[K, V],\n second_dict: Mapping[K, V],\n compat: Callable[[V, V], bool] = equivalent,\n) -> None:\n \"\"\"Check the safety of updating one dictionary with another.\n\n Raises ValueError if dictionaries have non-compatible values for any key,\n where compatibility is determined by identity (they are the same item) or\n the `compat` function.\n\n Parameters\n ----------\n first_dict, second_dict : dict-like\n All items in the second dictionary are checked against for conflicts\n against items in the first dictionary.\n compat : function, optional\n Binary operator to determine if two values are compatible. By default,\n checks for equivalence.\n \"\"\"\n for k, v in second_dict.items():\n if k in first_dict and not compat(v, first_dict[k]):\n raise ValueError(\n \"unsafe to merge dictionaries without \"\n \"overriding values; conflicting key %r\" % k\n )"},{"col":4,"comment":"Apply a function over each Dataset in the groups generated for\n resampling and concatenate them together into a new Dataset.\n\n `func` is called like `func(ds, *args, **kwargs)` for each dataset `ds`\n in this group.\n\n Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how\n to stack together the datasets. The rule is:\n 1. If the dimension along which the group coordinate is defined is\n still in the first grouped item after applying `func`, then stack\n over this dimension.\n 2. Otherwise, stack over the new dimension given by name of this\n grouping (the argument to the `groupby` function).\n\n Parameters\n ----------\n func : function\n Callable to apply to each sub-dataset.\n args : tuple, optional\n Positional arguments passed on to `func`.\n **kwargs\n Used to call `func(ds, **kwargs)` for each sub-dataset `ar`.\n\n Returns\n -------\n applied : Dataset or DataArray\n The result of splitting, applying and combining this dataset.\n ","endLoc":283,"header":"def apply(self, func, args=(), shortcut=None, **kwargs)","id":1096,"name":"apply","nodeType":"Function","startLoc":250,"text":"def apply(self, func, args=(), shortcut=None, **kwargs):\n \"\"\"Apply a function over each Dataset in the groups generated for\n resampling and concatenate them together into a new Dataset.\n\n `func` is called like `func(ds, *args, **kwargs)` for each dataset `ds`\n in this group.\n\n Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how\n to stack together the datasets. The rule is:\n 1. If the dimension along which the group coordinate is defined is\n still in the first grouped item after applying `func`, then stack\n over this dimension.\n 2. 
Otherwise, stack over the new dimension given by name of this\n grouping (the argument to the `groupby` function).\n\n Parameters\n ----------\n func : function\n Callable to apply to each sub-dataset.\n args : tuple, optional\n Positional arguments passed on to `func`.\n **kwargs\n Used to call `func(ds, **kwargs)` for each sub-dataset `ar`.\n\n Returns\n -------\n applied : Dataset or DataArray\n The result of splitting, applying and combining this dataset.\n \"\"\"\n # ignore shortcut if set (for now)\n applied = (func(ds, *args, **kwargs) for ds in self._iter_grouped())\n combined = self._combine(applied)\n\n return combined.rename({self._resample_dim: self._dim})"},{"col":4,"comment":"null","endLoc":488,"header":"def sync(self)","id":1097,"name":"sync","nodeType":"Function","startLoc":487,"text":"def sync(self):\n self.ds.sync()"},{"col":0,"comment":"null","endLoc":107,"header":"def _infer_scatter_data(ds, x, y, hue, markersize, size_norm, size_mapping=None)","id":1098,"name":"_infer_scatter_data","nodeType":"Function","startLoc":79,"text":"def _infer_scatter_data(ds, x, y, hue, markersize, size_norm, size_mapping=None):\n\n broadcast_keys = [\"x\", \"y\"]\n to_broadcast = [ds[x], ds[y]]\n if hue:\n to_broadcast.append(ds[hue])\n broadcast_keys.append(\"hue\")\n if markersize:\n to_broadcast.append(ds[markersize])\n broadcast_keys.append(\"size\")\n\n broadcasted = dict(zip(broadcast_keys, broadcast(*to_broadcast)))\n\n data = {\"x\": broadcasted[\"x\"], \"y\": broadcasted[\"y\"], \"hue\": None, \"sizes\": None}\n\n if hue:\n data[\"hue\"] = broadcasted[\"hue\"]\n\n if markersize:\n size = broadcasted[\"size\"]\n\n if size_mapping is None:\n size_mapping = _parse_size(size, size_norm)\n\n data[\"sizes\"] = size.copy(\n data=np.reshape(size_mapping.loc[size.values.ravel()].values, size.shape)\n )\n\n return data"},{"col":4,"comment":"Reduce the items in this group by applying `func` along the\n pre-defined resampling dimension.\n\n Parameters\n ----------\n func : function\n Function which can be called in the form\n `func(x, axis=axis, **kwargs)` to return the result of collapsing\n an np.ndarray over an integer valued axis.\n dim : str or sequence of str, optional\n Dimension(s) over which to apply `func`.\n keep_attrs : bool, optional\n If True, the datasets's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : Array\n Array with summarized data and the indicated dimension(s)\n removed.\n ","endLoc":310,"header":"def reduce(self, func, dim=None, keep_attrs=None, **kwargs)","id":1099,"name":"reduce","nodeType":"Function","startLoc":285,"text":"def reduce(self, func, dim=None, keep_attrs=None, **kwargs):\n \"\"\"Reduce the items in this group by applying `func` along the\n pre-defined resampling dimension.\n\n Parameters\n ----------\n func : function\n Function which can be called in the form\n `func(x, axis=axis, **kwargs)` to return the result of collapsing\n an np.ndarray over an integer valued axis.\n dim : str or sequence of str, optional\n Dimension(s) over which to apply `func`.\n keep_attrs : bool, optional\n If True, the datasets's attributes (`attrs`) will be copied from\n the original object to the new one. 
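The _infer_scatter_data helper shown above feeds Dataset.plot.scatter. A rough sketch of the user-facing call it supports (requires matplotlib; the markersize argument is assumed to be available in this version, and the variable names are illustrative):

    import numpy as np
    import xarray as xr

    ds = xr.Dataset(
        {
            "temperature": ("station", np.random.rand(20)),
            "pressure": ("station", np.random.rand(20)),
            "elevation": ("station", np.random.rand(20) * 1000),
        }
    )

    # hue colours the points; markersize is mapped onto marker areas by the
    # size-normalisation logic these helpers implement.
    ds.plot.scatter(
        x="temperature", y="pressure", hue="elevation", markersize="elevation"
    )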
If False (default), the new\n object will be returned without attributes.\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : Array\n Array with summarized data and the indicated dimension(s)\n removed.\n \"\"\"\n return super().reduce(func, dim, keep_attrs, **kwargs)"},{"col":4,"comment":"null","endLoc":491,"header":"def close(self, **kwargs)","id":1100,"name":"close","nodeType":"Function","startLoc":490,"text":"def close(self, **kwargs):\n self._manager.close(**kwargs)"},{"col":4,"comment":"null","endLoc":271,"header":"def __init__(self, dataarray: \"DataArray\")","id":1101,"name":"__init__","nodeType":"Function","startLoc":270,"text":"def __init__(self, dataarray: \"DataArray\"):\n self._data = dataarray"},{"col":4,"comment":"Given names of coordinates, reset them to become variables.\n\n Parameters\n ----------\n names : hashable or iterable of hashables, optional\n Name(s) of non-index coordinates in this dataset to reset into\n variables. By default, all non-index coordinates are reset.\n drop : bool, optional\n If True, remove coordinates instead of converting them into\n variables.\n\n Returns\n -------\n Dataset, or DataArray if ``drop == True``\n ","endLoc":755,"header":"def reset_coords(\n self,\n names: Union[Iterable[Hashable], Hashable, None] = None,\n drop: bool = False,\n inplace: bool = None,\n ) -> Union[None, \"DataArray\", Dataset]","id":1102,"name":"reset_coords","nodeType":"Function","startLoc":722,"text":"def reset_coords(\n self,\n names: Union[Iterable[Hashable], Hashable, None] = None,\n drop: bool = False,\n inplace: bool = None,\n ) -> Union[None, \"DataArray\", Dataset]:\n \"\"\"Given names of coordinates, reset them to become variables.\n\n Parameters\n ----------\n names : hashable or iterable of hashables, optional\n Name(s) of non-index coordinates in this dataset to reset into\n variables. 
By default, all non-index coordinates are reset.\n drop : bool, optional\n If True, remove coordinates instead of converting them into\n variables.\n\n Returns\n -------\n Dataset, or DataArray if ``drop == True``\n \"\"\"\n _check_inplace(inplace)\n if names is None:\n names = set(self.coords) - set(self.dims)\n dataset = self.coords.to_dataset().reset_coords(names, drop)\n if drop:\n return self._replace(coords=dataset._variables)\n else:\n if self.name is None:\n raise ValueError(\n \"cannot reset_coords with drop=False on an unnamed DataArrray\"\n )\n dataset[self.name] = self.variable\n return dataset"},{"col":0,"comment":"null","endLoc":240,"header":"def is_full_slice(value: Any) -> bool","id":1103,"name":"is_full_slice","nodeType":"Function","startLoc":239,"text":"def is_full_slice(value: Any) -> bool:\n return isinstance(value, slice) and value == slice(None)"},{"col":0,"comment":"null","endLoc":244,"header":"def is_list_like(value: Any) -> bool","id":1104,"name":"is_list_like","nodeType":"Function","startLoc":243,"text":"def is_list_like(value: Any) -> bool:\n return isinstance(value, list) or isinstance(value, tuple)"},{"col":0,"comment":"Given a value, wrap it in a 0-D numpy.ndarray.\n ","endLoc":311,"header":"def to_0d_array(value: Any) -> np.ndarray","id":1105,"name":"to_0d_array","nodeType":"Function","startLoc":305,"text":"def to_0d_array(value: Any) -> np.ndarray:\n \"\"\"Given a value, wrap it in a 0-D numpy.ndarray.\n \"\"\"\n if np.isscalar(value) or (isinstance(value, np.ndarray) and value.ndim == 0):\n return np.array(value)\n else:\n return to_0d_object_array(value)"},{"attributeType":"null","col":4,"comment":"null","endLoc":282,"id":1106,"name":"__slots__","nodeType":"Attribute","startLoc":282,"text":"__slots__"},{"col":0,"comment":"null","endLoc":148,"header":"def _parse_size(data, norm)","id":1107,"name":"_parse_size","nodeType":"Function","startLoc":111,"text":"def _parse_size(data, norm):\n\n import matplotlib as mpl\n\n if data is None:\n return None\n\n data = data.values.flatten()\n\n if not _is_numeric(data):\n levels = np.unique(data)\n numbers = np.arange(1, 1 + len(levels))[::-1]\n else:\n levels = numbers = np.sort(np.unique(data))\n\n min_width, max_width = _MARKERSIZE_RANGE\n # width_range = min_width, max_width\n\n if norm is None:\n norm = mpl.colors.Normalize()\n elif isinstance(norm, tuple):\n norm = mpl.colors.Normalize(*norm)\n elif not isinstance(norm, mpl.colors.Normalize):\n err = \"``size_norm`` must be None, tuple, \" \"or Normalize object.\"\n raise ValueError(err)\n\n norm.clip = True\n if not norm.scaled():\n norm(np.asarray(numbers))\n # limits = norm.vmin, norm.vmax\n\n scl = norm(numbers)\n widths = np.asarray(min_width + scl * (max_width - min_width))\n if scl.mask.any():\n widths[scl.mask] = 0\n sizes = dict(zip(levels, widths))\n\n return pd.Series(sizes)"},{"attributeType":"null","col":8,"comment":"null","endLoc":317,"id":1108,"name":"autoclose","nodeType":"Attribute","startLoc":317,"text":"self.autoclose"},{"col":4,"comment":"null","endLoc":758,"header":"def __dask_graph__(self)","id":1109,"name":"__dask_graph__","nodeType":"Function","startLoc":757,"text":"def __dask_graph__(self):\n return 
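Illustrative use of DataArray.reset_coords as documented above; the coordinate and variable names are assumptions for the sketch.

    import numpy as np
    import xarray as xr

    da = xr.DataArray(
        np.arange(3),
        dims="x",
        coords={"x": [10, 20, 30], "station": ("x", ["a", "b", "c"])},
        name="temperature",
    )

    # Demote the non-index coordinate to a data variable (returns a Dataset).
    ds = da.reset_coords("station")

    # Or drop it entirely, keeping a DataArray.
    da_plain = da.reset_coords("station", drop=True)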
self._to_temp_dataset().__dask_graph__()"},{"attributeType":"{__eq__}","col":8,"comment":"null","endLoc":245,"id":1110,"name":"_dim","nodeType":"Attribute","startLoc":245,"text":"self._dim"},{"attributeType":"null","col":8,"comment":"null","endLoc":246,"id":1111,"name":"_resample_dim","nodeType":"Attribute","startLoc":246,"text":"self._resample_dim"},{"col":4,"comment":"null","endLoc":761,"header":"def __dask_keys__(self)","id":1113,"name":"__dask_keys__","nodeType":"Function","startLoc":760,"text":"def __dask_keys__(self):\n return self._to_temp_dataset().__dask_keys__()"},{"attributeType":"null","col":0,"comment":"null","endLoc":4,"id":1114,"name":"RESAMPLE_DIM","nodeType":"Attribute","startLoc":4,"text":"RESAMPLE_DIM"},{"col":0,"comment":"","endLoc":1,"header":"resample.py#","id":1115,"name":"","nodeType":"Function","startLoc":1,"text":"RESAMPLE_DIM = \"__resample_dim__\"\n\nops.inject_reduce_methods(DataArrayResample)\n\nops.inject_binary_ops(DataArrayResample)\n\nops.inject_reduce_methods(DatasetResample)\n\nops.inject_binary_ops(DatasetResample)"},{"col":0,"comment":"Return the intersection of two dictionaries as a new dictionary.\n\n Items are retained if their keys are found in both dictionaries and the\n values are compatible.\n\n Parameters\n ----------\n first_dict, second_dict : dict-like\n Mappings to merge.\n compat : function, optional\n Binary operator to determine if two values are compatible. By default,\n checks for equivalence.\n\n Returns\n -------\n intersection : dict\n Intersection of the contents.\n ","endLoc":369,"header":"def ordered_dict_intersection(\n first_dict: Mapping[K, V],\n second_dict: Mapping[K, V],\n compat: Callable[[V, V], bool] = equivalent,\n) -> MutableMapping[K, V]","id":1116,"name":"ordered_dict_intersection","nodeType":"Function","startLoc":344,"text":"def ordered_dict_intersection(\n first_dict: Mapping[K, V],\n second_dict: Mapping[K, V],\n compat: Callable[[V, V], bool] = equivalent,\n) -> MutableMapping[K, V]:\n \"\"\"Return the intersection of two dictionaries as a new dictionary.\n\n Items are retained if their keys are found in both dictionaries and the\n values are compatible.\n\n Parameters\n ----------\n first_dict, second_dict : dict-like\n Mappings to merge.\n compat : function, optional\n Binary operator to determine if two values are compatible. 
By default,\n checks for equivalence.\n\n Returns\n -------\n intersection : dict\n Intersection of the contents.\n \"\"\"\n new_dict = dict(first_dict)\n remove_incompatible_items(new_dict, second_dict, compat)\n return new_dict"},{"attributeType":"DummyFileManager","col":8,"comment":"null","endLoc":310,"id":1117,"name":"_manager","nodeType":"Attribute","startLoc":310,"text":"self._manager"},{"attributeType":"null","col":8,"comment":"null","endLoc":313,"id":1118,"name":"format","nodeType":"Attribute","startLoc":313,"text":"self.format"},{"attributeType":"null","col":8,"comment":"null","endLoc":315,"id":1119,"name":"is_remote","nodeType":"Attribute","startLoc":315,"text":"self.is_remote"},{"attributeType":"null","col":8,"comment":"null","endLoc":316,"id":1120,"name":"lock","nodeType":"Attribute","startLoc":316,"text":"self.lock"},{"col":4,"comment":"null","endLoc":764,"header":"def __dask_layers__(self)","id":1121,"name":"__dask_layers__","nodeType":"Function","startLoc":763,"text":"def __dask_layers__(self):\n return self._to_temp_dataset().__dask_layers__()"},{"col":0,"comment":"Return True if values of an array are uniformly spaced and sorted.\n\n >>> is_uniform_spaced(range(5))\n True\n >>> is_uniform_spaced([-4, 0, 100])\n False\n\n kwargs are additional arguments to ``np.isclose``\n ","endLoc":584,"header":"def is_uniform_spaced(arr, **kwargs) -> bool","id":1122,"name":"is_uniform_spaced","nodeType":"Function","startLoc":572,"text":"def is_uniform_spaced(arr, **kwargs) -> bool:\n \"\"\"Return True if values of an array are uniformly spaced and sorted.\n\n >>> is_uniform_spaced(range(5))\n True\n >>> is_uniform_spaced([-4, 0, 100])\n False\n\n kwargs are additional arguments to ``np.isclose``\n \"\"\"\n arr = np.array(arr, dtype=float)\n diffs = np.diff(arr)\n return bool(np.isclose(diffs.min(), diffs.max(), **kwargs))"},{"id":1123,"name":"api-hidden.rst","nodeType":"TextFile","path":"doc","text":".. Generate API reference pages, but don't display these in tables.\n.. This extra page is a work around for sphinx not having any support for\n.. hiding an autosummary table.\n\n.. currentmodule:: xarray\n\n.. 
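A small sketch of the two utilities shown above, ordered_dict_intersection and is_uniform_spaced; the xarray.core.utils import path is assumed.

    from xarray.core.utils import is_uniform_spaced, ordered_dict_intersection

    a = {"units": "m", "standard_name": "height", "source": "model"}
    b = {"units": "m", "standard_name": "altitude"}

    # Only keys present in both mappings with compatible values survive.
    print(ordered_dict_intersection(a, b))  # {'units': 'm'}

    print(is_uniform_spaced([0, 1, 2, 3]))  # True
    print(is_uniform_spaced([-4, 0, 100]))  # False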
autosummary::\n :toctree: generated/\n\n auto_combine\n\n Dataset.nbytes\n Dataset.chunks\n\n Dataset.all\n Dataset.any\n Dataset.argmax\n Dataset.argmin\n Dataset.max\n Dataset.min\n Dataset.mean\n Dataset.median\n Dataset.prod\n Dataset.sum\n Dataset.std\n Dataset.var\n\n core.groupby.DatasetGroupBy.assign\n core.groupby.DatasetGroupBy.assign_coords\n core.groupby.DatasetGroupBy.first\n core.groupby.DatasetGroupBy.last\n core.groupby.DatasetGroupBy.fillna\n core.groupby.DatasetGroupBy.where\n\n Dataset.argsort\n Dataset.clip\n Dataset.conj\n Dataset.conjugate\n Dataset.imag\n Dataset.round\n Dataset.real\n Dataset.cumsum\n Dataset.cumprod\n Dataset.rank\n\n DataArray.ndim\n DataArray.nbytes\n DataArray.shape\n DataArray.size\n DataArray.dtype\n DataArray.nbytes\n DataArray.chunks\n\n DataArray.astype\n DataArray.item\n\n DataArray.all\n DataArray.any\n DataArray.argmax\n DataArray.argmin\n DataArray.max\n DataArray.min\n DataArray.mean\n DataArray.median\n DataArray.prod\n DataArray.sum\n DataArray.std\n DataArray.var\n\n core.groupby.DataArrayGroupBy.assign_coords\n core.groupby.DataArrayGroupBy.first\n core.groupby.DataArrayGroupBy.last\n core.groupby.DataArrayGroupBy.fillna\n core.groupby.DataArrayGroupBy.where\n\n DataArray.argsort\n DataArray.clip\n DataArray.conj\n DataArray.conjugate\n DataArray.imag\n DataArray.searchsorted\n DataArray.round\n DataArray.real\n DataArray.T\n DataArray.cumsum\n DataArray.cumprod\n DataArray.rank\n\n ufuncs.angle\n ufuncs.arccos\n ufuncs.arccosh\n ufuncs.arcsin\n ufuncs.arcsinh\n ufuncs.arctan\n ufuncs.arctan2\n ufuncs.arctanh\n ufuncs.ceil\n ufuncs.conj\n ufuncs.copysign\n ufuncs.cos\n ufuncs.cosh\n ufuncs.deg2rad\n ufuncs.degrees\n ufuncs.exp\n ufuncs.expm1\n ufuncs.fabs\n ufuncs.fix\n ufuncs.floor\n ufuncs.fmax\n ufuncs.fmin\n ufuncs.fmod\n ufuncs.fmod\n ufuncs.frexp\n ufuncs.hypot\n ufuncs.imag\n ufuncs.iscomplex\n ufuncs.isfinite\n ufuncs.isinf\n ufuncs.isnan\n ufuncs.isreal\n ufuncs.ldexp\n ufuncs.log\n ufuncs.log10\n ufuncs.log1p\n ufuncs.log2\n ufuncs.logaddexp\n ufuncs.logaddexp2\n ufuncs.logical_and\n ufuncs.logical_not\n ufuncs.logical_or\n ufuncs.logical_xor\n ufuncs.maximum\n ufuncs.minimum\n ufuncs.nextafter\n ufuncs.rad2deg\n ufuncs.radians\n ufuncs.real\n ufuncs.rint\n ufuncs.sign\n ufuncs.signbit\n ufuncs.sin\n ufuncs.sinh\n ufuncs.sqrt\n ufuncs.square\n ufuncs.tan\n ufuncs.tanh\n ufuncs.trunc\n\n plot.FacetGrid.map_dataarray\n plot.FacetGrid.set_titles\n plot.FacetGrid.set_ticks\n plot.FacetGrid.map\n\n CFTimeIndex.shift\n CFTimeIndex.to_datetimeindex\n CFTimeIndex.strftime\n"},{"attributeType":"null","col":8,"comment":"null","endLoc":311,"id":1124,"name":"_group","nodeType":"Attribute","startLoc":311,"text":"self._group"},{"id":1125,"name":"asv_bench/benchmarks","nodeType":"Package"},{"fileName":"unstacking.py","filePath":"asv_bench/benchmarks","id":1126,"nodeType":"File","text":"import numpy as np\n\nimport xarray as xr\n\nfrom . 
import requires_dask\n\n\nclass Unstacking:\n def setup(self):\n data = np.random.RandomState(0).randn(1, 1000, 500)\n self.ds = xr.DataArray(data).stack(flat_dim=[\"dim_1\", \"dim_2\"])\n\n def time_unstack_fast(self):\n self.ds.unstack(\"flat_dim\")\n\n def time_unstack_slow(self):\n self.ds[:, ::-1].unstack(\"flat_dim\")\n\n\nclass UnstackingDask(Unstacking):\n def setup(self, *args, **kwargs):\n requires_dask()\n super().setup(**kwargs)\n self.ds = self.ds.chunk({\"flat_dim\": 50})\n"},{"col":4,"comment":"null","endLoc":768,"header":"@property\n def __dask_optimize__(self)","id":1127,"name":"__dask_optimize__","nodeType":"Function","startLoc":766,"text":"@property\n def __dask_optimize__(self):\n return self._to_temp_dataset().__dask_optimize__"},{"col":0,"comment":"null","endLoc":21,"header":"def requires_dask()","id":1128,"name":"requires_dask","nodeType":"Function","startLoc":17,"text":"def requires_dask():\n try:\n import dask # noqa: F401\n except ImportError:\n raise NotImplementedError()"},{"col":4,"comment":"null","endLoc":772,"header":"@property\n def __dask_scheduler__(self)","id":1129,"name":"__dask_scheduler__","nodeType":"Function","startLoc":770,"text":"@property\n def __dask_scheduler__(self):\n return self._to_temp_dataset().__dask_scheduler__"},{"attributeType":"null","col":8,"comment":"null","endLoc":312,"id":1130,"name":"_mode","nodeType":"Attribute","startLoc":312,"text":"self._mode"},{"col":4,"comment":"null","endLoc":776,"header":"def __dask_postcompute__(self)","id":1131,"name":"__dask_postcompute__","nodeType":"Function","startLoc":774,"text":"def __dask_postcompute__(self):\n func, args = self._to_temp_dataset().__dask_postcompute__()\n return self._dask_finalize, (func, args, self.name)"},{"className":"Unstacking","col":0,"comment":"null","endLoc":17,"id":1132,"nodeType":"Class","startLoc":8,"text":"class Unstacking:\n def setup(self):\n data = np.random.RandomState(0).randn(1, 1000, 500)\n self.ds = xr.DataArray(data).stack(flat_dim=[\"dim_1\", \"dim_2\"])\n\n def time_unstack_fast(self):\n self.ds.unstack(\"flat_dim\")\n\n def time_unstack_slow(self):\n self.ds[:, ::-1].unstack(\"flat_dim\")"},{"col":4,"comment":"null","endLoc":11,"header":"def setup(self)","id":1133,"name":"setup","nodeType":"Function","startLoc":9,"text":"def setup(self):\n data = np.random.RandomState(0).randn(1, 1000, 500)\n self.ds = xr.DataArray(data).stack(flat_dim=[\"dim_1\", \"dim_2\"])"},{"attributeType":"null","col":8,"comment":"null","endLoc":314,"id":1134,"name":"_filename","nodeType":"Attribute","startLoc":314,"text":"self._filename"},{"col":0,"comment":"null","endLoc":598,"header":"def not_implemented(*args, **kwargs)","id":1135,"name":"not_implemented","nodeType":"Function","startLoc":597,"text":"def not_implemented(*args, **kwargs):\n return NotImplemented"},{"col":0,"comment":"Convert attribute values from numpy objects to native Python objects,\n for use in to_dict\n ","endLoc":611,"header":"def decode_numpy_dict_values(attrs: Mapping[K, V]) -> Dict[K, V]","id":1136,"name":"decode_numpy_dict_values","nodeType":"Function","startLoc":601,"text":"def decode_numpy_dict_values(attrs: Mapping[K, V]) -> Dict[K, V]:\n \"\"\"Convert attribute values from numpy objects to native Python objects,\n for use in to_dict\n \"\"\"\n attrs = dict(attrs)\n for k, v in attrs.items():\n if isinstance(v, np.ndarray):\n attrs[k] = v.tolist()\n elif isinstance(v, np.generic):\n attrs[k] = v.item()\n return attrs"},{"col":0,"comment":"null","endLoc":145,"header":"def _netcdf4_create_group(dataset, 
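The Unstacking benchmark above times DataArray.unstack on a stacked MultiIndex dimension; the same pattern as a standalone snippet:

    import numpy as np
    import xarray as xr

    data = np.random.RandomState(0).randn(1, 1000, 500)
    stacked = xr.DataArray(data).stack(flat_dim=["dim_1", "dim_2"])

    # Fast path: the stacked index is monotonic, so unstacking is a cheap reshape.
    unstacked = stacked.unstack("flat_dim")

    # Slow path timed by time_unstack_slow: reversing the stacked dimension
    # forces the generic, indexing-based unstack code path.
    unstacked_slow = stacked[:, ::-1].unstack("flat_dim")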
name)","id":1137,"name":"_netcdf4_create_group","nodeType":"Function","startLoc":144,"text":"def _netcdf4_create_group(dataset, name):\n return dataset.createGroup(name)"},{"attributeType":"null","col":0,"comment":"null","endLoc":23,"id":1138,"name":"_endian_lookup","nodeType":"Attribute","startLoc":23,"text":"_endian_lookup"},{"attributeType":"null","col":0,"comment":"null","endLoc":26,"id":1139,"name":"NETCDF4_PYTHON_LOCK","nodeType":"Attribute","startLoc":26,"text":"NETCDF4_PYTHON_LOCK"},{"col":4,"comment":"null","endLoc":780,"header":"def __dask_postpersist__(self)","id":1140,"name":"__dask_postpersist__","nodeType":"Function","startLoc":778,"text":"def __dask_postpersist__(self):\n func, args = self._to_temp_dataset().__dask_postpersist__()\n return self._dask_finalize, (func, args, self.name)"},{"col":4,"comment":"Add an array to this dataset.\n\n If value is a `DataArray`, call its `select_vars()` method, rename it\n to `key` and merge the contents of the resulting dataset into this\n dataset.\n\n If value is an `Variable` object (or tuple of form\n ``(dims, data[, attrs])``), add it to this dataset as a new\n variable.\n ","endLoc":1256,"header":"def __setitem__(self, key: Hashable, value) -> None","id":1141,"name":"__setitem__","nodeType":"Function","startLoc":1240,"text":"def __setitem__(self, key: Hashable, value) -> None:\n \"\"\"Add an array to this dataset.\n\n If value is a `DataArray`, call its `select_vars()` method, rename it\n to `key` and merge the contents of the resulting dataset into this\n dataset.\n\n If value is an `Variable` object (or tuple of form\n ``(dims, data[, attrs])``), add it to this dataset as a new\n variable.\n \"\"\"\n if utils.is_dict_like(key):\n raise NotImplementedError(\n \"cannot yet use a dictionary as a key \" \"to set Dataset values\"\n )\n\n self.update({key: value})"},{"col":4,"comment":"null","endLoc":787,"header":"@staticmethod\n def _dask_finalize(results, func, args, name)","id":1142,"name":"_dask_finalize","nodeType":"Function","startLoc":782,"text":"@staticmethod\n def _dask_finalize(results, func, args, name):\n ds = func(results, *args)\n variable = ds._variables.pop(_THIS_ARRAY)\n coords = ds._variables\n return DataArray(variable, coords, name=name, fastpath=True)"},{"col":0,"comment":"Convert val out of numpy time, for use in to_dict.\n Needed because of numpy bug GH#7619","endLoc":621,"header":"def ensure_us_time_resolution(val)","id":1143,"name":"ensure_us_time_resolution","nodeType":"Function","startLoc":614,"text":"def ensure_us_time_resolution(val):\n \"\"\"Convert val out of numpy time, for use in to_dict.\n Needed because of numpy bug GH#7619\"\"\"\n if np.issubdtype(val.dtype, np.datetime64):\n val = val.astype(\"datetime64[us]\")\n elif np.issubdtype(val.dtype, np.timedelta64):\n val = val.astype(\"timedelta64[us]\")\n return val"},{"col":0,"comment":"","endLoc":1,"header":"netCDF4_.py#","id":1144,"name":"","nodeType":"Function","startLoc":1,"text":"_endian_lookup = {\"=\": \"native\", \">\": \"big\", \"<\": \"little\", \"|\": \"native\"}\n\nNETCDF4_PYTHON_LOCK = combine_locks([NETCDFC_LOCK, HDF5_LOCK])"},{"col":4,"comment":"Manually trigger loading of this array's data from disk or a\n remote source into memory and return this array.\n\n Normally, it should not be necessary to call this method in user code,\n because all xarray functions should either work on deferred data or\n load data automatically. 
However, this method can be necessary when\n working with many file objects on disk.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.array.compute``.\n\n See Also\n --------\n dask.array.compute\n ","endLoc":811,"header":"def load(self, **kwargs) -> \"DataArray\"","id":1145,"name":"load","nodeType":"Function","startLoc":789,"text":"def load(self, **kwargs) -> \"DataArray\":\n \"\"\"Manually trigger loading of this array's data from disk or a\n remote source into memory and return this array.\n\n Normally, it should not be necessary to call this method in user code,\n because all xarray functions should either work on deferred data or\n load data automatically. However, this method can be necessary when\n working with many file objects on disk.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.array.compute``.\n\n See Also\n --------\n dask.array.compute\n \"\"\"\n ds = self._to_temp_dataset().load(**kwargs)\n new = self._from_temp_dataset(ds)\n self._variable = new._variable\n self._coords = new._coords\n return self"},{"col":4,"comment":"Update this dataset's variables with those from another dataset.\n\n Parameters\n ----------\n other : Dataset or castable to Dataset\n Variables with which to update this dataset. One of:\n\n - Dataset\n - mapping {var name: DataArray}\n - mapping {var name: Variable}\n - mapping {var name: (dimension name, array-like)}\n - mapping {var name: (tuple of dimension names, array-like)}\n\n\n Returns\n -------\n updated : Dataset\n Updated dataset.\n\n Raises\n ------\n ValueError\n If any dimensions would have inconsistent sizes in the updated\n dataset.\n ","endLoc":3439,"header":"def update(self, other: \"CoercibleMapping\", inplace: bool = None) -> \"Dataset\"","id":1146,"name":"update","nodeType":"Function","startLoc":3411,"text":"def update(self, other: \"CoercibleMapping\", inplace: bool = None) -> \"Dataset\":\n \"\"\"Update this dataset's variables with those from another dataset.\n\n Parameters\n ----------\n other : Dataset or castable to Dataset\n Variables with which to update this dataset. 
One of:\n\n - Dataset\n - mapping {var name: DataArray}\n - mapping {var name: Variable}\n - mapping {var name: (dimension name, array-like)}\n - mapping {var name: (tuple of dimension names, array-like)}\n\n\n Returns\n -------\n updated : Dataset\n Updated dataset.\n\n Raises\n ------\n ValueError\n If any dimensions would have inconsistent sizes in the updated\n dataset.\n \"\"\"\n _check_inplace(inplace)\n merge_result = dataset_update_method(self, other)\n return self._replace(inplace=True, **merge_result._asdict())"},{"col":0,"comment":" Get an new dimension name based on new_dim, that is not used in dims.\n If the same name exists, we add an underscore(s) in the head.\n\n Example1:\n dims: ['a', 'b', 'c']\n new_dim: ['_rolling']\n -> ['_rolling']\n Example2:\n dims: ['a', 'b', 'c', '_rolling']\n new_dim: ['_rolling']\n -> ['__rolling']\n ","endLoc":678,"header":"def get_temp_dimname(dims: Container[Hashable], new_dim: Hashable) -> Hashable","id":1147,"name":"get_temp_dimname","nodeType":"Function","startLoc":663,"text":"def get_temp_dimname(dims: Container[Hashable], new_dim: Hashable) -> Hashable:\n \"\"\" Get an new dimension name based on new_dim, that is not used in dims.\n If the same name exists, we add an underscore(s) in the head.\n\n Example1:\n dims: ['a', 'b', 'c']\n new_dim: ['_rolling']\n -> ['_rolling']\n Example2:\n dims: ['a', 'b', 'c', '_rolling']\n new_dim: ['_rolling']\n -> ['__rolling']\n \"\"\"\n while new_dim in dims:\n new_dim = \"_\" + str(new_dim)\n return new_dim"},{"attributeType":"null","col":0,"comment":"null","endLoc":31,"id":1148,"name":"K","nodeType":"Attribute","startLoc":31,"text":"K"},{"attributeType":"null","col":0,"comment":"null","endLoc":32,"id":1149,"name":"V","nodeType":"Attribute","startLoc":32,"text":"V"},{"attributeType":"null","col":0,"comment":"null","endLoc":33,"id":1150,"name":"T","nodeType":"Attribute","startLoc":33,"text":"T"},{"col":0,"comment":"","endLoc":2,"header":"utils.py#","id":1151,"name":"","nodeType":"Function","startLoc":1,"text":"\"\"\"Internal utilties; not for external use\n\"\"\"\n\nK = TypeVar(\"K\")\n\nV = TypeVar(\"V\")\n\nT = TypeVar(\"T\")"},{"col":4,"comment":"Remove a variable from this dataset.\n ","endLoc":1266,"header":"def __delitem__(self, key: Hashable) -> None","id":1152,"name":"__delitem__","nodeType":"Function","startLoc":1258,"text":"def __delitem__(self, key: Hashable) -> None:\n \"\"\"Remove a variable from this dataset.\n \"\"\"\n del self._variables[key]\n self._coord_names.discard(key)\n if key in self.indexes:\n assert self._indexes is not None\n del self._indexes[key]\n self._dims = calculate_dimensions(self._variables)"},{"fileName":"versioneer.py","filePath":"","id":1153,"nodeType":"File","text":"# flake8: noqa\n\n# Version: 0.18\n\n\"\"\"The Versioneer - like a rocketeer, but for versions.\n\nThe Versioneer\n==============\n\n* like a rocketeer, but for versions!\n* https://github.com/warner/python-versioneer\n* Brian Warner\n* License: Public Domain\n* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy\n* [![Latest Version]\n(https://pypip.in/version/versioneer/badge.svg?style=flat)\n](https://pypi.python.org/pypi/versioneer/)\n* [![Build Status]\n(https://travis-ci.org/warner/python-versioneer.png?branch=master)\n](https://travis-ci.org/warner/python-versioneer)\n\nThis is a tool for managing a recorded version number in distutils-based\npython projects. The goal is to remove the tedious and error-prone \"update\nthe embedded version string\" step from your release process. 
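Illustrative use of the Dataset mutation paths documented above (__setitem__, update, and __delitem__); the variable names are assumptions for the sketch.

    import numpy as np
    import xarray as xr

    ds = xr.Dataset({"temperature": ("x", np.zeros(3))}, coords={"x": [0, 1, 2]})

    # __setitem__ routes through Dataset.update under the hood.
    ds["pressure"] = ("x", np.ones(3))

    # update() merges variables from another mapping or Dataset in place.
    ds.update({"humidity": ("x", np.full(3, 0.5))})

    # __delitem__ removes the variable and recalculates the dimensions.
    del ds["temperature"]
    print(list(ds.data_vars))  # ['pressure', 'humidity']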
Making a new\nrelease should be as easy as recording a new tag in your version-control\nsystem, and maybe making new tarballs.\n\n\n## Quick Install\n\n* `pip install versioneer` to somewhere to your $PATH\n* add a `[versioneer]` section to your setup.cfg (see below)\n* run `versioneer install` in your source tree, commit the results\n\n## Version Identifiers\n\nSource trees come from a variety of places:\n\n* a version-control system checkout (mostly used by developers)\n* a nightly tarball, produced by build automation\n* a snapshot tarball, produced by a web-based VCS browser, like github's\n \"tarball from tag\" feature\n* a release tarball, produced by \"setup.py sdist\", distributed through PyPI\n\nWithin each source tree, the version identifier (either a string or a number,\nthis tool is format-agnostic) can come from a variety of places:\n\n* ask the VCS tool itself, e.g. \"git describe\" (for checkouts), which knows\n about recent \"tags\" and an absolute revision-id\n* the name of the directory into which the tarball was unpacked\n* an expanded VCS keyword ($Id$, etc)\n* a `_version.py` created by some earlier build step\n\nFor released software, the version identifier is closely related to a VCS\ntag. Some projects use tag names that include more than just the version\nstring (e.g. \"myproject-1.2\" instead of just \"1.2\"), in which case the tool\nneeds to strip the tag prefix to extract the version identifier. For\nunreleased software (between tags), the version identifier should provide\nenough information to help developers recreate the same tree, while also\ngiving them an idea of roughly how old the tree is (after version 1.2, before\nversion 1.3). Many VCS systems can report a description that captures this,\nfor example `git describe --tags --dirty --always` reports things like\n\"0.7-1-g574ab98-dirty\" to indicate that the checkout is one revision past the\n0.7 tag, has a unique revision id of \"574ab98\", and is \"dirty\" (it has\nuncommitted changes.\n\nThe version identifier is used for multiple purposes:\n\n* to allow the module to self-identify its version: `myproject.__version__`\n* to choose a name and prefix for a 'setup.py sdist' tarball\n\n## Theory of Operation\n\nVersioneer works by adding a special `_version.py` file into your source\ntree, where your `__init__.py` can import it. This `_version.py` knows how to\ndynamically ask the VCS tool for version information at import time.\n\n`_version.py` also contains `$Revision$` markers, and the installation\nprocess marks `_version.py` to have this marker rewritten with a tag name\nduring the `git archive` command. As a result, generated tarballs will\ncontain enough information to get the proper version.\n\nTo allow `setup.py` to compute a version too, a `versioneer.py` is added to\nthe top level of your source tree, next to `setup.py` and the `setup.cfg`\nthat configures it. This overrides several distutils/setuptools commands to\ncompute the version when invoked, and changes `setup.py build` and `setup.py\nsdist` to replace `_version.py` with a small static file that contains just\nthe generated version data.\n\n## Installation\n\nSee [INSTALL.md](./INSTALL.md) for detailed installation instructions.\n\n## Version-String Flavors\n\nCode which uses Versioneer can learn about its version string at runtime by\nimporting `_version` from your main `__init__.py` file and running the\n`get_versions()` function. From the \"outside\" (e.g. 
in `setup.py`), you can\nimport the top-level `versioneer.py` and run `get_versions()`.\n\nBoth functions return a dictionary with different flavors of version\ninformation:\n\n* `['version']`: A condensed version string, rendered using the selected\n style. This is the most commonly used value for the project's version\n string. The default \"pep440\" style yields strings like `0.11`,\n `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the \"Styles\" section\n below for alternative styles.\n\n* `['full-revisionid']`: detailed revision identifier. For Git, this is the\n full SHA1 commit id, e.g. \"1076c978a8d3cfc70f408fe5974aa6c092c949ac\".\n\n* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the\n commit date in ISO 8601 format. This will be None if the date is not\n available.\n\n* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that\n this is only accurate if run in a VCS checkout, otherwise it is likely to\n be False or None\n\n* `['error']`: if the version string could not be computed, this will be set\n to a string describing the problem, otherwise it will be None. It may be\n useful to throw an exception in setup.py if this is set, to avoid e.g.\n creating tarballs with a version string of \"unknown\".\n\nSome variants are more useful than others. Including `full-revisionid` in a\nbug report should allow developers to reconstruct the exact code being tested\n(or indicate the presence of local changes that should be shared with the\ndevelopers). `version` is suitable for display in an \"about\" box or a CLI\n`--version` output: it can be easily compared against release notes and lists\nof bugs fixed in various releases.\n\nThe installer adds the following text to your `__init__.py` to place a basic\nversion in `YOURPROJECT.__version__`:\n\n from ._version import get_versions\n __version__ = get_versions()['version']\n del get_versions\n\n## Styles\n\nThe setup.cfg `style=` configuration controls how the VCS information is\nrendered into a version string.\n\nThe default style, \"pep440\", produces a PEP440-compliant string, equal to the\nun-prefixed tag name for actual releases, and containing an additional \"local\nversion\" section with more detail for in-between builds. For Git, this is\nTAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags\n--dirty --always`. For example \"0.11+2.g1076c97.dirty\" indicates that the\ntree is like the \"1076c97\" commit but has uncommitted changes (\".dirty\"), and\nthat this commit is two revisions (\"+2\") beyond the \"0.11\" tag. For released\nsoftware (exactly equal to a known tag), the identifier will only contain the\nstripped tag, e.g. \"0.11\".\n\nOther styles are available. See [details.md](details.md) in the Versioneer\nsource tree for descriptions.\n\n## Debugging\n\nVersioneer tries to avoid fatal errors: if something goes wrong, it will tend\nto return a version of \"0+unknown\". To investigate the problem, run `setup.py\nversion`, which will run the version-lookup code in a verbose mode, and will\ndisplay the full contents of `get_versions()` (including the `error` string,\nwhich may help identify what went wrong).\n\n## Known Limitations\n\nSome situations are known to cause problems for Versioneer. This details the\nmost significant ones. More can be found on Github\n[issues page](https://github.com/warner/python-versioneer/issues).\n\n### Subprojects\n\nVersioneer has limited support for source trees in which `setup.py` is not in\nthe root directory (e.g. 
`setup.py` and `.git/` are *not* siblings). The are\ntwo common reasons why `setup.py` might not be in the root:\n\n* Source trees which contain multiple subprojects, such as\n [Buildbot](https://github.com/buildbot/buildbot), which contains both\n \"master\" and \"slave\" subprojects, each with their own `setup.py`,\n `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI\n distributions (and upload multiple independently-installable tarballs).\n* Source trees whose main purpose is to contain a C library, but which also\n provide bindings to Python (and perhaps other langauges) in subdirectories.\n\nVersioneer will look for `.git` in parent directories, and most operations\nshould get the right version string. However `pip` and `setuptools` have bugs\nand implementation details which frequently cause `pip install .` from a\nsubproject directory to fail to find a correct version string (so it usually\ndefaults to `0+unknown`).\n\n`pip install --editable .` should work correctly. `setup.py install` might\nwork too.\n\nPip-8.1.1 is known to have this problem, but hopefully it will get fixed in\nsome later version.\n\n[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking\nthis issue. The discussion in\n[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the\nissue from the Versioneer side in more detail.\n[pip PR#3176](https://github.com/pypa/pip/pull/3176) and\n[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve\npip to let Versioneer work correctly.\n\nVersioneer-0.16 and earlier only looked for a `.git` directory next to the\n`setup.cfg`, so subprojects were completely unsupported with those releases.\n\n### Editable installs with setuptools <= 18.5\n\n`setup.py develop` and `pip install --editable .` allow you to install a\nproject into a virtualenv once, then continue editing the source code (and\ntest) without re-installing after every change.\n\n\"Entry-point scripts\" (`setup(entry_points={\"console_scripts\": ..})`) are a\nconvenient way to specify executable scripts that should be installed along\nwith the python package.\n\nThese both work as expected when using modern setuptools. When using\nsetuptools-18.5 or earlier, however, certain operations will cause\n`pkg_resources.DistributionNotFound` errors when running the entrypoint\nscript, which must be resolved by re-installing the package. This happens\nwhen the install happens with one version, then the egg_info data is\nregenerated while a different version is checked out. Many setup.py commands\ncause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into\na different virtualenv), so this can be surprising.\n\n[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes\nthis one, but upgrading to a newer version of setuptools should probably\nresolve it.\n\n### Unicode version strings\n\nWhile Versioneer works (and is continually tested) with both Python 2 and\nPython 3, it is not entirely consistent with bytes-vs-unicode distinctions.\nNewer releases probably generate unicode version strings on py2. 
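A minimal sketch of the "outside" call site described earlier in this docstring: import the top-level versioneer module and ask it for the version flavors (the printed values are examples, not real output).

    import versioneer

    info = versioneer.get_versions()
    print(info["version"])          # e.g. "0.11+2.g1076c97.dirty"
    print(info["full-revisionid"])  # full SHA1 commit id, or None
    print(info["dirty"], info["error"])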
It's not\nclear that this is wrong, but it may be surprising for applications when then\nwrite these strings to a network connection or include them in bytes-oriented\nAPIs like cryptographic checksums.\n\n[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates\nthis question.\n\n\n## Updating Versioneer\n\nTo upgrade your project to a new release of Versioneer, do the following:\n\n* install the new Versioneer (`pip install -U versioneer` or equivalent)\n* edit `setup.cfg`, if necessary, to include any new configuration settings\n indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.\n* re-run `versioneer install` in your source tree, to replace\n `SRC/_version.py`\n* commit any changed files\n\n## Future Directions\n\nThis tool is designed to make it easily extended to other version-control\nsystems: all VCS-specific components are in separate directories like\nsrc/git/ . The top-level `versioneer.py` script is assembled from these\ncomponents by running make-versioneer.py . In the future, make-versioneer.py\nwill take a VCS name as an argument, and will construct a version of\n`versioneer.py` that is specific to the given VCS. It might also take the\nconfiguration arguments that are currently provided manually during\ninstallation by editing setup.py . Alternatively, it might go the other\ndirection and include code from all supported VCS systems, reducing the\nnumber of intermediate scripts.\n\n\n## License\n\nTo make Versioneer easier to embed, all its code is dedicated to the public\ndomain. The `_version.py` that it creates is also in the public domain.\nSpecifically, both are released under the Creative Commons \"Public Domain\nDedication\" license (CC0-1.0), as described in\nhttps://creativecommons.org/publicdomain/zero/1.0/ .\n\n\"\"\"\n\n\nimport errno\nimport json\nimport os\nimport re\nimport subprocess\nimport sys\n\ntry:\n import configparser\nexcept ImportError:\n import ConfigParser as configparser\n\n\nclass VersioneerConfig:\n \"\"\"Container for Versioneer configuration parameters.\"\"\"\n\n\ndef get_root():\n \"\"\"Get the project root directory.\n\n We require that all commands are run from the project root, i.e. the\n directory that contains setup.py, setup.cfg, and versioneer.py .\n \"\"\"\n root = os.path.realpath(os.path.abspath(os.getcwd()))\n setup_py = os.path.join(root, \"setup.py\")\n versioneer_py = os.path.join(root, \"versioneer.py\")\n if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):\n # allow 'python path/to/setup.py COMMAND'\n root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))\n setup_py = os.path.join(root, \"setup.py\")\n versioneer_py = os.path.join(root, \"versioneer.py\")\n if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):\n err = (\n \"Versioneer was unable to run the project root directory. \"\n \"Versioneer requires setup.py to be executed from \"\n \"its immediate directory (like 'python setup.py COMMAND'), \"\n \"or in a way that lets it use sys.argv[0] to find the root \"\n \"(like 'python path/to/setup.py COMMAND').\"\n )\n raise VersioneerBadRootError(err)\n try:\n # Certain runtime workflows (setup.py install/develop in a setuptools\n # tree) execute all dependencies in a single python process, so\n # \"versioneer\" may be imported multiple times, and python's shared\n # module-import table will cache the first one. 
So we can't use\n # os.path.dirname(__file__), as that will find whichever\n # versioneer.py was first imported, even in later projects.\n me = os.path.realpath(os.path.abspath(__file__))\n me_dir = os.path.normcase(os.path.splitext(me)[0])\n vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])\n if me_dir != vsr_dir:\n print(\n \"Warning: build in %s is using versioneer.py from %s\"\n % (os.path.dirname(me), versioneer_py)\n )\n except NameError:\n pass\n return root\n\n\ndef get_config_from_root(root):\n \"\"\"Read the project setup.cfg file to determine Versioneer config.\"\"\"\n # This might raise EnvironmentError (if setup.cfg is missing), or\n # configparser.NoSectionError (if it lacks a [versioneer] section), or\n # configparser.NoOptionError (if it lacks \"VCS=\"). See the docstring at\n # the top of versioneer.py for instructions on writing your setup.cfg .\n setup_cfg = os.path.join(root, \"setup.cfg\")\n parser = configparser.SafeConfigParser()\n with open(setup_cfg, \"r\") as f:\n parser.readfp(f)\n VCS = parser.get(\"versioneer\", \"VCS\") # mandatory\n\n def get(parser, name):\n if parser.has_option(\"versioneer\", name):\n return parser.get(\"versioneer\", name)\n return None\n\n cfg = VersioneerConfig()\n cfg.VCS = VCS\n cfg.style = get(parser, \"style\") or \"\"\n cfg.versionfile_source = get(parser, \"versionfile_source\")\n cfg.versionfile_build = get(parser, \"versionfile_build\")\n cfg.tag_prefix = get(parser, \"tag_prefix\")\n if cfg.tag_prefix in (\"''\", '\"\"'):\n cfg.tag_prefix = \"\"\n cfg.parentdir_prefix = get(parser, \"parentdir_prefix\")\n cfg.verbose = get(parser, \"verbose\")\n return cfg\n\n\nclass NotThisMethod(Exception):\n \"\"\"Exception raised if a method is not valid for the current scenario.\"\"\"\n\n\n# these dictionaries contain VCS-specific tools\nLONG_VERSION_PY = {}\nHANDLERS = {}\n\n\ndef register_vcs_handler(vcs, method): # decorator\n \"\"\"Decorator to mark a method as the handler for a particular VCS.\"\"\"\n\n def decorate(f):\n \"\"\"Store f in HANDLERS[vcs][method].\"\"\"\n if vcs not in HANDLERS:\n HANDLERS[vcs] = {}\n HANDLERS[vcs][method] = f\n return f\n\n return decorate\n\n\ndef run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):\n \"\"\"Call the given command(s).\"\"\"\n assert isinstance(commands, list)\n p = None\n for c in commands:\n try:\n dispcmd = str([c] + args)\n # remember shell=False, so use git.cmd on windows, not just git\n p = subprocess.Popen(\n [c] + args,\n cwd=cwd,\n env=env,\n stdout=subprocess.PIPE,\n stderr=(subprocess.PIPE if hide_stderr else None),\n )\n break\n except OSError:\n e = sys.exc_info()[1]\n if e.errno == errno.ENOENT:\n continue\n if verbose:\n print(\"unable to run %s\" % dispcmd)\n print(e)\n return None, None\n else:\n if verbose:\n print(\"unable to find command, tried %s\" % (commands,))\n return None, None\n stdout = p.communicate()[0].strip()\n if sys.version_info[0] >= 3:\n stdout = stdout.decode()\n if p.returncode != 0:\n if verbose:\n print(\"unable to run %s (error)\" % dispcmd)\n print(\"stdout was %s\" % stdout)\n return None, p.returncode\n return stdout, p.returncode\n\n\nLONG_VERSION_PY[\n \"git\"\n] = r'''\n# This file helps to compute a version number in source trees obtained from\n# git-archive tarball (such as those provided by githubs download-from-tag\n# feature). 
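A hedged sketch of how the run_command helper defined above shells out to git; the argument list mirrors the describe invocation used by git_pieces_from_vcs later in the file.

    from versioneer import run_command

    out, rc = run_command(
        ["git"],
        ["describe", "--tags", "--dirty", "--always", "--long"],
        cwd=".",
        verbose=True,
    )
    if rc == 0:
        print("git describe ->", out)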
Distribution tarballs (built by setup.py sdist) and build\n# directories (produced by setup.py build) will contain a much shorter file\n# that just contains the computed version number.\n\n# This file is released into the public domain. Generated by\n# versioneer-0.18 (https://github.com/warner/python-versioneer)\n\n\"\"\"Git implementation of _version.py.\"\"\"\n\nimport errno\nimport os\nimport re\nimport subprocess\nimport sys\n\n\ndef get_keywords():\n \"\"\"Get the keywords needed to look up the version information.\"\"\"\n # these strings will be replaced by git during git-archive.\n # setup.py/versioneer.py will grep for the variable names, so they must\n # each be defined on a line of their own. _version.py will just call\n # get_keywords().\n git_refnames = \"%(DOLLAR)sFormat:%%d%(DOLLAR)s\"\n git_full = \"%(DOLLAR)sFormat:%%H%(DOLLAR)s\"\n git_date = \"%(DOLLAR)sFormat:%%ci%(DOLLAR)s\"\n keywords = {\"refnames\": git_refnames, \"full\": git_full, \"date\": git_date}\n return keywords\n\n\nclass VersioneerConfig:\n \"\"\"Container for Versioneer configuration parameters.\"\"\"\n\n\ndef get_config():\n \"\"\"Create, populate and return the VersioneerConfig() object.\"\"\"\n # these strings are filled in when 'setup.py versioneer' creates\n # _version.py\n cfg = VersioneerConfig()\n cfg.VCS = \"git\"\n cfg.style = \"%(STYLE)s\"\n cfg.tag_prefix = \"%(TAG_PREFIX)s\"\n cfg.parentdir_prefix = \"%(PARENTDIR_PREFIX)s\"\n cfg.versionfile_source = \"%(VERSIONFILE_SOURCE)s\"\n cfg.verbose = False\n return cfg\n\n\nclass NotThisMethod(Exception):\n \"\"\"Exception raised if a method is not valid for the current scenario.\"\"\"\n\n\nLONG_VERSION_PY = {}\nHANDLERS = {}\n\n\ndef register_vcs_handler(vcs, method): # decorator\n \"\"\"Decorator to mark a method as the handler for a particular VCS.\"\"\"\n def decorate(f):\n \"\"\"Store f in HANDLERS[vcs][method].\"\"\"\n if vcs not in HANDLERS:\n HANDLERS[vcs] = {}\n HANDLERS[vcs][method] = f\n return f\n return decorate\n\n\ndef run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,\n env=None):\n \"\"\"Call the given command(s).\"\"\"\n assert isinstance(commands, list)\n p = None\n for c in commands:\n try:\n dispcmd = str([c] + args)\n # remember shell=False, so use git.cmd on windows, not just git\n p = subprocess.Popen([c] + args, cwd=cwd, env=env,\n stdout=subprocess.PIPE,\n stderr=(subprocess.PIPE if hide_stderr\n else None))\n break\n except EnvironmentError:\n e = sys.exc_info()[1]\n if e.errno == errno.ENOENT:\n continue\n if verbose:\n print(\"unable to run %%s\" %% dispcmd)\n print(e)\n return None, None\n else:\n if verbose:\n print(\"unable to find command, tried %%s\" %% (commands,))\n return None, None\n stdout = p.communicate()[0].strip()\n if sys.version_info[0] >= 3:\n stdout = stdout.decode()\n if p.returncode != 0:\n if verbose:\n print(\"unable to run %%s (error)\" %% dispcmd)\n print(\"stdout was %%s\" %% stdout)\n return None, p.returncode\n return stdout, p.returncode\n\n\ndef versions_from_parentdir(parentdir_prefix, root, verbose):\n \"\"\"Try to determine the version from the parent directory name.\n\n Source tarballs conventionally unpack into a directory that includes both\n the project name and a version string. 
We will also support searching up\n two directory levels for an appropriately named parent directory\n \"\"\"\n rootdirs = []\n\n for i in range(3):\n dirname = os.path.basename(root)\n if dirname.startswith(parentdir_prefix):\n return {\"version\": dirname[len(parentdir_prefix):],\n \"full-revisionid\": None,\n \"dirty\": False, \"error\": None, \"date\": None}\n else:\n rootdirs.append(root)\n root = os.path.dirname(root) # up a level\n\n if verbose:\n print(\"Tried directories %%s but none started with prefix %%s\" %%\n (str(rootdirs), parentdir_prefix))\n raise NotThisMethod(\"rootdir doesn't start with parentdir_prefix\")\n\n\n@register_vcs_handler(\"git\", \"get_keywords\")\ndef git_get_keywords(versionfile_abs):\n \"\"\"Extract version information from the given file.\"\"\"\n # the code embedded in _version.py can just fetch the value of these\n # keywords. When used from setup.py, we don't want to import _version.py,\n # so we do it with a regexp instead. This function is not used from\n # _version.py.\n keywords = {}\n try:\n f = open(versionfile_abs, \"r\")\n for line in f.readlines():\n if line.strip().startswith(\"git_refnames =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"refnames\"] = mo.group(1)\n if line.strip().startswith(\"git_full =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"full\"] = mo.group(1)\n if line.strip().startswith(\"git_date =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"date\"] = mo.group(1)\n f.close()\n except EnvironmentError:\n pass\n return keywords\n\n\n@register_vcs_handler(\"git\", \"keywords\")\ndef git_versions_from_keywords(keywords, tag_prefix, verbose):\n \"\"\"Get version information from git keywords.\"\"\"\n if not keywords:\n raise NotThisMethod(\"no keywords at all, weird\")\n date = keywords.get(\"date\")\n if date is not None:\n # git-2.2.0 added \"%%cI\", which expands to an ISO-8601 -compliant\n # datestamp. However we prefer \"%%ci\" (which expands to an \"ISO-8601\n # -like\" string, which we must then edit to make compliant), because\n # it's been around since git-1.5.3, and it's too difficult to\n # discover which version we're using, or to work around using an\n # older one.\n date = date.strip().replace(\" \", \"T\", 1).replace(\" \", \"\", 1)\n refnames = keywords[\"refnames\"].strip()\n if refnames.startswith(\"$Format\"):\n if verbose:\n print(\"keywords are unexpanded, not using\")\n raise NotThisMethod(\"unexpanded keywords, not a git-archive tarball\")\n refs = set([r.strip() for r in refnames.strip(\"()\").split(\",\")])\n # starting in git-1.8.3, tags are listed as \"tag: foo-1.0\" instead of\n # just \"foo-1.0\". If we see a \"tag: \" prefix, prefer those.\n TAG = \"tag: \"\n tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])\n if not tags:\n # Either we're using git < 1.8.3, or there really are no tags. We use\n # a heuristic: assume all version tags have a digit. The old git %%d\n # expansion behaves like git log --decorate=short and strips out the\n # refs/heads/ and refs/tags/ prefixes that would let us distinguish\n # between branches and tags. 
By ignoring refnames without digits, we\n # filter out many common branch names like \"release\" and\n # \"stabilization\", as well as \"HEAD\" and \"master\".\n tags = set([r for r in refs if re.search(r'\\d', r)])\n if verbose:\n print(\"discarding '%%s', no digits\" %% \",\".join(refs - tags))\n if verbose:\n print(\"likely tags: %%s\" %% \",\".join(sorted(tags)))\n for ref in sorted(tags):\n # sorting will prefer e.g. \"2.0\" over \"2.0rc1\"\n if ref.startswith(tag_prefix):\n r = ref[len(tag_prefix):]\n if verbose:\n print(\"picking %%s\" %% r)\n return {\"version\": r,\n \"full-revisionid\": keywords[\"full\"].strip(),\n \"dirty\": False, \"error\": None,\n \"date\": date}\n # no suitable tags, so version is \"0+unknown\", but full hex is still there\n if verbose:\n print(\"no suitable tags, using unknown + full revision id\")\n return {\"version\": \"0+unknown\",\n \"full-revisionid\": keywords[\"full\"].strip(),\n \"dirty\": False, \"error\": \"no suitable tags\", \"date\": None}\n\n\n@register_vcs_handler(\"git\", \"pieces_from_vcs\")\ndef git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):\n \"\"\"Get version from 'git describe' in the root of the source tree.\n\n This only gets called if the git-archive 'subst' keywords were *not*\n expanded, and _version.py hasn't already been rewritten with a short\n version string, meaning we're inside a checked out source tree.\n \"\"\"\n GITS = [\"git\"]\n if sys.platform == \"win32\":\n GITS = [\"git.cmd\", \"git.exe\"]\n\n out, rc = run_command(GITS, [\"rev-parse\", \"--git-dir\"], cwd=root,\n hide_stderr=True)\n if rc != 0:\n if verbose:\n print(\"Directory %%s not under git control\" %% root)\n raise NotThisMethod(\"'git rev-parse --git-dir' returned error\")\n\n # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]\n # if there isn't one, this yields HEX[-dirty] (no NUM)\n describe_out, rc = run_command(GITS, [\"describe\", \"--tags\", \"--dirty\",\n \"--always\", \"--long\",\n \"--match\", \"%%s*\" %% tag_prefix],\n cwd=root)\n # --long was added in git-1.5.5\n if describe_out is None:\n raise NotThisMethod(\"'git describe' failed\")\n describe_out = describe_out.strip()\n full_out, rc = run_command(GITS, [\"rev-parse\", \"HEAD\"], cwd=root)\n if full_out is None:\n raise NotThisMethod(\"'git rev-parse' failed\")\n full_out = full_out.strip()\n\n pieces = {}\n pieces[\"long\"] = full_out\n pieces[\"short\"] = full_out[:7] # maybe improved later\n pieces[\"error\"] = None\n\n # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]\n # TAG might have hyphens.\n git_describe = describe_out\n\n # look for -dirty suffix\n dirty = git_describe.endswith(\"-dirty\")\n pieces[\"dirty\"] = dirty\n if dirty:\n git_describe = git_describe[:git_describe.rindex(\"-dirty\")]\n\n # now we have TAG-NUM-gHEX or HEX\n\n if \"-\" in git_describe:\n # TAG-NUM-gHEX\n mo = re.search(r'^(.+)-(\\d+)-g([0-9a-f]+)$', git_describe)\n if not mo:\n # unparseable. 
Maybe git-describe is misbehaving?\n pieces[\"error\"] = (\"unable to parse git-describe output: '%%s'\"\n %% describe_out)\n return pieces\n\n # tag\n full_tag = mo.group(1)\n if not full_tag.startswith(tag_prefix):\n if verbose:\n fmt = \"tag '%%s' doesn't start with prefix '%%s'\"\n print(fmt %% (full_tag, tag_prefix))\n pieces[\"error\"] = (\"tag '%%s' doesn't start with prefix '%%s'\"\n %% (full_tag, tag_prefix))\n return pieces\n pieces[\"closest-tag\"] = full_tag[len(tag_prefix):]\n\n # distance: number of commits since tag\n pieces[\"distance\"] = int(mo.group(2))\n\n # commit: short hex revision ID\n pieces[\"short\"] = mo.group(3)\n\n else:\n # HEX: no tags\n pieces[\"closest-tag\"] = None\n count_out, rc = run_command(GITS, [\"rev-list\", \"HEAD\", \"--count\"],\n cwd=root)\n pieces[\"distance\"] = int(count_out) # total number of commits\n\n # commit date: see ISO-8601 comment in git_versions_from_keywords()\n date = run_command(GITS, [\"show\", \"-s\", \"--format=%%ci\", \"HEAD\"],\n cwd=root)[0].strip()\n pieces[\"date\"] = date.strip().replace(\" \", \"T\", 1).replace(\" \", \"\", 1)\n\n return pieces\n\n\ndef plus_or_dot(pieces):\n \"\"\"Return a + if we don't already have one, else return a .\"\"\"\n if \"+\" in pieces.get(\"closest-tag\", \"\"):\n return \".\"\n return \"+\"\n\n\ndef render_pep440(pieces):\n \"\"\"Build up version string, with post-release \"local version identifier\".\n\n Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n Exceptions:\n 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += plus_or_dot(pieces)\n rendered += \"%%d.g%%s\" %% (pieces[\"distance\"], pieces[\"short\"])\n if pieces[\"dirty\"]:\n rendered += \".dirty\"\n else:\n # exception #1\n rendered = \"0+untagged.%%d.g%%s\" %% (pieces[\"distance\"],\n pieces[\"short\"])\n if pieces[\"dirty\"]:\n rendered += \".dirty\"\n return rendered\n\n\ndef render_pep440_pre(pieces):\n \"\"\"TAG[.post.devDISTANCE] -- No -dirty.\n\n Exceptions:\n 1: no tags. 0.post.devDISTANCE\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"]:\n rendered += \".post.dev%%d\" %% pieces[\"distance\"]\n else:\n # exception #1\n rendered = \"0.post.dev%%d\" %% pieces[\"distance\"]\n return rendered\n\n\ndef render_pep440_post(pieces):\n \"\"\"TAG[.postDISTANCE[.dev0]+gHEX] .\n\n The \".dev0\" means dirty. Note that .dev0 sorts backwards\n (a dirty tree will appear \"older\" than the corresponding clean one),\n but you shouldn't be releasing software with -dirty anyways.\n\n Exceptions:\n 1: no tags. 0.postDISTANCE[.dev0]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += \".post%%d\" %% pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n rendered += plus_or_dot(pieces)\n rendered += \"g%%s\" %% pieces[\"short\"]\n else:\n # exception #1\n rendered = \"0.post%%d\" %% pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n rendered += \"+g%%s\" %% pieces[\"short\"]\n return rendered\n\n\ndef render_pep440_old(pieces):\n \"\"\"TAG[.postDISTANCE[.dev0]] .\n\n The \".dev0\" means dirty.\n\n Eexceptions:\n 1: no tags. 
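Illustrative rendering of a versioneer "pieces" dict with the pep440 style described above; the dict values are made up, and the module-level render() is assumed to mirror the template version shown here.

    from versioneer import render

    pieces = {
        "closest-tag": "0.11",
        "distance": 2,
        "short": "1076c97",
        "long": "1076c978a8d3cfc70f408fe5974aa6c092c949ac",
        "dirty": True,
        "error": None,
        "date": None,
    }

    # TAG[+DISTANCE.gHEX[.dirty]] -> "0.11+2.g1076c97.dirty"
    print(render(pieces, "pep440")["version"])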
0.postDISTANCE[.dev0]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += \".post%%d\" %% pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n else:\n # exception #1\n rendered = \"0.post%%d\" %% pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n return rendered\n\n\ndef render_git_describe(pieces):\n \"\"\"TAG[-DISTANCE-gHEX][-dirty].\n\n Like 'git describe --tags --dirty --always'.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"]:\n rendered += \"-%%d-g%%s\" %% (pieces[\"distance\"], pieces[\"short\"])\n else:\n # exception #1\n rendered = pieces[\"short\"]\n if pieces[\"dirty\"]:\n rendered += \"-dirty\"\n return rendered\n\n\ndef render_git_describe_long(pieces):\n \"\"\"TAG-DISTANCE-gHEX[-dirty].\n\n Like 'git describe --tags --dirty --always -long'.\n The distance/hash is unconditional.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n rendered += \"-%%d-g%%s\" %% (pieces[\"distance\"], pieces[\"short\"])\n else:\n # exception #1\n rendered = pieces[\"short\"]\n if pieces[\"dirty\"]:\n rendered += \"-dirty\"\n return rendered\n\n\ndef render(pieces, style):\n \"\"\"Render the given version pieces into the requested style.\"\"\"\n if pieces[\"error\"]:\n return {\"version\": \"unknown\",\n \"full-revisionid\": pieces.get(\"long\"),\n \"dirty\": None,\n \"error\": pieces[\"error\"],\n \"date\": None}\n\n if not style or style == \"default\":\n style = \"pep440\" # the default\n\n if style == \"pep440\":\n rendered = render_pep440(pieces)\n elif style == \"pep440-pre\":\n rendered = render_pep440_pre(pieces)\n elif style == \"pep440-post\":\n rendered = render_pep440_post(pieces)\n elif style == \"pep440-old\":\n rendered = render_pep440_old(pieces)\n elif style == \"git-describe\":\n rendered = render_git_describe(pieces)\n elif style == \"git-describe-long\":\n rendered = render_git_describe_long(pieces)\n else:\n raise ValueError(\"unknown style '%%s'\" %% style)\n\n return {\"version\": rendered, \"full-revisionid\": pieces[\"long\"],\n \"dirty\": pieces[\"dirty\"], \"error\": None,\n \"date\": pieces.get(\"date\")}\n\n\ndef get_versions():\n \"\"\"Get version information or return default if unable to do so.\"\"\"\n # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have\n # __file__, we can work backwards from there to the root. Some\n # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which\n # case we can only use expanded keywords.\n\n cfg = get_config()\n verbose = cfg.verbose\n\n try:\n return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,\n verbose)\n except NotThisMethod:\n pass\n\n try:\n root = os.path.realpath(__file__)\n # versionfile_source is the relative path from the top of the source\n # tree (where the .git directory might live) to this file. 
Invert\n # this to find the root from __file__.\n for i in cfg.versionfile_source.split('/'):\n root = os.path.dirname(root)\n except NameError:\n return {\"version\": \"0+unknown\", \"full-revisionid\": None,\n \"dirty\": None,\n \"error\": \"unable to find root of source tree\",\n \"date\": None}\n\n try:\n pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)\n return render(pieces, cfg.style)\n except NotThisMethod:\n pass\n\n try:\n if cfg.parentdir_prefix:\n return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)\n except NotThisMethod:\n pass\n\n return {\"version\": \"0+unknown\", \"full-revisionid\": None,\n \"dirty\": None,\n \"error\": \"unable to compute version\", \"date\": None}\n'''\n\n\n@register_vcs_handler(\"git\", \"get_keywords\")\ndef git_get_keywords(versionfile_abs):\n \"\"\"Extract version information from the given file.\"\"\"\n # the code embedded in _version.py can just fetch the value of these\n # keywords. When used from setup.py, we don't want to import _version.py,\n # so we do it with a regexp instead. This function is not used from\n # _version.py.\n keywords = {}\n try:\n f = open(versionfile_abs, \"r\")\n for line in f.readlines():\n if line.strip().startswith(\"git_refnames =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"refnames\"] = mo.group(1)\n if line.strip().startswith(\"git_full =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"full\"] = mo.group(1)\n if line.strip().startswith(\"git_date =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"date\"] = mo.group(1)\n f.close()\n except OSError:\n pass\n return keywords\n\n\n@register_vcs_handler(\"git\", \"keywords\")\ndef git_versions_from_keywords(keywords, tag_prefix, verbose):\n \"\"\"Get version information from git keywords.\"\"\"\n if not keywords:\n raise NotThisMethod(\"no keywords at all, weird\")\n date = keywords.get(\"date\")\n if date is not None:\n # git-2.2.0 added \"%cI\", which expands to an ISO-8601 -compliant\n # datestamp. However we prefer \"%ci\" (which expands to an \"ISO-8601\n # -like\" string, which we must then edit to make compliant), because\n # it's been around since git-1.5.3, and it's too difficult to\n # discover which version we're using, or to work around using an\n # older one.\n date = date.strip().replace(\" \", \"T\", 1).replace(\" \", \"\", 1)\n refnames = keywords[\"refnames\"].strip()\n if refnames.startswith(\"$Format\"):\n if verbose:\n print(\"keywords are unexpanded, not using\")\n raise NotThisMethod(\"unexpanded keywords, not a git-archive tarball\")\n refs = {r.strip() for r in refnames.strip(\"()\").split(\",\")}\n # starting in git-1.8.3, tags are listed as \"tag: foo-1.0\" instead of\n # just \"foo-1.0\". If we see a \"tag: \" prefix, prefer those.\n TAG = \"tag: \"\n tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}\n if not tags:\n # Either we're using git < 1.8.3, or there really are no tags. We use\n # a heuristic: assume all version tags have a digit. The old git %d\n # expansion behaves like git log --decorate=short and strips out the\n # refs/heads/ and refs/tags/ prefixes that would let us distinguish\n # between branches and tags. 
By ignoring refnames without digits, we\n # filter out many common branch names like \"release\" and\n # \"stabilization\", as well as \"HEAD\" and \"master\".\n tags = {r for r in refs if re.search(r\"\\d\", r)}\n if verbose:\n print(\"discarding '%s', no digits\" % \",\".join(refs - tags))\n if verbose:\n print(\"likely tags: %s\" % \",\".join(sorted(tags)))\n for ref in sorted(tags):\n # sorting will prefer e.g. \"2.0\" over \"2.0rc1\"\n if ref.startswith(tag_prefix):\n r = ref[len(tag_prefix) :]\n if verbose:\n print(\"picking %s\" % r)\n return {\n \"version\": r,\n \"full-revisionid\": keywords[\"full\"].strip(),\n \"dirty\": False,\n \"error\": None,\n \"date\": date,\n }\n # no suitable tags, so version is \"0+unknown\", but full hex is still there\n if verbose:\n print(\"no suitable tags, using unknown + full revision id\")\n return {\n \"version\": \"0+unknown\",\n \"full-revisionid\": keywords[\"full\"].strip(),\n \"dirty\": False,\n \"error\": \"no suitable tags\",\n \"date\": None,\n }\n\n\n@register_vcs_handler(\"git\", \"pieces_from_vcs\")\ndef git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):\n \"\"\"Get version from 'git describe' in the root of the source tree.\n\n This only gets called if the git-archive 'subst' keywords were *not*\n expanded, and _version.py hasn't already been rewritten with a short\n version string, meaning we're inside a checked out source tree.\n \"\"\"\n GITS = [\"git\"]\n if sys.platform == \"win32\":\n GITS = [\"git.cmd\", \"git.exe\"]\n\n out, rc = run_command(GITS, [\"rev-parse\", \"--git-dir\"], cwd=root, hide_stderr=True)\n if rc != 0:\n if verbose:\n print(\"Directory %s not under git control\" % root)\n raise NotThisMethod(\"'git rev-parse --git-dir' returned error\")\n\n # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]\n # if there isn't one, this yields HEX[-dirty] (no NUM)\n describe_out, rc = run_command(\n GITS,\n [\n \"describe\",\n \"--tags\",\n \"--dirty\",\n \"--always\",\n \"--long\",\n \"--match\",\n \"%s*\" % tag_prefix,\n ],\n cwd=root,\n )\n # --long was added in git-1.5.5\n if describe_out is None:\n raise NotThisMethod(\"'git describe' failed\")\n describe_out = describe_out.strip()\n full_out, rc = run_command(GITS, [\"rev-parse\", \"HEAD\"], cwd=root)\n if full_out is None:\n raise NotThisMethod(\"'git rev-parse' failed\")\n full_out = full_out.strip()\n\n pieces = {}\n pieces[\"long\"] = full_out\n pieces[\"short\"] = full_out[:7] # maybe improved later\n pieces[\"error\"] = None\n\n # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]\n # TAG might have hyphens.\n git_describe = describe_out\n\n # look for -dirty suffix\n dirty = git_describe.endswith(\"-dirty\")\n pieces[\"dirty\"] = dirty\n if dirty:\n git_describe = git_describe[: git_describe.rindex(\"-dirty\")]\n\n # now we have TAG-NUM-gHEX or HEX\n\n if \"-\" in git_describe:\n # TAG-NUM-gHEX\n mo = re.search(r\"^(.+)-(\\d+)-g([0-9a-f]+)$\", git_describe)\n if not mo:\n # unparseable. 
Maybe git-describe is misbehaving?\n pieces[\"error\"] = \"unable to parse git-describe output: '%s'\" % describe_out\n return pieces\n\n # tag\n full_tag = mo.group(1)\n if not full_tag.startswith(tag_prefix):\n if verbose:\n fmt = \"tag '%s' doesn't start with prefix '%s'\"\n print(fmt % (full_tag, tag_prefix))\n pieces[\"error\"] = \"tag '%s' doesn't start with prefix '%s'\" % (\n full_tag,\n tag_prefix,\n )\n return pieces\n pieces[\"closest-tag\"] = full_tag[len(tag_prefix) :]\n\n # distance: number of commits since tag\n pieces[\"distance\"] = int(mo.group(2))\n\n # commit: short hex revision ID\n pieces[\"short\"] = mo.group(3)\n\n else:\n # HEX: no tags\n pieces[\"closest-tag\"] = None\n count_out, rc = run_command(GITS, [\"rev-list\", \"HEAD\", \"--count\"], cwd=root)\n pieces[\"distance\"] = int(count_out) # total number of commits\n\n # commit date: see ISO-8601 comment in git_versions_from_keywords()\n date = run_command(GITS, [\"show\", \"-s\", \"--format=%ci\", \"HEAD\"], cwd=root)[\n 0\n ].strip()\n pieces[\"date\"] = date.strip().replace(\" \", \"T\", 1).replace(\" \", \"\", 1)\n\n return pieces\n\n\ndef do_vcs_install(manifest_in, versionfile_source, ipy):\n \"\"\"Git-specific installation logic for Versioneer.\n\n For Git, this means creating/changing .gitattributes to mark _version.py\n for export-subst keyword substitution.\n \"\"\"\n GITS = [\"git\"]\n if sys.platform == \"win32\":\n GITS = [\"git.cmd\", \"git.exe\"]\n files = [manifest_in, versionfile_source]\n if ipy:\n files.append(ipy)\n try:\n me = __file__\n if me.endswith(\".pyc\") or me.endswith(\".pyo\"):\n me = os.path.splitext(me)[0] + \".py\"\n versioneer_file = os.path.relpath(me)\n except NameError:\n versioneer_file = \"versioneer.py\"\n files.append(versioneer_file)\n present = False\n try:\n f = open(\".gitattributes\", \"r\")\n for line in f.readlines():\n if line.strip().startswith(versionfile_source):\n if \"export-subst\" in line.strip().split()[1:]:\n present = True\n f.close()\n except OSError:\n pass\n if not present:\n f = open(\".gitattributes\", \"a+\")\n f.write(\"%s export-subst\\n\" % versionfile_source)\n f.close()\n files.append(\".gitattributes\")\n run_command(GITS, [\"add\", \"--\"] + files)\n\n\ndef versions_from_parentdir(parentdir_prefix, root, verbose):\n \"\"\"Try to determine the version from the parent directory name.\n\n Source tarballs conventionally unpack into a directory that includes both\n the project name and a version string. We will also support searching up\n two directory levels for an appropriately named parent directory\n \"\"\"\n rootdirs = []\n\n for i in range(3):\n dirname = os.path.basename(root)\n if dirname.startswith(parentdir_prefix):\n return {\n \"version\": dirname[len(parentdir_prefix) :],\n \"full-revisionid\": None,\n \"dirty\": False,\n \"error\": None,\n \"date\": None,\n }\n else:\n rootdirs.append(root)\n root = os.path.dirname(root) # up a level\n\n if verbose:\n print(\n \"Tried directories %s but none started with prefix %s\"\n % (str(rootdirs), parentdir_prefix)\n )\n raise NotThisMethod(\"rootdir doesn't start with parentdir_prefix\")\n\n\nSHORT_VERSION_PY = \"\"\"\n# This file was generated by 'versioneer.py' (0.18) from\n# revision-control system data, or from the parent directory name of an\n# unpacked source archive. 
Distribution tarballs contain a pre-generated copy\n# of this file.\n\nimport json\n\nversion_json = '''\n%s\n''' # END VERSION_JSON\n\n\ndef get_versions():\n return json.loads(version_json)\n\"\"\"\n\n\ndef versions_from_file(filename):\n \"\"\"Try to determine the version from _version.py if present.\"\"\"\n try:\n with open(filename) as f:\n contents = f.read()\n except OSError:\n raise NotThisMethod(\"unable to read _version.py\")\n mo = re.search(\n r\"version_json = '''\\n(.*)''' # END VERSION_JSON\", contents, re.M | re.S\n )\n if not mo:\n mo = re.search(\n r\"version_json = '''\\r\\n(.*)''' # END VERSION_JSON\", contents, re.M | re.S\n )\n if not mo:\n raise NotThisMethod(\"no version_json in _version.py\")\n return json.loads(mo.group(1))\n\n\ndef write_to_version_file(filename, versions):\n \"\"\"Write the given version number to the given _version.py file.\"\"\"\n os.unlink(filename)\n contents = json.dumps(versions, sort_keys=True, indent=1, separators=(\",\", \": \"))\n with open(filename, \"w\") as f:\n f.write(SHORT_VERSION_PY % contents)\n\n print(\"set %s to '%s'\" % (filename, versions[\"version\"]))\n\n\ndef plus_or_dot(pieces):\n \"\"\"Return a + if we don't already have one, else return a .\"\"\"\n if \"+\" in pieces.get(\"closest-tag\", \"\"):\n return \".\"\n return \"+\"\n\n\ndef render_pep440(pieces):\n \"\"\"Build up version string, with post-release \"local version identifier\".\n\n Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n Exceptions:\n 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += plus_or_dot(pieces)\n rendered += \"%d.g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n if pieces[\"dirty\"]:\n rendered += \".dirty\"\n else:\n # exception #1\n rendered = \"0+untagged.%d.g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n if pieces[\"dirty\"]:\n rendered += \".dirty\"\n return rendered\n\n\ndef render_pep440_pre(pieces):\n \"\"\"TAG[.post.devDISTANCE] -- No -dirty.\n\n Exceptions:\n 1: no tags. 0.post.devDISTANCE\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"]:\n rendered += \".post.dev%d\" % pieces[\"distance\"]\n else:\n # exception #1\n rendered = \"0.post.dev%d\" % pieces[\"distance\"]\n return rendered\n\n\ndef render_pep440_post(pieces):\n \"\"\"TAG[.postDISTANCE[.dev0]+gHEX] .\n\n The \".dev0\" means dirty. Note that .dev0 sorts backwards\n (a dirty tree will appear \"older\" than the corresponding clean one),\n but you shouldn't be releasing software with -dirty anyways.\n\n Exceptions:\n 1: no tags. 0.postDISTANCE[.dev0]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += \".post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n rendered += plus_or_dot(pieces)\n rendered += \"g%s\" % pieces[\"short\"]\n else:\n # exception #1\n rendered = \"0.post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n rendered += \"+g%s\" % pieces[\"short\"]\n return rendered\n\n\ndef render_pep440_old(pieces):\n \"\"\"TAG[.postDISTANCE[.dev0]] .\n\n The \".dev0\" means dirty.\n\n Eexceptions:\n 1: no tags. 
0.postDISTANCE[.dev0]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += \".post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n else:\n # exception #1\n rendered = \"0.post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n return rendered\n\n\ndef render_git_describe(pieces):\n \"\"\"TAG[-DISTANCE-gHEX][-dirty].\n\n Like 'git describe --tags --dirty --always'.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"]:\n rendered += \"-%d-g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n else:\n # exception #1\n rendered = pieces[\"short\"]\n if pieces[\"dirty\"]:\n rendered += \"-dirty\"\n return rendered\n\n\ndef render_git_describe_long(pieces):\n \"\"\"TAG-DISTANCE-gHEX[-dirty].\n\n Like 'git describe --tags --dirty --always -long'.\n The distance/hash is unconditional.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n rendered += \"-%d-g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n else:\n # exception #1\n rendered = pieces[\"short\"]\n if pieces[\"dirty\"]:\n rendered += \"-dirty\"\n return rendered\n\n\ndef render(pieces, style):\n \"\"\"Render the given version pieces into the requested style.\"\"\"\n if pieces[\"error\"]:\n return {\n \"version\": \"unknown\",\n \"full-revisionid\": pieces.get(\"long\"),\n \"dirty\": None,\n \"error\": pieces[\"error\"],\n \"date\": None,\n }\n\n if not style or style == \"default\":\n style = \"pep440\" # the default\n\n if style == \"pep440\":\n rendered = render_pep440(pieces)\n elif style == \"pep440-pre\":\n rendered = render_pep440_pre(pieces)\n elif style == \"pep440-post\":\n rendered = render_pep440_post(pieces)\n elif style == \"pep440-old\":\n rendered = render_pep440_old(pieces)\n elif style == \"git-describe\":\n rendered = render_git_describe(pieces)\n elif style == \"git-describe-long\":\n rendered = render_git_describe_long(pieces)\n else:\n raise ValueError(\"unknown style '%s'\" % style)\n\n return {\n \"version\": rendered,\n \"full-revisionid\": pieces[\"long\"],\n \"dirty\": pieces[\"dirty\"],\n \"error\": None,\n \"date\": pieces.get(\"date\"),\n }\n\n\nclass VersioneerBadRootError(Exception):\n \"\"\"The project root directory is unknown or missing key files.\"\"\"\n\n\ndef get_versions(verbose=False):\n \"\"\"Get the project version from whatever source is available.\n\n Returns dict with two keys: 'version' and 'full'.\n \"\"\"\n if \"versioneer\" in sys.modules:\n # see the discussion in cmdclass.py:get_cmdclass()\n del sys.modules[\"versioneer\"]\n\n root = get_root()\n cfg = get_config_from_root(root)\n\n assert cfg.VCS is not None, \"please set [versioneer]VCS= in setup.cfg\"\n handlers = HANDLERS.get(cfg.VCS)\n assert handlers, \"unrecognized VCS '%s'\" % cfg.VCS\n verbose = verbose or cfg.verbose\n assert (\n cfg.versionfile_source is not None\n ), \"please set versioneer.versionfile_source\"\n assert cfg.tag_prefix is not None, \"please set versioneer.tag_prefix\"\n\n versionfile_abs = os.path.join(root, cfg.versionfile_source)\n\n # extract version from first of: _version.py, VCS command (e.g. 'git\n # describe'), parentdir. 
This is meant to work for developers using a\n # source checkout, for users of a tarball created by 'setup.py sdist',\n # and for users of a tarball/zipball created by 'git archive' or github's\n # download-from-tag feature or the equivalent in other VCSes.\n\n get_keywords_f = handlers.get(\"get_keywords\")\n from_keywords_f = handlers.get(\"keywords\")\n if get_keywords_f and from_keywords_f:\n try:\n keywords = get_keywords_f(versionfile_abs)\n ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)\n if verbose:\n print(\"got version from expanded keyword %s\" % ver)\n return ver\n except NotThisMethod:\n pass\n\n try:\n ver = versions_from_file(versionfile_abs)\n if verbose:\n print(\"got version from file %s %s\" % (versionfile_abs, ver))\n return ver\n except NotThisMethod:\n pass\n\n from_vcs_f = handlers.get(\"pieces_from_vcs\")\n if from_vcs_f:\n try:\n pieces = from_vcs_f(cfg.tag_prefix, root, verbose)\n ver = render(pieces, cfg.style)\n if verbose:\n print(\"got version from VCS %s\" % ver)\n return ver\n except NotThisMethod:\n pass\n\n try:\n if cfg.parentdir_prefix:\n ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)\n if verbose:\n print(\"got version from parentdir %s\" % ver)\n return ver\n except NotThisMethod:\n pass\n\n if verbose:\n print(\"unable to compute version\")\n\n return {\n \"version\": \"0+unknown\",\n \"full-revisionid\": None,\n \"dirty\": None,\n \"error\": \"unable to compute version\",\n \"date\": None,\n }\n\n\ndef get_version():\n \"\"\"Get the short version string for this project.\"\"\"\n return get_versions()[\"version\"]\n\n\ndef get_cmdclass():\n \"\"\"Get the custom setuptools/distutils subclasses used by Versioneer.\"\"\"\n if \"versioneer\" in sys.modules:\n del sys.modules[\"versioneer\"]\n # this fixes the \"python setup.py develop\" case (also 'install' and\n # 'easy_install .'), in which subdependencies of the main project are\n # built (using setup.py bdist_egg) in the same python process. Assume\n # a main project A and a dependency B, which use different versions\n # of Versioneer. A's setup.py imports A's Versioneer, leaving it in\n # sys.modules by the time B's setup.py is executed, causing B to run\n # with the wrong versioneer. Setuptools wraps the sub-dep builds in a\n # sandbox that restores sys.modules to it's pre-build state, so the\n # parent is protected against the child's \"import versioneer\". 
By\n # removing ourselves from sys.modules here, before the child build\n # happens, we protect the child from the parent's versioneer too.\n # Also see https://github.com/warner/python-versioneer/issues/52\n\n cmds = {}\n\n # we add \"version\" to both distutils and setuptools\n from distutils.core import Command\n\n class cmd_version(Command):\n description = \"report generated version string\"\n user_options = []\n boolean_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n vers = get_versions(verbose=True)\n print(\"Version: %s\" % vers[\"version\"])\n print(\" full-revisionid: %s\" % vers.get(\"full-revisionid\"))\n print(\" dirty: %s\" % vers.get(\"dirty\"))\n print(\" date: %s\" % vers.get(\"date\"))\n if vers[\"error\"]:\n print(\" error: %s\" % vers[\"error\"])\n\n cmds[\"version\"] = cmd_version\n\n # we override \"build_py\" in both distutils and setuptools\n #\n # most invocation pathways end up running build_py:\n # distutils/build -> build_py\n # distutils/install -> distutils/build ->..\n # setuptools/bdist_wheel -> distutils/install ->..\n # setuptools/bdist_egg -> distutils/install_lib -> build_py\n # setuptools/install -> bdist_egg ->..\n # setuptools/develop -> ?\n # pip install:\n # copies source tree to a tempdir before running egg_info/etc\n # if .git isn't copied too, 'git describe' will fail\n # then does setup.py bdist_wheel, or sometimes setup.py install\n # setup.py egg_info -> ?\n\n # we override different \"build_py\" commands for both environments\n if \"setuptools\" in sys.modules:\n from setuptools.command.build_py import build_py as _build_py\n else:\n from distutils.command.build_py import build_py as _build_py\n\n class cmd_build_py(_build_py):\n def run(self):\n root = get_root()\n cfg = get_config_from_root(root)\n versions = get_versions()\n _build_py.run(self)\n # now locate _version.py in the new build/ directory and replace\n # it with an updated value\n if cfg.versionfile_build:\n target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build)\n print(\"UPDATING %s\" % target_versionfile)\n write_to_version_file(target_versionfile, versions)\n\n cmds[\"build_py\"] = cmd_build_py\n\n if \"cx_Freeze\" in sys.modules: # cx_freeze enabled?\n from cx_Freeze.dist import build_exe as _build_exe\n\n # nczeczulin reports that py2exe won't like the pep440-style string\n # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.\n # setup(console=[{\n # \"version\": versioneer.get_version().split(\"+\", 1)[0], # FILEVERSION\n # \"product_version\": versioneer.get_version(),\n # ...\n\n class cmd_build_exe(_build_exe):\n def run(self):\n root = get_root()\n cfg = get_config_from_root(root)\n versions = get_versions()\n target_versionfile = cfg.versionfile_source\n print(\"UPDATING %s\" % target_versionfile)\n write_to_version_file(target_versionfile, versions)\n\n _build_exe.run(self)\n os.unlink(target_versionfile)\n with open(cfg.versionfile_source, \"w\") as f:\n LONG = LONG_VERSION_PY[cfg.VCS]\n f.write(\n LONG\n % {\n \"DOLLAR\": \"$\",\n \"STYLE\": cfg.style,\n \"TAG_PREFIX\": cfg.tag_prefix,\n \"PARENTDIR_PREFIX\": cfg.parentdir_prefix,\n \"VERSIONFILE_SOURCE\": cfg.versionfile_source,\n }\n )\n\n cmds[\"build_exe\"] = cmd_build_exe\n del cmds[\"build_py\"]\n\n if \"py2exe\" in sys.modules: # py2exe enabled?\n try:\n from py2exe.distutils_buildexe import py2exe as _py2exe # py3\n except ImportError:\n from py2exe.build_exe import py2exe as _py2exe # py2\n\n class 
cmd_py2exe(_py2exe):\n def run(self):\n root = get_root()\n cfg = get_config_from_root(root)\n versions = get_versions()\n target_versionfile = cfg.versionfile_source\n print(\"UPDATING %s\" % target_versionfile)\n write_to_version_file(target_versionfile, versions)\n\n _py2exe.run(self)\n os.unlink(target_versionfile)\n with open(cfg.versionfile_source, \"w\") as f:\n LONG = LONG_VERSION_PY[cfg.VCS]\n f.write(\n LONG\n % {\n \"DOLLAR\": \"$\",\n \"STYLE\": cfg.style,\n \"TAG_PREFIX\": cfg.tag_prefix,\n \"PARENTDIR_PREFIX\": cfg.parentdir_prefix,\n \"VERSIONFILE_SOURCE\": cfg.versionfile_source,\n }\n )\n\n cmds[\"py2exe\"] = cmd_py2exe\n\n # we override different \"sdist\" commands for both environments\n if \"setuptools\" in sys.modules:\n from setuptools.command.sdist import sdist as _sdist\n else:\n from distutils.command.sdist import sdist as _sdist\n\n class cmd_sdist(_sdist):\n def run(self):\n versions = get_versions()\n self._versioneer_generated_versions = versions\n # unless we update this, the command will keep using the old\n # version\n self.distribution.metadata.version = versions[\"version\"]\n return _sdist.run(self)\n\n def make_release_tree(self, base_dir, files):\n root = get_root()\n cfg = get_config_from_root(root)\n _sdist.make_release_tree(self, base_dir, files)\n # now locate _version.py in the new base_dir directory\n # (remembering that it may be a hardlink) and replace it with an\n # updated value\n target_versionfile = os.path.join(base_dir, cfg.versionfile_source)\n print(\"UPDATING %s\" % target_versionfile)\n write_to_version_file(\n target_versionfile, self._versioneer_generated_versions\n )\n\n cmds[\"sdist\"] = cmd_sdist\n\n return cmds\n\n\nCONFIG_ERROR = \"\"\"\nsetup.cfg is missing the necessary Versioneer configuration. You need\na section like:\n\n [versioneer]\n VCS = git\n style = pep440\n versionfile_source = src/myproject/_version.py\n versionfile_build = myproject/_version.py\n tag_prefix =\n parentdir_prefix = myproject-\n\nYou will also need to edit your setup.py to use the results:\n\n import versioneer\n setup(version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(), ...)\n\nPlease read the docstring in ./versioneer.py for configuration instructions,\nedit setup.cfg, and re-run the installer or 'python versioneer.py setup'.\n\"\"\"\n\nSAMPLE_CONFIG = \"\"\"\n# See the docstring in versioneer.py for instructions. 
Note that you must\n# re-run 'versioneer.py setup' after changing this section, and commit the\n# resulting files.\n\n[versioneer]\n#VCS = git\n#style = pep440\n#versionfile_source =\n#versionfile_build =\n#tag_prefix =\n#parentdir_prefix =\n\n\"\"\"\n\nINIT_PY_SNIPPET = \"\"\"\nfrom ._version import get_versions\n__version__ = get_versions()['version']\ndel get_versions\n\"\"\"\n\n\ndef do_setup():\n \"\"\"Main VCS-independent setup function for installing Versioneer.\"\"\"\n root = get_root()\n try:\n cfg = get_config_from_root(root)\n except (OSError, configparser.NoSectionError, configparser.NoOptionError) as e:\n if isinstance(e, (EnvironmentError, configparser.NoSectionError)):\n print(\"Adding sample versioneer config to setup.cfg\", file=sys.stderr)\n with open(os.path.join(root, \"setup.cfg\"), \"a\") as f:\n f.write(SAMPLE_CONFIG)\n print(CONFIG_ERROR, file=sys.stderr)\n return 1\n\n print(\" creating %s\" % cfg.versionfile_source)\n with open(cfg.versionfile_source, \"w\") as f:\n LONG = LONG_VERSION_PY[cfg.VCS]\n f.write(\n LONG\n % {\n \"DOLLAR\": \"$\",\n \"STYLE\": cfg.style,\n \"TAG_PREFIX\": cfg.tag_prefix,\n \"PARENTDIR_PREFIX\": cfg.parentdir_prefix,\n \"VERSIONFILE_SOURCE\": cfg.versionfile_source,\n }\n )\n\n ipy = os.path.join(os.path.dirname(cfg.versionfile_source), \"__init__.py\")\n if os.path.exists(ipy):\n try:\n with open(ipy, \"r\") as f:\n old = f.read()\n except OSError:\n old = \"\"\n if INIT_PY_SNIPPET not in old:\n print(\" appending to %s\" % ipy)\n with open(ipy, \"a\") as f:\n f.write(INIT_PY_SNIPPET)\n else:\n print(\" %s unmodified\" % ipy)\n else:\n print(\" %s doesn't exist, ok\" % ipy)\n ipy = None\n\n # Make sure both the top-level \"versioneer.py\" and versionfile_source\n # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so\n # they'll be copied into source distributions. Pip won't be able to\n # install the package without this.\n manifest_in = os.path.join(root, \"MANIFEST.in\")\n simple_includes = set()\n try:\n with open(manifest_in, \"r\") as f:\n for line in f:\n if line.startswith(\"include \"):\n for include in line.split()[1:]:\n simple_includes.add(include)\n except OSError:\n pass\n # That doesn't cover everything MANIFEST.in can do\n # (http://docs.python.org/2/distutils/sourcedist.html#commands), so\n # it might give some false negatives. Appending redundant 'include'\n # lines is safe, though.\n if \"versioneer.py\" not in simple_includes:\n print(\" appending 'versioneer.py' to MANIFEST.in\")\n with open(manifest_in, \"a\") as f:\n f.write(\"include versioneer.py\\n\")\n else:\n print(\" 'versioneer.py' already in MANIFEST.in\")\n if cfg.versionfile_source not in simple_includes:\n print(\n \" appending versionfile_source ('%s') to MANIFEST.in\"\n % cfg.versionfile_source\n )\n with open(manifest_in, \"a\") as f:\n f.write(\"include %s\\n\" % cfg.versionfile_source)\n else:\n print(\" versionfile_source already in MANIFEST.in\")\n\n # Make VCS-specific changes. 
For git, this means creating/changing\n # .gitattributes to mark _version.py for export-subst keyword\n # substitution.\n do_vcs_install(manifest_in, cfg.versionfile_source, ipy)\n return 0\n\n\ndef scan_setup_py():\n \"\"\"Validate the contents of setup.py against Versioneer's expectations.\"\"\"\n found = set()\n setters = False\n errors = 0\n with open(\"setup.py\", \"r\") as f:\n for line in f.readlines():\n if \"import versioneer\" in line:\n found.add(\"import\")\n if \"versioneer.get_cmdclass()\" in line:\n found.add(\"cmdclass\")\n if \"versioneer.get_version()\" in line:\n found.add(\"get_version\")\n if \"versioneer.VCS\" in line:\n setters = True\n if \"versioneer.versionfile_source\" in line:\n setters = True\n if len(found) != 3:\n print(\"\")\n print(\"Your setup.py appears to be missing some important items\")\n print(\"(but I might be wrong). Please make sure it has something\")\n print(\"roughly like the following:\")\n print(\"\")\n print(\" import versioneer\")\n print(\" setup( version=versioneer.get_version(),\")\n print(\" cmdclass=versioneer.get_cmdclass(), ...)\")\n print(\"\")\n errors += 1\n if setters:\n print(\"You should remove lines like 'versioneer.VCS = ' and\")\n print(\"'versioneer.versionfile_source = ' . This configuration\")\n print(\"now lives in setup.cfg, and should be removed from setup.py\")\n print(\"\")\n errors += 1\n return errors\n\n\nif __name__ == \"__main__\":\n cmd = sys.argv[1]\n if cmd == \"setup\":\n errors = do_setup()\n errors += scan_setup_py()\n if errors:\n sys.exit(1)\n"},{"col":4,"comment":"Manually trigger loading of this array's data from disk or a\n remote source into memory and return a new array. The original is\n left unaltered.\n\n Normally, it should not be necessary to call this method in user code,\n because all xarray functions should either work on deferred data or\n load data automatically. However, this method can be necessary when\n working with many file objects on disk.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.array.compute``.\n\n See Also\n --------\n dask.array.compute\n ","endLoc":833,"header":"def compute(self, **kwargs) -> \"DataArray\"","id":1154,"name":"compute","nodeType":"Function","startLoc":813,"text":"def compute(self, **kwargs) -> \"DataArray\":\n \"\"\"Manually trigger loading of this array's data from disk or a\n remote source into memory and return a new array. The original is\n left unaltered.\n\n Normally, it should not be necessary to call this method in user code,\n because all xarray functions should either work on deferred data or\n load data automatically. 
However, this method can be necessary when\n working with many file objects on disk.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.array.compute``.\n\n See Also\n --------\n dask.array.compute\n \"\"\"\n new = self.copy(deep=False)\n return new.load(**kwargs)"},{"col":0,"comment":"null","endLoc":401,"header":"def _dsplot(plotfunc)","id":1155,"name":"_dsplot","nodeType":"Function","startLoc":167,"text":"def _dsplot(plotfunc):\n commondoc = \"\"\"\n Parameters\n ----------\n\n ds : Dataset\n x, y : string\n Variable names for x, y axis.\n hue: str, optional\n Variable by which to color scattered points\n hue_style: str, optional\n Can be either 'discrete' (legend) or 'continuous' (color bar).\n markersize: str, optional (scatter only)\n Variably by which to vary size of scattered points\n size_norm: optional\n Either None or 'Norm' instance to normalize the 'markersize' variable.\n add_guide: bool, optional\n Add a guide that depends on hue_style\n - for \"discrete\", build a legend.\n This is the default for non-numeric `hue` variables.\n - for \"continuous\", build a colorbar\n row : string, optional\n If passed, make row faceted plots on this dimension name\n col : string, optional\n If passed, make column faceted plots on this dimension name\n col_wrap : integer, optional\n Use together with ``col`` to wrap faceted plots\n ax : matplotlib axes, optional\n If None, uses the current axis. Not applicable when using facets.\n subplot_kws : dict, optional\n Dictionary of keyword arguments for matplotlib subplots. Only applies\n to FacetGrid plotting.\n aspect : scalar, optional\n Aspect ratio of plot, so that ``aspect * size`` gives the width in\n inches. Only used if a ``size`` is provided.\n size : scalar, optional\n If provided, create a new figure for the plot with the given size.\n Height (in inches) of each plot. See also: ``aspect``.\n norm : ``matplotlib.colors.Normalize`` instance, optional\n If the ``norm`` has vmin or vmax specified, the corresponding kwarg\n must be None.\n vmin, vmax : floats, optional\n Values to anchor the colormap, otherwise they are inferred from the\n data and other keyword arguments. When a diverging dataset is inferred,\n setting one of these values will fix the other by symmetry around\n ``center``. Setting both values prevents use of a diverging colormap.\n If discrete levels are provided as an explicit list, both of these\n values are ignored.\n cmap : matplotlib colormap name or object, optional\n The mapping from data values to color space. If not provided, this\n will be either be ``viridis`` (if the function infers a sequential\n dataset) or ``RdBu_r`` (if the function infers a diverging dataset).\n When `Seaborn` is installed, ``cmap`` may also be a `seaborn`\n color palette. If ``cmap`` is seaborn color palette and the plot type\n is not ``contour`` or ``contourf``, ``levels`` must also be specified.\n colors : discrete colors to plot, optional\n A single color or a list of colors. If the plot type is not ``contour``\n or ``contourf``, the ``levels`` argument is required.\n center : float, optional\n The value at which to center the colormap. Passing this value implies\n use of a diverging colormap. 
Setting it to ``False`` prevents use of a\n diverging colormap.\n robust : bool, optional\n If True and ``vmin`` or ``vmax`` are absent, the colormap range is\n computed with 2nd and 98th percentiles instead of the extreme values.\n extend : {'neither', 'both', 'min', 'max'}, optional\n How to draw arrows extending the colorbar beyond its limits. If not\n provided, extend is inferred from vmin, vmax and the data limits.\n levels : int or list-like object, optional\n Split the colormap (cmap) into discrete color intervals. If an integer\n is provided, \"nice\" levels are chosen based on the data range: this can\n imply that the final number of levels is not exactly the expected one.\n Setting ``vmin`` and/or ``vmax`` with ``levels=N`` is equivalent to\n setting ``levels=np.linspace(vmin, vmax, N)``.\n **kwargs : optional\n Additional keyword arguments to matplotlib\n \"\"\"\n\n # Build on the original docstring\n plotfunc.__doc__ = \"%s\\n%s\" % (plotfunc.__doc__, commondoc)\n\n @functools.wraps(plotfunc)\n def newplotfunc(\n ds,\n x=None,\n y=None,\n hue=None,\n hue_style=None,\n col=None,\n row=None,\n ax=None,\n figsize=None,\n size=None,\n col_wrap=None,\n sharex=True,\n sharey=True,\n aspect=None,\n subplot_kws=None,\n add_guide=None,\n cbar_kwargs=None,\n cbar_ax=None,\n vmin=None,\n vmax=None,\n norm=None,\n infer_intervals=None,\n center=None,\n levels=None,\n robust=None,\n colors=None,\n extend=None,\n cmap=None,\n **kwargs\n ):\n\n _is_facetgrid = kwargs.pop(\"_is_facetgrid\", False)\n if _is_facetgrid: # facetgrid call\n meta_data = kwargs.pop(\"meta_data\")\n else:\n meta_data = _infer_meta_data(ds, x, y, hue, hue_style, add_guide)\n\n hue_style = meta_data[\"hue_style\"]\n\n # handle facetgrids first\n if col or row:\n allargs = locals().copy()\n allargs[\"plotfunc\"] = globals()[plotfunc.__name__]\n allargs[\"data\"] = ds\n # TODO dcherian: why do I need to remove kwargs?\n for arg in [\"meta_data\", \"kwargs\", \"ds\"]:\n del allargs[arg]\n\n return _easy_facetgrid(kind=\"dataset\", **allargs, **kwargs)\n\n figsize = kwargs.pop(\"figsize\", None)\n ax = get_axis(figsize, size, aspect, ax)\n\n if hue_style == \"continuous\" and hue is not None:\n if _is_facetgrid:\n cbar_kwargs = meta_data[\"cbar_kwargs\"]\n cmap_params = meta_data[\"cmap_params\"]\n else:\n cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs(\n plotfunc, ds[hue].values, **locals()\n )\n\n # subset that can be passed to scatter, hist2d\n cmap_params_subset = dict(\n (vv, cmap_params[vv]) for vv in [\"vmin\", \"vmax\", \"norm\", \"cmap\"]\n )\n\n else:\n cmap_params_subset = {}\n\n primitive = plotfunc(\n ds=ds,\n x=x,\n y=y,\n hue=hue,\n hue_style=hue_style,\n ax=ax,\n cmap_params=cmap_params_subset,\n **kwargs\n )\n\n if _is_facetgrid: # if this was called from Facetgrid.map_dataset,\n return primitive # finish here. 
Else, make labels\n\n if meta_data.get(\"xlabel\", None):\n ax.set_xlabel(meta_data.get(\"xlabel\"))\n if meta_data.get(\"ylabel\", None):\n ax.set_ylabel(meta_data.get(\"ylabel\"))\n\n if meta_data[\"add_legend\"]:\n ax.legend(\n handles=primitive,\n labels=list(meta_data[\"hue\"].values),\n title=meta_data.get(\"hue_label\", None),\n )\n if meta_data[\"add_colorbar\"]:\n cbar_kwargs = {} if cbar_kwargs is None else cbar_kwargs\n if \"label\" not in cbar_kwargs:\n cbar_kwargs[\"label\"] = meta_data.get(\"hue_label\", None)\n _add_colorbar(primitive, ax, cbar_ax, cbar_kwargs, cmap_params)\n\n return primitive\n\n @functools.wraps(newplotfunc)\n def plotmethod(\n _PlotMethods_obj,\n x=None,\n y=None,\n hue=None,\n hue_style=None,\n col=None,\n row=None,\n ax=None,\n figsize=None,\n col_wrap=None,\n sharex=True,\n sharey=True,\n aspect=None,\n size=None,\n subplot_kws=None,\n add_guide=None,\n cbar_kwargs=None,\n cbar_ax=None,\n vmin=None,\n vmax=None,\n norm=None,\n infer_intervals=None,\n center=None,\n levels=None,\n robust=None,\n colors=None,\n extend=None,\n cmap=None,\n **kwargs\n ):\n \"\"\"\n The method should have the same signature as the function.\n\n This just makes the method work on Plotmethods objects,\n and passes all the other arguments straight through.\n \"\"\"\n allargs = locals()\n allargs[\"ds\"] = _PlotMethods_obj._ds\n allargs.update(kwargs)\n for arg in [\"_PlotMethods_obj\", \"newplotfunc\", \"kwargs\"]:\n del allargs[arg]\n return newplotfunc(**allargs)\n\n # Add to class _PlotMethods\n setattr(_Dataset_PlotMethods, plotmethod.__name__, plotmethod)\n\n return newplotfunc"},{"col":4,"comment":"Returns a copy of this array.\n\n If `deep=True`, a deep copy is made of the data array.\n Otherwise, a shallow copy is made, so each variable in the new\n array's dataset is also a variable in this array's dataset.\n\n Use `data` to create a new object with the same structure as\n original but entirely new data.\n\n Parameters\n ----------\n deep : bool, optional\n Whether the data array and its coordinates are loaded into memory\n and copied onto the new object. Default is True.\n data : array_like, optional\n Data to use in the new object. Must have same shape as original.\n When `data` is used, `deep` is ignored for all data variables,\n and only used for coords.\n\n Returns\n -------\n object : DataArray\n New object with dimensions, attributes, coordinates, name,\n encoding, and optionally data copied from original.\n\n Examples\n --------\n\n Shallow versus deep copy\n\n >>> array = xr.DataArray([1, 2, 3], dims='x',\n ... 
coords={'x': ['a', 'b', 'c']})\n >>> array.copy()\n \n array([1, 2, 3])\n Coordinates:\n * x (x) >> array_0 = array.copy(deep=False)\n >>> array_0[0] = 7\n >>> array_0\n \n array([7, 2, 3])\n Coordinates:\n * x (x) >> array\n \n array([7, 2, 3])\n Coordinates:\n * x (x) >> array.copy(data=[0.1, 0.2, 0.3])\n \n array([ 0.1, 0.2, 0.3])\n Coordinates:\n * x (x) >> array\n \n array([1, 2, 3])\n Coordinates:\n * x (x) \"DataArray\"","id":1156,"name":"copy","nodeType":"Function","startLoc":854,"text":"def copy(self, deep: bool = True, data: Any = None) -> \"DataArray\":\n \"\"\"Returns a copy of this array.\n\n If `deep=True`, a deep copy is made of the data array.\n Otherwise, a shallow copy is made, so each variable in the new\n array's dataset is also a variable in this array's dataset.\n\n Use `data` to create a new object with the same structure as\n original but entirely new data.\n\n Parameters\n ----------\n deep : bool, optional\n Whether the data array and its coordinates are loaded into memory\n and copied onto the new object. Default is True.\n data : array_like, optional\n Data to use in the new object. Must have same shape as original.\n When `data` is used, `deep` is ignored for all data variables,\n and only used for coords.\n\n Returns\n -------\n object : DataArray\n New object with dimensions, attributes, coordinates, name,\n encoding, and optionally data copied from original.\n\n Examples\n --------\n\n Shallow versus deep copy\n\n >>> array = xr.DataArray([1, 2, 3], dims='x',\n ... coords={'x': ['a', 'b', 'c']})\n >>> array.copy()\n \n array([1, 2, 3])\n Coordinates:\n * x (x) >> array_0 = array.copy(deep=False)\n >>> array_0[0] = 7\n >>> array_0\n \n array([7, 2, 3])\n Coordinates:\n * x (x) >> array\n \n array([7, 2, 3])\n Coordinates:\n * x (x) >> array.copy(data=[0.1, 0.2, 0.3])\n \n array([ 0.1, 0.2, 0.3])\n Coordinates:\n * x (x) >> array\n \n array([1, 2, 3])\n Coordinates:\n * x (x) bool","id":1157,"name":"_all_compat","nodeType":"Function","startLoc":1272,"text":"def _all_compat(self, other: \"Dataset\", compat_str: str) -> bool:\n \"\"\"Helper function for equals and identical\n \"\"\"\n\n # some stores (e.g., scipy) do not seem to preserve order, so don't\n # require matching order for equality\n def compat(x: Variable, y: Variable) -> bool:\n return getattr(x, compat_str)(y)\n\n return self._coord_names == other._coord_names and utils.dict_equiv(\n self._variables, other._variables, compat=compat\n )"},{"id":1158,"name":"installing.rst","nodeType":"TextFile","path":"doc","text":".. 
_installing:\n\nInstallation\n============\n\nRequired dependencies\n---------------------\n\n- Python (3.6 or later)\n- `numpy `__ (1.14 or later)\n- `pandas `__ (0.24 or later)\n\nOptional dependencies\n---------------------\n\nFor netCDF and IO\n~~~~~~~~~~~~~~~~~\n\n- `netCDF4 `__: recommended if you\n want to use xarray for reading or writing netCDF files\n- `scipy `__: used as a fallback for reading/writing netCDF3\n- `pydap `__: used as a fallback for accessing OPeNDAP\n- `h5netcdf `__: an alternative library for\n reading and writing netCDF4 files that does not use the netCDF-C libraries\n- `pynio `__: for reading GRIB and other\n geoscience specific file formats\n- `zarr `__: for chunked, compressed, N-dimensional arrays.\n- `cftime `__: recommended if you\n want to encode/decode datetimes for non-standard calendars or dates before\n year 1678 or after year 2262.\n- `PseudoNetCDF `__: recommended\n for accessing CAMx, GEOS-Chem (bpch), NOAA ARL files, ICARTT files\n (ffi1001) and many other.\n- `rasterio `__: for reading GeoTiffs and\n other gridded raster datasets.\n- `iris `__: for conversion to and from iris'\n Cube objects\n- `cfgrib `__: for reading GRIB files via the\n *ECMWF ecCodes* library.\n\nFor accelerating xarray\n~~~~~~~~~~~~~~~~~~~~~~~\n\n- `scipy `__: necessary to enable the interpolation features for\n xarray objects\n- `bottleneck `__: speeds up\n NaN-skipping and rolling window aggregations by a large factor\n- `numbagg `_: for exponential rolling\n window operations\n\nFor parallel computing\n~~~~~~~~~~~~~~~~~~~~~~\n\n- `dask.array `__: required for :ref:`dask`.\n\nFor plotting\n~~~~~~~~~~~~\n\n- `matplotlib `__: required for :ref:`plotting`\n- `cartopy `__: recommended for :ref:`plot-maps`\n- `seaborn `__: for better\n color palettes\n- `nc-time-axis `__: for plotting\n cftime.datetime objects\n\nAlternative data containers\n~~~~~~~~~~~~~~~~~~~~~~~~~~~\n- `sparse `_: for sparse arrays\n- Any numpy-like objects that support\n `NEP-18 `_.\n Note that while such libraries theoretically should work, they are untested.\n Integration tests are in the process of being written for individual libraries.\n\n\n.. _mindeps_policy:\n\nMinimum dependency versions\n---------------------------\nxarray adopts a rolling policy regarding the minimum supported version of its\ndependencies:\n\n- **Python:** 42 months\n (`NEP-29 `_)\n- **numpy:** 24 months\n (`NEP-29 `_)\n- **pandas:** 12 months\n- **scipy:** 12 months\n- **sparse** and other libraries that rely on\n `NEP-18 `_\n for integration: very latest available versions only, until the technology will have\n matured. This extends to dask when used in conjunction with any of these libraries.\n numpy >=1.17.\n- **all other libraries:** 6 months\n\nThe above should be interpreted as *the minor version (X.Y) initially published no more\nthan N months ago*. Patch versions (x.y.Z) are not pinned, and only the latest available\nat the moment of publishing the xarray release is guaranteed to work.\n\nYou can see the actual minimum tested versions:\n\n- `For NEP-18 libraries\n `_\n- `For everything else\n `_\n\n\nInstructions\n------------\n\nxarray itself is a pure Python package, but its dependencies are not. The\neasiest way to get everything installed is to use conda_. To install xarray\nwith its recommended dependencies using the conda command line tool::\n\n $ conda install xarray dask netCDF4 bottleneck\n\n.. 
_conda: http://conda.io/\n\nWe recommend using the community maintained `conda-forge `__ channel if you need difficult\\-to\\-build dependencies such as cartopy, pynio or PseudoNetCDF::\n\n $ conda install -c conda-forge xarray cartopy pynio pseudonetcdf\n\nNew releases may also appear in conda-forge before being updated in the default\nchannel.\n\nIf you don't use conda, be sure you have the required dependencies (numpy and\npandas) installed first. Then, install xarray with pip::\n\n $ pip install xarray\n\nTesting\n-------\n\nTo run the test suite after installing xarray, install (via pypi or conda) `py.test\n`__ and run ``pytest`` in the root directory of the xarray\nrepository.\n\n\nPerformance Monitoring\n~~~~~~~~~~~~~~~~~~~~~~\n\nA fixed-point performance monitoring of (a part of) our codes can be seen on\n`this page `__.\n\nTo run these benchmark tests in a local machine, first install\n\n- `airspeed-velocity `__: a tool for benchmarking\n Python packages over their lifetime.\n\nand run\n``asv run # this will install some conda environments in ./.asv/envs``\n"},{"col":4,"comment":"Two Datasets are broadcast equal if they are equal after\n broadcasting all variables against each other.\n\n For example, variables that are scalar in one dataset but non-scalar in\n the other dataset can still be broadcast equal if the the non-scalar\n variable is a constant.\n\n See Also\n --------\n Dataset.equals\n Dataset.identical\n ","endLoc":1301,"header":"def broadcast_equals(self, other: \"Dataset\") -> bool","id":1159,"name":"broadcast_equals","nodeType":"Function","startLoc":1285,"text":"def broadcast_equals(self, other: \"Dataset\") -> bool:\n \"\"\"Two Datasets are broadcast equal if they are equal after\n broadcasting all variables against each other.\n\n For example, variables that are scalar in one dataset but non-scalar in\n the other dataset can still be broadcast equal if the the non-scalar\n variable is a constant.\n\n See Also\n --------\n Dataset.equals\n Dataset.identical\n \"\"\"\n try:\n return self._all_compat(other, \"broadcast_equals\")\n except (TypeError, AttributeError):\n return False"},{"col":4,"comment":" Trigger computation in constituent dask arrays\n\n This keeps them as dask arrays but encourages them to keep data in\n memory. This is particularly useful when on a distributed machine.\n When on a single machine consider using ``.compute()`` instead.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.persist``.\n\n See Also\n --------\n dask.persist\n ","endLoc":852,"header":"def persist(self, **kwargs) -> \"DataArray\"","id":1160,"name":"persist","nodeType":"Function","startLoc":835,"text":"def persist(self, **kwargs) -> \"DataArray\":\n \"\"\" Trigger computation in constituent dask arrays\n\n This keeps them as dask arrays but encourages them to keep data in\n memory. 
This is particularly useful when on a distributed machine.\n When on a single machine consider using ``.compute()`` instead.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.persist``.\n\n See Also\n --------\n dask.persist\n \"\"\"\n ds = self._to_temp_dataset().persist(**kwargs)\n return self._from_temp_dataset(ds)"},{"col":4,"comment":"Two Datasets are equal if they have matching variables and\n coordinates, all of which are equal.\n\n Datasets can still be equal (like pandas objects) if they have NaN\n values in the same locations.\n\n This method is necessary because `v1 == v2` for ``Dataset``\n does element-wise comparisons (like numpy.ndarrays).\n\n See Also\n --------\n Dataset.broadcast_equals\n Dataset.identical\n ","endLoc":1321,"header":"def equals(self, other: \"Dataset\") -> bool","id":1161,"name":"equals","nodeType":"Function","startLoc":1303,"text":"def equals(self, other: \"Dataset\") -> bool:\n \"\"\"Two Datasets are equal if they have matching variables and\n coordinates, all of which are equal.\n\n Datasets can still be equal (like pandas objects) if they have NaN\n values in the same locations.\n\n This method is necessary because `v1 == v2` for ``Dataset``\n does element-wise comparisons (like numpy.ndarrays).\n\n See Also\n --------\n Dataset.broadcast_equals\n Dataset.identical\n \"\"\"\n try:\n return self._all_compat(other, \"equals\")\n except (TypeError, AttributeError):\n return False"},{"id":1162,"name":"examples.rst","nodeType":"TextFile","path":"doc","text":"Examples\n========\n\n.. toctree::\n :maxdepth: 2\n\n examples/weather-data\n examples/monthly-means\n examples/multidimensional-coords\n auto_gallery/index\n"},{"col":4,"comment":"Like equals, but also checks all dataset attributes and the\n attributes on all variables and coordinates.\n\n See Also\n --------\n Dataset.broadcast_equals\n Dataset.equals\n ","endLoc":1337,"header":"def identical(self, other: \"Dataset\") -> bool","id":1163,"name":"identical","nodeType":"Function","startLoc":1323,"text":"def identical(self, other: \"Dataset\") -> bool:\n \"\"\"Like equals, but also checks all dataset attributes and the\n attributes on all variables and coordinates.\n\n See Also\n --------\n Dataset.broadcast_equals\n Dataset.equals\n \"\"\"\n try:\n return utils.dict_equiv(self.attrs, other.attrs) and self._all_compat(\n other, \"identical\"\n )\n except (TypeError, AttributeError):\n return False"},{"fileName":"plot_colorbar_center.py","filePath":"doc/gallery","id":1164,"nodeType":"File","text":"# -*- coding: utf-8 -*-\n\"\"\"\n==================\nCentered colormaps\n==================\n\nxarray's automatic colormaps choice\n\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\nimport xarray as xr\n\n# Load the data\nds = xr.tutorial.load_dataset(\"air_temperature\")\nair = ds.air.isel(time=0)\n\nf, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(8, 6))\n\n# The first plot (in kelvins) chooses \"viridis\" and uses the data's min/max\nair.plot(ax=ax1, cbar_kwargs={\"label\": \"K\"})\nax1.set_title(\"Kelvins: default\")\nax2.set_xlabel(\"\")\n\n# The second plot (in celsius) now chooses \"BuRd\" and centers min/max around 0\nairc = air - 273.15\nairc.plot(ax=ax2, cbar_kwargs={\"label\": \"°C\"})\nax2.set_title(\"Celsius: default\")\nax2.set_xlabel(\"\")\nax2.set_ylabel(\"\")\n\n# The center doesn't have to be 0\nair.plot(ax=ax3, center=273.15, cbar_kwargs={\"label\": \"K\"})\nax3.set_title(\"Kelvins: center=273.15\")\n\n# Or it can be 
ignored\nairc.plot(ax=ax4, center=False, cbar_kwargs={\"label\": \"°C\"})\nax4.set_title(\"Celsius: center=False\")\nax4.set_ylabel(\"\")\n\n# Mke it nice\nplt.tight_layout()\nplt.show()\n"},{"col":4,"comment":"Mapping of pandas.Index objects used for label based indexing\n ","endLoc":1345,"header":"@property\n def indexes(self) -> Indexes","id":1165,"name":"indexes","nodeType":"Function","startLoc":1339,"text":"@property\n def indexes(self) -> Indexes:\n \"\"\"Mapping of pandas.Index objects used for label based indexing\n \"\"\"\n if self._indexes is None:\n self._indexes = default_indexes(self._variables, self._dims)\n return Indexes(self._indexes)"},{"col":4,"comment":"null","endLoc":929,"header":"def __copy__(self) -> \"DataArray\"","id":1166,"name":"__copy__","nodeType":"Function","startLoc":928,"text":"def __copy__(self) -> \"DataArray\":\n return self.copy(deep=False)"},{"col":4,"comment":"null","endLoc":934,"header":"def __deepcopy__(self, memo=None) -> \"DataArray\"","id":1167,"name":"__deepcopy__","nodeType":"Function","startLoc":931,"text":"def __deepcopy__(self, memo=None) -> \"DataArray\":\n # memo does nothing but is required for compatibility with\n # copy.deepcopy\n return self.copy(deep=True)"},{"attributeType":"null","col":28,"comment":"null","endLoc":11,"id":1168,"name":"plt","nodeType":"Attribute","startLoc":11,"text":"plt"},{"col":4,"comment":"Block dimensions for this array's data or None if it's not a dask\n array.\n ","endLoc":945,"header":"@property\n def chunks(self) -> Optional[Tuple[Tuple[int, ...], ...]]","id":1169,"name":"chunks","nodeType":"Function","startLoc":940,"text":"@property\n def chunks(self) -> Optional[Tuple[Tuple[int, ...], ...]]:\n \"\"\"Block dimensions for this array's data or None if it's not a dask\n array.\n \"\"\"\n return self.variable.chunks"},{"col":4,"comment":"Coerce this array's data into a dask arrays with the given chunks.\n\n If this variable is a non-dask array, it will be converted to dask\n array. If it's a dask array, it will be rechunked to the given chunk\n sizes.\n\n If neither chunks is not provided for one or more dimensions, chunk\n sizes along that dimension will not be updated; non-dask arrays will be\n converted into dask arrays with a single block.\n\n Parameters\n ----------\n chunks : int, tuple or mapping, optional\n Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or\n ``{'x': 5, 'y': 5}``.\n name_prefix : str, optional\n Prefix for the name of the new dask array.\n token : str, optional\n Token uniquely identifying this array.\n lock : optional\n Passed on to :py:func:`dask.array.from_array`, if the array is not\n already as dask array.\n\n Returns\n -------\n chunked : xarray.DataArray\n ","endLoc":993,"header":"def chunk(\n self,\n chunks: Union[\n None,\n Number,\n Tuple[Number, ...],\n Tuple[Tuple[Number, ...], ...],\n Mapping[Hashable, Union[None, Number, Tuple[Number, ...]]],\n ] = None,\n name_prefix: str = \"xarray-\",\n token: str = None,\n lock: bool = False,\n ) -> \"DataArray\"","id":1170,"name":"chunk","nodeType":"Function","startLoc":947,"text":"def chunk(\n self,\n chunks: Union[\n None,\n Number,\n Tuple[Number, ...],\n Tuple[Tuple[Number, ...], ...],\n Mapping[Hashable, Union[None, Number, Tuple[Number, ...]]],\n ] = None,\n name_prefix: str = \"xarray-\",\n token: str = None,\n lock: bool = False,\n ) -> \"DataArray\":\n \"\"\"Coerce this array's data into a dask arrays with the given chunks.\n\n If this variable is a non-dask array, it will be converted to dask\n array. 
If it's a dask array, it will be rechunked to the given chunk\n sizes.\n\n If neither chunks is not provided for one or more dimensions, chunk\n sizes along that dimension will not be updated; non-dask arrays will be\n converted into dask arrays with a single block.\n\n Parameters\n ----------\n chunks : int, tuple or mapping, optional\n Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or\n ``{'x': 5, 'y': 5}``.\n name_prefix : str, optional\n Prefix for the name of the new dask array.\n token : str, optional\n Token uniquely identifying this array.\n lock : optional\n Passed on to :py:func:`dask.array.from_array`, if the array is not\n already as dask array.\n\n Returns\n -------\n chunked : xarray.DataArray\n \"\"\"\n if isinstance(chunks, (tuple, list)):\n chunks = dict(zip(self.dims, chunks))\n\n ds = self._to_temp_dataset().chunk(\n chunks, name_prefix=name_prefix, token=token, lock=lock\n )\n return self._from_temp_dataset(ds)"},{"col":4,"comment":"Dictionary of xarray.DataArray objects corresponding to coordinate\n variables\n ","endLoc":1352,"header":"@property\n def coords(self) -> DatasetCoordinates","id":1171,"name":"coords","nodeType":"Function","startLoc":1347,"text":"@property\n def coords(self) -> DatasetCoordinates:\n \"\"\"Dictionary of xarray.DataArray objects corresponding to coordinate\n variables\n \"\"\"\n return DatasetCoordinates(self)"},{"attributeType":"null","col":17,"comment":"null","endLoc":13,"id":1172,"name":"xr","nodeType":"Attribute","startLoc":13,"text":"xr"},{"attributeType":"null","col":0,"comment":"null","endLoc":16,"id":1173,"name":"ds","nodeType":"Attribute","startLoc":16,"text":"ds"},{"col":4,"comment":"null","endLoc":195,"header":"def __init__(self, dataset: \"Dataset\")","id":1174,"name":"__init__","nodeType":"Function","startLoc":194,"text":"def __init__(self, dataset: \"Dataset\"):\n self._data = dataset"},{"col":4,"comment":"Dictionary of DataArray objects corresponding to data variables\n ","endLoc":1358,"header":"@property\n def data_vars(self) -> DataVariables","id":1175,"name":"data_vars","nodeType":"Function","startLoc":1354,"text":"@property\n def data_vars(self) -> DataVariables:\n \"\"\"Dictionary of DataArray objects corresponding to data variables\n \"\"\"\n return DataVariables(self)"},{"className":"VersioneerConfig","col":0,"comment":"Container for Versioneer configuration parameters.","endLoc":295,"id":1176,"nodeType":"Class","startLoc":294,"text":"class VersioneerConfig:\n \"\"\"Container for Versioneer configuration parameters.\"\"\""},{"className":"NotThisMethod","col":0,"comment":"Exception raised if a method is not valid for the current scenario.","endLoc":372,"id":1177,"nodeType":"Class","startLoc":371,"text":"class NotThisMethod(Exception):\n \"\"\"Exception raised if a method is not valid for the current scenario.\"\"\""},{"attributeType":"null","col":0,"comment":"null","endLoc":17,"id":1178,"name":"air","nodeType":"Attribute","startLoc":17,"text":"air"},{"col":4,"comment":"null","endLoc":348,"header":"def __init__(self, dataset: \"Dataset\")","id":1179,"name":"__init__","nodeType":"Function","startLoc":347,"text":"def __init__(self, dataset: \"Dataset\"):\n self._dataset = dataset"},{"col":4,"comment":"Given names of coordinates, reset them to become variables\n\n Parameters\n ----------\n names : hashable or iterable of hashables, optional\n Name(s) of non-index coordinates in this dataset to reset into\n variables. 
By default, all non-index coordinates are reset.\n drop : bool, optional\n If True, remove coordinates instead of converting them into\n variables.\n\n Returns\n -------\n Dataset\n ","endLoc":1432,"header":"def reset_coords(\n self,\n names: \"Union[Hashable, Iterable[Hashable], None]\" = None,\n drop: bool = False,\n inplace: bool = None,\n ) -> \"Dataset\"","id":1180,"name":"reset_coords","nodeType":"Function","startLoc":1392,"text":"def reset_coords(\n self,\n names: \"Union[Hashable, Iterable[Hashable], None]\" = None,\n drop: bool = False,\n inplace: bool = None,\n ) -> \"Dataset\":\n \"\"\"Given names of coordinates, reset them to become variables\n\n Parameters\n ----------\n names : hashable or iterable of hashables, optional\n Name(s) of non-index coordinates in this dataset to reset into\n variables. By default, all non-index coordinates are reset.\n drop : bool, optional\n If True, remove coordinates instead of converting them into\n variables.\n\n Returns\n -------\n Dataset\n \"\"\"\n _check_inplace(inplace)\n if names is None:\n names = self._coord_names - set(self.dims)\n else:\n if isinstance(names, str) or not isinstance(names, Iterable):\n names = [names]\n else:\n names = list(names)\n self._assert_all_in_dataset(names)\n bad_coords = set(names) & set(self.dims)\n if bad_coords:\n raise ValueError(\n \"cannot remove index coordinates with reset_coords: %s\" % bad_coords\n )\n obj = self.copy()\n obj._coord_names.difference_update(names)\n if drop:\n for name in names:\n del obj._variables[name]\n return obj"},{"attributeType":"null","col":0,"comment":"null","endLoc":19,"id":1181,"name":"f","nodeType":"Attribute","startLoc":19,"text":"f"},{"col":4,"comment":"Coerce all arrays in this dataset into dask arrays with the given\n chunks.\n\n Non-dask arrays in this dataset will be converted to dask arrays. Dask\n arrays will be rechunked to the given chunk sizes.\n\n If neither chunks is not provided for one or more dimensions, chunk\n sizes along that dimension will not be updated; non-dask arrays will be\n converted into dask arrays with a single block.\n\n Parameters\n ----------\n chunks : int or mapping, optional\n Chunk sizes along each dimension, e.g., ``5`` or\n ``{'x': 5, 'y': 5}``.\n name_prefix : str, optional\n Prefix for the name of any new dask arrays.\n token : str, optional\n Token uniquely identifying this dataset.\n lock : optional\n Passed on to :py:func:`dask.array.from_array`, if the array is not\n already as dask array.\n\n Returns\n -------\n chunked : xarray.Dataset\n ","endLoc":1741,"header":"def chunk(\n self,\n chunks: Union[\n None, Number, Mapping[Hashable, Union[None, Number, Tuple[Number, ...]]]\n ] = None,\n name_prefix: str = \"xarray-\",\n token: str = None,\n lock: bool = False,\n ) -> \"Dataset\"","id":1182,"name":"chunk","nodeType":"Function","startLoc":1675,"text":"def chunk(\n self,\n chunks: Union[\n None, Number, Mapping[Hashable, Union[None, Number, Tuple[Number, ...]]]\n ] = None,\n name_prefix: str = \"xarray-\",\n token: str = None,\n lock: bool = False,\n ) -> \"Dataset\":\n \"\"\"Coerce all arrays in this dataset into dask arrays with the given\n chunks.\n\n Non-dask arrays in this dataset will be converted to dask arrays. 
Dask\n arrays will be rechunked to the given chunk sizes.\n\n If neither chunks is not provided for one or more dimensions, chunk\n sizes along that dimension will not be updated; non-dask arrays will be\n converted into dask arrays with a single block.\n\n Parameters\n ----------\n chunks : int or mapping, optional\n Chunk sizes along each dimension, e.g., ``5`` or\n ``{'x': 5, 'y': 5}``.\n name_prefix : str, optional\n Prefix for the name of any new dask arrays.\n token : str, optional\n Token uniquely identifying this dataset.\n lock : optional\n Passed on to :py:func:`dask.array.from_array`, if the array is not\n already as dask array.\n\n Returns\n -------\n chunked : xarray.Dataset\n \"\"\"\n from dask.base import tokenize\n\n if isinstance(chunks, Number):\n chunks = dict.fromkeys(self.dims, chunks)\n\n if chunks is not None:\n bad_dims = chunks.keys() - self.dims.keys()\n if bad_dims:\n raise ValueError(\n \"some chunks keys are not dimensions on this \"\n \"object: %s\" % bad_dims\n )\n\n def selkeys(dict_, keys):\n if dict_ is None:\n return None\n return {d: dict_[d] for d in keys if d in dict_}\n\n def maybe_chunk(name, var, chunks):\n chunks = selkeys(chunks, var.dims)\n if not chunks:\n chunks = None\n if var.ndim > 0:\n token2 = tokenize(name, token if token else var._data)\n name2 = \"%s%s-%s\" % (name_prefix, name, token2)\n return var.chunk(chunks, name=name2, lock=lock)\n else:\n return var\n\n variables = {k: maybe_chunk(k, v, chunks) for k, v in self.variables.items()}\n return self._replace(variables)"},{"attributeType":"null","col":5,"comment":"null","endLoc":19,"id":1183,"name":"ax1","nodeType":"Attribute","startLoc":19,"text":"ax1"},{"col":4,"comment":"Store dataset contents to a backends.*DataStore object.\n ","endLoc":1441,"header":"def dump_to_store(self, store: \"AbstractDataStore\", **kwargs) -> None","id":1184,"name":"dump_to_store","nodeType":"Function","startLoc":1434,"text":"def dump_to_store(self, store: \"AbstractDataStore\", **kwargs) -> None:\n \"\"\"Store dataset contents to a backends.*DataStore object.\n \"\"\"\n from ..backends.api import dump_to_store\n\n # TODO: rename and/or cleanup this method to make it more consistent\n # with to_netcdf()\n dump_to_store(self, store, **kwargs)"},{"col":0,"comment":"Store dataset contents to a backends.*DataStore object.","endLoc":1117,"header":"def dump_to_store(\n dataset, store, writer=None, encoder=None, encoding=None, unlimited_dims=None\n)","id":1185,"name":"dump_to_store","nodeType":"Function","startLoc":1095,"text":"def dump_to_store(\n dataset, store, writer=None, encoder=None, encoding=None, unlimited_dims=None\n):\n \"\"\"Store dataset contents to a backends.*DataStore object.\"\"\"\n if writer is None:\n writer = ArrayWriter()\n\n if encoding is None:\n encoding = {}\n\n variables, attrs = conventions.encode_dataset_coordinates(dataset)\n\n check_encoding = set()\n for k, enc in encoding.items():\n # no need to shallow copy the variable again; that already happened\n # in encode_dataset_coordinates\n variables[k].encoding = enc\n check_encoding.add(k)\n\n if encoder:\n variables, attrs = encoder(variables, attrs)\n\n store.store(variables, attrs, check_encoding, writer, unlimited_dims=unlimited_dims)"},{"className":"VersioneerBadRootError","col":0,"comment":"The project root directory is unknown or missing key files.","endLoc":1440,"id":1186,"nodeType":"Class","startLoc":1439,"text":"class VersioneerBadRootError(Exception):\n \"\"\"The project root directory is unknown or missing key 
files.\"\"\""},{"col":0,"comment":"Get the project root directory.\n\n We require that all commands are run from the project root, i.e. the\n directory that contains setup.py, setup.cfg, and versioneer.py .\n ","endLoc":338,"header":"def get_root()","id":1187,"name":"get_root","nodeType":"Function","startLoc":298,"text":"def get_root():\n \"\"\"Get the project root directory.\n\n We require that all commands are run from the project root, i.e. the\n directory that contains setup.py, setup.cfg, and versioneer.py .\n \"\"\"\n root = os.path.realpath(os.path.abspath(os.getcwd()))\n setup_py = os.path.join(root, \"setup.py\")\n versioneer_py = os.path.join(root, \"versioneer.py\")\n if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):\n # allow 'python path/to/setup.py COMMAND'\n root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))\n setup_py = os.path.join(root, \"setup.py\")\n versioneer_py = os.path.join(root, \"versioneer.py\")\n if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):\n err = (\n \"Versioneer was unable to run the project root directory. \"\n \"Versioneer requires setup.py to be executed from \"\n \"its immediate directory (like 'python setup.py COMMAND'), \"\n \"or in a way that lets it use sys.argv[0] to find the root \"\n \"(like 'python path/to/setup.py COMMAND').\"\n )\n raise VersioneerBadRootError(err)\n try:\n # Certain runtime workflows (setup.py install/develop in a setuptools\n # tree) execute all dependencies in a single python process, so\n # \"versioneer\" may be imported multiple times, and python's shared\n # module-import table will cache the first one. So we can't use\n # os.path.dirname(__file__), as that will find whichever\n # versioneer.py was first imported, even in later projects.\n me = os.path.realpath(os.path.abspath(__file__))\n me_dir = os.path.normcase(os.path.splitext(me)[0])\n vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])\n if me_dir != vsr_dir:\n print(\n \"Warning: build in %s is using versioneer.py from %s\"\n % (os.path.dirname(me), versioneer_py)\n )\n except NameError:\n pass\n return root"},{"col":0,"comment":"Encode coordinates on the given dataset object into variable specific\n and global attributes.\n\n When possible, this is done according to CF conventions.\n\n Parameters\n ----------\n dataset : Dataset\n Object to encode.\n\n Returns\n -------\n variables : dict\n attrs : dict\n ","endLoc":714,"header":"def encode_dataset_coordinates(dataset)","id":1188,"name":"encode_dataset_coordinates","nodeType":"Function","startLoc":695,"text":"def encode_dataset_coordinates(dataset):\n \"\"\"Encode coordinates on the given dataset object into variable specific\n and global attributes.\n\n When possible, this is done according to CF conventions.\n\n Parameters\n ----------\n dataset : Dataset\n Object to encode.\n\n Returns\n -------\n variables : dict\n attrs : dict\n \"\"\"\n non_dim_coord_names = set(dataset.coords) - set(dataset.dims)\n return _encode_coordinates(\n dataset._variables, dataset.attrs, non_dim_coord_names=non_dim_coord_names\n )"},{"col":4,"comment":"\n Stack any number of existing dimensions into a single new dimension.\n\n New dimensions will be added at the end, and the corresponding\n coordinate variables will be combined into a MultiIndex.\n\n Parameters\n ----------\n dimensions : Mapping of the form new_name=(dim1, dim2, ...)\n Names of new dimensions, and the existing dimensions that they\n replace.\n **dimensions_kwargs:\n The keyword arguments form 
of ``dimensions``.\n One of dimensions or dimensions_kwargs must be provided.\n\n Returns\n -------\n stacked : DataArray\n DataArray with stacked data.\n\n Examples\n --------\n\n >>> arr = DataArray(np.arange(6).reshape(2, 3),\n ... coords=[('x', ['a', 'b']), ('y', [0, 1, 2])])\n >>> arr\n \n array([[0, 1, 2],\n [3, 4, 5]])\n Coordinates:\n * x (x) |S1 'a' 'b'\n * y (y) int64 0 1 2\n >>> stacked = arr.stack(z=('x', 'y'))\n >>> stacked.indexes['z']\n MultiIndex(levels=[['a', 'b'], [0, 1, 2]],\n codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],\n names=['x', 'y'])\n\n See also\n --------\n DataArray.unstack\n ","endLoc":1722,"header":"def stack(\n self,\n dimensions: Mapping[Hashable, Sequence[Hashable]] = None,\n **dimensions_kwargs: Sequence[Hashable]\n ) -> \"DataArray\"","id":1189,"name":"stack","nodeType":"Function","startLoc":1674,"text":"def stack(\n self,\n dimensions: Mapping[Hashable, Sequence[Hashable]] = None,\n **dimensions_kwargs: Sequence[Hashable]\n ) -> \"DataArray\":\n \"\"\"\n Stack any number of existing dimensions into a single new dimension.\n\n New dimensions will be added at the end, and the corresponding\n coordinate variables will be combined into a MultiIndex.\n\n Parameters\n ----------\n dimensions : Mapping of the form new_name=(dim1, dim2, ...)\n Names of new dimensions, and the existing dimensions that they\n replace.\n **dimensions_kwargs:\n The keyword arguments form of ``dimensions``.\n One of dimensions or dimensions_kwargs must be provided.\n\n Returns\n -------\n stacked : DataArray\n DataArray with stacked data.\n\n Examples\n --------\n\n >>> arr = DataArray(np.arange(6).reshape(2, 3),\n ... coords=[('x', ['a', 'b']), ('y', [0, 1, 2])])\n >>> arr\n \n array([[0, 1, 2],\n [3, 4, 5]])\n Coordinates:\n * x (x) |S1 'a' 'b'\n * y (y) int64 0 1 2\n >>> stacked = arr.stack(z=('x', 'y'))\n >>> stacked.indexes['z']\n MultiIndex(levels=[['a', 'b'], [0, 1, 2]],\n codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],\n names=['x', 'y'])\n\n See also\n --------\n DataArray.unstack\n \"\"\"\n ds = self._to_temp_dataset().stack(dimensions, **dimensions_kwargs)\n return self._from_temp_dataset(ds)"},{"attributeType":"null","col":10,"comment":"null","endLoc":19,"id":1190,"name":"ax2","nodeType":"Attribute","startLoc":19,"text":"ax2"},{"col":0,"comment":"null","endLoc":692,"header":"def _encode_coordinates(variables, attributes, non_dim_coord_names)","id":1191,"name":"_encode_coordinates","nodeType":"Function","startLoc":637,"text":"def _encode_coordinates(variables, attributes, non_dim_coord_names):\n # calculate global and variable specific coordinates\n non_dim_coord_names = set(non_dim_coord_names)\n\n for name in list(non_dim_coord_names):\n if isinstance(name, str) and \" \" in name:\n warnings.warn(\n \"coordinate {!r} has a space in its name, which means it \"\n \"cannot be marked as a coordinate on disk and will be \"\n \"saved as a data variable instead\".format(name),\n SerializationWarning,\n stacklevel=6,\n )\n non_dim_coord_names.discard(name)\n\n global_coordinates = non_dim_coord_names.copy()\n variable_coordinates = defaultdict(set)\n for coord_name in non_dim_coord_names:\n target_dims = variables[coord_name].dims\n for k, v in variables.items():\n if (\n k not in non_dim_coord_names\n and k not in v.dims\n and set(target_dims) <= set(v.dims)\n ):\n variable_coordinates[k].add(coord_name)\n global_coordinates.discard(coord_name)\n\n variables = {k: v.copy(deep=False) for k, v in variables.items()}\n\n # These coordinates are saved according to CF 
conventions\n for var_name, coord_names in variable_coordinates.items():\n attrs = variables[var_name].attrs\n if \"coordinates\" in attrs:\n raise ValueError(\n \"cannot serialize coordinates because variable \"\n \"%s already has an attribute 'coordinates'\" % var_name\n )\n attrs[\"coordinates\"] = \" \".join(map(str, coord_names))\n\n # These coordinates are not associated with any particular variables, so we\n # save them under a global 'coordinates' attribute so xarray can roundtrip\n # the dataset faithfully. Because this serialization goes beyond CF\n # conventions, only do it if necessary.\n # Reference discussion:\n # http://mailman.cgd.ucar.edu/pipermail/cf-metadata/2014/057771.html\n if global_coordinates:\n attributes = dict(attributes)\n if \"coordinates\" in attributes:\n raise ValueError(\n \"cannot serialize coordinates because the global \"\n \"attribute 'coordinates' already exists\"\n )\n attributes[\"coordinates\"] = \" \".join(map(str, global_coordinates))\n\n return variables, attributes"},{"attributeType":"null","col":17,"comment":"null","endLoc":19,"id":1194,"name":"ax3","nodeType":"Attribute","startLoc":19,"text":"ax3"},{"attributeType":"null","col":22,"comment":"null","endLoc":19,"id":1195,"name":"ax4","nodeType":"Attribute","startLoc":19,"text":"ax4"},{"col":4,"comment":"Return a new DataArray whose data is given by selecting index\n labels along the specified dimension(s).\n\n .. warning::\n\n Do not try to assign values when using any of the indexing methods\n ``isel`` or ``sel``::\n\n da = xr.DataArray([0, 1, 2, 3], dims=['x'])\n # DO NOT do this\n da.isel(x=[0, 1, 2])[1] = -1\n\n Assigning values with the chained indexing using ``.sel`` or\n ``.isel`` fails silently.\n\n See Also\n --------\n Dataset.sel\n DataArray.isel\n\n ","endLoc":1049,"header":"def sel(\n self,\n indexers: Mapping[Hashable, Any] = None,\n method: str = None,\n tolerance=None,\n drop: bool = False,\n **indexers_kwargs: Any\n ) -> \"DataArray\"","id":1196,"name":"sel","nodeType":"Function","startLoc":1013,"text":"def sel(\n self,\n indexers: Mapping[Hashable, Any] = None,\n method: str = None,\n tolerance=None,\n drop: bool = False,\n **indexers_kwargs: Any\n ) -> \"DataArray\":\n \"\"\"Return a new DataArray whose data is given by selecting index\n labels along the specified dimension(s).\n\n .. warning::\n\n Do not try to assign values when using any of the indexing methods\n ``isel`` or ``sel``::\n\n da = xr.DataArray([0, 1, 2, 3], dims=['x'])\n # DO NOT do this\n da.isel(x=[0, 1, 2])[1] = -1\n\n Assigning values with the chained indexing using ``.sel`` or\n ``.isel`` fails silently.\n\n See Also\n --------\n Dataset.sel\n DataArray.isel\n\n \"\"\"\n ds = self._to_temp_dataset().sel(\n indexers=indexers,\n drop=drop,\n method=method,\n tolerance=tolerance,\n **indexers_kwargs\n )\n return self._from_temp_dataset(ds)"},{"attributeType":"null","col":0,"comment":"null","endLoc":27,"id":1197,"name":"airc","nodeType":"Attribute","startLoc":27,"text":"airc"},{"col":4,"comment":"Returns a new dataset with each array indexed by tick labels\n along the specified dimension(s).\n\n In contrast to `Dataset.isel`, indexers for this method should use\n labels instead of integers.\n\n Under the hood, this method is powered by using pandas's powerful Index\n objects. This makes label based indexing essentially just as fast as\n using integer indexing.\n\n It also means this method uses pandas's (well documented) logic for\n indexing. 
This means you can use string shortcuts for datetime indexes\n (e.g., '2000-01' to select all values in January 2000). It also means\n that slices are treated as inclusive of both the start and stop values,\n unlike normal Python indexing.\n\n Parameters\n ----------\n indexers : dict, optional\n A dict with keys matching dimensions and values given\n by scalars, slices or arrays of tick labels. For dimensions with\n multi-index, the indexer may also be a dict-like object with keys\n matching index level names.\n If DataArrays are passed as indexers, xarray-style indexing will be\n carried out. See :ref:`indexing` for the details.\n One of indexers or indexers_kwargs must be provided.\n method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional\n Method to use for inexact matches:\n\n * None (default): only exact matches\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n drop : bool, optional\n If ``drop=True``, drop coordinates variables in `indexers` instead\n of making them scalar.\n **indexers_kwarg : {dim: indexer, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n Returns\n -------\n obj : Dataset\n A new Dataset with the same contents as this dataset, except each\n variable and dimension is indexed by the appropriate indexers.\n If indexer DataArrays have coordinates that do not conflict with\n this object, then these coordinates will be attached.\n In general, each array's data will be a view of the array's data\n in this dataset, unless vectorized indexing was triggered by using\n an array indexer, in which case the data will be a copy.\n\n\n See Also\n --------\n Dataset.isel\n DataArray.sel\n ","endLoc":2003,"header":"def sel(\n self,\n indexers: Mapping[Hashable, Any] = None,\n method: str = None,\n tolerance: Number = None,\n drop: bool = False,\n **indexers_kwargs: Any,\n ) -> \"Dataset\"","id":1198,"name":"sel","nodeType":"Function","startLoc":1929,"text":"def sel(\n self,\n indexers: Mapping[Hashable, Any] = None,\n method: str = None,\n tolerance: Number = None,\n drop: bool = False,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Returns a new dataset with each array indexed by tick labels\n along the specified dimension(s).\n\n In contrast to `Dataset.isel`, indexers for this method should use\n labels instead of integers.\n\n Under the hood, this method is powered by using pandas's powerful Index\n objects. This makes label based indexing essentially just as fast as\n using integer indexing.\n\n It also means this method uses pandas's (well documented) logic for\n indexing. This means you can use string shortcuts for datetime indexes\n (e.g., '2000-01' to select all values in January 2000). It also means\n that slices are treated as inclusive of both the start and stop values,\n unlike normal Python indexing.\n\n Parameters\n ----------\n indexers : dict, optional\n A dict with keys matching dimensions and values given\n by scalars, slices or arrays of tick labels. 
For dimensions with\n multi-index, the indexer may also be a dict-like object with keys\n matching index level names.\n If DataArrays are passed as indexers, xarray-style indexing will be\n carried out. See :ref:`indexing` for the details.\n One of indexers or indexers_kwargs must be provided.\n method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional\n Method to use for inexact matches:\n\n * None (default): only exact matches\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n drop : bool, optional\n If ``drop=True``, drop coordinates variables in `indexers` instead\n of making them scalar.\n **indexers_kwarg : {dim: indexer, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n Returns\n -------\n obj : Dataset\n A new Dataset with the same contents as this dataset, except each\n variable and dimension is indexed by the appropriate indexers.\n If indexer DataArrays have coordinates that do not conflict with\n this object, then these coordinates will be attached.\n In general, each array's data will be a view of the array's data\n in this dataset, unless vectorized indexing was triggered by using\n an array indexer, in which case the data will be a copy.\n\n\n See Also\n --------\n Dataset.isel\n DataArray.sel\n \"\"\"\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"sel\")\n pos_indexers, new_indexes = remap_label_indexers(\n self, indexers=indexers, method=method, tolerance=tolerance\n )\n result = self.isel(indexers=pos_indexers, drop=drop)\n return result._overwrite_indexes(new_indexes)"},{"col":0,"comment":"Read the project setup.cfg file to determine Versioneer config.","endLoc":368,"header":"def get_config_from_root(root)","id":1199,"name":"get_config_from_root","nodeType":"Function","startLoc":341,"text":"def get_config_from_root(root):\n \"\"\"Read the project setup.cfg file to determine Versioneer config.\"\"\"\n # This might raise EnvironmentError (if setup.cfg is missing), or\n # configparser.NoSectionError (if it lacks a [versioneer] section), or\n # configparser.NoOptionError (if it lacks \"VCS=\"). 
See the docstring at\n # the top of versioneer.py for instructions on writing your setup.cfg .\n setup_cfg = os.path.join(root, \"setup.cfg\")\n parser = configparser.SafeConfigParser()\n with open(setup_cfg, \"r\") as f:\n parser.readfp(f)\n VCS = parser.get(\"versioneer\", \"VCS\") # mandatory\n\n def get(parser, name):\n if parser.has_option(\"versioneer\", name):\n return parser.get(\"versioneer\", name)\n return None\n\n cfg = VersioneerConfig()\n cfg.VCS = VCS\n cfg.style = get(parser, \"style\") or \"\"\n cfg.versionfile_source = get(parser, \"versionfile_source\")\n cfg.versionfile_build = get(parser, \"versionfile_build\")\n cfg.tag_prefix = get(parser, \"tag_prefix\")\n if cfg.tag_prefix in (\"''\", '\"\"'):\n cfg.tag_prefix = \"\"\n cfg.parentdir_prefix = get(parser, \"parentdir_prefix\")\n cfg.verbose = get(parser, \"verbose\")\n return cfg"},{"col":0,"comment":"","endLoc":9,"header":"plot_colorbar_center.py#","id":1200,"name":"","nodeType":"Function","startLoc":2,"text":"\"\"\"\n==================\nCentered colormaps\n==================\n\nxarray's automatic colormaps choice\n\n\"\"\"\n\nds = xr.tutorial.load_dataset(\"air_temperature\")\n\nair = ds.air.isel(time=0)\n\nf, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(8, 6))\n\nair.plot(ax=ax1, cbar_kwargs={\"label\": \"K\"})\n\nax1.set_title(\"Kelvins: default\")\n\nax2.set_xlabel(\"\")\n\nairc = air - 273.15\n\nairc.plot(ax=ax2, cbar_kwargs={\"label\": \"°C\"})\n\nax2.set_title(\"Celsius: default\")\n\nax2.set_xlabel(\"\")\n\nax2.set_ylabel(\"\")\n\nair.plot(ax=ax3, center=273.15, cbar_kwargs={\"label\": \"K\"})\n\nax3.set_title(\"Kelvins: center=273.15\")\n\nairc.plot(ax=ax4, center=False, cbar_kwargs={\"label\": \"°C\"})\n\nax4.set_title(\"Celsius: center=False\")\n\nax4.set_ylabel(\"\")\n\nplt.tight_layout()\n\nplt.show()"},{"col":0,"comment":"Remap indexers from obj.coords.\n If indexer is an instance of DataArray and it has coordinate, then this coordinate\n will be attached to pos_indexers.\n\n Returns\n -------\n pos_indexers: Same type of indexers.\n np.ndarray or Variable or DataArray\n new_indexes: mapping of new dimensional-coordinate.\n ","endLoc":403,"header":"def remap_label_indexers(\n obj: Union[\"DataArray\", \"Dataset\"],\n indexers: Mapping[Hashable, Any] = None,\n method: str = None,\n tolerance=None,\n **indexers_kwargs: Any\n) -> Tuple[dict, dict]","id":1201,"name":"remap_label_indexers","nodeType":"Function","startLoc":365,"text":"def remap_label_indexers(\n obj: Union[\"DataArray\", \"Dataset\"],\n indexers: Mapping[Hashable, Any] = None,\n method: str = None,\n tolerance=None,\n **indexers_kwargs: Any\n) -> Tuple[dict, dict]: # TODO more precise return type after annotations in indexing\n \"\"\"Remap indexers from obj.coords.\n If indexer is an instance of DataArray and it has coordinate, then this coordinate\n will be attached to pos_indexers.\n\n Returns\n -------\n pos_indexers: Same type of indexers.\n np.ndarray or Variable or DataArray\n new_indexes: mapping of new dimensional-coordinate.\n \"\"\"\n from .dataarray import DataArray\n\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"remap_label_indexers\")\n\n v_indexers = {\n k: v.variable.data if isinstance(v, DataArray) else v\n for k, v in indexers.items()\n }\n\n pos_indexers, new_indexes = indexing.remap_label_indexers(\n obj, v_indexers, method=method, tolerance=tolerance\n )\n # attach indexer's coordinate to pos_indexers\n for k, v in indexers.items():\n if isinstance(v, Variable):\n pos_indexers[k] = 
Variable(v.dims, pos_indexers[k])\n elif isinstance(v, DataArray):\n # drop coordinates found in indexers since .sel() already\n # ensures alignments\n coords = {k: var for k, var in v._coords.items() if k not in indexers}\n pos_indexers[k] = DataArray(pos_indexers[k], coords=coords, dims=v.dims)\n return pos_indexers, new_indexes"},{"id":1203,"name":".github/ISSUE_TEMPLATE","nodeType":"Package"},{"id":1204,"name":"bug_report.md","nodeType":"TextFile","path":".github/ISSUE_TEMPLATE","text":"---\nname: Bug report / Feature request\nabout: 'Post a problem or idea'\ntitle: ''\nlabels: ''\nassignees: ''\n\n---\n\n#### MCVE Code Sample\n\n\n```python\n# Your code here\n\n```\n\n#### Expected Output\n\n\n#### Problem Description\n\n\n\n#### Output of ``xr.show_versions()``\n
\n# Paste the output of xr.show_versions() here\n\n
\n"},{"id":1205,"name":"README.rst","nodeType":"TextFile","path":"","text":"xarray: N-D labeled arrays and datasets\n=======================================\n\n.. image:: https://dev.azure.com/xarray/xarray/_apis/build/status/pydata.xarray?branchName=master\n :target: https://dev.azure.com/xarray/xarray/_build/latest?definitionId=1&branchName=master\n.. image:: https://codecov.io/gh/pydata/xarray/branch/master/graph/badge.svg\n :target: https://codecov.io/gh/pydata/xarray\n.. image:: https://readthedocs.org/projects/xray/badge/?version=latest\n :target: https://xarray.pydata.org/\n.. image:: https://img.shields.io/badge/benchmarked%20by-asv-green.svg?style=flat\n :target: https://pandas.pydata.org/speed/xarray/\n.. image:: https://img.shields.io/pypi/v/xarray.svg\n :target: https://pypi.python.org/pypi/xarray/\n.. image:: https://img.shields.io/badge/code%20style-black-000000.svg\n :target: https://github.com/python/black\n\n\n**xarray** (formerly **xray**) is an open source project and Python package\nthat makes working with labelled multi-dimensional arrays simple,\nefficient, and fun!\n\nXarray introduces labels in the form of dimensions, coordinates and\nattributes on top of raw NumPy_-like arrays, which allows for a more\nintuitive, more concise, and less error-prone developer experience.\nThe package includes a large and growing library of domain-agnostic functions\nfor advanced analytics and visualization with these data structures.\n\nXarray was inspired by and borrows heavily from pandas_, the popular data\nanalysis package focused on labelled tabular data.\nIt is particularly tailored to working with netCDF_ files, which were the\nsource of xarray's data model, and integrates tightly with dask_ for parallel\ncomputing.\n\n.. _NumPy: https://www.numpy.org\n.. _pandas: https://pandas.pydata.org\n.. _dask: https://dask.org\n.. _netCDF: https://www.unidata.ucar.edu/software/netcdf\n\nWhy xarray?\n-----------\n\nMulti-dimensional (a.k.a. N-dimensional, ND) arrays (sometimes called\n\"tensors\") are an essential part of computational science.\nThey are encountered in a wide range of fields, including physics, astronomy,\ngeoscience, bioinformatics, engineering, finance, and deep learning.\nIn Python, NumPy_ provides the fundamental data structure and API for\nworking with raw ND arrays.\nHowever, real-world datasets are usually more than just raw numbers;\nthey have labels which encode information about how the array values map\nto locations in space, time, etc.\n\nXarray doesn't just keep track of labels on arrays -- it uses them to provide a\npowerful and concise interface. 
For example:\n\n- Apply operations over dimensions by name: ``x.sum('time')``.\n- Select values by label instead of integer location:\n ``x.loc['2014-01-01']`` or ``x.sel(time='2014-01-01')``.\n- Mathematical operations (e.g., ``x - y``) vectorize across multiple\n dimensions (array broadcasting) based on dimension names, not shape.\n- Flexible split-apply-combine operations with groupby:\n ``x.groupby('time.dayofyear').mean()``.\n- Database like alignment based on coordinate labels that smoothly\n handles missing values: ``x, y = xr.align(x, y, join='outer')``.\n- Keep track of arbitrary metadata in the form of a Python dictionary:\n ``x.attrs``.\n\nDocumentation\n-------------\n\nLearn more about xarray in its official documentation at https://xarray.pydata.org/\n\nContributing\n------------\n\nYou can find information about contributing to xarray at our `Contributing page `_.\n\nGet in touch\n------------\n\n- Ask usage questions (\"How do I?\") on `StackOverflow`_.\n- Report bugs, suggest features or view the source code `on GitHub`_.\n- For less well defined questions or ideas, or to announce other projects of\n interest to xarray users, use the `mailing list`_.\n\n.. _StackOverFlow: https://stackoverflow.com/questions/tagged/python-xarray\n.. _mailing list: https://groups.google.com/forum/#!forum/xarray\n.. _on GitHub: https://github.com/pydata/xarray\n\nNumFOCUS\n--------\n\n.. image:: https://numfocus.org/wp-content/uploads/2017/07/NumFocus_LRG.png\n :scale: 25 %\n :target: https://numfocus.org/\n\nXarray is a fiscally sponsored project of NumFOCUS_, a nonprofit dedicated\nto supporting the open source scientific computing community. If you like\nXarray and want to support our mission, please consider making a donation_\nto support our efforts.\n\n.. _donation: https://numfocus.salsalabs.org/donate-to-xarray/\n\nHistory\n-------\n\nxarray is an evolution of an internal tool developed at `The Climate\nCorporation`__. It was originally written by Climate Corp researchers Stephan\nHoyer, Alex Kleeman and Eugene Brevdo and was released as open source in\nMay 2014. The project was renamed from \"xray\" in January 2016. Xarray became a\nfiscally sponsored project of NumFOCUS_ in August 2018.\n\n__ http://climate.com/\n.. _NumFOCUS: https://numfocus.org\n\nLicense\n-------\n\nCopyright 2014-2019, xarray Developers\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\nxarray bundles portions of pandas, NumPy and Seaborn, all of which are available\nunder a \"3-clause BSD\" license:\n- pandas: setup.py, xarray/util/print_versions.py\n- NumPy: xarray/core/npcompat.py\n- Seaborn: _determine_cmap_params in xarray/core/plot/utils.py\n\nxarray also bundles portions of CPython, which is available under the \"Python\nSoftware Foundation License\" in xarray/core/pycompat.py.\n\nThe full text of these licenses are included in the licenses directory.\n"},{"col":0,"comment":"Given an xarray data object and label based indexers, return a mapping\n of equivalent location based indexers. 
Also return a mapping of updated\n pandas index objects (in case of multi-index level drop).\n ","endLoc":266,"header":"def remap_label_indexers(data_obj, indexers, method=None, tolerance=None)","id":1206,"name":"remap_label_indexers","nodeType":"Function","startLoc":234,"text":"def remap_label_indexers(data_obj, indexers, method=None, tolerance=None):\n \"\"\"Given an xarray data object and label based indexers, return a mapping\n of equivalent location based indexers. Also return a mapping of updated\n pandas index objects (in case of multi-index level drop).\n \"\"\"\n if method is not None and not isinstance(method, str):\n raise TypeError(\"``method`` must be a string\")\n\n pos_indexers = {}\n new_indexes = {}\n\n dim_indexers = get_dim_indexers(data_obj, indexers)\n for dim, label in dim_indexers.items():\n try:\n index = data_obj.indexes[dim]\n except KeyError:\n # no index for this dimension: reuse the provided labels\n if method is not None or tolerance is not None:\n raise ValueError(\n \"cannot supply ``method`` or ``tolerance`` \"\n \"when the indexed dimension does not have \"\n \"an associated coordinate.\"\n )\n pos_indexers[dim] = label\n else:\n coords_dtype = data_obj.coords[dim].dtype\n label = maybe_cast_to_coords_dtype(label, coords_dtype)\n idxr, new_idx = convert_label_indexer(index, label, dim, method, tolerance)\n pos_indexers[dim] = idxr\n if new_idx is not None:\n new_indexes[dim] = new_idx\n\n return pos_indexers, new_indexes"},{"col":0,"comment":"Given a xarray data object and label based indexers, return a mapping\n of label indexers with only dimension names as keys.\n\n It groups multiple level indexers given on a multi-index dimension\n into a single, dictionary indexer for that dimension (Raise a ValueError\n if it is not possible).\n ","endLoc":231,"header":"def get_dim_indexers(data_obj, indexers)","id":1207,"name":"get_dim_indexers","nodeType":"Function","startLoc":197,"text":"def get_dim_indexers(data_obj, indexers):\n \"\"\"Given a xarray data object and label based indexers, return a mapping\n of label indexers with only dimension names as keys.\n\n It groups multiple level indexers given on a multi-index dimension\n into a single, dictionary indexer for that dimension (Raise a ValueError\n if it is not possible).\n \"\"\"\n invalid = [\n k\n for k in indexers\n if k not in data_obj.dims and k not in data_obj._level_coords\n ]\n if invalid:\n raise ValueError(\"dimensions or multi-index levels %r do not exist\" % invalid)\n\n level_indexers = defaultdict(dict)\n dim_indexers = {}\n for key, label in indexers.items():\n dim, = data_obj[key].dims\n if key != dim:\n # assume here multi-index level indexer\n level_indexers[dim][key] = label\n else:\n dim_indexers[key] = label\n\n for dim, level_labels in level_indexers.items():\n if dim_indexers.get(dim, False):\n raise ValueError(\n \"cannot combine multi-index level indexers \"\n \"with an indexer for dimension %s\" % dim\n )\n dim_indexers[dim] = level_labels\n\n return dim_indexers"},{"col":4,"comment":"\n Stack any number of existing dimensions into a single new dimension.\n\n New dimensions will be added at the end, and the corresponding\n coordinate variables will be combined into a MultiIndex.\n\n Parameters\n ----------\n dimensions : Mapping of the form new_name=(dim1, dim2, ...)\n Names of new dimensions, and the existing dimensions that they\n replace.\n **dimensions_kwargs:\n The keyword arguments form of ``dimensions``.\n One of dimensions or dimensions_kwargs must be provided.\n\n 
Returns\n -------\n stacked : Dataset\n Dataset with stacked data.\n\n See also\n --------\n Dataset.unstack\n ","endLoc":3204,"header":"def stack(\n self,\n dimensions: Mapping[Hashable, Sequence[Hashable]] = None,\n **dimensions_kwargs: Sequence[Hashable],\n ) -> \"Dataset\"","id":1208,"name":"stack","nodeType":"Function","startLoc":3171,"text":"def stack(\n self,\n dimensions: Mapping[Hashable, Sequence[Hashable]] = None,\n **dimensions_kwargs: Sequence[Hashable],\n ) -> \"Dataset\":\n \"\"\"\n Stack any number of existing dimensions into a single new dimension.\n\n New dimensions will be added at the end, and the corresponding\n coordinate variables will be combined into a MultiIndex.\n\n Parameters\n ----------\n dimensions : Mapping of the form new_name=(dim1, dim2, ...)\n Names of new dimensions, and the existing dimensions that they\n replace.\n **dimensions_kwargs:\n The keyword arguments form of ``dimensions``.\n One of dimensions or dimensions_kwargs must be provided.\n\n Returns\n -------\n stacked : Dataset\n Dataset with stacked data.\n\n See also\n --------\n Dataset.unstack\n \"\"\"\n dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, \"stack\")\n result = self\n for new_dim, dims in dimensions.items():\n result = result._stack_once(dims, new_dim)\n return result"},{"col":0,"comment":"Decorator to mark a method as the handler for a particular VCS.","endLoc":390,"header":"def register_vcs_handler(vcs, method)","id":1209,"name":"register_vcs_handler","nodeType":"Function","startLoc":380,"text":"def register_vcs_handler(vcs, method): # decorator\n \"\"\"Decorator to mark a method as the handler for a particular VCS.\"\"\"\n\n def decorate(f):\n \"\"\"Store f in HANDLERS[vcs][method].\"\"\"\n if vcs not in HANDLERS:\n HANDLERS[vcs] = {}\n HANDLERS[vcs][method] = f\n return f\n\n return decorate"},{"col":0,"comment":"Call the given command(s).","endLoc":429,"header":"def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None)","id":1210,"name":"run_command","nodeType":"Function","startLoc":393,"text":"def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):\n \"\"\"Call the given command(s).\"\"\"\n assert isinstance(commands, list)\n p = None\n for c in commands:\n try:\n dispcmd = str([c] + args)\n # remember shell=False, so use git.cmd on windows, not just git\n p = subprocess.Popen(\n [c] + args,\n cwd=cwd,\n env=env,\n stdout=subprocess.PIPE,\n stderr=(subprocess.PIPE if hide_stderr else None),\n )\n break\n except OSError:\n e = sys.exc_info()[1]\n if e.errno == errno.ENOENT:\n continue\n if verbose:\n print(\"unable to run %s\" % dispcmd)\n print(e)\n return None, None\n else:\n if verbose:\n print(\"unable to find command, tried %s\" % (commands,))\n return None, None\n stdout = p.communicate()[0].strip()\n if sys.version_info[0] >= 3:\n stdout = stdout.decode()\n if p.returncode != 0:\n if verbose:\n print(\"unable to run %s (error)\" % dispcmd)\n print(\"stdout was %s\" % stdout)\n return None, p.returncode\n return stdout, p.returncode"},{"fileName":"dataset_io.py","filePath":"asv_bench/benchmarks","id":1211,"nodeType":"File","text":"import os\n\nimport numpy as np\nimport pandas as pd\n\nimport xarray as xr\n\nfrom . 
import randint, randn, requires_dask\n\ntry:\n import dask\n import dask.multiprocessing\nexcept ImportError:\n pass\n\n\nos.environ[\"HDF5_USE_FILE_LOCKING\"] = \"FALSE\"\n\n\nclass IOSingleNetCDF:\n \"\"\"\n A few examples that benchmark reading/writing a single netCDF file with\n xarray\n \"\"\"\n\n timeout = 300.0\n repeat = 1\n number = 5\n\n def make_ds(self):\n\n # single Dataset\n self.ds = xr.Dataset()\n self.nt = 1000\n self.nx = 90\n self.ny = 45\n\n self.block_chunks = {\n \"time\": self.nt / 4,\n \"lon\": self.nx / 3,\n \"lat\": self.ny / 3,\n }\n\n self.time_chunks = {\"time\": int(self.nt / 36)}\n\n times = pd.date_range(\"1970-01-01\", periods=self.nt, freq=\"D\")\n lons = xr.DataArray(\n np.linspace(0, 360, self.nx),\n dims=(\"lon\",),\n attrs={\"units\": \"degrees east\", \"long_name\": \"longitude\"},\n )\n lats = xr.DataArray(\n np.linspace(-90, 90, self.ny),\n dims=(\"lat\",),\n attrs={\"units\": \"degrees north\", \"long_name\": \"latitude\"},\n )\n self.ds[\"foo\"] = xr.DataArray(\n randn((self.nt, self.nx, self.ny), frac_nan=0.2),\n coords={\"lon\": lons, \"lat\": lats, \"time\": times},\n dims=(\"time\", \"lon\", \"lat\"),\n name=\"foo\",\n encoding=None,\n attrs={\"units\": \"foo units\", \"description\": \"a description\"},\n )\n self.ds[\"bar\"] = xr.DataArray(\n randn((self.nt, self.nx, self.ny), frac_nan=0.2),\n coords={\"lon\": lons, \"lat\": lats, \"time\": times},\n dims=(\"time\", \"lon\", \"lat\"),\n name=\"bar\",\n encoding=None,\n attrs={\"units\": \"bar units\", \"description\": \"a description\"},\n )\n self.ds[\"baz\"] = xr.DataArray(\n randn((self.nx, self.ny), frac_nan=0.2).astype(np.float32),\n coords={\"lon\": lons, \"lat\": lats},\n dims=(\"lon\", \"lat\"),\n name=\"baz\",\n encoding=None,\n attrs={\"units\": \"baz units\", \"description\": \"a description\"},\n )\n\n self.ds.attrs = {\"history\": \"created for xarray benchmarking\"}\n\n self.oinds = {\n \"time\": randint(0, self.nt, 120),\n \"lon\": randint(0, self.nx, 20),\n \"lat\": randint(0, self.ny, 10),\n }\n self.vinds = {\n \"time\": xr.DataArray(randint(0, self.nt, 120), dims=\"x\"),\n \"lon\": xr.DataArray(randint(0, self.nx, 120), dims=\"x\"),\n \"lat\": slice(3, 20),\n }\n\n\nclass IOWriteSingleNetCDF3(IOSingleNetCDF):\n def setup(self):\n self.format = \"NETCDF3_64BIT\"\n self.make_ds()\n\n def time_write_dataset_netcdf4(self):\n self.ds.to_netcdf(\"test_netcdf4_write.nc\", engine=\"netcdf4\", format=self.format)\n\n def time_write_dataset_scipy(self):\n self.ds.to_netcdf(\"test_scipy_write.nc\", engine=\"scipy\", format=self.format)\n\n\nclass IOReadSingleNetCDF4(IOSingleNetCDF):\n def setup(self):\n\n self.make_ds()\n\n self.filepath = \"test_single_file.nc4.nc\"\n self.format = \"NETCDF4\"\n self.ds.to_netcdf(self.filepath, format=self.format)\n\n def time_load_dataset_netcdf4(self):\n xr.open_dataset(self.filepath, engine=\"netcdf4\").load()\n\n def time_orthogonal_indexing(self):\n ds = xr.open_dataset(self.filepath, engine=\"netcdf4\")\n ds = ds.isel(**self.oinds).load()\n\n def time_vectorized_indexing(self):\n ds = xr.open_dataset(self.filepath, engine=\"netcdf4\")\n ds = ds.isel(**self.vinds).load()\n\n\nclass IOReadSingleNetCDF3(IOReadSingleNetCDF4):\n def setup(self):\n\n self.make_ds()\n\n self.filepath = \"test_single_file.nc3.nc\"\n self.format = \"NETCDF3_64BIT\"\n self.ds.to_netcdf(self.filepath, format=self.format)\n\n def time_load_dataset_scipy(self):\n xr.open_dataset(self.filepath, engine=\"scipy\").load()\n\n def time_orthogonal_indexing(self):\n ds = 
xr.open_dataset(self.filepath, engine=\"scipy\")\n ds = ds.isel(**self.oinds).load()\n\n def time_vectorized_indexing(self):\n ds = xr.open_dataset(self.filepath, engine=\"scipy\")\n ds = ds.isel(**self.vinds).load()\n\n\nclass IOReadSingleNetCDF4Dask(IOSingleNetCDF):\n def setup(self):\n\n requires_dask()\n\n self.make_ds()\n\n self.filepath = \"test_single_file.nc4.nc\"\n self.format = \"NETCDF4\"\n self.ds.to_netcdf(self.filepath, format=self.format)\n\n def time_load_dataset_netcdf4_with_block_chunks(self):\n xr.open_dataset(\n self.filepath, engine=\"netcdf4\", chunks=self.block_chunks\n ).load()\n\n def time_load_dataset_netcdf4_with_block_chunks_oindexing(self):\n ds = xr.open_dataset(self.filepath, engine=\"netcdf4\", chunks=self.block_chunks)\n ds = ds.isel(**self.oinds).load()\n\n def time_load_dataset_netcdf4_with_block_chunks_vindexing(self):\n ds = xr.open_dataset(self.filepath, engine=\"netcdf4\", chunks=self.block_chunks)\n ds = ds.isel(**self.vinds).load()\n\n def time_load_dataset_netcdf4_with_block_chunks_multiprocessing(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_dataset(\n self.filepath, engine=\"netcdf4\", chunks=self.block_chunks\n ).load()\n\n def time_load_dataset_netcdf4_with_time_chunks(self):\n xr.open_dataset(self.filepath, engine=\"netcdf4\", chunks=self.time_chunks).load()\n\n def time_load_dataset_netcdf4_with_time_chunks_multiprocessing(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_dataset(\n self.filepath, engine=\"netcdf4\", chunks=self.time_chunks\n ).load()\n\n\nclass IOReadSingleNetCDF3Dask(IOReadSingleNetCDF4Dask):\n def setup(self):\n\n requires_dask()\n\n self.make_ds()\n\n self.filepath = \"test_single_file.nc3.nc\"\n self.format = \"NETCDF3_64BIT\"\n self.ds.to_netcdf(self.filepath, format=self.format)\n\n def time_load_dataset_scipy_with_block_chunks(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_dataset(\n self.filepath, engine=\"scipy\", chunks=self.block_chunks\n ).load()\n\n def time_load_dataset_scipy_with_block_chunks_oindexing(self):\n ds = xr.open_dataset(self.filepath, engine=\"scipy\", chunks=self.block_chunks)\n ds = ds.isel(**self.oinds).load()\n\n def time_load_dataset_scipy_with_block_chunks_vindexing(self):\n ds = xr.open_dataset(self.filepath, engine=\"scipy\", chunks=self.block_chunks)\n ds = ds.isel(**self.vinds).load()\n\n def time_load_dataset_scipy_with_time_chunks(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_dataset(\n self.filepath, engine=\"scipy\", chunks=self.time_chunks\n ).load()\n\n\nclass IOMultipleNetCDF:\n \"\"\"\n A few examples that benchmark reading/writing multiple netCDF files with\n xarray\n \"\"\"\n\n timeout = 300.0\n repeat = 1\n number = 5\n\n def make_ds(self, nfiles=10):\n\n # multiple Dataset\n self.ds = xr.Dataset()\n self.nt = 1000\n self.nx = 90\n self.ny = 45\n self.nfiles = nfiles\n\n self.block_chunks = {\n \"time\": self.nt / 4,\n \"lon\": self.nx / 3,\n \"lat\": self.ny / 3,\n }\n\n self.time_chunks = {\"time\": int(self.nt / 36)}\n\n self.time_vars = np.split(\n pd.date_range(\"1970-01-01\", periods=self.nt, freq=\"D\"), self.nfiles\n )\n\n self.ds_list = []\n self.filenames_list = []\n for i, times in enumerate(self.time_vars):\n ds = xr.Dataset()\n nt = len(times)\n lons = xr.DataArray(\n np.linspace(0, 360, self.nx),\n dims=(\"lon\",),\n attrs={\"units\": \"degrees east\", \"long_name\": \"longitude\"},\n )\n lats = xr.DataArray(\n np.linspace(-90, 90, self.ny),\n dims=(\"lat\",),\n 
attrs={\"units\": \"degrees north\", \"long_name\": \"latitude\"},\n )\n ds[\"foo\"] = xr.DataArray(\n randn((nt, self.nx, self.ny), frac_nan=0.2),\n coords={\"lon\": lons, \"lat\": lats, \"time\": times},\n dims=(\"time\", \"lon\", \"lat\"),\n name=\"foo\",\n encoding=None,\n attrs={\"units\": \"foo units\", \"description\": \"a description\"},\n )\n ds[\"bar\"] = xr.DataArray(\n randn((nt, self.nx, self.ny), frac_nan=0.2),\n coords={\"lon\": lons, \"lat\": lats, \"time\": times},\n dims=(\"time\", \"lon\", \"lat\"),\n name=\"bar\",\n encoding=None,\n attrs={\"units\": \"bar units\", \"description\": \"a description\"},\n )\n ds[\"baz\"] = xr.DataArray(\n randn((self.nx, self.ny), frac_nan=0.2).astype(np.float32),\n coords={\"lon\": lons, \"lat\": lats},\n dims=(\"lon\", \"lat\"),\n name=\"baz\",\n encoding=None,\n attrs={\"units\": \"baz units\", \"description\": \"a description\"},\n )\n\n ds.attrs = {\"history\": \"created for xarray benchmarking\"}\n\n self.ds_list.append(ds)\n self.filenames_list.append(\"test_netcdf_%i.nc\" % i)\n\n\nclass IOWriteMultipleNetCDF3(IOMultipleNetCDF):\n def setup(self):\n self.make_ds()\n self.format = \"NETCDF3_64BIT\"\n\n def time_write_dataset_netcdf4(self):\n xr.save_mfdataset(\n self.ds_list, self.filenames_list, engine=\"netcdf4\", format=self.format\n )\n\n def time_write_dataset_scipy(self):\n xr.save_mfdataset(\n self.ds_list, self.filenames_list, engine=\"scipy\", format=self.format\n )\n\n\nclass IOReadMultipleNetCDF4(IOMultipleNetCDF):\n def setup(self):\n\n requires_dask()\n\n self.make_ds()\n self.format = \"NETCDF4\"\n xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format)\n\n def time_load_dataset_netcdf4(self):\n xr.open_mfdataset(self.filenames_list, engine=\"netcdf4\").load()\n\n def time_open_dataset_netcdf4(self):\n xr.open_mfdataset(self.filenames_list, engine=\"netcdf4\")\n\n\nclass IOReadMultipleNetCDF3(IOReadMultipleNetCDF4):\n def setup(self):\n\n requires_dask()\n\n self.make_ds()\n self.format = \"NETCDF3_64BIT\"\n xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format)\n\n def time_load_dataset_scipy(self):\n xr.open_mfdataset(self.filenames_list, engine=\"scipy\").load()\n\n def time_open_dataset_scipy(self):\n xr.open_mfdataset(self.filenames_list, engine=\"scipy\")\n\n\nclass IOReadMultipleNetCDF4Dask(IOMultipleNetCDF):\n def setup(self):\n\n requires_dask()\n\n self.make_ds()\n self.format = \"NETCDF4\"\n xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format)\n\n def time_load_dataset_netcdf4_with_block_chunks(self):\n xr.open_mfdataset(\n self.filenames_list, engine=\"netcdf4\", chunks=self.block_chunks\n ).load()\n\n def time_load_dataset_netcdf4_with_block_chunks_multiprocessing(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_mfdataset(\n self.filenames_list, engine=\"netcdf4\", chunks=self.block_chunks\n ).load()\n\n def time_load_dataset_netcdf4_with_time_chunks(self):\n xr.open_mfdataset(\n self.filenames_list, engine=\"netcdf4\", chunks=self.time_chunks\n ).load()\n\n def time_load_dataset_netcdf4_with_time_chunks_multiprocessing(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_mfdataset(\n self.filenames_list, engine=\"netcdf4\", chunks=self.time_chunks\n ).load()\n\n def time_open_dataset_netcdf4_with_block_chunks(self):\n xr.open_mfdataset(\n self.filenames_list, engine=\"netcdf4\", chunks=self.block_chunks\n )\n\n def time_open_dataset_netcdf4_with_block_chunks_multiprocessing(self):\n with 
dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_mfdataset(\n self.filenames_list, engine=\"netcdf4\", chunks=self.block_chunks\n )\n\n def time_open_dataset_netcdf4_with_time_chunks(self):\n xr.open_mfdataset(\n self.filenames_list, engine=\"netcdf4\", chunks=self.time_chunks\n )\n\n def time_open_dataset_netcdf4_with_time_chunks_multiprocessing(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_mfdataset(\n self.filenames_list, engine=\"netcdf4\", chunks=self.time_chunks\n )\n\n\nclass IOReadMultipleNetCDF3Dask(IOReadMultipleNetCDF4Dask):\n def setup(self):\n\n requires_dask()\n\n self.make_ds()\n self.format = \"NETCDF3_64BIT\"\n xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format)\n\n def time_load_dataset_scipy_with_block_chunks(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_mfdataset(\n self.filenames_list, engine=\"scipy\", chunks=self.block_chunks\n ).load()\n\n def time_load_dataset_scipy_with_time_chunks(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_mfdataset(\n self.filenames_list, engine=\"scipy\", chunks=self.time_chunks\n ).load()\n\n def time_open_dataset_scipy_with_block_chunks(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_mfdataset(\n self.filenames_list, engine=\"scipy\", chunks=self.block_chunks\n )\n\n def time_open_dataset_scipy_with_time_chunks(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_mfdataset(\n self.filenames_list, engine=\"scipy\", chunks=self.time_chunks\n )\n\n\ndef create_delayed_write():\n import dask.array as da\n\n vals = da.random.random(300, chunks=(1,))\n ds = xr.Dataset({\"vals\": ([\"a\"], vals)})\n return ds.to_netcdf(\"file.nc\", engine=\"netcdf4\", compute=False)\n\n\nclass IOWriteNetCDFDask:\n timeout = 60\n repeat = 1\n number = 5\n\n def setup(self):\n requires_dask()\n self.write = create_delayed_write()\n\n def time_write(self):\n self.write.compute()\n\n\nclass IOWriteNetCDFDaskDistributed:\n def setup(self):\n try:\n import distributed\n except ImportError:\n raise NotImplementedError()\n self.client = distributed.Client()\n self.write = create_delayed_write()\n\n def cleanup(self):\n self.client.shutdown()\n\n def time_write(self):\n self.write.compute()\n"},{"col":4,"comment":"Write dataset contents to a netCDF file.\n\n Parameters\n ----------\n path : str, Path or file-like object, optional\n Path to which to save this dataset. File-like objects are only\n supported by the scipy engine. If no path is provided, this\n function returns the resulting netCDF file as bytes; in this case,\n we need to use scipy, which does not support netCDF version 4 (the\n default format becomes NETCDF3_64BIT).\n mode : {'w', 'a'}, optional\n Write ('w') or append ('a') mode. If mode='w', any existing file at\n this location will be overwritten. If mode='a', existing variables\n will be overwritten.\n format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT',\n 'NETCDF3_CLASSIC'}, optional\n File format for the resulting netCDF file:\n\n * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API\n features.\n * NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only\n netCDF 3 compatible API features.\n * NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,\n which fully supports 2+ GB files, but is only compatible with\n clients linked against netCDF version 3.6.0 or later.\n * NETCDF3_CLASSIC: The classic netCDF 3 file format. 
It does not\n handle 2+ GB files very well.\n\n All formats are supported by the netCDF4-python library.\n scipy.io.netcdf only supports the last two formats.\n\n The default format is NETCDF4 if you are saving a file to disk and\n have the netCDF4-python library available. Otherwise, xarray falls\n back to using scipy to write netCDF files and defaults to the\n NETCDF3_64BIT format (scipy does not support netCDF4).\n group : str, optional\n Path to the netCDF4 group in the given file to open (only works for\n format='NETCDF4'). The group(s) will be created if necessary.\n engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional\n Engine to use when writing netCDF files. If not provided, the\n default engine is chosen based on available dependencies, with a\n preference for 'netcdf4' if writing to a file on disk.\n encoding : dict, optional\n Nested dictionary with variable names as keys and dictionaries of\n variable specific encodings as values, e.g.,\n ``{'my_variable': {'dtype': 'int16', 'scale_factor': 0.1,\n 'zlib': True}, ...}``\n\n The `h5netcdf` engine supports both the NetCDF4-style compression\n encoding parameters ``{'zlib': True, 'complevel': 9}`` and the h5py\n ones ``{'compression': 'gzip', 'compression_opts': 9}``.\n This allows using any compression plugin installed in the HDF5\n library, e.g. LZF.\n\n unlimited_dims : iterable of hashable, optional\n Dimension(s) that should be serialized as unlimited dimensions.\n By default, no dimensions are treated as unlimited dimensions.\n Note that unlimited_dims may also be set via\n ``dataset.encoding['unlimited_dims']``.\n compute: boolean\n If true compute immediately, otherwise return a\n ``dask.delayed.Delayed`` object that can be computed later.\n invalid_netcdf: boolean\n Only valid along with engine='h5netcdf'. If True, allow writing\n hdf5 files which are valid netcdf as described in\n https://github.com/shoyer/h5netcdf. Default: False.\n ","endLoc":1537,"header":"def to_netcdf(\n self,\n path=None,\n mode: str = \"w\",\n format: str = None,\n group: str = None,\n engine: str = None,\n encoding: Mapping = None,\n unlimited_dims: Iterable[Hashable] = None,\n compute: bool = True,\n invalid_netcdf: bool = False,\n ) -> Union[bytes, \"Delayed\", None]","id":1212,"name":"to_netcdf","nodeType":"Function","startLoc":1443,"text":"def to_netcdf(\n self,\n path=None,\n mode: str = \"w\",\n format: str = None,\n group: str = None,\n engine: str = None,\n encoding: Mapping = None,\n unlimited_dims: Iterable[Hashable] = None,\n compute: bool = True,\n invalid_netcdf: bool = False,\n ) -> Union[bytes, \"Delayed\", None]:\n \"\"\"Write dataset contents to a netCDF file.\n\n Parameters\n ----------\n path : str, Path or file-like object, optional\n Path to which to save this dataset. File-like objects are only\n supported by the scipy engine. If no path is provided, this\n function returns the resulting netCDF file as bytes; in this case,\n we need to use scipy, which does not support netCDF version 4 (the\n default format becomes NETCDF3_64BIT).\n mode : {'w', 'a'}, optional\n Write ('w') or append ('a') mode. If mode='w', any existing file at\n this location will be overwritten. 
If mode='a', existing variables\n will be overwritten.\n format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT',\n 'NETCDF3_CLASSIC'}, optional\n File format for the resulting netCDF file:\n\n * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API\n features.\n * NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only\n netCDF 3 compatible API features.\n * NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,\n which fully supports 2+ GB files, but is only compatible with\n clients linked against netCDF version 3.6.0 or later.\n * NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not\n handle 2+ GB files very well.\n\n All formats are supported by the netCDF4-python library.\n scipy.io.netcdf only supports the last two formats.\n\n The default format is NETCDF4 if you are saving a file to disk and\n have the netCDF4-python library available. Otherwise, xarray falls\n back to using scipy to write netCDF files and defaults to the\n NETCDF3_64BIT format (scipy does not support netCDF4).\n group : str, optional\n Path to the netCDF4 group in the given file to open (only works for\n format='NETCDF4'). The group(s) will be created if necessary.\n engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional\n Engine to use when writing netCDF files. If not provided, the\n default engine is chosen based on available dependencies, with a\n preference for 'netcdf4' if writing to a file on disk.\n encoding : dict, optional\n Nested dictionary with variable names as keys and dictionaries of\n variable specific encodings as values, e.g.,\n ``{'my_variable': {'dtype': 'int16', 'scale_factor': 0.1,\n 'zlib': True}, ...}``\n\n The `h5netcdf` engine supports both the NetCDF4-style compression\n encoding parameters ``{'zlib': True, 'complevel': 9}`` and the h5py\n ones ``{'compression': 'gzip', 'compression_opts': 9}``.\n This allows using any compression plugin installed in the HDF5\n library, e.g. LZF.\n\n unlimited_dims : iterable of hashable, optional\n Dimension(s) that should be serialized as unlimited dimensions.\n By default, no dimensions are treated as unlimited dimensions.\n Note that unlimited_dims may also be set via\n ``dataset.encoding['unlimited_dims']``.\n compute: boolean\n If true compute immediately, otherwise return a\n ``dask.delayed.Delayed`` object that can be computed later.\n invalid_netcdf: boolean\n Only valid along with engine='h5netcdf'. If True, allow writing\n hdf5 files which are valid netcdf as described in\n https://github.com/shoyer/h5netcdf. 
Default: False.\n \"\"\"\n if encoding is None:\n encoding = {}\n from ..backends.api import to_netcdf\n\n return to_netcdf(\n self,\n path,\n mode,\n format=format,\n group=group,\n engine=engine,\n encoding=encoding,\n unlimited_dims=unlimited_dims,\n compute=compute,\n invalid_netcdf=invalid_netcdf,\n )"},{"col":4,"comment":"null","endLoc":3169,"header":"def _stack_once(self, dims, new_dim)","id":1213,"name":"_stack_once","nodeType":"Function","startLoc":3143,"text":"def _stack_once(self, dims, new_dim):\n variables = {}\n for name, var in self.variables.items():\n if name not in dims:\n if any(d in var.dims for d in dims):\n add_dims = [d for d in dims if d not in var.dims]\n vdims = list(var.dims) + add_dims\n shape = [self.dims[d] for d in vdims]\n exp_var = var.set_dims(vdims, shape)\n stacked_var = exp_var.stack(**{new_dim: dims})\n variables[name] = stacked_var\n else:\n variables[name] = var.copy(deep=False)\n\n # consider dropping levels that are unused?\n levels = [self.get_index(dim) for dim in dims]\n idx = utils.multiindex_from_product_levels(levels, names=dims)\n variables[new_dim] = IndexVariable(new_dim, idx)\n\n coord_names = set(self._coord_names) - set(dims) | {new_dim}\n\n indexes = {k: v for k, v in self.indexes.items() if k not in dims}\n indexes[new_dim] = idx\n\n return self._replace_with_new_dims(\n variables, coord_names=coord_names, indexes=indexes\n )"},{"col":0,"comment":"Extract version information from the given file.","endLoc":983,"header":"@register_vcs_handler(\"git\", \"get_keywords\")\ndef git_get_keywords(versionfile_abs)","id":1214,"name":"git_get_keywords","nodeType":"Function","startLoc":957,"text":"@register_vcs_handler(\"git\", \"get_keywords\")\ndef git_get_keywords(versionfile_abs):\n \"\"\"Extract version information from the given file.\"\"\"\n # the code embedded in _version.py can just fetch the value of these\n # keywords. When used from setup.py, we don't want to import _version.py,\n # so we do it with a regexp instead. 
This function is not used from\n # _version.py.\n keywords = {}\n try:\n f = open(versionfile_abs, \"r\")\n for line in f.readlines():\n if line.strip().startswith(\"git_refnames =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"refnames\"] = mo.group(1)\n if line.strip().startswith(\"git_full =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"full\"] = mo.group(1)\n if line.strip().startswith(\"git_date =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"date\"] = mo.group(1)\n f.close()\n except OSError:\n pass\n return keywords"},{"col":0,"comment":"This function creates an appropriate datastore for writing a dataset to\n disk as a netCDF file\n\n See `Dataset.to_netcdf` for full API docs.\n\n The ``multifile`` argument is only for the private use of save_mfdataset.\n ","endLoc":1092,"header":"def to_netcdf(\n dataset: Dataset,\n path_or_file=None,\n mode: str = \"w\",\n format: str = None,\n group: str = None,\n engine: str = None,\n encoding: Mapping = None,\n unlimited_dims: Iterable[Hashable] = None,\n compute: bool = True,\n multifile: bool = False,\n invalid_netcdf: bool = False,\n) -> Union[Tuple[ArrayWriter, AbstractDataStore], bytes, \"Delayed\", None]","id":1215,"name":"to_netcdf","nodeType":"Function","startLoc":973,"text":"def to_netcdf(\n dataset: Dataset,\n path_or_file=None,\n mode: str = \"w\",\n format: str = None,\n group: str = None,\n engine: str = None,\n encoding: Mapping = None,\n unlimited_dims: Iterable[Hashable] = None,\n compute: bool = True,\n multifile: bool = False,\n invalid_netcdf: bool = False,\n) -> Union[Tuple[ArrayWriter, AbstractDataStore], bytes, \"Delayed\", None]:\n \"\"\"This function creates an appropriate datastore for writing a dataset to\n disk as a netCDF file\n\n See `Dataset.to_netcdf` for full API docs.\n\n The ``multifile`` argument is only for the private use of save_mfdataset.\n \"\"\"\n if isinstance(path_or_file, Path):\n path_or_file = str(path_or_file)\n\n if encoding is None:\n encoding = {}\n\n if path_or_file is None:\n if engine is None:\n engine = \"scipy\"\n elif engine != \"scipy\":\n raise ValueError(\n \"invalid engine for creating bytes with \"\n \"to_netcdf: %r. 
Only the default engine \"\n \"or engine='scipy' is supported\" % engine\n )\n if not compute:\n raise NotImplementedError(\n \"to_netcdf() with compute=False is not yet implemented when \"\n \"returning bytes\"\n )\n elif isinstance(path_or_file, str):\n if engine is None:\n engine = _get_default_engine(path_or_file)\n path_or_file = _normalize_path(path_or_file)\n else: # file-like object\n engine = \"scipy\"\n\n # validate Dataset keys, DataArray names, and attr keys/values\n _validate_dataset_names(dataset)\n _validate_attrs(dataset)\n\n try:\n store_open = WRITEABLE_STORES[engine]\n except KeyError:\n raise ValueError(\"unrecognized engine for to_netcdf: %r\" % engine)\n\n if format is not None:\n format = format.upper()\n\n # handle scheduler specific logic\n scheduler = _get_scheduler()\n have_chunks = any(v.chunks for v in dataset.variables.values())\n\n autoclose = have_chunks and scheduler in [\"distributed\", \"multiprocessing\"]\n if autoclose and engine == \"scipy\":\n raise NotImplementedError(\n \"Writing netCDF files with the %s backend \"\n \"is not currently supported with dask's %s \"\n \"scheduler\" % (engine, scheduler)\n )\n\n target = path_or_file if path_or_file is not None else BytesIO()\n kwargs = dict(autoclose=True) if autoclose else {}\n if invalid_netcdf:\n if engine == \"h5netcdf\":\n kwargs[\"invalid_netcdf\"] = invalid_netcdf\n else:\n raise ValueError(\n \"unrecognized option 'invalid_netcdf' for engine %s\" % engine\n )\n store = store_open(target, mode, format, group, **kwargs)\n\n if unlimited_dims is None:\n unlimited_dims = dataset.encoding.get(\"unlimited_dims\", None)\n if unlimited_dims is not None:\n if isinstance(unlimited_dims, str) or not isinstance(unlimited_dims, Iterable):\n unlimited_dims = [unlimited_dims]\n else:\n unlimited_dims = list(unlimited_dims)\n\n writer = ArrayWriter()\n\n # TODO: figure out how to refactor this logic (here and in save_mfdataset)\n # to avoid this mess of conditionals\n try:\n # TODO: allow this work (setting up the file for writing array data)\n # to be parallelized with dask\n dump_to_store(\n dataset, store, writer, encoding=encoding, unlimited_dims=unlimited_dims\n )\n if autoclose:\n store.close()\n\n if multifile:\n return writer, store\n\n writes = writer.sync(compute=compute)\n\n if path_or_file is None:\n store.sync()\n return target.getvalue()\n finally:\n if not multifile and compute:\n store.close()\n\n if not compute:\n import dask\n\n return dask.delayed(_finalize_store)(writes, store)\n return None"},{"col":0,"comment":"null","endLoc":48,"header":"def randint(low, high=None, size=None, frac_minus=None, seed=0)","id":1216,"name":"randint","nodeType":"Function","startLoc":41,"text":"def randint(low, high=None, size=None, frac_minus=None, seed=0):\n rng = np.random.RandomState(seed)\n x = rng.randint(low, high, size)\n if frac_minus is not None:\n inds = rng.choice(range(x.size), int(x.size * frac_minus))\n x.flat[inds] = -1\n\n return x"},{"col":0,"comment":"\n Scatter Dataset data variables against each other.\n ","endLoc":453,"header":"@_dsplot\ndef scatter(ds, x, y, ax, **kwargs)","id":1218,"name":"scatter","nodeType":"Function","startLoc":404,"text":"@_dsplot\ndef scatter(ds, x, y, ax, **kwargs):\n \"\"\"\n Scatter Dataset data variables against each other.\n \"\"\"\n\n if \"add_colorbar\" in kwargs or \"add_legend\" in kwargs:\n raise ValueError(\n \"Dataset.plot.scatter does not accept \"\n \"'add_colorbar' or 'add_legend'. 
\"\n \"Use 'add_guide' instead.\"\n )\n\n cmap_params = kwargs.pop(\"cmap_params\")\n hue = kwargs.pop(\"hue\")\n hue_style = kwargs.pop(\"hue_style\")\n markersize = kwargs.pop(\"markersize\", None)\n size_norm = kwargs.pop(\"size_norm\", None)\n size_mapping = kwargs.pop(\"size_mapping\", None) # set by facetgrid\n\n # need to infer size_mapping with full dataset\n data = _infer_scatter_data(ds, x, y, hue, markersize, size_norm, size_mapping)\n\n if hue_style == \"discrete\":\n primitive = []\n for label in np.unique(data[\"hue\"].values):\n mask = data[\"hue\"] == label\n if data[\"sizes\"] is not None:\n kwargs.update(s=data[\"sizes\"].where(mask, drop=True).values.flatten())\n\n primitive.append(\n ax.scatter(\n data[\"x\"].where(mask, drop=True).values.flatten(),\n data[\"y\"].where(mask, drop=True).values.flatten(),\n label=label,\n **kwargs\n )\n )\n\n elif hue is None or hue_style == \"continuous\":\n if data[\"sizes\"] is not None:\n kwargs.update(s=data[\"sizes\"].values.ravel())\n if data[\"hue\"] is not None:\n kwargs.update(c=data[\"hue\"].values.ravel())\n\n primitive = ax.scatter(\n data[\"x\"].values.ravel(), data[\"y\"].values.ravel(), **cmap_params, **kwargs\n )\n\n return primitive"},{"col":0,"comment":"null","endLoc":38,"header":"def randn(shape, frac_nan=None, chunks=None, seed=0)","id":1219,"name":"randn","nodeType":"Function","startLoc":24,"text":"def randn(shape, frac_nan=None, chunks=None, seed=0):\n rng = np.random.RandomState(seed)\n if chunks is None:\n x = rng.standard_normal(shape)\n else:\n import dask.array as da\n\n rng = da.random.RandomState(seed)\n x = rng.standard_normal(shape, chunks=chunks)\n\n if frac_nan is not None:\n inds = rng.choice(range(x.size), int(x.size * frac_nan))\n x.flat[inds] = np.nan\n\n return x"},{"col":0,"comment":"Given a pandas.Index and labels (e.g., from __getitem__) for one\n dimension, return an indexer suitable for indexing an ndarray along that\n dimension. If `index` is a pandas.MultiIndex and depending on `label`,\n return a new pandas.Index or pandas.MultiIndex (otherwise return None).\n ","endLoc":194,"header":"def convert_label_indexer(index, label, index_name=\"\", method=None, tolerance=None)","id":1220,"name":"convert_label_indexer","nodeType":"Function","startLoc":109,"text":"def convert_label_indexer(index, label, index_name=\"\", method=None, tolerance=None):\n \"\"\"Given a pandas.Index and labels (e.g., from __getitem__) for one\n dimension, return an indexer suitable for indexing an ndarray along that\n dimension. 
If `index` is a pandas.MultiIndex and depending on `label`,\n return a new pandas.Index or pandas.MultiIndex (otherwise return None).\n \"\"\"\n new_index = None\n\n if isinstance(label, slice):\n if method is not None or tolerance is not None:\n raise NotImplementedError(\n \"cannot use ``method`` argument if any indexers are \" \"slice objects\"\n )\n indexer = index.slice_indexer(\n _sanitize_slice_element(label.start),\n _sanitize_slice_element(label.stop),\n _sanitize_slice_element(label.step),\n )\n if not isinstance(indexer, slice):\n # unlike pandas, in xarray we never want to silently convert a\n # slice indexer into an array indexer\n raise KeyError(\n \"cannot represent labeled-based slice indexer for \"\n \"dimension %r with a slice over integer positions; \"\n \"the index is unsorted or non-unique\" % index_name\n )\n\n elif is_dict_like(label):\n is_nested_vals = _is_nested_tuple(tuple(label.values()))\n if not isinstance(index, pd.MultiIndex):\n raise ValueError(\n \"cannot use a dict-like object for selection on \"\n \"a dimension that does not have a MultiIndex\"\n )\n elif len(label) == index.nlevels and not is_nested_vals:\n indexer = index.get_loc(tuple(label[k] for k in index.names))\n else:\n for k, v in label.items():\n # index should be an item (i.e. Hashable) not an array-like\n if isinstance(v, Sequence) and not isinstance(v, str):\n raise ValueError(\n \"Vectorized selection is not \"\n \"available along level variable: \" + k\n )\n indexer, new_index = index.get_loc_level(\n tuple(label.values()), level=tuple(label.keys())\n )\n\n # GH2619. Raise a KeyError if nothing is chosen\n if indexer.dtype.kind == \"b\" and indexer.sum() == 0:\n raise KeyError(\"{} not found\".format(label))\n\n elif isinstance(label, tuple) and isinstance(index, pd.MultiIndex):\n if _is_nested_tuple(label):\n indexer = index.get_locs(label)\n elif len(label) == index.nlevels:\n indexer = index.get_loc(label)\n else:\n indexer, new_index = index.get_loc_level(\n label, level=list(range(len(label)))\n )\n else:\n label = (\n label\n if getattr(label, \"ndim\", 1) > 1 # vectorized-indexing\n else _asarray_tuplesafe(label)\n )\n if label.ndim == 0:\n if isinstance(index, pd.MultiIndex):\n indexer, new_index = index.get_loc_level(label.item(), level=0)\n else:\n indexer = index.get_loc(\n label.item(), method=method, tolerance=tolerance\n )\n elif label.dtype.kind == \"b\":\n indexer = label\n else:\n if isinstance(index, pd.MultiIndex) and label.ndim > 1:\n raise ValueError(\n \"Vectorized selection is not available along \"\n \"MultiIndex variable: \" + index_name\n )\n indexer = get_indexer_nd(index, label, method, tolerance)\n if np.any(indexer < 0):\n raise KeyError(\"not all values found in index %r\" % index_name)\n return indexer, new_index"},{"col":0,"comment":"null","endLoc":72,"header":"def _sanitize_slice_element(x)","id":1221,"name":"_sanitize_slice_element","nodeType":"Function","startLoc":52,"text":"def _sanitize_slice_element(x):\n from .variable import Variable\n from .dataarray import DataArray\n\n if isinstance(x, (Variable, DataArray)):\n x = x.values\n\n if isinstance(x, np.ndarray):\n if x.ndim != 0:\n raise ValueError(\n \"cannot use non-scalar arrays in a slice for \"\n \"xarray indexing: {}\".format(x)\n )\n x = x[()]\n\n if isinstance(x, np.timedelta64):\n # pandas does not support indexing with np.timedelta64 yet:\n # https://github.com/pandas-dev/pandas/issues/20393\n x = pd.Timedelta(x)\n\n return x"},{"col":0,"comment":"DataArray.name and Dataset keys must be a 
string or None","endLoc":179,"header":"def _validate_dataset_names(dataset)","id":1222,"name":"_validate_dataset_names","nodeType":"Function","startLoc":161,"text":"def _validate_dataset_names(dataset):\n \"\"\"DataArray.name and Dataset keys must be a string or None\"\"\"\n\n def check_name(name):\n if isinstance(name, str):\n if not name:\n raise ValueError(\n \"Invalid name for DataArray or Dataset key: \"\n \"string must be length 1 or greater for \"\n \"serialization to netCDF files\"\n )\n elif name is not None:\n raise TypeError(\n \"DataArray.name or Dataset key must be either a \"\n \"string or None for serialization to netCDF files\"\n )\n\n for k in dataset.variables:\n check_name(k)"},{"className":"IOSingleNetCDF","col":0,"comment":"\n A few examples that benchmark reading/writing a single netCDF file with\n xarray\n ","endLoc":93,"id":1224,"nodeType":"Class","startLoc":20,"text":"class IOSingleNetCDF:\n \"\"\"\n A few examples that benchmark reading/writing a single netCDF file with\n xarray\n \"\"\"\n\n timeout = 300.0\n repeat = 1\n number = 5\n\n def make_ds(self):\n\n # single Dataset\n self.ds = xr.Dataset()\n self.nt = 1000\n self.nx = 90\n self.ny = 45\n\n self.block_chunks = {\n \"time\": self.nt / 4,\n \"lon\": self.nx / 3,\n \"lat\": self.ny / 3,\n }\n\n self.time_chunks = {\"time\": int(self.nt / 36)}\n\n times = pd.date_range(\"1970-01-01\", periods=self.nt, freq=\"D\")\n lons = xr.DataArray(\n np.linspace(0, 360, self.nx),\n dims=(\"lon\",),\n attrs={\"units\": \"degrees east\", \"long_name\": \"longitude\"},\n )\n lats = xr.DataArray(\n np.linspace(-90, 90, self.ny),\n dims=(\"lat\",),\n attrs={\"units\": \"degrees north\", \"long_name\": \"latitude\"},\n )\n self.ds[\"foo\"] = xr.DataArray(\n randn((self.nt, self.nx, self.ny), frac_nan=0.2),\n coords={\"lon\": lons, \"lat\": lats, \"time\": times},\n dims=(\"time\", \"lon\", \"lat\"),\n name=\"foo\",\n encoding=None,\n attrs={\"units\": \"foo units\", \"description\": \"a description\"},\n )\n self.ds[\"bar\"] = xr.DataArray(\n randn((self.nt, self.nx, self.ny), frac_nan=0.2),\n coords={\"lon\": lons, \"lat\": lats, \"time\": times},\n dims=(\"time\", \"lon\", \"lat\"),\n name=\"bar\",\n encoding=None,\n attrs={\"units\": \"bar units\", \"description\": \"a description\"},\n )\n self.ds[\"baz\"] = xr.DataArray(\n randn((self.nx, self.ny), frac_nan=0.2).astype(np.float32),\n coords={\"lon\": lons, \"lat\": lats},\n dims=(\"lon\", \"lat\"),\n name=\"baz\",\n encoding=None,\n attrs={\"units\": \"baz units\", \"description\": \"a description\"},\n )\n\n self.ds.attrs = {\"history\": \"created for xarray benchmarking\"}\n\n self.oinds = {\n \"time\": randint(0, self.nt, 120),\n \"lon\": randint(0, self.nx, 20),\n \"lat\": randint(0, self.ny, 10),\n }\n self.vinds = {\n \"time\": xr.DataArray(randint(0, self.nt, 120), dims=\"x\"),\n \"lon\": xr.DataArray(randint(0, self.nx, 120), dims=\"x\"),\n \"lat\": slice(3, 20),\n }"},{"col":0,"comment":"`attrs` must have a string key and a value which is either: a number,\n a string, an ndarray or a list/tuple of numbers/strings.\n ","endLoc":216,"header":"def _validate_attrs(dataset)","id":1225,"name":"_validate_attrs","nodeType":"Function","startLoc":182,"text":"def _validate_attrs(dataset):\n \"\"\"`attrs` must have a string key and a value which is either: a number,\n a string, an ndarray or a list/tuple of numbers/strings.\n \"\"\"\n\n def check_attr(name, value):\n if isinstance(name, str):\n if not name:\n raise ValueError(\n \"Invalid name for attr: string must be \"\n 
\"length 1 or greater for serialization to \"\n \"netCDF files\"\n )\n else:\n raise TypeError(\n \"Invalid name for attr: {} must be a string for \"\n \"serialization to netCDF files\".format(name)\n )\n\n if not isinstance(value, (str, Number, np.ndarray, np.number, list, tuple)):\n raise TypeError(\n \"Invalid value for attr: {} must be a number, \"\n \"a string, an ndarray or a list/tuple of \"\n \"numbers/strings for serialization to netCDF \"\n \"files\".format(value)\n )\n\n # Check attrs on the dataset itself\n for k, v in dataset.attrs.items():\n check_attr(k, v)\n\n # Check attrs on each variable within the dataset\n for variable in dataset.variables.values():\n for k, v in variable.attrs.items():\n check_attr(k, v)"},{"col":4,"comment":"null","endLoc":93,"header":"def make_ds(self)","id":1226,"name":"make_ds","nodeType":"Function","startLoc":30,"text":"def make_ds(self):\n\n # single Dataset\n self.ds = xr.Dataset()\n self.nt = 1000\n self.nx = 90\n self.ny = 45\n\n self.block_chunks = {\n \"time\": self.nt / 4,\n \"lon\": self.nx / 3,\n \"lat\": self.ny / 3,\n }\n\n self.time_chunks = {\"time\": int(self.nt / 36)}\n\n times = pd.date_range(\"1970-01-01\", periods=self.nt, freq=\"D\")\n lons = xr.DataArray(\n np.linspace(0, 360, self.nx),\n dims=(\"lon\",),\n attrs={\"units\": \"degrees east\", \"long_name\": \"longitude\"},\n )\n lats = xr.DataArray(\n np.linspace(-90, 90, self.ny),\n dims=(\"lat\",),\n attrs={\"units\": \"degrees north\", \"long_name\": \"latitude\"},\n )\n self.ds[\"foo\"] = xr.DataArray(\n randn((self.nt, self.nx, self.ny), frac_nan=0.2),\n coords={\"lon\": lons, \"lat\": lats, \"time\": times},\n dims=(\"time\", \"lon\", \"lat\"),\n name=\"foo\",\n encoding=None,\n attrs={\"units\": \"foo units\", \"description\": \"a description\"},\n )\n self.ds[\"bar\"] = xr.DataArray(\n randn((self.nt, self.nx, self.ny), frac_nan=0.2),\n coords={\"lon\": lons, \"lat\": lats, \"time\": times},\n dims=(\"time\", \"lon\", \"lat\"),\n name=\"bar\",\n encoding=None,\n attrs={\"units\": \"bar units\", \"description\": \"a description\"},\n )\n self.ds[\"baz\"] = xr.DataArray(\n randn((self.nx, self.ny), frac_nan=0.2).astype(np.float32),\n coords={\"lon\": lons, \"lat\": lats},\n dims=(\"lon\", \"lat\"),\n name=\"baz\",\n encoding=None,\n attrs={\"units\": \"baz units\", \"description\": \"a description\"},\n )\n\n self.ds.attrs = {\"history\": \"created for xarray benchmarking\"}\n\n self.oinds = {\n \"time\": randint(0, self.nt, 120),\n \"lon\": randint(0, self.nx, 20),\n \"lat\": randint(0, self.ny, 10),\n }\n self.vinds = {\n \"time\": xr.DataArray(randint(0, self.nt, 120), dims=\"x\"),\n \"lon\": xr.DataArray(randint(0, self.nx, 120), dims=\"x\"),\n \"lat\": slice(3, 20),\n }"},{"col":0,"comment":"Get version information from git keywords.","endLoc":1045,"header":"@register_vcs_handler(\"git\", \"keywords\")\ndef git_versions_from_keywords(keywords, tag_prefix, verbose)","id":1227,"name":"git_versions_from_keywords","nodeType":"Function","startLoc":986,"text":"@register_vcs_handler(\"git\", \"keywords\")\ndef git_versions_from_keywords(keywords, tag_prefix, verbose):\n \"\"\"Get version information from git keywords.\"\"\"\n if not keywords:\n raise NotThisMethod(\"no keywords at all, weird\")\n date = keywords.get(\"date\")\n if date is not None:\n # git-2.2.0 added \"%cI\", which expands to an ISO-8601 -compliant\n # datestamp. 
However we prefer \"%ci\" (which expands to an \"ISO-8601\n # -like\" string, which we must then edit to make compliant), because\n # it's been around since git-1.5.3, and it's too difficult to\n # discover which version we're using, or to work around using an\n # older one.\n date = date.strip().replace(\" \", \"T\", 1).replace(\" \", \"\", 1)\n refnames = keywords[\"refnames\"].strip()\n if refnames.startswith(\"$Format\"):\n if verbose:\n print(\"keywords are unexpanded, not using\")\n raise NotThisMethod(\"unexpanded keywords, not a git-archive tarball\")\n refs = {r.strip() for r in refnames.strip(\"()\").split(\",\")}\n # starting in git-1.8.3, tags are listed as \"tag: foo-1.0\" instead of\n # just \"foo-1.0\". If we see a \"tag: \" prefix, prefer those.\n TAG = \"tag: \"\n tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}\n if not tags:\n # Either we're using git < 1.8.3, or there really are no tags. We use\n # a heuristic: assume all version tags have a digit. The old git %d\n # expansion behaves like git log --decorate=short and strips out the\n # refs/heads/ and refs/tags/ prefixes that would let us distinguish\n # between branches and tags. By ignoring refnames without digits, we\n # filter out many common branch names like \"release\" and\n # \"stabilization\", as well as \"HEAD\" and \"master\".\n tags = {r for r in refs if re.search(r\"\\d\", r)}\n if verbose:\n print(\"discarding '%s', no digits\" % \",\".join(refs - tags))\n if verbose:\n print(\"likely tags: %s\" % \",\".join(sorted(tags)))\n for ref in sorted(tags):\n # sorting will prefer e.g. \"2.0\" over \"2.0rc1\"\n if ref.startswith(tag_prefix):\n r = ref[len(tag_prefix) :]\n if verbose:\n print(\"picking %s\" % r)\n return {\n \"version\": r,\n \"full-revisionid\": keywords[\"full\"].strip(),\n \"dirty\": False,\n \"error\": None,\n \"date\": date,\n }\n # no suitable tags, so version is \"0+unknown\", but full hex is still there\n if verbose:\n print(\"no suitable tags, using unknown + full revision id\")\n return {\n \"version\": \"0+unknown\",\n \"full-revisionid\": keywords[\"full\"].strip(),\n \"dirty\": False,\n \"error\": \"no suitable tags\",\n \"date\": None,\n }"},{"col":0,"comment":"null","endLoc":96,"header":"def _is_nested_tuple(possible_tuple)","id":1229,"name":"_is_nested_tuple","nodeType":"Function","startLoc":93,"text":"def _is_nested_tuple(possible_tuple):\n return isinstance(possible_tuple, tuple) and any(\n isinstance(value, (tuple, list, slice)) for value in possible_tuple\n )"},{"col":0,"comment":"\n Convert values into a numpy array of at most 1-dimension, while preserving\n tuples.\n\n Adapted from pandas.core.common._asarray_tuplesafe\n ","endLoc":90,"header":"def _asarray_tuplesafe(values)","id":1230,"name":"_asarray_tuplesafe","nodeType":"Function","startLoc":75,"text":"def _asarray_tuplesafe(values):\n \"\"\"\n Convert values into a numpy array of at most 1-dimension, while preserving\n tuples.\n\n Adapted from pandas.core.common._asarray_tuplesafe\n \"\"\"\n if isinstance(values, tuple):\n result = utils.to_0d_object_array(values)\n else:\n result = np.asarray(values)\n if result.ndim == 2:\n result = np.empty(len(values), dtype=object)\n result[:] = values\n\n return result"},{"col":0,"comment":"Get version from 'git describe' in the root of the source tree.\n\n This only gets called if the git-archive 'subst' keywords were *not*\n expanded, and _version.py hasn't already been rewritten with a short\n version string, meaning we're inside a checked out source tree.\n 
","endLoc":1146,"header":"@register_vcs_handler(\"git\", \"pieces_from_vcs\")\ndef git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command)","id":1231,"name":"git_pieces_from_vcs","nodeType":"Function","startLoc":1048,"text":"@register_vcs_handler(\"git\", \"pieces_from_vcs\")\ndef git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):\n \"\"\"Get version from 'git describe' in the root of the source tree.\n\n This only gets called if the git-archive 'subst' keywords were *not*\n expanded, and _version.py hasn't already been rewritten with a short\n version string, meaning we're inside a checked out source tree.\n \"\"\"\n GITS = [\"git\"]\n if sys.platform == \"win32\":\n GITS = [\"git.cmd\", \"git.exe\"]\n\n out, rc = run_command(GITS, [\"rev-parse\", \"--git-dir\"], cwd=root, hide_stderr=True)\n if rc != 0:\n if verbose:\n print(\"Directory %s not under git control\" % root)\n raise NotThisMethod(\"'git rev-parse --git-dir' returned error\")\n\n # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]\n # if there isn't one, this yields HEX[-dirty] (no NUM)\n describe_out, rc = run_command(\n GITS,\n [\n \"describe\",\n \"--tags\",\n \"--dirty\",\n \"--always\",\n \"--long\",\n \"--match\",\n \"%s*\" % tag_prefix,\n ],\n cwd=root,\n )\n # --long was added in git-1.5.5\n if describe_out is None:\n raise NotThisMethod(\"'git describe' failed\")\n describe_out = describe_out.strip()\n full_out, rc = run_command(GITS, [\"rev-parse\", \"HEAD\"], cwd=root)\n if full_out is None:\n raise NotThisMethod(\"'git rev-parse' failed\")\n full_out = full_out.strip()\n\n pieces = {}\n pieces[\"long\"] = full_out\n pieces[\"short\"] = full_out[:7] # maybe improved later\n pieces[\"error\"] = None\n\n # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]\n # TAG might have hyphens.\n git_describe = describe_out\n\n # look for -dirty suffix\n dirty = git_describe.endswith(\"-dirty\")\n pieces[\"dirty\"] = dirty\n if dirty:\n git_describe = git_describe[: git_describe.rindex(\"-dirty\")]\n\n # now we have TAG-NUM-gHEX or HEX\n\n if \"-\" in git_describe:\n # TAG-NUM-gHEX\n mo = re.search(r\"^(.+)-(\\d+)-g([0-9a-f]+)$\", git_describe)\n if not mo:\n # unparseable. 
Maybe git-describe is misbehaving?\n pieces[\"error\"] = \"unable to parse git-describe output: '%s'\" % describe_out\n return pieces\n\n # tag\n full_tag = mo.group(1)\n if not full_tag.startswith(tag_prefix):\n if verbose:\n fmt = \"tag '%s' doesn't start with prefix '%s'\"\n print(fmt % (full_tag, tag_prefix))\n pieces[\"error\"] = \"tag '%s' doesn't start with prefix '%s'\" % (\n full_tag,\n tag_prefix,\n )\n return pieces\n pieces[\"closest-tag\"] = full_tag[len(tag_prefix) :]\n\n # distance: number of commits since tag\n pieces[\"distance\"] = int(mo.group(2))\n\n # commit: short hex revision ID\n pieces[\"short\"] = mo.group(3)\n\n else:\n # HEX: no tags\n pieces[\"closest-tag\"] = None\n count_out, rc = run_command(GITS, [\"rev-list\", \"HEAD\", \"--count\"], cwd=root)\n pieces[\"distance\"] = int(count_out) # total number of commits\n\n # commit date: see ISO-8601 comment in git_versions_from_keywords()\n date = run_command(GITS, [\"show\", \"-s\", \"--format=%ci\", \"HEAD\"], cwd=root)[\n 0\n ].strip()\n pieces[\"date\"] = date.strip().replace(\" \", \"T\", 1).replace(\" \", \"\", 1)\n\n return pieces"},{"col":0,"comment":"Wrapper around :meth:`pandas.Index.get_indexer` supporting n-dimensional\n labels\n ","endLoc":106,"header":"def get_indexer_nd(index, labels, method=None, tolerance=None)","id":1232,"name":"get_indexer_nd","nodeType":"Function","startLoc":99,"text":"def get_indexer_nd(index, labels, method=None, tolerance=None):\n \"\"\"Wrapper around :meth:`pandas.Index.get_indexer` supporting n-dimensional\n labels\n \"\"\"\n flat_labels = np.ravel(labels)\n flat_indexer = index.get_indexer(flat_labels, method=method, tolerance=tolerance)\n indexer = flat_indexer.reshape(labels.shape)\n return indexer"},{"col":4,"comment":"null","endLoc":14,"header":"def time_unstack_fast(self)","id":1233,"name":"time_unstack_fast","nodeType":"Function","startLoc":13,"text":"def time_unstack_fast(self):\n self.ds.unstack(\"flat_dim\")"},{"attributeType":"null","col":16,"comment":"null","endLoc":3,"id":1234,"name":"np","nodeType":"Attribute","startLoc":3,"text":"np"},{"attributeType":"null","col":17,"comment":"null","endLoc":4,"id":1235,"name":"pd","nodeType":"Attribute","startLoc":4,"text":"pd"},{"attributeType":"null","col":0,"comment":"null","endLoc":17,"id":1236,"name":"_MARKERSIZE_RANGE","nodeType":"Attribute","startLoc":17,"text":"_MARKERSIZE_RANGE"},{"col":0,"comment":"","endLoc":1,"header":"dataset_plot.py#","id":1237,"name":"","nodeType":"Function","startLoc":1,"text":"_MARKERSIZE_RANGE = np.array([18.0, 72.0])"},{"fileName":"dask_array_compat.py","filePath":"xarray/core","id":1238,"nodeType":"File","text":"from distutils.version import LooseVersion\n\nimport dask.array as da\nimport numpy as np\nfrom dask import __version__ as dask_version\n\nif LooseVersion(dask_version) >= LooseVersion(\"2.0.0\"):\n meta_from_array = da.utils.meta_from_array\nelse:\n # Copied from dask v2.4.0\n # Used under the terms of Dask's license, see licenses/DASK_LICENSE.\n import numbers\n\n def meta_from_array(x, ndim=None, dtype=None):\n \"\"\" Normalize an array to appropriate meta object\n\n Parameters\n ----------\n x: array-like, callable\n Either an object that looks sufficiently like a Numpy array,\n or a callable that accepts shape and dtype keywords\n ndim: int\n Number of dimensions of the array\n dtype: Numpy dtype\n A valid input for ``np.dtype``\n\n Returns\n -------\n array-like with zero elements of the correct dtype\n \"\"\"\n # If using x._meta, x must be a Dask Array, some libraries 
(e.g. zarr)\n # implement a _meta attribute that are incompatible with Dask Array._meta\n if hasattr(x, \"_meta\") and isinstance(x, da.Array):\n x = x._meta\n\n if dtype is None and x is None:\n raise ValueError(\"You must specify the meta or dtype of the array\")\n\n if np.isscalar(x):\n x = np.array(x)\n\n if x is None:\n x = np.ndarray\n\n if isinstance(x, type):\n x = x(shape=(0,) * (ndim or 0), dtype=dtype)\n\n if (\n not hasattr(x, \"shape\")\n or not hasattr(x, \"dtype\")\n or not isinstance(x.shape, tuple)\n ):\n return x\n\n if isinstance(x, list) or isinstance(x, tuple):\n ndims = [\n 0\n if isinstance(a, numbers.Number)\n else a.ndim\n if hasattr(a, \"ndim\")\n else len(a)\n for a in x\n ]\n a = [a if nd == 0 else meta_from_array(a, nd) for a, nd in zip(x, ndims)]\n return a if isinstance(x, list) else tuple(x)\n\n if ndim is None:\n ndim = x.ndim\n\n try:\n meta = x[tuple(slice(0, 0, None) for _ in range(x.ndim))]\n if meta.ndim != ndim:\n if ndim > x.ndim:\n meta = meta[\n (Ellipsis,) + tuple(None for _ in range(ndim - meta.ndim))\n ]\n meta = meta[tuple(slice(0, 0, None) for _ in range(meta.ndim))]\n elif ndim == 0:\n meta = meta.sum()\n else:\n meta = meta.reshape((0,) * ndim)\n except Exception:\n meta = np.empty((0,) * ndim, dtype=dtype or x.dtype)\n\n if np.isscalar(meta):\n meta = np.array(meta)\n\n if dtype and meta.dtype != dtype:\n meta = meta.astype(dtype)\n\n return meta\n"},{"col":4,"comment":"Write dataset contents to a zarr group.\n\n .. note:: Experimental\n The Zarr backend is new and experimental. Please report any\n unexpected behavior via github issues.\n\n Parameters\n ----------\n store : MutableMapping, str or Path, optional\n Store or path to directory in file system.\n mode : {'w', 'w-', 'a', None}\n Persistence mode: 'w' means create (overwrite if exists);\n 'w-' means create (fail if exists);\n 'a' means append (create if does not exist).\n If ``append_dim`` is set, ``mode`` can be omitted as it is\n internally set to ``'a'``. Otherwise, ``mode`` will default to\n `w-` if not set.\n synchronizer : object, optional\n Array synchronizer\n group : str, optional\n Group path. (a.k.a. `path` in zarr terminology.)\n encoding : dict, optional\n Nested dictionary with variable names as keys and dictionaries of\n variable specific encodings as values, e.g.,\n ``{'my_variable': {'dtype': 'int16', 'scale_factor': 0.1,}, ...}``\n compute: bool, optional\n If True compute immediately, otherwise return a\n ``dask.delayed.Delayed`` object that can be computed later.\n consolidated: bool, optional\n If True, apply zarr's `consolidate_metadata` function to the store\n after writing.\n append_dim: hashable, optional\n If set, the dimension on which the data will be appended.\n\n References\n ----------\n https://zarr.readthedocs.io/\n ","endLoc":1617,"header":"def to_zarr(\n self,\n store: Union[MutableMapping, str, Path] = None,\n mode: str = None,\n synchronizer=None,\n group: str = None,\n encoding: Mapping = None,\n compute: bool = True,\n consolidated: bool = False,\n append_dim: Hashable = None,\n ) -> \"ZarrStore\"","id":1239,"name":"to_zarr","nodeType":"Function","startLoc":1539,"text":"def to_zarr(\n self,\n store: Union[MutableMapping, str, Path] = None,\n mode: str = None,\n synchronizer=None,\n group: str = None,\n encoding: Mapping = None,\n compute: bool = True,\n consolidated: bool = False,\n append_dim: Hashable = None,\n ) -> \"ZarrStore\":\n \"\"\"Write dataset contents to a zarr group.\n\n .. 
note:: Experimental\n The Zarr backend is new and experimental. Please report any\n unexpected behavior via github issues.\n\n Parameters\n ----------\n store : MutableMapping, str or Path, optional\n Store or path to directory in file system.\n mode : {'w', 'w-', 'a', None}\n Persistence mode: 'w' means create (overwrite if exists);\n 'w-' means create (fail if exists);\n 'a' means append (create if does not exist).\n If ``append_dim`` is set, ``mode`` can be omitted as it is\n internally set to ``'a'``. Otherwise, ``mode`` will default to\n `w-` if not set.\n synchronizer : object, optional\n Array synchronizer\n group : str, optional\n Group path. (a.k.a. `path` in zarr terminology.)\n encoding : dict, optional\n Nested dictionary with variable names as keys and dictionaries of\n variable specific encodings as values, e.g.,\n ``{'my_variable': {'dtype': 'int16', 'scale_factor': 0.1,}, ...}``\n compute: bool, optional\n If True compute immediately, otherwise return a\n ``dask.delayed.Delayed`` object that can be computed later.\n consolidated: bool, optional\n If True, apply zarr's `consolidate_metadata` function to the store\n after writing.\n append_dim: hashable, optional\n If set, the dimension on which the data will be appended.\n\n References\n ----------\n https://zarr.readthedocs.io/\n \"\"\"\n if encoding is None:\n encoding = {}\n if (mode == \"a\") or (append_dim is not None):\n if mode is None:\n mode = \"a\"\n elif mode != \"a\":\n raise ValueError(\n \"append_dim was set along with mode='{}', either set \"\n \"mode='a' or don't set it.\".format(mode)\n )\n elif mode is None:\n mode = \"w-\"\n if mode not in [\"w\", \"w-\", \"a\"]:\n # TODO: figure out how to handle 'r+'\n raise ValueError(\n \"The only supported options for mode are 'w',\" \"'w-' and 'a'.\"\n )\n from ..backends.api import to_zarr\n\n return to_zarr(\n self,\n store=store,\n mode=mode,\n synchronizer=synchronizer,\n group=group,\n encoding=encoding,\n compute=compute,\n consolidated=consolidated,\n append_dim=append_dim,\n )"},{"col":4,"comment":" Normalize an array to appropriate meta object\n\n Parameters\n ----------\n x: array-like, callable\n Either an object that looks sufficiently like a Numpy array,\n or a callable that accepts shape and dtype keywords\n ndim: int\n Number of dimensions of the array\n dtype: Numpy dtype\n A valid input for ``np.dtype``\n\n Returns\n -------\n array-like with zero elements of the correct dtype\n ","endLoc":91,"header":"def meta_from_array(x, ndim=None, dtype=None)","id":1240,"name":"meta_from_array","nodeType":"Function","startLoc":14,"text":"def meta_from_array(x, ndim=None, dtype=None):\n \"\"\" Normalize an array to appropriate meta object\n\n Parameters\n ----------\n x: array-like, callable\n Either an object that looks sufficiently like a Numpy array,\n or a callable that accepts shape and dtype keywords\n ndim: int\n Number of dimensions of the array\n dtype: Numpy dtype\n A valid input for ``np.dtype``\n\n Returns\n -------\n array-like with zero elements of the correct dtype\n \"\"\"\n # If using x._meta, x must be a Dask Array, some libraries (e.g. 
zarr)\n # implement a _meta attribute that are incompatible with Dask Array._meta\n if hasattr(x, \"_meta\") and isinstance(x, da.Array):\n x = x._meta\n\n if dtype is None and x is None:\n raise ValueError(\"You must specify the meta or dtype of the array\")\n\n if np.isscalar(x):\n x = np.array(x)\n\n if x is None:\n x = np.ndarray\n\n if isinstance(x, type):\n x = x(shape=(0,) * (ndim or 0), dtype=dtype)\n\n if (\n not hasattr(x, \"shape\")\n or not hasattr(x, \"dtype\")\n or not isinstance(x.shape, tuple)\n ):\n return x\n\n if isinstance(x, list) or isinstance(x, tuple):\n ndims = [\n 0\n if isinstance(a, numbers.Number)\n else a.ndim\n if hasattr(a, \"ndim\")\n else len(a)\n for a in x\n ]\n a = [a if nd == 0 else meta_from_array(a, nd) for a, nd in zip(x, ndims)]\n return a if isinstance(x, list) else tuple(x)\n\n if ndim is None:\n ndim = x.ndim\n\n try:\n meta = x[tuple(slice(0, 0, None) for _ in range(x.ndim))]\n if meta.ndim != ndim:\n if ndim > x.ndim:\n meta = meta[\n (Ellipsis,) + tuple(None for _ in range(ndim - meta.ndim))\n ]\n meta = meta[tuple(slice(0, 0, None) for _ in range(meta.ndim))]\n elif ndim == 0:\n meta = meta.sum()\n else:\n meta = meta.reshape((0,) * ndim)\n except Exception:\n meta = np.empty((0,) * ndim, dtype=dtype or x.dtype)\n\n if np.isscalar(meta):\n meta = np.array(meta)\n\n if dtype and meta.dtype != dtype:\n meta = meta.astype(dtype)\n\n return meta"},{"col":0,"comment":"This function creates an appropriate datastore for writing a dataset to\n a zarr ztore\n\n See `Dataset.to_zarr` for full API docs.\n ","endLoc":1334,"header":"def to_zarr(\n dataset,\n store=None,\n mode=None,\n synchronizer=None,\n group=None,\n encoding=None,\n compute=True,\n consolidated=False,\n append_dim=None,\n)","id":1241,"name":"to_zarr","nodeType":"Function","startLoc":1278,"text":"def to_zarr(\n dataset,\n store=None,\n mode=None,\n synchronizer=None,\n group=None,\n encoding=None,\n compute=True,\n consolidated=False,\n append_dim=None,\n):\n \"\"\"This function creates an appropriate datastore for writing a dataset to\n a zarr ztore\n\n See `Dataset.to_zarr` for full API docs.\n \"\"\"\n if isinstance(store, Path):\n store = str(store)\n if encoding is None:\n encoding = {}\n\n # validate Dataset keys, DataArray names, and attr keys/values\n _validate_dataset_names(dataset)\n _validate_attrs(dataset)\n\n if mode == \"a\":\n _validate_datatypes_for_zarr_append(dataset)\n _validate_append_dim_and_encoding(\n dataset,\n store,\n append_dim,\n group=group,\n consolidated=consolidated,\n encoding=encoding,\n )\n\n zstore = backends.ZarrStore.open_group(\n store=store,\n mode=mode,\n synchronizer=synchronizer,\n group=group,\n consolidate_on_close=consolidated,\n )\n zstore.append_dim = append_dim\n writer = ArrayWriter()\n # TODO: figure out how to properly handle unlimited_dims\n dump_to_store(dataset, zstore, writer, encoding=encoding)\n writes = writer.sync(compute=compute)\n\n if compute:\n _finalize_store(writes, zstore)\n else:\n import dask\n\n return dask.delayed(_finalize_store)(writes, zstore)\n\n return zstore"},{"col":0,"comment":"DataArray.name and Dataset keys must be a string or None","endLoc":1249,"header":"def _validate_datatypes_for_zarr_append(dataset)","id":1242,"name":"_validate_datatypes_for_zarr_append","nodeType":"Function","startLoc":1231,"text":"def _validate_datatypes_for_zarr_append(dataset):\n \"\"\"DataArray.name and Dataset keys must be a string or None\"\"\"\n\n def check_dtype(var):\n if (\n not np.issubdtype(var.dtype, np.number)\n and not 
coding.strings.is_unicode_dtype(var.dtype)\n and not var.dtype == object\n ):\n # and not re.match('^bytes[1-9]+$', var.dtype.name)):\n raise ValueError(\n \"Invalid dtype for data variable: {} \"\n \"dtype must be a subtype of number, \"\n \"a fixed sized string, a fixed size \"\n \"unicode string or an object\".format(var)\n )\n\n for k in dataset.data_vars.values():\n check_dtype(k)"},{"col":4,"comment":"Return a new DataArray whose data is given by the the first `n`\n values along the specified dimension(s). Default `n` = 5\n\n See Also\n --------\n Dataset.head\n DataArray.tail\n DataArray.thin\n ","endLoc":1066,"header":"def head(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any\n ) -> \"DataArray\"","id":1243,"name":"head","nodeType":"Function","startLoc":1051,"text":"def head(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any\n ) -> \"DataArray\":\n \"\"\"Return a new DataArray whose data is given by the the first `n`\n values along the specified dimension(s). Default `n` = 5\n\n See Also\n --------\n Dataset.head\n DataArray.tail\n DataArray.thin\n \"\"\"\n ds = self._to_temp_dataset().head(indexers, **indexers_kwargs)\n return self._from_temp_dataset(ds)"},{"col":0,"comment":"null","endLoc":1275,"header":"def _validate_append_dim_and_encoding(\n ds_to_append, store, append_dim, encoding, **open_kwargs\n)","id":1245,"name":"_validate_append_dim_and_encoding","nodeType":"Function","startLoc":1252,"text":"def _validate_append_dim_and_encoding(\n ds_to_append, store, append_dim, encoding, **open_kwargs\n):\n try:\n ds = backends.zarr.open_zarr(store, **open_kwargs)\n except ValueError: # store empty\n return\n if append_dim:\n if append_dim not in ds.dims:\n raise ValueError(\n \"{} not a valid dimension in the Dataset\".format(append_dim)\n )\n for data_var in ds_to_append:\n if data_var in ds:\n if append_dim is None:\n raise ValueError(\n \"variable '{}' already exists, but append_dim \"\n \"was not set\".format(data_var)\n )\n if data_var in encoding.keys():\n raise ValueError(\n \"variable '{}' already exists, but encoding was\"\n \"provided\".format(data_var)\n )"},{"col":0,"comment":"Load and decode a dataset from a Zarr store.\n\n .. note:: Experimental\n The Zarr backend is new and experimental. Please report any\n unexpected behavior via github issues.\n\n The `store` object should be a valid store for a Zarr group. `store`\n variables must contain dimension metadata encoded in the\n `_ARRAY_DIMENSIONS` attribute.\n\n Parameters\n ----------\n store : MutableMapping or str\n A MutableMapping where a Zarr Group has been stored or a path to a\n directory in file system where a Zarr DirectoryStore has been stored.\n synchronizer : object, optional\n Array synchronizer provided to zarr\n group : str, obtional\n Group path. (a.k.a. `path` in zarr terminology.)\n chunks : int or dict or tuple or {None, 'auto'}, optional\n Chunk sizes along each dimension, e.g., ``5`` or\n ``{'x': 5, 'y': 5}``. If `chunks='auto'`, dask chunks are created\n based on the variable's zarr chunks. If `chunks=None`, zarr array\n data will lazily convert to numpy arrays upon access. 
This accepts\n all the chunk specifications as Dask does.\n overwrite_encoded_chunks: bool, optional\n Whether to drop the zarr chunks encoded for each variable when a\n dataset is loaded with specified chunk sizes (default: False)\n decode_cf : bool, optional\n Whether to decode these variables, assuming they were saved according\n to CF conventions.\n mask_and_scale : bool, optional\n If True, replace array values equal to `_FillValue` with NA and scale\n values according to the formula `original_values * scale_factor +\n add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are\n taken from variable attributes (if they exist). If the `_FillValue` or\n `missing_value` attribute contains multiple values a warning will be\n issued and all array values matching one of the multiple values will\n be replaced by NA.\n decode_times : bool, optional\n If True, decode times encoded in the standard NetCDF datetime format\n into datetime objects. Otherwise, leave them encoded as numbers.\n concat_characters : bool, optional\n If True, concatenate along the last dimension of character arrays to\n form string arrays. Dimensions will only be concatenated over (and\n removed) if they have no corresponding variable and if they are only\n used as the last dimension of character arrays.\n decode_coords : bool, optional\n If True, decode the 'coordinates' attribute to identify coordinates in\n the resulting dataset.\n drop_variables : string or iterable, optional\n A variable or list of variables to exclude from being parsed from the\n dataset. This may be useful to drop variables with problems or\n inconsistent values.\n consolidated : bool, optional\n Whether to open the store using zarr's consolidated metadata\n capability. Only works for stores that have already been consolidated.\n\n Returns\n -------\n dataset : Dataset\n The newly created dataset.\n\n See Also\n --------\n open_dataset\n\n References\n ----------\n http://zarr.readthedocs.io/\n ","endLoc":659,"header":"def open_zarr(\n store,\n group=None,\n synchronizer=None,\n chunks=\"auto\",\n decode_cf=True,\n mask_and_scale=True,\n decode_times=True,\n concat_characters=True,\n decode_coords=True,\n drop_variables=None,\n consolidated=False,\n overwrite_encoded_chunks=False,\n **kwargs\n)","id":1246,"name":"open_zarr","nodeType":"Function","startLoc":457,"text":"def open_zarr(\n store,\n group=None,\n synchronizer=None,\n chunks=\"auto\",\n decode_cf=True,\n mask_and_scale=True,\n decode_times=True,\n concat_characters=True,\n decode_coords=True,\n drop_variables=None,\n consolidated=False,\n overwrite_encoded_chunks=False,\n **kwargs\n):\n \"\"\"Load and decode a dataset from a Zarr store.\n\n .. note:: Experimental\n The Zarr backend is new and experimental. Please report any\n unexpected behavior via github issues.\n\n The `store` object should be a valid store for a Zarr group. `store`\n variables must contain dimension metadata encoded in the\n `_ARRAY_DIMENSIONS` attribute.\n\n Parameters\n ----------\n store : MutableMapping or str\n A MutableMapping where a Zarr Group has been stored or a path to a\n directory in file system where a Zarr DirectoryStore has been stored.\n synchronizer : object, optional\n Array synchronizer provided to zarr\n group : str, obtional\n Group path. (a.k.a. `path` in zarr terminology.)\n chunks : int or dict or tuple or {None, 'auto'}, optional\n Chunk sizes along each dimension, e.g., ``5`` or\n ``{'x': 5, 'y': 5}``. 
If `chunks='auto'`, dask chunks are created\n based on the variable's zarr chunks. If `chunks=None`, zarr array\n data will lazily convert to numpy arrays upon access. This accepts\n all the chunk specifications as Dask does.\n overwrite_encoded_chunks: bool, optional\n Whether to drop the zarr chunks encoded for each variable when a\n dataset is loaded with specified chunk sizes (default: False)\n decode_cf : bool, optional\n Whether to decode these variables, assuming they were saved according\n to CF conventions.\n mask_and_scale : bool, optional\n If True, replace array values equal to `_FillValue` with NA and scale\n values according to the formula `original_values * scale_factor +\n add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are\n taken from variable attributes (if they exist). If the `_FillValue` or\n `missing_value` attribute contains multiple values a warning will be\n issued and all array values matching one of the multiple values will\n be replaced by NA.\n decode_times : bool, optional\n If True, decode times encoded in the standard NetCDF datetime format\n into datetime objects. Otherwise, leave them encoded as numbers.\n concat_characters : bool, optional\n If True, concatenate along the last dimension of character arrays to\n form string arrays. Dimensions will only be concatenated over (and\n removed) if they have no corresponding variable and if they are only\n used as the last dimension of character arrays.\n decode_coords : bool, optional\n If True, decode the 'coordinates' attribute to identify coordinates in\n the resulting dataset.\n drop_variables : string or iterable, optional\n A variable or list of variables to exclude from being parsed from the\n dataset. This may be useful to drop variables with problems or\n inconsistent values.\n consolidated : bool, optional\n Whether to open the store using zarr's consolidated metadata\n capability. Only works for stores that have already been consolidated.\n\n Returns\n -------\n dataset : Dataset\n The newly created dataset.\n\n See Also\n --------\n open_dataset\n\n References\n ----------\n http://zarr.readthedocs.io/\n \"\"\"\n if \"auto_chunk\" in kwargs:\n auto_chunk = kwargs.pop(\"auto_chunk\")\n if auto_chunk:\n chunks = \"auto\" # maintain backwards compatibility\n else:\n chunks = None\n\n warnings.warn(\n \"auto_chunk is deprecated. Use chunks='auto' instead.\",\n FutureWarning,\n stacklevel=2,\n )\n\n if kwargs:\n raise TypeError(\n \"open_zarr() got unexpected keyword arguments \" + \",\".join(kwargs.keys())\n )\n\n if not isinstance(chunks, (int, dict)):\n if chunks != \"auto\" and chunks is not None:\n raise ValueError(\n \"chunks must be an int, dict, 'auto', or None. \"\n \"Instead found %s. \" % chunks\n )\n\n if not decode_cf:\n mask_and_scale = False\n decode_times = False\n concat_characters = False\n decode_coords = False\n\n def maybe_decode_store(store, lock=False):\n ds = conventions.decode_cf(\n store,\n mask_and_scale=mask_and_scale,\n decode_times=decode_times,\n concat_characters=concat_characters,\n decode_coords=decode_coords,\n drop_variables=drop_variables,\n )\n\n # TODO: this is where we would apply caching\n\n return ds\n\n # Zarr supports a wide range of access modes, but for now xarray either\n # reads or writes from a store, never both. 
For open_zarr, we only read\n mode = \"r\"\n zarr_store = ZarrStore.open_group(\n store,\n mode=mode,\n synchronizer=synchronizer,\n group=group,\n consolidated=consolidated,\n )\n ds = maybe_decode_store(zarr_store)\n\n # auto chunking needs to be here and not in ZarrStore because variable\n # chunks do not survive decode_cf\n # return trivial case\n if not chunks:\n return ds\n\n # adapted from Dataset.Chunk()\n if isinstance(chunks, int):\n chunks = dict.fromkeys(ds.dims, chunks)\n\n if isinstance(chunks, tuple) and len(chunks) == len(ds.dims):\n chunks = dict(zip(ds.dims, chunks))\n\n def get_chunk(name, var, chunks):\n chunk_spec = dict(zip(var.dims, var.encoding.get(\"chunks\")))\n\n # Coordinate labels aren't chunked\n if var.ndim == 1 and var.dims[0] == name:\n return chunk_spec\n\n if chunks == \"auto\":\n return chunk_spec\n\n for dim in var.dims:\n if dim in chunks:\n spec = chunks[dim]\n if isinstance(spec, int):\n spec = (spec,)\n if isinstance(spec, (tuple, list)) and chunk_spec[dim]:\n if any(s % chunk_spec[dim] for s in spec):\n warnings.warn(\n \"Specified Dask chunks %r would \"\n \"separate Zarr chunk shape %r for \"\n \"dimension %r. This significantly \"\n \"degrades performance. Consider \"\n \"rechunking after loading instead.\"\n % (chunks[dim], chunk_spec[dim], dim),\n stacklevel=2,\n )\n chunk_spec[dim] = chunks[dim]\n return chunk_spec\n\n def maybe_chunk(name, var, chunks):\n from dask.base import tokenize\n\n chunk_spec = get_chunk(name, var, chunks)\n\n if (var.ndim > 0) and (chunk_spec is not None):\n # does this cause any data to be read?\n token2 = tokenize(name, var._data)\n name2 = \"zarr-%s\" % token2\n var = var.chunk(chunk_spec, name=name2, lock=None)\n if overwrite_encoded_chunks and var.chunks is not None:\n var.encoding[\"chunks\"] = tuple(x[0] for x in var.chunks)\n return var\n else:\n return var\n\n variables = {k: maybe_chunk(k, v, chunks) for k, v in ds.variables.items()}\n return ds._replace_vars_and_dims(variables)"},{"col":4,"comment":"\n Unstack existing dimensions corresponding to MultiIndexes into\n multiple new dimensions.\n\n New dimensions will be added at the end.\n\n Parameters\n ----------\n dim : hashable or sequence of hashable, optional\n Dimension(s) over which to unstack. By default unstacks all\n MultiIndexes.\n\n Returns\n -------\n unstacked : DataArray\n Array with unstacked data.\n\n Examples\n --------\n\n >>> arr = DataArray(np.arange(6).reshape(2, 3),\n ... coords=[('x', ['a', 'b']), ('y', [0, 1, 2])])\n >>> arr\n \n array([[0, 1, 2],\n [3, 4, 5]])\n Coordinates:\n * x (x) |S1 'a' 'b'\n * y (y) int64 0 1 2\n >>> stacked = arr.stack(z=('x', 'y'))\n >>> stacked.indexes['z']\n MultiIndex(levels=[['a', 'b'], [0, 1, 2]],\n codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],\n names=['x', 'y'])\n >>> roundtripped = stacked.unstack()\n >>> arr.identical(roundtripped)\n True\n\n See also\n --------\n DataArray.stack\n ","endLoc":1770,"header":"def unstack(\n self, dim: Union[Hashable, Sequence[Hashable], None] = None\n ) -> \"DataArray\"","id":1247,"name":"unstack","nodeType":"Function","startLoc":1724,"text":"def unstack(\n self, dim: Union[Hashable, Sequence[Hashable], None] = None\n ) -> \"DataArray\":\n \"\"\"\n Unstack existing dimensions corresponding to MultiIndexes into\n multiple new dimensions.\n\n New dimensions will be added at the end.\n\n Parameters\n ----------\n dim : hashable or sequence of hashable, optional\n Dimension(s) over which to unstack. 
By default unstacks all\n MultiIndexes.\n\n Returns\n -------\n unstacked : DataArray\n Array with unstacked data.\n\n Examples\n --------\n\n >>> arr = DataArray(np.arange(6).reshape(2, 3),\n ... coords=[('x', ['a', 'b']), ('y', [0, 1, 2])])\n >>> arr\n \n array([[0, 1, 2],\n [3, 4, 5]])\n Coordinates:\n * x (x) |S1 'a' 'b'\n * y (y) int64 0 1 2\n >>> stacked = arr.stack(z=('x', 'y'))\n >>> stacked.indexes['z']\n MultiIndex(levels=[['a', 'b'], [0, 1, 2]],\n codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],\n names=['x', 'y'])\n >>> roundtripped = stacked.unstack()\n >>> arr.identical(roundtripped)\n True\n\n See also\n --------\n DataArray.stack\n \"\"\"\n ds = self._to_temp_dataset().unstack(dim)\n return self._from_temp_dataset(ds)"},{"col":4,"comment":"Returns a new dataset with the first `n` values of each array\n for the specified dimension(s).\n\n Parameters\n ----------\n indexers : dict or int, default: 5\n A dict with keys matching dimensions and integer values `n`\n or a single integer `n` applied over all dimensions.\n One of indexers or indexers_kwargs must be provided.\n **indexers_kwargs : {dim: n, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n\n See Also\n --------\n Dataset.tail\n Dataset.thin\n DataArray.head\n ","endLoc":2050,"header":"def head(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any,\n ) -> \"Dataset\"","id":1248,"name":"head","nodeType":"Function","startLoc":2005,"text":"def head(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Returns a new dataset with the first `n` values of each array\n for the specified dimension(s).\n\n Parameters\n ----------\n indexers : dict or int, default: 5\n A dict with keys matching dimensions and integer values `n`\n or a single integer `n` applied over all dimensions.\n One of indexers or indexers_kwargs must be provided.\n **indexers_kwargs : {dim: n, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n\n See Also\n --------\n Dataset.tail\n Dataset.thin\n DataArray.head\n \"\"\"\n if not indexers_kwargs:\n if indexers is None:\n indexers = 5\n if not isinstance(indexers, int) and not is_dict_like(indexers):\n raise TypeError(\"indexers must be either dict-like or a single integer\")\n if isinstance(indexers, int):\n indexers = {dim: indexers for dim in self.dims}\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"head\")\n for k, v in indexers.items():\n if not isinstance(v, int):\n raise TypeError(\n \"expected integer type indexer for \"\n \"dimension %r, found %r\" % (k, type(v))\n )\n elif v < 0:\n raise ValueError(\n \"expected positive integer as indexer \"\n \"for dimension %r, found %s\" % (k, v)\n )\n indexers_slices = {k: slice(val) for k, val in indexers.items()}\n return self.isel(indexers_slices)"},{"col":4,"comment":"\n Unstack existing dimensions corresponding to MultiIndexes into\n multiple new dimensions.\n\n New dimensions will be added at the end.\n\n Parameters\n ----------\n dim : Hashable or iterable of Hashable, optional\n Dimension(s) over which to unstack. 
By default unstacks all\n MultiIndexes.\n\n Returns\n -------\n unstacked : Dataset\n Dataset with unstacked data.\n\n See also\n --------\n Dataset.stack\n ","endLoc":3409,"header":"def unstack(self, dim: Union[Hashable, Iterable[Hashable]] = None) -> \"Dataset\"","id":1249,"name":"unstack","nodeType":"Function","startLoc":3359,"text":"def unstack(self, dim: Union[Hashable, Iterable[Hashable]] = None) -> \"Dataset\":\n \"\"\"\n Unstack existing dimensions corresponding to MultiIndexes into\n multiple new dimensions.\n\n New dimensions will be added at the end.\n\n Parameters\n ----------\n dim : Hashable or iterable of Hashable, optional\n Dimension(s) over which to unstack. By default unstacks all\n MultiIndexes.\n\n Returns\n -------\n unstacked : Dataset\n Dataset with unstacked data.\n\n See also\n --------\n Dataset.stack\n \"\"\"\n if dim is None:\n dims = [\n d for d in self.dims if isinstance(self.get_index(d), pd.MultiIndex)\n ]\n else:\n if isinstance(dim, str) or not isinstance(dim, Iterable):\n dims = [dim]\n else:\n dims = list(dim)\n\n missing_dims = [d for d in dims if d not in self.dims]\n if missing_dims:\n raise ValueError(\n \"Dataset does not contain the dimensions: %s\" % missing_dims\n )\n\n non_multi_dims = [\n d for d in dims if not isinstance(self.get_index(d), pd.MultiIndex)\n ]\n if non_multi_dims:\n raise ValueError(\n \"cannot unstack dimensions that do not \"\n \"have a MultiIndex: %s\" % non_multi_dims\n )\n\n result = self.copy(deep=False)\n for dim in dims:\n result = result._unstack_once(dim)\n return result"},{"col":4,"comment":"Return a new DataArray whose data is given by the the last `n`\n values along the specified dimension(s). Default `n` = 5\n\n See Also\n --------\n Dataset.tail\n DataArray.head\n DataArray.thin\n ","endLoc":1083,"header":"def tail(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any\n ) -> \"DataArray\"","id":1250,"name":"tail","nodeType":"Function","startLoc":1068,"text":"def tail(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any\n ) -> \"DataArray\":\n \"\"\"Return a new DataArray whose data is given by the the last `n`\n values along the specified dimension(s). 
Default `n` = 5\n\n See Also\n --------\n Dataset.tail\n DataArray.head\n DataArray.thin\n \"\"\"\n ds = self._to_temp_dataset().tail(indexers, **indexers_kwargs)\n return self._from_temp_dataset(ds)"},{"col":0,"comment":"Git-specific installation logic for Versioneer.\n\n For Git, this means creating/changing .gitattributes to mark _version.py\n for export-subst keyword substitution.\n ","endLoc":1184,"header":"def do_vcs_install(manifest_in, versionfile_source, ipy)","id":1251,"name":"do_vcs_install","nodeType":"Function","startLoc":1149,"text":"def do_vcs_install(manifest_in, versionfile_source, ipy):\n \"\"\"Git-specific installation logic for Versioneer.\n\n For Git, this means creating/changing .gitattributes to mark _version.py\n for export-subst keyword substitution.\n \"\"\"\n GITS = [\"git\"]\n if sys.platform == \"win32\":\n GITS = [\"git.cmd\", \"git.exe\"]\n files = [manifest_in, versionfile_source]\n if ipy:\n files.append(ipy)\n try:\n me = __file__\n if me.endswith(\".pyc\") or me.endswith(\".pyo\"):\n me = os.path.splitext(me)[0] + \".py\"\n versioneer_file = os.path.relpath(me)\n except NameError:\n versioneer_file = \"versioneer.py\"\n files.append(versioneer_file)\n present = False\n try:\n f = open(\".gitattributes\", \"r\")\n for line in f.readlines():\n if line.strip().startswith(versionfile_source):\n if \"export-subst\" in line.strip().split()[1:]:\n present = True\n f.close()\n except OSError:\n pass\n if not present:\n f = open(\".gitattributes\", \"a+\")\n f.write(\"%s export-subst\\n\" % versionfile_source)\n f.close()\n files.append(\".gitattributes\")\n run_command(GITS, [\"add\", \"--\"] + files)"},{"attributeType":"null","col":21,"comment":"null","endLoc":3,"id":1252,"name":"da","nodeType":"Attribute","startLoc":3,"text":"da"},{"attributeType":"null","col":4,"comment":"null","endLoc":26,"id":1253,"name":"timeout","nodeType":"Attribute","startLoc":26,"text":"timeout"},{"attributeType":"null","col":16,"comment":"null","endLoc":4,"id":1254,"name":"np","nodeType":"Attribute","startLoc":4,"text":"np"},{"attributeType":"null","col":32,"comment":"null","endLoc":5,"id":1255,"name":"dask_version","nodeType":"Attribute","startLoc":5,"text":"dask_version"},{"attributeType":"null","col":4,"comment":"null","endLoc":8,"id":1256,"name":"meta_from_array","nodeType":"Attribute","startLoc":8,"text":"meta_from_array"},{"col":0,"comment":"","endLoc":1,"header":"dask_array_compat.py#","id":1257,"name":"","nodeType":"Function","startLoc":1,"text":"if LooseVersion(dask_version) >= LooseVersion(\"2.0.0\"):\n meta_from_array = da.utils.meta_from_array\nelse:\n # Copied from dask v2.4.0\n # Used under the terms of Dask's license, see licenses/DASK_LICENSE.\n import numbers\n\n def meta_from_array(x, ndim=None, dtype=None):\n \"\"\" Normalize an array to appropriate meta object\n\n Parameters\n ----------\n x: array-like, callable\n Either an object that looks sufficiently like a Numpy array,\n or a callable that accepts shape and dtype keywords\n ndim: int\n Number of dimensions of the array\n dtype: Numpy dtype\n A valid input for ``np.dtype``\n\n Returns\n -------\n array-like with zero elements of the correct dtype\n \"\"\"\n # If using x._meta, x must be a Dask Array, some libraries (e.g. 
zarr)\n # implement a _meta attribute that are incompatible with Dask Array._meta\n if hasattr(x, \"_meta\") and isinstance(x, da.Array):\n x = x._meta\n\n if dtype is None and x is None:\n raise ValueError(\"You must specify the meta or dtype of the array\")\n\n if np.isscalar(x):\n x = np.array(x)\n\n if x is None:\n x = np.ndarray\n\n if isinstance(x, type):\n x = x(shape=(0,) * (ndim or 0), dtype=dtype)\n\n if (\n not hasattr(x, \"shape\")\n or not hasattr(x, \"dtype\")\n or not isinstance(x.shape, tuple)\n ):\n return x\n\n if isinstance(x, list) or isinstance(x, tuple):\n ndims = [\n 0\n if isinstance(a, numbers.Number)\n else a.ndim\n if hasattr(a, \"ndim\")\n else len(a)\n for a in x\n ]\n a = [a if nd == 0 else meta_from_array(a, nd) for a, nd in zip(x, ndims)]\n return a if isinstance(x, list) else tuple(x)\n\n if ndim is None:\n ndim = x.ndim\n\n try:\n meta = x[tuple(slice(0, 0, None) for _ in range(x.ndim))]\n if meta.ndim != ndim:\n if ndim > x.ndim:\n meta = meta[\n (Ellipsis,) + tuple(None for _ in range(ndim - meta.ndim))\n ]\n meta = meta[tuple(slice(0, 0, None) for _ in range(meta.ndim))]\n elif ndim == 0:\n meta = meta.sum()\n else:\n meta = meta.reshape((0,) * ndim)\n except Exception:\n meta = np.empty((0,) * ndim, dtype=dtype or x.dtype)\n\n if np.isscalar(meta):\n meta = np.array(meta)\n\n if dtype and meta.dtype != dtype:\n meta = meta.astype(dtype)\n\n return meta"},{"col":4,"comment":"Returns a new dataset with the last `n` values of each array\n for the specified dimension(s).\n\n Parameters\n ----------\n indexers : dict or int, default: 5\n A dict with keys matching dimensions and integer values `n`\n or a single integer `n` applied over all dimensions.\n One of indexers or indexers_kwargs must be provided.\n **indexers_kwargs : {dim: n, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n\n See Also\n --------\n Dataset.head\n Dataset.thin\n DataArray.tail\n ","endLoc":2100,"header":"def tail(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any,\n ) -> \"Dataset\"","id":1258,"name":"tail","nodeType":"Function","startLoc":2052,"text":"def tail(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Returns a new dataset with the last `n` values of each array\n for the specified dimension(s).\n\n Parameters\n ----------\n indexers : dict or int, default: 5\n A dict with keys matching dimensions and integer values `n`\n or a single integer `n` applied over all dimensions.\n One of indexers or indexers_kwargs must be provided.\n **indexers_kwargs : {dim: n, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n\n See Also\n --------\n Dataset.head\n Dataset.thin\n DataArray.tail\n \"\"\"\n if not indexers_kwargs:\n if indexers is None:\n indexers = 5\n if not isinstance(indexers, int) and not is_dict_like(indexers):\n raise TypeError(\"indexers must be either dict-like or a single integer\")\n if isinstance(indexers, int):\n indexers = {dim: indexers for dim in self.dims}\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"tail\")\n for k, v in indexers.items():\n if not isinstance(v, int):\n raise TypeError(\n \"expected integer type indexer for \"\n \"dimension %r, found %r\" % (k, type(v))\n )\n elif v < 0:\n raise ValueError(\n \"expected positive integer as indexer \"\n \"for dimension %r, found %s\" % (k, v)\n )\n 
indexers_slices = {\n k: slice(-val, None) if val != 0 else slice(val)\n for k, val in indexers.items()\n }\n return self.isel(indexers_slices)"},{"col":4,"comment":"null","endLoc":261,"header":"@classmethod\n def open_group(\n cls,\n store,\n mode=\"r\",\n synchronizer=None,\n group=None,\n consolidated=False,\n consolidate_on_close=False,\n )","id":1259,"name":"open_group","nodeType":"Function","startLoc":243,"text":"@classmethod\n def open_group(\n cls,\n store,\n mode=\"r\",\n synchronizer=None,\n group=None,\n consolidated=False,\n consolidate_on_close=False,\n ):\n import zarr\n\n open_kwargs = dict(mode=mode, synchronizer=synchronizer, path=group)\n if consolidated:\n # TODO: an option to pass the metadata_key keyword\n zarr_group = zarr.open_consolidated(store, **open_kwargs)\n else:\n zarr_group = zarr.open_group(store, **open_kwargs)\n return cls(zarr_group, consolidate_on_close)"},{"attributeType":"null","col":4,"comment":"null","endLoc":27,"id":1260,"name":"repeat","nodeType":"Attribute","startLoc":27,"text":"repeat"},{"attributeType":"null","col":4,"comment":"null","endLoc":28,"id":1261,"name":"number","nodeType":"Attribute","startLoc":28,"text":"number"},{"fileName":"__init__.py","filePath":"xarray/plot","id":1262,"nodeType":"File","text":"from .facetgrid import FacetGrid\nfrom .plot import contour, contourf, hist, imshow, line, pcolormesh, plot, step\n\n__all__ = [\n \"plot\",\n \"line\",\n \"step\",\n \"contour\",\n \"contourf\",\n \"hist\",\n \"imshow\",\n \"pcolormesh\",\n \"FacetGrid\",\n]\n"},{"attributeType":"null","col":8,"comment":"null","endLoc":38,"id":1263,"name":"block_chunks","nodeType":"Attribute","startLoc":38,"text":"self.block_chunks"},{"attributeType":"null","col":8,"comment":"null","endLoc":44,"id":1264,"name":"time_chunks","nodeType":"Attribute","startLoc":44,"text":"self.time_chunks"},{"attributeType":"null","col":8,"comment":"null","endLoc":34,"id":1265,"name":"nt","nodeType":"Attribute","startLoc":34,"text":"self.nt"},{"attributeType":"null","col":8,"comment":"null","endLoc":35,"id":1266,"name":"nx","nodeType":"Attribute","startLoc":35,"text":"self.nx"},{"attributeType":"null","col":8,"comment":"null","endLoc":89,"id":1267,"name":"vinds","nodeType":"Attribute","startLoc":89,"text":"self.vinds"},{"className":"FacetGrid","col":0,"comment":"\n Initialize the matplotlib figure and FacetGrid object.\n\n The :class:`FacetGrid` is an object that links a xarray DataArray to\n a matplotlib figure with a particular structure.\n\n In particular, :class:`FacetGrid` is used to draw plots with multiple\n Axes where each Axes shows the same relationship conditioned on\n different levels of some dimension. 
It's possible to condition on up to\n two variables by assigning variables to the rows and columns of the\n grid.\n\n The general approach to plotting here is called \"small multiples\",\n where the same kind of plot is repeated multiple times, and the\n specific use of small multiples to display the same relationship\n conditioned on one ore more other variables is often called a \"trellis\n plot\".\n\n The basic workflow is to initialize the :class:`FacetGrid` object with\n the DataArray and the variable names that are used to structure the grid.\n Then plotting functions can be applied to each subset by calling\n :meth:`FacetGrid.map_dataarray` or :meth:`FacetGrid.map`.\n\n Attributes\n ----------\n axes : numpy object array\n Contains axes in corresponding position, as returned from\n plt.subplots\n fig : matplotlib.Figure\n The figure containing all the axes\n name_dicts : numpy object array\n Contains dictionaries mapping coordinate names to values. None is\n used as a sentinel value for axes which should remain empty, ie.\n sometimes the bottom right grid\n ","endLoc":574,"id":1268,"nodeType":"Class","startLoc":35,"text":"class FacetGrid:\n \"\"\"\n Initialize the matplotlib figure and FacetGrid object.\n\n The :class:`FacetGrid` is an object that links a xarray DataArray to\n a matplotlib figure with a particular structure.\n\n In particular, :class:`FacetGrid` is used to draw plots with multiple\n Axes where each Axes shows the same relationship conditioned on\n different levels of some dimension. It's possible to condition on up to\n two variables by assigning variables to the rows and columns of the\n grid.\n\n The general approach to plotting here is called \"small multiples\",\n where the same kind of plot is repeated multiple times, and the\n specific use of small multiples to display the same relationship\n conditioned on one ore more other variables is often called a \"trellis\n plot\".\n\n The basic workflow is to initialize the :class:`FacetGrid` object with\n the DataArray and the variable names that are used to structure the grid.\n Then plotting functions can be applied to each subset by calling\n :meth:`FacetGrid.map_dataarray` or :meth:`FacetGrid.map`.\n\n Attributes\n ----------\n axes : numpy object array\n Contains axes in corresponding position, as returned from\n plt.subplots\n fig : matplotlib.Figure\n The figure containing all the axes\n name_dicts : numpy object array\n Contains dictionaries mapping coordinate names to values. None is\n used as a sentinel value for axes which should remain empty, ie.\n sometimes the bottom right grid\n \"\"\"\n\n def __init__(\n self,\n data,\n col=None,\n row=None,\n col_wrap=None,\n sharex=True,\n sharey=True,\n figsize=None,\n aspect=1,\n size=3,\n subplot_kws=None,\n ):\n \"\"\"\n Parameters\n ----------\n data : DataArray\n xarray DataArray to be plotted\n row, col : strings\n Dimesion names that define subsets of the data, which will be drawn\n on separate facets in the grid.\n col_wrap : int, optional\n \"Wrap\" the column variable at this width, so that the column facets\n sharex : bool, optional\n If true, the facets will share x axes\n sharey : bool, optional\n If true, the facets will share y axes\n figsize : tuple, optional\n A tuple (width, height) of the figure in inches.\n If set, overrides ``size`` and ``aspect``.\n aspect : scalar, optional\n Aspect ratio of each facet, so that ``aspect * size`` gives the\n width of each facet in inches\n size : scalar, optional\n Height (in inches) of each facet. 
See also: ``aspect``\n subplot_kws : dict, optional\n Dictionary of keyword arguments for matplotlib subplots\n\n \"\"\"\n\n plt = import_matplotlib_pyplot()\n\n # Handle corner case of nonunique coordinates\n rep_col = col is not None and not data[col].to_index().is_unique\n rep_row = row is not None and not data[row].to_index().is_unique\n if rep_col or rep_row:\n raise ValueError(\n \"Coordinates used for faceting cannot \"\n \"contain repeated (nonunique) values.\"\n )\n\n # single_group is the grouping variable, if there is exactly one\n if col and row:\n single_group = False\n nrow = len(data[row])\n ncol = len(data[col])\n nfacet = nrow * ncol\n if col_wrap is not None:\n warnings.warn(\"Ignoring col_wrap since both col and row \" \"were passed\")\n elif row and not col:\n single_group = row\n elif not row and col:\n single_group = col\n else:\n raise ValueError(\"Pass a coordinate name as an argument for row or col\")\n\n # Compute grid shape\n if single_group:\n nfacet = len(data[single_group])\n if col:\n # idea - could add heuristic for nice shapes like 3x4\n ncol = nfacet\n if row:\n ncol = 1\n if col_wrap is not None:\n # Overrides previous settings\n ncol = col_wrap\n nrow = int(np.ceil(nfacet / ncol))\n\n # Set the subplot kwargs\n subplot_kws = {} if subplot_kws is None else subplot_kws\n\n if figsize is None:\n # Calculate the base figure size with extra horizontal space for a\n # colorbar\n cbar_space = 1\n figsize = (ncol * size * aspect + cbar_space, nrow * size)\n\n fig, axes = plt.subplots(\n nrow,\n ncol,\n sharex=sharex,\n sharey=sharey,\n squeeze=False,\n figsize=figsize,\n subplot_kw=subplot_kws,\n )\n\n # Set up the lists of names for the row and column facet variables\n col_names = list(data[col].values) if col else []\n row_names = list(data[row].values) if row else []\n\n if single_group:\n full = [{single_group: x} for x in data[single_group].values]\n empty = [None for x in range(nrow * ncol - len(full))]\n name_dicts = full + empty\n else:\n rowcols = itertools.product(row_names, col_names)\n name_dicts = [{row: r, col: c} for r, c in rowcols]\n\n name_dicts = np.array(name_dicts).reshape(nrow, ncol)\n\n # Set up the class attributes\n # ---------------------------\n\n # First the public API\n self.data = data\n self.name_dicts = name_dicts\n self.fig = fig\n self.axes = axes\n self.row_names = row_names\n self.col_names = col_names\n self.figlegend = None\n\n # Next the private variables\n self._single_group = single_group\n self._nrow = nrow\n self._row_var = row\n self._ncol = ncol\n self._col_var = col\n self._col_wrap = col_wrap\n self._x_var = None\n self._y_var = None\n self._cmap_extend = None\n self._mappables = []\n self._finalized = False\n\n @property\n def _left_axes(self):\n return self.axes[:, 0]\n\n @property\n def _bottom_axes(self):\n return self.axes[-1, :]\n\n def map_dataarray(self, func, x, y, **kwargs):\n \"\"\"\n Apply a plotting function to a 2d facet's subset of the data.\n\n This is more convenient and less general than ``FacetGrid.map``\n\n Parameters\n ----------\n func : callable\n A plotting function with the same signature as a 2d xarray\n plotting method such as `xarray.plot.imshow`\n x, y : string\n Names of the coordinates to plot on x, y axes\n kwargs :\n additional keyword arguments to func\n\n Returns\n -------\n self : FacetGrid object\n\n \"\"\"\n\n if kwargs.get(\"cbar_ax\", None) is not None:\n raise ValueError(\"cbar_ax not supported by FacetGrid.\")\n\n cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs(\n func, 
self.data.values, **kwargs\n )\n\n self._cmap_extend = cmap_params.get(\"extend\")\n\n # Order is important\n func_kwargs = {\n k: v\n for k, v in kwargs.items()\n if k not in {\"cmap\", \"colors\", \"cbar_kwargs\", \"levels\"}\n }\n func_kwargs.update(cmap_params)\n func_kwargs.update({\"add_colorbar\": False, \"add_labels\": False})\n\n # Get x, y labels for the first subplot\n x, y = _infer_xy_labels(\n darray=self.data.loc[self.name_dicts.flat[0]],\n x=x,\n y=y,\n imshow=func.__name__ == \"imshow\",\n rgb=kwargs.get(\"rgb\", None),\n )\n\n for d, ax in zip(self.name_dicts.flat, self.axes.flat):\n # None is the sentinel value\n if d is not None:\n subset = self.data.loc[d]\n mappable = func(subset, x=x, y=y, ax=ax, **func_kwargs)\n self._mappables.append(mappable)\n\n self._finalize_grid(x, y)\n\n if kwargs.get(\"add_colorbar\", True):\n self.add_colorbar(**cbar_kwargs)\n\n return self\n\n def map_dataarray_line(\n self, func, x, y, hue, add_legend=True, _labels=None, **kwargs\n ):\n from .plot import _infer_line_data\n\n for d, ax in zip(self.name_dicts.flat, self.axes.flat):\n # None is the sentinel value\n if d is not None:\n subset = self.data.loc[d]\n mappable = func(\n subset,\n x=x,\n y=y,\n ax=ax,\n hue=hue,\n add_legend=False,\n _labels=False,\n **kwargs\n )\n self._mappables.append(mappable)\n\n _, _, hueplt, xlabel, ylabel, huelabel = _infer_line_data(\n darray=self.data.loc[self.name_dicts.flat[0]], x=x, y=y, hue=hue\n )\n\n self._hue_var = hueplt\n self._hue_label = huelabel\n self._finalize_grid(xlabel, ylabel)\n\n if add_legend and hueplt is not None and huelabel is not None:\n self.add_legend()\n\n return self\n\n def map_dataset(\n self, func, x=None, y=None, hue=None, hue_style=None, add_guide=None, **kwargs\n ):\n from .dataset_plot import _infer_meta_data, _parse_size\n\n kwargs[\"add_guide\"] = False\n kwargs[\"_is_facetgrid\"] = True\n\n if kwargs.get(\"markersize\", None):\n kwargs[\"size_mapping\"] = _parse_size(\n self.data[kwargs[\"markersize\"]], kwargs.pop(\"size_norm\", None)\n )\n\n meta_data = _infer_meta_data(self.data, x, y, hue, hue_style, add_guide)\n kwargs[\"meta_data\"] = meta_data\n\n if hue and meta_data[\"hue_style\"] == \"continuous\":\n cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs(\n func, self.data[hue].values, **kwargs\n )\n kwargs[\"meta_data\"][\"cmap_params\"] = cmap_params\n kwargs[\"meta_data\"][\"cbar_kwargs\"] = cbar_kwargs\n\n for d, ax in zip(self.name_dicts.flat, self.axes.flat):\n # None is the sentinel value\n if d is not None:\n subset = self.data.loc[d]\n maybe_mappable = func(\n ds=subset, x=x, y=y, hue=hue, hue_style=hue_style, ax=ax, **kwargs\n )\n # TODO: this is needed to get legends to work.\n # but maybe_mappable is a list in that case :/\n self._mappables.append(maybe_mappable)\n\n self._finalize_grid(meta_data[\"xlabel\"], meta_data[\"ylabel\"])\n\n if hue:\n self._hue_label = meta_data.pop(\"hue_label\", None)\n if meta_data[\"add_legend\"]:\n self._hue_var = meta_data[\"hue\"]\n self.add_legend()\n elif meta_data[\"add_colorbar\"]:\n self.add_colorbar(label=self._hue_label, **cbar_kwargs)\n\n return self\n\n def _finalize_grid(self, *axlabels):\n \"\"\"Finalize the annotations and layout.\"\"\"\n if not self._finalized:\n self.set_axis_labels(*axlabels)\n self.set_titles()\n self.fig.tight_layout()\n\n for ax, namedict in zip(self.axes.flat, self.name_dicts.flat):\n if namedict is None:\n ax.set_visible(False)\n\n self._finalized = True\n\n def add_legend(self, **kwargs):\n figlegend = self.fig.legend(\n 
handles=self._mappables[-1],\n labels=list(self._hue_var.values),\n title=self._hue_label,\n loc=\"center right\",\n **kwargs\n )\n\n self.figlegend = figlegend\n # Draw the plot to set the bounding boxes correctly\n self.fig.draw(self.fig.canvas.get_renderer())\n\n # Calculate and set the new width of the figure so the legend fits\n legend_width = figlegend.get_window_extent().width / self.fig.dpi\n figure_width = self.fig.get_figwidth()\n self.fig.set_figwidth(figure_width + legend_width)\n\n # Draw the plot again to get the new transformations\n self.fig.draw(self.fig.canvas.get_renderer())\n\n # Now calculate how much space we need on the right side\n legend_width = figlegend.get_window_extent().width / self.fig.dpi\n space_needed = legend_width / (figure_width + legend_width) + 0.02\n # margin = .01\n # _space_needed = margin + space_needed\n right = 1 - space_needed\n\n # Place the subplot axes to give space for the legend\n self.fig.subplots_adjust(right=right)\n\n def add_colorbar(self, **kwargs):\n \"\"\"Draw a colorbar\n \"\"\"\n kwargs = kwargs.copy()\n if self._cmap_extend is not None:\n kwargs.setdefault(\"extend\", self._cmap_extend)\n if \"label\" not in kwargs:\n kwargs.setdefault(\"label\", label_from_attrs(self.data))\n self.cbar = self.fig.colorbar(\n self._mappables[-1], ax=list(self.axes.flat), **kwargs\n )\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n if x_var in self.data.coords:\n self._x_var = x_var\n self.set_xlabels(label_from_attrs(self.data[x_var]))\n else:\n # x_var is a string\n self.set_xlabels(x_var)\n\n if y_var is not None:\n if y_var in self.data.coords:\n self._y_var = y_var\n self.set_ylabels(label_from_attrs(self.data[y_var]))\n else:\n self.set_ylabels(y_var)\n return self\n\n def set_xlabels(self, label=None, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = label_from_attrs(self.data[self._x_var])\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n return self\n\n def set_ylabels(self, label=None, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = label_from_attrs(self.data[self._y_var])\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n return self\n\n def set_titles(self, template=\"{coord} = {value}\", maxchar=30, size=None, **kwargs):\n \"\"\"\n Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for plot titles containing {coord} and {value}\n maxchar : int\n Truncate titles at maxchar\n kwargs : keyword args\n additional arguments to matplotlib.text\n\n Returns\n -------\n self: FacetGrid object\n\n \"\"\"\n import matplotlib as mpl\n\n if size is None:\n size = mpl.rcParams[\"axes.labelsize\"]\n\n nicetitle = functools.partial(_nicetitle, maxchar=maxchar, template=template)\n\n if self._single_group:\n for d, ax in zip(self.name_dicts.flat, self.axes.flat):\n # Only label the ones with data\n if d is not None:\n coord, value = list(d.items()).pop()\n title = nicetitle(coord, value, maxchar=maxchar)\n ax.set_title(title, size=size, **kwargs)\n else:\n # The row titles on the right edge of the grid\n for ax, row_name in zip(self.axes[:, -1], self.row_names):\n title = nicetitle(coord=self._row_var, value=row_name, maxchar=maxchar)\n ax.annotate(\n title,\n xy=(1.02, 0.5),\n xycoords=\"axes fraction\",\n rotation=270,\n 
ha=\"left\",\n va=\"center\",\n **kwargs\n )\n\n # The column titles on the top row\n for ax, col_name in zip(self.axes[0, :], self.col_names):\n title = nicetitle(coord=self._col_var, value=col_name, maxchar=maxchar)\n ax.set_title(title, size=size, **kwargs)\n\n return self\n\n def set_ticks(self, max_xticks=_NTICKS, max_yticks=_NTICKS, fontsize=_FONTSIZE):\n \"\"\"\n Set and control tick behavior\n\n Parameters\n ----------\n max_xticks, max_yticks : int, optional\n Maximum number of labeled ticks to plot on x, y axes\n fontsize : string or int\n Font size as used by matplotlib text\n\n Returns\n -------\n self : FacetGrid object\n\n \"\"\"\n from matplotlib.ticker import MaxNLocator\n\n # Both are necessary\n x_major_locator = MaxNLocator(nbins=max_xticks)\n y_major_locator = MaxNLocator(nbins=max_yticks)\n\n for ax in self.axes.flat:\n ax.xaxis.set_major_locator(x_major_locator)\n ax.yaxis.set_major_locator(y_major_locator)\n for tick in itertools.chain(\n ax.xaxis.get_major_ticks(), ax.yaxis.get_major_ticks()\n ):\n tick.label1.set_fontsize(fontsize)\n\n return self\n\n def map(self, func, *args, **kwargs):\n \"\"\"\n Apply a plotting function to each facet's subset of the data.\n\n Parameters\n ----------\n func : callable\n A plotting function that takes data and keyword arguments. It\n must plot to the currently active matplotlib Axes and take a\n `color` keyword argument. If faceting on the `hue` dimension,\n it must also take a `label` keyword argument.\n args : strings\n Column names in self.data that identify variables with data to\n plot. The data for each variable is passed to `func` in the\n order the variables are specified in the call.\n kwargs : keyword arguments\n All keyword arguments are passed to the plotting function.\n\n Returns\n -------\n self : FacetGrid object\n\n \"\"\"\n plt = import_matplotlib_pyplot()\n\n for ax, namedict in zip(self.axes.flat, self.name_dicts.flat):\n if namedict is not None:\n data = self.data.loc[namedict]\n plt.sca(ax)\n innerargs = [data[a].values for a in args]\n maybe_mappable = func(*innerargs, **kwargs)\n # TODO: better way to verify that an artist is mappable?\n # https://stackoverflow.com/questions/33023036/is-it-possible-to-detect-if-a-matplotlib-artist-is-a-mappable-suitable-for-use-w#33023522\n if maybe_mappable and hasattr(maybe_mappable, \"autoscale_None\"):\n self._mappables.append(maybe_mappable)\n\n self._finalize_grid(*args[:2])\n\n return self"},{"attributeType":"null","col":8,"comment":"null","endLoc":36,"id":1269,"name":"ny","nodeType":"Attribute","startLoc":36,"text":"self.ny"},{"col":0,"comment":"Try to determine the version from the parent directory name.\n\n Source tarballs conventionally unpack into a directory that includes both\n the project name and a version string. We will also support searching up\n two directory levels for an appropriately named parent directory\n ","endLoc":1215,"header":"def versions_from_parentdir(parentdir_prefix, root, verbose)","id":1270,"name":"versions_from_parentdir","nodeType":"Function","startLoc":1187,"text":"def versions_from_parentdir(parentdir_prefix, root, verbose):\n \"\"\"Try to determine the version from the parent directory name.\n\n Source tarballs conventionally unpack into a directory that includes both\n the project name and a version string. 
We will also support searching up\n two directory levels for an appropriately named parent directory\n \"\"\"\n rootdirs = []\n\n for i in range(3):\n dirname = os.path.basename(root)\n if dirname.startswith(parentdir_prefix):\n return {\n \"version\": dirname[len(parentdir_prefix) :],\n \"full-revisionid\": None,\n \"dirty\": False,\n \"error\": None,\n \"date\": None,\n }\n else:\n rootdirs.append(root)\n root = os.path.dirname(root) # up a level\n\n if verbose:\n print(\n \"Tried directories %s but none started with prefix %s\"\n % (str(rootdirs), parentdir_prefix)\n )\n raise NotThisMethod(\"rootdir doesn't start with parentdir_prefix\")"},{"attributeType":"null","col":8,"comment":"null","endLoc":84,"id":1271,"name":"oinds","nodeType":"Attribute","startLoc":84,"text":"self.oinds"},{"attributeType":"Dataset","col":8,"comment":"null","endLoc":33,"id":1272,"name":"ds","nodeType":"Attribute","startLoc":33,"text":"self.ds"},{"col":4,"comment":"null","endLoc":17,"header":"def time_unstack_slow(self)","id":1273,"name":"time_unstack_slow","nodeType":"Function","startLoc":16,"text":"def time_unstack_slow(self):\n self.ds[:, ::-1].unstack(\"flat_dim\")"},{"col":4,"comment":"null","endLoc":211,"header":"@property\n def _left_axes(self)","id":1274,"name":"_left_axes","nodeType":"Function","startLoc":209,"text":"@property\n def _left_axes(self):\n return self.axes[:, 0]"},{"col":4,"comment":"null","endLoc":215,"header":"@property\n def _bottom_axes(self)","id":1275,"name":"_bottom_axes","nodeType":"Function","startLoc":213,"text":"@property\n def _bottom_axes(self):\n return self.axes[-1, :]"},{"col":4,"comment":"\n Apply a plotting function to a 2d facet's subset of the data.\n\n This is more convenient and less general than ``FacetGrid.map``\n\n Parameters\n ----------\n func : callable\n A plotting function with the same signature as a 2d xarray\n plotting method such as `xarray.plot.imshow`\n x, y : string\n Names of the coordinates to plot on x, y axes\n kwargs :\n additional keyword arguments to func\n\n Returns\n -------\n self : FacetGrid object\n\n ","endLoc":278,"header":"def map_dataarray(self, func, x, y, **kwargs)","id":1276,"name":"map_dataarray","nodeType":"Function","startLoc":217,"text":"def map_dataarray(self, func, x, y, **kwargs):\n \"\"\"\n Apply a plotting function to a 2d facet's subset of the data.\n\n This is more convenient and less general than ``FacetGrid.map``\n\n Parameters\n ----------\n func : callable\n A plotting function with the same signature as a 2d xarray\n plotting method such as `xarray.plot.imshow`\n x, y : string\n Names of the coordinates to plot on x, y axes\n kwargs :\n additional keyword arguments to func\n\n Returns\n -------\n self : FacetGrid object\n\n \"\"\"\n\n if kwargs.get(\"cbar_ax\", None) is not None:\n raise ValueError(\"cbar_ax not supported by FacetGrid.\")\n\n cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs(\n func, self.data.values, **kwargs\n )\n\n self._cmap_extend = cmap_params.get(\"extend\")\n\n # Order is important\n func_kwargs = {\n k: v\n for k, v in kwargs.items()\n if k not in {\"cmap\", \"colors\", \"cbar_kwargs\", \"levels\"}\n }\n func_kwargs.update(cmap_params)\n func_kwargs.update({\"add_colorbar\": False, \"add_labels\": False})\n\n # Get x, y labels for the first subplot\n x, y = _infer_xy_labels(\n darray=self.data.loc[self.name_dicts.flat[0]],\n x=x,\n y=y,\n imshow=func.__name__ == \"imshow\",\n rgb=kwargs.get(\"rgb\", None),\n )\n\n for d, ax in zip(self.name_dicts.flat, self.axes.flat):\n # None is the 
sentinel value\n if d is not None:\n subset = self.data.loc[d]\n mappable = func(subset, x=x, y=y, ax=ax, **func_kwargs)\n self._mappables.append(mappable)\n\n self._finalize_grid(x, y)\n\n if kwargs.get(\"add_colorbar\", True):\n self.add_colorbar(**cbar_kwargs)\n\n return self"},{"className":"IOWriteSingleNetCDF3","col":0,"comment":"null","endLoc":105,"id":1277,"nodeType":"Class","startLoc":96,"text":"class IOWriteSingleNetCDF3(IOSingleNetCDF):\n def setup(self):\n self.format = \"NETCDF3_64BIT\"\n self.make_ds()\n\n def time_write_dataset_netcdf4(self):\n self.ds.to_netcdf(\"test_netcdf4_write.nc\", engine=\"netcdf4\", format=self.format)\n\n def time_write_dataset_scipy(self):\n self.ds.to_netcdf(\"test_scipy_write.nc\", engine=\"scipy\", format=self.format)"},{"col":4,"comment":"null","endLoc":99,"header":"def setup(self)","id":1278,"name":"setup","nodeType":"Function","startLoc":97,"text":"def setup(self):\n self.format = \"NETCDF3_64BIT\"\n self.make_ds()"},{"col":0,"comment":"Try to determine the version from _version.py if present.","endLoc":1252,"header":"def versions_from_file(filename)","id":1279,"name":"versions_from_file","nodeType":"Function","startLoc":1236,"text":"def versions_from_file(filename):\n \"\"\"Try to determine the version from _version.py if present.\"\"\"\n try:\n with open(filename) as f:\n contents = f.read()\n except OSError:\n raise NotThisMethod(\"unable to read _version.py\")\n mo = re.search(\n r\"version_json = '''\\n(.*)''' # END VERSION_JSON\", contents, re.M | re.S\n )\n if not mo:\n mo = re.search(\n r\"version_json = '''\\r\\n(.*)''' # END VERSION_JSON\", contents, re.M | re.S\n )\n if not mo:\n raise NotThisMethod(\"no version_json in _version.py\")\n return json.loads(mo.group(1))"},{"col":4,"comment":"null","endLoc":269,"header":"def __init__(self, zarr_group, consolidate_on_close=False)","id":1280,"name":"__init__","nodeType":"Function","startLoc":263,"text":"def __init__(self, zarr_group, consolidate_on_close=False):\n self.ds = zarr_group\n self._read_only = self.ds.read_only\n self._synchronizer = self.ds.synchronizer\n self._group = self.ds.path\n self._consolidate_on_close = consolidate_on_close\n self.append_dim = None"},{"col":4,"comment":"null","endLoc":102,"header":"def time_write_dataset_netcdf4(self)","id":1281,"name":"time_write_dataset_netcdf4","nodeType":"Function","startLoc":101,"text":"def time_write_dataset_netcdf4(self):\n self.ds.to_netcdf(\"test_netcdf4_write.nc\", engine=\"netcdf4\", format=self.format)"},{"col":0,"comment":"Write the given version number to the given _version.py file.","endLoc":1262,"header":"def write_to_version_file(filename, versions)","id":1283,"name":"write_to_version_file","nodeType":"Function","startLoc":1255,"text":"def write_to_version_file(filename, versions):\n \"\"\"Write the given version number to the given _version.py file.\"\"\"\n os.unlink(filename)\n contents = json.dumps(versions, sort_keys=True, indent=1, separators=(\",\", \": \"))\n with open(filename, \"w\") as f:\n f.write(SHORT_VERSION_PY % contents)\n\n print(\"set %s to '%s'\" % (filename, versions[\"version\"]))"},{"col":0,"comment":"\n Determine x and y labels. For use in _plot2d\n\n darray must be a 2 dimensional data array, or 3d for imshow only.\n ","endLoc":368,"header":"def _infer_xy_labels(darray, x, y, imshow=False, rgb=None)","id":1284,"name":"_infer_xy_labels","nodeType":"Function","startLoc":344,"text":"def _infer_xy_labels(darray, x, y, imshow=False, rgb=None):\n \"\"\"\n Determine x and y labels. 
For use in _plot2d\n\n darray must be a 2 dimensional data array, or 3d for imshow only.\n \"\"\"\n assert x is None or x != y\n if imshow and darray.ndim == 3:\n return _infer_xy_labels_3d(darray, x, y, rgb)\n\n if x is None and y is None:\n if darray.ndim != 2:\n raise ValueError(\"DataArray must be 2d\")\n y, x = darray.dims\n elif x is None:\n if y not in darray.dims and y not in darray.coords:\n raise ValueError(\"y must be a dimension name if x is not supplied\")\n x = darray.dims[0] if y == darray.dims[1] else darray.dims[1]\n elif y is None:\n if x not in darray.dims and x not in darray.coords:\n raise ValueError(\"x must be a dimension name if y is not supplied\")\n y = darray.dims[0] if x == darray.dims[1] else darray.dims[1]\n elif any(k not in darray.coords and k not in darray.dims for k in (x, y)):\n raise ValueError(\"x and y must be coordinate variables\")\n return x, y"},{"col":0,"comment":"\n Determine x and y labels for showing RGB images.\n\n Attempts to infer which dimension is RGB/RGBA by size and order of dims.\n\n ","endLoc":341,"header":"def _infer_xy_labels_3d(darray, x, y, rgb)","id":1285,"name":"_infer_xy_labels_3d","nodeType":"Function","startLoc":286,"text":"def _infer_xy_labels_3d(darray, x, y, rgb):\n \"\"\"\n Determine x and y labels for showing RGB images.\n\n Attempts to infer which dimension is RGB/RGBA by size and order of dims.\n\n \"\"\"\n assert rgb is None or rgb != x\n assert rgb is None or rgb != y\n # Start by detecting and reporting invalid combinations of arguments\n assert darray.ndim == 3\n not_none = [a for a in (x, y, rgb) if a is not None]\n if len(set(not_none)) < len(not_none):\n raise ValueError(\n \"Dimension names must be None or unique strings, but imshow was \"\n \"passed x=%r, y=%r, and rgb=%r.\" % (x, y, rgb)\n )\n for label in not_none:\n if label not in darray.dims:\n raise ValueError(\"%r is not a dimension\" % (label,))\n\n # Then calculate rgb dimension if certain and check validity\n could_be_color = [\n label\n for label in darray.dims\n if darray[label].size in (3, 4) and label not in (x, y)\n ]\n if rgb is None and not could_be_color:\n raise ValueError(\n \"A 3-dimensional array was passed to imshow(), but there is no \"\n \"dimension that could be color. At least one dimension must be \"\n \"of size 3 (RGB) or 4 (RGBA), and not given as x or y.\"\n )\n if rgb is None and len(could_be_color) == 1:\n rgb = could_be_color[0]\n if rgb is not None and darray[rgb].size not in (3, 4):\n raise ValueError(\n \"Cannot interpret dim %r of size %s as RGB or RGBA.\"\n % (rgb, darray[rgb].size)\n )\n\n # If rgb dimension is still unknown, there must be two or three dimensions\n # in could_be_color. We therefore warn, and use a heuristic to break ties.\n if rgb is None:\n assert len(could_be_color) in (2, 3)\n rgb = could_be_color[-1]\n warnings.warn(\n \"Several dimensions of this array could be colors. Xarray \"\n \"will use the last possible dimension (%r) to match \"\n \"matplotlib.pyplot.imshow. 
You can pass names of x, y, \"\n \"and/or rgb dimensions to override this guess.\" % rgb\n )\n assert rgb is not None\n\n # Finally, we pick out the red slice and delegate to the 2D version:\n return _infer_xy_labels(darray.isel(**{rgb: 0}), x, y)"},{"col":0,"comment":"Return a + if we don't already have one, else return a .","endLoc":1269,"header":"def plus_or_dot(pieces)","id":1286,"name":"plus_or_dot","nodeType":"Function","startLoc":1265,"text":"def plus_or_dot(pieces):\n \"\"\"Return a + if we don't already have one, else return a .\"\"\"\n if \"+\" in pieces.get(\"closest-tag\", \"\"):\n return \".\"\n return \"+\""},{"attributeType":"DataArray","col":8,"comment":"null","endLoc":11,"id":1287,"name":"ds","nodeType":"Attribute","startLoc":11,"text":"self.ds"},{"className":"UnstackingDask","col":0,"comment":"null","endLoc":24,"id":1288,"nodeType":"Class","startLoc":20,"text":"class UnstackingDask(Unstacking):\n def setup(self, *args, **kwargs):\n requires_dask()\n super().setup(**kwargs)\n self.ds = self.ds.chunk({\"flat_dim\": 50})"},{"col":4,"comment":"null","endLoc":24,"header":"def setup(self, *args, **kwargs)","id":1289,"name":"setup","nodeType":"Function","startLoc":21,"text":"def setup(self, *args, **kwargs):\n requires_dask()\n super().setup(**kwargs)\n self.ds = self.ds.chunk({\"flat_dim\": 50})"},{"col":0,"comment":"Build up version string, with post-release \"local version identifier\".\n\n Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n Exceptions:\n 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]\n ","endLoc":1293,"header":"def render_pep440(pieces)","id":1290,"name":"render_pep440","nodeType":"Function","startLoc":1272,"text":"def render_pep440(pieces):\n \"\"\"Build up version string, with post-release \"local version identifier\".\n\n Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n Exceptions:\n 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += plus_or_dot(pieces)\n rendered += \"%d.g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n if pieces[\"dirty\"]:\n rendered += \".dirty\"\n else:\n # exception #1\n rendered = \"0+untagged.%d.g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n if pieces[\"dirty\"]:\n rendered += \".dirty\"\n return rendered"},{"col":0,"comment":"TAG[.post.devDISTANCE] -- No -dirty.\n\n Exceptions:\n 1: no tags. 0.post.devDISTANCE\n ","endLoc":1309,"header":"def render_pep440_pre(pieces)","id":1291,"name":"render_pep440_pre","nodeType":"Function","startLoc":1296,"text":"def render_pep440_pre(pieces):\n \"\"\"TAG[.post.devDISTANCE] -- No -dirty.\n\n Exceptions:\n 1: no tags. 0.post.devDISTANCE\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"]:\n rendered += \".post.dev%d\" % pieces[\"distance\"]\n else:\n # exception #1\n rendered = \"0.post.dev%d\" % pieces[\"distance\"]\n return rendered"},{"col":0,"comment":"TAG[.postDISTANCE[.dev0]+gHEX] .\n\n The \".dev0\" means dirty. Note that .dev0 sorts backwards\n (a dirty tree will appear \"older\" than the corresponding clean one),\n but you shouldn't be releasing software with -dirty anyways.\n\n Exceptions:\n 1: no tags. 
0.postDISTANCE[.dev0]\n ","endLoc":1336,"header":"def render_pep440_post(pieces)","id":1292,"name":"render_pep440_post","nodeType":"Function","startLoc":1312,"text":"def render_pep440_post(pieces):\n \"\"\"TAG[.postDISTANCE[.dev0]+gHEX] .\n\n The \".dev0\" means dirty. Note that .dev0 sorts backwards\n (a dirty tree will appear \"older\" than the corresponding clean one),\n but you shouldn't be releasing software with -dirty anyways.\n\n Exceptions:\n 1: no tags. 0.postDISTANCE[.dev0]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += \".post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n rendered += plus_or_dot(pieces)\n rendered += \"g%s\" % pieces[\"short\"]\n else:\n # exception #1\n rendered = \"0.post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n rendered += \"+g%s\" % pieces[\"short\"]\n return rendered"},{"col":0,"comment":"TAG[.postDISTANCE[.dev0]] .\n\n The \".dev0\" means dirty.\n\n Eexceptions:\n 1: no tags. 0.postDISTANCE[.dev0]\n ","endLoc":1358,"header":"def render_pep440_old(pieces)","id":1293,"name":"render_pep440_old","nodeType":"Function","startLoc":1339,"text":"def render_pep440_old(pieces):\n \"\"\"TAG[.postDISTANCE[.dev0]] .\n\n The \".dev0\" means dirty.\n\n Eexceptions:\n 1: no tags. 0.postDISTANCE[.dev0]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += \".post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n else:\n # exception #1\n rendered = \"0.post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n return rendered"},{"col":0,"comment":"TAG[-DISTANCE-gHEX][-dirty].\n\n Like 'git describe --tags --dirty --always'.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)\n ","endLoc":1378,"header":"def render_git_describe(pieces)","id":1294,"name":"render_git_describe","nodeType":"Function","startLoc":1361,"text":"def render_git_describe(pieces):\n \"\"\"TAG[-DISTANCE-gHEX][-dirty].\n\n Like 'git describe --tags --dirty --always'.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"]:\n rendered += \"-%d-g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n else:\n # exception #1\n rendered = pieces[\"short\"]\n if pieces[\"dirty\"]:\n rendered += \"-dirty\"\n return rendered"},{"col":0,"comment":"TAG-DISTANCE-gHEX[-dirty].\n\n Like 'git describe --tags --dirty --always -long'.\n The distance/hash is unconditional.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)\n ","endLoc":1398,"header":"def render_git_describe_long(pieces)","id":1295,"name":"render_git_describe_long","nodeType":"Function","startLoc":1381,"text":"def render_git_describe_long(pieces):\n \"\"\"TAG-DISTANCE-gHEX[-dirty].\n\n Like 'git describe --tags --dirty --always -long'.\n The distance/hash is unconditional.\n\n Exceptions:\n 1: no tags. 
HEX[-dirty] (note: no 'g' prefix)\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n rendered += \"-%d-g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n else:\n # exception #1\n rendered = pieces[\"short\"]\n if pieces[\"dirty\"]:\n rendered += \"-dirty\"\n return rendered"},{"col":0,"comment":"Render the given version pieces into the requested style.","endLoc":1436,"header":"def render(pieces, style)","id":1296,"name":"render","nodeType":"Function","startLoc":1401,"text":"def render(pieces, style):\n \"\"\"Render the given version pieces into the requested style.\"\"\"\n if pieces[\"error\"]:\n return {\n \"version\": \"unknown\",\n \"full-revisionid\": pieces.get(\"long\"),\n \"dirty\": None,\n \"error\": pieces[\"error\"],\n \"date\": None,\n }\n\n if not style or style == \"default\":\n style = \"pep440\" # the default\n\n if style == \"pep440\":\n rendered = render_pep440(pieces)\n elif style == \"pep440-pre\":\n rendered = render_pep440_pre(pieces)\n elif style == \"pep440-post\":\n rendered = render_pep440_post(pieces)\n elif style == \"pep440-old\":\n rendered = render_pep440_old(pieces)\n elif style == \"git-describe\":\n rendered = render_git_describe(pieces)\n elif style == \"git-describe-long\":\n rendered = render_git_describe_long(pieces)\n else:\n raise ValueError(\"unknown style '%s'\" % style)\n\n return {\n \"version\": rendered,\n \"full-revisionid\": pieces[\"long\"],\n \"dirty\": pieces[\"dirty\"],\n \"error\": None,\n \"date\": pieces.get(\"date\"),\n }"},{"col":4,"comment":"Return a new DataArray whose data is given by each `n` value\n along the specified dimension(s). Default `n` = 5\n\n See Also\n --------\n Dataset.thin\n DataArray.head\n DataArray.tail\n ","endLoc":1100,"header":"def thin(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any\n ) -> \"DataArray\"","id":1297,"name":"thin","nodeType":"Function","startLoc":1085,"text":"def thin(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any\n ) -> \"DataArray\":\n \"\"\"Return a new DataArray whose data is given by each `n` value\n along the specified dimension(s). 
Default `n` = 5\n\n See Also\n --------\n Dataset.thin\n DataArray.head\n DataArray.tail\n \"\"\"\n ds = self._to_temp_dataset().thin(indexers, **indexers_kwargs)\n return self._from_temp_dataset(ds)"},{"col":4,"comment":"Returns a new dataset with each array indexed along every `n`th\n value for the specified dimension(s)\n\n Parameters\n ----------\n indexers : dict or int, default: 5\n A dict with keys matching dimensions and integer values `n`\n or a single integer `n` applied over all dimensions.\n One of indexers or indexers_kwargs must be provided.\n **indexers_kwargs : {dim: n, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n\n See Also\n --------\n Dataset.head\n Dataset.tail\n DataArray.thin\n ","endLoc":2150,"header":"def thin(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any,\n ) -> \"Dataset\"","id":1298,"name":"thin","nodeType":"Function","startLoc":2102,"text":"def thin(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Returns a new dataset with each array indexed along every `n`th\n value for the specified dimension(s)\n\n Parameters\n ----------\n indexers : dict or int, default: 5\n A dict with keys matching dimensions and integer values `n`\n or a single integer `n` applied over all dimensions.\n One of indexers or indexers_kwargs must be provided.\n **indexers_kwargs : {dim: n, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n\n See Also\n --------\n Dataset.head\n Dataset.tail\n DataArray.thin\n \"\"\"\n if (\n not indexers_kwargs\n and not isinstance(indexers, int)\n and not is_dict_like(indexers)\n ):\n raise TypeError(\"indexers must be either dict-like or a single integer\")\n if isinstance(indexers, int):\n indexers = {dim: indexers for dim in self.dims}\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"thin\")\n for k, v in indexers.items():\n if not isinstance(v, int):\n raise TypeError(\n \"expected integer type indexer for \"\n \"dimension %r, found %r\" % (k, type(v))\n )\n elif v < 0:\n raise ValueError(\n \"expected positive integer as indexer \"\n \"for dimension %r, found %s\" % (k, v)\n )\n elif v == 0:\n raise ValueError(\"step cannot be zero\")\n indexers_slices = {k: slice(None, None, val) for k, val in indexers.items()}\n return self.isel(indexers_slices)"},{"col":4,"comment":"Finalize the annotations and layout.","endLoc":371,"header":"def _finalize_grid(self, *axlabels)","id":1301,"name":"_finalize_grid","nodeType":"Function","startLoc":360,"text":"def _finalize_grid(self, *axlabels):\n \"\"\"Finalize the annotations and layout.\"\"\"\n if not self._finalized:\n self.set_axis_labels(*axlabels)\n self.set_titles()\n self.fig.tight_layout()\n\n for ax, namedict in zip(self.axes.flat, self.name_dicts.flat):\n if namedict is None:\n ax.set_visible(False)\n\n self._finalized = True"},{"col":4,"comment":"Set axis labels on the left column and bottom row of the grid.","endLoc":433,"header":"def set_axis_labels(self, x_var=None, y_var=None)","id":1302,"name":"set_axis_labels","nodeType":"Function","startLoc":417,"text":"def set_axis_labels(self, x_var=None, y_var=None):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n if x_var in self.data.coords:\n self._x_var = x_var\n self.set_xlabels(label_from_attrs(self.data[x_var]))\n else:\n # x_var is a 
string\n self.set_xlabels(x_var)\n\n if y_var is not None:\n if y_var in self.data.coords:\n self._y_var = y_var\n self.set_ylabels(label_from_attrs(self.data[y_var]))\n else:\n self.set_ylabels(y_var)\n return self"},{"col":4,"comment":"Label the x axis on the bottom row of the grid.","endLoc":441,"header":"def set_xlabels(self, label=None, **kwargs)","id":1303,"name":"set_xlabels","nodeType":"Function","startLoc":435,"text":"def set_xlabels(self, label=None, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = label_from_attrs(self.data[self._x_var])\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n return self"},{"col":4,"comment":"Broadcast this DataArray against another Dataset or DataArray.\n\n This is equivalent to xr.broadcast(other, self)[1]\n\n xarray objects are broadcast against each other in arithmetic\n operations, so this method is not be necessary for most uses.\n\n If no change is needed, the input data is returned to the output\n without being copied.\n\n If new coords are added by the broadcast, their values are\n NaN filled.\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object against which to broadcast this array.\n exclude : iterable of hashable, optional\n Dimensions that must not be broadcasted\n\n Returns\n -------\n new_da: xr.DataArray\n\n Examples\n --------\n\n >>> arr1\n \n array([[0.840235, 0.215216, 0.77917 ],\n [0.726351, 0.543824, 0.875115]])\n Coordinates:\n * x (x) >> arr2\n \n array([[0.612611, 0.125753],\n [0.853181, 0.948818],\n [0.180885, 0.33363 ]])\n Coordinates:\n * x (x) >> arr1.broadcast_like(arr2)\n \n array([[0.840235, 0.215216, 0.77917 ],\n [0.726351, 0.543824, 0.875115],\n [ nan, nan, nan]])\n Coordinates:\n * x (x) object 'a' 'b' 'c'\n * y (y) object 'a' 'b' 'c'\n ","endLoc":1164,"header":"def broadcast_like(\n self, other: Union[\"DataArray\", Dataset], exclude: Iterable[Hashable] = None\n ) -> \"DataArray\"","id":1304,"name":"broadcast_like","nodeType":"Function","startLoc":1102,"text":"def broadcast_like(\n self, other: Union[\"DataArray\", Dataset], exclude: Iterable[Hashable] = None\n ) -> \"DataArray\":\n \"\"\"Broadcast this DataArray against another Dataset or DataArray.\n\n This is equivalent to xr.broadcast(other, self)[1]\n\n xarray objects are broadcast against each other in arithmetic\n operations, so this method is not be necessary for most uses.\n\n If no change is needed, the input data is returned to the output\n without being copied.\n\n If new coords are added by the broadcast, their values are\n NaN filled.\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object against which to broadcast this array.\n exclude : iterable of hashable, optional\n Dimensions that must not be broadcasted\n\n Returns\n -------\n new_da: xr.DataArray\n\n Examples\n --------\n\n >>> arr1\n \n array([[0.840235, 0.215216, 0.77917 ],\n [0.726351, 0.543824, 0.875115]])\n Coordinates:\n * x (x) >> arr2\n \n array([[0.612611, 0.125753],\n [0.853181, 0.948818],\n [0.180885, 0.33363 ]])\n Coordinates:\n * x (x) >> arr1.broadcast_like(arr2)\n \n array([[0.840235, 0.215216, 0.77917 ],\n [0.726351, 0.543824, 0.875115],\n [ nan, nan, nan]])\n Coordinates:\n * x (x) object 'a' 'b' 'c'\n * y (y) object 'a' 'b' 'c'\n \"\"\"\n if exclude is None:\n exclude = set()\n else:\n exclude = set(exclude)\n args = align(other, self, join=\"outer\", copy=False, exclude=exclude)\n\n dims_map, common_coords = _get_broadcast_dims_map_common_coords(args, exclude)\n\n return 
_broadcast_helper(args[1], exclude, dims_map, common_coords)"},{"col":0,"comment":" Finalize this store by explicitly syncing and closing","endLoc":232,"header":"def _finalize_store(write, store)","id":1305,"name":"_finalize_store","nodeType":"Function","startLoc":229,"text":"def _finalize_store(write, store):\n \"\"\" Finalize this store by explicitly syncing and closing\"\"\"\n del write # ensure writing is done first\n store.close()"},{"col":4,"comment":"null","endLoc":1620,"header":"def __repr__(self) -> str","id":1306,"name":"__repr__","nodeType":"Function","startLoc":1619,"text":"def __repr__(self) -> str:\n return formatting.dataset_repr(self)"},{"col":4,"comment":"\n Concise summary of a Dataset variables and attributes.\n\n Parameters\n ----------\n buf : writable buffer, defaults to sys.stdout\n\n See Also\n --------\n pandas.DataFrame.assign\n ncdump: netCDF's ncdump\n ","endLoc":1656,"header":"def info(self, buf=None) -> None","id":1307,"name":"info","nodeType":"Function","startLoc":1622,"text":"def info(self, buf=None) -> None:\n \"\"\"\n Concise summary of a Dataset variables and attributes.\n\n Parameters\n ----------\n buf : writable buffer, defaults to sys.stdout\n\n See Also\n --------\n pandas.DataFrame.assign\n ncdump: netCDF's ncdump\n \"\"\"\n if buf is None: # pragma: no cover\n buf = sys.stdout\n\n lines = []\n lines.append(\"xarray.Dataset {\")\n lines.append(\"dimensions:\")\n for name, size in self.dims.items():\n lines.append(\"\\t{name} = {size} ;\".format(name=name, size=size))\n lines.append(\"\\nvariables:\")\n for name, da in self.variables.items():\n dims = \", \".join(da.dims)\n lines.append(\n \"\\t{type} {name}({dims}) ;\".format(type=da.dtype, name=name, dims=dims)\n )\n for k, v in da.attrs.items():\n lines.append(\"\\t\\t{name}:{k} = {v} ;\".format(name=name, k=k, v=v))\n lines.append(\"\\n// global attributes:\")\n for k, v in self.attrs.items():\n lines.append(\"\\t:{k} = {v} ;\".format(k=k, v=v))\n lines.append(\"}\")\n\n buf.write(\"\\n\".join(lines))"},{"col":4,"comment":"Conform this object onto the indexes of another object, filling in\n missing values with ``fill_value``. The default fill value is NaN.\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object with an 'indexes' attribute giving a mapping from dimension\n names to pandas.Index objects, which provides coordinates upon\n which to index the variables in this dataset. The indexes on this\n other object need not be the same as the indexes on this\n dataset. Any mis-matched index values will be filled in with\n NaN, and any mis-matched dimension names will simply be ignored.\n method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional\n Method to use for filling index values from other not found on this\n data array:\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n copy : bool, optional\n If ``copy=True``, data in the return value is always copied. If\n ``copy=False`` and reindexing is unnecessary, or can be performed\n with only slice operations, then the output may share memory with\n the input. 
In either case, a new xarray object is always returned.\n fill_value : scalar, optional\n Value to use for newly missing values\n\n Returns\n -------\n reindexed : DataArray\n Another dataset array, with this array's data but coordinates from\n the other object.\n\n See Also\n --------\n DataArray.reindex\n align\n ","endLoc":1224,"header":"def reindex_like(\n self,\n other: Union[\"DataArray\", Dataset],\n method: str = None,\n tolerance=None,\n copy: bool = True,\n fill_value=dtypes.NA,\n ) -> \"DataArray\"","id":1308,"name":"reindex_like","nodeType":"Function","startLoc":1166,"text":"def reindex_like(\n self,\n other: Union[\"DataArray\", Dataset],\n method: str = None,\n tolerance=None,\n copy: bool = True,\n fill_value=dtypes.NA,\n ) -> \"DataArray\":\n \"\"\"Conform this object onto the indexes of another object, filling in\n missing values with ``fill_value``. The default fill value is NaN.\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object with an 'indexes' attribute giving a mapping from dimension\n names to pandas.Index objects, which provides coordinates upon\n which to index the variables in this dataset. The indexes on this\n other object need not be the same as the indexes on this\n dataset. Any mis-matched index values will be filled in with\n NaN, and any mis-matched dimension names will simply be ignored.\n method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional\n Method to use for filling index values from other not found on this\n data array:\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n copy : bool, optional\n If ``copy=True``, data in the return value is always copied. If\n ``copy=False`` and reindexing is unnecessary, or can be performed\n with only slice operations, then the output may share memory with\n the input. 
In either case, a new xarray object is always returned.\n fill_value : scalar, optional\n Value to use for newly missing values\n\n Returns\n -------\n reindexed : DataArray\n Another dataset array, with this array's data but coordinates from\n the other object.\n\n See Also\n --------\n DataArray.reindex\n align\n \"\"\"\n indexers = reindex_like_indexers(self, other)\n return self.reindex(\n indexers=indexers,\n method=method,\n tolerance=tolerance,\n copy=copy,\n fill_value=fill_value,\n )"},{"col":4,"comment":"Label the y axis on the left column of the grid.","endLoc":449,"header":"def set_ylabels(self, label=None, **kwargs)","id":1309,"name":"set_ylabels","nodeType":"Function","startLoc":443,"text":"def set_ylabels(self, label=None, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = label_from_attrs(self.data[self._y_var])\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n return self"},{"col":4,"comment":"null","endLoc":105,"header":"def time_write_dataset_scipy(self)","id":1310,"name":"time_write_dataset_scipy","nodeType":"Function","startLoc":104,"text":"def time_write_dataset_scipy(self):\n self.ds.to_netcdf(\"test_scipy_write.nc\", engine=\"scipy\", format=self.format)"},{"col":0,"comment":"Get the project version from whatever source is available.\n\n Returns dict with two keys: 'version' and 'full'.\n ","endLoc":1521,"header":"def get_versions(verbose=False)","id":1311,"name":"get_versions","nodeType":"Function","startLoc":1443,"text":"def get_versions(verbose=False):\n \"\"\"Get the project version from whatever source is available.\n\n Returns dict with two keys: 'version' and 'full'.\n \"\"\"\n if \"versioneer\" in sys.modules:\n # see the discussion in cmdclass.py:get_cmdclass()\n del sys.modules[\"versioneer\"]\n\n root = get_root()\n cfg = get_config_from_root(root)\n\n assert cfg.VCS is not None, \"please set [versioneer]VCS= in setup.cfg\"\n handlers = HANDLERS.get(cfg.VCS)\n assert handlers, \"unrecognized VCS '%s'\" % cfg.VCS\n verbose = verbose or cfg.verbose\n assert (\n cfg.versionfile_source is not None\n ), \"please set versioneer.versionfile_source\"\n assert cfg.tag_prefix is not None, \"please set versioneer.tag_prefix\"\n\n versionfile_abs = os.path.join(root, cfg.versionfile_source)\n\n # extract version from first of: _version.py, VCS command (e.g. 'git\n # describe'), parentdir. 
This is meant to work for developers using a\n # source checkout, for users of a tarball created by 'setup.py sdist',\n # and for users of a tarball/zipball created by 'git archive' or github's\n # download-from-tag feature or the equivalent in other VCSes.\n\n get_keywords_f = handlers.get(\"get_keywords\")\n from_keywords_f = handlers.get(\"keywords\")\n if get_keywords_f and from_keywords_f:\n try:\n keywords = get_keywords_f(versionfile_abs)\n ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)\n if verbose:\n print(\"got version from expanded keyword %s\" % ver)\n return ver\n except NotThisMethod:\n pass\n\n try:\n ver = versions_from_file(versionfile_abs)\n if verbose:\n print(\"got version from file %s %s\" % (versionfile_abs, ver))\n return ver\n except NotThisMethod:\n pass\n\n from_vcs_f = handlers.get(\"pieces_from_vcs\")\n if from_vcs_f:\n try:\n pieces = from_vcs_f(cfg.tag_prefix, root, verbose)\n ver = render(pieces, cfg.style)\n if verbose:\n print(\"got version from VCS %s\" % ver)\n return ver\n except NotThisMethod:\n pass\n\n try:\n if cfg.parentdir_prefix:\n ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)\n if verbose:\n print(\"got version from parentdir %s\" % ver)\n return ver\n except NotThisMethod:\n pass\n\n if verbose:\n print(\"unable to compute version\")\n\n return {\n \"version\": \"0+unknown\",\n \"full-revisionid\": None,\n \"dirty\": None,\n \"error\": \"unable to compute version\",\n \"date\": None,\n }"},{"attributeType":"null","col":8,"comment":"null","endLoc":98,"id":1312,"name":"format","nodeType":"Attribute","startLoc":98,"text":"self.format"},{"col":4,"comment":"\n Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for plot titles containing {coord} and {value}\n maxchar : int\n Truncate titles at maxchar\n kwargs : keyword args\n additional arguments to matplotlib.text\n\n Returns\n -------\n self: FacetGrid object\n\n ","endLoc":502,"header":"def set_titles(self, template=\"{coord} = {value}\", maxchar=30, size=None, **kwargs)","id":1313,"name":"set_titles","nodeType":"Function","startLoc":451,"text":"def set_titles(self, template=\"{coord} = {value}\", maxchar=30, size=None, **kwargs):\n \"\"\"\n Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for plot titles containing {coord} and {value}\n maxchar : int\n Truncate titles at maxchar\n kwargs : keyword args\n additional arguments to matplotlib.text\n\n Returns\n -------\n self: FacetGrid object\n\n \"\"\"\n import matplotlib as mpl\n\n if size is None:\n size = mpl.rcParams[\"axes.labelsize\"]\n\n nicetitle = functools.partial(_nicetitle, maxchar=maxchar, template=template)\n\n if self._single_group:\n for d, ax in zip(self.name_dicts.flat, self.axes.flat):\n # Only label the ones with data\n if d is not None:\n coord, value = list(d.items()).pop()\n title = nicetitle(coord, value, maxchar=maxchar)\n ax.set_title(title, size=size, **kwargs)\n else:\n # The row titles on the right edge of the grid\n for ax, row_name in zip(self.axes[:, -1], self.row_names):\n title = nicetitle(coord=self._row_var, value=row_name, maxchar=maxchar)\n ax.annotate(\n title,\n xy=(1.02, 0.5),\n xycoords=\"axes fraction\",\n rotation=270,\n ha=\"left\",\n va=\"center\",\n **kwargs\n )\n\n # The column titles on the top row\n for ax, col_name in zip(self.axes[0, :], self.col_names):\n title = nicetitle(coord=self._col_var, value=col_name, maxchar=maxchar)\n 
ax.set_title(title, size=size, **kwargs)\n\n return self"},{"className":"IOReadSingleNetCDF4","col":0,"comment":"null","endLoc":126,"id":1314,"nodeType":"Class","startLoc":108,"text":"class IOReadSingleNetCDF4(IOSingleNetCDF):\n def setup(self):\n\n self.make_ds()\n\n self.filepath = \"test_single_file.nc4.nc\"\n self.format = \"NETCDF4\"\n self.ds.to_netcdf(self.filepath, format=self.format)\n\n def time_load_dataset_netcdf4(self):\n xr.open_dataset(self.filepath, engine=\"netcdf4\").load()\n\n def time_orthogonal_indexing(self):\n ds = xr.open_dataset(self.filepath, engine=\"netcdf4\")\n ds = ds.isel(**self.oinds).load()\n\n def time_vectorized_indexing(self):\n ds = xr.open_dataset(self.filepath, engine=\"netcdf4\")\n ds = ds.isel(**self.vinds).load()"},{"col":4,"comment":"null","endLoc":115,"header":"def setup(self)","id":1315,"name":"setup","nodeType":"Function","startLoc":109,"text":"def setup(self):\n\n self.make_ds()\n\n self.filepath = \"test_single_file.nc4.nc\"\n self.format = \"NETCDF4\"\n self.ds.to_netcdf(self.filepath, format=self.format)"},{"col":4,"comment":"Block dimensions for this dataset's data or None if it's not a dask\n array.\n ","endLoc":1673,"header":"@property\n def chunks(self) -> Mapping[Hashable, Tuple[int, ...]]","id":1316,"name":"chunks","nodeType":"Function","startLoc":1658,"text":"@property\n def chunks(self) -> Mapping[Hashable, Tuple[int, ...]]:\n \"\"\"Block dimensions for this dataset's data or None if it's not a dask\n array.\n \"\"\"\n chunks: Dict[Hashable, Tuple[int, ...]] = {}\n for v in self.variables.values():\n if v.chunks is not None:\n for dim, c in zip(v.dims, v.chunks):\n if dim in chunks and c != chunks[dim]:\n raise ValueError(\n f\"Object has inconsistent chunks along dimension {dim}. \"\n \"This can be fixed by calling unify_chunks().\"\n )\n chunks[dim] = c\n return Frozen(SortedKeysDict(chunks))"},{"col":4,"comment":"null","endLoc":118,"header":"def time_load_dataset_netcdf4(self)","id":1317,"name":"time_load_dataset_netcdf4","nodeType":"Function","startLoc":117,"text":"def time_load_dataset_netcdf4(self):\n xr.open_dataset(self.filepath, engine=\"netcdf4\").load()"},{"col":4,"comment":"Variant of _validate_indexers to be used for interpolation\n ","endLoc":1808,"header":"def _validate_interp_indexers(\n self, indexers: Mapping[Hashable, Any]\n ) -> Iterator[Tuple[Hashable, Variable]]","id":1318,"name":"_validate_interp_indexers","nodeType":"Function","startLoc":1787,"text":"def _validate_interp_indexers(\n self, indexers: Mapping[Hashable, Any]\n ) -> Iterator[Tuple[Hashable, Variable]]:\n \"\"\"Variant of _validate_indexers to be used for interpolation\n \"\"\"\n for k, v in self._validate_indexers(indexers):\n if isinstance(v, Variable):\n if v.ndim == 1:\n yield k, v.to_index_variable()\n else:\n yield k, v\n elif isinstance(v, int):\n yield k, Variable((), v)\n elif isinstance(v, np.ndarray):\n if v.ndim == 0:\n yield k, Variable((), v)\n elif v.ndim == 1:\n yield k, IndexVariable((k,), v)\n else:\n raise AssertionError() # Already tested by _validate_indexers\n else:\n raise TypeError(type(v))"},{"col":4,"comment":"null","endLoc":122,"header":"def time_orthogonal_indexing(self)","id":1319,"name":"time_orthogonal_indexing","nodeType":"Function","startLoc":120,"text":"def time_orthogonal_indexing(self):\n ds = xr.open_dataset(self.filepath, engine=\"netcdf4\")\n ds = ds.isel(**self.oinds).load()"},{"col":4,"comment":"Extract coordinates and indexes from indexers.\n\n Only coordinate with a name different from any of 
self.variables will\n be attached.\n ","endLoc":1846,"header":"def _get_indexers_coords_and_indexes(self, indexers)","id":1320,"name":"_get_indexers_coords_and_indexes","nodeType":"Function","startLoc":1810,"text":"def _get_indexers_coords_and_indexes(self, indexers):\n \"\"\"Extract coordinates and indexes from indexers.\n\n Only coordinate with a name different from any of self.variables will\n be attached.\n \"\"\"\n from .dataarray import DataArray\n\n coords_list = []\n for k, v in indexers.items():\n if isinstance(v, DataArray):\n if v.dtype.kind == \"b\":\n if v.ndim != 1: # we only support 1-d boolean array\n raise ValueError(\n \"{:d}d-boolean array is used for indexing along \"\n \"dimension {!r}, but only 1d boolean arrays are \"\n \"supported.\".format(v.ndim, k)\n )\n # Make sure in case of boolean DataArray, its\n # coordinate also should be indexed.\n v_coords = v[v.values.nonzero()[0]].coords\n else:\n v_coords = v.coords\n coords_list.append(v_coords)\n\n # we don't need to call align() explicitly or check indexes for\n # alignment, because merge_variables already checks for exact alignment\n # between dimension coordinates\n coords, indexes = merge_coordinates_without_align(coords_list)\n assert_coordinate_consistent(self, coords)\n\n # silently drop the conflicted variables.\n attached_coords = {k: v for k, v in coords.items() if k not in self._variables}\n attached_indexes = {\n k: v for k, v in indexes.items() if k not in self._variables\n }\n return attached_coords, attached_indexes"},{"col":4,"comment":"Draw a colorbar\n ","endLoc":415,"header":"def add_colorbar(self, **kwargs)","id":1321,"name":"add_colorbar","nodeType":"Function","startLoc":404,"text":"def add_colorbar(self, **kwargs):\n \"\"\"Draw a colorbar\n \"\"\"\n kwargs = kwargs.copy()\n if self._cmap_extend is not None:\n kwargs.setdefault(\"extend\", self._cmap_extend)\n if \"label\" not in kwargs:\n kwargs.setdefault(\"label\", label_from_attrs(self.data))\n self.cbar = self.fig.colorbar(\n self._mappables[-1], ax=list(self.axes.flat), **kwargs\n )\n return self"},{"col":0,"comment":"Extract indexers to align target with other.\n\n Not public API.\n\n Parameters\n ----------\n target : Dataset or DataArray\n Object to be aligned.\n other : Dataset or DataArray\n Object to be aligned with.\n\n Returns\n -------\n Dict[Hashable, pandas.Index] providing indexes for reindex keyword\n arguments.\n\n Raises\n ------\n ValueError\n If any dimensions without labels have different sizes.\n ","endLoc":459,"header":"def reindex_like_indexers(\n target: \"Union[DataArray, Dataset]\", other: \"Union[DataArray, Dataset]\"\n) -> Dict[Hashable, pd.Index]","id":1322,"name":"reindex_like_indexers","nodeType":"Function","startLoc":423,"text":"def reindex_like_indexers(\n target: \"Union[DataArray, Dataset]\", other: \"Union[DataArray, Dataset]\"\n) -> Dict[Hashable, pd.Index]:\n \"\"\"Extract indexers to align target with other.\n\n Not public API.\n\n Parameters\n ----------\n target : Dataset or DataArray\n Object to be aligned.\n other : Dataset or DataArray\n Object to be aligned with.\n\n Returns\n -------\n Dict[Hashable, pandas.Index] providing indexes for reindex keyword\n arguments.\n\n Raises\n ------\n ValueError\n If any dimensions without labels have different sizes.\n \"\"\"\n indexers = {k: v for k, v in other.indexes.items() if k in target.dims}\n\n for dim in other.dims:\n if dim not in indexers and dim in target.dims:\n other_size = other.sizes[dim]\n target_size = target.sizes[dim]\n if other_size != 
target_size:\n raise ValueError(\n \"different size for unlabeled \"\n \"dimension on argument %r: %r vs %r\"\n % (dim, other_size, target_size)\n )\n return indexers"},{"attributeType":"DataArray","col":8,"comment":"null","endLoc":24,"id":1323,"name":"ds","nodeType":"Attribute","startLoc":24,"text":"self.ds"},{"col":0,"comment":"Get the short version string for this project.","endLoc":1526,"header":"def get_version()","id":1324,"name":"get_version","nodeType":"Function","startLoc":1524,"text":"def get_version():\n \"\"\"Get the short version string for this project.\"\"\"\n return get_versions()[\"version\"]"},{"attributeType":"null","col":16,"comment":"null","endLoc":1,"id":1325,"name":"np","nodeType":"Attribute","startLoc":1,"text":"np"},{"attributeType":"null","col":17,"comment":"null","endLoc":3,"id":1326,"name":"xr","nodeType":"Attribute","startLoc":3,"text":"xr"},{"col":0,"comment":"Get the custom setuptools/distutils subclasses used by Versioneer.","endLoc":1708,"header":"def get_cmdclass()","id":1327,"name":"get_cmdclass","nodeType":"Function","startLoc":1529,"text":"def get_cmdclass():\n \"\"\"Get the custom setuptools/distutils subclasses used by Versioneer.\"\"\"\n if \"versioneer\" in sys.modules:\n del sys.modules[\"versioneer\"]\n # this fixes the \"python setup.py develop\" case (also 'install' and\n # 'easy_install .'), in which subdependencies of the main project are\n # built (using setup.py bdist_egg) in the same python process. Assume\n # a main project A and a dependency B, which use different versions\n # of Versioneer. A's setup.py imports A's Versioneer, leaving it in\n # sys.modules by the time B's setup.py is executed, causing B to run\n # with the wrong versioneer. Setuptools wraps the sub-dep builds in a\n # sandbox that restores sys.modules to it's pre-build state, so the\n # parent is protected against the child's \"import versioneer\". 
By\n # removing ourselves from sys.modules here, before the child build\n # happens, we protect the child from the parent's versioneer too.\n # Also see https://github.com/warner/python-versioneer/issues/52\n\n cmds = {}\n\n # we add \"version\" to both distutils and setuptools\n from distutils.core import Command\n\n class cmd_version(Command):\n description = \"report generated version string\"\n user_options = []\n boolean_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n vers = get_versions(verbose=True)\n print(\"Version: %s\" % vers[\"version\"])\n print(\" full-revisionid: %s\" % vers.get(\"full-revisionid\"))\n print(\" dirty: %s\" % vers.get(\"dirty\"))\n print(\" date: %s\" % vers.get(\"date\"))\n if vers[\"error\"]:\n print(\" error: %s\" % vers[\"error\"])\n\n cmds[\"version\"] = cmd_version\n\n # we override \"build_py\" in both distutils and setuptools\n #\n # most invocation pathways end up running build_py:\n # distutils/build -> build_py\n # distutils/install -> distutils/build ->..\n # setuptools/bdist_wheel -> distutils/install ->..\n # setuptools/bdist_egg -> distutils/install_lib -> build_py\n # setuptools/install -> bdist_egg ->..\n # setuptools/develop -> ?\n # pip install:\n # copies source tree to a tempdir before running egg_info/etc\n # if .git isn't copied too, 'git describe' will fail\n # then does setup.py bdist_wheel, or sometimes setup.py install\n # setup.py egg_info -> ?\n\n # we override different \"build_py\" commands for both environments\n if \"setuptools\" in sys.modules:\n from setuptools.command.build_py import build_py as _build_py\n else:\n from distutils.command.build_py import build_py as _build_py\n\n class cmd_build_py(_build_py):\n def run(self):\n root = get_root()\n cfg = get_config_from_root(root)\n versions = get_versions()\n _build_py.run(self)\n # now locate _version.py in the new build/ directory and replace\n # it with an updated value\n if cfg.versionfile_build:\n target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build)\n print(\"UPDATING %s\" % target_versionfile)\n write_to_version_file(target_versionfile, versions)\n\n cmds[\"build_py\"] = cmd_build_py\n\n if \"cx_Freeze\" in sys.modules: # cx_freeze enabled?\n from cx_Freeze.dist import build_exe as _build_exe\n\n # nczeczulin reports that py2exe won't like the pep440-style string\n # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.\n # setup(console=[{\n # \"version\": versioneer.get_version().split(\"+\", 1)[0], # FILEVERSION\n # \"product_version\": versioneer.get_version(),\n # ...\n\n class cmd_build_exe(_build_exe):\n def run(self):\n root = get_root()\n cfg = get_config_from_root(root)\n versions = get_versions()\n target_versionfile = cfg.versionfile_source\n print(\"UPDATING %s\" % target_versionfile)\n write_to_version_file(target_versionfile, versions)\n\n _build_exe.run(self)\n os.unlink(target_versionfile)\n with open(cfg.versionfile_source, \"w\") as f:\n LONG = LONG_VERSION_PY[cfg.VCS]\n f.write(\n LONG\n % {\n \"DOLLAR\": \"$\",\n \"STYLE\": cfg.style,\n \"TAG_PREFIX\": cfg.tag_prefix,\n \"PARENTDIR_PREFIX\": cfg.parentdir_prefix,\n \"VERSIONFILE_SOURCE\": cfg.versionfile_source,\n }\n )\n\n cmds[\"build_exe\"] = cmd_build_exe\n del cmds[\"build_py\"]\n\n if \"py2exe\" in sys.modules: # py2exe enabled?\n try:\n from py2exe.distutils_buildexe import py2exe as _py2exe # py3\n except ImportError:\n from py2exe.build_exe import py2exe as _py2exe # py2\n\n class 
cmd_py2exe(_py2exe):\n def run(self):\n root = get_root()\n cfg = get_config_from_root(root)\n versions = get_versions()\n target_versionfile = cfg.versionfile_source\n print(\"UPDATING %s\" % target_versionfile)\n write_to_version_file(target_versionfile, versions)\n\n _py2exe.run(self)\n os.unlink(target_versionfile)\n with open(cfg.versionfile_source, \"w\") as f:\n LONG = LONG_VERSION_PY[cfg.VCS]\n f.write(\n LONG\n % {\n \"DOLLAR\": \"$\",\n \"STYLE\": cfg.style,\n \"TAG_PREFIX\": cfg.tag_prefix,\n \"PARENTDIR_PREFIX\": cfg.parentdir_prefix,\n \"VERSIONFILE_SOURCE\": cfg.versionfile_source,\n }\n )\n\n cmds[\"py2exe\"] = cmd_py2exe\n\n # we override different \"sdist\" commands for both environments\n if \"setuptools\" in sys.modules:\n from setuptools.command.sdist import sdist as _sdist\n else:\n from distutils.command.sdist import sdist as _sdist\n\n class cmd_sdist(_sdist):\n def run(self):\n versions = get_versions()\n self._versioneer_generated_versions = versions\n # unless we update this, the command will keep using the old\n # version\n self.distribution.metadata.version = versions[\"version\"]\n return _sdist.run(self)\n\n def make_release_tree(self, base_dir, files):\n root = get_root()\n cfg = get_config_from_root(root)\n _sdist.make_release_tree(self, base_dir, files)\n # now locate _version.py in the new base_dir directory\n # (remembering that it may be a hardlink) and replace it with an\n # updated value\n target_versionfile = os.path.join(base_dir, cfg.versionfile_source)\n print(\"UPDATING %s\" % target_versionfile)\n write_to_version_file(\n target_versionfile, self._versioneer_generated_versions\n )\n\n cmds[\"sdist\"] = cmd_sdist\n\n return cmds"},{"col":4,"comment":"null","endLoc":126,"header":"def time_vectorized_indexing(self)","id":1328,"name":"time_vectorized_indexing","nodeType":"Function","startLoc":124,"text":"def time_vectorized_indexing(self):\n ds = xr.open_dataset(self.filepath, engine=\"netcdf4\")\n ds = ds.isel(**self.vinds).load()"},{"id":1329,"name":"README.rst","nodeType":"TextFile","path":"doc","text":"xarray\n------\n\nYou can find information about building the docs at our `Contributing page `_.\n"},{"id":1330,"name":"licenses","nodeType":"Package"},{"id":1331,"name":"PANDAS_LICENSE","nodeType":"TextFile","path":"licenses","text":"pandas license\n==============\n\nCopyright (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team\nAll rights reserved.\n\nCopyright (c) 2008-2011 AQR Capital Management, LLC\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above\n copyright notice, this list of conditions and the following\n disclaimer in the documentation and/or other materials provided\n with the distribution.\n\n * Neither the name of the copyright holder nor the names of any\n contributors may be used to endorse or promote products derived\n from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"},{"id":1332,"name":"DASK_LICENSE","nodeType":"TextFile","path":"licenses","text":":py:meth:`~xarray.DataArray.isin`Copyright (c) 2014-2018, Anaconda, Inc. and contributors\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\nRedistributions of source code must retain the above copyright notice,\nthis list of conditions and the following disclaimer.\n\nRedistributions in binary form must reproduce the above copyright notice,\nthis list of conditions and the following disclaimer in the documentation\nand/or other materials provided with the distribution.\n\nNeither the name of Anaconda nor the names of any contributors may be used to\nendorse or promote products derived from this software without specific prior\nwritten permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\nSUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\nINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\nCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\nARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF\nTHE POSSIBILITY OF SUCH DAMAGE.\n"},{"col":4,"comment":"null","endLoc":312,"header":"def map_dataarray_line(\n self, func, x, y, hue, add_legend=True, _labels=None, **kwargs\n )","id":1333,"name":"map_dataarray_line","nodeType":"Function","startLoc":280,"text":"def map_dataarray_line(\n self, func, x, y, hue, add_legend=True, _labels=None, **kwargs\n ):\n from .plot import _infer_line_data\n\n for d, ax in zip(self.name_dicts.flat, self.axes.flat):\n # None is the sentinel value\n if d is not None:\n subset = self.data.loc[d]\n mappable = func(\n subset,\n x=x,\n y=y,\n ax=ax,\n hue=hue,\n add_legend=False,\n _labels=False,\n **kwargs\n )\n self._mappables.append(mappable)\n\n _, _, hueplt, xlabel, ylabel, huelabel = _infer_line_data(\n darray=self.data.loc[self.name_dicts.flat[0]], x=x, y=y, hue=hue\n )\n\n self._hue_var = hueplt\n self._hue_label = huelabel\n self._finalize_grid(xlabel, ylabel)\n\n if add_legend and hueplt is not None and huelabel is not None:\n self.add_legend()\n\n return self"},{"fileName":"print_versions.py","filePath":"xarray/util","id":1334,"nodeType":"File","text":"\"\"\"Utility functions for printing version information.\"\"\"\nimport importlib\nimport locale\nimport os\nimport platform\nimport struct\nimport subprocess\nimport sys\n\n\ndef get_sys_info():\n \"Returns system information as a dict\"\n\n blob = []\n\n # get full commit hash\n commit = None\n if os.path.isdir(\".git\") 
and os.path.isdir(\"xarray\"):\n try:\n pipe = subprocess.Popen(\n 'git log --format=\"%H\" -n 1'.split(\" \"),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n so, serr = pipe.communicate()\n except Exception:\n pass\n else:\n if pipe.returncode == 0:\n commit = so\n try:\n commit = so.decode(\"utf-8\")\n except ValueError:\n pass\n commit = commit.strip().strip('\"')\n\n blob.append((\"commit\", commit))\n\n try:\n (sysname, nodename, release, version, machine, processor) = platform.uname()\n blob.extend(\n [\n (\"python\", sys.version),\n (\"python-bits\", struct.calcsize(\"P\") * 8),\n (\"OS\", \"%s\" % (sysname)),\n (\"OS-release\", \"%s\" % (release)),\n # (\"Version\", \"%s\" % (version)),\n (\"machine\", \"%s\" % (machine)),\n (\"processor\", \"%s\" % (processor)),\n (\"byteorder\", \"%s\" % sys.byteorder),\n (\"LC_ALL\", \"%s\" % os.environ.get(\"LC_ALL\", \"None\")),\n (\"LANG\", \"%s\" % os.environ.get(\"LANG\", \"None\")),\n (\"LOCALE\", \"%s.%s\" % locale.getlocale()),\n ]\n )\n except Exception:\n pass\n\n return blob\n\n\ndef netcdf_and_hdf5_versions():\n libhdf5_version = None\n libnetcdf_version = None\n try:\n import netCDF4\n\n libhdf5_version = netCDF4.__hdf5libversion__\n libnetcdf_version = netCDF4.__netcdf4libversion__\n except ImportError:\n try:\n import h5py\n\n libhdf5_version = h5py.version.hdf5_version\n except ImportError:\n pass\n return [(\"libhdf5\", libhdf5_version), (\"libnetcdf\", libnetcdf_version)]\n\n\ndef show_versions(file=sys.stdout):\n sys_info = get_sys_info()\n\n try:\n sys_info.extend(netcdf_and_hdf5_versions())\n except Exception as e:\n print(\"Error collecting netcdf / hdf5 version: {}\".format(e))\n\n deps = [\n # (MODULE_NAME, f(mod) -> mod version)\n (\"xarray\", lambda mod: mod.__version__),\n (\"pandas\", lambda mod: mod.__version__),\n (\"numpy\", lambda mod: mod.__version__),\n (\"scipy\", lambda mod: mod.__version__),\n # xarray optionals\n (\"netCDF4\", lambda mod: mod.__version__),\n (\"pydap\", lambda mod: mod.__version__),\n (\"h5netcdf\", lambda mod: mod.__version__),\n (\"h5py\", lambda mod: mod.__version__),\n (\"Nio\", lambda mod: mod.__version__),\n (\"zarr\", lambda mod: mod.__version__),\n (\"cftime\", lambda mod: mod.__version__),\n (\"nc_time_axis\", lambda mod: mod.__version__),\n (\"PseudoNetCDF\", lambda mod: mod.__version__),\n (\"rasterio\", lambda mod: mod.__version__),\n (\"cfgrib\", lambda mod: mod.__version__),\n (\"iris\", lambda mod: mod.__version__),\n (\"bottleneck\", lambda mod: mod.__version__),\n (\"dask\", lambda mod: mod.__version__),\n (\"distributed\", lambda mod: mod.__version__),\n (\"matplotlib\", lambda mod: mod.__version__),\n (\"cartopy\", lambda mod: mod.__version__),\n (\"seaborn\", lambda mod: mod.__version__),\n (\"numbagg\", lambda mod: mod.__version__),\n # xarray setup/test\n (\"setuptools\", lambda mod: mod.__version__),\n (\"pip\", lambda mod: mod.__version__),\n (\"conda\", lambda mod: mod.__version__),\n (\"pytest\", lambda mod: mod.__version__),\n # Misc.\n (\"IPython\", lambda mod: mod.__version__),\n (\"sphinx\", lambda mod: mod.__version__),\n ]\n\n deps_blob = list()\n for (modname, ver_f) in deps:\n try:\n if modname in sys.modules:\n mod = sys.modules[modname]\n else:\n mod = importlib.import_module(modname)\n except Exception:\n deps_blob.append((modname, None))\n else:\n try:\n ver = ver_f(mod)\n deps_blob.append((modname, ver))\n except Exception:\n deps_blob.append((modname, \"installed\"))\n\n print(\"\\nINSTALLED VERSIONS\", file=file)\n 
print(\"------------------\", file=file)\n\n for k, stat in sys_info:\n print(\"%s: %s\" % (k, stat), file=file)\n\n print(\"\", file=file)\n for k, stat in deps_blob:\n print(\"%s: %s\" % (k, stat), file=file)\n\n\nif __name__ == \"__main__\":\n show_versions()\n"},{"col":4,"comment":"Conform this object onto the indexes of another object, filling in\n missing values with ``fill_value``. The default fill value is NaN.\n\n Parameters\n ----------\n indexers : dict, optional\n Dictionary with keys given by dimension names and values given by\n arrays of coordinates tick labels. Any mis-matched coordinate\n values will be filled in with NaN, and any mis-matched dimension\n names will simply be ignored.\n One of indexers or indexers_kwargs must be provided.\n copy : bool, optional\n If ``copy=True``, data in the return value is always copied. If\n ``copy=False`` and reindexing is unnecessary, or can be performed\n with only slice operations, then the output may share memory with\n the input. In either case, a new xarray object is always returned.\n method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional\n Method to use for filling index values in ``indexers`` not found on\n this data array:\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n fill_value : scalar, optional\n Value to use for newly missing values\n **indexers_kwarg : {dim: indexer, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n Returns\n -------\n reindexed : DataArray\n Another dataset array, with this array's data but replaced\n coordinates.\n\n See Also\n --------\n DataArray.reindex_like\n align\n ","endLoc":1288,"header":"def reindex(\n self,\n indexers: Mapping[Hashable, Any] = None,\n method: str = None,\n tolerance=None,\n copy: bool = True,\n fill_value=dtypes.NA,\n **indexers_kwargs: Any\n ) -> \"DataArray\"","id":1335,"name":"reindex","nodeType":"Function","startLoc":1226,"text":"def reindex(\n self,\n indexers: Mapping[Hashable, Any] = None,\n method: str = None,\n tolerance=None,\n copy: bool = True,\n fill_value=dtypes.NA,\n **indexers_kwargs: Any\n ) -> \"DataArray\":\n \"\"\"Conform this object onto the indexes of another object, filling in\n missing values with ``fill_value``. The default fill value is NaN.\n\n Parameters\n ----------\n indexers : dict, optional\n Dictionary with keys given by dimension names and values given by\n arrays of coordinates tick labels. Any mis-matched coordinate\n values will be filled in with NaN, and any mis-matched dimension\n names will simply be ignored.\n One of indexers or indexers_kwargs must be provided.\n copy : bool, optional\n If ``copy=True``, data in the return value is always copied. If\n ``copy=False`` and reindexing is unnecessary, or can be performed\n with only slice operations, then the output may share memory with\n the input. 
In either case, a new xarray object is always returned.\n method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional\n Method to use for filling index values in ``indexers`` not found on\n this data array:\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n fill_value : scalar, optional\n Value to use for newly missing values\n **indexers_kwarg : {dim: indexer, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n Returns\n -------\n reindexed : DataArray\n Another dataset array, with this array's data but replaced\n coordinates.\n\n See Also\n --------\n DataArray.reindex_like\n align\n \"\"\"\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"reindex\")\n ds = self._to_temp_dataset().reindex(\n indexers=indexers,\n method=method,\n tolerance=tolerance,\n copy=copy,\n fill_value=fill_value,\n )\n return self._from_temp_dataset(ds)"},{"col":4,"comment":"Broadcast this DataArray against another Dataset or DataArray.\n This is equivalent to xr.broadcast(other, self)[1]\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object against which to broadcast this array.\n exclude : iterable of hashable, optional\n Dimensions that must not be broadcasted\n\n ","endLoc":2174,"header":"def broadcast_like(\n self, other: Union[\"Dataset\", \"DataArray\"], exclude: Iterable[Hashable] = None\n ) -> \"Dataset\"","id":1336,"name":"broadcast_like","nodeType":"Function","startLoc":2152,"text":"def broadcast_like(\n self, other: Union[\"Dataset\", \"DataArray\"], exclude: Iterable[Hashable] = None\n ) -> \"Dataset\":\n \"\"\"Broadcast this DataArray against another Dataset or DataArray.\n This is equivalent to xr.broadcast(other, self)[1]\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object against which to broadcast this array.\n exclude : iterable of hashable, optional\n Dimensions that must not be broadcasted\n\n \"\"\"\n if exclude is None:\n exclude = set()\n else:\n exclude = set(exclude)\n args = align(other, self, join=\"outer\", copy=False, exclude=exclude)\n\n dims_map, common_coords = _get_broadcast_dims_map_common_coords(args, exclude)\n\n return _broadcast_helper(args[1], exclude, dims_map, common_coords)"},{"col":0,"comment":"Returns system information as a dict","endLoc":59,"header":"def get_sys_info()","id":1337,"name":"get_sys_info","nodeType":"Function","startLoc":11,"text":"def get_sys_info():\n \"Returns system information as a dict\"\n\n blob = []\n\n # get full commit hash\n commit = None\n if os.path.isdir(\".git\") and os.path.isdir(\"xarray\"):\n try:\n pipe = subprocess.Popen(\n 'git log --format=\"%H\" -n 1'.split(\" \"),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n so, serr = pipe.communicate()\n except Exception:\n pass\n else:\n if pipe.returncode == 0:\n commit = so\n try:\n commit = so.decode(\"utf-8\")\n except ValueError:\n pass\n commit = commit.strip().strip('\"')\n\n blob.append((\"commit\", commit))\n\n try:\n (sysname, nodename, release, version, machine, processor) = platform.uname()\n blob.extend(\n [\n (\"python\", sys.version),\n (\"python-bits\", struct.calcsize(\"P\") 
* 8),\n (\"OS\", \"%s\" % (sysname)),\n (\"OS-release\", \"%s\" % (release)),\n # (\"Version\", \"%s\" % (version)),\n (\"machine\", \"%s\" % (machine)),\n (\"processor\", \"%s\" % (processor)),\n (\"byteorder\", \"%s\" % sys.byteorder),\n (\"LC_ALL\", \"%s\" % os.environ.get(\"LC_ALL\", \"None\")),\n (\"LANG\", \"%s\" % os.environ.get(\"LANG\", \"None\")),\n (\"LOCALE\", \"%s.%s\" % locale.getlocale()),\n ]\n )\n except Exception:\n pass\n\n return blob"},{"attributeType":"null","col":8,"comment":"null","endLoc":113,"id":1338,"name":"filepath","nodeType":"Attribute","startLoc":113,"text":"self.filepath"},{"col":0,"comment":"null","endLoc":115,"header":"def _infer_line_data(darray, x, y, hue)","id":1339,"name":"_infer_line_data","nodeType":"Function","startLoc":33,"text":"def _infer_line_data(darray, x, y, hue):\n error_msg = \"must be either None or one of ({:s})\".format(\n \", \".join([repr(dd) for dd in darray.dims])\n )\n ndims = len(darray.dims)\n\n if x is not None and x not in darray.dims and x not in darray.coords:\n raise ValueError(\"x \" + error_msg)\n\n if y is not None and y not in darray.dims and y not in darray.coords:\n raise ValueError(\"y \" + error_msg)\n\n if x is not None and y is not None:\n raise ValueError(\"You cannot specify both x and y kwargs\" \"for line plots.\")\n\n if ndims == 1:\n huename = None\n hueplt = None\n huelabel = \"\"\n\n if x is not None:\n xplt = darray[x]\n yplt = darray\n\n elif y is not None:\n xplt = darray\n yplt = darray[y]\n\n else: # Both x & y are None\n dim = darray.dims[0]\n xplt = darray[dim]\n yplt = darray\n\n else:\n if x is None and y is None and hue is None:\n raise ValueError(\"For 2D inputs, please\" \"specify either hue, x or y.\")\n\n if y is None:\n xname, huename = _infer_xy_labels(darray=darray, x=x, y=hue)\n xplt = darray[xname]\n if xplt.ndim > 1:\n if huename in darray.dims:\n otherindex = 1 if darray.dims.index(huename) == 0 else 0\n otherdim = darray.dims[otherindex]\n yplt = darray.transpose(otherdim, huename, transpose_coords=False)\n xplt = xplt.transpose(otherdim, huename, transpose_coords=False)\n else:\n raise ValueError(\n \"For 2D inputs, hue must be a dimension\"\n \" i.e. one of \" + repr(darray.dims)\n )\n\n else:\n xdim, = darray[xname].dims\n huedim, = darray[huename].dims\n yplt = darray.transpose(xdim, huedim)\n\n else:\n yname, huename = _infer_xy_labels(darray=darray, x=y, y=hue)\n yplt = darray[yname]\n if yplt.ndim > 1:\n if huename in darray.dims:\n otherindex = 1 if darray.dims.index(huename) == 0 else 0\n otherdim = darray.dims[otherindex]\n xplt = darray.transpose(otherdim, huename, transpose_coords=False)\n else:\n raise ValueError(\n \"For 2D inputs, hue must be a dimension\"\n \" i.e. one of \" + repr(darray.dims)\n )\n\n else:\n ydim, = darray[yname].dims\n huedim, = darray[huename].dims\n xplt = darray.transpose(ydim, huedim)\n\n huelabel = label_from_attrs(darray[huename])\n hueplt = darray[huename]\n\n xlabel = label_from_attrs(xplt)\n ylabel = label_from_attrs(yplt)\n\n return xplt, yplt, hueplt, xlabel, ylabel, huelabel"},{"col":4,"comment":"Conform this object onto a new set of indexes, filling in\n missing values with ``fill_value``. The default fill value is NaN.\n\n Parameters\n ----------\n indexers : dict. optional\n Dictionary with keys given by dimension names and values given by\n arrays of coordinates tick labels. 
Any mis-matched coordinate\n values will be filled in with NaN, and any mis-matched dimension\n names will simply be ignored.\n One of indexers or indexers_kwargs must be provided.\n method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional\n Method to use for filling index values in ``indexers`` not found in\n this dataset:\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n copy : bool, optional\n If ``copy=True``, data in the return value is always copied. If\n ``copy=False`` and reindexing is unnecessary, or can be performed\n with only slice operations, then the output may share memory with\n the input. In either case, a new xarray object is always returned.\n fill_value : scalar, optional\n Value to use for newly missing values\n **indexers_kwarg : {dim: indexer, ...}, optional\n Keyword arguments in the same form as ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n Returns\n -------\n reindexed : Dataset\n Another dataset, with this dataset's data but replaced coordinates.\n\n See Also\n --------\n Dataset.reindex_like\n align\n pandas.Index.get_indexer\n\n Examples\n --------\n\n Create a dataset with some fictional data.\n\n >>> import xarray as xr\n >>> import pandas as pd\n >>> x = xr.Dataset(\n ... {\n ... \"temperature\": (\"station\", 20 * np.random.rand(4)),\n ... \"pressure\": (\"station\", 500 * np.random.rand(4))\n ... },\n ... coords={\"station\": [\"boston\", \"nyc\", \"seattle\", \"denver\"]})\n >>> x\n \n Dimensions: (station: 4)\n Coordinates:\n * station (station) >> x.indexes\n station: Index(['boston', 'nyc', 'seattle', 'denver'], dtype='object', name='station')\n\n Create a new index and reindex the dataset. By default values in the new index that\n do not have corresponding records in the dataset are assigned `NaN`.\n\n >>> new_index = ['boston', 'austin', 'seattle', 'lincoln']\n >>> x.reindex({'station': new_index})\n \n Dimensions: (station: 4)\n Coordinates:\n * station (station) object 'boston' 'austin' 'seattle' 'lincoln'\n Data variables:\n temperature (station) float64 18.84 nan 19.22 nan\n pressure (station) float64 324.1 nan 122.8 nan\n\n We can fill in the missing values by passing a value to the keyword `fill_value`.\n\n >>> x.reindex({'station': new_index}, fill_value=0)\n \n Dimensions: (station: 4)\n Coordinates:\n * station (station) object 'boston' 'austin' 'seattle' 'lincoln'\n Data variables:\n temperature (station) float64 18.84 0.0 19.22 0.0\n pressure (station) float64 324.1 0.0 122.8 0.0\n\n Because the index is not monotonically increasing or decreasing, we cannot use arguments\n to the keyword method to fill the `NaN` values.\n\n >>> x.reindex({'station': new_index}, method='nearest')\n Traceback (most recent call last):\n ...\n raise ValueError('index must be monotonic increasing or decreasing')\n ValueError: index must be monotonic increasing or decreasing\n\n To further illustrate the filling functionality in reindex, we will create a\n dataset with a monotonically increasing index (for example, a sequence of dates).\n\n >>> x2 = xr.Dataset(\n ... {\n ... 
\"temperature\": (\"time\", [15.57, 12.77, np.nan, 0.3081, 16.59, 15.12]),\n ... \"pressure\": (\"time\", 500 * np.random.rand(6))\n ... },\n ... coords={\"time\": pd.date_range('01/01/2019', periods=6, freq='D')})\n >>> x2\n \n Dimensions: (time: 6)\n Coordinates:\n * time (time) datetime64[ns] 2019-01-01 2019-01-02 ... 2019-01-06\n Data variables:\n temperature (time) float64 15.57 12.77 nan 0.3081 16.59 15.12\n pressure (time) float64 103.4 122.7 452.0 444.0 399.2 486.0\n\n Suppose we decide to expand the dataset to cover a wider date range.\n\n >>> time_index2 = pd.date_range('12/29/2018', periods=10, freq='D')\n >>> x2.reindex({'time': time_index2})\n \n Dimensions: (time: 10)\n Coordinates:\n * time (time) datetime64[ns] 2018-12-29 2018-12-30 ... 2019-01-07\n Data variables:\n temperature (time) float64 nan nan nan 15.57 ... 0.3081 16.59 15.12 nan\n pressure (time) float64 nan nan nan 103.4 ... 444.0 399.2 486.0 nan\n\n The index entries that did not have a value in the original data frame (for example, `2018-12-29`)\n are by default filled with NaN. If desired, we can fill in the missing values using one of several options.\n\n For example, to back-propagate the last valid value to fill the `NaN` values,\n pass `bfill` as an argument to the `method` keyword.\n\n >>> x3 = x2.reindex({'time': time_index2}, method='bfill')\n >>> x3\n \n Dimensions: (time: 10)\n Coordinates:\n * time (time) datetime64[ns] 2018-12-29 2018-12-30 ... 2019-01-07\n Data variables:\n temperature (time) float64 15.57 15.57 15.57 15.57 ... 16.59 15.12 nan\n pressure (time) float64 103.4 103.4 103.4 103.4 ... 399.2 486.0 nan\n\n Please note that the `NaN` value present in the original dataset (at index value `2019-01-03`)\n will not be filled by any of the value propagation schemes.\n\n >>> x2.where(x2.temperature.isnull(), drop=True)\n \n Dimensions: (time: 1)\n Coordinates:\n * time (time) datetime64[ns] 2019-01-03\n Data variables:\n temperature (time) float64 nan\n pressure (time) float64 452.0\n >>> x3.where(x3.temperature.isnull(), drop=True)\n \n Dimensions: (time: 2)\n Coordinates:\n * time (time) datetime64[ns] 2019-01-03 2019-01-07\n Data variables:\n temperature (time) float64 nan nan\n pressure (time) float64 452.0 nan\n\n This is because filling while reindexing does not look at dataset values, but only compares\n the original and desired indexes. If you do want to fill in the `NaN` values present in the\n original dataset, use the :py:meth:`~Dataset.fillna()` method.\n\n ","endLoc":2436,"header":"def reindex(\n self,\n indexers: Mapping[Hashable, Any] = None,\n method: str = None,\n tolerance: Number = None,\n copy: bool = True,\n fill_value: Any = dtypes.NA,\n **indexers_kwargs: Any,\n ) -> \"Dataset\"","id":1340,"name":"reindex","nodeType":"Function","startLoc":2236,"text":"def reindex(\n self,\n indexers: Mapping[Hashable, Any] = None,\n method: str = None,\n tolerance: Number = None,\n copy: bool = True,\n fill_value: Any = dtypes.NA,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Conform this object onto a new set of indexes, filling in\n missing values with ``fill_value``. The default fill value is NaN.\n\n Parameters\n ----------\n indexers : dict. optional\n Dictionary with keys given by dimension names and values given by\n arrays of coordinates tick labels. 
Any mis-matched coordinate\n values will be filled in with NaN, and any mis-matched dimension\n names will simply be ignored.\n One of indexers or indexers_kwargs must be provided.\n method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional\n Method to use for filling index values in ``indexers`` not found in\n this dataset:\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n copy : bool, optional\n If ``copy=True``, data in the return value is always copied. If\n ``copy=False`` and reindexing is unnecessary, or can be performed\n with only slice operations, then the output may share memory with\n the input. In either case, a new xarray object is always returned.\n fill_value : scalar, optional\n Value to use for newly missing values\n **indexers_kwarg : {dim: indexer, ...}, optional\n Keyword arguments in the same form as ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n Returns\n -------\n reindexed : Dataset\n Another dataset, with this dataset's data but replaced coordinates.\n\n See Also\n --------\n Dataset.reindex_like\n align\n pandas.Index.get_indexer\n\n Examples\n --------\n\n Create a dataset with some fictional data.\n\n >>> import xarray as xr\n >>> import pandas as pd\n >>> x = xr.Dataset(\n ... {\n ... \"temperature\": (\"station\", 20 * np.random.rand(4)),\n ... \"pressure\": (\"station\", 500 * np.random.rand(4))\n ... },\n ... coords={\"station\": [\"boston\", \"nyc\", \"seattle\", \"denver\"]})\n >>> x\n \n Dimensions: (station: 4)\n Coordinates:\n * station (station) >> x.indexes\n station: Index(['boston', 'nyc', 'seattle', 'denver'], dtype='object', name='station')\n\n Create a new index and reindex the dataset. By default values in the new index that\n do not have corresponding records in the dataset are assigned `NaN`.\n\n >>> new_index = ['boston', 'austin', 'seattle', 'lincoln']\n >>> x.reindex({'station': new_index})\n \n Dimensions: (station: 4)\n Coordinates:\n * station (station) object 'boston' 'austin' 'seattle' 'lincoln'\n Data variables:\n temperature (station) float64 18.84 nan 19.22 nan\n pressure (station) float64 324.1 nan 122.8 nan\n\n We can fill in the missing values by passing a value to the keyword `fill_value`.\n\n >>> x.reindex({'station': new_index}, fill_value=0)\n \n Dimensions: (station: 4)\n Coordinates:\n * station (station) object 'boston' 'austin' 'seattle' 'lincoln'\n Data variables:\n temperature (station) float64 18.84 0.0 19.22 0.0\n pressure (station) float64 324.1 0.0 122.8 0.0\n\n Because the index is not monotonically increasing or decreasing, we cannot use arguments\n to the keyword method to fill the `NaN` values.\n\n >>> x.reindex({'station': new_index}, method='nearest')\n Traceback (most recent call last):\n ...\n raise ValueError('index must be monotonic increasing or decreasing')\n ValueError: index must be monotonic increasing or decreasing\n\n To further illustrate the filling functionality in reindex, we will create a\n dataset with a monotonically increasing index (for example, a sequence of dates).\n\n >>> x2 = xr.Dataset(\n ... {\n ... 
\"temperature\": (\"time\", [15.57, 12.77, np.nan, 0.3081, 16.59, 15.12]),\n ... \"pressure\": (\"time\", 500 * np.random.rand(6))\n ... },\n ... coords={\"time\": pd.date_range('01/01/2019', periods=6, freq='D')})\n >>> x2\n \n Dimensions: (time: 6)\n Coordinates:\n * time (time) datetime64[ns] 2019-01-01 2019-01-02 ... 2019-01-06\n Data variables:\n temperature (time) float64 15.57 12.77 nan 0.3081 16.59 15.12\n pressure (time) float64 103.4 122.7 452.0 444.0 399.2 486.0\n\n Suppose we decide to expand the dataset to cover a wider date range.\n\n >>> time_index2 = pd.date_range('12/29/2018', periods=10, freq='D')\n >>> x2.reindex({'time': time_index2})\n \n Dimensions: (time: 10)\n Coordinates:\n * time (time) datetime64[ns] 2018-12-29 2018-12-30 ... 2019-01-07\n Data variables:\n temperature (time) float64 nan nan nan 15.57 ... 0.3081 16.59 15.12 nan\n pressure (time) float64 nan nan nan 103.4 ... 444.0 399.2 486.0 nan\n\n The index entries that did not have a value in the original data frame (for example, `2018-12-29`)\n are by default filled with NaN. If desired, we can fill in the missing values using one of several options.\n\n For example, to back-propagate the last valid value to fill the `NaN` values,\n pass `bfill` as an argument to the `method` keyword.\n\n >>> x3 = x2.reindex({'time': time_index2}, method='bfill')\n >>> x3\n \n Dimensions: (time: 10)\n Coordinates:\n * time (time) datetime64[ns] 2018-12-29 2018-12-30 ... 2019-01-07\n Data variables:\n temperature (time) float64 15.57 15.57 15.57 15.57 ... 16.59 15.12 nan\n pressure (time) float64 103.4 103.4 103.4 103.4 ... 399.2 486.0 nan\n\n Please note that the `NaN` value present in the original dataset (at index value `2019-01-03`)\n will not be filled by any of the value propagation schemes.\n\n >>> x2.where(x2.temperature.isnull(), drop=True)\n \n Dimensions: (time: 1)\n Coordinates:\n * time (time) datetime64[ns] 2019-01-03\n Data variables:\n temperature (time) float64 nan\n pressure (time) float64 452.0\n >>> x3.where(x3.temperature.isnull(), drop=True)\n \n Dimensions: (time: 2)\n Coordinates:\n * time (time) datetime64[ns] 2019-01-03 2019-01-07\n Data variables:\n temperature (time) float64 nan nan\n pressure (time) float64 452.0 nan\n\n This is because filling while reindexing does not look at dataset values, but only compares\n the original and desired indexes. If you do want to fill in the `NaN` values present in the\n original dataset, use the :py:meth:`~Dataset.fillna()` method.\n\n \"\"\"\n indexers = utils.either_dict_or_kwargs(indexers, indexers_kwargs, \"reindex\")\n\n bad_dims = [d for d in indexers if d not in self.dims]\n if bad_dims:\n raise ValueError(\"invalid reindex dimensions: %s\" % bad_dims)\n\n variables, indexes = alignment.reindex_variables(\n self.variables,\n self.sizes,\n self.indexes,\n indexers,\n method,\n tolerance,\n copy=copy,\n fill_value=fill_value,\n )\n coord_names = set(self._coord_names)\n coord_names.update(indexers)\n return self._replace_with_new_dims(variables, coord_names, indexes=indexes)"},{"col":4,"comment":"Conform this object onto the indexes of another object, filling in\n missing values with ``fill_value``. The default fill value is NaN.\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object with an 'indexes' attribute giving a mapping from dimension\n names to pandas.Index objects, which provides coordinates upon\n which to index the variables in this dataset. The indexes on this\n other object need not be the same as the indexes on this\n dataset. 
Any mis-matched index values will be filled in with\n NaN, and any mis-matched dimension names will simply be ignored.\n method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional\n Method to use for filling index values from other not found in this\n dataset:\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n copy : bool, optional\n If ``copy=True``, data in the return value is always copied. If\n ``copy=False`` and reindexing is unnecessary, or can be performed\n with only slice operations, then the output may share memory with\n the input. In either case, a new xarray object is always returned.\n fill_value : scalar, optional\n Value to use for newly missing values\n\n Returns\n -------\n reindexed : Dataset\n Another dataset, with this dataset's data but coordinates from the\n other object.\n\n See Also\n --------\n Dataset.reindex\n align\n ","endLoc":2234,"header":"def reindex_like(\n self,\n other: Union[\"Dataset\", \"DataArray\"],\n method: str = None,\n tolerance: Number = None,\n copy: bool = True,\n fill_value: Any = dtypes.NA,\n ) -> \"Dataset\"","id":1341,"name":"reindex_like","nodeType":"Function","startLoc":2176,"text":"def reindex_like(\n self,\n other: Union[\"Dataset\", \"DataArray\"],\n method: str = None,\n tolerance: Number = None,\n copy: bool = True,\n fill_value: Any = dtypes.NA,\n ) -> \"Dataset\":\n \"\"\"Conform this object onto the indexes of another object, filling in\n missing values with ``fill_value``. The default fill value is NaN.\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object with an 'indexes' attribute giving a mapping from dimension\n names to pandas.Index objects, which provides coordinates upon\n which to index the variables in this dataset. The indexes on this\n other object need not be the same as the indexes on this\n dataset. Any mis-matched index values will be filled in with\n NaN, and any mis-matched dimension names will simply be ignored.\n method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional\n Method to use for filling index values from other not found in this\n dataset:\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n copy : bool, optional\n If ``copy=True``, data in the return value is always copied. If\n ``copy=False`` and reindexing is unnecessary, or can be performed\n with only slice operations, then the output may share memory with\n the input. 
In either case, a new xarray object is always returned.\n fill_value : scalar, optional\n Value to use for newly missing values\n\n Returns\n -------\n reindexed : Dataset\n Another dataset, with this dataset's data but coordinates from the\n other object.\n\n See Also\n --------\n Dataset.reindex\n align\n \"\"\"\n indexers = alignment.reindex_like_indexers(self, other)\n return self.reindex(\n indexers=indexers,\n method=method,\n copy=copy,\n fill_value=fill_value,\n tolerance=tolerance,\n )"},{"attributeType":"null","col":8,"comment":"null","endLoc":114,"id":1342,"name":"format","nodeType":"Attribute","startLoc":114,"text":"self.format"},{"col":0,"comment":"Conform a dictionary of aligned variables onto a new set of variables,\n filling in missing values with NaN.\n\n Not public API.\n\n Parameters\n ----------\n variables : dict-like\n Dictionary of xarray.Variable objects.\n sizes : dict-like\n Dictionary from dimension names to integer sizes.\n indexes : dict-like\n Dictionary of indexes associated with variables.\n indexers : dict\n Dictionary with keys given by dimension names and values given by\n arrays of coordinates tick labels. Any mis-matched coordinate values\n will be filled in with NaN, and any mis-matched dimension names will\n simply be ignored.\n method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional\n Method to use for filling index values in ``indexers`` not found in\n this dataset:\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact matches.\n The values of the index at the matching locations must satisfy the\n equation ``abs(index[indexer] - target) <= tolerance``.\n copy : bool, optional\n If ``copy=True``, data in the return values is always copied. If\n ``copy=False`` and reindexing is unnecessary, or can be performed\n with only slice operations, then the output may share memory with\n the input. 
In either case, new xarray objects are always returned.\n fill_value : scalar, optional\n Value to use for newly missing values\n\n Returns\n -------\n reindexed : dict\n Dict of reindexed variables.\n new_indexes : dict\n Dict of indexes associated with the reindexed variables.\n ","endLoc":594,"header":"def reindex_variables(\n variables: Mapping[Any, Variable],\n sizes: Mapping[Any, int],\n indexes: Mapping[Any, pd.Index],\n indexers: Mapping,\n method: Optional[str] = None,\n tolerance: Any = None,\n copy: bool = True,\n fill_value: Optional[Any] = dtypes.NA,\n) -> Tuple[Dict[Hashable, Variable], Dict[Hashable, pd.Index]]","id":1343,"name":"reindex_variables","nodeType":"Function","startLoc":462,"text":"def reindex_variables(\n variables: Mapping[Any, Variable],\n sizes: Mapping[Any, int],\n indexes: Mapping[Any, pd.Index],\n indexers: Mapping,\n method: Optional[str] = None,\n tolerance: Any = None,\n copy: bool = True,\n fill_value: Optional[Any] = dtypes.NA,\n) -> Tuple[Dict[Hashable, Variable], Dict[Hashable, pd.Index]]:\n \"\"\"Conform a dictionary of aligned variables onto a new set of variables,\n filling in missing values with NaN.\n\n Not public API.\n\n Parameters\n ----------\n variables : dict-like\n Dictionary of xarray.Variable objects.\n sizes : dict-like\n Dictionary from dimension names to integer sizes.\n indexes : dict-like\n Dictionary of indexes associated with variables.\n indexers : dict\n Dictionary with keys given by dimension names and values given by\n arrays of coordinates tick labels. Any mis-matched coordinate values\n will be filled in with NaN, and any mis-matched dimension names will\n simply be ignored.\n method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional\n Method to use for filling index values in ``indexers`` not found in\n this dataset:\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact matches.\n The values of the index at the matching locations must satisfy the\n equation ``abs(index[indexer] - target) <= tolerance``.\n copy : bool, optional\n If ``copy=True``, data in the return values is always copied. If\n ``copy=False`` and reindexing is unnecessary, or can be performed\n with only slice operations, then the output may share memory with\n the input. 
In either case, new xarray objects are always returned.\n fill_value : scalar, optional\n Value to use for newly missing values\n\n Returns\n -------\n reindexed : dict\n Dict of reindexed variables.\n new_indexes : dict\n Dict of indexes associated with the reindexed variables.\n \"\"\"\n from .dataarray import DataArray\n\n # create variables for the new dataset\n reindexed: Dict[Hashable, Variable] = {}\n\n # build up indexers for assignment along each dimension\n int_indexers = {}\n new_indexes = dict(indexes)\n masked_dims = set()\n unchanged_dims = set()\n\n for dim, indexer in indexers.items():\n if isinstance(indexer, DataArray) and indexer.dims != (dim,):\n raise ValueError(\n \"Indexer has dimensions {:s} that are different \"\n \"from that to be indexed along {:s}\".format(str(indexer.dims), dim)\n )\n\n target = new_indexes[dim] = utils.safe_cast_to_index(indexers[dim])\n\n if dim in indexes:\n index = indexes[dim]\n\n if not index.is_unique:\n raise ValueError(\n \"cannot reindex or align along dimension %r because the \"\n \"index has duplicate values\" % dim\n )\n\n int_indexer = get_indexer_nd(index, target, method, tolerance)\n\n # We uses negative values from get_indexer_nd to signify\n # values that are missing in the index.\n if (int_indexer < 0).any():\n masked_dims.add(dim)\n elif np.array_equal(int_indexer, np.arange(len(index))):\n unchanged_dims.add(dim)\n\n int_indexers[dim] = int_indexer\n\n if dim in variables:\n var = variables[dim]\n args: tuple = (var.attrs, var.encoding)\n else:\n args = ()\n reindexed[dim] = IndexVariable((dim,), target, *args)\n\n for dim in sizes:\n if dim not in indexes and dim in indexers:\n existing_size = sizes[dim]\n new_size = indexers[dim].size\n if existing_size != new_size:\n raise ValueError(\n \"cannot reindex or align along dimension %r without an \"\n \"index because its size %r is different from the size of \"\n \"the new index %r\" % (dim, existing_size, new_size)\n )\n\n for name, var in variables.items():\n if name not in indexers:\n key = tuple(\n slice(None) if d in unchanged_dims else int_indexers.get(d, slice(None))\n for d in var.dims\n )\n needs_masking = any(d in masked_dims for d in var.dims)\n\n if needs_masking:\n new_var = var._getitem_with_mask(key, fill_value=fill_value)\n elif all(is_full_slice(k) for k in key):\n # no reindexing necessary\n # here we need to manually deal with copying data, since\n # we neither created a new ndarray nor used fancy indexing\n new_var = var.copy(deep=copy)\n else:\n new_var = var[key]\n\n reindexed[name] = new_var\n\n return reindexed, new_indexes"},{"className":"IOReadSingleNetCDF3","col":0,"comment":"null","endLoc":147,"id":1344,"nodeType":"Class","startLoc":129,"text":"class IOReadSingleNetCDF3(IOReadSingleNetCDF4):\n def setup(self):\n\n self.make_ds()\n\n self.filepath = \"test_single_file.nc3.nc\"\n self.format = \"NETCDF3_64BIT\"\n self.ds.to_netcdf(self.filepath, format=self.format)\n\n def time_load_dataset_scipy(self):\n xr.open_dataset(self.filepath, engine=\"scipy\").load()\n\n def time_orthogonal_indexing(self):\n ds = xr.open_dataset(self.filepath, engine=\"scipy\")\n ds = ds.isel(**self.oinds).load()\n\n def time_vectorized_indexing(self):\n ds = xr.open_dataset(self.filepath, engine=\"scipy\")\n ds = ds.isel(**self.vinds).load()"},{"col":4,"comment":" Multidimensional interpolation of Dataset.\n\n Parameters\n ----------\n coords : dict, optional\n Mapping from dimension names to the new coordinates.\n New coordinate can be a scalar, array-like or DataArray.\n 
If DataArrays are passed as new coordates, their dimensions are\n used for the broadcasting.\n method: string, optional.\n {'linear', 'nearest'} for multidimensional array,\n {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}\n for 1-dimensional array. 'linear' is used by default.\n assume_sorted: boolean, optional\n If False, values of coordinates that are interpolated over can be\n in any order and they are sorted first. If True, interpolated\n coordinates are assumed to be an array of monotonically increasing\n values.\n kwargs: dictionary, optional\n Additional keyword passed to scipy's interpolator.\n **coords_kwarg : {dim: coordinate, ...}, optional\n The keyword arguments form of ``coords``.\n One of coords or coords_kwargs must be provided.\n\n Returns\n -------\n interpolated: xr.Dataset\n New dataset on the new coordinates.\n\n Notes\n -----\n scipy is required.\n\n See Also\n --------\n scipy.interpolate.interp1d\n scipy.interpolate.interpn\n ","endLoc":2552,"header":"def interp(\n self,\n coords: Mapping[Hashable, Any] = None,\n method: str = \"linear\",\n assume_sorted: bool = False,\n kwargs: Mapping[str, Any] = None,\n **coords_kwargs: Any,\n ) -> \"Dataset\"","id":1345,"name":"interp","nodeType":"Function","startLoc":2438,"text":"def interp(\n self,\n coords: Mapping[Hashable, Any] = None,\n method: str = \"linear\",\n assume_sorted: bool = False,\n kwargs: Mapping[str, Any] = None,\n **coords_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\" Multidimensional interpolation of Dataset.\n\n Parameters\n ----------\n coords : dict, optional\n Mapping from dimension names to the new coordinates.\n New coordinate can be a scalar, array-like or DataArray.\n If DataArrays are passed as new coordates, their dimensions are\n used for the broadcasting.\n method: string, optional.\n {'linear', 'nearest'} for multidimensional array,\n {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}\n for 1-dimensional array. 'linear' is used by default.\n assume_sorted: boolean, optional\n If False, values of coordinates that are interpolated over can be\n in any order and they are sorted first. If True, interpolated\n coordinates are assumed to be an array of monotonically increasing\n values.\n kwargs: dictionary, optional\n Additional keyword passed to scipy's interpolator.\n **coords_kwarg : {dim: coordinate, ...}, optional\n The keyword arguments form of ``coords``.\n One of coords or coords_kwargs must be provided.\n\n Returns\n -------\n interpolated: xr.Dataset\n New dataset on the new coordinates.\n\n Notes\n -----\n scipy is required.\n\n See Also\n --------\n scipy.interpolate.interp1d\n scipy.interpolate.interpn\n \"\"\"\n from . 
import missing\n\n if kwargs is None:\n kwargs = {}\n\n coords = either_dict_or_kwargs(coords, coords_kwargs, \"interp\")\n indexers = dict(self._validate_interp_indexers(coords))\n\n obj = self if assume_sorted else self.sortby([k for k in coords])\n\n def maybe_variable(obj, k):\n # workaround to get variable for dimension without coordinate.\n try:\n return obj._variables[k]\n except KeyError:\n return as_variable((k, range(obj.dims[k])))\n\n def _validate_interp_indexer(x, new_x):\n # In the case of datetimes, the restrictions placed on indexers\n # used with interp are stronger than those which are placed on\n # isel, so we need an additional check after _validate_indexers.\n if _contains_datetime_like_objects(\n x\n ) and not _contains_datetime_like_objects(new_x):\n raise TypeError(\n \"When interpolating over a datetime-like \"\n \"coordinate, the coordinates to \"\n \"interpolate to must be either datetime \"\n \"strings or datetimes. \"\n \"Instead got\\n{}\".format(new_x)\n )\n return x, new_x\n\n variables: Dict[Hashable, Variable] = {}\n for name, var in obj._variables.items():\n if name in indexers:\n continue\n\n if var.dtype.kind in \"uifc\":\n var_indexers = {\n k: _validate_interp_indexer(maybe_variable(obj, k), v)\n for k, v in indexers.items()\n if k in var.dims\n }\n variables[name] = missing.interp(var, var_indexers, method, **kwargs)\n elif all(d not in indexers for d in var.dims):\n # keep unrelated object array\n variables[name] = var\n\n coord_names = obj._coord_names & variables.keys()\n indexes = {k: v for k, v in obj.indexes.items() if k not in indexers}\n selected = self._replace_with_new_dims(\n variables.copy(), coord_names, indexes=indexes\n )\n\n # attach indexer as coordinate\n variables.update(indexers)\n for k, v in indexers.items():\n assert isinstance(v, Variable)\n if v.dims == (k,):\n indexes[k] = v.to_index()\n\n # Extract coordinates from indexers\n coord_vars, new_indexes = selected._get_indexers_coords_and_indexes(coords)\n variables.update(coord_vars)\n indexes.update(new_indexes)\n\n coord_names = obj._coord_names & variables.keys() | coord_vars.keys()\n return self._replace_with_new_dims(variables, coord_names, indexes=indexes)"},{"col":4,"comment":"null","endLoc":136,"header":"def setup(self)","id":1346,"name":"setup","nodeType":"Function","startLoc":130,"text":"def setup(self):\n\n self.make_ds()\n\n self.filepath = \"test_single_file.nc3.nc\"\n self.format = \"NETCDF3_64BIT\"\n self.ds.to_netcdf(self.filepath, format=self.format)"},{"col":0,"comment":"null","endLoc":77,"header":"def netcdf_and_hdf5_versions()","id":1347,"name":"netcdf_and_hdf5_versions","nodeType":"Function","startLoc":62,"text":"def netcdf_and_hdf5_versions():\n libhdf5_version = None\n libnetcdf_version = None\n try:\n import netCDF4\n\n libhdf5_version = netCDF4.__hdf5libversion__\n libnetcdf_version = netCDF4.__netcdf4libversion__\n except ImportError:\n try:\n import h5py\n\n libhdf5_version = h5py.version.hdf5_version\n except ImportError:\n pass\n return [(\"libhdf5\", libhdf5_version), (\"libnetcdf\", libnetcdf_version)]"},{"col":0,"comment":"null","endLoc":148,"header":"def show_versions(file=sys.stdout)","id":1348,"name":"show_versions","nodeType":"Function","startLoc":80,"text":"def show_versions(file=sys.stdout):\n sys_info = get_sys_info()\n\n try:\n sys_info.extend(netcdf_and_hdf5_versions())\n except Exception as e:\n print(\"Error collecting netcdf / hdf5 version: {}\".format(e))\n\n deps = [\n # (MODULE_NAME, f(mod) -> mod version)\n (\"xarray\", lambda 
mod: mod.__version__),\n (\"pandas\", lambda mod: mod.__version__),\n (\"numpy\", lambda mod: mod.__version__),\n (\"scipy\", lambda mod: mod.__version__),\n # xarray optionals\n (\"netCDF4\", lambda mod: mod.__version__),\n (\"pydap\", lambda mod: mod.__version__),\n (\"h5netcdf\", lambda mod: mod.__version__),\n (\"h5py\", lambda mod: mod.__version__),\n (\"Nio\", lambda mod: mod.__version__),\n (\"zarr\", lambda mod: mod.__version__),\n (\"cftime\", lambda mod: mod.__version__),\n (\"nc_time_axis\", lambda mod: mod.__version__),\n (\"PseudoNetCDF\", lambda mod: mod.__version__),\n (\"rasterio\", lambda mod: mod.__version__),\n (\"cfgrib\", lambda mod: mod.__version__),\n (\"iris\", lambda mod: mod.__version__),\n (\"bottleneck\", lambda mod: mod.__version__),\n (\"dask\", lambda mod: mod.__version__),\n (\"distributed\", lambda mod: mod.__version__),\n (\"matplotlib\", lambda mod: mod.__version__),\n (\"cartopy\", lambda mod: mod.__version__),\n (\"seaborn\", lambda mod: mod.__version__),\n (\"numbagg\", lambda mod: mod.__version__),\n # xarray setup/test\n (\"setuptools\", lambda mod: mod.__version__),\n (\"pip\", lambda mod: mod.__version__),\n (\"conda\", lambda mod: mod.__version__),\n (\"pytest\", lambda mod: mod.__version__),\n # Misc.\n (\"IPython\", lambda mod: mod.__version__),\n (\"sphinx\", lambda mod: mod.__version__),\n ]\n\n deps_blob = list()\n for (modname, ver_f) in deps:\n try:\n if modname in sys.modules:\n mod = sys.modules[modname]\n else:\n mod = importlib.import_module(modname)\n except Exception:\n deps_blob.append((modname, None))\n else:\n try:\n ver = ver_f(mod)\n deps_blob.append((modname, ver))\n except Exception:\n deps_blob.append((modname, \"installed\"))\n\n print(\"\\nINSTALLED VERSIONS\", file=file)\n print(\"------------------\", file=file)\n\n for k, stat in sys_info:\n print(\"%s: %s\" % (k, stat), file=file)\n\n print(\"\", file=file)\n for k, stat in deps_blob:\n print(\"%s: %s\" % (k, stat), file=file)"},{"col":4,"comment":"null","endLoc":139,"header":"def time_load_dataset_scipy(self)","id":1349,"name":"time_load_dataset_scipy","nodeType":"Function","startLoc":138,"text":"def time_load_dataset_scipy(self):\n xr.open_dataset(self.filepath, engine=\"scipy\").load()"},{"col":19,"endLoc":90,"id":1350,"nodeType":"Lambda","startLoc":90,"text":"lambda mod: mod.__version__"},{"col":19,"endLoc":91,"id":1351,"nodeType":"Lambda","startLoc":91,"text":"lambda mod: mod.__version__"},{"col":18,"endLoc":92,"id":1352,"nodeType":"Lambda","startLoc":92,"text":"lambda mod: mod.__version__"},{"col":18,"endLoc":93,"id":1353,"nodeType":"Lambda","startLoc":93,"text":"lambda mod: mod.__version__"},{"col":20,"endLoc":95,"id":1354,"nodeType":"Lambda","startLoc":95,"text":"lambda mod: mod.__version__"},{"col":18,"endLoc":96,"id":1355,"nodeType":"Lambda","startLoc":96,"text":"lambda mod: mod.__version__"},{"col":21,"endLoc":97,"id":1356,"nodeType":"Lambda","startLoc":97,"text":"lambda mod: mod.__version__"},{"col":17,"endLoc":98,"id":1357,"nodeType":"Lambda","startLoc":98,"text":"lambda mod: mod.__version__"},{"col":16,"endLoc":99,"id":1358,"nodeType":"Lambda","startLoc":99,"text":"lambda mod: mod.__version__"},{"col":17,"endLoc":100,"id":1359,"nodeType":"Lambda","startLoc":100,"text":"lambda mod: mod.__version__"},{"col":19,"endLoc":101,"id":1360,"nodeType":"Lambda","startLoc":101,"text":"lambda mod: mod.__version__"},{"col":25,"endLoc":102,"id":1361,"nodeType":"Lambda","startLoc":102,"text":"lambda mod: 
mod.__version__"},{"col":25,"endLoc":103,"id":1362,"nodeType":"Lambda","startLoc":103,"text":"lambda mod: mod.__version__"},{"col":21,"endLoc":104,"id":1363,"nodeType":"Lambda","startLoc":104,"text":"lambda mod: mod.__version__"},{"col":19,"endLoc":105,"id":1364,"nodeType":"Lambda","startLoc":105,"text":"lambda mod: mod.__version__"},{"col":17,"endLoc":106,"id":1365,"nodeType":"Lambda","startLoc":106,"text":"lambda mod: mod.__version__"},{"col":23,"endLoc":107,"id":1366,"nodeType":"Lambda","startLoc":107,"text":"lambda mod: mod.__version__"},{"col":17,"endLoc":108,"id":1367,"nodeType":"Lambda","startLoc":108,"text":"lambda mod: mod.__version__"},{"col":24,"endLoc":109,"id":1368,"nodeType":"Lambda","startLoc":109,"text":"lambda mod: mod.__version__"},{"col":23,"endLoc":110,"id":1369,"nodeType":"Lambda","startLoc":110,"text":"lambda mod: mod.__version__"},{"col":20,"endLoc":111,"id":1370,"nodeType":"Lambda","startLoc":111,"text":"lambda mod: mod.__version__"},{"col":20,"endLoc":112,"id":1371,"nodeType":"Lambda","startLoc":112,"text":"lambda mod: mod.__version__"},{"col":20,"endLoc":113,"id":1372,"nodeType":"Lambda","startLoc":113,"text":"lambda mod: mod.__version__"},{"col":23,"endLoc":115,"id":1373,"nodeType":"Lambda","startLoc":115,"text":"lambda mod: mod.__version__"},{"col":16,"endLoc":116,"id":1374,"nodeType":"Lambda","startLoc":116,"text":"lambda mod: mod.__version__"},{"col":18,"endLoc":117,"id":1375,"nodeType":"Lambda","startLoc":117,"text":"lambda mod: mod.__version__"},{"col":19,"endLoc":118,"id":1376,"nodeType":"Lambda","startLoc":118,"text":"lambda mod: mod.__version__"},{"col":20,"endLoc":120,"id":1377,"nodeType":"Lambda","startLoc":120,"text":"lambda mod: mod.__version__"},{"col":19,"endLoc":121,"id":1378,"nodeType":"Lambda","startLoc":121,"text":"lambda mod: mod.__version__"},{"col":4,"comment":"\n Sort object by labels or values (along an axis).\n\n Sorts the dataset, either along specified dimensions,\n or according to values of 1-D dataarrays that share dimension\n with calling object.\n\n If the input variables are dataarrays, then the dataarrays are aligned\n (via left-join) to the calling object prior to sorting by cell values.\n NaNs are sorted to the end, following Numpy convention.\n\n If multiple sorts along the same dimension is\n given, numpy's lexsort is performed along that dimension:\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.lexsort.html\n and the FIRST key in the sequence is used as the primary sort key,\n followed by the 2nd key, etc.\n\n Parameters\n ----------\n variables: str, DataArray, or list of either\n 1D DataArray objects or name(s) of 1D variable(s) in\n coords/data_vars whose values are used to sort the dataset.\n ascending: boolean, optional\n Whether to sort by ascending or descending order.\n\n Returns\n -------\n sorted: Dataset\n A new dataset where all the specified dims are sorted by dim\n labels.\n ","endLoc":4951,"header":"def sortby(self, variables, ascending=True)","id":1379,"name":"sortby","nodeType":"Function","startLoc":4898,"text":"def sortby(self, variables, ascending=True):\n \"\"\"\n Sort object by labels or values (along an axis).\n\n Sorts the dataset, either along specified dimensions,\n or according to values of 1-D dataarrays that share dimension\n with calling object.\n\n If the input variables are dataarrays, then the dataarrays are aligned\n (via left-join) to the calling object prior to sorting by cell values.\n NaNs are sorted to the end, following Numpy convention.\n\n If multiple sorts along the same 
dimension is\n given, numpy's lexsort is performed along that dimension:\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.lexsort.html\n and the FIRST key in the sequence is used as the primary sort key,\n followed by the 2nd key, etc.\n\n Parameters\n ----------\n variables: str, DataArray, or list of either\n 1D DataArray objects or name(s) of 1D variable(s) in\n coords/data_vars whose values are used to sort the dataset.\n ascending: boolean, optional\n Whether to sort by ascending or descending order.\n\n Returns\n -------\n sorted: Dataset\n A new dataset where all the specified dims are sorted by dim\n labels.\n \"\"\"\n from .dataarray import DataArray\n\n if not isinstance(variables, list):\n variables = [variables]\n else:\n variables = variables\n variables = [v if isinstance(v, DataArray) else self[v] for v in variables]\n aligned_vars = align(self, *variables, join=\"left\")\n aligned_self = aligned_vars[0]\n aligned_other_vars = aligned_vars[1:]\n vars_by_dim = defaultdict(list)\n for data_array in aligned_other_vars:\n if data_array.ndim != 1:\n raise ValueError(\"Input DataArray is not 1-D.\")\n (key,) = data_array.dims\n vars_by_dim[key].append(data_array)\n\n indices = {}\n for key, arrays in vars_by_dim.items():\n order = np.lexsort(tuple(reversed(arrays)))\n indices[key] = order if ascending else order[::-1]\n return aligned_self.isel(**indices)"},{"col":0,"comment":"","endLoc":1,"header":"print_versions.py#","id":1380,"name":"","nodeType":"Function","startLoc":1,"text":"\"\"\"Utility functions for printing version information.\"\"\"\n\nif __name__ == \"__main__\":\n show_versions()"},{"fileName":"dtypes.py","filePath":"xarray/core","id":1381,"nodeType":"File","text":"import functools\n\nimport numpy as np\n\nfrom . import utils\n\n# Use as a sentinel value to indicate a dtype appropriate NA value.\nNA = utils.ReprObject(\"\")\n\n\n@functools.total_ordering\nclass AlwaysGreaterThan:\n def __gt__(self, other):\n return True\n\n def __eq__(self, other):\n return isinstance(other, type(self))\n\n\n@functools.total_ordering\nclass AlwaysLessThan:\n def __lt__(self, other):\n return True\n\n def __eq__(self, other):\n return isinstance(other, type(self))\n\n\n# Equivalence to np.inf (-np.inf) for object-type\nINF = AlwaysGreaterThan()\nNINF = AlwaysLessThan()\n\n\n# Pairs of types that, if both found, should be promoted to object dtype\n# instead of following NumPy's own type-promotion rules. These type promotion\n# rules match pandas instead. For reference, see the NumPy type hierarchy:\n# https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.scalars.html\nPROMOTE_TO_OBJECT = [\n {np.number, np.character}, # numpy promotes to character\n {np.bool_, np.character}, # numpy promotes to character\n {np.bytes_, np.unicode_}, # numpy promotes to unicode\n]\n\n\ndef maybe_promote(dtype):\n \"\"\"Simpler equivalent of pandas.core.common._maybe_promote\n\n Parameters\n ----------\n dtype : np.dtype\n\n Returns\n -------\n dtype : Promoted dtype that can hold missing values.\n fill_value : Valid missing value for the promoted dtype.\n \"\"\"\n # N.B. 
these casting rules should match pandas\n if np.issubdtype(dtype, np.floating):\n fill_value = np.nan\n elif np.issubdtype(dtype, np.timedelta64):\n # See https://github.com/numpy/numpy/issues/10685\n # np.timedelta64 is a subclass of np.integer\n # Check np.timedelta64 before np.integer\n fill_value = np.timedelta64(\"NaT\")\n elif np.issubdtype(dtype, np.integer):\n if dtype.itemsize <= 2:\n dtype = np.float32\n else:\n dtype = np.float64\n fill_value = np.nan\n elif np.issubdtype(dtype, np.complexfloating):\n fill_value = np.nan + np.nan * 1j\n elif np.issubdtype(dtype, np.datetime64):\n fill_value = np.datetime64(\"NaT\")\n else:\n dtype = object\n fill_value = np.nan\n return np.dtype(dtype), fill_value\n\n\nNAT_TYPES = (np.datetime64(\"NaT\"), np.timedelta64(\"NaT\"))\n\n\ndef get_fill_value(dtype):\n \"\"\"Return an appropriate fill value for this dtype.\n\n Parameters\n ----------\n dtype : np.dtype\n\n Returns\n -------\n fill_value : Missing value corresponding to this dtype.\n \"\"\"\n _, fill_value = maybe_promote(dtype)\n return fill_value\n\n\ndef get_pos_infinity(dtype):\n \"\"\"Return an appropriate positive infinity for this dtype.\n\n Parameters\n ----------\n dtype : np.dtype\n\n Returns\n -------\n fill_value : positive infinity value corresponding to this dtype.\n \"\"\"\n if issubclass(dtype.type, (np.floating, np.integer)):\n return np.inf\n\n if issubclass(dtype.type, np.complexfloating):\n return np.inf + 1j * np.inf\n\n return INF\n\n\ndef get_neg_infinity(dtype):\n \"\"\"Return an appropriate positive infinity for this dtype.\n\n Parameters\n ----------\n dtype : np.dtype\n\n Returns\n -------\n fill_value : positive infinity value corresponding to this dtype.\n \"\"\"\n if issubclass(dtype.type, (np.floating, np.integer)):\n return -np.inf\n\n if issubclass(dtype.type, np.complexfloating):\n return -np.inf - 1j * np.inf\n\n return NINF\n\n\ndef is_datetime_like(dtype):\n \"\"\"Check if a dtype is a subclass of the numpy datetime types\n \"\"\"\n return np.issubdtype(dtype, np.datetime64) or np.issubdtype(dtype, np.timedelta64)\n\n\ndef result_type(*arrays_and_dtypes):\n \"\"\"Like np.result_type, but with type promotion rules matching pandas.\n\n Examples of changed behavior:\n number + string -> object (not string)\n bytes + unicode -> object (not unicode)\n\n Parameters\n ----------\n *arrays_and_dtypes : list of arrays and dtypes\n The dtype is extracted from both numpy and dask arrays.\n\n Returns\n -------\n numpy.dtype for the result.\n \"\"\"\n types = {np.result_type(t).type for t in arrays_and_dtypes}\n\n for left, right in PROMOTE_TO_OBJECT:\n if any(issubclass(t, left) for t in types) and any(\n issubclass(t, right) for t in types\n ):\n return np.dtype(object)\n\n return np.result_type(*arrays_and_dtypes)\n"},{"className":"AlwaysGreaterThan","col":0,"comment":"null","endLoc":17,"id":1382,"nodeType":"Class","startLoc":11,"text":"@functools.total_ordering\nclass AlwaysGreaterThan:\n def __gt__(self, other):\n return True\n\n def __eq__(self, other):\n return isinstance(other, type(self))"},{"col":4,"comment":"null","endLoc":14,"header":"def __gt__(self, other)","id":1383,"name":"__gt__","nodeType":"Function","startLoc":13,"text":"def __gt__(self, other):\n return True"},{"col":4,"comment":"null","endLoc":17,"header":"def __eq__(self, other)","id":1384,"name":"__eq__","nodeType":"Function","startLoc":16,"text":"def __eq__(self, other):\n return isinstance(other, type(self))"},{"col":4,"comment":"null","endLoc":143,"header":"def 
time_orthogonal_indexing(self)","id":1385,"name":"time_orthogonal_indexing","nodeType":"Function","startLoc":141,"text":"def time_orthogonal_indexing(self):\n ds = xr.open_dataset(self.filepath, engine=\"scipy\")\n ds = ds.isel(**self.oinds).load()"},{"className":"AlwaysLessThan","col":0,"comment":"null","endLoc":26,"id":1386,"nodeType":"Class","startLoc":20,"text":"@functools.total_ordering\nclass AlwaysLessThan:\n def __lt__(self, other):\n return True\n\n def __eq__(self, other):\n return isinstance(other, type(self))"},{"col":4,"comment":"null","endLoc":23,"header":"def __lt__(self, other)","id":1387,"name":"__lt__","nodeType":"Function","startLoc":22,"text":"def __lt__(self, other):\n return True"},{"col":4,"comment":"null","endLoc":26,"header":"def __eq__(self, other)","id":1388,"name":"__eq__","nodeType":"Function","startLoc":25,"text":"def __eq__(self, other):\n return isinstance(other, type(self))"},{"col":0,"comment":"Main VCS-independent setup function for installing Versioneer.","endLoc":1837,"header":"def do_setup()","id":1389,"name":"do_setup","nodeType":"Function","startLoc":1755,"text":"def do_setup():\n \"\"\"Main VCS-independent setup function for installing Versioneer.\"\"\"\n root = get_root()\n try:\n cfg = get_config_from_root(root)\n except (OSError, configparser.NoSectionError, configparser.NoOptionError) as e:\n if isinstance(e, (EnvironmentError, configparser.NoSectionError)):\n print(\"Adding sample versioneer config to setup.cfg\", file=sys.stderr)\n with open(os.path.join(root, \"setup.cfg\"), \"a\") as f:\n f.write(SAMPLE_CONFIG)\n print(CONFIG_ERROR, file=sys.stderr)\n return 1\n\n print(\" creating %s\" % cfg.versionfile_source)\n with open(cfg.versionfile_source, \"w\") as f:\n LONG = LONG_VERSION_PY[cfg.VCS]\n f.write(\n LONG\n % {\n \"DOLLAR\": \"$\",\n \"STYLE\": cfg.style,\n \"TAG_PREFIX\": cfg.tag_prefix,\n \"PARENTDIR_PREFIX\": cfg.parentdir_prefix,\n \"VERSIONFILE_SOURCE\": cfg.versionfile_source,\n }\n )\n\n ipy = os.path.join(os.path.dirname(cfg.versionfile_source), \"__init__.py\")\n if os.path.exists(ipy):\n try:\n with open(ipy, \"r\") as f:\n old = f.read()\n except OSError:\n old = \"\"\n if INIT_PY_SNIPPET not in old:\n print(\" appending to %s\" % ipy)\n with open(ipy, \"a\") as f:\n f.write(INIT_PY_SNIPPET)\n else:\n print(\" %s unmodified\" % ipy)\n else:\n print(\" %s doesn't exist, ok\" % ipy)\n ipy = None\n\n # Make sure both the top-level \"versioneer.py\" and versionfile_source\n # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so\n # they'll be copied into source distributions. Pip won't be able to\n # install the package without this.\n manifest_in = os.path.join(root, \"MANIFEST.in\")\n simple_includes = set()\n try:\n with open(manifest_in, \"r\") as f:\n for line in f:\n if line.startswith(\"include \"):\n for include in line.split()[1:]:\n simple_includes.add(include)\n except OSError:\n pass\n # That doesn't cover everything MANIFEST.in can do\n # (http://docs.python.org/2/distutils/sourcedist.html#commands), so\n # it might give some false negatives. 
Appending redundant 'include'\n # lines is safe, though.\n if \"versioneer.py\" not in simple_includes:\n print(\" appending 'versioneer.py' to MANIFEST.in\")\n with open(manifest_in, \"a\") as f:\n f.write(\"include versioneer.py\\n\")\n else:\n print(\" 'versioneer.py' already in MANIFEST.in\")\n if cfg.versionfile_source not in simple_includes:\n print(\n \" appending versionfile_source ('%s') to MANIFEST.in\"\n % cfg.versionfile_source\n )\n with open(manifest_in, \"a\") as f:\n f.write(\"include %s\\n\" % cfg.versionfile_source)\n else:\n print(\" versionfile_source already in MANIFEST.in\")\n\n # Make VCS-specific changes. For git, this means creating/changing\n # .gitattributes to mark _version.py for export-subst keyword\n # substitution.\n do_vcs_install(manifest_in, cfg.versionfile_source, ipy)\n return 0"},{"col":0,"comment":"Check if a dtype is a subclass of the numpy datetime types\n ","endLoc":142,"header":"def is_datetime_like(dtype)","id":1390,"name":"is_datetime_like","nodeType":"Function","startLoc":139,"text":"def is_datetime_like(dtype):\n \"\"\"Check if a dtype is a subclass of the numpy datetime types\n \"\"\"\n return np.issubdtype(dtype, np.datetime64) or np.issubdtype(dtype, np.timedelta64)"},{"col":0,"comment":"Like np.result_type, but with type promotion rules matching pandas.\n\n Examples of changed behavior:\n number + string -> object (not string)\n bytes + unicode -> object (not unicode)\n\n Parameters\n ----------\n *arrays_and_dtypes : list of arrays and dtypes\n The dtype is extracted from both numpy and dask arrays.\n\n Returns\n -------\n numpy.dtype for the result.\n ","endLoc":169,"header":"def result_type(*arrays_and_dtypes)","id":1392,"name":"result_type","nodeType":"Function","startLoc":145,"text":"def result_type(*arrays_and_dtypes):\n \"\"\"Like np.result_type, but with type promotion rules matching pandas.\n\n Examples of changed behavior:\n number + string -> object (not string)\n bytes + unicode -> object (not unicode)\n\n Parameters\n ----------\n *arrays_and_dtypes : list of arrays and dtypes\n The dtype is extracted from both numpy and dask arrays.\n\n Returns\n -------\n numpy.dtype for the result.\n \"\"\"\n types = {np.result_type(t).type for t in arrays_and_dtypes}\n\n for left, right in PROMOTE_TO_OBJECT:\n if any(issubclass(t, left) for t in types) and any(\n issubclass(t, right) for t in types\n ):\n return np.dtype(object)\n\n return np.result_type(*arrays_and_dtypes)"},{"col":4,"comment":" Multidimensional interpolation of variables.\n\n coords : dict, optional\n Mapping from dimension names to the new coordinates.\n new coordinate can be an scalar, array-like or DataArray.\n If DataArrays are passed as new coordates, their dimensions are\n used for the broadcasting.\n method: {'linear', 'nearest'} for multidimensional array,\n {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}\n for 1-dimensional array.\n assume_sorted: boolean, optional\n If False, values of x can be in any order and they are sorted\n first. 
If True, x has to be an array of monotonically increasing\n values.\n kwargs: dictionary\n Additional keyword passed to scipy's interpolator.\n **coords_kwarg : {dim: coordinate, ...}, optional\n The keyword arguments form of ``coords``.\n One of coords or coords_kwargs must be provided.\n\n Returns\n -------\n interpolated: xr.DataArray\n New dataarray on the new coordinates.\n\n Notes\n -----\n scipy is required.\n\n See Also\n --------\n scipy.interpolate.interp1d\n scipy.interpolate.interpn\n\n Examples\n --------\n >>> da = xr.DataArray([1, 3], [('x', np.arange(2))])\n >>> da.interp(x=0.5)\n \n array(2.0)\n Coordinates:\n x float64 0.5\n ","endLoc":1353,"header":"def interp(\n self,\n coords: Mapping[Hashable, Any] = None,\n method: str = \"linear\",\n assume_sorted: bool = False,\n kwargs: Mapping[str, Any] = None,\n **coords_kwargs: Any\n ) -> \"DataArray\"","id":1394,"name":"interp","nodeType":"Function","startLoc":1290,"text":"def interp(\n self,\n coords: Mapping[Hashable, Any] = None,\n method: str = \"linear\",\n assume_sorted: bool = False,\n kwargs: Mapping[str, Any] = None,\n **coords_kwargs: Any\n ) -> \"DataArray\":\n \"\"\" Multidimensional interpolation of variables.\n\n coords : dict, optional\n Mapping from dimension names to the new coordinates.\n new coordinate can be an scalar, array-like or DataArray.\n If DataArrays are passed as new coordates, their dimensions are\n used for the broadcasting.\n method: {'linear', 'nearest'} for multidimensional array,\n {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}\n for 1-dimensional array.\n assume_sorted: boolean, optional\n If False, values of x can be in any order and they are sorted\n first. If True, x has to be an array of monotonically increasing\n values.\n kwargs: dictionary\n Additional keyword passed to scipy's interpolator.\n **coords_kwarg : {dim: coordinate, ...}, optional\n The keyword arguments form of ``coords``.\n One of coords or coords_kwargs must be provided.\n\n Returns\n -------\n interpolated: xr.DataArray\n New dataarray on the new coordinates.\n\n Notes\n -----\n scipy is required.\n\n See Also\n --------\n scipy.interpolate.interp1d\n scipy.interpolate.interpn\n\n Examples\n --------\n >>> da = xr.DataArray([1, 3], [('x', np.arange(2))])\n >>> da.interp(x=0.5)\n \n array(2.0)\n Coordinates:\n x float64 0.5\n \"\"\"\n if self.dtype.kind not in \"uifc\":\n raise TypeError(\n \"interp only works for a numeric type array. \"\n \"Given {}.\".format(self.dtype)\n )\n ds = self._to_temp_dataset().interp(\n coords,\n method=method,\n kwargs=kwargs,\n assume_sorted=assume_sorted,\n **coords_kwargs\n )\n return self._from_temp_dataset(ds)"},{"col":4,"comment":"null","endLoc":147,"header":"def time_vectorized_indexing(self)","id":1395,"name":"time_vectorized_indexing","nodeType":"Function","startLoc":145,"text":"def time_vectorized_indexing(self):\n ds = xr.open_dataset(self.filepath, engine=\"scipy\")\n ds = ds.isel(**self.vinds).load()"},{"col":0,"comment":" Make an interpolation of Variable\n\n Parameters\n ----------\n var: Variable\n index_coords:\n Mapping from dimension name to a pair of original and new coordinates.\n Original coordinates should be sorted in strictly ascending order.\n Note that all the coordinates should be Variable objects.\n method: string\n One of {'linear', 'nearest', 'zero', 'slinear', 'quadratic',\n 'cubic'}. 
For multidimensional interpolation, only\n {'linear', 'nearest'} can be used.\n **kwargs:\n keyword arguments to be passed to scipy.interpolate\n\n Returns\n -------\n Interpolated Variable\n\n See Also\n --------\n DataArray.interp\n Dataset.interp\n ","endLoc":535,"header":"def interp(var, indexes_coords, method, **kwargs)","id":1396,"name":"interp","nodeType":"Function","startLoc":477,"text":"def interp(var, indexes_coords, method, **kwargs):\n \"\"\" Make an interpolation of Variable\n\n Parameters\n ----------\n var: Variable\n index_coords:\n Mapping from dimension name to a pair of original and new coordinates.\n Original coordinates should be sorted in strictly ascending order.\n Note that all the coordinates should be Variable objects.\n method: string\n One of {'linear', 'nearest', 'zero', 'slinear', 'quadratic',\n 'cubic'}. For multidimensional interpolation, only\n {'linear', 'nearest'} can be used.\n **kwargs:\n keyword arguments to be passed to scipy.interpolate\n\n Returns\n -------\n Interpolated Variable\n\n See Also\n --------\n DataArray.interp\n Dataset.interp\n \"\"\"\n if not indexes_coords:\n return var.copy()\n\n # simple speed up for the local interpolation\n if method in [\"linear\", \"nearest\"]:\n var, indexes_coords = _localize(var, indexes_coords)\n\n # default behavior\n kwargs[\"bounds_error\"] = kwargs.get(\"bounds_error\", False)\n\n # target dimensions\n dims = list(indexes_coords)\n x, new_x = zip(*[indexes_coords[d] for d in dims])\n destination = broadcast_variables(*new_x)\n\n # transpose to make the interpolated axis to the last position\n broadcast_dims = [d for d in var.dims if d not in dims]\n original_dims = broadcast_dims + dims\n new_dims = broadcast_dims + list(destination[0].dims)\n interped = interp_func(\n var.transpose(*original_dims).data, x, destination, method, kwargs\n )\n\n result = Variable(new_dims, interped, attrs=var.attrs)\n\n # dimension of the output array\n out_dims = OrderedSet()\n for d in var.dims:\n if d in dims:\n out_dims.update(indexes_coords[d][1].dims)\n else:\n out_dims.add(d)\n return result.transpose(*tuple(out_dims))"},{"attributeType":"ReprObject","col":0,"comment":"null","endLoc":8,"id":1397,"name":"NA","nodeType":"Attribute","startLoc":8,"text":"NA"},{"attributeType":"null","col":8,"comment":"null","endLoc":134,"id":1398,"name":"filepath","nodeType":"Attribute","startLoc":134,"text":"self.filepath"},{"col":4,"comment":"Interpolate this object onto the coordinates of another object,\n filling out of range values with NaN.\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object with an 'indexes' attribute giving a mapping from dimension\n names to an 1d array-like, which provides coordinates upon\n which to index the variables in this dataset.\n method: string, optional.\n {'linear', 'nearest'} for multidimensional array,\n {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}\n for 1-dimensional array. 'linear' is used by default.\n assume_sorted: boolean, optional\n If False, values of coordinates that are interpolated over can be\n in any order and they are sorted first. 
If True, interpolated\n coordinates are assumed to be an array of monotonically increasing\n values.\n kwargs: dictionary, optional\n Additional keyword passed to scipy's interpolator.\n\n Returns\n -------\n interpolated: xr.DataArray\n Another dataarray by interpolating this dataarray's data along the\n coordinates of the other object.\n\n Notes\n -----\n scipy is required.\n If the dataarray has object-type coordinates, reindex is used for these\n coordinates instead of the interpolation.\n\n See Also\n --------\n DataArray.interp\n DataArray.reindex_like\n ","endLoc":1408,"header":"def interp_like(\n self,\n other: Union[\"DataArray\", Dataset],\n method: str = \"linear\",\n assume_sorted: bool = False,\n kwargs: Mapping[str, Any] = None,\n ) -> \"DataArray\"","id":1399,"name":"interp_like","nodeType":"Function","startLoc":1355,"text":"def interp_like(\n self,\n other: Union[\"DataArray\", Dataset],\n method: str = \"linear\",\n assume_sorted: bool = False,\n kwargs: Mapping[str, Any] = None,\n ) -> \"DataArray\":\n \"\"\"Interpolate this object onto the coordinates of another object,\n filling out of range values with NaN.\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object with an 'indexes' attribute giving a mapping from dimension\n names to an 1d array-like, which provides coordinates upon\n which to index the variables in this dataset.\n method: string, optional.\n {'linear', 'nearest'} for multidimensional array,\n {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}\n for 1-dimensional array. 'linear' is used by default.\n assume_sorted: boolean, optional\n If False, values of coordinates that are interpolated over can be\n in any order and they are sorted first. If True, interpolated\n coordinates are assumed to be an array of monotonically increasing\n values.\n kwargs: dictionary, optional\n Additional keyword passed to scipy's interpolator.\n\n Returns\n -------\n interpolated: xr.DataArray\n Another dataarray by interpolating this dataarray's data along the\n coordinates of the other object.\n\n Notes\n -----\n scipy is required.\n If the dataarray has object-type coordinates, reindex is used for these\n coordinates instead of the interpolation.\n\n See Also\n --------\n DataArray.interp\n DataArray.reindex_like\n \"\"\"\n if self.dtype.kind not in \"uifc\":\n raise TypeError(\n \"interp only works for a numeric type array. 
\"\n \"Given {}.\".format(self.dtype)\n )\n ds = self._to_temp_dataset().interp_like(\n other, method=method, kwargs=kwargs, assume_sorted=assume_sorted\n )\n return self._from_temp_dataset(ds)"},{"attributeType":"null","col":8,"comment":"null","endLoc":135,"id":1400,"name":"format","nodeType":"Attribute","startLoc":135,"text":"self.format"},{"className":"IOReadSingleNetCDF4Dask","col":0,"comment":"null","endLoc":187,"id":1401,"nodeType":"Class","startLoc":150,"text":"class IOReadSingleNetCDF4Dask(IOSingleNetCDF):\n def setup(self):\n\n requires_dask()\n\n self.make_ds()\n\n self.filepath = \"test_single_file.nc4.nc\"\n self.format = \"NETCDF4\"\n self.ds.to_netcdf(self.filepath, format=self.format)\n\n def time_load_dataset_netcdf4_with_block_chunks(self):\n xr.open_dataset(\n self.filepath, engine=\"netcdf4\", chunks=self.block_chunks\n ).load()\n\n def time_load_dataset_netcdf4_with_block_chunks_oindexing(self):\n ds = xr.open_dataset(self.filepath, engine=\"netcdf4\", chunks=self.block_chunks)\n ds = ds.isel(**self.oinds).load()\n\n def time_load_dataset_netcdf4_with_block_chunks_vindexing(self):\n ds = xr.open_dataset(self.filepath, engine=\"netcdf4\", chunks=self.block_chunks)\n ds = ds.isel(**self.vinds).load()\n\n def time_load_dataset_netcdf4_with_block_chunks_multiprocessing(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_dataset(\n self.filepath, engine=\"netcdf4\", chunks=self.block_chunks\n ).load()\n\n def time_load_dataset_netcdf4_with_time_chunks(self):\n xr.open_dataset(self.filepath, engine=\"netcdf4\", chunks=self.time_chunks).load()\n\n def time_load_dataset_netcdf4_with_time_chunks_multiprocessing(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_dataset(\n self.filepath, engine=\"netcdf4\", chunks=self.time_chunks\n ).load()"},{"col":4,"comment":"null","endLoc":159,"header":"def setup(self)","id":1402,"name":"setup","nodeType":"Function","startLoc":151,"text":"def setup(self):\n\n requires_dask()\n\n self.make_ds()\n\n self.filepath = \"test_single_file.nc4.nc\"\n self.format = \"NETCDF4\"\n self.ds.to_netcdf(self.filepath, format=self.format)"},{"col":4,"comment":"Interpolate this object onto the coordinates of another object,\n filling the out of range values with NaN.\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object with an 'indexes' attribute giving a mapping from dimension\n names to an 1d array-like, which provides coordinates upon\n which to index the variables in this dataset.\n method: string, optional.\n {'linear', 'nearest'} for multidimensional array,\n {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}\n for 1-dimensional array. 'linear' is used by default.\n assume_sorted: boolean, optional\n If False, values of coordinates that are interpolated over can be\n in any order and they are sorted first. 
If True, interpolated\n coordinates are assumed to be an array of monotonically increasing\n values.\n kwargs: dictionary, optional\n Additional keyword passed to scipy's interpolator.\n\n Returns\n -------\n interpolated: xr.Dataset\n Another dataset by interpolating this dataset's data along the\n coordinates of the other object.\n\n Notes\n -----\n scipy is required.\n If the dataset has object-type coordinates, reindex is used for these\n coordinates instead of the interpolation.\n\n See Also\n --------\n Dataset.interp\n Dataset.reindex_like\n ","endLoc":2616,"header":"def interp_like(\n self,\n other: Union[\"Dataset\", \"DataArray\"],\n method: str = \"linear\",\n assume_sorted: bool = False,\n kwargs: Mapping[str, Any] = None,\n ) -> \"Dataset\"","id":1403,"name":"interp_like","nodeType":"Function","startLoc":2554,"text":"def interp_like(\n self,\n other: Union[\"Dataset\", \"DataArray\"],\n method: str = \"linear\",\n assume_sorted: bool = False,\n kwargs: Mapping[str, Any] = None,\n ) -> \"Dataset\":\n \"\"\"Interpolate this object onto the coordinates of another object,\n filling the out of range values with NaN.\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object with an 'indexes' attribute giving a mapping from dimension\n names to an 1d array-like, which provides coordinates upon\n which to index the variables in this dataset.\n method: string, optional.\n {'linear', 'nearest'} for multidimensional array,\n {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}\n for 1-dimensional array. 'linear' is used by default.\n assume_sorted: boolean, optional\n If False, values of coordinates that are interpolated over can be\n in any order and they are sorted first. If True, interpolated\n coordinates are assumed to be an array of monotonically increasing\n values.\n kwargs: dictionary, optional\n Additional keyword passed to scipy's interpolator.\n\n Returns\n -------\n interpolated: xr.Dataset\n Another dataset by interpolating this dataset's data along the\n coordinates of the other object.\n\n Notes\n -----\n scipy is required.\n If the dataset has object-type coordinates, reindex is used for these\n coordinates instead of the interpolation.\n\n See Also\n --------\n Dataset.interp\n Dataset.reindex_like\n \"\"\"\n if kwargs is None:\n kwargs = {}\n coords = alignment.reindex_like_indexers(self, other)\n\n numeric_coords: Dict[Hashable, pd.Index] = {}\n object_coords: Dict[Hashable, pd.Index] = {}\n for k, v in coords.items():\n if v.dtype.kind in \"uifcMm\":\n numeric_coords[k] = v\n else:\n object_coords[k] = v\n\n ds = self\n if object_coords:\n # We do not support interpolation along object coordinate.\n # reindex instead.\n ds = self.reindex(object_coords)\n return ds.interp(numeric_coords, method, assume_sorted, kwargs)"},{"attributeType":"AlwaysGreaterThan","col":0,"comment":"null","endLoc":30,"id":1404,"name":"INF","nodeType":"Attribute","startLoc":30,"text":"INF"},{"col":4,"comment":"null","endLoc":402,"header":"def add_legend(self, **kwargs)","id":1405,"name":"add_legend","nodeType":"Function","startLoc":373,"text":"def add_legend(self, **kwargs):\n figlegend = self.fig.legend(\n handles=self._mappables[-1],\n labels=list(self._hue_var.values),\n title=self._hue_label,\n loc=\"center right\",\n **kwargs\n )\n\n self.figlegend = figlegend\n # Draw the plot to set the bounding boxes correctly\n self.fig.draw(self.fig.canvas.get_renderer())\n\n # Calculate and set the new width of the figure so the legend fits\n legend_width = 
figlegend.get_window_extent().width / self.fig.dpi\n figure_width = self.fig.get_figwidth()\n self.fig.set_figwidth(figure_width + legend_width)\n\n # Draw the plot again to get the new transformations\n self.fig.draw(self.fig.canvas.get_renderer())\n\n # Now calculate how much space we need on the right side\n legend_width = figlegend.get_window_extent().width / self.fig.dpi\n space_needed = legend_width / (figure_width + legend_width) + 0.02\n # margin = .01\n # _space_needed = margin + space_needed\n right = 1 - space_needed\n\n # Place the subplot axes to give space for the legend\n self.fig.subplots_adjust(right=right)"},{"col":0,"comment":" Speed up for linear and nearest neighbor method.\n Only consider a subspace that is needed for the interpolation\n ","endLoc":454,"header":"def _localize(var, indexes_coords)","id":1406,"name":"_localize","nodeType":"Function","startLoc":442,"text":"def _localize(var, indexes_coords):\n \"\"\" Speed up for linear and nearest neighbor method.\n Only consider a subspace that is needed for the interpolation\n \"\"\"\n indexes = {}\n for dim, [x, new_x] in indexes_coords.items():\n index = x.to_index()\n imin = index.get_loc(np.min(new_x.values), method=\"nearest\")\n imax = index.get_loc(np.max(new_x.values), method=\"nearest\")\n\n indexes[dim] = slice(max(imin - 2, 0), imax + 2)\n indexes_coords[dim] = (x[indexes[dim]], new_x)\n return var.isel(**indexes), indexes_coords"},{"col":4,"comment":"null","endLoc":164,"header":"def time_load_dataset_netcdf4_with_block_chunks(self)","id":1407,"name":"time_load_dataset_netcdf4_with_block_chunks","nodeType":"Function","startLoc":161,"text":"def time_load_dataset_netcdf4_with_block_chunks(self):\n xr.open_dataset(\n self.filepath, engine=\"netcdf4\", chunks=self.block_chunks\n ).load()"},{"col":4,"comment":"null","endLoc":168,"header":"def time_load_dataset_netcdf4_with_block_chunks_oindexing(self)","id":1408,"name":"time_load_dataset_netcdf4_with_block_chunks_oindexing","nodeType":"Function","startLoc":166,"text":"def time_load_dataset_netcdf4_with_block_chunks_oindexing(self):\n ds = xr.open_dataset(self.filepath, engine=\"netcdf4\", chunks=self.block_chunks)\n ds = ds.isel(**self.oinds).load()"},{"col":4,"comment":"Returns a new DataArray with renamed coordinates or a new name.\n\n Parameters\n ----------\n new_name_or_name_dict : str or dict-like, optional\n If the argument is dict-like, it used as a mapping from old\n names to new names for coordinates. Otherwise, use the argument\n as the new name for this array.\n **names: hashable, optional\n The keyword arguments form of a mapping from old names to\n new names for coordinates.\n One of new_name_or_name_dict or names must be provided.\n\n Returns\n -------\n renamed : DataArray\n Renamed array or array with renamed coordinates.\n\n See Also\n --------\n Dataset.rename\n DataArray.swap_dims\n ","endLoc":1447,"header":"def rename(\n self,\n new_name_or_name_dict: Union[Hashable, Mapping[Hashable, Hashable]] = None,\n **names: Hashable\n ) -> \"DataArray\"","id":1409,"name":"rename","nodeType":"Function","startLoc":1410,"text":"def rename(\n self,\n new_name_or_name_dict: Union[Hashable, Mapping[Hashable, Hashable]] = None,\n **names: Hashable\n ) -> \"DataArray\":\n \"\"\"Returns a new DataArray with renamed coordinates or a new name.\n\n Parameters\n ----------\n new_name_or_name_dict : str or dict-like, optional\n If the argument is dict-like, it used as a mapping from old\n names to new names for coordinates. 
Otherwise, use the argument\n as the new name for this array.\n **names: hashable, optional\n The keyword arguments form of a mapping from old names to\n new names for coordinates.\n One of new_name_or_name_dict or names must be provided.\n\n Returns\n -------\n renamed : DataArray\n Renamed array or array with renamed coordinates.\n\n See Also\n --------\n Dataset.rename\n DataArray.swap_dims\n \"\"\"\n if names or utils.is_dict_like(new_name_or_name_dict):\n new_name_or_name_dict = cast(\n Mapping[Hashable, Hashable], new_name_or_name_dict\n )\n name_dict = either_dict_or_kwargs(new_name_or_name_dict, names, \"rename\")\n dataset = self._to_temp_dataset().rename(name_dict)\n return self._from_temp_dataset(dataset)\n else:\n new_name_or_name_dict = cast(Hashable, new_name_or_name_dict)\n return self._replace(name=new_name_or_name_dict)"},{"attributeType":"AlwaysLessThan","col":0,"comment":"null","endLoc":31,"id":1410,"name":"NINF","nodeType":"Attribute","startLoc":31,"text":"NINF"},{"col":4,"comment":"null","endLoc":172,"header":"def time_load_dataset_netcdf4_with_block_chunks_vindexing(self)","id":1411,"name":"time_load_dataset_netcdf4_with_block_chunks_vindexing","nodeType":"Function","startLoc":170,"text":"def time_load_dataset_netcdf4_with_block_chunks_vindexing(self):\n ds = xr.open_dataset(self.filepath, engine=\"netcdf4\", chunks=self.block_chunks)\n ds = ds.isel(**self.vinds).load()"},{"col":4,"comment":"null","endLoc":358,"header":"def map_dataset(\n self, func, x=None, y=None, hue=None, hue_style=None, add_guide=None, **kwargs\n )","id":1412,"name":"map_dataset","nodeType":"Function","startLoc":314,"text":"def map_dataset(\n self, func, x=None, y=None, hue=None, hue_style=None, add_guide=None, **kwargs\n ):\n from .dataset_plot import _infer_meta_data, _parse_size\n\n kwargs[\"add_guide\"] = False\n kwargs[\"_is_facetgrid\"] = True\n\n if kwargs.get(\"markersize\", None):\n kwargs[\"size_mapping\"] = _parse_size(\n self.data[kwargs[\"markersize\"]], kwargs.pop(\"size_norm\", None)\n )\n\n meta_data = _infer_meta_data(self.data, x, y, hue, hue_style, add_guide)\n kwargs[\"meta_data\"] = meta_data\n\n if hue and meta_data[\"hue_style\"] == \"continuous\":\n cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs(\n func, self.data[hue].values, **kwargs\n )\n kwargs[\"meta_data\"][\"cmap_params\"] = cmap_params\n kwargs[\"meta_data\"][\"cbar_kwargs\"] = cbar_kwargs\n\n for d, ax in zip(self.name_dicts.flat, self.axes.flat):\n # None is the sentinel value\n if d is not None:\n subset = self.data.loc[d]\n maybe_mappable = func(\n ds=subset, x=x, y=y, hue=hue, hue_style=hue_style, ax=ax, **kwargs\n )\n # TODO: this is needed to get legends to work.\n # but maybe_mappable is a list in that case :/\n self._mappables.append(maybe_mappable)\n\n self._finalize_grid(meta_data[\"xlabel\"], meta_data[\"ylabel\"])\n\n if hue:\n self._hue_label = meta_data.pop(\"hue_label\", None)\n if meta_data[\"add_legend\"]:\n self._hue_var = meta_data[\"hue\"]\n self.add_legend()\n elif meta_data[\"add_colorbar\"]:\n self.add_colorbar(label=self._hue_label, **cbar_kwargs)\n\n return self"},{"col":4,"comment":"Returns a new object with renamed variables and dimensions.\n\n Parameters\n ----------\n name_dict : dict-like, optional\n Dictionary whose keys are current variable or dimension names and\n whose values are the desired names.\n **names, optional\n Keyword form of ``name_dict``.\n One of name_dict or names must be provided.\n\n Returns\n -------\n renamed : Dataset\n Dataset with renamed variables and 
dimensions.\n\n See Also\n --------\n Dataset.swap_dims\n Dataset.rename_vars\n Dataset.rename_dims\n DataArray.rename\n ","endLoc":2705,"header":"def rename(\n self,\n name_dict: Mapping[Hashable, Hashable] = None,\n inplace: bool = None,\n **names: Hashable,\n ) -> \"Dataset\"","id":1413,"name":"rename","nodeType":"Function","startLoc":2664,"text":"def rename(\n self,\n name_dict: Mapping[Hashable, Hashable] = None,\n inplace: bool = None,\n **names: Hashable,\n ) -> \"Dataset\":\n \"\"\"Returns a new object with renamed variables and dimensions.\n\n Parameters\n ----------\n name_dict : dict-like, optional\n Dictionary whose keys are current variable or dimension names and\n whose values are the desired names.\n **names, optional\n Keyword form of ``name_dict``.\n One of name_dict or names must be provided.\n\n Returns\n -------\n renamed : Dataset\n Dataset with renamed variables and dimensions.\n\n See Also\n --------\n Dataset.swap_dims\n Dataset.rename_vars\n Dataset.rename_dims\n DataArray.rename\n \"\"\"\n _check_inplace(inplace)\n name_dict = either_dict_or_kwargs(name_dict, names, \"rename\")\n for k in name_dict.keys():\n if k not in self and k not in self.dims:\n raise ValueError(\n \"cannot rename %r because it is not a \"\n \"variable or dimension in this dataset\" % k\n )\n\n variables, coord_names, dims, indexes = self._rename_all(\n name_dict=name_dict, dims_dict=name_dict\n )\n return self._replace(variables, coord_names, dims=dims, indexes=indexes)"},{"col":4,"comment":"null","endLoc":178,"header":"def time_load_dataset_netcdf4_with_block_chunks_multiprocessing(self)","id":1414,"name":"time_load_dataset_netcdf4_with_block_chunks_multiprocessing","nodeType":"Function","startLoc":174,"text":"def time_load_dataset_netcdf4_with_block_chunks_multiprocessing(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_dataset(\n self.filepath, engine=\"netcdf4\", chunks=self.block_chunks\n ).load()"},{"col":0,"comment":"Validate the contents of setup.py against Versioneer's expectations.","endLoc":1874,"header":"def scan_setup_py()","id":1415,"name":"scan_setup_py","nodeType":"Function","startLoc":1840,"text":"def scan_setup_py():\n \"\"\"Validate the contents of setup.py against Versioneer's expectations.\"\"\"\n found = set()\n setters = False\n errors = 0\n with open(\"setup.py\", \"r\") as f:\n for line in f.readlines():\n if \"import versioneer\" in line:\n found.add(\"import\")\n if \"versioneer.get_cmdclass()\" in line:\n found.add(\"cmdclass\")\n if \"versioneer.get_version()\" in line:\n found.add(\"get_version\")\n if \"versioneer.VCS\" in line:\n setters = True\n if \"versioneer.versionfile_source\" in line:\n setters = True\n if len(found) != 3:\n print(\"\")\n print(\"Your setup.py appears to be missing some important items\")\n print(\"(but I might be wrong). Please make sure it has something\")\n print(\"roughly like the following:\")\n print(\"\")\n print(\" import versioneer\")\n print(\" setup( version=versioneer.get_version(),\")\n print(\" cmdclass=versioneer.get_cmdclass(), ...)\")\n print(\"\")\n errors += 1\n if setters:\n print(\"You should remove lines like 'versioneer.VCS = ' and\")\n print(\"'versioneer.versionfile_source = ' . 
This configuration\")\n print(\"now lives in setup.cfg, and should be removed from setup.py\")\n print(\"\")\n errors += 1\n return errors"},{"attributeType":"null","col":0,"comment":"null","endLoc":38,"id":1416,"name":"PROMOTE_TO_OBJECT","nodeType":"Attribute","startLoc":38,"text":"PROMOTE_TO_OBJECT"},{"attributeType":"null","col":0,"comment":"null","endLoc":81,"id":1417,"name":"NAT_TYPES","nodeType":"Attribute","startLoc":81,"text":"NAT_TYPES"},{"col":0,"comment":"","endLoc":1,"header":"dtypes.py#","id":1418,"name":"","nodeType":"Function","startLoc":1,"text":"NA = utils.ReprObject(\"\")\n\nINF = AlwaysGreaterThan()\n\nNINF = AlwaysLessThan()\n\nPROMOTE_TO_OBJECT = [\n {np.number, np.character}, # numpy promotes to character\n {np.bool_, np.character}, # numpy promotes to character\n {np.bytes_, np.unicode_}, # numpy promotes to unicode\n]\n\nNAT_TYPES = (np.datetime64(\"NaT\"), np.timedelta64(\"NaT\"))"},{"col":4,"comment":"null","endLoc":2662,"header":"def _rename_all(self, name_dict, dims_dict)","id":1419,"name":"_rename_all","nodeType":"Function","startLoc":2658,"text":"def _rename_all(self, name_dict, dims_dict):\n variables, coord_names = self._rename_vars(name_dict, dims_dict)\n dims = self._rename_dims(dims_dict)\n indexes = self._rename_indexes(name_dict, dims.keys())\n return variables, coord_names, dims, indexes"},{"attributeType":"null","col":27,"comment":"null","endLoc":291,"id":1420,"name":"configparser","nodeType":"Attribute","startLoc":291,"text":"configparser"},{"attributeType":"null","col":0,"comment":"null","endLoc":376,"id":1421,"name":"LONG_VERSION_PY","nodeType":"Attribute","startLoc":376,"text":"LONG_VERSION_PY"},{"attributeType":"null","col":0,"comment":"null","endLoc":377,"id":1422,"name":"HANDLERS","nodeType":"Attribute","startLoc":377,"text":"HANDLERS"},{"attributeType":"null","col":0,"comment":"null","endLoc":1218,"id":1423,"name":"SHORT_VERSION_PY","nodeType":"Attribute","startLoc":1218,"text":"SHORT_VERSION_PY"},{"col":4,"comment":"null","endLoc":2631,"header":"def _rename_vars(self, name_dict, dims_dict)","id":1424,"name":"_rename_vars","nodeType":"Function","startLoc":2619,"text":"def _rename_vars(self, name_dict, dims_dict):\n variables = {}\n coord_names = set()\n for k, v in self.variables.items():\n var = v.copy(deep=False)\n var.dims = tuple(dims_dict.get(dim, dim) for dim in v.dims)\n name = name_dict.get(k, k)\n if name in variables:\n raise ValueError(\"the new name %r conflicts\" % (name,))\n variables[name] = var\n if k in self._coord_names:\n coord_names.add(name)\n return variables, coord_names"},{"attributeType":"null","col":0,"comment":"null","endLoc":1711,"id":1425,"name":"CONFIG_ERROR","nodeType":"Attribute","startLoc":1711,"text":"CONFIG_ERROR"},{"attributeType":"null","col":0,"comment":"null","endLoc":1733,"id":1426,"name":"SAMPLE_CONFIG","nodeType":"Attribute","startLoc":1733,"text":"SAMPLE_CONFIG"},{"attributeType":"null","col":0,"comment":"null","endLoc":1748,"id":1427,"name":"INIT_PY_SNIPPET","nodeType":"Attribute","startLoc":1748,"text":"INIT_PY_SNIPPET"},{"attributeType":"null","col":4,"comment":"null","endLoc":1878,"id":1428,"name":"cmd","nodeType":"Attribute","startLoc":1878,"text":"cmd"},{"col":0,"comment":"Given any number of variables, return variables with matching dimensions\n and broadcast data.\n\n The data on the returned variables will be a view of the data on the\n corresponding original arrays, but dimensions will be reordered and\n inserted so that both broadcast arrays have the same dimensions. 
The new\n dimensions are sorted in order of appearance in the first variable's\n dimensions followed by the second variable's dimensions.\n ","endLoc":2180,"header":"def broadcast_variables(*variables)","id":1429,"name":"broadcast_variables","nodeType":"Function","startLoc":2166,"text":"def broadcast_variables(*variables):\n \"\"\"Given any number of variables, return variables with matching dimensions\n and broadcast data.\n\n The data on the returned variables will be a view of the data on the\n corresponding original arrays, but dimensions will be reordered and\n inserted so that both broadcast arrays have the same dimensions. The new\n dimensions are sorted in order of appearance in the first variable's\n dimensions followed by the second variable's dimensions.\n \"\"\"\n dims_map = _unified_dims(variables)\n dims_tuple = tuple(dims_map)\n return tuple(\n var.set_dims(dims_map) if var.dims != dims_tuple else var for var in variables\n )"},{"fileName":"nputils.py","filePath":"xarray/core","id":1430,"nodeType":"File","text":"import warnings\n\nimport numpy as np\nimport pandas as pd\n\ntry:\n import bottleneck as bn\n\n _USE_BOTTLENECK = True\nexcept ImportError:\n # use numpy methods instead\n bn = np\n _USE_BOTTLENECK = False\n\n\ndef _validate_axis(data, axis):\n ndim = data.ndim\n if not -ndim <= axis < ndim:\n raise IndexError(\"axis %r out of bounds [-%r, %r)\" % (axis, ndim, ndim))\n if axis < 0:\n axis += ndim\n return axis\n\n\ndef _select_along_axis(values, idx, axis):\n other_ind = np.ix_(*[np.arange(s) for s in idx.shape])\n sl = other_ind[:axis] + (idx,) + other_ind[axis:]\n return values[sl]\n\n\ndef nanfirst(values, axis):\n axis = _validate_axis(values, axis)\n idx_first = np.argmax(~pd.isnull(values), axis=axis)\n return _select_along_axis(values, idx_first, axis)\n\n\ndef nanlast(values, axis):\n axis = _validate_axis(values, axis)\n rev = (slice(None),) * axis + (slice(None, None, -1),)\n idx_last = -1 - np.argmax(~pd.isnull(values)[rev], axis=axis)\n return _select_along_axis(values, idx_last, axis)\n\n\ndef inverse_permutation(indices):\n \"\"\"Return indices for an inverse permutation.\n\n Parameters\n ----------\n indices : 1D np.ndarray with dtype=int\n Integer positions to assign elements to.\n\n Returns\n -------\n inverse_permutation : 1D np.ndarray with dtype=int\n Integer indices to take from the original array to create the\n permutation.\n \"\"\"\n # use intp instead of int64 because of windows :(\n inverse_permutation = np.empty(len(indices), dtype=np.intp)\n inverse_permutation[indices] = np.arange(len(indices), dtype=np.intp)\n return inverse_permutation\n\n\ndef _ensure_bool_is_ndarray(result, *args):\n # numpy will sometimes return a scalar value from binary comparisons if it\n # can't handle the comparison instead of broadcasting, e.g.,\n # In [10]: 1 == np.array(['a', 'b'])\n # Out[10]: False\n # This function ensures that the result is the appropriate shape in these\n # cases\n if isinstance(result, bool):\n shape = np.broadcast(*args).shape\n constructor = np.ones if result else np.zeros\n result = constructor(shape, dtype=bool)\n return result\n\n\ndef array_eq(self, other):\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", r\"elementwise comparison failed\")\n return _ensure_bool_is_ndarray(self == other, self, other)\n\n\ndef array_ne(self, other):\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", r\"elementwise comparison failed\")\n return _ensure_bool_is_ndarray(self != other, self, other)\n\n\ndef 
_is_contiguous(positions):\n \"\"\"Given a non-empty list, does it consist of contiguous integers?\"\"\"\n previous = positions[0]\n for current in positions[1:]:\n if current != previous + 1:\n return False\n previous = current\n return True\n\n\ndef _advanced_indexer_subspaces(key):\n \"\"\"Indices of the advanced indexes subspaces for mixed indexing and vindex.\n \"\"\"\n if not isinstance(key, tuple):\n key = (key,)\n advanced_index_positions = [\n i for i, k in enumerate(key) if not isinstance(k, slice)\n ]\n\n if not advanced_index_positions or not _is_contiguous(advanced_index_positions):\n # Nothing to reorder: dimensions on the indexing result are already\n # ordered like vindex. See NumPy's rule for \"Combining advanced and\n # basic indexing\":\n # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#combining-advanced-and-basic-indexing\n return (), ()\n\n non_slices = [k for k in key if not isinstance(k, slice)]\n ndim = len(np.broadcast(*non_slices).shape)\n mixed_positions = advanced_index_positions[0] + np.arange(ndim)\n vindex_positions = np.arange(ndim)\n return mixed_positions, vindex_positions\n\n\nclass NumpyVIndexAdapter:\n \"\"\"Object that implements indexing like vindex on a np.ndarray.\n\n This is a pure Python implementation of (some of) the logic in this NumPy\n proposal: https://github.com/numpy/numpy/pull/6256\n \"\"\"\n\n def __init__(self, array):\n self._array = array\n\n def __getitem__(self, key):\n mixed_positions, vindex_positions = _advanced_indexer_subspaces(key)\n return np.moveaxis(self._array[key], mixed_positions, vindex_positions)\n\n def __setitem__(self, key, value):\n \"\"\"Value must have dimensionality matching the key.\"\"\"\n mixed_positions, vindex_positions = _advanced_indexer_subspaces(key)\n self._array[key] = np.moveaxis(value, vindex_positions, mixed_positions)\n\n\ndef rolling_window(a, axis, window, center, fill_value):\n \"\"\" rolling window with padding. \"\"\"\n pads = [(0, 0) for s in a.shape]\n if center:\n start = int(window / 2) # 10 -> 5, 9 -> 4\n end = window - 1 - start\n pads[axis] = (start, end)\n else:\n pads[axis] = (window - 1, 0)\n a = np.pad(a, pads, mode=\"constant\", constant_values=fill_value)\n return _rolling_window(a, window, axis)\n\n\ndef _rolling_window(a, window, axis=-1):\n \"\"\"\n Make an ndarray with a rolling window along axis.\n\n Parameters\n ----------\n a : array_like\n Array to add rolling window to\n axis: int\n axis position along which rolling window will be applied.\n window : int\n Size of rolling window\n\n Returns\n -------\n Array that is a view of the original array with a added dimension\n of size w.\n\n Examples\n --------\n >>> x=np.arange(10).reshape((2,5))\n >>> np.rolling_window(x, 3, axis=-1)\n array([[[0, 1, 2], [1, 2, 3], [2, 3, 4]],\n [[5, 6, 7], [6, 7, 8], [7, 8, 9]]])\n\n Calculate rolling mean of last dimension:\n >>> np.mean(np.rolling_window(x, 3, axis=-1), -1)\n array([[ 1., 2., 3.],\n [ 6., 7., 8.]])\n\n This function is taken from https://github.com/numpy/numpy/pull/31\n but slightly modified to accept axis option.\n \"\"\"\n axis = _validate_axis(a, axis)\n a = np.swapaxes(a, axis, -1)\n\n if window < 1:\n raise ValueError(\"`window` must be at least 1. Given : {}\".format(window))\n if window > a.shape[-1]:\n raise ValueError(\"`window` is too long. 
Given : {}\".format(window))\n\n shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)\n strides = a.strides + (a.strides[-1],)\n rolling = np.lib.stride_tricks.as_strided(\n a, shape=shape, strides=strides, writeable=False\n )\n return np.swapaxes(rolling, -2, axis)\n\n\ndef _create_bottleneck_method(name, npmodule=np):\n def f(values, axis=None, **kwargs):\n dtype = kwargs.get(\"dtype\", None)\n bn_func = getattr(bn, name, None)\n\n if (\n _USE_BOTTLENECK\n and isinstance(values, np.ndarray)\n and bn_func is not None\n and not isinstance(axis, tuple)\n and values.dtype.kind in \"uifc\"\n and values.dtype.isnative\n and (dtype is None or np.dtype(dtype) == values.dtype)\n ):\n # bottleneck does not take care dtype, min_count\n kwargs.pop(\"dtype\", None)\n result = bn_func(values, axis=axis, **kwargs)\n else:\n result = getattr(npmodule, name)(values, axis=axis, **kwargs)\n\n return result\n\n f.__name__ = name\n return f\n\n\nnanmin = _create_bottleneck_method(\"nanmin\")\nnanmax = _create_bottleneck_method(\"nanmax\")\nnanmean = _create_bottleneck_method(\"nanmean\")\nnanmedian = _create_bottleneck_method(\"nanmedian\")\nnanvar = _create_bottleneck_method(\"nanvar\")\nnanstd = _create_bottleneck_method(\"nanstd\")\nnanprod = _create_bottleneck_method(\"nanprod\")\nnancumsum = _create_bottleneck_method(\"nancumsum\")\nnancumprod = _create_bottleneck_method(\"nancumprod\")\nnanargmin = _create_bottleneck_method(\"nanargmin\")\nnanargmax = _create_bottleneck_method(\"nanargmax\")\n"},{"attributeType":"null","col":8,"comment":"null","endLoc":1880,"id":1431,"name":"errors","nodeType":"Attribute","startLoc":1880,"text":"errors"},{"col":0,"comment":"null","endLoc":2153,"header":"def _unified_dims(variables)","id":1432,"name":"_unified_dims","nodeType":"Function","startLoc":2134,"text":"def _unified_dims(variables):\n # validate dimensions\n all_dims = {}\n for var in variables:\n var_dims = var.dims\n if len(set(var_dims)) < len(var_dims):\n raise ValueError(\n \"broadcasting cannot handle duplicate \"\n \"dimensions: %r\" % list(var_dims)\n )\n for d, s in zip(var_dims, var.shape):\n if d not in all_dims:\n all_dims[d] = s\n elif all_dims[d] != s:\n raise ValueError(\n \"operands cannot be broadcast together \"\n \"with mismatched lengths for dimension %r: %s\"\n % (d, (all_dims[d], s))\n )\n return all_dims"},{"col":4,"comment":"\n Set and control tick behavior\n\n Parameters\n ----------\n max_xticks, max_yticks : int, optional\n Maximum number of labeled ticks to plot on x, y axes\n fontsize : string or int\n Font size as used by matplotlib text\n\n Returns\n -------\n self : FacetGrid object\n\n ","endLoc":534,"header":"def set_ticks(self, max_xticks=_NTICKS, max_yticks=_NTICKS, fontsize=_FONTSIZE)","id":1433,"name":"set_ticks","nodeType":"Function","startLoc":504,"text":"def set_ticks(self, max_xticks=_NTICKS, max_yticks=_NTICKS, fontsize=_FONTSIZE):\n \"\"\"\n Set and control tick behavior\n\n Parameters\n ----------\n max_xticks, max_yticks : int, optional\n Maximum number of labeled ticks to plot on x, y axes\n fontsize : string or int\n Font size as used by matplotlib text\n\n Returns\n -------\n self : FacetGrid object\n\n \"\"\"\n from matplotlib.ticker import MaxNLocator\n\n # Both are necessary\n x_major_locator = MaxNLocator(nbins=max_xticks)\n y_major_locator = MaxNLocator(nbins=max_yticks)\n\n for ax in self.axes.flat:\n ax.xaxis.set_major_locator(x_major_locator)\n ax.yaxis.set_major_locator(y_major_locator)\n for tick in itertools.chain(\n ax.xaxis.get_major_ticks(), 
ax.yaxis.get_major_ticks()\n ):\n tick.label1.set_fontsize(fontsize)\n\n return self"},{"className":"NumpyVIndexAdapter","col":0,"comment":"Object that implements indexing like vindex on a np.ndarray.\n\n This is a pure Python implementation of (some of) the logic in this NumPy\n proposal: https://github.com/numpy/numpy/pull/6256\n ","endLoc":140,"id":1434,"nodeType":"Class","startLoc":123,"text":"class NumpyVIndexAdapter:\n \"\"\"Object that implements indexing like vindex on a np.ndarray.\n\n This is a pure Python implementation of (some of) the logic in this NumPy\n proposal: https://github.com/numpy/numpy/pull/6256\n \"\"\"\n\n def __init__(self, array):\n self._array = array\n\n def __getitem__(self, key):\n mixed_positions, vindex_positions = _advanced_indexer_subspaces(key)\n return np.moveaxis(self._array[key], mixed_positions, vindex_positions)\n\n def __setitem__(self, key, value):\n \"\"\"Value must have dimensionality matching the key.\"\"\"\n mixed_positions, vindex_positions = _advanced_indexer_subspaces(key)\n self._array[key] = np.moveaxis(value, vindex_positions, mixed_positions)"},{"col":4,"comment":"null","endLoc":135,"header":"def __getitem__(self, key)","id":1435,"name":"__getitem__","nodeType":"Function","startLoc":133,"text":"def __getitem__(self, key):\n mixed_positions, vindex_positions = _advanced_indexer_subspaces(key)\n return np.moveaxis(self._array[key], mixed_positions, vindex_positions)"},{"col":0,"comment":"","endLoc":278,"header":"versioneer.py#","id":1436,"name":"","nodeType":"Function","startLoc":5,"text":"\"\"\"The Versioneer - like a rocketeer, but for versions.\n\nThe Versioneer\n==============\n\n* like a rocketeer, but for versions!\n* https://github.com/warner/python-versioneer\n* Brian Warner\n* License: Public Domain\n* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy\n* [![Latest Version]\n(https://pypip.in/version/versioneer/badge.svg?style=flat)\n](https://pypi.python.org/pypi/versioneer/)\n* [![Build Status]\n(https://travis-ci.org/warner/python-versioneer.png?branch=master)\n](https://travis-ci.org/warner/python-versioneer)\n\nThis is a tool for managing a recorded version number in distutils-based\npython projects. The goal is to remove the tedious and error-prone \"update\nthe embedded version string\" step from your release process. Making a new\nrelease should be as easy as recording a new tag in your version-control\nsystem, and maybe making new tarballs.\n\n\n## Quick Install\n\n* `pip install versioneer` to somewhere to your $PATH\n* add a `[versioneer]` section to your setup.cfg (see below)\n* run `versioneer install` in your source tree, commit the results\n\n## Version Identifiers\n\nSource trees come from a variety of places:\n\n* a version-control system checkout (mostly used by developers)\n* a nightly tarball, produced by build automation\n* a snapshot tarball, produced by a web-based VCS browser, like github's\n \"tarball from tag\" feature\n* a release tarball, produced by \"setup.py sdist\", distributed through PyPI\n\nWithin each source tree, the version identifier (either a string or a number,\nthis tool is format-agnostic) can come from a variety of places:\n\n* ask the VCS tool itself, e.g. 
\"git describe\" (for checkouts), which knows\n about recent \"tags\" and an absolute revision-id\n* the name of the directory into which the tarball was unpacked\n* an expanded VCS keyword ($Id$, etc)\n* a `_version.py` created by some earlier build step\n\nFor released software, the version identifier is closely related to a VCS\ntag. Some projects use tag names that include more than just the version\nstring (e.g. \"myproject-1.2\" instead of just \"1.2\"), in which case the tool\nneeds to strip the tag prefix to extract the version identifier. For\nunreleased software (between tags), the version identifier should provide\nenough information to help developers recreate the same tree, while also\ngiving them an idea of roughly how old the tree is (after version 1.2, before\nversion 1.3). Many VCS systems can report a description that captures this,\nfor example `git describe --tags --dirty --always` reports things like\n\"0.7-1-g574ab98-dirty\" to indicate that the checkout is one revision past the\n0.7 tag, has a unique revision id of \"574ab98\", and is \"dirty\" (it has\nuncommitted changes.\n\nThe version identifier is used for multiple purposes:\n\n* to allow the module to self-identify its version: `myproject.__version__`\n* to choose a name and prefix for a 'setup.py sdist' tarball\n\n## Theory of Operation\n\nVersioneer works by adding a special `_version.py` file into your source\ntree, where your `__init__.py` can import it. This `_version.py` knows how to\ndynamically ask the VCS tool for version information at import time.\n\n`_version.py` also contains `$Revision$` markers, and the installation\nprocess marks `_version.py` to have this marker rewritten with a tag name\nduring the `git archive` command. As a result, generated tarballs will\ncontain enough information to get the proper version.\n\nTo allow `setup.py` to compute a version too, a `versioneer.py` is added to\nthe top level of your source tree, next to `setup.py` and the `setup.cfg`\nthat configures it. This overrides several distutils/setuptools commands to\ncompute the version when invoked, and changes `setup.py build` and `setup.py\nsdist` to replace `_version.py` with a small static file that contains just\nthe generated version data.\n\n## Installation\n\nSee [INSTALL.md](./INSTALL.md) for detailed installation instructions.\n\n## Version-String Flavors\n\nCode which uses Versioneer can learn about its version string at runtime by\nimporting `_version` from your main `__init__.py` file and running the\n`get_versions()` function. From the \"outside\" (e.g. in `setup.py`), you can\nimport the top-level `versioneer.py` and run `get_versions()`.\n\nBoth functions return a dictionary with different flavors of version\ninformation:\n\n* `['version']`: A condensed version string, rendered using the selected\n style. This is the most commonly used value for the project's version\n string. The default \"pep440\" style yields strings like `0.11`,\n `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the \"Styles\" section\n below for alternative styles.\n\n* `['full-revisionid']`: detailed revision identifier. For Git, this is the\n full SHA1 commit id, e.g. \"1076c978a8d3cfc70f408fe5974aa6c092c949ac\".\n\n* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the\n commit date in ISO 8601 format. This will be None if the date is not\n available.\n\n* `['dirty']`: a boolean, True if the tree has uncommitted changes. 
Note that\n this is only accurate if run in a VCS checkout, otherwise it is likely to\n be False or None\n\n* `['error']`: if the version string could not be computed, this will be set\n to a string describing the problem, otherwise it will be None. It may be\n useful to throw an exception in setup.py if this is set, to avoid e.g.\n creating tarballs with a version string of \"unknown\".\n\nSome variants are more useful than others. Including `full-revisionid` in a\nbug report should allow developers to reconstruct the exact code being tested\n(or indicate the presence of local changes that should be shared with the\ndevelopers). `version` is suitable for display in an \"about\" box or a CLI\n`--version` output: it can be easily compared against release notes and lists\nof bugs fixed in various releases.\n\nThe installer adds the following text to your `__init__.py` to place a basic\nversion in `YOURPROJECT.__version__`:\n\n from ._version import get_versions\n __version__ = get_versions()['version']\n del get_versions\n\n## Styles\n\nThe setup.cfg `style=` configuration controls how the VCS information is\nrendered into a version string.\n\nThe default style, \"pep440\", produces a PEP440-compliant string, equal to the\nun-prefixed tag name for actual releases, and containing an additional \"local\nversion\" section with more detail for in-between builds. For Git, this is\nTAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags\n--dirty --always`. For example \"0.11+2.g1076c97.dirty\" indicates that the\ntree is like the \"1076c97\" commit but has uncommitted changes (\".dirty\"), and\nthat this commit is two revisions (\"+2\") beyond the \"0.11\" tag. For released\nsoftware (exactly equal to a known tag), the identifier will only contain the\nstripped tag, e.g. \"0.11\".\n\nOther styles are available. See [details.md](details.md) in the Versioneer\nsource tree for descriptions.\n\n## Debugging\n\nVersioneer tries to avoid fatal errors: if something goes wrong, it will tend\nto return a version of \"0+unknown\". To investigate the problem, run `setup.py\nversion`, which will run the version-lookup code in a verbose mode, and will\ndisplay the full contents of `get_versions()` (including the `error` string,\nwhich may help identify what went wrong).\n\n## Known Limitations\n\nSome situations are known to cause problems for Versioneer. This details the\nmost significant ones. More can be found on Github\n[issues page](https://github.com/warner/python-versioneer/issues).\n\n### Subprojects\n\nVersioneer has limited support for source trees in which `setup.py` is not in\nthe root directory (e.g. `setup.py` and `.git/` are *not* siblings). The are\ntwo common reasons why `setup.py` might not be in the root:\n\n* Source trees which contain multiple subprojects, such as\n [Buildbot](https://github.com/buildbot/buildbot), which contains both\n \"master\" and \"slave\" subprojects, each with their own `setup.py`,\n `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI\n distributions (and upload multiple independently-installable tarballs).\n* Source trees whose main purpose is to contain a C library, but which also\n provide bindings to Python (and perhaps other langauges) in subdirectories.\n\nVersioneer will look for `.git` in parent directories, and most operations\nshould get the right version string. 
However `pip` and `setuptools` have bugs\nand implementation details which frequently cause `pip install .` from a\nsubproject directory to fail to find a correct version string (so it usually\ndefaults to `0+unknown`).\n\n`pip install --editable .` should work correctly. `setup.py install` might\nwork too.\n\nPip-8.1.1 is known to have this problem, but hopefully it will get fixed in\nsome later version.\n\n[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking\nthis issue. The discussion in\n[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the\nissue from the Versioneer side in more detail.\n[pip PR#3176](https://github.com/pypa/pip/pull/3176) and\n[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve\npip to let Versioneer work correctly.\n\nVersioneer-0.16 and earlier only looked for a `.git` directory next to the\n`setup.cfg`, so subprojects were completely unsupported with those releases.\n\n### Editable installs with setuptools <= 18.5\n\n`setup.py develop` and `pip install --editable .` allow you to install a\nproject into a virtualenv once, then continue editing the source code (and\ntest) without re-installing after every change.\n\n\"Entry-point scripts\" (`setup(entry_points={\"console_scripts\": ..})`) are a\nconvenient way to specify executable scripts that should be installed along\nwith the python package.\n\nThese both work as expected when using modern setuptools. When using\nsetuptools-18.5 or earlier, however, certain operations will cause\n`pkg_resources.DistributionNotFound` errors when running the entrypoint\nscript, which must be resolved by re-installing the package. This happens\nwhen the install happens with one version, then the egg_info data is\nregenerated while a different version is checked out. Many setup.py commands\ncause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into\na different virtualenv), so this can be surprising.\n\n[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes\nthis one, but upgrading to a newer version of setuptools should probably\nresolve it.\n\n### Unicode version strings\n\nWhile Versioneer works (and is continually tested) with both Python 2 and\nPython 3, it is not entirely consistent with bytes-vs-unicode distinctions.\nNewer releases probably generate unicode version strings on py2. It's not\nclear that this is wrong, but it may be surprising for applications when then\nwrite these strings to a network connection or include them in bytes-oriented\nAPIs like cryptographic checksums.\n\n[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates\nthis question.\n\n\n## Updating Versioneer\n\nTo upgrade your project to a new release of Versioneer, do the following:\n\n* install the new Versioneer (`pip install -U versioneer` or equivalent)\n* edit `setup.cfg`, if necessary, to include any new configuration settings\n indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.\n* re-run `versioneer install` in your source tree, to replace\n `SRC/_version.py`\n* commit any changed files\n\n## Future Directions\n\nThis tool is designed to make it easily extended to other version-control\nsystems: all VCS-specific components are in separate directories like\nsrc/git/ . The top-level `versioneer.py` script is assembled from these\ncomponents by running make-versioneer.py . 
In the future, make-versioneer.py\nwill take a VCS name as an argument, and will construct a version of\n`versioneer.py` that is specific to the given VCS. It might also take the\nconfiguration arguments that are currently provided manually during\ninstallation by editing setup.py . Alternatively, it might go the other\ndirection and include code from all supported VCS systems, reducing the\nnumber of intermediate scripts.\n\n\n## License\n\nTo make Versioneer easier to embed, all its code is dedicated to the public\ndomain. The `_version.py` that it creates is also in the public domain.\nSpecifically, both are released under the Creative Commons \"Public Domain\nDedication\" license (CC0-1.0), as described in\nhttps://creativecommons.org/publicdomain/zero/1.0/ .\n\n\"\"\"\n\ntry:\n import configparser\nexcept ImportError:\n import ConfigParser as configparser\n\nLONG_VERSION_PY = {}\n\nHANDLERS = {}\n\nLONG_VERSION_PY[\n \"git\"\n] = r'''\n# This file helps to compute a version number in source trees obtained from\n# git-archive tarball (such as those provided by githubs download-from-tag\n# feature). Distribution tarballs (built by setup.py sdist) and build\n# directories (produced by setup.py build) will contain a much shorter file\n# that just contains the computed version number.\n\n# This file is released into the public domain. Generated by\n# versioneer-0.18 (https://github.com/warner/python-versioneer)\n\n\"\"\"Git implementation of _version.py.\"\"\"\n\nimport errno\nimport os\nimport re\nimport subprocess\nimport sys\n\n\ndef get_keywords():\n \"\"\"Get the keywords needed to look up the version information.\"\"\"\n # these strings will be replaced by git during git-archive.\n # setup.py/versioneer.py will grep for the variable names, so they must\n # each be defined on a line of their own. 
_version.py will just call\n # get_keywords().\n git_refnames = \"%(DOLLAR)sFormat:%%d%(DOLLAR)s\"\n git_full = \"%(DOLLAR)sFormat:%%H%(DOLLAR)s\"\n git_date = \"%(DOLLAR)sFormat:%%ci%(DOLLAR)s\"\n keywords = {\"refnames\": git_refnames, \"full\": git_full, \"date\": git_date}\n return keywords\n\n\nclass VersioneerConfig:\n \"\"\"Container for Versioneer configuration parameters.\"\"\"\n\n\ndef get_config():\n \"\"\"Create, populate and return the VersioneerConfig() object.\"\"\"\n # these strings are filled in when 'setup.py versioneer' creates\n # _version.py\n cfg = VersioneerConfig()\n cfg.VCS = \"git\"\n cfg.style = \"%(STYLE)s\"\n cfg.tag_prefix = \"%(TAG_PREFIX)s\"\n cfg.parentdir_prefix = \"%(PARENTDIR_PREFIX)s\"\n cfg.versionfile_source = \"%(VERSIONFILE_SOURCE)s\"\n cfg.verbose = False\n return cfg\n\n\nclass NotThisMethod(Exception):\n \"\"\"Exception raised if a method is not valid for the current scenario.\"\"\"\n\n\nLONG_VERSION_PY = {}\nHANDLERS = {}\n\n\ndef register_vcs_handler(vcs, method): # decorator\n \"\"\"Decorator to mark a method as the handler for a particular VCS.\"\"\"\n def decorate(f):\n \"\"\"Store f in HANDLERS[vcs][method].\"\"\"\n if vcs not in HANDLERS:\n HANDLERS[vcs] = {}\n HANDLERS[vcs][method] = f\n return f\n return decorate\n\n\ndef run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,\n env=None):\n \"\"\"Call the given command(s).\"\"\"\n assert isinstance(commands, list)\n p = None\n for c in commands:\n try:\n dispcmd = str([c] + args)\n # remember shell=False, so use git.cmd on windows, not just git\n p = subprocess.Popen([c] + args, cwd=cwd, env=env,\n stdout=subprocess.PIPE,\n stderr=(subprocess.PIPE if hide_stderr\n else None))\n break\n except EnvironmentError:\n e = sys.exc_info()[1]\n if e.errno == errno.ENOENT:\n continue\n if verbose:\n print(\"unable to run %%s\" %% dispcmd)\n print(e)\n return None, None\n else:\n if verbose:\n print(\"unable to find command, tried %%s\" %% (commands,))\n return None, None\n stdout = p.communicate()[0].strip()\n if sys.version_info[0] >= 3:\n stdout = stdout.decode()\n if p.returncode != 0:\n if verbose:\n print(\"unable to run %%s (error)\" %% dispcmd)\n print(\"stdout was %%s\" %% stdout)\n return None, p.returncode\n return stdout, p.returncode\n\n\ndef versions_from_parentdir(parentdir_prefix, root, verbose):\n \"\"\"Try to determine the version from the parent directory name.\n\n Source tarballs conventionally unpack into a directory that includes both\n the project name and a version string. We will also support searching up\n two directory levels for an appropriately named parent directory\n \"\"\"\n rootdirs = []\n\n for i in range(3):\n dirname = os.path.basename(root)\n if dirname.startswith(parentdir_prefix):\n return {\"version\": dirname[len(parentdir_prefix):],\n \"full-revisionid\": None,\n \"dirty\": False, \"error\": None, \"date\": None}\n else:\n rootdirs.append(root)\n root = os.path.dirname(root) # up a level\n\n if verbose:\n print(\"Tried directories %%s but none started with prefix %%s\" %%\n (str(rootdirs), parentdir_prefix))\n raise NotThisMethod(\"rootdir doesn't start with parentdir_prefix\")\n\n\n@register_vcs_handler(\"git\", \"get_keywords\")\ndef git_get_keywords(versionfile_abs):\n \"\"\"Extract version information from the given file.\"\"\"\n # the code embedded in _version.py can just fetch the value of these\n # keywords. When used from setup.py, we don't want to import _version.py,\n # so we do it with a regexp instead. 
This function is not used from\n # _version.py.\n keywords = {}\n try:\n f = open(versionfile_abs, \"r\")\n for line in f.readlines():\n if line.strip().startswith(\"git_refnames =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"refnames\"] = mo.group(1)\n if line.strip().startswith(\"git_full =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"full\"] = mo.group(1)\n if line.strip().startswith(\"git_date =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"date\"] = mo.group(1)\n f.close()\n except EnvironmentError:\n pass\n return keywords\n\n\n@register_vcs_handler(\"git\", \"keywords\")\ndef git_versions_from_keywords(keywords, tag_prefix, verbose):\n \"\"\"Get version information from git keywords.\"\"\"\n if not keywords:\n raise NotThisMethod(\"no keywords at all, weird\")\n date = keywords.get(\"date\")\n if date is not None:\n # git-2.2.0 added \"%%cI\", which expands to an ISO-8601 -compliant\n # datestamp. However we prefer \"%%ci\" (which expands to an \"ISO-8601\n # -like\" string, which we must then edit to make compliant), because\n # it's been around since git-1.5.3, and it's too difficult to\n # discover which version we're using, or to work around using an\n # older one.\n date = date.strip().replace(\" \", \"T\", 1).replace(\" \", \"\", 1)\n refnames = keywords[\"refnames\"].strip()\n if refnames.startswith(\"$Format\"):\n if verbose:\n print(\"keywords are unexpanded, not using\")\n raise NotThisMethod(\"unexpanded keywords, not a git-archive tarball\")\n refs = set([r.strip() for r in refnames.strip(\"()\").split(\",\")])\n # starting in git-1.8.3, tags are listed as \"tag: foo-1.0\" instead of\n # just \"foo-1.0\". If we see a \"tag: \" prefix, prefer those.\n TAG = \"tag: \"\n tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])\n if not tags:\n # Either we're using git < 1.8.3, or there really are no tags. We use\n # a heuristic: assume all version tags have a digit. The old git %%d\n # expansion behaves like git log --decorate=short and strips out the\n # refs/heads/ and refs/tags/ prefixes that would let us distinguish\n # between branches and tags. By ignoring refnames without digits, we\n # filter out many common branch names like \"release\" and\n # \"stabilization\", as well as \"HEAD\" and \"master\".\n tags = set([r for r in refs if re.search(r'\\d', r)])\n if verbose:\n print(\"discarding '%%s', no digits\" %% \",\".join(refs - tags))\n if verbose:\n print(\"likely tags: %%s\" %% \",\".join(sorted(tags)))\n for ref in sorted(tags):\n # sorting will prefer e.g. 
\"2.0\" over \"2.0rc1\"\n if ref.startswith(tag_prefix):\n r = ref[len(tag_prefix):]\n if verbose:\n print(\"picking %%s\" %% r)\n return {\"version\": r,\n \"full-revisionid\": keywords[\"full\"].strip(),\n \"dirty\": False, \"error\": None,\n \"date\": date}\n # no suitable tags, so version is \"0+unknown\", but full hex is still there\n if verbose:\n print(\"no suitable tags, using unknown + full revision id\")\n return {\"version\": \"0+unknown\",\n \"full-revisionid\": keywords[\"full\"].strip(),\n \"dirty\": False, \"error\": \"no suitable tags\", \"date\": None}\n\n\n@register_vcs_handler(\"git\", \"pieces_from_vcs\")\ndef git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):\n \"\"\"Get version from 'git describe' in the root of the source tree.\n\n This only gets called if the git-archive 'subst' keywords were *not*\n expanded, and _version.py hasn't already been rewritten with a short\n version string, meaning we're inside a checked out source tree.\n \"\"\"\n GITS = [\"git\"]\n if sys.platform == \"win32\":\n GITS = [\"git.cmd\", \"git.exe\"]\n\n out, rc = run_command(GITS, [\"rev-parse\", \"--git-dir\"], cwd=root,\n hide_stderr=True)\n if rc != 0:\n if verbose:\n print(\"Directory %%s not under git control\" %% root)\n raise NotThisMethod(\"'git rev-parse --git-dir' returned error\")\n\n # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]\n # if there isn't one, this yields HEX[-dirty] (no NUM)\n describe_out, rc = run_command(GITS, [\"describe\", \"--tags\", \"--dirty\",\n \"--always\", \"--long\",\n \"--match\", \"%%s*\" %% tag_prefix],\n cwd=root)\n # --long was added in git-1.5.5\n if describe_out is None:\n raise NotThisMethod(\"'git describe' failed\")\n describe_out = describe_out.strip()\n full_out, rc = run_command(GITS, [\"rev-parse\", \"HEAD\"], cwd=root)\n if full_out is None:\n raise NotThisMethod(\"'git rev-parse' failed\")\n full_out = full_out.strip()\n\n pieces = {}\n pieces[\"long\"] = full_out\n pieces[\"short\"] = full_out[:7] # maybe improved later\n pieces[\"error\"] = None\n\n # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]\n # TAG might have hyphens.\n git_describe = describe_out\n\n # look for -dirty suffix\n dirty = git_describe.endswith(\"-dirty\")\n pieces[\"dirty\"] = dirty\n if dirty:\n git_describe = git_describe[:git_describe.rindex(\"-dirty\")]\n\n # now we have TAG-NUM-gHEX or HEX\n\n if \"-\" in git_describe:\n # TAG-NUM-gHEX\n mo = re.search(r'^(.+)-(\\d+)-g([0-9a-f]+)$', git_describe)\n if not mo:\n # unparseable. 
Maybe git-describe is misbehaving?\n pieces[\"error\"] = (\"unable to parse git-describe output: '%%s'\"\n %% describe_out)\n return pieces\n\n # tag\n full_tag = mo.group(1)\n if not full_tag.startswith(tag_prefix):\n if verbose:\n fmt = \"tag '%%s' doesn't start with prefix '%%s'\"\n print(fmt %% (full_tag, tag_prefix))\n pieces[\"error\"] = (\"tag '%%s' doesn't start with prefix '%%s'\"\n %% (full_tag, tag_prefix))\n return pieces\n pieces[\"closest-tag\"] = full_tag[len(tag_prefix):]\n\n # distance: number of commits since tag\n pieces[\"distance\"] = int(mo.group(2))\n\n # commit: short hex revision ID\n pieces[\"short\"] = mo.group(3)\n\n else:\n # HEX: no tags\n pieces[\"closest-tag\"] = None\n count_out, rc = run_command(GITS, [\"rev-list\", \"HEAD\", \"--count\"],\n cwd=root)\n pieces[\"distance\"] = int(count_out) # total number of commits\n\n # commit date: see ISO-8601 comment in git_versions_from_keywords()\n date = run_command(GITS, [\"show\", \"-s\", \"--format=%%ci\", \"HEAD\"],\n cwd=root)[0].strip()\n pieces[\"date\"] = date.strip().replace(\" \", \"T\", 1).replace(\" \", \"\", 1)\n\n return pieces\n\n\ndef plus_or_dot(pieces):\n \"\"\"Return a + if we don't already have one, else return a .\"\"\"\n if \"+\" in pieces.get(\"closest-tag\", \"\"):\n return \".\"\n return \"+\"\n\n\ndef render_pep440(pieces):\n \"\"\"Build up version string, with post-release \"local version identifier\".\n\n Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n Exceptions:\n 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += plus_or_dot(pieces)\n rendered += \"%%d.g%%s\" %% (pieces[\"distance\"], pieces[\"short\"])\n if pieces[\"dirty\"]:\n rendered += \".dirty\"\n else:\n # exception #1\n rendered = \"0+untagged.%%d.g%%s\" %% (pieces[\"distance\"],\n pieces[\"short\"])\n if pieces[\"dirty\"]:\n rendered += \".dirty\"\n return rendered\n\n\ndef render_pep440_pre(pieces):\n \"\"\"TAG[.post.devDISTANCE] -- No -dirty.\n\n Exceptions:\n 1: no tags. 0.post.devDISTANCE\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"]:\n rendered += \".post.dev%%d\" %% pieces[\"distance\"]\n else:\n # exception #1\n rendered = \"0.post.dev%%d\" %% pieces[\"distance\"]\n return rendered\n\n\ndef render_pep440_post(pieces):\n \"\"\"TAG[.postDISTANCE[.dev0]+gHEX] .\n\n The \".dev0\" means dirty. Note that .dev0 sorts backwards\n (a dirty tree will appear \"older\" than the corresponding clean one),\n but you shouldn't be releasing software with -dirty anyways.\n\n Exceptions:\n 1: no tags. 0.postDISTANCE[.dev0]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += \".post%%d\" %% pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n rendered += plus_or_dot(pieces)\n rendered += \"g%%s\" %% pieces[\"short\"]\n else:\n # exception #1\n rendered = \"0.post%%d\" %% pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n rendered += \"+g%%s\" %% pieces[\"short\"]\n return rendered\n\n\ndef render_pep440_old(pieces):\n \"\"\"TAG[.postDISTANCE[.dev0]] .\n\n The \".dev0\" means dirty.\n\n Eexceptions:\n 1: no tags. 
0.postDISTANCE[.dev0]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += \".post%%d\" %% pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n else:\n # exception #1\n rendered = \"0.post%%d\" %% pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n return rendered\n\n\ndef render_git_describe(pieces):\n \"\"\"TAG[-DISTANCE-gHEX][-dirty].\n\n Like 'git describe --tags --dirty --always'.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"]:\n rendered += \"-%%d-g%%s\" %% (pieces[\"distance\"], pieces[\"short\"])\n else:\n # exception #1\n rendered = pieces[\"short\"]\n if pieces[\"dirty\"]:\n rendered += \"-dirty\"\n return rendered\n\n\ndef render_git_describe_long(pieces):\n \"\"\"TAG-DISTANCE-gHEX[-dirty].\n\n Like 'git describe --tags --dirty --always -long'.\n The distance/hash is unconditional.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n rendered += \"-%%d-g%%s\" %% (pieces[\"distance\"], pieces[\"short\"])\n else:\n # exception #1\n rendered = pieces[\"short\"]\n if pieces[\"dirty\"]:\n rendered += \"-dirty\"\n return rendered\n\n\ndef render(pieces, style):\n \"\"\"Render the given version pieces into the requested style.\"\"\"\n if pieces[\"error\"]:\n return {\"version\": \"unknown\",\n \"full-revisionid\": pieces.get(\"long\"),\n \"dirty\": None,\n \"error\": pieces[\"error\"],\n \"date\": None}\n\n if not style or style == \"default\":\n style = \"pep440\" # the default\n\n if style == \"pep440\":\n rendered = render_pep440(pieces)\n elif style == \"pep440-pre\":\n rendered = render_pep440_pre(pieces)\n elif style == \"pep440-post\":\n rendered = render_pep440_post(pieces)\n elif style == \"pep440-old\":\n rendered = render_pep440_old(pieces)\n elif style == \"git-describe\":\n rendered = render_git_describe(pieces)\n elif style == \"git-describe-long\":\n rendered = render_git_describe_long(pieces)\n else:\n raise ValueError(\"unknown style '%%s'\" %% style)\n\n return {\"version\": rendered, \"full-revisionid\": pieces[\"long\"],\n \"dirty\": pieces[\"dirty\"], \"error\": None,\n \"date\": pieces.get(\"date\")}\n\n\ndef get_versions():\n \"\"\"Get version information or return default if unable to do so.\"\"\"\n # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have\n # __file__, we can work backwards from there to the root. Some\n # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which\n # case we can only use expanded keywords.\n\n cfg = get_config()\n verbose = cfg.verbose\n\n try:\n return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,\n verbose)\n except NotThisMethod:\n pass\n\n try:\n root = os.path.realpath(__file__)\n # versionfile_source is the relative path from the top of the source\n # tree (where the .git directory might live) to this file. 
Invert\n # this to find the root from __file__.\n for i in cfg.versionfile_source.split('/'):\n root = os.path.dirname(root)\n except NameError:\n return {\"version\": \"0+unknown\", \"full-revisionid\": None,\n \"dirty\": None,\n \"error\": \"unable to find root of source tree\",\n \"date\": None}\n\n try:\n pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)\n return render(pieces, cfg.style)\n except NotThisMethod:\n pass\n\n try:\n if cfg.parentdir_prefix:\n return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)\n except NotThisMethod:\n pass\n\n return {\"version\": \"0+unknown\", \"full-revisionid\": None,\n \"dirty\": None,\n \"error\": \"unable to compute version\", \"date\": None}\n'''\n\nSHORT_VERSION_PY = \"\"\"\n# This file was generated by 'versioneer.py' (0.18) from\n# revision-control system data, or from the parent directory name of an\n# unpacked source archive. Distribution tarballs contain a pre-generated copy\n# of this file.\n\nimport json\n\nversion_json = '''\n%s\n''' # END VERSION_JSON\n\n\ndef get_versions():\n return json.loads(version_json)\n\"\"\"\n\nCONFIG_ERROR = \"\"\"\nsetup.cfg is missing the necessary Versioneer configuration. You need\na section like:\n\n [versioneer]\n VCS = git\n style = pep440\n versionfile_source = src/myproject/_version.py\n versionfile_build = myproject/_version.py\n tag_prefix =\n parentdir_prefix = myproject-\n\nYou will also need to edit your setup.py to use the results:\n\n import versioneer\n setup(version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(), ...)\n\nPlease read the docstring in ./versioneer.py for configuration instructions,\nedit setup.cfg, and re-run the installer or 'python versioneer.py setup'.\n\"\"\"\n\nSAMPLE_CONFIG = \"\"\"\n# See the docstring in versioneer.py for instructions. Note that you must\n# re-run 'versioneer.py setup' after changing this section, and commit the\n# resulting files.\n\n[versioneer]\n#VCS = git\n#style = pep440\n#versionfile_source =\n#versionfile_build =\n#tag_prefix =\n#parentdir_prefix =\n\n\"\"\"\n\nINIT_PY_SNIPPET = \"\"\"\nfrom ._version import get_versions\n__version__ = get_versions()['version']\ndel get_versions\n\"\"\"\n\nif __name__ == \"__main__\":\n cmd = sys.argv[1]\n if cmd == \"setup\":\n errors = do_setup()\n errors += scan_setup_py()\n if errors:\n sys.exit(1)"},{"col":4,"comment":"\n Apply a plotting function to each facet's subset of the data.\n\n Parameters\n ----------\n func : callable\n A plotting function that takes data and keyword arguments. It\n must plot to the currently active matplotlib Axes and take a\n `color` keyword argument. If faceting on the `hue` dimension,\n it must also take a `label` keyword argument.\n args : strings\n Column names in self.data that identify variables with data to\n plot. The data for each variable is passed to `func` in the\n order the variables are specified in the call.\n kwargs : keyword arguments\n All keyword arguments are passed to the plotting function.\n\n Returns\n -------\n self : FacetGrid object\n\n ","endLoc":574,"header":"def map(self, func, *args, **kwargs)","id":1437,"name":"map","nodeType":"Function","startLoc":536,"text":"def map(self, func, *args, **kwargs):\n \"\"\"\n Apply a plotting function to each facet's subset of the data.\n\n Parameters\n ----------\n func : callable\n A plotting function that takes data and keyword arguments. It\n must plot to the currently active matplotlib Axes and take a\n `color` keyword argument. 
If faceting on the `hue` dimension,\n it must also take a `label` keyword argument.\n args : strings\n Column names in self.data that identify variables with data to\n plot. The data for each variable is passed to `func` in the\n order the variables are specified in the call.\n kwargs : keyword arguments\n All keyword arguments are passed to the plotting function.\n\n Returns\n -------\n self : FacetGrid object\n\n \"\"\"\n plt = import_matplotlib_pyplot()\n\n for ax, namedict in zip(self.axes.flat, self.name_dicts.flat):\n if namedict is not None:\n data = self.data.loc[namedict]\n plt.sca(ax)\n innerargs = [data[a].values for a in args]\n maybe_mappable = func(*innerargs, **kwargs)\n # TODO: better way to verify that an artist is mappable?\n # https://stackoverflow.com/questions/33023036/is-it-possible-to-detect-if-a-matplotlib-artist-is-a-mappable-suitable-for-use-w#33023522\n if maybe_mappable and hasattr(maybe_mappable, \"autoscale_None\"):\n self._mappables.append(maybe_mappable)\n\n self._finalize_grid(*args[:2])\n\n return self"},{"col":0,"comment":"Indices of the advanced indexes subspaces for mixed indexing and vindex.\n ","endLoc":120,"header":"def _advanced_indexer_subspaces(key)","id":1438,"name":"_advanced_indexer_subspaces","nodeType":"Function","startLoc":100,"text":"def _advanced_indexer_subspaces(key):\n \"\"\"Indices of the advanced indexes subspaces for mixed indexing and vindex.\n \"\"\"\n if not isinstance(key, tuple):\n key = (key,)\n advanced_index_positions = [\n i for i, k in enumerate(key) if not isinstance(k, slice)\n ]\n\n if not advanced_index_positions or not _is_contiguous(advanced_index_positions):\n # Nothing to reorder: dimensions on the indexing result are already\n # ordered like vindex. See NumPy's rule for \"Combining advanced and\n # basic indexing\":\n # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#combining-advanced-and-basic-indexing\n return (), ()\n\n non_slices = [k for k in key if not isinstance(k, slice)]\n ndim = len(np.broadcast(*non_slices).shape)\n mixed_positions = advanced_index_positions[0] + np.arange(ndim)\n vindex_positions = np.arange(ndim)\n return mixed_positions, vindex_positions"},{"col":4,"comment":"null","endLoc":2634,"header":"def _rename_dims(self, name_dict)","id":1439,"name":"_rename_dims","nodeType":"Function","startLoc":2633,"text":"def _rename_dims(self, name_dict):\n return {name_dict.get(k, k): v for k, v in self.dims.items()}"},{"col":4,"comment":"null","endLoc":181,"header":"def time_load_dataset_netcdf4_with_time_chunks(self)","id":1441,"name":"time_load_dataset_netcdf4_with_time_chunks","nodeType":"Function","startLoc":180,"text":"def time_load_dataset_netcdf4_with_time_chunks(self):\n xr.open_dataset(self.filepath, engine=\"netcdf4\", chunks=self.time_chunks).load()"},{"col":0,"comment":"Given a non-empty list, does it consist of contiguous integers?","endLoc":97,"header":"def _is_contiguous(positions)","id":1442,"name":"_is_contiguous","nodeType":"Function","startLoc":90,"text":"def _is_contiguous(positions):\n \"\"\"Given a non-empty list, does it consist of contiguous integers?\"\"\"\n previous = positions[0]\n for current in positions[1:]:\n if current != previous + 1:\n return False\n previous = current\n return 
True"},{"attributeType":"null","col":8,"comment":"null","endLoc":202,"id":1444,"name":"_col_wrap","nodeType":"Attribute","startLoc":202,"text":"self._col_wrap"},{"attributeType":"null","col":8,"comment":"null","endLoc":188,"id":1446,"name":"data","nodeType":"Attribute","startLoc":188,"text":"self.data"},{"col":4,"comment":"Value must have dimensionality matching the key.","endLoc":140,"header":"def __setitem__(self, key, value)","id":1447,"name":"__setitem__","nodeType":"Function","startLoc":137,"text":"def __setitem__(self, key, value):\n \"\"\"Value must have dimensionality matching the key.\"\"\"\n mixed_positions, vindex_positions = _advanced_indexer_subspaces(key)\n self._array[key] = np.moveaxis(value, vindex_positions, mixed_positions)"},{"attributeType":"null","col":8,"comment":"null","endLoc":190,"id":1448,"name":"fig","nodeType":"Attribute","startLoc":190,"text":"self.fig"},{"attributeType":"null","col":8,"comment":"null","endLoc":197,"id":1449,"name":"_single_group","nodeType":"Attribute","startLoc":197,"text":"self._single_group"},{"attributeType":"null","col":8,"comment":"null","endLoc":131,"id":1450,"name":"_array","nodeType":"Attribute","startLoc":131,"text":"self._array"},{"attributeType":"null","col":8,"comment":"null","endLoc":198,"id":1451,"name":"_nrow","nodeType":"Attribute","startLoc":198,"text":"self._nrow"},{"attributeType":"null","col":8,"comment":"null","endLoc":203,"id":1452,"name":"_x_var","nodeType":"Attribute","startLoc":203,"text":"self._x_var"},{"col":0,"comment":"null","endLoc":28,"header":"def _select_along_axis(values, idx, axis)","id":1453,"name":"_select_along_axis","nodeType":"Function","startLoc":25,"text":"def _select_along_axis(values, idx, axis):\n other_ind = np.ix_(*[np.arange(s) for s in idx.shape])\n sl = other_ind[:axis] + (idx,) + other_ind[axis:]\n return values[sl]"},{"attributeType":"null","col":8,"comment":"null","endLoc":306,"id":1454,"name":"_hue_label","nodeType":"Attribute","startLoc":306,"text":"self._hue_label"},{"col":0,"comment":"null","endLoc":34,"header":"def nanfirst(values, axis)","id":1455,"name":"nanfirst","nodeType":"Function","startLoc":31,"text":"def nanfirst(values, axis):\n axis = _validate_axis(values, axis)\n idx_first = np.argmax(~pd.isnull(values), axis=axis)\n return _select_along_axis(values, idx_first, axis)"},{"attributeType":"null","col":8,"comment":"null","endLoc":191,"id":1456,"name":"axes","nodeType":"Attribute","startLoc":191,"text":"self.axes"},{"attributeType":"null","col":8,"comment":"null","endLoc":201,"id":1457,"name":"_col_var","nodeType":"Attribute","startLoc":201,"text":"self._col_var"},{"col":0,"comment":"null","endLoc":41,"header":"def nanlast(values, axis)","id":1458,"name":"nanlast","nodeType":"Function","startLoc":37,"text":"def nanlast(values, axis):\n axis = _validate_axis(values, axis)\n rev = (slice(None),) * axis + (slice(None, None, -1),)\n idx_last = -1 - np.argmax(~pd.isnull(values)[rev], axis=axis)\n return _select_along_axis(values, idx_last, axis)"},{"col":4,"comment":"null","endLoc":187,"header":"def time_load_dataset_netcdf4_with_time_chunks_multiprocessing(self)","id":1459,"name":"time_load_dataset_netcdf4_with_time_chunks_multiprocessing","nodeType":"Function","startLoc":183,"text":"def time_load_dataset_netcdf4_with_time_chunks_multiprocessing(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_dataset(\n self.filepath, engine=\"netcdf4\", chunks=self.time_chunks\n 
).load()"},{"attributeType":"null","col":8,"comment":"null","endLoc":189,"id":1460,"name":"name_dicts","nodeType":"Attribute","startLoc":189,"text":"self.name_dicts"},{"attributeType":"null","col":8,"comment":"null","endLoc":205,"id":1461,"name":"_cmap_extend","nodeType":"Attribute","startLoc":205,"text":"self._cmap_extend"},{"attributeType":"null","col":8,"comment":"null","endLoc":204,"id":1462,"name":"_y_var","nodeType":"Attribute","startLoc":204,"text":"self._y_var"},{"col":0,"comment":"\n multi-dimensional interpolation for array-like. Interpolated axes should be\n located in the last position.\n\n Parameters\n ----------\n var: np.ndarray or dask.array.Array\n Array to be interpolated. The final dimension is interpolated.\n x: a list of 1d array.\n Original coordinates. Should not contain NaN.\n new_x: a list of 1d array\n New coordinates. Should not contain NaN.\n method: string\n {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} for\n 1-dimensional itnterpolation.\n {'linear', 'nearest'} for multidimensional interpolation\n **kwargs:\n Optional keyword arguments to be passed to scipy.interpolator\n\n Returns\n -------\n interpolated: array\n Interpolated array\n\n Note\n ----\n This requiers scipy installed.\n\n See Also\n --------\n scipy.interpolate.interp1d\n ","endLoc":599,"header":"def interp_func(var, x, new_x, method, kwargs)","id":1463,"name":"interp_func","nodeType":"Function","startLoc":538,"text":"def interp_func(var, x, new_x, method, kwargs):\n \"\"\"\n multi-dimensional interpolation for array-like. Interpolated axes should be\n located in the last position.\n\n Parameters\n ----------\n var: np.ndarray or dask.array.Array\n Array to be interpolated. The final dimension is interpolated.\n x: a list of 1d array.\n Original coordinates. Should not contain NaN.\n new_x: a list of 1d array\n New coordinates. 
Should not contain NaN.\n method: string\n {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} for\n 1-dimensional itnterpolation.\n {'linear', 'nearest'} for multidimensional interpolation\n **kwargs:\n Optional keyword arguments to be passed to scipy.interpolator\n\n Returns\n -------\n interpolated: array\n Interpolated array\n\n Note\n ----\n This requiers scipy installed.\n\n See Also\n --------\n scipy.interpolate.interp1d\n \"\"\"\n if not x:\n return var.copy()\n\n if len(x) == 1:\n func, kwargs = _get_interpolator(method, vectorizeable_only=True, **kwargs)\n else:\n func, kwargs = _get_interpolator_nd(method, **kwargs)\n\n if isinstance(var, dask_array_type):\n import dask.array as da\n\n _assert_single_chunk(var, range(var.ndim - len(x), var.ndim))\n chunks = var.chunks[: -len(x)] + new_x[0].shape\n drop_axis = range(var.ndim - len(x), var.ndim)\n new_axis = range(var.ndim - len(x), var.ndim - len(x) + new_x[0].ndim)\n return da.map_blocks(\n _interpnd,\n var,\n x,\n new_x,\n func,\n kwargs,\n dtype=var.dtype,\n chunks=chunks,\n new_axis=new_axis,\n drop_axis=drop_axis,\n )\n\n return _interpnd(var, x, new_x, func, kwargs)"},{"col":0,"comment":"null","endLoc":75,"header":"def _ensure_bool_is_ndarray(result, *args)","id":1464,"name":"_ensure_bool_is_ndarray","nodeType":"Function","startLoc":64,"text":"def _ensure_bool_is_ndarray(result, *args):\n # numpy will sometimes return a scalar value from binary comparisons if it\n # can't handle the comparison instead of broadcasting, e.g.,\n # In [10]: 1 == np.array(['a', 'b'])\n # Out[10]: False\n # This function ensures that the result is the appropriate shape in these\n # cases\n if isinstance(result, bool):\n shape = np.broadcast(*args).shape\n constructor = np.ones if result else np.zeros\n result = constructor(shape, dtype=bool)\n return result"},{"attributeType":"null","col":8,"comment":"null","endLoc":207,"id":1465,"name":"_finalized","nodeType":"Attribute","startLoc":207,"text":"self._finalized"},{"attributeType":"null","col":8,"comment":"null","endLoc":206,"id":1466,"name":"_mappables","nodeType":"Attribute","startLoc":206,"text":"self._mappables"},{"col":4,"comment":"null","endLoc":2656,"header":"def _rename_indexes(self, name_dict, dims_set)","id":1467,"name":"_rename_indexes","nodeType":"Function","startLoc":2636,"text":"def _rename_indexes(self, name_dict, dims_set):\n if self._indexes is None:\n return None\n indexes = {}\n for k, v in self.indexes.items():\n new_name = name_dict.get(k, k)\n if new_name not in dims_set:\n continue\n if isinstance(v, pd.MultiIndex):\n new_names = [name_dict.get(k, k) for k in v.names]\n index = pd.MultiIndex(\n v.levels,\n v.labels,\n v.sortorder,\n names=new_names,\n verify_integrity=False,\n )\n else:\n index = pd.Index(v, name=new_name)\n indexes[new_name] = index\n return indexes"},{"attributeType":"null","col":8,"comment":"null","endLoc":192,"id":1468,"name":"row_names","nodeType":"Attribute","startLoc":192,"text":"self.row_names"},{"col":0,"comment":"null","endLoc":81,"header":"def array_eq(self, other)","id":1469,"name":"array_eq","nodeType":"Function","startLoc":78,"text":"def array_eq(self, other):\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", r\"elementwise comparison failed\")\n return _ensure_bool_is_ndarray(self == other, self, other)"},{"attributeType":"null","col":8,"comment":"null","endLoc":199,"id":1470,"name":"_row_var","nodeType":"Attribute","startLoc":199,"text":"self._row_var"},{"col":0,"comment":"null","endLoc":87,"header":"def 
array_ne(self, other)","id":1471,"name":"array_ne","nodeType":"Function","startLoc":84,"text":"def array_ne(self, other):\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", r\"elementwise comparison failed\")\n return _ensure_bool_is_ndarray(self != other, self, other)"},{"attributeType":"null","col":8,"comment":"null","endLoc":193,"id":1472,"name":"col_names","nodeType":"Attribute","startLoc":193,"text":"self.col_names"},{"col":0,"comment":" rolling window with padding. ","endLoc":153,"header":"def rolling_window(a, axis, window, center, fill_value)","id":1473,"name":"rolling_window","nodeType":"Function","startLoc":143,"text":"def rolling_window(a, axis, window, center, fill_value):\n \"\"\" rolling window with padding. \"\"\"\n pads = [(0, 0) for s in a.shape]\n if center:\n start = int(window / 2) # 10 -> 5, 9 -> 4\n end = window - 1 - start\n pads[axis] = (start, end)\n else:\n pads[axis] = (window - 1, 0)\n a = np.pad(a, pads, mode=\"constant\", constant_values=fill_value)\n return _rolling_window(a, window, axis)"},{"attributeType":"null","col":8,"comment":"null","endLoc":305,"id":1474,"name":"_hue_var","nodeType":"Attribute","startLoc":305,"text":"self._hue_var"},{"attributeType":"null","col":8,"comment":"null","endLoc":194,"id":1475,"name":"figlegend","nodeType":"Attribute","startLoc":194,"text":"self.figlegend"},{"attributeType":"null","col":8,"comment":"null","endLoc":157,"id":1476,"name":"filepath","nodeType":"Attribute","startLoc":157,"text":"self.filepath"},{"fileName":"accessor_dt.py","filePath":"xarray/core","id":1477,"nodeType":"File","text":"import numpy as np\nimport pandas as pd\n\nfrom .common import _contains_datetime_like_objects, is_np_datetime_like\nfrom .pycompat import dask_array_type\n\n\ndef _season_from_months(months):\n \"\"\"Compute season (DJF, MAM, JJA, SON) from month ordinal\n \"\"\"\n # TODO: Move \"season\" accessor upstream into pandas\n seasons = np.array([\"DJF\", \"MAM\", \"JJA\", \"SON\"])\n months = np.asarray(months)\n return seasons[(months // 3) % 4]\n\n\ndef _access_through_cftimeindex(values, name):\n \"\"\"Coerce an array of datetime-like values to a CFTimeIndex\n and access requested datetime component\n \"\"\"\n from ..coding.cftimeindex import CFTimeIndex\n\n values_as_cftimeindex = CFTimeIndex(values.ravel())\n if name == \"season\":\n months = values_as_cftimeindex.month\n field_values = _season_from_months(months)\n else:\n field_values = getattr(values_as_cftimeindex, name)\n return field_values.reshape(values.shape)\n\n\ndef _access_through_series(values, name):\n \"\"\"Coerce an array of datetime-like values to a pandas Series and\n access requested datetime component\n \"\"\"\n values_as_series = pd.Series(values.ravel())\n if name == \"season\":\n months = values_as_series.dt.month.values\n field_values = _season_from_months(months)\n else:\n field_values = getattr(values_as_series.dt, name).values\n return field_values.reshape(values.shape)\n\n\ndef _get_date_field(values, name, dtype):\n \"\"\"Indirectly access pandas' libts.get_date_field by wrapping data\n as a Series and calling through `.dt` attribute.\n\n Parameters\n ----------\n values : np.ndarray or dask.array-like\n Array-like container of datetime-like values\n name : str\n Name of datetime field to access\n dtype : dtype-like\n dtype for output date field values\n\n Returns\n -------\n datetime_fields : same type as values\n Array-like of datetime fields accessed for each element in values\n\n \"\"\"\n if is_np_datetime_like(values.dtype):\n 
access_method = _access_through_series\n else:\n access_method = _access_through_cftimeindex\n\n if isinstance(values, dask_array_type):\n from dask.array import map_blocks\n\n return map_blocks(access_method, values, name, dtype=dtype)\n else:\n return access_method(values, name)\n\n\ndef _round_series(values, name, freq):\n \"\"\"Coerce an array of datetime-like values to a pandas Series and\n apply requested rounding\n \"\"\"\n values_as_series = pd.Series(values.ravel())\n method = getattr(values_as_series.dt, name)\n field_values = method(freq=freq).values\n\n return field_values.reshape(values.shape)\n\n\ndef _round_field(values, name, freq):\n \"\"\"Indirectly access pandas rounding functions by wrapping data\n as a Series and calling through `.dt` attribute.\n\n Parameters\n ----------\n values : np.ndarray or dask.array-like\n Array-like container of datetime-like values\n name : str (ceil, floor, round)\n Name of rounding function\n freq : a freq string indicating the rounding resolution\n\n Returns\n -------\n rounded timestamps : same type as values\n Array-like of datetime fields accessed for each element in values\n\n \"\"\"\n if isinstance(values, dask_array_type):\n from dask.array import map_blocks\n\n return map_blocks(_round_series, values, name, freq=freq, dtype=np.datetime64)\n else:\n return _round_series(values, name, freq)\n\n\ndef _strftime_through_cftimeindex(values, date_format):\n \"\"\"Coerce an array of cftime-like values to a CFTimeIndex\n and access requested datetime component\n \"\"\"\n from ..coding.cftimeindex import CFTimeIndex\n\n values_as_cftimeindex = CFTimeIndex(values.ravel())\n\n field_values = values_as_cftimeindex.strftime(date_format)\n return field_values.values.reshape(values.shape)\n\n\ndef _strftime_through_series(values, date_format):\n \"\"\"Coerce an array of datetime-like values to a pandas Series and\n apply string formatting\n \"\"\"\n values_as_series = pd.Series(values.ravel())\n strs = values_as_series.dt.strftime(date_format)\n return strs.values.reshape(values.shape)\n\n\ndef _strftime(values, date_format):\n if is_np_datetime_like(values.dtype):\n access_method = _strftime_through_series\n else:\n access_method = _strftime_through_cftimeindex\n if isinstance(values, dask_array_type):\n from dask.array import map_blocks\n\n return map_blocks(access_method, values, date_format)\n else:\n return access_method(values, date_format)\n\n\nclass DatetimeAccessor:\n \"\"\"Access datetime fields for DataArrays with datetime-like dtypes.\n\n Similar to pandas, fields can be accessed through the `.dt` attribute\n for applicable DataArrays:\n\n >>> ds = xarray.Dataset({'time': pd.date_range(start='2000/01/01',\n ... freq='D', periods=100)})\n >>> ds.time.dt\n \n >>> ds.time.dt.dayofyear[:5]\n \n array([1, 2, 3, 4, 5], dtype=int32)\n Coordinates:\n * time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...\n\n All of the pandas fields are accessible here. Note that these fields are\n not calendar-aware; if your datetimes are encoded with a non-Gregorian\n calendar (e.g. 
a 360-day calendar) using cftime, then some fields like\n `dayofyear` may not be accurate.\n\n \"\"\"\n\n def __init__(self, obj):\n if not _contains_datetime_like_objects(obj):\n raise TypeError(\n \"'dt' accessor only available for \"\n \"DataArray with datetime64 timedelta64 dtype or \"\n \"for arrays containing cftime datetime \"\n \"objects.\"\n )\n self._obj = obj\n\n def _tslib_field_accessor(name, docstring=None, dtype=None):\n def f(self, dtype=dtype):\n if dtype is None:\n dtype = self._obj.dtype\n obj_type = type(self._obj)\n result = _get_date_field(self._obj.data, name, dtype)\n return obj_type(\n result, name=name, coords=self._obj.coords, dims=self._obj.dims\n )\n\n f.__name__ = name\n f.__doc__ = docstring\n return property(f)\n\n year = _tslib_field_accessor(\"year\", \"The year of the datetime\", np.int64)\n month = _tslib_field_accessor(\n \"month\", \"The month as January=1, December=12\", np.int64\n )\n day = _tslib_field_accessor(\"day\", \"The days of the datetime\", np.int64)\n hour = _tslib_field_accessor(\"hour\", \"The hours of the datetime\", np.int64)\n minute = _tslib_field_accessor(\"minute\", \"The minutes of the datetime\", np.int64)\n second = _tslib_field_accessor(\"second\", \"The seconds of the datetime\", np.int64)\n microsecond = _tslib_field_accessor(\n \"microsecond\", \"The microseconds of the datetime\", np.int64\n )\n nanosecond = _tslib_field_accessor(\n \"nanosecond\", \"The nanoseconds of the datetime\", np.int64\n )\n weekofyear = _tslib_field_accessor(\n \"weekofyear\", \"The week ordinal of the year\", np.int64\n )\n week = weekofyear\n dayofweek = _tslib_field_accessor(\n \"dayofweek\", \"The day of the week with Monday=0, Sunday=6\", np.int64\n )\n weekday = dayofweek\n\n weekday_name = _tslib_field_accessor(\n \"weekday_name\", \"The name of day in a week (ex: Friday)\", object\n )\n\n dayofyear = _tslib_field_accessor(\n \"dayofyear\", \"The ordinal day of the year\", np.int64\n )\n quarter = _tslib_field_accessor(\"quarter\", \"The quarter of the date\")\n days_in_month = _tslib_field_accessor(\n \"days_in_month\", \"The number of days in the month\", np.int64\n )\n daysinmonth = days_in_month\n\n season = _tslib_field_accessor(\"season\", \"Season of the year (ex: DJF)\", object)\n\n time = _tslib_field_accessor(\n \"time\", \"Timestamps corresponding to datetimes\", object\n )\n\n def _tslib_round_accessor(self, name, freq):\n obj_type = type(self._obj)\n result = _round_field(self._obj.data, name, freq)\n return obj_type(result, name=name, coords=self._obj.coords, dims=self._obj.dims)\n\n def floor(self, freq):\n \"\"\"\n Round timestamps downward to specified frequency resolution.\n\n Parameters\n ----------\n freq : a freq string indicating the rounding resolution\n e.g. 'D' for daily resolution\n\n Returns\n -------\n floor-ed timestamps : same type as values\n Array-like of datetime fields accessed for each element in values\n \"\"\"\n\n return self._tslib_round_accessor(\"floor\", freq)\n\n def ceil(self, freq):\n \"\"\"\n Round timestamps upward to specified frequency resolution.\n\n Parameters\n ----------\n freq : a freq string indicating the rounding resolution\n e.g. 
'D' for daily resolution\n\n Returns\n -------\n ceil-ed timestamps : same type as values\n Array-like of datetime fields accessed for each element in values\n \"\"\"\n return self._tslib_round_accessor(\"ceil\", freq)\n\n def round(self, freq):\n \"\"\"\n Round timestamps to specified frequency resolution.\n\n Parameters\n ----------\n freq : a freq string indicating the rounding resolution\n e.g. 'D' for daily resolution\n\n Returns\n -------\n rounded timestamps : same type as values\n Array-like of datetime fields accessed for each element in values\n \"\"\"\n return self._tslib_round_accessor(\"round\", freq)\n\n def strftime(self, date_format):\n '''\n Return an array of formatted strings specified by date_format, which\n supports the same string format as the python standard library. Details\n of the string format can be found in `python string format doc\n `__\n\n Parameters\n ----------\n date_format : str\n date format string (e.g. \"%Y-%m-%d\")\n\n Returns\n -------\n formatted strings : same type as values\n Array-like of strings formatted for each element in values\n\n Examples\n --------\n >>> rng = xr.Dataset({'time': datetime.datetime(2000, 1, 1)})\n >>> rng['time'].dt.strftime('%B %d, %Y, %r')\n \n array('January 01, 2000, 12:00:00 AM', dtype=object)\n \"\"\"\n\n '''\n obj_type = type(self._obj)\n\n result = _strftime(self._obj.data, date_format)\n\n return obj_type(\n result, name=\"strftime\", coords=self._obj.coords, dims=self._obj.dims\n )\n"},{"col":0,"comment":"null","endLoc":228,"header":"def _create_bottleneck_method(name, npmodule=np)","id":1478,"name":"_create_bottleneck_method","nodeType":"Function","startLoc":205,"text":"def _create_bottleneck_method(name, npmodule=np):\n def f(values, axis=None, **kwargs):\n dtype = kwargs.get(\"dtype\", None)\n bn_func = getattr(bn, name, None)\n\n if (\n _USE_BOTTLENECK\n and isinstance(values, np.ndarray)\n and bn_func is not None\n and not isinstance(axis, tuple)\n and values.dtype.kind in \"uifc\"\n and values.dtype.isnative\n and (dtype is None or np.dtype(dtype) == values.dtype)\n ):\n # bottleneck does not take care dtype, min_count\n kwargs.pop(\"dtype\", None)\n result = bn_func(values, axis=axis, **kwargs)\n else:\n result = getattr(npmodule, name)(values, axis=axis, **kwargs)\n\n return result\n\n f.__name__ = name\n return f"},{"attributeType":"null","col":8,"comment":"null","endLoc":158,"id":1479,"name":"format","nodeType":"Attribute","startLoc":158,"text":"self.format"},{"attributeType":"null","col":8,"comment":"null","endLoc":412,"id":1480,"name":"cbar","nodeType":"Attribute","startLoc":412,"text":"self.cbar"},{"className":"IOReadSingleNetCDF3Dask","col":0,"comment":"null","endLoc":219,"id":1481,"nodeType":"Class","startLoc":190,"text":"class IOReadSingleNetCDF3Dask(IOReadSingleNetCDF4Dask):\n def setup(self):\n\n requires_dask()\n\n self.make_ds()\n\n self.filepath = \"test_single_file.nc3.nc\"\n self.format = \"NETCDF3_64BIT\"\n self.ds.to_netcdf(self.filepath, format=self.format)\n\n def time_load_dataset_scipy_with_block_chunks(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_dataset(\n self.filepath, engine=\"scipy\", chunks=self.block_chunks\n ).load()\n\n def time_load_dataset_scipy_with_block_chunks_oindexing(self):\n ds = xr.open_dataset(self.filepath, engine=\"scipy\", chunks=self.block_chunks)\n ds = ds.isel(**self.oinds).load()\n\n def time_load_dataset_scipy_with_block_chunks_vindexing(self):\n ds = xr.open_dataset(self.filepath, engine=\"scipy\", chunks=self.block_chunks)\n 
ds = ds.isel(**self.vinds).load()\n\n def time_load_dataset_scipy_with_time_chunks(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_dataset(\n self.filepath, engine=\"scipy\", chunks=self.time_chunks\n ).load()"},{"col":0,"comment":"helper function to select the appropriate interpolator class\n\n returns interpolator class and keyword arguments for the class\n ","endLoc":392,"header":"def _get_interpolator(method, vectorizeable_only=False, **kwargs)","id":1482,"name":"_get_interpolator","nodeType":"Function","startLoc":327,"text":"def _get_interpolator(method, vectorizeable_only=False, **kwargs):\n \"\"\"helper function to select the appropriate interpolator class\n\n returns interpolator class and keyword arguments for the class\n \"\"\"\n interp1d_methods = [\n \"linear\",\n \"nearest\",\n \"zero\",\n \"slinear\",\n \"quadratic\",\n \"cubic\",\n \"polynomial\",\n ]\n valid_methods = interp1d_methods + [\n \"barycentric\",\n \"krog\",\n \"pchip\",\n \"spline\",\n \"akima\",\n ]\n\n has_scipy = True\n try:\n from scipy import interpolate\n except ImportError:\n has_scipy = False\n\n # prioritize scipy.interpolate\n if (\n method == \"linear\"\n and not kwargs.get(\"fill_value\", None) == \"extrapolate\"\n and not vectorizeable_only\n ):\n kwargs.update(method=method)\n interp_class = NumpyInterpolator\n\n elif method in valid_methods:\n if not has_scipy:\n raise ImportError(\"Interpolation with method `%s` requires scipy\" % method)\n\n if method in interp1d_methods:\n kwargs.update(method=method)\n interp_class = ScipyInterpolator\n elif vectorizeable_only:\n raise ValueError(\n \"{} is not a vectorizeable interpolator. \"\n \"Available methods are {}\".format(method, interp1d_methods)\n )\n elif method == \"barycentric\":\n interp_class = interpolate.BarycentricInterpolator\n elif method == \"krog\":\n interp_class = interpolate.KroghInterpolator\n elif method == \"pchip\":\n interp_class = interpolate.PchipInterpolator\n elif method == \"spline\":\n kwargs.update(method=method)\n interp_class = SplineInterpolator\n elif method == \"akima\":\n interp_class = interpolate.Akima1DInterpolator\n else:\n raise ValueError(\"%s is not a valid scipy interpolator\" % method)\n else:\n raise ValueError(\"%s is not a valid interpolator\" % method)\n\n return interp_class, kwargs"},{"col":4,"comment":"null","endLoc":199,"header":"def setup(self)","id":1483,"name":"setup","nodeType":"Function","startLoc":191,"text":"def setup(self):\n\n requires_dask()\n\n self.make_ds()\n\n self.filepath = \"test_single_file.nc3.nc\"\n self.format = \"NETCDF3_64BIT\"\n self.ds.to_netcdf(self.filepath, format=self.format)"},{"attributeType":"null","col":8,"comment":"null","endLoc":200,"id":1484,"name":"_ncol","nodeType":"Attribute","startLoc":200,"text":"self._ncol"},{"col":0,"comment":"\n Contour plot of 2d DataArray\n\n Wraps :func:`matplotlib:matplotlib.pyplot.contour`\n ","endLoc":915,"header":"@_plot2d\ndef contour(x, y, z, ax, **kwargs)","id":1485,"name":"contour","nodeType":"Function","startLoc":907,"text":"@_plot2d\ndef contour(x, y, z, ax, **kwargs):\n \"\"\"\n Contour plot of 2d DataArray\n\n Wraps :func:`matplotlib:matplotlib.pyplot.contour`\n \"\"\"\n primitive = ax.contour(x, y, z, **kwargs)\n return primitive"},{"col":0,"comment":"\n Filled contour plot of 2d DataArray\n\n Wraps :func:`matplotlib:matplotlib.pyplot.contourf`\n ","endLoc":926,"header":"@_plot2d\ndef contourf(x, y, z, ax, 
**kwargs)","id":1486,"name":"contourf","nodeType":"Function","startLoc":918,"text":"@_plot2d\ndef contourf(x, y, z, ax, **kwargs):\n \"\"\"\n Filled contour plot of 2d DataArray\n\n Wraps :func:`matplotlib:matplotlib.pyplot.contourf`\n \"\"\"\n primitive = ax.contourf(x, y, z, **kwargs)\n return primitive"},{"col":0,"comment":"\n Histogram of DataArray\n\n Wraps :func:`matplotlib:matplotlib.pyplot.hist`\n\n Plots N dimensional arrays by first flattening the array.\n\n Parameters\n ----------\n darray : DataArray\n Can be any dimension\n figsize : tuple, optional\n A tuple (width, height) of the figure in inches.\n Mutually exclusive with ``size`` and ``ax``.\n aspect : scalar, optional\n Aspect ratio of plot, so that ``aspect * size`` gives the width in\n inches. Only used if a ``size`` is provided.\n size : scalar, optional\n If provided, create a new figure for the plot with the given size.\n Height (in inches) of each plot. See also: ``aspect``.\n ax : matplotlib axes object, optional\n Axis on which to plot this figure. By default, use the current axis.\n Mutually exclusive with ``size`` and ``figsize``.\n **kwargs : optional\n Additional keyword arguments to matplotlib.pyplot.hist\n\n ","endLoc":448,"header":"def hist(\n darray,\n figsize=None,\n size=None,\n aspect=None,\n ax=None,\n xincrease=None,\n yincrease=None,\n xscale=None,\n yscale=None,\n xticks=None,\n yticks=None,\n xlim=None,\n ylim=None,\n **kwargs\n)","id":1487,"name":"hist","nodeType":"Function","startLoc":393,"text":"def hist(\n darray,\n figsize=None,\n size=None,\n aspect=None,\n ax=None,\n xincrease=None,\n yincrease=None,\n xscale=None,\n yscale=None,\n xticks=None,\n yticks=None,\n xlim=None,\n ylim=None,\n **kwargs\n):\n \"\"\"\n Histogram of DataArray\n\n Wraps :func:`matplotlib:matplotlib.pyplot.hist`\n\n Plots N dimensional arrays by first flattening the array.\n\n Parameters\n ----------\n darray : DataArray\n Can be any dimension\n figsize : tuple, optional\n A tuple (width, height) of the figure in inches.\n Mutually exclusive with ``size`` and ``ax``.\n aspect : scalar, optional\n Aspect ratio of plot, so that ``aspect * size`` gives the width in\n inches. Only used if a ``size`` is provided.\n size : scalar, optional\n If provided, create a new figure for the plot with the given size.\n Height (in inches) of each plot. See also: ``aspect``.\n ax : matplotlib axes object, optional\n Axis on which to plot this figure. By default, use the current axis.\n Mutually exclusive with ``size`` and ``figsize``.\n **kwargs : optional\n Additional keyword arguments to matplotlib.pyplot.hist\n\n \"\"\"\n ax = get_axis(figsize, size, aspect, ax)\n\n no_nan = np.ravel(darray.values)\n no_nan = no_nan[pd.notnull(no_nan)]\n\n primitive = ax.hist(no_nan, **kwargs)\n\n ax.set_title(\"Histogram\")\n ax.set_xlabel(label_from_attrs(darray))\n\n _update_axes(ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim)\n\n return primitive"},{"col":4,"comment":"Returns a new DataArray with swapped dimensions.\n\n Parameters\n ----------\n dims_dict : dict-like\n Dictionary whose keys are current dimension names and whose values\n are new names. 
Each value must already be a coordinate on this\n array.\n\n Returns\n -------\n swapped : DataArray\n DataArray with swapped dimensions.\n\n Examples\n --------\n >>> arr = xr.DataArray(data=[0, 1], dims=\"x\",\n coords={\"x\": [\"a\", \"b\"], \"y\": (\"x\", [0, 1])})\n >>> arr\n \n array([0, 1])\n Coordinates:\n * x (x) >> arr.swap_dims({\"x\": \"y\"})\n \n array([0, 1])\n Coordinates:\n x (y) \"DataArray\"","id":1488,"name":"swap_dims","nodeType":"Function","startLoc":1449,"text":"def swap_dims(self, dims_dict: Mapping[Hashable, Hashable]) -> \"DataArray\":\n \"\"\"Returns a new DataArray with swapped dimensions.\n\n Parameters\n ----------\n dims_dict : dict-like\n Dictionary whose keys are current dimension names and whose values\n are new names. Each value must already be a coordinate on this\n array.\n\n Returns\n -------\n swapped : DataArray\n DataArray with swapped dimensions.\n\n Examples\n --------\n >>> arr = xr.DataArray(data=[0, 1], dims=\"x\",\n coords={\"x\": [\"a\", \"b\"], \"y\": (\"x\", [0, 1])})\n >>> arr\n \n array([0, 1])\n Coordinates:\n * x (x) >> arr.swap_dims({\"x\": \"y\"})\n \n array([0, 1])\n Coordinates:\n x (y) >> ds = xr.Dataset(data_vars={\"a\": (\"x\", [5, 7]), \"b\": (\"x\", [0.1, 2.4])},\n coords={\"x\": [\"a\", \"b\"], \"y\": (\"x\", [0, 1])})\n >>> ds\n \n Dimensions: (x: 2)\n Coordinates:\n * x (x) >> ds.swap_dims({\"x\": \"y\"})\n \n Dimensions: (y: 2)\n Coordinates:\n x (y) \"Dataset\"","id":1490,"name":"swap_dims","nodeType":"Function","startLoc":2784,"text":"def swap_dims(\n self, dims_dict: Mapping[Hashable, Hashable], inplace: bool = None\n ) -> \"Dataset\":\n \"\"\"Returns a new object with swapped dimensions.\n\n Parameters\n ----------\n dims_dict : dict-like\n Dictionary whose keys are current dimension names and whose values\n are new names. Each value must already be a variable in the\n dataset.\n\n Returns\n -------\n swapped : Dataset\n Dataset with swapped dimensions.\n\n Examples\n --------\n >>> ds = xr.Dataset(data_vars={\"a\": (\"x\", [5, 7]), \"b\": (\"x\", [0.1, 2.4])},\n coords={\"x\": [\"a\", \"b\"], \"y\": (\"x\", [0, 1])})\n >>> ds\n \n Dimensions: (x: 2)\n Coordinates:\n * x (x) >> ds.swap_dims({\"x\": \"y\"})\n \n Dimensions: (y: 2)\n Coordinates:\n x (y) >> ds = xarray.Dataset({'time': pd.date_range(start='2000/01/01',\n ... freq='D', periods=100)})\n >>> ds.time.dt\n \n >>> ds.time.dt.dayofyear[:5]\n \n array([1, 2, 3, 4, 5], dtype=int32)\n Coordinates:\n * time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...\n\n All of the pandas fields are accessible here. Note that these fields are\n not calendar-aware; if your datetimes are encoded with a non-Gregorian\n calendar (e.g. a 360-day calendar) using cftime, then some fields like\n `dayofyear` may not be accurate.\n\n ","endLoc":323,"id":1495,"nodeType":"Class","startLoc":148,"text":"class DatetimeAccessor:\n \"\"\"Access datetime fields for DataArrays with datetime-like dtypes.\n\n Similar to pandas, fields can be accessed through the `.dt` attribute\n for applicable DataArrays:\n\n >>> ds = xarray.Dataset({'time': pd.date_range(start='2000/01/01',\n ... freq='D', periods=100)})\n >>> ds.time.dt\n \n >>> ds.time.dt.dayofyear[:5]\n \n array([1, 2, 3, 4, 5], dtype=int32)\n Coordinates:\n * time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...\n\n All of the pandas fields are accessible here. Note that these fields are\n not calendar-aware; if your datetimes are encoded with a non-Gregorian\n calendar (e.g. 
a 360-day calendar) using cftime, then some fields like\n `dayofyear` may not be accurate.\n\n \"\"\"\n\n def __init__(self, obj):\n if not _contains_datetime_like_objects(obj):\n raise TypeError(\n \"'dt' accessor only available for \"\n \"DataArray with datetime64 timedelta64 dtype or \"\n \"for arrays containing cftime datetime \"\n \"objects.\"\n )\n self._obj = obj\n\n def _tslib_field_accessor(name, docstring=None, dtype=None):\n def f(self, dtype=dtype):\n if dtype is None:\n dtype = self._obj.dtype\n obj_type = type(self._obj)\n result = _get_date_field(self._obj.data, name, dtype)\n return obj_type(\n result, name=name, coords=self._obj.coords, dims=self._obj.dims\n )\n\n f.__name__ = name\n f.__doc__ = docstring\n return property(f)\n\n year = _tslib_field_accessor(\"year\", \"The year of the datetime\", np.int64)\n month = _tslib_field_accessor(\n \"month\", \"The month as January=1, December=12\", np.int64\n )\n day = _tslib_field_accessor(\"day\", \"The days of the datetime\", np.int64)\n hour = _tslib_field_accessor(\"hour\", \"The hours of the datetime\", np.int64)\n minute = _tslib_field_accessor(\"minute\", \"The minutes of the datetime\", np.int64)\n second = _tslib_field_accessor(\"second\", \"The seconds of the datetime\", np.int64)\n microsecond = _tslib_field_accessor(\n \"microsecond\", \"The microseconds of the datetime\", np.int64\n )\n nanosecond = _tslib_field_accessor(\n \"nanosecond\", \"The nanoseconds of the datetime\", np.int64\n )\n weekofyear = _tslib_field_accessor(\n \"weekofyear\", \"The week ordinal of the year\", np.int64\n )\n week = weekofyear\n dayofweek = _tslib_field_accessor(\n \"dayofweek\", \"The day of the week with Monday=0, Sunday=6\", np.int64\n )\n weekday = dayofweek\n\n weekday_name = _tslib_field_accessor(\n \"weekday_name\", \"The name of day in a week (ex: Friday)\", object\n )\n\n dayofyear = _tslib_field_accessor(\n \"dayofyear\", \"The ordinal day of the year\", np.int64\n )\n quarter = _tslib_field_accessor(\"quarter\", \"The quarter of the date\")\n days_in_month = _tslib_field_accessor(\n \"days_in_month\", \"The number of days in the month\", np.int64\n )\n daysinmonth = days_in_month\n\n season = _tslib_field_accessor(\"season\", \"Season of the year (ex: DJF)\", object)\n\n time = _tslib_field_accessor(\n \"time\", \"Timestamps corresponding to datetimes\", object\n )\n\n def _tslib_round_accessor(self, name, freq):\n obj_type = type(self._obj)\n result = _round_field(self._obj.data, name, freq)\n return obj_type(result, name=name, coords=self._obj.coords, dims=self._obj.dims)\n\n def floor(self, freq):\n \"\"\"\n Round timestamps downward to specified frequency resolution.\n\n Parameters\n ----------\n freq : a freq string indicating the rounding resolution\n e.g. 'D' for daily resolution\n\n Returns\n -------\n floor-ed timestamps : same type as values\n Array-like of datetime fields accessed for each element in values\n \"\"\"\n\n return self._tslib_round_accessor(\"floor\", freq)\n\n def ceil(self, freq):\n \"\"\"\n Round timestamps upward to specified frequency resolution.\n\n Parameters\n ----------\n freq : a freq string indicating the rounding resolution\n e.g. 
'D' for daily resolution\n\n Returns\n -------\n ceil-ed timestamps : same type as values\n Array-like of datetime fields accessed for each element in values\n \"\"\"\n return self._tslib_round_accessor(\"ceil\", freq)\n\n def round(self, freq):\n \"\"\"\n Round timestamps to specified frequency resolution.\n\n Parameters\n ----------\n freq : a freq string indicating the rounding resolution\n e.g. 'D' for daily resolution\n\n Returns\n -------\n rounded timestamps : same type as values\n Array-like of datetime fields accessed for each element in values\n \"\"\"\n return self._tslib_round_accessor(\"round\", freq)\n\n def strftime(self, date_format):\n '''\n Return an array of formatted strings specified by date_format, which\n supports the same string format as the python standard library. Details\n of the string format can be found in `python string format doc\n `__\n\n Parameters\n ----------\n date_format : str\n date format string (e.g. \"%Y-%m-%d\")\n\n Returns\n -------\n formatted strings : same type as values\n Array-like of strings formatted for each element in values\n\n Examples\n --------\n >>> rng = xr.Dataset({'time': datetime.datetime(2000, 1, 1)})\n >>> rng['time'].dt.strftime('%B %d, %Y, %r')\n \n array('January 01, 2000, 12:00:00 AM', dtype=object)\n \"\"\"\n\n '''\n obj_type = type(self._obj)\n\n result = _strftime(self._obj.data, date_format)\n\n return obj_type(\n result, name=\"strftime\", coords=self._obj.coords, dims=self._obj.dims\n )"},{"col":0,"comment":"null","endLoc":439,"header":"def _assert_single_chunk(var, axes)","id":1496,"name":"_assert_single_chunk","nodeType":"Function","startLoc":433,"text":"def _assert_single_chunk(var, axes):\n for axis in axes:\n if len(var.chunks[axis]) > 1 or var.chunks[axis][0] < var.shape[axis]:\n raise NotImplementedError(\n \"Chunking along the dimension to be interpolated \"\n \"({}) is not yet supported.\".format(axis)\n )"},{"col":4,"comment":"null","endLoc":179,"header":"def __init__(self, obj)","id":1497,"name":"__init__","nodeType":"Function","startLoc":171,"text":"def __init__(self, obj):\n if not _contains_datetime_like_objects(obj):\n raise TypeError(\n \"'dt' accessor only available for \"\n \"DataArray with datetime64 timedelta64 dtype or \"\n \"for arrays containing cftime datetime \"\n \"objects.\"\n )\n self._obj = obj"},{"attributeType":"function","col":0,"comment":"null","endLoc":232,"id":1498,"name":"nanmax","nodeType":"Attribute","startLoc":232,"text":"nanmax"},{"attributeType":"function","col":0,"comment":"null","endLoc":233,"id":1499,"name":"nanmean","nodeType":"Attribute","startLoc":233,"text":"nanmean"},{"col":0,"comment":"null","endLoc":626,"header":"def _interpnd(var, x, new_x, func, kwargs)","id":1500,"name":"_interpnd","nodeType":"Function","startLoc":613,"text":"def _interpnd(var, x, new_x, func, kwargs):\n x, new_x = _floatize_x(x, new_x)\n\n if len(x) == 1:\n return _interp1d(var, x, new_x, func, kwargs)\n\n # move the interpolation axes to the start position\n var = var.transpose(range(-len(x), var.ndim - len(x)))\n # stack new_x to 1 vector, with reshape\n xi = np.stack([x1.values.ravel() for x1 in new_x], axis=-1)\n rslt = func(x, var, xi, **kwargs)\n # move back the interpolation axes to the last position\n rslt = rslt.transpose(range(-rslt.ndim + 1, 1))\n return rslt.reshape(rslt.shape[:-1] + new_x[0].shape)"},{"col":0,"comment":" Make x and new_x float.\n This is particulary useful for datetime dtype.\n x, new_x: tuple of np.ndarray\n ","endLoc":474,"header":"def _floatize_x(x, 
new_x)","id":1501,"name":"_floatize_x","nodeType":"Function","startLoc":457,"text":"def _floatize_x(x, new_x):\n \"\"\" Make x and new_x float.\n This is particulary useful for datetime dtype.\n x, new_x: tuple of np.ndarray\n \"\"\"\n x = list(x)\n new_x = list(new_x)\n for i in range(len(x)):\n if _contains_datetime_like_objects(x[i]):\n # Scipy casts coordinates to np.float64, which is not accurate\n # enough for datetime64 (uses 64bit integer).\n # We assume that the most of the bits are used to represent the\n # offset (min(x)) and the variation (x - min(x)) can be\n # represented by float.\n xmin = x[i].values.min()\n x[i] = x[i]._to_numeric(offset=xmin, dtype=np.float64)\n new_x[i] = new_x[i]._to_numeric(offset=xmin, dtype=np.float64)\n return x, new_x"},{"attributeType":"function","col":0,"comment":"null","endLoc":234,"id":1502,"name":"nanmedian","nodeType":"Attribute","startLoc":234,"text":"nanmedian"},{"attributeType":"function","col":0,"comment":"null","endLoc":235,"id":1503,"name":"nanvar","nodeType":"Attribute","startLoc":235,"text":"nanvar"},{"col":4,"comment":"null","endLoc":209,"header":"def time_load_dataset_scipy_with_block_chunks_oindexing(self)","id":1504,"name":"time_load_dataset_scipy_with_block_chunks_oindexing","nodeType":"Function","startLoc":207,"text":"def time_load_dataset_scipy_with_block_chunks_oindexing(self):\n ds = xr.open_dataset(self.filepath, engine=\"scipy\", chunks=self.block_chunks)\n ds = ds.isel(**self.oinds).load()"},{"attributeType":"function","col":0,"comment":"null","endLoc":236,"id":1505,"name":"nanstd","nodeType":"Attribute","startLoc":236,"text":"nanstd"},{"attributeType":"function","col":0,"comment":"null","endLoc":237,"id":1506,"name":"nanprod","nodeType":"Attribute","startLoc":237,"text":"nanprod"},{"col":0,"comment":"null","endLoc":610,"header":"def _interp1d(var, x, new_x, func, kwargs)","id":1507,"name":"_interp1d","nodeType":"Function","startLoc":602,"text":"def _interp1d(var, x, new_x, func, kwargs):\n # x, new_x are tuples of size 1.\n x, new_x = x[0], new_x[0]\n rslt = func(x, var, assume_sorted=True, **kwargs)(np.ravel(new_x))\n if new_x.ndim > 1:\n return rslt.reshape(var.shape[:-1] + new_x.shape)\n if new_x.ndim == 0:\n return rslt[..., -1]\n return rslt"},{"col":4,"comment":"null","endLoc":193,"header":"def _tslib_field_accessor(name, docstring=None, dtype=None)","id":1508,"name":"_tslib_field_accessor","nodeType":"Function","startLoc":181,"text":"def _tslib_field_accessor(name, docstring=None, dtype=None):\n def f(self, dtype=dtype):\n if dtype is None:\n dtype = self._obj.dtype\n obj_type = type(self._obj)\n result = _get_date_field(self._obj.data, name, dtype)\n return obj_type(\n result, name=name, coords=self._obj.coords, dims=self._obj.dims\n )\n\n f.__name__ = name\n f.__doc__ = docstring\n return property(f)"},{"attributeType":"function","col":0,"comment":"null","endLoc":238,"id":1509,"name":"nancumsum","nodeType":"Attribute","startLoc":238,"text":"nancumsum"},{"attributeType":"function","col":0,"comment":"null","endLoc":239,"id":1510,"name":"nancumprod","nodeType":"Attribute","startLoc":239,"text":"nancumprod"},{"col":0,"comment":"Indirectly access pandas' libts.get_date_field by wrapping data\n as a Series and calling through `.dt` attribute.\n\n Parameters\n ----------\n values : np.ndarray or dask.array-like\n Array-like container of datetime-like values\n name : str\n Name of datetime field to access\n dtype : dtype-like\n dtype for output date field values\n\n Returns\n -------\n datetime_fields : same type as 
values\n Array-like of datetime fields accessed for each element in values\n\n ","endLoc":74,"header":"def _get_date_field(values, name, dtype)","id":1511,"name":"_get_date_field","nodeType":"Function","startLoc":45,"text":"def _get_date_field(values, name, dtype):\n \"\"\"Indirectly access pandas' libts.get_date_field by wrapping data\n as a Series and calling through `.dt` attribute.\n\n Parameters\n ----------\n values : np.ndarray or dask.array-like\n Array-like container of datetime-like values\n name : str\n Name of datetime field to access\n dtype : dtype-like\n dtype for output date field values\n\n Returns\n -------\n datetime_fields : same type as values\n Array-like of datetime fields accessed for each element in values\n\n \"\"\"\n if is_np_datetime_like(values.dtype):\n access_method = _access_through_series\n else:\n access_method = _access_through_cftimeindex\n\n if isinstance(values, dask_array_type):\n from dask.array import map_blocks\n\n return map_blocks(access_method, values, name, dtype=dtype)\n else:\n return access_method(values, name)"},{"col":4,"comment":"Return a new object with an additional axis (or axes) inserted at\n the corresponding position in the array shape. The new object is a\n view into the underlying array, not a copy.\n\n\n If dim is already a scalar coordinate, it will be promoted to a 1D\n coordinate consisting of a single value.\n\n Parameters\n ----------\n dim : hashable, sequence of hashable, dict, or None\n Dimensions to include on the new variable.\n If provided as str or sequence of str, then dimensions are inserted\n with length 1. If provided as a dict, then the keys are the new\n dimensions and the values are either integers (giving the length of\n the new dimensions) or sequence/ndarray (giving the coordinates of\n the new dimensions).\n axis : integer, list (or tuple) of integers, or None\n Axis position(s) where new axis is to be inserted (position(s) on\n the result array). If a list (or tuple) of integers is passed,\n multiple axes are inserted. In this case, dim arguments should be\n same length list. If axis=None is passed, all the axes will be\n inserted to the start of the result array.\n **dim_kwargs : int or sequence/ndarray\n The keywords are arbitrary dimensions being inserted and the values\n are either the lengths of the new dims (if int is given), or their\n coordinates. Note, this is an alternative to passing a dict to the\n dim kwarg and will only be used if dim is None.\n\n Returns\n -------\n expanded : same type as caller\n This object, but with an additional dimension(s).\n ","endLoc":1541,"header":"def expand_dims(\n self,\n dim: Union[None, Hashable, Sequence[Hashable], Mapping[Hashable, Any]] = None,\n axis=None,\n **dim_kwargs: Any\n ) -> \"DataArray\"","id":1512,"name":"expand_dims","nodeType":"Function","startLoc":1490,"text":"def expand_dims(\n self,\n dim: Union[None, Hashable, Sequence[Hashable], Mapping[Hashable, Any]] = None,\n axis=None,\n **dim_kwargs: Any\n ) -> \"DataArray\":\n \"\"\"Return a new object with an additional axis (or axes) inserted at\n the corresponding position in the array shape. The new object is a\n view into the underlying array, not a copy.\n\n\n If dim is already a scalar coordinate, it will be promoted to a 1D\n coordinate consisting of a single value.\n\n Parameters\n ----------\n dim : hashable, sequence of hashable, dict, or None\n Dimensions to include on the new variable.\n If provided as str or sequence of str, then dimensions are inserted\n with length 1. 
If provided as a dict, then the keys are the new\n dimensions and the values are either integers (giving the length of\n the new dimensions) or sequence/ndarray (giving the coordinates of\n the new dimensions).\n axis : integer, list (or tuple) of integers, or None\n Axis position(s) where new axis is to be inserted (position(s) on\n the result array). If a list (or tuple) of integers is passed,\n multiple axes are inserted. In this case, dim arguments should be\n same length list. If axis=None is passed, all the axes will be\n inserted to the start of the result array.\n **dim_kwargs : int or sequence/ndarray\n The keywords are arbitrary dimensions being inserted and the values\n are either the lengths of the new dims (if int is given), or their\n coordinates. Note, this is an alternative to passing a dict to the\n dim kwarg and will only be used if dim is None.\n\n Returns\n -------\n expanded : same type as caller\n This object, but with an additional dimension(s).\n \"\"\"\n if isinstance(dim, int):\n raise TypeError(\"dim should be hashable or sequence/mapping of hashables\")\n elif isinstance(dim, Sequence) and not isinstance(dim, str):\n if len(dim) != len(set(dim)):\n raise ValueError(\"dims should not contain duplicate values.\")\n dim = dict.fromkeys(dim, 1)\n elif dim is not None and not isinstance(dim, Mapping):\n dim = {cast(Hashable, dim): 1}\n\n dim = either_dict_or_kwargs(dim, dim_kwargs, \"expand_dims\")\n ds = self._to_temp_dataset().expand_dims(dim, axis)\n return self._from_temp_dataset(ds)"},{"attributeType":"function","col":0,"comment":"null","endLoc":240,"id":1513,"name":"nanargmin","nodeType":"Attribute","startLoc":240,"text":"nanargmin"},{"col":0,"comment":"\n Image plot of 2d DataArray using matplotlib.pyplot\n\n Wraps :func:`matplotlib:matplotlib.pyplot.imshow`\n\n While other plot methods require the DataArray to be strictly\n two-dimensional, ``imshow`` also accepts a 3D array where some\n dimension can be interpreted as RGB or RGBA color channels and\n allows this dimension to be specified via the kwarg ``rgb=``.\n\n Unlike matplotlib, Xarray can apply ``vmin`` and ``vmax`` to RGB or RGBA\n data, by applying a single scaling factor and offset to all bands.\n Passing ``robust=True`` infers ``vmin`` and ``vmax``\n :ref:`in the usual way `.\n\n .. note::\n This function needs uniformly spaced coordinates to\n properly label the axes. Call DataArray.plot() to check.\n\n The pixels are centered on the coordinates values. Ie, if the coordinate\n value is 3.2 then the pixels for those coordinates will be centered on 3.2.\n ","endLoc":904,"header":"@_plot2d\ndef imshow(x, y, z, ax, **kwargs)","id":1514,"name":"imshow","nodeType":"Function","startLoc":832,"text":"@_plot2d\ndef imshow(x, y, z, ax, **kwargs):\n \"\"\"\n Image plot of 2d DataArray using matplotlib.pyplot\n\n Wraps :func:`matplotlib:matplotlib.pyplot.imshow`\n\n While other plot methods require the DataArray to be strictly\n two-dimensional, ``imshow`` also accepts a 3D array where some\n dimension can be interpreted as RGB or RGBA color channels and\n allows this dimension to be specified via the kwarg ``rgb=``.\n\n Unlike matplotlib, Xarray can apply ``vmin`` and ``vmax`` to RGB or RGBA\n data, by applying a single scaling factor and offset to all bands.\n Passing ``robust=True`` infers ``vmin`` and ``vmax``\n :ref:`in the usual way `.\n\n .. note::\n This function needs uniformly spaced coordinates to\n properly label the axes. 
Call DataArray.plot() to check.\n\n The pixels are centered on the coordinates values. Ie, if the coordinate\n value is 3.2 then the pixels for those coordinates will be centered on 3.2.\n \"\"\"\n\n if x.ndim != 1 or y.ndim != 1:\n raise ValueError(\n \"imshow requires 1D coordinates, try using \" \"pcolormesh or contour(f)\"\n )\n\n # Centering the pixels- Assumes uniform spacing\n try:\n xstep = (x[1] - x[0]) / 2.0\n except IndexError:\n # Arbitrary default value, similar to matplotlib behaviour\n xstep = 0.1\n try:\n ystep = (y[1] - y[0]) / 2.0\n except IndexError:\n ystep = 0.1\n left, right = x[0] - xstep, x[-1] + xstep\n bottom, top = y[-1] + ystep, y[0] - ystep\n\n defaults = {\"origin\": \"upper\", \"interpolation\": \"nearest\"}\n\n if not hasattr(ax, \"projection\"):\n # not for cartopy geoaxes\n defaults[\"aspect\"] = \"auto\"\n\n # Allow user to override these defaults\n defaults.update(kwargs)\n\n if defaults[\"origin\"] == \"upper\":\n defaults[\"extent\"] = [left, right, bottom, top]\n else:\n defaults[\"extent\"] = [left, right, top, bottom]\n\n if z.ndim == 3:\n # matplotlib imshow uses black for missing data, but Xarray makes\n # missing data transparent. We therefore add an alpha channel if\n # there isn't one, and set it to transparent where data is masked.\n if z.shape[-1] == 3:\n alpha = np.ma.ones(z.shape[:2] + (1,), dtype=z.dtype)\n if np.issubdtype(z.dtype, np.integer):\n alpha *= 255\n z = np.ma.concatenate((z, alpha), axis=2)\n else:\n z = z.copy()\n z[np.any(z.mask, axis=-1), -1] = 0\n\n primitive = ax.imshow(z, **defaults)\n\n return primitive"},{"col":4,"comment":"null","endLoc":213,"header":"def time_load_dataset_scipy_with_block_chunks_vindexing(self)","id":1515,"name":"time_load_dataset_scipy_with_block_chunks_vindexing","nodeType":"Function","startLoc":211,"text":"def time_load_dataset_scipy_with_block_chunks_vindexing(self):\n ds = xr.open_dataset(self.filepath, engine=\"scipy\", chunks=self.block_chunks)\n ds = ds.isel(**self.vinds).load()"},{"col":0,"comment":"Coerce an array of datetime-like values to a pandas Series and\n access requested datetime component\n ","endLoc":42,"header":"def _access_through_series(values, name)","id":1516,"name":"_access_through_series","nodeType":"Function","startLoc":32,"text":"def _access_through_series(values, name):\n \"\"\"Coerce an array of datetime-like values to a pandas Series and\n access requested datetime component\n \"\"\"\n values_as_series = pd.Series(values.ravel())\n if name == \"season\":\n months = values_as_series.dt.month.values\n field_values = _season_from_months(months)\n else:\n field_values = getattr(values_as_series.dt, name).values\n return field_values.reshape(values.shape)"},{"col":0,"comment":"Compute season (DJF, MAM, JJA, SON) from month ordinal\n ","endLoc":14,"header":"def _season_from_months(months)","id":1517,"name":"_season_from_months","nodeType":"Function","startLoc":8,"text":"def _season_from_months(months):\n \"\"\"Compute season (DJF, MAM, JJA, SON) from month ordinal\n \"\"\"\n # TODO: Move \"season\" accessor upstream into pandas\n seasons = np.array([\"DJF\", \"MAM\", \"JJA\", \"SON\"])\n months = np.asarray(months)\n return seasons[(months // 3) % 4]"},{"attributeType":"function","col":0,"comment":"null","endLoc":241,"id":1518,"name":"nanargmax","nodeType":"Attribute","startLoc":241,"text":"nanargmax"},{"col":0,"comment":"","endLoc":1,"header":"nputils.py#","id":1519,"name":"","nodeType":"Function","startLoc":1,"text":"try:\n import bottleneck as bn\n\n _USE_BOTTLENECK = 
True\nexcept ImportError:\n # use numpy methods instead\n bn = np\n _USE_BOTTLENECK = False\n\nnanmin = _create_bottleneck_method(\"nanmin\")\n\nnanmax = _create_bottleneck_method(\"nanmax\")\n\nnanmean = _create_bottleneck_method(\"nanmean\")\n\nnanmedian = _create_bottleneck_method(\"nanmedian\")\n\nnanvar = _create_bottleneck_method(\"nanvar\")\n\nnanstd = _create_bottleneck_method(\"nanstd\")\n\nnanprod = _create_bottleneck_method(\"nanprod\")\n\nnancumsum = _create_bottleneck_method(\"nancumsum\")\n\nnancumprod = _create_bottleneck_method(\"nancumprod\")\n\nnanargmin = _create_bottleneck_method(\"nanargmin\")\n\nnanargmax = _create_bottleneck_method(\"nanargmax\")"},{"col":0,"comment":"\n Line plot of DataArray index against values\n\n Wraps :func:`matplotlib:matplotlib.pyplot.plot`\n\n Parameters\n ----------\n darray : DataArray\n Must be 1 dimensional\n figsize : tuple, optional\n A tuple (width, height) of the figure in inches.\n Mutually exclusive with ``size`` and ``ax``.\n aspect : scalar, optional\n Aspect ratio of plot, so that ``aspect * size`` gives the width in\n inches. Only used if a ``size`` is provided.\n size : scalar, optional\n If provided, create a new figure for the plot with the given size.\n Height (in inches) of each plot. See also: ``aspect``.\n ax : matplotlib axes object, optional\n Axis on which to plot this figure. By default, use the current axis.\n Mutually exclusive with ``size`` and ``figsize``.\n hue : string, optional\n Dimension or coordinate for which you want multiple lines plotted.\n If plotting against a 2D coordinate, ``hue`` must be a dimension.\n x, y : string, optional\n Dimensions or coordinates for x, y axis.\n Only one of these may be specified.\n The other coordinate plots values from the DataArray on which this\n plot method is called.\n xscale, yscale : 'linear', 'symlog', 'log', 'logit', optional\n Specifies scaling for the x- and y-axes respectively\n xticks, yticks : Specify tick locations for x- and y-axes\n xlim, ylim : Specify x- and y-axes limits\n xincrease : None, True, or False, optional\n Should the values on the x axes be increasing from left to right?\n if None, use the default for the matplotlib function.\n yincrease : None, True, or False, optional\n Should the values on the y axes be increasing from top to bottom?\n if None, use the default for the matplotlib function.\n add_legend : boolean, optional\n Add legend with y axis coordinates (2D inputs only).\n *args, **kwargs : optional\n Additional arguments to matplotlib.pyplot.plot\n ","endLoc":350,"header":"def line(\n darray,\n *args,\n row=None,\n col=None,\n figsize=None,\n aspect=None,\n size=None,\n ax=None,\n hue=None,\n x=None,\n y=None,\n xincrease=None,\n yincrease=None,\n xscale=None,\n yscale=None,\n xticks=None,\n yticks=None,\n xlim=None,\n ylim=None,\n add_legend=True,\n _labels=True,\n **kwargs\n)","id":1520,"name":"line","nodeType":"Function","startLoc":207,"text":"def line(\n darray,\n *args,\n row=None,\n col=None,\n figsize=None,\n aspect=None,\n size=None,\n ax=None,\n hue=None,\n x=None,\n y=None,\n xincrease=None,\n yincrease=None,\n xscale=None,\n yscale=None,\n xticks=None,\n yticks=None,\n xlim=None,\n ylim=None,\n add_legend=True,\n _labels=True,\n **kwargs\n):\n \"\"\"\n Line plot of DataArray index against values\n\n Wraps :func:`matplotlib:matplotlib.pyplot.plot`\n\n Parameters\n ----------\n darray : DataArray\n Must be 1 dimensional\n figsize : tuple, optional\n A tuple (width, height) of the figure in inches.\n Mutually exclusive with 
``size`` and ``ax``.\n aspect : scalar, optional\n Aspect ratio of plot, so that ``aspect * size`` gives the width in\n inches. Only used if a ``size`` is provided.\n size : scalar, optional\n If provided, create a new figure for the plot with the given size.\n Height (in inches) of each plot. See also: ``aspect``.\n ax : matplotlib axes object, optional\n Axis on which to plot this figure. By default, use the current axis.\n Mutually exclusive with ``size`` and ``figsize``.\n hue : string, optional\n Dimension or coordinate for which you want multiple lines plotted.\n If plotting against a 2D coordinate, ``hue`` must be a dimension.\n x, y : string, optional\n Dimensions or coordinates for x, y axis.\n Only one of these may be specified.\n The other coordinate plots values from the DataArray on which this\n plot method is called.\n xscale, yscale : 'linear', 'symlog', 'log', 'logit', optional\n Specifies scaling for the x- and y-axes respectively\n xticks, yticks : Specify tick locations for x- and y-axes\n xlim, ylim : Specify x- and y-axes limits\n xincrease : None, True, or False, optional\n Should the values on the x axes be increasing from left to right?\n if None, use the default for the matplotlib function.\n yincrease : None, True, or False, optional\n Should the values on the y axes be increasing from top to bottom?\n if None, use the default for the matplotlib function.\n add_legend : boolean, optional\n Add legend with y axis coordinates (2D inputs only).\n *args, **kwargs : optional\n Additional arguments to matplotlib.pyplot.plot\n \"\"\"\n # Handle facetgrids first\n if row or col:\n allargs = locals().copy()\n allargs.update(allargs.pop(\"kwargs\"))\n allargs.pop(\"darray\")\n return _easy_facetgrid(darray, line, kind=\"line\", **allargs)\n\n ndims = len(darray.dims)\n if ndims > 2:\n raise ValueError(\n \"Line plots are for 1- or 2-dimensional DataArrays. \"\n \"Passed DataArray has {ndims} \"\n \"dimensions\".format(ndims=ndims)\n )\n\n # The allargs dict passed to _easy_facetgrid above contains args\n if args is ():\n args = kwargs.pop(\"args\", ())\n else:\n assert \"args\" not in kwargs\n\n ax = get_axis(figsize, size, aspect, ax)\n xplt, yplt, hueplt, xlabel, ylabel, hue_label = _infer_line_data(darray, x, y, hue)\n\n # Remove pd.Intervals if contained in xplt.values.\n if _valid_other_type(xplt.values, [pd.Interval]):\n # Is it a step plot? 
(see matplotlib.Axes.step)\n if kwargs.get(\"linestyle\", \"\").startswith(\"steps-\"):\n xplt_val, yplt_val = _interval_to_double_bound_points(\n xplt.values, yplt.values\n )\n # Remove steps-* to be sure that matplotlib is not confused\n kwargs[\"linestyle\"] = (\n kwargs[\"linestyle\"]\n .replace(\"steps-pre\", \"\")\n .replace(\"steps-post\", \"\")\n .replace(\"steps-mid\", \"\")\n )\n if kwargs[\"linestyle\"] == \"\":\n del kwargs[\"linestyle\"]\n else:\n xplt_val = _interval_to_mid_points(xplt.values)\n yplt_val = yplt.values\n xlabel += \"_center\"\n else:\n xplt_val = xplt.values\n yplt_val = yplt.values\n\n _ensure_plottable(xplt_val, yplt_val)\n\n primitive = ax.plot(xplt_val, yplt_val, *args, **kwargs)\n\n if _labels:\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n\n ax.set_title(darray._title_for_slice())\n\n if darray.ndim == 2 and add_legend:\n ax.legend(handles=primitive, labels=list(hueplt.values), title=hue_label)\n\n # Rotate dates on xlabels\n # Do this without calling autofmt_xdate so that x-axes ticks\n # on other subplots (if any) are not deleted.\n # https://stackoverflow.com/questions/17430105/autofmt-xdate-deletes-x-axis-labels-of-all-subplots\n if np.issubdtype(xplt.dtype, np.datetime64):\n for xlabels in ax.get_xticklabels():\n xlabels.set_rotation(30)\n xlabels.set_ha(\"right\")\n\n _update_axes(ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim)\n\n return primitive"},{"id":1521,"name":"HOW_TO_RELEASE","nodeType":"TextFile","path":"","text":"How to issue an xarray release in 15 easy steps\n\nTime required: about an hour.\n\n 1. Ensure your master branch is synced to upstream:\n git pull upstream master\n 2. Look over whats-new.rst and the docs. Make sure \"What's New\" is complete\n (check the date!) and consider adding a brief summary note describing the\n release at the top.\n Things to watch out for:\n - Important new features should be highlighted towards the top.\n - Function/method references should include links to the API docs.\n - Sometimes notes get added in the wrong section of whats-new, typically\n due to a bad merge. Check for these before a release by using git diff,\n e.g., ``git diff v0.X.Y whats-new.rst`` where 0.X.Y is the previous\n release.\n 3. If you have any doubts, run the full test suite one final time!\n py.test\n 4. On the master branch, commit the release in git:\n git commit -a -m 'Release v0.X.Y'\n 5. Tag the release:\n git tag -a v0.X.Y -m 'v0.X.Y'\n 6. Build source and binary wheels for pypi:\n git clean -xdf # this deletes all uncommited changes!\n python setup.py bdist_wheel sdist\n 7. Use twine to register and upload the release on pypi. Be careful, you can't\n take this back!\n twine upload dist/xarray-0.X.Y*\n You will need to be listed as a package owner at\n https://pypi.python.org/pypi/xarray for this to work.\n 8. Push your changes to master:\n git push upstream master\n git push upstream --tags\n 9. Update the stable branch (used by ReadTheDocs) and switch back to master:\n git checkout stable\n git rebase master\n git push upstream stable\n git checkout master\n It's OK to force push to 'stable' if necessary.\n We also update the stable branch with `git cherrypick` for documentation\n only fixes that apply the current released version.\n10. Add a section for the next release (v.X.(Y+1)) to doc/whats-new.rst.\n11. 
Commit your changes and push to master again:\n git commit -a -m 'Revert to dev version'\n git push upstream master\n You're done pushing to master!\n12. Issue the release on GitHub. Click on \"Draft a new release\" at\n https://github.com/pydata/xarray/releases. Type in the version number, but\n don't bother to describe it -- we maintain that on the docs instead.\n13. Update the docs. Login to https://readthedocs.org/projects/xray/versions/\n and switch your new release tag (at the bottom) from \"Inactive\" to \"Active\".\n It should now build automatically.\n14. Issue the release announcement! For bug fix releases, I usually only email\n xarray@googlegroups.com. For major/feature releases, I will email a broader\n list (no more than once every 3-6 months):\n pydata@googlegroups.com, xarray@googlegroups.com,\n numpy-discussion@scipy.org, scipy-user@scipy.org,\n pyaos@lists.johnny-lin.com\n Google search will turn up examples of prior release announcements (look for\n \"ANN xarray\").\n\nNote on version numbering:\n\nWe follow a rough approximation of semantic versioning. Only major releases (0.X.0)\nshould include breaking changes. Minor releases (0.X.Y) are for bug fixes and\nbackwards compatible new features, but if a sufficient number of new features\nhave arrived we will issue a major release even if there are no compatibility\nbreaks.\n\nOnce the project reaches a sufficient level of maturity for a 1.0.0 release, we\nintend to follow semantic versioning more strictly.\n"},{"id":1522,"name":".landscape.yml","nodeType":"TextFile","path":"","text":"doc-warnings: yes\ntest-warnings: yes\nstrictness: medium\nmax-line-length: 79\nautodetect: yes\nignore-paths:\n - ci\n - doc\n - examples\n - LICENSES\n - notebooks\npylint:\n disable:\n - dangerous-default-value\n"},{"fileName":"pseudonetcdf_.py","filePath":"xarray/backends","id":1523,"nodeType":"File","text":"import numpy as np\n\nfrom .. 
import Variable\nfrom ..core import indexing\nfrom ..core.utils import Frozen, FrozenDict\nfrom .common import AbstractDataStore, BackendArray\nfrom .file_manager import CachingFileManager\nfrom .locks import HDF5_LOCK, NETCDFC_LOCK, combine_locks, ensure_lock\n\n# psuedonetcdf can invoke netCDF libraries internally\nPNETCDF_LOCK = combine_locks([HDF5_LOCK, NETCDFC_LOCK])\n\n\nclass PncArrayWrapper(BackendArray):\n def __init__(self, variable_name, datastore):\n self.datastore = datastore\n self.variable_name = variable_name\n array = self.get_array()\n self.shape = array.shape\n self.dtype = np.dtype(array.dtype)\n\n def get_array(self, needs_lock=True):\n ds = self.datastore._manager.acquire(needs_lock)\n return ds.variables[self.variable_name]\n\n def __getitem__(self, key):\n return indexing.explicit_indexing_adapter(\n key, self.shape, indexing.IndexingSupport.OUTER_1VECTOR, self._getitem\n )\n\n def _getitem(self, key):\n with self.datastore.lock:\n array = self.get_array(needs_lock=False)\n return array[key]\n\n\nclass PseudoNetCDFDataStore(AbstractDataStore):\n \"\"\"Store for accessing datasets via PseudoNetCDF\n \"\"\"\n\n @classmethod\n def open(cls, filename, lock=None, mode=None, **format_kwargs):\n from PseudoNetCDF import pncopen\n\n keywords = {\"kwargs\": format_kwargs}\n # only include mode if explicitly passed\n if mode is not None:\n keywords[\"mode\"] = mode\n\n if lock is None:\n lock = PNETCDF_LOCK\n\n manager = CachingFileManager(pncopen, filename, lock=lock, **keywords)\n return cls(manager, lock)\n\n def __init__(self, manager, lock=None):\n self._manager = manager\n self.lock = ensure_lock(lock)\n\n @property\n def ds(self):\n return self._manager.acquire()\n\n def open_store_variable(self, name, var):\n data = indexing.LazilyOuterIndexedArray(PncArrayWrapper(name, self))\n attrs = {k: getattr(var, k) for k in var.ncattrs()}\n return Variable(var.dimensions, data, attrs)\n\n def get_variables(self):\n return FrozenDict(\n (k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()\n )\n\n def get_attrs(self):\n return Frozen({k: getattr(self.ds, k) for k in self.ds.ncattrs()})\n\n def get_dimensions(self):\n return Frozen(self.ds.dimensions)\n\n def get_encoding(self):\n return {\n \"unlimited_dims\": {\n k for k in self.ds.dimensions if self.ds.dimensions[k].isunlimited()\n }\n }\n\n def close(self):\n self._manager.close()\n"},{"className":"PncArrayWrapper","col":0,"comment":"null","endLoc":34,"id":1524,"nodeType":"Class","startLoc":14,"text":"class PncArrayWrapper(BackendArray):\n def __init__(self, variable_name, datastore):\n self.datastore = datastore\n self.variable_name = variable_name\n array = self.get_array()\n self.shape = array.shape\n self.dtype = np.dtype(array.dtype)\n\n def get_array(self, needs_lock=True):\n ds = self.datastore._manager.acquire(needs_lock)\n return ds.variables[self.variable_name]\n\n def __getitem__(self, key):\n return indexing.explicit_indexing_adapter(\n key, self.shape, indexing.IndexingSupport.OUTER_1VECTOR, self._getitem\n )\n\n def _getitem(self, key):\n with self.datastore.lock:\n array = self.get_array(needs_lock=False)\n return array[key]"},{"col":4,"comment":"null","endLoc":20,"header":"def __init__(self, variable_name, datastore)","id":1525,"name":"__init__","nodeType":"Function","startLoc":15,"text":"def __init__(self, variable_name, datastore):\n self.datastore = datastore\n self.variable_name = variable_name\n array = self.get_array()\n self.shape = array.shape\n self.dtype = 
np.dtype(array.dtype)"},{"col":4,"comment":"Return a new object with an additional axis (or axes) inserted at\n the corresponding position in the array shape. The new object is a\n view into the underlying array, not a copy.\n\n If dim is already a scalar coordinate, it will be promoted to a 1D\n coordinate consisting of a single value.\n\n Parameters\n ----------\n dim : hashable, sequence of hashable, mapping, or None\n Dimensions to include on the new variable. If provided as hashable\n or sequence of hashable, then dimensions are inserted with length\n 1. If provided as a mapping, then the keys are the new dimensions\n and the values are either integers (giving the length of the new\n dimensions) or array-like (giving the coordinates of the new\n dimensions).\n axis : integer, sequence of integers, or None\n Axis position(s) where new axis is to be inserted (position(s) on\n the result array). If a list (or tuple) of integers is passed,\n multiple axes are inserted. In this case, dim arguments should be\n same length list. If axis=None is passed, all the axes will be\n inserted to the start of the result array.\n **dim_kwargs : int or sequence/ndarray\n The keywords are arbitrary dimensions being inserted and the values\n are either the lengths of the new dims (if int is given), or their\n coordinates. Note, this is an alternative to passing a dict to the\n dim kwarg and will only be used if dim is None.\n\n Returns\n -------\n expanded : same type as caller\n This object, but with an additional dimension(s).\n ","endLoc":2999,"header":"def expand_dims(\n self,\n dim: Union[None, Hashable, Sequence[Hashable], Mapping[Hashable, Any]] = None,\n axis: Union[None, int, Sequence[int]] = None,\n **dim_kwargs: Any,\n ) -> \"Dataset\"","id":1526,"name":"expand_dims","nodeType":"Function","startLoc":2867,"text":"def expand_dims(\n self,\n dim: Union[None, Hashable, Sequence[Hashable], Mapping[Hashable, Any]] = None,\n axis: Union[None, int, Sequence[int]] = None,\n **dim_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Return a new object with an additional axis (or axes) inserted at\n the corresponding position in the array shape. The new object is a\n view into the underlying array, not a copy.\n\n If dim is already a scalar coordinate, it will be promoted to a 1D\n coordinate consisting of a single value.\n\n Parameters\n ----------\n dim : hashable, sequence of hashable, mapping, or None\n Dimensions to include on the new variable. If provided as hashable\n or sequence of hashable, then dimensions are inserted with length\n 1. If provided as a mapping, then the keys are the new dimensions\n and the values are either integers (giving the length of the new\n dimensions) or array-like (giving the coordinates of the new\n dimensions).\n axis : integer, sequence of integers, or None\n Axis position(s) where new axis is to be inserted (position(s) on\n the result array). If a list (or tuple) of integers is passed,\n multiple axes are inserted. In this case, dim arguments should be\n same length list. If axis=None is passed, all the axes will be\n inserted to the start of the result array.\n **dim_kwargs : int or sequence/ndarray\n The keywords are arbitrary dimensions being inserted and the values\n are either the lengths of the new dims (if int is given), or their\n coordinates. 
Note, this is an alternative to passing a dict to the\n dim kwarg and will only be used if dim is None.\n\n Returns\n -------\n expanded : same type as caller\n This object, but with an additional dimension(s).\n \"\"\"\n if dim is None:\n pass\n elif isinstance(dim, Mapping):\n # We're later going to modify dim in place; don't tamper with\n # the input\n dim = dict(dim)\n elif isinstance(dim, int):\n raise TypeError(\n \"dim should be hashable or sequence of hashables or mapping\"\n )\n elif isinstance(dim, str) or not isinstance(dim, Sequence):\n dim = {dim: 1}\n elif isinstance(dim, Sequence):\n if len(dim) != len(set(dim)):\n raise ValueError(\"dims should not contain duplicate values.\")\n dim = {d: 1 for d in dim}\n\n dim = either_dict_or_kwargs(dim, dim_kwargs, \"expand_dims\")\n assert isinstance(dim, MutableMapping)\n\n if axis is None:\n axis = list(range(len(dim)))\n elif not isinstance(axis, Sequence):\n axis = [axis]\n\n if len(dim) != len(axis):\n raise ValueError(\"lengths of dim and axis should be identical.\")\n for d in dim:\n if d in self.dims:\n raise ValueError(\"Dimension {dim} already exists.\".format(dim=d))\n if d in self._variables and not utils.is_scalar(self._variables[d]):\n raise ValueError(\n \"{dim} already exists as coordinate or\"\n \" variable name.\".format(dim=d)\n )\n\n variables: Dict[Hashable, Variable] = {}\n coord_names = self._coord_names.copy()\n # If dim is a dict, then ensure that the values are either integers\n # or iterables.\n for k, v in dim.items():\n if hasattr(v, \"__iter__\"):\n # If the value for the new dimension is an iterable, then\n # save the coordinates to the variables dict, and set the\n # value within the dim dict to the length of the iterable\n # for later use.\n variables[k] = xr.IndexVariable((k,), v)\n coord_names.add(k)\n dim[k] = variables[k].size\n elif isinstance(v, int):\n pass # Do nothing if the dimensions value is just an int\n else:\n raise TypeError(\n \"The value of new dimension {k} must be \"\n \"an iterable or an int\".format(k=k)\n )\n\n for k, v in self._variables.items():\n if k not in dim:\n if k in coord_names: # Do not change coordinates\n variables[k] = v\n else:\n result_ndim = len(v.dims) + len(axis)\n for a in axis:\n if a < -result_ndim or result_ndim - 1 < a:\n raise IndexError(\n \"Axis {a} is out of bounds of the expanded\"\n \" dimension size {dim}.\".format(\n a=a, v=k, dim=result_ndim\n )\n )\n\n axis_pos = [a if a >= 0 else result_ndim + a for a in axis]\n if len(axis_pos) != len(set(axis_pos)):\n raise ValueError(\"axis should not contain duplicate\" \" values.\")\n # We need to sort them to make sure `axis` equals to the\n # axis positions of the result array.\n zip_axis_dim = sorted(zip(axis_pos, dim.items()))\n\n all_dims = list(zip(v.dims, v.shape))\n for d, c in zip_axis_dim:\n all_dims.insert(d, c)\n variables[k] = v.set_dims(dict(all_dims))\n else:\n # If dims includes a label of a non-dimension coordinate,\n # it will be promoted to a 1D coordinate with a single value.\n variables[k] = v.set_dims(k).to_index_variable()\n\n new_dims = self._dims.copy()\n new_dims.update(dim)\n\n return self._replace_vars_and_dims(\n variables, dims=new_dims, coord_names=coord_names\n )"},{"col":4,"comment":"null","endLoc":219,"header":"def time_load_dataset_scipy_with_time_chunks(self)","id":1527,"name":"time_load_dataset_scipy_with_time_chunks","nodeType":"Function","startLoc":215,"text":"def time_load_dataset_scipy_with_time_chunks(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n 
xr.open_dataset(\n self.filepath, engine=\"scipy\", chunks=self.time_chunks\n ).load()"},{"col":0,"comment":"Coerce an array of datetime-like values to a CFTimeIndex\n and access requested datetime component\n ","endLoc":29,"header":"def _access_through_cftimeindex(values, name)","id":1528,"name":"_access_through_cftimeindex","nodeType":"Function","startLoc":17,"text":"def _access_through_cftimeindex(values, name):\n \"\"\"Coerce an array of datetime-like values to a CFTimeIndex\n and access requested datetime component\n \"\"\"\n from ..coding.cftimeindex import CFTimeIndex\n\n values_as_cftimeindex = CFTimeIndex(values.ravel())\n if name == \"season\":\n months = values_as_cftimeindex.month\n field_values = _season_from_months(months)\n else:\n field_values = getattr(values_as_cftimeindex, name)\n return field_values.reshape(values.shape)"},{"attributeType":"null","col":8,"comment":"null","endLoc":197,"id":1529,"name":"filepath","nodeType":"Attribute","startLoc":197,"text":"self.filepath"},{"col":4,"comment":"null","endLoc":24,"header":"def get_array(self, needs_lock=True)","id":1530,"name":"get_array","nodeType":"Function","startLoc":22,"text":"def get_array(self, needs_lock=True):\n ds = self.datastore._manager.acquire(needs_lock)\n return ds.variables[self.variable_name]"},{"attributeType":"null","col":8,"comment":"null","endLoc":198,"id":1531,"name":"format","nodeType":"Attribute","startLoc":198,"text":"self.format"},{"col":4,"comment":"null","endLoc":29,"header":"def __getitem__(self, key)","id":1532,"name":"__getitem__","nodeType":"Function","startLoc":26,"text":"def __getitem__(self, key):\n return indexing.explicit_indexing_adapter(\n key, self.shape, indexing.IndexingSupport.OUTER_1VECTOR, self._getitem\n )"},{"className":"IOMultipleNetCDF","col":0,"comment":"\n A few examples that benchmark reading/writing multiple netCDF files with\n xarray\n ","endLoc":296,"id":1533,"nodeType":"Class","startLoc":222,"text":"class IOMultipleNetCDF:\n \"\"\"\n A few examples that benchmark reading/writing multiple netCDF files with\n xarray\n \"\"\"\n\n timeout = 300.0\n repeat = 1\n number = 5\n\n def make_ds(self, nfiles=10):\n\n # multiple Dataset\n self.ds = xr.Dataset()\n self.nt = 1000\n self.nx = 90\n self.ny = 45\n self.nfiles = nfiles\n\n self.block_chunks = {\n \"time\": self.nt / 4,\n \"lon\": self.nx / 3,\n \"lat\": self.ny / 3,\n }\n\n self.time_chunks = {\"time\": int(self.nt / 36)}\n\n self.time_vars = np.split(\n pd.date_range(\"1970-01-01\", periods=self.nt, freq=\"D\"), self.nfiles\n )\n\n self.ds_list = []\n self.filenames_list = []\n for i, times in enumerate(self.time_vars):\n ds = xr.Dataset()\n nt = len(times)\n lons = xr.DataArray(\n np.linspace(0, 360, self.nx),\n dims=(\"lon\",),\n attrs={\"units\": \"degrees east\", \"long_name\": \"longitude\"},\n )\n lats = xr.DataArray(\n np.linspace(-90, 90, self.ny),\n dims=(\"lat\",),\n attrs={\"units\": \"degrees north\", \"long_name\": \"latitude\"},\n )\n ds[\"foo\"] = xr.DataArray(\n randn((nt, self.nx, self.ny), frac_nan=0.2),\n coords={\"lon\": lons, \"lat\": lats, \"time\": times},\n dims=(\"time\", \"lon\", \"lat\"),\n name=\"foo\",\n encoding=None,\n attrs={\"units\": \"foo units\", \"description\": \"a description\"},\n )\n ds[\"bar\"] = xr.DataArray(\n randn((nt, self.nx, self.ny), frac_nan=0.2),\n coords={\"lon\": lons, \"lat\": lats, \"time\": times},\n dims=(\"time\", \"lon\", \"lat\"),\n name=\"bar\",\n encoding=None,\n attrs={\"units\": \"bar units\", \"description\": \"a description\"},\n )\n ds[\"baz\"] = 
xr.DataArray(\n randn((self.nx, self.ny), frac_nan=0.2).astype(np.float32),\n coords={\"lon\": lons, \"lat\": lats},\n dims=(\"lon\", \"lat\"),\n name=\"baz\",\n encoding=None,\n attrs={\"units\": \"baz units\", \"description\": \"a description\"},\n )\n\n ds.attrs = {\"history\": \"created for xarray benchmarking\"}\n\n self.ds_list.append(ds)\n self.filenames_list.append(\"test_netcdf_%i.nc\" % i)"},{"col":4,"comment":"null","endLoc":296,"header":"def make_ds(self, nfiles=10)","id":1534,"name":"make_ds","nodeType":"Function","startLoc":232,"text":"def make_ds(self, nfiles=10):\n\n # multiple Dataset\n self.ds = xr.Dataset()\n self.nt = 1000\n self.nx = 90\n self.ny = 45\n self.nfiles = nfiles\n\n self.block_chunks = {\n \"time\": self.nt / 4,\n \"lon\": self.nx / 3,\n \"lat\": self.ny / 3,\n }\n\n self.time_chunks = {\"time\": int(self.nt / 36)}\n\n self.time_vars = np.split(\n pd.date_range(\"1970-01-01\", periods=self.nt, freq=\"D\"), self.nfiles\n )\n\n self.ds_list = []\n self.filenames_list = []\n for i, times in enumerate(self.time_vars):\n ds = xr.Dataset()\n nt = len(times)\n lons = xr.DataArray(\n np.linspace(0, 360, self.nx),\n dims=(\"lon\",),\n attrs={\"units\": \"degrees east\", \"long_name\": \"longitude\"},\n )\n lats = xr.DataArray(\n np.linspace(-90, 90, self.ny),\n dims=(\"lat\",),\n attrs={\"units\": \"degrees north\", \"long_name\": \"latitude\"},\n )\n ds[\"foo\"] = xr.DataArray(\n randn((nt, self.nx, self.ny), frac_nan=0.2),\n coords={\"lon\": lons, \"lat\": lats, \"time\": times},\n dims=(\"time\", \"lon\", \"lat\"),\n name=\"foo\",\n encoding=None,\n attrs={\"units\": \"foo units\", \"description\": \"a description\"},\n )\n ds[\"bar\"] = xr.DataArray(\n randn((nt, self.nx, self.ny), frac_nan=0.2),\n coords={\"lon\": lons, \"lat\": lats, \"time\": times},\n dims=(\"time\", \"lon\", \"lat\"),\n name=\"bar\",\n encoding=None,\n attrs={\"units\": \"bar units\", \"description\": \"a description\"},\n )\n ds[\"baz\"] = xr.DataArray(\n randn((self.nx, self.ny), frac_nan=0.2).astype(np.float32),\n coords={\"lon\": lons, \"lat\": lats},\n dims=(\"lon\", \"lat\"),\n name=\"baz\",\n encoding=None,\n attrs={\"units\": \"baz units\", \"description\": \"a description\"},\n )\n\n ds.attrs = {\"history\": \"created for xarray benchmarking\"}\n\n self.ds_list.append(ds)\n self.filenames_list.append(\"test_netcdf_%i.nc\" % i)"},{"col":4,"comment":"null","endLoc":34,"header":"def _getitem(self, key)","id":1535,"name":"_getitem","nodeType":"Function","startLoc":31,"text":"def _getitem(self, key):\n with self.datastore.lock:\n array = self.get_array(needs_lock=False)\n return array[key]"},{"attributeType":"null","col":8,"comment":"null","endLoc":19,"id":1536,"name":"shape","nodeType":"Attribute","startLoc":19,"text":"self.shape"},{"attributeType":"null","col":8,"comment":"null","endLoc":16,"id":1537,"name":"datastore","nodeType":"Attribute","startLoc":16,"text":"self.datastore"},{"attributeType":"null","col":8,"comment":"null","endLoc":20,"id":1538,"name":"dtype","nodeType":"Attribute","startLoc":20,"text":"self.dtype"},{"col":0,"comment":"\n Do all elements of x have a type from types?\n ","endLoc":477,"header":"def _valid_other_type(x, types)","id":1539,"name":"_valid_other_type","nodeType":"Function","startLoc":473,"text":"def _valid_other_type(x, types):\n \"\"\"\n Do all elements of x have a type from types?\n \"\"\"\n return all(any(isinstance(el, t) for t in types) for el in 
np.ravel(x))"},{"attributeType":"null","col":8,"comment":"null","endLoc":17,"id":1540,"name":"variable_name","nodeType":"Attribute","startLoc":17,"text":"self.variable_name"},{"className":"PseudoNetCDFDataStore","col":0,"comment":"Store for accessing datasets via PseudoNetCDF\n ","endLoc":88,"id":1541,"nodeType":"Class","startLoc":37,"text":"class PseudoNetCDFDataStore(AbstractDataStore):\n \"\"\"Store for accessing datasets via PseudoNetCDF\n \"\"\"\n\n @classmethod\n def open(cls, filename, lock=None, mode=None, **format_kwargs):\n from PseudoNetCDF import pncopen\n\n keywords = {\"kwargs\": format_kwargs}\n # only include mode if explicitly passed\n if mode is not None:\n keywords[\"mode\"] = mode\n\n if lock is None:\n lock = PNETCDF_LOCK\n\n manager = CachingFileManager(pncopen, filename, lock=lock, **keywords)\n return cls(manager, lock)\n\n def __init__(self, manager, lock=None):\n self._manager = manager\n self.lock = ensure_lock(lock)\n\n @property\n def ds(self):\n return self._manager.acquire()\n\n def open_store_variable(self, name, var):\n data = indexing.LazilyOuterIndexedArray(PncArrayWrapper(name, self))\n attrs = {k: getattr(var, k) for k in var.ncattrs()}\n return Variable(var.dimensions, data, attrs)\n\n def get_variables(self):\n return FrozenDict(\n (k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()\n )\n\n def get_attrs(self):\n return Frozen({k: getattr(self.ds, k) for k in self.ds.ncattrs()})\n\n def get_dimensions(self):\n return Frozen(self.ds.dimensions)\n\n def get_encoding(self):\n return {\n \"unlimited_dims\": {\n k for k in self.ds.dimensions if self.ds.dimensions[k].isunlimited()\n }\n }\n\n def close(self):\n self._manager.close()"},{"col":4,"comment":"null","endLoc":62,"header":"@property\n def ds(self)","id":1542,"name":"ds","nodeType":"Function","startLoc":60,"text":"@property\n def ds(self):\n return self._manager.acquire()"},{"col":0,"comment":"\n Helper function to deal with a xarray consisting of pd.Intervals. Each\n interval is replaced with both boundaries. I.e. the length of xarray\n doubles. yarray is modified so it matches the new shape of xarray.\n ","endLoc":453,"header":"def _interval_to_double_bound_points(xarray, yarray)","id":1543,"name":"_interval_to_double_bound_points","nodeType":"Function","startLoc":440,"text":"def _interval_to_double_bound_points(xarray, yarray):\n \"\"\"\n Helper function to deal with a xarray consisting of pd.Intervals. Each\n interval is replaced with both boundaries. I.e. the length of xarray\n doubles. 
yarray is modified so it matches the new shape of xarray.\n \"\"\"\n\n xarray1 = np.array([x.left for x in xarray])\n xarray2 = np.array([x.right for x in xarray])\n\n xarray = list(itertools.chain.from_iterable(zip(xarray1, xarray2)))\n yarray = list(itertools.chain.from_iterable(zip(yarray, yarray)))\n\n return xarray, yarray"},{"col":4,"comment":"null","endLoc":67,"header":"def open_store_variable(self, name, var)","id":1544,"name":"open_store_variable","nodeType":"Function","startLoc":64,"text":"def open_store_variable(self, name, var):\n data = indexing.LazilyOuterIndexedArray(PncArrayWrapper(name, self))\n attrs = {k: getattr(var, k) for k in var.ncattrs()}\n return Variable(var.dimensions, data, attrs)"},{"col":0,"comment":"\n Helper function which returns an array\n with the Intervals' mid points.\n ","endLoc":425,"header":"def _interval_to_mid_points(array)","id":1545,"name":"_interval_to_mid_points","nodeType":"Function","startLoc":419,"text":"def _interval_to_mid_points(array):\n \"\"\"\n Helper function which returns an array\n with the Intervals' mid points.\n \"\"\"\n\n return np.array([x.mid for x in array])"},{"col":0,"comment":"\n Raise exception if there is anything in args that can't be plotted on an\n axis by matplotlib.\n ","endLoc":527,"header":"def _ensure_plottable(*args)","id":1546,"name":"_ensure_plottable","nodeType":"Function","startLoc":492,"text":"def _ensure_plottable(*args):\n \"\"\"\n Raise exception if there is anything in args that can't be plotted on an\n axis by matplotlib.\n \"\"\"\n numpy_types = [np.floating, np.integer, np.timedelta64, np.datetime64]\n other_types = [datetime]\n try:\n import cftime\n\n cftime_datetime = [cftime.datetime]\n except ImportError:\n cftime_datetime = []\n other_types = other_types + cftime_datetime\n for x in args:\n if not (\n _valid_numpy_subdtype(np.array(x), numpy_types)\n or _valid_other_type(np.array(x), other_types)\n ):\n raise TypeError(\n \"Plotting requires coordinates to be numeric \"\n \"or dates of type np.datetime64, \"\n \"datetime.datetime, cftime.datetime or \"\n \"pd.Interval.\"\n )\n if (\n _valid_other_type(np.array(x), cftime_datetime)\n and not nc_time_axis_available\n ):\n raise ImportError(\n \"Plotting of arrays of cftime.datetime \"\n \"objects or arrays indexed by \"\n \"cftime.datetime objects requires the \"\n \"optional `nc-time-axis` (v1.2.0 or later) \"\n \"package.\"\n )"},{"col":4,"comment":"Returns a new object with renamed dimensions only.\n\n Parameters\n ----------\n dims_dict : dict-like, optional\n Dictionary whose keys are current dimension names and\n whose values are the desired names.\n **dims, optional\n Keyword form of ``dims_dict``.\n One of dims_dict or dims must be provided.\n\n Returns\n -------\n renamed : Dataset\n Dataset with renamed dimensions.\n\n See Also\n --------\n Dataset.swap_dims\n Dataset.rename\n Dataset.rename_vars\n DataArray.rename\n ","endLoc":2744,"header":"def rename_dims(\n self, dims_dict: Mapping[Hashable, Hashable] = None, **dims: Hashable\n ) -> \"Dataset\"","id":1547,"name":"rename_dims","nodeType":"Function","startLoc":2707,"text":"def rename_dims(\n self, dims_dict: Mapping[Hashable, Hashable] = None, **dims: Hashable\n ) -> \"Dataset\":\n \"\"\"Returns a new object with renamed dimensions only.\n\n Parameters\n ----------\n dims_dict : dict-like, optional\n Dictionary whose keys are current dimension names and\n whose values are the desired names.\n **dims, optional\n Keyword form of ``dims_dict``.\n One of dims_dict or dims must be 
provided.\n\n Returns\n -------\n renamed : Dataset\n Dataset with renamed dimensions.\n\n See Also\n --------\n Dataset.swap_dims\n Dataset.rename\n Dataset.rename_vars\n DataArray.rename\n \"\"\"\n dims_dict = either_dict_or_kwargs(dims_dict, dims, \"rename_dims\")\n for k in dims_dict:\n if k not in self.dims:\n raise ValueError(\n \"cannot rename %r because it is not a \"\n \"dimension in this dataset\" % k\n )\n\n variables, coord_names, sizes, indexes = self._rename_all(\n name_dict={}, dims_dict=dims_dict\n )\n return self._replace(variables, coord_names, dims=sizes, indexes=indexes)"},{"col":4,"comment":"null","endLoc":240,"header":"def _tslib_round_accessor(self, name, freq)","id":1548,"name":"_tslib_round_accessor","nodeType":"Function","startLoc":237,"text":"def _tslib_round_accessor(self, name, freq):\n obj_type = type(self._obj)\n result = _round_field(self._obj.data, name, freq)\n return obj_type(result, name=name, coords=self._obj.coords, dims=self._obj.dims)"},{"col":4,"comment":"Returns a new object with renamed variables including coordinates\n\n Parameters\n ----------\n name_dict : dict-like, optional\n Dictionary whose keys are current variable or coordinate names and\n whose values are the desired names.\n **names, optional\n Keyword form of ``name_dict``.\n One of name_dict or names must be provided.\n\n Returns\n -------\n renamed : Dataset\n Dataset with renamed variables including coordinates\n\n See Also\n --------\n Dataset.swap_dims\n Dataset.rename\n Dataset.rename_dims\n DataArray.rename\n ","endLoc":2782,"header":"def rename_vars(\n self, name_dict: Mapping[Hashable, Hashable] = None, **names: Hashable\n ) -> \"Dataset\"","id":1549,"name":"rename_vars","nodeType":"Function","startLoc":2746,"text":"def rename_vars(\n self, name_dict: Mapping[Hashable, Hashable] = None, **names: Hashable\n ) -> \"Dataset\":\n \"\"\"Returns a new object with renamed variables including coordinates\n\n Parameters\n ----------\n name_dict : dict-like, optional\n Dictionary whose keys are current variable or coordinate names and\n whose values are the desired names.\n **names, optional\n Keyword form of ``name_dict``.\n One of name_dict or names must be provided.\n\n Returns\n -------\n renamed : Dataset\n Dataset with renamed variables including coordinates\n\n See Also\n --------\n Dataset.swap_dims\n Dataset.rename\n Dataset.rename_dims\n DataArray.rename\n \"\"\"\n name_dict = either_dict_or_kwargs(name_dict, names, \"rename_vars\")\n for k in name_dict:\n if k not in self:\n raise ValueError(\n \"cannot rename %r because it is not a \"\n \"variable or coordinate in this dataset\" % k\n )\n variables, coord_names, dims, indexes = self._rename_all(\n name_dict=name_dict, dims_dict={}\n )\n return self._replace(variables, coord_names, dims=dims, indexes=indexes)"},{"col":0,"comment":"Indirectly access pandas rounding functions by wrapping data\n as a Series and calling through `.dt` attribute.\n\n Parameters\n ----------\n values : np.ndarray or dask.array-like\n Array-like container of datetime-like values\n name : str (ceil, floor, round)\n Name of rounding function\n freq : a freq string indicating the rounding resolution\n\n Returns\n -------\n rounded timestamps : same type as values\n Array-like of datetime fields accessed for each element in values\n\n ","endLoc":111,"header":"def _round_field(values, name, freq)","id":1550,"name":"_round_field","nodeType":"Function","startLoc":88,"text":"def _round_field(values, name, freq):\n \"\"\"Indirectly access pandas rounding 
functions by wrapping data\n as a Series and calling through `.dt` attribute.\n\n Parameters\n ----------\n values : np.ndarray or dask.array-like\n Array-like container of datetime-like values\n name : str (ceil, floor, round)\n Name of rounding function\n freq : a freq string indicating the rounding resolution\n\n Returns\n -------\n rounded timestamps : same type as values\n Array-like of datetime fields accessed for each element in values\n\n \"\"\"\n if isinstance(values, dask_array_type):\n from dask.array import map_blocks\n\n return map_blocks(_round_series, values, name, freq=freq, dtype=np.datetime64)\n else:\n return _round_series(values, name, freq)"},{"col":0,"comment":"Coerce an array of datetime-like values to a pandas Series and\n apply requested rounding\n ","endLoc":85,"header":"def _round_series(values, name, freq)","id":1551,"name":"_round_series","nodeType":"Function","startLoc":77,"text":"def _round_series(values, name, freq):\n \"\"\"Coerce an array of datetime-like values to a pandas Series and\n apply requested rounding\n \"\"\"\n values_as_series = pd.Series(values.ravel())\n method = getattr(values_as_series.dt, name)\n field_values = method(freq=freq).values\n\n return field_values.reshape(values.shape)"},{"col":4,"comment":"Set Dataset (multi-)indexes using one or more existing coordinates\n or variables.\n\n Parameters\n ----------\n indexes : {dim: index, ...}\n Mapping from names matching dimensions and values given\n by (lists of) the names of existing coordinates or variables to set\n as new (multi-)index.\n append : bool, optional\n If True, append the supplied index(es) to the existing index(es).\n Otherwise replace the existing index(es) (default).\n **indexes_kwargs: optional\n The keyword arguments form of ``indexes``.\n One of indexes or indexes_kwargs must be provided.\n\n Returns\n -------\n obj : Dataset\n Another dataset, with this dataset's data but replaced coordinates.\n\n Examples\n --------\n >>> arr = xr.DataArray(data=np.ones((2, 3)),\n ... dims=['x', 'y'],\n ... coords={'x':\n ... range(2), 'y':\n ... range(3), 'a': ('x', [3, 4])\n ... 
})\n >>> ds = xr.Dataset({'v': arr})\n >>> ds\n \n Dimensions: (x: 2, y: 3)\n Coordinates:\n * x (x) int64 0 1\n * y (y) int64 0 1 2\n a (x) int64 3 4\n Data variables:\n v (x, y) float64 1.0 1.0 1.0 1.0 1.0 1.0\n >>> ds.set_index(x='a')\n \n Dimensions: (x: 2, y: 3)\n Coordinates:\n * x (x) int64 3 4\n * y (y) int64 0 1 2\n Data variables:\n v (x, y) float64 1.0 1.0 1.0 1.0 1.0 1.0\n\n See Also\n --------\n Dataset.reset_index\n Dataset.swap_dims\n ","endLoc":3066,"header":"def set_index(\n self,\n indexes: Mapping[Hashable, Union[Hashable, Sequence[Hashable]]] = None,\n append: bool = False,\n inplace: bool = None,\n **indexes_kwargs: Union[Hashable, Sequence[Hashable]],\n ) -> \"Dataset\"","id":1552,"name":"set_index","nodeType":"Function","startLoc":3001,"text":"def set_index(\n self,\n indexes: Mapping[Hashable, Union[Hashable, Sequence[Hashable]]] = None,\n append: bool = False,\n inplace: bool = None,\n **indexes_kwargs: Union[Hashable, Sequence[Hashable]],\n ) -> \"Dataset\":\n \"\"\"Set Dataset (multi-)indexes using one or more existing coordinates\n or variables.\n\n Parameters\n ----------\n indexes : {dim: index, ...}\n Mapping from names matching dimensions and values given\n by (lists of) the names of existing coordinates or variables to set\n as new (multi-)index.\n append : bool, optional\n If True, append the supplied index(es) to the existing index(es).\n Otherwise replace the existing index(es) (default).\n **indexes_kwargs: optional\n The keyword arguments form of ``indexes``.\n One of indexes or indexes_kwargs must be provided.\n\n Returns\n -------\n obj : Dataset\n Another dataset, with this dataset's data but replaced coordinates.\n\n Examples\n --------\n >>> arr = xr.DataArray(data=np.ones((2, 3)),\n ... dims=['x', 'y'],\n ... coords={'x':\n ... range(2), 'y':\n ... range(3), 'a': ('x', [3, 4])\n ... 
})\n >>> ds = xr.Dataset({'v': arr})\n >>> ds\n \n Dimensions: (x: 2, y: 3)\n Coordinates:\n * x (x) int64 0 1\n * y (y) int64 0 1 2\n a (x) int64 3 4\n Data variables:\n v (x, y) float64 1.0 1.0 1.0 1.0 1.0 1.0\n >>> ds.set_index(x='a')\n \n Dimensions: (x: 2, y: 3)\n Coordinates:\n * x (x) int64 3 4\n * y (y) int64 0 1 2\n Data variables:\n v (x, y) float64 1.0 1.0 1.0 1.0 1.0 1.0\n\n See Also\n --------\n Dataset.reset_index\n Dataset.swap_dims\n \"\"\"\n _check_inplace(inplace)\n indexes = either_dict_or_kwargs(indexes, indexes_kwargs, \"set_index\")\n variables, coord_names = merge_indexes(\n indexes, self._variables, self._coord_names, append=append\n )\n return self._replace_vars_and_dims(variables, coord_names=coord_names)"},{"col":0,"comment":"\n Pseudocolor plot of 2d DataArray\n\n Wraps :func:`matplotlib:matplotlib.pyplot.pcolormesh`\n ","endLoc":976,"header":"@_plot2d\ndef pcolormesh(x, y, z, ax, infer_intervals=None, **kwargs)","id":1553,"name":"pcolormesh","nodeType":"Function","startLoc":929,"text":"@_plot2d\ndef pcolormesh(x, y, z, ax, infer_intervals=None, **kwargs):\n \"\"\"\n Pseudocolor plot of 2d DataArray\n\n Wraps :func:`matplotlib:matplotlib.pyplot.pcolormesh`\n \"\"\"\n\n # decide on a default for infer_intervals (GH781)\n x = np.asarray(x)\n if infer_intervals is None:\n if hasattr(ax, \"projection\"):\n if len(x.shape) == 1:\n infer_intervals = True\n else:\n infer_intervals = False\n else:\n infer_intervals = True\n\n if infer_intervals and (\n (np.shape(x)[0] == np.shape(z)[1])\n or ((x.ndim > 1) and (np.shape(x)[1] == np.shape(z)[1]))\n ):\n if len(x.shape) == 1:\n x = _infer_interval_breaks(x, check_monotonic=True)\n else:\n # we have to infer the intervals on both axes\n x = _infer_interval_breaks(x, axis=1)\n x = _infer_interval_breaks(x, axis=0)\n\n if infer_intervals and (np.shape(y)[0] == np.shape(z)[0]):\n if len(y.shape) == 1:\n y = _infer_interval_breaks(y, check_monotonic=True)\n else:\n # we have to infer the intervals on both axes\n y = _infer_interval_breaks(y, axis=1)\n y = _infer_interval_breaks(y, axis=0)\n\n primitive = ax.pcolormesh(x, y, z, **kwargs)\n\n # by default, pcolormesh picks \"round\" values for bounds\n # this results in ugly looking plots with lots of surrounding whitespace\n if not hasattr(ax, \"projection\") and x.ndim == 1 and y.ndim == 1:\n # not a cartopy geoaxis\n ax.set_xlim(x[0], x[-1])\n ax.set_ylim(y[0], y[-1])\n\n return primitive"},{"col":4,"comment":"null","endLoc":72,"header":"def get_variables(self)","id":1554,"name":"get_variables","nodeType":"Function","startLoc":69,"text":"def get_variables(self):\n return FrozenDict(\n (k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()\n )"},{"col":0,"comment":"Merge variables into multi-indexes.\n\n Not public API. Used in Dataset and DataArray set_index\n methods.\n ","endLoc":265,"header":"def merge_indexes(\n indexes: Mapping[Hashable, Union[Hashable, Sequence[Hashable]]],\n variables: Mapping[Hashable, Variable],\n coord_names: Set[Hashable],\n append: bool = False,\n) -> Tuple[Dict[Hashable, Variable], Set[Hashable]]","id":1555,"name":"merge_indexes","nodeType":"Function","startLoc":191,"text":"def merge_indexes(\n indexes: Mapping[Hashable, Union[Hashable, Sequence[Hashable]]],\n variables: Mapping[Hashable, Variable],\n coord_names: Set[Hashable],\n append: bool = False,\n) -> Tuple[Dict[Hashable, Variable], Set[Hashable]]:\n \"\"\"Merge variables into multi-indexes.\n\n Not public API. 
Used in Dataset and DataArray set_index\n methods.\n \"\"\"\n vars_to_replace: Dict[Hashable, Variable] = {}\n vars_to_remove: List[Hashable] = []\n error_msg = \"{} is not the name of an existing variable.\"\n\n for dim, var_names in indexes.items():\n if isinstance(var_names, str) or not isinstance(var_names, Sequence):\n var_names = [var_names]\n\n names: List[Hashable] = []\n codes: List[List[int]] = []\n levels: List[List[int]] = []\n current_index_variable = variables.get(dim)\n\n for n in var_names:\n try:\n var = variables[n]\n except KeyError:\n raise ValueError(error_msg.format(n))\n if (\n current_index_variable is not None\n and var.dims != current_index_variable.dims\n ):\n raise ValueError(\n \"dimension mismatch between %r %s and %r %s\"\n % (dim, current_index_variable.dims, n, var.dims)\n )\n\n if current_index_variable is not None and append:\n current_index = current_index_variable.to_index()\n if isinstance(current_index, pd.MultiIndex):\n names.extend(current_index.names)\n codes.extend(current_index.codes)\n levels.extend(current_index.levels)\n else:\n names.append(\"%s_level_0\" % dim)\n cat = pd.Categorical(current_index.values, ordered=True)\n codes.append(cat.codes)\n levels.append(cat.categories)\n\n if not len(names) and len(var_names) == 1:\n idx = pd.Index(variables[var_names[0]].values)\n\n else:\n for n in var_names:\n try:\n var = variables[n]\n except KeyError:\n raise ValueError(error_msg.format(n))\n names.append(n)\n cat = pd.Categorical(var.values, ordered=True)\n codes.append(cat.codes)\n levels.append(cat.categories)\n\n idx = pd.MultiIndex(levels, codes, names=names)\n\n vars_to_replace[dim] = IndexVariable(dim, idx)\n vars_to_remove.extend(var_names)\n\n new_variables = {k: v for k, v in variables.items() if k not in vars_to_remove}\n new_variables.update(vars_to_replace)\n new_coord_names = coord_names | set(vars_to_replace)\n new_coord_names -= set(vars_to_remove)\n\n return new_variables, new_coord_names"},{"col":0,"comment":"\n >>> _infer_interval_breaks(np.arange(5))\n array([-0.5, 0.5, 1.5, 2.5, 3.5, 4.5])\n >>> _infer_interval_breaks([[0, 1], [3, 4]], axis=1)\n array([[-0.5, 0.5, 1.5],\n [ 2.5, 3.5, 4.5]])\n ","endLoc":686,"header":"def _infer_interval_breaks(coord, axis=0, check_monotonic=False)","id":1556,"name":"_infer_interval_breaks","nodeType":"Function","startLoc":658,"text":"def _infer_interval_breaks(coord, axis=0, check_monotonic=False):\n \"\"\"\n >>> _infer_interval_breaks(np.arange(5))\n array([-0.5, 0.5, 1.5, 2.5, 3.5, 4.5])\n >>> _infer_interval_breaks([[0, 1], [3, 4]], axis=1)\n array([[-0.5, 0.5, 1.5],\n [ 2.5, 3.5, 4.5]])\n \"\"\"\n coord = np.asarray(coord)\n\n if check_monotonic and not _is_monotonic(coord, axis=axis):\n raise ValueError(\n \"The input coordinate is not sorted in increasing \"\n \"order along axis %d. This can lead to unexpected \"\n \"results. Consider calling the `sortby` method on \"\n \"the input DataArray. 
To plot data with categorical \"\n \"axes, consider using the `heatmap` function from \"\n \"the `seaborn` statistical plotting library.\" % axis\n )\n\n deltas = 0.5 * np.diff(coord, axis=axis)\n if deltas.size == 0:\n deltas = np.array(0.0)\n first = np.take(coord, [0], axis=axis) - np.take(deltas, [0], axis=axis)\n last = np.take(coord, [-1], axis=axis) + np.take(deltas, [-1], axis=axis)\n trim_last = tuple(\n slice(None, -1) if n == axis else slice(None) for n in range(coord.ndim)\n )\n return np.concatenate([first, coord[trim_last] + deltas, last], axis=axis)"},{"col":0,"comment":"\n >>> _is_monotonic(np.array([0, 1, 2]))\n True\n >>> _is_monotonic(np.array([2, 1, 0]))\n True\n >>> _is_monotonic(np.array([0, 2, 1]))\n False\n ","endLoc":655,"header":"def _is_monotonic(coord, axis=0)","id":1557,"name":"_is_monotonic","nodeType":"Function","startLoc":636,"text":"def _is_monotonic(coord, axis=0):\n \"\"\"\n >>> _is_monotonic(np.array([0, 1, 2]))\n True\n >>> _is_monotonic(np.array([2, 1, 0]))\n True\n >>> _is_monotonic(np.array([0, 2, 1]))\n False\n \"\"\"\n if coord.shape[axis] < 3:\n return True\n else:\n n = coord.shape[axis]\n delta_pos = coord.take(np.arange(1, n), axis=axis) >= coord.take(\n np.arange(0, n - 1), axis=axis\n )\n delta_neg = coord.take(np.arange(1, n), axis=axis) <= coord.take(\n np.arange(0, n - 1), axis=axis\n )\n return np.all(delta_pos) or np.all(delta_neg)"},{"col":4,"comment":"\n Round timestamps downward to specified frequency resolution.\n\n Parameters\n ----------\n freq : a freq string indicating the rounding resolution\n e.g. 'D' for daily resolution\n\n Returns\n -------\n floor-ed timestamps : same type as values\n Array-like of datetime fields accessed for each element in values\n ","endLoc":257,"header":"def floor(self, freq)","id":1558,"name":"floor","nodeType":"Function","startLoc":242,"text":"def floor(self, freq):\n \"\"\"\n Round timestamps downward to specified frequency resolution.\n\n Parameters\n ----------\n freq : a freq string indicating the rounding resolution\n e.g. 'D' for daily resolution\n\n Returns\n -------\n floor-ed timestamps : same type as values\n Array-like of datetime fields accessed for each element in values\n \"\"\"\n\n return self._tslib_round_accessor(\"floor\", freq)"},{"col":4,"comment":"\n Round timestamps upward to specified frequency resolution.\n\n Parameters\n ----------\n freq : a freq string indicating the rounding resolution\n e.g. 'D' for daily resolution\n\n Returns\n -------\n ceil-ed timestamps : same type as values\n Array-like of datetime fields accessed for each element in values\n ","endLoc":273,"header":"def ceil(self, freq)","id":1559,"name":"ceil","nodeType":"Function","startLoc":259,"text":"def ceil(self, freq):\n \"\"\"\n Round timestamps upward to specified frequency resolution.\n\n Parameters\n ----------\n freq : a freq string indicating the rounding resolution\n e.g. 'D' for daily resolution\n\n Returns\n -------\n ceil-ed timestamps : same type as values\n Array-like of datetime fields accessed for each element in values\n \"\"\"\n return self._tslib_round_accessor(\"ceil\", freq)"},{"col":4,"comment":"\n Round timestamps to specified frequency resolution.\n\n Parameters\n ----------\n freq : a freq string indicating the rounding resolution\n e.g. 
'D' for daily resolution\n\n Returns\n -------\n rounded timestamps : same type as values\n Array-like of datetime fields accessed for each element in values\n ","endLoc":289,"header":"def round(self, freq)","id":1560,"name":"round","nodeType":"Function","startLoc":275,"text":"def round(self, freq):\n \"\"\"\n Round timestamps to specified frequency resolution.\n\n Parameters\n ----------\n freq : a freq string indicating the rounding resolution\n e.g. 'D' for daily resolution\n\n Returns\n -------\n rounded timestamps : same type as values\n Array-like of datetime fields accessed for each element in values\n \"\"\"\n return self._tslib_round_accessor(\"round\", freq)"},{"col":4,"comment":"\n Return an array of formatted strings specified by date_format, which\n supports the same string format as the python standard library. Details\n of the string format can be found in `python string format doc\n `__\n\n Parameters\n ----------\n date_format : str\n date format string (e.g. \"%Y-%m-%d\")\n\n Returns\n -------\n formatted strings : same type as values\n Array-like of strings formatted for each element in values\n\n Examples\n --------\n >>> rng = xr.Dataset({'time': datetime.datetime(2000, 1, 1)})\n >>> rng['time'].dt.strftime('%B %d, %Y, %r')\n \n array('January 01, 2000, 12:00:00 AM', dtype=object)\n \"\"\"\n\n ","endLoc":323,"header":"def strftime(self, date_format)","id":1561,"name":"strftime","nodeType":"Function","startLoc":291,"text":"def strftime(self, date_format):\n '''\n Return an array of formatted strings specified by date_format, which\n supports the same string format as the python standard library. Details\n of the string format can be found in `python string format doc\n `__\n\n Parameters\n ----------\n date_format : str\n date format string (e.g. \"%Y-%m-%d\")\n\n Returns\n -------\n formatted strings : same type as values\n Array-like of strings formatted for each element in values\n\n Examples\n --------\n >>> rng = xr.Dataset({'time': datetime.datetime(2000, 1, 1)})\n >>> rng['time'].dt.strftime('%B %d, %Y, %r')\n \n array('January 01, 2000, 12:00:00 AM', dtype=object)\n \"\"\"\n\n '''\n obj_type = type(self._obj)\n\n result = _strftime(self._obj.data, date_format)\n\n return obj_type(\n result, name=\"strftime\", coords=self._obj.coords, dims=self._obj.dims\n )"},{"col":0,"comment":"\n Default plot of DataArray using matplotlib.pyplot.\n\n Calls xarray plotting function based on the dimensions of\n darray.squeeze()\n\n =============== ===========================\n Dimensions Plotting function\n --------------- ---------------------------\n 1 :py:func:`xarray.plot.line`\n 2 :py:func:`xarray.plot.pcolormesh`\n Anything else :py:func:`xarray.plot.hist`\n =============== ===========================\n\n Parameters\n ----------\n darray : DataArray\n row : string, optional\n If passed, make row faceted plots on this dimension name\n col : string, optional\n If passed, make column faceted plots on this dimension name\n hue : string, optional\n If passed, make faceted line plots with hue on this dimension name\n col_wrap : integer, optional\n Use together with ``col`` to wrap faceted plots\n ax : matplotlib axes, optional\n If None, uses the current axis. Not applicable when using facets.\n rtol : number, optional\n Relative tolerance used to determine if the indexes\n are uniformly spaced. Usually a small positive number.\n subplot_kws : dict, optional\n Dictionary of keyword arguments for matplotlib subplots. 
Only applies\n to FacetGrid plotting.\n **kwargs : optional\n Additional keyword arguments to matplotlib\n\n ","endLoc":202,"header":"def plot(\n darray,\n row=None,\n col=None,\n col_wrap=None,\n ax=None,\n hue=None,\n rtol=0.01,\n subplot_kws=None,\n **kwargs\n)","id":1562,"name":"plot","nodeType":"Function","startLoc":118,"text":"def plot(\n darray,\n row=None,\n col=None,\n col_wrap=None,\n ax=None,\n hue=None,\n rtol=0.01,\n subplot_kws=None,\n **kwargs\n):\n \"\"\"\n Default plot of DataArray using matplotlib.pyplot.\n\n Calls xarray plotting function based on the dimensions of\n darray.squeeze()\n\n =============== ===========================\n Dimensions Plotting function\n --------------- ---------------------------\n 1 :py:func:`xarray.plot.line`\n 2 :py:func:`xarray.plot.pcolormesh`\n Anything else :py:func:`xarray.plot.hist`\n =============== ===========================\n\n Parameters\n ----------\n darray : DataArray\n row : string, optional\n If passed, make row faceted plots on this dimension name\n col : string, optional\n If passed, make column faceted plots on this dimension name\n hue : string, optional\n If passed, make faceted line plots with hue on this dimension name\n col_wrap : integer, optional\n Use together with ``col`` to wrap faceted plots\n ax : matplotlib axes, optional\n If None, uses the current axis. Not applicable when using facets.\n rtol : number, optional\n Relative tolerance used to determine if the indexes\n are uniformly spaced. Usually a small positive number.\n subplot_kws : dict, optional\n Dictionary of keyword arguments for matplotlib subplots. Only applies\n to FacetGrid plotting.\n **kwargs : optional\n Additional keyword arguments to matplotlib\n\n \"\"\"\n darray = darray.squeeze().compute()\n\n plot_dims = set(darray.dims)\n plot_dims.discard(row)\n plot_dims.discard(col)\n plot_dims.discard(hue)\n\n ndims = len(plot_dims)\n\n error_msg = (\n \"Only 1d and 2d plots are supported for facets in xarray. 
\"\n \"See the package `Seaborn` for more options.\"\n )\n\n if ndims in [1, 2]:\n if row or col:\n kwargs[\"row\"] = row\n kwargs[\"col\"] = col\n kwargs[\"col_wrap\"] = col_wrap\n kwargs[\"subplot_kws\"] = subplot_kws\n if ndims == 1:\n plotfunc = line\n kwargs[\"hue\"] = hue\n elif ndims == 2:\n if hue:\n plotfunc = line\n kwargs[\"hue\"] = hue\n else:\n plotfunc = pcolormesh\n else:\n if row or col or hue:\n raise ValueError(error_msg)\n plotfunc = hist\n\n kwargs[\"ax\"] = ax\n\n return plotfunc(darray, **kwargs)"},{"col":0,"comment":"null","endLoc":145,"header":"def _strftime(values, date_format)","id":1563,"name":"_strftime","nodeType":"Function","startLoc":135,"text":"def _strftime(values, date_format):\n if is_np_datetime_like(values.dtype):\n access_method = _strftime_through_series\n else:\n access_method = _strftime_through_cftimeindex\n if isinstance(values, dask_array_type):\n from dask.array import map_blocks\n\n return map_blocks(access_method, values, date_format)\n else:\n return access_method(values, date_format)"},{"col":4,"comment":"null","endLoc":75,"header":"def get_attrs(self)","id":1564,"name":"get_attrs","nodeType":"Function","startLoc":74,"text":"def get_attrs(self):\n return Frozen({k: getattr(self.ds, k) for k in self.ds.ncattrs()})"},{"col":0,"comment":"\n Step plot of DataArray index against values\n\n Similar to :func:`matplotlib:matplotlib.pyplot.step`\n\n Parameters\n ----------\n where : {'pre', 'post', 'mid'}, optional, default 'pre'\n Define where the steps should be placed:\n - 'pre': The y value is continued constantly to the left from\n every *x* position, i.e. the interval ``(x[i-1], x[i]]`` has the\n value ``y[i]``.\n - 'post': The y value is continued constantly to the right from\n every *x* position, i.e. the interval ``[x[i], x[i+1])`` has the\n value ``y[i]``.\n - 'mid': Steps occur half-way between the *x* positions.\n Note that this parameter is ignored if the x coordinate consists of\n :py:func:`pandas.Interval` values, e.g. as a result of\n :py:func:`xarray.Dataset.groupby_bins`. In this case, the actual\n boundaries of the interval are used.\n\n *args, **kwargs : optional\n Additional arguments following :py:func:`xarray.plot.line`\n ","endLoc":390,"header":"def step(darray, *args, where=\"pre\", linestyle=None, ls=None, **kwargs)","id":1565,"name":"step","nodeType":"Function","startLoc":353,"text":"def step(darray, *args, where=\"pre\", linestyle=None, ls=None, **kwargs):\n \"\"\"\n Step plot of DataArray index against values\n\n Similar to :func:`matplotlib:matplotlib.pyplot.step`\n\n Parameters\n ----------\n where : {'pre', 'post', 'mid'}, optional, default 'pre'\n Define where the steps should be placed:\n - 'pre': The y value is continued constantly to the left from\n every *x* position, i.e. the interval ``(x[i-1], x[i]]`` has the\n value ``y[i]``.\n - 'post': The y value is continued constantly to the right from\n every *x* position, i.e. the interval ``[x[i], x[i+1])`` has the\n value ``y[i]``.\n - 'mid': Steps occur half-way between the *x* positions.\n Note that this parameter is ignored if the x coordinate consists of\n :py:func:`pandas.Interval` values, e.g. as a result of\n :py:func:`xarray.Dataset.groupby_bins`. 
In this case, the actual\n boundaries of the interval are used.\n\n *args, **kwargs : optional\n Additional arguments following :py:func:`xarray.plot.line`\n \"\"\"\n if where not in {\"pre\", \"post\", \"mid\"}:\n raise ValueError(\"'where' argument to step must be \" \"'pre', 'post' or 'mid'\")\n\n if ls is not None:\n if linestyle is None:\n linestyle = ls\n else:\n raise TypeError(\"ls and linestyle are mutually exclusive\")\n if linestyle is None:\n linestyle = \"\"\n linestyle = \"steps-\" + where + linestyle\n\n return line(darray, *args, linestyle=linestyle, **kwargs)"},{"attributeType":"null","col":0,"comment":"null","endLoc":4,"id":1566,"name":"__all__","nodeType":"Attribute","startLoc":4,"text":"__all__"},{"col":0,"comment":"","endLoc":1,"header":"__init__.py#","id":1567,"name":"","nodeType":"Function","startLoc":1,"text":"__all__ = [\n \"plot\",\n \"line\",\n \"step\",\n \"contour\",\n \"contourf\",\n \"hist\",\n \"imshow\",\n \"pcolormesh\",\n \"FacetGrid\",\n]"},{"col":0,"comment":"Coerce an array of datetime-like values to a pandas Series and\n apply string formatting\n ","endLoc":132,"header":"def _strftime_through_series(values, date_format)","id":1568,"name":"_strftime_through_series","nodeType":"Function","startLoc":126,"text":"def _strftime_through_series(values, date_format):\n \"\"\"Coerce an array of datetime-like values to a pandas Series and\n apply string formatting\n \"\"\"\n values_as_series = pd.Series(values.ravel())\n strs = values_as_series.dt.strftime(date_format)\n return strs.values.reshape(values.shape)"},{"fileName":"cfgrib_.py","filePath":"xarray/backends","id":1569,"nodeType":"File","text":"import numpy as np\n\nfrom .. import Variable\nfrom ..core import indexing\nfrom ..core.utils import Frozen, FrozenDict\nfrom .common import AbstractDataStore, BackendArray\nfrom .locks import SerializableLock, ensure_lock\n\n# FIXME: Add a dedicated lock, even if ecCodes is supposed to be thread-safe\n# in most circumstances. 
See:\n# https://confluence.ecmwf.int/display/ECC/Frequently+Asked+Questions\nECCODES_LOCK = SerializableLock()\n\n\nclass CfGribArrayWrapper(BackendArray):\n def __init__(self, datastore, array):\n self.datastore = datastore\n self.shape = array.shape\n self.dtype = array.dtype\n self.array = array\n\n def __getitem__(self, key):\n return indexing.explicit_indexing_adapter(\n key, self.shape, indexing.IndexingSupport.OUTER, self._getitem\n )\n\n def _getitem(self, key):\n with self.datastore.lock:\n return self.array[key]\n\n\nclass CfGribDataStore(AbstractDataStore):\n \"\"\"\n Implements the ``xr.AbstractDataStore`` read-only API for a GRIB file.\n \"\"\"\n\n def __init__(self, filename, lock=None, **backend_kwargs):\n import cfgrib\n\n if lock is None:\n lock = ECCODES_LOCK\n self.lock = ensure_lock(lock)\n self.ds = cfgrib.open_file(filename, **backend_kwargs)\n\n def open_store_variable(self, name, var):\n if isinstance(var.data, np.ndarray):\n data = var.data\n else:\n wrapped_array = CfGribArrayWrapper(self, var.data)\n data = indexing.LazilyOuterIndexedArray(wrapped_array)\n\n encoding = self.ds.encoding.copy()\n encoding[\"original_shape\"] = var.data.shape\n\n return Variable(var.dimensions, data, var.attributes, encoding)\n\n def get_variables(self):\n return FrozenDict(\n (k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()\n )\n\n def get_attrs(self):\n return Frozen(self.ds.attributes)\n\n def get_dimensions(self):\n return Frozen(self.ds.dimensions)\n\n def get_encoding(self):\n dims = self.get_dimensions()\n encoding = {\"unlimited_dims\": {k for k, v in dims.items() if v is None}}\n return encoding\n"},{"className":"CfGribArrayWrapper","col":0,"comment":"null","endLoc":29,"id":1570,"nodeType":"Class","startLoc":15,"text":"class CfGribArrayWrapper(BackendArray):\n def __init__(self, datastore, array):\n self.datastore = datastore\n self.shape = array.shape\n self.dtype = array.dtype\n self.array = array\n\n def __getitem__(self, key):\n return indexing.explicit_indexing_adapter(\n key, self.shape, indexing.IndexingSupport.OUTER, self._getitem\n )\n\n def _getitem(self, key):\n with self.datastore.lock:\n return self.array[key]"},{"col":4,"comment":"null","endLoc":20,"header":"def __init__(self, datastore, array)","id":1571,"name":"__init__","nodeType":"Function","startLoc":16,"text":"def __init__(self, datastore, array):\n self.datastore = datastore\n self.shape = array.shape\n self.dtype = array.dtype\n self.array = array"},{"col":4,"comment":"null","endLoc":25,"header":"def __getitem__(self, key)","id":1572,"name":"__getitem__","nodeType":"Function","startLoc":22,"text":"def __getitem__(self, key):\n return indexing.explicit_indexing_adapter(\n key, self.shape, indexing.IndexingSupport.OUTER, self._getitem\n )"},{"col":4,"comment":"null","endLoc":29,"header":"def _getitem(self, key)","id":1573,"name":"_getitem","nodeType":"Function","startLoc":27,"text":"def _getitem(self, key):\n with self.datastore.lock:\n return self.array[key]"},{"col":0,"comment":"Coerce an array of cftime-like values to a CFTimeIndex\n and access requested datetime component\n ","endLoc":123,"header":"def _strftime_through_cftimeindex(values, date_format)","id":1574,"name":"_strftime_through_cftimeindex","nodeType":"Function","startLoc":114,"text":"def _strftime_through_cftimeindex(values, date_format):\n \"\"\"Coerce an array of cftime-like values to a CFTimeIndex\n and access requested datetime component\n \"\"\"\n from ..coding.cftimeindex import CFTimeIndex\n\n 
values_as_cftimeindex = CFTimeIndex(values.ravel())\n\n field_values = values_as_cftimeindex.strftime(date_format)\n return field_values.values.reshape(values.shape)"},{"attributeType":"null","col":8,"comment":"null","endLoc":18,"id":1575,"name":"shape","nodeType":"Attribute","startLoc":18,"text":"self.shape"},{"col":4,"comment":"Set DataArray (multi-)indexes using one or more existing\n coordinates.\n\n Parameters\n ----------\n indexes : {dim: index, ...}\n Mapping from names matching dimensions and values given\n by (lists of) the names of existing coordinates or variables to set\n as new (multi-)index.\n append : bool, optional\n If True, append the supplied index(es) to the existing index(es).\n Otherwise replace the existing index(es) (default).\n **indexes_kwargs: optional\n The keyword arguments form of ``indexes``.\n One of indexes or indexes_kwargs must be provided.\n\n Returns\n -------\n obj : DataArray\n Another DataArray, with this data but replaced coordinates.\n\n Examples\n --------\n >>> arr = xr.DataArray(data=np.ones((2, 3)),\n ... dims=['x', 'y'],\n ... coords={'x':\n ... range(2), 'y':\n ... range(3), 'a': ('x', [3, 4])\n ... })\n >>> arr\n \n array([[1., 1., 1.],\n [1., 1., 1.]])\n Coordinates:\n * x (x) int64 0 1\n * y (y) int64 0 1 2\n a (x) int64 3 4\n >>> arr.set_index(x='a')\n \n array([[1., 1., 1.],\n [1., 1., 1.]])\n Coordinates:\n * x (x) int64 3 4\n * y (y) int64 0 1 2\n\n See Also\n --------\n DataArray.reset_index\n ","endLoc":1602,"header":"def set_index(\n self,\n indexes: Mapping[Hashable, Union[Hashable, Sequence[Hashable]]] = None,\n append: bool = False,\n inplace: bool = None,\n **indexes_kwargs: Union[Hashable, Sequence[Hashable]]\n ) -> Optional[\"DataArray\"]","id":1576,"name":"set_index","nodeType":"Function","startLoc":1543,"text":"def set_index(\n self,\n indexes: Mapping[Hashable, Union[Hashable, Sequence[Hashable]]] = None,\n append: bool = False,\n inplace: bool = None,\n **indexes_kwargs: Union[Hashable, Sequence[Hashable]]\n ) -> Optional[\"DataArray\"]:\n \"\"\"Set DataArray (multi-)indexes using one or more existing\n coordinates.\n\n Parameters\n ----------\n indexes : {dim: index, ...}\n Mapping from names matching dimensions and values given\n by (lists of) the names of existing coordinates or variables to set\n as new (multi-)index.\n append : bool, optional\n If True, append the supplied index(es) to the existing index(es).\n Otherwise replace the existing index(es) (default).\n **indexes_kwargs: optional\n The keyword arguments form of ``indexes``.\n One of indexes or indexes_kwargs must be provided.\n\n Returns\n -------\n obj : DataArray\n Another DataArray, with this data but replaced coordinates.\n\n Examples\n --------\n >>> arr = xr.DataArray(data=np.ones((2, 3)),\n ... dims=['x', 'y'],\n ... coords={'x':\n ... range(2), 'y':\n ... range(3), 'a': ('x', [3, 4])\n ... 
})\n >>> arr\n \n array([[1., 1., 1.],\n [1., 1., 1.]])\n Coordinates:\n * x (x) int64 0 1\n * y (y) int64 0 1 2\n a (x) int64 3 4\n >>> arr.set_index(x='a')\n \n array([[1., 1., 1.],\n [1., 1., 1.]])\n Coordinates:\n * x (x) int64 3 4\n * y (y) int64 0 1 2\n\n See Also\n --------\n DataArray.reset_index\n \"\"\"\n _check_inplace(inplace)\n indexes = either_dict_or_kwargs(indexes, indexes_kwargs, \"set_index\")\n coords, _ = merge_indexes(indexes, self._coords, set(), append=append)\n return self._replace(coords=coords)"},{"col":4,"comment":"null","endLoc":78,"header":"def get_dimensions(self)","id":1577,"name":"get_dimensions","nodeType":"Function","startLoc":77,"text":"def get_dimensions(self):\n return Frozen(self.ds.dimensions)"},{"col":4,"comment":"null","endLoc":85,"header":"def get_encoding(self)","id":1578,"name":"get_encoding","nodeType":"Function","startLoc":80,"text":"def get_encoding(self):\n return {\n \"unlimited_dims\": {\n k for k in self.ds.dimensions if self.ds.dimensions[k].isunlimited()\n }\n }"},{"col":4,"comment":"null","endLoc":88,"header":"def close(self)","id":1579,"name":"close","nodeType":"Function","startLoc":87,"text":"def close(self):\n self._manager.close()"},{"attributeType":"null","col":4,"comment":"null","endLoc":228,"id":1580,"name":"timeout","nodeType":"Attribute","startLoc":228,"text":"timeout"},{"attributeType":"null","col":4,"comment":"null","endLoc":229,"id":1581,"name":"repeat","nodeType":"Attribute","startLoc":229,"text":"repeat"},{"attributeType":"null","col":4,"comment":"null","endLoc":230,"id":1582,"name":"number","nodeType":"Attribute","startLoc":230,"text":"number"},{"attributeType":"null","col":8,"comment":"null","endLoc":57,"id":1583,"name":"_manager","nodeType":"Attribute","startLoc":57,"text":"self._manager"},{"attributeType":"null","col":8,"comment":"null","endLoc":58,"id":1584,"name":"lock","nodeType":"Attribute","startLoc":58,"text":"self.lock"},{"attributeType":"null","col":8,"comment":"null","endLoc":241,"id":1585,"name":"block_chunks","nodeType":"Attribute","startLoc":241,"text":"self.block_chunks"},{"col":4,"comment":"\n Return an Index of formatted strings specified by date_format, which\n supports the same string format as the python standard library. Details\n of the string format can be found in `python string format doc\n `__\n\n Parameters\n ----------\n date_format : str\n Date format string (e.g. \"%Y-%m-%d\")\n\n Returns\n -------\n Index\n Index of formatted strings\n\n Examples\n --------\n >>> rng = xr.cftime_range(start='2000', periods=5, freq='2MS',\n ... calendar='noleap')\n >>> rng.strftime('%B %d, %Y, %r')\n Index(['January 01, 2000, 12:00:00 AM', 'March 01, 2000, 12:00:00 AM',\n 'May 01, 2000, 12:00:00 AM', 'July 01, 2000, 12:00:00 AM',\n 'September 01, 2000, 12:00:00 AM'],\n dtype='object')\n ","endLoc":520,"header":"def strftime(self, date_format)","id":1586,"name":"strftime","nodeType":"Function","startLoc":493,"text":"def strftime(self, date_format):\n \"\"\"\n Return an Index of formatted strings specified by date_format, which\n supports the same string format as the python standard library. Details\n of the string format can be found in `python string format doc\n `__\n\n Parameters\n ----------\n date_format : str\n Date format string (e.g. \"%Y-%m-%d\")\n\n Returns\n -------\n Index\n Index of formatted strings\n\n Examples\n --------\n >>> rng = xr.cftime_range(start='2000', periods=5, freq='2MS',\n ... 
calendar='noleap')\n >>> rng.strftime('%B %d, %Y, %r')\n Index(['January 01, 2000, 12:00:00 AM', 'March 01, 2000, 12:00:00 AM',\n 'May 01, 2000, 12:00:00 AM', 'July 01, 2000, 12:00:00 AM',\n 'September 01, 2000, 12:00:00 AM'],\n dtype='object')\n \"\"\"\n return pd.Index([date.strftime(date_format) for date in self._data])"},{"attributeType":"null","col":8,"comment":"null","endLoc":247,"id":1587,"name":"time_chunks","nodeType":"Attribute","startLoc":247,"text":"self.time_chunks"},{"col":4,"comment":"Reset the specified index(es) or multi-index level(s).\n\n Parameters\n ----------\n dims_or_levels : hashable or sequence of hashables\n Name(s) of the dimension(s) and/or multi-index level(s) that will\n be reset.\n drop : bool, optional\n If True, remove the specified indexes and/or multi-index levels\n instead of extracting them as new coordinates (default: False).\n\n Returns\n -------\n obj : DataArray\n Another dataarray, with this dataarray's data but replaced\n coordinates.\n\n See Also\n --------\n DataArray.set_index\n ","endLoc":1635,"header":"def reset_index(\n self,\n dims_or_levels: Union[Hashable, Sequence[Hashable]],\n drop: bool = False,\n inplace: bool = None,\n ) -> Optional[\"DataArray\"]","id":1588,"name":"reset_index","nodeType":"Function","startLoc":1604,"text":"def reset_index(\n self,\n dims_or_levels: Union[Hashable, Sequence[Hashable]],\n drop: bool = False,\n inplace: bool = None,\n ) -> Optional[\"DataArray\"]:\n \"\"\"Reset the specified index(es) or multi-index level(s).\n\n Parameters\n ----------\n dims_or_levels : hashable or sequence of hashables\n Name(s) of the dimension(s) and/or multi-index level(s) that will\n be reset.\n drop : bool, optional\n If True, remove the specified indexes and/or multi-index levels\n instead of extracting them as new coordinates (default: False).\n\n Returns\n -------\n obj : DataArray\n Another dataarray, with this dataarray's data but replaced\n coordinates.\n\n See Also\n --------\n DataArray.set_index\n \"\"\"\n _check_inplace(inplace)\n coords, _ = split_indexes(\n dims_or_levels, self._coords, set(), self._level_coords, drop=drop\n )\n return self._replace(coords=coords)"},{"attributeType":"null","col":8,"comment":"null","endLoc":254,"id":1589,"name":"filenames_list","nodeType":"Attribute","startLoc":254,"text":"self.filenames_list"},{"attributeType":"null","col":8,"comment":"null","endLoc":236,"id":1590,"name":"nt","nodeType":"Attribute","startLoc":236,"text":"self.nt"},{"attributeType":"null","col":0,"comment":"null","endLoc":11,"id":1591,"name":"PNETCDF_LOCK","nodeType":"Attribute","startLoc":11,"text":"PNETCDF_LOCK"},{"attributeType":"null","col":8,"comment":"null","endLoc":253,"id":1592,"name":"ds_list","nodeType":"Attribute","startLoc":253,"text":"self.ds_list"},{"attributeType":"null","col":8,"comment":"null","endLoc":237,"id":1593,"name":"nx","nodeType":"Attribute","startLoc":237,"text":"self.nx"},{"attributeType":"null","col":8,"comment":"null","endLoc":239,"id":1594,"name":"nfiles","nodeType":"Attribute","startLoc":239,"text":"self.nfiles"},{"col":0,"comment":"","endLoc":1,"header":"pseudonetcdf_.py#","id":1595,"name":"","nodeType":"Function","startLoc":1,"text":"PNETCDF_LOCK = combine_locks([HDF5_LOCK, NETCDFC_LOCK])"},{"attributeType":"{shape, dtype}","col":8,"comment":"null","endLoc":20,"id":1596,"name":"array","nodeType":"Attribute","startLoc":20,"text":"self.array"},{"col":0,"comment":"Extract (multi-)indexes (levels) as variables.\n\n Not public API. 
Used in Dataset and DataArray reset_index\n methods.\n ","endLoc":323,"header":"def split_indexes(\n dims_or_levels: Union[Hashable, Sequence[Hashable]],\n variables: Mapping[Hashable, Variable],\n coord_names: Set[Hashable],\n level_coords: Mapping[Hashable, Hashable],\n drop: bool = False,\n) -> Tuple[Dict[Hashable, Variable], Set[Hashable]]","id":1597,"name":"split_indexes","nodeType":"Function","startLoc":268,"text":"def split_indexes(\n dims_or_levels: Union[Hashable, Sequence[Hashable]],\n variables: Mapping[Hashable, Variable],\n coord_names: Set[Hashable],\n level_coords: Mapping[Hashable, Hashable],\n drop: bool = False,\n) -> Tuple[Dict[Hashable, Variable], Set[Hashable]]:\n \"\"\"Extract (multi-)indexes (levels) as variables.\n\n Not public API. Used in Dataset and DataArray reset_index\n methods.\n \"\"\"\n if isinstance(dims_or_levels, str) or not isinstance(dims_or_levels, Sequence):\n dims_or_levels = [dims_or_levels]\n\n dim_levels: DefaultDict[Any, List[Hashable]] = defaultdict(list)\n dims = []\n for k in dims_or_levels:\n if k in level_coords:\n dim_levels[level_coords[k]].append(k)\n else:\n dims.append(k)\n\n vars_to_replace = {}\n vars_to_create: Dict[Hashable, Variable] = {}\n vars_to_remove = []\n\n for d in dims:\n index = variables[d].to_index()\n if isinstance(index, pd.MultiIndex):\n dim_levels[d] = index.names\n else:\n vars_to_remove.append(d)\n if not drop:\n vars_to_create[str(d) + \"_\"] = Variable(d, index)\n\n for d, levs in dim_levels.items():\n index = variables[d].to_index()\n if len(levs) == index.nlevels:\n vars_to_remove.append(d)\n else:\n vars_to_replace[d] = IndexVariable(d, index.droplevel(levs))\n\n if not drop:\n for lev in levs:\n idx = index.get_level_values(lev)\n vars_to_create[idx.name] = Variable(d, idx)\n\n new_variables = dict(variables)\n for v in set(vars_to_remove):\n del new_variables[v]\n new_variables.update(vars_to_replace)\n new_variables.update(vars_to_create)\n new_coord_names = (coord_names | set(vars_to_create)) - set(vars_to_remove)\n\n return new_variables, new_coord_names"},{"id":1598,"name":".pep8speaks.yml","nodeType":"TextFile","path":"","text":"# https://github.com/OrkoHunter/pep8speaks for more info\n# pep8speaks will use the flake8 configs in `setup.cfg`\n\nscanner:\n diff_only: False\n linter: flake8\n"},{"attributeType":"null","col":8,"comment":"null","endLoc":238,"id":1599,"name":"ny","nodeType":"Attribute","startLoc":238,"text":"self.ny"},{"fileName":"arithmetic.py","filePath":"xarray/core","id":1600,"nodeType":"File","text":"\"\"\"Base classes implementing arithmetic for xarray objects.\"\"\"\nimport numbers\n\nimport numpy as np\n\nfrom .options import OPTIONS\nfrom .pycompat import dask_array_type\nfrom .utils import not_implemented\n\n\nclass SupportsArithmetic:\n \"\"\"Base class for xarray types that support arithmetic.\n\n Used by Dataset, DataArray, Variable and GroupBy.\n \"\"\"\n\n __slots__ = ()\n\n # TODO: implement special methods for arithmetic here rather than injecting\n # them in xarray/core/ops.py. 
Ideally, do so by inheriting from\n # numpy.lib.mixins.NDArrayOperatorsMixin.\n\n # TODO: allow extending this with some sort of registration system\n _HANDLED_TYPES = (\n np.ndarray,\n np.generic,\n numbers.Number,\n bytes,\n str,\n ) + dask_array_type\n\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n from .computation import apply_ufunc\n\n # See the docstring example for numpy.lib.mixins.NDArrayOperatorsMixin.\n out = kwargs.get(\"out\", ())\n for x in inputs + out:\n if not isinstance(x, self._HANDLED_TYPES + (SupportsArithmetic,)):\n return NotImplemented\n\n if ufunc.signature is not None:\n raise NotImplementedError(\n \"{} not supported: xarray objects do not directly implement \"\n \"generalized ufuncs. Instead, use xarray.apply_ufunc or \"\n \"explicitly convert to xarray objects to NumPy arrays \"\n \"(e.g., with `.values`).\".format(ufunc)\n )\n\n if method != \"__call__\":\n # TODO: support other methods, e.g., reduce and accumulate.\n raise NotImplementedError(\n \"{} method for ufunc {} is not implemented on xarray objects, \"\n \"which currently only support the __call__ method. As an \"\n \"alternative, consider explicitly converting xarray objects \"\n \"to NumPy arrays (e.g., with `.values`).\".format(method, ufunc)\n )\n\n if any(isinstance(o, SupportsArithmetic) for o in out):\n # TODO: implement this with logic like _inplace_binary_op. This\n # will be necessary to use NDArrayOperatorsMixin.\n raise NotImplementedError(\n \"xarray objects are not yet supported in the `out` argument \"\n \"for ufuncs. As an alternative, consider explicitly \"\n \"converting xarray objects to NumPy arrays (e.g., with \"\n \"`.values`).\"\n )\n\n join = dataset_join = OPTIONS[\"arithmetic_join\"]\n\n return apply_ufunc(\n ufunc,\n *inputs,\n input_core_dims=((),) * ufunc.nin,\n output_core_dims=((),) * ufunc.nout,\n join=join,\n dataset_join=dataset_join,\n dataset_fill_value=np.nan,\n kwargs=kwargs,\n dask=\"allowed\"\n )\n\n # this has no runtime function - these are listed so IDEs know these\n # methods are defined and don't warn on these operations\n __lt__ = (\n __le__\n ) = (\n __ge__\n ) = (\n __gt__\n ) = (\n __add__\n ) = (\n __sub__\n ) = (\n __mul__\n ) = (\n __truediv__\n ) = (\n __floordiv__\n ) = (\n __mod__\n ) = (\n __pow__\n ) = __and__ = __xor__ = __or__ = __div__ = __eq__ = __ne__ = not_implemented\n"},{"attributeType":"null","col":4,"comment":"null","endLoc":195,"id":1601,"name":"year","nodeType":"Attribute","startLoc":195,"text":"year"},{"attributeType":"null","col":8,"comment":"null","endLoc":249,"id":1602,"name":"time_vars","nodeType":"Attribute","startLoc":249,"text":"self.time_vars"},{"col":0,"comment":"","endLoc":1,"header":"arithmetic.py#","id":1603,"name":"","nodeType":"Function","startLoc":1,"text":"\"\"\"Base classes implementing arithmetic for xarray objects.\"\"\""},{"attributeType":"Dataset","col":8,"comment":"null","endLoc":235,"id":1604,"name":"ds","nodeType":"Attribute","startLoc":235,"text":"self.ds"},{"className":"IOWriteMultipleNetCDF3","col":0,"comment":"null","endLoc":312,"id":1605,"nodeType":"Class","startLoc":299,"text":"class IOWriteMultipleNetCDF3(IOMultipleNetCDF):\n def setup(self):\n self.make_ds()\n self.format = \"NETCDF3_64BIT\"\n\n def time_write_dataset_netcdf4(self):\n xr.save_mfdataset(\n self.ds_list, self.filenames_list, engine=\"netcdf4\", format=self.format\n )\n\n def time_write_dataset_scipy(self):\n xr.save_mfdataset(\n self.ds_list, self.filenames_list, engine=\"scipy\", format=self.format\n 
)"},{"col":4,"comment":"null","endLoc":302,"header":"def setup(self)","id":1606,"name":"setup","nodeType":"Function","startLoc":300,"text":"def setup(self):\n self.make_ds()\n self.format = \"NETCDF3_64BIT\""},{"attributeType":"null","col":8,"comment":"null","endLoc":17,"id":1607,"name":"datastore","nodeType":"Attribute","startLoc":17,"text":"self.datastore"},{"attributeType":"null","col":4,"comment":"null","endLoc":196,"id":1608,"name":"month","nodeType":"Attribute","startLoc":196,"text":"month"},{"attributeType":"null","col":8,"comment":"null","endLoc":19,"id":1609,"name":"dtype","nodeType":"Attribute","startLoc":19,"text":"self.dtype"},{"col":4,"comment":"null","endLoc":307,"header":"def time_write_dataset_netcdf4(self)","id":1610,"name":"time_write_dataset_netcdf4","nodeType":"Function","startLoc":304,"text":"def time_write_dataset_netcdf4(self):\n xr.save_mfdataset(\n self.ds_list, self.filenames_list, engine=\"netcdf4\", format=self.format\n )"},{"className":"CfGribDataStore","col":0,"comment":"\n Implements the ``xr.AbstractDataStore`` read-only API for a GRIB file.\n ","endLoc":71,"id":1611,"nodeType":"Class","startLoc":32,"text":"class CfGribDataStore(AbstractDataStore):\n \"\"\"\n Implements the ``xr.AbstractDataStore`` read-only API for a GRIB file.\n \"\"\"\n\n def __init__(self, filename, lock=None, **backend_kwargs):\n import cfgrib\n\n if lock is None:\n lock = ECCODES_LOCK\n self.lock = ensure_lock(lock)\n self.ds = cfgrib.open_file(filename, **backend_kwargs)\n\n def open_store_variable(self, name, var):\n if isinstance(var.data, np.ndarray):\n data = var.data\n else:\n wrapped_array = CfGribArrayWrapper(self, var.data)\n data = indexing.LazilyOuterIndexedArray(wrapped_array)\n\n encoding = self.ds.encoding.copy()\n encoding[\"original_shape\"] = var.data.shape\n\n return Variable(var.dimensions, data, var.attributes, encoding)\n\n def get_variables(self):\n return FrozenDict(\n (k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()\n )\n\n def get_attrs(self):\n return Frozen(self.ds.attributes)\n\n def get_dimensions(self):\n return Frozen(self.ds.dimensions)\n\n def get_encoding(self):\n dims = self.get_dimensions()\n encoding = {\"unlimited_dims\": {k for k, v in dims.items() if v is None}}\n return encoding"},{"col":0,"comment":"Write multiple datasets to disk as netCDF files simultaneously.\n\n This function is intended for use with datasets consisting of dask.array\n objects, in which case it can write the multiple datasets to disk\n simultaneously using a shared thread pool.\n\n When not using dask, it is no different than calling ``to_netcdf``\n repeatedly.\n\n Parameters\n ----------\n datasets : list of xarray.Dataset\n List of datasets to save.\n paths : list of str or list of Paths\n List of paths to which to save each corresponding dataset.\n mode : {'w', 'a'}, optional\n Write ('w') or append ('a') mode. If mode='w', any existing file at\n these locations will be overwritten.\n format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT',\n 'NETCDF3_CLASSIC'}, optional\n\n File format for the resulting netCDF file:\n\n * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API\n features.\n * NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only\n netCDF 3 compatible API features.\n * NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,\n which fully supports 2+ GB files, but is only compatible with\n clients linked against netCDF version 3.6.0 or later.\n * NETCDF3_CLASSIC: The classic netCDF 3 file format. 
It does not\n handle 2+ GB files very well.\n\n All formats are supported by the netCDF4-python library.\n scipy.io.netcdf only supports the last two formats.\n\n The default format is NETCDF4 if you are saving a file to disk and\n have the netCDF4-python library available. Otherwise, xarray falls\n back to using scipy to write netCDF files and defaults to the\n NETCDF3_64BIT format (scipy does not support netCDF4).\n groups : list of str, optional\n Paths to the netCDF4 group in each corresponding file to which to save\n datasets (only works for format='NETCDF4'). The groups will be created\n if necessary.\n engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional\n Engine to use when writing netCDF files. If not provided, the\n default engine is chosen based on available dependencies, with a\n preference for 'netcdf4' if writing to a file on disk.\n See `Dataset.to_netcdf` for additional information.\n compute: boolean\n If true compute immediately, otherwise return a\n ``dask.delayed.Delayed`` object that can be computed later.\n\n Examples\n --------\n\n Save a dataset into one netCDF per year of data:\n\n >>> years, datasets = zip(*ds.groupby('time.year'))\n >>> paths = ['%s.nc' % y for y in years]\n >>> xr.save_mfdataset(datasets, paths)\n ","endLoc":1228,"header":"def save_mfdataset(\n datasets, paths, mode=\"w\", format=None, groups=None, engine=None, compute=True\n)","id":1612,"name":"save_mfdataset","nodeType":"Function","startLoc":1120,"text":"def save_mfdataset(\n datasets, paths, mode=\"w\", format=None, groups=None, engine=None, compute=True\n):\n \"\"\"Write multiple datasets to disk as netCDF files simultaneously.\n\n This function is intended for use with datasets consisting of dask.array\n objects, in which case it can write the multiple datasets to disk\n simultaneously using a shared thread pool.\n\n When not using dask, it is no different than calling ``to_netcdf``\n repeatedly.\n\n Parameters\n ----------\n datasets : list of xarray.Dataset\n List of datasets to save.\n paths : list of str or list of Paths\n List of paths to which to save each corresponding dataset.\n mode : {'w', 'a'}, optional\n Write ('w') or append ('a') mode. If mode='w', any existing file at\n these locations will be overwritten.\n format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT',\n 'NETCDF3_CLASSIC'}, optional\n\n File format for the resulting netCDF file:\n\n * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API\n features.\n * NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only\n netCDF 3 compatible API features.\n * NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,\n which fully supports 2+ GB files, but is only compatible with\n clients linked against netCDF version 3.6.0 or later.\n * NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not\n handle 2+ GB files very well.\n\n All formats are supported by the netCDF4-python library.\n scipy.io.netcdf only supports the last two formats.\n\n The default format is NETCDF4 if you are saving a file to disk and\n have the netCDF4-python library available. Otherwise, xarray falls\n back to using scipy to write netCDF files and defaults to the\n NETCDF3_64BIT format (scipy does not support netCDF4).\n groups : list of str, optional\n Paths to the netCDF4 group in each corresponding file to which to save\n datasets (only works for format='NETCDF4'). The groups will be created\n if necessary.\n engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional\n Engine to use when writing netCDF files. 
If not provided, the\n default engine is chosen based on available dependencies, with a\n preference for 'netcdf4' if writing to a file on disk.\n See `Dataset.to_netcdf` for additional information.\n compute: boolean\n If true compute immediately, otherwise return a\n ``dask.delayed.Delayed`` object that can be computed later.\n\n Examples\n --------\n\n Save a dataset into one netCDF per year of data:\n\n >>> years, datasets = zip(*ds.groupby('time.year'))\n >>> paths = ['%s.nc' % y for y in years]\n >>> xr.save_mfdataset(datasets, paths)\n \"\"\"\n if mode == \"w\" and len(set(paths)) < len(paths):\n raise ValueError(\n \"cannot use mode='w' when writing multiple \" \"datasets to the same path\"\n )\n\n for obj in datasets:\n if not isinstance(obj, Dataset):\n raise TypeError(\n \"save_mfdataset only supports writing Dataset \"\n \"objects, received type %s\" % type(obj)\n )\n\n if groups is None:\n groups = [None] * len(datasets)\n\n if len({len(datasets), len(paths), len(groups)}) > 1:\n raise ValueError(\n \"must supply lists of the same length for the \"\n \"datasets, paths and groups arguments to \"\n \"save_mfdataset\"\n )\n\n writers, stores = zip(\n *[\n to_netcdf(\n ds, path, mode, format, group, engine, compute=compute, multifile=True\n )\n for ds, path, group in zip(datasets, paths, groups)\n ]\n )\n\n try:\n writes = [w.sync(compute=compute) for w in writers]\n finally:\n if compute:\n for store in stores:\n store.close()\n\n if not compute:\n import dask\n\n return dask.delayed(\n [dask.delayed(_finalize_store)(w, s) for w, s in zip(writes, stores)]\n )"},{"col":4,"comment":"null","endLoc":55,"header":"def open_store_variable(self, name, var)","id":1613,"name":"open_store_variable","nodeType":"Function","startLoc":45,"text":"def open_store_variable(self, name, var):\n if isinstance(var.data, np.ndarray):\n data = var.data\n else:\n wrapped_array = CfGribArrayWrapper(self, var.data)\n data = indexing.LazilyOuterIndexedArray(wrapped_array)\n\n encoding = self.ds.encoding.copy()\n encoding[\"original_shape\"] = var.data.shape\n\n return Variable(var.dimensions, data, var.attributes, encoding)"},{"id":1614,"name":"weather-data.rst","nodeType":"TextFile","path":"doc/examples","text":".. _toy weather data:\n\nToy weather data\n================\n\nHere is an example of how to easily manipulate a toy weather dataset using\nxarray and other recommended Python libraries:\n\n.. contents::\n :local:\n :depth: 1\n\nShared setup:\n\n.. literalinclude:: _code/weather_data_setup.py\n\n.. ipython:: python\n :suppress:\n\n fpath = \"examples/_code/weather_data_setup.py\"\n with open(fpath) as f:\n code = compile(f.read(), fpath, 'exec')\n exec(code)\n\n\nExamine a dataset with pandas_ and seaborn_\n-------------------------------------------\n\n.. _pandas: http://pandas.pydata.org\n.. _seaborn: http://stanford.edu/~mwaskom/software/seaborn\n\n.. ipython:: python\n\n ds\n\n df = ds.to_dataframe()\n\n df.head()\n\n df.describe()\n\n @savefig examples_tmin_tmax_plot.png\n ds.mean(dim='location').to_dataframe().plot()\n\n\n.. ipython:: python\n\n @savefig examples_pairplot.png\n sns.pairplot(df.reset_index(), vars=ds.data_vars)\n\n.. _average by month:\n\nProbability of freeze by calendar month\n---------------------------------------\n\n.. ipython:: python\n\n freeze = (ds['tmin'] <= 0).groupby('time.month').mean('time')\n freeze\n\n @savefig examples_freeze_prob.png\n freeze.to_pandas().plot()\n\n.. _monthly average:\n\nMonthly averaging\n-----------------\n\n.. 
ipython:: python\n\n monthly_avg = ds.resample(time='1MS').mean()\n\n @savefig examples_tmin_tmax_plot_mean.png\n monthly_avg.sel(location='IA').to_dataframe().plot(style='s-')\n\nNote that ``MS`` here refers to Month-Start; ``M`` labels Month-End (the last\nday of the month).\n\n.. _monthly anomalies:\n\nCalculate monthly anomalies\n---------------------------\n\nIn climatology, \"anomalies\" refer to the difference between observations and\ntypical weather for a particular season. Unlike observations, anomalies should\nnot show any seasonal cycle.\n\n.. ipython:: python\n\n climatology = ds.groupby('time.month').mean('time')\n anomalies = ds.groupby('time.month') - climatology\n\n @savefig examples_anomalies_plot.png\n anomalies.mean('location').to_dataframe()[['tmin', 'tmax']].plot()\n\n.. _standardized monthly anomalies:\n\nCalculate standardized monthly anomalies\n----------------------------------------\n\nYou can create standardized anomalies where the difference between the\nobservations and the climatological monthly mean is\ndivided by the climatological standard deviation.\n\n.. ipython:: python\n\n climatology_mean = ds.groupby('time.month').mean('time')\n climatology_std = ds.groupby('time.month').std('time')\n stand_anomalies = xr.apply_ufunc(\n lambda x, m, s: (x - m) / s,\n ds.groupby('time.month'),\n climatology_mean, climatology_std)\n\n @savefig examples_standardized_anomalies_plot.png\n stand_anomalies.mean('location').to_dataframe()[['tmin', 'tmax']].plot()\n\n.. _fill with climatology:\n\nFill missing values with climatology\n------------------------------------\n\nThe :py:func:`~xarray.Dataset.fillna` method on grouped objects lets you easily\nfill missing values by group:\n\n.. ipython:: python\n :okwarning:\n\n # throw away the first half of every month\n some_missing = ds.tmin.sel(time=ds['time.day'] > 15).reindex_like(ds)\n filled = some_missing.groupby('time.month').fillna(climatology.tmin)\n\n both = xr.Dataset({'some_missing': some_missing, 'filled': filled})\n both\n\n df = both.sel(time='2000').mean('location').reset_coords(drop=True).to_dataframe()\n\n @savefig examples_filled.png\n df[['filled', 'some_missing']].plot()\n"},{"id":1615,"name":"quick-overview.rst","nodeType":"TextFile","path":"doc","text":"##############\nQuick overview\n##############\n\nHere are some quick examples of what you can do with :py:class:`xarray.DataArray`\nobjects. Everything is explained in much more detail in the rest of the\ndocumentation.\n\nTo begin, import numpy, pandas and xarray using their customary abbreviations:\n\n.. ipython:: python\n\n import numpy as np\n import pandas as pd\n import xarray as xr\n\nCreate a DataArray\n------------------\n\nYou can make a DataArray from scratch by supplying data in the form of a numpy\narray or list, with optional *dimensions* and *coordinates*:\n\n.. ipython:: python\n\n data = xr.DataArray(np.random.randn(2, 3),\n dims=('x', 'y'),\n coords={'x': [10, 20]})\n data\n\nIn this case, we have generated a 2D array, assigned the names *x* and *y* to the two dimensions respectively and associated two *coordinate labels* '10' and '20' with the two locations along the x dimension. If you supply a pandas :py:class:`~pandas.Series` or :py:class:`~pandas.DataFrame`, metadata is copied directly:\n\n.. ipython:: python\n\n xr.DataArray(pd.Series(range(3), index=list('abc'), name='foo'))\n\nHere are the key properties for a ``DataArray``:\n\n.. 
ipython:: python\n\n # like in pandas, values is a numpy array that you can modify in-place\n data.values\n data.dims\n data.coords\n # you can use this dictionary to store arbitrary metadata\n data.attrs\n\n\nIndexing\n--------\n\nxarray supports four kind of indexing. Since we have assigned coordinate labels to the x dimension we can use label-based indexing along that dimension just like pandas. The four examples below all yield the same result (the value at `x=10`) but at varying levels of convenience and intuitiveness.\n\n.. ipython:: python\n\n # positional and by integer label, like numpy\n data[0, :]\n\n # loc or \"location\": positional and coordinate label, like pandas\n data.loc[10]\n\n # isel or \"integer select\": by dimension name and integer label\n data.isel(x=0)\n\n # sel or \"select\": by dimension name and coordinate label\n data.sel(x=10)\n\n\nUnlike positional indexing, label-based indexing frees us from having to know how our array is organized. All we need to know are the dimension name and the label we wish to index i.e. ``data.sel(x=10)`` works regardless of whether ``x`` is the first or second dimension of the array and regardless of whether ``10`` is the first or second element of ``x``. We have already told xarray that x is the first dimension when we created ``data``: xarray keeps track of this so we don't have to. For more, see :ref:`indexing`.\n\n\nAttributes\n----------\n\nWhile you're setting up your DataArray, it's often a good idea to set metadata attributes. A useful choice is to set ``data.attrs['long_name']`` and ``data.attrs['units']`` since xarray will use these, if present, to automatically label your plots. These special names were chosen following the `NetCDF Climate and Forecast (CF) Metadata Conventions `_. ``attrs`` is just a Python dictionary, so you can assign anything you wish.\n\n.. ipython:: python\n\n data.attrs['long_name'] = 'random velocity'\n data.attrs['units'] = 'metres/sec'\n data.attrs['description'] = 'A random variable created as an example.'\n data.attrs['random_attribute'] = 123\n data.attrs\n # you can add metadata to coordinates too\n data.x.attrs['units'] = 'x units'\n\n\nComputation\n-----------\n\nData arrays work very similarly to numpy ndarrays:\n\n.. ipython:: python\n\n data + 10\n np.sin(data)\n # transpose\n data.T\n data.sum()\n\nHowever, aggregation operations can use dimension names instead of axis\nnumbers:\n\n.. ipython:: python\n\n data.mean(dim='x')\n\nArithmetic operations broadcast based on dimension name. This means you don't\nneed to insert dummy dimensions for alignment:\n\n.. ipython:: python\n\n a = xr.DataArray(np.random.randn(3), [data.coords['y']])\n b = xr.DataArray(np.random.randn(4), dims='z')\n\n a\n b\n\n a + b\n\nIt also means that in most cases you do not need to worry about the order of\ndimensions:\n\n.. ipython:: python\n\n data - data.T\n\nOperations also align based on index labels:\n\n.. ipython:: python\n\n data[:-1] - data[:1]\n\nFor more, see :ref:`comput`.\n\nGroupBy\n-------\n\nxarray supports grouped operations using a very similar API to pandas (see :ref:`groupby`):\n\n.. ipython:: python\n\n labels = xr.DataArray(['E', 'F', 'E'], [data.coords['y']], name='labels')\n labels\n data.groupby(labels).mean('y')\n data.groupby(labels).apply(lambda x: x - x.min())\n\nPlotting\n--------\n\nVisualizing your datasets is quick and convenient:\n\n.. ipython:: python\n\n @savefig plotting_quick_overview.png\n data.plot()\n\nNote the automatic labeling with names and units. 
Our effort in adding metadata attributes has paid off! Many aspects of these figures are customizable: see :ref:`plotting`.\n \npandas\n------\n\nXarray objects can be easily converted to and from pandas objects using the :py:meth:`~xarray.DataArray.to_series`, :py:meth:`~xarray.DataArray.to_dataframe` and :py:meth:`~pandas.DataFrame.to_xarray` methods:\n\n.. ipython:: python\n\n series = data.to_series()\n series\n\n # convert back\n series.to_xarray()\n\nDatasets\n--------\n\n:py:class:`xarray.Dataset` is a dict-like container of aligned ``DataArray``\nobjects. You can think of it as a multi-dimensional generalization of the\n:py:class:`pandas.DataFrame`:\n\n.. ipython:: python\n\n ds = xr.Dataset({'foo': data, 'bar': ('x', [1, 2]), 'baz': np.pi})\n ds\n\n\nThis creates a dataset with three DataArrays named ``foo``, ``bar`` and ``baz``. Use dictionary or dot indexing to pull out ``Dataset`` variables as ``DataArray`` objects but note that assignment only works with dictionary indexing:\n\n.. ipython:: python\n\n ds['foo']\n ds.foo\n\n\nWhen creating ``ds``, we specified that ``foo`` is identical to ``data`` created earlier, ``bar`` is one-dimensional with single dimension ``x`` and associated values '1' and '2', and ``baz`` is a scalar not associated with any dimension in ``ds``. Variables in datasets can have different ``dtype`` and even different dimensions, but all dimensions are assumed to refer to points in the same shared coordinate system i.e. if two variables have dimension ``x``, that dimension must be identical in both variables.\n\nFor example, when creating ``ds`` xarray automatically *aligns* ``bar`` with ``DataArray`` ``foo``, i.e., they share the same coordinate system so that ``ds.bar['x'] == ds.foo['x'] == ds['x']``. Consequently, the following works without explicitly specifying the coordinate ``x`` when creating ``ds['bar']``:\n\n.. ipython:: python\n\n ds.bar.sel(x=10)\n\n\n\nYou can do almost everything you can do with ``DataArray`` objects with\n``Dataset`` objects (including indexing and arithmetic) if you prefer to work\nwith multiple variables at once.\n\nRead & write netCDF files\n-------------------------\n\nNetCDF is the recommended file format for xarray objects. Users\nfrom the geosciences will recognize that the :py:class:`~xarray.Dataset` data\nmodel looks very similar to a netCDF file (which, in fact, inspired it).\n\nYou can directly read and write xarray objects to disk using :py:meth:`~xarray.Dataset.to_netcdf`, :py:func:`~xarray.open_dataset` and\n:py:func:`~xarray.open_dataarray`:\n\n.. ipython:: python\n\n ds.to_netcdf('example.nc')\n xr.open_dataset('example.nc')\n\n.. ipython:: python\n :suppress:\n\n import os\n os.remove('example.nc')\n\n\nIt is common for datasets to be distributed across multiple files (commonly one file per timestep). xarray supports this use-case by providing the :py:meth:`~xarray.open_mfdataset` and the :py:meth:`~xarray.save_mfdataset` methods. 
For more, see :ref:`io`.\n"},{"id":1616,"name":"index.rst","nodeType":"TextFile","path":"doc","text":"xarray: N-D labeled arrays and datasets in Python\n=================================================\n\n**xarray** (formerly **xray**) is an open source project and Python package\nthat makes working with labelled multi-dimensional arrays simple,\nefficient, and fun!\n\nXarray introduces labels in the form of dimensions, coordinates and\nattributes on top of raw NumPy_-like arrays, which allows for a more\nintuitive, more concise, and less error-prone developer experience.\nThe package includes a large and growing library of domain-agnostic functions\nfor advanced analytics and visualization with these data structures.\n\nXarray is inspired by and borrows heavily from pandas_, the popular data\nanalysis package focused on labelled tabular data.\nIt is particularly tailored to working with netCDF_ files, which were the\nsource of xarray's data model, and integrates tightly with dask_ for parallel\ncomputing.\n\n.. _NumPy: http://www.numpy.org\n.. _pandas: http://pandas.pydata.org\n.. _dask: http://dask.org\n.. _netCDF: http://www.unidata.ucar.edu/software/netcdf\n\nDocumentation\n-------------\n\n**Getting Started**\n\n* :doc:`why-xarray`\n* :doc:`faq`\n* :doc:`quick-overview`\n* :doc:`examples`\n* :doc:`installing`\n\n.. toctree::\n :maxdepth: 1\n :hidden:\n :caption: Getting Started\n\n why-xarray\n faq\n quick-overview\n examples\n installing\n\n**User Guide**\n\n* :doc:`terminology`\n* :doc:`data-structures`\n* :doc:`indexing`\n* :doc:`interpolation`\n* :doc:`computation`\n* :doc:`groupby`\n* :doc:`reshaping`\n* :doc:`combining`\n* :doc:`time-series`\n* :doc:`weather-climate`\n* :doc:`pandas`\n* :doc:`io`\n* :doc:`dask`\n* :doc:`plotting`\n\n.. toctree::\n :maxdepth: 1\n :hidden:\n :caption: User Guide\n\n terminology\n data-structures\n indexing\n interpolation\n computation\n groupby\n reshaping\n combining\n time-series\n weather-climate\n pandas\n io\n dask\n plotting\n\n**Help & reference**\n\n* :doc:`whats-new`\n* :doc:`howdoi`\n* :doc:`api`\n* :doc:`internals`\n* :doc:`roadmap`\n* :doc:`contributing`\n* :doc:`related-projects`\n\n.. toctree::\n :maxdepth: 1\n :hidden:\n :caption: Help & reference\n\n whats-new\n howdoi\n api\n internals\n roadmap\n contributing\n related-projects\n\nSee also\n--------\n\n- Stephan Hoyer and Joe Hamman's `Journal of Open Research Software paper`_ describing the xarray project.\n- The `UW eScience Institute's Geohackweek`_ tutorial on xarray for geospatial data scientists.\n- Stephan Hoyer's `SciPy2015 talk`_ introducing xarray to a general audience.\n- Stephan Hoyer's `2015 Unidata Users Workshop talk`_ and `tutorial`_ (`with answers`_) introducing\n xarray to users familiar with netCDF.\n- `Nicolas Fauchereau's tutorial`_ on xarray for netCDF users.\n\n.. _Journal of Open Research Software paper: http://doi.org/10.5334/jors.148\n.. _UW eScience Institute's Geohackweek : https://geohackweek.github.io/nDarrays/\n.. _SciPy2015 talk: https://www.youtube.com/watch?v=X0pAhJgySxk\n.. _2015 Unidata Users Workshop talk: https://www.youtube.com/watch?v=J9ypQOnt5l8\n.. _tutorial: https://github.com/Unidata/unidata-users-workshop/blob/master/notebooks/xray-tutorial.ipynb\n.. _with answers: https://github.com/Unidata/unidata-users-workshop/blob/master/notebooks/xray-tutorial-with-answers.ipynb\n.. 
_Nicolas Fauchereau's tutorial: http://nbviewer.iPython.org/github/nicolasfauchereau/metocean/blob/master/notebooks/xray.ipynb\n\nGet in touch\n------------\n\n- Ask usage questions (\"How do I?\") on `StackOverflow`_.\n- Report bugs, suggest features or view the source code `on GitHub`_.\n- For less well defined questions or ideas, or to announce other projects of\n interest to xarray users, use the `mailing list`_.\n\n.. _StackOverFlow: http://stackoverflow.com/questions/tagged/python-xarray\n.. _mailing list: https://groups.google.com/forum/#!forum/xarray\n.. _on GitHub: http://github.com/pydata/xarray\n\nNumFOCUS\n--------\n\n.. image:: _static/numfocus_logo.png\n :scale: 50 %\n :target: https://numfocus.org/\n\nXarray is a fiscally sponsored project of NumFOCUS_, a nonprofit dedicated\nto supporting the open source scientific computing community. If you like\nXarray and want to support our mission, please consider making a donation_\nto support our efforts.\n\n.. _donation: https://numfocus.salsalabs.org/donate-to-xarray/\n\n\nHistory\n-------\n\nxarray is an evolution of an internal tool developed at `The Climate\nCorporation`__. It was originally written by Climate Corp researchers Stephan\nHoyer, Alex Kleeman and Eugene Brevdo and was released as open source in\nMay 2014. The project was renamed from \"xray\" in January 2016. Xarray became a\nfiscally sponsored project of NumFOCUS_ in August 2018.\n\n__ http://climate.com/\n.. _NumFOCUS: https://numfocus.org\n\nLicense\n-------\n\nxarray is available under the open source `Apache License`__.\n\n__ http://www.apache.org/licenses/LICENSE-2.0.html\n"},{"col":4,"comment":"Reset the specified index(es) or multi-index level(s).\n\n Parameters\n ----------\n dims_or_levels : str or list\n Name(s) of the dimension(s) and/or multi-index level(s) that will\n be reset.\n drop : bool, optional\n If True, remove the specified indexes and/or multi-index levels\n instead of extracting them as new coordinates (default: False).\n\n Returns\n -------\n obj : Dataset\n Another dataset, with this dataset's data but replaced coordinates.\n\n See Also\n --------\n Dataset.set_index\n ","endLoc":3102,"header":"def reset_index(\n self,\n dims_or_levels: Union[Hashable, Sequence[Hashable]],\n drop: bool = False,\n inplace: bool = None,\n ) -> \"Dataset\"","id":1617,"name":"reset_index","nodeType":"Function","startLoc":3068,"text":"def reset_index(\n self,\n dims_or_levels: Union[Hashable, Sequence[Hashable]],\n drop: bool = False,\n inplace: bool = None,\n ) -> \"Dataset\":\n \"\"\"Reset the specified index(es) or multi-index level(s).\n\n Parameters\n ----------\n dims_or_levels : str or list\n Name(s) of the dimension(s) and/or multi-index level(s) that will\n be reset.\n drop : bool, optional\n If True, remove the specified indexes and/or multi-index levels\n instead of extracting them as new coordinates (default: False).\n\n Returns\n -------\n obj : Dataset\n Another dataset, with this dataset's data but replaced coordinates.\n\n See Also\n --------\n Dataset.set_index\n \"\"\"\n _check_inplace(inplace)\n variables, coord_names = split_indexes(\n dims_or_levels,\n self._variables,\n self._coord_names,\n cast(Mapping[Hashable, Hashable], self._level_coords),\n drop=drop,\n )\n return self._replace_vars_and_dims(variables, coord_names=coord_names)"},{"attributeType":"null","col":4,"comment":"null","endLoc":199,"id":1618,"name":"day","nodeType":"Attribute","startLoc":199,"text":"day"},{"col":4,"comment":"Rearrange index levels using input order.\n\n 
Parameters\n ----------\n dim_order : optional\n Mapping from names matching dimensions and values given\n by lists representing new level orders. Every given dimension\n must have a multi-index.\n **dim_order_kwargs: optional\n The keyword arguments form of ``dim_order``.\n One of dim_order or dim_order_kwargs must be provided.\n\n Returns\n -------\n obj : Dataset\n Another dataset, with this dataset's data but replaced\n coordinates.\n ","endLoc":3141,"header":"def reorder_levels(\n self,\n dim_order: Mapping[Hashable, Sequence[int]] = None,\n inplace: bool = None,\n **dim_order_kwargs: Sequence[int],\n ) -> \"Dataset\"","id":1619,"name":"reorder_levels","nodeType":"Function","startLoc":3104,"text":"def reorder_levels(\n self,\n dim_order: Mapping[Hashable, Sequence[int]] = None,\n inplace: bool = None,\n **dim_order_kwargs: Sequence[int],\n ) -> \"Dataset\":\n \"\"\"Rearrange index levels using input order.\n\n Parameters\n ----------\n dim_order : optional\n Mapping from names matching dimensions and values given\n by lists representing new level orders. Every given dimension\n must have a multi-index.\n **dim_order_kwargs: optional\n The keyword arguments form of ``dim_order``.\n One of dim_order or dim_order_kwargs must be provided.\n\n Returns\n -------\n obj : Dataset\n Another dataset, with this dataset's data but replaced\n coordinates.\n \"\"\"\n _check_inplace(inplace)\n dim_order = either_dict_or_kwargs(dim_order, dim_order_kwargs, \"reorder_levels\")\n variables = self._variables.copy()\n indexes = dict(self.indexes)\n for dim, order in dim_order.items():\n coord = self._variables[dim]\n index = self.indexes[dim]\n if not isinstance(index, pd.MultiIndex):\n raise ValueError(\"coordinate %r has no MultiIndex\" % dim)\n new_index = index.reorder_levels(order)\n variables[dim] = IndexVariable(coord.dims, new_index)\n indexes[dim] = new_index\n\n return self._replace(variables, indexes=indexes)"},{"attributeType":"null","col":4,"comment":"null","endLoc":200,"id":1620,"name":"hour","nodeType":"Attribute","startLoc":200,"text":"hour"},{"id":1621,"name":"faq.rst","nodeType":"TextFile","path":"doc","text":".. _faq:\n\nFrequently Asked Questions\n==========================\n\n.. ipython:: python\n :suppress:\n\n import numpy as np\n import pandas as pd\n import xarray as xr\n np.random.seed(123456)\n\n\nYour documentation keeps mentioning pandas. What is pandas?\n-----------------------------------------------------------\n\npandas_ is a very popular data analysis package in Python\nwith wide usage in many fields. Our API is heavily inspired by pandas —\nthis is why there are so many references to pandas.\n\n.. _pandas: https://pandas.pydata.org\n\n\nDo I need to know pandas to use xarray?\n---------------------------------------\n\nNo! Our API is heavily inspired by pandas so while knowing pandas will let you\nbecome productive more quickly, knowledge of pandas is not necessary to use xarray.\n\n\nShould I use xarray instead of pandas?\n--------------------------------------\n\nIt's not an either/or choice! xarray provides robust support for converting\nback and forth between the tabular data-structures of pandas and its own\nmulti-dimensional data-structures.\n\nThat said, you should only bother with xarray if some aspect of data is\nfundamentally multi-dimensional. 
If your data is unstructured or\none-dimensional, pandas is usually the right choice: it has better performance\nfor common operations such as ``groupby`` and you'll find far more usage\nexamples online.\n\n\nWhy is pandas not enough?\n-------------------------\n\npandas is a fantastic library for analysis of low-dimensional labelled data -\nif it can be sensibly described as \"rows and columns\", pandas is probably the\nright choice. However, sometimes we want to use higher dimensional arrays\n(`ndim > 2`), or arrays for which the order of dimensions (e.g., columns vs\nrows) shouldn't really matter. For example, the images of a movie can be\nnatively represented as an array with four dimensions: time, row, column and\ncolor.\n\nPandas has historically supported N-dimensional panels, but deprecated them in\nversion 0.20 in favor of Xarray data structures. There are now built-in methods\non both sides to convert between pandas and Xarray, allowing for more focussed\ndevelopment effort. Xarray objects have a much richer model of dimensionality -\nif you were using Panels:\n\n- You need to create a new factory type for each dimensionality.\n- You can't do math between NDPanels with different dimensionality.\n- Each dimension in a NDPanel has a name (e.g., 'labels', 'items',\n 'major_axis', etc.) but the dimension names refer to order, not their\n meaning. You can't specify an operation as to be applied along the \"time\"\n axis.\n- You often have to manually convert collections of pandas arrays\n (Series, DataFrames, etc) to have the same number of dimensions.\n In contrast, this sort of data structure fits very naturally in an\n xarray ``Dataset``.\n\nYou can :ref:`read about switching from Panels to Xarray here `.\nPandas gets a lot of things right, but many science, engineering and complex\nanalytics use cases need fully multi-dimensional data structures.\n\nHow do xarray data structures differ from those found in pandas?\n----------------------------------------------------------------\n\nThe main distinguishing feature of xarray's ``DataArray`` over labeled arrays in\npandas is that dimensions can have names (e.g., \"time\", \"latitude\",\n\"longitude\"). Names are much easier to keep track of than axis numbers, and\nxarray uses dimension names for indexing, aggregation and broadcasting. Not only\ncan you write ``x.sel(time='2000-01-01')`` and ``x.mean(dim='time')``, but\noperations like ``x - x.mean(dim='time')`` always work, no matter the order\nof the \"time\" dimension. You never need to reshape arrays (e.g., with\n``np.newaxis``) to align them for arithmetic operations in xarray.\n\n\nWhy don't aggregations return Python scalars?\n---------------------------------------------\n\nxarray tries hard to be self-consistent: operations on a ``DataArray`` (resp.\n``Dataset``) return another ``DataArray`` (resp. ``Dataset``) object. In\nparticular, operations returning scalar values (e.g. indexing or aggregations\nlike ``mean`` or ``sum`` applied to all axes) will also return xarray objects.\n\nUnfortunately, this means we sometimes have to explicitly cast our results from\nxarray when using them in other libraries. As an illustration, the following\ncode fragment\n\n.. ipython:: python\n\n arr = xr.DataArray([1, 2, 3])\n pd.Series({'x': arr[0], 'mean': arr.mean(), 'std': arr.std()})\n\ndoes not yield the pandas DataFrame we expected. We need to specify the type\nconversion ourselves:\n\n.. 
ipython:: python\n\n pd.Series({'x': arr[0], 'mean': arr.mean(), 'std': arr.std()}, dtype=float)\n\nAlternatively, we could use the ``item`` method or the ``float`` constructor to\nconvert values one at a time\n\n.. ipython:: python\n\n pd.Series({'x': arr[0].item(), 'mean': float(arr.mean())})\n\n\n.. _approach to metadata:\n\nWhat is your approach to metadata?\n----------------------------------\n\nWe are firm believers in the power of labeled data! In addition to dimensions\nand coordinates, xarray supports arbitrary metadata in the form of global\n(Dataset) and variable specific (DataArray) attributes (``attrs``).\n\nAutomatic interpretation of labels is powerful but also reduces flexibility.\nWith xarray, we draw a firm line between labels that the library understands\n(``dims`` and ``coords``) and labels for users and user code (``attrs``). For\nexample, we do not automatically interpret and enforce units or `CF\nconventions`_. (An exception is serialization to and from netCDF files.)\n\n.. _CF conventions: http://cfconventions.org/latest.html\n\nAn implication of this choice is that we do not propagate ``attrs`` through\nmost operations unless explicitly flagged (some methods have a ``keep_attrs``\noption, and there is a global flag for setting this to be always True or\nFalse). Similarly, xarray does not check for conflicts between ``attrs`` when\ncombining arrays and datasets, unless explicitly requested with the option\n``compat='identical'``. The guiding principle is that metadata should not be\nallowed to get in the way.\n\n\nWhat other netCDF related Python libraries should I know about?\n---------------------------------------------------------------\n\n`netCDF4-python`__ provides a lower level interface for working with\nnetCDF and OpenDAP datasets in Python. We use netCDF4-python internally in\nxarray, and have contributed a number of improvements and fixes upstream. xarray\ndoes not yet support all of netCDF4-python's features, such as modifying files\non-disk.\n\n__ https://github.com/Unidata/netcdf4-python\n\nIris_ (supported by the UK Met office) provides similar tools for in-\nmemory manipulation of labeled arrays, aimed specifically at weather and\nclimate data needs. Indeed, the Iris :py:class:`~iris.cube.Cube` was direct\ninspiration for xarray's :py:class:`~xarray.DataArray`. xarray and Iris take very\ndifferent approaches to handling metadata: Iris strictly interprets\n`CF conventions`_. Iris particularly shines at mapping, thanks to its\nintegration with Cartopy_.\n\n.. _Iris: http://scitools.org.uk/iris/\n.. _Cartopy: http://scitools.org.uk/cartopy/docs/latest/\n\n`UV-CDAT`__ is another Python library that implements in-memory netCDF-like\nvariables and `tools for working with climate data`__.\n\n__ http://uvcdat.llnl.gov/\n__ http://drclimate.wordpress.com/2014/01/02/a-beginners-guide-to-scripting-with-uv-cdat/\n\nWe think the design decisions we have made for xarray (namely, basing it on\npandas) make it a faster and more flexible data analysis tool. That said, Iris\nand CDAT have some great domain specific functionality, and xarray includes\nmethods for converting back and forth between xarray and these libraries. 
See\n:py:meth:`~xarray.DataArray.to_iris` and :py:meth:`~xarray.DataArray.to_cdms2`\nfor more details.\n\nWhat other projects leverage xarray?\n------------------------------------\n\nSee section :ref:`related-projects`.\n\nHow should I cite xarray?\n-------------------------\n\nIf you are using xarray and would like to cite it in academic publication, we\nwould certainly appreciate it. We recommend two citations.\n\n 1. At a minimum, we recommend citing the xarray overview journal article,\n published in the Journal of Open Research Software.\n\n - Hoyer, S. & Hamman, J., (2017). xarray: N-D labeled Arrays and\n Datasets in Python. Journal of Open Research Software. 5(1), p.10.\n DOI: http://doi.org/10.5334/jors.148\n\n Here’s an example of a BibTeX entry::\n\n @article{hoyer2017xarray,\n title = {xarray: {N-D} labeled arrays and datasets in {Python}},\n author = {Hoyer, S. and J. Hamman},\n journal = {Journal of Open Research Software},\n volume = {5},\n number = {1},\n year = {2017},\n publisher = {Ubiquity Press},\n doi = {10.5334/jors.148},\n url = {http://doi.org/10.5334/jors.148}\n }\n\n 2. You may also want to cite a specific version of the xarray package. We\n provide a `Zenodo citation and DOI `_\n for this purpose:\n\n .. image:: https://zenodo.org/badge/doi/10.5281/zenodo.598201.svg\n :target: https://doi.org/10.5281/zenodo.598201\n\n An example BibTeX entry::\n\n @misc{xarray_v0_8_0,\n author = {Stephan Hoyer and Clark Fitzgerald and Joe Hamman and others},\n title = {xarray: v0.8.0},\n month = aug,\n year = 2016,\n doi = {10.5281/zenodo.59499},\n url = {https://doi.org/10.5281/zenodo.59499}\n }\n\n.. _public api:\n\nWhat parts of xarray are considered public API?\n-----------------------------------------------\n\nAs a rule, only functions/methods documented in our :ref:`api` are considered\npart of xarray's public API. Everything else (in particular, everything in\n``xarray.core`` that is not also exposed in the top level ``xarray`` namespace)\nis considered a private implementation detail that may change at any time.\n\nObjects that exist to facilitate xarray's fluent interface on ``DataArray`` and\n``Dataset`` objects are a special case. For convenience, we document them in\nthe API docs, but only their methods and the ``DataArray``/``Dataset``\nmethods/properties to construct them (e.g., ``.plot()``, ``.groupby()``,\n``.str``) are considered public API. Constructors and other details of the\ninternal classes used to implemented them (i.e.,\n``xarray.plot.plotting._PlotMethods``, ``xarray.core.groupby.DataArrayGroupBy``,\n``xarray.core.accessor_str.StringAccessor``) are not.\n"},{"attributeType":"null","col":4,"comment":"null","endLoc":201,"id":1622,"name":"minute","nodeType":"Attribute","startLoc":201,"text":"minute"},{"col":4,"comment":"null","endLoc":312,"header":"def time_write_dataset_scipy(self)","id":1623,"name":"time_write_dataset_scipy","nodeType":"Function","startLoc":309,"text":"def time_write_dataset_scipy(self):\n xr.save_mfdataset(\n self.ds_list, self.filenames_list, engine=\"scipy\", format=self.format\n )"},{"attributeType":"null","col":8,"comment":"null","endLoc":302,"id":1624,"name":"format","nodeType":"Attribute","startLoc":302,"text":"self.format"},{"id":1625,"name":"computation.rst","nodeType":"TextFile","path":"doc","text":".. 
_comput:\n\n###########\nComputation\n###########\n\nThe labels associated with :py:class:`~xarray.DataArray` and\n:py:class:`~xarray.Dataset` objects enables some powerful shortcuts for\ncomputation, notably including aggregation and broadcasting by dimension\nnames.\n\nBasic array math\n================\n\nArithmetic operations with a single DataArray automatically vectorize (like\nnumpy) over all array values:\n\n.. ipython:: python\n :suppress:\n\n import numpy as np\n import pandas as pd\n import xarray as xr\n np.random.seed(123456)\n\n.. ipython:: python\n\n arr = xr.DataArray(np.random.RandomState(0).randn(2, 3),\n [('x', ['a', 'b']), ('y', [10, 20, 30])])\n arr - 3\n abs(arr)\n\nYou can also use any of numpy's or scipy's many `ufunc`__ functions directly on\na DataArray:\n\n__ http://docs.scipy.org/doc/numpy/reference/ufuncs.html\n\n.. ipython:: python\n\n np.sin(arr)\n\nUse :py:func:`~xarray.where` to conditionally switch between values:\n\n.. ipython:: python\n\n xr.where(arr > 0, 'positive', 'negative')\n\nUse `@` to perform matrix multiplication:\n\n.. ipython:: python\n\n arr @ arr\n\nData arrays also implement many :py:class:`numpy.ndarray` methods:\n\n.. ipython:: python\n\n arr.round(2)\n arr.T\n\n.. _missing_values:\n\nMissing values\n==============\n\nxarray objects borrow the :py:meth:`~xarray.DataArray.isnull`,\n:py:meth:`~xarray.DataArray.notnull`, :py:meth:`~xarray.DataArray.count`,\n:py:meth:`~xarray.DataArray.dropna`, :py:meth:`~xarray.DataArray.fillna`,\n:py:meth:`~xarray.DataArray.ffill`, and :py:meth:`~xarray.DataArray.bfill`\nmethods for working with missing data from pandas:\n\n.. ipython:: python\n\n x = xr.DataArray([0, 1, np.nan, np.nan, 2], dims=['x'])\n x.isnull()\n x.notnull()\n x.count()\n x.dropna(dim='x')\n x.fillna(-1)\n x.ffill('x')\n x.bfill('x')\n\nLike pandas, xarray uses the float value ``np.nan`` (not-a-number) to represent\nmissing values.\n\nxarray objects also have an :py:meth:`~xarray.DataArray.interpolate_na` method\nfor filling missing values via 1D interpolation.\n\n.. ipython:: python\n\n x = xr.DataArray([0, 1, np.nan, np.nan, 2], dims=['x'],\n coords={'xx': xr.Variable('x', [0, 1, 1.1, 1.9, 3])})\n x.interpolate_na(dim='x', method='linear', use_coordinate='xx')\n\nNote that xarray slightly diverges from the pandas ``interpolate`` syntax by\nproviding the ``use_coordinate`` keyword which facilitates a clear specification\nof which values to use as the index in the interpolation.\n\nAggregation\n===========\n\nAggregation methods have been updated to take a `dim` argument instead of\n`axis`. This allows for very intuitive syntax for aggregation methods that are\napplied along particular dimension(s):\n\n.. ipython:: python\n\n arr.sum(dim='x')\n arr.std(['x', 'y'])\n arr.min()\n\n\nIf you need to figure out the axis number for a dimension yourself (say,\nfor wrapping code designed to work with numpy arrays), you can use the\n:py:meth:`~xarray.DataArray.get_axis_num` method:\n\n.. ipython:: python\n\n arr.get_axis_num('y')\n\nThese operations automatically skip missing values, like in pandas:\n\n.. ipython:: python\n\n xr.DataArray([1, 2, np.nan, 3]).mean()\n\nIf desired, you can disable this behavior by invoking the aggregation method\nwith ``skipna=False``.\n\n.. _comput.rolling:\n\nRolling window operations\n=========================\n\n``DataArray`` objects include a :py:meth:`~xarray.DataArray.rolling` method. This\nmethod supports rolling window aggregation:\n\n.. 
ipython:: python\n\n arr = xr.DataArray(np.arange(0, 7.5, 0.5).reshape(3, 5),\n dims=('x', 'y'))\n arr\n\n:py:meth:`~xarray.DataArray.rolling` is applied along one dimension using the\nname of the dimension as a key (e.g. ``y``) and the window size as the value\n(e.g. ``3``). We get back a ``Rolling`` object:\n\n.. ipython:: python\n\n arr.rolling(y=3)\n\nAggregation and summary methods can be applied directly to the ``Rolling``\nobject:\n\n.. ipython:: python\n\n r = arr.rolling(y=3)\n r.reduce(np.std)\n r.mean()\n\nAggregation results are assigned the coordinate at the end of each window by\ndefault, but can be centered by passing ``center=True`` when constructing the\n``Rolling`` object:\n\n.. ipython:: python\n\n r = arr.rolling(y=3, center=True)\n r.mean()\n\nAs can be seen above, aggregations of windows which overlap the border of the\narray produce ``nan``\\s. Setting ``min_periods`` in the call to ``rolling``\nchanges the minimum number of observations within the window required to have\na value when aggregating:\n\n.. ipython:: python\n\n r = arr.rolling(y=3, min_periods=2)\n r.mean()\n r = arr.rolling(y=3, center=True, min_periods=2)\n r.mean()\n\n.. tip::\n\n Note that rolling window aggregations are faster and use less memory when bottleneck_ is installed. This only applies to numpy-backed xarray objects.\n\n.. _bottleneck: https://github.com/kwgoodman/bottleneck/\n\nWe can also manually iterate through ``Rolling`` objects:\n\n.. code:: python\n\n for label, arr_window in r:\n # arr_window is a view of x\n\n.. _comput.rolling_exp:\n\nWhile ``rolling`` provides a simple moving average, ``DataArray`` also supports\nan exponential moving average with :py:meth:`~xarray.DataArray.rolling_exp`.\nThis is similiar to pandas' ``ewm`` method. numbagg_ is required.\n\n.. _numbagg: https://github.com/shoyer/numbagg\n\n.. code:: python\n\n arr.rolling_exp(y=3).mean()\n\nThe ``rolling_exp`` method takes a ``window_type`` kwarg, which can be ``'alpha'``,\n``'com'`` (for ``center-of-mass``), ``'span'``, and ``'halflife'``. The default is\n``span``.\n\nFinally, the rolling object has a ``construct`` method which returns a\nview of the original ``DataArray`` with the windowed dimension in\nthe last position.\nYou can use this for more advanced rolling operations such as strided rolling,\nwindowed rolling, convolution, short-time FFT etc.\n\n.. ipython:: python\n\n # rolling with 2-point stride\n rolling_da = r.construct('window_dim', stride=2)\n rolling_da\n rolling_da.mean('window_dim', skipna=False)\n\nBecause the ``DataArray`` given by ``r.construct('window_dim')`` is a view\nof the original array, it is memory efficient.\nYou can also use ``construct`` to compute a weighted rolling sum:\n\n.. ipython:: python\n\n weight = xr.DataArray([0.25, 0.5, 0.25], dims=['window'])\n arr.rolling(y=3).construct('window').dot(weight)\n\n.. note::\n numpy's Nan-aggregation functions such as ``nansum`` copy the original array.\n In xarray, we internally use these functions in our aggregation methods\n (such as ``.sum()``) if ``skipna`` argument is not specified or set to True.\n This means ``rolling_da.mean('window_dim')`` is memory inefficient.\n To avoid this, use ``skipna=False`` as the above example.\n\n\n.. _comput.coarsen:\n\nCoarsen large arrays\n====================\n\n``DataArray`` and ``Dataset`` objects include a\n:py:meth:`~xarray.DataArray.coarsen` and :py:meth:`~xarray.Dataset.coarsen`\nmethods. This supports the block aggregation along multiple dimensions,\n\n.. 
ipython:: python\n\n x = np.linspace(0, 10, 300)\n t = pd.date_range('15/12/1999', periods=364)\n da = xr.DataArray(np.sin(x) * np.cos(np.linspace(0, 1, 364)[:, np.newaxis]),\n dims=['time', 'x'], coords={'time': t, 'x': x})\n da\n\nIn order to take a block mean for every 7 days along ``time`` dimension and\nevery 2 points along ``x`` dimension,\n\n.. ipython:: python\n\n da.coarsen(time=7, x=2).mean()\n\n:py:meth:`~xarray.DataArray.coarsen` raises an ``ValueError`` if the data\nlength is not a multiple of the corresponding window size.\nYou can choose ``boundary='trim'`` or ``boundary='pad'`` options for trimming\nthe excess entries or padding ``nan`` to insufficient entries,\n\n.. ipython:: python\n\n da.coarsen(time=30, x=2, boundary='trim').mean()\n\nIf you want to apply a specific function to coordinate, you can pass the\nfunction or method name to ``coord_func`` option,\n\n.. ipython:: python\n\n da.coarsen(time=7, x=2, coord_func={'time': 'min'}).mean()\n\n\n.. _compute.using_coordinates:\n\nComputation using Coordinates\n=============================\n\nXarray objects have some handy methods for the computation with their\ncoordinates. :py:meth:`~xarray.DataArray.differentiate` computes derivatives by\ncentral finite differences using their coordinates,\n\n.. ipython:: python\n\n a = xr.DataArray([0, 1, 2, 3], dims=['x'], coords=[[0.1, 0.11, 0.2, 0.3]])\n a\n a.differentiate('x')\n\nThis method can be used also for multidimensional arrays,\n\n.. ipython:: python\n\n a = xr.DataArray(np.arange(8).reshape(4, 2), dims=['x', 'y'],\n coords={'x': [0.1, 0.11, 0.2, 0.3]})\n a.differentiate('x')\n\n:py:meth:`~xarray.DataArray.integrate` computes integration based on\ntrapezoidal rule using their coordinates,\n\n.. ipython:: python\n\n a.integrate('x')\n\n.. note::\n These methods are limited to simple cartesian geometry. Differentiation\n and integration along multidimensional coordinate are not supported.\n\n\n.. _compute.broadcasting:\n\nBroadcasting by dimension name\n==============================\n\n``DataArray`` objects are automatically align themselves (\"broadcasting\" in\nthe numpy parlance) by dimension name instead of axis order. With xarray, you\ndo not need to transpose arrays or insert dimensions of length 1 to get array\noperations to work, as commonly done in numpy with :py:func:`np.reshape` or\n:py:const:`np.newaxis`.\n\nThis is best illustrated by a few examples. Consider two one-dimensional\narrays with different sizes aligned along different dimensions:\n\n.. ipython:: python\n\n a = xr.DataArray([1, 2], [('x', ['a', 'b'])])\n a\n b = xr.DataArray([-1, -2, -3], [('y', [10, 20, 30])])\n b\n\nWith xarray, we can apply binary mathematical operations to these arrays, and\ntheir dimensions are expanded automatically:\n\n.. ipython:: python\n\n a * b\n\nMoreover, dimensions are always reordered to the order in which they first\nappeared:\n\n.. ipython:: python\n\n c = xr.DataArray(np.arange(6).reshape(3, 2), [b['y'], a['x']])\n c\n a + c\n\nThis means, for example, that you always subtract an array from its transpose:\n\n.. ipython:: python\n\n c - c.T\n\nYou can explicitly broadcast xarray data structures by using the\n:py:func:`~xarray.broadcast` function:\n\n.. ipython:: python\n\n a2, b2 = xr.broadcast(a, b)\n a2\n b2\n\n.. 
_math automatic alignment:\n\nAutomatic alignment\n===================\n\nxarray enforces alignment between *index* :ref:`coordinates` (that is,\ncoordinates with the same name as a dimension, marked by ``*``) on objects used\nin binary operations.\n\nSimilarly to pandas, this alignment is automatic for arithmetic on binary\noperations. The default result of a binary operation is by the *intersection*\n(not the union) of coordinate labels:\n\n.. ipython:: python\n\n arr = xr.DataArray(np.arange(3), [('x', range(3))])\n arr + arr[:-1]\n\nIf coordinate values for a dimension are missing on either argument, all\nmatching dimensions must have the same size:\n\n.. ipython::\n :verbatim:\n\n In [1]: arr + xr.DataArray([1, 2], dims='x')\n ValueError: arguments without labels along dimension 'x' cannot be aligned because they have different dimension size(s) {2} than the size of the aligned dimension labels: 3\n\n\nHowever, one can explicitly change this default automatic alignment type (\"inner\")\nvia :py:func:`~xarray.set_options()` in context manager:\n\n.. ipython:: python\n\n with xr.set_options(arithmetic_join=\"outer\"):\n arr + arr[:1]\n arr + arr[:1]\n\nBefore loops or performance critical code, it's a good idea to align arrays\nexplicitly (e.g., by putting them in the same Dataset or using\n:py:func:`~xarray.align`) to avoid the overhead of repeated alignment with each\noperation. See :ref:`align and reindex` for more details.\n\n.. note::\n\n There is no automatic alignment between arguments when performing in-place\n arithmetic operations such as ``+=``. You will need to use\n :ref:`manual alignment`. This ensures in-place\n arithmetic never needs to modify data types.\n\n.. _coordinates math:\n\nCoordinates\n===========\n\nAlthough index coordinates are aligned, other coordinates are not, and if their\nvalues conflict, they will be dropped. This is necessary, for example, because\nindexing turns 1D coordinates into scalar coordinates:\n\n.. ipython:: python\n\n arr[0]\n arr[1]\n # notice that the scalar coordinate 'x' is silently dropped\n arr[1] - arr[0]\n\nStill, xarray will persist other coordinates in arithmetic, as long as there\nare no conflicting values:\n\n.. ipython:: python\n\n # only one argument has the 'x' coordinate\n arr[0] + 1\n # both arguments have the same 'x' coordinate\n arr[0] - arr[0]\n\nMath with datasets\n==================\n\nDatasets support arithmetic operations by automatically looping over all data\nvariables:\n\n.. ipython:: python\n\n ds = xr.Dataset({'x_and_y': (('x', 'y'), np.random.randn(3, 5)),\n 'x_only': ('x', np.random.randn(3))},\n coords=arr.coords)\n ds > 0\n\nDatasets support most of the same methods found on data arrays:\n\n.. ipython:: python\n\n ds.mean(dim='x')\n abs(ds)\n\nDatasets also support NumPy ufuncs (requires NumPy v1.13 or newer), or\nalternatively you can use :py:meth:`~xarray.Dataset.apply` to apply a function\nto each variable in a dataset:\n\n.. ipython:: python\n\n np.sin(ds)\n ds.apply(np.sin)\n\nDatasets also use looping over variables for *broadcasting* in binary\narithmetic. You can do arithmetic between any ``DataArray`` and a dataset:\n\n.. ipython:: python\n\n ds + arr\n\nArithmetic between two datasets matches data variables of the same name:\n\n.. ipython:: python\n\n ds2 = xr.Dataset({'x_and_y': 0, 'x_only': 100})\n ds - ds2\n\nSimilarly to index based alignment, the result has the intersection of all\nmatching data variables.\n\n.. 
_comput.wrapping-custom:\n\nWrapping custom computation\n===========================\n\nIt doesn't always make sense to do computation directly with xarray objects:\n\n - In the inner loop of performance limited code, using xarray can add\n considerable overhead compared to using NumPy or native Python types.\n This is particularly true when working with scalars or small arrays (less\n than ~1e6 elements). Keeping track of labels and ensuring their consistency\n adds overhead, and xarray's core itself is not especially fast, because it's\n written in Python rather than a compiled language like C. Also, xarray's\n high level label-based APIs removes low-level control over how operations\n are implemented.\n - Even if speed doesn't matter, it can be important to wrap existing code, or\n to support alternative interfaces that don't use xarray objects.\n\nFor these reasons, it is often well-advised to write low-level routines that\nwork with NumPy arrays, and to wrap these routines to work with xarray objects.\nHowever, adding support for labels on both :py:class:`~xarray.Dataset` and\n:py:class:`~xarray.DataArray` can be a bit of a chore.\n\nTo make this easier, xarray supplies the :py:func:`~xarray.apply_ufunc` helper\nfunction, designed for wrapping functions that support broadcasting and\nvectorization on unlabeled arrays in the style of a NumPy\n`universal function `_ (\"ufunc\" for short).\n``apply_ufunc`` takes care of everything needed for an idiomatic xarray wrapper,\nincluding alignment, broadcasting, looping over ``Dataset`` variables (if\nneeded), and merging of coordinates. In fact, many internal xarray\nfunctions/methods are written using ``apply_ufunc``.\n\nSimple functions that act independently on each value should work without\nany additional arguments:\n\n.. ipython:: python\n\n squared_error = lambda x, y: (x - y) ** 2\n arr1 = xr.DataArray([0, 1, 2, 3], dims='x')\n xr.apply_ufunc(squared_error, arr1, 1)\n\nFor using more complex operations that consider some array values collectively,\nit's important to understand the idea of \"core dimensions\" from NumPy's\n`generalized ufuncs `_. Core dimensions are defined as dimensions\nthat should *not* be broadcast over. Usually, they correspond to the fundamental\ndimensions over which an operation is defined, e.g., the summed axis in\n``np.sum``. A good clue that core dimensions are needed is the presence of an\n``axis`` argument on the corresponding NumPy function.\n\nWith ``apply_ufunc``, core dimensions are recognized by name, and then moved to\nthe last dimension of any input arguments before applying the given function.\nThis means that for functions that accept an ``axis`` argument, you usually need\nto set ``axis=-1``. As an example, here is how we would wrap\n:py:func:`numpy.linalg.norm` to calculate the vector norm:\n\n.. code-block:: python\n\n def vector_norm(x, dim, ord=None):\n return xr.apply_ufunc(np.linalg.norm, x,\n input_core_dims=[[dim]],\n kwargs={'ord': ord, 'axis': -1})\n\n.. ipython:: python\n :suppress:\n\n def vector_norm(x, dim, ord=None):\n return xr.apply_ufunc(np.linalg.norm, x,\n input_core_dims=[[dim]],\n kwargs={'ord': ord, 'axis': -1})\n\n.. ipython:: python\n\n vector_norm(arr1, dim='x')\n\nBecause ``apply_ufunc`` follows a standard convention for ufuncs, it plays\nnicely with tools for building vectorized functions, like\n:func:`numpy.broadcast_arrays` and :func:`numpy.vectorize`. 
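For instance, a function written only for scalar inputs can be wrapped with
:func:`numpy.vectorize` before being handed to ``apply_ufunc`` (a minimal
sketch; ``scalar_distance`` is a made-up function for illustration):

.. code-block:: python

    import numpy as np
    import xarray as xr

    def scalar_distance(a, b):
        # plain Python function that only understands scalar inputs
        return abs(a - b)

    def distance(x, y):
        # np.vectorize supplies the element-wise looping, while apply_ufunc
        # handles alignment, broadcasting and label propagation
        return xr.apply_ufunc(np.vectorize(scalar_distance), x, y)

Calling ``distance(arr1, 1)`` then returns a labeled ``DataArray``, just like
the ``squared_error`` example above.
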
For high performance\nneeds, consider using Numba's :doc:`vectorize and guvectorize `.\n\nIn addition to wrapping functions, ``apply_ufunc`` can automatically parallelize\nmany functions when using dask by setting ``dask='parallelized'``. See\n:ref:`dask.automatic-parallelization` for details.\n\n:py:func:`~xarray.apply_ufunc` also supports some advanced options for\ncontrolling alignment of variables and the form of the result. See the\ndocstring for full details and more examples.\n"},{"className":"IOReadMultipleNetCDF4","col":0,"comment":"null","endLoc":328,"id":1626,"nodeType":"Class","startLoc":315,"text":"class IOReadMultipleNetCDF4(IOMultipleNetCDF):\n def setup(self):\n\n requires_dask()\n\n self.make_ds()\n self.format = \"NETCDF4\"\n xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format)\n\n def time_load_dataset_netcdf4(self):\n xr.open_mfdataset(self.filenames_list, engine=\"netcdf4\").load()\n\n def time_open_dataset_netcdf4(self):\n xr.open_mfdataset(self.filenames_list, engine=\"netcdf4\")"},{"col":4,"comment":"null","endLoc":322,"header":"def setup(self)","id":1627,"name":"setup","nodeType":"Function","startLoc":316,"text":"def setup(self):\n\n requires_dask()\n\n self.make_ds()\n self.format = \"NETCDF4\"\n xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format)"},{"attributeType":"null","col":4,"comment":"null","endLoc":202,"id":1628,"name":"second","nodeType":"Attribute","startLoc":202,"text":"second"},{"id":1629,"name":".coveragerc","nodeType":"TextFile","path":"","text":"[run]\nomit =\n xarray/tests/*\n xarray/core/dask_array_compat.py\n xarray/core/npcompat.py\n xarray/core/pdcompat.py\n xarray/core/pycompat.py\n xarray/_version.py\n"},{"attributeType":"null","col":4,"comment":"null","endLoc":203,"id":1630,"name":"microsecond","nodeType":"Attribute","startLoc":203,"text":"microsecond"},{"fileName":"indexing.py","filePath":"xarray/core","id":1631,"nodeType":"File","text":"import enum\nimport functools\nimport operator\nfrom collections import defaultdict\nfrom contextlib import suppress\nfrom datetime import timedelta\nfrom typing import Any, Callable, Sequence, Tuple, Union\n\nimport numpy as np\nimport pandas as pd\n\nfrom . 
import duck_array_ops, nputils, utils\nfrom .npcompat import DTypeLike\nfrom .pycompat import dask_array_type, integer_types, sparse_array_type\nfrom .utils import is_dict_like, maybe_cast_to_coords_dtype\n\n\ndef expanded_indexer(key, ndim):\n \"\"\"Given a key for indexing an ndarray, return an equivalent key which is a\n tuple with length equal to the number of dimensions.\n\n The expansion is done by replacing all `Ellipsis` items with the right\n number of full slices and then padding the key with full slices so that it\n reaches the appropriate dimensionality.\n \"\"\"\n if not isinstance(key, tuple):\n # numpy treats non-tuple keys equivalent to tuples of length 1\n key = (key,)\n new_key = []\n # handling Ellipsis right is a little tricky, see:\n # http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing\n found_ellipsis = False\n for k in key:\n if k is Ellipsis:\n if not found_ellipsis:\n new_key.extend((ndim + 1 - len(key)) * [slice(None)])\n found_ellipsis = True\n else:\n new_key.append(slice(None))\n else:\n new_key.append(k)\n if len(new_key) > ndim:\n raise IndexError(\"too many indices\")\n new_key.extend((ndim - len(new_key)) * [slice(None)])\n return tuple(new_key)\n\n\ndef _expand_slice(slice_, size):\n return np.arange(*slice_.indices(size))\n\n\ndef _sanitize_slice_element(x):\n from .variable import Variable\n from .dataarray import DataArray\n\n if isinstance(x, (Variable, DataArray)):\n x = x.values\n\n if isinstance(x, np.ndarray):\n if x.ndim != 0:\n raise ValueError(\n \"cannot use non-scalar arrays in a slice for \"\n \"xarray indexing: {}\".format(x)\n )\n x = x[()]\n\n if isinstance(x, np.timedelta64):\n # pandas does not support indexing with np.timedelta64 yet:\n # https://github.com/pandas-dev/pandas/issues/20393\n x = pd.Timedelta(x)\n\n return x\n\n\ndef _asarray_tuplesafe(values):\n \"\"\"\n Convert values into a numpy array of at most 1-dimension, while preserving\n tuples.\n\n Adapted from pandas.core.common._asarray_tuplesafe\n \"\"\"\n if isinstance(values, tuple):\n result = utils.to_0d_object_array(values)\n else:\n result = np.asarray(values)\n if result.ndim == 2:\n result = np.empty(len(values), dtype=object)\n result[:] = values\n\n return result\n\n\ndef _is_nested_tuple(possible_tuple):\n return isinstance(possible_tuple, tuple) and any(\n isinstance(value, (tuple, list, slice)) for value in possible_tuple\n )\n\n\ndef get_indexer_nd(index, labels, method=None, tolerance=None):\n \"\"\"Wrapper around :meth:`pandas.Index.get_indexer` supporting n-dimensional\n labels\n \"\"\"\n flat_labels = np.ravel(labels)\n flat_indexer = index.get_indexer(flat_labels, method=method, tolerance=tolerance)\n indexer = flat_indexer.reshape(labels.shape)\n return indexer\n\n\ndef convert_label_indexer(index, label, index_name=\"\", method=None, tolerance=None):\n \"\"\"Given a pandas.Index and labels (e.g., from __getitem__) for one\n dimension, return an indexer suitable for indexing an ndarray along that\n dimension. 
If `index` is a pandas.MultiIndex and depending on `label`,\n return a new pandas.Index or pandas.MultiIndex (otherwise return None).\n \"\"\"\n new_index = None\n\n if isinstance(label, slice):\n if method is not None or tolerance is not None:\n raise NotImplementedError(\n \"cannot use ``method`` argument if any indexers are \" \"slice objects\"\n )\n indexer = index.slice_indexer(\n _sanitize_slice_element(label.start),\n _sanitize_slice_element(label.stop),\n _sanitize_slice_element(label.step),\n )\n if not isinstance(indexer, slice):\n # unlike pandas, in xarray we never want to silently convert a\n # slice indexer into an array indexer\n raise KeyError(\n \"cannot represent labeled-based slice indexer for \"\n \"dimension %r with a slice over integer positions; \"\n \"the index is unsorted or non-unique\" % index_name\n )\n\n elif is_dict_like(label):\n is_nested_vals = _is_nested_tuple(tuple(label.values()))\n if not isinstance(index, pd.MultiIndex):\n raise ValueError(\n \"cannot use a dict-like object for selection on \"\n \"a dimension that does not have a MultiIndex\"\n )\n elif len(label) == index.nlevels and not is_nested_vals:\n indexer = index.get_loc(tuple(label[k] for k in index.names))\n else:\n for k, v in label.items():\n # index should be an item (i.e. Hashable) not an array-like\n if isinstance(v, Sequence) and not isinstance(v, str):\n raise ValueError(\n \"Vectorized selection is not \"\n \"available along level variable: \" + k\n )\n indexer, new_index = index.get_loc_level(\n tuple(label.values()), level=tuple(label.keys())\n )\n\n # GH2619. Raise a KeyError if nothing is chosen\n if indexer.dtype.kind == \"b\" and indexer.sum() == 0:\n raise KeyError(\"{} not found\".format(label))\n\n elif isinstance(label, tuple) and isinstance(index, pd.MultiIndex):\n if _is_nested_tuple(label):\n indexer = index.get_locs(label)\n elif len(label) == index.nlevels:\n indexer = index.get_loc(label)\n else:\n indexer, new_index = index.get_loc_level(\n label, level=list(range(len(label)))\n )\n else:\n label = (\n label\n if getattr(label, \"ndim\", 1) > 1 # vectorized-indexing\n else _asarray_tuplesafe(label)\n )\n if label.ndim == 0:\n if isinstance(index, pd.MultiIndex):\n indexer, new_index = index.get_loc_level(label.item(), level=0)\n else:\n indexer = index.get_loc(\n label.item(), method=method, tolerance=tolerance\n )\n elif label.dtype.kind == \"b\":\n indexer = label\n else:\n if isinstance(index, pd.MultiIndex) and label.ndim > 1:\n raise ValueError(\n \"Vectorized selection is not available along \"\n \"MultiIndex variable: \" + index_name\n )\n indexer = get_indexer_nd(index, label, method, tolerance)\n if np.any(indexer < 0):\n raise KeyError(\"not all values found in index %r\" % index_name)\n return indexer, new_index\n\n\ndef get_dim_indexers(data_obj, indexers):\n \"\"\"Given a xarray data object and label based indexers, return a mapping\n of label indexers with only dimension names as keys.\n\n It groups multiple level indexers given on a multi-index dimension\n into a single, dictionary indexer for that dimension (Raise a ValueError\n if it is not possible).\n \"\"\"\n invalid = [\n k\n for k in indexers\n if k not in data_obj.dims and k not in data_obj._level_coords\n ]\n if invalid:\n raise ValueError(\"dimensions or multi-index levels %r do not exist\" % invalid)\n\n level_indexers = defaultdict(dict)\n dim_indexers = {}\n for key, label in indexers.items():\n dim, = data_obj[key].dims\n if key != dim:\n # assume here multi-index level indexer\n 
level_indexers[dim][key] = label\n else:\n dim_indexers[key] = label\n\n for dim, level_labels in level_indexers.items():\n if dim_indexers.get(dim, False):\n raise ValueError(\n \"cannot combine multi-index level indexers \"\n \"with an indexer for dimension %s\" % dim\n )\n dim_indexers[dim] = level_labels\n\n return dim_indexers\n\n\ndef remap_label_indexers(data_obj, indexers, method=None, tolerance=None):\n \"\"\"Given an xarray data object and label based indexers, return a mapping\n of equivalent location based indexers. Also return a mapping of updated\n pandas index objects (in case of multi-index level drop).\n \"\"\"\n if method is not None and not isinstance(method, str):\n raise TypeError(\"``method`` must be a string\")\n\n pos_indexers = {}\n new_indexes = {}\n\n dim_indexers = get_dim_indexers(data_obj, indexers)\n for dim, label in dim_indexers.items():\n try:\n index = data_obj.indexes[dim]\n except KeyError:\n # no index for this dimension: reuse the provided labels\n if method is not None or tolerance is not None:\n raise ValueError(\n \"cannot supply ``method`` or ``tolerance`` \"\n \"when the indexed dimension does not have \"\n \"an associated coordinate.\"\n )\n pos_indexers[dim] = label\n else:\n coords_dtype = data_obj.coords[dim].dtype\n label = maybe_cast_to_coords_dtype(label, coords_dtype)\n idxr, new_idx = convert_label_indexer(index, label, dim, method, tolerance)\n pos_indexers[dim] = idxr\n if new_idx is not None:\n new_indexes[dim] = new_idx\n\n return pos_indexers, new_indexes\n\n\ndef slice_slice(old_slice, applied_slice, size):\n \"\"\"Given a slice and the size of the dimension to which it will be applied,\n index it with another slice to return a new slice equivalent to applying\n the slices sequentially\n \"\"\"\n step = (old_slice.step or 1) * (applied_slice.step or 1)\n\n # For now, use the hack of turning old_slice into an ndarray to reconstruct\n # the slice start and stop. This is not entirely ideal, but it is still\n # definitely better than leaving the indexer as an array.\n items = _expand_slice(old_slice, size)[applied_slice]\n if len(items) > 0:\n start = items[0]\n stop = items[-1] + int(np.sign(step))\n if stop < 0:\n stop = None\n else:\n start = 0\n stop = 0\n return slice(start, stop, step)\n\n\ndef _index_indexer_1d(old_indexer, applied_indexer, size):\n assert isinstance(applied_indexer, integer_types + (slice, np.ndarray))\n if isinstance(applied_indexer, slice) and applied_indexer == slice(None):\n # shortcut for the usual case\n return old_indexer\n if isinstance(old_indexer, slice):\n if isinstance(applied_indexer, slice):\n indexer = slice_slice(old_indexer, applied_indexer, size)\n else:\n indexer = _expand_slice(old_indexer, size)[applied_indexer]\n else:\n indexer = old_indexer[applied_indexer]\n return indexer\n\n\nclass ExplicitIndexer:\n \"\"\"Base class for explicit indexer objects.\n\n ExplicitIndexer objects wrap a tuple of values given by their ``tuple``\n property. 
These tuples should always have length equal to the number of\n dimensions on the indexed array.\n\n Do not instantiate BaseIndexer objects directly: instead, use one of the\n sub-classes BasicIndexer, OuterIndexer or VectorizedIndexer.\n \"\"\"\n\n __slots__ = (\"_key\",)\n\n def __init__(self, key):\n if type(self) is ExplicitIndexer:\n raise TypeError(\"cannot instantiate base ExplicitIndexer objects\")\n self._key = tuple(key)\n\n @property\n def tuple(self):\n return self._key\n\n def __repr__(self):\n return \"{}({})\".format(type(self).__name__, self.tuple)\n\n\ndef as_integer_or_none(value):\n return None if value is None else operator.index(value)\n\n\ndef as_integer_slice(value):\n start = as_integer_or_none(value.start)\n stop = as_integer_or_none(value.stop)\n step = as_integer_or_none(value.step)\n return slice(start, stop, step)\n\n\nclass BasicIndexer(ExplicitIndexer):\n \"\"\"Tuple for basic indexing.\n\n All elements should be int or slice objects. Indexing follows NumPy's\n rules for basic indexing: each axis is independently sliced and axes\n indexed with an integer are dropped from the result.\n \"\"\"\n\n __slots__ = ()\n\n def __init__(self, key):\n if not isinstance(key, tuple):\n raise TypeError(\"key must be a tuple: {!r}\".format(key))\n\n new_key = []\n for k in key:\n if isinstance(k, integer_types):\n k = int(k)\n elif isinstance(k, slice):\n k = as_integer_slice(k)\n else:\n raise TypeError(\n \"unexpected indexer type for {}: {!r}\".format(\n type(self).__name__, k\n )\n )\n new_key.append(k)\n\n super().__init__(new_key)\n\n\nclass OuterIndexer(ExplicitIndexer):\n \"\"\"Tuple for outer/orthogonal indexing.\n\n All elements should be int, slice or 1-dimensional np.ndarray objects with\n an integer dtype. Indexing is applied independently along each axis, and\n axes indexed with an integer are dropped from the result. This type of\n indexing works like MATLAB/Fortran.\n \"\"\"\n\n __slots__ = ()\n\n def __init__(self, key):\n if not isinstance(key, tuple):\n raise TypeError(\"key must be a tuple: {!r}\".format(key))\n\n new_key = []\n for k in key:\n if isinstance(k, integer_types):\n k = int(k)\n elif isinstance(k, slice):\n k = as_integer_slice(k)\n elif isinstance(k, np.ndarray):\n if not np.issubdtype(k.dtype, np.integer):\n raise TypeError(\n \"invalid indexer array, does not have \"\n \"integer dtype: {!r}\".format(k)\n )\n if k.ndim != 1:\n raise TypeError(\n \"invalid indexer array for {}, must have \"\n \"exactly 1 dimension: \".format(type(self).__name__, k)\n )\n k = np.asarray(k, dtype=np.int64)\n else:\n raise TypeError(\n \"unexpected indexer type for {}: {!r}\".format(\n type(self).__name__, k\n )\n )\n new_key.append(k)\n\n super().__init__(new_key)\n\n\nclass VectorizedIndexer(ExplicitIndexer):\n \"\"\"Tuple for vectorized indexing.\n\n All elements should be slice or N-dimensional np.ndarray objects with an\n integer dtype and the same number of dimensions. 
Indexing follows proposed\n rules for np.ndarray.vindex, which matches NumPy's advanced indexing rules\n (including broadcasting) except sliced axes are always moved to the end:\n https://github.com/numpy/numpy/pull/6256\n \"\"\"\n\n __slots__ = ()\n\n def __init__(self, key):\n if not isinstance(key, tuple):\n raise TypeError(\"key must be a tuple: {!r}\".format(key))\n\n new_key = []\n ndim = None\n for k in key:\n if isinstance(k, slice):\n k = as_integer_slice(k)\n elif isinstance(k, np.ndarray):\n if not np.issubdtype(k.dtype, np.integer):\n raise TypeError(\n \"invalid indexer array, does not have \"\n \"integer dtype: {!r}\".format(k)\n )\n if ndim is None:\n ndim = k.ndim\n elif ndim != k.ndim:\n ndims = [k.ndim for k in key if isinstance(k, np.ndarray)]\n raise ValueError(\n \"invalid indexer key: ndarray arguments \"\n \"have different numbers of dimensions: {}\".format(ndims)\n )\n k = np.asarray(k, dtype=np.int64)\n else:\n raise TypeError(\n \"unexpected indexer type for {}: {!r}\".format(\n type(self).__name__, k\n )\n )\n new_key.append(k)\n\n super().__init__(new_key)\n\n\nclass ExplicitlyIndexed:\n \"\"\"Mixin to mark support for Indexer subclasses in indexing.\n \"\"\"\n\n __slots__ = ()\n\n\nclass ExplicitlyIndexedNDArrayMixin(utils.NDArrayMixin, ExplicitlyIndexed):\n __slots__ = ()\n\n def __array__(self, dtype=None):\n key = BasicIndexer((slice(None),) * self.ndim)\n return np.asarray(self[key], dtype=dtype)\n\n\nclass ImplicitToExplicitIndexingAdapter(utils.NDArrayMixin):\n \"\"\"Wrap an array, converting tuples into the indicated explicit indexer.\"\"\"\n\n __slots__ = (\"array\", \"indexer_cls\")\n\n def __init__(self, array, indexer_cls=BasicIndexer):\n self.array = as_indexable(array)\n self.indexer_cls = indexer_cls\n\n def __array__(self, dtype=None):\n return np.asarray(self.array, dtype=dtype)\n\n def __getitem__(self, key):\n key = expanded_indexer(key, self.ndim)\n result = self.array[self.indexer_cls(key)]\n if isinstance(result, ExplicitlyIndexed):\n return type(self)(result, self.indexer_cls)\n else:\n # Sometimes explicitly indexed arrays return NumPy arrays or\n # scalars.\n return result\n\n\nclass LazilyOuterIndexedArray(ExplicitlyIndexedNDArrayMixin):\n \"\"\"Wrap an array to make basic and outer indexing lazy.\n \"\"\"\n\n __slots__ = (\"array\", \"key\")\n\n def __init__(self, array, key=None):\n \"\"\"\n Parameters\n ----------\n array : array_like\n Array like object to index.\n key : ExplicitIndexer, optional\n Array indexer. 
If provided, it is assumed to already be in\n canonical expanded form.\n \"\"\"\n if isinstance(array, type(self)) and key is None:\n # unwrap\n key = array.key\n array = array.array\n\n if key is None:\n key = BasicIndexer((slice(None),) * array.ndim)\n\n self.array = as_indexable(array)\n self.key = key\n\n def _updated_key(self, new_key):\n iter_new_key = iter(expanded_indexer(new_key.tuple, self.ndim))\n full_key = []\n for size, k in zip(self.array.shape, self.key.tuple):\n if isinstance(k, integer_types):\n full_key.append(k)\n else:\n full_key.append(_index_indexer_1d(k, next(iter_new_key), size))\n full_key = tuple(full_key)\n\n if all(isinstance(k, integer_types + (slice,)) for k in full_key):\n return BasicIndexer(full_key)\n return OuterIndexer(full_key)\n\n @property\n def shape(self):\n shape = []\n for size, k in zip(self.array.shape, self.key.tuple):\n if isinstance(k, slice):\n shape.append(len(range(*k.indices(size))))\n elif isinstance(k, np.ndarray):\n shape.append(k.size)\n return tuple(shape)\n\n def __array__(self, dtype=None):\n array = as_indexable(self.array)\n return np.asarray(array[self.key], dtype=None)\n\n def transpose(self, order):\n return LazilyVectorizedIndexedArray(self.array, self.key).transpose(order)\n\n def __getitem__(self, indexer):\n if isinstance(indexer, VectorizedIndexer):\n array = LazilyVectorizedIndexedArray(self.array, self.key)\n return array[indexer]\n return type(self)(self.array, self._updated_key(indexer))\n\n def __setitem__(self, key, value):\n if isinstance(key, VectorizedIndexer):\n raise NotImplementedError(\n \"Lazy item assignment with the vectorized indexer is not yet \"\n \"implemented. Load your data first by .load() or compute().\"\n )\n full_key = self._updated_key(key)\n self.array[full_key] = value\n\n def __repr__(self):\n return \"%s(array=%r, key=%r)\" % (type(self).__name__, self.array, self.key)\n\n\nclass LazilyVectorizedIndexedArray(ExplicitlyIndexedNDArrayMixin):\n \"\"\"Wrap an array to make vectorized indexing lazy.\n \"\"\"\n\n __slots__ = (\"array\", \"key\")\n\n def __init__(self, array, key):\n \"\"\"\n Parameters\n ----------\n array : array_like\n Array like object to index.\n key : VectorizedIndexer\n \"\"\"\n if isinstance(key, (BasicIndexer, OuterIndexer)):\n self.key = _outer_to_vectorized_indexer(key, array.shape)\n else:\n self.key = _arrayize_vectorized_indexer(key, array.shape)\n self.array = as_indexable(array)\n\n @property\n def shape(self):\n return np.broadcast(*self.key.tuple).shape\n\n def __array__(self, dtype=None):\n return np.asarray(self.array[self.key], dtype=None)\n\n def _updated_key(self, new_key):\n return _combine_indexers(self.key, self.shape, new_key)\n\n def __getitem__(self, indexer):\n # If the indexed array becomes a scalar, return LazilyOuterIndexedArray\n if all(isinstance(ind, integer_types) for ind in indexer.tuple):\n key = BasicIndexer(tuple(k[indexer.tuple] for k in self.key.tuple))\n return LazilyOuterIndexedArray(self.array, key)\n return type(self)(self.array, self._updated_key(indexer))\n\n def transpose(self, order):\n key = VectorizedIndexer(tuple(k.transpose(order) for k in self.key.tuple))\n return type(self)(self.array, key)\n\n def __setitem__(self, key, value):\n raise NotImplementedError(\n \"Lazy item assignment with the vectorized indexer is not yet \"\n \"implemented. 
Load your data first by .load() or compute().\"\n )\n\n def __repr__(self):\n return \"%s(array=%r, key=%r)\" % (type(self).__name__, self.array, self.key)\n\n\ndef _wrap_numpy_scalars(array):\n \"\"\"Wrap NumPy scalars in 0d arrays.\"\"\"\n if np.isscalar(array):\n return np.array(array)\n else:\n return array\n\n\nclass CopyOnWriteArray(ExplicitlyIndexedNDArrayMixin):\n __slots__ = (\"array\", \"_copied\")\n\n def __init__(self, array):\n self.array = as_indexable(array)\n self._copied = False\n\n def _ensure_copied(self):\n if not self._copied:\n self.array = as_indexable(np.array(self.array))\n self._copied = True\n\n def __array__(self, dtype=None):\n return np.asarray(self.array, dtype=dtype)\n\n def __getitem__(self, key):\n return type(self)(_wrap_numpy_scalars(self.array[key]))\n\n def transpose(self, order):\n return self.array.transpose(order)\n\n def __setitem__(self, key, value):\n self._ensure_copied()\n self.array[key] = value\n\n\nclass MemoryCachedArray(ExplicitlyIndexedNDArrayMixin):\n __slots__ = (\"array\",)\n\n def __init__(self, array):\n self.array = _wrap_numpy_scalars(as_indexable(array))\n\n def _ensure_cached(self):\n if not isinstance(self.array, NumpyIndexingAdapter):\n self.array = NumpyIndexingAdapter(np.asarray(self.array))\n\n def __array__(self, dtype=None):\n self._ensure_cached()\n return np.asarray(self.array, dtype=dtype)\n\n def __getitem__(self, key):\n return type(self)(_wrap_numpy_scalars(self.array[key]))\n\n def transpose(self, order):\n return self.array.transpose(order)\n\n def __setitem__(self, key, value):\n self.array[key] = value\n\n\ndef as_indexable(array):\n \"\"\"\n This function always returns a ExplicitlyIndexed subclass,\n so that the vectorized indexing is always possible with the returned\n object.\n \"\"\"\n if isinstance(array, ExplicitlyIndexed):\n return array\n if isinstance(array, np.ndarray):\n return NumpyIndexingAdapter(array)\n if isinstance(array, pd.Index):\n return PandasIndexAdapter(array)\n if isinstance(array, dask_array_type):\n return DaskIndexingAdapter(array)\n if hasattr(array, \"__array_function__\"):\n return NdArrayLikeIndexingAdapter(array)\n\n raise TypeError(\"Invalid array type: {}\".format(type(array)))\n\n\ndef _outer_to_vectorized_indexer(key, shape):\n \"\"\"Convert an OuterIndexer into an vectorized indexer.\n\n Parameters\n ----------\n key : Outer/Basic Indexer\n An indexer to convert.\n shape : tuple\n Shape of the array subject to the indexing.\n\n Returns\n -------\n VectorizedIndexer\n Tuple suitable for use to index a NumPy array with vectorized indexing.\n Each element is an array: broadcasting them together gives the shape\n of the result.\n \"\"\"\n key = key.tuple\n\n n_dim = len([k for k in key if not isinstance(k, integer_types)])\n i_dim = 0\n new_key = []\n for k, size in zip(key, shape):\n if isinstance(k, integer_types):\n new_key.append(np.array(k).reshape((1,) * n_dim))\n else: # np.ndarray or slice\n if isinstance(k, slice):\n k = np.arange(*k.indices(size))\n assert k.dtype.kind in {\"i\", \"u\"}\n shape = [(1,) * i_dim + (k.size,) + (1,) * (n_dim - i_dim - 1)]\n new_key.append(k.reshape(*shape))\n i_dim += 1\n return VectorizedIndexer(tuple(new_key))\n\n\ndef _outer_to_numpy_indexer(key, shape):\n \"\"\"Convert an OuterIndexer into an indexer for NumPy.\n\n Parameters\n ----------\n key : Basic/OuterIndexer\n An indexer to convert.\n shape : tuple\n Shape of the array subject to the indexing.\n\n Returns\n -------\n tuple\n Tuple suitable for use to index a NumPy array.\n 
\"\"\"\n if len([k for k in key.tuple if not isinstance(k, slice)]) <= 1:\n # If there is only one vector and all others are slice,\n # it can be safely used in mixed basic/advanced indexing.\n # Boolean index should already be converted to integer array.\n return key.tuple\n else:\n return _outer_to_vectorized_indexer(key, shape).tuple\n\n\ndef _combine_indexers(old_key, shape, new_key):\n \"\"\" Combine two indexers.\n\n Parameters\n ----------\n old_key: ExplicitIndexer\n The first indexer for the original array\n shape: tuple of ints\n Shape of the original array to be indexed by old_key\n new_key:\n The second indexer for indexing original[old_key]\n \"\"\"\n if not isinstance(old_key, VectorizedIndexer):\n old_key = _outer_to_vectorized_indexer(old_key, shape)\n if len(old_key.tuple) == 0:\n return new_key\n\n new_shape = np.broadcast(*old_key.tuple).shape\n if isinstance(new_key, VectorizedIndexer):\n new_key = _arrayize_vectorized_indexer(new_key, new_shape)\n else:\n new_key = _outer_to_vectorized_indexer(new_key, new_shape)\n\n return VectorizedIndexer(\n tuple(o[new_key.tuple] for o in np.broadcast_arrays(*old_key.tuple))\n )\n\n\n@enum.unique\nclass IndexingSupport(enum.Enum):\n # for backends that support only basic indexer\n BASIC = 0\n # for backends that support basic / outer indexer\n OUTER = 1\n # for backends that support outer indexer including at most 1 vector.\n OUTER_1VECTOR = 2\n # for backends that support full vectorized indexer.\n VECTORIZED = 3\n\n\ndef explicit_indexing_adapter(\n key: ExplicitIndexer,\n shape: Tuple[int, ...],\n indexing_support: IndexingSupport,\n raw_indexing_method: Callable,\n) -> Any:\n \"\"\"Support explicit indexing by delegating to a raw indexing method.\n\n Outer and/or vectorized indexers are supported by indexing a second time\n with a NumPy array.\n\n Parameters\n ----------\n key : ExplicitIndexer\n Explicit indexing object.\n shape : Tuple[int, ...]\n Shape of the indexed array.\n indexing_support : IndexingSupport enum\n Form of indexing supported by raw_indexing_method.\n raw_indexing_method: callable\n Function (like ndarray.__getitem__) that when called with indexing key\n in the form of a tuple returns an indexed array.\n\n Returns\n -------\n Indexing result, in the form of a duck numpy-array.\n \"\"\"\n raw_key, numpy_indices = decompose_indexer(key, shape, indexing_support)\n result = raw_indexing_method(raw_key.tuple)\n if numpy_indices.tuple:\n # index the loaded np.ndarray\n result = NumpyIndexingAdapter(np.asarray(result))[numpy_indices]\n return result\n\n\ndef decompose_indexer(\n indexer: ExplicitIndexer, shape: Tuple[int, ...], indexing_support: IndexingSupport\n) -> Tuple[ExplicitIndexer, ExplicitIndexer]:\n if isinstance(indexer, VectorizedIndexer):\n return _decompose_vectorized_indexer(indexer, shape, indexing_support)\n if isinstance(indexer, (BasicIndexer, OuterIndexer)):\n return _decompose_outer_indexer(indexer, shape, indexing_support)\n raise TypeError(\"unexpected key type: {}\".format(indexer))\n\n\ndef _decompose_slice(key, size):\n \"\"\" convert a slice to successive two slices. The first slice always has\n a positive step.\n \"\"\"\n start, stop, step = key.indices(size)\n if step > 0:\n # If key already has a positive step, use it as is in the backend\n return key, slice(None)\n else:\n # determine stop precisely for step > 1 case\n # e.g. 
[98:2:-2] -> [98:3:-2]\n stop = start + int((stop - start - 1) / step) * step + 1\n start, stop = stop + 1, start + 1\n return slice(start, stop, -step), slice(None, None, -1)\n\n\ndef _decompose_vectorized_indexer(\n indexer: VectorizedIndexer,\n shape: Tuple[int, ...],\n indexing_support: IndexingSupport,\n) -> Tuple[ExplicitIndexer, ExplicitIndexer]:\n \"\"\"\n Decompose vectorized indexer to the successive two indexers, where the\n first indexer will be used to index backend arrays, while the second one\n is used to index loaded on-memory np.ndarray.\n\n Parameters\n ----------\n indexer: VectorizedIndexer\n indexing_support: one of IndexerSupport entries\n\n Returns\n -------\n backend_indexer: OuterIndexer or BasicIndexer\n np_indexers: an ExplicitIndexer (VectorizedIndexer / BasicIndexer)\n\n Notes\n -----\n This function is used to realize the vectorized indexing for the backend\n arrays that only support basic or outer indexing.\n\n As an example, let us consider to index a few elements from a backend array\n with a vectorized indexer ([0, 3, 1], [2, 3, 2]).\n Even if the backend array only supports outer indexing, it is more\n efficient to load a subslice of the array than loading the entire array,\n\n >>> backend_indexer = OuterIndexer([0, 1, 3], [2, 3])\n >>> array = array[backend_indexer] # load subslice of the array\n >>> np_indexer = VectorizedIndexer([0, 2, 1], [0, 1, 0])\n >>> array[np_indexer] # vectorized indexing for on-memory np.ndarray.\n \"\"\"\n assert isinstance(indexer, VectorizedIndexer)\n\n if indexing_support is IndexingSupport.VECTORIZED:\n return indexer, BasicIndexer(())\n\n backend_indexer_elems = []\n np_indexer_elems = []\n # convert negative indices\n indexer_elems = [\n np.where(k < 0, k + s, k) if isinstance(k, np.ndarray) else k\n for k, s in zip(indexer.tuple, shape)\n ]\n\n for k, s in zip(indexer_elems, shape):\n if isinstance(k, slice):\n # If it is a slice, then we will slice it as-is\n # (but make its step positive) in the backend,\n # and then use all of it (slice(None)) for the in-memory portion.\n bk_slice, np_slice = _decompose_slice(k, s)\n backend_indexer_elems.append(bk_slice)\n np_indexer_elems.append(np_slice)\n else:\n # If it is a (multidimensional) np.ndarray, just pickup the used\n # keys without duplication and store them as a 1d-np.ndarray.\n oind, vind = np.unique(k, return_inverse=True)\n backend_indexer_elems.append(oind)\n np_indexer_elems.append(vind.reshape(*k.shape))\n\n backend_indexer = OuterIndexer(tuple(backend_indexer_elems))\n np_indexer = VectorizedIndexer(tuple(np_indexer_elems))\n\n if indexing_support is IndexingSupport.OUTER:\n return backend_indexer, np_indexer\n\n # If the backend does not support outer indexing,\n # backend_indexer (OuterIndexer) is also decomposed.\n backend_indexer1, np_indexer1 = _decompose_outer_indexer(\n backend_indexer, shape, indexing_support\n )\n np_indexer = _combine_indexers(np_indexer1, shape, np_indexer)\n return backend_indexer1, np_indexer\n\n\ndef _decompose_outer_indexer(\n indexer: Union[BasicIndexer, OuterIndexer],\n shape: Tuple[int, ...],\n indexing_support: IndexingSupport,\n) -> Tuple[ExplicitIndexer, ExplicitIndexer]:\n \"\"\"\n Decompose outer indexer to the successive two indexers, where the\n first indexer will be used to index backend arrays, while the second one\n is used to index the loaded on-memory np.ndarray.\n\n Parameters\n ----------\n indexer: OuterIndexer or BasicIndexer\n indexing_support: One of the entries of IndexingSupport\n\n Returns\n -------\n 
backend_indexer: OuterIndexer or BasicIndexer\n np_indexers: an ExplicitIndexer (OuterIndexer / BasicIndexer)\n\n Notes\n -----\n This function is used to realize the vectorized indexing for the backend\n arrays that only support basic or outer indexing.\n\n As an example, let us consider to index a few elements from a backend array\n with a orthogonal indexer ([0, 3, 1], [2, 3, 2]).\n Even if the backend array only supports basic indexing, it is more\n efficient to load a subslice of the array than loading the entire array,\n\n >>> backend_indexer = BasicIndexer(slice(0, 3), slice(2, 3))\n >>> array = array[backend_indexer] # load subslice of the array\n >>> np_indexer = OuterIndexer([0, 2, 1], [0, 1, 0])\n >>> array[np_indexer] # outer indexing for on-memory np.ndarray.\n \"\"\"\n if indexing_support == IndexingSupport.VECTORIZED:\n return indexer, BasicIndexer(())\n assert isinstance(indexer, (OuterIndexer, BasicIndexer))\n\n backend_indexer = []\n np_indexer = []\n # make indexer positive\n pos_indexer = []\n for k, s in zip(indexer.tuple, shape):\n if isinstance(k, np.ndarray):\n pos_indexer.append(np.where(k < 0, k + s, k))\n elif isinstance(k, integer_types) and k < 0:\n pos_indexer.append(k + s)\n else:\n pos_indexer.append(k)\n indexer_elems = pos_indexer\n\n if indexing_support is IndexingSupport.OUTER_1VECTOR:\n # some backends such as h5py supports only 1 vector in indexers\n # We choose the most efficient axis\n gains = [\n (np.max(k) - np.min(k) + 1.0) / len(np.unique(k))\n if isinstance(k, np.ndarray)\n else 0\n for k in indexer_elems\n ]\n array_index = np.argmax(np.array(gains)) if len(gains) > 0 else None\n\n for i, (k, s) in enumerate(zip(indexer_elems, shape)):\n if isinstance(k, np.ndarray) and i != array_index:\n # np.ndarray key is converted to slice that covers the entire\n # entries of this key.\n backend_indexer.append(slice(np.min(k), np.max(k) + 1))\n np_indexer.append(k - np.min(k))\n elif isinstance(k, np.ndarray):\n # Remove duplicates and sort them in the increasing order\n pkey, ekey = np.unique(k, return_inverse=True)\n backend_indexer.append(pkey)\n np_indexer.append(ekey)\n elif isinstance(k, integer_types):\n backend_indexer.append(k)\n else: # slice: convert positive step slice for backend\n bk_slice, np_slice = _decompose_slice(k, s)\n backend_indexer.append(bk_slice)\n np_indexer.append(np_slice)\n\n return (OuterIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer)))\n\n if indexing_support == IndexingSupport.OUTER:\n for k, s in zip(indexer_elems, shape):\n if isinstance(k, slice):\n # slice: convert positive step slice for backend\n bk_slice, np_slice = _decompose_slice(k, s)\n backend_indexer.append(bk_slice)\n np_indexer.append(np_slice)\n elif isinstance(k, integer_types):\n backend_indexer.append(k)\n elif isinstance(k, np.ndarray) and (np.diff(k) >= 0).all():\n backend_indexer.append(k)\n np_indexer.append(slice(None))\n else:\n # Remove duplicates and sort them in the increasing order\n oind, vind = np.unique(k, return_inverse=True)\n backend_indexer.append(oind)\n np_indexer.append(vind.reshape(*k.shape))\n\n return (OuterIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer)))\n\n # basic indexer\n assert indexing_support == IndexingSupport.BASIC\n\n for k, s in zip(indexer_elems, shape):\n if isinstance(k, np.ndarray):\n # np.ndarray key is converted to slice that covers the entire\n # entries of this key.\n backend_indexer.append(slice(np.min(k), np.max(k) + 1))\n np_indexer.append(k - np.min(k))\n elif isinstance(k, 
integer_types):\n backend_indexer.append(k)\n else: # slice: convert positive step slice for backend\n bk_slice, np_slice = _decompose_slice(k, s)\n backend_indexer.append(bk_slice)\n np_indexer.append(np_slice)\n\n return (BasicIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer)))\n\n\ndef _arrayize_vectorized_indexer(indexer, shape):\n \"\"\" Return an identical vindex but slices are replaced by arrays \"\"\"\n slices = [v for v in indexer.tuple if isinstance(v, slice)]\n if len(slices) == 0:\n return indexer\n\n arrays = [v for v in indexer.tuple if isinstance(v, np.ndarray)]\n n_dim = arrays[0].ndim if len(arrays) > 0 else 0\n i_dim = 0\n new_key = []\n for v, size in zip(indexer.tuple, shape):\n if isinstance(v, np.ndarray):\n new_key.append(np.reshape(v, v.shape + (1,) * len(slices)))\n else: # slice\n shape = (1,) * (n_dim + i_dim) + (-1,) + (1,) * (len(slices) - i_dim - 1)\n new_key.append(np.arange(*v.indices(size)).reshape(shape))\n i_dim += 1\n return VectorizedIndexer(tuple(new_key))\n\n\ndef _dask_array_with_chunks_hint(array, chunks):\n \"\"\"Create a dask array using the chunks hint for dimensions of size > 1.\"\"\"\n import dask.array as da\n\n if len(chunks) < array.ndim:\n raise ValueError(\"not enough chunks in hint\")\n new_chunks = []\n for chunk, size in zip(chunks, array.shape):\n new_chunks.append(chunk if size > 1 else (1,))\n return da.from_array(array, new_chunks)\n\n\ndef _logical_any(args):\n return functools.reduce(operator.or_, args)\n\n\ndef _masked_result_drop_slice(key, data=None):\n\n key = (k for k in key if not isinstance(k, slice))\n chunks_hint = getattr(data, \"chunks\", None)\n\n new_keys = []\n for k in key:\n if isinstance(k, np.ndarray):\n if isinstance(data, dask_array_type):\n new_keys.append(_dask_array_with_chunks_hint(k, chunks_hint))\n elif isinstance(data, sparse_array_type):\n import sparse\n\n new_keys.append(sparse.COO.from_numpy(k))\n else:\n new_keys.append(k)\n else:\n new_keys.append(k)\n\n mask = _logical_any(k == -1 for k in new_keys)\n return mask\n\n\ndef create_mask(indexer, shape, data=None):\n \"\"\"Create a mask for indexing with a fill-value.\n\n Parameters\n ----------\n indexer : ExplicitIndexer\n Indexer with -1 in integer or ndarray value to indicate locations in\n the result that should be masked.\n shape : tuple\n Shape of the array being indexed.\n data : optional\n Data for which mask is being created. If data is a dask arrays, its chunks\n are used as a hint for chunks on the resulting mask. If data is a sparse\n array, the returned mask is also a sparse array.\n\n Returns\n -------\n mask : bool, np.ndarray, SparseArray or dask.array.Array with dtype=bool\n Same type as data. 
Has the same shape as the indexing result.\n \"\"\"\n if isinstance(indexer, OuterIndexer):\n key = _outer_to_vectorized_indexer(indexer, shape).tuple\n assert not any(isinstance(k, slice) for k in key)\n mask = _masked_result_drop_slice(key, data)\n\n elif isinstance(indexer, VectorizedIndexer):\n key = indexer.tuple\n base_mask = _masked_result_drop_slice(key, data)\n slice_shape = tuple(\n np.arange(*k.indices(size)).size\n for k, size in zip(key, shape)\n if isinstance(k, slice)\n )\n expanded_mask = base_mask[(Ellipsis,) + (np.newaxis,) * len(slice_shape)]\n mask = duck_array_ops.broadcast_to(expanded_mask, base_mask.shape + slice_shape)\n\n elif isinstance(indexer, BasicIndexer):\n mask = any(k == -1 for k in indexer.tuple)\n\n else:\n raise TypeError(\"unexpected key type: {}\".format(type(indexer)))\n\n return mask\n\n\ndef _posify_mask_subindexer(index):\n \"\"\"Convert masked indices in a flat array to the nearest unmasked index.\n\n Parameters\n ----------\n index : np.ndarray\n One dimensional ndarray with dtype=int.\n\n Returns\n -------\n np.ndarray\n One dimensional ndarray with all values equal to -1 replaced by an\n adjacent non-masked element.\n \"\"\"\n masked = index == -1\n unmasked_locs = np.flatnonzero(~masked)\n if not unmasked_locs.size:\n # indexing unmasked_locs is invalid\n return np.zeros_like(index)\n masked_locs = np.flatnonzero(masked)\n prev_value = np.maximum(0, np.searchsorted(unmasked_locs, masked_locs) - 1)\n new_index = index.copy()\n new_index[masked_locs] = index[unmasked_locs[prev_value]]\n return new_index\n\n\ndef posify_mask_indexer(indexer):\n \"\"\"Convert masked values (-1) in an indexer to nearest unmasked values.\n\n This routine is useful for dask, where it can be much faster to index\n adjacent points than arbitrary points from the end of an array.\n\n Parameters\n ----------\n indexer : ExplicitIndexer\n Input indexer.\n\n Returns\n -------\n ExplicitIndexer\n Same type of input, with all values in ndarray keys equal to -1\n replaced by an adjacent non-masked element.\n \"\"\"\n key = tuple(\n _posify_mask_subindexer(k.ravel()).reshape(k.shape)\n if isinstance(k, np.ndarray)\n else k\n for k in indexer.tuple\n )\n return type(indexer)(key)\n\n\nclass NumpyIndexingAdapter(ExplicitlyIndexedNDArrayMixin):\n \"\"\"Wrap a NumPy array to use explicit indexing.\"\"\"\n\n __slots__ = (\"array\",)\n\n def __init__(self, array):\n # In NumpyIndexingAdapter we only allow to store bare np.ndarray\n if not isinstance(array, np.ndarray):\n raise TypeError(\n \"NumpyIndexingAdapter only wraps np.ndarray. \"\n \"Trying to wrap {}\".format(type(array))\n )\n self.array = array\n\n def _indexing_array_and_key(self, key):\n if isinstance(key, OuterIndexer):\n array = self.array\n key = _outer_to_numpy_indexer(key, self.array.shape)\n elif isinstance(key, VectorizedIndexer):\n array = nputils.NumpyVIndexAdapter(self.array)\n key = key.tuple\n elif isinstance(key, BasicIndexer):\n array = self.array\n # We want 0d slices rather than scalars. 
This is achieved by\n # appending an ellipsis (see\n # https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#detailed-notes).\n key = key.tuple + (Ellipsis,)\n else:\n raise TypeError(\"unexpected key type: {}\".format(type(key)))\n\n return array, key\n\n def transpose(self, order):\n return self.array.transpose(order)\n\n def __getitem__(self, key):\n array, key = self._indexing_array_and_key(key)\n return array[key]\n\n def __setitem__(self, key, value):\n array, key = self._indexing_array_and_key(key)\n try:\n array[key] = value\n except ValueError:\n # More informative exception if read-only view\n if not array.flags.writeable and not array.flags.owndata:\n raise ValueError(\n \"Assignment destination is a view. \"\n \"Do you want to .copy() array first?\"\n )\n else:\n raise\n\n\nclass NdArrayLikeIndexingAdapter(NumpyIndexingAdapter):\n __slots__ = (\"array\",)\n\n def __init__(self, array):\n if not hasattr(array, \"__array_function__\"):\n raise TypeError(\n \"NdArrayLikeIndexingAdapter must wrap an object that \"\n \"implements the __array_function__ protocol\"\n )\n self.array = array\n\n\nclass DaskIndexingAdapter(ExplicitlyIndexedNDArrayMixin):\n \"\"\"Wrap a dask array to support explicit indexing.\"\"\"\n\n __slots__ = (\"array\",)\n\n def __init__(self, array):\n \"\"\" This adapter is created in Variable.__getitem__ in\n Variable._broadcast_indexes.\n \"\"\"\n self.array = array\n\n def __getitem__(self, key):\n if isinstance(key, BasicIndexer):\n return self.array[key.tuple]\n elif isinstance(key, VectorizedIndexer):\n return self.array.vindex[key.tuple]\n else:\n assert isinstance(key, OuterIndexer)\n key = key.tuple\n try:\n return self.array[key]\n except NotImplementedError:\n # manual orthogonal indexing.\n # TODO: port this upstream into dask in a saner way.\n value = self.array\n for axis, subkey in reversed(list(enumerate(key))):\n value = value[(slice(None),) * axis + (subkey,)]\n return value\n\n def __setitem__(self, key, value):\n raise TypeError(\n \"this variable's data is stored in a dask array, \"\n \"which does not support item assignment. 
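To illustrate how the adapter classes above dispatch on the indexer type, here is a hedged sketch that exercises `NumpyIndexingAdapter` with the three explicit indexer classes, under the same `xarray.core.indexing` module-path assumption:

```python
import numpy as np
from xarray.core.indexing import (
    BasicIndexer,
    OuterIndexer,
    VectorizedIndexer,
    NumpyIndexingAdapter,
)

wrapped = NumpyIndexingAdapter(np.arange(12).reshape(3, 4))

wrapped[BasicIndexer((0, slice(None)))]
# row 0 of the array, shape (4,)

wrapped[OuterIndexer((np.array([0, 2]), np.array([1, 3])))]
# orthogonal selection of rows [0, 2] x columns [1, 3], shape (2, 2)

wrapped[VectorizedIndexer((np.array([0, 2]), np.array([1, 3])))]
# pointwise selection of elements (0, 1) and (2, 3), shape (2,)
```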
To \"\n \"assign to this variable, you must first load it \"\n \"into memory explicitly using the .load() \"\n \"method or accessing its .values attribute.\"\n )\n\n def transpose(self, order):\n return self.array.transpose(order)\n\n\nclass PandasIndexAdapter(ExplicitlyIndexedNDArrayMixin):\n \"\"\"Wrap a pandas.Index to preserve dtypes and handle explicit indexing.\n \"\"\"\n\n __slots__ = (\"array\", \"_dtype\")\n\n def __init__(self, array: Any, dtype: DTypeLike = None):\n self.array = utils.safe_cast_to_index(array)\n if dtype is None:\n if isinstance(array, pd.PeriodIndex):\n dtype = np.dtype(\"O\")\n elif hasattr(array, \"categories\"):\n # category isn't a real numpy dtype\n dtype = array.categories.dtype\n elif not utils.is_valid_numpy_dtype(array.dtype):\n dtype = np.dtype(\"O\")\n else:\n dtype = array.dtype\n else:\n dtype = np.dtype(dtype)\n self._dtype = dtype\n\n @property\n def dtype(self) -> np.dtype:\n return self._dtype\n\n def __array__(self, dtype: DTypeLike = None) -> np.ndarray:\n if dtype is None:\n dtype = self.dtype\n array = self.array\n if isinstance(array, pd.PeriodIndex):\n with suppress(AttributeError):\n # this might not be public API\n array = array.astype(\"object\")\n return np.asarray(array.values, dtype=dtype)\n\n @property\n def shape(self) -> Tuple[int]:\n return (len(self.array),)\n\n def __getitem__(\n self, indexer\n ) -> Union[NumpyIndexingAdapter, np.ndarray, np.datetime64, np.timedelta64]:\n key = indexer.tuple\n if isinstance(key, tuple) and len(key) == 1:\n # unpack key so it can index a pandas.Index object (pandas.Index\n # objects don't like tuples)\n key, = key\n\n if getattr(key, \"ndim\", 0) > 1: # Return np-array if multidimensional\n return NumpyIndexingAdapter(self.array.values)[indexer]\n\n result = self.array[key]\n\n if isinstance(result, pd.Index):\n result = PandasIndexAdapter(result, dtype=self.dtype)\n else:\n # result is a scalar\n if result is pd.NaT:\n # work around the impossibility of casting NaT with asarray\n # note: it probably would be better in general to return\n # pd.Timestamp rather np.than datetime64 but this is easier\n # (for now)\n result = np.datetime64(\"NaT\", \"ns\")\n elif isinstance(result, timedelta):\n result = np.timedelta64(getattr(result, \"value\", result), \"ns\")\n elif isinstance(result, pd.Timestamp):\n # Work around for GH: pydata/xarray#1932 and numpy/numpy#10668\n # numpy fails to convert pd.Timestamp to np.datetime64[ns]\n result = np.asarray(result.to_datetime64())\n elif self.dtype != object:\n result = np.asarray(result, dtype=self.dtype)\n\n # as for numpy.ndarray indexing, we always want the result to be\n # a NumPy array.\n result = utils.to_0d_array(result)\n\n return result\n\n def transpose(self, order) -> pd.Index:\n return self.array # self.array should be always one-dimensional\n\n def __repr__(self) -> str:\n return \"%s(array=%r, dtype=%r)\" % (type(self).__name__, self.array, self.dtype)\n\n def copy(self, deep: bool = True) -> \"PandasIndexAdapter\":\n # Not the same as just writing `self.array.copy(deep=deep)`, as\n # shallow copies of the underlying numpy.ndarrays become deep ones\n # upon pickling\n # >>> len(pickle.dumps((self.array, self.array)))\n # 4000281\n # >>> len(pickle.dumps((self.array, self.array.copy(deep=False))))\n # 8000341\n array = self.array.copy(deep=True) if deep else self.array\n return PandasIndexAdapter(array, self._dtype)\n"},{"col":4,"comment":"null","endLoc":325,"header":"def 
time_load_dataset_netcdf4(self)","id":1632,"name":"time_load_dataset_netcdf4","nodeType":"Function","startLoc":324,"text":"def time_load_dataset_netcdf4(self):\n xr.open_mfdataset(self.filenames_list, engine=\"netcdf4\").load()"},{"col":4,"comment":"Combine variables of differing dimensionality into a DataArray\n without broadcasting.\n\n This method is similar to Dataset.to_array but does not broadcast the\n variables.\n\n Parameters\n ----------\n new_dim : Hashable\n Name of the new stacked coordinate\n sample_dims : Sequence[Hashable]\n Dimensions that **will not** be stacked. Each array in the dataset\n must share these dimensions. For machine learning applications,\n these define the dimensions over which samples are drawn.\n variable_dim : str, optional\n Name of the level in the stacked coordinate which corresponds to\n the variables.\n name : str, optional\n Name of the new data array.\n\n Returns\n -------\n stacked : DataArray\n DataArray with the specified dimensions and data variables\n stacked together. The stacked coordinate is named ``new_dim``\n and represented by a MultiIndex object with a level containing the\n data variable names. The name of this level is controlled using\n the ``variable_dim`` argument.\n\n See Also\n --------\n Dataset.to_array\n Dataset.stack\n DataArray.to_unstacked_dataset\n\n Examples\n --------\n >>> data = Dataset(\n ... data_vars={'a': (('x', 'y'), [[0, 1, 2], [3, 4, 5]]),\n ... 'b': ('x', [6, 7])},\n ... coords={'y': ['u', 'v', 'w']}\n ... )\n\n >>> data\n \n Dimensions: (x: 2, y: 3)\n Coordinates:\n * y (y) >> data.to_stacked_array(\"z\", sample_dims=['x'])\n \n array([[0, 1, 2, 6],\n [3, 4, 5, 7]])\n Coordinates:\n * z (z) MultiIndex\n - variable (z) object 'a' 'a' 'a' 'b'\n - y (z) object 'u' 'v' 'w' nan\n Dimensions without coordinates: x\n\n ","endLoc":3322,"header":"def to_stacked_array(\n self,\n new_dim: Hashable,\n sample_dims: Sequence[Hashable],\n variable_dim: str = \"variable\",\n name: Hashable = None,\n ) -> \"DataArray\"","id":1633,"name":"to_stacked_array","nodeType":"Function","startLoc":3206,"text":"def to_stacked_array(\n self,\n new_dim: Hashable,\n sample_dims: Sequence[Hashable],\n variable_dim: str = \"variable\",\n name: Hashable = None,\n ) -> \"DataArray\":\n \"\"\"Combine variables of differing dimensionality into a DataArray\n without broadcasting.\n\n This method is similar to Dataset.to_array but does not broadcast the\n variables.\n\n Parameters\n ----------\n new_dim : Hashable\n Name of the new stacked coordinate\n sample_dims : Sequence[Hashable]\n Dimensions that **will not** be stacked. Each array in the dataset\n must share these dimensions. For machine learning applications,\n these define the dimensions over which samples are drawn.\n variable_dim : str, optional\n Name of the level in the stacked coordinate which corresponds to\n the variables.\n name : str, optional\n Name of the new data array.\n\n Returns\n -------\n stacked : DataArray\n DataArray with the specified dimensions and data variables\n stacked together. The stacked coordinate is named ``new_dim``\n and represented by a MultiIndex object with a level containing the\n data variable names. The name of this level is controlled using\n the ``variable_dim`` argument.\n\n See Also\n --------\n Dataset.to_array\n Dataset.stack\n DataArray.to_unstacked_dataset\n\n Examples\n --------\n >>> data = Dataset(\n ... data_vars={'a': (('x', 'y'), [[0, 1, 2], [3, 4, 5]]),\n ... 'b': ('x', [6, 7])},\n ... coords={'y': ['u', 'v', 'w']}\n ... 
)\n\n >>> data\n \n Dimensions: (x: 2, y: 3)\n Coordinates:\n * y (y) >> data.to_stacked_array(\"z\", sample_dims=['x'])\n \n array([[0, 1, 2, 6],\n [3, 4, 5, 7]])\n Coordinates:\n * z (z) MultiIndex\n - variable (z) object 'a' 'a' 'a' 'b'\n - y (z) object 'u' 'v' 'w' nan\n Dimensions without coordinates: x\n\n \"\"\"\n stacking_dims = tuple(dim for dim in self.dims if dim not in sample_dims)\n\n for variable in self:\n dims = self[variable].dims\n dims_include_sample_dims = set(sample_dims) <= set(dims)\n if not dims_include_sample_dims:\n raise ValueError(\n \"All variables in the dataset must contain the \"\n \"dimensions {}.\".format(dims)\n )\n\n def ensure_stackable(val):\n assign_coords = {variable_dim: val.name}\n for dim in stacking_dims:\n if dim not in val.dims:\n assign_coords[dim] = None\n\n expand_dims = set(stacking_dims).difference(set(val.dims))\n expand_dims.add(variable_dim)\n # must be list for .expand_dims\n expand_dims = list(expand_dims)\n\n return (\n val.assign_coords(**assign_coords)\n .expand_dims(expand_dims)\n .stack({new_dim: (variable_dim,) + stacking_dims})\n )\n\n # concatenate the arrays\n stackable_vars = [ensure_stackable(self[key]) for key in self.data_vars]\n data_array = xr.concat(stackable_vars, dim=new_dim)\n\n # coerce the levels of the MultiIndex to have the same type as the\n # input dimensions. This code is messy, so it might be better to just\n # input a dummy value for the singleton dimension.\n idx = data_array.indexes[new_dim]\n levels = [idx.levels[0]] + [\n level.astype(self[level.name].dtype) for level in idx.levels[1:]\n ]\n new_idx = idx.set_levels(levels)\n data_array[new_dim] = IndexVariable(new_dim, new_idx)\n\n if name is not None:\n data_array.name = name\n\n return data_array"},{"attributeType":"null","col":4,"comment":"null","endLoc":206,"id":1634,"name":"nanosecond","nodeType":"Attribute","startLoc":206,"text":"nanosecond"},{"attributeType":"null","col":0,"comment":"null","endLoc":81,"id":1635,"name":"DTypeLike","nodeType":"Attribute","startLoc":81,"text":"DTypeLike"},{"col":4,"comment":"null","endLoc":60,"header":"def get_variables(self)","id":1636,"name":"get_variables","nodeType":"Function","startLoc":57,"text":"def get_variables(self):\n return FrozenDict(\n (k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()\n )"},{"col":4,"comment":"Rearrange index levels using input order.\n\n Parameters\n ----------\n dim_order : optional\n Mapping from names matching dimensions and values given\n by lists representing new level orders. Every given dimension\n must have a multi-index.\n **dim_order_kwargs: optional\n The keyword arguments form of ``dim_order``.\n One of dim_order or dim_order_kwargs must be provided.\n\n Returns\n -------\n obj : DataArray\n Another dataarray, with this dataarray's data but replaced\n coordinates.\n ","endLoc":1672,"header":"def reorder_levels(\n self,\n dim_order: Mapping[Hashable, Sequence[int]] = None,\n inplace: bool = None,\n **dim_order_kwargs: Sequence[int]\n ) -> \"DataArray\"","id":1637,"name":"reorder_levels","nodeType":"Function","startLoc":1637,"text":"def reorder_levels(\n self,\n dim_order: Mapping[Hashable, Sequence[int]] = None,\n inplace: bool = None,\n **dim_order_kwargs: Sequence[int]\n ) -> \"DataArray\":\n \"\"\"Rearrange index levels using input order.\n\n Parameters\n ----------\n dim_order : optional\n Mapping from names matching dimensions and values given\n by lists representing new level orders. 
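The `to_stacked_array` docstring above already carries a worked example; the following condensed sketch shows the same call together with its inverse, `to_unstacked_dataset`, which is documented further below:

```python
import xarray as xr

data = xr.Dataset(
    data_vars={"a": (("x", "y"), [[0, 1, 2], [3, 4, 5]]), "b": ("x", [6, 7])},
    coords={"y": ["u", "v", "w"]},
)

stacked = data.to_stacked_array("z", sample_dims=["x"])
# shape (2, 4); "z" is a MultiIndex with levels ("variable", "y")

roundtripped = stacked.to_unstacked_dataset(dim="z")
assert data.identical(roundtripped)
```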
Every given dimension\n must have a multi-index.\n **dim_order_kwargs: optional\n The keyword arguments form of ``dim_order``.\n One of dim_order or dim_order_kwargs must be provided.\n\n Returns\n -------\n obj : DataArray\n Another dataarray, with this dataarray's data but replaced\n coordinates.\n \"\"\"\n _check_inplace(inplace)\n dim_order = either_dict_or_kwargs(dim_order, dim_order_kwargs, \"reorder_levels\")\n replace_coords = {}\n for dim, order in dim_order.items():\n coord = self._coords[dim]\n index = coord.to_index()\n if not isinstance(index, pd.MultiIndex):\n raise ValueError(\"coordinate %r has no MultiIndex\" % dim)\n replace_coords[dim] = IndexVariable(coord.dims, index.reorder_levels(order))\n coords = self._coords.copy()\n coords.update(replace_coords)\n return self._replace(coords=coords)"},{"attributeType":"null","col":4,"comment":"null","endLoc":209,"id":1638,"name":"weekofyear","nodeType":"Attribute","startLoc":209,"text":"weekofyear"},{"col":4,"comment":"null","endLoc":63,"header":"def get_attrs(self)","id":1639,"name":"get_attrs","nodeType":"Function","startLoc":62,"text":"def get_attrs(self):\n return Frozen(self.ds.attributes)"},{"col":0,"comment":"Open multiple files as a single dataset.\n\n If combine='by_coords' then the function ``combine_by_coords`` is used to combine\n the datasets into one before returning the result, and if combine='nested' then\n ``combine_nested`` is used. The filepaths must be structured according to which\n combining function is used, the details of which are given in the documentation for\n ``combine_by_coords`` and ``combine_nested``. By default the old (now deprecated)\n ``auto_combine`` will be used, please specify either ``combine='by_coords'`` or\n ``combine='nested'`` in future. Requires dask to be installed. See documentation for\n details on dask [1]. Attributes from the first dataset file are used for the\n combined dataset.\n\n Parameters\n ----------\n paths : str or sequence\n Either a string glob in the form \"path/to/my/files/*.nc\" or an explicit list of\n files to open. Paths can be given as strings or as pathlib Paths. If\n concatenation along more than one dimension is desired, then ``paths`` must be a\n nested list-of-lists (see ``manual_combine`` for details). (A string glob will\n be expanded to a 1-dimensional list.)\n chunks : int or dict, optional\n Dictionary with keys given by dimension names and values given by chunk sizes.\n In general, these should divide the dimensions of each dataset. If int, chunk\n each dimension by ``chunks``. By default, chunks will be chosen to load entire\n input files into memory at once. This has a major impact on performance: please\n see the full documentation for more details [2].\n concat_dim : str, or list of str, DataArray, Index or None, optional\n Dimensions to concatenate files along. You only need to provide this argument\n if any of the dimensions along which you want to concatenate is not a dimension\n in the original datasets, e.g., if you want to stack a collection of 2D arrays\n along a third dimension. Set ``concat_dim=[..., None, ...]`` explicitly to\n disable concatenation along a particular dimension.\n combine : {'by_coords', 'nested'}, optional\n Whether ``xarray.combine_by_coords`` or ``xarray.combine_nested`` is used to\n combine all the data. 
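`reorder_levels` only rearranges the levels of an existing MultiIndex coordinate; a short sketch (the coordinate and level names here are made up for illustration):

```python
import numpy as np
import pandas as pd
import xarray as xr

midx = pd.MultiIndex.from_product([["a", "b"], [0, 1]], names=("letter", "number"))
arr = xr.DataArray(np.arange(4), coords={"x": midx}, dims="x")

arr.reorder_levels(x=["number", "letter"])
# same data, but the "x" MultiIndex now lists "number" before "letter"
```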
If this argument is not provided, `xarray.auto_combine` is\n used, but in the future this behavior will switch to use\n `xarray.combine_by_coords` by default.\n compat : {'identical', 'equals', 'broadcast_equals',\n 'no_conflicts', 'override'}, optional\n String indicating how to compare variables of the same name for\n potential conflicts when merging:\n * 'broadcast_equals': all values must be equal when variables are\n broadcast against each other to ensure common dimensions.\n * 'equals': all values and dimensions must be the same.\n * 'identical': all values, dimensions and attributes must be the\n same.\n * 'no_conflicts': only values which are not null in both datasets\n must be equal. The returned dataset then contains the combination\n of all non-null values.\n * 'override': skip comparing and pick variable from first dataset\n preprocess : callable, optional\n If provided, call this function on each dataset prior to concatenation.\n You can find the file-name from which each dataset was loaded in\n ``ds.encoding['source']``.\n engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib'}, \n optional\n Engine to use when reading files. If not provided, the default engine\n is chosen based on available dependencies, with a preference for\n 'netcdf4'.\n lock : False or duck threading.Lock, optional\n Resource lock to use when reading data from disk. Only relevant when\n using dask or another form of parallelism. By default, appropriate\n locks are chosen to safely read and write files with the currently\n active dask scheduler.\n data_vars : {'minimal', 'different', 'all' or list of str}, optional\n These data variables will be concatenated together:\n * 'minimal': Only data variables in which the dimension already\n appears are included.\n * 'different': Data variables which are not equal (ignoring\n attributes) across all datasets are also concatenated (as well as\n all for which dimension already appears). Beware: this option may\n load the data payload of data variables into memory if they are not\n already loaded.\n * 'all': All data variables will be concatenated.\n * list of str: The listed data variables will be concatenated, in\n addition to the 'minimal' data variables.\n coords : {'minimal', 'different', 'all' or list of str}, optional\n These coordinate variables will be concatenated together:\n * 'minimal': Only coordinates in which the dimension already appears\n are included.\n * 'different': Coordinates which are not equal (ignoring attributes)\n across all datasets are also concatenated (as well as all for which\n dimension already appears). Beware: this option may load the data\n payload of coordinate variables into memory if they are not already\n loaded.\n * 'all': All coordinate variables will be concatenated, except\n those corresponding to other dimensions.\n * list of str: The listed coordinate variables will be concatenated,\n in addition the 'minimal' coordinates.\n parallel : bool, optional\n If True, the open and preprocess steps of this function will be\n performed in parallel using ``dask.delayed``. 
Default is False.\n join : {'outer', 'inner', 'left', 'right', 'exact, 'override'}, optional\n String indicating how to combine differing indexes\n (excluding concat_dim) in objects\n\n - 'outer': use the union of object indexes\n - 'inner': use the intersection of object indexes\n - 'left': use indexes from the first object with each dimension\n - 'right': use indexes from the last object with each dimension\n - 'exact': instead of aligning, raise `ValueError` when indexes to be\n aligned are not equal\n - 'override': if indexes are of same size, rewrite indexes to be\n those of the first object with that dimension. Indexes for the same\n dimension must have the same size in all objects.\n **kwargs : optional\n Additional arguments passed on to :py:func:`xarray.open_dataset`.\n\n Returns\n -------\n xarray.Dataset\n\n Notes\n -----\n ``open_mfdataset`` opens files with read-only access. When you modify values\n of a Dataset, even one linked to files on disk, only the in-memory copy you\n are manipulating in xarray is modified: the original file on disk is never\n touched.\n\n See Also\n --------\n combine_by_coords\n combine_nested\n auto_combine\n open_dataset\n\n References\n ----------\n\n .. [1] http://xarray.pydata.org/en/stable/dask.html\n .. [2] http://xarray.pydata.org/en/stable/dask.html#chunking-and-performance\n ","endLoc":963,"header":"def open_mfdataset(\n paths,\n chunks=None,\n concat_dim=\"_not_supplied\",\n compat=\"no_conflicts\",\n preprocess=None,\n engine=None,\n lock=None,\n data_vars=\"all\",\n coords=\"different\",\n combine=\"_old_auto\",\n autoclose=None,\n parallel=False,\n join=\"outer\",\n **kwargs\n)","id":1640,"name":"open_mfdataset","nodeType":"Function","startLoc":707,"text":"def open_mfdataset(\n paths,\n chunks=None,\n concat_dim=\"_not_supplied\",\n compat=\"no_conflicts\",\n preprocess=None,\n engine=None,\n lock=None,\n data_vars=\"all\",\n coords=\"different\",\n combine=\"_old_auto\",\n autoclose=None,\n parallel=False,\n join=\"outer\",\n **kwargs\n):\n \"\"\"Open multiple files as a single dataset.\n\n If combine='by_coords' then the function ``combine_by_coords`` is used to combine\n the datasets into one before returning the result, and if combine='nested' then\n ``combine_nested`` is used. The filepaths must be structured according to which\n combining function is used, the details of which are given in the documentation for\n ``combine_by_coords`` and ``combine_nested``. By default the old (now deprecated)\n ``auto_combine`` will be used, please specify either ``combine='by_coords'`` or\n ``combine='nested'`` in future. Requires dask to be installed. See documentation for\n details on dask [1]. Attributes from the first dataset file are used for the\n combined dataset.\n\n Parameters\n ----------\n paths : str or sequence\n Either a string glob in the form \"path/to/my/files/*.nc\" or an explicit list of\n files to open. Paths can be given as strings or as pathlib Paths. If\n concatenation along more than one dimension is desired, then ``paths`` must be a\n nested list-of-lists (see ``manual_combine`` for details). (A string glob will\n be expanded to a 1-dimensional list.)\n chunks : int or dict, optional\n Dictionary with keys given by dimension names and values given by chunk sizes.\n In general, these should divide the dimensions of each dataset. If int, chunk\n each dimension by ``chunks``. By default, chunks will be chosen to load entire\n input files into memory at once. 
This has a major impact on performance: please\n see the full documentation for more details [2].\n concat_dim : str, or list of str, DataArray, Index or None, optional\n Dimensions to concatenate files along. You only need to provide this argument\n if any of the dimensions along which you want to concatenate is not a dimension\n in the original datasets, e.g., if you want to stack a collection of 2D arrays\n along a third dimension. Set ``concat_dim=[..., None, ...]`` explicitly to\n disable concatenation along a particular dimension.\n combine : {'by_coords', 'nested'}, optional\n Whether ``xarray.combine_by_coords`` or ``xarray.combine_nested`` is used to\n combine all the data. If this argument is not provided, `xarray.auto_combine` is\n used, but in the future this behavior will switch to use\n `xarray.combine_by_coords` by default.\n compat : {'identical', 'equals', 'broadcast_equals',\n 'no_conflicts', 'override'}, optional\n String indicating how to compare variables of the same name for\n potential conflicts when merging:\n * 'broadcast_equals': all values must be equal when variables are\n broadcast against each other to ensure common dimensions.\n * 'equals': all values and dimensions must be the same.\n * 'identical': all values, dimensions and attributes must be the\n same.\n * 'no_conflicts': only values which are not null in both datasets\n must be equal. The returned dataset then contains the combination\n of all non-null values.\n * 'override': skip comparing and pick variable from first dataset\n preprocess : callable, optional\n If provided, call this function on each dataset prior to concatenation.\n You can find the file-name from which each dataset was loaded in\n ``ds.encoding['source']``.\n engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib'}, \\\n optional\n Engine to use when reading files. If not provided, the default engine\n is chosen based on available dependencies, with a preference for\n 'netcdf4'.\n lock : False or duck threading.Lock, optional\n Resource lock to use when reading data from disk. Only relevant when\n using dask or another form of parallelism. By default, appropriate\n locks are chosen to safely read and write files with the currently\n active dask scheduler.\n data_vars : {'minimal', 'different', 'all' or list of str}, optional\n These data variables will be concatenated together:\n * 'minimal': Only data variables in which the dimension already\n appears are included.\n * 'different': Data variables which are not equal (ignoring\n attributes) across all datasets are also concatenated (as well as\n all for which dimension already appears). Beware: this option may\n load the data payload of data variables into memory if they are not\n already loaded.\n * 'all': All data variables will be concatenated.\n * list of str: The listed data variables will be concatenated, in\n addition to the 'minimal' data variables.\n coords : {'minimal', 'different', 'all' or list of str}, optional\n These coordinate variables will be concatenated together:\n * 'minimal': Only coordinates in which the dimension already appears\n are included.\n * 'different': Coordinates which are not equal (ignoring attributes)\n across all datasets are also concatenated (as well as all for which\n dimension already appears). 
Beware: this option may load the data\n payload of coordinate variables into memory if they are not already\n loaded.\n * 'all': All coordinate variables will be concatenated, except\n those corresponding to other dimensions.\n * list of str: The listed coordinate variables will be concatenated,\n in addition the 'minimal' coordinates.\n parallel : bool, optional\n If True, the open and preprocess steps of this function will be\n performed in parallel using ``dask.delayed``. Default is False.\n join : {'outer', 'inner', 'left', 'right', 'exact, 'override'}, optional\n String indicating how to combine differing indexes\n (excluding concat_dim) in objects\n\n - 'outer': use the union of object indexes\n - 'inner': use the intersection of object indexes\n - 'left': use indexes from the first object with each dimension\n - 'right': use indexes from the last object with each dimension\n - 'exact': instead of aligning, raise `ValueError` when indexes to be\n aligned are not equal\n - 'override': if indexes are of same size, rewrite indexes to be\n those of the first object with that dimension. Indexes for the same\n dimension must have the same size in all objects.\n **kwargs : optional\n Additional arguments passed on to :py:func:`xarray.open_dataset`.\n\n Returns\n -------\n xarray.Dataset\n\n Notes\n -----\n ``open_mfdataset`` opens files with read-only access. When you modify values\n of a Dataset, even one linked to files on disk, only the in-memory copy you\n are manipulating in xarray is modified: the original file on disk is never\n touched.\n\n See Also\n --------\n combine_by_coords\n combine_nested\n auto_combine\n open_dataset\n\n References\n ----------\n\n .. [1] http://xarray.pydata.org/en/stable/dask.html\n .. [2] http://xarray.pydata.org/en/stable/dask.html#chunking-and-performance\n \"\"\"\n if isinstance(paths, str):\n if is_remote_uri(paths):\n raise ValueError(\n \"cannot do wild-card matching for paths that are remote URLs: \"\n \"{!r}. 
Instead, supply paths as an explicit list of strings.\".format(\n paths\n )\n )\n paths = sorted(glob(paths))\n else:\n paths = [str(p) if isinstance(p, Path) else p for p in paths]\n\n if not paths:\n raise OSError(\"no files to open\")\n\n # If combine='by_coords' then this is unnecessary, but quick.\n # If combine='nested' then this creates a flat list which is easier to\n # iterate over, while saving the originally-supplied structure as \"ids\"\n if combine == \"nested\":\n if str(concat_dim) == \"_not_supplied\":\n raise ValueError(\"Must supply concat_dim when using \" \"combine='nested'\")\n else:\n if isinstance(concat_dim, (str, DataArray)) or concat_dim is None:\n concat_dim = [concat_dim]\n combined_ids_paths = _infer_concat_order_from_positions(paths)\n ids, paths = (list(combined_ids_paths.keys()), list(combined_ids_paths.values()))\n\n open_kwargs = dict(\n engine=engine, chunks=chunks or {}, lock=lock, autoclose=autoclose, **kwargs\n )\n\n if parallel:\n import dask\n\n # wrap the open_dataset, getattr, and preprocess with delayed\n open_ = dask.delayed(open_dataset)\n getattr_ = dask.delayed(getattr)\n if preprocess is not None:\n preprocess = dask.delayed(preprocess)\n else:\n open_ = open_dataset\n getattr_ = getattr\n\n datasets = [open_(p, **open_kwargs) for p in paths]\n file_objs = [getattr_(ds, \"_file_obj\") for ds in datasets]\n if preprocess is not None:\n datasets = [preprocess(ds) for ds in datasets]\n\n if parallel:\n # calling compute here will return the datasets/file_objs lists,\n # the underlying datasets will still be stored as dask arrays\n datasets, file_objs = dask.compute(datasets, file_objs)\n\n # Combine all datasets, closing them in case of a ValueError\n try:\n if combine == \"_old_auto\":\n # Use the old auto_combine for now\n # Remove this after deprecation cycle from #2616 is complete\n basic_msg = dedent(\n \"\"\"\\\n In xarray version 0.15 the default behaviour of `open_mfdataset`\n will change. To retain the existing behavior, pass\n combine='nested'. To use future default behavior, pass\n combine='by_coords'. 
See\n http://xarray.pydata.org/en/stable/combining.html#combining-multi\n \"\"\"\n )\n warnings.warn(basic_msg, FutureWarning, stacklevel=2)\n\n combined = auto_combine(\n datasets,\n concat_dim=concat_dim,\n compat=compat,\n data_vars=data_vars,\n coords=coords,\n join=join,\n from_openmfds=True,\n )\n elif combine == \"nested\":\n # Combined nested list by successive concat and merge operations\n # along each dimension, using structure given by \"ids\"\n combined = _nested_combine(\n datasets,\n concat_dims=concat_dim,\n compat=compat,\n data_vars=data_vars,\n coords=coords,\n ids=ids,\n join=join,\n )\n elif combine == \"by_coords\":\n # Redo ordering from coordinates, ignoring how they were ordered\n # previously\n combined = combine_by_coords(\n datasets, compat=compat, data_vars=data_vars, coords=coords, join=join\n )\n else:\n raise ValueError(\n \"{} is an invalid option for the keyword argument\"\n \" ``combine``\".format(combine)\n )\n except ValueError:\n for ds in datasets:\n ds.close()\n raise\n\n combined._file_obj = _MultiFileCloser(file_objs)\n combined.attrs = datasets[0].attrs\n return combined"},{"col":4,"comment":"null","endLoc":66,"header":"def get_dimensions(self)","id":1642,"name":"get_dimensions","nodeType":"Function","startLoc":65,"text":"def get_dimensions(self):\n return Frozen(self.ds.dimensions)"},{"attributeType":"null","col":4,"comment":"null","endLoc":212,"id":1643,"name":"week","nodeType":"Attribute","startLoc":212,"text":"week"},{"attributeType":"null","col":4,"comment":"null","endLoc":213,"id":1644,"name":"dayofweek","nodeType":"Attribute","startLoc":213,"text":"dayofweek"},{"className":"ExplicitIndexer","col":0,"comment":"Base class for explicit indexer objects.\n\n ExplicitIndexer objects wrap a tuple of values given by their ``tuple``\n property. These tuples should always have length equal to the number of\n dimensions on the indexed array.\n\n Do not instantiate BaseIndexer objects directly: instead, use one of the\n sub-classes BasicIndexer, OuterIndexer or VectorizedIndexer.\n ","endLoc":329,"id":1645,"nodeType":"Class","startLoc":306,"text":"class ExplicitIndexer:\n \"\"\"Base class for explicit indexer objects.\n\n ExplicitIndexer objects wrap a tuple of values given by their ``tuple``\n property. 
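A hedged usage sketch for `open_mfdataset` as documented above; the file paths are hypothetical and dask must be installed:

```python
import xarray as xr

# hypothetical yearly files tiling a "time" dimension
paths = ["temp_2000.nc", "temp_2001.nc", "temp_2002.nc"]

ds = xr.open_mfdataset(
    paths,
    combine="nested",        # order taken from the list structure
    concat_dim="time",
    chunks={"time": 365},
    parallel=True,           # open/preprocess files with dask.delayed
)

# alternatively, let coordinate values determine the layout
ds = xr.open_mfdataset("temp_*.nc", combine="by_coords")
```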
These tuples should always have length equal to the number of\n dimensions on the indexed array.\n\n Do not instantiate BaseIndexer objects directly: instead, use one of the\n sub-classes BasicIndexer, OuterIndexer or VectorizedIndexer.\n \"\"\"\n\n __slots__ = (\"_key\",)\n\n def __init__(self, key):\n if type(self) is ExplicitIndexer:\n raise TypeError(\"cannot instantiate base ExplicitIndexer objects\")\n self._key = tuple(key)\n\n @property\n def tuple(self):\n return self._key\n\n def __repr__(self):\n return \"{}({})\".format(type(self).__name__, self.tuple)"},{"col":4,"comment":"null","endLoc":326,"header":"@property\n def tuple(self)","id":1646,"name":"tuple","nodeType":"Function","startLoc":324,"text":"@property\n def tuple(self):\n return self._key"},{"col":4,"comment":"null","endLoc":329,"header":"def __repr__(self)","id":1647,"name":"__repr__","nodeType":"Function","startLoc":328,"text":"def __repr__(self):\n return \"{}({})\".format(type(self).__name__, self.tuple)"},{"attributeType":"null","col":4,"comment":"null","endLoc":317,"id":1648,"name":"__slots__","nodeType":"Attribute","startLoc":317,"text":"__slots__"},{"attributeType":"null","col":8,"comment":"null","endLoc":322,"id":1649,"name":"_key","nodeType":"Attribute","startLoc":322,"text":"self._key"},{"attributeType":"null","col":4,"comment":"null","endLoc":216,"id":1650,"name":"weekday","nodeType":"Attribute","startLoc":216,"text":"weekday"},{"className":"BasicIndexer","col":0,"comment":"Tuple for basic indexing.\n\n All elements should be int or slice objects. Indexing follows NumPy's\n rules for basic indexing: each axis is independently sliced and axes\n indexed with an integer are dropped from the result.\n ","endLoc":371,"id":1651,"nodeType":"Class","startLoc":343,"text":"class BasicIndexer(ExplicitIndexer):\n \"\"\"Tuple for basic indexing.\n\n All elements should be int or slice objects. Indexing follows NumPy's\n rules for basic indexing: each axis is independently sliced and axes\n indexed with an integer are dropped from the result.\n \"\"\"\n\n __slots__ = ()\n\n def __init__(self, key):\n if not isinstance(key, tuple):\n raise TypeError(\"key must be a tuple: {!r}\".format(key))\n\n new_key = []\n for k in key:\n if isinstance(k, integer_types):\n k = int(k)\n elif isinstance(k, slice):\n k = as_integer_slice(k)\n else:\n raise TypeError(\n \"unexpected indexer type for {}: {!r}\".format(\n type(self).__name__, k\n )\n )\n new_key.append(k)\n\n super().__init__(new_key)"},{"col":4,"comment":"Unstack DataArray expanding to Dataset along a given level of a\n stacked coordinate.\n\n This is the inverse operation of Dataset.to_stacked_array.\n\n Parameters\n ----------\n dim : str\n Name of existing dimension to unstack\n level : int or str\n The MultiIndex level to expand to a dataset along. Can either be\n the integer index of the level or its name.\n label : int, default 0\n Label of the level to expand dataset along. Overrides the label\n argument if given.\n\n Returns\n -------\n unstacked: Dataset\n\n Examples\n --------\n >>> import xarray as xr\n >>> arr = DataArray(np.arange(6).reshape(2, 3),\n ... 
coords=[('x', ['a', 'b']), ('y', [0, 1, 2])])\n >>> data = xr.Dataset({'a': arr, 'b': arr.isel(y=0)})\n >>> data\n \n Dimensions: (x: 2, y: 3)\n Coordinates:\n * x (x) >> stacked = data.to_stacked_array(\"z\", ['y'])\n >>> stacked.indexes['z']\n MultiIndex(levels=[['a', 'b'], [0, 1, 2]],\n labels=[[0, 0, 0, 1], [0, 1, 2, -1]],\n names=['variable', 'y'])\n >>> roundtripped = stacked.to_unstacked_dataset(dim='z')\n >>> data.identical(roundtripped)\n True\n\n See Also\n --------\n Dataset.to_stacked_array\n ","endLoc":1836,"header":"def to_unstacked_dataset(self, dim, level=0)","id":1652,"name":"to_unstacked_dataset","nodeType":"Function","startLoc":1772,"text":"def to_unstacked_dataset(self, dim, level=0):\n \"\"\"Unstack DataArray expanding to Dataset along a given level of a\n stacked coordinate.\n\n This is the inverse operation of Dataset.to_stacked_array.\n\n Parameters\n ----------\n dim : str\n Name of existing dimension to unstack\n level : int or str\n The MultiIndex level to expand to a dataset along. Can either be\n the integer index of the level or its name.\n label : int, default 0\n Label of the level to expand dataset along. Overrides the label\n argument if given.\n\n Returns\n -------\n unstacked: Dataset\n\n Examples\n --------\n >>> import xarray as xr\n >>> arr = DataArray(np.arange(6).reshape(2, 3),\n ... coords=[('x', ['a', 'b']), ('y', [0, 1, 2])])\n >>> data = xr.Dataset({'a': arr, 'b': arr.isel(y=0)})\n >>> data\n \n Dimensions: (x: 2, y: 3)\n Coordinates:\n * x (x) >> stacked = data.to_stacked_array(\"z\", ['y'])\n >>> stacked.indexes['z']\n MultiIndex(levels=[['a', 'b'], [0, 1, 2]],\n labels=[[0, 0, 0, 1], [0, 1, 2, -1]],\n names=['variable', 'y'])\n >>> roundtripped = stacked.to_unstacked_dataset(dim='z')\n >>> data.identical(roundtripped)\n True\n\n See Also\n --------\n Dataset.to_stacked_array\n \"\"\"\n\n idx = self.indexes[dim]\n if not isinstance(idx, pd.MultiIndex):\n raise ValueError(\"'{}' is not a stacked coordinate\".format(dim))\n\n level_number = idx._get_level_number(level)\n variables = idx.levels[level_number]\n variable_dim = idx.names[level_number]\n\n # pull variables out of datarray\n data_dict = {}\n for k in variables:\n data_dict[k] = self.sel({variable_dim: k}).squeeze(drop=True)\n\n # unstacked dataset\n return Dataset(data_dict)"},{"attributeType":"null","col":4,"comment":"null","endLoc":351,"id":1653,"name":"__slots__","nodeType":"Attribute","startLoc":351,"text":"__slots__"},{"col":0,"comment":"null","endLoc":17,"header":"def _infer_concat_order_from_positions(datasets)","id":1654,"name":"_infer_concat_order_from_positions","nodeType":"Function","startLoc":15,"text":"def _infer_concat_order_from_positions(datasets):\n combined_ids = dict(_infer_tile_ids_from_nested_list(datasets, ()))\n return combined_ids"},{"attributeType":"null","col":4,"comment":"null","endLoc":218,"id":1655,"name":"weekday_name","nodeType":"Attribute","startLoc":218,"text":"weekday_name"},{"className":"OuterIndexer","col":0,"comment":"Tuple for outer/orthogonal indexing.\n\n All elements should be int, slice or 1-dimensional np.ndarray objects with\n an integer dtype. Indexing is applied independently along each axis, and\n axes indexed with an integer are dropped from the result. 
This type of\n indexing works like MATLAB/Fortran.\n ","endLoc":415,"id":1656,"nodeType":"Class","startLoc":374,"text":"class OuterIndexer(ExplicitIndexer):\n \"\"\"Tuple for outer/orthogonal indexing.\n\n All elements should be int, slice or 1-dimensional np.ndarray objects with\n an integer dtype. Indexing is applied independently along each axis, and\n axes indexed with an integer are dropped from the result. This type of\n indexing works like MATLAB/Fortran.\n \"\"\"\n\n __slots__ = ()\n\n def __init__(self, key):\n if not isinstance(key, tuple):\n raise TypeError(\"key must be a tuple: {!r}\".format(key))\n\n new_key = []\n for k in key:\n if isinstance(k, integer_types):\n k = int(k)\n elif isinstance(k, slice):\n k = as_integer_slice(k)\n elif isinstance(k, np.ndarray):\n if not np.issubdtype(k.dtype, np.integer):\n raise TypeError(\n \"invalid indexer array, does not have \"\n \"integer dtype: {!r}\".format(k)\n )\n if k.ndim != 1:\n raise TypeError(\n \"invalid indexer array for {}, must have \"\n \"exactly 1 dimension: \".format(type(self).__name__, k)\n )\n k = np.asarray(k, dtype=np.int64)\n else:\n raise TypeError(\n \"unexpected indexer type for {}: {!r}\".format(\n type(self).__name__, k\n )\n )\n new_key.append(k)\n\n super().__init__(new_key)"},{"attributeType":"null","col":4,"comment":"null","endLoc":383,"id":1657,"name":"__slots__","nodeType":"Attribute","startLoc":383,"text":"__slots__"},{"className":"VectorizedIndexer","col":0,"comment":"Tuple for vectorized indexing.\n\n All elements should be slice or N-dimensional np.ndarray objects with an\n integer dtype and the same number of dimensions. Indexing follows proposed\n rules for np.ndarray.vindex, which matches NumPy's advanced indexing rules\n (including broadcasting) except sliced axes are always moved to the end:\n https://github.com/numpy/numpy/pull/6256\n ","endLoc":462,"id":1658,"nodeType":"Class","startLoc":418,"text":"class VectorizedIndexer(ExplicitIndexer):\n \"\"\"Tuple for vectorized indexing.\n\n All elements should be slice or N-dimensional np.ndarray objects with an\n integer dtype and the same number of dimensions. 
Indexing follows proposed\n rules for np.ndarray.vindex, which matches NumPy's advanced indexing rules\n (including broadcasting) except sliced axes are always moved to the end:\n https://github.com/numpy/numpy/pull/6256\n \"\"\"\n\n __slots__ = ()\n\n def __init__(self, key):\n if not isinstance(key, tuple):\n raise TypeError(\"key must be a tuple: {!r}\".format(key))\n\n new_key = []\n ndim = None\n for k in key:\n if isinstance(k, slice):\n k = as_integer_slice(k)\n elif isinstance(k, np.ndarray):\n if not np.issubdtype(k.dtype, np.integer):\n raise TypeError(\n \"invalid indexer array, does not have \"\n \"integer dtype: {!r}\".format(k)\n )\n if ndim is None:\n ndim = k.ndim\n elif ndim != k.ndim:\n ndims = [k.ndim for k in key if isinstance(k, np.ndarray)]\n raise ValueError(\n \"invalid indexer key: ndarray arguments \"\n \"have different numbers of dimensions: {}\".format(ndims)\n )\n k = np.asarray(k, dtype=np.int64)\n else:\n raise TypeError(\n \"unexpected indexer type for {}: {!r}\".format(\n type(self).__name__, k\n )\n )\n new_key.append(k)\n\n super().__init__(new_key)"},{"attributeType":"null","col":4,"comment":"null","endLoc":428,"id":1659,"name":"__slots__","nodeType":"Attribute","startLoc":428,"text":"__slots__"},{"col":4,"comment":"null","endLoc":71,"header":"def get_encoding(self)","id":1660,"name":"get_encoding","nodeType":"Function","startLoc":68,"text":"def get_encoding(self):\n dims = self.get_dimensions()\n encoding = {\"unlimited_dims\": {k for k, v in dims.items() if v is None}}\n return encoding"},{"attributeType":"null","col":4,"comment":"null","endLoc":222,"id":1661,"name":"dayofyear","nodeType":"Attribute","startLoc":222,"text":"dayofyear"},{"className":"ImplicitToExplicitIndexingAdapter","col":0,"comment":"Wrap an array, converting tuples into the indicated explicit indexer.","endLoc":500,"id":1662,"nodeType":"Class","startLoc":480,"text":"class ImplicitToExplicitIndexingAdapter(utils.NDArrayMixin):\n \"\"\"Wrap an array, converting tuples into the indicated explicit indexer.\"\"\"\n\n __slots__ = (\"array\", \"indexer_cls\")\n\n def __init__(self, array, indexer_cls=BasicIndexer):\n self.array = as_indexable(array)\n self.indexer_cls = indexer_cls\n\n def __array__(self, dtype=None):\n return np.asarray(self.array, dtype=dtype)\n\n def __getitem__(self, key):\n key = expanded_indexer(key, self.ndim)\n result = self.array[self.indexer_cls(key)]\n if isinstance(result, ExplicitlyIndexed):\n return type(self)(result, self.indexer_cls)\n else:\n # Sometimes explicitly indexed arrays return NumPy arrays or\n # scalars.\n return result"},{"col":4,"comment":"null","endLoc":487,"header":"def __init__(self, array, indexer_cls=BasicIndexer)","id":1663,"name":"__init__","nodeType":"Function","startLoc":485,"text":"def __init__(self, array, indexer_cls=BasicIndexer):\n self.array = as_indexable(array)\n self.indexer_cls = indexer_cls"},{"col":4,"comment":"null","endLoc":490,"header":"def __array__(self, dtype=None)","id":1664,"name":"__array__","nodeType":"Function","startLoc":489,"text":"def __array__(self, dtype=None):\n return np.asarray(self.array, dtype=dtype)"},{"attributeType":"null","col":4,"comment":"null","endLoc":225,"id":1665,"name":"quarter","nodeType":"Attribute","startLoc":225,"text":"quarter"},{"col":4,"comment":"null","endLoc":500,"header":"def __getitem__(self, key)","id":1666,"name":"__getitem__","nodeType":"Function","startLoc":492,"text":"def __getitem__(self, key):\n key = expanded_indexer(key, self.ndim)\n result = 
self.array[self.indexer_cls(key)]\n if isinstance(result, ExplicitlyIndexed):\n return type(self)(result, self.indexer_cls)\n else:\n # Sometimes explicitly indexed arrays return NumPy arrays or\n # scalars.\n return result"},{"attributeType":"null","col":4,"comment":"null","endLoc":226,"id":1667,"name":"days_in_month","nodeType":"Attribute","startLoc":226,"text":"days_in_month"},{"attributeType":"null","col":8,"comment":"null","endLoc":42,"id":1668,"name":"lock","nodeType":"Attribute","startLoc":42,"text":"self.lock"},{"col":4,"comment":"null","endLoc":3357,"header":"def _unstack_once(self, dim: Hashable) -> \"Dataset\"","id":1669,"name":"_unstack_once","nodeType":"Function","startLoc":3324,"text":"def _unstack_once(self, dim: Hashable) -> \"Dataset\":\n index = self.get_index(dim)\n index = index.remove_unused_levels()\n full_idx = pd.MultiIndex.from_product(index.levels, names=index.names)\n\n # take a shortcut in case the MultiIndex was not modified.\n if index.equals(full_idx):\n obj = self\n else:\n obj = self.reindex({dim: full_idx}, copy=False)\n\n new_dim_names = index.names\n new_dim_sizes = [lev.size for lev in index.levels]\n\n variables: Dict[Hashable, Variable] = {}\n indexes = {k: v for k, v in self.indexes.items() if k != dim}\n\n for name, var in obj.variables.items():\n if name != dim:\n if dim in var.dims:\n new_dims = dict(zip(new_dim_names, new_dim_sizes))\n variables[name] = var.unstack({dim: new_dims})\n else:\n variables[name] = var\n\n for name, lev in zip(new_dim_names, index.levels):\n variables[name] = IndexVariable(name, lev)\n indexes[name] = lev\n\n coord_names = set(self._coord_names) - {dim} | set(new_dim_names)\n\n return self._replace_with_new_dims(\n variables, coord_names=coord_names, indexes=indexes\n )"},{"attributeType":"null","col":8,"comment":"null","endLoc":43,"id":1670,"name":"ds","nodeType":"Attribute","startLoc":43,"text":"self.ds"},{"attributeType":"null","col":0,"comment":"null","endLoc":12,"id":1671,"name":"ECCODES_LOCK","nodeType":"Attribute","startLoc":12,"text":"ECCODES_LOCK"},{"col":0,"comment":"","endLoc":1,"header":"cfgrib_.py#","id":1672,"name":"","nodeType":"Function","startLoc":1,"text":"ECCODES_LOCK = SerializableLock()"},{"attributeType":"null","col":4,"comment":"null","endLoc":229,"id":1673,"name":"daysinmonth","nodeType":"Attribute","startLoc":229,"text":"daysinmonth"},{"col":0,"comment":"\n Given a list of lists (of lists...) of objects, returns a iterator\n which returns a tuple containing the index of each object in the nested\n list structure as the key, and the object. This can then be called by the\n dict constructor to create a dictionary of the objects organised by their\n position in the original nested list.\n\n Recursively traverses the given structure, while keeping track of the\n current position. Should work for any type of object which isn't a list.\n\n Parameters\n ----------\n entry : list[list[obj, obj, ...], ...]\n List of lists of arbitrary depth, containing objects in the order\n they are to be concatenated.\n\n Returns\n -------\n combined_tile_ids : dict[tuple(int, ...), obj]\n ","endLoc":46,"header":"def _infer_tile_ids_from_nested_list(entry, current_pos)","id":1674,"name":"_infer_tile_ids_from_nested_list","nodeType":"Function","startLoc":20,"text":"def _infer_tile_ids_from_nested_list(entry, current_pos):\n \"\"\"\n Given a list of lists (of lists...) of objects, returns a iterator\n which returns a tuple containing the index of each object in the nested\n list structure as the key, and the object. 
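`ImplicitToExplicitIndexingAdapter` (serialized just above) turns ordinary `array[...]` calls into one of the explicit indexer types before delegating to the wrapped array. A minimal sketch, with the same internal module-path assumption as the earlier indexing examples:

```python
import numpy as np
from xarray.core.indexing import ImplicitToExplicitIndexingAdapter, OuterIndexer

arr = ImplicitToExplicitIndexingAdapter(np.arange(12).reshape(3, 4), OuterIndexer)

arr[np.array([0, 2]), 1:3]
# the key is wrapped as OuterIndexer((array([0, 2]), slice(1, 3))),
# giving an orthogonal 2 x 2 selection of the underlying ndarray
```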
This can then be called by the\n dict constructor to create a dictionary of the objects organised by their\n position in the original nested list.\n\n Recursively traverses the given structure, while keeping track of the\n current position. Should work for any type of object which isn't a list.\n\n Parameters\n ----------\n entry : list[list[obj, obj, ...], ...]\n List of lists of arbitrary depth, containing objects in the order\n they are to be concatenated.\n\n Returns\n -------\n combined_tile_ids : dict[tuple(int, ...), obj]\n \"\"\"\n\n if isinstance(entry, list):\n for i, item in enumerate(entry):\n yield from _infer_tile_ids_from_nested_list(item, current_pos + (i,))\n else:\n yield current_pos, entry"},{"fileName":"tutorial.py","filePath":"xarray","id":1675,"nodeType":"File","text":"\"\"\"\nUseful for:\n\n* users learning xarray\n* building tutorials in the documentation.\n\n\"\"\"\nimport hashlib\nimport os as _os\nfrom urllib.request import urlretrieve\n\nimport numpy as np\n\nfrom .backends.api import open_dataset as _open_dataset\nfrom .core.dataarray import DataArray\nfrom .core.dataset import Dataset\n\n_default_cache_dir = _os.sep.join((\"~\", \".xarray_tutorial_data\"))\n\n\ndef file_md5_checksum(fname):\n hash_md5 = hashlib.md5()\n with open(fname, \"rb\") as f:\n hash_md5.update(f.read())\n return hash_md5.hexdigest()\n\n\n# idea borrowed from Seaborn\ndef open_dataset(\n name,\n cache=True,\n cache_dir=_default_cache_dir,\n github_url=\"https://github.com/pydata/xarray-data\",\n branch=\"master\",\n **kws\n):\n \"\"\"\n Open a dataset from the online repository (requires internet).\n\n If a local copy is found then always use that to avoid network traffic.\n\n Parameters\n ----------\n name : str\n Name of the netcdf file containing the dataset\n ie. 
'air_temperature'\n cache_dir : string, optional\n The directory in which to search for and write cached data.\n cache : boolean, optional\n If True, then cache data locally for use on subsequent calls\n github_url : string\n Github repository where the data is stored\n branch : string\n The git branch to download from\n kws : dict, optional\n Passed to xarray.open_dataset\n\n See Also\n --------\n xarray.open_dataset\n\n \"\"\"\n longdir = _os.path.expanduser(cache_dir)\n fullname = name + \".nc\"\n localfile = _os.sep.join((longdir, fullname))\n md5name = name + \".md5\"\n md5file = _os.sep.join((longdir, md5name))\n\n if not _os.path.exists(localfile):\n\n # This will always leave this directory on disk.\n # May want to add an option to remove it.\n if not _os.path.isdir(longdir):\n _os.mkdir(longdir)\n\n url = \"/\".join((github_url, \"raw\", branch, fullname))\n urlretrieve(url, localfile)\n url = \"/\".join((github_url, \"raw\", branch, md5name))\n urlretrieve(url, md5file)\n\n localmd5 = file_md5_checksum(localfile)\n with open(md5file, \"r\") as f:\n remotemd5 = f.read()\n if localmd5 != remotemd5:\n _os.remove(localfile)\n msg = \"\"\"\n MD5 checksum does not match, try downloading dataset again.\n \"\"\"\n raise OSError(msg)\n\n ds = _open_dataset(localfile, **kws)\n\n if not cache:\n ds = ds.load()\n _os.remove(localfile)\n\n return ds\n\n\ndef load_dataset(*args, **kwargs):\n \"\"\"\n Open, load into memory, and close a dataset from the online repository\n (requires internet).\n\n See Also\n --------\n open_dataset\n \"\"\"\n with open_dataset(*args, **kwargs) as ds:\n return ds.load()\n\n\ndef scatter_example_dataset():\n A = DataArray(\n np.zeros([3, 11, 4, 4]),\n dims=[\"x\", \"y\", \"z\", \"w\"],\n coords=[\n np.arange(3),\n np.linspace(0, 1, 11),\n np.arange(4),\n 0.1 * np.random.randn(4),\n ],\n )\n B = 0.1 * A.x ** 2 + A.y ** 2.5 + 0.1 * A.z * A.w\n A = -0.1 * A.x + A.y / (5 + A.z) + A.w\n ds = Dataset({\"A\": A, \"B\": B})\n ds[\"w\"] = [\"one\", \"two\", \"three\", \"five\"]\n\n ds.x.attrs[\"units\"] = \"xunits\"\n ds.y.attrs[\"units\"] = \"yunits\"\n ds.z.attrs[\"units\"] = \"zunits\"\n ds.w.attrs[\"units\"] = \"wunits\"\n\n ds.A.attrs[\"units\"] = \"Aunits\"\n ds.B.attrs[\"units\"] = \"Bunits\"\n\n return ds\n"},{"attributeType":"null","col":4,"comment":"null","endLoc":231,"id":1676,"name":"season","nodeType":"Attribute","startLoc":231,"text":"season"},{"col":0,"comment":"null","endLoc":137,"header":"def scatter_example_dataset()","id":1677,"name":"scatter_example_dataset","nodeType":"Function","startLoc":113,"text":"def scatter_example_dataset():\n A = DataArray(\n np.zeros([3, 11, 4, 4]),\n dims=[\"x\", \"y\", \"z\", \"w\"],\n coords=[\n np.arange(3),\n np.linspace(0, 1, 11),\n np.arange(4),\n 0.1 * np.random.randn(4),\n ],\n )\n B = 0.1 * A.x ** 2 + A.y ** 2.5 + 0.1 * A.z * A.w\n A = -0.1 * A.x + A.y / (5 + A.z) + A.w\n ds = Dataset({\"A\": A, \"B\": B})\n ds[\"w\"] = [\"one\", \"two\", \"three\", \"five\"]\n\n ds.x.attrs[\"units\"] = \"xunits\"\n ds.y.attrs[\"units\"] = \"yunits\"\n ds.z.attrs[\"units\"] = \"zunits\"\n ds.w.attrs[\"units\"] = \"wunits\"\n\n ds.A.attrs[\"units\"] = \"Aunits\"\n ds.B.attrs[\"units\"] = \"Bunits\"\n\n return 
ds"},{"attributeType":"null","col":4,"comment":"null","endLoc":233,"id":1678,"name":"time","nodeType":"Attribute","startLoc":233,"text":"time"},{"attributeType":"null","col":8,"comment":"null","endLoc":179,"id":1679,"name":"_obj","nodeType":"Attribute","startLoc":179,"text":"self._obj"},{"attributeType":"null","col":16,"comment":"null","endLoc":1,"id":1680,"name":"np","nodeType":"Attribute","startLoc":1,"text":"np"},{"attributeType":"null","col":17,"comment":"null","endLoc":2,"id":1681,"name":"pd","nodeType":"Attribute","startLoc":2,"text":"pd"},{"fileName":"__init__.py","filePath":"xarray","id":1682,"nodeType":"File","text":"\"\"\" isort:skip_file \"\"\"\n# flake8: noqa\n\nfrom ._version import get_versions\n\n__version__ = get_versions()[\"version\"]\ndel get_versions\n\nfrom .core.alignment import align, broadcast\nfrom .core.common import full_like, zeros_like, ones_like\nfrom .core.concat import concat\nfrom .core.combine import combine_by_coords, combine_nested, auto_combine\nfrom .core.computation import apply_ufunc, dot, where\nfrom .core.extensions import register_dataarray_accessor, register_dataset_accessor\nfrom .core.variable import as_variable, Variable, IndexVariable, Coordinate\nfrom .core.dataset import Dataset\nfrom .core.dataarray import DataArray\nfrom .core.merge import merge, MergeError\nfrom .core.options import set_options\nfrom .core.parallel import map_blocks\n\nfrom .backends.api import (\n open_dataset,\n open_dataarray,\n open_mfdataset,\n save_mfdataset,\n load_dataset,\n load_dataarray,\n)\nfrom .backends.rasterio_ import open_rasterio\nfrom .backends.zarr import open_zarr\n\nfrom .conventions import decode_cf, SerializationWarning\n\nfrom .coding.cftime_offsets import cftime_range\nfrom .coding.cftimeindex import CFTimeIndex\n\nfrom .util.print_versions import show_versions\n\nfrom . import tutorial\nfrom . import ufuncs\nfrom . import testing\n\nfrom .core.common import ALL_DIMS\n"},{"col":4,"comment":"Return a new DataArray object with transposed dimensions.\n\n Parameters\n ----------\n *dims : hashable, optional\n By default, reverse the dimensions. Otherwise, reorder the\n dimensions to this order.\n transpose_coords : boolean, optional\n If True, also transpose the coordinates of this DataArray.\n\n Returns\n -------\n transposed : DataArray\n The returned DataArray's array is transposed.\n\n Notes\n -----\n This operation returns a view of this array's data. It is\n lazy for dask-backed DataArrays but not for numpy-backed DataArrays\n -- the data will be fully loaded.\n\n See Also\n --------\n numpy.transpose\n Dataset.transpose\n ","endLoc":1889,"header":"def transpose(self, *dims: Hashable, transpose_coords: bool = None) -> \"DataArray\"","id":1683,"name":"transpose","nodeType":"Function","startLoc":1838,"text":"def transpose(self, *dims: Hashable, transpose_coords: bool = None) -> \"DataArray\":\n \"\"\"Return a new DataArray object with transposed dimensions.\n\n Parameters\n ----------\n *dims : hashable, optional\n By default, reverse the dimensions. Otherwise, reorder the\n dimensions to this order.\n transpose_coords : boolean, optional\n If True, also transpose the coordinates of this DataArray.\n\n Returns\n -------\n transposed : DataArray\n The returned DataArray's array is transposed.\n\n Notes\n -----\n This operation returns a view of this array's data. 
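The `xarray.tutorial` helpers serialized above can be exercised as follows; downloading requires internet access, and the file is cached under `~/.xarray_tutorial_data`:

```python
import xarray as xr

# fetched from the pydata/xarray-data repository on first use, then cached locally
air = xr.tutorial.open_dataset("air_temperature")

# small synthetic dataset intended for scatter-plot examples
ds = xr.tutorial.scatter_example_dataset()
```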
It is\n lazy for dask-backed DataArrays but not for numpy-backed DataArrays\n -- the data will be fully loaded.\n\n See Also\n --------\n numpy.transpose\n Dataset.transpose\n \"\"\"\n if dims:\n if set(dims) ^ set(self.dims):\n raise ValueError(\n \"arguments to transpose (%s) must be \"\n \"permuted array dimensions (%s)\" % (dims, tuple(self.dims))\n )\n\n variable = self.variable.transpose(*dims)\n if transpose_coords:\n coords: Dict[Hashable, Variable] = {}\n for name, coord in self.coords.items():\n coord_dims = tuple(dim for dim in dims if dim in coord.dims)\n coords[name] = coord.variable.transpose(*coord_dims)\n return self._replace(variable, coords)\n else:\n if transpose_coords is None and any(self[c].ndim > 1 for c in self.coords):\n warnings.warn(\n \"This DataArray contains multi-dimensional \"\n \"coordinates. In the future, these coordinates \"\n \"will be transposed as well unless you specify \"\n \"transpose_coords=False.\",\n FutureWarning,\n stacklevel=2,\n )\n return self._replace(variable)"},{"col":0,"comment":"Get version information or return default if unable to do so.","endLoc":556,"header":"def get_versions()","id":1684,"name":"get_versions","nodeType":"Function","startLoc":507,"text":"def get_versions():\n \"\"\"Get version information or return default if unable to do so.\"\"\"\n # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have\n # __file__, we can work backwards from there to the root. Some\n # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which\n # case we can only use expanded keywords.\n\n cfg = get_config()\n verbose = cfg.verbose\n\n try:\n return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)\n except NotThisMethod:\n pass\n\n try:\n root = os.path.realpath(__file__)\n # versionfile_source is the relative path from the top of the source\n # tree (where the .git directory might live) to this file. Invert\n # this to find the root from __file__.\n for i in cfg.versionfile_source.split(\"/\"):\n root = os.path.dirname(root)\n except NameError:\n return {\n \"version\": \"0+unknown\",\n \"full-revisionid\": None,\n \"dirty\": None,\n \"error\": \"unable to find root of source tree\",\n \"date\": None,\n }\n\n try:\n pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)\n return render(pieces, cfg.style)\n except NotThisMethod:\n pass\n\n try:\n if cfg.parentdir_prefix:\n return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)\n except NotThisMethod:\n pass\n\n return {\n \"version\": \"0+unknown\",\n \"full-revisionid\": None,\n \"dirty\": None,\n \"error\": \"unable to compute version\",\n \"date\": None,\n }"},{"col":0,"comment":"\n Attempt to auto-magically combine the given datasets into one.\n\n This entire function is deprecated in favour of ``combine_nested`` and\n ``combine_by_coords``.\n\n This method attempts to combine a list of datasets into a single entity by\n inspecting metadata and using a combination of concat and merge.\n It does not concatenate along more than one dimension or sort data under\n any circumstances. It does align coordinates, but different variables on\n datasets can cause it to fail under some scenarios. 
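`DataArray.transpose` as documented above, including the `transpose_coords` flag that also reorders multi-dimensional coordinates (the array contents here are arbitrary):

```python
import numpy as np
import xarray as xr

arr = xr.DataArray(
    np.zeros((2, 3)),
    dims=("x", "y"),
    coords={"lon": (("x", "y"), np.ones((2, 3)))},
)

arr.transpose("y", "x", transpose_coords=True)
# data becomes (y, x); the 2-D coordinate "lon" is transposed as well
```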
In complex cases, you\n may need to clean up your data and use ``concat``/``merge`` explicitly.\n ``auto_combine`` works well if you have N years of data and M data\n variables, and each combination of a distinct time period and set of data\n variables is saved its own dataset.\n\n Parameters\n ----------\n datasets : sequence of xarray.Dataset\n Dataset objects to merge.\n concat_dim : str or DataArray or Index, optional\n Dimension along which to concatenate variables, as used by\n :py:func:`xarray.concat`. You only need to provide this argument if\n the dimension along which you want to concatenate is not a dimension\n in the original datasets, e.g., if you want to stack a collection of\n 2D arrays along a third dimension.\n By default, xarray attempts to infer this argument by examining\n component files. Set ``concat_dim=None`` explicitly to disable\n concatenation.\n compat : {'identical', 'equals', 'broadcast_equals',\n 'no_conflicts', 'override'}, optional\n String indicating how to compare variables of the same name for\n potential conflicts:\n - 'broadcast_equals': all values must be equal when variables are\n broadcast against each other to ensure common dimensions.\n - 'equals': all values and dimensions must be the same.\n - 'identical': all values, dimensions and attributes must be the\n same.\n - 'no_conflicts': only values which are not null in both datasets\n must be equal. The returned dataset then contains the combination\n of all non-null values.\n - 'override': skip comparing and pick variable from first dataset\n data_vars : {'minimal', 'different', 'all' or list of str}, optional\n Details are in the documentation of concat\n coords : {'minimal', 'different', 'all' o list of str}, optional\n Details are in the documentation of concat\n fill_value : scalar, optional\n Value to use for newly missing values\n join : {'outer', 'inner', 'left', 'right', 'exact'}, optional\n String indicating how to combine differing indexes\n (excluding concat_dim) in objects\n\n - 'outer': use the union of object indexes\n - 'inner': use the intersection of object indexes\n - 'left': use indexes from the first object with each dimension\n - 'right': use indexes from the last object with each dimension\n - 'exact': instead of aligning, raise `ValueError` when indexes to be\n aligned are not equal\n - 'override': if indexes are of same size, rewrite indexes to be\n those of the first object with that dimension. Indexes for the same\n dimension must have the same size in all objects.\n\n Returns\n -------\n combined : xarray.Dataset\n\n See also\n --------\n concat\n Dataset.merge\n ","endLoc":854,"header":"def auto_combine(\n datasets,\n concat_dim=\"_not_supplied\",\n compat=\"no_conflicts\",\n data_vars=\"all\",\n coords=\"different\",\n fill_value=dtypes.NA,\n join=\"outer\",\n from_openmfds=False,\n)","id":1685,"name":"auto_combine","nodeType":"Function","startLoc":707,"text":"def auto_combine(\n datasets,\n concat_dim=\"_not_supplied\",\n compat=\"no_conflicts\",\n data_vars=\"all\",\n coords=\"different\",\n fill_value=dtypes.NA,\n join=\"outer\",\n from_openmfds=False,\n):\n \"\"\"\n Attempt to auto-magically combine the given datasets into one.\n\n This entire function is deprecated in favour of ``combine_nested`` and\n ``combine_by_coords``.\n\n This method attempts to combine a list of datasets into a single entity by\n inspecting metadata and using a combination of concat and merge.\n It does not concatenate along more than one dimension or sort data under\n any circumstances. 
It does align coordinates, but different variables on\n datasets can cause it to fail under some scenarios. In complex cases, you\n may need to clean up your data and use ``concat``/``merge`` explicitly.\n ``auto_combine`` works well if you have N years of data and M data\n variables, and each combination of a distinct time period and set of data\n variables is saved its own dataset.\n\n Parameters\n ----------\n datasets : sequence of xarray.Dataset\n Dataset objects to merge.\n concat_dim : str or DataArray or Index, optional\n Dimension along which to concatenate variables, as used by\n :py:func:`xarray.concat`. You only need to provide this argument if\n the dimension along which you want to concatenate is not a dimension\n in the original datasets, e.g., if you want to stack a collection of\n 2D arrays along a third dimension.\n By default, xarray attempts to infer this argument by examining\n component files. Set ``concat_dim=None`` explicitly to disable\n concatenation.\n compat : {'identical', 'equals', 'broadcast_equals',\n 'no_conflicts', 'override'}, optional\n String indicating how to compare variables of the same name for\n potential conflicts:\n - 'broadcast_equals': all values must be equal when variables are\n broadcast against each other to ensure common dimensions.\n - 'equals': all values and dimensions must be the same.\n - 'identical': all values, dimensions and attributes must be the\n same.\n - 'no_conflicts': only values which are not null in both datasets\n must be equal. The returned dataset then contains the combination\n of all non-null values.\n - 'override': skip comparing and pick variable from first dataset\n data_vars : {'minimal', 'different', 'all' or list of str}, optional\n Details are in the documentation of concat\n coords : {'minimal', 'different', 'all' o list of str}, optional\n Details are in the documentation of concat\n fill_value : scalar, optional\n Value to use for newly missing values\n join : {'outer', 'inner', 'left', 'right', 'exact'}, optional\n String indicating how to combine differing indexes\n (excluding concat_dim) in objects\n\n - 'outer': use the union of object indexes\n - 'inner': use the intersection of object indexes\n - 'left': use indexes from the first object with each dimension\n - 'right': use indexes from the last object with each dimension\n - 'exact': instead of aligning, raise `ValueError` when indexes to be\n aligned are not equal\n - 'override': if indexes are of same size, rewrite indexes to be\n those of the first object with that dimension. Indexes for the same\n dimension must have the same size in all objects.\n\n Returns\n -------\n combined : xarray.Dataset\n\n See also\n --------\n concat\n Dataset.merge\n \"\"\"\n\n if not from_openmfds:\n basic_msg = dedent(\n \"\"\"\\\n In xarray version 0.15 `auto_combine` will be deprecated. See\n http://xarray.pydata.org/en/stable/combining.html#combining-multi\"\"\"\n )\n warnings.warn(basic_msg, FutureWarning, stacklevel=2)\n\n if concat_dim == \"_not_supplied\":\n concat_dim = _CONCAT_DIM_DEFAULT\n message = \"\"\n else:\n message = dedent(\n \"\"\"\\\n Also `open_mfdataset` will no longer accept a `concat_dim` argument.\n To get equivalent behaviour from now on please use the new\n `combine_nested` function instead (or the `combine='nested'` option to\n `open_mfdataset`).\"\"\"\n )\n\n if _dimension_coords_exist(datasets):\n message += dedent(\n \"\"\"\\\n The datasets supplied have global dimension coordinates. 
You may want\n to use the new `combine_by_coords` function (or the\n `combine='by_coords'` option to `open_mfdataset`) to order the datasets\n before concatenation. Alternatively, to continue concatenating based\n on the order the datasets are supplied in future, please use the new\n `combine_nested` function (or the `combine='nested'` option to\n open_mfdataset).\"\"\"\n )\n else:\n message += dedent(\n \"\"\"\\\n The datasets supplied do not have global dimension coordinates. In\n future, to continue concatenating without supplying dimension\n coordinates, please use the new `combine_nested` function (or the\n `combine='nested'` option to open_mfdataset.\"\"\"\n )\n\n if _requires_concat_and_merge(datasets):\n manual_dims = [concat_dim].append(None)\n message += dedent(\n \"\"\"\\\n The datasets supplied require both concatenation and merging. From\n xarray version 0.15 this will operation will require either using the\n new `combine_nested` function (or the `combine='nested'` option to\n open_mfdataset), with a nested list structure such that you can combine\n along the dimensions {}. Alternatively if your datasets have global\n dimension coordinates then you can use the new `combine_by_coords`\n function.\"\"\".format(\n manual_dims\n )\n )\n\n warnings.warn(message, FutureWarning, stacklevel=2)\n\n return _old_auto_combine(\n datasets,\n concat_dim=concat_dim,\n compat=compat,\n data_vars=data_vars,\n coords=coords,\n fill_value=fill_value,\n join=join,\n )"},{"col":0,"comment":"\n Check if the datasets have consistent global dimension coordinates\n which would in future be used by `auto_combine` for concatenation ordering.\n ","endLoc":876,"header":"def _dimension_coords_exist(datasets)","id":1686,"name":"_dimension_coords_exist","nodeType":"Function","startLoc":857,"text":"def _dimension_coords_exist(datasets):\n \"\"\"\n Check if the datasets have consistent global dimension coordinates\n which would in future be used by `auto_combine` for concatenation ordering.\n \"\"\"\n\n # Group by data vars\n sorted_datasets = sorted(datasets, key=vars_as_keys)\n grouped_by_vars = itertools.groupby(sorted_datasets, key=vars_as_keys)\n\n # Simulates performing the multidimensional combine on each group of data\n # variables before merging back together\n try:\n for vars, datasets_with_same_vars in grouped_by_vars:\n _infer_concat_order_from_coords(list(datasets_with_same_vars))\n return True\n except ValueError:\n # ValueError means datasets don't have global dimension coordinates\n # Or something else went wrong in trying to determine them\n return False"},{"col":0,"comment":"null","endLoc":115,"header":"def _infer_concat_order_from_coords(datasets)","id":1687,"name":"_infer_concat_order_from_coords","nodeType":"Function","startLoc":49,"text":"def _infer_concat_order_from_coords(datasets):\n\n concat_dims = []\n tile_ids = [() for ds in datasets]\n\n # All datasets have same variables because they've been grouped as such\n ds0 = datasets[0]\n for dim in ds0.dims:\n\n # Check if dim is a coordinate dimension\n if dim in ds0:\n\n # Need to read coordinate values to do ordering\n indexes = [ds.indexes.get(dim) for ds in datasets]\n if any(index is None for index in indexes):\n raise ValueError(\n \"Every dimension needs a coordinate for \"\n \"inferring concatenation order\"\n )\n\n # If dimension coordinate values are same on every dataset then\n # should be leaving this dimension alone (it's just a \"bystander\")\n if not all(index.equals(indexes[0]) for index in indexes[1:]):\n\n # Infer 
order datasets should be arranged in along this dim\n concat_dims.append(dim)\n\n if all(index.is_monotonic_increasing for index in indexes):\n ascending = True\n elif all(index.is_monotonic_decreasing for index in indexes):\n ascending = False\n else:\n raise ValueError(\n \"Coordinate variable {} is neither \"\n \"monotonically increasing nor \"\n \"monotonically decreasing on all datasets\".format(dim)\n )\n\n # Assume that any two datasets whose coord along dim starts\n # with the same value have the same coord values throughout.\n if any(index.size == 0 for index in indexes):\n raise ValueError(\"Cannot handle size zero dimensions\")\n first_items = pd.Index([index.take([0]) for index in indexes])\n\n # Sort datasets along dim\n # We want rank but with identical elements given identical\n # position indices - they should be concatenated along another\n # dimension, not along this one\n series = first_items.to_series()\n rank = series.rank(method=\"dense\", ascending=ascending)\n order = rank.astype(int).values - 1\n\n # Append positions along extra dimension to structure which\n # encodes the multi-dimensional concatentation order\n tile_ids = [\n tile_id + (position,) for tile_id, position in zip(tile_ids, order)\n ]\n\n if len(datasets) > 1 and not concat_dims:\n raise ValueError(\n \"Could not find any dimension coordinates to use to \"\n \"order the datasets for concatenation\"\n )\n\n combined_ids = dict(zip(tile_ids, datasets))\n\n return combined_ids, concat_dims"},{"col":4,"comment":"null","endLoc":1893,"header":"@property\n def T(self) -> \"DataArray\"","id":1688,"name":"T","nodeType":"Function","startLoc":1891,"text":"@property\n def T(self) -> \"DataArray\":\n return self.transpose()"},{"col":4,"comment":"Merge the arrays of two datasets into a single dataset.\n\n This method generally does not allow for overriding data, with the\n exception of attributes, which are ignored on the second dataset.\n Variables with the same name are checked for conflicts via the equals\n or identical methods.\n\n Parameters\n ----------\n other : Dataset or castable to Dataset\n Dataset or variables to merge with this dataset.\n overwrite_vars : Hashable or iterable of Hashable, optional\n If provided, update variables of these name(s) without checking for\n conflicts in this dataset.\n compat : {'broadcast_equals', 'equals', 'identical',\n 'no_conflicts'}, optional\n String indicating how to compare variables of the same name for\n potential conflicts:\n - 'broadcast_equals': all values must be equal when variables are\n broadcast against each other to ensure common dimensions.\n - 'equals': all values and dimensions must be the same.\n - 'identical': all values, dimensions and attributes must be the\n same.\n - 'no_conflicts': only values which are not null in both datasets\n must be equal. 
The returned dataset then contains the combination\n of all non-null values.\n join : {'outer', 'inner', 'left', 'right', 'exact'}, optional\n Method for joining ``self`` and ``other`` along shared dimensions:\n\n - 'outer': use the union of the indexes\n - 'inner': use the intersection of the indexes\n - 'left': use indexes from ``self``\n - 'right': use indexes from ``other``\n - 'exact': error instead of aligning non-equal indexes\n fill_value: scalar, optional\n Value to use for newly missing values\n\n Returns\n -------\n merged : Dataset\n Merged dataset.\n\n Raises\n ------\n MergeError\n If any variables conflict (see ``compat``).\n ","endLoc":3506,"header":"def merge(\n self,\n other: \"CoercibleMapping\",\n inplace: bool = None,\n overwrite_vars: Union[Hashable, Iterable[Hashable]] = frozenset(),\n compat","id":1689,"name":"merge","nodeType":"Function","startLoc":3441,"text":"def merge(\n self,\n other: \"CoercibleMapping\",\n inplace: bool = None,\n overwrite_vars: Union[Hashable, Iterable[Hashable]] = frozenset(),\n compat: str = \"no_conflicts\",\n join: str = \"outer\",\n fill_value: Any = dtypes.NA,\n ) -> \"Dataset\":\n \"\"\"Merge the arrays of two datasets into a single dataset.\n\n This method generally does not allow for overriding data, with the\n exception of attributes, which are ignored on the second dataset.\n Variables with the same name are checked for conflicts via the equals\n or identical methods.\n\n Parameters\n ----------\n other : Dataset or castable to Dataset\n Dataset or variables to merge with this dataset.\n overwrite_vars : Hashable or iterable of Hashable, optional\n If provided, update variables of these name(s) without checking for\n conflicts in this dataset.\n compat : {'broadcast_equals', 'equals', 'identical',\n 'no_conflicts'}, optional\n String indicating how to compare variables of the same name for\n potential conflicts:\n - 'broadcast_equals': all values must be equal when variables are\n broadcast against each other to ensure common dimensions.\n - 'equals': all values and dimensions must be the same.\n - 'identical': all values, dimensions and attributes must be the\n same.\n - 'no_conflicts': only values which are not null in both datasets\n must be equal. The returned dataset then contains the combination\n of all non-null values.\n join : {'outer', 'inner', 'left', 'right', 'exact'}, optional\n Method for joining ``self`` and ``other`` along shared dimensions:\n\n - 'outer': use the union of the indexes\n - 'inner': use the intersection of the indexes\n - 'left': use indexes from ``self``\n - 'right': use indexes from ``other``\n - 'exact': error instead of aligning non-equal indexes\n fill_value: scalar, optional\n Value to use for newly missing values\n\n Returns\n -------\n merged : Dataset\n Merged dataset.\n\n Raises\n ------\n MergeError\n If any variables conflict (see ``compat``).\n \"\"\"\n _check_inplace(inplace)\n merge_result = dataset_merge_method(\n self,\n other,\n overwrite_vars=overwrite_vars,\n compat=compat,\n join=join,\n fill_value=fill_value,\n )\n return self._replace(**merge_result._asdict())"},{"col":4,"comment":"Drop coordinates or index labels from this DataArray.\n\n Parameters\n ----------\n labels : hashable or sequence of hashables\n Name(s) of coordinates or index labels to drop.\n If dim is not None, labels can be any array-like.\n dim : hashable, optional\n Dimension along which to drop index labels. 
By default (if\n ``dim is None``), drops coordinates rather than index labels.\n errors: {'raise', 'ignore'}, optional\n If 'raise' (default), raises a ValueError error if\n any of the coordinates or index labels passed are not\n in the array. If 'ignore', any given labels that are in the\n array are dropped and no error is raised.\n Returns\n -------\n dropped : DataArray\n ","endLoc":1930,"header":"def drop(self, labels, dim=None, *, errors=\"raise\")","id":1690,"name":"drop","nodeType":"Function","startLoc":1909,"text":"def drop(self, labels, dim=None, *, errors=\"raise\"): # noqa: F811\n \"\"\"Drop coordinates or index labels from this DataArray.\n\n Parameters\n ----------\n labels : hashable or sequence of hashables\n Name(s) of coordinates or index labels to drop.\n If dim is not None, labels can be any array-like.\n dim : hashable, optional\n Dimension along which to drop index labels. By default (if\n ``dim is None``), drops coordinates rather than index labels.\n errors: {'raise', 'ignore'}, optional\n If 'raise' (default), raises a ValueError error if\n any of the coordinates or index labels passed are not\n in the array. If 'ignore', any given labels that are in the\n array are dropped and no error is raised.\n Returns\n -------\n dropped : DataArray\n \"\"\"\n ds = self._to_temp_dataset().drop(labels, dim, errors=errors)\n return self._from_temp_dataset(ds)"},{"attributeType":"null","col":4,"comment":"null","endLoc":483,"id":1691,"name":"__slots__","nodeType":"Attribute","startLoc":483,"text":"__slots__"},{"col":4,"comment":"Returns a new array with dropped labels for missing values along\n the provided dimension.\n\n Parameters\n ----------\n dim : hashable\n Dimension along which to drop missing values. Dropping along\n multiple dimensions simultaneously is not yet supported.\n how : {'any', 'all'}, optional\n * any : if any NA values are present, drop that label\n * all : if all values are NA, drop that label\n thresh : int, default None\n If supplied, require this many non-NA values.\n\n Returns\n -------\n DataArray\n ","endLoc":1954,"header":"def dropna(\n self, dim: Hashable, how: str = \"any\", thresh: int = None\n ) -> \"DataArray\"","id":1692,"name":"dropna","nodeType":"Function","startLoc":1932,"text":"def dropna(\n self, dim: Hashable, how: str = \"any\", thresh: int = None\n ) -> \"DataArray\":\n \"\"\"Returns a new array with dropped labels for missing values along\n the provided dimension.\n\n Parameters\n ----------\n dim : hashable\n Dimension along which to drop missing values. 
Dropping along\n multiple dimensions simultaneously is not yet supported.\n how : {'any', 'all'}, optional\n * any : if any NA values are present, drop that label\n * all : if all values are NA, drop that label\n thresh : int, default None\n If supplied, require this many non-NA values.\n\n Returns\n -------\n DataArray\n \"\"\"\n ds = self._to_temp_dataset().dropna(dim, how=how, thresh=thresh)\n return self._from_temp_dataset(ds)"},{"attributeType":"ExplicitlyIndexed | NumpyIndexingAdapter | PandasIndexAdapter | DaskIndexingAdapter | NdArrayLikeIndexingAdapter","col":8,"comment":"null","endLoc":486,"id":1693,"name":"array","nodeType":"Attribute","startLoc":486,"text":"self.array"},{"col":4,"comment":"Drop variables or index labels from this dataset.\n\n Parameters\n ----------\n labels : hashable or iterable of hashables\n Name(s) of variables or index labels to drop.\n If dim is not None, labels can be any array-like.\n dim : None or hashable, optional\n Dimension along which to drop index labels. By default (if\n ``dim is None``), drops variables rather than index labels.\n errors: {'raise', 'ignore'}, optional\n If 'raise' (default), raises a ValueError error if\n any of the variable or index labels passed are not\n in the dataset. If 'ignore', any given labels that are in the\n dataset are dropped and no error is raised.\n **labels_kwargs : {dim: label, ...}, optional\n The keyword arguments form of ``dim`` and ``labels``.\n\n Returns\n -------\n dropped : Dataset\n\n Examples\n --------\n >>> data = np.random.randn(2, 3)\n >>> labels = ['a', 'b', 'c']\n >>> ds = xr.Dataset({'A': (['x', 'y'], data), 'y': labels})\n >>> ds.drop(y=['a', 'c'])\n \n Dimensions: (x: 2, y: 1)\n Coordinates:\n * y (y) >> ds.drop(y='b')\n \n Dimensions: (x: 2, y: 2)\n Coordinates:\n * y (y) >> data = np.random.randn(2, 3)\n >>> labels = ['a', 'b', 'c']\n >>> ds = xr.Dataset({'A': (['x', 'y'], data), 'y': labels})\n >>> ds.drop(y=['a', 'c'])\n \n Dimensions: (x: 2, y: 1)\n Coordinates:\n * y (y) >> ds.drop(y='b')\n \n Dimensions: (x: 2, y: 2)\n Coordinates:\n * y (y) = thresh\n elif how == \"any\":\n mask = count == size\n elif how == \"all\":\n mask = count > 0\n elif how is not None:\n raise ValueError(\"invalid how option: %s\" % how)\n else:\n raise TypeError(\"must specify how or thresh\")\n\n return self.isel({dim: mask})"},{"col":0,"comment":"\n Check if the datasets require the use of both xarray.concat and\n xarray.merge, which in future might require the user to use\n `manual_combine` instead.\n ","endLoc":889,"header":"def _requires_concat_and_merge(datasets)","id":1696,"name":"_requires_concat_and_merge","nodeType":"Function","startLoc":879,"text":"def _requires_concat_and_merge(datasets):\n \"\"\"\n Check if the datasets require the use of both xarray.concat and\n xarray.merge, which in future might require the user to use\n `manual_combine` instead.\n \"\"\"\n # Group by data vars\n sorted_datasets = sorted(datasets, key=vars_as_keys)\n grouped_by_vars = itertools.groupby(sorted_datasets, key=vars_as_keys)\n\n return len(list(grouped_by_vars)) > 1"},{"col":0,"comment":"null","endLoc":922,"header":"def _old_auto_combine(\n datasets,\n concat_dim=_CONCAT_DIM_DEFAULT,\n compat=\"no_conflicts\",\n data_vars=\"all\",\n coords=\"different\",\n fill_value=dtypes.NA,\n join=\"outer\",\n)","id":1697,"name":"_old_auto_combine","nodeType":"Function","startLoc":892,"text":"def _old_auto_combine(\n datasets,\n concat_dim=_CONCAT_DIM_DEFAULT,\n compat=\"no_conflicts\",\n data_vars=\"all\",\n 
coords=\"different\",\n fill_value=dtypes.NA,\n join=\"outer\",\n):\n if concat_dim is not None:\n dim = None if concat_dim is _CONCAT_DIM_DEFAULT else concat_dim\n\n sorted_datasets = sorted(datasets, key=vars_as_keys)\n grouped = itertools.groupby(sorted_datasets, key=vars_as_keys)\n\n concatenated = [\n _auto_concat(\n list(datasets),\n dim=dim,\n data_vars=data_vars,\n coords=coords,\n compat=compat,\n fill_value=fill_value,\n join=join,\n )\n for vars, datasets in grouped\n ]\n else:\n concatenated = datasets\n merged = merge(concatenated, compat=compat, fill_value=fill_value, join=join)\n return merged"},{"col":0,"comment":"null","endLoc":965,"header":"def _auto_concat(\n datasets,\n dim=None,\n data_vars=\"all\",\n coords=\"different\",\n fill_value=dtypes.NA,\n join=\"outer\",\n compat=\"no_conflicts\",\n)","id":1698,"name":"_auto_concat","nodeType":"Function","startLoc":925,"text":"def _auto_concat(\n datasets,\n dim=None,\n data_vars=\"all\",\n coords=\"different\",\n fill_value=dtypes.NA,\n join=\"outer\",\n compat=\"no_conflicts\",\n):\n if len(datasets) == 1 and dim is None:\n # There is nothing more to combine, so kick out early.\n return datasets[0]\n else:\n if dim is None:\n ds0 = datasets[0]\n ds1 = datasets[1]\n concat_dims = set(ds0.dims)\n if ds0.dims != ds1.dims:\n dim_tuples = set(ds0.dims.items()) - set(ds1.dims.items())\n concat_dims = {i for i, _ in dim_tuples}\n if len(concat_dims) > 1:\n concat_dims = {d for d in concat_dims if not ds0[d].equals(ds1[d])}\n if len(concat_dims) > 1:\n raise ValueError(\n \"too many different dimensions to \" \"concatenate: %s\" % concat_dims\n )\n elif len(concat_dims) == 0:\n raise ValueError(\n \"cannot infer dimension to concatenate: \"\n \"supply the ``concat_dim`` argument \"\n \"explicitly\"\n )\n dim, = concat_dims\n return concat(\n datasets,\n dim=dim,\n data_vars=data_vars,\n coords=coords,\n fill_value=fill_value,\n compat=compat,\n )"},{"col":4,"comment":"null","endLoc":3627,"header":"def _drop_labels(self, labels=None, dim=None, errors=\"raise\")","id":1699,"name":"_drop_labels","nodeType":"Function","startLoc":3616,"text":"def _drop_labels(self, labels=None, dim=None, errors=\"raise\"):\n # Don't cast to set, as it would harm performance when labels\n # is a large numpy array\n if utils.is_scalar(labels):\n labels = [labels]\n labels = np.asarray(labels)\n try:\n index = self.indexes[dim]\n except KeyError:\n raise ValueError(\"dimension %r does not have coordinate labels\" % dim)\n new_index = index.drop(labels, errors=errors)\n return self.loc[{dim: new_index}]"},{"col":0,"comment":"Create, populate and return the VersioneerConfig() object.","endLoc":47,"header":"def get_config()","id":1700,"name":"get_config","nodeType":"Function","startLoc":36,"text":"def get_config():\n \"\"\"Create, populate and return the VersioneerConfig() object.\"\"\"\n # these strings are filled in when 'setup.py versioneer' creates\n # _version.py\n cfg = VersioneerConfig()\n cfg.VCS = \"git\"\n cfg.style = \"pep440\"\n cfg.tag_prefix = \"v\"\n cfg.parentdir_prefix = \"xarray-\"\n cfg.versionfile_source = \"xarray/_version.py\"\n cfg.verbose = False\n return cfg"},{"col":0,"comment":"null","endLoc":309,"header":"def _nested_combine(\n datasets,\n concat_dims,\n compat,\n data_vars,\n coords,\n ids,\n fill_value=dtypes.NA,\n join=\"outer\",\n)","id":1701,"name":"_nested_combine","nodeType":"Function","startLoc":272,"text":"def _nested_combine(\n datasets,\n concat_dims,\n compat,\n data_vars,\n coords,\n ids,\n fill_value=dtypes.NA,\n 
join=\"outer\",\n):\n\n if len(datasets) == 0:\n return Dataset()\n\n # Arrange datasets for concatenation\n # Use information from the shape of the user input\n if not ids:\n # Determine tile_IDs by structure of input in N-D\n # (i.e. ordering in list-of-lists)\n combined_ids = _infer_concat_order_from_positions(datasets)\n else:\n # Already sorted so just use the ids already passed\n combined_ids = dict(zip(ids, datasets))\n\n # Check that the inferred shape is combinable\n _check_shape_tile_ids(combined_ids)\n\n # Apply series of concatenate or merge operations along each dimension\n combined = _combine_nd(\n combined_ids,\n concat_dims,\n compat=compat,\n data_vars=data_vars,\n coords=coords,\n fill_value=fill_value,\n join=join,\n )\n return combined"},{"col":4,"comment":"null","endLoc":3638,"header":"def _drop_vars(self, names: set, errors: str = \"raise\") -> \"Dataset\"","id":1702,"name":"_drop_vars","nodeType":"Function","startLoc":3629,"text":"def _drop_vars(self, names: set, errors: str = \"raise\") -> \"Dataset\":\n if errors == \"raise\":\n self._assert_all_in_dataset(names)\n\n variables = {k: v for k, v in self._variables.items() if k not in names}\n coord_names = {k for k in self._coord_names if k in variables}\n indexes = {k: v for k, v in self.indexes.items() if k not in names}\n return self._replace_with_new_dims(\n variables, coord_names=coord_names, indexes=indexes\n )"},{"attributeType":"BasicIndexer","col":8,"comment":"null","endLoc":487,"id":1703,"name":"indexer_cls","nodeType":"Attribute","startLoc":487,"text":"self.indexer_cls"},{"className":"LazilyOuterIndexedArray","col":0,"comment":"Wrap an array to make basic and outer indexing lazy.\n ","endLoc":577,"id":1704,"nodeType":"Class","startLoc":503,"text":"class LazilyOuterIndexedArray(ExplicitlyIndexedNDArrayMixin):\n \"\"\"Wrap an array to make basic and outer indexing lazy.\n \"\"\"\n\n __slots__ = (\"array\", \"key\")\n\n def __init__(self, array, key=None):\n \"\"\"\n Parameters\n ----------\n array : array_like\n Array like object to index.\n key : ExplicitIndexer, optional\n Array indexer. 
If provided, it is assumed to already be in\n canonical expanded form.\n \"\"\"\n if isinstance(array, type(self)) and key is None:\n # unwrap\n key = array.key\n array = array.array\n\n if key is None:\n key = BasicIndexer((slice(None),) * array.ndim)\n\n self.array = as_indexable(array)\n self.key = key\n\n def _updated_key(self, new_key):\n iter_new_key = iter(expanded_indexer(new_key.tuple, self.ndim))\n full_key = []\n for size, k in zip(self.array.shape, self.key.tuple):\n if isinstance(k, integer_types):\n full_key.append(k)\n else:\n full_key.append(_index_indexer_1d(k, next(iter_new_key), size))\n full_key = tuple(full_key)\n\n if all(isinstance(k, integer_types + (slice,)) for k in full_key):\n return BasicIndexer(full_key)\n return OuterIndexer(full_key)\n\n @property\n def shape(self):\n shape = []\n for size, k in zip(self.array.shape, self.key.tuple):\n if isinstance(k, slice):\n shape.append(len(range(*k.indices(size))))\n elif isinstance(k, np.ndarray):\n shape.append(k.size)\n return tuple(shape)\n\n def __array__(self, dtype=None):\n array = as_indexable(self.array)\n return np.asarray(array[self.key], dtype=None)\n\n def transpose(self, order):\n return LazilyVectorizedIndexedArray(self.array, self.key).transpose(order)\n\n def __getitem__(self, indexer):\n if isinstance(indexer, VectorizedIndexer):\n array = LazilyVectorizedIndexedArray(self.array, self.key)\n return array[indexer]\n return type(self)(self.array, self._updated_key(indexer))\n\n def __setitem__(self, key, value):\n if isinstance(key, VectorizedIndexer):\n raise NotImplementedError(\n \"Lazy item assignment with the vectorized indexer is not yet \"\n \"implemented. Load your data first by .load() or compute().\"\n )\n full_key = self._updated_key(key)\n self.array[full_key] = value\n\n def __repr__(self):\n return \"%s(array=%r, key=%r)\" % (type(self).__name__, self.array, self.key)"},{"col":4,"comment":"null","endLoc":542,"header":"def _updated_key(self, new_key)","id":1705,"name":"_updated_key","nodeType":"Function","startLoc":530,"text":"def _updated_key(self, new_key):\n iter_new_key = iter(expanded_indexer(new_key.tuple, self.ndim))\n full_key = []\n for size, k in zip(self.array.shape, self.key.tuple):\n if isinstance(k, integer_types):\n full_key.append(k)\n else:\n full_key.append(_index_indexer_1d(k, next(iter_new_key), size))\n full_key = tuple(full_key)\n\n if all(isinstance(k, integer_types + (slice,)) for k in full_key):\n return BasicIndexer(full_key)\n return OuterIndexer(full_key)"},{"col":0,"comment":"Get the keywords needed to look up the version information.","endLoc":29,"header":"def get_keywords()","id":1706,"name":"get_keywords","nodeType":"Function","startLoc":19,"text":"def get_keywords():\n \"\"\"Get the keywords needed to look up the version information.\"\"\"\n # these strings will be replaced by git during git-archive.\n # setup.py/versioneer.py will grep for the variable names, so they must\n # each be defined on a line of their own. 
_version.py will just call\n # get_keywords().\n git_refnames = \"\"\n git_full = \"863e49066ca4d61c9adfe62aca3bf21b90e1af8c\"\n git_date = \"2019-10-12 17:33:33 -0400\"\n keywords = {\"refnames\": git_refnames, \"full\": git_full, \"date\": git_date}\n return keywords"},{"col":0,"comment":"null","endLoc":141,"header":"def _check_shape_tile_ids(combined_tile_ids)","id":1707,"name":"_check_shape_tile_ids","nodeType":"Function","startLoc":118,"text":"def _check_shape_tile_ids(combined_tile_ids):\n tile_ids = combined_tile_ids.keys()\n\n # Check all tuples are the same length\n # i.e. check that all lists are nested to the same depth\n nesting_depths = [len(tile_id) for tile_id in tile_ids]\n if not nesting_depths:\n nesting_depths = [0]\n if not set(nesting_depths) == {nesting_depths[0]}:\n raise ValueError(\n \"The supplied objects do not form a hypercube because\"\n \" sub-lists do not have consistent depths\"\n )\n\n # Check all lists along one dimension are same length\n for dim in range(nesting_depths[0]):\n indices_along_dim = [tile_id[dim] for tile_id in tile_ids]\n occurrences = Counter(indices_along_dim)\n if len(set(occurrences.values())) != 1:\n raise ValueError(\n \"The supplied objects do not form a hypercube \"\n \"because sub-lists do not have consistent \"\n \"lengths along dimension\" + str(dim)\n )"},{"col":0,"comment":"Get version information from git keywords.","endLoc":229,"header":"@register_vcs_handler(\"git\", \"keywords\")\ndef git_versions_from_keywords(keywords, tag_prefix, verbose)","id":1708,"name":"git_versions_from_keywords","nodeType":"Function","startLoc":170,"text":"@register_vcs_handler(\"git\", \"keywords\")\ndef git_versions_from_keywords(keywords, tag_prefix, verbose):\n \"\"\"Get version information from git keywords.\"\"\"\n if not keywords:\n raise NotThisMethod(\"no keywords at all, weird\")\n date = keywords.get(\"date\")\n if date is not None:\n # git-2.2.0 added \"%cI\", which expands to an ISO-8601 -compliant\n # datestamp. However we prefer \"%ci\" (which expands to an \"ISO-8601\n # -like\" string, which we must then edit to make compliant), because\n # it's been around since git-1.5.3, and it's too difficult to\n # discover which version we're using, or to work around using an\n # older one.\n date = date.strip().replace(\" \", \"T\", 1).replace(\" \", \"\", 1)\n refnames = keywords[\"refnames\"].strip()\n if refnames.startswith(\"$Format\"):\n if verbose:\n print(\"keywords are unexpanded, not using\")\n raise NotThisMethod(\"unexpanded keywords, not a git-archive tarball\")\n refs = {r.strip() for r in refnames.strip(\"()\").split(\",\")}\n # starting in git-1.8.3, tags are listed as \"tag: foo-1.0\" instead of\n # just \"foo-1.0\". If we see a \"tag: \" prefix, prefer those.\n TAG = \"tag: \"\n tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}\n if not tags:\n # Either we're using git < 1.8.3, or there really are no tags. We use\n # a heuristic: assume all version tags have a digit. The old git %d\n # expansion behaves like git log --decorate=short and strips out the\n # refs/heads/ and refs/tags/ prefixes that would let us distinguish\n # between branches and tags. 
By ignoring refnames without digits, we\n # filter out many common branch names like \"release\" and\n # \"stabilization\", as well as \"HEAD\" and \"master\".\n tags = {r for r in refs if re.search(r\"\\d\", r)}\n if verbose:\n print(\"discarding '%s', no digits\" % \",\".join(refs - tags))\n if verbose:\n print(\"likely tags: %s\" % \",\".join(sorted(tags)))\n for ref in sorted(tags):\n # sorting will prefer e.g. \"2.0\" over \"2.0rc1\"\n if ref.startswith(tag_prefix):\n r = ref[len(tag_prefix) :]\n if verbose:\n print(\"picking %s\" % r)\n return {\n \"version\": r,\n \"full-revisionid\": keywords[\"full\"].strip(),\n \"dirty\": False,\n \"error\": None,\n \"date\": date,\n }\n # no suitable tags, so version is \"0+unknown\", but full hex is still there\n if verbose:\n print(\"no suitable tags, using unknown + full revision id\")\n return {\n \"version\": \"0+unknown\",\n \"full-revisionid\": keywords[\"full\"].strip(),\n \"dirty\": False,\n \"error\": \"no suitable tags\",\n \"date\": None,\n }"},{"col":0,"comment":"Decorator to mark a method as the handler for a particular VCS.","endLoc":68,"header":"def register_vcs_handler(vcs, method)","id":1709,"name":"register_vcs_handler","nodeType":"Function","startLoc":58,"text":"def register_vcs_handler(vcs, method): # decorator\n \"\"\"Decorator to mark a method as the handler for a particular VCS.\"\"\"\n\n def decorate(f):\n \"\"\"Store f in HANDLERS[vcs][method].\"\"\"\n if vcs not in HANDLERS:\n HANDLERS[vcs] = {}\n HANDLERS[vcs][method] = f\n return f\n\n return decorate"},{"col":0,"comment":"null","endLoc":303,"header":"def _index_indexer_1d(old_indexer, applied_indexer, size)","id":1710,"name":"_index_indexer_1d","nodeType":"Function","startLoc":291,"text":"def _index_indexer_1d(old_indexer, applied_indexer, size):\n assert isinstance(applied_indexer, integer_types + (slice, np.ndarray))\n if isinstance(applied_indexer, slice) and applied_indexer == slice(None):\n # shortcut for the usual case\n return old_indexer\n if isinstance(old_indexer, slice):\n if isinstance(applied_indexer, slice):\n indexer = slice_slice(old_indexer, applied_indexer, size)\n else:\n indexer = _expand_slice(old_indexer, size)[applied_indexer]\n else:\n indexer = old_indexer[applied_indexer]\n return indexer"},{"col":0,"comment":"Given a slice and the size of the dimension to which it will be applied,\n index it with another slice to return a new slice equivalent to applying\n the slices sequentially\n ","endLoc":288,"header":"def slice_slice(old_slice, applied_slice, size)","id":1711,"name":"slice_slice","nodeType":"Function","startLoc":269,"text":"def slice_slice(old_slice, applied_slice, size):\n \"\"\"Given a slice and the size of the dimension to which it will be applied,\n index it with another slice to return a new slice equivalent to applying\n the slices sequentially\n \"\"\"\n step = (old_slice.step or 1) * (applied_slice.step or 1)\n\n # For now, use the hack of turning old_slice into an ndarray to reconstruct\n # the slice start and stop. 
This is not entirely ideal, but it is still\n # definitely better than leaving the indexer as an array.\n items = _expand_slice(old_slice, size)[applied_slice]\n if len(items) > 0:\n start = items[0]\n stop = items[-1] + int(np.sign(step))\n if stop < 0:\n stop = None\n else:\n start = 0\n stop = 0\n return slice(start, stop, step)"},{"col":0,"comment":"null","endLoc":49,"header":"def _expand_slice(slice_, size)","id":1712,"name":"_expand_slice","nodeType":"Function","startLoc":48,"text":"def _expand_slice(slice_, size):\n return np.arange(*slice_.indices(size))"},{"col":4,"comment":"Drop dimensions and associated variables from this dataset.\n\n Parameters\n ----------\n drop_dims : hashable or iterable of hashable\n Dimension or dimensions to drop.\n errors: {'raise', 'ignore'}, optional\n If 'raise' (default), raises a ValueError error if any of the\n dimensions passed are not in the dataset. If 'ignore', any given\n labels that are in the dataset are dropped and no error is raised.\n\n Returns\n -------\n obj : Dataset\n The dataset without the given dimensions (or any variables\n containing those dimensions)\n errors: {'raise', 'ignore'}, optional\n If 'raise' (default), raises a ValueError error if\n any of the dimensions passed are not\n in the dataset. If 'ignore', any given dimensions that are in the\n dataset are dropped and no error is raised.\n ","endLoc":3681,"header":"def drop_dims(\n self, drop_dims: Union[Hashable, Iterable[Hashable]], *, errors: str = \"raise\"\n ) -> \"Dataset\"","id":1713,"name":"drop_dims","nodeType":"Function","startLoc":3640,"text":"def drop_dims(\n self, drop_dims: Union[Hashable, Iterable[Hashable]], *, errors: str = \"raise\"\n ) -> \"Dataset\":\n \"\"\"Drop dimensions and associated variables from this dataset.\n\n Parameters\n ----------\n drop_dims : hashable or iterable of hashable\n Dimension or dimensions to drop.\n errors: {'raise', 'ignore'}, optional\n If 'raise' (default), raises a ValueError error if any of the\n dimensions passed are not in the dataset. If 'ignore', any given\n labels that are in the dataset are dropped and no error is raised.\n\n Returns\n -------\n obj : Dataset\n The dataset without the given dimensions (or any variables\n containing those dimensions)\n errors: {'raise', 'ignore'}, optional\n If 'raise' (default), raises a ValueError error if\n any of the dimensions passed are not\n in the dataset. 
If 'ignore', any given dimensions that are in the\n dataset are dropped and no error is raised.\n \"\"\"\n if errors not in [\"raise\", \"ignore\"]:\n raise ValueError('errors must be either \"raise\" or \"ignore\"')\n\n if isinstance(drop_dims, str) or not isinstance(drop_dims, Iterable):\n drop_dims = {drop_dims}\n else:\n drop_dims = set(drop_dims)\n\n if errors == \"raise\":\n missing_dims = drop_dims - set(self.dims)\n if missing_dims:\n raise ValueError(\n \"Dataset does not contain the dimensions: %s\" % missing_dims\n )\n\n drop_vars = {k for k, v in self._variables.items() if set(v.dims) & drop_dims}\n return self._drop_vars(drop_vars)"},{"col":0,"comment":"\n Combines an N-dimensional structure of datasets into one by applying a\n series of either concat and merge operations along each dimension.\n\n No checks are performed on the consistency of the datasets, concat_dims or\n tile_IDs, because it is assumed that this has already been done.\n\n Parameters\n ----------\n combined_ids : Dict[Tuple[int, ...]], xarray.Dataset]\n Structure containing all datasets to be concatenated with \"tile_IDs\" as\n keys, which specify position within the desired final combined result.\n concat_dims : sequence of str\n The dimensions along which the datasets should be concatenated. Must be\n in order, and the length must match the length of the tuples used as\n keys in combined_ids. If the string is a dimension name then concat\n along that dimension, if it is None then merge.\n\n Returns\n -------\n combined_ds : xarray.Dataset\n ","endLoc":201,"header":"def _combine_nd(\n combined_ids,\n concat_dims,\n data_vars=\"all\",\n coords=\"different\",\n compat=\"no_conflicts\",\n fill_value=dtypes.NA,\n join=\"outer\",\n)","id":1714,"name":"_combine_nd","nodeType":"Function","startLoc":144,"text":"def _combine_nd(\n combined_ids,\n concat_dims,\n data_vars=\"all\",\n coords=\"different\",\n compat=\"no_conflicts\",\n fill_value=dtypes.NA,\n join=\"outer\",\n):\n \"\"\"\n Combines an N-dimensional structure of datasets into one by applying a\n series of either concat and merge operations along each dimension.\n\n No checks are performed on the consistency of the datasets, concat_dims or\n tile_IDs, because it is assumed that this has already been done.\n\n Parameters\n ----------\n combined_ids : Dict[Tuple[int, ...]], xarray.Dataset]\n Structure containing all datasets to be concatenated with \"tile_IDs\" as\n keys, which specify position within the desired final combined result.\n concat_dims : sequence of str\n The dimensions along which the datasets should be concatenated. Must be\n in order, and the length must match the length of the tuples used as\n keys in combined_ids. If the string is a dimension name then concat\n along that dimension, if it is None then merge.\n\n Returns\n -------\n combined_ds : xarray.Dataset\n \"\"\"\n\n example_tile_id = next(iter(combined_ids.keys()))\n\n n_dims = len(example_tile_id)\n if len(concat_dims) != n_dims:\n raise ValueError(\n \"concat_dims has length {} but the datasets \"\n \"passed are nested in a {}-dimensional structure\".format(\n len(concat_dims), n_dims\n )\n )\n\n # Each iteration of this loop reduces the length of the tile_ids tuples\n # by one. 
It always combines along the first dimension, removing the first\n # element of the tuple\n for concat_dim in concat_dims:\n combined_ids = _combine_all_along_first_dim(\n combined_ids,\n dim=concat_dim,\n data_vars=data_vars,\n coords=coords,\n compat=compat,\n fill_value=fill_value,\n join=join,\n )\n (combined_ds,) = combined_ids.values()\n return combined_ds"},{"col":4,"comment":"Fill missing values in this object.\n\n This operation follows the normal broadcasting and alignment rules that\n xarray uses for binary arithmetic, except the result is aligned to this\n object (``join='left'``) instead of aligned to the intersection of\n index coordinates (``join='inner'``).\n\n Parameters\n ----------\n value : scalar, ndarray or DataArray\n Used to fill all matching missing values in this array. If the\n argument is a DataArray, it is first aligned with (reindexed to)\n this array.\n\n Returns\n -------\n DataArray\n ","endLoc":1981,"header":"def fillna(self, value: Any) -> \"DataArray\"","id":1715,"name":"fillna","nodeType":"Function","startLoc":1956,"text":"def fillna(self, value: Any) -> \"DataArray\":\n \"\"\"Fill missing values in this object.\n\n This operation follows the normal broadcasting and alignment rules that\n xarray uses for binary arithmetic, except the result is aligned to this\n object (``join='left'``) instead of aligned to the intersection of\n index coordinates (``join='inner'``).\n\n Parameters\n ----------\n value : scalar, ndarray or DataArray\n Used to fill all matching missing values in this array. If the\n argument is a DataArray, it is first aligned with (reindexed to)\n this array.\n\n Returns\n -------\n DataArray\n \"\"\"\n if utils.is_dict_like(value):\n raise TypeError(\n \"cannot provide fill value as a dictionary with \"\n \"fillna on a DataArray\"\n )\n out = ops.fillna(self, value)\n return out"},{"col":4,"comment":"Interpolate values according to different methods.\n\n Parameters\n ----------\n dim : str\n Specifies the dimension along which to interpolate.\n method : {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',\n 'polynomial', 'barycentric', 'krog', 'pchip',\n 'spline', 'akima'}, optional\n String indicating which method to use for interpolation:\n\n - 'linear': linear interpolation (Default). Additional keyword\n arguments are passed to ``numpy.interp``\n - 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',\n 'polynomial': are passed to ``scipy.interpolate.interp1d``. If\n method=='polynomial', the ``order`` keyword argument must also be\n provided.\n - 'barycentric', 'krog', 'pchip', 'spline', and `akima`: use their\n respective``scipy.interpolate`` classes.\n use_coordinate : boolean or str, default True\n Specifies which index to use as the x values in the interpolation\n formulated as `y = f(x)`. If False, values are treated as if\n eqaully-spaced along `dim`. If True, the IndexVariable `dim` is\n used. If use_coordinate is a string, it specifies the name of a\n coordinate variariable to use as the index.\n limit : int, default None\n Maximum number of consecutive NaNs to fill. 
Must be greater than 0\n or None for no limit.\n\n Returns\n -------\n DataArray\n\n See also\n --------\n numpy.interp\n scipy.interpolate\n ","endLoc":2038,"header":"def interpolate_na(\n self,\n dim=None,\n method: str = \"linear\",\n limit: int = None,\n use_coordinate: Union[bool, str] = True,\n **kwargs: Any\n ) -> \"DataArray\"","id":1716,"name":"interpolate_na","nodeType":"Function","startLoc":1983,"text":"def interpolate_na(\n self,\n dim=None,\n method: str = \"linear\",\n limit: int = None,\n use_coordinate: Union[bool, str] = True,\n **kwargs: Any\n ) -> \"DataArray\":\n \"\"\"Interpolate values according to different methods.\n\n Parameters\n ----------\n dim : str\n Specifies the dimension along which to interpolate.\n method : {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',\n 'polynomial', 'barycentric', 'krog', 'pchip',\n 'spline', 'akima'}, optional\n String indicating which method to use for interpolation:\n\n - 'linear': linear interpolation (Default). Additional keyword\n arguments are passed to ``numpy.interp``\n - 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',\n 'polynomial': are passed to ``scipy.interpolate.interp1d``. If\n method=='polynomial', the ``order`` keyword argument must also be\n provided.\n - 'barycentric', 'krog', 'pchip', 'spline', and `akima`: use their\n respective``scipy.interpolate`` classes.\n use_coordinate : boolean or str, default True\n Specifies which index to use as the x values in the interpolation\n formulated as `y = f(x)`. If False, values are treated as if\n eqaully-spaced along `dim`. If True, the IndexVariable `dim` is\n used. If use_coordinate is a string, it specifies the name of a\n coordinate variariable to use as the index.\n limit : int, default None\n Maximum number of consecutive NaNs to fill. Must be greater than 0\n or None for no limit.\n\n Returns\n -------\n DataArray\n\n See also\n --------\n numpy.interp\n scipy.interpolate\n \"\"\"\n from .missing import interp_na\n\n return interp_na(\n self,\n dim=dim,\n method=method,\n limit=limit,\n use_coordinate=use_coordinate,\n **kwargs\n )"},{"col":4,"comment":"Return a new Dataset object with all array dimensions transposed.\n\n Although the order of dimensions on each array will change, the dataset\n dimensions themselves will remain in fixed (sorted) order.\n\n Parameters\n ----------\n *dims : Hashable, optional\n By default, reverse the dimensions on each array. Otherwise,\n reorder the dimensions to this order.\n\n Returns\n -------\n transposed : Dataset\n Each array in the dataset (including) coordinates will be\n transposed to the given order.\n\n Notes\n -----\n This operation returns a view of each array's data. It is\n lazy for dask-backed DataArrays but not for numpy-backed DataArrays\n -- the data will be fully loaded into memory.\n\n See Also\n --------\n numpy.transpose\n DataArray.transpose\n ","endLoc":3722,"header":"def transpose(self, *dims: Hashable) -> \"Dataset\"","id":1717,"name":"transpose","nodeType":"Function","startLoc":3683,"text":"def transpose(self, *dims: Hashable) -> \"Dataset\":\n \"\"\"Return a new Dataset object with all array dimensions transposed.\n\n Although the order of dimensions on each array will change, the dataset\n dimensions themselves will remain in fixed (sorted) order.\n\n Parameters\n ----------\n *dims : Hashable, optional\n By default, reverse the dimensions on each array. 
Otherwise,\n reorder the dimensions to this order.\n\n Returns\n -------\n transposed : Dataset\n Each array in the dataset (including) coordinates will be\n transposed to the given order.\n\n Notes\n -----\n This operation returns a view of each array's data. It is\n lazy for dask-backed DataArrays but not for numpy-backed DataArrays\n -- the data will be fully loaded into memory.\n\n See Also\n --------\n numpy.transpose\n DataArray.transpose\n \"\"\"\n if dims:\n if set(dims) ^ set(self.dims):\n raise ValueError(\n \"arguments to transpose (%s) must be \"\n \"permuted dataset dimensions (%s)\" % (dims, tuple(self.dims))\n )\n ds = self.copy()\n for name, var in self._variables.items():\n var_dims = tuple(dim for dim in dims if dim in var.dims)\n ds._variables[name] = var.transpose(*var_dims)\n return ds"},{"col":0,"comment":"Interpolate values according to different methods.\n ","endLoc":256,"header":"def interp_na(\n self, dim=None, use_coordinate=True, method=\"linear\", limit=None, **kwargs\n)","id":1718,"name":"interp_na","nodeType":"Function","startLoc":222,"text":"def interp_na(\n self, dim=None, use_coordinate=True, method=\"linear\", limit=None, **kwargs\n):\n \"\"\"Interpolate values according to different methods.\n \"\"\"\n if dim is None:\n raise NotImplementedError(\"dim is a required argument\")\n\n if limit is not None:\n valids = _get_valid_fill_mask(self, dim, limit)\n\n # method\n index = get_clean_interp_index(self, dim, use_coordinate=use_coordinate)\n interp_class, kwargs = _get_interpolator(method, **kwargs)\n interpolator = partial(func_interpolate_na, interp_class, **kwargs)\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \"overflow\", RuntimeWarning)\n warnings.filterwarnings(\"ignore\", \"invalid value\", RuntimeWarning)\n arr = apply_ufunc(\n interpolator,\n index,\n self,\n input_core_dims=[[dim], [dim]],\n output_core_dims=[[dim]],\n output_dtypes=[self.dtype],\n dask=\"parallelized\",\n vectorize=True,\n keep_attrs=True,\n ).transpose(*self.dims)\n\n if limit is not None:\n arr = arr.where(valids)\n\n return arr"},{"col":0,"comment":"helper function to determine values that can be filled when limit is not\n None","endLoc":430,"header":"def _get_valid_fill_mask(arr, dim, limit)","id":1719,"name":"_get_valid_fill_mask","nodeType":"Function","startLoc":419,"text":"def _get_valid_fill_mask(arr, dim, limit):\n \"\"\"helper function to determine values that can be filled when limit is not\n None\"\"\"\n kw = {dim: limit + 1}\n # we explicitly use construct method to avoid copy.\n new_dim = utils.get_temp_dimname(arr.dims, \"_window\")\n return (\n arr.isnull()\n .rolling(min_periods=1, **kw)\n .construct(new_dim, fill_value=False)\n .sum(new_dim, skipna=False)\n ) <= limit"},{"col":0,"comment":"get index to use for x values in interpolation.\n\n If use_coordinate is True, the coordinate that shares the name of the\n dimension along which interpolation is being performed will be used as the\n x values.\n\n If use_coordinate is False, the x values are set as an equally spaced\n sequence.\n ","endLoc":219,"header":"def get_clean_interp_index(arr, dim, use_coordinate=True)","id":1720,"name":"get_clean_interp_index","nodeType":"Function","startLoc":181,"text":"def get_clean_interp_index(arr, dim, use_coordinate=True):\n \"\"\"get index to use for x values in interpolation.\n\n If use_coordinate is True, the coordinate that shares the name of the\n dimension along which interpolation is being performed will be used as the\n x values.\n\n If 
use_coordinate is False, the x values are set as an equally spaced\n sequence.\n \"\"\"\n if use_coordinate:\n if use_coordinate is True:\n index = arr.get_index(dim)\n else:\n index = arr.coords[use_coordinate]\n if index.ndim != 1:\n raise ValueError(\n \"Coordinates used for interpolation must be 1D, \"\n \"%s is %dD.\" % (use_coordinate, index.ndim)\n )\n\n # raise if index cannot be cast to a float (e.g. MultiIndex)\n try:\n index = index.values.astype(np.float64)\n except (TypeError, ValueError):\n # pandas raises a TypeError\n # xarray/nuppy raise a ValueError\n raise TypeError(\n \"Index must be castable to float64 to support\"\n \"interpolation, got: %s\" % type(index)\n )\n # check index sorting now so we can skip it later\n if not (np.diff(index) > 0).all():\n raise ValueError(\"Index must be monotonicly increasing\")\n else:\n axis = arr.get_axis_num(dim)\n index = np.arange(arr.shape[axis], dtype=np.float64)\n\n return index"},{"col":4,"comment":"null","endLoc":552,"header":"@property\n def shape(self)","id":1721,"name":"shape","nodeType":"Function","startLoc":544,"text":"@property\n def shape(self):\n shape = []\n for size, k in zip(self.array.shape, self.key.tuple):\n if isinstance(k, slice):\n shape.append(len(range(*k.indices(size))))\n elif isinstance(k, np.ndarray):\n shape.append(k.size)\n return tuple(shape)"},{"col":4,"comment":"Fill missing values in this object.\n\n This operation follows the normal broadcasting and alignment rules that\n xarray uses for binary arithmetic, except the result is aligned to this\n object (``join='left'``) instead of aligned to the intersection of\n index coordinates (``join='inner'``).\n\n Parameters\n ----------\n value : scalar, ndarray, DataArray, dict or Dataset\n Used to fill all matching missing values in this dataset's data\n variables. Scalars, ndarrays or DataArrays arguments are used to\n fill all data with aligned coordinates (for DataArrays).\n Dictionaries or datasets match data variables and then align\n coordinates if necessary.\n\n Returns\n -------\n Dataset\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> ds = xr.Dataset(\n ... {\n ... \"A\": (\"x\", [np.nan, 2, np.nan, 0]),\n ... \"B\": (\"x\", [3, 4, np.nan, 1]),\n ... \"C\": (\"x\", [np.nan, np.nan, np.nan, 5]),\n ... \"D\": (\"x\", [np.nan, 3, np.nan, 4])\n ... },\n ... 
coords={\"x\": [0, 1, 2, 3]})\n >>> ds\n \n Dimensions: (x: 4)\n Coordinates:\n * x (x) int64 0 1 2 3\n Data variables:\n A (x) float64 nan 2.0 nan 0.0\n B (x) float64 3.0 4.0 nan 1.0\n C (x) float64 nan nan nan 5.0\n D (x) float64 nan 3.0 nan 4.0\n\n Replace all `NaN` values with 0s.\n\n >>> ds.fillna(0)\n \n Dimensions: (x: 4)\n Coordinates:\n * x (x) int64 0 1 2 3\n Data variables:\n A (x) float64 0.0 2.0 0.0 0.0\n B (x) float64 3.0 4.0 0.0 1.0\n C (x) float64 0.0 0.0 0.0 5.0\n D (x) float64 0.0 3.0 0.0 4.0\n\n Replace all `NaN` elements in column ‘A’, ‘B’, ‘C’, and ‘D’, with 0, 1, 2, and 3 respectively.\n\n >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}\n >>> ds.fillna(value=values)\n \n Dimensions: (x: 4)\n Coordinates:\n * x (x) int64 0 1 2 3\n Data variables:\n A (x) float64 0.0 2.0 0.0 0.0\n B (x) float64 3.0 4.0 1.0 1.0\n C (x) float64 2.0 2.0 2.0 5.0\n D (x) float64 3.0 3.0 3.0 4.0\n ","endLoc":3865,"header":"def fillna(self, value: Any) -> \"Dataset\"","id":1722,"name":"fillna","nodeType":"Function","startLoc":3785,"text":"def fillna(self, value: Any) -> \"Dataset\":\n \"\"\"Fill missing values in this object.\n\n This operation follows the normal broadcasting and alignment rules that\n xarray uses for binary arithmetic, except the result is aligned to this\n object (``join='left'``) instead of aligned to the intersection of\n index coordinates (``join='inner'``).\n\n Parameters\n ----------\n value : scalar, ndarray, DataArray, dict or Dataset\n Used to fill all matching missing values in this dataset's data\n variables. Scalars, ndarrays or DataArrays arguments are used to\n fill all data with aligned coordinates (for DataArrays).\n Dictionaries or datasets match data variables and then align\n coordinates if necessary.\n\n Returns\n -------\n Dataset\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> ds = xr.Dataset(\n ... {\n ... \"A\": (\"x\", [np.nan, 2, np.nan, 0]),\n ... \"B\": (\"x\", [3, 4, np.nan, 1]),\n ... \"C\": (\"x\", [np.nan, np.nan, np.nan, 5]),\n ... \"D\": (\"x\", [np.nan, 3, np.nan, 4])\n ... },\n ... 
coords={\"x\": [0, 1, 2, 3]})\n >>> ds\n \n Dimensions: (x: 4)\n Coordinates:\n * x (x) int64 0 1 2 3\n Data variables:\n A (x) float64 nan 2.0 nan 0.0\n B (x) float64 3.0 4.0 nan 1.0\n C (x) float64 nan nan nan 5.0\n D (x) float64 nan 3.0 nan 4.0\n\n Replace all `NaN` values with 0s.\n\n >>> ds.fillna(0)\n \n Dimensions: (x: 4)\n Coordinates:\n * x (x) int64 0 1 2 3\n Data variables:\n A (x) float64 0.0 2.0 0.0 0.0\n B (x) float64 3.0 4.0 0.0 1.0\n C (x) float64 0.0 0.0 0.0 5.0\n D (x) float64 0.0 3.0 0.0 4.0\n\n Replace all `NaN` elements in column ‘A’, ‘B’, ‘C’, and ‘D’, with 0, 1, 2, and 3 respectively.\n\n >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}\n >>> ds.fillna(value=values)\n \n Dimensions: (x: 4)\n Coordinates:\n * x (x) int64 0 1 2 3\n Data variables:\n A (x) float64 0.0 2.0 0.0 0.0\n B (x) float64 3.0 4.0 1.0 1.0\n C (x) float64 2.0 2.0 2.0 5.0\n D (x) float64 3.0 3.0 3.0 4.0\n \"\"\"\n if utils.is_dict_like(value):\n value_keys = getattr(value, \"data_vars\", value).keys()\n if not set(value_keys) <= set(self.data_vars.keys()):\n raise ValueError(\n \"all variables in the argument to `fillna` \"\n \"must be contained in the original dataset\"\n )\n out = ops.fillna(self, value)\n return out"},{"col":0,"comment":"Get version from 'git describe' in the root of the source tree.\n\n This only gets called if the git-archive 'subst' keywords were *not*\n expanded, and _version.py hasn't already been rewritten with a short\n version string, meaning we're inside a checked out source tree.\n ","endLoc":330,"header":"@register_vcs_handler(\"git\", \"pieces_from_vcs\")\ndef git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command)","id":1723,"name":"git_pieces_from_vcs","nodeType":"Function","startLoc":232,"text":"@register_vcs_handler(\"git\", \"pieces_from_vcs\")\ndef git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):\n \"\"\"Get version from 'git describe' in the root of the source tree.\n\n This only gets called if the git-archive 'subst' keywords were *not*\n expanded, and _version.py hasn't already been rewritten with a short\n version string, meaning we're inside a checked out source tree.\n \"\"\"\n GITS = [\"git\"]\n if sys.platform == \"win32\":\n GITS = [\"git.cmd\", \"git.exe\"]\n\n out, rc = run_command(GITS, [\"rev-parse\", \"--git-dir\"], cwd=root, hide_stderr=True)\n if rc != 0:\n if verbose:\n print(\"Directory %s not under git control\" % root)\n raise NotThisMethod(\"'git rev-parse --git-dir' returned error\")\n\n # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]\n # if there isn't one, this yields HEX[-dirty] (no NUM)\n describe_out, rc = run_command(\n GITS,\n [\n \"describe\",\n \"--tags\",\n \"--dirty\",\n \"--always\",\n \"--long\",\n \"--match\",\n \"%s*\" % tag_prefix,\n ],\n cwd=root,\n )\n # --long was added in git-1.5.5\n if describe_out is None:\n raise NotThisMethod(\"'git describe' failed\")\n describe_out = describe_out.strip()\n full_out, rc = run_command(GITS, [\"rev-parse\", \"HEAD\"], cwd=root)\n if full_out is None:\n raise NotThisMethod(\"'git rev-parse' failed\")\n full_out = full_out.strip()\n\n pieces = {}\n pieces[\"long\"] = full_out\n pieces[\"short\"] = full_out[:7] # maybe improved later\n pieces[\"error\"] = None\n\n # parse describe_out. 
It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]\n # TAG might have hyphens.\n git_describe = describe_out\n\n # look for -dirty suffix\n dirty = git_describe.endswith(\"-dirty\")\n pieces[\"dirty\"] = dirty\n if dirty:\n git_describe = git_describe[: git_describe.rindex(\"-dirty\")]\n\n # now we have TAG-NUM-gHEX or HEX\n\n if \"-\" in git_describe:\n # TAG-NUM-gHEX\n mo = re.search(r\"^(.+)-(\\d+)-g([0-9a-f]+)$\", git_describe)\n if not mo:\n # unparseable. Maybe git-describe is misbehaving?\n pieces[\"error\"] = \"unable to parse git-describe output: '%s'\" % describe_out\n return pieces\n\n # tag\n full_tag = mo.group(1)\n if not full_tag.startswith(tag_prefix):\n if verbose:\n fmt = \"tag '%s' doesn't start with prefix '%s'\"\n print(fmt % (full_tag, tag_prefix))\n pieces[\"error\"] = \"tag '%s' doesn't start with prefix '%s'\" % (\n full_tag,\n tag_prefix,\n )\n return pieces\n pieces[\"closest-tag\"] = full_tag[len(tag_prefix) :]\n\n # distance: number of commits since tag\n pieces[\"distance\"] = int(mo.group(2))\n\n # commit: short hex revision ID\n pieces[\"short\"] = mo.group(3)\n\n else:\n # HEX: no tags\n pieces[\"closest-tag\"] = None\n count_out, rc = run_command(GITS, [\"rev-list\", \"HEAD\", \"--count\"], cwd=root)\n pieces[\"distance\"] = int(count_out) # total number of commits\n\n # commit date: see ISO-8601 comment in git_versions_from_keywords()\n date = run_command(GITS, [\"show\", \"-s\", \"--format=%ci\", \"HEAD\"], cwd=root)[\n 0\n ].strip()\n pieces[\"date\"] = date.strip().replace(\" \", \"T\", 1).replace(\" \", \"\", 1)\n\n return pieces"},{"col":4,"comment":"null","endLoc":556,"header":"def __array__(self, dtype=None)","id":1724,"name":"__array__","nodeType":"Function","startLoc":554,"text":"def __array__(self, dtype=None):\n array = as_indexable(self.array)\n return np.asarray(array[self.key], dtype=None)"},{"col":0,"comment":"null","endLoc":222,"header":"def _combine_all_along_first_dim(\n combined_ids, dim, data_vars, coords, compat, fill_value=dtypes.NA, join=\"outer\"\n)","id":1725,"name":"_combine_all_along_first_dim","nodeType":"Function","startLoc":204,"text":"def _combine_all_along_first_dim(\n combined_ids, dim, data_vars, coords, compat, fill_value=dtypes.NA, join=\"outer\"\n):\n\n # Group into lines of datasets which must be combined along dim\n # need to sort by _new_tile_id first for groupby to work\n # TODO: is the sorted need?\n combined_ids = dict(sorted(combined_ids.items(), key=_new_tile_id))\n grouped = itertools.groupby(combined_ids.items(), key=_new_tile_id)\n\n # Combine all of these datasets along dim\n new_combined_ids = {}\n for new_id, group in grouped:\n combined_ids = dict(sorted(group))\n datasets = combined_ids.values()\n new_combined_ids[new_id] = _combine_1d(\n datasets, dim, compat, data_vars, coords, fill_value, join\n )\n return new_combined_ids"},{"attributeType":"null","col":0,"comment":"null","endLoc":18,"id":1726,"name":"_default_cache_dir","nodeType":"Attribute","startLoc":18,"text":"_default_cache_dir"},{"col":4,"comment":"Interpolate values according to different methods.\n\n Parameters\n ----------\n dim : Hashable\n Specifies the dimension along which to interpolate.\n method : {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',\n 'polynomial', 'barycentric', 'krog', 'pchip',\n 'spline'}, optional\n String indicating which method to use for interpolation:\n\n - 'linear': linear interpolation (Default). 
Additional keyword\n arguments are passed to ``numpy.interp``\n - 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',\n 'polynomial': are passed to ``scipy.interpolate.interp1d``. If\n method=='polynomial', the ``order`` keyword argument must also be\n provided.\n - 'barycentric', 'krog', 'pchip', 'spline': use their respective\n ``scipy.interpolate`` classes.\n use_coordinate : boolean or str, default True\n Specifies which index to use as the x values in the interpolation\n formulated as `y = f(x)`. If False, values are treated as if\n eqaully-spaced along `dim`. If True, the IndexVariable `dim` is\n used. If use_coordinate is a string, it specifies the name of a\n coordinate variariable to use as the index.\n limit : int, default None\n Maximum number of consecutive NaNs to fill. Must be greater than 0\n or None for no limit.\n kwargs : any\n parameters passed verbatim to the underlying interplation function\n\n Returns\n -------\n Dataset\n\n See also\n --------\n numpy.interp\n scipy.interpolate\n ","endLoc":3926,"header":"def interpolate_na(\n self,\n dim: Hashable = None,\n method: str = \"linear\",\n limit: int = None,\n use_coordinate: Union[bool, Hashable] = True,\n **kwargs: Any,\n ) -> \"Dataset\"","id":1727,"name":"interpolate_na","nodeType":"Function","startLoc":3867,"text":"def interpolate_na(\n self,\n dim: Hashable = None,\n method: str = \"linear\",\n limit: int = None,\n use_coordinate: Union[bool, Hashable] = True,\n **kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Interpolate values according to different methods.\n\n Parameters\n ----------\n dim : Hashable\n Specifies the dimension along which to interpolate.\n method : {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',\n 'polynomial', 'barycentric', 'krog', 'pchip',\n 'spline'}, optional\n String indicating which method to use for interpolation:\n\n - 'linear': linear interpolation (Default). Additional keyword\n arguments are passed to ``numpy.interp``\n - 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',\n 'polynomial': are passed to ``scipy.interpolate.interp1d``. If\n method=='polynomial', the ``order`` keyword argument must also be\n provided.\n - 'barycentric', 'krog', 'pchip', 'spline': use their respective\n ``scipy.interpolate`` classes.\n use_coordinate : boolean or str, default True\n Specifies which index to use as the x values in the interpolation\n formulated as `y = f(x)`. If False, values are treated as if\n eqaully-spaced along `dim`. If True, the IndexVariable `dim` is\n used. If use_coordinate is a string, it specifies the name of a\n coordinate variariable to use as the index.\n limit : int, default None\n Maximum number of consecutive NaNs to fill. 
Must be greater than 0\n or None for no limit.\n kwargs : any\n parameters passed verbatim to the underlying interplation function\n\n Returns\n -------\n Dataset\n\n See also\n --------\n numpy.interp\n scipy.interpolate\n \"\"\"\n from .missing import interp_na, _apply_over_vars_with_dim\n\n new = _apply_over_vars_with_dim(\n interp_na,\n self,\n dim=dim,\n method=method,\n limit=limit,\n use_coordinate=use_coordinate,\n **kwargs,\n )\n return new"},{"col":0,"comment":"\n Applies either concat or merge to 1D list of datasets depending on value\n of concat_dim\n ","endLoc":264,"header":"def _combine_1d(\n datasets,\n concat_dim,\n compat=\"no_conflicts\",\n data_vars=\"all\",\n coords=\"different\",\n fill_value=dtypes.NA,\n join=\"outer\",\n)","id":1728,"name":"_combine_1d","nodeType":"Function","startLoc":225,"text":"def _combine_1d(\n datasets,\n concat_dim,\n compat=\"no_conflicts\",\n data_vars=\"all\",\n coords=\"different\",\n fill_value=dtypes.NA,\n join=\"outer\",\n):\n \"\"\"\n Applies either concat or merge to 1D list of datasets depending on value\n of concat_dim\n \"\"\"\n\n if concat_dim is not None:\n try:\n combined = concat(\n datasets,\n dim=concat_dim,\n data_vars=data_vars,\n coords=coords,\n compat=compat,\n fill_value=fill_value,\n join=join,\n )\n except ValueError as err:\n if \"encountered unexpected variable\" in str(err):\n raise ValueError(\n \"These objects cannot be combined using only \"\n \"xarray.combine_nested, instead either use \"\n \"xarray.combine_by_coords, or do it manually \"\n \"with xarray.concat, xarray.merge and \"\n \"xarray.align\"\n )\n else:\n raise\n else:\n combined = merge(datasets, compat=compat, fill_value=fill_value, join=join)\n\n return combined"},{"col":0,"comment":"Wrapper for datasets\n ","endLoc":178,"header":"def _apply_over_vars_with_dim(func, self, dim=None, **kwargs)","id":1729,"name":"_apply_over_vars_with_dim","nodeType":"Function","startLoc":167,"text":"def _apply_over_vars_with_dim(func, self, dim=None, **kwargs):\n \"\"\"Wrapper for datasets\n \"\"\"\n ds = type(self)(coords=self.coords, attrs=self.attrs)\n\n for name, var in self.data_vars.items():\n if dim in var.dims:\n ds[name] = func(var, dim=dim, **kwargs)\n else:\n ds[name] = var\n\n return ds"},{"col":0,"comment":"","endLoc":7,"header":"tutorial.py#","id":1730,"name":"","nodeType":"Function","startLoc":1,"text":"\"\"\"\nUseful for:\n\n* users learning xarray\n* building tutorials in the documentation.\n\n\"\"\"\n\n_default_cache_dir = _os.sep.join((\"~\", \".xarray_tutorial_data\"))"},{"col":0,"comment":"\n Attempt to auto-magically combine the given datasets into one by using\n dimension coordinates.\n\n This method attempts to combine a group of datasets along any number of\n dimensions into a single entity by inspecting coords and metadata and using\n a combination of concat and merge.\n\n Will attempt to order the datasets such that the values in their dimension\n coordinates are monotonic along all dimensions. If it cannot determine the\n order in which to concatenate the datasets, it will raise a ValueError.\n Non-coordinate dimensions will be ignored, as will any coordinate\n dimensions which do not vary between each dataset.\n\n Aligns coordinates, but different variables on datasets can cause it\n to fail under some scenarios. 
In complex cases, you may need to clean up\n your data and use concat/merge explicitly (also see `manual_combine`).\n\n Works well if, for example, you have N years of data and M data variables,\n and each combination of a distinct time period and set of data variables is\n saved as its own dataset. Also useful for if you have a simulation which is\n parallelized in multiple dimensions, but has global coordinates saved in\n each file specifying the positions of points within the global domain.\n\n Parameters\n ----------\n datasets : sequence of xarray.Dataset\n Dataset objects to combine.\n compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional\n String indicating how to compare variables of the same name for\n potential conflicts:\n\n - 'broadcast_equals': all values must be equal when variables are\n broadcast against each other to ensure common dimensions.\n - 'equals': all values and dimensions must be the same.\n - 'identical': all values, dimensions and attributes must be the\n same.\n - 'no_conflicts': only values which are not null in both datasets\n must be equal. The returned dataset then contains the combination\n of all non-null values.\n - 'override': skip comparing and pick variable from first dataset\n data_vars : {'minimal', 'different', 'all' or list of str}, optional\n These data variables will be concatenated together:\n\n * 'minimal': Only data variables in which the dimension already\n appears are included.\n * 'different': Data variables which are not equal (ignoring\n attributes) across all datasets are also concatenated (as well as\n all for which dimension already appears). Beware: this option may\n load the data payload of data variables into memory if they are not\n already loaded.\n * 'all': All data variables will be concatenated.\n * list of str: The listed data variables will be concatenated, in\n addition to the 'minimal' data variables.\n If objects are DataArrays, `data_vars` must be 'all'.\n coords : {'minimal', 'different', 'all' or list of str}, optional\n As per the 'data_vars' kwarg, but for coordinate variables.\n fill_value : scalar, optional\n Value to use for newly missing values\n join : {'outer', 'inner', 'left', 'right', 'exact'}, optional\n String indicating how to combine differing indexes\n (excluding concat_dim) in objects\n\n - 'outer': use the union of object indexes\n - 'inner': use the intersection of object indexes\n - 'left': use indexes from the first object with each dimension\n - 'right': use indexes from the last object with each dimension\n - 'exact': instead of aligning, raise `ValueError` when indexes to be\n aligned are not equal\n - 'override': if indexes are of same size, rewrite indexes to be\n those of the first object with that dimension. Indexes for the same\n dimension must have the same size in all objects.\n\n Returns\n -------\n combined : xarray.Dataset\n\n See also\n --------\n concat\n merge\n combine_nested\n\n Examples\n --------\n\n Combining two datasets using their common dimension coordinates. Notice\n they are concatenated based on the values in their dimension coordinates,\n not on their position in the list passed to `combine_by_coords`.\n\n >>> import numpy as np\n >>> import xarray as xr\n\n >>> x1 = xr.Dataset(\n ... {\n ... \"temperature\": ((\"y\", \"x\"), 20 * np.random.rand(6).reshape(2, 3)),\n ... \"precipitation\": ((\"y\", \"x\"), np.random.rand(6).reshape(2, 3)),\n ... },\n ... coords={\"y\": [0, 1], \"x\": [10, 20, 30]},\n ... )\n >>> x2 = xr.Dataset(\n ... 
{\n ... \"temperature\": ((\"y\", \"x\"), 20 * np.random.rand(6).reshape(2, 3)),\n ... \"precipitation\": ((\"y\", \"x\"), np.random.rand(6).reshape(2, 3)),\n ... },\n ... coords={\"y\": [2, 3], \"x\": [10, 20, 30]},\n ... )\n >>> x3 = xr.Dataset(\n ... {\n ... \"temperature\": ((\"y\", \"x\"), 20 * np.random.rand(6).reshape(2, 3)),\n ... \"precipitation\": ((\"y\", \"x\"), np.random.rand(6).reshape(2, 3)),\n ... },\n ... coords={\"y\": [2, 3], \"x\": [40, 50, 60]},\n ... )\n\n >>> x1\n \n Dimensions: (x: 3, y: 2)\n Coordinates:\n * y (y) int64 0 1\n * x (x) int64 10 20 30\n Data variables:\n temperature (y, x) float64 1.654 10.63 7.015 2.543 13.93 9.436\n precipitation (y, x) float64 0.2136 0.9974 0.7603 0.4679 0.3115 0.945\n\n >>> x2\n \n Dimensions: (x: 3, y: 2)\n Coordinates:\n * y (y) int64 2 3\n * x (x) int64 10 20 30\n Data variables:\n temperature (y, x) float64 9.341 0.1251 6.269 7.709 8.82 2.316\n precipitation (y, x) float64 0.1728 0.1178 0.03018 0.6509 0.06938 0.3792\n\n >>> x3\n \n Dimensions: (x: 3, y: 2)\n Coordinates:\n * y (y) int64 2 3\n * x (x) int64 40 50 60\n Data variables:\n temperature (y, x) float64 2.789 2.446 6.551 12.46 2.22 15.96\n precipitation (y, x) float64 0.4804 0.1902 0.2457 0.6125 0.4654 0.5953\n\n >>> xr.combine_by_coords([x2, x1])\n \n Dimensions: (x: 3, y: 4)\n Coordinates:\n * x (x) int64 10 20 30\n * y (y) int64 0 1 2 3\n Data variables:\n temperature (y, x) float64 1.654 10.63 7.015 2.543 ... 7.709 8.82 2.316\n precipitation (y, x) float64 0.2136 0.9974 0.7603 ... 0.6509 0.06938 0.3792\n\n >>> xr.combine_by_coords([x3, x1])\n \n Dimensions: (x: 6, y: 4)\n Coordinates:\n * x (x) int64 10 20 30 40 50 60\n * y (y) int64 0 1 2 3\n Data variables:\n temperature (y, x) float64 1.654 10.63 7.015 nan ... nan 12.46 2.22 15.96\n precipitation (y, x) float64 0.2136 0.9974 0.7603 ... 0.6125 0.4654 0.5953\n\n >>> xr.combine_by_coords([x3, x1], join='override')\n \n Dimensions: (x: 3, y: 4)\n Coordinates:\n * x (x) int64 10 20 30\n * y (y) int64 0 1 2 3\n Data variables:\n temperature (y, x) float64 1.654 10.63 7.015 2.543 ... 12.46 2.22 15.96\n precipitation (y, x) float64 0.2136 0.9974 0.7603 ... 0.6125 0.4654 0.5953\n\n ","endLoc":697,"header":"def combine_by_coords(\n datasets,\n compat=\"no_conflicts\",\n data_vars=\"all\",\n coords=\"different\",\n fill_value=dtypes.NA,\n join=\"outer\",\n)","id":1731,"name":"combine_by_coords","nodeType":"Function","startLoc":472,"text":"def combine_by_coords(\n datasets,\n compat=\"no_conflicts\",\n data_vars=\"all\",\n coords=\"different\",\n fill_value=dtypes.NA,\n join=\"outer\",\n):\n \"\"\"\n Attempt to auto-magically combine the given datasets into one by using\n dimension coordinates.\n\n This method attempts to combine a group of datasets along any number of\n dimensions into a single entity by inspecting coords and metadata and using\n a combination of concat and merge.\n\n Will attempt to order the datasets such that the values in their dimension\n coordinates are monotonic along all dimensions. If it cannot determine the\n order in which to concatenate the datasets, it will raise a ValueError.\n Non-coordinate dimensions will be ignored, as will any coordinate\n dimensions which do not vary between each dataset.\n\n Aligns coordinates, but different variables on datasets can cause it\n to fail under some scenarios. 
In complex cases, you may need to clean up\n your data and use concat/merge explicitly (also see `manual_combine`).\n\n Works well if, for example, you have N years of data and M data variables,\n and each combination of a distinct time period and set of data variables is\n saved as its own dataset. Also useful for if you have a simulation which is\n parallelized in multiple dimensions, but has global coordinates saved in\n each file specifying the positions of points within the global domain.\n\n Parameters\n ----------\n datasets : sequence of xarray.Dataset\n Dataset objects to combine.\n compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional\n String indicating how to compare variables of the same name for\n potential conflicts:\n\n - 'broadcast_equals': all values must be equal when variables are\n broadcast against each other to ensure common dimensions.\n - 'equals': all values and dimensions must be the same.\n - 'identical': all values, dimensions and attributes must be the\n same.\n - 'no_conflicts': only values which are not null in both datasets\n must be equal. The returned dataset then contains the combination\n of all non-null values.\n - 'override': skip comparing and pick variable from first dataset\n data_vars : {'minimal', 'different', 'all' or list of str}, optional\n These data variables will be concatenated together:\n\n * 'minimal': Only data variables in which the dimension already\n appears are included.\n * 'different': Data variables which are not equal (ignoring\n attributes) across all datasets are also concatenated (as well as\n all for which dimension already appears). Beware: this option may\n load the data payload of data variables into memory if they are not\n already loaded.\n * 'all': All data variables will be concatenated.\n * list of str: The listed data variables will be concatenated, in\n addition to the 'minimal' data variables.\n If objects are DataArrays, `data_vars` must be 'all'.\n coords : {'minimal', 'different', 'all' or list of str}, optional\n As per the 'data_vars' kwarg, but for coordinate variables.\n fill_value : scalar, optional\n Value to use for newly missing values\n join : {'outer', 'inner', 'left', 'right', 'exact'}, optional\n String indicating how to combine differing indexes\n (excluding concat_dim) in objects\n\n - 'outer': use the union of object indexes\n - 'inner': use the intersection of object indexes\n - 'left': use indexes from the first object with each dimension\n - 'right': use indexes from the last object with each dimension\n - 'exact': instead of aligning, raise `ValueError` when indexes to be\n aligned are not equal\n - 'override': if indexes are of same size, rewrite indexes to be\n those of the first object with that dimension. Indexes for the same\n dimension must have the same size in all objects.\n\n Returns\n -------\n combined : xarray.Dataset\n\n See also\n --------\n concat\n merge\n combine_nested\n\n Examples\n --------\n\n Combining two datasets using their common dimension coordinates. Notice\n they are concatenated based on the values in their dimension coordinates,\n not on their position in the list passed to `combine_by_coords`.\n\n >>> import numpy as np\n >>> import xarray as xr\n\n >>> x1 = xr.Dataset(\n ... {\n ... \"temperature\": ((\"y\", \"x\"), 20 * np.random.rand(6).reshape(2, 3)),\n ... \"precipitation\": ((\"y\", \"x\"), np.random.rand(6).reshape(2, 3)),\n ... },\n ... coords={\"y\": [0, 1], \"x\": [10, 20, 30]},\n ... )\n >>> x2 = xr.Dataset(\n ... 
{\n ... \"temperature\": ((\"y\", \"x\"), 20 * np.random.rand(6).reshape(2, 3)),\n ... \"precipitation\": ((\"y\", \"x\"), np.random.rand(6).reshape(2, 3)),\n ... },\n ... coords={\"y\": [2, 3], \"x\": [10, 20, 30]},\n ... )\n >>> x3 = xr.Dataset(\n ... {\n ... \"temperature\": ((\"y\", \"x\"), 20 * np.random.rand(6).reshape(2, 3)),\n ... \"precipitation\": ((\"y\", \"x\"), np.random.rand(6).reshape(2, 3)),\n ... },\n ... coords={\"y\": [2, 3], \"x\": [40, 50, 60]},\n ... )\n\n >>> x1\n \n Dimensions: (x: 3, y: 2)\n Coordinates:\n * y (y) int64 0 1\n * x (x) int64 10 20 30\n Data variables:\n temperature (y, x) float64 1.654 10.63 7.015 2.543 13.93 9.436\n precipitation (y, x) float64 0.2136 0.9974 0.7603 0.4679 0.3115 0.945\n\n >>> x2\n \n Dimensions: (x: 3, y: 2)\n Coordinates:\n * y (y) int64 2 3\n * x (x) int64 10 20 30\n Data variables:\n temperature (y, x) float64 9.341 0.1251 6.269 7.709 8.82 2.316\n precipitation (y, x) float64 0.1728 0.1178 0.03018 0.6509 0.06938 0.3792\n\n >>> x3\n \n Dimensions: (x: 3, y: 2)\n Coordinates:\n * y (y) int64 2 3\n * x (x) int64 40 50 60\n Data variables:\n temperature (y, x) float64 2.789 2.446 6.551 12.46 2.22 15.96\n precipitation (y, x) float64 0.4804 0.1902 0.2457 0.6125 0.4654 0.5953\n\n >>> xr.combine_by_coords([x2, x1])\n \n Dimensions: (x: 3, y: 4)\n Coordinates:\n * x (x) int64 10 20 30\n * y (y) int64 0 1 2 3\n Data variables:\n temperature (y, x) float64 1.654 10.63 7.015 2.543 ... 7.709 8.82 2.316\n precipitation (y, x) float64 0.2136 0.9974 0.7603 ... 0.6509 0.06938 0.3792\n\n >>> xr.combine_by_coords([x3, x1])\n \n Dimensions: (x: 6, y: 4)\n Coordinates:\n * x (x) int64 10 20 30 40 50 60\n * y (y) int64 0 1 2 3\n Data variables:\n temperature (y, x) float64 1.654 10.63 7.015 nan ... nan 12.46 2.22 15.96\n precipitation (y, x) float64 0.2136 0.9974 0.7603 ... 0.6125 0.4654 0.5953\n\n >>> xr.combine_by_coords([x3, x1], join='override')\n \n Dimensions: (x: 3, y: 4)\n Coordinates:\n * x (x) int64 10 20 30\n * y (y) int64 0 1 2 3\n Data variables:\n temperature (y, x) float64 1.654 10.63 7.015 2.543 ... 12.46 2.22 15.96\n precipitation (y, x) float64 0.2136 0.9974 0.7603 ... 
0.6125 0.4654 0.5953\n\n \"\"\"\n\n # Group by data vars\n sorted_datasets = sorted(datasets, key=vars_as_keys)\n grouped_by_vars = itertools.groupby(sorted_datasets, key=vars_as_keys)\n\n # Perform the multidimensional combine on each group of data variables\n # before merging back together\n concatenated_grouped_by_data_vars = []\n for vars, datasets_with_same_vars in grouped_by_vars:\n combined_ids, concat_dims = _infer_concat_order_from_coords(\n list(datasets_with_same_vars)\n )\n\n _check_shape_tile_ids(combined_ids)\n\n # Concatenate along all of concat_dims one by one to create single ds\n concatenated = _combine_nd(\n combined_ids,\n concat_dims=concat_dims,\n data_vars=data_vars,\n coords=coords,\n compat=compat,\n fill_value=fill_value,\n join=join,\n )\n\n # Check the overall coordinates are monotonically increasing\n for dim in concat_dims:\n indexes = concatenated.indexes.get(dim)\n if not (indexes.is_monotonic_increasing or indexes.is_monotonic_decreasing):\n raise ValueError(\n \"Resulting object does not have monotonic\"\n \" global indexes along dimension {}\".format(dim)\n )\n concatenated_grouped_by_data_vars.append(concatenated)\n\n return merge(\n concatenated_grouped_by_data_vars,\n compat=compat,\n fill_value=fill_value,\n join=join,\n )"},{"fileName":"pycompat.py","filePath":"xarray/core","id":1732,"nodeType":"File","text":"import numpy as np\n\ninteger_types = (int, np.integer)\n\ntry:\n # solely for isinstance checks\n import dask.array\n\n dask_array_type = (dask.array.Array,)\nexcept ImportError: # pragma: no cover\n dask_array_type = ()\n\ntry:\n # solely for isinstance checks\n import sparse\n\n sparse_array_type = (sparse.SparseArray,)\nexcept ImportError: # pragma: no cover\n sparse_array_type = ()\n"},{"col":0,"comment":"","endLoc":1,"header":"pycompat.py#","id":1733,"name":"","nodeType":"Function","startLoc":1,"text":"integer_types = (int, np.integer)\n\ntry:\n # solely for isinstance checks\n import dask.array\n\n dask_array_type = (dask.array.Array,)\nexcept ImportError: # pragma: no cover\n dask_array_type = ()\n\ntry:\n # solely for isinstance checks\n import sparse\n\n sparse_array_type = (sparse.SparseArray,)\nexcept ImportError: # pragma: no cover\n sparse_array_type = ()"},{"col":0,"comment":"Render the given version pieces into the requested style.","endLoc":504,"header":"def render(pieces, style)","id":1734,"name":"render","nodeType":"Function","startLoc":469,"text":"def render(pieces, style):\n \"\"\"Render the given version pieces into the requested style.\"\"\"\n if pieces[\"error\"]:\n return {\n \"version\": \"unknown\",\n \"full-revisionid\": pieces.get(\"long\"),\n \"dirty\": None,\n \"error\": pieces[\"error\"],\n \"date\": None,\n }\n\n if not style or style == \"default\":\n style = \"pep440\" # the default\n\n if style == \"pep440\":\n rendered = render_pep440(pieces)\n elif style == \"pep440-pre\":\n rendered = render_pep440_pre(pieces)\n elif style == \"pep440-post\":\n rendered = render_pep440_post(pieces)\n elif style == \"pep440-old\":\n rendered = render_pep440_old(pieces)\n elif style == \"git-describe\":\n rendered = render_git_describe(pieces)\n elif style == \"git-describe-long\":\n rendered = render_git_describe_long(pieces)\n else:\n raise ValueError(\"unknown style '%s'\" % style)\n\n return {\n \"version\": rendered,\n \"full-revisionid\": pieces[\"long\"],\n \"dirty\": pieces[\"dirty\"],\n \"error\": None,\n \"date\": pieces.get(\"date\"),\n }"},{"col":4,"comment":"Fill NaN values by propogating values forward\n\n 
*Requires bottleneck.*\n\n Parameters\n ----------\n dim : Hashable\n Specifies the dimension along which to propagate values when\n filling.\n limit : int, default None\n The maximum number of consecutive NaN values to forward fill. In\n other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. Must be greater\n than 0 or None for no limit.\n\n Returns\n -------\n Dataset\n ","endLoc":3951,"header":"def ffill(self, dim: Hashable, limit: int = None) -> \"Dataset\"","id":1735,"name":"ffill","nodeType":"Function","startLoc":3928,"text":"def ffill(self, dim: Hashable, limit: int = None) -> \"Dataset\":\n \"\"\"Fill NaN values by propogating values forward\n\n *Requires bottleneck.*\n\n Parameters\n ----------\n dim : Hashable\n Specifies the dimension along which to propagate values when\n filling.\n limit : int, default None\n The maximum number of consecutive NaN values to forward fill. In\n other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. Must be greater\n than 0 or None for no limit.\n\n Returns\n -------\n Dataset\n \"\"\"\n from .missing import ffill, _apply_over_vars_with_dim\n\n new = _apply_over_vars_with_dim(ffill, self, dim=dim, limit=limit)\n return new"},{"id":1736,"name":"py36-min-nep18.yml","nodeType":"TextFile","path":"ci/requirements","text":"name: xarray-tests\nchannels:\n - conda-forge\ndependencies:\n # Optional dependencies that require NEP18, such as sparse,\n # require drastically newer packages than everything else\n - python=3.6\n - coveralls\n - dask=2.4\n - distributed=2.4\n - numpy=1.17\n - pandas=0.24\n - pytest\n - pytest-cov\n - pytest-env\n - scipy=1.2\n - sparse=0.8\n"},{"col":4,"comment":"Fill NaN values by propogating values backward\n\n *Requires bottleneck.*\n\n Parameters\n ----------\n dim : str\n Specifies the dimension along which to propagate values when\n filling.\n limit : int, default None\n The maximum number of consecutive NaN values to backward fill. In\n other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. Must be greater\n than 0 or None for no limit.\n\n Returns\n -------\n Dataset\n ","endLoc":3976,"header":"def bfill(self, dim: Hashable, limit: int = None) -> \"Dataset\"","id":1737,"name":"bfill","nodeType":"Function","startLoc":3953,"text":"def bfill(self, dim: Hashable, limit: int = None) -> \"Dataset\":\n \"\"\"Fill NaN values by propogating values backward\n\n *Requires bottleneck.*\n\n Parameters\n ----------\n dim : str\n Specifies the dimension along which to propagate values when\n filling.\n limit : int, default None\n The maximum number of consecutive NaN values to backward fill. In\n other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. Must be greater\n than 0 or None for no limit.\n\n Returns\n -------\n Dataset\n \"\"\"\n from .missing import bfill, _apply_over_vars_with_dim\n\n new = _apply_over_vars_with_dim(bfill, self, dim=dim, limit=limit)\n return new"},{"col":0,"comment":"Build up version string, with post-release \"local version identifier\".\n\n Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n Exceptions:\n 1: no tags. git_describe was just HEX. 
0+untagged.DISTANCE.gHEX[.dirty]\n ","endLoc":361,"header":"def render_pep440(pieces)","id":1738,"name":"render_pep440","nodeType":"Function","startLoc":340,"text":"def render_pep440(pieces):\n \"\"\"Build up version string, with post-release \"local version identifier\".\n\n Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n Exceptions:\n 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += plus_or_dot(pieces)\n rendered += \"%d.g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n if pieces[\"dirty\"]:\n rendered += \".dirty\"\n else:\n # exception #1\n rendered = \"0+untagged.%d.g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n if pieces[\"dirty\"]:\n rendered += \".dirty\"\n return rendered"},{"col":4,"comment":"null","endLoc":559,"header":"def transpose(self, order)","id":1739,"name":"transpose","nodeType":"Function","startLoc":558,"text":"def transpose(self, order):\n return LazilyVectorizedIndexedArray(self.array, self.key).transpose(order)"},{"id":1740,"name":"add-conda-to-path.yml","nodeType":"TextFile","path":"ci/azure","text":"# https://docs.microsoft.com/en-us/azure/devops/pipelines/languages/anaconda\nsteps:\n\n- bash: |\n echo \"##vso[task.prependpath]$CONDA/bin\"\n displayName: Add conda to PATH (Linux)\n condition: eq(variables['Agent.OS'], 'Linux')\n\n- bash: |\n echo \"##vso[task.prependpath]$CONDA/bin\"\n sudo chown -R $USER $CONDA\n displayName: Add conda to PATH (OS X)\n condition: eq(variables['Agent.OS'], 'Darwin')\n\n- powershell: |\n Write-Host \"##vso[task.prependpath]$env:CONDA\\Scripts\"\n displayName: Add conda to PATH (Windows)\n condition: eq(variables['Agent.OS'], 'Windows_NT')\n"},{"col":0,"comment":"Return a + if we don't already have one, else return a .","endLoc":337,"header":"def plus_or_dot(pieces)","id":1741,"name":"plus_or_dot","nodeType":"Function","startLoc":333,"text":"def plus_or_dot(pieces):\n \"\"\"Return a + if we don't already have one, else return a .\"\"\"\n if \"+\" in pieces.get(\"closest-tag\", \"\"):\n return \".\"\n return \"+\""},{"col":4,"comment":"Combine two Datasets, default to data_vars of self.\n\n The new coordinates follow the normal broadcasting and alignment rules\n of ``join='outer'``. Vacant cells in the expanded coordinates are\n filled with np.nan.\n\n Parameters\n ----------\n other : Dataset\n Used to fill all matching missing values in this array.\n\n Returns\n -------\n DataArray\n ","endLoc":3995,"header":"def combine_first(self, other: \"Dataset\") -> \"Dataset\"","id":1742,"name":"combine_first","nodeType":"Function","startLoc":3978,"text":"def combine_first(self, other: \"Dataset\") -> \"Dataset\":\n \"\"\"Combine two Datasets, default to data_vars of self.\n\n The new coordinates follow the normal broadcasting and alignment rules\n of ``join='outer'``. 
Vacant cells in the expanded coordinates are\n filled with np.nan.\n\n Parameters\n ----------\n other : Dataset\n Used to fill all matching missing values in this array.\n\n Returns\n -------\n DataArray\n \"\"\"\n out = ops.fillna(self, other, join=\"outer\", dataset_join=\"outer\")\n return out"},{"id":1743,"name":".pre-commit-config.yaml","nodeType":"TextFile","path":"","text":"# https://pre-commit.com/\n# https://github.com/python/black#version-control-integration\nrepos:\n - repo: https://github.com/python/black\n rev: stable\n hooks:\n - id: black\n language_version: python3.7\n - repo: https://github.com/pre-commit/pre-commit-hooks\n rev: v2.2.3\n hooks:\n - id: flake8\n - repo: https://github.com/pre-commit/mirrors-mypy\n rev: v0.730 # Must match ci/requirements/*.yml\n hooks:\n - id: mypy\n # run these occasionally, ref discussion https://github.com/pydata/xarray/pull/3194\n # - repo: https://github.com/asottile/pyupgrade\n # rev: v1.22.1\n # hooks:\n # - id: pyupgrade\n # args:\n # - \"--py3-only\"\n # # remove on f-strings in Py3.7\n # - \"--keep-percent-format\"\n # - repo: https://github.com/timothycrosley/isort\n # rev: 4.3.21-2\n # hooks:\n # - id: isort\n"},{"fileName":"options.py","filePath":"xarray/core","id":1744,"nodeType":"File","text":"import warnings\n\nDISPLAY_WIDTH = \"display_width\"\nARITHMETIC_JOIN = \"arithmetic_join\"\nENABLE_CFTIMEINDEX = \"enable_cftimeindex\"\nFILE_CACHE_MAXSIZE = \"file_cache_maxsize\"\nWARN_FOR_UNCLOSED_FILES = \"warn_for_unclosed_files\"\nCMAP_SEQUENTIAL = \"cmap_sequential\"\nCMAP_DIVERGENT = \"cmap_divergent\"\nKEEP_ATTRS = \"keep_attrs\"\n\n\nOPTIONS = {\n DISPLAY_WIDTH: 80,\n ARITHMETIC_JOIN: \"inner\",\n ENABLE_CFTIMEINDEX: True,\n FILE_CACHE_MAXSIZE: 128,\n WARN_FOR_UNCLOSED_FILES: False,\n CMAP_SEQUENTIAL: \"viridis\",\n CMAP_DIVERGENT: \"RdBu_r\",\n KEEP_ATTRS: \"default\",\n}\n\n_JOIN_OPTIONS = frozenset([\"inner\", \"outer\", \"left\", \"right\", \"exact\"])\n\n\ndef _positive_integer(value):\n return isinstance(value, int) and value > 0\n\n\n_VALIDATORS = {\n DISPLAY_WIDTH: _positive_integer,\n ARITHMETIC_JOIN: _JOIN_OPTIONS.__contains__,\n ENABLE_CFTIMEINDEX: lambda value: isinstance(value, bool),\n FILE_CACHE_MAXSIZE: _positive_integer,\n WARN_FOR_UNCLOSED_FILES: lambda value: isinstance(value, bool),\n KEEP_ATTRS: lambda choice: choice in [True, False, \"default\"],\n}\n\n\ndef _set_file_cache_maxsize(value):\n from ..backends.file_manager import FILE_CACHE\n\n FILE_CACHE.maxsize = value\n\n\ndef _warn_on_setting_enable_cftimeindex(enable_cftimeindex):\n warnings.warn(\n \"The enable_cftimeindex option is now a no-op \"\n \"and will be removed in a future version of xarray.\",\n FutureWarning,\n )\n\n\n_SETTERS = {\n FILE_CACHE_MAXSIZE: _set_file_cache_maxsize,\n ENABLE_CFTIMEINDEX: _warn_on_setting_enable_cftimeindex,\n}\n\n\ndef _get_keep_attrs(default):\n global_choice = OPTIONS[\"keep_attrs\"]\n\n if global_choice == \"default\":\n return default\n elif global_choice in [True, False]:\n return global_choice\n else:\n raise ValueError(\n \"The global option keep_attrs must be one of\" \" True, False or 'default'.\"\n )\n\n\nclass set_options:\n \"\"\"Set options for xarray in a controlled context.\n\n Currently supported options:\n\n - ``display_width``: maximum display width for ``repr`` on xarray objects.\n Default: ``80``.\n - ``arithmetic_join``: DataArray/Dataset alignment in binary operations.\n Default: ``'inner'``.\n - ``file_cache_maxsize``: maximum number of open files to hold in xarray's\n global 
least-recently-usage cached. This should be smaller than your\n system's per-process file descriptor limit, e.g., ``ulimit -n`` on Linux.\n Default: 128.\n - ``warn_for_unclosed_files``: whether or not to issue a warning when\n unclosed files are deallocated (default False). This is mostly useful\n for debugging.\n - ``cmap_sequential``: colormap to use for nondivergent data plots.\n Default: ``viridis``. If string, must be matplotlib built-in colormap.\n Can also be a Colormap object (e.g. mpl.cm.magma)\n - ``cmap_divergent``: colormap to use for divergent data plots.\n Default: ``RdBu_r``. If string, must be matplotlib built-in colormap.\n Can also be a Colormap object (e.g. mpl.cm.magma)\n - ``keep_attrs``: rule for whether to keep attributes on xarray\n Datasets/dataarrays after operations. Either ``True`` to always keep\n attrs, ``False`` to always discard them, or ``'default'`` to use original\n logic that attrs should only be kept in unambiguous circumstances.\n Default: ``'default'``.\n\n You can use ``set_options`` either as a context manager:\n\n >>> ds = xr.Dataset({'x': np.arange(1000)})\n >>> with xr.set_options(display_width=40):\n ... print(ds)\n \n Dimensions: (x: 1000)\n Coordinates:\n * x (x) int64 0 1 2 3 4 5 6 ...\n Data variables:\n *empty*\n\n Or to set global options:\n\n >>> xr.set_options(display_width=80)\n \"\"\"\n\n def __init__(self, **kwargs):\n self.old = {}\n for k, v in kwargs.items():\n if k not in OPTIONS:\n raise ValueError(\n \"argument name %r is not in the set of valid options %r\"\n % (k, set(OPTIONS))\n )\n if k in _VALIDATORS and not _VALIDATORS[k](v):\n raise ValueError(\"option %r given an invalid value: %r\" % (k, v))\n self.old[k] = OPTIONS[k]\n self._apply_update(kwargs)\n\n def _apply_update(self, options_dict):\n for k, v in options_dict.items():\n if k in _SETTERS:\n _SETTERS[k](v)\n OPTIONS.update(options_dict)\n\n def __enter__(self):\n return\n\n def __exit__(self, type, value, traceback):\n self._apply_update(self.old)\n"},{"col":0,"comment":"TAG[.post.devDISTANCE] -- No -dirty.\n\n Exceptions:\n 1: no tags. 0.post.devDISTANCE\n ","endLoc":377,"header":"def render_pep440_pre(pieces)","id":1745,"name":"render_pep440_pre","nodeType":"Function","startLoc":364,"text":"def render_pep440_pre(pieces):\n \"\"\"TAG[.post.devDISTANCE] -- No -dirty.\n\n Exceptions:\n 1: no tags. 0.post.devDISTANCE\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"]:\n rendered += \".post.dev%d\" % pieces[\"distance\"]\n else:\n # exception #1\n rendered = \"0.post.dev%d\" % pieces[\"distance\"]\n return rendered"},{"className":"set_options","col":0,"comment":"Set options for xarray in a controlled context.\n\n Currently supported options:\n\n - ``display_width``: maximum display width for ``repr`` on xarray objects.\n Default: ``80``.\n - ``arithmetic_join``: DataArray/Dataset alignment in binary operations.\n Default: ``'inner'``.\n - ``file_cache_maxsize``: maximum number of open files to hold in xarray's\n global least-recently-usage cached. This should be smaller than your\n system's per-process file descriptor limit, e.g., ``ulimit -n`` on Linux.\n Default: 128.\n - ``warn_for_unclosed_files``: whether or not to issue a warning when\n unclosed files are deallocated (default False). This is mostly useful\n for debugging.\n - ``cmap_sequential``: colormap to use for nondivergent data plots.\n Default: ``viridis``. If string, must be matplotlib built-in colormap.\n Can also be a Colormap object (e.g. 
mpl.cm.magma)\n - ``cmap_divergent``: colormap to use for divergent data plots.\n Default: ``RdBu_r``. If string, must be matplotlib built-in colormap.\n Can also be a Colormap object (e.g. mpl.cm.magma)\n - ``keep_attrs``: rule for whether to keep attributes on xarray\n Datasets/dataarrays after operations. Either ``True`` to always keep\n attrs, ``False`` to always discard them, or ``'default'`` to use original\n logic that attrs should only be kept in unambiguous circumstances.\n Default: ``'default'``.\n\n You can use ``set_options`` either as a context manager:\n\n >>> ds = xr.Dataset({'x': np.arange(1000)})\n >>> with xr.set_options(display_width=40):\n ... print(ds)\n \n Dimensions: (x: 1000)\n Coordinates:\n * x (x) int64 0 1 2 3 4 5 6 ...\n Data variables:\n *empty*\n\n Or to set global options:\n\n >>> xr.set_options(display_width=80)\n ","endLoc":142,"id":1746,"nodeType":"Class","startLoc":74,"text":"class set_options:\n \"\"\"Set options for xarray in a controlled context.\n\n Currently supported options:\n\n - ``display_width``: maximum display width for ``repr`` on xarray objects.\n Default: ``80``.\n - ``arithmetic_join``: DataArray/Dataset alignment in binary operations.\n Default: ``'inner'``.\n - ``file_cache_maxsize``: maximum number of open files to hold in xarray's\n global least-recently-usage cached. This should be smaller than your\n system's per-process file descriptor limit, e.g., ``ulimit -n`` on Linux.\n Default: 128.\n - ``warn_for_unclosed_files``: whether or not to issue a warning when\n unclosed files are deallocated (default False). This is mostly useful\n for debugging.\n - ``cmap_sequential``: colormap to use for nondivergent data plots.\n Default: ``viridis``. If string, must be matplotlib built-in colormap.\n Can also be a Colormap object (e.g. mpl.cm.magma)\n - ``cmap_divergent``: colormap to use for divergent data plots.\n Default: ``RdBu_r``. If string, must be matplotlib built-in colormap.\n Can also be a Colormap object (e.g. mpl.cm.magma)\n - ``keep_attrs``: rule for whether to keep attributes on xarray\n Datasets/dataarrays after operations. Either ``True`` to always keep\n attrs, ``False`` to always discard them, or ``'default'`` to use original\n logic that attrs should only be kept in unambiguous circumstances.\n Default: ``'default'``.\n\n You can use ``set_options`` either as a context manager:\n\n >>> ds = xr.Dataset({'x': np.arange(1000)})\n >>> with xr.set_options(display_width=40):\n ... 
print(ds)\n \n Dimensions: (x: 1000)\n Coordinates:\n * x (x) int64 0 1 2 3 4 5 6 ...\n Data variables:\n *empty*\n\n Or to set global options:\n\n >>> xr.set_options(display_width=80)\n \"\"\"\n\n def __init__(self, **kwargs):\n self.old = {}\n for k, v in kwargs.items():\n if k not in OPTIONS:\n raise ValueError(\n \"argument name %r is not in the set of valid options %r\"\n % (k, set(OPTIONS))\n )\n if k in _VALIDATORS and not _VALIDATORS[k](v):\n raise ValueError(\"option %r given an invalid value: %r\" % (k, v))\n self.old[k] = OPTIONS[k]\n self._apply_update(kwargs)\n\n def _apply_update(self, options_dict):\n for k, v in options_dict.items():\n if k in _SETTERS:\n _SETTERS[k](v)\n OPTIONS.update(options_dict)\n\n def __enter__(self):\n return\n\n def __exit__(self, type, value, traceback):\n self._apply_update(self.old)"},{"col":4,"comment":"Reduce this dataset by applying `func` along some dimension(s).\n\n Parameters\n ----------\n func : callable\n Function which can be called in the form\n `f(x, axis=axis, **kwargs)` to return the result of reducing an\n np.ndarray over an integer valued axis.\n dim : str or sequence of str, optional\n Dimension(s) over which to apply `func`. By default `func` is\n applied over all dimensions.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n keepdims : bool, default False\n If True, the dimensions which are reduced are left in the result\n as dimensions of size one. Coordinates that use these dimensions\n are removed.\n numeric_only : bool, optional\n If True, only apply ``func`` to variables with a numeric dtype.\n **kwargs : Any\n Additional keyword arguments passed on to ``func``.\n\n Returns\n -------\n reduced : Dataset\n Dataset with this object's DataArrays replaced with new DataArrays\n of summarized data and the indicated dimension(s) removed.\n ","endLoc":4088,"header":"def reduce(\n self,\n func: Callable,\n dim: Union[Hashable, Iterable[Hashable]] = None,\n keep_attrs: bool = None,\n keepdims: bool = False,\n numeric_only: bool = False,\n allow_lazy: bool = False,\n **kwargs: Any,\n ) -> \"Dataset\"","id":1747,"name":"reduce","nodeType":"Function","startLoc":3997,"text":"def reduce(\n self,\n func: Callable,\n dim: Union[Hashable, Iterable[Hashable]] = None,\n keep_attrs: bool = None,\n keepdims: bool = False,\n numeric_only: bool = False,\n allow_lazy: bool = False,\n **kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Reduce this dataset by applying `func` along some dimension(s).\n\n Parameters\n ----------\n func : callable\n Function which can be called in the form\n `f(x, axis=axis, **kwargs)` to return the result of reducing an\n np.ndarray over an integer valued axis.\n dim : str or sequence of str, optional\n Dimension(s) over which to apply `func`. By default `func` is\n applied over all dimensions.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n keepdims : bool, default False\n If True, the dimensions which are reduced are left in the result\n as dimensions of size one. 
Coordinates that use these dimensions\n are removed.\n numeric_only : bool, optional\n If True, only apply ``func`` to variables with a numeric dtype.\n **kwargs : Any\n Additional keyword arguments passed on to ``func``.\n\n Returns\n -------\n reduced : Dataset\n Dataset with this object's DataArrays replaced with new DataArrays\n of summarized data and the indicated dimension(s) removed.\n \"\"\"\n if dim is None or dim is ALL_DIMS:\n dims = set(self.dims)\n elif isinstance(dim, str) or not isinstance(dim, Iterable):\n dims = {dim}\n else:\n dims = set(dim)\n\n missing_dimensions = [d for d in dims if d not in self.dims]\n if missing_dimensions:\n raise ValueError(\n \"Dataset does not contain the dimensions: %s\" % missing_dimensions\n )\n\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n\n variables: Dict[Hashable, Variable] = {}\n for name, var in self._variables.items():\n reduce_dims = [d for d in var.dims if d in dims]\n if name in self.coords:\n if not reduce_dims:\n variables[name] = var\n else:\n if (\n not numeric_only\n or np.issubdtype(var.dtype, np.number)\n or (var.dtype == np.bool_)\n ):\n if len(reduce_dims) == 1:\n # unpack dimensions for the benefit of functions\n # like np.argmin which can't handle tuple arguments\n reduce_dims, = reduce_dims\n elif len(reduce_dims) == var.ndim:\n # prefer to aggregate over axis=None rather than\n # axis=(0, 1) if they will be equivalent, because\n # the former is often more efficient\n reduce_dims = None # type: ignore\n variables[name] = var.reduce(\n func,\n dim=reduce_dims,\n keep_attrs=keep_attrs,\n keepdims=keepdims,\n allow_lazy=allow_lazy,\n **kwargs,\n )\n\n coord_names = {k for k in self.coords if k in variables}\n indexes = {k: v for k, v in self.indexes.items() if k in variables}\n attrs = self.attrs if keep_attrs else None\n return self._replace_with_new_dims(\n variables, coord_names=coord_names, attrs=attrs, indexes=indexes\n )"},{"col":4,"comment":"null","endLoc":130,"header":"def __init__(self, **kwargs)","id":1748,"name":"__init__","nodeType":"Function","startLoc":119,"text":"def __init__(self, **kwargs):\n self.old = {}\n for k, v in kwargs.items():\n if k not in OPTIONS:\n raise ValueError(\n \"argument name %r is not in the set of valid options %r\"\n % (k, set(OPTIONS))\n )\n if k in _VALIDATORS and not _VALIDATORS[k](v):\n raise ValueError(\"option %r given an invalid value: %r\" % (k, v))\n self.old[k] = OPTIONS[k]\n self._apply_update(kwargs)"},{"col":0,"comment":"TAG[.postDISTANCE[.dev0]+gHEX] .\n\n The \".dev0\" means dirty. Note that .dev0 sorts backwards\n (a dirty tree will appear \"older\" than the corresponding clean one),\n but you shouldn't be releasing software with -dirty anyways.\n\n Exceptions:\n 1: no tags. 0.postDISTANCE[.dev0]\n ","endLoc":404,"header":"def render_pep440_post(pieces)","id":1749,"name":"render_pep440_post","nodeType":"Function","startLoc":380,"text":"def render_pep440_post(pieces):\n \"\"\"TAG[.postDISTANCE[.dev0]+gHEX] .\n\n The \".dev0\" means dirty. Note that .dev0 sorts backwards\n (a dirty tree will appear \"older\" than the corresponding clean one),\n but you shouldn't be releasing software with -dirty anyways.\n\n Exceptions:\n 1: no tags. 
0.postDISTANCE[.dev0]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += \".post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n rendered += plus_or_dot(pieces)\n rendered += \"g%s\" % pieces[\"short\"]\n else:\n # exception #1\n rendered = \"0.post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n rendered += \"+g%s\" % pieces[\"short\"]\n return rendered"},{"col":0,"comment":"TAG[.postDISTANCE[.dev0]] .\n\n The \".dev0\" means dirty.\n\n Eexceptions:\n 1: no tags. 0.postDISTANCE[.dev0]\n ","endLoc":426,"header":"def render_pep440_old(pieces)","id":1750,"name":"render_pep440_old","nodeType":"Function","startLoc":407,"text":"def render_pep440_old(pieces):\n \"\"\"TAG[.postDISTANCE[.dev0]] .\n\n The \".dev0\" means dirty.\n\n Eexceptions:\n 1: no tags. 0.postDISTANCE[.dev0]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += \".post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n else:\n # exception #1\n rendered = \"0.post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n return rendered"},{"col":0,"comment":"TAG[-DISTANCE-gHEX][-dirty].\n\n Like 'git describe --tags --dirty --always'.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)\n ","endLoc":446,"header":"def render_git_describe(pieces)","id":1751,"name":"render_git_describe","nodeType":"Function","startLoc":429,"text":"def render_git_describe(pieces):\n \"\"\"TAG[-DISTANCE-gHEX][-dirty].\n\n Like 'git describe --tags --dirty --always'.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"]:\n rendered += \"-%d-g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n else:\n # exception #1\n rendered = pieces[\"short\"]\n if pieces[\"dirty\"]:\n rendered += \"-dirty\"\n return rendered"},{"col":0,"comment":"TAG-DISTANCE-gHEX[-dirty].\n\n Like 'git describe --tags --dirty --always -long'.\n The distance/hash is unconditional.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)\n ","endLoc":466,"header":"def render_git_describe_long(pieces)","id":1752,"name":"render_git_describe_long","nodeType":"Function","startLoc":449,"text":"def render_git_describe_long(pieces):\n \"\"\"TAG-DISTANCE-gHEX[-dirty].\n\n Like 'git describe --tags --dirty --always -long'.\n The distance/hash is unconditional.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n rendered += \"-%d-g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n else:\n # exception #1\n rendered = pieces[\"short\"]\n if pieces[\"dirty\"]:\n rendered += \"-dirty\"\n return rendered"},{"col":4,"comment":"Fill NaN values by propogating values forward\n\n *Requires bottleneck.*\n\n Parameters\n ----------\n dim : hashable\n Specifies the dimension along which to propagate values when\n filling.\n limit : int, default None\n The maximum number of consecutive NaN values to forward fill. In\n other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. 
Must be greater\n than 0 or None for no limit.\n\n Returns\n -------\n DataArray\n ","endLoc":2062,"header":"def ffill(self, dim: Hashable, limit: int = None) -> \"DataArray\"","id":1753,"name":"ffill","nodeType":"Function","startLoc":2040,"text":"def ffill(self, dim: Hashable, limit: int = None) -> \"DataArray\":\n \"\"\"Fill NaN values by propogating values forward\n\n *Requires bottleneck.*\n\n Parameters\n ----------\n dim : hashable\n Specifies the dimension along which to propagate values when\n filling.\n limit : int, default None\n The maximum number of consecutive NaN values to forward fill. In\n other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. Must be greater\n than 0 or None for no limit.\n\n Returns\n -------\n DataArray\n \"\"\"\n from .missing import ffill\n\n return ffill(self, dim, limit=limit)"},{"col":0,"comment":"Try to determine the version from the parent directory name.\n\n Source tarballs conventionally unpack into a directory that includes both\n the project name and a version string. We will also support searching up\n two directory levels for an appropriately named parent directory\n ","endLoc":138,"header":"def versions_from_parentdir(parentdir_prefix, root, verbose)","id":1754,"name":"versions_from_parentdir","nodeType":"Function","startLoc":110,"text":"def versions_from_parentdir(parentdir_prefix, root, verbose):\n \"\"\"Try to determine the version from the parent directory name.\n\n Source tarballs conventionally unpack into a directory that includes both\n the project name and a version string. We will also support searching up\n two directory levels for an appropriately named parent directory\n \"\"\"\n rootdirs = []\n\n for i in range(3):\n dirname = os.path.basename(root)\n if dirname.startswith(parentdir_prefix):\n return {\n \"version\": dirname[len(parentdir_prefix) :],\n \"full-revisionid\": None,\n \"dirty\": False,\n \"error\": None,\n \"date\": None,\n }\n else:\n rootdirs.append(root)\n root = os.path.dirname(root) # up a level\n\n if verbose:\n print(\n \"Tried directories %s but none started with prefix %s\"\n % (str(rootdirs), parentdir_prefix)\n )\n raise NotThisMethod(\"rootdir doesn't start with parentdir_prefix\")"},{"col":0,"comment":"forward fill missing values","endLoc":307,"header":"def ffill(arr, dim=None, limit=None)","id":1755,"name":"ffill","nodeType":"Function","startLoc":291,"text":"def ffill(arr, dim=None, limit=None):\n \"\"\"forward fill missing values\"\"\"\n import bottleneck as bn\n\n axis = arr.get_axis_num(dim)\n\n # work around for bottleneck 178\n _limit = limit if limit is not None else arr.shape[axis]\n\n return apply_ufunc(\n bn.push,\n arr,\n dask=\"parallelized\",\n keep_attrs=True,\n output_dtypes=[arr.dtype],\n kwargs=dict(n=_limit, axis=axis),\n ).transpose(*arr.dims)"},{"col":4,"comment":"Fill NaN values by propogating values backward\n\n *Requires bottleneck.*\n\n Parameters\n ----------\n dim : str\n Specifies the dimension along which to propagate values when\n filling.\n limit : int, default None\n The maximum number of consecutive NaN values to backward fill. In\n other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. 
Must be greater\n than 0 or None for no limit.\n\n Returns\n -------\n DataArray\n ","endLoc":2086,"header":"def bfill(self, dim: Hashable, limit: int = None) -> \"DataArray\"","id":1756,"name":"bfill","nodeType":"Function","startLoc":2064,"text":"def bfill(self, dim: Hashable, limit: int = None) -> \"DataArray\":\n \"\"\"Fill NaN values by propogating values backward\n\n *Requires bottleneck.*\n\n Parameters\n ----------\n dim : str\n Specifies the dimension along which to propagate values when\n filling.\n limit : int, default None\n The maximum number of consecutive NaN values to backward fill. In\n other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. Must be greater\n than 0 or None for no limit.\n\n Returns\n -------\n DataArray\n \"\"\"\n from .missing import bfill\n\n return bfill(self, dim, limit=limit)"},{"col":0,"comment":"backfill missing values","endLoc":324,"header":"def bfill(arr, dim=None, limit=None)","id":1757,"name":"bfill","nodeType":"Function","startLoc":310,"text":"def bfill(arr, dim=None, limit=None):\n \"\"\"backfill missing values\"\"\"\n axis = arr.get_axis_num(dim)\n\n # work around for bottleneck 178\n _limit = limit if limit is not None else arr.shape[axis]\n\n return apply_ufunc(\n _bfill,\n arr,\n dask=\"parallelized\",\n keep_attrs=True,\n output_dtypes=[arr.dtype],\n kwargs=dict(n=_limit, axis=axis),\n ).transpose(*arr.dims)"},{"col":0,"comment":"Return a new object with the same shape and type as a given object.\n\n Parameters\n ----------\n other : DataArray, Dataset, or Variable\n The reference object in input\n fill_value : scalar\n Value to fill the new object with before returning it.\n dtype : dtype, optional\n dtype of the new array. If omitted, it defaults to other.dtype.\n\n Returns\n -------\n out : same as object\n New object with the same shape and type as other, with the data\n filled with fill_value. Coords will be copied from other.\n If other is based on dask, the new one will be as well, and will be\n split in the same chunks.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> x = xr.DataArray(np.arange(6).reshape(2, 3),\n ... dims=['lat', 'lon'],\n ... coords={'lat': [1, 2], 'lon': [0, 1, 2]})\n >>> x\n \n array([[0, 1, 2],\n [3, 4, 5]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.full_like(x, 1)\n \n array([[1, 1, 1],\n [1, 1, 1]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.full_like(x, 0.5)\n \n array([[0, 0, 0],\n [0, 0, 0]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.full_like(x, 0.5, dtype=np.double)\n \n array([[0.5, 0.5, 0.5],\n [0.5, 0.5, 0.5]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.full_like(x, np.nan, dtype=np.double)\n \n array([[nan, nan, nan],\n [nan, nan, nan]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n See also\n --------\n\n zeros_like\n ones_like\n\n ","endLoc":1296,"header":"def full_like(other, fill_value, dtype: DTypeLike = None)","id":1758,"name":"full_like","nodeType":"Function","startLoc":1200,"text":"def full_like(other, fill_value, dtype: DTypeLike = None):\n \"\"\"Return a new object with the same shape and type as a given object.\n\n Parameters\n ----------\n other : DataArray, Dataset, or Variable\n The reference object in input\n fill_value : scalar\n Value to fill the new object with before returning it.\n dtype : dtype, optional\n dtype of the new array. 
If omitted, it defaults to other.dtype.\n\n Returns\n -------\n out : same as object\n New object with the same shape and type as other, with the data\n filled with fill_value. Coords will be copied from other.\n If other is based on dask, the new one will be as well, and will be\n split in the same chunks.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> x = xr.DataArray(np.arange(6).reshape(2, 3),\n ... dims=['lat', 'lon'],\n ... coords={'lat': [1, 2], 'lon': [0, 1, 2]})\n >>> x\n \n array([[0, 1, 2],\n [3, 4, 5]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.full_like(x, 1)\n \n array([[1, 1, 1],\n [1, 1, 1]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.full_like(x, 0.5)\n \n array([[0, 0, 0],\n [0, 0, 0]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.full_like(x, 0.5, dtype=np.double)\n \n array([[0.5, 0.5, 0.5],\n [0.5, 0.5, 0.5]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.full_like(x, np.nan, dtype=np.double)\n \n array([[nan, nan, nan],\n [nan, nan, nan]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n See also\n --------\n\n zeros_like\n ones_like\n\n \"\"\"\n from .dataarray import DataArray\n from .dataset import Dataset\n from .variable import Variable\n\n if isinstance(other, Dataset):\n data_vars = {\n k: _full_like_variable(v, fill_value, dtype)\n for k, v in other.data_vars.items()\n }\n return Dataset(data_vars, coords=other.coords, attrs=other.attrs)\n elif isinstance(other, DataArray):\n return DataArray(\n _full_like_variable(other.variable, fill_value, dtype),\n dims=other.dims,\n coords=other.coords,\n attrs=other.attrs,\n name=other.name,\n )\n elif isinstance(other, Variable):\n return _full_like_variable(other, fill_value, dtype)\n else:\n raise TypeError(\"Expected DataArray, Dataset, or Variable\")"},{"col":4,"comment":"Apply a function over the data variables in this dataset.\n\n Parameters\n ----------\n func : callable\n Function which can be called in the form `func(x, *args, **kwargs)`\n to transform each DataArray `x` in this dataset into another\n DataArray.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. 
If False, the new object will\n be returned without attributes.\n args : tuple, optional\n Positional arguments passed on to `func`.\n **kwargs : Any\n Keyword arguments passed on to `func`.\n\n Returns\n -------\n applied : Dataset\n Resulting dataset from applying ``func`` over each data variable.\n\n Examples\n --------\n >>> da = xr.DataArray(np.random.randn(2, 3))\n >>> ds = xr.Dataset({'foo': da, 'bar': ('x', [-1, 2])})\n >>> ds\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Dimensions without coordinates: dim_0, dim_1, x\n Data variables:\n foo (dim_0, dim_1) float64 -0.3751 -1.951 -1.945 0.2948 0.711 -0.3948\n bar (x) int64 -1 2\n >>> ds.apply(np.fabs)\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Dimensions without coordinates: dim_0, dim_1, x\n Data variables:\n foo (dim_0, dim_1) float64 0.3751 1.951 1.945 0.2948 0.711 0.3948\n bar (x) float64 1.0 2.0\n ","endLoc":4145,"header":"def apply(\n self,\n func: Callable,\n keep_attrs: bool = None,\n args: Iterable[Any] = (),\n **kwargs","id":1759,"name":"apply","nodeType":"Function","startLoc":4090,"text":"def apply(\n self,\n func: Callable,\n keep_attrs: bool = None,\n args: Iterable[Any] = (),\n **kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Apply a function over the data variables in this dataset.\n\n Parameters\n ----------\n func : callable\n Function which can be called in the form `func(x, *args, **kwargs)`\n to transform each DataArray `x` in this dataset into another\n DataArray.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. If False, the new object will\n be returned without attributes.\n args : tuple, optional\n Positional arguments passed on to `func`.\n **kwargs : Any\n Keyword arguments passed on to `func`.\n\n Returns\n -------\n applied : Dataset\n Resulting dataset from applying ``func`` over each data variable.\n\n Examples\n --------\n >>> da = xr.DataArray(np.random.randn(2, 3))\n >>> ds = xr.Dataset({'foo': da, 'bar': ('x', [-1, 2])})\n >>> ds\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Dimensions without coordinates: dim_0, dim_1, x\n Data variables:\n foo (dim_0, dim_1) float64 -0.3751 -1.951 -1.945 0.2948 0.711 -0.3948\n bar (x) int64 -1 2\n >>> ds.apply(np.fabs)\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Dimensions without coordinates: dim_0, dim_1, x\n Data variables:\n foo (dim_0, dim_1) float64 0.3751 1.951 1.945 0.2948 0.711 0.3948\n bar (x) float64 1.0 2.0\n \"\"\"\n variables = {\n k: maybe_wrap_array(v, func(v, *args, **kwargs))\n for k, v in self.data_vars.items()\n }\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n attrs = self.attrs if keep_attrs else None\n return type(self)(variables, attrs=attrs)"},{"col":4,"comment":"Combine two DataArray objects, with union of coordinates.\n\n This operation follows the normal broadcasting and alignment rules of\n ``join='outer'``. Default to non-null values of array calling the\n method. 
Use np.nan to fill in vacant cells after alignment.\n\n Parameters\n ----------\n other : DataArray\n Used to fill all matching missing values in this array.\n\n Returns\n -------\n DataArray\n ","endLoc":2104,"header":"def combine_first(self, other: \"DataArray\") -> \"DataArray\"","id":1760,"name":"combine_first","nodeType":"Function","startLoc":2088,"text":"def combine_first(self, other: \"DataArray\") -> \"DataArray\":\n \"\"\"Combine two DataArray objects, with union of coordinates.\n\n This operation follows the normal broadcasting and alignment rules of\n ``join='outer'``. Default to non-null values of array calling the\n method. Use np.nan to fill in vacant cells after alignment.\n\n Parameters\n ----------\n other : DataArray\n Used to fill all matching missing values in this array.\n\n Returns\n -------\n DataArray\n \"\"\"\n return ops.fillna(self, other, join=\"outer\")"},{"col":4,"comment":"null","endLoc":136,"header":"def _apply_update(self, options_dict)","id":1761,"name":"_apply_update","nodeType":"Function","startLoc":132,"text":"def _apply_update(self, options_dict):\n for k, v in options_dict.items():\n if k in _SETTERS:\n _SETTERS[k](v)\n OPTIONS.update(options_dict)"},{"col":4,"comment":"null","endLoc":139,"header":"def __enter__(self)","id":1762,"name":"__enter__","nodeType":"Function","startLoc":138,"text":"def __enter__(self):\n return"},{"col":4,"comment":"null","endLoc":142,"header":"def __exit__(self, type, value, traceback)","id":1763,"name":"__exit__","nodeType":"Function","startLoc":141,"text":"def __exit__(self, type, value, traceback):\n self._apply_update(self.old)"},{"attributeType":"null","col":8,"comment":"null","endLoc":120,"id":1764,"name":"old","nodeType":"Attribute","startLoc":120,"text":"self.old"},{"col":0,"comment":"Inner function of full_like, where other must be a variable\n ","endLoc":1315,"header":"def _full_like_variable(other, fill_value, dtype: DTypeLike = None)","id":1765,"name":"_full_like_variable","nodeType":"Function","startLoc":1299,"text":"def _full_like_variable(other, fill_value, dtype: DTypeLike = None):\n \"\"\"Inner function of full_like, where other must be a variable\n \"\"\"\n from .variable import Variable\n\n if isinstance(other.data, dask_array_type):\n import dask.array\n\n if dtype is None:\n dtype = other.dtype\n data = dask.array.full(\n other.shape, fill_value, dtype=dtype, chunks=other.data.chunks\n )\n else:\n data = np.full_like(other, fill_value, dtype=dtype)\n\n return Variable(dims=other.dims, data=data, attrs=other.attrs)"},{"col":0,"comment":"null","endLoc":28,"header":"def _positive_integer(value)","id":1766,"name":"_positive_integer","nodeType":"Function","startLoc":27,"text":"def _positive_integer(value):\n return isinstance(value, int) and value > 0"},{"col":0,"comment":"null","endLoc":44,"header":"def _set_file_cache_maxsize(value)","id":1767,"name":"_set_file_cache_maxsize","nodeType":"Function","startLoc":41,"text":"def _set_file_cache_maxsize(value):\n from ..backends.file_manager import FILE_CACHE\n\n FILE_CACHE.maxsize = value"},{"col":0,"comment":"null","endLoc":52,"header":"def _warn_on_setting_enable_cftimeindex(enable_cftimeindex)","id":1768,"name":"_warn_on_setting_enable_cftimeindex","nodeType":"Function","startLoc":47,"text":"def _warn_on_setting_enable_cftimeindex(enable_cftimeindex):\n warnings.warn(\n \"The enable_cftimeindex option is now a no-op \"\n \"and will be removed in a future version of xarray.\",\n FutureWarning,\n 
)"},{"attributeType":"null","col":0,"comment":"null","endLoc":3,"id":1769,"name":"DISPLAY_WIDTH","nodeType":"Attribute","startLoc":3,"text":"DISPLAY_WIDTH"},{"attributeType":"null","col":0,"comment":"null","endLoc":4,"id":1770,"name":"ARITHMETIC_JOIN","nodeType":"Attribute","startLoc":4,"text":"ARITHMETIC_JOIN"},{"attributeType":"null","col":0,"comment":"null","endLoc":5,"id":1771,"name":"ENABLE_CFTIMEINDEX","nodeType":"Attribute","startLoc":5,"text":"ENABLE_CFTIMEINDEX"},{"col":4,"comment":"Reduce this array by applying `func` along some dimension(s).\n\n Parameters\n ----------\n func : function\n Function which can be called in the form\n `f(x, axis=axis, **kwargs)` to return the result of reducing an\n np.ndarray over an integer valued axis.\n dim : hashable or sequence of hashables, optional\n Dimension(s) over which to apply `func`.\n axis : int or sequence of int, optional\n Axis(es) over which to repeatedly apply `func`. Only one of the\n 'dim' and 'axis' arguments can be supplied. If neither are\n supplied, then the reduction is calculated over the flattened array\n (by calling `f(x)` without an axis argument).\n keep_attrs : bool, optional\n If True, the variable's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n keepdims : bool, default False\n If True, the dimensions which are reduced are left in the result\n as dimensions of size one. Coordinates that use these dimensions\n are removed.\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : DataArray\n DataArray with this object's array replaced with an array with\n summarized data and the indicated dimension(s) removed.\n ","endLoc":2149,"header":"def reduce(\n self,\n func: Callable[..., Any],\n dim: Union[None, Hashable, Sequence[Hashable]] = None,\n axis: Union[None, int, Sequence[int]] = None,\n keep_attrs: bool = None,\n keepdims: bool = False,\n **kwargs: Any\n ) -> \"DataArray\"","id":1772,"name":"reduce","nodeType":"Function","startLoc":2106,"text":"def reduce(\n self,\n func: Callable[..., Any],\n dim: Union[None, Hashable, Sequence[Hashable]] = None,\n axis: Union[None, int, Sequence[int]] = None,\n keep_attrs: bool = None,\n keepdims: bool = False,\n **kwargs: Any\n ) -> \"DataArray\":\n \"\"\"Reduce this array by applying `func` along some dimension(s).\n\n Parameters\n ----------\n func : function\n Function which can be called in the form\n `f(x, axis=axis, **kwargs)` to return the result of reducing an\n np.ndarray over an integer valued axis.\n dim : hashable or sequence of hashables, optional\n Dimension(s) over which to apply `func`.\n axis : int or sequence of int, optional\n Axis(es) over which to repeatedly apply `func`. Only one of the\n 'dim' and 'axis' arguments can be supplied. If neither are\n supplied, then the reduction is calculated over the flattened array\n (by calling `f(x)` without an axis argument).\n keep_attrs : bool, optional\n If True, the variable's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n keepdims : bool, default False\n If True, the dimensions which are reduced are left in the result\n as dimensions of size one. 
Coordinates that use these dimensions\n are removed.\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : DataArray\n DataArray with this object's array replaced with an array with\n summarized data and the indicated dimension(s) removed.\n \"\"\"\n\n var = self.variable.reduce(func, dim, axis, keep_attrs, keepdims, **kwargs)\n return self._replace_maybe_drop_dims(var)"},{"col":4,"comment":"null","endLoc":700,"header":"def __init__(self, file_objs)","id":1773,"name":"__init__","nodeType":"Function","startLoc":699,"text":"def __init__(self, file_objs):\n self.file_objs = file_objs"},{"attributeType":"null","col":0,"comment":"null","endLoc":6,"id":1774,"name":"FILE_CACHE_MAXSIZE","nodeType":"Attribute","startLoc":6,"text":"FILE_CACHE_MAXSIZE"},{"attributeType":"null","col":0,"comment":"null","endLoc":7,"id":1775,"name":"WARN_FOR_UNCLOSED_FILES","nodeType":"Attribute","startLoc":7,"text":"WARN_FOR_UNCLOSED_FILES"},{"attributeType":"null","col":0,"comment":"null","endLoc":8,"id":1776,"name":"CMAP_SEQUENTIAL","nodeType":"Attribute","startLoc":8,"text":"CMAP_SEQUENTIAL"},{"attributeType":"null","col":0,"comment":"null","endLoc":9,"id":1777,"name":"CMAP_DIVERGENT","nodeType":"Attribute","startLoc":9,"text":"CMAP_DIVERGENT"},{"attributeType":"null","col":0,"comment":"null","endLoc":10,"id":1778,"name":"KEEP_ATTRS","nodeType":"Attribute","startLoc":10,"text":"KEEP_ATTRS"},{"attributeType":"null","col":0,"comment":"null","endLoc":24,"id":1779,"name":"_JOIN_OPTIONS","nodeType":"Attribute","startLoc":24,"text":"_JOIN_OPTIONS"},{"attributeType":"null","col":0,"comment":"null","endLoc":31,"id":1780,"name":"_VALIDATORS","nodeType":"Attribute","startLoc":31,"text":"_VALIDATORS"},{"attributeType":"null","col":0,"comment":"null","endLoc":55,"id":1781,"name":"_SETTERS","nodeType":"Attribute","startLoc":55,"text":"_SETTERS"},{"col":0,"comment":"","endLoc":1,"header":"options.py#","id":1782,"name":"","nodeType":"Function","startLoc":1,"text":"DISPLAY_WIDTH = \"display_width\"\n\nARITHMETIC_JOIN = \"arithmetic_join\"\n\nENABLE_CFTIMEINDEX = \"enable_cftimeindex\"\n\nFILE_CACHE_MAXSIZE = \"file_cache_maxsize\"\n\nWARN_FOR_UNCLOSED_FILES = \"warn_for_unclosed_files\"\n\nCMAP_SEQUENTIAL = \"cmap_sequential\"\n\nCMAP_DIVERGENT = \"cmap_divergent\"\n\nKEEP_ATTRS = \"keep_attrs\"\n\nOPTIONS = {\n DISPLAY_WIDTH: 80,\n ARITHMETIC_JOIN: \"inner\",\n ENABLE_CFTIMEINDEX: True,\n FILE_CACHE_MAXSIZE: 128,\n WARN_FOR_UNCLOSED_FILES: False,\n CMAP_SEQUENTIAL: \"viridis\",\n CMAP_DIVERGENT: \"RdBu_r\",\n KEEP_ATTRS: \"default\",\n}\n\n_JOIN_OPTIONS = frozenset([\"inner\", \"outer\", \"left\", \"right\", \"exact\"])\n\n_VALIDATORS = {\n DISPLAY_WIDTH: _positive_integer,\n ARITHMETIC_JOIN: _JOIN_OPTIONS.__contains__,\n ENABLE_CFTIMEINDEX: lambda value: isinstance(value, bool),\n FILE_CACHE_MAXSIZE: _positive_integer,\n WARN_FOR_UNCLOSED_FILES: lambda value: isinstance(value, bool),\n KEEP_ATTRS: lambda choice: choice in [True, False, \"default\"],\n}\n\n_SETTERS = {\n FILE_CACHE_MAXSIZE: _set_file_cache_maxsize,\n ENABLE_CFTIMEINDEX: _warn_on_setting_enable_cftimeindex,\n}"},{"col":24,"endLoc":34,"id":1783,"nodeType":"Lambda","startLoc":34,"text":"lambda value: isinstance(value, bool)"},{"col":29,"endLoc":36,"id":1784,"nodeType":"Lambda","startLoc":36,"text":"lambda value: isinstance(value, bool)"},{"col":16,"endLoc":37,"id":1785,"nodeType":"Lambda","startLoc":37,"text":"lambda choice: choice in [True, False, \"default\"]"},{"col":4,"comment":"Assign new data variables to a 
Dataset, returning a new object\n with all the original variables in addition to the new ones.\n\n Parameters\n ----------\n variables : mapping, value pairs\n Mapping from variables names to the new values. If the new values\n are callable, they are computed on the Dataset and assigned to new\n data variables. If the values are not callable, (e.g. a DataArray,\n scalar, or array), they are simply assigned.\n **variables_kwargs:\n The keyword arguments form of ``variables``.\n One of variables or variables_kwargs must be provided.\n\n Returns\n -------\n ds : Dataset\n A new Dataset with the new variables in addition to all the\n existing variables.\n\n Notes\n -----\n Since ``kwargs`` is a dictionary, the order of your arguments may not\n be preserved, and so the order of the new variables is not well\n defined. Assigning multiple variables within the same ``assign`` is\n possible, but you cannot reference other variables created within the\n same ``assign`` call.\n\n See Also\n --------\n pandas.DataFrame.assign\n\n Examples\n --------\n >>> import numpy as np\n >>> import xarray as xr\n >>> x = xr.Dataset(\n ... {\n ... \"temperature_c\": ((\"lat\", \"lon\"), 20 * np.random.rand(4).reshape(2, 2)),\n ... \"precipitation\": ((\"lat\", \"lon\"), np.random.rand(4).reshape(2, 2)),\n ... },\n ... coords={\"lat\": [10, 20], \"lon\": [150, 160]},\n ... )\n >>> x\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 18.04 12.51 17.64 9.313\n precipitation (lat, lon) float64 0.4751 0.6827 0.3697 0.03524\n\n Where the value is a callable, evaluated on dataset:\n\n >>> x.assign(temperature_f = lambda x: x.temperature_c * 9 / 5 + 32)\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 18.04 12.51 17.64 9.313\n precipitation (lat, lon) float64 0.4751 0.6827 0.3697 0.03524\n temperature_f (lat, lon) float64 64.47 54.51 63.75 48.76\n\n Alternatively, the same behavior can be achieved by directly referencing an existing dataarray:\n\n >>> x.assign(temperature_f=x[\"temperature_c\"] * 9 / 5 + 32)\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 18.04 12.51 17.64 9.313\n precipitation (lat, lon) float64 0.4751 0.6827 0.3697 0.03524\n temperature_f (lat, lon) float64 64.47 54.51 63.75 48.76\n\n ","endLoc":4236,"header":"def assign(\n self, variables: Mapping[Hashable, Any] = None, **variables_kwargs: Hashable\n ) -> \"Dataset\"","id":1786,"name":"assign","nodeType":"Function","startLoc":4147,"text":"def assign(\n self, variables: Mapping[Hashable, Any] = None, **variables_kwargs: Hashable\n ) -> \"Dataset\":\n \"\"\"Assign new data variables to a Dataset, returning a new object\n with all the original variables in addition to the new ones.\n\n Parameters\n ----------\n variables : mapping, value pairs\n Mapping from variables names to the new values. If the new values\n are callable, they are computed on the Dataset and assigned to new\n data variables. If the values are not callable, (e.g. 
a DataArray,\n scalar, or array), they are simply assigned.\n **variables_kwargs:\n The keyword arguments form of ``variables``.\n One of variables or variables_kwargs must be provided.\n\n Returns\n -------\n ds : Dataset\n A new Dataset with the new variables in addition to all the\n existing variables.\n\n Notes\n -----\n Since ``kwargs`` is a dictionary, the order of your arguments may not\n be preserved, and so the order of the new variables is not well\n defined. Assigning multiple variables within the same ``assign`` is\n possible, but you cannot reference other variables created within the\n same ``assign`` call.\n\n See Also\n --------\n pandas.DataFrame.assign\n\n Examples\n --------\n >>> import numpy as np\n >>> import xarray as xr\n >>> x = xr.Dataset(\n ... {\n ... \"temperature_c\": ((\"lat\", \"lon\"), 20 * np.random.rand(4).reshape(2, 2)),\n ... \"precipitation\": ((\"lat\", \"lon\"), np.random.rand(4).reshape(2, 2)),\n ... },\n ... coords={\"lat\": [10, 20], \"lon\": [150, 160]},\n ... )\n >>> x\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 18.04 12.51 17.64 9.313\n precipitation (lat, lon) float64 0.4751 0.6827 0.3697 0.03524\n\n Where the value is a callable, evaluated on dataset:\n\n >>> x.assign(temperature_f = lambda x: x.temperature_c * 9 / 5 + 32)\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 18.04 12.51 17.64 9.313\n precipitation (lat, lon) float64 0.4751 0.6827 0.3697 0.03524\n temperature_f (lat, lon) float64 64.47 54.51 63.75 48.76\n\n Alternatively, the same behavior can be achieved by directly referencing an existing dataarray:\n\n >>> x.assign(temperature_f=x[\"temperature_c\"] * 9 / 5 + 32)\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 18.04 12.51 17.64 9.313\n precipitation (lat, lon) float64 0.4751 0.6827 0.3697 0.03524\n temperature_f (lat, lon) float64 64.47 54.51 63.75 48.76\n\n \"\"\"\n variables = either_dict_or_kwargs(variables, variables_kwargs, \"assign\")\n data = self.copy()\n # do all calculations first...\n results = data._calc_assign_results(variables)\n # ... and then assign\n data.update(results)\n return data"},{"id":1787,"name":"related-projects.rst","nodeType":"TextFile","path":"doc","text":".. _related-projects:\n\nXarray related projects\n-----------------------\n\nHere below is a list of existing open source projects that build\nfunctionality upon xarray. 
See also section :ref:`internals` for more\ndetails on how to build xarray extensions.\n\nGeosciences\n~~~~~~~~~~~\n\n- `aospy `_: Automated analysis and management of gridded climate data.\n- `climpred `_: Analysis of ensemble forecast models for climate prediction.\n- `geocube `_: Tool to convert geopandas vector data into rasterized xarray data.\n- `infinite-diff `_: xarray-based finite-differencing, focused on gridded climate/meterology data\n- `marc_analysis `_: Analysis package for CESM/MARC experiments and output.\n- `MetPy `_: A collection of tools in Python for reading, visualizing, and performing calculations with weather data.\n- `MPAS-Analysis `_: Analysis for simulations produced with Model for Prediction Across Scales (MPAS) components and the Accelerated Climate Model for Energy (ACME).\n- `OGGM `_: Open Global Glacier Model\n- `Oocgcm `_: Analysis of large gridded geophysical datasets\n- `Open Data Cube `_: Analysis toolkit of continental scale Earth Observation data from satellites.\n- `Pangaea: `_: xarray extension for gridded land surface & weather model output).\n- `Pangeo `_: A community effort for big data geoscience in the cloud.\n- `PyGDX `_: Python 3 package for\n accessing data stored in GAMS Data eXchange (GDX) files. Also uses a custom\n subclass.\n- `Regionmask `_: plotting and creation of masks of spatial regions\n- `rioxarray `_: geospatial xarray extension powered by rasterio\n- `salem `_: Adds geolocalised subsetting, masking, and plotting operations to xarray's data structures via accessors.\n- `SatPy `_ : Library for reading and manipulating meteorological remote sensing data and writing it to various image and data file formats.\n- `Spyfit `_: FTIR spectroscopy of the atmosphere\n- `windspharm `_: Spherical\n harmonic wind analysis in Python.\n- `wrf-python `_: A collection of diagnostic and interpolation routines for use with output of the Weather Research and Forecasting (WRF-ARW) Model.\n- `xarray-simlab `_: xarray extension for computer model simulations.\n- `xarray-topo `_: xarray extension for topographic analysis and modelling.\n- `xbpch `_: xarray interface for bpch files.\n- `xclim `_: A library for calculating climate science indices with unit handling built from xarray and dask.\n- `xESMF `_: Universal Regridder for Geospatial Data.\n- `xgcm `_: Extends the xarray data model to understand finite volume grid cells (common in General Circulation Models) and provides interpolation and difference operations for such grids.\n- `xmitgcm `_: a python package for reading `MITgcm `_ binary MDS files into xarray data structures.\n- `xshape `_: Tools for working with shapefiles, topographies, and polygons in xarray.\n\nMachine Learning\n~~~~~~~~~~~~~~~~\n- `ArviZ `_: Exploratory analysis of Bayesian models, built on top of xarray.\n- `Elm `_: Parallel machine learning on xarray data structures\n- `sklearn-xarray (1) `_: Combines scikit-learn and xarray (1).\n- `sklearn-xarray (2) `_: Combines scikit-learn and xarray (2).\n\nOther domains\n~~~~~~~~~~~~~\n- `ptsa `_: EEG Time Series Analysis\n- `pycalphad `_: Computational Thermodynamics in Python\n\nExtend xarray capabilities\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n- `Collocate `_: Collocate xarray trajectories in arbitrary physical dimensions\n- `eofs `_: EOF analysis in Python.\n- `hypothesis-gufunc `_: Extension to hypothesis. Makes it easy to write unit tests with xarray objects as input.\n- `xarray_extras `_: Advanced algorithms for xarray objects (e.g. 
integrations/interpolations).\n- `xrft `_: Fourier transforms for xarray data.\n- `xr-scipy `_: A lightweight scipy wrapper for xarray.\n- `X-regression `_: Multiple linear regression from Statsmodels library coupled with Xarray library.\n- `xskillscore `_: Metrics for verifying forecasts.\n- `xyzpy `_: Easily generate high dimensional data, including parallelization.\n\nVisualization\n~~~~~~~~~~~~~\n- `Datashader `_, `geoviews `_, `holoviews `_, : visualization packages for large data.\n- `hvplot `_ : A high-level plotting API for the PyData ecosystem built on HoloViews.\n- `psyplot `_: Interactive data visualization with python.\n\nNon-Python projects\n~~~~~~~~~~~~~~~~~~~\n- `xframe `_: C++ data structures inspired by xarray.\n- `AxisArrays `_ and\n `NamedArrays `_: similar data structures for Julia.\n\nMore projects can be found at the `\"xarray\" Github topic `_.\n"},{"fileName":"reindexing.py","filePath":"asv_bench/benchmarks","id":1788,"nodeType":"File","text":"import numpy as np\n\nimport xarray as xr\n\nfrom . import requires_dask\n\n\nclass Reindex:\n def setup(self):\n data = np.random.RandomState(0).randn(1000, 100, 100)\n self.ds = xr.Dataset(\n {\"temperature\": ((\"time\", \"x\", \"y\"), data)},\n coords={\"time\": np.arange(1000), \"x\": np.arange(100), \"y\": np.arange(100)},\n )\n\n def time_1d_coarse(self):\n self.ds.reindex(time=np.arange(0, 1000, 5)).load()\n\n def time_1d_fine_all_found(self):\n self.ds.reindex(time=np.arange(0, 1000, 0.5), method=\"nearest\").load()\n\n def time_1d_fine_some_missing(self):\n self.ds.reindex(\n time=np.arange(0, 1000, 0.5), method=\"nearest\", tolerance=0.1\n ).load()\n\n def time_2d_coarse(self):\n self.ds.reindex(x=np.arange(0, 100, 2), y=np.arange(0, 100, 2)).load()\n\n def time_2d_fine_all_found(self):\n self.ds.reindex(\n x=np.arange(0, 100, 0.5), y=np.arange(0, 100, 0.5), method=\"nearest\"\n ).load()\n\n def time_2d_fine_some_missing(self):\n self.ds.reindex(\n x=np.arange(0, 100, 0.5),\n y=np.arange(0, 100, 0.5),\n method=\"nearest\",\n tolerance=0.1,\n ).load()\n\n\nclass ReindexDask(Reindex):\n def setup(self):\n requires_dask()\n super().setup()\n self.ds = self.ds.chunk({\"time\": 100})\n"},{"className":"Reindex","col":0,"comment":"null","endLoc":41,"id":1789,"nodeType":"Class","startLoc":8,"text":"class Reindex:\n def setup(self):\n data = np.random.RandomState(0).randn(1000, 100, 100)\n self.ds = xr.Dataset(\n {\"temperature\": ((\"time\", \"x\", \"y\"), data)},\n coords={\"time\": np.arange(1000), \"x\": np.arange(100), \"y\": np.arange(100)},\n )\n\n def time_1d_coarse(self):\n self.ds.reindex(time=np.arange(0, 1000, 5)).load()\n\n def time_1d_fine_all_found(self):\n self.ds.reindex(time=np.arange(0, 1000, 0.5), method=\"nearest\").load()\n\n def time_1d_fine_some_missing(self):\n self.ds.reindex(\n time=np.arange(0, 1000, 0.5), method=\"nearest\", tolerance=0.1\n ).load()\n\n def time_2d_coarse(self):\n self.ds.reindex(x=np.arange(0, 100, 2), y=np.arange(0, 100, 2)).load()\n\n def time_2d_fine_all_found(self):\n self.ds.reindex(\n x=np.arange(0, 100, 0.5), y=np.arange(0, 100, 0.5), method=\"nearest\"\n ).load()\n\n def time_2d_fine_some_missing(self):\n self.ds.reindex(\n x=np.arange(0, 100, 0.5),\n y=np.arange(0, 100, 0.5),\n method=\"nearest\",\n tolerance=0.1,\n ).load()"},{"col":4,"comment":"null","endLoc":14,"header":"def setup(self)","id":1790,"name":"setup","nodeType":"Function","startLoc":9,"text":"def setup(self):\n data = np.random.RandomState(0).randn(1000, 100, 100)\n self.ds = xr.Dataset(\n 
{\"temperature\": ((\"time\", \"x\", \"y\"), data)},\n coords={\"time\": np.arange(1000), \"x\": np.arange(100), \"y\": np.arange(100)},\n )"},{"col":4,"comment":"Convert this dataset into an xarray.DataArray\n\n The data variables of this dataset will be broadcast against each other\n and stacked along the first axis of the new array. All coordinates of\n this dataset will remain coordinates.\n\n Parameters\n ----------\n dim : str, optional\n Name of the new dimension.\n name : str, optional\n Name of the new data array.\n\n Returns\n -------\n array : xarray.DataArray\n ","endLoc":4267,"header":"def to_array(self, dim=\"variable\", name=None)","id":1791,"name":"to_array","nodeType":"Function","startLoc":4238,"text":"def to_array(self, dim=\"variable\", name=None):\n \"\"\"Convert this dataset into an xarray.DataArray\n\n The data variables of this dataset will be broadcast against each other\n and stacked along the first axis of the new array. All coordinates of\n this dataset will remain coordinates.\n\n Parameters\n ----------\n dim : str, optional\n Name of the new dimension.\n name : str, optional\n Name of the new data array.\n\n Returns\n -------\n array : xarray.DataArray\n \"\"\"\n from .dataarray import DataArray\n\n data_vars = [self.variables[k] for k in self.data_vars]\n broadcast_vars = broadcast_variables(*data_vars)\n data = duck_array_ops.stack([b.data for b in broadcast_vars], axis=0)\n\n coords = dict(self.coords)\n coords[dim] = list(self.data_vars)\n\n dims = (dim,) + broadcast_vars[0].dims\n\n return DataArray(data, coords, dims, attrs=self.attrs, name=name)"},{"col":4,"comment":"Convert this array into a pandas object with the same shape.\n\n The type of the returned object depends on the number of DataArray\n dimensions:\n\n * 0D -> `xarray.DataArray`\n * 1D -> `pandas.Series`\n * 2D -> `pandas.DataFrame`\n * 3D -> `pandas.Panel` *(deprecated)*\n\n Only works for arrays with 3 or fewer dimensions.\n\n The DataArray constructor performs the inverse transformation.\n ","endLoc":2182,"header":"def to_pandas(self) -> Union[\"DataArray\", pd.Series, pd.DataFrame]","id":1792,"name":"to_pandas","nodeType":"Function","startLoc":2151,"text":"def to_pandas(self) -> Union[\"DataArray\", pd.Series, pd.DataFrame]:\n \"\"\"Convert this array into a pandas object with the same shape.\n\n The type of the returned object depends on the number of DataArray\n dimensions:\n\n * 0D -> `xarray.DataArray`\n * 1D -> `pandas.Series`\n * 2D -> `pandas.DataFrame`\n * 3D -> `pandas.Panel` *(deprecated)*\n\n Only works for arrays with 3 or fewer dimensions.\n\n The DataArray constructor performs the inverse transformation.\n \"\"\"\n # TODO: consolidate the info about pandas constructors and the\n # attributes that correspond to their indexes into a separate module?\n constructors = {\n 0: lambda x: x,\n 1: pd.Series,\n 2: pd.DataFrame,\n 3: pdcompat.Panel,\n }\n try:\n constructor = constructors[self.ndim]\n except KeyError:\n raise ValueError(\n \"cannot convert arrays with %s dimensions into \"\n \"pandas objects\" % self.ndim\n )\n indexes = [self.get_index(dim) for dim in self.dims]\n return constructor(self.values, *indexes)"},{"col":4,"comment":"\n Parameters\n ----------\n array : array_like\n Array like object to index.\n key : VectorizedIndexer\n ","endLoc":598,"header":"def __init__(self, array, key)","id":1793,"name":"__init__","nodeType":"Function","startLoc":586,"text":"def __init__(self, array, key):\n \"\"\"\n Parameters\n ----------\n array : array_like\n Array like object to 
index.\n key : VectorizedIndexer\n \"\"\"\n if isinstance(key, (BasicIndexer, OuterIndexer)):\n self.key = _outer_to_vectorized_indexer(key, array.shape)\n else:\n self.key = _arrayize_vectorized_indexer(key, array.shape)\n self.array = as_indexable(array)"},{"col":15,"endLoc":2169,"id":1794,"nodeType":"Lambda","startLoc":2169,"text":"lambda x: x"},{"col":4,"comment":"Convert this array and its coordinates into a tidy pandas.DataFrame.\n\n The DataFrame is indexed by the Cartesian product of index coordinates\n (in the form of a :py:class:`pandas.MultiIndex`).\n\n Other coordinates are included as columns in the DataFrame.\n ","endLoc":2210,"header":"def to_dataframe(self, name: Hashable = None) -> pd.DataFrame","id":1795,"name":"to_dataframe","nodeType":"Function","startLoc":2184,"text":"def to_dataframe(self, name: Hashable = None) -> pd.DataFrame:\n \"\"\"Convert this array and its coordinates into a tidy pandas.DataFrame.\n\n The DataFrame is indexed by the Cartesian product of index coordinates\n (in the form of a :py:class:`pandas.MultiIndex`).\n\n Other coordinates are included as columns in the DataFrame.\n \"\"\"\n if name is None:\n name = self.name\n if name is None:\n raise ValueError(\n \"cannot convert an unnamed DataArray to a \"\n \"DataFrame: use the ``name`` parameter\"\n )\n\n dims = dict(zip(self.dims, self.shape))\n # By using a unique name, we can convert a DataArray into a DataFrame\n # even if it shares a name with one of its coordinates.\n # I would normally use unique_name = object() but that results in a\n # dataframe with columns in the wrong order, for reasons I have not\n # been able to debug (possibly a pandas bug?).\n unique_name = \"__unique_name_identifier_z98xfz98xugfg73ho__\"\n ds = self._to_dataset_whole(name=unique_name)\n df = ds._to_dataframe(dims)\n df.columns = [name if c == unique_name else c for c in df.columns]\n return df"},{"col":4,"comment":"null","endLoc":328,"header":"def time_open_dataset_netcdf4(self)","id":1796,"name":"time_open_dataset_netcdf4","nodeType":"Function","startLoc":327,"text":"def time_open_dataset_netcdf4(self):\n xr.open_mfdataset(self.filenames_list, engine=\"netcdf4\")"},{"col":4,"comment":"Convert this array into a pandas.Series.\n\n The Series is indexed by the Cartesian product of index coordinates\n (in the form of a :py:class:`pandas.MultiIndex`).\n ","endLoc":2219,"header":"def to_series(self) -> pd.Series","id":1797,"name":"to_series","nodeType":"Function","startLoc":2212,"text":"def to_series(self) -> pd.Series:\n \"\"\"Convert this array into a pandas.Series.\n\n The Series is indexed by the Cartesian product of index coordinates\n (in the form of a :py:class:`pandas.MultiIndex`).\n \"\"\"\n index = self.coords.to_index()\n return pd.Series(self.values.reshape(-1), index=index, name=self.name)"},{"attributeType":"null","col":8,"comment":"null","endLoc":321,"id":1798,"name":"format","nodeType":"Attribute","startLoc":321,"text":"self.format"},{"className":"IOReadMultipleNetCDF3","col":0,"comment":"null","endLoc":344,"id":1799,"nodeType":"Class","startLoc":331,"text":"class IOReadMultipleNetCDF3(IOReadMultipleNetCDF4):\n def setup(self):\n\n requires_dask()\n\n self.make_ds()\n self.format = \"NETCDF3_64BIT\"\n xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format)\n\n def time_load_dataset_scipy(self):\n xr.open_mfdataset(self.filenames_list, engine=\"scipy\").load()\n\n def time_open_dataset_scipy(self):\n xr.open_mfdataset(self.filenames_list, 
engine=\"scipy\")"},{"col":4,"comment":"null","endLoc":338,"header":"def setup(self)","id":1800,"name":"setup","nodeType":"Function","startLoc":332,"text":"def setup(self):\n\n requires_dask()\n\n self.make_ds()\n self.format = \"NETCDF3_64BIT\"\n xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format)"},{"col":4,"comment":"null","endLoc":4276,"header":"def _to_dataframe(self, ordered_dims)","id":1801,"name":"_to_dataframe","nodeType":"Function","startLoc":4269,"text":"def _to_dataframe(self, ordered_dims):\n columns = [k for k in self.variables if k not in self.dims]\n data = [\n self._variables[k].set_dims(ordered_dims).values.reshape(-1)\n for k in columns\n ]\n index = self.coords.to_index(ordered_dims)\n return pd.DataFrame(dict(zip(columns, data)), index=index)"},{"col":4,"comment":"null","endLoc":341,"header":"def time_load_dataset_scipy(self)","id":1802,"name":"time_load_dataset_scipy","nodeType":"Function","startLoc":340,"text":"def time_load_dataset_scipy(self):\n xr.open_mfdataset(self.filenames_list, engine=\"scipy\").load()"},{"col":4,"comment":"Convert this dataset into a pandas.DataFrame.\n\n Non-index variables in this dataset form the columns of the\n DataFrame. The DataFrame is be indexed by the Cartesian product of\n this dataset's indices.\n ","endLoc":4285,"header":"def to_dataframe(self)","id":1803,"name":"to_dataframe","nodeType":"Function","startLoc":4278,"text":"def to_dataframe(self):\n \"\"\"Convert this dataset into a pandas.DataFrame.\n\n Non-index variables in this dataset form the columns of the\n DataFrame. The DataFrame is be indexed by the Cartesian product of\n this dataset's indices.\n \"\"\"\n return self._to_dataframe(self.dims)"},{"col":4,"comment":"null","endLoc":4325,"header":"def _set_sparse_data_from_dataframe(\n self, dataframe: pd.DataFrame, dims: tuple, shape: Tuple[int, ...]\n ) -> None","id":1804,"name":"_set_sparse_data_from_dataframe","nodeType":"Function","startLoc":4287,"text":"def _set_sparse_data_from_dataframe(\n self, dataframe: pd.DataFrame, dims: tuple, shape: Tuple[int, ...]\n ) -> None:\n from sparse import COO\n\n idx = dataframe.index\n if isinstance(idx, pd.MultiIndex):\n try:\n codes = idx.codes\n except AttributeError:\n # deprecated since pandas 0.24\n codes = idx.labels\n coords = np.stack([np.asarray(code) for code in codes], axis=0)\n is_sorted = idx.is_lexsorted\n else:\n coords = np.arange(idx.size).reshape(1, -1)\n is_sorted = True\n\n for name, series in dataframe.items():\n # Cast to a NumPy array first, in case the Series is a pandas\n # Extension array (which doesn't have a valid NumPy dtype)\n values = np.asarray(series)\n\n # In virtually all real use cases, the sparse array will now have\n # missing values and needs a fill_value. 
For consistency, don't\n # special case the rare exceptions (e.g., dtype=int without a\n # MultiIndex).\n dtype, fill_value = dtypes.maybe_promote(values.dtype)\n values = np.asarray(values, dtype=dtype)\n\n data = COO(\n coords,\n values,\n shape,\n has_duplicates=False,\n sorted=is_sorted,\n fill_value=fill_value,\n )\n self[name] = (dims, data)"},{"col":4,"comment":"null","endLoc":344,"header":"def time_open_dataset_scipy(self)","id":1805,"name":"time_open_dataset_scipy","nodeType":"Function","startLoc":343,"text":"def time_open_dataset_scipy(self):\n xr.open_mfdataset(self.filenames_list, engine=\"scipy\")"},{"attributeType":"null","col":8,"comment":"null","endLoc":337,"id":1806,"name":"format","nodeType":"Attribute","startLoc":337,"text":"self.format"},{"col":4,"comment":"null","endLoc":17,"header":"def time_1d_coarse(self)","id":1807,"name":"time_1d_coarse","nodeType":"Function","startLoc":16,"text":"def time_1d_coarse(self):\n self.ds.reindex(time=np.arange(0, 1000, 5)).load()"},{"className":"IOReadMultipleNetCDF4Dask","col":0,"comment":"null","endLoc":398,"id":1808,"nodeType":"Class","startLoc":347,"text":"class IOReadMultipleNetCDF4Dask(IOMultipleNetCDF):\n def setup(self):\n\n requires_dask()\n\n self.make_ds()\n self.format = \"NETCDF4\"\n xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format)\n\n def time_load_dataset_netcdf4_with_block_chunks(self):\n xr.open_mfdataset(\n self.filenames_list, engine=\"netcdf4\", chunks=self.block_chunks\n ).load()\n\n def time_load_dataset_netcdf4_with_block_chunks_multiprocessing(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_mfdataset(\n self.filenames_list, engine=\"netcdf4\", chunks=self.block_chunks\n ).load()\n\n def time_load_dataset_netcdf4_with_time_chunks(self):\n xr.open_mfdataset(\n self.filenames_list, engine=\"netcdf4\", chunks=self.time_chunks\n ).load()\n\n def time_load_dataset_netcdf4_with_time_chunks_multiprocessing(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_mfdataset(\n self.filenames_list, engine=\"netcdf4\", chunks=self.time_chunks\n ).load()\n\n def time_open_dataset_netcdf4_with_block_chunks(self):\n xr.open_mfdataset(\n self.filenames_list, engine=\"netcdf4\", chunks=self.block_chunks\n )\n\n def time_open_dataset_netcdf4_with_block_chunks_multiprocessing(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_mfdataset(\n self.filenames_list, engine=\"netcdf4\", chunks=self.block_chunks\n )\n\n def time_open_dataset_netcdf4_with_time_chunks(self):\n xr.open_mfdataset(\n self.filenames_list, engine=\"netcdf4\", chunks=self.time_chunks\n )\n\n def time_open_dataset_netcdf4_with_time_chunks_multiprocessing(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_mfdataset(\n self.filenames_list, engine=\"netcdf4\", chunks=self.time_chunks\n )"},{"col":4,"comment":"null","endLoc":354,"header":"def setup(self)","id":1809,"name":"setup","nodeType":"Function","startLoc":348,"text":"def setup(self):\n\n requires_dask()\n\n self.make_ds()\n self.format = \"NETCDF4\"\n xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format)"},{"col":4,"comment":"null","endLoc":619,"header":"def transpose(self, order)","id":1810,"name":"transpose","nodeType":"Function","startLoc":617,"text":"def transpose(self, order):\n key = VectorizedIndexer(tuple(k.transpose(order) for k in self.key.tuple))\n return type(self)(self.array, key)"},{"col":4,"comment":"null","endLoc":565,"header":"def __getitem__(self, 
indexer)","id":1811,"name":"__getitem__","nodeType":"Function","startLoc":561,"text":"def __getitem__(self, indexer):\n if isinstance(indexer, VectorizedIndexer):\n array = LazilyVectorizedIndexedArray(self.array, self.key)\n return array[indexer]\n return type(self)(self.array, self._updated_key(indexer))"},{"col":4,"comment":"Convert this array into a numpy.ma.MaskedArray\n\n Parameters\n ----------\n copy : bool\n If True (default) make a copy of the array in the result. If False,\n a MaskedArray view of DataArray.values is returned.\n\n Returns\n -------\n result : MaskedArray\n Masked where invalid values (nan or inf) occur.\n ","endLoc":2237,"header":"def to_masked_array(self, copy: bool = True) -> np.ma.MaskedArray","id":1812,"name":"to_masked_array","nodeType":"Function","startLoc":2221,"text":"def to_masked_array(self, copy: bool = True) -> np.ma.MaskedArray:\n \"\"\"Convert this array into a numpy.ma.MaskedArray\n\n Parameters\n ----------\n copy : bool\n If True (default) make a copy of the array in the result. If False,\n a MaskedArray view of DataArray.values is returned.\n\n Returns\n -------\n result : MaskedArray\n Masked where invalid values (nan or inf) occur.\n \"\"\"\n values = self.values # only compute lazy arrays once\n isnull = pd.isnull(values)\n return np.ma.MaskedArray(data=values, mask=isnull, copy=copy)"},{"col":4,"comment":"Write DataArray contents to a netCDF file.\n\n All parameters are passed directly to `xarray.Dataset.to_netcdf`.\n\n Notes\n -----\n Only xarray.Dataset objects can be written to netCDF files, so\n the xarray.DataArray is converted to a xarray.Dataset object\n containing a single variable. If the DataArray has no name, or if the\n name is the same as a co-ordinate name, then it is given the name\n '__xarray_dataarray_variable__'.\n ","endLoc":2266,"header":"def to_netcdf(self, *args, **kwargs) -> Union[bytes, \"Delayed\", None]","id":1813,"name":"to_netcdf","nodeType":"Function","startLoc":2239,"text":"def to_netcdf(self, *args, **kwargs) -> Union[bytes, \"Delayed\", None]:\n \"\"\"Write DataArray contents to a netCDF file.\n\n All parameters are passed directly to `xarray.Dataset.to_netcdf`.\n\n Notes\n -----\n Only xarray.Dataset objects can be written to netCDF files, so\n the xarray.DataArray is converted to a xarray.Dataset object\n containing a single variable. 
If the DataArray has no name, or if the\n name is the same as a co-ordinate name, then it is given the name\n '__xarray_dataarray_variable__'.\n \"\"\"\n from ..backends.api import DATAARRAY_NAME, DATAARRAY_VARIABLE\n\n if self.name is None:\n # If no name is set then use a generic xarray name\n dataset = self.to_dataset(name=DATAARRAY_VARIABLE)\n elif self.name in self.coords or self.name in self.dims:\n # The name is the same as one of the coords names, which netCDF\n # doesn't support, so rename it but keep track of the old name\n dataset = self.to_dataset(name=DATAARRAY_VARIABLE)\n dataset.attrs[DATAARRAY_NAME] = self.name\n else:\n # No problems with the name - so we're fine!\n dataset = self.to_dataset()\n\n return dataset.to_netcdf(*args, **kwargs)"},{"col":4,"comment":"null","endLoc":359,"header":"def time_load_dataset_netcdf4_with_block_chunks(self)","id":1814,"name":"time_load_dataset_netcdf4_with_block_chunks","nodeType":"Function","startLoc":356,"text":"def time_load_dataset_netcdf4_with_block_chunks(self):\n xr.open_mfdataset(\n self.filenames_list, engine=\"netcdf4\", chunks=self.block_chunks\n ).load()"},{"col":4,"comment":"null","endLoc":4338,"header":"def _set_numpy_data_from_dataframe(\n self, dataframe: pd.DataFrame, dims: tuple, shape: Tuple[int, ...]\n ) -> None","id":1815,"name":"_set_numpy_data_from_dataframe","nodeType":"Function","startLoc":4327,"text":"def _set_numpy_data_from_dataframe(\n self, dataframe: pd.DataFrame, dims: tuple, shape: Tuple[int, ...]\n ) -> None:\n idx = dataframe.index\n if isinstance(idx, pd.MultiIndex):\n # expand the DataFrame to include the product of all levels\n full_idx = pd.MultiIndex.from_product(idx.levels, names=idx.names)\n dataframe = dataframe.reindex(full_idx)\n\n for name, series in dataframe.items():\n data = np.asarray(series).reshape(shape)\n self[name] = (dims, data)"},{"col":4,"comment":"null","endLoc":365,"header":"def time_load_dataset_netcdf4_with_block_chunks_multiprocessing(self)","id":1816,"name":"time_load_dataset_netcdf4_with_block_chunks_multiprocessing","nodeType":"Function","startLoc":361,"text":"def time_load_dataset_netcdf4_with_block_chunks_multiprocessing(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_mfdataset(\n self.filenames_list, engine=\"netcdf4\", chunks=self.block_chunks\n ).load()"},{"col":4,"comment":"null","endLoc":370,"header":"def time_load_dataset_netcdf4_with_time_chunks(self)","id":1817,"name":"time_load_dataset_netcdf4_with_time_chunks","nodeType":"Function","startLoc":367,"text":"def time_load_dataset_netcdf4_with_time_chunks(self):\n xr.open_mfdataset(\n self.filenames_list, engine=\"netcdf4\", chunks=self.time_chunks\n ).load()"},{"col":4,"comment":"Convert a pandas.DataFrame into an xarray.Dataset\n\n Each column will be converted into an independent variable in the\n Dataset. If the dataframe's index is a MultiIndex, it will be expanded\n into a tensor product of one-dimensional indices (filling in missing\n values with NaN). This method will produce a Dataset very similar to\n that on which the 'to_dataframe' method was called, except with\n possibly redundant dimensions (since all dataset variables will have\n the same dimensionality)\n\n Parameters\n ----------\n dataframe : pandas.DataFrame\n DataFrame from which to copy data and indices.\n sparse : bool\n If true, create a sparse arrays instead of dense numpy arrays. This\n can potentially save a large amount of memory if the DataFrame has\n a MultiIndex. 
Requires the sparse package (sparse.pydata.org).\n\n Returns\n -------\n New Dataset.\n\n See also\n --------\n xarray.DataArray.from_series\n ","endLoc":4397,"header":"@classmethod\n def from_dataframe(cls, dataframe: pd.DataFrame, sparse: bool = False) -> \"Dataset\"","id":1818,"name":"from_dataframe","nodeType":"Function","startLoc":4340,"text":"@classmethod\n def from_dataframe(cls, dataframe: pd.DataFrame, sparse: bool = False) -> \"Dataset\":\n \"\"\"Convert a pandas.DataFrame into an xarray.Dataset\n\n Each column will be converted into an independent variable in the\n Dataset. If the dataframe's index is a MultiIndex, it will be expanded\n into a tensor product of one-dimensional indices (filling in missing\n values with NaN). This method will produce a Dataset very similar to\n that on which the 'to_dataframe' method was called, except with\n possibly redundant dimensions (since all dataset variables will have\n the same dimensionality)\n\n Parameters\n ----------\n dataframe : pandas.DataFrame\n DataFrame from which to copy data and indices.\n sparse : bool\n If true, create a sparse arrays instead of dense numpy arrays. This\n can potentially save a large amount of memory if the DataFrame has\n a MultiIndex. Requires the sparse package (sparse.pydata.org).\n\n Returns\n -------\n New Dataset.\n\n See also\n --------\n xarray.DataArray.from_series\n \"\"\"\n # TODO: Add an option to remove dimensions along which the variables\n # are constant, to enable consistent serialization to/from a dataframe,\n # even if some variables have different dimensionality.\n\n if not dataframe.columns.is_unique:\n raise ValueError(\"cannot convert DataFrame with non-unique columns\")\n\n idx = dataframe.index\n obj = cls()\n\n if isinstance(idx, pd.MultiIndex):\n dims = tuple(\n name if name is not None else \"level_%i\" % n\n for n, name in enumerate(idx.names)\n )\n for dim, lev in zip(dims, idx.levels):\n obj[dim] = (dim, lev)\n shape = tuple(lev.size for lev in idx.levels)\n else:\n index_name = idx.name if idx.name is not None else \"index\"\n dims = (index_name,)\n obj[index_name] = (dims, idx)\n shape = (idx.size,)\n\n if sparse:\n obj._set_sparse_data_from_dataframe(dataframe, dims, shape)\n else:\n obj._set_numpy_data_from_dataframe(dataframe, dims, shape)\n return obj"},{"col":4,"comment":"\n Convert this xarray.DataArray into a dictionary following xarray\n naming conventions.\n\n Converts all variables and attributes to native Python objects.\n Useful for coverting to json. To avoid datetime incompatibility\n use decode_times=False kwarg in xarrray.open_dataset.\n\n Parameters\n ----------\n data : bool, optional\n Whether to include the actual data in the dictionary. When set to\n False, returns just the schema.\n\n See also\n --------\n DataArray.from_dict\n ","endLoc":2291,"header":"def to_dict(self, data: bool = True) -> dict","id":1819,"name":"to_dict","nodeType":"Function","startLoc":2268,"text":"def to_dict(self, data: bool = True) -> dict:\n \"\"\"\n Convert this xarray.DataArray into a dictionary following xarray\n naming conventions.\n\n Converts all variables and attributes to native Python objects.\n Useful for coverting to json. To avoid datetime incompatibility\n use decode_times=False kwarg in xarrray.open_dataset.\n\n Parameters\n ----------\n data : bool, optional\n Whether to include the actual data in the dictionary. 
When set to\n False, returns just the schema.\n\n See also\n --------\n DataArray.from_dict\n \"\"\"\n d = self.variable.to_dict(data=data)\n d.update({\"coords\": {}, \"name\": self.name})\n for k in self.coords:\n d[\"coords\"][k] = self.coords[k].variable.to_dict(data=data)\n return d"},{"col":4,"comment":"null","endLoc":574,"header":"def __setitem__(self, key, value)","id":1820,"name":"__setitem__","nodeType":"Function","startLoc":567,"text":"def __setitem__(self, key, value):\n if isinstance(key, VectorizedIndexer):\n raise NotImplementedError(\n \"Lazy item assignment with the vectorized indexer is not yet \"\n \"implemented. Load your data first by .load() or compute().\"\n )\n full_key = self._updated_key(key)\n self.array[full_key] = value"},{"col":4,"comment":"null","endLoc":376,"header":"def time_load_dataset_netcdf4_with_time_chunks_multiprocessing(self)","id":1821,"name":"time_load_dataset_netcdf4_with_time_chunks_multiprocessing","nodeType":"Function","startLoc":372,"text":"def time_load_dataset_netcdf4_with_time_chunks_multiprocessing(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_mfdataset(\n self.filenames_list, engine=\"netcdf4\", chunks=self.time_chunks\n ).load()"},{"col":4,"comment":"\n Convert a dictionary into an xarray.DataArray\n\n Input dict can take several forms::\n\n d = {'dims': ('t'), 'data': x}\n\n d = {'coords': {'t': {'dims': 't', 'data': t,\n 'attrs': {'units':'s'}}},\n 'attrs': {'title': 'air temperature'},\n 'dims': 't',\n 'data': x,\n 'name': 'a'}\n\n where 't' is the name of the dimesion, 'a' is the name of the array,\n and x and t are lists, numpy.arrays, or pandas objects.\n\n Parameters\n ----------\n d : dict, with a minimum structure of {'dims': [..], 'data': [..]}\n\n Returns\n -------\n obj : xarray.DataArray\n\n See also\n --------\n DataArray.to_dict\n Dataset.from_dict\n ","endLoc":2343,"header":"@classmethod\n def from_dict(cls, d: dict) -> \"DataArray\"","id":1822,"name":"from_dict","nodeType":"Function","startLoc":2293,"text":"@classmethod\n def from_dict(cls, d: dict) -> \"DataArray\":\n \"\"\"\n Convert a dictionary into an xarray.DataArray\n\n Input dict can take several forms::\n\n d = {'dims': ('t'), 'data': x}\n\n d = {'coords': {'t': {'dims': 't', 'data': t,\n 'attrs': {'units':'s'}}},\n 'attrs': {'title': 'air temperature'},\n 'dims': 't',\n 'data': x,\n 'name': 'a'}\n\n where 't' is the name of the dimesion, 'a' is the name of the array,\n and x and t are lists, numpy.arrays, or pandas objects.\n\n Parameters\n ----------\n d : dict, with a minimum structure of {'dims': [..], 'data': [..]}\n\n Returns\n -------\n obj : xarray.DataArray\n\n See also\n --------\n DataArray.to_dict\n Dataset.from_dict\n \"\"\"\n coords = None\n if \"coords\" in d:\n try:\n coords = {\n k: (v[\"dims\"], v[\"data\"], v.get(\"attrs\"))\n for k, v in d[\"coords\"].items()\n }\n except KeyError as e:\n raise ValueError(\n \"cannot convert dict when coords are missing the key \"\n \"'{dims_data}'\".format(dims_data=str(e.args[0]))\n )\n try:\n data = d[\"data\"]\n except KeyError:\n raise ValueError(\"cannot convert dict without the key 'data''\")\n else:\n obj = cls(data, coords, d.get(\"dims\"), d.get(\"name\"), d.get(\"attrs\"))\n return obj"},{"col":4,"comment":"null","endLoc":577,"header":"def __repr__(self)","id":1823,"name":"__repr__","nodeType":"Function","startLoc":576,"text":"def __repr__(self):\n return \"%s(array=%r, key=%r)\" % (type(self).__name__, self.array, 
self.key)"},{"col":4,"comment":"null","endLoc":20,"header":"def time_1d_fine_all_found(self)","id":1824,"name":"time_1d_fine_all_found","nodeType":"Function","startLoc":19,"text":"def time_1d_fine_all_found(self):\n self.ds.reindex(time=np.arange(0, 1000, 0.5), method=\"nearest\").load()"},{"attributeType":"null","col":4,"comment":"null","endLoc":507,"id":1825,"name":"__slots__","nodeType":"Attribute","startLoc":507,"text":"__slots__"},{"attributeType":"ExplicitlyIndexed | NumpyIndexingAdapter | PandasIndexAdapter | DaskIndexingAdapter | NdArrayLikeIndexingAdapter","col":8,"comment":"null","endLoc":527,"id":1826,"name":"array","nodeType":"Attribute","startLoc":527,"text":"self.array"},{"col":4,"comment":"null","endLoc":381,"header":"def time_open_dataset_netcdf4_with_block_chunks(self)","id":1827,"name":"time_open_dataset_netcdf4_with_block_chunks","nodeType":"Function","startLoc":378,"text":"def time_open_dataset_netcdf4_with_block_chunks(self):\n xr.open_mfdataset(\n self.filenames_list, engine=\"netcdf4\", chunks=self.block_chunks\n )"},{"col":4,"comment":"null","endLoc":387,"header":"def time_open_dataset_netcdf4_with_block_chunks_multiprocessing(self)","id":1828,"name":"time_open_dataset_netcdf4_with_block_chunks_multiprocessing","nodeType":"Function","startLoc":383,"text":"def time_open_dataset_netcdf4_with_block_chunks_multiprocessing(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_mfdataset(\n self.filenames_list, engine=\"netcdf4\", chunks=self.block_chunks\n )"},{"col":4,"comment":"null","endLoc":392,"header":"def time_open_dataset_netcdf4_with_time_chunks(self)","id":1829,"name":"time_open_dataset_netcdf4_with_time_chunks","nodeType":"Function","startLoc":389,"text":"def time_open_dataset_netcdf4_with_time_chunks(self):\n xr.open_mfdataset(\n self.filenames_list, engine=\"netcdf4\", chunks=self.time_chunks\n )"},{"col":0,"comment":"Return a new object of zeros with the same shape and\n type as a given dataarray or dataset.\n\n Parameters\n ----------\n other : DataArray, Dataset, or Variable\n The reference object. The output will have the same dimensions and coordinates as this object.\n dtype : dtype, optional\n dtype of the new array. If omitted, it defaults to other.dtype.\n\n Returns\n -------\n out : same as object\n New object of zeros with the same shape and type as other.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> x = xr.DataArray(np.arange(6).reshape(2, 3),\n ... dims=['lat', 'lon'],\n ... coords={'lat': [1, 2], 'lon': [0, 1, 2]})\n >>> x\n \n array([[0, 1, 2],\n [3, 4, 5]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.zeros_like(x)\n \n array([[0, 0, 0],\n [0, 0, 0]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.zeros_like(x, dtype=np.float)\n \n array([[0., 0., 0.],\n [0., 0., 0.]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n See also\n --------\n\n ones_like\n full_like\n\n ","endLoc":1373,"header":"def zeros_like(other, dtype: DTypeLike = None)","id":1830,"name":"zeros_like","nodeType":"Function","startLoc":1318,"text":"def zeros_like(other, dtype: DTypeLike = None):\n \"\"\"Return a new object of zeros with the same shape and\n type as a given dataarray or dataset.\n\n Parameters\n ----------\n other : DataArray, Dataset, or Variable\n The reference object. The output will have the same dimensions and coordinates as this object.\n dtype : dtype, optional\n dtype of the new array. 
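A possible round trip through the `DataArray.to_dict` / `DataArray.from_dict` pair documented above, relying only on the dictionary layout shown in their docstrings ('dims', 'data', 'coords', 'attrs', 'name'); the array contents are arbitrary:

    import numpy as np
    import xarray as xr

    da = xr.DataArray(
        np.arange(3.0),
        dims="t",
        coords={"t": [0, 1, 2]},
        attrs={"units": "K"},
        name="temperature",
    )

    d = da.to_dict()                      # nested dict of native Python objects
    roundtrip = xr.DataArray.from_dict(d)
    assert roundtrip.identical(da)

    schema_only = da.to_dict(data=False)  # keeps the schema, drops the values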
If omitted, it defaults to other.dtype.\n\n Returns\n -------\n out : same as object\n New object of zeros with the same shape and type as other.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> x = xr.DataArray(np.arange(6).reshape(2, 3),\n ... dims=['lat', 'lon'],\n ... coords={'lat': [1, 2], 'lon': [0, 1, 2]})\n >>> x\n \n array([[0, 1, 2],\n [3, 4, 5]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.zeros_like(x)\n \n array([[0, 0, 0],\n [0, 0, 0]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.zeros_like(x, dtype=np.float)\n \n array([[0., 0., 0.],\n [0., 0., 0.]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n See also\n --------\n\n ones_like\n full_like\n\n \"\"\"\n return full_like(other, 0, dtype)"},{"attributeType":"BasicIndexer","col":8,"comment":"null","endLoc":528,"id":1831,"name":"key","nodeType":"Attribute","startLoc":528,"text":"self.key"},{"col":4,"comment":"null","endLoc":398,"header":"def time_open_dataset_netcdf4_with_time_chunks_multiprocessing(self)","id":1832,"name":"time_open_dataset_netcdf4_with_time_chunks_multiprocessing","nodeType":"Function","startLoc":394,"text":"def time_open_dataset_netcdf4_with_time_chunks_multiprocessing(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_mfdataset(\n self.filenames_list, engine=\"netcdf4\", chunks=self.time_chunks\n )"},{"col":0,"comment":"Return a new object of ones with the same shape and\n type as a given dataarray or dataset.\n\n Parameters\n ----------\n other : DataArray, Dataset, or Variable\n The reference object. The output will have the same dimensions and coordinates as this object.\n dtype : dtype, optional\n dtype of the new array. If omitted, it defaults to other.dtype.\n\n Returns\n -------\n out : same as object\n New object of ones with the same shape and type as other.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> x = xr.DataArray(np.arange(6).reshape(2, 3),\n ... dims=['lat', 'lon'],\n ... coords={'lat': [1, 2], 'lon': [0, 1, 2]})\n >>> x\n \n array([[0, 1, 2],\n [3, 4, 5]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> >>> xr.ones_like(x)\n \n array([[1, 1, 1],\n [1, 1, 1]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n See also\n --------\n\n zeros_like\n full_like\n\n ","endLoc":1423,"header":"def ones_like(other, dtype: DTypeLike = None)","id":1833,"name":"ones_like","nodeType":"Function","startLoc":1376,"text":"def ones_like(other, dtype: DTypeLike = None):\n \"\"\"Return a new object of ones with the same shape and\n type as a given dataarray or dataset.\n\n Parameters\n ----------\n other : DataArray, Dataset, or Variable\n The reference object. The output will have the same dimensions and coordinates as this object.\n dtype : dtype, optional\n dtype of the new array. If omitted, it defaults to other.dtype.\n\n Returns\n -------\n out : same as object\n New object of ones with the same shape and type as other.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> x = xr.DataArray(np.arange(6).reshape(2, 3),\n ... dims=['lat', 'lon'],\n ... 
coords={'lat': [1, 2], 'lon': [0, 1, 2]})\n >>> x\n \n array([[0, 1, 2],\n [3, 4, 5]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> >>> xr.ones_like(x)\n \n array([[1, 1, 1],\n [1, 1, 1]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n See also\n --------\n\n zeros_like\n full_like\n\n \"\"\"\n return full_like(other, 1, dtype)"},{"attributeType":"null","col":8,"comment":"null","endLoc":353,"id":1834,"name":"format","nodeType":"Attribute","startLoc":353,"text":"self.format"},{"className":"IOReadMultipleNetCDF3Dask","col":0,"comment":"null","endLoc":432,"id":1835,"nodeType":"Class","startLoc":401,"text":"class IOReadMultipleNetCDF3Dask(IOReadMultipleNetCDF4Dask):\n def setup(self):\n\n requires_dask()\n\n self.make_ds()\n self.format = \"NETCDF3_64BIT\"\n xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format)\n\n def time_load_dataset_scipy_with_block_chunks(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_mfdataset(\n self.filenames_list, engine=\"scipy\", chunks=self.block_chunks\n ).load()\n\n def time_load_dataset_scipy_with_time_chunks(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_mfdataset(\n self.filenames_list, engine=\"scipy\", chunks=self.time_chunks\n ).load()\n\n def time_open_dataset_scipy_with_block_chunks(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_mfdataset(\n self.filenames_list, engine=\"scipy\", chunks=self.block_chunks\n )\n\n def time_open_dataset_scipy_with_time_chunks(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_mfdataset(\n self.filenames_list, engine=\"scipy\", chunks=self.time_chunks\n )"},{"col":0,"comment":"\n Explicitly combine an N-dimensional grid of datasets into one by using a\n succession of concat and merge operations along each dimension of the grid.\n\n Does not sort the supplied datasets under any circumstances, so the\n datasets must be passed in the order you wish them to be concatenated. It\n does align coordinates, but different variables on datasets can cause it to\n fail under some scenarios. 
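A short sketch of the `zeros_like` / `ones_like` helpers documented above; both delegate to `full_like`, shown last. Data and coordinate values are arbitrary:

    import numpy as np
    import xarray as xr

    x = xr.DataArray(
        np.arange(6).reshape(2, 3),
        dims=["lat", "lon"],
        coords={"lat": [1, 2], "lon": [0, 1, 2]},
    )

    zeros = xr.zeros_like(x)                 # same dims/coords, filled with 0
    ones = xr.ones_like(x, dtype=float)      # dtype may be overridden
    filled = xr.full_like(x, fill_value=-1)  # the general form both wrap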
In complex cases, you may need to clean up your\n data and use concat/merge explicitly.\n\n To concatenate along multiple dimensions the datasets must be passed as a\n nested list-of-lists, with a depth equal to the length of ``concat_dims``.\n ``manual_combine`` will concatenate along the top-level list first.\n\n Useful for combining datasets from a set of nested directories, or for\n collecting the output of a simulation parallelized along multiple\n dimensions.\n\n Parameters\n ----------\n datasets : list or nested list of xarray.Dataset objects.\n Dataset objects to combine.\n If concatenation or merging along more than one dimension is desired,\n then datasets must be supplied in a nested list-of-lists.\n concat_dim : str, or list of str, DataArray, Index or None\n Dimensions along which to concatenate variables, as used by\n :py:func:`xarray.concat`.\n Set ``concat_dim=[..., None, ...]`` explicitly to disable concatenation\n and merge instead along a particular dimension.\n The position of ``None`` in the list specifies the dimension of the\n nested-list input along which to merge.\n Must be the same length as the depth of the list passed to\n ``datasets``.\n compat : {'identical', 'equals', 'broadcast_equals',\n 'no_conflicts', 'override'}, optional\n String indicating how to compare variables of the same name for\n potential merge conflicts:\n\n - 'broadcast_equals': all values must be equal when variables are\n broadcast against each other to ensure common dimensions.\n - 'equals': all values and dimensions must be the same.\n - 'identical': all values, dimensions and attributes must be the\n same.\n - 'no_conflicts': only values which are not null in both datasets\n must be equal. The returned dataset then contains the combination\n of all non-null values.\n - 'override': skip comparing and pick variable from first dataset\n data_vars : {'minimal', 'different', 'all' or list of str}, optional\n Details are in the documentation of concat\n coords : {'minimal', 'different', 'all' or list of str}, optional\n Details are in the documentation of concat\n fill_value : scalar, optional\n Value to use for newly missing values\n join : {'outer', 'inner', 'left', 'right', 'exact'}, optional\n String indicating how to combine differing indexes\n (excluding concat_dim) in objects\n\n - 'outer': use the union of object indexes\n - 'inner': use the intersection of object indexes\n - 'left': use indexes from the first object with each dimension\n - 'right': use indexes from the last object with each dimension\n - 'exact': instead of aligning, raise `ValueError` when indexes to be\n aligned are not equal\n - 'override': if indexes are of same size, rewrite indexes to be\n those of the first object with that dimension. Indexes for the same\n dimension must have the same size in all objects.\n\n Returns\n -------\n combined : xarray.Dataset\n\n Examples\n --------\n\n A common task is collecting data from a parallelized simulation in which\n each process wrote out to a separate file. 
A domain which was decomposed\n into 4 parts, 2 each along both the x and y axes, requires organising the\n datasets into a doubly-nested list, e.g:\n\n >>> x1y1\n \n Dimensions: (x: 2, y: 2)\n Dimensions without coordinates: x, y\n Data variables:\n temperature (x, y) float64 11.04 23.57 20.77 ...\n precipitation (x, y) float64 5.904 2.453 3.404 ...\n\n >>> ds_grid = [[x1y1, x1y2], [x2y1, x2y2]]\n >>> combined = xr.combine_nested(ds_grid, concat_dim=['x', 'y'])\n \n Dimensions: (x: 4, y: 4)\n Dimensions without coordinates: x, y\n Data variables:\n temperature (x, y) float64 11.04 23.57 20.77 ...\n precipitation (x, y) float64 5.904 2.453 3.404 ...\n\n ``manual_combine`` can also be used to explicitly merge datasets with\n different variables. For example if we have 4 datasets, which are divided\n along two times, and contain two different variables, we can pass ``None``\n to ``concat_dim`` to specify the dimension of the nested list over which\n we wish to use ``merge`` instead of ``concat``:\n\n >>> t1temp\n \n Dimensions: (t: 5)\n Dimensions without coordinates: t\n Data variables:\n temperature (t) float64 11.04 23.57 20.77 ...\n\n >>> t1precip\n \n Dimensions: (t: 5)\n Dimensions without coordinates: t\n Data variables:\n precipitation (t) float64 5.904 2.453 3.404 ...\n\n >>> ds_grid = [[t1temp, t1precip], [t2temp, t2precip]]\n >>> combined = xr.combine_nested(ds_grid, concat_dim=['t', None])\n \n Dimensions: (t: 10)\n Dimensions without coordinates: t\n Data variables:\n temperature (t) float64 11.04 23.57 20.77 ...\n precipitation (t) float64 5.904 2.453 3.404 ...\n\n See also\n --------\n concat\n merge\n auto_combine\n ","endLoc":465,"header":"def combine_nested(\n datasets,\n concat_dim,\n compat=\"no_conflicts\",\n data_vars=\"all\",\n coords=\"different\",\n fill_value=dtypes.NA,\n join=\"outer\",\n)","id":1836,"name":"combine_nested","nodeType":"Function","startLoc":312,"text":"def combine_nested(\n datasets,\n concat_dim,\n compat=\"no_conflicts\",\n data_vars=\"all\",\n coords=\"different\",\n fill_value=dtypes.NA,\n join=\"outer\",\n):\n \"\"\"\n Explicitly combine an N-dimensional grid of datasets into one by using a\n succession of concat and merge operations along each dimension of the grid.\n\n Does not sort the supplied datasets under any circumstances, so the\n datasets must be passed in the order you wish them to be concatenated. It\n does align coordinates, but different variables on datasets can cause it to\n fail under some scenarios. 
In complex cases, you may need to clean up your\n data and use concat/merge explicitly.\n\n To concatenate along multiple dimensions the datasets must be passed as a\n nested list-of-lists, with a depth equal to the length of ``concat_dims``.\n ``manual_combine`` will concatenate along the top-level list first.\n\n Useful for combining datasets from a set of nested directories, or for\n collecting the output of a simulation parallelized along multiple\n dimensions.\n\n Parameters\n ----------\n datasets : list or nested list of xarray.Dataset objects.\n Dataset objects to combine.\n If concatenation or merging along more than one dimension is desired,\n then datasets must be supplied in a nested list-of-lists.\n concat_dim : str, or list of str, DataArray, Index or None\n Dimensions along which to concatenate variables, as used by\n :py:func:`xarray.concat`.\n Set ``concat_dim=[..., None, ...]`` explicitly to disable concatenation\n and merge instead along a particular dimension.\n The position of ``None`` in the list specifies the dimension of the\n nested-list input along which to merge.\n Must be the same length as the depth of the list passed to\n ``datasets``.\n compat : {'identical', 'equals', 'broadcast_equals',\n 'no_conflicts', 'override'}, optional\n String indicating how to compare variables of the same name for\n potential merge conflicts:\n\n - 'broadcast_equals': all values must be equal when variables are\n broadcast against each other to ensure common dimensions.\n - 'equals': all values and dimensions must be the same.\n - 'identical': all values, dimensions and attributes must be the\n same.\n - 'no_conflicts': only values which are not null in both datasets\n must be equal. The returned dataset then contains the combination\n of all non-null values.\n - 'override': skip comparing and pick variable from first dataset\n data_vars : {'minimal', 'different', 'all' or list of str}, optional\n Details are in the documentation of concat\n coords : {'minimal', 'different', 'all' or list of str}, optional\n Details are in the documentation of concat\n fill_value : scalar, optional\n Value to use for newly missing values\n join : {'outer', 'inner', 'left', 'right', 'exact'}, optional\n String indicating how to combine differing indexes\n (excluding concat_dim) in objects\n\n - 'outer': use the union of object indexes\n - 'inner': use the intersection of object indexes\n - 'left': use indexes from the first object with each dimension\n - 'right': use indexes from the last object with each dimension\n - 'exact': instead of aligning, raise `ValueError` when indexes to be\n aligned are not equal\n - 'override': if indexes are of same size, rewrite indexes to be\n those of the first object with that dimension. Indexes for the same\n dimension must have the same size in all objects.\n\n Returns\n -------\n combined : xarray.Dataset\n\n Examples\n --------\n\n A common task is collecting data from a parallelized simulation in which\n each process wrote out to a separate file. 
A domain which was decomposed\n into 4 parts, 2 each along both the x and y axes, requires organising the\n datasets into a doubly-nested list, e.g:\n\n >>> x1y1\n \n Dimensions: (x: 2, y: 2)\n Dimensions without coordinates: x, y\n Data variables:\n temperature (x, y) float64 11.04 23.57 20.77 ...\n precipitation (x, y) float64 5.904 2.453 3.404 ...\n\n >>> ds_grid = [[x1y1, x1y2], [x2y1, x2y2]]\n >>> combined = xr.combine_nested(ds_grid, concat_dim=['x', 'y'])\n \n Dimensions: (x: 4, y: 4)\n Dimensions without coordinates: x, y\n Data variables:\n temperature (x, y) float64 11.04 23.57 20.77 ...\n precipitation (x, y) float64 5.904 2.453 3.404 ...\n\n ``manual_combine`` can also be used to explicitly merge datasets with\n different variables. For example if we have 4 datasets, which are divided\n along two times, and contain two different variables, we can pass ``None``\n to ``concat_dim`` to specify the dimension of the nested list over which\n we wish to use ``merge`` instead of ``concat``:\n\n >>> t1temp\n \n Dimensions: (t: 5)\n Dimensions without coordinates: t\n Data variables:\n temperature (t) float64 11.04 23.57 20.77 ...\n\n >>> t1precip\n \n Dimensions: (t: 5)\n Dimensions without coordinates: t\n Data variables:\n precipitation (t) float64 5.904 2.453 3.404 ...\n\n >>> ds_grid = [[t1temp, t1precip], [t2temp, t2precip]]\n >>> combined = xr.combine_nested(ds_grid, concat_dim=['t', None])\n \n Dimensions: (t: 10)\n Dimensions without coordinates: t\n Data variables:\n temperature (t) float64 11.04 23.57 20.77 ...\n precipitation (t) float64 5.904 2.453 3.404 ...\n\n See also\n --------\n concat\n merge\n auto_combine\n \"\"\"\n if isinstance(concat_dim, (str, DataArray)) or concat_dim is None:\n concat_dim = [concat_dim]\n\n # The IDs argument tells _manual_combine that datasets aren't yet sorted\n return _nested_combine(\n datasets,\n concat_dims=concat_dim,\n compat=compat,\n data_vars=data_vars,\n coords=coords,\n ids=False,\n fill_value=fill_value,\n join=join,\n )"},{"col":4,"comment":"\n Convert this dataset into a dask.dataframe.DataFrame.\n\n The dimensions, coordinates and data variables in this dataset form\n the columns of the DataFrame.\n\n Parameters\n ----------\n dim_order : list, optional\n Hierarchical dimension order for the resulting dataframe. All\n arrays are transposed to this order and then written out as flat\n vectors in contiguous order, so the last dimension in this list\n will be contiguous in the resulting DataFrame. This has a major\n influence on which operations are efficient on the resulting dask\n dataframe.\n\n If provided, must include all dimensions on this dataset. By\n default, dimensions are sorted alphabetically.\n set_index : bool, optional\n If set_index=True, the dask DataFrame is indexed by this dataset's\n coordinate. Since dask DataFrames to not support multi-indexes,\n set_index only works if the dataset only contains one dimension.\n\n Returns\n -------\n dask.dataframe.DataFrame\n ","endLoc":4474,"header":"def to_dask_dataframe(self, dim_order=None, set_index=False)","id":1837,"name":"to_dask_dataframe","nodeType":"Function","startLoc":4399,"text":"def to_dask_dataframe(self, dim_order=None, set_index=False):\n \"\"\"\n Convert this dataset into a dask.dataframe.DataFrame.\n\n The dimensions, coordinates and data variables in this dataset form\n the columns of the DataFrame.\n\n Parameters\n ----------\n dim_order : list, optional\n Hierarchical dimension order for the resulting dataframe. 
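A minimal sketch of `combine_nested` for the 2x2 decomposition described above; the quadrant datasets and the helper below are invented for illustration:

    import numpy as np
    import xarray as xr

    def quadrant(offset):
        # one 2x2 tile of a decomposed 4x4 domain
        data = offset + np.arange(4.0).reshape(2, 2)
        return xr.Dataset({"temperature": (("x", "y"), data)})

    x1y1, x1y2, x2y1, x2y2 = quadrant(0), quadrant(10), quadrant(20), quadrant(30)

    # The nesting mirrors the grid: the outer list concatenates along "x",
    # the inner lists along "y"
    combined = xr.combine_nested([[x1y1, x1y2], [x2y1, x2y2]], concat_dim=["x", "y"])
    assert combined["temperature"].shape == (4, 4)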
All\n arrays are transposed to this order and then written out as flat\n vectors in contiguous order, so the last dimension in this list\n will be contiguous in the resulting DataFrame. This has a major\n influence on which operations are efficient on the resulting dask\n dataframe.\n\n If provided, must include all dimensions on this dataset. By\n default, dimensions are sorted alphabetically.\n set_index : bool, optional\n If set_index=True, the dask DataFrame is indexed by this dataset's\n coordinate. Since dask DataFrames to not support multi-indexes,\n set_index only works if the dataset only contains one dimension.\n\n Returns\n -------\n dask.dataframe.DataFrame\n \"\"\"\n\n import dask.array as da\n import dask.dataframe as dd\n\n if dim_order is None:\n dim_order = list(self.dims)\n elif set(dim_order) != set(self.dims):\n raise ValueError(\n \"dim_order {} does not match the set of dimensions on this \"\n \"Dataset: {}\".format(dim_order, list(self.dims))\n )\n\n ordered_dims = {k: self.dims[k] for k in dim_order}\n\n columns = list(ordered_dims)\n columns.extend(k for k in self.coords if k not in self.dims)\n columns.extend(self.data_vars)\n\n series_list = []\n for name in columns:\n try:\n var = self.variables[name]\n except KeyError:\n # dimension without a matching coordinate\n size = self.dims[name]\n data = da.arange(size, chunks=size, dtype=np.int64)\n var = Variable((name,), data)\n\n # IndexVariable objects have a dummy .chunk() method\n if isinstance(var, IndexVariable):\n var = var.to_base_variable()\n\n dask_array = var.set_dims(ordered_dims).chunk(self.chunks).data\n series = dd.from_array(dask_array.reshape(-1), columns=[name])\n series_list.append(series)\n\n df = dd.concat(series_list, axis=1)\n\n if set_index:\n if len(dim_order) == 1:\n (dim,) = dim_order\n df = df.set_index(dim)\n else:\n # triggers an error about multi-indexes, even if only one\n # dimension is passed\n df = df.set_index(dim_order)\n\n return df"},{"col":4,"comment":"null","endLoc":408,"header":"def setup(self)","id":1838,"name":"setup","nodeType":"Function","startLoc":402,"text":"def setup(self):\n\n requires_dask()\n\n self.make_ds()\n self.format = \"NETCDF3_64BIT\"\n xr.save_mfdataset(self.ds_list, self.filenames_list, format=self.format)"},{"col":4,"comment":"null","endLoc":25,"header":"def time_1d_fine_some_missing(self)","id":1839,"name":"time_1d_fine_some_missing","nodeType":"Function","startLoc":22,"text":"def time_1d_fine_some_missing(self):\n self.ds.reindex(\n time=np.arange(0, 1000, 0.5), method=\"nearest\", tolerance=0.1\n ).load()"},{"col":4,"comment":"Convert a pandas.Series into an xarray.DataArray.\n\n If the series's index is a MultiIndex, it will be expanded into a\n tensor product of one-dimensional coordinates (filling in missing\n values with NaN). 
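A sketch of `Dataset.to_dask_dataframe`, assuming dask is installed (as the method requires); the variable names are arbitrary:

    import numpy as np
    import xarray as xr

    ds = xr.Dataset(
        {"a": ("x", np.arange(10.0)), "b": ("x", 2 * np.arange(10.0))},
        coords={"x": np.arange(10)},
    ).chunk({"x": 5})

    # Columns: the dimension coordinate "x" plus the data variables "a" and "b"
    ddf = ds.to_dask_dataframe()
    print(ddf.columns.tolist())  # ['x', 'a', 'b']

    # With a single dimension, set_index=True makes "x" the dask DataFrame index
    ddf_indexed = ds.to_dask_dataframe(set_index=True)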
Thus this operation should be the inverse of the\n `to_series` method.\n\n If sparse=True, creates a sparse array instead of a dense NumPy array.\n Requires the pydata/sparse package.\n\n See also\n --------\n xarray.Dataset.from_dataframe\n ","endLoc":2366,"header":"@classmethod\n def from_series(cls, series: pd.Series, sparse: bool = False) -> \"DataArray\"","id":1840,"name":"from_series","nodeType":"Function","startLoc":2345,"text":"@classmethod\n def from_series(cls, series: pd.Series, sparse: bool = False) -> \"DataArray\":\n \"\"\"Convert a pandas.Series into an xarray.DataArray.\n\n If the series's index is a MultiIndex, it will be expanded into a\n tensor product of one-dimensional coordinates (filling in missing\n values with NaN). Thus this operation should be the inverse of the\n `to_series` method.\n\n If sparse=True, creates a sparse array instead of a dense NumPy array.\n Requires the pydata/sparse package.\n\n See also\n --------\n xarray.Dataset.from_dataframe\n \"\"\"\n temp_name = \"__temporary_name\"\n df = pd.DataFrame({temp_name: series})\n ds = Dataset.from_dataframe(df, sparse=sparse)\n result = cast(DataArray, ds[temp_name])\n result.name = series.name\n return result"},{"col":0,"comment":"Generalized dot product for xarray objects. Like np.einsum, but\n provides a simpler interface based on array dimensions.\n\n Parameters\n ----------\n arrays: DataArray (or Variable) objects\n Arrays to compute.\n dims: str or tuple of strings, optional\n Which dimensions to sum over.\n If not speciified, then all the common dimensions are summed over.\n **kwargs: dict\n Additional keyword arguments passed to numpy.einsum or\n dask.array.einsum\n\n Returns\n -------\n dot: DataArray\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xp\n >>> da_a = xr.DataArray(np.arange(3 * 2).reshape(3, 2), dims=['a', 'b'])\n >>> da_b = xr.DataArray(np.arange(3 * 2 * 2).reshape(3, 2, 2),\n ... dims=['a', 'b', 'c'])\n >>> da_c = xr.DataArray(np.arange(2 * 3).reshape(2, 3), dims=['c', 'd'])\n\n >>> da_a\n \n array([[0, 1],\n [2, 3],\n [4, 5]])\n Dimensions without coordinates: a, b\n\n >>> da_b\n \n array([[[ 0, 1],\n [ 2, 3]],\n [[ 4, 5],\n [ 6, 7]],\n [[ 8, 9],\n [10, 11]]])\n Dimensions without coordinates: a, b, c\n\n >>> da_c\n \n array([[0, 1, 2],\n [3, 4, 5]])\n Dimensions without coordinates: c, d\n\n >>> xr.dot(da_a, da_b, dims=['a', 'b'])\n \n array([110, 125])\n Dimensions without coordinates: c\n\n >>> xr.dot(da_a, da_b, dims=['a'])\n \n array([[40, 46],\n [70, 79]])\n Dimensions without coordinates: b, c\n\n >>> xr.dot(da_a, da_b, da_c, dims=['b', 'c'])\n \n array([[ 9, 14, 19],\n [ 93, 150, 207],\n [273, 446, 619]])\n Dimensions without coordinates: a, d\n\n ","endLoc":1180,"header":"def dot(*arrays, dims=None, **kwargs)","id":1841,"name":"dot","nodeType":"Function","startLoc":1052,"text":"def dot(*arrays, dims=None, **kwargs):\n \"\"\"Generalized dot product for xarray objects. 
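A sketch of `DataArray.from_series` with a MultiIndex series, the case the docstring above highlights; the level and series names are invented:

    import pandas as pd
    import xarray as xr

    index = pd.MultiIndex.from_product(
        [["a", "b"], [0, 1, 2]], names=["letter", "number"]
    )
    series = pd.Series([0.0, 1.0, 2.0, 3.0, 4.0, 5.0], index=index, name="counts")

    da = xr.DataArray.from_series(series)  # MultiIndex levels become the dims
    print(da.dims)   # ('letter', 'number')
    print(da.shape)  # (2, 3)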
Like np.einsum, but\n provides a simpler interface based on array dimensions.\n\n Parameters\n ----------\n arrays: DataArray (or Variable) objects\n Arrays to compute.\n dims: str or tuple of strings, optional\n Which dimensions to sum over.\n If not speciified, then all the common dimensions are summed over.\n **kwargs: dict\n Additional keyword arguments passed to numpy.einsum or\n dask.array.einsum\n\n Returns\n -------\n dot: DataArray\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xp\n >>> da_a = xr.DataArray(np.arange(3 * 2).reshape(3, 2), dims=['a', 'b'])\n >>> da_b = xr.DataArray(np.arange(3 * 2 * 2).reshape(3, 2, 2),\n ... dims=['a', 'b', 'c'])\n >>> da_c = xr.DataArray(np.arange(2 * 3).reshape(2, 3), dims=['c', 'd'])\n\n >>> da_a\n \n array([[0, 1],\n [2, 3],\n [4, 5]])\n Dimensions without coordinates: a, b\n\n >>> da_b\n \n array([[[ 0, 1],\n [ 2, 3]],\n [[ 4, 5],\n [ 6, 7]],\n [[ 8, 9],\n [10, 11]]])\n Dimensions without coordinates: a, b, c\n\n >>> da_c\n \n array([[0, 1, 2],\n [3, 4, 5]])\n Dimensions without coordinates: c, d\n\n >>> xr.dot(da_a, da_b, dims=['a', 'b'])\n \n array([110, 125])\n Dimensions without coordinates: c\n\n >>> xr.dot(da_a, da_b, dims=['a'])\n \n array([[40, 46],\n [70, 79]])\n Dimensions without coordinates: b, c\n\n >>> xr.dot(da_a, da_b, da_c, dims=['b', 'c'])\n \n array([[ 9, 14, 19],\n [ 93, 150, 207],\n [273, 446, 619]])\n Dimensions without coordinates: a, d\n\n \"\"\"\n from .dataarray import DataArray\n from .variable import Variable\n\n if any(not isinstance(arr, (Variable, DataArray)) for arr in arrays):\n raise TypeError(\n \"Only xr.DataArray and xr.Variable are supported.\"\n \"Given {}.\".format([type(arr) for arr in arrays])\n )\n\n if len(arrays) == 0:\n raise TypeError(\"At least one array should be given.\")\n\n if isinstance(dims, str):\n dims = (dims,)\n\n common_dims = set.intersection(*[set(arr.dims) for arr in arrays])\n all_dims = []\n for arr in arrays:\n all_dims += [d for d in arr.dims if d not in all_dims]\n\n einsum_axes = \"abcdefghijklmnopqrstuvwxyz\"\n dim_map = {d: einsum_axes[i] for i, d in enumerate(all_dims)}\n\n if dims is None:\n # find dimensions that occur more than one times\n dim_counts = Counter()\n for arr in arrays:\n dim_counts.update(arr.dims)\n dims = tuple(d for d, c in dim_counts.items() if c > 1)\n\n dims = tuple(dims) # make dims a tuple\n\n # dimensions to be parallelized\n broadcast_dims = tuple(d for d in all_dims if d in common_dims and d not in dims)\n input_core_dims = [\n [d for d in arr.dims if d not in broadcast_dims] for arr in arrays\n ]\n output_core_dims = [tuple(d for d in all_dims if d not in dims + broadcast_dims)]\n\n # construct einsum subscripts, such as '...abc,...ab->...c'\n # Note: input_core_dims are always moved to the last position\n subscripts_list = [\n \"...\" + \"\".join([dim_map[d] for d in ds]) for ds in input_core_dims\n ]\n subscripts = \",\".join(subscripts_list)\n subscripts += \"->...\" + \"\".join([dim_map[d] for d in output_core_dims[0]])\n\n # subscripts should be passed to np.einsum as arg, not as kwargs. 
We need\n # to construct a partial function for apply_ufunc to work.\n func = functools.partial(duck_array_ops.einsum, subscripts, **kwargs)\n result = apply_ufunc(\n func,\n *arrays,\n input_core_dims=input_core_dims,\n output_core_dims=output_core_dims,\n dask=\"allowed\"\n )\n return result.transpose(*[d for d in all_dims if d in result.dims])"},{"col":4,"comment":"null","endLoc":414,"header":"def time_load_dataset_scipy_with_block_chunks(self)","id":1842,"name":"time_load_dataset_scipy_with_block_chunks","nodeType":"Function","startLoc":410,"text":"def time_load_dataset_scipy_with_block_chunks(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_mfdataset(\n self.filenames_list, engine=\"scipy\", chunks=self.block_chunks\n ).load()"},{"col":4,"comment":"Convert this array into a cdms2.Variable\n ","endLoc":2373,"header":"def to_cdms2(self) -> \"cdms2_Variable\"","id":1843,"name":"to_cdms2","nodeType":"Function","startLoc":2368,"text":"def to_cdms2(self) -> \"cdms2_Variable\":\n \"\"\"Convert this array into a cdms2.Variable\n \"\"\"\n from ..convert import to_cdms2\n\n return to_cdms2(self)"},{"className":"LazilyVectorizedIndexedArray","col":0,"comment":"Wrap an array to make vectorized indexing lazy.\n ","endLoc":628,"id":1844,"nodeType":"Class","startLoc":580,"text":"class LazilyVectorizedIndexedArray(ExplicitlyIndexedNDArrayMixin):\n \"\"\"Wrap an array to make vectorized indexing lazy.\n \"\"\"\n\n __slots__ = (\"array\", \"key\")\n\n def __init__(self, array, key):\n \"\"\"\n Parameters\n ----------\n array : array_like\n Array like object to index.\n key : VectorizedIndexer\n \"\"\"\n if isinstance(key, (BasicIndexer, OuterIndexer)):\n self.key = _outer_to_vectorized_indexer(key, array.shape)\n else:\n self.key = _arrayize_vectorized_indexer(key, array.shape)\n self.array = as_indexable(array)\n\n @property\n def shape(self):\n return np.broadcast(*self.key.tuple).shape\n\n def __array__(self, dtype=None):\n return np.asarray(self.array[self.key], dtype=None)\n\n def _updated_key(self, new_key):\n return _combine_indexers(self.key, self.shape, new_key)\n\n def __getitem__(self, indexer):\n # If the indexed array becomes a scalar, return LazilyOuterIndexedArray\n if all(isinstance(ind, integer_types) for ind in indexer.tuple):\n key = BasicIndexer(tuple(k[indexer.tuple] for k in self.key.tuple))\n return LazilyOuterIndexedArray(self.array, key)\n return type(self)(self.array, self._updated_key(indexer))\n\n def transpose(self, order):\n key = VectorizedIndexer(tuple(k.transpose(order) for k in self.key.tuple))\n return type(self)(self.array, key)\n\n def __setitem__(self, key, value):\n raise NotImplementedError(\n \"Lazy item assignment with the vectorized indexer is not yet \"\n \"implemented. 
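A sketch of `xr.dot`, reusing the shapes from the docstring quoted above (the `dims` keyword follows the signature shown there):

    import numpy as np
    import xarray as xr

    da_a = xr.DataArray(np.arange(6).reshape(3, 2), dims=["a", "b"])
    da_b = xr.DataArray(np.arange(12).reshape(3, 2, 2), dims=["a", "b", "c"])

    # Contract over the shared dims "a" and "b", leaving "c"
    result = xr.dot(da_a, da_b, dims=["a", "b"])
    print(result.values)  # [110 125]

    # With dims unspecified, every dimension occurring more than once is summed
    same = xr.dot(da_a, da_b)
    assert bool((same == result).all())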
Load your data first by .load() or compute().\"\n )\n\n def __repr__(self):\n return \"%s(array=%r, key=%r)\" % (type(self).__name__, self.array, self.key)"},{"col":4,"comment":"null","endLoc":28,"header":"def time_2d_coarse(self)","id":1845,"name":"time_2d_coarse","nodeType":"Function","startLoc":27,"text":"def time_2d_coarse(self):\n self.ds.reindex(x=np.arange(0, 100, 2), y=np.arange(0, 100, 2)).load()"},{"col":0,"comment":"Convert a DataArray into a cdms2 variable\n ","endLoc":150,"header":"def to_cdms2(dataarray, copy=True)","id":1846,"name":"to_cdms2","nodeType":"Function","startLoc":91,"text":"def to_cdms2(dataarray, copy=True):\n \"\"\"Convert a DataArray into a cdms2 variable\n \"\"\"\n # we don't want cdms2 to be a hard dependency\n import cdms2\n\n def set_cdms2_attrs(var, attrs):\n for k, v in attrs.items():\n setattr(var, k, v)\n\n # 1D axes\n axes = []\n for dim in dataarray.dims:\n coord = encode(dataarray.coords[dim])\n axis = cdms2.createAxis(coord.values, id=dim)\n set_cdms2_attrs(axis, coord.attrs)\n axes.append(axis)\n\n # Data\n var = encode(dataarray)\n cdms2_var = cdms2.createVariable(\n var.values, axes=axes, id=dataarray.name, mask=pd.isnull(var.values), copy=copy\n )\n\n # Attributes\n set_cdms2_attrs(cdms2_var, var.attrs)\n\n # Curvilinear and unstructured grids\n if dataarray.name not in dataarray.coords:\n\n cdms2_axes = {}\n for coord_name in set(dataarray.coords.keys()) - set(dataarray.dims):\n\n coord_array = dataarray.coords[coord_name].to_cdms2()\n\n cdms2_axis_cls = (\n cdms2.coord.TransientAxis2D\n if coord_array.ndim\n else cdms2.auxcoord.TransientAuxAxis1D\n )\n cdms2_axis = cdms2_axis_cls(coord_array)\n if cdms2_axis.isLongitude():\n cdms2_axes[\"lon\"] = cdms2_axis\n elif cdms2_axis.isLatitude():\n cdms2_axes[\"lat\"] = cdms2_axis\n\n if \"lon\" in cdms2_axes and \"lat\" in cdms2_axes:\n if len(cdms2_axes[\"lon\"].shape) == 2:\n cdms2_grid = cdms2.hgrid.TransientCurveGrid(\n cdms2_axes[\"lat\"], cdms2_axes[\"lon\"]\n )\n else:\n cdms2_grid = cdms2.gengrid.AbstractGenericGrid(\n cdms2_axes[\"lat\"], cdms2_axes[\"lon\"]\n )\n for axis in cdms2_grid.getAxisList():\n cdms2_var.setAxis(cdms2_var.getAxisIds().index(axis.id), axis)\n cdms2_var.setGrid(cdms2_grid)\n\n return cdms2_var"},{"col":4,"comment":"null","endLoc":602,"header":"@property\n def shape(self)","id":1847,"name":"shape","nodeType":"Function","startLoc":600,"text":"@property\n def shape(self):\n return np.broadcast(*self.key.tuple).shape"},{"col":4,"comment":"null","endLoc":420,"header":"def time_load_dataset_scipy_with_time_chunks(self)","id":1848,"name":"time_load_dataset_scipy_with_time_chunks","nodeType":"Function","startLoc":416,"text":"def time_load_dataset_scipy_with_time_chunks(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_mfdataset(\n self.filenames_list, engine=\"scipy\", chunks=self.time_chunks\n ).load()"},{"col":4,"comment":"null","endLoc":605,"header":"def __array__(self, dtype=None)","id":1849,"name":"__array__","nodeType":"Function","startLoc":604,"text":"def __array__(self, dtype=None):\n return np.asarray(self.array[self.key], dtype=None)"},{"col":4,"comment":"null","endLoc":608,"header":"def _updated_key(self, new_key)","id":1850,"name":"_updated_key","nodeType":"Function","startLoc":607,"text":"def _updated_key(self, new_key):\n return _combine_indexers(self.key, self.shape, new_key)"},{"col":4,"comment":"null","endLoc":33,"header":"def 
time_2d_fine_all_found(self)","id":1851,"name":"time_2d_fine_all_found","nodeType":"Function","startLoc":30,"text":"def time_2d_fine_all_found(self):\n self.ds.reindex(\n x=np.arange(0, 100, 0.5), y=np.arange(0, 100, 0.5), method=\"nearest\"\n ).load()"},{"col":4,"comment":"null","endLoc":615,"header":"def __getitem__(self, indexer)","id":1852,"name":"__getitem__","nodeType":"Function","startLoc":610,"text":"def __getitem__(self, indexer):\n # If the indexed array becomes a scalar, return LazilyOuterIndexedArray\n if all(isinstance(ind, integer_types) for ind in indexer.tuple):\n key = BasicIndexer(tuple(k[indexer.tuple] for k in self.key.tuple))\n return LazilyOuterIndexedArray(self.array, key)\n return type(self)(self.array, self._updated_key(indexer))"},{"col":4,"comment":"null","endLoc":41,"header":"def time_2d_fine_some_missing(self)","id":1853,"name":"time_2d_fine_some_missing","nodeType":"Function","startLoc":35,"text":"def time_2d_fine_some_missing(self):\n self.ds.reindex(\n x=np.arange(0, 100, 0.5),\n y=np.arange(0, 100, 0.5),\n method=\"nearest\",\n tolerance=0.1,\n ).load()"},{"col":4,"comment":"null","endLoc":426,"header":"def time_open_dataset_scipy_with_block_chunks(self)","id":1854,"name":"time_open_dataset_scipy_with_block_chunks","nodeType":"Function","startLoc":422,"text":"def time_open_dataset_scipy_with_block_chunks(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_mfdataset(\n self.filenames_list, engine=\"scipy\", chunks=self.block_chunks\n )"},{"col":4,"comment":"null","endLoc":432,"header":"def time_open_dataset_scipy_with_time_chunks(self)","id":1855,"name":"time_open_dataset_scipy_with_time_chunks","nodeType":"Function","startLoc":428,"text":"def time_open_dataset_scipy_with_time_chunks(self):\n with dask.config.set(scheduler=\"multiprocessing\"):\n xr.open_mfdataset(\n self.filenames_list, engine=\"scipy\", chunks=self.time_chunks\n )"},{"col":0,"comment":"Return elements from `x` or `y` depending on `cond`.\n\n Performs xarray-like broadcasting across input arguments.\n\n Parameters\n ----------\n cond : scalar, array, Variable, DataArray or Dataset with boolean dtype\n When True, return values from `x`, otherwise returns values from `y`.\n x, y : scalar, array, Variable, DataArray or Dataset\n Values from which to choose. All dimension coordinates on these objects\n must be aligned with each other and with `cond`.\n\n Returns\n -------\n In priority order: Dataset, DataArray, Variable or array, whichever\n type appears as an input argument.\n\n Examples\n --------\n >>> import xarray as xr\n >>> import numpy as np\n >>> x = xr.DataArray(0.1 * np.arange(10), dims=['lat'],\n ... coords={'lat': np.arange(10)}, name='sst')\n >>> x\n \n array([0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])\n Coordinates:\n * lat (lat) int64 0 1 2 3 4 5 6 7 8 9\n\n >>> xr.where(x < 0.5, x, 100*x)\n \n array([ 0. , 0.1, 0.2, 0.3, 0.4, 50. , 60. , 70. , 80. , 90. ])\n Coordinates:\n * lat (lat) int64 0 1 2 3 4 5 6 7 8 9\n\n >>> >>> y = xr.DataArray(\n ... 0.1 * np.arange(9).reshape(3, 3),\n ... dims=[\"lat\", \"lon\"],\n ... coords={\"lat\": np.arange(3), \"lon\": 10 + np.arange(3)},\n ... name=\"sst\",\n ... )\n >>> y\n \n array([[0. , 0.1, 0.2],\n [0.3, 0.4, 0.5],\n [0.6, 0.7, 0.8]])\n Coordinates:\n * lat (lat) int64 0 1 2\n * lon (lon) int64 10 11 12\n\n >>> xr.where(y.lat < 1, y, -1)\n \n array([[ 0. , 0.1, 0.2],\n [-1. , -1. , -1. ],\n [-1. , -1. , -1. 
]])\n Coordinates:\n * lat (lat) int64 0 1 2\n * lon (lon) int64 10 11 12\n\n >>> cond = xr.DataArray([True, False], dims=['x'])\n >>> x = xr.DataArray([1, 2], dims=['y'])\n >>> xr.where(cond, x, 0)\n \n array([[1, 2],\n [0, 0]])\n Dimensions without coordinates: x, y\n\n See also\n --------\n numpy.where : corresponding numpy function\n Dataset.where, DataArray.where : equivalent methods\n ","endLoc":1265,"header":"def where(cond, x, y)","id":1856,"name":"where","nodeType":"Function","startLoc":1183,"text":"def where(cond, x, y):\n \"\"\"Return elements from `x` or `y` depending on `cond`.\n\n Performs xarray-like broadcasting across input arguments.\n\n Parameters\n ----------\n cond : scalar, array, Variable, DataArray or Dataset with boolean dtype\n When True, return values from `x`, otherwise returns values from `y`.\n x, y : scalar, array, Variable, DataArray or Dataset\n Values from which to choose. All dimension coordinates on these objects\n must be aligned with each other and with `cond`.\n\n Returns\n -------\n In priority order: Dataset, DataArray, Variable or array, whichever\n type appears as an input argument.\n\n Examples\n --------\n >>> import xarray as xr\n >>> import numpy as np\n >>> x = xr.DataArray(0.1 * np.arange(10), dims=['lat'],\n ... coords={'lat': np.arange(10)}, name='sst')\n >>> x\n \n array([0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])\n Coordinates:\n * lat (lat) int64 0 1 2 3 4 5 6 7 8 9\n\n >>> xr.where(x < 0.5, x, 100*x)\n \n array([ 0. , 0.1, 0.2, 0.3, 0.4, 50. , 60. , 70. , 80. , 90. ])\n Coordinates:\n * lat (lat) int64 0 1 2 3 4 5 6 7 8 9\n\n >>> >>> y = xr.DataArray(\n ... 0.1 * np.arange(9).reshape(3, 3),\n ... dims=[\"lat\", \"lon\"],\n ... coords={\"lat\": np.arange(3), \"lon\": 10 + np.arange(3)},\n ... name=\"sst\",\n ... )\n >>> y\n \n array([[0. , 0.1, 0.2],\n [0.3, 0.4, 0.5],\n [0.6, 0.7, 0.8]])\n Coordinates:\n * lat (lat) int64 0 1 2\n * lon (lon) int64 10 11 12\n\n >>> xr.where(y.lat < 1, y, -1)\n \n array([[ 0. , 0.1, 0.2],\n [-1. , -1. , -1. ],\n [-1. , -1. , -1. ]])\n Coordinates:\n * lat (lat) int64 0 1 2\n * lon (lon) int64 10 11 12\n\n >>> cond = xr.DataArray([True, False], dims=['x'])\n >>> x = xr.DataArray([1, 2], dims=['y'])\n >>> xr.where(cond, x, 0)\n \n array([[1, 2],\n [0, 0]])\n Dimensions without coordinates: x, y\n\n See also\n --------\n numpy.where : corresponding numpy function\n Dataset.where, DataArray.where : equivalent methods\n \"\"\"\n # alignment for three arguments is complicated, so don't support it yet\n return apply_ufunc(\n duck_array_ops.where,\n cond,\n x,\n y,\n join=\"exact\",\n dataset_join=\"exact\",\n dask=\"allowed\",\n )"},{"col":0,"comment":"Register a custom accessor on xarray.DataArray objects.\n\n Parameters\n ----------\n name : str\n Name under which the accessor should be registered. A warning is issued\n if this name conflicts with a preexisting attribute.\n\n See also\n --------\n register_dataset_accessor\n ","endLoc":71,"header":"def register_dataarray_accessor(name)","id":1857,"name":"register_dataarray_accessor","nodeType":"Function","startLoc":58,"text":"def register_dataarray_accessor(name):\n \"\"\"Register a custom accessor on xarray.DataArray objects.\n\n Parameters\n ----------\n name : str\n Name under which the accessor should be registered. 
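A sketch of `xr.where`, mirroring the broadcasting behaviour shown in the docstring above; the arrays are arbitrary:

    import numpy as np
    import xarray as xr

    x = xr.DataArray(0.1 * np.arange(10), dims="lat", coords={"lat": np.arange(10)})

    # Keep values below 0.5, scale the rest by 100
    y = xr.where(x < 0.5, x, 100 * x)

    # cond, x and y broadcast against each other
    cond = xr.DataArray([True, False], dims="x")
    z = xr.DataArray([1, 2], dims="y")
    print(xr.where(cond, z, 0).shape)  # (2, 2)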
A warning is issued\n if this name conflicts with a preexisting attribute.\n\n See also\n --------\n register_dataset_accessor\n \"\"\"\n return _register_accessor(name, DataArray)"},{"col":0,"comment":"null","endLoc":54,"header":"def encode(var)","id":1858,"name":"encode","nodeType":"Function","startLoc":53,"text":"def encode(var):\n return CFTimedeltaCoder().encode(CFDatetimeCoder().encode(var.variable))"},{"col":0,"comment":"null","endLoc":55,"header":"def _register_accessor(name, cls)","id":1859,"name":"_register_accessor","nodeType":"Function","startLoc":42,"text":"def _register_accessor(name, cls):\n def decorator(accessor):\n if hasattr(cls, name):\n warnings.warn(\n \"registration of accessor %r under name %r for type %r is \"\n \"overriding a preexisting attribute with the same name.\"\n % (accessor, name, cls),\n AccessorRegistrationWarning,\n stacklevel=2,\n )\n setattr(cls, name, _CachedAccessor(name, accessor))\n return accessor\n\n return decorator"},{"attributeType":"null","col":8,"comment":"null","endLoc":407,"id":1860,"name":"format","nodeType":"Attribute","startLoc":407,"text":"self.format"},{"className":"IOWriteNetCDFDask","col":0,"comment":"null","endLoc":453,"id":1861,"nodeType":"Class","startLoc":443,"text":"class IOWriteNetCDFDask:\n timeout = 60\n repeat = 1\n number = 5\n\n def setup(self):\n requires_dask()\n self.write = create_delayed_write()\n\n def time_write(self):\n self.write.compute()"},{"col":4,"comment":"null","endLoc":450,"header":"def setup(self)","id":1862,"name":"setup","nodeType":"Function","startLoc":448,"text":"def setup(self):\n requires_dask()\n self.write = create_delayed_write()"},{"col":4,"comment":"\n Convert this dataset to a dictionary following xarray naming\n conventions.\n\n Converts all variables and attributes to native Python objects\n Useful for coverting to json. To avoid datetime incompatibility\n use decode_times=False kwarg in xarrray.open_dataset.\n\n Parameters\n ----------\n data : bool, optional\n Whether to include the actual data in the dictionary. When set to\n False, returns just the schema.\n\n See also\n --------\n Dataset.from_dict\n ","endLoc":4505,"header":"def to_dict(self, data=True)","id":1863,"name":"to_dict","nodeType":"Function","startLoc":4476,"text":"def to_dict(self, data=True):\n \"\"\"\n Convert this dataset to a dictionary following xarray naming\n conventions.\n\n Converts all variables and attributes to native Python objects\n Useful for coverting to json. To avoid datetime incompatibility\n use decode_times=False kwarg in xarrray.open_dataset.\n\n Parameters\n ----------\n data : bool, optional\n Whether to include the actual data in the dictionary. 
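A sketch of registering a custom accessor via `register_dataarray_accessor` (the companion `register_dataset_accessor` works the same way on Datasets); the accessor name "stats" and its `spread` property are invented for the example:

    import numpy as np
    import xarray as xr

    @xr.register_dataarray_accessor("stats")  # "stats" is an arbitrary example name
    class StatsAccessor:
        def __init__(self, xarray_obj):
            self._obj = xarray_obj

        @property
        def spread(self):
            # peak-to-peak range of the wrapped DataArray
            return float(self._obj.max() - self._obj.min())

    da = xr.DataArray(np.arange(5.0))
    print(da.stats.spread)  # 4.0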
When set to\n False, returns just the schema.\n\n See also\n --------\n Dataset.from_dict\n \"\"\"\n d = {\n \"coords\": {},\n \"attrs\": decode_numpy_dict_values(self.attrs),\n \"dims\": dict(self.dims),\n \"data_vars\": {},\n }\n for k in self.coords:\n d[\"coords\"].update({k: self[k].variable.to_dict(data=data)})\n for k in self.data_vars:\n d[\"data_vars\"].update({k: self[k].variable.to_dict(data=data)})\n return d"},{"col":0,"comment":"null","endLoc":440,"header":"def create_delayed_write()","id":1864,"name":"create_delayed_write","nodeType":"Function","startLoc":435,"text":"def create_delayed_write():\n import dask.array as da\n\n vals = da.random.random(300, chunks=(1,))\n ds = xr.Dataset({\"vals\": ([\"a\"], vals)})\n return ds.to_netcdf(\"file.nc\", engine=\"netcdf4\", compute=False)"},{"attributeType":"Dataset","col":8,"comment":"null","endLoc":11,"id":1865,"name":"ds","nodeType":"Attribute","startLoc":11,"text":"self.ds"},{"className":"ReindexDask","col":0,"comment":"null","endLoc":48,"id":1866,"nodeType":"Class","startLoc":44,"text":"class ReindexDask(Reindex):\n def setup(self):\n requires_dask()\n super().setup()\n self.ds = self.ds.chunk({\"time\": 100})"},{"col":4,"comment":"null","endLoc":48,"header":"def setup(self)","id":1867,"name":"setup","nodeType":"Function","startLoc":45,"text":"def setup(self):\n requires_dask()\n super().setup()\n self.ds = self.ds.chunk({\"time\": 100})"},{"col":4,"comment":"null","endLoc":451,"header":"def encode(self, variable, name=None)","id":1868,"name":"encode","nodeType":"Function","startLoc":440,"text":"def encode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_encoding(variable)\n if np.issubdtype(data.dtype, np.datetime64) or contains_cftime_datetimes(\n variable\n ):\n (data, units, calendar) = encode_cf_datetime(\n data, encoding.pop(\"units\", None), encoding.pop(\"calendar\", None)\n )\n safe_setitem(attrs, \"units\", units, name=name)\n safe_setitem(attrs, \"calendar\", calendar, name=name)\n\n return Variable(dims, data, attrs, encoding)"},{"col":4,"comment":"null","endLoc":16,"header":"def __init__(self, name, accessor)","id":1869,"name":"__init__","nodeType":"Function","startLoc":14,"text":"def __init__(self, name, accessor):\n self._name = name\n self._accessor = accessor"},{"col":0,"comment":"Register a custom property on xarray.Dataset objects.\n\n Parameters\n ----------\n name : str\n Name under which the accessor should be registered. A warning is issued\n if this name conflicts with a preexisting attribute.\n\n Examples\n --------\n\n In your library code::\n\n import xarray as xr\n\n @xr.register_dataset_accessor('geo')\n class GeoAccessor:\n def __init__(self, xarray_obj):\n self._obj = xarray_obj\n\n @property\n def center(self):\n # return the geographic center point of this dataset\n lon = self._obj.latitude\n lat = self._obj.longitude\n return (float(lon.mean()), float(lat.mean()))\n\n def plot(self):\n # plot this array's data on a map, e.g., using Cartopy\n pass\n\n Back in an interactive IPython session:\n\n >>> ds = xarray.Dataset({'longitude': np.linspace(0, 10),\n ... 
'latitude': np.linspace(0, 20)})\n >>> ds.geo.center\n (5.0, 10.0)\n >>> ds.geo.plot()\n # plots data on a map\n\n See also\n --------\n register_dataarray_accessor\n ","endLoc":119,"header":"def register_dataset_accessor(name)","id":1870,"name":"register_dataset_accessor","nodeType":"Function","startLoc":74,"text":"def register_dataset_accessor(name):\n \"\"\"Register a custom property on xarray.Dataset objects.\n\n Parameters\n ----------\n name : str\n Name under which the accessor should be registered. A warning is issued\n if this name conflicts with a preexisting attribute.\n\n Examples\n --------\n\n In your library code::\n\n import xarray as xr\n\n @xr.register_dataset_accessor('geo')\n class GeoAccessor:\n def __init__(self, xarray_obj):\n self._obj = xarray_obj\n\n @property\n def center(self):\n # return the geographic center point of this dataset\n lon = self._obj.latitude\n lat = self._obj.longitude\n return (float(lon.mean()), float(lat.mean()))\n\n def plot(self):\n # plot this array's data on a map, e.g., using Cartopy\n pass\n\n Back in an interactive IPython session:\n\n >>> ds = xarray.Dataset({'longitude': np.linspace(0, 10),\n ... 'latitude': np.linspace(0, 20)})\n >>> ds.geo.center\n (5.0, 10.0)\n >>> ds.geo.plot()\n # plots data on a map\n\n See also\n --------\n register_dataarray_accessor\n \"\"\"\n return _register_accessor(name, Dataset)"},{"col":0,"comment":"null","endLoc":107,"header":"def unpack_for_encoding(var)","id":1871,"name":"unpack_for_encoding","nodeType":"Function","startLoc":106,"text":"def unpack_for_encoding(var):\n return var.dims, var.data, var.attrs.copy(), var.encoding.copy()"},{"col":4,"comment":"null","endLoc":625,"header":"def __setitem__(self, key, value)","id":1872,"name":"__setitem__","nodeType":"Function","startLoc":621,"text":"def __setitem__(self, key, value):\n raise NotImplementedError(\n \"Lazy item assignment with the vectorized indexer is not yet \"\n \"implemented. 
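A possible round trip through the `Dataset.to_dict` / `Dataset.from_dict` pair documented above, relying on the layout its docstring describes ('coords', 'attrs', 'dims', 'data_vars'); the variables are arbitrary:

    import numpy as np
    import xarray as xr

    ds = xr.Dataset(
        {"temperature": (("t",), np.arange(3.0), {"units": "K"})},
        coords={"t": [0, 1, 2]},
        attrs={"title": "example"},
    )

    d = ds.to_dict()                 # nested dict of native Python objects
    roundtrip = xr.Dataset.from_dict(d)
    assert roundtrip.identical(ds)

    schema = ds.to_dict(data=False)  # same layout without the values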
Load your data first by .load() or compute().\"\n )"},{"col":4,"comment":"null","endLoc":628,"header":"def __repr__(self)","id":1873,"name":"__repr__","nodeType":"Function","startLoc":627,"text":"def __repr__(self):\n return \"%s(array=%r, key=%r)\" % (type(self).__name__, self.array, self.key)"},{"attributeType":"null","col":4,"comment":"null","endLoc":584,"id":1874,"name":"__slots__","nodeType":"Attribute","startLoc":584,"text":"__slots__"},{"attributeType":"ExplicitlyIndexed | NumpyIndexingAdapter | PandasIndexAdapter | DaskIndexingAdapter | NdArrayLikeIndexingAdapter","col":8,"comment":"null","endLoc":598,"id":1875,"name":"array","nodeType":"Attribute","startLoc":598,"text":"self.array"},{"attributeType":"null","col":0,"comment":"null","endLoc":2131,"id":1876,"name":"Coordinate","nodeType":"Attribute","startLoc":2131,"text":"Coordinate"},{"attributeType":"{tuple} | VectorizedIndexer","col":12,"comment":"null","endLoc":597,"id":1877,"name":"key","nodeType":"Attribute","startLoc":597,"text":"self.key"},{"className":"CopyOnWriteArray","col":0,"comment":"null","endLoc":662,"id":1878,"nodeType":"Class","startLoc":639,"text":"class CopyOnWriteArray(ExplicitlyIndexedNDArrayMixin):\n __slots__ = (\"array\", \"_copied\")\n\n def __init__(self, array):\n self.array = as_indexable(array)\n self._copied = False\n\n def _ensure_copied(self):\n if not self._copied:\n self.array = as_indexable(np.array(self.array))\n self._copied = True\n\n def __array__(self, dtype=None):\n return np.asarray(self.array, dtype=dtype)\n\n def __getitem__(self, key):\n return type(self)(_wrap_numpy_scalars(self.array[key]))\n\n def transpose(self, order):\n return self.array.transpose(order)\n\n def __setitem__(self, key, value):\n self._ensure_copied()\n self.array[key] = value"},{"col":4,"comment":"null","endLoc":649,"header":"def _ensure_copied(self)","id":1879,"name":"_ensure_copied","nodeType":"Function","startLoc":646,"text":"def _ensure_copied(self):\n if not self._copied:\n self.array = as_indexable(np.array(self.array))\n self._copied = True"},{"col":0,"comment":"Given an array of datetime objects, returns the tuple `(num, units,\n calendar)` suitable for a CF compliant time variable.\n\n Unlike `date2num`, this function can handle datetime64 arrays.\n\n See also\n --------\n cftime.date2num\n ","endLoc":422,"header":"def encode_cf_datetime(dates, units=None, calendar=None)","id":1880,"name":"encode_cf_datetime","nodeType":"Function","startLoc":376,"text":"def encode_cf_datetime(dates, units=None, calendar=None):\n \"\"\"Given an array of datetime objects, returns the tuple `(num, units,\n calendar)` suitable for a CF compliant time variable.\n\n Unlike `date2num`, this function can handle datetime64 arrays.\n\n See also\n --------\n cftime.date2num\n \"\"\"\n dates = np.asarray(dates)\n\n if units is None:\n units = infer_datetime_units(dates)\n else:\n units = _cleanup_netcdf_time_units(units)\n\n if calendar is None:\n calendar = infer_calendar_name(dates)\n\n delta, ref_date = _unpack_netcdf_time_units(units)\n try:\n if calendar not in _STANDARD_CALENDARS or dates.dtype.kind == \"O\":\n # parse with cftime instead\n raise OutOfBoundsDatetime\n assert dates.dtype == \"datetime64[ns]\"\n\n delta_units = _netcdf_to_numpy_timeunit(delta)\n time_delta = np.timedelta64(1, delta_units).astype(\"timedelta64[ns]\")\n ref_date = pd.Timestamp(ref_date)\n\n # If the ref_date Timestamp is timezone-aware, convert to UTC and\n # make it timezone-naive (GH 2649).\n if ref_date.tz is not None:\n ref_date = 
ref_date.tz_convert(None)\n\n # Wrap the dates in a DatetimeIndex to do the subtraction to ensure\n # an OverflowError is raised if the ref_date is too far away from\n # dates to be encoded (GH 2272).\n num = (pd.DatetimeIndex(dates.ravel()) - ref_date) / time_delta\n num = num.values.reshape(dates.shape)\n\n except (OutOfBoundsDatetime, OverflowError):\n num = _encode_datetime_with_cftime(dates, units, calendar)\n\n num = cast_to_int_if_safe(num)\n return (num, units, calendar)"},{"col":0,"comment":"Given an array of datetimes, returns a CF compatible time-unit string of\n the form \"{time_unit} since {date[0]}\", where `time_unit` is 'days',\n 'hours', 'minutes' or 'seconds' (the first one that can evenly divide all\n unique time deltas in `dates`)\n ","endLoc":289,"header":"def infer_datetime_units(dates)","id":1881,"name":"infer_datetime_units","nodeType":"Function","startLoc":268,"text":"def infer_datetime_units(dates):\n \"\"\"Given an array of datetimes, returns a CF compatible time-unit string of\n the form \"{time_unit} since {date[0]}\", where `time_unit` is 'days',\n 'hours', 'minutes' or 'seconds' (the first one that can evenly divide all\n unique time deltas in `dates`)\n \"\"\"\n dates = np.asarray(dates).ravel()\n if np.asarray(dates).dtype == \"datetime64[ns]\":\n dates = to_datetime_unboxed(dates)\n dates = dates[pd.notnull(dates)]\n reference_date = dates[0] if len(dates) > 0 else \"1970-01-01\"\n reference_date = pd.Timestamp(reference_date)\n else:\n reference_date = dates[0] if len(dates) > 0 else \"1970-01-01\"\n reference_date = format_cftime_datetime(reference_date)\n unique_timedeltas = np.unique(np.diff(dates))\n if unique_timedeltas.dtype == np.dtype(\"O\"):\n # Convert to np.timedelta64 objects using pandas to work around a\n # NumPy casting bug: https://github.com/numpy/numpy/issues/11096\n unique_timedeltas = to_timedelta_unboxed(unique_timedeltas)\n units = _infer_time_units_from_diff(unique_timedeltas)\n return \"%s since %s\" % (units, reference_date)"},{"col":0,"comment":"null","endLoc":237,"header":"def to_datetime_unboxed(value, **kwargs)","id":1882,"name":"to_datetime_unboxed","nodeType":"Function","startLoc":231,"text":"def to_datetime_unboxed(value, **kwargs):\n if LooseVersion(pd.__version__) < \"0.25.0\":\n result = pd.to_datetime(value, **kwargs, box=False)\n else:\n result = pd.to_datetime(value, **kwargs).to_numpy()\n assert result.dtype == \"datetime64[ns]\"\n return result"},{"col":4,"comment":"null","endLoc":453,"header":"def time_write(self)","id":1883,"name":"time_write","nodeType":"Function","startLoc":452,"text":"def time_write(self):\n self.write.compute()"},{"col":4,"comment":"null","endLoc":652,"header":"def __array__(self, dtype=None)","id":1884,"name":"__array__","nodeType":"Function","startLoc":651,"text":"def __array__(self, dtype=None):\n return np.asarray(self.array, dtype=dtype)"},{"col":0,"comment":"Converts a cftime.datetime object to a string with the format:\n YYYY-MM-DD HH:MM:SS.UUUUUU\n ","endLoc":304,"header":"def format_cftime_datetime(date)","id":1885,"name":"format_cftime_datetime","nodeType":"Function","startLoc":292,"text":"def format_cftime_datetime(date):\n \"\"\"Converts a cftime.datetime object to a string with the format:\n YYYY-MM-DD HH:MM:SS.UUUUUU\n \"\"\"\n return \"{:04d}-{:02d}-{:02d} {:02d}:{:02d}:{:02d}.{:06d}\".format(\n date.year,\n date.month,\n date.day,\n date.hour,\n date.minute,\n date.second,\n date.microsecond,\n )"},{"col":4,"comment":"null","endLoc":655,"header":"def __getitem__(self, 
key)","id":1886,"name":"__getitem__","nodeType":"Function","startLoc":654,"text":"def __getitem__(self, key):\n return type(self)(_wrap_numpy_scalars(self.array[key]))"},{"col":0,"comment":"Apply a function to each chunk of a DataArray or Dataset. This function is\n experimental and its signature may change.\n\n Parameters\n ----------\n func: callable\n User-provided function that accepts a DataArray or Dataset as its first\n parameter. The function will receive a subset of 'obj' (see below),\n corresponding to one chunk along each chunked dimension. ``func`` will be\n executed as ``func(obj_subset, *args, **kwargs)``.\n\n The function will be first run on mocked-up data, that looks like 'obj' but\n has sizes 0, to determine properties of the returned object such as dtype,\n variable names, new dimensions and new indexes (if any).\n\n This function must return either a single DataArray or a single Dataset.\n\n This function cannot change size of existing dimensions, or add new chunked\n dimensions.\n obj: DataArray, Dataset\n Passed to the function as its first argument, one dask chunk at a time.\n args: Sequence\n Passed verbatim to func after unpacking, after the sliced obj. xarray objects,\n if any, will not be split by chunks. Passing dask collections is not allowed.\n kwargs: Mapping\n Passed verbatim to func after unpacking. xarray objects, if any, will not be\n split by chunks. Passing dask collections is not allowed.\n\n Returns\n -------\n A single DataArray or Dataset with dask backend, reassembled from the outputs of the\n function.\n\n Notes\n -----\n This function is designed for when one needs to manipulate a whole xarray object\n within each chunk. In the more common case where one can work on numpy arrays, it is\n recommended to use apply_ufunc.\n\n If none of the variables in obj is backed by dask, calling this function is\n equivalent to calling ``func(obj, *args, **kwargs)``.\n\n See Also\n --------\n dask.array.map_blocks, xarray.apply_ufunc, xarray.Dataset.map_blocks,\n xarray.DataArray.map_blocks\n ","endLoc":339,"header":"def map_blocks(\n func: Callable[..., T_DSorDA],\n obj: Union[DataArray, Dataset],\n args: Sequence[Any] = (),\n kwargs","id":1887,"name":"map_blocks","nodeType":"Function","startLoc":103,"text":"def map_blocks(\n func: Callable[..., T_DSorDA],\n obj: Union[DataArray, Dataset],\n args: Sequence[Any] = (),\n kwargs: Mapping[str, Any] = None,\n) -> T_DSorDA:\n \"\"\"Apply a function to each chunk of a DataArray or Dataset. This function is\n experimental and its signature may change.\n\n Parameters\n ----------\n func: callable\n User-provided function that accepts a DataArray or Dataset as its first\n parameter. The function will receive a subset of 'obj' (see below),\n corresponding to one chunk along each chunked dimension. ``func`` will be\n executed as ``func(obj_subset, *args, **kwargs)``.\n\n The function will be first run on mocked-up data, that looks like 'obj' but\n has sizes 0, to determine properties of the returned object such as dtype,\n variable names, new dimensions and new indexes (if any).\n\n This function must return either a single DataArray or a single Dataset.\n\n This function cannot change size of existing dimensions, or add new chunked\n dimensions.\n obj: DataArray, Dataset\n Passed to the function as its first argument, one dask chunk at a time.\n args: Sequence\n Passed verbatim to func after unpacking, after the sliced obj. xarray objects,\n if any, will not be split by chunks. 
Passing dask collections is not allowed.\n kwargs: Mapping\n Passed verbatim to func after unpacking. xarray objects, if any, will not be\n split by chunks. Passing dask collections is not allowed.\n\n Returns\n -------\n A single DataArray or Dataset with dask backend, reassembled from the outputs of the\n function.\n\n Notes\n -----\n This function is designed for when one needs to manipulate a whole xarray object\n within each chunk. In the more common case where one can work on numpy arrays, it is\n recommended to use apply_ufunc.\n\n If none of the variables in obj is backed by dask, calling this function is\n equivalent to calling ``func(obj, *args, **kwargs)``.\n\n See Also\n --------\n dask.array.map_blocks, xarray.apply_ufunc, xarray.Dataset.map_blocks,\n xarray.DataArray.map_blocks\n \"\"\"\n\n def _wrapper(func, obj, to_array, args, kwargs):\n if to_array:\n obj = dataset_to_dataarray(obj)\n\n result = func(obj, *args, **kwargs)\n\n for name, index in result.indexes.items():\n if name in obj.indexes:\n if len(index) != len(obj.indexes[name]):\n raise ValueError(\n \"Length of the %r dimension has changed. This is not allowed.\"\n % name\n )\n\n return make_dict(result)\n\n if not isinstance(args, Sequence):\n raise TypeError(\"args must be a sequence (for example, a list or tuple).\")\n if kwargs is None:\n kwargs = {}\n elif not isinstance(kwargs, Mapping):\n raise TypeError(\"kwargs must be a mapping (for example, a dict)\")\n\n for value in list(args) + list(kwargs.values()):\n if dask.is_dask_collection(value):\n raise TypeError(\n \"Cannot pass dask collections in args or kwargs yet. Please compute or \"\n \"load values before passing to map_blocks.\"\n )\n\n if not dask.is_dask_collection(obj):\n return func(obj, *args, **kwargs)\n\n if isinstance(obj, DataArray):\n # only using _to_temp_dataset would break\n # func = lambda x: x.to_dataset()\n # since that relies on preserving name.\n if obj.name is None:\n dataset = obj._to_temp_dataset()\n else:\n dataset = obj.to_dataset()\n input_is_array = True\n else:\n dataset = obj\n input_is_array = False\n\n input_chunks = dataset.chunks\n\n template: Union[DataArray, Dataset] = infer_template(func, obj, *args, **kwargs)\n if isinstance(template, DataArray):\n result_is_array = True\n template_name = template.name\n template = template._to_temp_dataset()\n elif isinstance(template, Dataset):\n result_is_array = False\n else:\n raise TypeError(\n f\"func output must be DataArray or Dataset; got {type(template)}\"\n )\n\n template_indexes = set(template.indexes)\n dataset_indexes = set(dataset.indexes)\n preserved_indexes = template_indexes & dataset_indexes\n new_indexes = template_indexes - dataset_indexes\n indexes = {dim: dataset.indexes[dim] for dim in preserved_indexes}\n indexes.update({k: template.indexes[k] for k in new_indexes})\n\n graph: Dict[Any, Any] = {}\n gname = \"%s-%s\" % (\n dask.utils.funcname(func),\n dask.base.tokenize(dataset, args, kwargs),\n )\n\n # map dims to list of chunk indexes\n ichunk = {dim: range(len(chunks_v)) for dim, chunks_v in input_chunks.items()}\n # mapping from chunk index to slice bounds\n chunk_index_bounds = {\n dim: np.cumsum((0,) + chunks_v) for dim, chunks_v in input_chunks.items()\n }\n\n # iterate over all possible chunk combinations\n for v in itertools.product(*ichunk.values()):\n chunk_index_dict = dict(zip(dataset.dims, v))\n\n # this will become [[name1, variable1],\n # [name2, variable2],\n # ...]\n # which is passed to dict and then to Dataset\n data_vars = []\n coords = 
[]\n\n for name, variable in dataset.variables.items():\n # make a task that creates tuple of (dims, chunk)\n if dask.is_dask_collection(variable.data):\n # recursively index into dask_keys nested list to get chunk\n chunk = variable.__dask_keys__()\n for dim in variable.dims:\n chunk = chunk[chunk_index_dict[dim]]\n\n chunk_variable_task = (\"%s-%s\" % (gname, chunk[0]),) + v\n graph[chunk_variable_task] = (\n tuple,\n [variable.dims, chunk, variable.attrs],\n )\n else:\n # non-dask array with possibly chunked dimensions\n # index into variable appropriately\n subsetter = {}\n for dim in variable.dims:\n if dim in chunk_index_dict:\n which_chunk = chunk_index_dict[dim]\n subsetter[dim] = slice(\n chunk_index_bounds[dim][which_chunk],\n chunk_index_bounds[dim][which_chunk + 1],\n )\n\n subset = variable.isel(subsetter)\n chunk_variable_task = (\n \"%s-%s\" % (gname, dask.base.tokenize(subset)),\n ) + v\n graph[chunk_variable_task] = (\n tuple,\n [subset.dims, subset, subset.attrs],\n )\n\n # this task creates dict mapping variable name to above tuple\n if name in dataset._coord_names:\n coords.append([name, chunk_variable_task])\n else:\n data_vars.append([name, chunk_variable_task])\n\n from_wrapper = (gname,) + v\n graph[from_wrapper] = (\n _wrapper,\n func,\n (Dataset, (dict, data_vars), (dict, coords), dataset.attrs),\n input_is_array,\n args,\n kwargs,\n )\n\n # mapping from variable name to dask graph key\n var_key_map: Dict[Hashable, str] = {}\n for name, variable in template.variables.items():\n if name in indexes:\n continue\n gname_l = \"%s-%s\" % (gname, name)\n var_key_map[name] = gname_l\n\n key: Tuple[Any, ...] = (gname_l,)\n for dim in variable.dims:\n if dim in chunk_index_dict:\n key += (chunk_index_dict[dim],)\n else:\n # unchunked dimensions in the input have one chunk in the result\n key += (0,)\n\n graph[key] = (operator.getitem, from_wrapper, name)\n\n graph = HighLevelGraph.from_collections(gname, graph, dependencies=[dataset])\n\n result = Dataset(coords=indexes, attrs=template.attrs)\n for name, gname_l in var_key_map.items():\n dims = template[name].dims\n var_chunks = []\n for dim in dims:\n if dim in input_chunks:\n var_chunks.append(input_chunks[dim])\n elif dim in indexes:\n var_chunks.append((len(indexes[dim]),))\n\n data = dask.array.Array(\n graph, name=gname_l, chunks=var_chunks, dtype=template[name].dtype\n )\n result[name] = (dims, data, template[name].attrs)\n\n result = result.set_coords(template._coord_names)\n\n if result_is_array:\n da = dataset_to_dataarray(result)\n da.name = template_name\n return da # type: ignore\n return result # type: ignore"},{"col":0,"comment":"null","endLoc":228,"header":"def to_timedelta_unboxed(value, **kwargs)","id":1888,"name":"to_timedelta_unboxed","nodeType":"Function","startLoc":222,"text":"def to_timedelta_unboxed(value, **kwargs):\n if LooseVersion(pd.__version__) < \"0.25.0\":\n result = pd.to_timedelta(value, **kwargs, box=False)\n else:\n result = pd.to_timedelta(value, **kwargs).to_numpy()\n assert result.dtype == \"timedelta64[ns]\"\n return result"},{"col":4,"comment":"\n Convert a dictionary into an xarray.Dataset.\n\n Input dict can take several forms::\n\n d = {'t': {'dims': ('t'), 'data': t},\n 'a': {'dims': ('t'), 'data': x},\n 'b': {'dims': ('t'), 'data': y}}\n\n d = {'coords': {'t': {'dims': 't', 'data': t,\n 'attrs': {'units':'s'}}},\n 'attrs': {'title': 'air temperature'},\n 'dims': 't',\n 'data_vars': {'a': {'dims': 't', 'data': x, },\n 'b': {'dims': 't', 'data': y}}}\n\n where 't' is the name of 
the dimesion, 'a' and 'b' are names of data\n variables and t, x, and y are lists, numpy.arrays or pandas objects.\n\n Parameters\n ----------\n d : dict, with a minimum structure of {'var_0': {'dims': [..], \n 'data': [..]}, \n ...}\n\n Returns\n -------\n obj : xarray.Dataset\n\n See also\n --------\n Dataset.to_dict\n DataArray.from_dict\n ","endLoc":4569,"header":"@classmethod\n def from_dict(cls, d)","id":1889,"name":"from_dict","nodeType":"Function","startLoc":4507,"text":"@classmethod\n def from_dict(cls, d):\n \"\"\"\n Convert a dictionary into an xarray.Dataset.\n\n Input dict can take several forms::\n\n d = {'t': {'dims': ('t'), 'data': t},\n 'a': {'dims': ('t'), 'data': x},\n 'b': {'dims': ('t'), 'data': y}}\n\n d = {'coords': {'t': {'dims': 't', 'data': t,\n 'attrs': {'units':'s'}}},\n 'attrs': {'title': 'air temperature'},\n 'dims': 't',\n 'data_vars': {'a': {'dims': 't', 'data': x, },\n 'b': {'dims': 't', 'data': y}}}\n\n where 't' is the name of the dimesion, 'a' and 'b' are names of data\n variables and t, x, and y are lists, numpy.arrays or pandas objects.\n\n Parameters\n ----------\n d : dict, with a minimum structure of {'var_0': {'dims': [..], \\\n 'data': [..]}, \\\n ...}\n\n Returns\n -------\n obj : xarray.Dataset\n\n See also\n --------\n Dataset.to_dict\n DataArray.from_dict\n \"\"\"\n\n if not {\"coords\", \"data_vars\"}.issubset(set(d)):\n variables = d.items()\n else:\n import itertools\n\n variables = itertools.chain(\n d.get(\"coords\", {}).items(), d.get(\"data_vars\", {}).items()\n )\n try:\n variable_dict = {\n k: (v[\"dims\"], v[\"data\"], v.get(\"attrs\")) for k, v in variables\n }\n except KeyError as e:\n raise ValueError(\n \"cannot convert dict without the key \"\n \"'{dims_data}'\".format(dims_data=str(e.args[0]))\n )\n obj = cls(variable_dict)\n\n # what if coords aren't dims?\n coords = set(d.get(\"coords\", {})) - set(d.get(\"dims\", {}))\n obj = obj.set_coords(coords)\n\n obj.attrs.update(d.get(\"attrs\", {}))\n\n return obj"},{"col":0,"comment":"null","endLoc":257,"header":"def _infer_time_units_from_diff(unique_timedeltas)","id":1890,"name":"_infer_time_units_from_diff","nodeType":"Function","startLoc":250,"text":"def _infer_time_units_from_diff(unique_timedeltas):\n for time_unit in [\"days\", \"hours\", \"minutes\", \"seconds\"]:\n delta_ns = _NS_PER_TIME_DELTA[_netcdf_to_numpy_timeunit(time_unit)]\n unit_delta = np.timedelta64(delta_ns, \"ns\")\n diffs = unique_timedeltas / unit_delta\n if np.all(diffs == diffs.astype(int)):\n return time_unit\n return \"seconds\""},{"col":4,"comment":"null","endLoc":658,"header":"def transpose(self, order)","id":1891,"name":"transpose","nodeType":"Function","startLoc":657,"text":"def transpose(self, order):\n return self.array.transpose(order)"},{"col":0,"comment":"null","endLoc":81,"header":"def _netcdf_to_numpy_timeunit(units)","id":1892,"name":"_netcdf_to_numpy_timeunit","nodeType":"Function","startLoc":70,"text":"def _netcdf_to_numpy_timeunit(units):\n units = units.lower()\n if not units.endswith(\"s\"):\n units = \"%ss\" % units\n return {\n \"microseconds\": \"us\",\n \"milliseconds\": \"ms\",\n \"seconds\": \"s\",\n \"minutes\": \"m\",\n \"hours\": \"h\",\n \"days\": \"D\",\n 
}[units]"},{"attributeType":"Dataset","col":8,"comment":"null","endLoc":48,"id":1893,"name":"ds","nodeType":"Attribute","startLoc":48,"text":"self.ds"},{"attributeType":"null","col":16,"comment":"null","endLoc":1,"id":1894,"name":"np","nodeType":"Attribute","startLoc":1,"text":"np"},{"attributeType":"null","col":4,"comment":"null","endLoc":444,"id":1895,"name":"timeout","nodeType":"Attribute","startLoc":444,"text":"timeout"},{"attributeType":"null","col":17,"comment":"null","endLoc":3,"id":1896,"name":"xr","nodeType":"Attribute","startLoc":3,"text":"xr"},{"attributeType":"null","col":4,"comment":"null","endLoc":445,"id":1897,"name":"repeat","nodeType":"Attribute","startLoc":445,"text":"repeat"},{"attributeType":"null","col":4,"comment":"null","endLoc":446,"id":1898,"name":"number","nodeType":"Attribute","startLoc":446,"text":"number"},{"attributeType":"null","col":8,"comment":"null","endLoc":450,"id":1899,"name":"write","nodeType":"Attribute","startLoc":450,"text":"self.write"},{"className":"IOWriteNetCDFDaskDistributed","col":0,"comment":"null","endLoc":469,"id":1900,"nodeType":"Class","startLoc":456,"text":"class IOWriteNetCDFDaskDistributed:\n def setup(self):\n try:\n import distributed\n except ImportError:\n raise NotImplementedError()\n self.client = distributed.Client()\n self.write = create_delayed_write()\n\n def cleanup(self):\n self.client.shutdown()\n\n def time_write(self):\n self.write.compute()"},{"col":4,"comment":"null","endLoc":463,"header":"def setup(self)","id":1901,"name":"setup","nodeType":"Function","startLoc":457,"text":"def setup(self):\n try:\n import distributed\n except ImportError:\n raise NotImplementedError()\n self.client = distributed.Client()\n self.write = create_delayed_write()"},{"id":1902,"name":"doc/_static","nodeType":"Package"},{"id":1903,"name":".gitignore","nodeType":"TextFile","path":"doc/_static","text":"examples*.png\n*.log\n*.pdf\n*.fbd_latexmk\n*.aux\n"},{"id":1904,"name":"xarray/coding","nodeType":"Package"},{"fileName":"cftimeindex.py","filePath":"xarray/coding","id":1905,"nodeType":"File","text":"\"\"\"DatetimeIndex analog for cftime.datetime objects\"\"\"\n# The pandas.Index subclass defined here was copied and adapted for\n# use with cftime.datetime objects based on the source code defining\n# pandas.DatetimeIndex.\n\n# For reference, here is a copy of the pandas copyright notice:\n\n# (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team\n# All rights reserved.\n\n# Copyright (c) 2008-2011 AQR Capital Management, LLC\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n\n# * Neither the name of the copyright holder nor the names of any\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport re\nimport warnings\nfrom datetime import timedelta\nfrom distutils.version import LooseVersion\n\nimport numpy as np\nimport pandas as pd\n\nfrom xarray.core.utils import is_scalar\n\nfrom .times import _STANDARD_CALENDARS, cftime_to_nptime, infer_calendar_name\n\n\ndef named(name, pattern):\n return \"(?P<\" + name + \">\" + pattern + \")\"\n\n\ndef optional(x):\n return \"(?:\" + x + \")?\"\n\n\ndef trailing_optional(xs):\n if not xs:\n return \"\"\n return xs[0] + optional(trailing_optional(xs[1:]))\n\n\ndef build_pattern(date_sep=r\"\\-\", datetime_sep=r\"T\", time_sep=r\"\\:\"):\n pieces = [\n (None, \"year\", r\"\\d{4}\"),\n (date_sep, \"month\", r\"\\d{2}\"),\n (date_sep, \"day\", r\"\\d{2}\"),\n (datetime_sep, \"hour\", r\"\\d{2}\"),\n (time_sep, \"minute\", r\"\\d{2}\"),\n (time_sep, \"second\", r\"\\d{2}\"),\n ]\n pattern_list = []\n for sep, name, sub_pattern in pieces:\n pattern_list.append((sep if sep else \"\") + named(name, sub_pattern))\n # TODO: allow timezone offsets?\n return \"^\" + trailing_optional(pattern_list) + \"$\"\n\n\n_BASIC_PATTERN = build_pattern(date_sep=\"\", time_sep=\"\")\n_EXTENDED_PATTERN = build_pattern()\n_PATTERNS = [_BASIC_PATTERN, _EXTENDED_PATTERN]\n\n\ndef parse_iso8601(datetime_string):\n for pattern in _PATTERNS:\n match = re.match(pattern, datetime_string)\n if match:\n return match.groupdict()\n raise ValueError(\"no ISO-8601 match for string: %s\" % datetime_string)\n\n\ndef _parse_iso8601_with_reso(date_type, timestr):\n default = date_type(1, 1, 1)\n result = parse_iso8601(timestr)\n replace = {}\n\n for attr in [\"year\", \"month\", \"day\", \"hour\", \"minute\", \"second\"]:\n value = result.get(attr, None)\n if value is not None:\n # Note ISO8601 conventions allow for fractional seconds.\n # TODO: Consider adding support for sub-second resolution?\n replace[attr] = int(value)\n resolution = attr\n\n # dayofwk=-1 is required to update the dayofwk and dayofyr attributes of\n # the returned date object in versions of cftime between 1.0.2 and\n # 1.0.3.4. 
It can be removed for versions of cftime greater than\n # 1.0.3.4.\n replace[\"dayofwk\"] = -1\n return default.replace(**replace), resolution\n\n\ndef _parsed_string_to_bounds(date_type, resolution, parsed):\n \"\"\"Generalization of\n pandas.tseries.index.DatetimeIndex._parsed_string_to_bounds\n for use with non-standard calendars and cftime.datetime\n objects.\n \"\"\"\n if resolution == \"year\":\n return (\n date_type(parsed.year, 1, 1),\n date_type(parsed.year + 1, 1, 1) - timedelta(microseconds=1),\n )\n elif resolution == \"month\":\n if parsed.month == 12:\n end = date_type(parsed.year + 1, 1, 1) - timedelta(microseconds=1)\n else:\n end = date_type(parsed.year, parsed.month + 1, 1) - timedelta(\n microseconds=1\n )\n return date_type(parsed.year, parsed.month, 1), end\n elif resolution == \"day\":\n start = date_type(parsed.year, parsed.month, parsed.day)\n return start, start + timedelta(days=1, microseconds=-1)\n elif resolution == \"hour\":\n start = date_type(parsed.year, parsed.month, parsed.day, parsed.hour)\n return start, start + timedelta(hours=1, microseconds=-1)\n elif resolution == \"minute\":\n start = date_type(\n parsed.year, parsed.month, parsed.day, parsed.hour, parsed.minute\n )\n return start, start + timedelta(minutes=1, microseconds=-1)\n elif resolution == \"second\":\n start = date_type(\n parsed.year,\n parsed.month,\n parsed.day,\n parsed.hour,\n parsed.minute,\n parsed.second,\n )\n return start, start + timedelta(seconds=1, microseconds=-1)\n else:\n raise KeyError\n\n\ndef get_date_field(datetimes, field):\n \"\"\"Adapted from pandas.tslib.get_date_field\"\"\"\n return np.array([getattr(date, field) for date in datetimes])\n\n\ndef _field_accessor(name, docstring=None, min_cftime_version=\"0.0\"):\n \"\"\"Adapted from pandas.tseries.index._field_accessor\"\"\"\n\n def f(self, min_cftime_version=min_cftime_version):\n import cftime\n\n version = cftime.__version__\n\n if LooseVersion(version) >= LooseVersion(min_cftime_version):\n return get_date_field(self._data, name)\n else:\n raise ImportError(\n \"The {!r} accessor requires a minimum \"\n \"version of cftime of {}. Found an \"\n \"installed version of {}.\".format(name, min_cftime_version, version)\n )\n\n f.__name__ = name\n f.__doc__ = docstring\n return property(f)\n\n\ndef get_date_type(self):\n if self._data.size:\n return type(self._data[0])\n else:\n return None\n\n\ndef assert_all_valid_date_type(data):\n import cftime\n\n if len(data) > 0:\n sample = data[0]\n date_type = type(sample)\n if not isinstance(sample, cftime.datetime):\n raise TypeError(\n \"CFTimeIndex requires cftime.datetime \"\n \"objects. Got object of {}.\".format(date_type)\n )\n if not all(isinstance(value, date_type) for value in data):\n raise TypeError(\n \"CFTimeIndex requires using datetime \"\n \"objects of all the same type. 
Got\\n{}.\".format(data)\n )\n\n\nclass CFTimeIndex(pd.Index):\n \"\"\"Custom Index for working with CF calendars and dates\n\n All elements of a CFTimeIndex must be cftime.datetime objects.\n\n Parameters\n ----------\n data : array or CFTimeIndex\n Sequence of cftime.datetime objects to use in index\n name : str, default None\n Name of the resulting index\n\n See Also\n --------\n cftime_range\n \"\"\"\n\n year = _field_accessor(\"year\", \"The year of the datetime\")\n month = _field_accessor(\"month\", \"The month of the datetime\")\n day = _field_accessor(\"day\", \"The days of the datetime\")\n hour = _field_accessor(\"hour\", \"The hours of the datetime\")\n minute = _field_accessor(\"minute\", \"The minutes of the datetime\")\n second = _field_accessor(\"second\", \"The seconds of the datetime\")\n microsecond = _field_accessor(\"microsecond\", \"The microseconds of the datetime\")\n dayofyear = _field_accessor(\n \"dayofyr\", \"The ordinal day of year of the datetime\", \"1.0.2.1\"\n )\n dayofweek = _field_accessor(\"dayofwk\", \"The day of week of the datetime\", \"1.0.2.1\")\n date_type = property(get_date_type)\n\n def __new__(cls, data, name=None):\n assert_all_valid_date_type(data)\n if name is None and hasattr(data, \"name\"):\n name = data.name\n\n result = object.__new__(cls)\n result._data = np.array(data, dtype=\"O\")\n result.name = name\n return result\n\n def _partial_date_slice(self, resolution, parsed):\n \"\"\"Adapted from\n pandas.tseries.index.DatetimeIndex._partial_date_slice\n\n Note that when using a CFTimeIndex, if a partial-date selection\n returns a single element, it will never be converted to a scalar\n coordinate; this is in slight contrast to the behavior when using\n a DatetimeIndex, which sometimes will return a DataArray with a scalar\n coordinate depending on the resolution of the datetimes used in\n defining the index. 
For example:\n\n >>> from cftime import DatetimeNoLeap\n >>> import pandas as pd\n >>> import xarray as xr\n >>> da = xr.DataArray([1, 2],\n coords=[[DatetimeNoLeap(2001, 1, 1),\n DatetimeNoLeap(2001, 2, 1)]],\n dims=['time'])\n >>> da.sel(time='2001-01-01')\n \n array([1])\n Coordinates:\n * time (time) object 2001-01-01 00:00:00\n >>> da = xr.DataArray([1, 2],\n coords=[[pd.Timestamp(2001, 1, 1),\n pd.Timestamp(2001, 2, 1)]],\n dims=['time'])\n >>> da.sel(time='2001-01-01')\n \n array(1)\n Coordinates:\n time datetime64[ns] 2001-01-01\n >>> da = xr.DataArray([1, 2],\n coords=[[pd.Timestamp(2001, 1, 1, 1),\n pd.Timestamp(2001, 2, 1)]],\n dims=['time'])\n >>> da.sel(time='2001-01-01')\n \n array([1])\n Coordinates:\n * time (time) datetime64[ns] 2001-01-01T01:00:00\n \"\"\"\n start, end = _parsed_string_to_bounds(self.date_type, resolution, parsed)\n\n times = self._data\n\n if self.is_monotonic:\n if len(times) and (\n (start < times[0] and end < times[0])\n or (start > times[-1] and end > times[-1])\n ):\n # we are out of range\n raise KeyError\n\n # a monotonic (sorted) series can be sliced\n left = times.searchsorted(start, side=\"left\")\n right = times.searchsorted(end, side=\"right\")\n return slice(left, right)\n\n lhs_mask = times >= start\n rhs_mask = times <= end\n return np.flatnonzero(lhs_mask & rhs_mask)\n\n def _get_string_slice(self, key):\n \"\"\"Adapted from pandas.tseries.index.DatetimeIndex._get_string_slice\"\"\"\n parsed, resolution = _parse_iso8601_with_reso(self.date_type, key)\n try:\n loc = self._partial_date_slice(resolution, parsed)\n except KeyError:\n raise KeyError(key)\n return loc\n\n def get_loc(self, key, method=None, tolerance=None):\n \"\"\"Adapted from pandas.tseries.index.DatetimeIndex.get_loc\"\"\"\n if isinstance(key, str):\n return self._get_string_slice(key)\n else:\n return pd.Index.get_loc(self, key, method=method, tolerance=tolerance)\n\n def _maybe_cast_slice_bound(self, label, side, kind):\n \"\"\"Adapted from\n pandas.tseries.index.DatetimeIndex._maybe_cast_slice_bound\"\"\"\n if isinstance(label, str):\n parsed, resolution = _parse_iso8601_with_reso(self.date_type, label)\n start, end = _parsed_string_to_bounds(self.date_type, resolution, parsed)\n if self.is_monotonic_decreasing and len(self) > 1:\n return end if side == \"left\" else start\n return start if side == \"left\" else end\n else:\n return label\n\n # TODO: Add ability to use integer range outside of iloc?\n # e.g. 
series[1:5].\n def get_value(self, series, key):\n \"\"\"Adapted from pandas.tseries.index.DatetimeIndex.get_value\"\"\"\n if np.asarray(key).dtype == np.dtype(bool):\n return series.iloc[key]\n elif isinstance(key, slice):\n return series.iloc[self.slice_indexer(key.start, key.stop, key.step)]\n else:\n return series.iloc[self.get_loc(key)]\n\n def __contains__(self, key):\n \"\"\"Adapted from\n pandas.tseries.base.DatetimeIndexOpsMixin.__contains__\"\"\"\n try:\n result = self.get_loc(key)\n return (\n is_scalar(result)\n or type(result) == slice\n or (isinstance(result, np.ndarray) and result.size)\n )\n except (KeyError, TypeError, ValueError):\n return False\n\n def contains(self, key):\n \"\"\"Needed for .loc based partial-string indexing\"\"\"\n return self.__contains__(key)\n\n def shift(self, n, freq):\n \"\"\"Shift the CFTimeIndex a multiple of the given frequency.\n\n See the documentation for :py:func:`~xarray.cftime_range` for a\n complete listing of valid frequency strings.\n\n Parameters\n ----------\n n : int\n Periods to shift by\n freq : str or datetime.timedelta\n A frequency string or datetime.timedelta object to shift by\n\n Returns\n -------\n CFTimeIndex\n\n See also\n --------\n pandas.DatetimeIndex.shift\n\n Examples\n --------\n >>> index = xr.cftime_range('2000', periods=1, freq='M')\n >>> index\n CFTimeIndex([2000-01-31 00:00:00], dtype='object')\n >>> index.shift(1, 'M')\n CFTimeIndex([2000-02-29 00:00:00], dtype='object')\n \"\"\"\n from .cftime_offsets import to_offset\n\n if not isinstance(n, int):\n raise TypeError(\"'n' must be an int, got {}.\".format(n))\n if isinstance(freq, timedelta):\n return self + n * freq\n elif isinstance(freq, str):\n return self + n * to_offset(freq)\n else:\n raise TypeError(\n \"'freq' must be of type \"\n \"str or datetime.timedelta, got {}.\".format(freq)\n )\n\n def __add__(self, other):\n if isinstance(other, pd.TimedeltaIndex):\n other = other.to_pytimedelta()\n return CFTimeIndex(np.array(self) + other)\n\n def __radd__(self, other):\n if isinstance(other, pd.TimedeltaIndex):\n other = other.to_pytimedelta()\n return CFTimeIndex(other + np.array(self))\n\n def __sub__(self, other):\n import cftime\n\n if isinstance(other, (CFTimeIndex, cftime.datetime)):\n return pd.TimedeltaIndex(np.array(self) - np.array(other))\n elif isinstance(other, pd.TimedeltaIndex):\n return CFTimeIndex(np.array(self) - other.to_pytimedelta())\n else:\n return CFTimeIndex(np.array(self) - other)\n\n def __rsub__(self, other):\n return pd.TimedeltaIndex(other - np.array(self))\n\n def to_datetimeindex(self, unsafe=False):\n \"\"\"If possible, convert this index to a pandas.DatetimeIndex.\n\n Parameters\n ----------\n unsafe : bool\n Flag to turn off warning when converting from a CFTimeIndex with\n a non-standard calendar to a DatetimeIndex (default ``False``).\n\n Returns\n -------\n pandas.DatetimeIndex\n\n Raises\n ------\n ValueError\n If the CFTimeIndex contains dates that are not possible in the\n standard calendar or outside the pandas.Timestamp-valid range.\n\n Warns\n -----\n RuntimeWarning\n If converting from a non-standard calendar to a DatetimeIndex.\n\n Warnings\n --------\n Note that for non-standard calendars, this will change the calendar\n type of the index. 
In that case the result of this method should be\n used with caution.\n\n Examples\n --------\n >>> import xarray as xr\n >>> times = xr.cftime_range('2000', periods=2, calendar='gregorian')\n >>> times\n CFTimeIndex([2000-01-01 00:00:00, 2000-01-02 00:00:00], dtype='object')\n >>> times.to_datetimeindex()\n DatetimeIndex(['2000-01-01', '2000-01-02'], dtype='datetime64[ns]', freq=None)\n \"\"\"\n nptimes = cftime_to_nptime(self)\n calendar = infer_calendar_name(self)\n if calendar not in _STANDARD_CALENDARS and not unsafe:\n warnings.warn(\n \"Converting a CFTimeIndex with dates from a non-standard \"\n \"calendar, {!r}, to a pandas.DatetimeIndex, which uses dates \"\n \"from the standard calendar. This may lead to subtle errors \"\n \"in operations that depend on the length of time between \"\n \"dates.\".format(calendar),\n RuntimeWarning,\n stacklevel=2,\n )\n return pd.DatetimeIndex(nptimes)\n\n def strftime(self, date_format):\n \"\"\"\n Return an Index of formatted strings specified by date_format, which\n supports the same string format as the python standard library. Details\n of the string format can be found in `python string format doc\n `__\n\n Parameters\n ----------\n date_format : str\n Date format string (e.g. \"%Y-%m-%d\")\n\n Returns\n -------\n Index\n Index of formatted strings\n\n Examples\n --------\n >>> rng = xr.cftime_range(start='2000', periods=5, freq='2MS',\n ... calendar='noleap')\n >>> rng.strftime('%B %d, %Y, %r')\n Index(['January 01, 2000, 12:00:00 AM', 'March 01, 2000, 12:00:00 AM',\n 'May 01, 2000, 12:00:00 AM', 'July 01, 2000, 12:00:00 AM',\n 'September 01, 2000, 12:00:00 AM'],\n dtype='object')\n \"\"\"\n return pd.Index([date.strftime(date_format) for date in self._data])\n\n\ndef _parse_iso8601_without_reso(date_type, datetime_str):\n date, _ = _parse_iso8601_with_reso(date_type, datetime_str)\n return date\n\n\ndef _parse_array_of_cftime_strings(strings, date_type):\n \"\"\"Create a numpy array from an array of strings.\n\n For use in generating dates from strings for use with interp. 
Assumes the\n array is either 0-dimensional or 1-dimensional.\n\n Parameters\n ----------\n strings : array of strings\n Strings to convert to dates\n date_type : cftime.datetime type\n Calendar type to use for dates\n\n Returns\n -------\n np.array\n \"\"\"\n return np.array(\n [_parse_iso8601_without_reso(date_type, s) for s in strings.ravel()]\n ).reshape(strings.shape)\n"},{"col":4,"comment":"null","endLoc":466,"header":"def cleanup(self)","id":1906,"name":"cleanup","nodeType":"Function","startLoc":465,"text":"def cleanup(self):\n self.client.shutdown()"},{"attributeType":"null","col":0,"comment":"null","endLoc":26,"id":1907,"name":"_STANDARD_CALENDARS","nodeType":"Attribute","startLoc":26,"text":"_STANDARD_CALENDARS"},{"col":4,"comment":"null","endLoc":4584,"header":"@staticmethod\n def _unary_op(f, keep_attrs=False)","id":1908,"name":"_unary_op","nodeType":"Function","startLoc":4571,"text":"@staticmethod\n def _unary_op(f, keep_attrs=False):\n @functools.wraps(f)\n def func(self, *args, **kwargs):\n variables = {}\n for k, v in self._variables.items():\n if k in self._coord_names:\n variables[k] = v\n else:\n variables[k] = f(v, *args, **kwargs)\n attrs = self._attrs if keep_attrs else None\n return self._replace_with_new_dims(variables, attrs=attrs)\n\n return func"},{"col":0,"comment":"Given an array of cftime.datetime objects, return an array of\n numpy.datetime64 objects of the same size","endLoc":338,"header":"def cftime_to_nptime(times)","id":1909,"name":"cftime_to_nptime","nodeType":"Function","startLoc":318,"text":"def cftime_to_nptime(times):\n \"\"\"Given an array of cftime.datetime objects, return an array of\n numpy.datetime64 objects of the same size\"\"\"\n times = np.asarray(times)\n new = np.empty(times.shape, dtype=\"M8[ns]\")\n for i, t in np.ndenumerate(times):\n try:\n # Use pandas.Timestamp in place of datetime.datetime, because\n # NumPy casts it safely it np.datetime64[ns] for dates outside\n # 1678 to 2262 (this is not currently the case for\n # datetime.datetime).\n dt = pd.Timestamp(\n t.year, t.month, t.day, t.hour, t.minute, t.second, t.microsecond\n )\n except ValueError as e:\n raise ValueError(\n \"Cannot convert date {} to a date in the \"\n \"standard calendar. 
Reason: {}.\".format(t, e)\n )\n new[i] = np.datetime64(dt)\n return new"},{"col":4,"comment":"null","endLoc":469,"header":"def time_write(self)","id":1910,"name":"time_write","nodeType":"Function","startLoc":468,"text":"def time_write(self):\n self.write.compute()"},{"col":4,"comment":"null","endLoc":4601,"header":"@staticmethod\n def _binary_op(f, reflexive=False, join=None)","id":1911,"name":"_binary_op","nodeType":"Function","startLoc":4586,"text":"@staticmethod\n def _binary_op(f, reflexive=False, join=None):\n @functools.wraps(f)\n def func(self, other):\n from .dataarray import DataArray\n\n if isinstance(other, groupby.GroupBy):\n return NotImplemented\n align_type = OPTIONS[\"arithmetic_join\"] if join is None else join\n if isinstance(other, (DataArray, Dataset)):\n self, other = align(self, other, join=align_type, copy=False)\n g = f if not reflexive else lambda x, y: f(y, x)\n ds = self._calculate_binary_op(g, other, join=align_type)\n return ds\n\n return func"},{"col":0,"comment":"Given an array of datetimes, infer the CF calendar name","endLoc":265,"header":"def infer_calendar_name(dates)","id":1912,"name":"infer_calendar_name","nodeType":"Function","startLoc":260,"text":"def infer_calendar_name(dates):\n \"\"\"Given an array of datetimes, infer the CF calendar name\"\"\"\n if np.asarray(dates).dtype == \"datetime64[ns]\":\n return \"proleptic_gregorian\"\n else:\n return np.asarray(dates).ravel()[0].calendar"},{"className":"CFTimeIndex","col":0,"comment":"Custom Index for working with CF calendars and dates\n\n All elements of a CFTimeIndex must be cftime.datetime objects.\n\n Parameters\n ----------\n data : array or CFTimeIndex\n Sequence of cftime.datetime objects to use in index\n name : str, default None\n Name of the resulting index\n\n See Also\n --------\n cftime_range\n ","endLoc":520,"id":1913,"nodeType":"Class","startLoc":215,"text":"class CFTimeIndex(pd.Index):\n \"\"\"Custom Index for working with CF calendars and dates\n\n All elements of a CFTimeIndex must be cftime.datetime objects.\n\n Parameters\n ----------\n data : array or CFTimeIndex\n Sequence of cftime.datetime objects to use in index\n name : str, default None\n Name of the resulting index\n\n See Also\n --------\n cftime_range\n \"\"\"\n\n year = _field_accessor(\"year\", \"The year of the datetime\")\n month = _field_accessor(\"month\", \"The month of the datetime\")\n day = _field_accessor(\"day\", \"The days of the datetime\")\n hour = _field_accessor(\"hour\", \"The hours of the datetime\")\n minute = _field_accessor(\"minute\", \"The minutes of the datetime\")\n second = _field_accessor(\"second\", \"The seconds of the datetime\")\n microsecond = _field_accessor(\"microsecond\", \"The microseconds of the datetime\")\n dayofyear = _field_accessor(\n \"dayofyr\", \"The ordinal day of year of the datetime\", \"1.0.2.1\"\n )\n dayofweek = _field_accessor(\"dayofwk\", \"The day of week of the datetime\", \"1.0.2.1\")\n date_type = property(get_date_type)\n\n def __new__(cls, data, name=None):\n assert_all_valid_date_type(data)\n if name is None and hasattr(data, \"name\"):\n name = data.name\n\n result = object.__new__(cls)\n result._data = np.array(data, dtype=\"O\")\n result.name = name\n return result\n\n def _partial_date_slice(self, resolution, parsed):\n \"\"\"Adapted from\n pandas.tseries.index.DatetimeIndex._partial_date_slice\n\n Note that when using a CFTimeIndex, if a partial-date selection\n returns a single element, it will never be converted to a scalar\n coordinate; this is in slight 
contrast to the behavior when using\n a DatetimeIndex, which sometimes will return a DataArray with a scalar\n coordinate depending on the resolution of the datetimes used in\n defining the index. For example:\n\n >>> from cftime import DatetimeNoLeap\n >>> import pandas as pd\n >>> import xarray as xr\n >>> da = xr.DataArray([1, 2],\n coords=[[DatetimeNoLeap(2001, 1, 1),\n DatetimeNoLeap(2001, 2, 1)]],\n dims=['time'])\n >>> da.sel(time='2001-01-01')\n \n array([1])\n Coordinates:\n * time (time) object 2001-01-01 00:00:00\n >>> da = xr.DataArray([1, 2],\n coords=[[pd.Timestamp(2001, 1, 1),\n pd.Timestamp(2001, 2, 1)]],\n dims=['time'])\n >>> da.sel(time='2001-01-01')\n \n array(1)\n Coordinates:\n time datetime64[ns] 2001-01-01\n >>> da = xr.DataArray([1, 2],\n coords=[[pd.Timestamp(2001, 1, 1, 1),\n pd.Timestamp(2001, 2, 1)]],\n dims=['time'])\n >>> da.sel(time='2001-01-01')\n \n array([1])\n Coordinates:\n * time (time) datetime64[ns] 2001-01-01T01:00:00\n \"\"\"\n start, end = _parsed_string_to_bounds(self.date_type, resolution, parsed)\n\n times = self._data\n\n if self.is_monotonic:\n if len(times) and (\n (start < times[0] and end < times[0])\n or (start > times[-1] and end > times[-1])\n ):\n # we are out of range\n raise KeyError\n\n # a monotonic (sorted) series can be sliced\n left = times.searchsorted(start, side=\"left\")\n right = times.searchsorted(end, side=\"right\")\n return slice(left, right)\n\n lhs_mask = times >= start\n rhs_mask = times <= end\n return np.flatnonzero(lhs_mask & rhs_mask)\n\n def _get_string_slice(self, key):\n \"\"\"Adapted from pandas.tseries.index.DatetimeIndex._get_string_slice\"\"\"\n parsed, resolution = _parse_iso8601_with_reso(self.date_type, key)\n try:\n loc = self._partial_date_slice(resolution, parsed)\n except KeyError:\n raise KeyError(key)\n return loc\n\n def get_loc(self, key, method=None, tolerance=None):\n \"\"\"Adapted from pandas.tseries.index.DatetimeIndex.get_loc\"\"\"\n if isinstance(key, str):\n return self._get_string_slice(key)\n else:\n return pd.Index.get_loc(self, key, method=method, tolerance=tolerance)\n\n def _maybe_cast_slice_bound(self, label, side, kind):\n \"\"\"Adapted from\n pandas.tseries.index.DatetimeIndex._maybe_cast_slice_bound\"\"\"\n if isinstance(label, str):\n parsed, resolution = _parse_iso8601_with_reso(self.date_type, label)\n start, end = _parsed_string_to_bounds(self.date_type, resolution, parsed)\n if self.is_monotonic_decreasing and len(self) > 1:\n return end if side == \"left\" else start\n return start if side == \"left\" else end\n else:\n return label\n\n # TODO: Add ability to use integer range outside of iloc?\n # e.g. 
series[1:5].\n def get_value(self, series, key):\n \"\"\"Adapted from pandas.tseries.index.DatetimeIndex.get_value\"\"\"\n if np.asarray(key).dtype == np.dtype(bool):\n return series.iloc[key]\n elif isinstance(key, slice):\n return series.iloc[self.slice_indexer(key.start, key.stop, key.step)]\n else:\n return series.iloc[self.get_loc(key)]\n\n def __contains__(self, key):\n \"\"\"Adapted from\n pandas.tseries.base.DatetimeIndexOpsMixin.__contains__\"\"\"\n try:\n result = self.get_loc(key)\n return (\n is_scalar(result)\n or type(result) == slice\n or (isinstance(result, np.ndarray) and result.size)\n )\n except (KeyError, TypeError, ValueError):\n return False\n\n def contains(self, key):\n \"\"\"Needed for .loc based partial-string indexing\"\"\"\n return self.__contains__(key)\n\n def shift(self, n, freq):\n \"\"\"Shift the CFTimeIndex a multiple of the given frequency.\n\n See the documentation for :py:func:`~xarray.cftime_range` for a\n complete listing of valid frequency strings.\n\n Parameters\n ----------\n n : int\n Periods to shift by\n freq : str or datetime.timedelta\n A frequency string or datetime.timedelta object to shift by\n\n Returns\n -------\n CFTimeIndex\n\n See also\n --------\n pandas.DatetimeIndex.shift\n\n Examples\n --------\n >>> index = xr.cftime_range('2000', periods=1, freq='M')\n >>> index\n CFTimeIndex([2000-01-31 00:00:00], dtype='object')\n >>> index.shift(1, 'M')\n CFTimeIndex([2000-02-29 00:00:00], dtype='object')\n \"\"\"\n from .cftime_offsets import to_offset\n\n if not isinstance(n, int):\n raise TypeError(\"'n' must be an int, got {}.\".format(n))\n if isinstance(freq, timedelta):\n return self + n * freq\n elif isinstance(freq, str):\n return self + n * to_offset(freq)\n else:\n raise TypeError(\n \"'freq' must be of type \"\n \"str or datetime.timedelta, got {}.\".format(freq)\n )\n\n def __add__(self, other):\n if isinstance(other, pd.TimedeltaIndex):\n other = other.to_pytimedelta()\n return CFTimeIndex(np.array(self) + other)\n\n def __radd__(self, other):\n if isinstance(other, pd.TimedeltaIndex):\n other = other.to_pytimedelta()\n return CFTimeIndex(other + np.array(self))\n\n def __sub__(self, other):\n import cftime\n\n if isinstance(other, (CFTimeIndex, cftime.datetime)):\n return pd.TimedeltaIndex(np.array(self) - np.array(other))\n elif isinstance(other, pd.TimedeltaIndex):\n return CFTimeIndex(np.array(self) - other.to_pytimedelta())\n else:\n return CFTimeIndex(np.array(self) - other)\n\n def __rsub__(self, other):\n return pd.TimedeltaIndex(other - np.array(self))\n\n def to_datetimeindex(self, unsafe=False):\n \"\"\"If possible, convert this index to a pandas.DatetimeIndex.\n\n Parameters\n ----------\n unsafe : bool\n Flag to turn off warning when converting from a CFTimeIndex with\n a non-standard calendar to a DatetimeIndex (default ``False``).\n\n Returns\n -------\n pandas.DatetimeIndex\n\n Raises\n ------\n ValueError\n If the CFTimeIndex contains dates that are not possible in the\n standard calendar or outside the pandas.Timestamp-valid range.\n\n Warns\n -----\n RuntimeWarning\n If converting from a non-standard calendar to a DatetimeIndex.\n\n Warnings\n --------\n Note that for non-standard calendars, this will change the calendar\n type of the index. 
In that case the result of this method should be\n used with caution.\n\n Examples\n --------\n >>> import xarray as xr\n >>> times = xr.cftime_range('2000', periods=2, calendar='gregorian')\n >>> times\n CFTimeIndex([2000-01-01 00:00:00, 2000-01-02 00:00:00], dtype='object')\n >>> times.to_datetimeindex()\n DatetimeIndex(['2000-01-01', '2000-01-02'], dtype='datetime64[ns]', freq=None)\n \"\"\"\n nptimes = cftime_to_nptime(self)\n calendar = infer_calendar_name(self)\n if calendar not in _STANDARD_CALENDARS and not unsafe:\n warnings.warn(\n \"Converting a CFTimeIndex with dates from a non-standard \"\n \"calendar, {!r}, to a pandas.DatetimeIndex, which uses dates \"\n \"from the standard calendar. This may lead to subtle errors \"\n \"in operations that depend on the length of time between \"\n \"dates.\".format(calendar),\n RuntimeWarning,\n stacklevel=2,\n )\n return pd.DatetimeIndex(nptimes)\n\n def strftime(self, date_format):\n \"\"\"\n Return an Index of formatted strings specified by date_format, which\n supports the same string format as the python standard library. Details\n of the string format can be found in `python string format doc\n `__\n\n Parameters\n ----------\n date_format : str\n Date format string (e.g. \"%Y-%m-%d\")\n\n Returns\n -------\n Index\n Index of formatted strings\n\n Examples\n --------\n >>> rng = xr.cftime_range(start='2000', periods=5, freq='2MS',\n ... calendar='noleap')\n >>> rng.strftime('%B %d, %Y, %r')\n Index(['January 01, 2000, 12:00:00 AM', 'March 01, 2000, 12:00:00 AM',\n 'May 01, 2000, 12:00:00 AM', 'July 01, 2000, 12:00:00 AM',\n 'September 01, 2000, 12:00:00 AM'],\n dtype='object')\n \"\"\"\n return pd.Index([date.strftime(date_format) for date in self._data])"},{"attributeType":"null","col":8,"comment":"null","endLoc":462,"id":1914,"name":"client","nodeType":"Attribute","startLoc":462,"text":"self.client"},{"attributeType":"null","col":8,"comment":"null","endLoc":463,"id":1915,"name":"write","nodeType":"Attribute","startLoc":463,"text":"self.write"},{"attributeType":"null","col":16,"comment":"null","endLoc":3,"id":1916,"name":"np","nodeType":"Attribute","startLoc":3,"text":"np"},{"col":40,"endLoc":4597,"id":1917,"nodeType":"Lambda","startLoc":4597,"text":"lambda x, y: f(y, x)"},{"col":4,"comment":"Adapted from\n pandas.tseries.index.DatetimeIndex._partial_date_slice\n\n Note that when using a CFTimeIndex, if a partial-date selection\n returns a single element, it will never be converted to a scalar\n coordinate; this is in slight contrast to the behavior when using\n a DatetimeIndex, which sometimes will return a DataArray with a scalar\n coordinate depending on the resolution of the datetimes used in\n defining the index. 
For example:\n\n >>> from cftime import DatetimeNoLeap\n >>> import pandas as pd\n >>> import xarray as xr\n >>> da = xr.DataArray([1, 2],\n coords=[[DatetimeNoLeap(2001, 1, 1),\n DatetimeNoLeap(2001, 2, 1)]],\n dims=['time'])\n >>> da.sel(time='2001-01-01')\n \n array([1])\n Coordinates:\n * time (time) object 2001-01-01 00:00:00\n >>> da = xr.DataArray([1, 2],\n coords=[[pd.Timestamp(2001, 1, 1),\n pd.Timestamp(2001, 2, 1)]],\n dims=['time'])\n >>> da.sel(time='2001-01-01')\n \n array(1)\n Coordinates:\n time datetime64[ns] 2001-01-01\n >>> da = xr.DataArray([1, 2],\n coords=[[pd.Timestamp(2001, 1, 1, 1),\n pd.Timestamp(2001, 2, 1)]],\n dims=['time'])\n >>> da.sel(time='2001-01-01')\n \n array([1])\n Coordinates:\n * time (time) datetime64[ns] 2001-01-01T01:00:00\n ","endLoc":316,"header":"def _partial_date_slice(self, resolution, parsed)","id":1918,"name":"_partial_date_slice","nodeType":"Function","startLoc":255,"text":"def _partial_date_slice(self, resolution, parsed):\n \"\"\"Adapted from\n pandas.tseries.index.DatetimeIndex._partial_date_slice\n\n Note that when using a CFTimeIndex, if a partial-date selection\n returns a single element, it will never be converted to a scalar\n coordinate; this is in slight contrast to the behavior when using\n a DatetimeIndex, which sometimes will return a DataArray with a scalar\n coordinate depending on the resolution of the datetimes used in\n defining the index. For example:\n\n >>> from cftime import DatetimeNoLeap\n >>> import pandas as pd\n >>> import xarray as xr\n >>> da = xr.DataArray([1, 2],\n coords=[[DatetimeNoLeap(2001, 1, 1),\n DatetimeNoLeap(2001, 2, 1)]],\n dims=['time'])\n >>> da.sel(time='2001-01-01')\n \n array([1])\n Coordinates:\n * time (time) object 2001-01-01 00:00:00\n >>> da = xr.DataArray([1, 2],\n coords=[[pd.Timestamp(2001, 1, 1),\n pd.Timestamp(2001, 2, 1)]],\n dims=['time'])\n >>> da.sel(time='2001-01-01')\n \n array(1)\n Coordinates:\n time datetime64[ns] 2001-01-01\n >>> da = xr.DataArray([1, 2],\n coords=[[pd.Timestamp(2001, 1, 1, 1),\n pd.Timestamp(2001, 2, 1)]],\n dims=['time'])\n >>> da.sel(time='2001-01-01')\n \n array([1])\n Coordinates:\n * time (time) datetime64[ns] 2001-01-01T01:00:00\n \"\"\"\n start, end = _parsed_string_to_bounds(self.date_type, resolution, parsed)\n\n times = self._data\n\n if self.is_monotonic:\n if len(times) and (\n (start < times[0] and end < times[0])\n or (start > times[-1] and end > times[-1])\n ):\n # we are out of range\n raise KeyError\n\n # a monotonic (sorted) series can be sliced\n left = times.searchsorted(start, side=\"left\")\n right = times.searchsorted(end, side=\"right\")\n return slice(left, right)\n\n lhs_mask = times >= start\n rhs_mask = times <= end\n return np.flatnonzero(lhs_mask & rhs_mask)"},{"attributeType":"null","col":17,"comment":"null","endLoc":4,"id":1919,"name":"pd","nodeType":"Attribute","startLoc":4,"text":"pd"},{"attributeType":"null","col":17,"comment":"null","endLoc":6,"id":1920,"name":"xr","nodeType":"Attribute","startLoc":6,"text":"xr"},{"col":0,"comment":"","endLoc":1,"header":"dataset_io.py#","id":1921,"name":"","nodeType":"Function","startLoc":1,"text":"try:\n import dask\n import dask.multiprocessing\nexcept ImportError:\n pass\n\nos.environ[\"HDF5_USE_FILE_LOCKING\"] = \"FALSE\""},{"col":4,"comment":"null","endLoc":4629,"header":"@staticmethod\n def _inplace_binary_op(f)","id":1922,"name":"_inplace_binary_op","nodeType":"Function","startLoc":4603,"text":"@staticmethod\n def _inplace_binary_op(f):\n @functools.wraps(f)\n def func(self, 
other):\n from .dataarray import DataArray\n\n if isinstance(other, groupby.GroupBy):\n raise TypeError(\n \"in-place operations between a Dataset and \"\n \"a grouped object are not permitted\"\n )\n # we don't actually modify arrays in-place with in-place Dataset\n # arithmetic -- this lets us automatically align things\n if isinstance(other, (DataArray, Dataset)):\n other = other.reindex_like(self, copy=False)\n g = ops.inplace_to_noninplace_op(f)\n ds = self._calculate_binary_op(g, other, inplace=True)\n self._replace_with_new_dims(\n ds._variables,\n ds._coord_names,\n attrs=ds._attrs,\n indexes=ds._indexes,\n inplace=True,\n )\n return self\n\n return func"},{"col":0,"comment":"Generalization of\n pandas.tseries.index.DatetimeIndex._parsed_string_to_bounds\n for use with non-standard calendars and cftime.datetime\n objects.\n ","endLoc":160,"header":"def _parsed_string_to_bounds(date_type, resolution, parsed)","id":1923,"name":"_parsed_string_to_bounds","nodeType":"Function","startLoc":119,"text":"def _parsed_string_to_bounds(date_type, resolution, parsed):\n \"\"\"Generalization of\n pandas.tseries.index.DatetimeIndex._parsed_string_to_bounds\n for use with non-standard calendars and cftime.datetime\n objects.\n \"\"\"\n if resolution == \"year\":\n return (\n date_type(parsed.year, 1, 1),\n date_type(parsed.year + 1, 1, 1) - timedelta(microseconds=1),\n )\n elif resolution == \"month\":\n if parsed.month == 12:\n end = date_type(parsed.year + 1, 1, 1) - timedelta(microseconds=1)\n else:\n end = date_type(parsed.year, parsed.month + 1, 1) - timedelta(\n microseconds=1\n )\n return date_type(parsed.year, parsed.month, 1), end\n elif resolution == \"day\":\n start = date_type(parsed.year, parsed.month, parsed.day)\n return start, start + timedelta(days=1, microseconds=-1)\n elif resolution == \"hour\":\n start = date_type(parsed.year, parsed.month, parsed.day, parsed.hour)\n return start, start + timedelta(hours=1, microseconds=-1)\n elif resolution == \"minute\":\n start = date_type(\n parsed.year, parsed.month, parsed.day, parsed.hour, parsed.minute\n )\n return start, start + timedelta(minutes=1, microseconds=-1)\n elif resolution == \"second\":\n start = date_type(\n parsed.year,\n parsed.month,\n parsed.day,\n parsed.hour,\n parsed.minute,\n parsed.second,\n )\n return start, start + timedelta(seconds=1, microseconds=-1)\n else:\n raise KeyError"},{"col":0,"comment":"null","endLoc":304,"header":"def inplace_to_noninplace_op(f)","id":1924,"name":"inplace_to_noninplace_op","nodeType":"Function","startLoc":303,"text":"def inplace_to_noninplace_op(f):\n return NON_INPLACE_OP[f]"},{"col":0,"comment":"null","endLoc":348,"header":"def _cleanup_netcdf_time_units(units)","id":1925,"name":"_cleanup_netcdf_time_units","nodeType":"Function","startLoc":341,"text":"def _cleanup_netcdf_time_units(units):\n delta, ref_date = _unpack_netcdf_time_units(units)\n try:\n units = \"%s since %s\" % (delta, format_timestamp(ref_date))\n except OutOfBoundsDatetime:\n # don't worry about reifying the units if they're out of bounds\n pass\n return units"},{"col":4,"comment":"null","endLoc":4672,"header":"def _calculate_binary_op(self, f, other, join=\"inner\", inplace=False)","id":1926,"name":"_calculate_binary_op","nodeType":"Function","startLoc":4631,"text":"def _calculate_binary_op(self, f, other, join=\"inner\", inplace=False):\n def apply_over_both(lhs_data_vars, rhs_data_vars, lhs_vars, rhs_vars):\n if inplace and set(lhs_data_vars) != set(rhs_data_vars):\n raise ValueError(\n \"datasets must have 
the same data variables \"\n \"for in-place arithmetic operations: %s, %s\"\n % (list(lhs_data_vars), list(rhs_data_vars))\n )\n\n dest_vars = {}\n\n for k in lhs_data_vars:\n if k in rhs_data_vars:\n dest_vars[k] = f(lhs_vars[k], rhs_vars[k])\n elif join in [\"left\", \"outer\"]:\n dest_vars[k] = f(lhs_vars[k], np.nan)\n for k in rhs_data_vars:\n if k not in dest_vars and join in [\"right\", \"outer\"]:\n dest_vars[k] = f(rhs_vars[k], np.nan)\n return dest_vars\n\n if utils.is_dict_like(other) and not isinstance(other, Dataset):\n # can't use our shortcut of doing the binary operation with\n # Variable objects, so apply over our data vars instead.\n new_data_vars = apply_over_both(\n self.data_vars, other, self.data_vars, other\n )\n return Dataset(new_data_vars)\n\n other_coords = getattr(other, \"coords\", None)\n ds = self.coords.merge(other_coords)\n\n if isinstance(other, Dataset):\n new_vars = apply_over_both(\n self.data_vars, other.data_vars, self.variables, other.variables\n )\n else:\n other_variable = getattr(other, \"variable\", other)\n new_vars = {k: f(self.variables[k], other_variable) for k in self.data_vars}\n ds._variables.update(new_vars)\n ds._dims = calculate_dimensions(ds._variables)\n return ds"},{"id":1927,"name":".gitattributes","nodeType":"TextFile","path":"","text":"# reduce the number of merge conflicts\ndoc/whats-new.rst merge=union\nxarray/_version.py export-subst\n"},{"col":0,"comment":"null","endLoc":92,"header":"def _unpack_netcdf_time_units(units)","id":1928,"name":"_unpack_netcdf_time_units","nodeType":"Function","startLoc":84,"text":"def _unpack_netcdf_time_units(units):\n # CF datetime units follow the format: \"UNIT since DATE\"\n # this parses out the unit and date allowing for extraneous\n # whitespace.\n matches = re.match(\"(.+) since (.+)\", units)\n if not matches:\n raise ValueError(\"invalid time units: %s\" % units)\n delta_units, ref_date = [s.strip() for s in matches.groups()]\n return delta_units, ref_date"},{"fileName":"variables.py","filePath":"xarray/coding","id":1929,"nodeType":"File","text":"\"\"\"Coders for individual Variable objects.\"\"\"\nimport warnings\nfrom functools import partial\nfrom typing import Any, Hashable\n\nimport numpy as np\nimport pandas as pd\n\nfrom ..core import dtypes, duck_array_ops, indexing\nfrom ..core.pycompat import dask_array_type\nfrom ..core.utils import equivalent\nfrom ..core.variable import Variable\n\n\nclass SerializationWarning(RuntimeWarning):\n \"\"\"Warnings about encoding/decoding issues in serialization.\"\"\"\n\n\nclass VariableCoder:\n \"\"\"Base class for encoding and decoding transformations on variables.\n\n We use coders for transforming variables between xarray's data model and\n a format suitable for serialization. For example, coders apply CF\n conventions for how data should be represented in netCDF files.\n\n Subclasses should implement encode() and decode(), which should satisfy\n the identity ``coder.decode(coder.encode(variable)) == variable``. 
If any\n options are necessary, they should be implemented as arguments to the\n __init__ method.\n\n The optional name argument to encode() and decode() exists solely for the\n sake of better error messages, and should correspond to the name of\n variables in the underlying store.\n \"\"\"\n\n def encode(\n self, variable: Variable, name: Hashable = None\n ) -> Variable: # pragma: no cover\n \"\"\"Convert an encoded variable to a decoded variable\n \"\"\"\n raise NotImplementedError()\n\n def decode(\n self, variable: Variable, name: Hashable = None\n ) -> Variable: # pragma: no cover\n \"\"\"Convert an decoded variable to a encoded variable\n \"\"\"\n raise NotImplementedError()\n\n\nclass _ElementwiseFunctionArray(indexing.ExplicitlyIndexedNDArrayMixin):\n \"\"\"Lazily computed array holding values of elemwise-function.\n\n Do not construct this object directly: call lazy_elemwise_func instead.\n\n Values are computed upon indexing or coercion to a NumPy array.\n \"\"\"\n\n def __init__(self, array, func, dtype):\n assert not isinstance(array, dask_array_type)\n self.array = indexing.as_indexable(array)\n self.func = func\n self._dtype = dtype\n\n @property\n def dtype(self):\n return np.dtype(self._dtype)\n\n def __getitem__(self, key):\n return type(self)(self.array[key], self.func, self.dtype)\n\n def __array__(self, dtype=None):\n return self.func(self.array)\n\n def __repr__(self):\n return \"%s(%r, func=%r, dtype=%r)\" % (\n type(self).__name__,\n self.array,\n self.func,\n self.dtype,\n )\n\n\ndef lazy_elemwise_func(array, func, dtype):\n \"\"\"Lazily apply an element-wise function to an array.\n\n Parameters\n ----------\n array : any valid value of Variable._data\n func : callable\n Function to apply to indexed slices of an array. For use with dask,\n this should be a pickle-able object.\n dtype : coercible to np.dtype\n Dtype for the result of this function.\n\n Returns\n -------\n Either a dask.array.Array or _ElementwiseFunctionArray.\n \"\"\"\n if isinstance(array, dask_array_type):\n return array.map_blocks(func, dtype=dtype)\n else:\n return _ElementwiseFunctionArray(array, func, dtype)\n\n\ndef unpack_for_encoding(var):\n return var.dims, var.data, var.attrs.copy(), var.encoding.copy()\n\n\ndef unpack_for_decoding(var):\n return var.dims, var._data, var.attrs.copy(), var.encoding.copy()\n\n\ndef safe_setitem(dest, key, value, name=None):\n if key in dest:\n var_str = \" on variable {!r}\".format(name) if name else \"\"\n raise ValueError(\n \"failed to prevent overwriting existing key {} in attrs{}. \"\n \"This is probably an encoding field used by xarray to describe \"\n \"how a variable is serialized. To proceed, remove this key from \"\n \"the variable's attributes manually.\".format(key, var_str)\n )\n dest[key] = value\n\n\ndef pop_to(source, dest, key, name=None):\n \"\"\"\n A convenience function which pops a key k from source to dest.\n None values are not passed on. 
If k already exists in dest an\n error is raised.\n \"\"\"\n value = source.pop(key, None)\n if value is not None:\n safe_setitem(dest, key, value, name=name)\n return value\n\n\ndef _apply_mask(\n data: np.ndarray, encoded_fill_values: list, decoded_fill_value: Any, dtype: Any\n) -> np.ndarray:\n \"\"\"Mask all matching values in a NumPy arrays.\"\"\"\n data = np.asarray(data, dtype=dtype)\n condition = False\n for fv in encoded_fill_values:\n condition |= data == fv\n return np.where(condition, decoded_fill_value, data)\n\n\nclass CFMaskCoder(VariableCoder):\n \"\"\"Mask or unmask fill values according to CF conventions.\"\"\"\n\n def encode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_encoding(variable)\n\n fv = encoding.get(\"_FillValue\")\n mv = encoding.get(\"missing_value\")\n\n if fv is not None and mv is not None and not equivalent(fv, mv):\n raise ValueError(\n \"Variable {!r} has multiple fill values {}. \"\n \"Cannot encode data. \".format(name, [fv, mv])\n )\n\n if fv is not None:\n fill_value = pop_to(encoding, attrs, \"_FillValue\", name=name)\n if not pd.isnull(fill_value):\n data = duck_array_ops.fillna(data, fill_value)\n\n if mv is not None:\n fill_value = pop_to(encoding, attrs, \"missing_value\", name=name)\n if not pd.isnull(fill_value) and fv is None:\n data = duck_array_ops.fillna(data, fill_value)\n\n return Variable(dims, data, attrs, encoding)\n\n def decode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_decoding(variable)\n\n raw_fill_values = [\n pop_to(attrs, encoding, attr, name=name)\n for attr in (\"missing_value\", \"_FillValue\")\n ]\n if raw_fill_values:\n encoded_fill_values = {\n fv\n for option in raw_fill_values\n for fv in np.ravel(option)\n if not pd.isnull(fv)\n }\n\n if len(encoded_fill_values) > 1:\n warnings.warn(\n \"variable {!r} has multiple fill values {}, \"\n \"decoding all values to NaN.\".format(name, encoded_fill_values),\n SerializationWarning,\n stacklevel=3,\n )\n\n dtype, decoded_fill_value = dtypes.maybe_promote(data.dtype)\n\n if encoded_fill_values:\n transform = partial(\n _apply_mask,\n encoded_fill_values=encoded_fill_values,\n decoded_fill_value=decoded_fill_value,\n dtype=dtype,\n )\n data = lazy_elemwise_func(data, transform, dtype)\n\n return Variable(dims, data, attrs, encoding)\n\n\ndef _scale_offset_decoding(data, scale_factor, add_offset, dtype):\n data = np.array(data, dtype=dtype, copy=True)\n if scale_factor is not None:\n data *= scale_factor\n if add_offset is not None:\n data += add_offset\n return data\n\n\ndef _choose_float_dtype(dtype, has_offset):\n \"\"\"Return a float dtype that can losslessly represent `dtype` values.\"\"\"\n # Keep float32 as-is. Upcast half-precision to single-precision,\n # because float16 is \"intended for storage but not computation\"\n if dtype.itemsize <= 4 and np.issubdtype(dtype, np.floating):\n return np.float32\n # float32 can exactly represent all integers up to 24 bits\n if dtype.itemsize <= 2 and np.issubdtype(dtype, np.integer):\n # A scale factor is entirely safe (vanishing into the mantissa),\n # but a large integer offset could lead to loss of precision.\n # Sensitivity analysis can be tricky, so we just use a float64\n # if there's any offset at all - better unoptimised than wrong!\n if not has_offset:\n return np.float32\n # For all other types and circumstances, we just use float64.\n # (safe because eg. 
complex numbers are not supported in NetCDF)\n return np.float64\n\n\nclass CFScaleOffsetCoder(VariableCoder):\n \"\"\"Scale and offset variables according to CF conventions.\n\n Follows the formula:\n decode_values = encoded_values * scale_factor + add_offset\n \"\"\"\n\n def encode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_encoding(variable)\n\n if \"scale_factor\" in encoding or \"add_offset\" in encoding:\n dtype = _choose_float_dtype(data.dtype, \"add_offset\" in encoding)\n data = data.astype(dtype=dtype, copy=True)\n if \"add_offset\" in encoding:\n data -= pop_to(encoding, attrs, \"add_offset\", name=name)\n if \"scale_factor\" in encoding:\n data /= pop_to(encoding, attrs, \"scale_factor\", name=name)\n\n return Variable(dims, data, attrs, encoding)\n\n def decode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_decoding(variable)\n\n if \"scale_factor\" in attrs or \"add_offset\" in attrs:\n scale_factor = pop_to(attrs, encoding, \"scale_factor\", name=name)\n add_offset = pop_to(attrs, encoding, \"add_offset\", name=name)\n dtype = _choose_float_dtype(data.dtype, \"add_offset\" in attrs)\n transform = partial(\n _scale_offset_decoding,\n scale_factor=scale_factor,\n add_offset=add_offset,\n dtype=dtype,\n )\n data = lazy_elemwise_func(data, transform, dtype)\n\n return Variable(dims, data, attrs, encoding)\n\n\nclass UnsignedIntegerCoder(VariableCoder):\n def encode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_encoding(variable)\n\n # from netCDF best practices\n # https://www.unidata.ucar.edu/software/netcdf/docs/BestPractices.html\n # \"_Unsigned = \"true\" to indicate that\n # integer data should be treated as unsigned\"\n if encoding.get(\"_Unsigned\", \"false\") == \"true\":\n pop_to(encoding, attrs, \"_Unsigned\")\n signed_dtype = np.dtype(\"i%s\" % data.dtype.itemsize)\n if \"_FillValue\" in attrs:\n new_fill = signed_dtype.type(attrs[\"_FillValue\"])\n attrs[\"_FillValue\"] = new_fill\n data = duck_array_ops.around(data).astype(signed_dtype)\n\n return Variable(dims, data, attrs, encoding)\n\n def decode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_decoding(variable)\n\n if \"_Unsigned\" in attrs:\n unsigned = pop_to(attrs, encoding, \"_Unsigned\")\n\n if data.dtype.kind == \"i\":\n if unsigned == \"true\":\n unsigned_dtype = np.dtype(\"u%s\" % data.dtype.itemsize)\n transform = partial(np.asarray, dtype=unsigned_dtype)\n data = lazy_elemwise_func(data, transform, unsigned_dtype)\n if \"_FillValue\" in attrs:\n new_fill = unsigned_dtype.type(attrs[\"_FillValue\"])\n attrs[\"_FillValue\"] = new_fill\n else:\n warnings.warn(\n \"variable %r has _Unsigned attribute but is not \"\n \"of integer type. Ignoring attribute.\" % name,\n SerializationWarning,\n stacklevel=3,\n )\n\n return Variable(dims, data, attrs, encoding)\n"},{"className":"SerializationWarning","col":0,"comment":"Warnings about encoding/decoding issues in serialization.","endLoc":16,"id":1931,"nodeType":"Class","startLoc":15,"text":"class SerializationWarning(RuntimeWarning):\n \"\"\"Warnings about encoding/decoding issues in serialization.\"\"\""},{"className":"VariableCoder","col":0,"comment":"Base class for encoding and decoding transformations on variables.\n\n We use coders for transforming variables between xarray's data model and\n a format suitable for serialization. 
For example, coders apply CF\n conventions for how data should be represented in netCDF files.\n\n Subclasses should implement encode() and decode(), which should satisfy\n the identity ``coder.decode(coder.encode(variable)) == variable``. If any\n options are necessary, they should be implemented as arguments to the\n __init__ method.\n\n The optional name argument to encode() and decode() exists solely for the\n sake of better error messages, and should correspond to the name of\n variables in the underlying store.\n ","endLoc":48,"id":1932,"nodeType":"Class","startLoc":19,"text":"class VariableCoder:\n \"\"\"Base class for encoding and decoding transformations on variables.\n\n We use coders for transforming variables between xarray's data model and\n a format suitable for serialization. For example, coders apply CF\n conventions for how data should be represented in netCDF files.\n\n Subclasses should implement encode() and decode(), which should satisfy\n the identity ``coder.decode(coder.encode(variable)) == variable``. If any\n options are necessary, they should be implemented as arguments to the\n __init__ method.\n\n The optional name argument to encode() and decode() exists solely for the\n sake of better error messages, and should correspond to the name of\n variables in the underlying store.\n \"\"\"\n\n def encode(\n self, variable: Variable, name: Hashable = None\n ) -> Variable: # pragma: no cover\n \"\"\"Convert an encoded variable to a decoded variable\n \"\"\"\n raise NotImplementedError()\n\n def decode(\n self, variable: Variable, name: Hashable = None\n ) -> Variable: # pragma: no cover\n \"\"\"Convert an decoded variable to a encoded variable\n \"\"\"\n raise NotImplementedError()"},{"col":4,"comment":"Convert an encoded variable to a decoded variable\n ","endLoc":41,"header":"def encode(\n self, variable: Variable, name: Hashable = None\n ) -> Variable","id":1933,"name":"encode","nodeType":"Function","startLoc":36,"text":"def encode(\n self, variable: Variable, name: Hashable = None\n ) -> Variable: # pragma: no cover\n \"\"\"Convert an encoded variable to a decoded variable\n \"\"\"\n raise NotImplementedError()"},{"col":4,"comment":"Convert an decoded variable to a encoded variable\n ","endLoc":48,"header":"def decode(\n self, variable: Variable, name: Hashable = None\n ) -> Variable","id":1934,"name":"decode","nodeType":"Function","startLoc":43,"text":"def decode(\n self, variable: Variable, name: Hashable = None\n ) -> Variable: # pragma: no cover\n \"\"\"Convert an decoded variable to a encoded variable\n \"\"\"\n raise NotImplementedError()"},{"className":"_ElementwiseFunctionArray","col":0,"comment":"Lazily computed array holding values of elemwise-function.\n\n Do not construct this object directly: call lazy_elemwise_func instead.\n\n Values are computed upon indexing or coercion to a NumPy array.\n ","endLoc":81,"id":1935,"nodeType":"Class","startLoc":51,"text":"class _ElementwiseFunctionArray(indexing.ExplicitlyIndexedNDArrayMixin):\n \"\"\"Lazily computed array holding values of elemwise-function.\n\n Do not construct this object directly: call lazy_elemwise_func instead.\n\n Values are computed upon indexing or coercion to a NumPy array.\n \"\"\"\n\n def __init__(self, array, func, dtype):\n assert not isinstance(array, dask_array_type)\n self.array = indexing.as_indexable(array)\n self.func = func\n self._dtype = dtype\n\n @property\n def dtype(self):\n return np.dtype(self._dtype)\n\n def __getitem__(self, key):\n return type(self)(self.array[key], 
self.func, self.dtype)\n\n def __array__(self, dtype=None):\n return self.func(self.array)\n\n def __repr__(self):\n return \"%s(%r, func=%r, dtype=%r)\" % (\n type(self).__name__,\n self.array,\n self.func,\n self.dtype,\n )"},{"col":4,"comment":"null","endLoc":67,"header":"@property\n def dtype(self)","id":1936,"name":"dtype","nodeType":"Function","startLoc":65,"text":"@property\n def dtype(self):\n return np.dtype(self._dtype)"},{"col":4,"comment":"null","endLoc":70,"header":"def __getitem__(self, key)","id":1937,"name":"__getitem__","nodeType":"Function","startLoc":69,"text":"def __getitem__(self, key):\n return type(self)(self.array[key], self.func, self.dtype)"},{"col":4,"comment":"null","endLoc":4678,"header":"def _copy_attrs_from(self, other)","id":1938,"name":"_copy_attrs_from","nodeType":"Function","startLoc":4674,"text":"def _copy_attrs_from(self, other):\n self.attrs = other.attrs\n for v in other.variables:\n if v in self.variables:\n self.variables[v].attrs = other.variables[v].attrs"},{"col":4,"comment":"Calculate the n-th order discrete difference along given axis.\n\n Parameters\n ----------\n dim : str, optional\n Dimension over which to calculate the finite difference.\n n : int, optional\n The number of times values are differenced.\n label : str, optional\n The new coordinate in dimension ``dim`` will have the\n values of either the minuend's or subtrahend's coordinate\n for values 'upper' and 'lower', respectively. Other\n values are not supported.\n\n Returns\n -------\n difference : same type as caller\n The n-th order finite difference of this object.\n\n Examples\n --------\n >>> ds = xr.Dataset({'foo': ('x', [5, 5, 6, 6])})\n >>> ds.diff('x')\n \n Dimensions: (x: 3)\n Coordinates:\n * x (x) int64 1 2 3\n Data variables:\n foo (x) int64 0 1 0\n >>> ds.diff('x', 2)\n \n Dimensions: (x: 2)\n Coordinates:\n * x (x) int64 2 3\n Data variables:\n foo (x) int64 1 -1\n\n See Also\n --------\n Dataset.differentiate\n ","endLoc":4761,"header":"def diff(self, dim, n=1, label=\"upper\")","id":1939,"name":"diff","nodeType":"Function","startLoc":4680,"text":"def diff(self, dim, n=1, label=\"upper\"):\n \"\"\"Calculate the n-th order discrete difference along given axis.\n\n Parameters\n ----------\n dim : str, optional\n Dimension over which to calculate the finite difference.\n n : int, optional\n The number of times values are differenced.\n label : str, optional\n The new coordinate in dimension ``dim`` will have the\n values of either the minuend's or subtrahend's coordinate\n for values 'upper' and 'lower', respectively. 
Other\n values are not supported.\n\n Returns\n -------\n difference : same type as caller\n The n-th order finite difference of this object.\n\n Examples\n --------\n >>> ds = xr.Dataset({'foo': ('x', [5, 5, 6, 6])})\n >>> ds.diff('x')\n \n Dimensions: (x: 3)\n Coordinates:\n * x (x) int64 1 2 3\n Data variables:\n foo (x) int64 0 1 0\n >>> ds.diff('x', 2)\n \n Dimensions: (x: 2)\n Coordinates:\n * x (x) int64 2 3\n Data variables:\n foo (x) int64 1 -1\n\n See Also\n --------\n Dataset.differentiate\n \"\"\"\n if n == 0:\n return self\n if n < 0:\n raise ValueError(\"order `n` must be non-negative but got {}\".format(n))\n\n # prepare slices\n kwargs_start = {dim: slice(None, -1)}\n kwargs_end = {dim: slice(1, None)}\n\n # prepare new coordinate\n if label == \"upper\":\n kwargs_new = kwargs_end\n elif label == \"lower\":\n kwargs_new = kwargs_start\n else:\n raise ValueError(\n \"The 'label' argument has to be either \" \"'upper' or 'lower'\"\n )\n\n variables = {}\n\n for name, var in self.variables.items():\n if dim in var.dims:\n if name in self.data_vars:\n variables[name] = var.isel(**kwargs_end) - var.isel(**kwargs_start)\n else:\n variables[name] = var.isel(**kwargs_new)\n else:\n variables[name] = var\n\n indexes = dict(self.indexes)\n if dim in indexes:\n indexes[dim] = indexes[dim][kwargs_new[dim]]\n\n difference = self._replace_with_new_dims(variables, indexes=indexes)\n\n if n > 1:\n return difference.diff(dim, n - 1)\n else:\n return difference"},{"col":4,"comment":"null","endLoc":1413,"header":"def transpose(self, order) -> pd.Index","id":1940,"name":"transpose","nodeType":"Function","startLoc":1412,"text":"def transpose(self, order) -> pd.Index:\n return self.array # self.array should be always one-dimensional"},{"col":4,"comment":"null","endLoc":73,"header":"def __array__(self, dtype=None)","id":1941,"name":"__array__","nodeType":"Function","startLoc":72,"text":"def __array__(self, dtype=None):\n return self.func(self.array)"},{"col":4,"comment":"null","endLoc":1330,"header":"def transpose(self, order)","id":1942,"name":"transpose","nodeType":"Function","startLoc":1329,"text":"def transpose(self, order):\n return self.array.transpose(order)"},{"col":4,"comment":"null","endLoc":662,"header":"def __setitem__(self, key, value)","id":1943,"name":"__setitem__","nodeType":"Function","startLoc":660,"text":"def __setitem__(self, key, value):\n self._ensure_copied()\n self.array[key] = value"},{"attributeType":"null","col":4,"comment":"null","endLoc":640,"id":1944,"name":"__slots__","nodeType":"Attribute","startLoc":640,"text":"__slots__"},{"attributeType":"null","col":8,"comment":"null","endLoc":644,"id":1945,"name":"_copied","nodeType":"Attribute","startLoc":644,"text":"self._copied"},{"attributeType":"ExplicitlyIndexed | NumpyIndexingAdapter | PandasIndexAdapter | DaskIndexingAdapter | NdArrayLikeIndexingAdapter","col":8,"comment":"null","endLoc":643,"id":1946,"name":"array","nodeType":"Attribute","startLoc":643,"text":"self.array"},{"className":"MemoryCachedArray","col":0,"comment":"null","endLoc":686,"id":1947,"nodeType":"Class","startLoc":665,"text":"class MemoryCachedArray(ExplicitlyIndexedNDArrayMixin):\n __slots__ = (\"array\",)\n\n def __init__(self, array):\n self.array = _wrap_numpy_scalars(as_indexable(array))\n\n def _ensure_cached(self):\n if not isinstance(self.array, NumpyIndexingAdapter):\n self.array = NumpyIndexingAdapter(np.asarray(self.array))\n\n def __array__(self, dtype=None):\n self._ensure_cached()\n return np.asarray(self.array, dtype=dtype)\n\n def 
__getitem__(self, key):\n return type(self)(_wrap_numpy_scalars(self.array[key]))\n\n def transpose(self, order):\n return self.array.transpose(order)\n\n def __setitem__(self, key, value):\n self.array[key] = value"},{"col":4,"comment":"null","endLoc":81,"header":"def __repr__(self)","id":1948,"name":"__repr__","nodeType":"Function","startLoc":75,"text":"def __repr__(self):\n return \"%s(%r, func=%r, dtype=%r)\" % (\n type(self).__name__,\n self.array,\n self.func,\n self.dtype,\n )"},{"attributeType":"null","col":8,"comment":"null","endLoc":62,"id":1949,"name":"func","nodeType":"Attribute","startLoc":62,"text":"self.func"},{"attributeType":"null","col":8,"comment":"null","endLoc":61,"id":1950,"name":"array","nodeType":"Attribute","startLoc":61,"text":"self.array"},{"col":0,"comment":"null","endLoc":41,"header":"def dataset_to_dataarray(obj: Dataset) -> DataArray","id":1951,"name":"dataset_to_dataarray","nodeType":"Function","startLoc":32,"text":"def dataset_to_dataarray(obj: Dataset) -> DataArray:\n if not isinstance(obj, Dataset):\n raise TypeError(\"Expected Dataset, got %s\" % type(obj))\n\n if len(obj.data_vars) > 1:\n raise TypeError(\n \"Trying to convert Dataset with more than one data variable to DataArray\"\n )\n\n return next(iter(obj.data_vars.values()))"},{"col":4,"comment":"Adapted from pandas.tseries.index.DatetimeIndex._get_string_slice","endLoc":325,"header":"def _get_string_slice(self, key)","id":1952,"name":"_get_string_slice","nodeType":"Function","startLoc":318,"text":"def _get_string_slice(self, key):\n \"\"\"Adapted from pandas.tseries.index.DatetimeIndex._get_string_slice\"\"\"\n parsed, resolution = _parse_iso8601_with_reso(self.date_type, key)\n try:\n loc = self._partial_date_slice(resolution, parsed)\n except KeyError:\n raise KeyError(key)\n return loc"},{"col":4,"comment":"null","endLoc":673,"header":"def _ensure_cached(self)","id":1953,"name":"_ensure_cached","nodeType":"Function","startLoc":671,"text":"def _ensure_cached(self):\n if not isinstance(self.array, NumpyIndexingAdapter):\n self.array = NumpyIndexingAdapter(np.asarray(self.array))"},{"attributeType":"null","col":8,"comment":"null","endLoc":63,"id":1954,"name":"_dtype","nodeType":"Attribute","startLoc":63,"text":"self._dtype"},{"col":4,"comment":"Shift this dataset by an offset along one or more dimensions.\n\n Only data variables are moved; coordinates stay in place. 
This is\n consistent with the behavior of ``shift`` in pandas.\n\n Parameters\n ----------\n shifts : Mapping with the form of {dim: offset}\n Integer offset to shift along each of the given dimensions.\n Positive offsets shift to the right; negative offsets shift to the\n left.\n fill_value: scalar, optional\n Value to use for newly missing values\n **shifts_kwargs:\n The keyword arguments form of ``shifts``.\n One of shifts or shifts_kwarg must be provided.\n\n Returns\n -------\n shifted : Dataset\n Dataset with the same coordinates and attributes but shifted data\n variables.\n\n See also\n --------\n roll\n\n Examples\n --------\n\n >>> ds = xr.Dataset({'foo': ('x', list('abcde'))})\n >>> ds.shift(x=2)\n \n Dimensions: (x: 5)\n Coordinates:\n * x (x) int64 0 1 2 3 4\n Data variables:\n foo (x) object nan nan 'a' 'b' 'c'\n ","endLoc":4816,"header":"def shift(self, shifts=None, fill_value=dtypes.NA, **shifts_kwargs)","id":1955,"name":"shift","nodeType":"Function","startLoc":4763,"text":"def shift(self, shifts=None, fill_value=dtypes.NA, **shifts_kwargs):\n \"\"\"Shift this dataset by an offset along one or more dimensions.\n\n Only data variables are moved; coordinates stay in place. This is\n consistent with the behavior of ``shift`` in pandas.\n\n Parameters\n ----------\n shifts : Mapping with the form of {dim: offset}\n Integer offset to shift along each of the given dimensions.\n Positive offsets shift to the right; negative offsets shift to the\n left.\n fill_value: scalar, optional\n Value to use for newly missing values\n **shifts_kwargs:\n The keyword arguments form of ``shifts``.\n One of shifts or shifts_kwarg must be provided.\n\n Returns\n -------\n shifted : Dataset\n Dataset with the same coordinates and attributes but shifted data\n variables.\n\n See also\n --------\n roll\n\n Examples\n --------\n\n >>> ds = xr.Dataset({'foo': ('x', list('abcde'))})\n >>> ds.shift(x=2)\n \n Dimensions: (x: 5)\n Coordinates:\n * x (x) int64 0 1 2 3 4\n Data variables:\n foo (x) object nan nan 'a' 'b' 'c'\n \"\"\"\n shifts = either_dict_or_kwargs(shifts, shifts_kwargs, \"shift\")\n invalid = [k for k in shifts if k not in self.dims]\n if invalid:\n raise ValueError(\"dimensions %r do not exist\" % invalid)\n\n variables = {}\n for name, var in self.variables.items():\n if name in self.data_vars:\n var_shifts = {k: v for k, v in shifts.items() if k in var.dims}\n variables[name] = var.shift(fill_value=fill_value, shifts=var_shifts)\n else:\n variables[name] = var\n\n return self._replace(variables)"},{"className":"CFMaskCoder","col":0,"comment":"Mask or unmask fill values according to CF conventions.","endLoc":210,"id":1956,"nodeType":"Class","startLoc":149,"text":"class CFMaskCoder(VariableCoder):\n \"\"\"Mask or unmask fill values according to CF conventions.\"\"\"\n\n def encode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_encoding(variable)\n\n fv = encoding.get(\"_FillValue\")\n mv = encoding.get(\"missing_value\")\n\n if fv is not None and mv is not None and not equivalent(fv, mv):\n raise ValueError(\n \"Variable {!r} has multiple fill values {}. \"\n \"Cannot encode data. 
\".format(name, [fv, mv])\n )\n\n if fv is not None:\n fill_value = pop_to(encoding, attrs, \"_FillValue\", name=name)\n if not pd.isnull(fill_value):\n data = duck_array_ops.fillna(data, fill_value)\n\n if mv is not None:\n fill_value = pop_to(encoding, attrs, \"missing_value\", name=name)\n if not pd.isnull(fill_value) and fv is None:\n data = duck_array_ops.fillna(data, fill_value)\n\n return Variable(dims, data, attrs, encoding)\n\n def decode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_decoding(variable)\n\n raw_fill_values = [\n pop_to(attrs, encoding, attr, name=name)\n for attr in (\"missing_value\", \"_FillValue\")\n ]\n if raw_fill_values:\n encoded_fill_values = {\n fv\n for option in raw_fill_values\n for fv in np.ravel(option)\n if not pd.isnull(fv)\n }\n\n if len(encoded_fill_values) > 1:\n warnings.warn(\n \"variable {!r} has multiple fill values {}, \"\n \"decoding all values to NaN.\".format(name, encoded_fill_values),\n SerializationWarning,\n stacklevel=3,\n )\n\n dtype, decoded_fill_value = dtypes.maybe_promote(data.dtype)\n\n if encoded_fill_values:\n transform = partial(\n _apply_mask,\n encoded_fill_values=encoded_fill_values,\n decoded_fill_value=decoded_fill_value,\n dtype=dtype,\n )\n data = lazy_elemwise_func(data, transform, dtype)\n\n return Variable(dims, data, attrs, encoding)"},{"col":4,"comment":"null","endLoc":174,"header":"def encode(self, variable, name=None)","id":1957,"name":"encode","nodeType":"Function","startLoc":152,"text":"def encode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_encoding(variable)\n\n fv = encoding.get(\"_FillValue\")\n mv = encoding.get(\"missing_value\")\n\n if fv is not None and mv is not None and not equivalent(fv, mv):\n raise ValueError(\n \"Variable {!r} has multiple fill values {}. \"\n \"Cannot encode data. 
\".format(name, [fv, mv])\n )\n\n if fv is not None:\n fill_value = pop_to(encoding, attrs, \"_FillValue\", name=name)\n if not pd.isnull(fill_value):\n data = duck_array_ops.fillna(data, fill_value)\n\n if mv is not None:\n fill_value = pop_to(encoding, attrs, \"missing_value\", name=name)\n if not pd.isnull(fill_value) and fv is None:\n data = duck_array_ops.fillna(data, fill_value)\n\n return Variable(dims, data, attrs, encoding)"},{"col":0,"comment":"Fallback method for encoding dates using cftime.\n\n This method is more flexible than xarray's parsing using datetime64[ns]\n arrays but also slower because it loops over each element.\n ","endLoc":366,"header":"def _encode_datetime_with_cftime(dates, units, calendar)","id":1958,"name":"_encode_datetime_with_cftime","nodeType":"Function","startLoc":351,"text":"def _encode_datetime_with_cftime(dates, units, calendar):\n \"\"\"Fallback method for encoding dates using cftime.\n\n This method is more flexible than xarray's parsing using datetime64[ns]\n arrays but also slower because it loops over each element.\n \"\"\"\n cftime = _import_cftime()\n\n if np.issubdtype(dates.dtype, np.datetime64):\n # numpy's broken datetime conversion only works for us precision\n dates = dates.astype(\"M8[us]\").astype(datetime)\n\n def encode_datetime(d):\n return np.nan if d is None else cftime.date2num(d, units, calendar)\n\n return np.vectorize(encode_datetime)(dates)"},{"col":0,"comment":"\n helper function handle the transition to netcdftime/cftime\n as a stand-alone package\n ","endLoc":55,"header":"def _import_cftime()","id":1959,"name":"_import_cftime","nodeType":"Function","startLoc":42,"text":"def _import_cftime():\n \"\"\"\n helper function handle the transition to netcdftime/cftime\n as a stand-alone package\n \"\"\"\n try:\n import cftime\n except ImportError:\n # in netCDF4 the num2date/date2num function are top-level api\n try:\n import netCDF4 as cftime\n except ImportError:\n raise ImportError(\"Failed to import cftime\")\n return cftime"},{"col":4,"comment":"null","endLoc":677,"header":"def __array__(self, dtype=None)","id":1960,"name":"__array__","nodeType":"Function","startLoc":675,"text":"def __array__(self, dtype=None):\n self._ensure_cached()\n return np.asarray(self.array, dtype=dtype)"},{"col":4,"comment":"Roll this dataset by an offset along one or more dimensions.\n\n Unlike shift, roll may rotate all variables, including coordinates\n if specified. The direction of rotation is consistent with\n :py:func:`numpy.roll`.\n\n Parameters\n ----------\n\n shifts : dict, optional\n A dict with keys matching dimensions and values given\n by integers to rotate each of the given dimensions. 
Positive\n offsets roll to the right; negative offsets roll to the left.\n roll_coords : bool\n Indicates whether to roll the coordinates by the offset\n The current default of roll_coords (None, equivalent to True) is\n deprecated and will change to False in a future version.\n Explicitly pass roll_coords to silence the warning.\n **shifts_kwargs : {dim: offset, ...}, optional\n The keyword arguments form of ``shifts``.\n One of shifts or shifts_kwargs must be provided.\n Returns\n -------\n rolled : Dataset\n Dataset with the same coordinates and attributes but rolled\n variables.\n\n See also\n --------\n shift\n\n Examples\n --------\n\n >>> ds = xr.Dataset({'foo': ('x', list('abcde'))})\n >>> ds.roll(x=2)\n \n Dimensions: (x: 5)\n Coordinates:\n * x (x) int64 3 4 0 1 2\n Data variables:\n foo (x) object 'd' 'e' 'a' 'b' 'c'\n ","endLoc":4896,"header":"def roll(self, shifts=None, roll_coords=None, **shifts_kwargs)","id":1961,"name":"roll","nodeType":"Function","startLoc":4818,"text":"def roll(self, shifts=None, roll_coords=None, **shifts_kwargs):\n \"\"\"Roll this dataset by an offset along one or more dimensions.\n\n Unlike shift, roll may rotate all variables, including coordinates\n if specified. The direction of rotation is consistent with\n :py:func:`numpy.roll`.\n\n Parameters\n ----------\n\n shifts : dict, optional\n A dict with keys matching dimensions and values given\n by integers to rotate each of the given dimensions. Positive\n offsets roll to the right; negative offsets roll to the left.\n roll_coords : bool\n Indicates whether to roll the coordinates by the offset\n The current default of roll_coords (None, equivalent to True) is\n deprecated and will change to False in a future version.\n Explicitly pass roll_coords to silence the warning.\n **shifts_kwargs : {dim: offset, ...}, optional\n The keyword arguments form of ``shifts``.\n One of shifts or shifts_kwargs must be provided.\n Returns\n -------\n rolled : Dataset\n Dataset with the same coordinates and attributes but rolled\n variables.\n\n See also\n --------\n shift\n\n Examples\n --------\n\n >>> ds = xr.Dataset({'foo': ('x', list('abcde'))})\n >>> ds.roll(x=2)\n \n Dimensions: (x: 5)\n Coordinates:\n * x (x) int64 3 4 0 1 2\n Data variables:\n foo (x) object 'd' 'e' 'a' 'b' 'c'\n \"\"\"\n shifts = either_dict_or_kwargs(shifts, shifts_kwargs, \"roll\")\n invalid = [k for k in shifts if k not in self.dims]\n if invalid:\n raise ValueError(\"dimensions %r do not exist\" % invalid)\n\n if roll_coords is None:\n warnings.warn(\n \"roll_coords will be set to False in the future.\"\n \" Explicitly set roll_coords to silence warning.\",\n FutureWarning,\n stacklevel=2,\n )\n roll_coords = True\n\n unrolled_vars = () if roll_coords else self.coords\n\n variables = {}\n for k, v in self.variables.items():\n if k not in unrolled_vars:\n variables[k] = v.roll(\n **{k: s for k, s in shifts.items() if k in v.dims}\n )\n else:\n variables[k] = v\n\n if roll_coords:\n indexes = {}\n for k, v in self.indexes.items():\n (dim,) = self.variables[k].dims\n if dim in shifts:\n indexes[k] = roll_index(v, shifts[dim])\n else:\n indexes = dict(self.indexes)\n\n return self._replace(variables, indexes=indexes)"},{"col":4,"comment":"null","endLoc":680,"header":"def __getitem__(self, key)","id":1962,"name":"__getitem__","nodeType":"Function","startLoc":679,"text":"def __getitem__(self, key):\n return type(self)(_wrap_numpy_scalars(self.array[key]))"},{"col":0,"comment":"null","endLoc":373,"header":"def 
cast_to_int_if_safe(num)","id":1963,"name":"cast_to_int_if_safe","nodeType":"Function","startLoc":369,"text":"def cast_to_int_if_safe(num):\n int_num = np.array(num, dtype=np.int64)\n if (num == int_num).all():\n num = int_num\n return num"},{"col":0,"comment":"Map variable name to numpy(-like) data\n (Dataset.to_dict() is too complicated).\n ","endLoc":100,"header":"def make_dict(x: Union[DataArray, Dataset]) -> Dict[Hashable, Any]","id":1964,"name":"make_dict","nodeType":"Function","startLoc":93,"text":"def make_dict(x: Union[DataArray, Dataset]) -> Dict[Hashable, Any]:\n \"\"\"Map variable name to numpy(-like) data\n (Dataset.to_dict() is too complicated).\n \"\"\"\n if isinstance(x, DataArray):\n x = x._to_temp_dataset()\n\n return {k: v.data for k, v in x.variables.items()}"},{"col":4,"comment":"Adapted from pandas.tseries.index.DatetimeIndex.get_loc","endLoc":332,"header":"def get_loc(self, key, method=None, tolerance=None)","id":1965,"name":"get_loc","nodeType":"Function","startLoc":327,"text":"def get_loc(self, key, method=None, tolerance=None):\n \"\"\"Adapted from pandas.tseries.index.DatetimeIndex.get_loc\"\"\"\n if isinstance(key, str):\n return self._get_string_slice(key)\n else:\n return pd.Index.get_loc(self, key, method=method, tolerance=tolerance)"},{"col":0,"comment":"Roll an pandas.Index.","endLoc":99,"header":"def roll_index(index: pd.Index, count: int, axis: int = 0) -> pd.Index","id":1966,"name":"roll_index","nodeType":"Function","startLoc":93,"text":"def roll_index(index: pd.Index, count: int, axis: int = 0) -> pd.Index:\n \"\"\"Roll an pandas.Index.\"\"\"\n count %= index.shape[0]\n if count != 0:\n return index[-count:].append(index[:-count])\n else:\n return index[:]"},{"col":4,"comment":"Compute the qth quantile of the data along the specified dimension.\n\n Returns the qth quantiles(s) of the array elements for each variable\n in the Dataset.\n\n Parameters\n ----------\n q : float in range of [0,1] or array-like of floats\n Quantile to compute, which must be between 0 and 1 inclusive.\n dim : str or sequence of str, optional\n Dimension(s) over which to apply quantile.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to\n use when the desired quantile lies between two data points\n ``i < j``:\n\n * linear: ``i + (j - i) * fraction``, where ``fraction`` is\n the fractional part of the index surrounded by ``i`` and\n ``j``.\n * lower: ``i``.\n * higher: ``j``.\n * nearest: ``i`` or ``j``, whichever is nearest.\n * midpoint: ``(i + j) / 2``.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n numeric_only : bool, optional\n If True, only apply ``func`` to variables with a numeric dtype.\n\n Returns\n -------\n quantiles : Dataset\n If `q` is a single quantile, then the result is a scalar for each\n variable in data_vars. If multiple percentiles are given, first\n axis of the result corresponds to the quantile and a quantile\n dimension is added to the return Dataset. 
The other dimensions are\n the dimensions that remain after the reduction of the array.\n\n See Also\n --------\n numpy.nanpercentile, pandas.Series.quantile, DataArray.quantile\n ","endLoc":5052,"header":"def quantile(\n self, q, dim=None, interpolation=\"linear\", numeric_only=False, keep_attrs=None\n )","id":1967,"name":"quantile","nodeType":"Function","startLoc":4953,"text":"def quantile(\n self, q, dim=None, interpolation=\"linear\", numeric_only=False, keep_attrs=None\n ):\n \"\"\"Compute the qth quantile of the data along the specified dimension.\n\n Returns the qth quantiles(s) of the array elements for each variable\n in the Dataset.\n\n Parameters\n ----------\n q : float in range of [0,1] or array-like of floats\n Quantile to compute, which must be between 0 and 1 inclusive.\n dim : str or sequence of str, optional\n Dimension(s) over which to apply quantile.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to\n use when the desired quantile lies between two data points\n ``i < j``:\n\n * linear: ``i + (j - i) * fraction``, where ``fraction`` is\n the fractional part of the index surrounded by ``i`` and\n ``j``.\n * lower: ``i``.\n * higher: ``j``.\n * nearest: ``i`` or ``j``, whichever is nearest.\n * midpoint: ``(i + j) / 2``.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n numeric_only : bool, optional\n If True, only apply ``func`` to variables with a numeric dtype.\n\n Returns\n -------\n quantiles : Dataset\n If `q` is a single quantile, then the result is a scalar for each\n variable in data_vars. If multiple percentiles are given, first\n axis of the result corresponds to the quantile and a quantile\n dimension is added to the return Dataset. 
The other dimensions are\n the dimensions that remain after the reduction of the array.\n\n See Also\n --------\n numpy.nanpercentile, pandas.Series.quantile, DataArray.quantile\n \"\"\"\n\n if isinstance(dim, str):\n dims = {dim}\n elif dim is None or dim is ALL_DIMS:\n dims = set(self.dims)\n else:\n dims = set(dim)\n\n _assert_empty(\n [d for d in dims if d not in self.dims],\n \"Dataset does not contain the dimensions: %s\",\n )\n\n q = np.asarray(q, dtype=np.float64)\n\n variables = {}\n for name, var in self.variables.items():\n reduce_dims = [d for d in var.dims if d in dims]\n if reduce_dims or not var.dims:\n if name not in self.coords:\n if (\n not numeric_only\n or np.issubdtype(var.dtype, np.number)\n or var.dtype == np.bool_\n ):\n if len(reduce_dims) == var.ndim:\n # prefer to aggregate over axis=None rather than\n # axis=(0, 1) if they will be equivalent, because\n # the former is often more efficient\n reduce_dims = None\n variables[name] = var.quantile(\n q,\n dim=reduce_dims,\n interpolation=interpolation,\n keep_attrs=keep_attrs,\n )\n\n else:\n variables[name] = var\n\n # construct the new dataset\n coord_names = {k for k in self.coords if k in variables}\n indexes = {k: v for k, v in self.indexes.items() if k in variables}\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n attrs = self.attrs if keep_attrs else None\n new = self._replace_with_new_dims(\n variables, coord_names=coord_names, attrs=attrs, indexes=indexes\n )\n if \"quantile\" in new.dims:\n new.coords[\"quantile\"] = Variable(\"quantile\", q)\n else:\n new.coords[\"quantile\"] = q\n return new"},{"col":4,"comment":"null","endLoc":683,"header":"def transpose(self, order)","id":1968,"name":"transpose","nodeType":"Function","startLoc":682,"text":"def transpose(self, order):\n return self.array.transpose(order)"},{"col":0,"comment":"null","endLoc":328,"header":"def _assert_empty(args: tuple, msg: str = \"%s\") -> None","id":1969,"name":"_assert_empty","nodeType":"Function","startLoc":326,"text":"def _assert_empty(args: tuple, msg: str = \"%s\") -> None:\n if args:\n raise ValueError(msg % args)"},{"col":4,"comment":"null","endLoc":686,"header":"def __setitem__(self, key, value)","id":1970,"name":"__setitem__","nodeType":"Function","startLoc":685,"text":"def __setitem__(self, key, value):\n self.array[key] = value"},{"attributeType":"null","col":4,"comment":"null","endLoc":666,"id":1971,"name":"__slots__","nodeType":"Attribute","startLoc":666,"text":"__slots__"},{"attributeType":"null","col":8,"comment":"null","endLoc":669,"id":1972,"name":"array","nodeType":"Attribute","startLoc":669,"text":"self.array"},{"className":"IndexingSupport","col":0,"comment":"null","endLoc":805,"id":1973,"nodeType":"Class","startLoc":796,"text":"@enum.unique\nclass IndexingSupport(enum.Enum):\n # for backends that support only basic indexer\n BASIC = 0\n # for backends that support basic / outer indexer\n OUTER = 1\n # for backends that support outer indexer including at most 1 vector.\n OUTER_1VECTOR = 2\n # for backends that support full vectorized indexer.\n VECTORIZED = 3"},{"col":4,"comment":"null","endLoc":479,"header":"def encode(self, variable, name=None)","id":1974,"name":"encode","nodeType":"Function","startLoc":472,"text":"def encode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_encoding(variable)\n\n if np.issubdtype(data.dtype, np.timedelta64):\n data, units = encode_cf_timedelta(data, encoding.pop(\"units\", None))\n safe_setitem(attrs, \"units\", units, name=name)\n\n 
return Variable(dims, data, attrs, encoding)"},{"attributeType":"null","col":4,"comment":"null","endLoc":799,"id":1975,"name":"BASIC","nodeType":"Attribute","startLoc":799,"text":"BASIC"},{"attributeType":"null","col":4,"comment":"null","endLoc":801,"id":1976,"name":"OUTER","nodeType":"Attribute","startLoc":801,"text":"OUTER"},{"attributeType":"null","col":4,"comment":"null","endLoc":803,"id":1977,"name":"OUTER_1VECTOR","nodeType":"Attribute","startLoc":803,"text":"OUTER_1VECTOR"},{"attributeType":"null","col":4,"comment":"null","endLoc":805,"id":1978,"name":"VECTORIZED","nodeType":"Attribute","startLoc":805,"text":"VECTORIZED"},{"className":"NdArrayLikeIndexingAdapter","col":0,"comment":"null","endLoc":1288,"id":1979,"nodeType":"Class","startLoc":1279,"text":"class NdArrayLikeIndexingAdapter(NumpyIndexingAdapter):\n __slots__ = (\"array\",)\n\n def __init__(self, array):\n if not hasattr(array, \"__array_function__\"):\n raise TypeError(\n \"NdArrayLikeIndexingAdapter must wrap an object that \"\n \"implements the __array_function__ protocol\"\n )\n self.array = array"},{"attributeType":"null","col":4,"comment":"null","endLoc":1280,"id":1980,"name":"__slots__","nodeType":"Attribute","startLoc":1280,"text":"__slots__"},{"attributeType":"null","col":8,"comment":"null","endLoc":1288,"id":1981,"name":"array","nodeType":"Attribute","startLoc":1288,"text":"self.array"},{"className":"DaskIndexingAdapter","col":0,"comment":"Wrap a dask array to support explicit indexing.","endLoc":1330,"id":1982,"nodeType":"Class","startLoc":1291,"text":"class DaskIndexingAdapter(ExplicitlyIndexedNDArrayMixin):\n \"\"\"Wrap a dask array to support explicit indexing.\"\"\"\n\n __slots__ = (\"array\",)\n\n def __init__(self, array):\n \"\"\" This adapter is created in Variable.__getitem__ in\n Variable._broadcast_indexes.\n \"\"\"\n self.array = array\n\n def __getitem__(self, key):\n if isinstance(key, BasicIndexer):\n return self.array[key.tuple]\n elif isinstance(key, VectorizedIndexer):\n return self.array.vindex[key.tuple]\n else:\n assert isinstance(key, OuterIndexer)\n key = key.tuple\n try:\n return self.array[key]\n except NotImplementedError:\n # manual orthogonal indexing.\n # TODO: port this upstream into dask in a saner way.\n value = self.array\n for axis, subkey in reversed(list(enumerate(key))):\n value = value[(slice(None),) * axis + (subkey,)]\n return value\n\n def __setitem__(self, key, value):\n raise TypeError(\n \"this variable's data is stored in a dask array, \"\n \"which does not support item assignment. 
To \"\n \"assign to this variable, you must first load it \"\n \"into memory explicitly using the .load() \"\n \"method or accessing its .values attribute.\"\n )\n\n def transpose(self, order):\n return self.array.transpose(order)"},{"col":4,"comment":"null","endLoc":1318,"header":"def __getitem__(self, key)","id":1983,"name":"__getitem__","nodeType":"Function","startLoc":1302,"text":"def __getitem__(self, key):\n if isinstance(key, BasicIndexer):\n return self.array[key.tuple]\n elif isinstance(key, VectorizedIndexer):\n return self.array.vindex[key.tuple]\n else:\n assert isinstance(key, OuterIndexer)\n key = key.tuple\n try:\n return self.array[key]\n except NotImplementedError:\n # manual orthogonal indexing.\n # TODO: port this upstream into dask in a saner way.\n value = self.array\n for axis, subkey in reversed(list(enumerate(key))):\n value = value[(slice(None),) * axis + (subkey,)]\n return value"},{"col":4,"comment":"null","endLoc":1327,"header":"def __setitem__(self, key, value)","id":1984,"name":"__setitem__","nodeType":"Function","startLoc":1320,"text":"def __setitem__(self, key, value):\n raise TypeError(\n \"this variable's data is stored in a dask array, \"\n \"which does not support item assignment. To \"\n \"assign to this variable, you must first load it \"\n \"into memory explicitly using the .load() \"\n \"method or accessing its .values attribute.\"\n )"},{"attributeType":"null","col":4,"comment":"null","endLoc":1294,"id":1985,"name":"__slots__","nodeType":"Attribute","startLoc":1294,"text":"__slots__"},{"attributeType":"null","col":8,"comment":"null","endLoc":1300,"id":1986,"name":"array","nodeType":"Attribute","startLoc":1300,"text":"self.array"},{"col":4,"comment":"Ranks the data.\n\n Equal values are assigned a rank that is the average of the ranks that\n would have been otherwise assigned to all of the values within\n that set.\n Ranks begin at 1, not 0. If pct is True, computes percentage ranks.\n\n NaNs in the input array are returned as NaNs.\n\n The `bottleneck` library is required.\n\n Parameters\n ----------\n dim : str\n Dimension over which to compute rank.\n pct : bool, optional\n If True, compute percentage ranks, otherwise compute integer ranks.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n\n Returns\n -------\n ranked : Dataset\n Variables that do not depend on `dim` are dropped.\n ","endLoc":5097,"header":"def rank(self, dim, pct=False, keep_attrs=None)","id":1987,"name":"rank","nodeType":"Function","startLoc":5054,"text":"def rank(self, dim, pct=False, keep_attrs=None):\n \"\"\"Ranks the data.\n\n Equal values are assigned a rank that is the average of the ranks that\n would have been otherwise assigned to all of the values within\n that set.\n Ranks begin at 1, not 0. If pct is True, computes percentage ranks.\n\n NaNs in the input array are returned as NaNs.\n\n The `bottleneck` library is required.\n\n Parameters\n ----------\n dim : str\n Dimension over which to compute rank.\n pct : bool, optional\n If True, compute percentage ranks, otherwise compute integer ranks.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. 
If False (default), the new\n object will be returned without attributes.\n\n Returns\n -------\n ranked : Dataset\n Variables that do not depend on `dim` are dropped.\n \"\"\"\n if dim not in self.dims:\n raise ValueError(\"Dataset does not contain the dimension: %s\" % dim)\n\n variables = {}\n for name, var in self.variables.items():\n if name in self.data_vars:\n if dim in var.dims:\n variables[name] = var.rank(dim, pct=pct)\n else:\n variables[name] = var\n\n coord_names = set(self.coords)\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n attrs = self.attrs if keep_attrs else None\n return self._replace(variables, coord_names, attrs=attrs)"},{"col":0,"comment":"null","endLoc":433,"header":"def encode_cf_timedelta(timedeltas, units=None)","id":1988,"name":"encode_cf_timedelta","nodeType":"Function","startLoc":425,"text":"def encode_cf_timedelta(timedeltas, units=None):\n if units is None:\n units = infer_timedelta_units(timedeltas)\n\n np_unit = _netcdf_to_numpy_timeunit(units)\n num = 1.0 * timedeltas / np.timedelta64(1, np_unit)\n num = np.where(pd.isnull(timedeltas), np.nan, num)\n num = cast_to_int_if_safe(num)\n return (num, units)"},{"className":"PandasIndexAdapter","col":0,"comment":"Wrap a pandas.Index to preserve dtypes and handle explicit indexing.\n ","endLoc":1427,"id":1989,"nodeType":"Class","startLoc":1333,"text":"class PandasIndexAdapter(ExplicitlyIndexedNDArrayMixin):\n \"\"\"Wrap a pandas.Index to preserve dtypes and handle explicit indexing.\n \"\"\"\n\n __slots__ = (\"array\", \"_dtype\")\n\n def __init__(self, array: Any, dtype: DTypeLike = None):\n self.array = utils.safe_cast_to_index(array)\n if dtype is None:\n if isinstance(array, pd.PeriodIndex):\n dtype = np.dtype(\"O\")\n elif hasattr(array, \"categories\"):\n # category isn't a real numpy dtype\n dtype = array.categories.dtype\n elif not utils.is_valid_numpy_dtype(array.dtype):\n dtype = np.dtype(\"O\")\n else:\n dtype = array.dtype\n else:\n dtype = np.dtype(dtype)\n self._dtype = dtype\n\n @property\n def dtype(self) -> np.dtype:\n return self._dtype\n\n def __array__(self, dtype: DTypeLike = None) -> np.ndarray:\n if dtype is None:\n dtype = self.dtype\n array = self.array\n if isinstance(array, pd.PeriodIndex):\n with suppress(AttributeError):\n # this might not be public API\n array = array.astype(\"object\")\n return np.asarray(array.values, dtype=dtype)\n\n @property\n def shape(self) -> Tuple[int]:\n return (len(self.array),)\n\n def __getitem__(\n self, indexer\n ) -> Union[NumpyIndexingAdapter, np.ndarray, np.datetime64, np.timedelta64]:\n key = indexer.tuple\n if isinstance(key, tuple) and len(key) == 1:\n # unpack key so it can index a pandas.Index object (pandas.Index\n # objects don't like tuples)\n key, = key\n\n if getattr(key, \"ndim\", 0) > 1: # Return np-array if multidimensional\n return NumpyIndexingAdapter(self.array.values)[indexer]\n\n result = self.array[key]\n\n if isinstance(result, pd.Index):\n result = PandasIndexAdapter(result, dtype=self.dtype)\n else:\n # result is a scalar\n if result is pd.NaT:\n # work around the impossibility of casting NaT with asarray\n # note: it probably would be better in general to return\n # pd.Timestamp rather np.than datetime64 but this is easier\n # (for now)\n result = np.datetime64(\"NaT\", \"ns\")\n elif isinstance(result, timedelta):\n result = np.timedelta64(getattr(result, \"value\", result), \"ns\")\n elif isinstance(result, pd.Timestamp):\n # Work around for GH: pydata/xarray#1932 and numpy/numpy#10668\n # numpy 
fails to convert pd.Timestamp to np.datetime64[ns]\n result = np.asarray(result.to_datetime64())\n elif self.dtype != object:\n result = np.asarray(result, dtype=self.dtype)\n\n # as for numpy.ndarray indexing, we always want the result to be\n # a NumPy array.\n result = utils.to_0d_array(result)\n\n return result\n\n def transpose(self, order) -> pd.Index:\n return self.array # self.array should be always one-dimensional\n\n def __repr__(self) -> str:\n return \"%s(array=%r, dtype=%r)\" % (type(self).__name__, self.array, self.dtype)\n\n def copy(self, deep: bool = True) -> \"PandasIndexAdapter\":\n # Not the same as just writing `self.array.copy(deep=deep)`, as\n # shallow copies of the underlying numpy.ndarrays become deep ones\n # upon pickling\n # >>> len(pickle.dumps((self.array, self.array)))\n # 4000281\n # >>> len(pickle.dumps((self.array, self.array.copy(deep=False))))\n # 8000341\n array = self.array.copy(deep=True) if deep else self.array\n return PandasIndexAdapter(array, self._dtype)"},{"col":0,"comment":"Given an array of timedeltas, returns a CF compatible time-unit from\n {'days', 'hours', 'minutes' 'seconds'} (the first one that can evenly\n divide all unique time deltas in `deltas`)\n ","endLoc":315,"header":"def infer_timedelta_units(deltas)","id":1990,"name":"infer_timedelta_units","nodeType":"Function","startLoc":307,"text":"def infer_timedelta_units(deltas):\n \"\"\"Given an array of timedeltas, returns a CF compatible time-unit from\n {'days', 'hours', 'minutes' 'seconds'} (the first one that can evenly\n divide all unique time deltas in `deltas`)\n \"\"\"\n deltas = to_timedelta_unboxed(np.asarray(deltas).ravel())\n unique_timedeltas = np.unique(deltas[pd.notnull(deltas)])\n units = _infer_time_units_from_diff(unique_timedeltas)\n return units"},{"col":4,"comment":"null","endLoc":1357,"header":"@property\n def dtype(self) -> np.dtype","id":1991,"name":"dtype","nodeType":"Function","startLoc":1355,"text":"@property\n def dtype(self) -> np.dtype:\n return self._dtype"},{"col":4,"comment":"null","endLoc":1367,"header":"def __array__(self, dtype: DTypeLike = None) -> np.ndarray","id":1992,"name":"__array__","nodeType":"Function","startLoc":1359,"text":"def __array__(self, dtype: DTypeLike = None) -> np.ndarray:\n if dtype is None:\n dtype = self.dtype\n array = self.array\n if isinstance(array, pd.PeriodIndex):\n with suppress(AttributeError):\n # this might not be public API\n array = array.astype(\"object\")\n return np.asarray(array.values, dtype=dtype)"},{"col":4,"comment":" Differentiate with the second order accurate central\n differences.\n\n .. note::\n This feature is limited to simple cartesian geometry, i.e. coord\n must be one dimensional.\n\n Parameters\n ----------\n coord: str\n The coordinate to be used to compute the gradient.\n edge_order: 1 or 2. Default 1\n N-th order accurate differences at the boundaries.\n datetime_unit: None or any of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',\n 'us', 'ns', 'ps', 'fs', 'as'}\n Unit to compute gradient. Only valid for datetime coordinate.\n\n Returns\n -------\n differentiated: Dataset\n\n See also\n --------\n numpy.gradient: corresponding numpy function\n ","endLoc":5156,"header":"def differentiate(self, coord, edge_order=1, datetime_unit=None)","id":1993,"name":"differentiate","nodeType":"Function","startLoc":5099,"text":"def differentiate(self, coord, edge_order=1, datetime_unit=None):\n \"\"\" Differentiate with the second order accurate central\n differences.\n\n .. 
note::\n This feature is limited to simple cartesian geometry, i.e. coord\n must be one dimensional.\n\n Parameters\n ----------\n coord: str\n The coordinate to be used to compute the gradient.\n edge_order: 1 or 2. Default 1\n N-th order accurate differences at the boundaries.\n datetime_unit: None or any of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',\n 'us', 'ns', 'ps', 'fs', 'as'}\n Unit to compute gradient. Only valid for datetime coordinate.\n\n Returns\n -------\n differentiated: Dataset\n\n See also\n --------\n numpy.gradient: corresponding numpy function\n \"\"\"\n from .variable import Variable\n\n if coord not in self.variables and coord not in self.dims:\n raise ValueError(\"Coordinate {} does not exist.\".format(coord))\n\n coord_var = self[coord].variable\n if coord_var.ndim != 1:\n raise ValueError(\n \"Coordinate {} must be 1 dimensional but is {}\"\n \" dimensional\".format(coord, coord_var.ndim)\n )\n\n dim = coord_var.dims[0]\n if _contains_datetime_like_objects(coord_var):\n if coord_var.dtype.kind in \"mM\" and datetime_unit is None:\n datetime_unit, _ = np.datetime_data(coord_var.dtype)\n elif datetime_unit is None:\n datetime_unit = \"s\" # Default to seconds for cftime objects\n coord_var = coord_var._to_numeric(datetime_unit=datetime_unit)\n\n variables = {}\n for k, v in self.variables.items():\n if k in self.data_vars and dim in v.dims and k not in self.coords:\n if _contains_datetime_like_objects(v):\n v = v._to_numeric(datetime_unit=datetime_unit)\n grad = duck_array_ops.gradient(\n v.data, coord_var, edge_order=edge_order, axis=v.get_axis_num(dim)\n )\n variables[k] = Variable(v.dims, grad)\n else:\n variables[k] = v\n return self._replace(variables)"},{"col":4,"comment":"null","endLoc":1371,"header":"@property\n def shape(self) -> Tuple[int]","id":1994,"name":"shape","nodeType":"Function","startLoc":1369,"text":"@property\n def shape(self) -> Tuple[int]:\n return (len(self.array),)"},{"col":4,"comment":"null","endLoc":1410,"header":"def __getitem__(\n self, indexer\n ) -> Union[NumpyIndexingAdapter, np.ndarray, np.datetime64, np.timedelta64]","id":1995,"name":"__getitem__","nodeType":"Function","startLoc":1373,"text":"def __getitem__(\n self, indexer\n ) -> Union[NumpyIndexingAdapter, np.ndarray, np.datetime64, np.timedelta64]:\n key = indexer.tuple\n if isinstance(key, tuple) and len(key) == 1:\n # unpack key so it can index a pandas.Index object (pandas.Index\n # objects don't like tuples)\n key, = key\n\n if getattr(key, \"ndim\", 0) > 1: # Return np-array if multidimensional\n return NumpyIndexingAdapter(self.array.values)[indexer]\n\n result = self.array[key]\n\n if isinstance(result, pd.Index):\n result = PandasIndexAdapter(result, dtype=self.dtype)\n else:\n # result is a scalar\n if result is pd.NaT:\n # work around the impossibility of casting NaT with asarray\n # note: it probably would be better in general to return\n # pd.Timestamp rather np.than datetime64 but this is easier\n # (for now)\n result = np.datetime64(\"NaT\", \"ns\")\n elif isinstance(result, timedelta):\n result = np.timedelta64(getattr(result, \"value\", result), \"ns\")\n elif isinstance(result, pd.Timestamp):\n # Work around for GH: pydata/xarray#1932 and numpy/numpy#10668\n # numpy fails to convert pd.Timestamp to np.datetime64[ns]\n result = np.asarray(result.to_datetime64())\n elif self.dtype != object:\n result = np.asarray(result, dtype=self.dtype)\n\n # as for numpy.ndarray indexing, we always want the result to be\n # a NumPy array.\n result = utils.to_0d_array(result)\n\n 
return result"},{"col":0,"comment":"null","endLoc":138,"header":"def gradient(x, coord, axis, edge_order)","id":1996,"name":"gradient","nodeType":"Function","startLoc":135,"text":"def gradient(x, coord, axis, edge_order):\n if isinstance(x, dask_array_type):\n return dask_array.gradient(x, coord, axis=axis, edge_order=edge_order)\n return np.gradient(x, coord, axis=axis, edge_order=edge_order)"},{"col":0,"comment":"Infer return object by running the function on meta objects.\n ","endLoc":90,"header":"def infer_template(\n func: Callable[..., T_DSorDA], obj: Union[DataArray, Dataset], *args, **kwargs\n) -> T_DSorDA","id":1997,"name":"infer_template","nodeType":"Function","startLoc":70,"text":"def infer_template(\n func: Callable[..., T_DSorDA], obj: Union[DataArray, Dataset], *args, **kwargs\n) -> T_DSorDA:\n \"\"\"Infer return object by running the function on meta objects.\n \"\"\"\n meta_args = [make_meta(arg) for arg in (obj,) + args]\n\n try:\n template = func(*meta_args, **kwargs)\n except Exception as e:\n raise Exception(\n \"Cannot infer object returned from running user provided function.\"\n ) from e\n\n if not isinstance(template, (Dataset, DataArray)):\n raise TypeError(\n \"Function must return an xarray DataArray or Dataset. Instead it returned \"\n f\"{type(template)}\"\n )\n\n return template"},{"col":0,"comment":"If obj is a DataArray or Dataset, return a new object of the same type and with\n the same variables and dtypes, but where all variables have size 0 and numpy\n backend.\n If obj is neither a DataArray nor Dataset, return it unaltered.\n ","endLoc":67,"header":"def make_meta(obj)","id":1999,"name":"make_meta","nodeType":"Function","startLoc":44,"text":"def make_meta(obj):\n \"\"\"If obj is a DataArray or Dataset, return a new object of the same type and with\n the same variables and dtypes, but where all variables have size 0 and numpy\n backend.\n If obj is neither a DataArray nor Dataset, return it unaltered.\n \"\"\"\n if isinstance(obj, DataArray):\n obj_array = obj\n obj = obj._to_temp_dataset()\n elif isinstance(obj, Dataset):\n obj_array = None\n else:\n return obj\n\n meta = Dataset()\n for name, variable in obj.variables.items():\n meta_obj = meta_from_array(variable.data, ndim=variable.ndim)\n meta[name] = (variable.dims, meta_obj, variable.attrs)\n meta.attrs = obj.attrs\n meta = meta.set_coords(obj.coords)\n\n if obj_array is not None:\n return obj_array._from_temp_dataset(meta)\n return meta"},{"col":4,"comment":"Adapted from\n pandas.tseries.index.DatetimeIndex._maybe_cast_slice_bound","endLoc":344,"header":"def _maybe_cast_slice_bound(self, label, side, kind)","id":2000,"name":"_maybe_cast_slice_bound","nodeType":"Function","startLoc":334,"text":"def _maybe_cast_slice_bound(self, label, side, kind):\n \"\"\"Adapted from\n pandas.tseries.index.DatetimeIndex._maybe_cast_slice_bound\"\"\"\n if isinstance(label, str):\n parsed, resolution = _parse_iso8601_with_reso(self.date_type, label)\n start, end = _parsed_string_to_bounds(self.date_type, resolution, parsed)\n if self.is_monotonic_decreasing and len(self) > 1:\n return end if side == \"left\" else start\n return start if side == \"left\" else end\n else:\n return label"},{"col":4,"comment":" integrate the array with the trapezoidal rule.\n\n .. note::\n This feature is limited to simple cartesian geometry, i.e. 
coord\n must be one dimensional.\n\n Parameters\n ----------\n dim: str, or a sequence of str\n Coordinate(s) used for the integration.\n datetime_unit\n Can be specify the unit if datetime coordinate is used. One of\n {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', 'ps', 'fs',\n 'as'}\n\n Returns\n -------\n integrated: Dataset\n\n See also\n --------\n DataArray.integrate\n numpy.trapz: corresponding numpy function\n ","endLoc":5188,"header":"def integrate(self, coord, datetime_unit=None)","id":2001,"name":"integrate","nodeType":"Function","startLoc":5158,"text":"def integrate(self, coord, datetime_unit=None):\n \"\"\" integrate the array with the trapezoidal rule.\n\n .. note::\n This feature is limited to simple cartesian geometry, i.e. coord\n must be one dimensional.\n\n Parameters\n ----------\n dim: str, or a sequence of str\n Coordinate(s) used for the integration.\n datetime_unit\n Can be specify the unit if datetime coordinate is used. One of\n {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', 'ps', 'fs',\n 'as'}\n\n Returns\n -------\n integrated: Dataset\n\n See also\n --------\n DataArray.integrate\n numpy.trapz: corresponding numpy function\n \"\"\"\n if not isinstance(coord, (list, tuple)):\n coord = (coord,)\n result = self\n for c in coord:\n result = result._integrate_one(c, datetime_unit=datetime_unit)\n return result"},{"col":4,"comment":"null","endLoc":210,"header":"def decode(self, variable, name=None)","id":2002,"name":"decode","nodeType":"Function","startLoc":176,"text":"def decode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_decoding(variable)\n\n raw_fill_values = [\n pop_to(attrs, encoding, attr, name=name)\n for attr in (\"missing_value\", \"_FillValue\")\n ]\n if raw_fill_values:\n encoded_fill_values = {\n fv\n for option in raw_fill_values\n for fv in np.ravel(option)\n if not pd.isnull(fv)\n }\n\n if len(encoded_fill_values) > 1:\n warnings.warn(\n \"variable {!r} has multiple fill values {}, \"\n \"decoding all values to NaN.\".format(name, encoded_fill_values),\n SerializationWarning,\n stacklevel=3,\n )\n\n dtype, decoded_fill_value = dtypes.maybe_promote(data.dtype)\n\n if encoded_fill_values:\n transform = partial(\n _apply_mask,\n encoded_fill_values=encoded_fill_values,\n decoded_fill_value=decoded_fill_value,\n dtype=dtype,\n )\n data = lazy_elemwise_func(data, transform, dtype)\n\n return Variable(dims, data, attrs, encoding)"},{"col":4,"comment":"null","endLoc":1416,"header":"def __repr__(self) -> str","id":2003,"name":"__repr__","nodeType":"Function","startLoc":1415,"text":"def __repr__(self) -> str:\n return \"%s(array=%r, dtype=%r)\" % (type(self).__name__, self.array, self.dtype)"},{"col":4,"comment":"null","endLoc":1427,"header":"def copy(self, deep: bool = True) -> \"PandasIndexAdapter\"","id":2004,"name":"copy","nodeType":"Function","startLoc":1418,"text":"def copy(self, deep: bool = True) -> \"PandasIndexAdapter\":\n # Not the same as just writing `self.array.copy(deep=deep)`, as\n # shallow copies of the underlying numpy.ndarrays become deep ones\n # upon pickling\n # >>> len(pickle.dumps((self.array, self.array)))\n # 4000281\n # >>> len(pickle.dumps((self.array, self.array.copy(deep=False))))\n # 8000341\n array = self.array.copy(deep=True) if deep else self.array\n return PandasIndexAdapter(array, self._dtype)"},{"col":4,"comment":"null","endLoc":5233,"header":"def _integrate_one(self, coord, datetime_unit=None)","id":2005,"name":"_integrate_one","nodeType":"Function","startLoc":5190,"text":"def 
_integrate_one(self, coord, datetime_unit=None):\n from .variable import Variable\n\n if coord not in self.variables and coord not in self.dims:\n raise ValueError(\"Coordinate {} does not exist.\".format(coord))\n\n coord_var = self[coord].variable\n if coord_var.ndim != 1:\n raise ValueError(\n \"Coordinate {} must be 1 dimensional but is {}\"\n \" dimensional\".format(coord, coord_var.ndim)\n )\n\n dim = coord_var.dims[0]\n if _contains_datetime_like_objects(coord_var):\n if coord_var.dtype.kind in \"mM\" and datetime_unit is None:\n datetime_unit, _ = np.datetime_data(coord_var.dtype)\n elif datetime_unit is None:\n datetime_unit = \"s\" # Default to seconds for cftime objects\n coord_var = datetime_to_numeric(coord_var, datetime_unit=datetime_unit)\n\n variables = {}\n coord_names = set()\n for k, v in self.variables.items():\n if k in self.coords:\n if dim not in v.dims:\n variables[k] = v\n coord_names.add(k)\n else:\n if k in self.data_vars and dim in v.dims:\n if _contains_datetime_like_objects(v):\n v = datetime_to_numeric(v, datetime_unit=datetime_unit)\n integ = duck_array_ops.trapz(\n v.data, coord_var.data, axis=v.get_axis_num(dim)\n )\n v_dims = list(v.dims)\n v_dims.remove(dim)\n variables[k] = Variable(v_dims, integ)\n else:\n variables[k] = v\n indexes = {k: v for k, v in self.indexes.items() if k in variables}\n return self._replace_with_new_dims(\n variables, coord_names=coord_names, indexes=indexes\n )"},{"attributeType":"null","col":4,"comment":"null","endLoc":1337,"id":2006,"name":"__slots__","nodeType":"Attribute","startLoc":1337,"text":"__slots__"},{"attributeType":"null","col":8,"comment":"null","endLoc":1340,"id":2007,"name":"array","nodeType":"Attribute","startLoc":1340,"text":"self.array"},{"attributeType":"null","col":8,"comment":"null","endLoc":1353,"id":2008,"name":"_dtype","nodeType":"Attribute","startLoc":1353,"text":"self._dtype"},{"col":0,"comment":"Convert an array containing datetime-like data to an array of floats.\n\n Parameters\n ----------\n da : np.array\n Input data\n offset: Scalar with the same type of array or None\n If None, subtract minimum values to reduce round off error\n datetime_unit: None or any of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',\n 'us', 'ns', 'ps', 'fs', 'as'}\n dtype: target dtype\n\n Returns\n -------\n array\n ","endLoc":357,"header":"def datetime_to_numeric(array, offset=None, datetime_unit=None, dtype=float)","id":2009,"name":"datetime_to_numeric","nodeType":"Function","startLoc":322,"text":"def datetime_to_numeric(array, offset=None, datetime_unit=None, dtype=float):\n \"\"\"Convert an array containing datetime-like data to an array of floats.\n\n Parameters\n ----------\n da : np.array\n Input data\n offset: Scalar with the same type of array or None\n If None, subtract minimum values to reduce round off error\n datetime_unit: None or any of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',\n 'us', 'ns', 'ps', 'fs', 'as'}\n dtype: target dtype\n\n Returns\n -------\n array\n \"\"\"\n # TODO: make this function dask-compatible?\n if offset is None:\n offset = array.min()\n array = array - offset\n\n if not hasattr(array, \"dtype\"): # scalar is converted to 0d-array\n array = np.array(array)\n\n if array.dtype.kind in \"O\":\n # possibly convert object array containing datetime.timedelta\n array = np.asarray(pd.Series(array.ravel())).reshape(array.shape)\n\n if datetime_unit:\n array = array / np.timedelta64(1, datetime_unit)\n\n # convert np.NaT to np.nan\n if array.dtype.kind in \"mM\":\n return np.where(isnull(array), 
np.nan, array.astype(dtype))\n return array.astype(dtype)"},{"col":4,"comment":"Convert a cdms2.Variable into an xarray.DataArray\n ","endLoc":2381,"header":"@classmethod\n def from_cdms2(cls, variable: \"cdms2_Variable\") -> \"DataArray\"","id":2010,"name":"from_cdms2","nodeType":"Function","startLoc":2375,"text":"@classmethod\n def from_cdms2(cls, variable: \"cdms2_Variable\") -> \"DataArray\":\n \"\"\"Convert a cdms2.Variable into an xarray.DataArray\n \"\"\"\n from ..convert import from_cdms2\n\n return from_cdms2(variable)"},{"col":0,"comment":"Convert a cdms2 variable into an DataArray\n ","endLoc":88,"header":"def from_cdms2(variable)","id":2011,"name":"from_cdms2","nodeType":"Function","startLoc":63,"text":"def from_cdms2(variable):\n \"\"\"Convert a cdms2 variable into an DataArray\n \"\"\"\n values = np.asarray(variable)\n name = variable.id\n dims = variable.getAxisIds()\n coords = {}\n for axis in variable.getAxisList():\n coords[axis.id] = DataArray(\n np.asarray(axis),\n dims=[axis.id],\n attrs=_filter_attrs(axis.attributes, cdms2_ignored_attrs),\n )\n grid = variable.getGrid()\n if grid is not None:\n ids = [a.id for a in grid.getAxisList()]\n for axis in grid.getLongitude(), grid.getLatitude():\n if axis.id not in variable.getAxisIds():\n coords[axis.id] = DataArray(\n np.asarray(axis[:]),\n dims=ids,\n attrs=_filter_attrs(axis.attributes, cdms2_ignored_attrs),\n )\n attrs = _filter_attrs(variable.attributes, cdms2_ignored_attrs)\n dataarray = DataArray(values, dims=dims, coords=coords, name=name, attrs=attrs)\n return decode_cf(dataarray.to_dataset())[dataarray.name]"},{"col":4,"comment":"Adapted from pandas.tseries.index.DatetimeIndex.get_value","endLoc":355,"header":"def get_value(self, series, key)","id":2012,"name":"get_value","nodeType":"Function","startLoc":348,"text":"def get_value(self, series, key):\n \"\"\"Adapted from pandas.tseries.index.DatetimeIndex.get_value\"\"\"\n if np.asarray(key).dtype == np.dtype(bool):\n return series.iloc[key]\n elif isinstance(key, slice):\n return series.iloc[self.slice_indexer(key.start, key.stop, key.step)]\n else:\n return series.iloc[self.get_loc(key)]"},{"col":0,"comment":"null","endLoc":150,"header":"def trapz(y, x, axis)","id":2013,"name":"trapz","nodeType":"Function","startLoc":141,"text":"def trapz(y, x, axis):\n if axis < 0:\n axis = y.ndim + axis\n x_sl1 = (slice(1, None),) + (None,) * (y.ndim - axis - 1)\n x_sl2 = (slice(None, -1),) + (None,) * (y.ndim - axis - 1)\n slice1 = (slice(None),) * axis + (slice(1, None),)\n slice2 = (slice(None),) * axis + (slice(None, -1),)\n dx = x[x_sl1] - x[x_sl2]\n integrand = dx * 0.5 * (y[tuple(slice1)] + y[tuple(slice2)])\n return sum(integrand, axis=axis, skipna=False)"},{"col":0,"comment":" Return attrs that are not in ignored_attrs\n ","endLoc":60,"header":"def _filter_attrs(attrs, ignored_attrs)","id":2014,"name":"_filter_attrs","nodeType":"Function","startLoc":57,"text":"def _filter_attrs(attrs, ignored_attrs):\n \"\"\" Return attrs that are not in ignored_attrs\n \"\"\"\n return {k: v for k, v in attrs.items() if k not in ignored_attrs}"},{"col":0,"comment":"Create a dask array using the chunks hint for dimensions of size > 1.","endLoc":1098,"header":"def _dask_array_with_chunks_hint(array, chunks)","id":2015,"name":"_dask_array_with_chunks_hint","nodeType":"Function","startLoc":1089,"text":"def _dask_array_with_chunks_hint(array, chunks):\n \"\"\"Create a dask array using the chunks hint for dimensions of size > 1.\"\"\"\n import dask.array as da\n\n if len(chunks) < 
array.ndim:\n raise ValueError(\"not enough chunks in hint\")\n new_chunks = []\n for chunk, size in zip(chunks, array.shape):\n new_chunks.append(chunk if size > 1 else (1,))\n return da.from_array(array, new_chunks)"},{"className":"CFScaleOffsetCoder","col":0,"comment":"Scale and offset variables according to CF conventions.\n\n Follows the formula:\n decode_values = encoded_values * scale_factor + add_offset\n ","endLoc":276,"id":2016,"nodeType":"Class","startLoc":241,"text":"class CFScaleOffsetCoder(VariableCoder):\n \"\"\"Scale and offset variables according to CF conventions.\n\n Follows the formula:\n decode_values = encoded_values * scale_factor + add_offset\n \"\"\"\n\n def encode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_encoding(variable)\n\n if \"scale_factor\" in encoding or \"add_offset\" in encoding:\n dtype = _choose_float_dtype(data.dtype, \"add_offset\" in encoding)\n data = data.astype(dtype=dtype, copy=True)\n if \"add_offset\" in encoding:\n data -= pop_to(encoding, attrs, \"add_offset\", name=name)\n if \"scale_factor\" in encoding:\n data /= pop_to(encoding, attrs, \"scale_factor\", name=name)\n\n return Variable(dims, data, attrs, encoding)\n\n def decode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_decoding(variable)\n\n if \"scale_factor\" in attrs or \"add_offset\" in attrs:\n scale_factor = pop_to(attrs, encoding, \"scale_factor\", name=name)\n add_offset = pop_to(attrs, encoding, \"add_offset\", name=name)\n dtype = _choose_float_dtype(data.dtype, \"add_offset\" in attrs)\n transform = partial(\n _scale_offset_decoding,\n scale_factor=scale_factor,\n add_offset=add_offset,\n dtype=dtype,\n )\n data = lazy_elemwise_func(data, transform, dtype)\n\n return Variable(dims, data, attrs, encoding)"},{"col":4,"comment":"null","endLoc":259,"header":"def encode(self, variable, name=None)","id":2017,"name":"encode","nodeType":"Function","startLoc":248,"text":"def encode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_encoding(variable)\n\n if \"scale_factor\" in encoding or \"add_offset\" in encoding:\n dtype = _choose_float_dtype(data.dtype, \"add_offset\" in encoding)\n data = data.astype(dtype=dtype, copy=True)\n if \"add_offset\" in encoding:\n data -= pop_to(encoding, attrs, \"add_offset\", name=name)\n if \"scale_factor\" in encoding:\n data /= pop_to(encoding, attrs, \"scale_factor\", name=name)\n\n return Variable(dims, data, attrs, encoding)"},{"col":4,"comment":"null","endLoc":5237,"header":"@property\n def real(self)","id":2018,"name":"real","nodeType":"Function","startLoc":5235,"text":"@property\n def real(self):\n return self._unary_op(lambda x: x.real, keep_attrs=True)(self)"},{"col":30,"endLoc":5237,"id":2019,"nodeType":"Lambda","startLoc":5237,"text":"lambda x: x.real"},{"col":0,"comment":"Return a float dtype that can losslessly represent `dtype` values.","endLoc":238,"header":"def _choose_float_dtype(dtype, has_offset)","id":2020,"name":"_choose_float_dtype","nodeType":"Function","startLoc":222,"text":"def _choose_float_dtype(dtype, has_offset):\n \"\"\"Return a float dtype that can losslessly represent `dtype` values.\"\"\"\n # Keep float32 as-is. 
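Illustrative aside: the `Dataset.differentiate` and `Dataset._integrate_one` methods above wrap `numpy.gradient` and a trapezoidal-rule helper (`duck_array_ops.gradient` / `trapz`). A minimal usage sketch, with made-up data and coordinate values, might look like this:

```python
import numpy as np
import xarray as xr

# Small dataset with a non-uniform 1-D coordinate "x" (hypothetical values).
x = np.array([0.0, 0.1, 0.3, 0.6, 1.0])
ds = xr.Dataset({"y": ("x", x ** 2)}, coords={"x": x})

# Differentiate along the coordinate; edge_order=2 asks for second-order
# accurate differences at the boundaries, as in np.gradient.
dy_dx = ds.differentiate("x", edge_order=2)

# Integrate along the coordinate with the trapezoidal rule, as in np.trapz.
area = ds.integrate("x")

print(dy_dx["y"].values)  # roughly 2*x
print(float(area["y"]))   # roughly 1/3
```

For datetime coordinates, both methods also accept a `datetime_unit` (e.g. `"s"`), which is applied through the `datetime_to_numeric` conversion shown nearby.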
Upcast half-precision to single-precision,\n # because float16 is \"intended for storage but not computation\"\n if dtype.itemsize <= 4 and np.issubdtype(dtype, np.floating):\n return np.float32\n # float32 can exactly represent all integers up to 24 bits\n if dtype.itemsize <= 2 and np.issubdtype(dtype, np.integer):\n # A scale factor is entirely safe (vanishing into the mantissa),\n # but a large integer offset could lead to loss of precision.\n # Sensitivity analysis can be tricky, so we just use a float64\n # if there's any offset at all - better unoptimised than wrong!\n if not has_offset:\n return np.float32\n # For all other types and circumstances, we just use float64.\n # (safe because eg. complex numbers are not supported in NetCDF)\n return np.float64"},{"col":4,"comment":"Adapted from\n pandas.tseries.base.DatetimeIndexOpsMixin.__contains__","endLoc":368,"header":"def __contains__(self, key)","id":2021,"name":"__contains__","nodeType":"Function","startLoc":357,"text":"def __contains__(self, key):\n \"\"\"Adapted from\n pandas.tseries.base.DatetimeIndexOpsMixin.__contains__\"\"\"\n try:\n result = self.get_loc(key)\n return (\n is_scalar(result)\n or type(result) == slice\n or (isinstance(result, np.ndarray) and result.size)\n )\n except (KeyError, TypeError, ValueError):\n return False"},{"col":4,"comment":"null","endLoc":5241,"header":"@property\n def imag(self)","id":2022,"name":"imag","nodeType":"Function","startLoc":5239,"text":"@property\n def imag(self):\n return self._unary_op(lambda x: x.imag, keep_attrs=True)(self)"},{"col":30,"endLoc":5241,"id":2023,"nodeType":"Lambda","startLoc":5241,"text":"lambda x: x.imag"},{"col":4,"comment":"\n Access plotting functions. Use it as a namespace to use\n xarray.plot functions as Dataset methods\n\n >>> ds.plot.scatter(...) # equivalent to xarray.plot.scatter(ds,...)\n\n ","endLoc":5252,"header":"@property\n def plot(self)","id":2024,"name":"plot","nodeType":"Function","startLoc":5243,"text":"@property\n def plot(self):\n \"\"\"\n Access plotting functions. Use it as a namespace to use\n xarray.plot functions as Dataset methods\n\n >>> ds.plot.scatter(...) # equivalent to xarray.plot.scatter(ds,...)\n\n \"\"\"\n return _Dataset_PlotMethods(self)"},{"col":4,"comment":"Needed for .loc based partial-string indexing","endLoc":372,"header":"def contains(self, key)","id":2025,"name":"contains","nodeType":"Function","startLoc":370,"text":"def contains(self, key):\n \"\"\"Needed for .loc based partial-string indexing\"\"\"\n return self.__contains__(key)"},{"col":4,"comment":"Returns a ``Dataset`` with variables that match specific conditions.\n\n Can pass in ``key=value`` or ``key=callable``. A Dataset is returned\n containing only the variables for which all the filter tests pass.\n These tests are either ``key=value`` for which the attribute ``key``\n has the exact value ``value`` or the callable passed into\n ``key=callable`` returns True. 
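The `CFScaleOffsetCoder` above implements the CF packing convention `decoded = encoded * scale_factor + add_offset`, with the working float dtype chosen by `_choose_float_dtype`. A plain-NumPy sketch of that round trip, with hypothetical packing parameters, could be:

```python
import numpy as np

# Hypothetical attributes on a packed variable.
scale_factor = 0.01
add_offset = 273.15

decoded = np.array([271.34, 273.15, 280.00])

# Encoding (mirrors CFScaleOffsetCoder.encode): subtract the offset,
# divide by the scale factor, and store as a narrow integer type.
encoded = np.round((decoded - add_offset) / scale_factor).astype(np.int16)

# Decoding (mirrors CFScaleOffsetCoder.decode): multiply and add back,
# in a float dtype wide enough to represent the values losslessly.
restored = encoded.astype(np.float64) * scale_factor + add_offset

print(encoded)   # approximately [-181, 0, 685]
print(restored)  # approximately the original values
```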
The callable will be passed a single\n value, either the value of the attribute ``key`` or ``None`` if the\n DataArray does not have an attribute with the name ``key``.\n\n Parameters\n ----------\n **kwargs : key=value\n key : str\n Attribute name.\n value : callable or obj\n If value is a callable, it should return a boolean in the form\n of bool = func(attr) where attr is da.attrs[key].\n Otherwise, value will be compared to the each\n DataArray's attrs[key].\n\n Returns\n -------\n new : Dataset\n New dataset with variables filtered by attribute.\n\n Examples\n --------\n >>> # Create an example dataset:\n >>> import numpy as np\n >>> import pandas as pd\n >>> import xarray as xr\n >>> temp = 15 + 8 * np.random.randn(2, 2, 3)\n >>> precip = 10 * np.random.rand(2, 2, 3)\n >>> lon = [[-99.83, -99.32], [-99.79, -99.23]]\n >>> lat = [[42.25, 42.21], [42.63, 42.59]]\n >>> dims = ['x', 'y', 'time']\n >>> temp_attr = dict(standard_name='air_potential_temperature')\n >>> precip_attr = dict(standard_name='convective_precipitation_flux')\n >>> ds = xr.Dataset({\n ... 'temperature': (dims, temp, temp_attr),\n ... 'precipitation': (dims, precip, precip_attr)},\n ... coords={\n ... 'lon': (['x', 'y'], lon),\n ... 'lat': (['x', 'y'], lat),\n ... 'time': pd.date_range('2014-09-06', periods=3),\n ... 'reference_time': pd.Timestamp('2014-09-05')})\n >>> # Get variables matching a specific standard_name.\n >>> ds.filter_by_attrs(standard_name='convective_precipitation_flux')\n \n Dimensions: (time: 3, x: 2, y: 2)\n Coordinates:\n * x (x) int64 0 1\n * time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08\n lat (x, y) float64 42.25 42.21 42.63 42.59\n * y (y) int64 0 1\n reference_time datetime64[ns] 2014-09-05\n lon (x, y) float64 -99.83 -99.32 -99.79 -99.23\n Data variables:\n precipitation (x, y, time) float64 4.178 2.307 6.041 6.046 0.06648 ...\n >>> # Get all variables that have a standard_name attribute.\n >>> standard_name = lambda v: v is not None\n >>> ds.filter_by_attrs(standard_name=standard_name)\n \n Dimensions: (time: 3, x: 2, y: 2)\n Coordinates:\n lon (x, y) float64 -99.83 -99.32 -99.79 -99.23\n lat (x, y) float64 42.25 42.21 42.63 42.59\n * x (x) int64 0 1\n * y (y) int64 0 1\n * time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08\n reference_time datetime64[ns] 2014-09-05\n Data variables:\n temperature (x, y, time) float64 25.86 20.82 6.954 23.13 10.25 11.68 ...\n precipitation (x, y, time) float64 5.702 0.9422 2.075 1.178 3.284 ...\n\n ","endLoc":5344,"header":"def filter_by_attrs(self, **kwargs)","id":2026,"name":"filter_by_attrs","nodeType":"Function","startLoc":5254,"text":"def filter_by_attrs(self, **kwargs):\n \"\"\"Returns a ``Dataset`` with variables that match specific conditions.\n\n Can pass in ``key=value`` or ``key=callable``. A Dataset is returned\n containing only the variables for which all the filter tests pass.\n These tests are either ``key=value`` for which the attribute ``key``\n has the exact value ``value`` or the callable passed into\n ``key=callable`` returns True. 
The callable will be passed a single\n value, either the value of the attribute ``key`` or ``None`` if the\n DataArray does not have an attribute with the name ``key``.\n\n Parameters\n ----------\n **kwargs : key=value\n key : str\n Attribute name.\n value : callable or obj\n If value is a callable, it should return a boolean in the form\n of bool = func(attr) where attr is da.attrs[key].\n Otherwise, value will be compared to the each\n DataArray's attrs[key].\n\n Returns\n -------\n new : Dataset\n New dataset with variables filtered by attribute.\n\n Examples\n --------\n >>> # Create an example dataset:\n >>> import numpy as np\n >>> import pandas as pd\n >>> import xarray as xr\n >>> temp = 15 + 8 * np.random.randn(2, 2, 3)\n >>> precip = 10 * np.random.rand(2, 2, 3)\n >>> lon = [[-99.83, -99.32], [-99.79, -99.23]]\n >>> lat = [[42.25, 42.21], [42.63, 42.59]]\n >>> dims = ['x', 'y', 'time']\n >>> temp_attr = dict(standard_name='air_potential_temperature')\n >>> precip_attr = dict(standard_name='convective_precipitation_flux')\n >>> ds = xr.Dataset({\n ... 'temperature': (dims, temp, temp_attr),\n ... 'precipitation': (dims, precip, precip_attr)},\n ... coords={\n ... 'lon': (['x', 'y'], lon),\n ... 'lat': (['x', 'y'], lat),\n ... 'time': pd.date_range('2014-09-06', periods=3),\n ... 'reference_time': pd.Timestamp('2014-09-05')})\n >>> # Get variables matching a specific standard_name.\n >>> ds.filter_by_attrs(standard_name='convective_precipitation_flux')\n \n Dimensions: (time: 3, x: 2, y: 2)\n Coordinates:\n * x (x) int64 0 1\n * time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08\n lat (x, y) float64 42.25 42.21 42.63 42.59\n * y (y) int64 0 1\n reference_time datetime64[ns] 2014-09-05\n lon (x, y) float64 -99.83 -99.32 -99.79 -99.23\n Data variables:\n precipitation (x, y, time) float64 4.178 2.307 6.041 6.046 0.06648 ...\n >>> # Get all variables that have a standard_name attribute.\n >>> standard_name = lambda v: v is not None\n >>> ds.filter_by_attrs(standard_name=standard_name)\n \n Dimensions: (time: 3, x: 2, y: 2)\n Coordinates:\n lon (x, y) float64 -99.83 -99.32 -99.79 -99.23\n lat (x, y) float64 42.25 42.21 42.63 42.59\n * x (x) int64 0 1\n * y (y) int64 0 1\n * time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08\n reference_time datetime64[ns] 2014-09-05\n Data variables:\n temperature (x, y, time) float64 25.86 20.82 6.954 23.13 10.25 11.68 ...\n precipitation (x, y, time) float64 5.702 0.9422 2.075 1.178 3.284 ...\n\n \"\"\"\n selection = []\n for var_name, variable in self.variables.items():\n has_value_flag = False\n for attr_name, pattern in kwargs.items():\n attr_value = variable.attrs.get(attr_name)\n if (callable(pattern) and pattern(attr_value)) or attr_value == pattern:\n has_value_flag = True\n else:\n has_value_flag = False\n break\n if has_value_flag is True:\n selection.append(var_name)\n return self[selection]"},{"col":0,"comment":"null","endLoc":1102,"header":"def _logical_any(args)","id":2027,"name":"_logical_any","nodeType":"Function","startLoc":1101,"text":"def _logical_any(args):\n return functools.reduce(operator.or_, args)"},{"col":0,"comment":"null","endLoc":1125,"header":"def _masked_result_drop_slice(key, data=None)","id":2028,"name":"_masked_result_drop_slice","nodeType":"Function","startLoc":1105,"text":"def _masked_result_drop_slice(key, data=None):\n\n key = (k for k in key if not isinstance(k, slice))\n chunks_hint = getattr(data, \"chunks\", None)\n\n new_keys = []\n for k in key:\n if isinstance(k, np.ndarray):\n if 
isinstance(data, dask_array_type):\n new_keys.append(_dask_array_with_chunks_hint(k, chunks_hint))\n elif isinstance(data, sparse_array_type):\n import sparse\n\n new_keys.append(sparse.COO.from_numpy(k))\n else:\n new_keys.append(k)\n else:\n new_keys.append(k)\n\n mask = _logical_any(k == -1 for k in new_keys)\n return mask"},{"col":4,"comment":"Shift the CFTimeIndex a multiple of the given frequency.\n\n See the documentation for :py:func:`~xarray.cftime_range` for a\n complete listing of valid frequency strings.\n\n Parameters\n ----------\n n : int\n Periods to shift by\n freq : str or datetime.timedelta\n A frequency string or datetime.timedelta object to shift by\n\n Returns\n -------\n CFTimeIndex\n\n See also\n --------\n pandas.DatetimeIndex.shift\n\n Examples\n --------\n >>> index = xr.cftime_range('2000', periods=1, freq='M')\n >>> index\n CFTimeIndex([2000-01-31 00:00:00], dtype='object')\n >>> index.shift(1, 'M')\n CFTimeIndex([2000-02-29 00:00:00], dtype='object')\n ","endLoc":415,"header":"def shift(self, n, freq)","id":2029,"name":"shift","nodeType":"Function","startLoc":374,"text":"def shift(self, n, freq):\n \"\"\"Shift the CFTimeIndex a multiple of the given frequency.\n\n See the documentation for :py:func:`~xarray.cftime_range` for a\n complete listing of valid frequency strings.\n\n Parameters\n ----------\n n : int\n Periods to shift by\n freq : str or datetime.timedelta\n A frequency string or datetime.timedelta object to shift by\n\n Returns\n -------\n CFTimeIndex\n\n See also\n --------\n pandas.DatetimeIndex.shift\n\n Examples\n --------\n >>> index = xr.cftime_range('2000', periods=1, freq='M')\n >>> index\n CFTimeIndex([2000-01-31 00:00:00], dtype='object')\n >>> index.shift(1, 'M')\n CFTimeIndex([2000-02-29 00:00:00], dtype='object')\n \"\"\"\n from .cftime_offsets import to_offset\n\n if not isinstance(n, int):\n raise TypeError(\"'n' must be an int, got {}.\".format(n))\n if isinstance(freq, timedelta):\n return self + n * freq\n elif isinstance(freq, str):\n return self + n * to_offset(freq)\n else:\n raise TypeError(\n \"'freq' must be of type \"\n \"str or datetime.timedelta, got {}.\".format(freq)\n )"},{"col":4,"comment":" Unify chunk size along all chunked dimensions of this Dataset.\n\n Returns\n -------\n\n Dataset with consistent chunk sizes for all dask-array variables\n\n See Also\n --------\n\n dask.array.core.unify_chunks\n ","endLoc":5390,"header":"def unify_chunks(self) -> \"Dataset\"","id":2030,"name":"unify_chunks","nodeType":"Function","startLoc":5346,"text":"def unify_chunks(self) -> \"Dataset\":\n \"\"\" Unify chunk size along all chunked dimensions of this Dataset.\n\n Returns\n -------\n\n Dataset with consistent chunk sizes for all dask-array variables\n\n See Also\n --------\n\n dask.array.core.unify_chunks\n \"\"\"\n\n try:\n self.chunks\n except ValueError: # \"inconsistent chunks\"\n pass\n else:\n # No variables with dask backend, or all chunks are already aligned\n return self.copy()\n\n # import dask is placed after the quick exit test above to allow\n # running this method if dask isn't installed and there are no chunks\n import dask.array\n\n ds = self.copy()\n\n dims_pos_map = {dim: index for index, dim in enumerate(ds.dims)}\n\n dask_array_names = []\n dask_unify_args = []\n for name, variable in ds.variables.items():\n if isinstance(variable.data, dask.array.Array):\n dims_tuple = [dims_pos_map[dim] for dim in variable.dims]\n dask_array_names.append(name)\n dask_unify_args.append(variable.data)\n 
dask_unify_args.append(dims_tuple)\n\n _, rechunked_arrays = dask.array.core.unify_chunks(*dask_unify_args)\n\n for name, new_array in zip(dask_array_names, rechunked_arrays):\n ds.variables[name]._data = new_array\n\n return ds"},{"col":4,"comment":"null","endLoc":276,"header":"def decode(self, variable, name=None)","id":2031,"name":"decode","nodeType":"Function","startLoc":261,"text":"def decode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_decoding(variable)\n\n if \"scale_factor\" in attrs or \"add_offset\" in attrs:\n scale_factor = pop_to(attrs, encoding, \"scale_factor\", name=name)\n add_offset = pop_to(attrs, encoding, \"add_offset\", name=name)\n dtype = _choose_float_dtype(data.dtype, \"add_offset\" in attrs)\n transform = partial(\n _scale_offset_decoding,\n scale_factor=scale_factor,\n add_offset=add_offset,\n dtype=dtype,\n )\n data = lazy_elemwise_func(data, transform, dtype)\n\n return Variable(dims, data, attrs, encoding)"},{"col":4,"comment":"null","endLoc":420,"header":"def __add__(self, other)","id":2032,"name":"__add__","nodeType":"Function","startLoc":417,"text":"def __add__(self, other):\n if isinstance(other, pd.TimedeltaIndex):\n other = other.to_pytimedelta()\n return CFTimeIndex(np.array(self) + other)"},{"col":4,"comment":"Convert this array into a iris.cube.Cube\n ","endLoc":2388,"header":"def to_iris(self) -> \"iris_Cube\"","id":2033,"name":"to_iris","nodeType":"Function","startLoc":2383,"text":"def to_iris(self) -> \"iris_Cube\":\n \"\"\"Convert this array into a iris.cube.Cube\n \"\"\"\n from ..convert import to_iris\n\n return to_iris(self)"},{"col":4,"comment":"\n Apply a function to each chunk of this Dataset. This method is experimental and\n its signature may change.\n\n Parameters\n ----------\n func: callable\n User-provided function that accepts a Dataset as its first parameter. The\n function will receive a subset of this Dataset, corresponding to one chunk\n along each chunked dimension. ``func`` will be executed as\n ``func(obj_subset, *args, **kwargs)``.\n\n The function will be first run on mocked-up data, that looks like this\n Dataset but has sizes 0, to determine properties of the returned object such\n as dtype, variable names, new dimensions and new indexes (if any).\n\n This function must return either a single DataArray or a single Dataset.\n\n This function cannot change size of existing dimensions, or add new chunked\n dimensions.\n args: Sequence\n Passed verbatim to func after unpacking, after the sliced DataArray. xarray\n objects, if any, will not be split by chunks. Passing dask collections is\n not allowed.\n kwargs: Mapping\n Passed verbatim to func after unpacking. xarray objects, if any, will not be\n split by chunks. Passing dask collections is not allowed.\n\n Returns\n -------\n A single DataArray or Dataset with dask backend, reassembled from the outputs of\n the function.\n\n Notes\n -----\n This method is designed for when one needs to manipulate a whole xarray object\n within each chunk. 
In the more common case where one can work on numpy arrays,\n it is recommended to use apply_ufunc.\n\n If none of the variables in this Dataset is backed by dask, calling this method\n is equivalent to calling ``func(self, *args, **kwargs)``.\n\n See Also\n --------\n dask.array.map_blocks, xarray.apply_ufunc, xarray.map_blocks,\n xarray.DataArray.map_blocks\n ","endLoc":5447,"header":"def map_blocks(\n self,\n func: \"Callable[..., T_DSorDA]\",\n args: Sequence[Any] = (),\n kwargs","id":2034,"name":"map_blocks","nodeType":"Function","startLoc":5392,"text":"def map_blocks(\n self,\n func: \"Callable[..., T_DSorDA]\",\n args: Sequence[Any] = (),\n kwargs: Mapping[str, Any] = None,\n ) -> \"T_DSorDA\":\n \"\"\"\n Apply a function to each chunk of this Dataset. This method is experimental and\n its signature may change.\n\n Parameters\n ----------\n func: callable\n User-provided function that accepts a Dataset as its first parameter. The\n function will receive a subset of this Dataset, corresponding to one chunk\n along each chunked dimension. ``func`` will be executed as\n ``func(obj_subset, *args, **kwargs)``.\n\n The function will be first run on mocked-up data, that looks like this\n Dataset but has sizes 0, to determine properties of the returned object such\n as dtype, variable names, new dimensions and new indexes (if any).\n\n This function must return either a single DataArray or a single Dataset.\n\n This function cannot change size of existing dimensions, or add new chunked\n dimensions.\n args: Sequence\n Passed verbatim to func after unpacking, after the sliced DataArray. xarray\n objects, if any, will not be split by chunks. Passing dask collections is\n not allowed.\n kwargs: Mapping\n Passed verbatim to func after unpacking. xarray objects, if any, will not be\n split by chunks. Passing dask collections is not allowed.\n\n Returns\n -------\n A single DataArray or Dataset with dask backend, reassembled from the outputs of\n the function.\n\n Notes\n -----\n This method is designed for when one needs to manipulate a whole xarray object\n within each chunk. 
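A hedged usage sketch for `Dataset.map_blocks` as documented above; the data and the `demean` function are made up, and dask must be installed for the chunking to take effect:

```python
import numpy as np
import xarray as xr

ds = xr.Dataset(
    {"temperature": (("time", "x"), np.random.rand(8, 4))},
    coords={"time": np.arange(8), "x": np.arange(4)},
).chunk({"time": 4})

def demean(block):
    # Receives one chunk-sized Dataset; must not change dimension sizes.
    return block - block.mean("x")

# Applied lazily, one chunk at a time; compute() materialises the result.
result = ds.map_blocks(demean)
print(result.compute())
```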
In the more common case where one can work on numpy arrays,\n it is recommended to use apply_ufunc.\n\n If none of the variables in this Dataset is backed by dask, calling this method\n is equivalent to calling ``func(self, *args, **kwargs)``.\n\n See Also\n --------\n dask.array.map_blocks, xarray.apply_ufunc, xarray.map_blocks,\n xarray.DataArray.map_blocks\n \"\"\"\n from .parallel import map_blocks\n\n return map_blocks(func, self, args, kwargs)"},{"col":0,"comment":" Convert a DataArray into a Iris Cube\n ","endLoc":212,"header":"def to_iris(dataarray)","id":2035,"name":"to_iris","nodeType":"Function","startLoc":174,"text":"def to_iris(dataarray):\n \"\"\" Convert a DataArray into a Iris Cube\n \"\"\"\n # Iris not a hard dependency\n import iris\n from iris.fileformats.netcdf import parse_cell_methods\n\n dim_coords = []\n aux_coords = []\n\n for coord_name in dataarray.coords:\n coord = encode(dataarray.coords[coord_name])\n coord_args = _get_iris_args(coord.attrs)\n coord_args[\"var_name\"] = coord_name\n axis = None\n if coord.dims:\n axis = dataarray.get_axis_num(coord.dims)\n if coord_name in dataarray.dims:\n try:\n iris_coord = iris.coords.DimCoord(coord.values, **coord_args)\n dim_coords.append((iris_coord, axis))\n except ValueError:\n iris_coord = iris.coords.AuxCoord(coord.values, **coord_args)\n aux_coords.append((iris_coord, axis))\n else:\n iris_coord = iris.coords.AuxCoord(coord.values, **coord_args)\n aux_coords.append((iris_coord, axis))\n\n args = _get_iris_args(dataarray.attrs)\n args[\"var_name\"] = dataarray.name\n args[\"dim_coords_and_dims\"] = dim_coords\n args[\"aux_coords_and_dims\"] = aux_coords\n if \"cell_methods\" in dataarray.attrs:\n args[\"cell_methods\"] = parse_cell_methods(dataarray.attrs[\"cell_methods\"])\n\n masked_data = duck_array_ops.masked_invalid(dataarray.data)\n cube = iris.cube.Cube(masked_data, **args)\n\n return cube"},{"col":4,"comment":"null","endLoc":425,"header":"def __radd__(self, other)","id":2036,"name":"__radd__","nodeType":"Function","startLoc":422,"text":"def __radd__(self, other):\n if isinstance(other, pd.TimedeltaIndex):\n other = other.to_pytimedelta()\n return CFTimeIndex(other + np.array(self))"},{"col":0,"comment":" Converts the xarray attrs into args that can be passed into Iris\n ","endLoc":170,"header":"def _get_iris_args(attrs)","id":2037,"name":"_get_iris_args","nodeType":"Function","startLoc":159,"text":"def _get_iris_args(attrs):\n \"\"\" Converts the xarray attrs into args that can be passed into Iris\n \"\"\"\n # iris.unit is deprecated in Iris v1.9\n import cf_units\n\n args = {\"attributes\": _filter_attrs(attrs, iris_forbidden_keys)}\n args.update(_pick_attrs(attrs, (\"standard_name\", \"long_name\")))\n unit_args = _pick_attrs(attrs, (\"calendar\",))\n if \"units\" in attrs:\n args[\"units\"] = cf_units.Unit(attrs[\"units\"], **unit_args)\n return args"},{"attributeType":"null","col":4,"comment":"null","endLoc":411,"id":2038,"name":"_accessors","nodeType":"Attribute","startLoc":411,"text":"_accessors"},{"col":0,"comment":"Open an DataArray from a file or file-like object containing a single\n data variable.\n\n This is designed to read netCDF files with only one data variable. 
If\n multiple variables are present then a ValueError is raised.\n\n Parameters\n ----------\n filename_or_obj : str, Path, file or xarray.backends.*DataStore\n Strings and Paths are interpreted as a path to a netCDF file or an\n OpenDAP URL and opened with python-netCDF4, unless the filename ends\n with .gz, in which case the file is gunzipped and opened with\n scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like\n objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).\n group : str, optional\n Path to the netCDF4 group in the given file to open (only works for\n netCDF4 files).\n decode_cf : bool, optional\n Whether to decode these variables, assuming they were saved according\n to CF conventions.\n mask_and_scale : bool, optional\n If True, replace array values equal to `_FillValue` with NA and scale\n values according to the formula `original_values * scale_factor +\n add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are\n taken from variable attributes (if they exist). If the `_FillValue` or\n `missing_value` attribute contains multiple values a warning will be\n issued and all array values matching one of the multiple values will\n be replaced by NA. mask_and_scale defaults to True except for the\n pseudonetcdf backend.\n decode_times : bool, optional\n If True, decode times encoded in the standard NetCDF datetime format\n into datetime objects. Otherwise, leave them encoded as numbers.\n concat_characters : bool, optional\n If True, concatenate along the last dimension of character arrays to\n form string arrays. Dimensions will only be concatenated over (and\n removed) if they have no corresponding variable and if they are only\n used as the last dimension of character arrays.\n decode_coords : bool, optional\n If True, decode the 'coordinates' attribute to identify coordinates in\n the resulting dataset.\n engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib'}, \n optional\n Engine to use when reading files. If not provided, the default engine\n is chosen based on available dependencies, with a preference for\n 'netcdf4'.\n chunks : int or dict, optional\n If chunks is provided, it used to load the new dataset into dask\n arrays.\n lock : False or duck threading.Lock, optional\n Resource lock to use when reading data from disk. Only relevant when\n using dask or another form of parallelism. By default, appropriate\n locks are chosen to safely read and write files with the currently\n active dask scheduler.\n cache : bool, optional\n If True, cache data loaded from the underlying datastore in memory as\n NumPy arrays when accessed to avoid reading from the underlying data-\n store multiple times. Defaults to True unless you specify the `chunks`\n argument to use dask, in which case it defaults to False. Does not\n change the behavior of coordinates corresponding to dimensions, which\n always load their data from disk into a ``pandas.Index``.\n drop_variables: string or iterable, optional\n A variable or list of variables to exclude from being parsed from the\n dataset. This may be useful to drop variables with problems or\n inconsistent values.\n backend_kwargs: dictionary, optional\n A dictionary of keyword arguments to pass on to the backend. This\n may be useful when backend options would improve performance or\n allow user control of dataset processing.\n use_cftime: bool, optional\n Only relevant if encoded dates come from a standard calendar\n (e.g. 
'gregorian', 'proleptic_gregorian', 'standard', or not\n specified). If None (default), attempt to decode times to\n ``np.datetime64[ns]`` objects; if this is not possible, decode times to\n ``cftime.datetime`` objects. If True, always decode times to\n ``cftime.datetime`` objects, regardless of whether or not they can be\n represented using ``np.datetime64[ns]`` objects. If False, always\n decode times to ``np.datetime64[ns]`` objects; if this is not possible\n raise an error.\n\n Notes\n -----\n This is designed to be fully compatible with `DataArray.to_netcdf`. Saving\n using `DataArray.to_netcdf` and then loading with this function will\n produce an identical result.\n\n All parameters are passed directly to `xarray.open_dataset`. See that\n documentation for further details.\n\n See also\n --------\n open_dataset\n ","endLoc":693,"header":"def open_dataarray(\n filename_or_obj,\n group=None,\n decode_cf=True,\n mask_and_scale=None,\n decode_times=True,\n autoclose=None,\n concat_characters=True,\n decode_coords=True,\n engine=None,\n chunks=None,\n lock=None,\n cache=None,\n drop_variables=None,\n backend_kwargs=None,\n use_cftime=None,\n)","id":2039,"name":"open_dataarray","nodeType":"Function","startLoc":545,"text":"def open_dataarray(\n filename_or_obj,\n group=None,\n decode_cf=True,\n mask_and_scale=None,\n decode_times=True,\n autoclose=None,\n concat_characters=True,\n decode_coords=True,\n engine=None,\n chunks=None,\n lock=None,\n cache=None,\n drop_variables=None,\n backend_kwargs=None,\n use_cftime=None,\n):\n \"\"\"Open an DataArray from a file or file-like object containing a single\n data variable.\n\n This is designed to read netCDF files with only one data variable. If\n multiple variables are present then a ValueError is raised.\n\n Parameters\n ----------\n filename_or_obj : str, Path, file or xarray.backends.*DataStore\n Strings and Paths are interpreted as a path to a netCDF file or an\n OpenDAP URL and opened with python-netCDF4, unless the filename ends\n with .gz, in which case the file is gunzipped and opened with\n scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like\n objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).\n group : str, optional\n Path to the netCDF4 group in the given file to open (only works for\n netCDF4 files).\n decode_cf : bool, optional\n Whether to decode these variables, assuming they were saved according\n to CF conventions.\n mask_and_scale : bool, optional\n If True, replace array values equal to `_FillValue` with NA and scale\n values according to the formula `original_values * scale_factor +\n add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are\n taken from variable attributes (if they exist). If the `_FillValue` or\n `missing_value` attribute contains multiple values a warning will be\n issued and all array values matching one of the multiple values will\n be replaced by NA. mask_and_scale defaults to True except for the\n pseudonetcdf backend.\n decode_times : bool, optional\n If True, decode times encoded in the standard NetCDF datetime format\n into datetime objects. Otherwise, leave them encoded as numbers.\n concat_characters : bool, optional\n If True, concatenate along the last dimension of character arrays to\n form string arrays. 
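As a hedged illustration of the options documented in this docstring (the path is hypothetical; it assumes a netCDF file holding exactly one data variable):

```python
import xarray as xr

path = "single_variable.nc"  # hypothetical file

# Lazily open the single variable as a DataArray, with dask chunks and the
# default CF decoding (mask_and_scale, decode_times, decode_coords).
da = xr.open_dataarray(path, chunks={"time": 100}, decode_times=True)

# Opening with decode_cf=False instead returns the raw stored values,
# leaving _FillValue, scale_factor/add_offset and time units untouched.
raw = xr.open_dataarray(path, decode_cf=False)
```

If the file contained more than one data variable, `open_dataarray` would raise a `ValueError`, and `xr.open_dataset` plus an explicit variable selection would be the fallback.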
Dimensions will only be concatenated over (and\n removed) if they have no corresponding variable and if they are only\n used as the last dimension of character arrays.\n decode_coords : bool, optional\n If True, decode the 'coordinates' attribute to identify coordinates in\n the resulting dataset.\n engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib'}, \\\n optional\n Engine to use when reading files. If not provided, the default engine\n is chosen based on available dependencies, with a preference for\n 'netcdf4'.\n chunks : int or dict, optional\n If chunks is provided, it used to load the new dataset into dask\n arrays.\n lock : False or duck threading.Lock, optional\n Resource lock to use when reading data from disk. Only relevant when\n using dask or another form of parallelism. By default, appropriate\n locks are chosen to safely read and write files with the currently\n active dask scheduler.\n cache : bool, optional\n If True, cache data loaded from the underlying datastore in memory as\n NumPy arrays when accessed to avoid reading from the underlying data-\n store multiple times. Defaults to True unless you specify the `chunks`\n argument to use dask, in which case it defaults to False. Does not\n change the behavior of coordinates corresponding to dimensions, which\n always load their data from disk into a ``pandas.Index``.\n drop_variables: string or iterable, optional\n A variable or list of variables to exclude from being parsed from the\n dataset. This may be useful to drop variables with problems or\n inconsistent values.\n backend_kwargs: dictionary, optional\n A dictionary of keyword arguments to pass on to the backend. This\n may be useful when backend options would improve performance or\n allow user control of dataset processing.\n use_cftime: bool, optional\n Only relevant if encoded dates come from a standard calendar\n (e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not\n specified). If None (default), attempt to decode times to\n ``np.datetime64[ns]`` objects; if this is not possible, decode times to\n ``cftime.datetime`` objects. If True, always decode times to\n ``cftime.datetime`` objects, regardless of whether or not they can be\n represented using ``np.datetime64[ns]`` objects. If False, always\n decode times to ``np.datetime64[ns]`` objects; if this is not possible\n raise an error.\n\n Notes\n -----\n This is designed to be fully compatible with `DataArray.to_netcdf`. Saving\n using `DataArray.to_netcdf` and then loading with this function will\n produce an identical result.\n\n All parameters are passed directly to `xarray.open_dataset`. See that\n documentation for further details.\n\n See also\n --------\n open_dataset\n \"\"\"\n\n dataset = open_dataset(\n filename_or_obj,\n group=group,\n decode_cf=decode_cf,\n mask_and_scale=mask_and_scale,\n decode_times=decode_times,\n autoclose=autoclose,\n concat_characters=concat_characters,\n decode_coords=decode_coords,\n engine=engine,\n chunks=chunks,\n lock=lock,\n cache=cache,\n drop_variables=drop_variables,\n backend_kwargs=backend_kwargs,\n use_cftime=use_cftime,\n )\n\n if len(dataset.data_vars) != 1:\n raise ValueError(\n \"Given file dataset contains more than one data \"\n \"variable. 
Please read with xarray.open_dataset and \"\n \"then select the variable you want.\"\n )\n else:\n data_array, = dataset.data_vars.values()\n\n data_array._file_obj = dataset._file_obj\n\n # Reset names if they were changed during saving\n # to ensure that we can 'roundtrip' perfectly\n if DATAARRAY_NAME in dataset.attrs:\n data_array.name = dataset.attrs[DATAARRAY_NAME]\n del dataset.attrs[DATAARRAY_NAME]\n\n if data_array.name == DATAARRAY_VARIABLE:\n data_array.name = None\n\n return data_array"},{"col":0,"comment":" Return attrs with keys in keys list\n ","endLoc":156,"header":"def _pick_attrs(attrs, keys)","id":2040,"name":"_pick_attrs","nodeType":"Function","startLoc":153,"text":"def _pick_attrs(attrs, keys):\n \"\"\" Return attrs with keys in keys list\n \"\"\"\n return {k: v for k, v in attrs.items() if k in keys}"},{"col":0,"comment":"Create a mask for indexing with a fill-value.\n\n Parameters\n ----------\n indexer : ExplicitIndexer\n Indexer with -1 in integer or ndarray value to indicate locations in\n the result that should be masked.\n shape : tuple\n Shape of the array being indexed.\n data : optional\n Data for which mask is being created. If data is a dask arrays, its chunks\n are used as a hint for chunks on the resulting mask. If data is a sparse\n array, the returned mask is also a sparse array.\n\n Returns\n -------\n mask : bool, np.ndarray, SparseArray or dask.array.Array with dtype=bool\n Same type as data. Has the same shape as the indexing result.\n ","endLoc":1170,"header":"def create_mask(indexer, shape, data=None)","id":2041,"name":"create_mask","nodeType":"Function","startLoc":1128,"text":"def create_mask(indexer, shape, data=None):\n \"\"\"Create a mask for indexing with a fill-value.\n\n Parameters\n ----------\n indexer : ExplicitIndexer\n Indexer with -1 in integer or ndarray value to indicate locations in\n the result that should be masked.\n shape : tuple\n Shape of the array being indexed.\n data : optional\n Data for which mask is being created. If data is a dask arrays, its chunks\n are used as a hint for chunks on the resulting mask. If data is a sparse\n array, the returned mask is also a sparse array.\n\n Returns\n -------\n mask : bool, np.ndarray, SparseArray or dask.array.Array with dtype=bool\n Same type as data. Has the same shape as the indexing result.\n \"\"\"\n if isinstance(indexer, OuterIndexer):\n key = _outer_to_vectorized_indexer(indexer, shape).tuple\n assert not any(isinstance(k, slice) for k in key)\n mask = _masked_result_drop_slice(key, data)\n\n elif isinstance(indexer, VectorizedIndexer):\n key = indexer.tuple\n base_mask = _masked_result_drop_slice(key, data)\n slice_shape = tuple(\n np.arange(*k.indices(size)).size\n for k, size in zip(key, shape)\n if isinstance(k, slice)\n )\n expanded_mask = base_mask[(Ellipsis,) + (np.newaxis,) * len(slice_shape)]\n mask = duck_array_ops.broadcast_to(expanded_mask, base_mask.shape + slice_shape)\n\n elif isinstance(indexer, BasicIndexer):\n mask = any(k == -1 for k in indexer.tuple)\n\n else:\n raise TypeError(\"unexpected key type: {}\".format(type(indexer)))\n\n return mask"},{"col":0,"comment":"Open, load into memory, and close a Dataset from a file or file-like\n object.\n\n This is a thin wrapper around :py:meth:`~xarray.open_dataset`. It differs\n from `open_dataset` in that it loads the Dataset into memory, closes the\n file, and returns the Dataset. In contrast, `open_dataset` keeps the file\n handle open and lazy loads its contents. 
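`load_dataset` (and `load_dataarray` below) are thin eager wrappers around the lazy `open_*` functions; a minimal sketch of the equivalence described here, with a hypothetical filename:

```python
import xarray as xr

path = "example.nc"  # hypothetical file

# Eager: read everything into memory and close the file immediately.
ds_eager = xr.load_dataset(path)

# The same thing spelled out with the lazy API.
with xr.open_dataset(path) as handle:
    ds_loaded = handle.load()
```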
All parameters are passed directly\n to `open_dataset`. See that documentation for further details.\n\n Returns\n -------\n dataset : Dataset\n The newly created Dataset.\n\n See Also\n --------\n open_dataset\n ","endLoc":258,"header":"def load_dataset(filename_or_obj, **kwargs)","id":2042,"name":"load_dataset","nodeType":"Function","startLoc":235,"text":"def load_dataset(filename_or_obj, **kwargs):\n \"\"\"Open, load into memory, and close a Dataset from a file or file-like\n object.\n\n This is a thin wrapper around :py:meth:`~xarray.open_dataset`. It differs\n from `open_dataset` in that it loads the Dataset into memory, closes the\n file, and returns the Dataset. In contrast, `open_dataset` keeps the file\n handle open and lazy loads its contents. All parameters are passed directly\n to `open_dataset`. See that documentation for further details.\n\n Returns\n -------\n dataset : Dataset\n The newly created Dataset.\n\n See Also\n --------\n open_dataset\n \"\"\"\n if \"cache\" in kwargs:\n raise TypeError(\"cache has no effect in this context\")\n\n with open_dataset(filename_or_obj, **kwargs) as ds:\n return ds.load()"},{"col":0,"comment":"Open, load into memory, and close a DataArray from a file or file-like\n object containing a single data variable.\n\n This is a thin wrapper around :py:meth:`~xarray.open_dataarray`. It differs\n from `open_dataarray` in that it loads the Dataset into memory, closes the\n file, and returns the Dataset. In contrast, `open_dataarray` keeps the file\n handle open and lazy loads its contents. All parameters are passed directly\n to `open_dataarray`. See that documentation for further details.\n\n Returns\n -------\n datarray : DataArray\n The newly created DataArray.\n\n See Also\n --------\n open_dataarray\n ","endLoc":284,"header":"def load_dataarray(filename_or_obj, **kwargs)","id":2043,"name":"load_dataarray","nodeType":"Function","startLoc":261,"text":"def load_dataarray(filename_or_obj, **kwargs):\n \"\"\"Open, load into memory, and close a DataArray from a file or file-like\n object containing a single data variable.\n\n This is a thin wrapper around :py:meth:`~xarray.open_dataarray`. It differs\n from `open_dataarray` in that it loads the Dataset into memory, closes the\n file, and returns the Dataset. In contrast, `open_dataarray` keeps the file\n handle open and lazy loads its contents. All parameters are passed directly\n to `open_dataarray`. 
See that documentation for further details.\n\n Returns\n -------\n datarray : DataArray\n The newly created DataArray.\n\n See Also\n --------\n open_dataarray\n \"\"\"\n if \"cache\" in kwargs:\n raise TypeError(\"cache has no effect in this context\")\n\n with open_dataarray(filename_or_obj, **kwargs) as da:\n return da.load()"},{"attributeType":"null","col":4,"comment":"null","endLoc":412,"id":2044,"name":"_attrs","nodeType":"Attribute","startLoc":412,"text":"_attrs"},{"col":4,"comment":"null","endLoc":435,"header":"def __sub__(self, other)","id":2045,"name":"__sub__","nodeType":"Function","startLoc":427,"text":"def __sub__(self, other):\n import cftime\n\n if isinstance(other, (CFTimeIndex, cftime.datetime)):\n return pd.TimedeltaIndex(np.array(self) - np.array(other))\n elif isinstance(other, pd.TimedeltaIndex):\n return CFTimeIndex(np.array(self) - other.to_pytimedelta())\n else:\n return CFTimeIndex(np.array(self) - other)"},{"col":0,"comment":"Return a fixed frequency CFTimeIndex.\n\n Parameters\n ----------\n start : str or cftime.datetime, optional\n Left bound for generating dates.\n end : str or cftime.datetime, optional\n Right bound for generating dates.\n periods : integer, optional\n Number of periods to generate.\n freq : str, default 'D', BaseCFTimeOffset, or None\n Frequency strings can have multiples, e.g. '5H'.\n normalize : bool, default False\n Normalize start/end dates to midnight before generating date range.\n name : str, default None\n Name of the resulting index\n closed : {None, 'left', 'right'}, optional\n Make the interval closed with respect to the given frequency to the\n 'left', 'right', or both sides (None, the default).\n calendar : str\n Calendar type for the datetimes (default 'standard').\n\n Returns\n -------\n CFTimeIndex\n\n Notes\n -----\n\n This function is an analog of ``pandas.date_range`` for use in generating\n sequences of ``cftime.datetime`` objects. It supports most of the\n features of ``pandas.date_range`` (e.g. specifying how the index is\n ``closed`` on either side, or whether or not to ``normalize`` the start and\n end bounds); however, there are some notable exceptions:\n\n - You cannot specify a ``tz`` (time zone) argument.\n - Start or end dates specified as partial-datetime strings must use the\n `ISO-8601 format `_.\n - It supports many, but not all, frequencies supported by\n ``pandas.date_range``. For example it does not currently support any of\n the business-related, semi-monthly, or sub-second frequencies.\n - Compound sub-monthly frequencies are not supported, e.g. '1H1min', as\n these can easily be written in terms of the finest common resolution,\n e.g. 
'61min'.\n\n Valid simple frequency strings for use with ``cftime``-calendars include\n any multiples of the following.\n\n +--------+--------------------------+\n | Alias | Description |\n +========+==========================+\n | A, Y | Year-end frequency |\n +--------+--------------------------+\n | AS, YS | Year-start frequency |\n +--------+--------------------------+\n | Q | Quarter-end frequency |\n +--------+--------------------------+\n | QS | Quarter-start frequency |\n +--------+--------------------------+\n | M | Month-end frequency |\n +--------+--------------------------+\n | MS | Month-start frequency |\n +--------+--------------------------+\n | D | Day frequency |\n +--------+--------------------------+\n | H | Hour frequency |\n +--------+--------------------------+\n | T, min | Minute frequency |\n +--------+--------------------------+\n | S | Second frequency |\n +--------+--------------------------+\n\n Any multiples of the following anchored offsets are also supported.\n\n +----------+--------------------------------------------------------------------+\n | Alias | Description |\n +==========+====================================================================+\n | A(S)-JAN | Annual frequency, anchored at the end (or beginning) of January |\n +----------+--------------------------------------------------------------------+\n | A(S)-FEB | Annual frequency, anchored at the end (or beginning) of February |\n +----------+--------------------------------------------------------------------+\n | A(S)-MAR | Annual frequency, anchored at the end (or beginning) of March |\n +----------+--------------------------------------------------------------------+\n | A(S)-APR | Annual frequency, anchored at the end (or beginning) of April |\n +----------+--------------------------------------------------------------------+\n | A(S)-MAY | Annual frequency, anchored at the end (or beginning) of May |\n +----------+--------------------------------------------------------------------+\n | A(S)-JUN | Annual frequency, anchored at the end (or beginning) of June |\n +----------+--------------------------------------------------------------------+\n | A(S)-JUL | Annual frequency, anchored at the end (or beginning) of July |\n +----------+--------------------------------------------------------------------+\n | A(S)-AUG | Annual frequency, anchored at the end (or beginning) of August |\n +----------+--------------------------------------------------------------------+\n | A(S)-SEP | Annual frequency, anchored at the end (or beginning) of September |\n +----------+--------------------------------------------------------------------+\n | A(S)-OCT | Annual frequency, anchored at the end (or beginning) of October |\n +----------+--------------------------------------------------------------------+\n | A(S)-NOV | Annual frequency, anchored at the end (or beginning) of November |\n +----------+--------------------------------------------------------------------+\n | A(S)-DEC | Annual frequency, anchored at the end (or beginning) of December |\n +----------+--------------------------------------------------------------------+\n | Q(S)-JAN | Quarter frequency, anchored at the end (or beginning) of January |\n +----------+--------------------------------------------------------------------+\n | Q(S)-FEB | Quarter frequency, anchored at the end (or beginning) of February |\n +----------+--------------------------------------------------------------------+\n | Q(S)-MAR | Quarter frequency, anchored at the 
end (or beginning) of March |\n +----------+--------------------------------------------------------------------+\n | Q(S)-APR | Quarter frequency, anchored at the end (or beginning) of April |\n +----------+--------------------------------------------------------------------+\n | Q(S)-MAY | Quarter frequency, anchored at the end (or beginning) of May |\n +----------+--------------------------------------------------------------------+\n | Q(S)-JUN | Quarter frequency, anchored at the end (or beginning) of June |\n +----------+--------------------------------------------------------------------+\n | Q(S)-JUL | Quarter frequency, anchored at the end (or beginning) of July |\n +----------+--------------------------------------------------------------------+\n | Q(S)-AUG | Quarter frequency, anchored at the end (or beginning) of August |\n +----------+--------------------------------------------------------------------+\n | Q(S)-SEP | Quarter frequency, anchored at the end (or beginning) of September |\n +----------+--------------------------------------------------------------------+\n | Q(S)-OCT | Quarter frequency, anchored at the end (or beginning) of October |\n +----------+--------------------------------------------------------------------+\n | Q(S)-NOV | Quarter frequency, anchored at the end (or beginning) of November |\n +----------+--------------------------------------------------------------------+\n | Q(S)-DEC | Quarter frequency, anchored at the end (or beginning) of December |\n +----------+--------------------------------------------------------------------+\n\n\n Finally, the following calendar aliases are supported.\n\n +--------------------------------+---------------------------------------+\n | Alias | Date type |\n +================================+=======================================+\n | standard, gregorian | ``cftime.DatetimeGregorian`` |\n +--------------------------------+---------------------------------------+\n | proleptic_gregorian | ``cftime.DatetimeProlepticGregorian`` |\n +--------------------------------+---------------------------------------+\n | noleap, 365_day | ``cftime.DatetimeNoLeap`` |\n +--------------------------------+---------------------------------------+\n | all_leap, 366_day | ``cftime.DatetimeAllLeap`` |\n +--------------------------------+---------------------------------------+\n | 360_day | ``cftime.Datetime360Day`` |\n +--------------------------------+---------------------------------------+\n | julian | ``cftime.DatetimeJulian`` |\n +--------------------------------+---------------------------------------+\n\n Examples\n --------\n\n This function returns a ``CFTimeIndex``, populated with ``cftime.datetime``\n objects associated with the specified calendar type, e.g.\n\n >>> xr.cftime_range(start='2000', periods=6, freq='2MS', calendar='noleap')\n CFTimeIndex([2000-01-01 00:00:00, 2000-03-01 00:00:00, 2000-05-01 00:00:00,\n 2000-07-01 00:00:00, 2000-09-01 00:00:00, 2000-11-01 00:00:00],\n dtype='object')\n\n As in the standard pandas function, three of the ``start``, ``end``,\n ``periods``, or ``freq`` arguments must be specified at a given time, with\n the other set to ``None``. 
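As an illustrative aside (not part of the original docstring), the doctest-style lines below exercise a few of the frequency, calendar, and ``closed`` options tabulated above; they assume the optional ``cftime`` dependency is installed, and the specific aliases shown are arbitrary picks from the tables.

>>> import xarray as xr
>>> # quarter-start frequency anchored at December, on a 360-day calendar
>>> idx_q = xr.cftime_range(start="2000-01-01", end="2001-12-31",
...                         freq="QS-DEC", calendar="360_day")
>>> # month-end frequency on the noleap calendar, keeping only the left-closed side
>>> idx_m = xr.cftime_range(start="2000-01-01", end="2000-06-30",
...                         freq="M", calendar="noleap", closed="left")
>>> # freq=None with start, end and periods gives a linearly spaced index
>>> idx_lin = xr.cftime_range(start="2000-01-01", end="2000-01-02",
...                           periods=5, freq=None, calendar="standard")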
See the `pandas documentation\n `_\n for more examples of the behavior of ``date_range`` with each of the\n parameters.\n\n See Also\n --------\n pandas.date_range\n ","endLoc":993,"header":"def cftime_range(\n start=None,\n end=None,\n periods=None,\n freq=\"D\",\n normalize=False,\n name=None,\n closed=None,\n calendar=\"standard\",\n)","id":2046,"name":"cftime_range","nodeType":"Function","startLoc":778,"text":"def cftime_range(\n start=None,\n end=None,\n periods=None,\n freq=\"D\",\n normalize=False,\n name=None,\n closed=None,\n calendar=\"standard\",\n):\n \"\"\"Return a fixed frequency CFTimeIndex.\n\n Parameters\n ----------\n start : str or cftime.datetime, optional\n Left bound for generating dates.\n end : str or cftime.datetime, optional\n Right bound for generating dates.\n periods : integer, optional\n Number of periods to generate.\n freq : str, default 'D', BaseCFTimeOffset, or None\n Frequency strings can have multiples, e.g. '5H'.\n normalize : bool, default False\n Normalize start/end dates to midnight before generating date range.\n name : str, default None\n Name of the resulting index\n closed : {None, 'left', 'right'}, optional\n Make the interval closed with respect to the given frequency to the\n 'left', 'right', or both sides (None, the default).\n calendar : str\n Calendar type for the datetimes (default 'standard').\n\n Returns\n -------\n CFTimeIndex\n\n Notes\n -----\n\n This function is an analog of ``pandas.date_range`` for use in generating\n sequences of ``cftime.datetime`` objects. It supports most of the\n features of ``pandas.date_range`` (e.g. specifying how the index is\n ``closed`` on either side, or whether or not to ``normalize`` the start and\n end bounds); however, there are some notable exceptions:\n\n - You cannot specify a ``tz`` (time zone) argument.\n - Start or end dates specified as partial-datetime strings must use the\n `ISO-8601 format `_.\n - It supports many, but not all, frequencies supported by\n ``pandas.date_range``. For example it does not currently support any of\n the business-related, semi-monthly, or sub-second frequencies.\n - Compound sub-monthly frequencies are not supported, e.g. '1H1min', as\n these can easily be written in terms of the finest common resolution,\n e.g. 
'61min'.\n\n Valid simple frequency strings for use with ``cftime``-calendars include\n any multiples of the following.\n\n +--------+--------------------------+\n | Alias | Description |\n +========+==========================+\n | A, Y | Year-end frequency |\n +--------+--------------------------+\n | AS, YS | Year-start frequency |\n +--------+--------------------------+\n | Q | Quarter-end frequency |\n +--------+--------------------------+\n | QS | Quarter-start frequency |\n +--------+--------------------------+\n | M | Month-end frequency |\n +--------+--------------------------+\n | MS | Month-start frequency |\n +--------+--------------------------+\n | D | Day frequency |\n +--------+--------------------------+\n | H | Hour frequency |\n +--------+--------------------------+\n | T, min | Minute frequency |\n +--------+--------------------------+\n | S | Second frequency |\n +--------+--------------------------+\n\n Any multiples of the following anchored offsets are also supported.\n\n +----------+--------------------------------------------------------------------+\n | Alias | Description |\n +==========+====================================================================+\n | A(S)-JAN | Annual frequency, anchored at the end (or beginning) of January |\n +----------+--------------------------------------------------------------------+\n | A(S)-FEB | Annual frequency, anchored at the end (or beginning) of February |\n +----------+--------------------------------------------------------------------+\n | A(S)-MAR | Annual frequency, anchored at the end (or beginning) of March |\n +----------+--------------------------------------------------------------------+\n | A(S)-APR | Annual frequency, anchored at the end (or beginning) of April |\n +----------+--------------------------------------------------------------------+\n | A(S)-MAY | Annual frequency, anchored at the end (or beginning) of May |\n +----------+--------------------------------------------------------------------+\n | A(S)-JUN | Annual frequency, anchored at the end (or beginning) of June |\n +----------+--------------------------------------------------------------------+\n | A(S)-JUL | Annual frequency, anchored at the end (or beginning) of July |\n +----------+--------------------------------------------------------------------+\n | A(S)-AUG | Annual frequency, anchored at the end (or beginning) of August |\n +----------+--------------------------------------------------------------------+\n | A(S)-SEP | Annual frequency, anchored at the end (or beginning) of September |\n +----------+--------------------------------------------------------------------+\n | A(S)-OCT | Annual frequency, anchored at the end (or beginning) of October |\n +----------+--------------------------------------------------------------------+\n | A(S)-NOV | Annual frequency, anchored at the end (or beginning) of November |\n +----------+--------------------------------------------------------------------+\n | A(S)-DEC | Annual frequency, anchored at the end (or beginning) of December |\n +----------+--------------------------------------------------------------------+\n | Q(S)-JAN | Quarter frequency, anchored at the end (or beginning) of January |\n +----------+--------------------------------------------------------------------+\n | Q(S)-FEB | Quarter frequency, anchored at the end (or beginning) of February |\n +----------+--------------------------------------------------------------------+\n | Q(S)-MAR | Quarter frequency, anchored at the 
end (or beginning) of March |\n +----------+--------------------------------------------------------------------+\n | Q(S)-APR | Quarter frequency, anchored at the end (or beginning) of April |\n +----------+--------------------------------------------------------------------+\n | Q(S)-MAY | Quarter frequency, anchored at the end (or beginning) of May |\n +----------+--------------------------------------------------------------------+\n | Q(S)-JUN | Quarter frequency, anchored at the end (or beginning) of June |\n +----------+--------------------------------------------------------------------+\n | Q(S)-JUL | Quarter frequency, anchored at the end (or beginning) of July |\n +----------+--------------------------------------------------------------------+\n | Q(S)-AUG | Quarter frequency, anchored at the end (or beginning) of August |\n +----------+--------------------------------------------------------------------+\n | Q(S)-SEP | Quarter frequency, anchored at the end (or beginning) of September |\n +----------+--------------------------------------------------------------------+\n | Q(S)-OCT | Quarter frequency, anchored at the end (or beginning) of October |\n +----------+--------------------------------------------------------------------+\n | Q(S)-NOV | Quarter frequency, anchored at the end (or beginning) of November |\n +----------+--------------------------------------------------------------------+\n | Q(S)-DEC | Quarter frequency, anchored at the end (or beginning) of December |\n +----------+--------------------------------------------------------------------+\n\n\n Finally, the following calendar aliases are supported.\n\n +--------------------------------+---------------------------------------+\n | Alias | Date type |\n +================================+=======================================+\n | standard, gregorian | ``cftime.DatetimeGregorian`` |\n +--------------------------------+---------------------------------------+\n | proleptic_gregorian | ``cftime.DatetimeProlepticGregorian`` |\n +--------------------------------+---------------------------------------+\n | noleap, 365_day | ``cftime.DatetimeNoLeap`` |\n +--------------------------------+---------------------------------------+\n | all_leap, 366_day | ``cftime.DatetimeAllLeap`` |\n +--------------------------------+---------------------------------------+\n | 360_day | ``cftime.Datetime360Day`` |\n +--------------------------------+---------------------------------------+\n | julian | ``cftime.DatetimeJulian`` |\n +--------------------------------+---------------------------------------+\n\n Examples\n --------\n\n This function returns a ``CFTimeIndex``, populated with ``cftime.datetime``\n objects associated with the specified calendar type, e.g.\n\n >>> xr.cftime_range(start='2000', periods=6, freq='2MS', calendar='noleap')\n CFTimeIndex([2000-01-01 00:00:00, 2000-03-01 00:00:00, 2000-05-01 00:00:00,\n 2000-07-01 00:00:00, 2000-09-01 00:00:00, 2000-11-01 00:00:00],\n dtype='object')\n\n As in the standard pandas function, three of the ``start``, ``end``,\n ``periods``, or ``freq`` arguments must be specified at a given time, with\n the other set to ``None``. 
See the `pandas documentation\n `_\n for more examples of the behavior of ``date_range`` with each of the\n parameters.\n\n See Also\n --------\n pandas.date_range\n \"\"\"\n # Adapted from pandas.core.indexes.datetimes._generate_range.\n if count_not_none(start, end, periods, freq) != 3:\n raise ValueError(\n \"Of the arguments 'start', 'end', 'periods', and 'freq', three \"\n \"must be specified at a time.\"\n )\n\n if start is not None:\n start = to_cftime_datetime(start, calendar)\n start = _maybe_normalize_date(start, normalize)\n if end is not None:\n end = to_cftime_datetime(end, calendar)\n end = _maybe_normalize_date(end, normalize)\n\n if freq is None:\n dates = _generate_linear_range(start, end, periods)\n else:\n offset = to_offset(freq)\n dates = np.array(list(_generate_range(start, end, periods, offset)))\n\n left_closed = False\n right_closed = False\n\n if closed is None:\n left_closed = True\n right_closed = True\n elif closed == \"left\":\n left_closed = True\n elif closed == \"right\":\n right_closed = True\n else:\n raise ValueError(\"Closed must be either 'left', 'right' or None\")\n\n if not left_closed and len(dates) and start is not None and dates[0] == start:\n dates = dates[1:]\n if not right_closed and len(dates) and end is not None and dates[-1] == end:\n dates = dates[:-1]\n\n return CFTimeIndex(dates, name=name)"},{"col":0,"comment":"Convert masked indices in a flat array to the nearest unmasked index.\n\n Parameters\n ----------\n index : np.ndarray\n One dimensional ndarray with dtype=int.\n\n Returns\n -------\n np.ndarray\n One dimensional ndarray with all values equal to -1 replaced by an\n adjacent non-masked element.\n ","endLoc":1196,"header":"def _posify_mask_subindexer(index)","id":2047,"name":"_posify_mask_subindexer","nodeType":"Function","startLoc":1173,"text":"def _posify_mask_subindexer(index):\n \"\"\"Convert masked indices in a flat array to the nearest unmasked index.\n\n Parameters\n ----------\n index : np.ndarray\n One dimensional ndarray with dtype=int.\n\n Returns\n -------\n np.ndarray\n One dimensional ndarray with all values equal to -1 replaced by an\n adjacent non-masked element.\n \"\"\"\n masked = index == -1\n unmasked_locs = np.flatnonzero(~masked)\n if not unmasked_locs.size:\n # indexing unmasked_locs is invalid\n return np.zeros_like(index)\n masked_locs = np.flatnonzero(masked)\n prev_value = np.maximum(0, np.searchsorted(unmasked_locs, masked_locs) - 1)\n new_index = index.copy()\n new_index[masked_locs] = index[unmasked_locs[prev_value]]\n return new_index"},{"col":0,"comment":"null","endLoc":688,"header":"def to_cftime_datetime(date_str_or_date, calendar=None)","id":2048,"name":"to_cftime_datetime","nodeType":"Function","startLoc":670,"text":"def to_cftime_datetime(date_str_or_date, calendar=None):\n import cftime\n\n if isinstance(date_str_or_date, str):\n if calendar is None:\n raise ValueError(\n \"If converting a string to a cftime.datetime object, \"\n \"a calendar type must be provided\"\n )\n date, _ = _parse_iso8601_with_reso(get_date_type(calendar), date_str_or_date)\n return date\n elif isinstance(date_str_or_date, cftime.datetime):\n return date_str_or_date\n else:\n raise TypeError(\n \"date_str_or_date must be a string or a \"\n \"subclass of cftime.datetime. 
Instead got \"\n \"{!r}.\".format(date_str_or_date)\n )"},{"col":0,"comment":"Return the cftime date type for a given calendar name.","endLoc":73,"header":"def get_date_type(calendar)","id":2049,"name":"get_date_type","nodeType":"Function","startLoc":55,"text":"def get_date_type(calendar):\n \"\"\"Return the cftime date type for a given calendar name.\"\"\"\n try:\n import cftime\n except ImportError:\n raise ImportError(\"cftime is required for dates with non-standard calendars\")\n else:\n calendars = {\n \"noleap\": cftime.DatetimeNoLeap,\n \"360_day\": cftime.Datetime360Day,\n \"365_day\": cftime.DatetimeNoLeap,\n \"366_day\": cftime.DatetimeAllLeap,\n \"gregorian\": cftime.DatetimeGregorian,\n \"proleptic_gregorian\": cftime.DatetimeProlepticGregorian,\n \"julian\": cftime.DatetimeJulian,\n \"all_leap\": cftime.DatetimeAllLeap,\n \"standard\": cftime.DatetimeGregorian,\n }\n return calendars[calendar]"},{"col":0,"comment":"Convert masked values (-1) in an indexer to nearest unmasked values.\n\n This routine is useful for dask, where it can be much faster to index\n adjacent points than arbitrary points from the end of an array.\n\n Parameters\n ----------\n indexer : ExplicitIndexer\n Input indexer.\n\n Returns\n -------\n ExplicitIndexer\n Same type of input, with all values in ndarray keys equal to -1\n replaced by an adjacent non-masked element.\n ","endLoc":1222,"header":"def posify_mask_indexer(indexer)","id":2050,"name":"posify_mask_indexer","nodeType":"Function","startLoc":1199,"text":"def posify_mask_indexer(indexer):\n \"\"\"Convert masked values (-1) in an indexer to nearest unmasked values.\n\n This routine is useful for dask, where it can be much faster to index\n adjacent points than arbitrary points from the end of an array.\n\n Parameters\n ----------\n indexer : ExplicitIndexer\n Input indexer.\n\n Returns\n -------\n ExplicitIndexer\n Same type of input, with all values in ndarray keys equal to -1\n replaced by an adjacent non-masked element.\n \"\"\"\n key = tuple(\n _posify_mask_subindexer(k.ravel()).reshape(k.shape)\n if isinstance(k, np.ndarray)\n else k\n for k in indexer.tuple\n )\n return type(indexer)(key)"},{"col":4,"comment":"Convert a iris.cube.Cube into an xarray.DataArray\n ","endLoc":2396,"header":"@classmethod\n def from_iris(cls, cube: \"iris_Cube\") -> \"DataArray\"","id":2051,"name":"from_iris","nodeType":"Function","startLoc":2390,"text":"@classmethod\n def from_iris(cls, cube: \"iris_Cube\") -> \"DataArray\":\n \"\"\"Convert a iris.cube.Cube into an xarray.DataArray\n \"\"\"\n from ..convert import from_iris\n\n return from_iris(cube)"},{"className":"UnsignedIntegerCoder","col":0,"comment":"null","endLoc":319,"id":2052,"nodeType":"Class","startLoc":279,"text":"class UnsignedIntegerCoder(VariableCoder):\n def encode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_encoding(variable)\n\n # from netCDF best practices\n # https://www.unidata.ucar.edu/software/netcdf/docs/BestPractices.html\n # \"_Unsigned = \"true\" to indicate that\n # integer data should be treated as unsigned\"\n if encoding.get(\"_Unsigned\", \"false\") == \"true\":\n pop_to(encoding, attrs, \"_Unsigned\")\n signed_dtype = np.dtype(\"i%s\" % data.dtype.itemsize)\n if \"_FillValue\" in attrs:\n new_fill = signed_dtype.type(attrs[\"_FillValue\"])\n attrs[\"_FillValue\"] = new_fill\n data = duck_array_ops.around(data).astype(signed_dtype)\n\n return Variable(dims, data, attrs, encoding)\n\n def decode(self, variable, name=None):\n dims, data, attrs, encoding = 
unpack_for_decoding(variable)\n\n if \"_Unsigned\" in attrs:\n unsigned = pop_to(attrs, encoding, \"_Unsigned\")\n\n if data.dtype.kind == \"i\":\n if unsigned == \"true\":\n unsigned_dtype = np.dtype(\"u%s\" % data.dtype.itemsize)\n transform = partial(np.asarray, dtype=unsigned_dtype)\n data = lazy_elemwise_func(data, transform, unsigned_dtype)\n if \"_FillValue\" in attrs:\n new_fill = unsigned_dtype.type(attrs[\"_FillValue\"])\n attrs[\"_FillValue\"] = new_fill\n else:\n warnings.warn(\n \"variable %r has _Unsigned attribute but is not \"\n \"of integer type. Ignoring attribute.\" % name,\n SerializationWarning,\n stacklevel=3,\n )\n\n return Variable(dims, data, attrs, encoding)"},{"col":0,"comment":" Convert a Iris cube into an DataArray\n ","endLoc":308,"header":"def from_iris(cube)","id":2053,"name":"from_iris","nodeType":"Function","startLoc":255,"text":"def from_iris(cube):\n \"\"\" Convert a Iris cube into an DataArray\n \"\"\"\n import iris.exceptions\n from xarray.core.pycompat import dask_array_type\n\n name = _name(cube)\n if name == \"unknown\":\n name = None\n dims = []\n for i in range(cube.ndim):\n try:\n dim_coord = cube.coord(dim_coords=True, dimensions=(i,))\n dims.append(_name(dim_coord))\n except iris.exceptions.CoordinateNotFoundError:\n dims.append(\"dim_{}\".format(i))\n\n if len(set(dims)) != len(dims):\n duplicates = [k for k, v in Counter(dims).items() if v > 1]\n raise ValueError(\"Duplicate coordinate name {}.\".format(duplicates))\n\n coords = {}\n\n for coord in cube.coords():\n coord_attrs = _iris_obj_to_attrs(coord)\n coord_dims = [dims[i] for i in cube.coord_dims(coord)]\n if coord_dims:\n coords[_name(coord)] = (coord_dims, coord.points, coord_attrs)\n else:\n coords[_name(coord)] = ((), coord.points.item(), coord_attrs)\n\n array_attrs = _iris_obj_to_attrs(cube)\n cell_methods = _iris_cell_methods_to_str(cube.cell_methods)\n if cell_methods:\n array_attrs[\"cell_methods\"] = cell_methods\n\n # Deal with iris 1.* and 2.*\n cube_data = cube.core_data() if hasattr(cube, \"core_data\") else cube.data\n\n # Deal with dask and numpy masked arrays\n if isinstance(cube_data, dask_array_type):\n from dask.array import ma as dask_ma\n\n filled_data = dask_ma.filled(cube_data, get_fill_value(cube.dtype))\n elif isinstance(cube_data, np.ma.MaskedArray):\n filled_data = np.ma.filled(cube_data, get_fill_value(cube.dtype))\n else:\n filled_data = cube_data\n\n dataarray = DataArray(\n filled_data, coords=coords, name=name, attrs=array_attrs, dims=dims\n )\n decoded_ds = decode_cf(dataarray._to_temp_dataset())\n return dataarray._from_temp_dataset(decoded_ds)"},{"col":4,"comment":"null","endLoc":295,"header":"def encode(self, variable, name=None)","id":2054,"name":"encode","nodeType":"Function","startLoc":280,"text":"def encode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_encoding(variable)\n\n # from netCDF best practices\n # https://www.unidata.ucar.edu/software/netcdf/docs/BestPractices.html\n # \"_Unsigned = \"true\" to indicate that\n # integer data should be treated as unsigned\"\n if encoding.get(\"_Unsigned\", \"false\") == \"true\":\n pop_to(encoding, attrs, \"_Unsigned\")\n signed_dtype = np.dtype(\"i%s\" % data.dtype.itemsize)\n if \"_FillValue\" in attrs:\n new_fill = signed_dtype.type(attrs[\"_FillValue\"])\n attrs[\"_FillValue\"] = new_fill\n data = duck_array_ops.around(data).astype(signed_dtype)\n\n return Variable(dims, data, attrs, encoding)"},{"col":0,"comment":"Round datetime down to midnight if normalize is 
True.","endLoc":701,"header":"def _maybe_normalize_date(date, normalize)","id":2055,"name":"_maybe_normalize_date","nodeType":"Function","startLoc":696,"text":"def _maybe_normalize_date(date, normalize):\n \"\"\"Round datetime down to midnight if normalize is True.\"\"\"\n if normalize:\n return normalize_date(date)\n else:\n return date"},{"col":0,"comment":" Mimicks `iris_obj.name()` but with different name resolution order.\n\n Similar to iris_obj.name() method, but using iris_obj.var_name first to\n enable roundtripping.\n ","endLoc":252,"header":"def _name(iris_obj, default=\"unknown\")","id":2056,"name":"_name","nodeType":"Function","startLoc":246,"text":"def _name(iris_obj, default=\"unknown\"):\n \"\"\" Mimicks `iris_obj.name()` but with different name resolution order.\n\n Similar to iris_obj.name() method, but using iris_obj.var_name first to\n enable roundtripping.\n \"\"\"\n return iris_obj.var_name or iris_obj.standard_name or iris_obj.long_name or default"},{"col":0,"comment":"Round datetime down to midnight.","endLoc":693,"header":"def normalize_date(date)","id":2057,"name":"normalize_date","nodeType":"Function","startLoc":691,"text":"def normalize_date(date):\n \"\"\"Round datetime down to midnight.\"\"\"\n return date.replace(hour=0, minute=0, second=0, microsecond=0)"},{"attributeType":"null","col":4,"comment":"null","endLoc":413,"id":2058,"name":"_coord_names","nodeType":"Attribute","startLoc":413,"text":"_coord_names"},{"col":0,"comment":"Generate an equally-spaced sequence of cftime.datetime objects between\n and including two dates (whose length equals the number of periods).","endLoc":715,"header":"def _generate_linear_range(start, end, periods)","id":2059,"name":"_generate_linear_range","nodeType":"Function","startLoc":704,"text":"def _generate_linear_range(start, end, periods):\n \"\"\"Generate an equally-spaced sequence of cftime.datetime objects between\n and including two dates (whose length equals the number of periods).\"\"\"\n import cftime\n\n total_seconds = (end - start).total_seconds()\n values = np.linspace(0.0, total_seconds, periods, endpoint=True)\n units = \"seconds since {}\".format(format_cftime_datetime(start))\n calendar = start.calendar\n return cftime.num2date(\n values, units=units, calendar=calendar, only_use_cftime_datetimes=True\n )"},{"col":0,"comment":" Return a dictionary of attrs when given a Iris object\n ","endLoc":224,"header":"def _iris_obj_to_attrs(obj)","id":2060,"name":"_iris_obj_to_attrs","nodeType":"Function","startLoc":215,"text":"def _iris_obj_to_attrs(obj):\n \"\"\" Return a dictionary of attrs when given a Iris object\n \"\"\"\n attrs = {\"standard_name\": obj.standard_name, \"long_name\": obj.long_name}\n if obj.units.calendar:\n attrs[\"calendar\"] = obj.units.calendar\n if obj.units.origin != \"1\" and not obj.units.is_unknown():\n attrs[\"units\"] = obj.units.origin\n attrs.update(obj.attributes)\n return {k: v for k, v in attrs.items() if v is not None}"},{"attributeType":"null","col":4,"comment":"null","endLoc":414,"id":2061,"name":"_dims","nodeType":"Attribute","startLoc":414,"text":"_dims"},{"col":4,"comment":"null","endLoc":438,"header":"def __rsub__(self, other)","id":2062,"name":"__rsub__","nodeType":"Function","startLoc":437,"text":"def __rsub__(self, other):\n return pd.TimedeltaIndex(other - np.array(self))"},{"col":4,"comment":"If possible, convert this index to a pandas.DatetimeIndex.\n\n Parameters\n ----------\n unsafe : bool\n Flag to turn off warning when converting from a CFTimeIndex with\n a 
non-standard calendar to a DatetimeIndex (default ``False``).\n\n Returns\n -------\n pandas.DatetimeIndex\n\n Raises\n ------\n ValueError\n If the CFTimeIndex contains dates that are not possible in the\n standard calendar or outside the pandas.Timestamp-valid range.\n\n Warns\n -----\n RuntimeWarning\n If converting from a non-standard calendar to a DatetimeIndex.\n\n Warnings\n --------\n Note that for non-standard calendars, this will change the calendar\n type of the index. In that case the result of this method should be\n used with caution.\n\n Examples\n --------\n >>> import xarray as xr\n >>> times = xr.cftime_range('2000', periods=2, calendar='gregorian')\n >>> times\n CFTimeIndex([2000-01-01 00:00:00, 2000-01-02 00:00:00], dtype='object')\n >>> times.to_datetimeindex()\n DatetimeIndex(['2000-01-01', '2000-01-02'], dtype='datetime64[ns]', freq=None)\n ","endLoc":491,"header":"def to_datetimeindex(self, unsafe=False)","id":2063,"name":"to_datetimeindex","nodeType":"Function","startLoc":440,"text":"def to_datetimeindex(self, unsafe=False):\n \"\"\"If possible, convert this index to a pandas.DatetimeIndex.\n\n Parameters\n ----------\n unsafe : bool\n Flag to turn off warning when converting from a CFTimeIndex with\n a non-standard calendar to a DatetimeIndex (default ``False``).\n\n Returns\n -------\n pandas.DatetimeIndex\n\n Raises\n ------\n ValueError\n If the CFTimeIndex contains dates that are not possible in the\n standard calendar or outside the pandas.Timestamp-valid range.\n\n Warns\n -----\n RuntimeWarning\n If converting from a non-standard calendar to a DatetimeIndex.\n\n Warnings\n --------\n Note that for non-standard calendars, this will change the calendar\n type of the index. In that case the result of this method should be\n used with caution.\n\n Examples\n --------\n >>> import xarray as xr\n >>> times = xr.cftime_range('2000', periods=2, calendar='gregorian')\n >>> times\n CFTimeIndex([2000-01-01 00:00:00, 2000-01-02 00:00:00], dtype='object')\n >>> times.to_datetimeindex()\n DatetimeIndex(['2000-01-01', '2000-01-02'], dtype='datetime64[ns]', freq=None)\n \"\"\"\n nptimes = cftime_to_nptime(self)\n calendar = infer_calendar_name(self)\n if calendar not in _STANDARD_CALENDARS and not unsafe:\n warnings.warn(\n \"Converting a CFTimeIndex with dates from a non-standard \"\n \"calendar, {!r}, to a pandas.DatetimeIndex, which uses dates \"\n \"from the standard calendar. This may lead to subtle errors \"\n \"in operations that depend on the length of time between \"\n \"dates.\".format(calendar),\n RuntimeWarning,\n stacklevel=2,\n )\n return pd.DatetimeIndex(nptimes)"},{"col":4,"comment":"null","endLoc":319,"header":"def decode(self, variable, name=None)","id":2064,"name":"decode","nodeType":"Function","startLoc":297,"text":"def decode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_decoding(variable)\n\n if \"_Unsigned\" in attrs:\n unsigned = pop_to(attrs, encoding, \"_Unsigned\")\n\n if data.dtype.kind == \"i\":\n if unsigned == \"true\":\n unsigned_dtype = np.dtype(\"u%s\" % data.dtype.itemsize)\n transform = partial(np.asarray, dtype=unsigned_dtype)\n data = lazy_elemwise_func(data, transform, unsigned_dtype)\n if \"_FillValue\" in attrs:\n new_fill = unsigned_dtype.type(attrs[\"_FillValue\"])\n attrs[\"_FillValue\"] = new_fill\n else:\n warnings.warn(\n \"variable %r has _Unsigned attribute but is not \"\n \"of integer type. 
Ignoring attribute.\" % name,\n SerializationWarning,\n stacklevel=3,\n )\n\n return Variable(dims, data, attrs, encoding)"},{"col":0,"comment":" Converts a Iris cell methods into a string\n ","endLoc":243,"header":"def _iris_cell_methods_to_str(cell_methods_obj)","id":2065,"name":"_iris_cell_methods_to_str","nodeType":"Function","startLoc":227,"text":"def _iris_cell_methods_to_str(cell_methods_obj):\n \"\"\" Converts a Iris cell methods into a string\n \"\"\"\n cell_methods = []\n for cell_method in cell_methods_obj:\n names = \"\".join([\"{}: \".format(n) for n in cell_method.coord_names])\n intervals = \" \".join(\n [\"interval: {}\".format(interval) for interval in cell_method.intervals]\n )\n comments = \" \".join(\n [\"comment: {}\".format(comment) for comment in cell_method.comments]\n )\n extra = \" \".join([intervals, comments]).strip()\n if extra:\n extra = \" ({})\".format(extra)\n cell_methods.append(names + cell_method.method + extra)\n return \" \".join(cell_methods)"},{"attributeType":"null","col":4,"comment":"null","endLoc":415,"id":2066,"name":"_encoding","nodeType":"Attribute","startLoc":415,"text":"_encoding"},{"attributeType":"null","col":4,"comment":"null","endLoc":232,"id":2067,"name":"year","nodeType":"Attribute","startLoc":232,"text":"year"},{"attributeType":"null","col":4,"comment":"null","endLoc":233,"id":2068,"name":"month","nodeType":"Attribute","startLoc":233,"text":"month"},{"fileName":"conf.py","filePath":"doc","id":2069,"nodeType":"File","text":"# -*- coding: utf-8 -*-\n#\n# xarray documentation build configuration file, created by\n# sphinx-quickstart on Thu Feb 6 18:57:54 2014.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n\nimport datetime\nimport os\nimport subprocess\nimport sys\nfrom contextlib import suppress\n\nimport xarray\n\nallowed_failures = set()\n\nprint(\"python exec:\", sys.executable)\nprint(\"sys.path:\", sys.path)\n\nif \"conda\" in sys.executable:\n print(\"conda environment:\")\n subprocess.run([\"conda\", \"list\"])\nelse:\n print(\"pip environment:\")\n subprocess.run([\"pip\", \"list\"])\n\nprint(\"xarray: %s, %s\" % (xarray.__version__, xarray.__file__))\n\nwith suppress(ImportError):\n import matplotlib\n\n matplotlib.use(\"Agg\")\n\ntry:\n import rasterio\nexcept ImportError:\n allowed_failures.update(\n [\"gallery/plot_rasterio_rgb.py\", \"gallery/plot_rasterio.py\"]\n )\n\ntry:\n import cartopy\nexcept ImportError:\n allowed_failures.update(\n [\n \"gallery/plot_cartopy_facetgrid.py\",\n \"gallery/plot_rasterio_rgb.py\",\n \"gallery/plot_rasterio.py\",\n ]\n )\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.extlinks\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.napoleon\",\n \"numpydoc\",\n \"IPython.sphinxext.ipython_directive\",\n \"IPython.sphinxext.ipython_console_highlighting\",\n \"sphinx_gallery.gen_gallery\",\n]\n\nextlinks = {\n \"issue\": (\"https://github.com/pydata/xarray/issues/%s\", \"GH\"),\n \"pull\": (\"https://github.com/pydata/xarray/pull/%s\", \"PR\"),\n}\n\nsphinx_gallery_conf = {\n \"examples_dirs\": \"gallery\",\n \"gallery_dirs\": \"auto_gallery\",\n \"backreferences_dir\": False,\n \"expected_failing_examples\": list(allowed_failures),\n}\n\nautosummary_generate = True\nautodoc_typehints = \"none\"\n\nnapoleon_use_param = True\nnapoleon_use_rtype = True\n\nnumpydoc_class_members_toctree = True\nnumpydoc_show_class_members = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix of source filenames.\nsource_suffix = \".rst\"\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"xarray\"\ncopyright = \"2014-%s, xarray Developers\" % datetime.datetime.now().year\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = xarray.__version__.split(\"+\")[0]\n# The full version, including alpha/beta/rc tags.\nrelease = xarray.__version__\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n# language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\ntoday_fmt = \"%Y-%m-%d\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\"_build\"]\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n# show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n# keep_warnings = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\"logo_only\": True}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \" v documentation\".\n# html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = \"_static/dataset-diagram-logo.png\"\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\nhtml_favicon = \"_static/favicon.ico\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Sometimes the savefig directory doesn't exist and needs to be created\n# https://github.com/ipython/ipython/issues/8733\n# becomes obsolete when we can pin ipython>=5.2; see ci/requirements/doc.yml\nipython_savefig_dir = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"_build\", \"html\", \"_static\"\n)\nif not os.path.exists(ipython_savefig_dir):\n os.makedirs(ipython_savefig_dir)\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n# html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = today_fmt\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n# html_domain_indices = True\n\n# If false, no index is generated.\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"xarraydoc\"\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n # 'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\"index\", \"xarray.tex\", \"xarray Documentation\", \"xarray Developers\", \"manual\")\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\n# latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(\"index\", \"xarray\", \"xarray Documentation\", [\"xarray Developers\"], 1)]\n\n# If true, show URL addresses after external links.\n# man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n \"index\",\n \"xarray\",\n \"xarray Documentation\",\n \"xarray Developers\",\n \"xarray\",\n \"N-D labeled arrays and datasets in Python.\",\n \"Miscellaneous\",\n )\n]\n\n# Documents to append as an appendix to all manuals.\n# texinfo_appendices = []\n\n# If false, no module index is generated.\n# texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n# texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n# texinfo_no_detailmenu = False\n\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"pandas\": (\"https://pandas.pydata.org/pandas-docs/stable/\", None),\n \"iris\": (\"http://scitools.org.uk/iris/docs/latest/\", None),\n \"numpy\": (\"https://docs.scipy.org/doc/numpy/\", None),\n \"numba\": (\"https://numba.pydata.org/numba-doc/latest/\", None),\n \"matplotlib\": (\"https://matplotlib.org/\", None),\n}\n"},{"col":0,"comment":"Generate a regular range of cftime.datetime objects with a\n given time offset.\n\n Adapted from pandas.tseries.offsets.generate_range.\n\n Parameters\n ----------\n start : cftime.datetime, or None\n Start of range\n end : cftime.datetime, or None\n End of range\n periods : int, or None\n Number of elements in the sequence\n offset : BaseCFTimeOffset\n An offset class designed for working with cftime.datetime objects\n\n Returns\n -------\n A generator object\n ","endLoc":775,"header":"def _generate_range(start, end, periods, offset)","id":2070,"name":"_generate_range","nodeType":"Function","startLoc":718,"text":"def _generate_range(start, end, periods, offset):\n \"\"\"Generate a regular range of cftime.datetime objects with a\n given time offset.\n\n Adapted from pandas.tseries.offsets.generate_range.\n\n Parameters\n ----------\n start : cftime.datetime, or None\n Start of range\n end : cftime.datetime, or None\n End of range\n periods : int, or None\n Number of elements in the sequence\n offset : BaseCFTimeOffset\n An offset class designed 
for working with cftime.datetime objects\n\n Returns\n -------\n A generator object\n \"\"\"\n if start:\n start = offset.rollforward(start)\n\n if end:\n end = offset.rollback(end)\n\n if periods is None and end < start:\n end = None\n periods = 0\n\n if end is None:\n end = start + (periods - 1) * offset\n\n if start is None:\n start = end - (periods - 1) * offset\n\n current = start\n if offset.n >= 0:\n while current <= end:\n yield current\n\n next_date = current + offset\n if next_date <= current:\n raise ValueError(\n \"Offset {offset} did not increment date\".format(offset=offset)\n )\n current = next_date\n else:\n while current >= end:\n yield current\n\n next_date = current + offset\n if next_date >= current:\n raise ValueError(\n \"Offset {offset} did not decrement date\".format(offset=offset)\n )\n current = next_date"},{"attributeType":"null","col":4,"comment":"null","endLoc":234,"id":2071,"name":"day","nodeType":"Attribute","startLoc":234,"text":"day"},{"attributeType":"null","col":4,"comment":"null","endLoc":235,"id":2072,"name":"hour","nodeType":"Attribute","startLoc":235,"text":"hour"},{"attributeType":"null","col":4,"comment":"null","endLoc":236,"id":2073,"name":"minute","nodeType":"Attribute","startLoc":236,"text":"minute"},{"col":4,"comment":"null","endLoc":140,"header":"def rollforward(self, date)","id":2074,"name":"rollforward","nodeType":"Function","startLoc":136,"text":"def rollforward(self, date):\n if self.onOffset(date):\n return date\n else:\n return date + type(self)()"},{"attributeType":"null","col":4,"comment":"null","endLoc":237,"id":2075,"name":"second","nodeType":"Attribute","startLoc":237,"text":"second"},{"col":4,"comment":"Check if the given date is in the set of possible dates created\n using a length-one version of this offset class.","endLoc":134,"header":"def onOffset(self, date)","id":2076,"name":"onOffset","nodeType":"Function","startLoc":130,"text":"def onOffset(self, date):\n \"\"\"Check if the given date is in the set of possible dates created\n using a length-one version of this offset class.\"\"\"\n test_date = (self + date) - self\n return date == test_date"},{"attributeType":"null","col":4,"comment":"null","endLoc":238,"id":2077,"name":"microsecond","nodeType":"Attribute","startLoc":238,"text":"microsecond"},{"attributeType":"null","col":0,"comment":"null","endLoc":24,"id":2078,"name":"allowed_failures","nodeType":"Attribute","startLoc":24,"text":"allowed_failures"},{"attributeType":"null","col":4,"comment":"null","endLoc":239,"id":2079,"name":"dayofyear","nodeType":"Attribute","startLoc":239,"text":"dayofyear"},{"col":4,"comment":"null","endLoc":86,"header":"def __init__(self, n=1)","id":2080,"name":"__init__","nodeType":"Function","startLoc":80,"text":"def __init__(self, n=1):\n if not isinstance(n, int):\n raise TypeError(\n \"The provided multiple 'n' must be an integer. 
\"\n \"Instead a value of type {!r} was provided.\".format(type(n))\n )\n self.n = n"},{"attributeType":"null","col":4,"comment":"null","endLoc":416,"id":2081,"name":"_indexes","nodeType":"Attribute","startLoc":416,"text":"_indexes"},{"attributeType":"null","col":4,"comment":"null","endLoc":242,"id":2082,"name":"dayofweek","nodeType":"Attribute","startLoc":242,"text":"dayofweek"},{"attributeType":"null","col":0,"comment":"null","endLoc":69,"id":2083,"name":"extensions","nodeType":"Attribute","startLoc":69,"text":"extensions"},{"col":4,"comment":"null","endLoc":146,"header":"def rollback(self, date)","id":2084,"name":"rollback","nodeType":"Function","startLoc":142,"text":"def rollback(self, date):\n if self.onOffset(date):\n return date\n else:\n return date - type(self)()"},{"attributeType":"null","col":4,"comment":"null","endLoc":243,"id":2085,"name":"date_type","nodeType":"Attribute","startLoc":243,"text":"date_type"},{"attributeType":"null","col":0,"comment":"null","endLoc":82,"id":2086,"name":"extlinks","nodeType":"Attribute","startLoc":82,"text":"extlinks"},{"attributeType":"null","col":8,"comment":"null","endLoc":250,"id":2087,"name":"result","nodeType":"Attribute","startLoc":250,"text":"result"},{"attributeType":"null","col":8,"comment":"null","endLoc":251,"id":2088,"name":"_data","nodeType":"Attribute","startLoc":251,"text":"result._data"},{"attributeType":"null","col":8,"comment":"null","endLoc":252,"id":2089,"name":"name","nodeType":"Attribute","startLoc":252,"text":"result.name"},{"attributeType":"null","col":0,"comment":"null","endLoc":87,"id":2090,"name":"sphinx_gallery_conf","nodeType":"Attribute","startLoc":87,"text":"sphinx_gallery_conf"},{"col":0,"comment":"null","endLoc":56,"header":"def named(name, pattern)","id":2091,"name":"named","nodeType":"Function","startLoc":55,"text":"def named(name, pattern):\n return \"(?P<\" + name + \">\" + pattern + \")\""},{"col":0,"comment":"null","endLoc":60,"header":"def optional(x)","id":2092,"name":"optional","nodeType":"Function","startLoc":59,"text":"def optional(x):\n return \"(?:\" + x + \")?\""},{"attributeType":"null","col":0,"comment":"null","endLoc":94,"id":2093,"name":"autosummary_generate","nodeType":"Attribute","startLoc":94,"text":"autosummary_generate"},{"col":0,"comment":"null","endLoc":66,"header":"def trailing_optional(xs)","id":2094,"name":"trailing_optional","nodeType":"Function","startLoc":63,"text":"def trailing_optional(xs):\n if not xs:\n return \"\"\n return xs[0] + optional(trailing_optional(xs[1:]))"},{"col":4,"comment":"Helper function for equals, broadcast_equals, and identical\n ","endLoc":2407,"header":"def _all_compat(self, other: \"DataArray\", compat_str: str) -> bool","id":2095,"name":"_all_compat","nodeType":"Function","startLoc":2398,"text":"def _all_compat(self, other: \"DataArray\", compat_str: str) -> bool:\n \"\"\"Helper function for equals, broadcast_equals, and identical\n \"\"\"\n\n def compat(x, y):\n return getattr(x.variable, compat_str)(y.variable)\n\n return utils.dict_equiv(self.coords, other.coords, compat=compat) and compat(\n self, other\n )"},{"col":0,"comment":"null","endLoc":82,"header":"def build_pattern(date_sep=r\"\\-\", datetime_sep=r\"T\", time_sep=r\"\\:\")","id":2096,"name":"build_pattern","nodeType":"Function","startLoc":69,"text":"def build_pattern(date_sep=r\"\\-\", datetime_sep=r\"T\", time_sep=r\"\\:\"):\n pieces = [\n (None, \"year\", r\"\\d{4}\"),\n (date_sep, \"month\", r\"\\d{2}\"),\n (date_sep, \"day\", r\"\\d{2}\"),\n (datetime_sep, \"hour\", r\"\\d{2}\"),\n 
(time_sep, \"minute\", r\"\\d{2}\"),\n (time_sep, \"second\", r\"\\d{2}\"),\n ]\n pattern_list = []\n for sep, name, sub_pattern in pieces:\n pattern_list.append((sep if sep else \"\") + named(name, sub_pattern))\n # TODO: allow timezone offsets?\n return \"^\" + trailing_optional(pattern_list) + \"$\""},{"attributeType":"null","col":0,"comment":"null","endLoc":95,"id":2097,"name":"autodoc_typehints","nodeType":"Attribute","startLoc":95,"text":"autodoc_typehints"},{"col":4,"comment":"Two DataArrays are broadcast equal if they are equal after\n broadcasting them against each other such that they have the same\n dimensions.\n\n See Also\n --------\n DataArray.equals\n DataArray.identical\n ","endLoc":2422,"header":"def broadcast_equals(self, other: \"DataArray\") -> bool","id":2099,"name":"broadcast_equals","nodeType":"Function","startLoc":2409,"text":"def broadcast_equals(self, other: \"DataArray\") -> bool:\n \"\"\"Two DataArrays are broadcast equal if they are equal after\n broadcasting them against each other such that they have the same\n dimensions.\n\n See Also\n --------\n DataArray.equals\n DataArray.identical\n \"\"\"\n try:\n return self._all_compat(other, \"broadcast_equals\")\n except (TypeError, AttributeError):\n return False"},{"attributeType":"null","col":4,"comment":"null","endLoc":417,"id":2100,"name":"_variables","nodeType":"Attribute","startLoc":417,"text":"_variables"},{"attributeType":"null","col":0,"comment":"null","endLoc":97,"id":2101,"name":"napoleon_use_param","nodeType":"Attribute","startLoc":97,"text":"napoleon_use_param"},{"col":0,"comment":"Adapted from pandas.tslib.get_date_field","endLoc":165,"header":"def get_date_field(datetimes, field)","id":2102,"name":"get_date_field","nodeType":"Function","startLoc":163,"text":"def get_date_field(datetimes, field):\n \"\"\"Adapted from pandas.tslib.get_date_field\"\"\"\n return np.array([getattr(date, field) for date in datetimes])"},{"col":0,"comment":"Adapted from pandas.tseries.index._field_accessor","endLoc":187,"header":"def _field_accessor(name, docstring=None, min_cftime_version=\"0.0\")","id":2103,"name":"_field_accessor","nodeType":"Function","startLoc":168,"text":"def _field_accessor(name, docstring=None, min_cftime_version=\"0.0\"):\n \"\"\"Adapted from pandas.tseries.index._field_accessor\"\"\"\n\n def f(self, min_cftime_version=min_cftime_version):\n import cftime\n\n version = cftime.__version__\n\n if LooseVersion(version) >= LooseVersion(min_cftime_version):\n return get_date_field(self._data, name)\n else:\n raise ImportError(\n \"The {!r} accessor requires a minimum \"\n \"version of cftime of {}. 
Found an \"\n \"installed version of {}.\".format(name, min_cftime_version, version)\n )\n\n f.__name__ = name\n f.__doc__ = docstring\n return property(f)"},{"attributeType":"null","col":0,"comment":"null","endLoc":98,"id":2104,"name":"napoleon_use_rtype","nodeType":"Attribute","startLoc":98,"text":"napoleon_use_rtype"},{"attributeType":"null","col":0,"comment":"null","endLoc":100,"id":2105,"name":"numpydoc_class_members_toctree","nodeType":"Attribute","startLoc":100,"text":"numpydoc_class_members_toctree"},{"attributeType":"null","col":0,"comment":"null","endLoc":101,"id":2106,"name":"numpydoc_show_class_members","nodeType":"Attribute","startLoc":101,"text":"numpydoc_show_class_members"},{"col":4,"comment":"True if two DataArrays have the same dimensions, coordinates and\n values; otherwise False.\n\n DataArrays can still be equal (like pandas objects) if they have NaN\n values in the same locations.\n\n This method is necessary because `v1 == v2` for ``DataArray``\n does element-wise comparisons (like numpy.ndarrays).\n\n See Also\n --------\n DataArray.broadcast_equals\n DataArray.identical\n ","endLoc":2442,"header":"def equals(self, other: \"DataArray\") -> bool","id":2107,"name":"equals","nodeType":"Function","startLoc":2424,"text":"def equals(self, other: \"DataArray\") -> bool:\n \"\"\"True if two DataArrays have the same dimensions, coordinates and\n values; otherwise False.\n\n DataArrays can still be equal (like pandas objects) if they have NaN\n values in the same locations.\n\n This method is necessary because `v1 == v2` for ``DataArray``\n does element-wise comparisons (like numpy.ndarrays).\n\n See Also\n --------\n DataArray.broadcast_equals\n DataArray.identical\n \"\"\"\n try:\n return self._all_compat(other, \"equals\")\n except (TypeError, AttributeError):\n return False"},{"attributeType":"null","col":0,"comment":"null","endLoc":104,"id":2108,"name":"templates_path","nodeType":"Attribute","startLoc":104,"text":"templates_path"},{"attributeType":"null","col":0,"comment":"null","endLoc":107,"id":2109,"name":"source_suffix","nodeType":"Attribute","startLoc":107,"text":"source_suffix"},{"attributeType":"null","col":0,"comment":"null","endLoc":113,"id":2110,"name":"master_doc","nodeType":"Attribute","startLoc":113,"text":"master_doc"},{"attributeType":"null","col":0,"comment":"null","endLoc":116,"id":2111,"name":"project","nodeType":"Attribute","startLoc":116,"text":"project"},{"col":0,"comment":"null","endLoc":194,"header":"def get_date_type(self)","id":2112,"name":"get_date_type","nodeType":"Function","startLoc":190,"text":"def get_date_type(self):\n if self._data.size:\n return type(self._data[0])\n else:\n return None"},{"attributeType":"null","col":0,"comment":"null","endLoc":117,"id":2113,"name":"copyright","nodeType":"Attribute","startLoc":117,"text":"copyright"},{"attributeType":"null","col":0,"comment":"null","endLoc":85,"id":2114,"name":"_BASIC_PATTERN","nodeType":"Attribute","startLoc":85,"text":"_BASIC_PATTERN"},{"col":4,"comment":"Like equals, but also checks the array name and attributes, and\n attributes on all coordinates.\n\n See Also\n --------\n DataArray.broadcast_equals\n DataArray.equal\n ","endLoc":2456,"header":"def identical(self, other: \"DataArray\") -> bool","id":2115,"name":"identical","nodeType":"Function","startLoc":2444,"text":"def identical(self, other: \"DataArray\") -> bool:\n \"\"\"Like equals, but also checks the array name and attributes, and\n attributes on all coordinates.\n\n See Also\n --------\n DataArray.broadcast_equals\n 
DataArray.equal\n \"\"\"\n try:\n return self.name == other.name and self._all_compat(other, \"identical\")\n except (TypeError, AttributeError):\n return False"},{"attributeType":"null","col":0,"comment":"null","endLoc":124,"id":2116,"name":"version","nodeType":"Attribute","startLoc":124,"text":"version"},{"col":4,"comment":"null","endLoc":2467,"header":"def _result_name(self, other: Any = None) -> Optional[Hashable]","id":2117,"name":"_result_name","nodeType":"Function","startLoc":2460,"text":"def _result_name(self, other: Any = None) -> Optional[Hashable]:\n # use the same naming heuristics as pandas:\n # https://github.com/ContinuumIO/blaze/issues/458#issuecomment-51936356\n other_name = getattr(other, \"name\", self.__default_name)\n if other_name is self.__default_name or other_name == self.name:\n return self.name\n else:\n return None"},{"attributeType":"null","col":0,"comment":"null","endLoc":126,"id":2118,"name":"release","nodeType":"Attribute","startLoc":126,"text":"release"},{"attributeType":"null","col":4,"comment":"null","endLoc":419,"id":2119,"name":"__slots__","nodeType":"Attribute","startLoc":419,"text":"__slots__"},{"col":4,"comment":"null","endLoc":2471,"header":"def __array_wrap__(self, obj, context=None) -> \"DataArray\"","id":2120,"name":"__array_wrap__","nodeType":"Function","startLoc":2469,"text":"def __array_wrap__(self, obj, context=None) -> \"DataArray\":\n new_var = self.variable.__array_wrap__(obj, context)\n return self._replace(new_var)"},{"attributeType":"null","col":0,"comment":"null","endLoc":136,"id":2121,"name":"today_fmt","nodeType":"Attribute","startLoc":136,"text":"today_fmt"},{"attributeType":"null","col":4,"comment":"null","endLoc":431,"id":2122,"name":"_groupby_cls","nodeType":"Attribute","startLoc":431,"text":"_groupby_cls"},{"attributeType":"null","col":0,"comment":"null","endLoc":140,"id":2123,"name":"exclude_patterns","nodeType":"Attribute","startLoc":140,"text":"exclude_patterns"},{"attributeType":"null","col":0,"comment":"null","endLoc":158,"id":2124,"name":"pygments_style","nodeType":"Attribute","startLoc":158,"text":"pygments_style"},{"attributeType":"null","col":0,"comment":"null","endLoc":171,"id":2125,"name":"html_theme","nodeType":"Attribute","startLoc":171,"text":"html_theme"},{"attributeType":"null","col":0,"comment":"null","endLoc":176,"id":2126,"name":"html_theme_options","nodeType":"Attribute","startLoc":176,"text":"html_theme_options"},{"attributeType":"null","col":0,"comment":"null","endLoc":190,"id":2127,"name":"html_logo","nodeType":"Attribute","startLoc":190,"text":"html_logo"},{"attributeType":"null","col":0,"comment":"null","endLoc":195,"id":2128,"name":"html_favicon","nodeType":"Attribute","startLoc":195,"text":"html_favicon"},{"attributeType":"null","col":0,"comment":"null","endLoc":200,"id":2129,"name":"html_static_path","nodeType":"Attribute","startLoc":200,"text":"html_static_path"},{"attributeType":"null","col":0,"comment":"null","endLoc":205,"id":2130,"name":"ipython_savefig_dir","nodeType":"Attribute","startLoc":205,"text":"ipython_savefig_dir"},{"attributeType":"null","col":4,"comment":"null","endLoc":432,"id":2131,"name":"_rolling_cls","nodeType":"Attribute","startLoc":432,"text":"_rolling_cls"},{"attributeType":"null","col":0,"comment":"null","endLoc":218,"id":2132,"name":"html_last_updated_fmt","nodeType":"Attribute","startLoc":218,"text":"html_last_updated_fmt"},{"attributeType":"null","col":0,"comment":"null","endLoc":6,"id":2133,"name":"__version__","nodeType":"Attribute","startLoc":6,"text":"__version__"},{"co
l":4,"comment":"null","endLoc":2474,"header":"def __matmul__(self, obj)","id":2134,"name":"__matmul__","nodeType":"Function","startLoc":2473,"text":"def __matmul__(self, obj):\n return self.dot(obj)"},{"col":0,"comment":"","endLoc":1,"header":"__init__.py#","id":2135,"name":"","nodeType":"Function","startLoc":1,"text":"\"\"\" isort:skip_file \"\"\"\n\n__version__ = get_versions()[\"version\"]\n\ndel get_versions"},{"col":0,"comment":"Mask all matching values in a NumPy arrays.","endLoc":146,"header":"def _apply_mask(\n data: np.ndarray, encoded_fill_values: list, decoded_fill_value: Any, dtype: Any\n) -> np.ndarray","id":2136,"name":"_apply_mask","nodeType":"Function","startLoc":138,"text":"def _apply_mask(\n data: np.ndarray, encoded_fill_values: list, decoded_fill_value: Any, dtype: Any\n) -> np.ndarray:\n \"\"\"Mask all matching values in a NumPy arrays.\"\"\"\n data = np.asarray(data, dtype=dtype)\n condition = False\n for fv in encoded_fill_values:\n condition |= data == fv\n return np.where(condition, decoded_fill_value, data)"},{"col":4,"comment":"Perform dot product of two DataArrays along their shared dims.\n\n Equivalent to taking taking tensordot over all shared dims.\n\n Parameters\n ----------\n other : DataArray\n The other array with which the dot product is performed.\n dims: hashable or sequence of hashables, optional\n Along which dimensions to be summed over. Default all the common\n dimensions are summed over.\n\n Returns\n -------\n result : DataArray\n Array resulting from the dot product over all shared dimensions.\n\n See also\n --------\n dot\n numpy.tensordot\n\n Examples\n --------\n\n >>> da_vals = np.arange(6 * 5 * 4).reshape((6, 5, 4))\n >>> da = DataArray(da_vals, dims=['x', 'y', 'z'])\n >>> dm_vals = np.arange(4)\n >>> dm = DataArray(dm_vals, dims=['z'])\n\n >>> dm.dims\n ('z')\n >>> da.dims\n ('x', 'y', 'z')\n\n >>> dot_result = da.dot(dm)\n >>> dot_result.dims\n ('x', 'y')\n ","endLoc":2788,"header":"def dot(\n self, other: \"DataArray\", dims: Union[Hashable, Sequence[Hashable], None] = None\n ) -> \"DataArray\"","id":2137,"name":"dot","nodeType":"Function","startLoc":2739,"text":"def dot(\n self, other: \"DataArray\", dims: Union[Hashable, Sequence[Hashable], None] = None\n ) -> \"DataArray\":\n \"\"\"Perform dot product of two DataArrays along their shared dims.\n\n Equivalent to taking taking tensordot over all shared dims.\n\n Parameters\n ----------\n other : DataArray\n The other array with which the dot product is performed.\n dims: hashable or sequence of hashables, optional\n Along which dimensions to be summed over. 
Default all the common\n dimensions are summed over.\n\n Returns\n -------\n result : DataArray\n Array resulting from the dot product over all shared dimensions.\n\n See also\n --------\n dot\n numpy.tensordot\n\n Examples\n --------\n\n >>> da_vals = np.arange(6 * 5 * 4).reshape((6, 5, 4))\n >>> da = DataArray(da_vals, dims=['x', 'y', 'z'])\n >>> dm_vals = np.arange(4)\n >>> dm = DataArray(dm_vals, dims=['z'])\n\n >>> dm.dims\n ('z')\n >>> da.dims\n ('x', 'y', 'z')\n\n >>> dot_result = da.dot(dm)\n >>> dot_result.dims\n ('x', 'y')\n \"\"\"\n if isinstance(other, Dataset):\n raise NotImplementedError(\n \"dot products are not yet supported with Dataset objects.\"\n )\n if not isinstance(other, DataArray):\n raise TypeError(\"dot only operates on DataArrays.\")\n\n return computation.dot(self, other, dims=dims)"},{"col":0,"comment":"null","endLoc":219,"header":"def _scale_offset_decoding(data, scale_factor, add_offset, dtype)","id":2138,"name":"_scale_offset_decoding","nodeType":"Function","startLoc":213,"text":"def _scale_offset_decoding(data, scale_factor, add_offset, dtype):\n data = np.array(data, dtype=dtype, copy=True)\n if scale_factor is not None:\n data *= scale_factor\n if add_offset is not None:\n data += add_offset\n return data"},{"attributeType":"null","col":0,"comment":"null","endLoc":258,"id":2139,"name":"htmlhelp_basename","nodeType":"Attribute","startLoc":258,"text":"htmlhelp_basename"},{"col":0,"comment":"","endLoc":1,"header":"variables.py#","id":2140,"name":"","nodeType":"Function","startLoc":1,"text":"\"\"\"Coders for individual Variable objects.\"\"\""},{"attributeType":"null","col":0,"comment":"null","endLoc":263,"id":2141,"name":"latex_elements","nodeType":"Attribute","startLoc":263,"text":"latex_elements"},{"fileName":"__init__.py","filePath":"xarray/coding","id":2142,"nodeType":"File","text":""},{"attributeType":"null","col":0,"comment":"null","endLoc":275,"id":2143,"name":"latex_documents","nodeType":"Attribute","startLoc":275,"text":"latex_documents"},{"fileName":"__init__.py","filePath":"xarray/core","id":2144,"nodeType":"File","text":""},{"attributeType":"null","col":4,"comment":"null","endLoc":433,"id":2145,"name":"_coarsen_cls","nodeType":"Attribute","startLoc":433,"text":"_coarsen_cls"},{"attributeType":"null","col":0,"comment":"null","endLoc":304,"id":2146,"name":"man_pages","nodeType":"Attribute","startLoc":304,"text":"man_pages"},{"col":4,"comment":"null","endLoc":2479,"header":"def __rmatmul__(self, other)","id":2147,"name":"__rmatmul__","nodeType":"Function","startLoc":2476,"text":"def __rmatmul__(self, other):\n # currently somewhat duplicative, as only other DataArrays are\n # compatible with matmul\n return computation.dot(other, self)"},{"id":2148,"name":"groupby.rst","nodeType":"TextFile","path":"doc","text":".. _groupby:\n\nGroupBy: split-apply-combine\n----------------------------\n\nxarray supports `\"group by\"`__ operations with the same API as pandas to\nimplement the `split-apply-combine`__ strategy:\n\n__ http://pandas.pydata.org/pandas-docs/stable/groupby.html\n__ http://www.jstatsoft.org/v40/i01/paper\n\n- Split your data into multiple independent groups.\n- Apply some function to each group.\n- Combine your groups back into a single data object.\n\nGroup by operations work on both :py:class:`~xarray.Dataset` and\n:py:class:`~xarray.DataArray` objects. Most of the examples focus on grouping by\na single one-dimensional variable, although support for grouping\nover a multi-dimensional variable has recently been implemented. 
Note that for\none-dimensional data, it is usually faster to rely on pandas' implementation of\nthe same pipeline.\n\nSplit\n~~~~~\n\nLet's create a simple example dataset:\n\n.. ipython:: python\n :suppress:\n\n import numpy as np\n import pandas as pd\n import xarray as xr\n np.random.seed(123456)\n\n.. ipython:: python\n\n ds = xr.Dataset({'foo': (('x', 'y'), np.random.rand(4, 3))},\n coords={'x': [10, 20, 30, 40],\n 'letters': ('x', list('abba'))})\n arr = ds['foo']\n ds\n\nIf we groupby the name of a variable or coordinate in a dataset (we can also\nuse a DataArray directly), we get back a ``GroupBy`` object:\n\n.. ipython:: python\n\n ds.groupby('letters')\n\nThis object works very similarly to a pandas GroupBy object. You can view\nthe group indices with the ``groups`` attribute:\n\n.. ipython:: python\n\n ds.groupby('letters').groups\n\nYou can also iterate over groups in ``(label, group)`` pairs:\n\n.. ipython:: python\n\n list(ds.groupby('letters'))\n\nJust like in pandas, creating a GroupBy object is cheap: it does not actually\nsplit the data until you access particular values.\n\nBinning\n~~~~~~~\n\nSometimes you don't want to use all the unique values to determine the groups\nbut instead want to \"bin\" the data into coarser groups. You could always create\na customized coordinate, but xarray facilitates this via the\n:py:meth:`~xarray.Dataset.groupby_bins` method.\n\n.. ipython:: python\n\n x_bins = [0,25,50]\n ds.groupby_bins('x', x_bins).groups\n\nThe binning is implemented via :func:`pandas.cut`, whose documentation details how\nthe bins are assigned. As seen in the example above, by default, the bins are\nlabeled with strings using set notation to precisely identify the bin limits. To\noverride this behavior, you can specify the bin labels explicitly. Here we\nchoose `float` labels which identify the bin centers:\n\n.. ipython:: python\n\n x_bin_labels = [12.5,37.5]\n ds.groupby_bins('x', x_bins, labels=x_bin_labels).groups\n\n\nApply\n~~~~~\n\nTo apply a function to each group, you can use the flexible\n:py:meth:`~xarray.DatasetGroupBy.apply` method. The resulting objects are automatically\nconcatenated back together along the group axis:\n\n.. ipython:: python\n\n def standardize(x):\n return (x - x.mean()) / x.std()\n\n arr.groupby('letters').apply(standardize)\n\nGroupBy objects also have a :py:meth:`~xarray.DatasetGroupBy.reduce` method and\nmethods like :py:meth:`~xarray.DatasetGroupBy.mean` as shortcuts for applying an\naggregation function:\n\n.. ipython:: python\n\n arr.groupby('letters').mean(dim='x')\n\nUsing a groupby is thus also a convenient shortcut for aggregating over all\ndimensions *other than* the provided one:\n\n.. ipython:: python\n\n ds.groupby('x').std(xr.ALL_DIMS)\n\nFirst and last\n~~~~~~~~~~~~~~\n\nThere are two special aggregation operations that are currently only found on\ngroupby objects: first and last. These provide the first or last example of\nvalues for group along the grouped dimension:\n\n.. ipython:: python\n\n ds.groupby('letters').first(xr.ALL_DIMS)\n\nBy default, they skip missing values (control this with ``skipna``).\n\nGrouped arithmetic\n~~~~~~~~~~~~~~~~~~\n\nGroupBy objects also support a limited set of binary arithmetic operations, as\na shortcut for mapping over all unique labels. Binary arithmetic is supported\nfor ``(GroupBy, Dataset)`` and ``(GroupBy, DataArray)`` pairs, as long as the\ndataset or data array uses the unique grouped values as one of its index\ncoordinates. For example:\n\n.. 
ipython:: python\n\n alt = arr.groupby('letters').mean(xr.ALL_DIMS)\n alt\n ds.groupby('letters') - alt\n\nThis last line is roughly equivalent to the following::\n\n results = []\n for label, group in ds.groupby('letters'):\n results.append(group - alt.sel(x=label))\n xr.concat(results, dim='x')\n\nSqueezing\n~~~~~~~~~\n\nWhen grouping over a dimension, you can control whether the dimension is\nsqueezed out or if it should remain with length one on each group by using\nthe ``squeeze`` parameter:\n\n.. ipython:: python\n\n next(iter(arr.groupby('x')))\n\n.. ipython:: python\n\n next(iter(arr.groupby('x', squeeze=False)))\n\nAlthough xarray will attempt to automatically\n:py:attr:`~xarray.DataArray.transpose` dimensions back into their original order\nwhen you use apply, it is sometimes useful to set ``squeeze=False`` to\nguarantee that all original dimensions remain unchanged.\n\nYou can always squeeze explicitly later with the Dataset or DataArray\n:py:meth:`~xarray.DataArray.squeeze` methods.\n\n.. _groupby.multidim:\n\nMultidimensional Grouping\n~~~~~~~~~~~~~~~~~~~~~~~~~\n\nMany datasets have a multidimensional coordinate variable (e.g. longitude)\nwhich is different from the logical grid dimensions (e.g. nx, ny). Such\nvariables are valid under the `CF conventions`__. Xarray supports groupby\noperations over multidimensional coordinate variables:\n\n__ http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#_two_dimensional_latitude_longitude_coordinate_variables\n\n.. ipython:: python\n\n da = xr.DataArray([[0,1],[2,3]],\n coords={'lon': (['ny','nx'], [[30,40],[40,50]] ),\n 'lat': (['ny','nx'], [[10,10],[20,20]] ),},\n dims=['ny','nx'])\n da\n da.groupby('lon').sum(xr.ALL_DIMS)\n da.groupby('lon').apply(lambda x: x - x.mean(), shortcut=False)\n\nBecause multidimensional groups have the ability to generate a very large\nnumber of bins, coarse-binning via :py:meth:`~xarray.Dataset.groupby_bins`\nmay be desirable:\n\n.. ipython:: python\n\n da.groupby_bins('lon', [0,45,50]).sum()\n\nThese methods group by `lon` values. It is also possible to groupby each\ncell in a grid, regardless of value, by stacking multiple dimensions, \napplying your function, and then unstacking the result:\n\n.. ipython:: python\n\n stacked = da.stack(gridcell=['ny', 'nx'])\n stacked.groupby('gridcell').sum(xr.ALL_DIMS).unstack('gridcell')\n"},{"id":2149,"name":".codecov.yml","nodeType":"TextFile","path":"","text":"codecov:\n ci:\n # by default, codecov doesn't recognize azure as a CI provider\n - dev.azure.com\n require_ci_to_pass: yes\n\ncoverage:\n status:\n project:\n default:\n # Require 1% coverage, i.e., always succeed\n target: 1\n patch: false\n changes: false\n\ncomment: off\n"},{"col":4,"comment":"null","endLoc":2488,"header":"@staticmethod\n def _unary_op(f: Callable[..., Any]) -> Callable[..., \"DataArray\"]","id":2150,"name":"_unary_op","nodeType":"Function","startLoc":2481,"text":"@staticmethod\n def _unary_op(f: Callable[..., Any]) -> Callable[..., \"DataArray\"]:\n @functools.wraps(f)\n def func(self, *args, **kwargs):\n with np.errstate(all=\"ignore\"):\n return self.__array_wrap__(f(self.variable.data, *args, **kwargs))\n\n return func"},{"fileName":"duck_array_ops.py","filePath":"xarray/core","id":2151,"nodeType":"File","text":"\"\"\"Compatibility module defining operations on duck numpy-arrays.\n\nCurrently, this means Dask or NumPy arrays. 
None of these functions should\naccept or return xarray objects.\n\"\"\"\nimport contextlib\nimport inspect\nimport warnings\nfrom functools import partial\n\nimport numpy as np\nimport pandas as pd\n\nfrom . import dask_array_ops, dtypes, npcompat, nputils\nfrom .nputils import nanfirst, nanlast\nfrom .pycompat import dask_array_type\n\ntry:\n import dask.array as dask_array\nexcept ImportError:\n dask_array = None # type: ignore\n\n\ndef _dask_or_eager_func(\n name,\n eager_module=np,\n dask_module=dask_array,\n list_of_args=False,\n array_args=slice(1),\n requires_dask=None,\n):\n \"\"\"Create a function that dispatches to dask for dask array inputs.\"\"\"\n if dask_module is not None:\n\n def f(*args, **kwargs):\n if list_of_args:\n dispatch_args = args[0]\n else:\n dispatch_args = args[array_args]\n if any(isinstance(a, dask_array.Array) for a in dispatch_args):\n try:\n wrapped = getattr(dask_module, name)\n except AttributeError as e:\n raise AttributeError(\"%s: requires dask >=%s\" % (e, requires_dask))\n else:\n wrapped = getattr(eager_module, name)\n return wrapped(*args, **kwargs)\n\n else:\n\n def f(*args, **kwargs):\n return getattr(eager_module, name)(*args, **kwargs)\n\n return f\n\n\ndef fail_on_dask_array_input(values, msg=None, func_name=None):\n if isinstance(values, dask_array_type):\n if msg is None:\n msg = \"%r is not yet a valid method on dask arrays\"\n if func_name is None:\n func_name = inspect.stack()[1][3]\n raise NotImplementedError(msg % func_name)\n\n\n# switch to use dask.array / __array_function__ version when dask supports it:\n# https://github.com/dask/dask/pull/4822\nmoveaxis = npcompat.moveaxis\n\naround = _dask_or_eager_func(\"around\")\nisclose = _dask_or_eager_func(\"isclose\")\n\n\nif hasattr(np, \"isnat\") and (\n dask_array is None or hasattr(dask_array_type, \"__array_ufunc__\")\n):\n # np.isnat is available since NumPy 1.13, so __array_ufunc__ is always\n # supported.\n isnat = np.isnat\nelse:\n isnat = _dask_or_eager_func(\"isnull\", eager_module=pd)\nisnan = _dask_or_eager_func(\"isnan\")\nzeros_like = _dask_or_eager_func(\"zeros_like\")\n\n\npandas_isnull = _dask_or_eager_func(\"isnull\", eager_module=pd)\n\n\ndef isnull(data):\n data = asarray(data)\n scalar_type = data.dtype.type\n if issubclass(scalar_type, (np.datetime64, np.timedelta64)):\n # datetime types use NaT for null\n # note: must check timedelta64 before integers, because currently\n # timedelta64 inherits from np.integer\n return isnat(data)\n elif issubclass(scalar_type, np.inexact):\n # float types use NaN for null\n return isnan(data)\n elif issubclass(scalar_type, (np.bool_, np.integer, np.character, np.void)):\n # these types cannot represent missing values\n return zeros_like(data, dtype=bool)\n else:\n # at this point, array should have dtype=object\n if isinstance(data, (np.ndarray, dask_array_type)):\n return pandas_isnull(data)\n else:\n # Not reachable yet, but intended for use with other duck array\n # types. 
For full consistency with pandas, we should accept None as\n # a null value as well as NaN, but it isn't clear how to do this\n # with duck typing.\n return data != data\n\n\ndef notnull(data):\n return ~isnull(data)\n\n\ntranspose = _dask_or_eager_func(\"transpose\")\n_where = _dask_or_eager_func(\"where\", array_args=slice(3))\nisin = _dask_or_eager_func(\"isin\", array_args=slice(2))\ntake = _dask_or_eager_func(\"take\")\nbroadcast_to = _dask_or_eager_func(\"broadcast_to\")\n\n_concatenate = _dask_or_eager_func(\"concatenate\", list_of_args=True)\n_stack = _dask_or_eager_func(\"stack\", list_of_args=True)\n\narray_all = _dask_or_eager_func(\"all\")\narray_any = _dask_or_eager_func(\"any\")\n\ntensordot = _dask_or_eager_func(\"tensordot\", array_args=slice(2))\neinsum = _dask_or_eager_func(\"einsum\", array_args=slice(1, None))\n\n\ndef gradient(x, coord, axis, edge_order):\n if isinstance(x, dask_array_type):\n return dask_array.gradient(x, coord, axis=axis, edge_order=edge_order)\n return np.gradient(x, coord, axis=axis, edge_order=edge_order)\n\n\ndef trapz(y, x, axis):\n if axis < 0:\n axis = y.ndim + axis\n x_sl1 = (slice(1, None),) + (None,) * (y.ndim - axis - 1)\n x_sl2 = (slice(None, -1),) + (None,) * (y.ndim - axis - 1)\n slice1 = (slice(None),) * axis + (slice(1, None),)\n slice2 = (slice(None),) * axis + (slice(None, -1),)\n dx = x[x_sl1] - x[x_sl2]\n integrand = dx * 0.5 * (y[tuple(slice1)] + y[tuple(slice2)])\n return sum(integrand, axis=axis, skipna=False)\n\n\nmasked_invalid = _dask_or_eager_func(\n \"masked_invalid\", eager_module=np.ma, dask_module=getattr(dask_array, \"ma\", None)\n)\n\n\ndef asarray(data):\n return (\n data\n if (isinstance(data, dask_array_type) or hasattr(data, \"__array_function__\"))\n else np.asarray(data)\n )\n\n\ndef as_shared_dtype(scalars_or_arrays):\n \"\"\"Cast a arrays to a shared dtype using xarray's type promotion rules.\"\"\"\n arrays = [asarray(x) for x in scalars_or_arrays]\n # Pass arrays directly instead of dtypes to result_type so scalars\n # get handled properly.\n # Note that result_type() safely gets the dtype from dask arrays without\n # evaluating them.\n out_type = dtypes.result_type(*arrays)\n return [x.astype(out_type, copy=False) for x in arrays]\n\n\ndef allclose_or_equiv(arr1, arr2, rtol=1e-5, atol=1e-8):\n \"\"\"Like np.allclose, but also allows values to be NaN in both arrays\n \"\"\"\n arr1 = asarray(arr1)\n arr2 = asarray(arr2)\n if arr1.shape != arr2.shape:\n return False\n return bool(isclose(arr1, arr2, rtol=rtol, atol=atol, equal_nan=True).all())\n\n\ndef array_equiv(arr1, arr2):\n \"\"\"Like np.array_equal, but also allows values to be NaN in both arrays\n \"\"\"\n arr1 = asarray(arr1)\n arr2 = asarray(arr2)\n if arr1.shape != arr2.shape:\n return False\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \"In the future, 'NAT == x'\")\n flag_array = (arr1 == arr2) | (isnull(arr1) & isnull(arr2))\n return bool(flag_array.all())\n\n\ndef array_notnull_equiv(arr1, arr2):\n \"\"\"Like np.array_equal, but also allows values to be NaN in either or both\n arrays\n \"\"\"\n arr1 = asarray(arr1)\n arr2 = asarray(arr2)\n if arr1.shape != arr2.shape:\n return False\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \"In the future, 'NAT == x'\")\n flag_array = (arr1 == arr2) | isnull(arr1) | isnull(arr2)\n return bool(flag_array.all())\n\n\ndef count(data, axis=None):\n \"\"\"Count the number of non-NA in this array along the given axis or axes\n \"\"\"\n return 
np.sum(np.logical_not(isnull(data)), axis=axis)\n\n\ndef where(condition, x, y):\n \"\"\"Three argument where() with better dtype promotion rules.\"\"\"\n return _where(condition, *as_shared_dtype([x, y]))\n\n\ndef where_method(data, cond, other=dtypes.NA):\n if other is dtypes.NA:\n other = dtypes.get_fill_value(data.dtype)\n return where(cond, data, other)\n\n\ndef fillna(data, other):\n return where(isnull(data), other, data)\n\n\ndef concatenate(arrays, axis=0):\n \"\"\"concatenate() with better dtype promotion rules.\"\"\"\n return _concatenate(as_shared_dtype(arrays), axis=axis)\n\n\ndef stack(arrays, axis=0):\n \"\"\"stack() with better dtype promotion rules.\"\"\"\n return _stack(as_shared_dtype(arrays), axis=axis)\n\n\n@contextlib.contextmanager\ndef _ignore_warnings_if(condition):\n if condition:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n yield\n else:\n yield\n\n\ndef _create_nan_agg_method(name, coerce_strings=False):\n from . import nanops\n\n def f(values, axis=None, skipna=None, **kwargs):\n if kwargs.pop(\"out\", None) is not None:\n raise TypeError(\"`out` is not valid for {}\".format(name))\n\n values = asarray(values)\n\n if coerce_strings and values.dtype.kind in \"SU\":\n values = values.astype(object)\n\n func = None\n if skipna or (skipna is None and values.dtype.kind in \"cfO\"):\n nanname = \"nan\" + name\n func = getattr(nanops, nanname)\n else:\n func = _dask_or_eager_func(name)\n\n try:\n return func(values, axis=axis, **kwargs)\n except AttributeError:\n if isinstance(values, dask_array_type):\n try: # dask/dask#3133 dask sometimes needs dtype argument\n # if func does not accept dtype, then raises TypeError\n return func(values, axis=axis, dtype=values.dtype, **kwargs)\n except (AttributeError, TypeError):\n msg = \"%s is not yet implemented on dask arrays\" % name\n else:\n msg = (\n \"%s is not available with skipna=False with the \"\n \"installed version of numpy; upgrade to numpy 1.12 \"\n \"or newer to use skipna=True or skipna=None\" % name\n )\n raise NotImplementedError(msg)\n\n f.__name__ = name\n return f\n\n\n# Attributes `numeric_only`, `available_min_count` is used for docs.\n# See ops.inject_reduce_methods\nargmax = _create_nan_agg_method(\"argmax\", coerce_strings=True)\nargmin = _create_nan_agg_method(\"argmin\", coerce_strings=True)\nmax = _create_nan_agg_method(\"max\", coerce_strings=True)\nmin = _create_nan_agg_method(\"min\", coerce_strings=True)\nsum = _create_nan_agg_method(\"sum\")\nsum.numeric_only = True\nsum.available_min_count = True\nstd = _create_nan_agg_method(\"std\")\nstd.numeric_only = True\nvar = _create_nan_agg_method(\"var\")\nvar.numeric_only = True\nmedian = _create_nan_agg_method(\"median\")\nmedian.numeric_only = True\nprod = _create_nan_agg_method(\"prod\")\nprod.numeric_only = True\nsum.available_min_count = True\ncumprod_1d = _create_nan_agg_method(\"cumprod\")\ncumprod_1d.numeric_only = True\ncumsum_1d = _create_nan_agg_method(\"cumsum\")\ncumsum_1d.numeric_only = True\n\n\n_mean = _create_nan_agg_method(\"mean\")\n\n\ndef datetime_to_numeric(array, offset=None, datetime_unit=None, dtype=float):\n \"\"\"Convert an array containing datetime-like data to an array of floats.\n\n Parameters\n ----------\n da : np.array\n Input data\n offset: Scalar with the same type of array or None\n If None, subtract minimum values to reduce round off error\n datetime_unit: None or any of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',\n 'us', 'ns', 'ps', 'fs', 'as'}\n dtype: target dtype\n\n Returns\n 
-------\n array\n \"\"\"\n # TODO: make this function dask-compatible?\n if offset is None:\n offset = array.min()\n array = array - offset\n\n if not hasattr(array, \"dtype\"): # scalar is converted to 0d-array\n array = np.array(array)\n\n if array.dtype.kind in \"O\":\n # possibly convert object array containing datetime.timedelta\n array = np.asarray(pd.Series(array.ravel())).reshape(array.shape)\n\n if datetime_unit:\n array = array / np.timedelta64(1, datetime_unit)\n\n # convert np.NaT to np.nan\n if array.dtype.kind in \"mM\":\n return np.where(isnull(array), np.nan, array.astype(dtype))\n return array.astype(dtype)\n\n\ndef _to_pytimedelta(array, unit=\"us\"):\n index = pd.TimedeltaIndex(array.ravel(), unit=unit)\n return index.to_pytimedelta().reshape(array.shape)\n\n\ndef mean(array, axis=None, skipna=None, **kwargs):\n \"\"\"inhouse mean that can handle np.datetime64 or cftime.datetime\n dtypes\"\"\"\n from .common import _contains_cftime_datetimes\n\n array = asarray(array)\n if array.dtype.kind in \"Mm\":\n offset = min(array)\n # xarray always uses np.datetime64[ns] for np.datetime64 data\n dtype = \"timedelta64[ns]\"\n return (\n _mean(\n datetime_to_numeric(array, offset), axis=axis, skipna=skipna, **kwargs\n ).astype(dtype)\n + offset\n )\n elif _contains_cftime_datetimes(array):\n if isinstance(array, dask_array_type):\n raise NotImplementedError(\n \"Computing the mean of an array containing \"\n \"cftime.datetime objects is not yet implemented on \"\n \"dask arrays.\"\n )\n offset = min(array)\n timedeltas = datetime_to_numeric(array, offset, datetime_unit=\"us\")\n mean_timedeltas = _mean(timedeltas, axis=axis, skipna=skipna, **kwargs)\n return _to_pytimedelta(mean_timedeltas, unit=\"us\") + offset\n else:\n return _mean(array, axis=axis, skipna=skipna, **kwargs)\n\n\nmean.numeric_only = True # type: ignore\n\n\ndef _nd_cum_func(cum_func, array, axis, **kwargs):\n array = asarray(array)\n if axis is None:\n axis = tuple(range(array.ndim))\n if isinstance(axis, int):\n axis = (axis,)\n\n out = array\n for ax in axis:\n out = cum_func(out, axis=ax, **kwargs)\n return out\n\n\ndef cumprod(array, axis=None, **kwargs):\n \"\"\"N-dimensional version of cumprod.\"\"\"\n return _nd_cum_func(cumprod_1d, array, axis, **kwargs)\n\n\ndef cumsum(array, axis=None, **kwargs):\n \"\"\"N-dimensional version of cumsum.\"\"\"\n return _nd_cum_func(cumsum_1d, array, axis, **kwargs)\n\n\n_fail_on_dask_array_input_skipna = partial(\n fail_on_dask_array_input,\n msg=\"%r with skipna=True is not yet implemented on dask arrays\",\n)\n\n\ndef first(values, axis, skipna=None):\n \"\"\"Return the first non-NA elements in this array along the given axis\n \"\"\"\n if (skipna or skipna is None) and values.dtype.kind not in \"iSU\":\n # only bother for dtypes that can hold NaN\n _fail_on_dask_array_input_skipna(values)\n return nanfirst(values, axis)\n return take(values, 0, axis=axis)\n\n\ndef last(values, axis, skipna=None):\n \"\"\"Return the last non-NA elements in this array along the given axis\n \"\"\"\n if (skipna or skipna is None) and values.dtype.kind not in \"iSU\":\n # only bother for dtypes that can hold NaN\n _fail_on_dask_array_input_skipna(values)\n return nanlast(values, axis)\n return take(values, -1, axis=axis)\n\n\ndef rolling_window(array, axis, window, center, fill_value):\n \"\"\"\n Make an ndarray with a rolling window of axis-th dimension.\n The rolling dimension will be placed at the last dimension.\n \"\"\"\n if isinstance(array, dask_array_type):\n return 
dask_array_ops.rolling_window(array, axis, window, center, fill_value)\n else: # np.ndarray\n return nputils.rolling_window(array, axis, window, center, fill_value)\n"},{"attributeType":"null","col":0,"comment":"null","endLoc":315,"id":2152,"name":"texinfo_documents","nodeType":"Attribute","startLoc":315,"text":"texinfo_documents"},{"attributeType":"null","col":0,"comment":"null","endLoc":341,"id":2153,"name":"intersphinx_mapping","nodeType":"Attribute","startLoc":341,"text":"intersphinx_mapping"},{"attributeType":"null","col":4,"comment":"null","endLoc":434,"id":2154,"name":"_resample_cls","nodeType":"Attribute","startLoc":434,"text":"_resample_cls"},{"col":0,"comment":"","endLoc":16,"header":"conf.py#","id":2155,"name":"","nodeType":"Function","startLoc":16,"text":"allowed_failures = set()\n\nprint(\"python exec:\", sys.executable)\n\nprint(\"sys.path:\", sys.path)\n\nif \"conda\" in sys.executable:\n print(\"conda environment:\")\n subprocess.run([\"conda\", \"list\"])\nelse:\n print(\"pip environment:\")\n subprocess.run([\"pip\", \"list\"])\n\nprint(\"xarray: %s, %s\" % (xarray.__version__, xarray.__file__))\n\nwith suppress(ImportError):\n import matplotlib\n\n matplotlib.use(\"Agg\")\n\ntry:\n import rasterio\nexcept ImportError:\n allowed_failures.update(\n [\"gallery/plot_rasterio_rgb.py\", \"gallery/plot_rasterio.py\"]\n )\n\ntry:\n import cartopy\nexcept ImportError:\n allowed_failures.update(\n [\n \"gallery/plot_cartopy_facetgrid.py\",\n \"gallery/plot_rasterio_rgb.py\",\n \"gallery/plot_rasterio.py\",\n ]\n )\n\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.extlinks\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.napoleon\",\n \"numpydoc\",\n \"IPython.sphinxext.ipython_directive\",\n \"IPython.sphinxext.ipython_console_highlighting\",\n \"sphinx_gallery.gen_gallery\",\n]\n\nextlinks = {\n \"issue\": (\"https://github.com/pydata/xarray/issues/%s\", \"GH\"),\n \"pull\": (\"https://github.com/pydata/xarray/pull/%s\", \"PR\"),\n}\n\nsphinx_gallery_conf = {\n \"examples_dirs\": \"gallery\",\n \"gallery_dirs\": \"auto_gallery\",\n \"backreferences_dir\": False,\n \"expected_failing_examples\": list(allowed_failures),\n}\n\nautosummary_generate = True\n\nautodoc_typehints = \"none\"\n\nnapoleon_use_param = True\n\nnapoleon_use_rtype = True\n\nnumpydoc_class_members_toctree = True\n\nnumpydoc_show_class_members = False\n\ntemplates_path = [\"_templates\"]\n\nsource_suffix = \".rst\"\n\nmaster_doc = \"index\"\n\nproject = \"xarray\"\n\ncopyright = \"2014-%s, xarray Developers\" % datetime.datetime.now().year\n\nversion = xarray.__version__.split(\"+\")[0]\n\nrelease = xarray.__version__\n\ntoday_fmt = \"%Y-%m-%d\"\n\nexclude_patterns = [\"_build\"]\n\npygments_style = \"sphinx\"\n\nhtml_theme = \"sphinx_rtd_theme\"\n\nhtml_theme_options = {\"logo_only\": True}\n\nhtml_logo = \"_static/dataset-diagram-logo.png\"\n\nhtml_favicon = \"_static/favicon.ico\"\n\nhtml_static_path = [\"_static\"]\n\nipython_savefig_dir = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"_build\", \"html\", \"_static\"\n)\n\nif not os.path.exists(ipython_savefig_dir):\n os.makedirs(ipython_savefig_dir)\n\nhtml_last_updated_fmt = today_fmt\n\nhtmlhelp_basename = \"xarraydoc\"\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n # 'preamble': '',\n}\n\nlatex_documents = 
[\n (\"index\", \"xarray.tex\", \"xarray Documentation\", \"xarray Developers\", \"manual\")\n]\n\nman_pages = [(\"index\", \"xarray\", \"xarray Documentation\", [\"xarray Developers\"], 1)]\n\ntexinfo_documents = [\n (\n \"index\",\n \"xarray\",\n \"xarray Documentation\",\n \"xarray Developers\",\n \"xarray\",\n \"N-D labeled arrays and datasets in Python.\",\n \"Miscellaneous\",\n )\n]\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"pandas\": (\"https://pandas.pydata.org/pandas-docs/stable/\", None),\n \"iris\": (\"http://scitools.org.uk/iris/docs/latest/\", None),\n \"numpy\": (\"https://docs.scipy.org/doc/numpy/\", None),\n \"numba\": (\"https://numba.pydata.org/numba-doc/latest/\", None),\n \"matplotlib\": (\"https://matplotlib.org/\", None),\n}"},{"col":0,"comment":"null","endLoc":63,"header":"def fail_on_dask_array_input(values, msg=None, func_name=None)","id":2156,"name":"fail_on_dask_array_input","nodeType":"Function","startLoc":57,"text":"def fail_on_dask_array_input(values, msg=None, func_name=None):\n if isinstance(values, dask_array_type):\n if msg is None:\n msg = \"%r is not yet a valid method on dask arrays\"\n if func_name is None:\n func_name = inspect.stack()[1][3]\n raise NotImplementedError(msg % func_name)"},{"col":0,"comment":"null","endLoc":116,"header":"def notnull(data)","id":2157,"name":"notnull","nodeType":"Function","startLoc":115,"text":"def notnull(data):\n return ~isnull(data)"},{"fileName":"dataarray_missing.py","filePath":"asv_bench/benchmarks","id":2158,"nodeType":"File","text":"import pandas as pd\n\nimport xarray as xr\n\nfrom . import randn, requires_dask\n\ntry:\n import dask # noqa: F401\nexcept ImportError:\n pass\n\n\ndef make_bench_data(shape, frac_nan, chunks):\n vals = randn(shape, frac_nan)\n coords = {\"time\": pd.date_range(\"2000-01-01\", freq=\"D\", periods=shape[0])}\n da = xr.DataArray(vals, dims=(\"time\", \"x\", \"y\"), coords=coords)\n\n if chunks is not None:\n da = da.chunk(chunks)\n\n return da\n\n\ndef time_interpolate_na(shape, chunks, method, limit):\n if chunks is not None:\n requires_dask()\n da = make_bench_data(shape, 0.1, chunks=chunks)\n actual = da.interpolate_na(dim=\"time\", method=\"linear\", limit=limit)\n\n if chunks is not None:\n actual = actual.compute()\n\n\ntime_interpolate_na.param_names = [\"shape\", \"chunks\", \"method\", \"limit\"]\ntime_interpolate_na.params = (\n [(3650, 200, 400), (100, 25, 25)],\n [None, {\"x\": 25, \"y\": 25}],\n [\"linear\", \"spline\", \"quadratic\", \"cubic\"],\n [None, 3],\n)\n\n\ndef time_ffill(shape, chunks, limit):\n\n da = make_bench_data(shape, 0.1, chunks=chunks)\n actual = da.ffill(dim=\"time\", limit=limit)\n\n if chunks is not None:\n actual = actual.compute()\n\n\ntime_ffill.param_names = [\"shape\", \"chunks\", \"limit\"]\ntime_ffill.params = (\n [(3650, 200, 400), (100, 25, 25)],\n [None, {\"x\": 25, \"y\": 25}],\n [None, 3],\n)\n\n\ndef time_bfill(shape, chunks, limit):\n\n da = make_bench_data(shape, 0.1, chunks=chunks)\n actual = da.bfill(dim=\"time\", limit=limit)\n\n if chunks is not None:\n actual = actual.compute()\n\n\ntime_bfill.param_names = [\"shape\", \"chunks\", \"limit\"]\ntime_bfill.params = (\n [(3650, 200, 400), (100, 25, 25)],\n [None, {\"x\": 25, \"y\": 25}],\n [None, 3],\n)\n"},{"col":4,"comment":"null","endLoc":2517,"header":"@staticmethod\n def _binary_op(\n f: Callable[..., Any],\n reflexive: bool = False,\n join: str = None, # see xarray.align\n **ignored_kwargs\n ) -> Callable[..., 
\"DataArray\"]","id":2159,"name":"_binary_op","nodeType":"Function","startLoc":2490,"text":"@staticmethod\n def _binary_op(\n f: Callable[..., Any],\n reflexive: bool = False,\n join: str = None, # see xarray.align\n **ignored_kwargs\n ) -> Callable[..., \"DataArray\"]:\n @functools.wraps(f)\n def func(self, other):\n if isinstance(other, (Dataset, groupby.GroupBy)):\n return NotImplemented\n if isinstance(other, DataArray):\n align_type = OPTIONS[\"arithmetic_join\"] if join is None else join\n self, other = align(self, other, join=align_type, copy=False)\n other_variable = getattr(other, \"variable\", other)\n other_coords = getattr(other, \"coords\", None)\n\n variable = (\n f(self.variable, other_variable)\n if not reflexive\n else f(other_variable, self.variable)\n )\n coords, indexes = self.coords._merge_raw(other_coords)\n name = self._result_name(other)\n\n return self._replace(variable, coords, name)\n\n return func"},{"col":0,"comment":"Like np.allclose, but also allows values to be NaN in both arrays\n ","endLoc":184,"header":"def allclose_or_equiv(arr1, arr2, rtol=1e-5, atol=1e-8)","id":2160,"name":"allclose_or_equiv","nodeType":"Function","startLoc":177,"text":"def allclose_or_equiv(arr1, arr2, rtol=1e-5, atol=1e-8):\n \"\"\"Like np.allclose, but also allows values to be NaN in both arrays\n \"\"\"\n arr1 = asarray(arr1)\n arr2 = asarray(arr2)\n if arr1.shape != arr2.shape:\n return False\n return bool(isclose(arr1, arr2, rtol=rtol, atol=atol, equal_nan=True).all())"},{"col":0,"comment":"null","endLoc":21,"header":"def make_bench_data(shape, frac_nan, chunks)","id":2161,"name":"make_bench_data","nodeType":"Function","startLoc":13,"text":"def make_bench_data(shape, frac_nan, chunks):\n vals = randn(shape, frac_nan)\n coords = {\"time\": pd.date_range(\"2000-01-01\", freq=\"D\", periods=shape[0])}\n da = xr.DataArray(vals, dims=(\"time\", \"x\", \"y\"), coords=coords)\n\n if chunks is not None:\n da = da.chunk(chunks)\n\n return da"},{"attributeType":"null","col":4,"comment":"null","endLoc":858,"id":2162,"name":"__default","nodeType":"Attribute","startLoc":858,"text":"__default"},{"attributeType":"null","col":4,"comment":"null","endLoc":1270,"id":2163,"name":"__hash__","nodeType":"Attribute","startLoc":1270,"text":"__hash__"},{"col":4,"comment":"null","endLoc":2538,"header":"@staticmethod\n def _inplace_binary_op(f: Callable) -> Callable[..., \"DataArray\"]","id":2164,"name":"_inplace_binary_op","nodeType":"Function","startLoc":2519,"text":"@staticmethod\n def _inplace_binary_op(f: Callable) -> Callable[..., \"DataArray\"]:\n @functools.wraps(f)\n def func(self, other):\n if isinstance(other, groupby.GroupBy):\n raise TypeError(\n \"in-place operations between a DataArray and \"\n \"a grouped object are not permitted\"\n )\n # n.b. we can't align other to self (with other.reindex_like(self))\n # because `other` may be converted into floats, which would cause\n # in-place arithmetic to fail unpredictably. 
Instead, we simply\n # don't support automatic alignment with in-place arithmetic.\n other_coords = getattr(other, \"coords\", None)\n other_variable = getattr(other, \"variable\", other)\n with self.coords._merge_inplace(other_coords):\n f(self.variable, other_variable)\n return self\n\n return func"},{"attributeType":"null","col":0,"comment":"null","endLoc":86,"id":2165,"name":"_EXTENDED_PATTERN","nodeType":"Attribute","startLoc":86,"text":"_EXTENDED_PATTERN"},{"attributeType":"null","col":8,"comment":"null","endLoc":532,"id":2166,"name":"_coord_names","nodeType":"Attribute","startLoc":532,"text":"self._coord_names"},{"attributeType":"null","col":8,"comment":"null","endLoc":529,"id":2167,"name":"_file_obj","nodeType":"Attribute","startLoc":529,"text":"self._file_obj"},{"attributeType":"null","col":8,"comment":"null","endLoc":534,"id":2168,"name":"_indexes","nodeType":"Attribute","startLoc":534,"text":"self._indexes"},{"attributeType":"null","col":8,"comment":"null","endLoc":533,"id":2169,"name":"_dims","nodeType":"Attribute","startLoc":533,"text":"self._dims"},{"col":4,"comment":"null","endLoc":2541,"header":"def _copy_attrs_from(self, other: Union[\"DataArray\", Dataset, Variable]) -> None","id":2170,"name":"_copy_attrs_from","nodeType":"Function","startLoc":2540,"text":"def _copy_attrs_from(self, other: Union[\"DataArray\", Dataset, Variable]) -> None:\n self.attrs = other.attrs"},{"col":4,"comment":"\n Access plotting functions\n\n >>> d = DataArray([[1, 2], [3, 4]])\n\n For convenience just call this directly\n\n >>> d.plot()\n\n Or use it as a namespace to use xarray.plot functions as\n DataArray methods\n\n >>> d.plot.imshow() # equivalent to xarray.plot.imshow(d)\n\n ","endLoc":2560,"header":"@property\n def plot(self) -> _PlotMethods","id":2171,"name":"plot","nodeType":"Function","startLoc":2543,"text":"@property\n def plot(self) -> _PlotMethods:\n \"\"\"\n Access plotting functions\n\n >>> d = DataArray([[1, 2], [3, 4]])\n\n For convenience just call this directly\n\n >>> d.plot()\n\n Or use it as a namespace to use xarray.plot functions as\n DataArray methods\n\n >>> d.plot.imshow() # equivalent to xarray.plot.imshow(d)\n\n \"\"\"\n return _PlotMethods(self)"},{"attributeType":"null","col":0,"comment":"null","endLoc":87,"id":2172,"name":"_PATTERNS","nodeType":"Attribute","startLoc":87,"text":"_PATTERNS"},{"attributeType":"null","col":8,"comment":"null","endLoc":530,"id":2173,"name":"_encoding","nodeType":"Attribute","startLoc":530,"text":"self._encoding"},{"col":0,"comment":"","endLoc":1,"header":"cftimeindex.py#","id":2174,"name":"","nodeType":"Function","startLoc":1,"text":"\"\"\"DatetimeIndex analog for cftime.datetime objects\"\"\"\n\n_BASIC_PATTERN = build_pattern(date_sep=\"\", time_sep=\"\")\n\n_EXTENDED_PATTERN = build_pattern()\n\n_PATTERNS = [_BASIC_PATTERN, _EXTENDED_PATTERN]"},{"attributeType":"null","col":8,"comment":"null","endLoc":528,"id":2175,"name":"_attrs","nodeType":"Attribute","startLoc":528,"text":"self._attrs"},{"fileName":"ops.py","filePath":"xarray/core","id":2176,"nodeType":"File","text":"\"\"\"Define core operations for xarray objects.\n\nTODO(shoyer): rewrite this module, making use of xarray.core.computation,\nNumPy's __array_ufunc__ and mixin classes instead of the unintuitive \"inject\"\nfunctions.\n\"\"\"\n\nimport operator\n\nimport numpy as np\n\nfrom . 
import dtypes, duck_array_ops\nfrom .nputils import array_eq, array_ne\n\ntry:\n import bottleneck as bn\n\n has_bottleneck = True\nexcept ImportError:\n # use numpy methods instead\n bn = np\n has_bottleneck = False\n\n\nUNARY_OPS = [\"neg\", \"pos\", \"abs\", \"invert\"]\nCMP_BINARY_OPS = [\"lt\", \"le\", \"ge\", \"gt\"]\nNUM_BINARY_OPS = [\n \"add\",\n \"sub\",\n \"mul\",\n \"truediv\",\n \"floordiv\",\n \"mod\",\n \"pow\",\n \"and\",\n \"xor\",\n \"or\",\n]\n\n# methods which pass on the numpy return value unchanged\n# be careful not to list methods that we would want to wrap later\nNUMPY_SAME_METHODS = [\"item\", \"searchsorted\"]\n# methods which don't modify the data shape, so the result should still be\n# wrapped in an Variable/DataArray\nNUMPY_UNARY_METHODS = [\"astype\", \"argsort\", \"clip\", \"conj\", \"conjugate\"]\nPANDAS_UNARY_FUNCTIONS = [\"isnull\", \"notnull\"]\n# methods which remove an axis\nREDUCE_METHODS = [\"all\", \"any\"]\nNAN_REDUCE_METHODS = [\n \"argmax\",\n \"argmin\",\n \"max\",\n \"min\",\n \"mean\",\n \"prod\",\n \"sum\",\n \"std\",\n \"var\",\n \"median\",\n]\nNAN_CUM_METHODS = [\"cumsum\", \"cumprod\"]\n# TODO: wrap take, dot, sort\n\n\n_CUM_DOCSTRING_TEMPLATE = \"\"\"\\\nApply `{name}` along some dimension of {cls}.\n\nParameters\n----------\n{extra_args}\nskipna : bool, optional\n If True, skip missing values (as marked by NaN). By default, only\n skips missing values for float dtypes; other dtypes either do not\n have a sentinel missing value (int) or skipna=True has not been\n implemented (object, datetime64 or timedelta64).\nkeep_attrs : bool, optional\n If True, the attributes (`attrs`) will be copied from the original\n object to the new one. If False (default), the new object will be\n returned without attributes.\n**kwargs : dict\n Additional keyword arguments passed on to `{name}`.\n\nReturns\n-------\ncumvalue : {cls}\n New {cls} object with `{name}` applied to its data along the\n indicated dimension.\n\"\"\"\n\n_REDUCE_DOCSTRING_TEMPLATE = \"\"\"\\\nReduce this {cls}'s data by applying `{name}` along some dimension(s).\n\nParameters\n----------\n{extra_args}\nskipna : bool, optional\n If True, skip missing values (as marked by NaN). By default, only\n skips missing values for float dtypes; other dtypes either do not\n have a sentinel missing value (int) or skipna=True has not been\n implemented (object, datetime64 or timedelta64).{min_count_docs}\nkeep_attrs : bool, optional\n If True, the attributes (`attrs`) will be copied from the original\n object to the new one. If False (default), the new object will be\n returned without attributes.\n**kwargs : dict\n Additional keyword arguments passed on to the appropriate array\n function for calculating `{name}` on this object's data.\n\nReturns\n-------\nreduced : {cls}\n New {cls} object with `{name}` applied to its data and the\n indicated dimension(s) removed.\n\"\"\"\n\n_MINCOUNT_DOCSTRING = \"\"\"\nmin_count : int, default None\n The required number of valid values to perform the operation.\n If fewer than min_count non-NA values are present the result will\n be NA. 
New in version 0.10.8: Added with the default being None.\"\"\"\n\n_COARSEN_REDUCE_DOCSTRING_TEMPLATE = \"\"\"\\\nCoarsen this object by applying `{name}` along its dimensions.\n\nParameters\n----------\n**kwargs : dict\n Additional keyword arguments passed on to `{name}`.\n\nReturns\n-------\nreduced : DataArray or Dataset\n New object with `{name}` applied along its coasen dimnensions.\n\"\"\"\n\n\ndef fillna(data, other, join=\"left\", dataset_join=\"left\"):\n \"\"\"Fill missing values in this object with data from the other object.\n Follows normal broadcasting and alignment rules.\n\n Parameters\n ----------\n join : {'outer', 'inner', 'left', 'right'}, optional\n Method for joining the indexes of the passed objects along each\n dimension\n - 'outer': use the union of object indexes\n - 'inner': use the intersection of object indexes\n - 'left': use indexes from the first object with each dimension\n - 'right': use indexes from the last object with each dimension\n - 'exact': raise `ValueError` instead of aligning when indexes to be\n aligned are not equal\n dataset_join : {'outer', 'inner', 'left', 'right'}, optional\n Method for joining variables of Dataset objects with mismatched\n data variables.\n - 'outer': take variables from both Dataset objects\n - 'inner': take only overlapped variables\n - 'left': take only variables from the first object\n - 'right': take only variables from the last object\n \"\"\"\n from .computation import apply_ufunc\n\n return apply_ufunc(\n duck_array_ops.fillna,\n data,\n other,\n join=join,\n dask=\"allowed\",\n dataset_join=dataset_join,\n dataset_fill_value=np.nan,\n keep_attrs=True,\n )\n\n\ndef where_method(self, cond, other=dtypes.NA):\n \"\"\"Return elements from `self` or `other` depending on `cond`.\n\n Parameters\n ----------\n cond : DataArray or Dataset with boolean dtype\n Locations at which to preserve this objects values.\n other : scalar, DataArray or Dataset, optional\n Value to use for locations in this object where ``cond`` is False.\n By default, inserts missing values.\n\n Returns\n -------\n Same type as caller.\n \"\"\"\n from .computation import apply_ufunc\n\n # alignment for three arguments is complicated, so don't support it yet\n join = \"inner\" if other is dtypes.NA else \"exact\"\n return apply_ufunc(\n duck_array_ops.where_method,\n self,\n cond,\n other,\n join=join,\n dataset_join=join,\n dask=\"allowed\",\n keep_attrs=True,\n )\n\n\ndef _call_possibly_missing_method(arg, name, args, kwargs):\n try:\n method = getattr(arg, name)\n except AttributeError:\n duck_array_ops.fail_on_dask_array_input(arg, func_name=name)\n if hasattr(arg, \"data\"):\n duck_array_ops.fail_on_dask_array_input(arg.data, func_name=name)\n raise\n else:\n return method(*args, **kwargs)\n\n\ndef _values_method_wrapper(name):\n def func(self, *args, **kwargs):\n return _call_possibly_missing_method(self.data, name, args, kwargs)\n\n func.__name__ = name\n func.__doc__ = getattr(np.ndarray, name).__doc__\n return func\n\n\ndef _method_wrapper(name):\n def func(self, *args, **kwargs):\n return _call_possibly_missing_method(self, name, args, kwargs)\n\n func.__name__ = name\n func.__doc__ = getattr(np.ndarray, name).__doc__\n return func\n\n\ndef _func_slash_method_wrapper(f, name=None):\n # try to wrap a method, but if not found use the function\n # this is useful when patching in a function as both a DataArray and\n # Dataset method\n if name is None:\n name = f.__name__\n\n def func(self, *args, **kwargs):\n try:\n return getattr(self, 
name)(*args, **kwargs)\n except AttributeError:\n return f(self, *args, **kwargs)\n\n func.__name__ = name\n func.__doc__ = f.__doc__\n return func\n\n\ndef inject_reduce_methods(cls):\n methods = (\n [\n (name, getattr(duck_array_ops, \"array_%s\" % name), False)\n for name in REDUCE_METHODS\n ]\n + [(name, getattr(duck_array_ops, name), True) for name in NAN_REDUCE_METHODS]\n + [(\"count\", duck_array_ops.count, False)]\n )\n for name, f, include_skipna in methods:\n numeric_only = getattr(f, \"numeric_only\", False)\n available_min_count = getattr(f, \"available_min_count\", False)\n min_count_docs = _MINCOUNT_DOCSTRING if available_min_count else \"\"\n\n func = cls._reduce_method(f, include_skipna, numeric_only)\n func.__name__ = name\n func.__doc__ = _REDUCE_DOCSTRING_TEMPLATE.format(\n name=name,\n cls=cls.__name__,\n extra_args=cls._reduce_extra_args_docstring.format(name=name),\n min_count_docs=min_count_docs,\n )\n setattr(cls, name, func)\n\n\ndef inject_cum_methods(cls):\n methods = [(name, getattr(duck_array_ops, name), True) for name in NAN_CUM_METHODS]\n for name, f, include_skipna in methods:\n numeric_only = getattr(f, \"numeric_only\", False)\n func = cls._reduce_method(f, include_skipna, numeric_only)\n func.__name__ = name\n func.__doc__ = _CUM_DOCSTRING_TEMPLATE.format(\n name=name,\n cls=cls.__name__,\n extra_args=cls._cum_extra_args_docstring.format(name=name),\n )\n setattr(cls, name, func)\n\n\ndef op_str(name):\n return \"__%s__\" % name\n\n\ndef get_op(name):\n return getattr(operator, op_str(name))\n\n\nNON_INPLACE_OP = {get_op(\"i\" + name): get_op(name) for name in NUM_BINARY_OPS}\n\n\ndef inplace_to_noninplace_op(f):\n return NON_INPLACE_OP[f]\n\n\ndef inject_binary_ops(cls, inplace=False):\n for name in CMP_BINARY_OPS + NUM_BINARY_OPS:\n setattr(cls, op_str(name), cls._binary_op(get_op(name)))\n\n for name, f in [(\"eq\", array_eq), (\"ne\", array_ne)]:\n setattr(cls, op_str(name), cls._binary_op(f))\n\n for name in NUM_BINARY_OPS:\n # only numeric operations have in-place and reflexive variants\n setattr(cls, op_str(\"r\" + name), cls._binary_op(get_op(name), reflexive=True))\n if inplace:\n setattr(cls, op_str(\"i\" + name), cls._inplace_binary_op(get_op(\"i\" + name)))\n\n\ndef inject_all_ops_and_reduce_methods(cls, priority=50, array_only=True):\n # prioritize our operations over those of numpy.ndarray (priority=1)\n # and numpy.matrix (priority=10)\n cls.__array_priority__ = priority\n\n # patch in standard special operations\n for name in UNARY_OPS:\n setattr(cls, op_str(name), cls._unary_op(get_op(name)))\n inject_binary_ops(cls, inplace=True)\n\n # patch in numpy/pandas methods\n for name in NUMPY_UNARY_METHODS:\n setattr(cls, name, cls._unary_op(_method_wrapper(name)))\n\n for name in PANDAS_UNARY_FUNCTIONS:\n f = _func_slash_method_wrapper(getattr(duck_array_ops, name), name=name)\n setattr(cls, name, cls._unary_op(f))\n\n f = _func_slash_method_wrapper(duck_array_ops.around, name=\"round\")\n setattr(cls, \"round\", cls._unary_op(f))\n\n if array_only:\n # these methods don't return arrays of the same shape as the input, so\n # don't try to patch these in for Dataset objects\n for name in NUMPY_SAME_METHODS:\n setattr(cls, name, _values_method_wrapper(name))\n\n inject_reduce_methods(cls)\n inject_cum_methods(cls)\n\n\ndef inject_coarsen_methods(cls):\n # standard numpy reduce methods\n methods = [(name, getattr(duck_array_ops, name)) for name in NAN_REDUCE_METHODS]\n for name, f in methods:\n func = cls._reduce_method(f)\n func.__name__ = name\n 
func.__doc__ = _COARSEN_REDUCE_DOCSTRING_TEMPLATE.format(name=func.__name__)\n setattr(cls, name, func)\n"},{"attributeType":"null","col":8,"comment":"null","endLoc":527,"id":2177,"name":"_accessors","nodeType":"Attribute","startLoc":527,"text":"self._accessors"},{"col":4,"comment":"null","endLoc":462,"header":"def __init__(self, darray)","id":2178,"name":"__init__","nodeType":"Function","startLoc":461,"text":"def __init__(self, darray):\n self._da = darray"},{"col":4,"comment":"\n If the dataarray has 1 dimensional coordinates or comes from a slice\n we can show that info in the title\n\n Parameters\n ----------\n truncate : integer\n maximum number of characters for title\n\n Returns\n -------\n title : string\n Can be used for plot titles\n\n ","endLoc":2589,"header":"def _title_for_slice(self, truncate: int = 50) -> str","id":2179,"name":"_title_for_slice","nodeType":"Function","startLoc":2562,"text":"def _title_for_slice(self, truncate: int = 50) -> str:\n \"\"\"\n If the dataarray has 1 dimensional coordinates or comes from a slice\n we can show that info in the title\n\n Parameters\n ----------\n truncate : integer\n maximum number of characters for title\n\n Returns\n -------\n title : string\n Can be used for plot titles\n\n \"\"\"\n one_dims = []\n for dim, coord in self.coords.items():\n if coord.size == 1:\n one_dims.append(\n \"{dim} = {v}\".format(dim=dim, v=format_item(coord.values))\n )\n\n title = \", \".join(one_dims)\n if len(title) > truncate:\n title = title[: (truncate - 3)] + \"...\"\n\n return title"},{"col":0,"comment":"null","endLoc":214,"header":"def _call_possibly_missing_method(arg, name, args, kwargs)","id":2180,"name":"_call_possibly_missing_method","nodeType":"Function","startLoc":205,"text":"def _call_possibly_missing_method(arg, name, args, kwargs):\n try:\n method = getattr(arg, name)\n except AttributeError:\n duck_array_ops.fail_on_dask_array_input(arg, func_name=name)\n if hasattr(arg, \"data\"):\n duck_array_ops.fail_on_dask_array_input(arg.data, func_name=name)\n raise\n else:\n return method(*args, **kwargs)"},{"attributeType":"null","col":8,"comment":"null","endLoc":531,"id":2181,"name":"_variables","nodeType":"Attribute","startLoc":531,"text":"self._variables"},{"attributeType":"null","col":8,"comment":"null","endLoc":4675,"id":2182,"name":"attrs","nodeType":"Attribute","startLoc":4675,"text":"self.attrs"},{"className":"_UFuncDispatcher","col":0,"comment":"Wrapper for dispatching ufuncs.","endLoc":78,"id":2183,"nodeType":"Class","startLoc":38,"text":"class _UFuncDispatcher:\n \"\"\"Wrapper for dispatching ufuncs.\"\"\"\n\n def __init__(self, name):\n self._name = name\n\n def __call__(self, *args, **kwargs):\n if self._name not in [\"angle\", \"iscomplex\"]:\n _warnings.warn(\n \"xarray.ufuncs will be deprecated when xarray no longer \"\n \"supports versions of numpy older than v1.17. 
Instead, use \"\n \"numpy ufuncs directly.\",\n PendingDeprecationWarning,\n stacklevel=2,\n )\n\n new_args = args\n f = _dask_or_eager_func(self._name, array_args=slice(len(args)))\n if len(args) > 2 or len(args) == 0:\n raise TypeError(\n \"cannot handle %s arguments for %r\" % (len(args), self._name)\n )\n elif len(args) == 1:\n if isinstance(args[0], _xarray_types):\n f = args[0]._unary_op(self)\n else: # len(args) = 2\n p1, p2 = map(_dispatch_priority, args)\n if p1 >= p2:\n if isinstance(args[0], _xarray_types):\n f = args[0]._binary_op(self)\n else:\n if isinstance(args[1], _xarray_types):\n f = args[1]._binary_op(self, reflexive=True)\n new_args = tuple(reversed(args))\n res = f(*new_args, **kwargs)\n if res is NotImplemented:\n raise TypeError(\n \"%r not implemented for types (%r, %r)\"\n % (self._name, type(args[0]), type(args[1]))\n )\n return res"},{"col":4,"comment":"null","endLoc":42,"header":"def __init__(self, name)","id":2184,"name":"__init__","nodeType":"Function","startLoc":41,"text":"def __init__(self, name):\n self._name = name"},{"col":4,"comment":"null","endLoc":78,"header":"def __call__(self, *args, **kwargs)","id":2185,"name":"__call__","nodeType":"Function","startLoc":44,"text":"def __call__(self, *args, **kwargs):\n if self._name not in [\"angle\", \"iscomplex\"]:\n _warnings.warn(\n \"xarray.ufuncs will be deprecated when xarray no longer \"\n \"supports versions of numpy older than v1.17. Instead, use \"\n \"numpy ufuncs directly.\",\n PendingDeprecationWarning,\n stacklevel=2,\n )\n\n new_args = args\n f = _dask_or_eager_func(self._name, array_args=slice(len(args)))\n if len(args) > 2 or len(args) == 0:\n raise TypeError(\n \"cannot handle %s arguments for %r\" % (len(args), self._name)\n )\n elif len(args) == 1:\n if isinstance(args[0], _xarray_types):\n f = args[0]._unary_op(self)\n else: # len(args) = 2\n p1, p2 = map(_dispatch_priority, args)\n if p1 >= p2:\n if isinstance(args[0], _xarray_types):\n f = args[0]._binary_op(self)\n else:\n if isinstance(args[1], _xarray_types):\n f = args[1]._binary_op(self, reflexive=True)\n new_args = tuple(reversed(args))\n res = f(*new_args, **kwargs)\n if res is NotImplemented:\n raise TypeError(\n \"%r not implemented for types (%r, %r)\"\n % (self._name, type(args[0]), type(args[1]))\n )\n return res"},{"fileName":"alignment.py","filePath":"xarray/core","id":2187,"nodeType":"File","text":"import functools\nimport operator\nfrom collections import defaultdict\nfrom contextlib import suppress\nfrom typing import TYPE_CHECKING, Any, Dict, Hashable, Mapping, Optional, Tuple, Union\n\nimport numpy as np\nimport pandas as pd\n\nfrom . 
import dtypes, utils\nfrom .indexing import get_indexer_nd\nfrom .utils import is_dict_like, is_full_slice\nfrom .variable import IndexVariable, Variable\n\nif TYPE_CHECKING:\n from .dataarray import DataArray\n from .dataset import Dataset\n\n\ndef _get_joiner(join):\n if join == \"outer\":\n return functools.partial(functools.reduce, operator.or_)\n elif join == \"inner\":\n return functools.partial(functools.reduce, operator.and_)\n elif join == \"left\":\n return operator.itemgetter(0)\n elif join == \"right\":\n return operator.itemgetter(-1)\n elif join == \"exact\":\n # We cannot return a function to \"align\" in this case, because it needs\n # access to the dimension name to give a good error message.\n return None\n elif join == \"override\":\n # We rewrite all indexes and then use join='left'\n return operator.itemgetter(0)\n else:\n raise ValueError(\"invalid value for join: %s\" % join)\n\n\ndef _override_indexes(objects, all_indexes, exclude):\n for dim, dim_indexes in all_indexes.items():\n if dim not in exclude:\n lengths = {index.size for index in dim_indexes}\n if len(lengths) != 1:\n raise ValueError(\n \"Indexes along dimension %r don't have the same length.\"\n \" Cannot use join='override'.\" % dim\n )\n\n objects = list(objects)\n for idx, obj in enumerate(objects[1:]):\n new_indexes = {}\n for dim in obj.dims:\n if dim not in exclude:\n new_indexes[dim] = all_indexes[dim][0]\n objects[idx + 1] = obj._overwrite_indexes(new_indexes)\n\n return objects\n\n\ndef align(\n *objects,\n join=\"inner\",\n copy=True,\n indexes=None,\n exclude=frozenset(),\n fill_value=dtypes.NA\n):\n \"\"\"\n Given any number of Dataset and/or DataArray objects, returns new\n objects with aligned indexes and dimension sizes.\n\n Array from the aligned objects are suitable as input to mathematical\n operators, because along each dimension they have the same index and size.\n\n Missing values (if ``join != 'inner'``) are filled with ``fill_value``.\n The default fill value is NaN.\n\n Parameters\n ----------\n *objects : Dataset or DataArray\n Objects to align.\n join : {'outer', 'inner', 'left', 'right', 'exact', 'override'}, optional\n Method for joining the indexes of the passed objects along each\n dimension:\n\n - 'outer': use the union of object indexes\n - 'inner': use the intersection of object indexes\n - 'left': use indexes from the first object with each dimension\n - 'right': use indexes from the last object with each dimension\n - 'exact': instead of aligning, raise `ValueError` when indexes to be\n aligned are not equal\n - 'override': if indexes are of same size, rewrite indexes to be\n those of the first object with that dimension. Indexes for the same\n dimension must have the same size in all objects.\n copy : bool, optional\n If ``copy=True``, data in the return values is always copied. 
If\n ``copy=False`` and reindexing is unnecessary, or can be performed with\n only slice operations, then the output may share memory with the input.\n In either case, new xarray objects are always returned.\n indexes : dict-like, optional\n Any indexes explicitly provided with the `indexes` argument should be\n used in preference to the aligned indexes.\n exclude : sequence of str, optional\n Dimensions that must be excluded from alignment\n fill_value : scalar, optional\n Value to use for newly missing values\n\n Returns\n -------\n aligned : same as *objects\n Tuple of objects with aligned coordinates.\n\n Raises\n ------\n ValueError\n If any dimensions without labels on the arguments have different sizes,\n or a different size than the size of the aligned dimension labels.\n\n Examples\n --------\n\n >>> import xarray as xr\n >>> x = xr.DataArray([[25, 35], [10, 24]], dims=('lat', 'lon'),\n ... coords={'lat': [35., 40.], 'lon': [100., 120.]})\n >>> y = xr.DataArray([[20, 5], [7, 13]], dims=('lat', 'lon'),\n ... coords={'lat': [35., 42.], 'lon': [100., 120.]})\n\n >>> x\n \n array([[25, 35],\n [10, 24]])\n Coordinates:\n * lat (lat) float64 35.0 40.0\n * lon (lon) float64 100.0 120.0\n\n >>> y\n \n array([[20, 5],\n [ 7, 13]])\n Coordinates:\n * lat (lat) float64 35.0 42.0\n * lon (lon) float64 100.0 120.0\n\n >>> a, b = xr.align(x, y)\n >>> a\n \n array([[25, 35]])\n Coordinates:\n * lat (lat) float64 35.0\n * lon (lon) float64 100.0 120.0\n >>> b\n \n array([[20, 5]])\n Coordinates:\n * lat (lat) float64 35.0\n * lon (lon) float64 100.0 120.0\n\n >>> a, b = xr.align(x, y, join='outer')\n >>> a\n \n array([[25., 35.],\n [10., 24.],\n [nan, nan]])\n Coordinates:\n * lat (lat) float64 35.0 40.0 42.0\n * lon (lon) float64 100.0 120.0\n >>> b\n \n array([[20., 5.],\n [nan, nan],\n [ 7., 13.]])\n Coordinates:\n * lat (lat) float64 35.0 40.0 42.0\n * lon (lon) float64 100.0 120.0\n\n >>> a, b = xr.align(x, y, join='outer', fill_value=-999)\n >>> a\n \n array([[ 25, 35],\n [ 10, 24],\n [-999, -999]])\n Coordinates:\n * lat (lat) float64 35.0 40.0 42.0\n * lon (lon) float64 100.0 120.0\n >>> b\n \n array([[ 20, 5],\n [-999, -999],\n [ 7, 13]])\n Coordinates:\n * lat (lat) float64 35.0 40.0 42.0\n * lon (lon) float64 100.0 120.0\n\n >>> a, b = xr.align(x, y, join='left')\n >>> a\n \n array([[25, 35],\n [10, 24]])\n Coordinates:\n * lat (lat) float64 35.0 40.0\n * lon (lon) float64 100.0 120.0\n >>> b\n \n array([[20., 5.],\n [nan, nan]])\n Coordinates:\n * lat (lat) float64 35.0 40.0\n * lon (lon) float64 100.0 120.0\n\n >>> a, b = xr.align(x, y, join='right')\n >>> a\n \n array([[25., 35.],\n [nan, nan]])\n Coordinates:\n * lat (lat) float64 35.0 42.0\n * lon (lon) float64 100.0 120.0\n >>> b\n \n array([[20, 5],\n [ 7, 13]])\n Coordinates:\n * lat (lat) float64 35.0 42.0\n * lon (lon) float64 100.0 120.0\n\n >>> a, b = xr.align(x, y, join='exact')\n Traceback (most recent call last):\n ...\n \"indexes along dimension {!r} are not equal\".format(dim)\n ValueError: indexes along dimension 'lat' are not equal\n\n >>> a, b = xr.align(x, y, join='override')\n >>> a\n \n array([[25, 35],\n [10, 24]])\n Coordinates:\n * lat (lat) float64 35.0 40.0\n * lon (lon) float64 100.0 120.0\n >>> b\n \n array([[20, 5],\n [ 7, 13]])\n Coordinates:\n * lat (lat) float64 35.0 40.0\n * lon (lon) float64 100.0 120.0\n\n \"\"\"\n if indexes is None:\n indexes = {}\n\n if not indexes and len(objects) == 1:\n # fast path for the trivial case\n obj, = objects\n return (obj.copy(deep=copy),)\n\n all_indexes = 
defaultdict(list)\n unlabeled_dim_sizes = defaultdict(set)\n for obj in objects:\n for dim in obj.dims:\n if dim not in exclude:\n try:\n index = obj.indexes[dim]\n except KeyError:\n unlabeled_dim_sizes[dim].add(obj.sizes[dim])\n else:\n all_indexes[dim].append(index)\n\n if join == \"override\":\n objects = _override_indexes(objects, all_indexes, exclude)\n\n # We don't reindex over dimensions with all equal indexes for two reasons:\n # - It's faster for the usual case (already aligned objects).\n # - It ensures it's possible to do operations that don't require alignment\n # on indexes with duplicate values (which cannot be reindexed with\n # pandas). This is useful, e.g., for overwriting such duplicate indexes.\n joiner = _get_joiner(join)\n joined_indexes = {}\n for dim, matching_indexes in all_indexes.items():\n if dim in indexes:\n index = utils.safe_cast_to_index(indexes[dim])\n if (\n any(not index.equals(other) for other in matching_indexes)\n or dim in unlabeled_dim_sizes\n ):\n joined_indexes[dim] = index\n else:\n if (\n any(\n not matching_indexes[0].equals(other)\n for other in matching_indexes[1:]\n )\n or dim in unlabeled_dim_sizes\n ):\n if join == \"exact\":\n raise ValueError(\n \"indexes along dimension {!r} are not equal\".format(dim)\n )\n index = joiner(matching_indexes)\n joined_indexes[dim] = index\n else:\n index = matching_indexes[0]\n\n if dim in unlabeled_dim_sizes:\n unlabeled_sizes = unlabeled_dim_sizes[dim]\n labeled_size = index.size\n if len(unlabeled_sizes | {labeled_size}) > 1:\n raise ValueError(\n \"arguments without labels along dimension %r cannot be \"\n \"aligned because they have different dimension size(s) %r \"\n \"than the size of the aligned dimension labels: %r\"\n % (dim, unlabeled_sizes, labeled_size)\n )\n\n for dim in unlabeled_dim_sizes:\n if dim not in all_indexes:\n sizes = unlabeled_dim_sizes[dim]\n if len(sizes) > 1:\n raise ValueError(\n \"arguments without labels along dimension %r cannot be \"\n \"aligned because they have different dimension sizes: %r\"\n % (dim, sizes)\n )\n\n result = []\n for obj in objects:\n valid_indexers = {k: v for k, v in joined_indexes.items() if k in obj.dims}\n if not valid_indexers:\n # fast path for no reindexing necessary\n new_obj = obj.copy(deep=copy)\n else:\n new_obj = obj.reindex(copy=copy, fill_value=fill_value, **valid_indexers)\n new_obj.encoding = obj.encoding\n result.append(new_obj)\n\n return tuple(result)\n\n\ndef deep_align(\n objects,\n join=\"inner\",\n copy=True,\n indexes=None,\n exclude=frozenset(),\n raise_on_invalid=True,\n fill_value=dtypes.NA,\n):\n \"\"\"Align objects for merging, recursing into dictionary values.\n\n This function is not public API.\n \"\"\"\n from .dataarray import DataArray\n from .dataset import Dataset\n\n if indexes is None:\n indexes = {}\n\n def is_alignable(obj):\n return isinstance(obj, (DataArray, Dataset))\n\n positions = []\n keys = []\n out = []\n targets = []\n no_key = object()\n not_replaced = object()\n for position, variables in enumerate(objects):\n if is_alignable(variables):\n positions.append(position)\n keys.append(no_key)\n targets.append(variables)\n out.append(not_replaced)\n elif is_dict_like(variables):\n current_out = {}\n for k, v in variables.items():\n if is_alignable(v) and k not in indexes:\n # Skip variables in indexes for alignment, because these\n # should to be overwritten instead:\n # https://github.com/pydata/xarray/issues/725\n # https://github.com/pydata/xarray/issues/3377\n # TODO(shoyer): doing this here feels 
super-hacky -- can we\n # move it explicitly into merge instead?\n positions.append(position)\n keys.append(k)\n targets.append(v)\n current_out[k] = not_replaced\n else:\n current_out[k] = v\n out.append(current_out)\n elif raise_on_invalid:\n raise ValueError(\n \"object to align is neither an xarray.Dataset, \"\n \"an xarray.DataArray nor a dictionary: {!r}\".format(variables)\n )\n else:\n out.append(variables)\n\n aligned = align(\n *targets,\n join=join,\n copy=copy,\n indexes=indexes,\n exclude=exclude,\n fill_value=fill_value\n )\n\n for position, key, aligned_obj in zip(positions, keys, aligned):\n if key is no_key:\n out[position] = aligned_obj\n else:\n out[position][key] = aligned_obj\n\n # something went wrong: we should have replaced all sentinel values\n for arg in out:\n assert arg is not not_replaced\n if is_dict_like(arg):\n assert all(value is not not_replaced for value in arg.values())\n\n return out\n\n\ndef reindex_like_indexers(\n target: \"Union[DataArray, Dataset]\", other: \"Union[DataArray, Dataset]\"\n) -> Dict[Hashable, pd.Index]:\n \"\"\"Extract indexers to align target with other.\n\n Not public API.\n\n Parameters\n ----------\n target : Dataset or DataArray\n Object to be aligned.\n other : Dataset or DataArray\n Object to be aligned with.\n\n Returns\n -------\n Dict[Hashable, pandas.Index] providing indexes for reindex keyword\n arguments.\n\n Raises\n ------\n ValueError\n If any dimensions without labels have different sizes.\n \"\"\"\n indexers = {k: v for k, v in other.indexes.items() if k in target.dims}\n\n for dim in other.dims:\n if dim not in indexers and dim in target.dims:\n other_size = other.sizes[dim]\n target_size = target.sizes[dim]\n if other_size != target_size:\n raise ValueError(\n \"different size for unlabeled \"\n \"dimension on argument %r: %r vs %r\"\n % (dim, other_size, target_size)\n )\n return indexers\n\n\ndef reindex_variables(\n variables: Mapping[Any, Variable],\n sizes: Mapping[Any, int],\n indexes: Mapping[Any, pd.Index],\n indexers: Mapping,\n method: Optional[str] = None,\n tolerance: Any = None,\n copy: bool = True,\n fill_value: Optional[Any] = dtypes.NA,\n) -> Tuple[Dict[Hashable, Variable], Dict[Hashable, pd.Index]]:\n \"\"\"Conform a dictionary of aligned variables onto a new set of variables,\n filling in missing values with NaN.\n\n Not public API.\n\n Parameters\n ----------\n variables : dict-like\n Dictionary of xarray.Variable objects.\n sizes : dict-like\n Dictionary from dimension names to integer sizes.\n indexes : dict-like\n Dictionary of indexes associated with variables.\n indexers : dict\n Dictionary with keys given by dimension names and values given by\n arrays of coordinates tick labels. Any mis-matched coordinate values\n will be filled in with NaN, and any mis-matched dimension names will\n simply be ignored.\n method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional\n Method to use for filling index values in ``indexers`` not found in\n this dataset:\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact matches.\n The values of the index at the matching locations must satisfy the\n equation ``abs(index[indexer] - target) <= tolerance``.\n copy : bool, optional\n If ``copy=True``, data in the return values is always copied. 
If\n ``copy=False`` and reindexing is unnecessary, or can be performed\n with only slice operations, then the output may share memory with\n the input. In either case, new xarray objects are always returned.\n fill_value : scalar, optional\n Value to use for newly missing values\n\n Returns\n -------\n reindexed : dict\n Dict of reindexed variables.\n new_indexes : dict\n Dict of indexes associated with the reindexed variables.\n \"\"\"\n from .dataarray import DataArray\n\n # create variables for the new dataset\n reindexed: Dict[Hashable, Variable] = {}\n\n # build up indexers for assignment along each dimension\n int_indexers = {}\n new_indexes = dict(indexes)\n masked_dims = set()\n unchanged_dims = set()\n\n for dim, indexer in indexers.items():\n if isinstance(indexer, DataArray) and indexer.dims != (dim,):\n raise ValueError(\n \"Indexer has dimensions {:s} that are different \"\n \"from that to be indexed along {:s}\".format(str(indexer.dims), dim)\n )\n\n target = new_indexes[dim] = utils.safe_cast_to_index(indexers[dim])\n\n if dim in indexes:\n index = indexes[dim]\n\n if not index.is_unique:\n raise ValueError(\n \"cannot reindex or align along dimension %r because the \"\n \"index has duplicate values\" % dim\n )\n\n int_indexer = get_indexer_nd(index, target, method, tolerance)\n\n # We uses negative values from get_indexer_nd to signify\n # values that are missing in the index.\n if (int_indexer < 0).any():\n masked_dims.add(dim)\n elif np.array_equal(int_indexer, np.arange(len(index))):\n unchanged_dims.add(dim)\n\n int_indexers[dim] = int_indexer\n\n if dim in variables:\n var = variables[dim]\n args: tuple = (var.attrs, var.encoding)\n else:\n args = ()\n reindexed[dim] = IndexVariable((dim,), target, *args)\n\n for dim in sizes:\n if dim not in indexes and dim in indexers:\n existing_size = sizes[dim]\n new_size = indexers[dim].size\n if existing_size != new_size:\n raise ValueError(\n \"cannot reindex or align along dimension %r without an \"\n \"index because its size %r is different from the size of \"\n \"the new index %r\" % (dim, existing_size, new_size)\n )\n\n for name, var in variables.items():\n if name not in indexers:\n key = tuple(\n slice(None) if d in unchanged_dims else int_indexers.get(d, slice(None))\n for d in var.dims\n )\n needs_masking = any(d in masked_dims for d in var.dims)\n\n if needs_masking:\n new_var = var._getitem_with_mask(key, fill_value=fill_value)\n elif all(is_full_slice(k) for k in key):\n # no reindexing necessary\n # here we need to manually deal with copying data, since\n # we neither created a new ndarray nor used fancy indexing\n new_var = var.copy(deep=copy)\n else:\n new_var = var[key]\n\n reindexed[name] = new_var\n\n return reindexed, new_indexes\n\n\ndef _get_broadcast_dims_map_common_coords(args, exclude):\n\n common_coords = {}\n dims_map = {}\n for arg in args:\n for dim in arg.dims:\n if dim not in common_coords and dim not in exclude:\n dims_map[dim] = arg.sizes[dim]\n if dim in arg.coords:\n common_coords[dim] = arg.coords[dim].variable\n\n return dims_map, common_coords\n\n\ndef _broadcast_helper(arg, exclude, dims_map, common_coords):\n\n from .dataarray import DataArray\n from .dataset import Dataset\n\n def _set_dims(var):\n # Add excluded dims to a copy of dims_map\n var_dims_map = dims_map.copy()\n for dim in exclude:\n with suppress(ValueError):\n # ignore dim not in var.dims\n var_dims_map[dim] = var.shape[var.dims.index(dim)]\n\n return var.set_dims(var_dims_map)\n\n def _broadcast_array(array):\n data = 
_set_dims(array.variable)\n coords = dict(array.coords)\n coords.update(common_coords)\n return DataArray(data, coords, data.dims, name=array.name, attrs=array.attrs)\n\n def _broadcast_dataset(ds):\n data_vars = {k: _set_dims(ds.variables[k]) for k in ds.data_vars}\n coords = dict(ds.coords)\n coords.update(common_coords)\n return Dataset(data_vars, coords, ds.attrs)\n\n if isinstance(arg, DataArray):\n return _broadcast_array(arg)\n elif isinstance(arg, Dataset):\n return _broadcast_dataset(arg)\n else:\n raise ValueError(\"all input must be Dataset or DataArray objects\")\n\n\ndef broadcast(*args, exclude=None):\n \"\"\"Explicitly broadcast any number of DataArray or Dataset objects against\n one another.\n\n xarray objects automatically broadcast against each other in arithmetic\n operations, so this function should not be necessary for normal use.\n\n If no change is needed, the input data is returned to the output without\n being copied.\n\n Parameters\n ----------\n *args : DataArray or Dataset objects\n Arrays to broadcast against each other.\n exclude : sequence of str, optional\n Dimensions that must not be broadcasted\n\n Returns\n -------\n broadcast : tuple of xarray objects\n The same data as the input arrays, but with additional dimensions\n inserted so that all data arrays have the same dimensions and shape.\n\n Examples\n --------\n\n Broadcast two data arrays against one another to fill out their dimensions:\n\n >>> a = xr.DataArray([1, 2, 3], dims='x')\n >>> b = xr.DataArray([5, 6], dims='y')\n >>> a\n \n array([1, 2, 3])\n Coordinates:\n * x (x) int64 0 1 2\n >>> b\n \n array([5, 6])\n Coordinates:\n * y (y) int64 0 1\n >>> a2, b2 = xr.broadcast(a, b)\n >>> a2\n \n array([[1, 1],\n [2, 2],\n [3, 3]])\n Coordinates:\n * x (x) int64 0 1 2\n * y (y) int64 0 1\n >>> b2\n \n array([[5, 6],\n [5, 6],\n [5, 6]])\n Coordinates:\n * y (y) int64 0 1\n * x (x) int64 0 1 2\n\n Fill out the dimensions of all data variables in a dataset:\n\n >>> ds = xr.Dataset({'a': a, 'b': b})\n >>> ds2, = xr.broadcast(ds) # use tuple unpacking to extract one dataset\n >>> ds2\n \n Dimensions: (x: 3, y: 2)\n Coordinates:\n * x (x) int64 0 1 2\n * y (y) int64 0 1\n Data variables:\n a (x, y) int64 1 1 2 2 3 3\n b (x, y) int64 5 6 5 6 5 6\n \"\"\"\n\n if exclude is None:\n exclude = set()\n args = align(*args, join=\"outer\", copy=False, exclude=exclude)\n\n dims_map, common_coords = _get_broadcast_dims_map_common_coords(args, exclude)\n result = []\n for arg in args:\n result.append(_broadcast_helper(arg, exclude, dims_map, common_coords))\n\n return tuple(result)\n"},{"col":0,"comment":"Like np.array_equal, but also allows values to be NaN in either or both\n arrays\n ","endLoc":211,"header":"def array_notnull_equiv(arr1, arr2)","id":2189,"name":"array_notnull_equiv","nodeType":"Function","startLoc":200,"text":"def array_notnull_equiv(arr1, arr2):\n \"\"\"Like np.array_equal, but also allows values to be NaN in either or both\n arrays\n \"\"\"\n arr1 = asarray(arr1)\n arr2 = asarray(arr2)\n if arr1.shape != arr2.shape:\n return False\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \"In the future, 'NAT == x'\")\n flag_array = (arr1 == arr2) | isnull(arr1) | isnull(arr2)\n return bool(flag_array.all())"},{"col":0,"comment":"","endLoc":1,"header":"alignment.py#","id":2190,"name":"","nodeType":"Function","startLoc":1,"text":"if TYPE_CHECKING:\n from .dataarray import DataArray\n from .dataset import 
Dataset"},{"fileName":"accessor_str.py","filePath":"xarray/core","id":2191,"nodeType":"File","text":"# The StringAccessor class defined below is an adaptation of the\n# pandas string methods source code (see pd.core.strings)\n\n# For reference, here is a copy of the pandas copyright notice:\n\n# (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team\n# All rights reserved.\n\n# Copyright (c) 2008-2011 AQR Capital Management, LLC\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n\n# * Neither the name of the copyright holder nor the names of any\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport codecs\nimport re\nimport textwrap\n\nimport numpy as np\n\nfrom .computation import apply_ufunc\n\n_cpython_optimized_encoders = (\n \"utf-8\",\n \"utf8\",\n \"latin-1\",\n \"latin1\",\n \"iso-8859-1\",\n \"mbcs\",\n \"ascii\",\n)\n_cpython_optimized_decoders = _cpython_optimized_encoders + (\"utf-16\", \"utf-32\")\n\n\ndef _is_str_like(x):\n return isinstance(x, str) or isinstance(x, bytes)\n\n\nclass StringAccessor:\n \"\"\"Vectorized string functions for string-like arrays.\n\n Similar to pandas, fields can be accessed through the `.str` attribute\n for applicable DataArrays.\n\n >>> da = xr.DataArray(['some', 'text', 'in', 'an', 'array'])\n >>> ds.str.len()\n \n array([4, 4, 2, 2, 5])\n Dimensions without coordinates: dim_0\n\n \"\"\"\n\n __slots__ = (\"_obj\",)\n\n def __init__(self, obj):\n self._obj = obj\n\n def _apply(self, f, dtype=None):\n # TODO handling of na values ?\n if dtype is None:\n dtype = self._obj.dtype\n\n g = np.vectorize(f, otypes=[dtype])\n return apply_ufunc(g, self._obj, dask=\"parallelized\", output_dtypes=[dtype])\n\n def len(self):\n \"\"\"\n Compute the length of each element in the array.\n\n Returns\n -------\n lengths array : array of int\n \"\"\"\n return self._apply(len, dtype=int)\n\n def __getitem__(self, key):\n if isinstance(key, slice):\n return self.slice(start=key.start, stop=key.stop, step=key.step)\n else:\n return self.get(key)\n\n def get(self, i):\n \"\"\"\n Extract element from indexable in each element in the array.\n\n Parameters\n ----------\n i : int\n Position of element to extract.\n default : 
optional\n Value for out-of-range index. If not specified (None) defaults to\n an empty string.\n\n Returns\n -------\n items : array of objects\n \"\"\"\n obj = slice(-1, None) if i == -1 else slice(i, i + 1)\n return self._apply(lambda x: x[obj])\n\n def slice(self, start=None, stop=None, step=None):\n \"\"\"\n Slice substrings from each element in the array.\n\n Parameters\n ----------\n start : int, optional\n Start position for slice operation.\n stop : int, optional\n Stop position for slice operation.\n step : int, optional\n Step size for slice operation.\n\n Returns\n -------\n sliced strings : same type as values\n \"\"\"\n s = slice(start, stop, step)\n f = lambda x: x[s]\n return self._apply(f)\n\n def slice_replace(self, start=None, stop=None, repl=\"\"):\n \"\"\"\n Replace a positional slice of a string with another value.\n\n Parameters\n ----------\n start : int, optional\n Left index position to use for the slice. If not specified (None),\n the slice is unbounded on the left, i.e. slice from the start\n of the string.\n stop : int, optional\n Right index position to use for the slice. If not specified (None),\n the slice is unbounded on the right, i.e. slice until the\n end of the string.\n repl : str, optional\n String for replacement. If not specified, the sliced region\n is replaced with an empty string.\n\n Returns\n -------\n replaced : same type as values\n \"\"\"\n repl = self._obj.dtype.type(repl)\n\n def f(x):\n if len(x[start:stop]) == 0:\n local_stop = start\n else:\n local_stop = stop\n y = self._obj.dtype.type(\"\")\n if start is not None:\n y += x[:start]\n y += repl\n if stop is not None:\n y += x[local_stop:]\n return y\n\n return self._apply(f)\n\n def capitalize(self):\n \"\"\"\n Convert strings in the array to be capitalized.\n\n Returns\n -------\n capitalized : same type as values\n \"\"\"\n return self._apply(lambda x: x.capitalize())\n\n def lower(self):\n \"\"\"\n Convert strings in the array to lowercase.\n\n Returns\n -------\n lowerd : same type as values\n \"\"\"\n return self._apply(lambda x: x.lower())\n\n def swapcase(self):\n \"\"\"\n Convert strings in the array to be swapcased.\n\n Returns\n -------\n swapcased : same type as values\n \"\"\"\n return self._apply(lambda x: x.swapcase())\n\n def title(self):\n \"\"\"\n Convert strings in the array to titlecase.\n\n Returns\n -------\n titled : same type as values\n \"\"\"\n return self._apply(lambda x: x.title())\n\n def upper(self):\n \"\"\"\n Convert strings in the array to uppercase.\n\n Returns\n -------\n uppered : same type as values\n \"\"\"\n return self._apply(lambda x: x.upper())\n\n def isalnum(self):\n \"\"\"\n Check whether all characters in each string are alphanumeric.\n\n Returns\n -------\n isalnum : array of bool\n Array of boolean values with the same shape as the original array.\n \"\"\"\n return self._apply(lambda x: x.isalnum(), dtype=bool)\n\n def isalpha(self):\n \"\"\"\n Check whether all characters in each string are alphabetic.\n\n Returns\n -------\n isalpha : array of bool\n Array of boolean values with the same shape as the original array.\n \"\"\"\n return self._apply(lambda x: x.isalpha(), dtype=bool)\n\n def isdecimal(self):\n \"\"\"\n Check whether all characters in each string are decimal.\n\n Returns\n -------\n isdecimal : array of bool\n Array of boolean values with the same shape as the original array.\n \"\"\"\n return self._apply(lambda x: x.isdecimal(), dtype=bool)\n\n def isdigit(self):\n \"\"\"\n Check whether all characters in each string are 
digits.\n\n Returns\n -------\n isdigit : array of bool\n Array of boolean values with the same shape as the original array.\n \"\"\"\n return self._apply(lambda x: x.isdigit(), dtype=bool)\n\n def islower(self):\n \"\"\"\n Check whether all characters in each string are lowercase.\n\n Returns\n -------\n islower : array of bool\n Array of boolean values with the same shape as the original array.\n \"\"\"\n return self._apply(lambda x: x.islower(), dtype=bool)\n\n def isnumeric(self):\n \"\"\"\n Check whether all characters in each string are numeric.\n\n Returns\n -------\n isnumeric : array of bool\n Array of boolean values with the same shape as the original array.\n \"\"\"\n return self._apply(lambda x: x.isnumeric(), dtype=bool)\n\n def isspace(self):\n \"\"\"\n Check whether all characters in each string are spaces.\n\n Returns\n -------\n isspace : array of bool\n Array of boolean values with the same shape as the original array.\n \"\"\"\n return self._apply(lambda x: x.isspace(), dtype=bool)\n\n def istitle(self):\n \"\"\"\n Check whether all characters in each string are titlecase.\n\n Returns\n -------\n istitle : array of bool\n Array of boolean values with the same shape as the original array.\n \"\"\"\n return self._apply(lambda x: x.istitle(), dtype=bool)\n\n def isupper(self):\n \"\"\"\n Check whether all characters in each string are uppercase.\n\n Returns\n -------\n isupper : array of bool\n Array of boolean values with the same shape as the original array.\n \"\"\"\n return self._apply(lambda x: x.isupper(), dtype=bool)\n\n def count(self, pat, flags=0):\n \"\"\"\n Count occurrences of pattern in each string of the array.\n\n This function is used to count the number of times a particular regex\n pattern is repeated in each of the string elements of the\n :class:`~xarray.DatArray`.\n\n Parameters\n ----------\n pat : str\n Valid regular expression.\n flags : int, default 0, meaning no flags\n Flags for the `re` module. For a complete list, `see here\n `_.\n\n Returns\n -------\n counts : array of int\n \"\"\"\n pat = self._obj.dtype.type(pat)\n regex = re.compile(pat, flags=flags)\n f = lambda x: len(regex.findall(x))\n return self._apply(f, dtype=int)\n\n def startswith(self, pat):\n \"\"\"\n Test if the start of each string element matches a pattern.\n\n Parameters\n ----------\n pat : str\n Character sequence. Regular expressions are not accepted.\n\n Returns\n -------\n startswith : array of bool\n An array of booleans indicating whether the given pattern matches\n the start of each string element.\n \"\"\"\n pat = self._obj.dtype.type(pat)\n f = lambda x: x.startswith(pat)\n return self._apply(f, dtype=bool)\n\n def endswith(self, pat):\n \"\"\"\n Test if the end of each string element matches a pattern.\n\n Parameters\n ----------\n pat : str\n Character sequence. 
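For orientation, a minimal sketch of the vectorized predicate and counting methods described here, on a small in-memory (numpy-backed) string DataArray; the commented values are illustrative, not captured output:

import xarray as xr

# small numpy-backed string array; dtype is a fixed-width unicode type ('<U6')
da = xr.DataArray(["Foo123", "bar", "  ", "BAZ"], dims="x")

da.str.isalpha()          # [False, True, False, True]
da.str.isupper()          # [False, False, False, True]  (no cased characters -> False)
da.str.isspace()          # [False, False, True, False]
da.str.count(r"\d")       # regex matches per element -> [3, 0, 0, 0]
da.str.startswith("Foo")  # literal prefix test -> [True, False, False, False]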
Regular expressions are not accepted.\n\n Returns\n -------\n endswith : array of bool\n A Series of booleans indicating whether the given pattern matches\n the end of each string element.\n \"\"\"\n pat = self._obj.dtype.type(pat)\n f = lambda x: x.endswith(pat)\n return self._apply(f, dtype=bool)\n\n def pad(self, width, side=\"left\", fillchar=\" \"):\n \"\"\"\n Pad strings in the array up to width.\n\n Parameters\n ----------\n width : int\n Minimum width of resulting string; additional characters will be\n filled with character defined in `fillchar`.\n side : {'left', 'right', 'both'}, default 'left'\n Side from which to fill resulting string.\n fillchar : str, default ' '\n Additional character for filling, default is whitespace.\n\n Returns\n -------\n filled : same type as values\n Array with a minimum number of char in each element.\n \"\"\"\n width = int(width)\n fillchar = self._obj.dtype.type(fillchar)\n if len(fillchar) != 1:\n raise TypeError(\"fillchar must be a character, not str\")\n\n if side == \"left\":\n f = lambda s: s.rjust(width, fillchar)\n elif side == \"right\":\n f = lambda s: s.ljust(width, fillchar)\n elif side == \"both\":\n f = lambda s: s.center(width, fillchar)\n else: # pragma: no cover\n raise ValueError(\"Invalid side\")\n\n return self._apply(f)\n\n def center(self, width, fillchar=\" \"):\n \"\"\"\n Filling left and right side of strings in the array with an\n additional character.\n\n Parameters\n ----------\n width : int\n Minimum width of resulting string; additional characters will be\n filled with ``fillchar``\n fillchar : str\n Additional character for filling, default is whitespace\n\n Returns\n -------\n filled : same type as values\n \"\"\"\n return self.pad(width, side=\"both\", fillchar=fillchar)\n\n def ljust(self, width, fillchar=\" \"):\n \"\"\"\n Filling right side of strings in the array with an additional\n character.\n\n Parameters\n ----------\n width : int\n Minimum width of resulting string; additional characters will be\n filled with ``fillchar``\n fillchar : str\n Additional character for filling, default is whitespace\n\n Returns\n -------\n filled : same type as values\n \"\"\"\n return self.pad(width, side=\"right\", fillchar=fillchar)\n\n def rjust(self, width, fillchar=\" \"):\n \"\"\"\n Filling left side of strings in the array with an additional character.\n\n Parameters\n ----------\n width : int\n Minimum width of resulting string; additional characters will be\n filled with ``fillchar``\n fillchar : str\n Additional character for filling, default is whitespace\n\n Returns\n -------\n filled : same type as values\n \"\"\"\n return self.pad(width, side=\"left\", fillchar=fillchar)\n\n def zfill(self, width):\n \"\"\"\n Pad strings in the array by prepending '0' characters.\n\n Strings in the array are padded with '0' characters on the\n left of the string to reach a total string length `width`. 
Strings\n in the array with length greater or equal to `width` are unchanged.\n\n Parameters\n ----------\n width : int\n Minimum length of resulting string; strings with length less\n than `width` be prepended with '0' characters.\n\n Returns\n -------\n filled : same type as values\n \"\"\"\n return self.pad(width, side=\"left\", fillchar=\"0\")\n\n def contains(self, pat, case=True, flags=0, regex=True):\n \"\"\"\n Test if pattern or regex is contained within a string of the array.\n\n Return boolean array based on whether a given pattern or regex is\n contained within a string of the array.\n\n Parameters\n ----------\n pat : str\n Character sequence or regular expression.\n case : bool, default True\n If True, case sensitive.\n flags : int, default 0 (no flags)\n Flags to pass through to the re module, e.g. re.IGNORECASE.\n regex : bool, default True\n If True, assumes the pat is a regular expression.\n If False, treats the pat as a literal string.\n\n Returns\n -------\n contains : array of bool\n An array of boolean values indicating whether the\n given pattern is contained within the string of each element\n of the array.\n \"\"\"\n pat = self._obj.dtype.type(pat)\n if regex:\n if not case:\n flags |= re.IGNORECASE\n\n regex = re.compile(pat, flags=flags)\n\n if regex.groups > 0: # pragma: no cover\n raise ValueError(\"This pattern has match groups.\")\n\n f = lambda x: bool(regex.search(x))\n else:\n if case:\n f = lambda x: pat in x\n else:\n uppered = self._obj.str.upper()\n return uppered.str.contains(pat.upper(), regex=False)\n\n return self._apply(f, dtype=bool)\n\n def match(self, pat, case=True, flags=0):\n \"\"\"\n Determine if each string matches a regular expression.\n\n Parameters\n ----------\n pat : string\n Character sequence or regular expression\n case : boolean, default True\n If True, case sensitive\n flags : int, default 0 (no flags)\n re module flags, e.g. 
re.IGNORECASE\n\n Returns\n -------\n matched : array of bool\n \"\"\"\n if not case:\n flags |= re.IGNORECASE\n\n pat = self._obj.dtype.type(pat)\n regex = re.compile(pat, flags=flags)\n f = lambda x: bool(regex.match(x))\n return self._apply(f, dtype=bool)\n\n def strip(self, to_strip=None, side=\"both\"):\n \"\"\"\n Remove leading and trailing characters.\n\n Strip whitespaces (including newlines) or a set of specified characters\n from each string in the array from left and/or right sides.\n\n Parameters\n ----------\n to_strip : str or None, default None\n Specifying the set of characters to be removed.\n All combinations of this set of characters will be stripped.\n If None then whitespaces are removed.\n side : {'left', 'right', 'both'}, default 'left'\n Side from which to strip.\n\n Returns\n -------\n stripped : same type as values\n \"\"\"\n if to_strip is not None:\n to_strip = self._obj.dtype.type(to_strip)\n\n if side == \"both\":\n f = lambda x: x.strip(to_strip)\n elif side == \"left\":\n f = lambda x: x.lstrip(to_strip)\n elif side == \"right\":\n f = lambda x: x.rstrip(to_strip)\n else: # pragma: no cover\n raise ValueError(\"Invalid side\")\n\n return self._apply(f)\n\n def lstrip(self, to_strip=None):\n \"\"\"\n Remove leading and trailing characters.\n\n Strip whitespaces (including newlines) or a set of specified characters\n from each string in the array from the left side.\n\n Parameters\n ----------\n to_strip : str or None, default None\n Specifying the set of characters to be removed.\n All combinations of this set of characters will be stripped.\n If None then whitespaces are removed.\n\n Returns\n -------\n stripped : same type as values\n \"\"\"\n return self.strip(to_strip, side=\"left\")\n\n def rstrip(self, to_strip=None):\n \"\"\"\n Remove leading and trailing characters.\n\n Strip whitespaces (including newlines) or a set of specified characters\n from each string in the array from the right side.\n\n Parameters\n ----------\n to_strip : str or None, default None\n Specifying the set of characters to be removed.\n All combinations of this set of characters will be stripped.\n If None then whitespaces are removed.\n\n Returns\n -------\n stripped : same type as values\n \"\"\"\n return self.strip(to_strip, side=\"right\")\n\n def wrap(self, width, **kwargs):\n \"\"\"\n Wrap long strings in the array to be formatted in paragraphs with\n length less than a given width.\n\n This method has the same keyword parameters and defaults as\n :class:`textwrap.TextWrapper`.\n\n Parameters\n ----------\n width : int\n Maximum line-width\n expand_tabs : bool, optional\n If true, tab characters will be expanded to spaces (default: True)\n replace_whitespace : bool, optional\n If true, each whitespace character (as defined by\n string.whitespace) remaining after tab expansion will be replaced\n by a single space (default: True)\n drop_whitespace : bool, optional\n If true, whitespace that, after wrapping, happens to end up at the\n beginning or end of a line is dropped (default: True)\n break_long_words : bool, optional\n If true, then words longer than width will be broken in order to\n ensure that no lines are longer than width. If it is false, long\n words will not be broken, and some lines may be longer than width.\n (default: True)\n break_on_hyphens : bool, optional\n If true, wrapping will occur preferably on whitespace and right\n after hyphens in compound words, as it is customary in English. 
If\n false, only whitespaces will be considered as potentially good\n places for line breaks, but you need to set break_long_words to\n false if you want truly insecable words. (default: True)\n\n Returns\n -------\n wrapped : same type as values\n \"\"\"\n tw = textwrap.TextWrapper(width=width)\n f = lambda x: \"\\n\".join(tw.wrap(x))\n return self._apply(f)\n\n def translate(self, table):\n \"\"\"\n Map all characters in the string through the given mapping table.\n\n Parameters\n ----------\n table : dict\n A a mapping of Unicode ordinals to Unicode ordinals, strings,\n or None. Unmapped characters are left untouched. Characters mapped\n to None are deleted. :meth:`str.maketrans` is a helper function for\n making translation tables.\n\n Returns\n -------\n translated : same type as values\n \"\"\"\n f = lambda x: x.translate(table)\n return self._apply(f)\n\n def repeat(self, repeats):\n \"\"\"\n Duplicate each string in the array.\n\n Parameters\n ----------\n repeats : int\n Number of repetitions.\n\n Returns\n -------\n repeated : same type as values\n Array of repeated string objects.\n \"\"\"\n f = lambda x: repeats * x\n return self._apply(f)\n\n def find(self, sub, start=0, end=None, side=\"left\"):\n \"\"\"\n Return lowest or highest indexes in each strings in the array\n where the substring is fully contained between [start:end].\n Return -1 on failure.\n\n Parameters\n ----------\n sub : str\n Substring being searched\n start : int\n Left edge index\n end : int\n Right edge index\n side : {'left', 'right'}, default 'left'\n Starting side for search.\n\n Returns\n -------\n found : array of integer values\n \"\"\"\n sub = self._obj.dtype.type(sub)\n\n if side == \"left\":\n method = \"find\"\n elif side == \"right\":\n method = \"rfind\"\n else: # pragma: no cover\n raise ValueError(\"Invalid side\")\n\n if end is None:\n f = lambda x: getattr(x, method)(sub, start)\n else:\n f = lambda x: getattr(x, method)(sub, start, end)\n\n return self._apply(f, dtype=int)\n\n def rfind(self, sub, start=0, end=None):\n \"\"\"\n Return highest indexes in each strings in the array\n where the substring is fully contained between [start:end].\n Return -1 on failure.\n\n Parameters\n ----------\n sub : str\n Substring being searched\n start : int\n Left edge index\n end : int\n Right edge index\n\n Returns\n -------\n found : array of integer values\n \"\"\"\n return self.find(sub, start=start, end=end, side=\"right\")\n\n def index(self, sub, start=0, end=None, side=\"left\"):\n \"\"\"\n Return lowest or highest indexes in each strings where the substring is\n fully contained between [start:end]. This is the same as\n ``str.find`` except instead of returning -1, it raises a ValueError\n when the substring is not found.\n\n Parameters\n ----------\n sub : str\n Substring being searched\n start : int\n Left edge index\n end : int\n Right edge index\n side : {'left', 'right'}, default 'left'\n Starting side for search.\n\n Returns\n -------\n found : array of integer values\n \"\"\"\n sub = self._obj.dtype.type(sub)\n\n if side == \"left\":\n method = \"index\"\n elif side == \"right\":\n method = \"rindex\"\n else: # pragma: no cover\n raise ValueError(\"Invalid side\")\n\n if end is None:\n f = lambda x: getattr(x, method)(sub, start)\n else:\n f = lambda x: getattr(x, method)(sub, start, end)\n\n return self._apply(f, dtype=int)\n\n def rindex(self, sub, start=0, end=None):\n \"\"\"\n Return highest indexes in each strings where the substring is\n fully contained between [start:end]. 
This is the same as\n ``str.rfind`` except instead of returning -1, it raises a ValueError\n when the substring is not found.\n\n Parameters\n ----------\n sub : str\n Substring being searched\n start : int\n Left edge index\n end : int\n Right edge index\n\n Returns\n -------\n found : array of integer values\n \"\"\"\n return self.index(sub, start=start, end=end, side=\"right\")\n\n def replace(self, pat, repl, n=-1, case=None, flags=0, regex=True):\n \"\"\"\n Replace occurrences of pattern/regex in the array with some string.\n\n Parameters\n ----------\n pat : string or compiled regex\n String can be a character sequence or regular expression.\n\n repl : string or callable\n Replacement string or a callable. The callable is passed the regex\n match object and must return a replacement string to be used.\n See :func:`re.sub`.\n\n n : int, default -1 (all)\n Number of replacements to make from start\n case : boolean, default None\n - If True, case sensitive (the default if `pat` is a string)\n - Set to False for case insensitive\n - Cannot be set if `pat` is a compiled regex\n flags : int, default 0 (no flags)\n - re module flags, e.g. re.IGNORECASE\n - Cannot be set if `pat` is a compiled regex\n regex : boolean, default True\n - If True, assumes the passed-in pattern is a regular expression.\n - If False, treats the pattern as a literal string\n - Cannot be set to False if `pat` is a compiled regex or `repl` is\n a callable.\n\n Returns\n -------\n replaced : same type as values\n A copy of the object with all matching occurrences of `pat`\n replaced by `repl`.\n \"\"\"\n if not (_is_str_like(repl) or callable(repl)): # pragma: no cover\n raise TypeError(\"repl must be a string or callable\")\n\n if _is_str_like(pat):\n pat = self._obj.dtype.type(pat)\n\n if _is_str_like(repl):\n repl = self._obj.dtype.type(repl)\n\n is_compiled_re = isinstance(pat, type(re.compile(\"\")))\n if regex:\n if is_compiled_re:\n if (case is not None) or (flags != 0):\n raise ValueError(\n \"case and flags cannot be set\" \" when pat is a compiled regex\"\n )\n else:\n # not a compiled regex\n # set default case\n if case is None:\n case = True\n\n # add case flag, if provided\n if case is False:\n flags |= re.IGNORECASE\n if is_compiled_re or len(pat) > 1 or flags or callable(repl):\n n = n if n >= 0 else 0\n compiled = re.compile(pat, flags=flags)\n f = lambda x: compiled.sub(repl=repl, string=x, count=n)\n else:\n f = lambda x: x.replace(pat, repl, n)\n else:\n if is_compiled_re:\n raise ValueError(\n \"Cannot use a compiled regex as replacement \"\n \"pattern with regex=False\"\n )\n if callable(repl):\n raise ValueError(\n \"Cannot use a callable replacement when \" \"regex=False\"\n )\n f = lambda x: x.replace(pat, repl, n)\n return self._apply(f)\n\n def decode(self, encoding, errors=\"strict\"):\n \"\"\"\n Decode character string in the array using indicated encoding.\n\n Parameters\n ----------\n encoding : str\n errors : str, optional\n\n Returns\n -------\n decoded : same type as values\n \"\"\"\n if encoding in _cpython_optimized_decoders:\n f = lambda x: x.decode(encoding, errors)\n else:\n decoder = codecs.getdecoder(encoding)\n f = lambda x: decoder(x, errors)[0]\n return self._apply(f, dtype=np.str_)\n\n def encode(self, encoding, errors=\"strict\"):\n \"\"\"\n Encode character string in the array using indicated encoding.\n\n Parameters\n ----------\n encoding : str\n errors : str, optional\n\n Returns\n -------\n encoded : same type as values\n \"\"\"\n if encoding in 
_cpython_optimized_encoders:\n f = lambda x: x.encode(encoding, errors)\n else:\n encoder = codecs.getencoder(encoding)\n f = lambda x: encoder(x, errors)[0]\n return self._apply(f, dtype=np.bytes_)\n"},{"col":4,"comment":"Calculate the n-th order discrete difference along given axis.\n\n Parameters\n ----------\n dim : hashable, optional\n Dimension over which to calculate the finite difference.\n n : int, optional\n The number of times values are differenced.\n label : hashable, optional\n The new coordinate in dimension ``dim`` will have the\n values of either the minuend's or subtrahend's coordinate\n for values 'upper' and 'lower', respectively. Other\n values are not supported.\n\n Returns\n -------\n difference : same type as caller\n The n-th order finite difference of this object.\n\n Examples\n --------\n >>> arr = xr.DataArray([5, 5, 6, 6], [[1, 2, 3, 4]], ['x'])\n >>> arr.diff('x')\n \n array([0, 1, 0])\n Coordinates:\n * x (x) int64 2 3 4\n >>> arr.diff('x', 2)\n \n array([ 1, -1])\n Coordinates:\n * x (x) int64 3 4\n\n See Also\n --------\n DataArray.differentiate\n ","endLoc":2630,"header":"def diff(self, dim: Hashable, n: int = 1, label: Hashable = \"upper\") -> \"DataArray\"","id":2192,"name":"diff","nodeType":"Function","startLoc":2591,"text":"def diff(self, dim: Hashable, n: int = 1, label: Hashable = \"upper\") -> \"DataArray\":\n \"\"\"Calculate the n-th order discrete difference along given axis.\n\n Parameters\n ----------\n dim : hashable, optional\n Dimension over which to calculate the finite difference.\n n : int, optional\n The number of times values are differenced.\n label : hashable, optional\n The new coordinate in dimension ``dim`` will have the\n values of either the minuend's or subtrahend's coordinate\n for values 'upper' and 'lower', respectively. 
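A rough usage sketch of the encode/decode pair, assuming a numpy-backed array of unicode strings (exact reprs omitted; the comments only describe the expected shape of the result):

import xarray as xr

words = xr.DataArray(["café", "naïve", "plain"], dims="w")

encoded = words.str.encode("utf-8")    # bytes elements, e.g. b"caf\xc3\xa9"
decoded = encoded.str.decode("utf-8")  # back to unicode strings
bool((decoded == words).all())         # True: the round trip is lossless here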
Other\n values are not supported.\n\n Returns\n -------\n difference : same type as caller\n The n-th order finite difference of this object.\n\n Examples\n --------\n >>> arr = xr.DataArray([5, 5, 6, 6], [[1, 2, 3, 4]], ['x'])\n >>> arr.diff('x')\n \n array([0, 1, 0])\n Coordinates:\n * x (x) int64 2 3 4\n >>> arr.diff('x', 2)\n \n array([ 1, -1])\n Coordinates:\n * x (x) int64 3 4\n\n See Also\n --------\n DataArray.differentiate\n \"\"\"\n ds = self._to_temp_dataset().diff(n=n, dim=dim, label=label)\n return self._from_temp_dataset(ds)"},{"className":"StringAccessor","col":0,"comment":"Vectorized string functions for string-like arrays.\n\n Similar to pandas, fields can be accessed through the `.str` attribute\n for applicable DataArrays.\n\n >>> da = xr.DataArray(['some', 'text', 'in', 'an', 'array'])\n >>> ds.str.len()\n \n array([4, 4, 2, 2, 5])\n Dimensions without coordinates: dim_0\n\n ","endLoc":966,"id":2193,"nodeType":"Class","startLoc":64,"text":"class StringAccessor:\n \"\"\"Vectorized string functions for string-like arrays.\n\n Similar to pandas, fields can be accessed through the `.str` attribute\n for applicable DataArrays.\n\n >>> da = xr.DataArray(['some', 'text', 'in', 'an', 'array'])\n >>> ds.str.len()\n \n array([4, 4, 2, 2, 5])\n Dimensions without coordinates: dim_0\n\n \"\"\"\n\n __slots__ = (\"_obj\",)\n\n def __init__(self, obj):\n self._obj = obj\n\n def _apply(self, f, dtype=None):\n # TODO handling of na values ?\n if dtype is None:\n dtype = self._obj.dtype\n\n g = np.vectorize(f, otypes=[dtype])\n return apply_ufunc(g, self._obj, dask=\"parallelized\", output_dtypes=[dtype])\n\n def len(self):\n \"\"\"\n Compute the length of each element in the array.\n\n Returns\n -------\n lengths array : array of int\n \"\"\"\n return self._apply(len, dtype=int)\n\n def __getitem__(self, key):\n if isinstance(key, slice):\n return self.slice(start=key.start, stop=key.stop, step=key.step)\n else:\n return self.get(key)\n\n def get(self, i):\n \"\"\"\n Extract element from indexable in each element in the array.\n\n Parameters\n ----------\n i : int\n Position of element to extract.\n default : optional\n Value for out-of-range index. If not specified (None) defaults to\n an empty string.\n\n Returns\n -------\n items : array of objects\n \"\"\"\n obj = slice(-1, None) if i == -1 else slice(i, i + 1)\n return self._apply(lambda x: x[obj])\n\n def slice(self, start=None, stop=None, step=None):\n \"\"\"\n Slice substrings from each element in the array.\n\n Parameters\n ----------\n start : int, optional\n Start position for slice operation.\n stop : int, optional\n Stop position for slice operation.\n step : int, optional\n Step size for slice operation.\n\n Returns\n -------\n sliced strings : same type as values\n \"\"\"\n s = slice(start, stop, step)\n f = lambda x: x[s]\n return self._apply(f)\n\n def slice_replace(self, start=None, stop=None, repl=\"\"):\n \"\"\"\n Replace a positional slice of a string with another value.\n\n Parameters\n ----------\n start : int, optional\n Left index position to use for the slice. If not specified (None),\n the slice is unbounded on the left, i.e. slice from the start\n of the string.\n stop : int, optional\n Right index position to use for the slice. If not specified (None),\n the slice is unbounded on the right, i.e. slice until the\n end of the string.\n repl : str, optional\n String for replacement. 
If not specified, the sliced region\n is replaced with an empty string.\n\n Returns\n -------\n replaced : same type as values\n \"\"\"\n repl = self._obj.dtype.type(repl)\n\n def f(x):\n if len(x[start:stop]) == 0:\n local_stop = start\n else:\n local_stop = stop\n y = self._obj.dtype.type(\"\")\n if start is not None:\n y += x[:start]\n y += repl\n if stop is not None:\n y += x[local_stop:]\n return y\n\n return self._apply(f)\n\n def capitalize(self):\n \"\"\"\n Convert strings in the array to be capitalized.\n\n Returns\n -------\n capitalized : same type as values\n \"\"\"\n return self._apply(lambda x: x.capitalize())\n\n def lower(self):\n \"\"\"\n Convert strings in the array to lowercase.\n\n Returns\n -------\n lowerd : same type as values\n \"\"\"\n return self._apply(lambda x: x.lower())\n\n def swapcase(self):\n \"\"\"\n Convert strings in the array to be swapcased.\n\n Returns\n -------\n swapcased : same type as values\n \"\"\"\n return self._apply(lambda x: x.swapcase())\n\n def title(self):\n \"\"\"\n Convert strings in the array to titlecase.\n\n Returns\n -------\n titled : same type as values\n \"\"\"\n return self._apply(lambda x: x.title())\n\n def upper(self):\n \"\"\"\n Convert strings in the array to uppercase.\n\n Returns\n -------\n uppered : same type as values\n \"\"\"\n return self._apply(lambda x: x.upper())\n\n def isalnum(self):\n \"\"\"\n Check whether all characters in each string are alphanumeric.\n\n Returns\n -------\n isalnum : array of bool\n Array of boolean values with the same shape as the original array.\n \"\"\"\n return self._apply(lambda x: x.isalnum(), dtype=bool)\n\n def isalpha(self):\n \"\"\"\n Check whether all characters in each string are alphabetic.\n\n Returns\n -------\n isalpha : array of bool\n Array of boolean values with the same shape as the original array.\n \"\"\"\n return self._apply(lambda x: x.isalpha(), dtype=bool)\n\n def isdecimal(self):\n \"\"\"\n Check whether all characters in each string are decimal.\n\n Returns\n -------\n isdecimal : array of bool\n Array of boolean values with the same shape as the original array.\n \"\"\"\n return self._apply(lambda x: x.isdecimal(), dtype=bool)\n\n def isdigit(self):\n \"\"\"\n Check whether all characters in each string are digits.\n\n Returns\n -------\n isdigit : array of bool\n Array of boolean values with the same shape as the original array.\n \"\"\"\n return self._apply(lambda x: x.isdigit(), dtype=bool)\n\n def islower(self):\n \"\"\"\n Check whether all characters in each string are lowercase.\n\n Returns\n -------\n islower : array of bool\n Array of boolean values with the same shape as the original array.\n \"\"\"\n return self._apply(lambda x: x.islower(), dtype=bool)\n\n def isnumeric(self):\n \"\"\"\n Check whether all characters in each string are numeric.\n\n Returns\n -------\n isnumeric : array of bool\n Array of boolean values with the same shape as the original array.\n \"\"\"\n return self._apply(lambda x: x.isnumeric(), dtype=bool)\n\n def isspace(self):\n \"\"\"\n Check whether all characters in each string are spaces.\n\n Returns\n -------\n isspace : array of bool\n Array of boolean values with the same shape as the original array.\n \"\"\"\n return self._apply(lambda x: x.isspace(), dtype=bool)\n\n def istitle(self):\n \"\"\"\n Check whether all characters in each string are titlecase.\n\n Returns\n -------\n istitle : array of bool\n Array of boolean values with the same shape as the original array.\n \"\"\"\n return self._apply(lambda x: x.istitle(), 
dtype=bool)\n\n def isupper(self):\n \"\"\"\n Check whether all characters in each string are uppercase.\n\n Returns\n -------\n isupper : array of bool\n Array of boolean values with the same shape as the original array.\n \"\"\"\n return self._apply(lambda x: x.isupper(), dtype=bool)\n\n def count(self, pat, flags=0):\n \"\"\"\n Count occurrences of pattern in each string of the array.\n\n This function is used to count the number of times a particular regex\n pattern is repeated in each of the string elements of the\n :class:`~xarray.DatArray`.\n\n Parameters\n ----------\n pat : str\n Valid regular expression.\n flags : int, default 0, meaning no flags\n Flags for the `re` module. For a complete list, `see here\n `_.\n\n Returns\n -------\n counts : array of int\n \"\"\"\n pat = self._obj.dtype.type(pat)\n regex = re.compile(pat, flags=flags)\n f = lambda x: len(regex.findall(x))\n return self._apply(f, dtype=int)\n\n def startswith(self, pat):\n \"\"\"\n Test if the start of each string element matches a pattern.\n\n Parameters\n ----------\n pat : str\n Character sequence. Regular expressions are not accepted.\n\n Returns\n -------\n startswith : array of bool\n An array of booleans indicating whether the given pattern matches\n the start of each string element.\n \"\"\"\n pat = self._obj.dtype.type(pat)\n f = lambda x: x.startswith(pat)\n return self._apply(f, dtype=bool)\n\n def endswith(self, pat):\n \"\"\"\n Test if the end of each string element matches a pattern.\n\n Parameters\n ----------\n pat : str\n Character sequence. Regular expressions are not accepted.\n\n Returns\n -------\n endswith : array of bool\n A Series of booleans indicating whether the given pattern matches\n the end of each string element.\n \"\"\"\n pat = self._obj.dtype.type(pat)\n f = lambda x: x.endswith(pat)\n return self._apply(f, dtype=bool)\n\n def pad(self, width, side=\"left\", fillchar=\" \"):\n \"\"\"\n Pad strings in the array up to width.\n\n Parameters\n ----------\n width : int\n Minimum width of resulting string; additional characters will be\n filled with character defined in `fillchar`.\n side : {'left', 'right', 'both'}, default 'left'\n Side from which to fill resulting string.\n fillchar : str, default ' '\n Additional character for filling, default is whitespace.\n\n Returns\n -------\n filled : same type as values\n Array with a minimum number of char in each element.\n \"\"\"\n width = int(width)\n fillchar = self._obj.dtype.type(fillchar)\n if len(fillchar) != 1:\n raise TypeError(\"fillchar must be a character, not str\")\n\n if side == \"left\":\n f = lambda s: s.rjust(width, fillchar)\n elif side == \"right\":\n f = lambda s: s.ljust(width, fillchar)\n elif side == \"both\":\n f = lambda s: s.center(width, fillchar)\n else: # pragma: no cover\n raise ValueError(\"Invalid side\")\n\n return self._apply(f)\n\n def center(self, width, fillchar=\" \"):\n \"\"\"\n Filling left and right side of strings in the array with an\n additional character.\n\n Parameters\n ----------\n width : int\n Minimum width of resulting string; additional characters will be\n filled with ``fillchar``\n fillchar : str\n Additional character for filling, default is whitespace\n\n Returns\n -------\n filled : same type as values\n \"\"\"\n return self.pad(width, side=\"both\", fillchar=fillchar)\n\n def ljust(self, width, fillchar=\" \"):\n \"\"\"\n Filling right side of strings in the array with an additional\n character.\n\n Parameters\n ----------\n width : int\n Minimum width of resulting string; 
additional characters will be\n filled with ``fillchar``\n fillchar : str\n Additional character for filling, default is whitespace\n\n Returns\n -------\n filled : same type as values\n \"\"\"\n return self.pad(width, side=\"right\", fillchar=fillchar)\n\n def rjust(self, width, fillchar=\" \"):\n \"\"\"\n Filling left side of strings in the array with an additional character.\n\n Parameters\n ----------\n width : int\n Minimum width of resulting string; additional characters will be\n filled with ``fillchar``\n fillchar : str\n Additional character for filling, default is whitespace\n\n Returns\n -------\n filled : same type as values\n \"\"\"\n return self.pad(width, side=\"left\", fillchar=fillchar)\n\n def zfill(self, width):\n \"\"\"\n Pad strings in the array by prepending '0' characters.\n\n Strings in the array are padded with '0' characters on the\n left of the string to reach a total string length `width`. Strings\n in the array with length greater or equal to `width` are unchanged.\n\n Parameters\n ----------\n width : int\n Minimum length of resulting string; strings with length less\n than `width` be prepended with '0' characters.\n\n Returns\n -------\n filled : same type as values\n \"\"\"\n return self.pad(width, side=\"left\", fillchar=\"0\")\n\n def contains(self, pat, case=True, flags=0, regex=True):\n \"\"\"\n Test if pattern or regex is contained within a string of the array.\n\n Return boolean array based on whether a given pattern or regex is\n contained within a string of the array.\n\n Parameters\n ----------\n pat : str\n Character sequence or regular expression.\n case : bool, default True\n If True, case sensitive.\n flags : int, default 0 (no flags)\n Flags to pass through to the re module, e.g. re.IGNORECASE.\n regex : bool, default True\n If True, assumes the pat is a regular expression.\n If False, treats the pat as a literal string.\n\n Returns\n -------\n contains : array of bool\n An array of boolean values indicating whether the\n given pattern is contained within the string of each element\n of the array.\n \"\"\"\n pat = self._obj.dtype.type(pat)\n if regex:\n if not case:\n flags |= re.IGNORECASE\n\n regex = re.compile(pat, flags=flags)\n\n if regex.groups > 0: # pragma: no cover\n raise ValueError(\"This pattern has match groups.\")\n\n f = lambda x: bool(regex.search(x))\n else:\n if case:\n f = lambda x: pat in x\n else:\n uppered = self._obj.str.upper()\n return uppered.str.contains(pat.upper(), regex=False)\n\n return self._apply(f, dtype=bool)\n\n def match(self, pat, case=True, flags=0):\n \"\"\"\n Determine if each string matches a regular expression.\n\n Parameters\n ----------\n pat : string\n Character sequence or regular expression\n case : boolean, default True\n If True, case sensitive\n flags : int, default 0 (no flags)\n re module flags, e.g. 
re.IGNORECASE\n\n Returns\n -------\n matched : array of bool\n \"\"\"\n if not case:\n flags |= re.IGNORECASE\n\n pat = self._obj.dtype.type(pat)\n regex = re.compile(pat, flags=flags)\n f = lambda x: bool(regex.match(x))\n return self._apply(f, dtype=bool)\n\n def strip(self, to_strip=None, side=\"both\"):\n \"\"\"\n Remove leading and trailing characters.\n\n Strip whitespaces (including newlines) or a set of specified characters\n from each string in the array from left and/or right sides.\n\n Parameters\n ----------\n to_strip : str or None, default None\n Specifying the set of characters to be removed.\n All combinations of this set of characters will be stripped.\n If None then whitespaces are removed.\n side : {'left', 'right', 'both'}, default 'left'\n Side from which to strip.\n\n Returns\n -------\n stripped : same type as values\n \"\"\"\n if to_strip is not None:\n to_strip = self._obj.dtype.type(to_strip)\n\n if side == \"both\":\n f = lambda x: x.strip(to_strip)\n elif side == \"left\":\n f = lambda x: x.lstrip(to_strip)\n elif side == \"right\":\n f = lambda x: x.rstrip(to_strip)\n else: # pragma: no cover\n raise ValueError(\"Invalid side\")\n\n return self._apply(f)\n\n def lstrip(self, to_strip=None):\n \"\"\"\n Remove leading and trailing characters.\n\n Strip whitespaces (including newlines) or a set of specified characters\n from each string in the array from the left side.\n\n Parameters\n ----------\n to_strip : str or None, default None\n Specifying the set of characters to be removed.\n All combinations of this set of characters will be stripped.\n If None then whitespaces are removed.\n\n Returns\n -------\n stripped : same type as values\n \"\"\"\n return self.strip(to_strip, side=\"left\")\n\n def rstrip(self, to_strip=None):\n \"\"\"\n Remove leading and trailing characters.\n\n Strip whitespaces (including newlines) or a set of specified characters\n from each string in the array from the right side.\n\n Parameters\n ----------\n to_strip : str or None, default None\n Specifying the set of characters to be removed.\n All combinations of this set of characters will be stripped.\n If None then whitespaces are removed.\n\n Returns\n -------\n stripped : same type as values\n \"\"\"\n return self.strip(to_strip, side=\"right\")\n\n def wrap(self, width, **kwargs):\n \"\"\"\n Wrap long strings in the array to be formatted in paragraphs with\n length less than a given width.\n\n This method has the same keyword parameters and defaults as\n :class:`textwrap.TextWrapper`.\n\n Parameters\n ----------\n width : int\n Maximum line-width\n expand_tabs : bool, optional\n If true, tab characters will be expanded to spaces (default: True)\n replace_whitespace : bool, optional\n If true, each whitespace character (as defined by\n string.whitespace) remaining after tab expansion will be replaced\n by a single space (default: True)\n drop_whitespace : bool, optional\n If true, whitespace that, after wrapping, happens to end up at the\n beginning or end of a line is dropped (default: True)\n break_long_words : bool, optional\n If true, then words longer than width will be broken in order to\n ensure that no lines are longer than width. If it is false, long\n words will not be broken, and some lines may be longer than width.\n (default: True)\n break_on_hyphens : bool, optional\n If true, wrapping will occur preferably on whitespace and right\n after hyphens in compound words, as it is customary in English. 
If\n false, only whitespaces will be considered as potentially good\n places for line breaks, but you need to set break_long_words to\n false if you want truly insecable words. (default: True)\n\n Returns\n -------\n wrapped : same type as values\n \"\"\"\n tw = textwrap.TextWrapper(width=width)\n f = lambda x: \"\\n\".join(tw.wrap(x))\n return self._apply(f)\n\n def translate(self, table):\n \"\"\"\n Map all characters in the string through the given mapping table.\n\n Parameters\n ----------\n table : dict\n A a mapping of Unicode ordinals to Unicode ordinals, strings,\n or None. Unmapped characters are left untouched. Characters mapped\n to None are deleted. :meth:`str.maketrans` is a helper function for\n making translation tables.\n\n Returns\n -------\n translated : same type as values\n \"\"\"\n f = lambda x: x.translate(table)\n return self._apply(f)\n\n def repeat(self, repeats):\n \"\"\"\n Duplicate each string in the array.\n\n Parameters\n ----------\n repeats : int\n Number of repetitions.\n\n Returns\n -------\n repeated : same type as values\n Array of repeated string objects.\n \"\"\"\n f = lambda x: repeats * x\n return self._apply(f)\n\n def find(self, sub, start=0, end=None, side=\"left\"):\n \"\"\"\n Return lowest or highest indexes in each strings in the array\n where the substring is fully contained between [start:end].\n Return -1 on failure.\n\n Parameters\n ----------\n sub : str\n Substring being searched\n start : int\n Left edge index\n end : int\n Right edge index\n side : {'left', 'right'}, default 'left'\n Starting side for search.\n\n Returns\n -------\n found : array of integer values\n \"\"\"\n sub = self._obj.dtype.type(sub)\n\n if side == \"left\":\n method = \"find\"\n elif side == \"right\":\n method = \"rfind\"\n else: # pragma: no cover\n raise ValueError(\"Invalid side\")\n\n if end is None:\n f = lambda x: getattr(x, method)(sub, start)\n else:\n f = lambda x: getattr(x, method)(sub, start, end)\n\n return self._apply(f, dtype=int)\n\n def rfind(self, sub, start=0, end=None):\n \"\"\"\n Return highest indexes in each strings in the array\n where the substring is fully contained between [start:end].\n Return -1 on failure.\n\n Parameters\n ----------\n sub : str\n Substring being searched\n start : int\n Left edge index\n end : int\n Right edge index\n\n Returns\n -------\n found : array of integer values\n \"\"\"\n return self.find(sub, start=start, end=end, side=\"right\")\n\n def index(self, sub, start=0, end=None, side=\"left\"):\n \"\"\"\n Return lowest or highest indexes in each strings where the substring is\n fully contained between [start:end]. This is the same as\n ``str.find`` except instead of returning -1, it raises a ValueError\n when the substring is not found.\n\n Parameters\n ----------\n sub : str\n Substring being searched\n start : int\n Left edge index\n end : int\n Right edge index\n side : {'left', 'right'}, default 'left'\n Starting side for search.\n\n Returns\n -------\n found : array of integer values\n \"\"\"\n sub = self._obj.dtype.type(sub)\n\n if side == \"left\":\n method = \"index\"\n elif side == \"right\":\n method = \"rindex\"\n else: # pragma: no cover\n raise ValueError(\"Invalid side\")\n\n if end is None:\n f = lambda x: getattr(x, method)(sub, start)\n else:\n f = lambda x: getattr(x, method)(sub, start, end)\n\n return self._apply(f, dtype=int)\n\n def rindex(self, sub, start=0, end=None):\n \"\"\"\n Return highest indexes in each strings where the substring is\n fully contained between [start:end]. 
This is the same as\n ``str.rfind`` except instead of returning -1, it raises a ValueError\n when the substring is not found.\n\n Parameters\n ----------\n sub : str\n Substring being searched\n start : int\n Left edge index\n end : int\n Right edge index\n\n Returns\n -------\n found : array of integer values\n \"\"\"\n return self.index(sub, start=start, end=end, side=\"right\")\n\n def replace(self, pat, repl, n=-1, case=None, flags=0, regex=True):\n \"\"\"\n Replace occurrences of pattern/regex in the array with some string.\n\n Parameters\n ----------\n pat : string or compiled regex\n String can be a character sequence or regular expression.\n\n repl : string or callable\n Replacement string or a callable. The callable is passed the regex\n match object and must return a replacement string to be used.\n See :func:`re.sub`.\n\n n : int, default -1 (all)\n Number of replacements to make from start\n case : boolean, default None\n - If True, case sensitive (the default if `pat` is a string)\n - Set to False for case insensitive\n - Cannot be set if `pat` is a compiled regex\n flags : int, default 0 (no flags)\n - re module flags, e.g. re.IGNORECASE\n - Cannot be set if `pat` is a compiled regex\n regex : boolean, default True\n - If True, assumes the passed-in pattern is a regular expression.\n - If False, treats the pattern as a literal string\n - Cannot be set to False if `pat` is a compiled regex or `repl` is\n a callable.\n\n Returns\n -------\n replaced : same type as values\n A copy of the object with all matching occurrences of `pat`\n replaced by `repl`.\n \"\"\"\n if not (_is_str_like(repl) or callable(repl)): # pragma: no cover\n raise TypeError(\"repl must be a string or callable\")\n\n if _is_str_like(pat):\n pat = self._obj.dtype.type(pat)\n\n if _is_str_like(repl):\n repl = self._obj.dtype.type(repl)\n\n is_compiled_re = isinstance(pat, type(re.compile(\"\")))\n if regex:\n if is_compiled_re:\n if (case is not None) or (flags != 0):\n raise ValueError(\n \"case and flags cannot be set\" \" when pat is a compiled regex\"\n )\n else:\n # not a compiled regex\n # set default case\n if case is None:\n case = True\n\n # add case flag, if provided\n if case is False:\n flags |= re.IGNORECASE\n if is_compiled_re or len(pat) > 1 or flags or callable(repl):\n n = n if n >= 0 else 0\n compiled = re.compile(pat, flags=flags)\n f = lambda x: compiled.sub(repl=repl, string=x, count=n)\n else:\n f = lambda x: x.replace(pat, repl, n)\n else:\n if is_compiled_re:\n raise ValueError(\n \"Cannot use a compiled regex as replacement \"\n \"pattern with regex=False\"\n )\n if callable(repl):\n raise ValueError(\n \"Cannot use a callable replacement when \" \"regex=False\"\n )\n f = lambda x: x.replace(pat, repl, n)\n return self._apply(f)\n\n def decode(self, encoding, errors=\"strict\"):\n \"\"\"\n Decode character string in the array using indicated encoding.\n\n Parameters\n ----------\n encoding : str\n errors : str, optional\n\n Returns\n -------\n decoded : same type as values\n \"\"\"\n if encoding in _cpython_optimized_decoders:\n f = lambda x: x.decode(encoding, errors)\n else:\n decoder = codecs.getdecoder(encoding)\n f = lambda x: decoder(x, errors)[0]\n return self._apply(f, dtype=np.str_)\n\n def encode(self, encoding, errors=\"strict\"):\n \"\"\"\n Encode character string in the array using indicated encoding.\n\n Parameters\n ----------\n encoding : str\n errors : str, optional\n\n Returns\n -------\n encoded : same type as values\n \"\"\"\n if encoding in 
_cpython_optimized_encoders:\n f = lambda x: x.encode(encoding, errors)\n else:\n encoder = codecs.getencoder(encoding)\n f = lambda x: encoder(x, errors)[0]\n return self._apply(f, dtype=np.bytes_)"},{"col":4,"comment":"null","endLoc":81,"header":"def __init__(self, obj)","id":2194,"name":"__init__","nodeType":"Function","startLoc":80,"text":"def __init__(self, obj):\n self._obj = obj"},{"col":4,"comment":"null","endLoc":89,"header":"def _apply(self, f, dtype=None)","id":2195,"name":"_apply","nodeType":"Function","startLoc":83,"text":"def _apply(self, f, dtype=None):\n # TODO handling of na values ?\n if dtype is None:\n dtype = self._obj.dtype\n\n g = np.vectorize(f, otypes=[dtype])\n return apply_ufunc(g, self._obj, dask=\"parallelized\", output_dtypes=[dtype])"},{"attributeType":"null","col":8,"comment":"null","endLoc":42,"id":2196,"name":"_name","nodeType":"Attribute","startLoc":42,"text":"self._name"},{"col":0,"comment":"null","endLoc":35,"header":"def _dispatch_priority(obj)","id":2197,"name":"_dispatch_priority","nodeType":"Function","startLoc":31,"text":"def _dispatch_priority(obj):\n for priority, cls in enumerate(_dispatch_order):\n if isinstance(obj, cls):\n return priority\n return -1"},{"col":0,"comment":"null","endLoc":92,"header":"def _create_op(name)","id":2198,"name":"_create_op","nodeType":"Function","startLoc":81,"text":"def _create_op(name):\n func = _UFuncDispatcher(name)\n func.__name__ = name\n doc = getattr(_np, name).__doc__\n func.__doc__ = (\n \"xarray specific variant of numpy.%s. Handles \"\n \"xarray.Dataset, xarray.DataArray, xarray.Variable, \"\n \"numpy.ndarray and dask.array.Array objects with \"\n \"automatic dispatching.\\n\\n\"\n \"Documentation from numpy:\\n\\n%s\" % (name, doc)\n )\n return func"},{"col":4,"comment":"\n Compute the length of each element in the array.\n\n Returns\n -------\n lengths array : array of int\n ","endLoc":99,"header":"def len(self)","id":2199,"name":"len","nodeType":"Function","startLoc":91,"text":"def len(self):\n \"\"\"\n Compute the length of each element in the array.\n\n Returns\n -------\n lengths array : array of int\n \"\"\"\n return self._apply(len, dtype=int)"},{"attributeType":"null","col":19,"comment":"null","endLoc":16,"id":2200,"name":"_warnings","nodeType":"Attribute","startLoc":16,"text":"_warnings"},{"attributeType":"null","col":16,"comment":"null","endLoc":18,"id":2201,"name":"_np","nodeType":"Attribute","startLoc":18,"text":"_np"},{"attributeType":"null","col":41,"comment":"null","endLoc":20,"id":2202,"name":"_DataArray","nodeType":"Attribute","startLoc":20,"text":"_DataArray"},{"attributeType":"null","col":37,"comment":"null","endLoc":21,"id":2203,"name":"_Dataset","nodeType":"Attribute","startLoc":21,"text":"_Dataset"},{"attributeType":"null","col":37,"comment":"null","endLoc":23,"id":2204,"name":"_GroupBy","nodeType":"Attribute","startLoc":23,"text":"_GroupBy"},{"attributeType":"null","col":46,"comment":"null","endLoc":24,"id":2205,"name":"_dask_array_type","nodeType":"Attribute","startLoc":24,"text":"_dask_array_type"},{"attributeType":"null","col":39,"comment":"null","endLoc":25,"id":2206,"name":"_Variable","nodeType":"Attribute","startLoc":25,"text":"_Variable"},{"attributeType":"null","col":0,"comment":"null","endLoc":27,"id":2207,"name":"_xarray_types","nodeType":"Attribute","startLoc":27,"text":"_xarray_types"},{"col":0,"comment":"null","endLoc":223,"header":"def _values_method_wrapper(name)","id":2208,"name":"_values_method_wrapper","nodeType":"Function","startLoc":217,"text":"def 
_values_method_wrapper(name):\n def func(self, *args, **kwargs):\n return _call_possibly_missing_method(self.data, name, args, kwargs)\n\n func.__name__ = name\n func.__doc__ = getattr(np.ndarray, name).__doc__\n return func"},{"col":0,"comment":"null","endLoc":232,"header":"def _method_wrapper(name)","id":2209,"name":"_method_wrapper","nodeType":"Function","startLoc":226,"text":"def _method_wrapper(name):\n def func(self, *args, **kwargs):\n return _call_possibly_missing_method(self, name, args, kwargs)\n\n func.__name__ = name\n func.__doc__ = getattr(np.ndarray, name).__doc__\n return func"},{"col":0,"comment":"null","endLoc":250,"header":"def _func_slash_method_wrapper(f, name=None)","id":2210,"name":"_func_slash_method_wrapper","nodeType":"Function","startLoc":235,"text":"def _func_slash_method_wrapper(f, name=None):\n # try to wrap a method, but if not found use the function\n # this is useful when patching in a function as both a DataArray and\n # Dataset method\n if name is None:\n name = f.__name__\n\n def func(self, *args, **kwargs):\n try:\n return getattr(self, name)(*args, **kwargs)\n except AttributeError:\n return f(self, *args, **kwargs)\n\n func.__name__ = name\n func.__doc__ = f.__doc__\n return func"},{"col":4,"comment":"Shift this array by an offset along one or more dimensions.\n\n Only the data is moved; coordinates stay in place. Values shifted from\n beyond array bounds are replaced by NaN. This is consistent with the\n behavior of ``shift`` in pandas.\n\n Parameters\n ----------\n shifts : Mapping with the form of {dim: offset}\n Integer offset to shift along each of the given dimensions.\n Positive offsets shift to the right; negative offsets shift to the\n left.\n fill_value: scalar, optional\n Value to use for newly missing values\n **shifts_kwargs:\n The keyword arguments form of ``shifts``.\n One of shifts or shifts_kwarg must be provided.\n\n Returns\n -------\n shifted : DataArray\n DataArray with the same coordinates and attributes but shifted\n data.\n\n See also\n --------\n roll\n\n Examples\n --------\n\n >>> arr = xr.DataArray([5, 6, 7], dims='x')\n >>> arr.shift(x=1)\n \n array([ nan, 5., 6.])\n Coordinates:\n * x (x) int64 0 1 2\n ","endLoc":2679,"header":"def shift(\n self,\n shifts: Mapping[Hashable, int] = None,\n fill_value: Any = dtypes.NA,\n **shifts_kwargs: int\n ) -> \"DataArray\"","id":2211,"name":"shift","nodeType":"Function","startLoc":2632,"text":"def shift(\n self,\n shifts: Mapping[Hashable, int] = None,\n fill_value: Any = dtypes.NA,\n **shifts_kwargs: int\n ) -> \"DataArray\":\n \"\"\"Shift this array by an offset along one or more dimensions.\n\n Only the data is moved; coordinates stay in place. Values shifted from\n beyond array bounds are replaced by NaN. 
This is consistent with the\n behavior of ``shift`` in pandas.\n\n Parameters\n ----------\n shifts : Mapping with the form of {dim: offset}\n Integer offset to shift along each of the given dimensions.\n Positive offsets shift to the right; negative offsets shift to the\n left.\n fill_value: scalar, optional\n Value to use for newly missing values\n **shifts_kwargs:\n The keyword arguments form of ``shifts``.\n One of shifts or shifts_kwarg must be provided.\n\n Returns\n -------\n shifted : DataArray\n DataArray with the same coordinates and attributes but shifted\n data.\n\n See also\n --------\n roll\n\n Examples\n --------\n\n >>> arr = xr.DataArray([5, 6, 7], dims='x')\n >>> arr.shift(x=1)\n \n array([ nan, 5., 6.])\n Coordinates:\n * x (x) int64 0 1 2\n \"\"\"\n variable = self.variable.shift(\n shifts=shifts, fill_value=fill_value, **shifts_kwargs\n )\n return self._replace(variable=variable)"},{"col":0,"comment":"null","endLoc":289,"header":"def inject_cum_methods(cls)","id":2212,"name":"inject_cum_methods","nodeType":"Function","startLoc":278,"text":"def inject_cum_methods(cls):\n methods = [(name, getattr(duck_array_ops, name), True) for name in NAN_CUM_METHODS]\n for name, f, include_skipna in methods:\n numeric_only = getattr(f, \"numeric_only\", False)\n func = cls._reduce_method(f, include_skipna, numeric_only)\n func.__name__ = name\n func.__doc__ = _CUM_DOCSTRING_TEMPLATE.format(\n name=name,\n cls=cls.__name__,\n extra_args=cls._cum_extra_args_docstring.format(name=name),\n )\n setattr(cls, name, func)"},{"col":0,"comment":"null","endLoc":349,"header":"def inject_all_ops_and_reduce_methods(cls, priority=50, array_only=True)","id":2213,"name":"inject_all_ops_and_reduce_methods","nodeType":"Function","startLoc":321,"text":"def inject_all_ops_and_reduce_methods(cls, priority=50, array_only=True):\n # prioritize our operations over those of numpy.ndarray (priority=1)\n # and numpy.matrix (priority=10)\n cls.__array_priority__ = priority\n\n # patch in standard special operations\n for name in UNARY_OPS:\n setattr(cls, op_str(name), cls._unary_op(get_op(name)))\n inject_binary_ops(cls, inplace=True)\n\n # patch in numpy/pandas methods\n for name in NUMPY_UNARY_METHODS:\n setattr(cls, name, cls._unary_op(_method_wrapper(name)))\n\n for name in PANDAS_UNARY_FUNCTIONS:\n f = _func_slash_method_wrapper(getattr(duck_array_ops, name), name=name)\n setattr(cls, name, cls._unary_op(f))\n\n f = _func_slash_method_wrapper(duck_array_ops.around, name=\"round\")\n setattr(cls, \"round\", cls._unary_op(f))\n\n if array_only:\n # these methods don't return arrays of the same shape as the input, so\n # don't try to patch these in for Dataset objects\n for name in NUMPY_SAME_METHODS:\n setattr(cls, name, _values_method_wrapper(name))\n\n inject_reduce_methods(cls)\n inject_cum_methods(cls)"},{"attributeType":"null","col":0,"comment":"null","endLoc":28,"id":2214,"name":"_dispatch_order","nodeType":"Attribute","startLoc":28,"text":"_dispatch_order"},{"attributeType":"null","col":0,"comment":"null","endLoc":95,"id":2215,"name":"__all__","nodeType":"Attribute","startLoc":95,"text":"__all__"},{"attributeType":"null","col":4,"comment":"null","endLoc":104,"id":2216,"name":"name","nodeType":"Attribute","startLoc":104,"text":"name"},{"col":0,"comment":"","endLoc":15,"header":"ufuncs.py#","id":2217,"name":"","nodeType":"Function","startLoc":1,"text":"\"\"\"xarray specific universal functions\n\nHandles unary and binary operations for the following types, in ascending\npriority order:\n- scalars\n- 
numpy.ndarray\n- dask.array.Array\n- xarray.Variable\n- xarray.DataArray\n- xarray.Dataset\n- xarray.core.groupby.GroupBy\n\nOnce NumPy 1.10 comes out with support for overriding ufuncs, this module will\nhopefully no longer be necessary.\n\"\"\"\n\n_xarray_types = (_Variable, _DataArray, _Dataset, _GroupBy)\n\n_dispatch_order = (_np.ndarray, _dask_array_type) + _xarray_types\n\n__all__ = \"\"\"logaddexp logaddexp2 conj exp log log2 log10 log1p expm1 sqrt\n square sin cos tan arcsin arccos arctan arctan2 hypot sinh cosh\n tanh arcsinh arccosh arctanh deg2rad rad2deg logical_and\n logical_or logical_xor logical_not maximum minimum fmax fmin\n isreal iscomplex isfinite isinf isnan signbit copysign nextafter\n ldexp fmod floor ceil trunc degrees radians rint fix angle real\n imag fabs sign frexp fmod\n \"\"\".split()\n\nfor name in __all__:\n globals()[name] = _create_op(name)"},{"col":4,"comment":"Roll this array by an offset along one or more dimensions.\n\n Unlike shift, roll may rotate all variables, including coordinates\n if specified. The direction of rotation is consistent with\n :py:func:`numpy.roll`.\n\n Parameters\n ----------\n shifts : Mapping with the form of {dim: offset}\n Integer offset to rotate each of the given dimensions.\n Positive offsets roll to the right; negative offsets roll to the\n left.\n roll_coords : bool\n Indicates whether to roll the coordinates by the offset\n The current default of roll_coords (None, equivalent to True) is\n deprecated and will change to False in a future version.\n Explicitly pass roll_coords to silence the warning.\n **shifts_kwargs : The keyword arguments form of ``shifts``.\n One of shifts or shifts_kwarg must be provided.\n\n Returns\n -------\n rolled : DataArray\n DataArray with the same attributes but rolled data and coordinates.\n\n See also\n --------\n shift\n\n Examples\n --------\n\n >>> arr = xr.DataArray([5, 6, 7], dims='x')\n >>> arr.roll(x=1)\n \n array([7, 5, 6])\n Coordinates:\n * x (x) int64 2 0 1\n ","endLoc":2729,"header":"def roll(\n self,\n shifts: Mapping[Hashable, int] = None,\n roll_coords: bool = None,\n **shifts_kwargs: int\n ) -> \"DataArray\"","id":2218,"name":"roll","nodeType":"Function","startLoc":2681,"text":"def roll(\n self,\n shifts: Mapping[Hashable, int] = None,\n roll_coords: bool = None,\n **shifts_kwargs: int\n ) -> \"DataArray\":\n \"\"\"Roll this array by an offset along one or more dimensions.\n\n Unlike shift, roll may rotate all variables, including coordinates\n if specified. 
The direction of rotation is consistent with\n :py:func:`numpy.roll`.\n\n Parameters\n ----------\n shifts : Mapping with the form of {dim: offset}\n Integer offset to rotate each of the given dimensions.\n Positive offsets roll to the right; negative offsets roll to the\n left.\n roll_coords : bool\n Indicates whether to roll the coordinates by the offset\n The current default of roll_coords (None, equivalent to True) is\n deprecated and will change to False in a future version.\n Explicitly pass roll_coords to silence the warning.\n **shifts_kwargs : The keyword arguments form of ``shifts``.\n One of shifts or shifts_kwarg must be provided.\n\n Returns\n -------\n rolled : DataArray\n DataArray with the same attributes but rolled data and coordinates.\n\n See also\n --------\n shift\n\n Examples\n --------\n\n >>> arr = xr.DataArray([5, 6, 7], dims='x')\n >>> arr.roll(x=1)\n \n array([7, 5, 6])\n Coordinates:\n * x (x) int64 2 0 1\n \"\"\"\n ds = self._to_temp_dataset().roll(\n shifts=shifts, roll_coords=roll_coords, **shifts_kwargs\n )\n return self._from_temp_dataset(ds)"},{"col":0,"comment":"null","endLoc":31,"header":"def time_interpolate_na(shape, chunks, method, limit)","id":2219,"name":"time_interpolate_na","nodeType":"Function","startLoc":24,"text":"def time_interpolate_na(shape, chunks, method, limit):\n if chunks is not None:\n requires_dask()\n da = make_bench_data(shape, 0.1, chunks=chunks)\n actual = da.interpolate_na(dim=\"time\", method=\"linear\", limit=limit)\n\n if chunks is not None:\n actual = actual.compute()"},{"col":4,"comment":"null","endLoc":105,"header":"def __getitem__(self, key)","id":2220,"name":"__getitem__","nodeType":"Function","startLoc":101,"text":"def __getitem__(self, key):\n if isinstance(key, slice):\n return self.slice(start=key.start, stop=key.stop, step=key.step)\n else:\n return self.get(key)"},{"col":4,"comment":"null","endLoc":2733,"header":"@property\n def real(self) -> \"DataArray\"","id":2221,"name":"real","nodeType":"Function","startLoc":2731,"text":"@property\n def real(self) -> \"DataArray\":\n return self._replace(self.variable.real)"},{"col":4,"comment":"null","endLoc":2737,"header":"@property\n def imag(self) -> \"DataArray\"","id":2222,"name":"imag","nodeType":"Function","startLoc":2735,"text":"@property\n def imag(self) -> \"DataArray\":\n return self._replace(self.variable.imag)"},{"col":4,"comment":"\n Slice substrings from each element in the array.\n\n Parameters\n ----------\n start : int, optional\n Start position for slice operation.\n stop : int, optional\n Stop position for slice operation.\n step : int, optional\n Step size for slice operation.\n\n Returns\n -------\n sliced strings : same type as values\n ","endLoc":145,"header":"def slice(self, start=None, stop=None, step=None)","id":2223,"name":"slice","nodeType":"Function","startLoc":126,"text":"def slice(self, start=None, stop=None, step=None):\n \"\"\"\n Slice substrings from each element in the array.\n\n Parameters\n ----------\n start : int, optional\n Start position for slice operation.\n stop : int, optional\n Stop position for slice operation.\n step : int, optional\n Step size for slice operation.\n\n Returns\n -------\n sliced strings : same type as values\n \"\"\"\n s = slice(start, stop, step)\n f = lambda x: x[s]\n return self._apply(f)"},{"col":12,"endLoc":144,"id":2224,"nodeType":"Lambda","startLoc":144,"text":"lambda x: x[s]"},{"col":4,"comment":"Sort object by labels or values (along an axis).\n\n Sorts the dataarray, either along specified 
dimensions,\n or according to values of 1-D dataarrays that share dimension\n with calling object.\n\n If the input variables are dataarrays, then the dataarrays are aligned\n (via left-join) to the calling object prior to sorting by cell values.\n NaNs are sorted to the end, following Numpy convention.\n\n If multiple sorts along the same dimension is\n given, numpy's lexsort is performed along that dimension:\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.lexsort.html\n and the FIRST key in the sequence is used as the primary sort key,\n followed by the 2nd key, etc.\n\n Parameters\n ----------\n variables: hashable, DataArray, or sequence of either\n 1D DataArray objects or name(s) of 1D variable(s) in\n coords whose values are used to sort this array.\n ascending: boolean, optional\n Whether to sort by ascending or descending order.\n\n Returns\n -------\n sorted: DataArray\n A new dataarray where all the specified dims are sorted by dim\n labels.\n\n Examples\n --------\n\n >>> da = xr.DataArray(np.random.rand(5),\n ... coords=[pd.date_range('1/1/2000', periods=5)],\n ... dims='time')\n >>> da\n \n array([ 0.965471, 0.615637, 0.26532 , 0.270962, 0.552878])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...\n\n >>> da.sortby(da)\n \n array([ 0.26532 , 0.270962, 0.552878, 0.615637, 0.965471])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-03 2000-01-04 2000-01-05 ...\n ","endLoc":2844,"header":"def sortby(\n self,\n variables: Union[Hashable, \"DataArray\", Sequence[Union[Hashable, \"DataArray\"]]],\n ascending: bool = True,\n ) -> \"DataArray\"","id":2225,"name":"sortby","nodeType":"Function","startLoc":2790,"text":"def sortby(\n self,\n variables: Union[Hashable, \"DataArray\", Sequence[Union[Hashable, \"DataArray\"]]],\n ascending: bool = True,\n ) -> \"DataArray\":\n \"\"\"Sort object by labels or values (along an axis).\n\n Sorts the dataarray, either along specified dimensions,\n or according to values of 1-D dataarrays that share dimension\n with calling object.\n\n If the input variables are dataarrays, then the dataarrays are aligned\n (via left-join) to the calling object prior to sorting by cell values.\n NaNs are sorted to the end, following Numpy convention.\n\n If multiple sorts along the same dimension is\n given, numpy's lexsort is performed along that dimension:\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.lexsort.html\n and the FIRST key in the sequence is used as the primary sort key,\n followed by the 2nd key, etc.\n\n Parameters\n ----------\n variables: hashable, DataArray, or sequence of either\n 1D DataArray objects or name(s) of 1D variable(s) in\n coords whose values are used to sort this array.\n ascending: boolean, optional\n Whether to sort by ascending or descending order.\n\n Returns\n -------\n sorted: DataArray\n A new dataarray where all the specified dims are sorted by dim\n labels.\n\n Examples\n --------\n\n >>> da = xr.DataArray(np.random.rand(5),\n ... coords=[pd.date_range('1/1/2000', periods=5)],\n ... 
dims='time')\n >>> da\n \n array([ 0.965471, 0.615637, 0.26532 , 0.270962, 0.552878])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...\n\n >>> da.sortby(da)\n \n array([ 0.26532 , 0.270962, 0.552878, 0.615637, 0.965471])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-03 2000-01-04 2000-01-05 ...\n \"\"\"\n ds = self._to_temp_dataset().sortby(variables, ascending=ascending)\n return self._from_temp_dataset(ds)"},{"col":0,"comment":"null","endLoc":359,"header":"def inject_coarsen_methods(cls)","id":2226,"name":"inject_coarsen_methods","nodeType":"Function","startLoc":352,"text":"def inject_coarsen_methods(cls):\n # standard numpy reduce methods\n methods = [(name, getattr(duck_array_ops, name)) for name in NAN_REDUCE_METHODS]\n for name, f in methods:\n func = cls._reduce_method(f)\n func.__name__ = name\n func.__doc__ = _COARSEN_REDUCE_DOCSTRING_TEMPLATE.format(name=func.__name__)\n setattr(cls, name, func)"},{"attributeType":"null","col":4,"comment":"null","endLoc":18,"id":2227,"name":"has_bottleneck","nodeType":"Attribute","startLoc":18,"text":"has_bottleneck"},{"attributeType":"null","col":0,"comment":"null","endLoc":25,"id":2228,"name":"UNARY_OPS","nodeType":"Attribute","startLoc":25,"text":"UNARY_OPS"},{"attributeType":"null","col":0,"comment":"null","endLoc":26,"id":2229,"name":"CMP_BINARY_OPS","nodeType":"Attribute","startLoc":26,"text":"CMP_BINARY_OPS"},{"attributeType":"null","col":0,"comment":"null","endLoc":27,"id":2230,"name":"NUM_BINARY_OPS","nodeType":"Attribute","startLoc":27,"text":"NUM_BINARY_OPS"},{"attributeType":"null","col":0,"comment":"null","endLoc":42,"id":2231,"name":"NUMPY_SAME_METHODS","nodeType":"Attribute","startLoc":42,"text":"NUMPY_SAME_METHODS"},{"attributeType":"null","col":0,"comment":"null","endLoc":45,"id":2232,"name":"NUMPY_UNARY_METHODS","nodeType":"Attribute","startLoc":45,"text":"NUMPY_UNARY_METHODS"},{"attributeType":"null","col":0,"comment":"null","endLoc":46,"id":2233,"name":"PANDAS_UNARY_FUNCTIONS","nodeType":"Attribute","startLoc":46,"text":"PANDAS_UNARY_FUNCTIONS"},{"attributeType":"null","col":0,"comment":"null","endLoc":48,"id":2234,"name":"REDUCE_METHODS","nodeType":"Attribute","startLoc":48,"text":"REDUCE_METHODS"},{"attributeType":"null","col":0,"comment":"null","endLoc":49,"id":2235,"name":"NAN_REDUCE_METHODS","nodeType":"Attribute","startLoc":49,"text":"NAN_REDUCE_METHODS"},{"attributeType":"null","col":0,"comment":"null","endLoc":61,"id":2236,"name":"NAN_CUM_METHODS","nodeType":"Attribute","startLoc":61,"text":"NAN_CUM_METHODS"},{"attributeType":"null","col":0,"comment":"null","endLoc":65,"id":2237,"name":"_CUM_DOCSTRING_TEMPLATE","nodeType":"Attribute","startLoc":65,"text":"_CUM_DOCSTRING_TEMPLATE"},{"attributeType":"null","col":0,"comment":"null","endLoc":90,"id":2238,"name":"_REDUCE_DOCSTRING_TEMPLATE","nodeType":"Attribute","startLoc":90,"text":"_REDUCE_DOCSTRING_TEMPLATE"},{"attributeType":"null","col":0,"comment":"null","endLoc":116,"id":2239,"name":"_MINCOUNT_DOCSTRING","nodeType":"Attribute","startLoc":116,"text":"_MINCOUNT_DOCSTRING"},{"attributeType":"null","col":0,"comment":"null","endLoc":122,"id":2240,"name":"_COARSEN_REDUCE_DOCSTRING_TEMPLATE","nodeType":"Attribute","startLoc":122,"text":"_COARSEN_REDUCE_DOCSTRING_TEMPLATE"},{"attributeType":"null","col":0,"comment":"null","endLoc":300,"id":2241,"name":"NON_INPLACE_OP","nodeType":"Attribute","startLoc":300,"text":"NON_INPLACE_OP"},{"col":0,"comment":"","endLoc":6,"header":"ops.py#","id":2242,"name":"","nodeType":"Function","startLoc":
1,"text":"\"\"\"Define core operations for xarray objects.\n\nTODO(shoyer): rewrite this module, making use of xarray.core.computation,\nNumPy's __array_ufunc__ and mixin classes instead of the unintuitive \"inject\"\nfunctions.\n\"\"\"\n\ntry:\n import bottleneck as bn\n\n has_bottleneck = True\nexcept ImportError:\n # use numpy methods instead\n bn = np\n has_bottleneck = False\n\nUNARY_OPS = [\"neg\", \"pos\", \"abs\", \"invert\"]\n\nCMP_BINARY_OPS = [\"lt\", \"le\", \"ge\", \"gt\"]\n\nNUM_BINARY_OPS = [\n \"add\",\n \"sub\",\n \"mul\",\n \"truediv\",\n \"floordiv\",\n \"mod\",\n \"pow\",\n \"and\",\n \"xor\",\n \"or\",\n]\n\nNUMPY_SAME_METHODS = [\"item\", \"searchsorted\"]\n\nNUMPY_UNARY_METHODS = [\"astype\", \"argsort\", \"clip\", \"conj\", \"conjugate\"]\n\nPANDAS_UNARY_FUNCTIONS = [\"isnull\", \"notnull\"]\n\nREDUCE_METHODS = [\"all\", \"any\"]\n\nNAN_REDUCE_METHODS = [\n \"argmax\",\n \"argmin\",\n \"max\",\n \"min\",\n \"mean\",\n \"prod\",\n \"sum\",\n \"std\",\n \"var\",\n \"median\",\n]\n\nNAN_CUM_METHODS = [\"cumsum\", \"cumprod\"]\n\n_CUM_DOCSTRING_TEMPLATE = \"\"\"\\\nApply `{name}` along some dimension of {cls}.\n\nParameters\n----------\n{extra_args}\nskipna : bool, optional\n If True, skip missing values (as marked by NaN). By default, only\n skips missing values for float dtypes; other dtypes either do not\n have a sentinel missing value (int) or skipna=True has not been\n implemented (object, datetime64 or timedelta64).\nkeep_attrs : bool, optional\n If True, the attributes (`attrs`) will be copied from the original\n object to the new one. If False (default), the new object will be\n returned without attributes.\n**kwargs : dict\n Additional keyword arguments passed on to `{name}`.\n\nReturns\n-------\ncumvalue : {cls}\n New {cls} object with `{name}` applied to its data along the\n indicated dimension.\n\"\"\"\n\n_REDUCE_DOCSTRING_TEMPLATE = \"\"\"\\\nReduce this {cls}'s data by applying `{name}` along some dimension(s).\n\nParameters\n----------\n{extra_args}\nskipna : bool, optional\n If True, skip missing values (as marked by NaN). By default, only\n skips missing values for float dtypes; other dtypes either do not\n have a sentinel missing value (int) or skipna=True has not been\n implemented (object, datetime64 or timedelta64).{min_count_docs}\nkeep_attrs : bool, optional\n If True, the attributes (`attrs`) will be copied from the original\n object to the new one. If False (default), the new object will be\n returned without attributes.\n**kwargs : dict\n Additional keyword arguments passed on to the appropriate array\n function for calculating `{name}` on this object's data.\n\nReturns\n-------\nreduced : {cls}\n New {cls} object with `{name}` applied to its data and the\n indicated dimension(s) removed.\n\"\"\"\n\n_MINCOUNT_DOCSTRING = \"\"\"\nmin_count : int, default None\n The required number of valid values to perform the operation.\n If fewer than min_count non-NA values are present the result will\n be NA. 
New in version 0.10.8: Added with the default being None.\"\"\"\n\n_COARSEN_REDUCE_DOCSTRING_TEMPLATE = \"\"\"\\\nCoarsen this object by applying `{name}` along its dimensions.\n\nParameters\n----------\n**kwargs : dict\n Additional keyword arguments passed on to `{name}`.\n\nReturns\n-------\nreduced : DataArray or Dataset\n New object with `{name}` applied along its coasen dimnensions.\n\"\"\"\n\nNON_INPLACE_OP = {get_op(\"i\" + name): get_op(name) for name in NUM_BINARY_OPS}"},{"id":2243,"name":"dataset-diagram.tex","nodeType":"TextFile","path":"doc/_static","text":"\\documentclass[class=minimal,border=0pt,convert={density=300,outext=.png}]{standalone}\n% \\documentclass[class=minimal,border=0pt]{standalone}\n\\usepackage[scaled]{helvet}\n\\renewcommand*\\familydefault{\\sfdefault}\n\n% ===========================================================================\n% The code below (used to define the \\tikzcuboid command) is copied,\n% unmodified, from a tex.stackexchange.com answer by the user \"Tom Bombadil\":\n% http://tex.stackexchange.com/a/29882/8335\n%\n% It is licensed under the Creative Commons Attribution-ShareAlike 3.0\n% Unported license: http://creativecommons.org/licenses/by-sa/3.0/\n% ===========================================================================\n\n\\usepackage[usenames,dvipsnames]{color}\n\\usepackage{tikz}\n\\usepackage{keyval}\n\\usepackage{ifthen}\n\n%====================================\n%emphasize vertices --> switch and emph style (e.g. thick,black)\n%====================================\n\\makeatletter\n% Standard Values for Parameters\n\\newcommand{\\tikzcuboid@shiftx}{0}\n\\newcommand{\\tikzcuboid@shifty}{0}\n\\newcommand{\\tikzcuboid@dimx}{3}\n\\newcommand{\\tikzcuboid@dimy}{3}\n\\newcommand{\\tikzcuboid@dimz}{3}\n\\newcommand{\\tikzcuboid@scale}{1}\n\\newcommand{\\tikzcuboid@densityx}{1}\n\\newcommand{\\tikzcuboid@densityy}{1}\n\\newcommand{\\tikzcuboid@densityz}{1}\n\\newcommand{\\tikzcuboid@rotation}{0}\n\\newcommand{\\tikzcuboid@anglex}{0}\n\\newcommand{\\tikzcuboid@angley}{90}\n\\newcommand{\\tikzcuboid@anglez}{225}\n\\newcommand{\\tikzcuboid@scalex}{1}\n\\newcommand{\\tikzcuboid@scaley}{1}\n\\newcommand{\\tikzcuboid@scalez}{sqrt(0.5)}\n\\newcommand{\\tikzcuboid@linefront}{black}\n\\newcommand{\\tikzcuboid@linetop}{black}\n\\newcommand{\\tikzcuboid@lineright}{black}\n\\newcommand{\\tikzcuboid@fillfront}{white}\n\\newcommand{\\tikzcuboid@filltop}{white}\n\\newcommand{\\tikzcuboid@fillright}{white}\n\\newcommand{\\tikzcuboid@shaded}{N}\n\\newcommand{\\tikzcuboid@shadecolor}{black}\n\\newcommand{\\tikzcuboid@shadeperc}{25}\n\\newcommand{\\tikzcuboid@emphedge}{N}\n\\newcommand{\\tikzcuboid@emphstyle}{thick}\n\n% Definition of 
Keys\n\\define@key{tikzcuboid}{shiftx}[\\tikzcuboid@shiftx]{\\renewcommand{\\tikzcuboid@shiftx}{#1}}\n\\define@key{tikzcuboid}{shifty}[\\tikzcuboid@shifty]{\\renewcommand{\\tikzcuboid@shifty}{#1}}\n\\define@key{tikzcuboid}{dimx}[\\tikzcuboid@dimx]{\\renewcommand{\\tikzcuboid@dimx}{#1}}\n\\define@key{tikzcuboid}{dimy}[\\tikzcuboid@dimy]{\\renewcommand{\\tikzcuboid@dimy}{#1}}\n\\define@key{tikzcuboid}{dimz}[\\tikzcuboid@dimz]{\\renewcommand{\\tikzcuboid@dimz}{#1}}\n\\define@key{tikzcuboid}{scale}[\\tikzcuboid@scale]{\\renewcommand{\\tikzcuboid@scale}{#1}}\n\\define@key{tikzcuboid}{densityx}[\\tikzcuboid@densityx]{\\renewcommand{\\tikzcuboid@densityx}{#1}}\n\\define@key{tikzcuboid}{densityy}[\\tikzcuboid@densityy]{\\renewcommand{\\tikzcuboid@densityy}{#1}}\n\\define@key{tikzcuboid}{densityz}[\\tikzcuboid@densityz]{\\renewcommand{\\tikzcuboid@densityz}{#1}}\n\\define@key{tikzcuboid}{rotation}[\\tikzcuboid@rotation]{\\renewcommand{\\tikzcuboid@rotation}{#1}}\n\\define@key{tikzcuboid}{anglex}[\\tikzcuboid@anglex]{\\renewcommand{\\tikzcuboid@anglex}{#1}}\n\\define@key{tikzcuboid}{angley}[\\tikzcuboid@angley]{\\renewcommand{\\tikzcuboid@angley}{#1}}\n\\define@key{tikzcuboid}{anglez}[\\tikzcuboid@anglez]{\\renewcommand{\\tikzcuboid@anglez}{#1}}\n\\define@key{tikzcuboid}{scalex}[\\tikzcuboid@scalex]{\\renewcommand{\\tikzcuboid@scalex}{#1}}\n\\define@key{tikzcuboid}{scaley}[\\tikzcuboid@scaley]{\\renewcommand{\\tikzcuboid@scaley}{#1}}\n\\define@key{tikzcuboid}{scalez}[\\tikzcuboid@scalez]{\\renewcommand{\\tikzcuboid@scalez}{#1}}\n\\define@key{tikzcuboid}{linefront}[\\tikzcuboid@linefront]{\\renewcommand{\\tikzcuboid@linefront}{#1}}\n\\define@key{tikzcuboid}{linetop}[\\tikzcuboid@linetop]{\\renewcommand{\\tikzcuboid@linetop}{#1}}\n\\define@key{tikzcuboid}{lineright}[\\tikzcuboid@lineright]{\\renewcommand{\\tikzcuboid@lineright}{#1}}\n\\define@key{tikzcuboid}{fillfront}[\\tikzcuboid@fillfront]{\\renewcommand{\\tikzcuboid@fillfront}{#1}}\n\\define@key{tikzcuboid}{filltop}[\\tikzcuboid@filltop]{\\renewcommand{\\tikzcuboid@filltop}{#1}}\n\\define@key{tikzcuboid}{fillright}[\\tikzcuboid@fillright]{\\renewcommand{\\tikzcuboid@fillright}{#1}}\n\\define@key{tikzcuboid}{shaded}[\\tikzcuboid@shaded]{\\renewcommand{\\tikzcuboid@shaded}{#1}}\n\\define@key{tikzcuboid}{shadecolor}[\\tikzcuboid@shadecolor]{\\renewcommand{\\tikzcuboid@shadecolor}{#1}}\n\\define@key{tikzcuboid}{shadeperc}[\\tikzcuboid@shadeperc]{\\renewcommand{\\tikzcuboid@shadeperc}{#1}}\n\\define@key{tikzcuboid}{emphedge}[\\tikzcuboid@emphedge]{\\renewcommand{\\tikzcuboid@emphedge}{#1}}\n\\define@key{tikzcuboid}{emphstyle}[\\tikzcuboid@emphstyle]{\\renewcommand{\\tikzcuboid@emphstyle}{#1}}\n% Commands\n\\newcommand{\\tikzcuboid}[1]{\n \\setkeys{tikzcuboid}{#1} % Process Keys passed to command\n \\pgfmathsetmacro{\\vectorxx}{\\tikzcuboid@scalex*cos(\\tikzcuboid@anglex)}\n \\pgfmathsetmacro{\\vectorxy}{\\tikzcuboid@scalex*sin(\\tikzcuboid@anglex)}\n \\pgfmathsetmacro{\\vectoryx}{\\tikzcuboid@scaley*cos(\\tikzcuboid@angley)}\n \\pgfmathsetmacro{\\vectoryy}{\\tikzcuboid@scaley*sin(\\tikzcuboid@angley)}\n \\pgfmathsetmacro{\\vectorzx}{\\tikzcuboid@scalez*cos(\\tikzcuboid@anglez)}\n \\pgfmathsetmacro{\\vectorzy}{\\tikzcuboid@scalez*sin(\\tikzcuboid@anglez)}\n \\begin{scope}[xshift=\\tikzcuboid@shiftx, yshift=\\tikzcuboid@shifty, scale=\\tikzcuboid@scale, rotate=\\tikzcuboid@rotation, x={(\\vectorxx,\\vectorxy)}, y={(\\vectoryx,\\vectoryy)}, z={(\\vectorzx,\\vectorzy)}]\n \\pgfmathsetmacro{\\steppingx}{1/\\tikzcuboid@densityx}\n 
\\pgfmathsetmacro{\\steppingy}{1/\\tikzcuboid@densityy}\n \\pgfmathsetmacro{\\steppingz}{1/\\tikzcuboid@densityz}\n \\newcommand{\\dimx}{\\tikzcuboid@dimx}\n \\newcommand{\\dimy}{\\tikzcuboid@dimy}\n \\newcommand{\\dimz}{\\tikzcuboid@dimz}\n \\pgfmathsetmacro{\\secondx}{2*\\steppingx}\n \\pgfmathsetmacro{\\secondy}{2*\\steppingy}\n \\pgfmathsetmacro{\\secondz}{2*\\steppingz}\n \\foreach \\x in {\\steppingx,\\secondx,...,\\dimx}\n { \\foreach \\y in {\\steppingy,\\secondy,...,\\dimy}\n { \\pgfmathsetmacro{\\lowx}{(\\x-\\steppingx)}\n \\pgfmathsetmacro{\\lowy}{(\\y-\\steppingy)}\n \\filldraw[fill=\\tikzcuboid@fillfront,draw=\\tikzcuboid@linefront] (\\lowx,\\lowy,\\dimz) -- (\\lowx,\\y,\\dimz) -- (\\x,\\y,\\dimz) -- (\\x,\\lowy,\\dimz) -- cycle;\n\n }\n }\n \\foreach \\x in {\\steppingx,\\secondx,...,\\dimx}\n { \\foreach \\z in {\\steppingz,\\secondz,...,\\dimz}\n { \\pgfmathsetmacro{\\lowx}{(\\x-\\steppingx)}\n \\pgfmathsetmacro{\\lowz}{(\\z-\\steppingz)}\n \\filldraw[fill=\\tikzcuboid@filltop,draw=\\tikzcuboid@linetop] (\\lowx,\\dimy,\\lowz) -- (\\lowx,\\dimy,\\z) -- (\\x,\\dimy,\\z) -- (\\x,\\dimy,\\lowz) -- cycle;\n }\n }\n \\foreach \\y in {\\steppingy,\\secondy,...,\\dimy}\n { \\foreach \\z in {\\steppingz,\\secondz,...,\\dimz}\n { \\pgfmathsetmacro{\\lowy}{(\\y-\\steppingy)}\n \\pgfmathsetmacro{\\lowz}{(\\z-\\steppingz)}\n \\filldraw[fill=\\tikzcuboid@fillright,draw=\\tikzcuboid@lineright] (\\dimx,\\lowy,\\lowz) -- (\\dimx,\\lowy,\\z) -- (\\dimx,\\y,\\z) -- (\\dimx,\\y,\\lowz) -- cycle;\n }\n }\n \\ifthenelse{\\equal{\\tikzcuboid@emphedge}{Y}}%\n {\\draw[\\tikzcuboid@emphstyle](0,\\dimy,0) -- (\\dimx,\\dimy,0) -- (\\dimx,\\dimy,\\dimz) -- (0,\\dimy,\\dimz) -- cycle;%\n \\draw[\\tikzcuboid@emphstyle] (0,0,\\dimz) -- (0,\\dimy,\\dimz) -- (\\dimx,\\dimy,\\dimz) -- (\\dimx,0,\\dimz) -- cycle;%\n \\draw[\\tikzcuboid@emphstyle](\\dimx,0,0) -- (\\dimx,\\dimy,0) -- (\\dimx,\\dimy,\\dimz) -- (\\dimx,0,\\dimz) -- cycle;%\n }%\n {}\n \\end{scope}\n}\n\n\\makeatother\n\n\\begin{document}\n\n\\begin{tikzpicture}\n \\tikzcuboid{%\n shiftx=16cm,%\n shifty=8cm,%\n scale=1.00,%\n rotation=0,%\n densityx=2,%\n densityy=2,%\n densityz=2,%\n dimx=4,%\n dimy=3,%\n dimz=3,%\n linefront=teal!75!black,%\n linetop=teal!50!black,%\n lineright=teal!25!black,%\n fillfront=teal!25!white,%\n filltop=teal!50!white,%\n fillright=teal!75!white,%\n emphedge=Y,%\n emphstyle=very thick,\n }\n \\tikzcuboid{%\n shiftx=21cm,%\n shifty=8cm,%\n scale=1.00,%\n rotation=0,%\n densityx=2,%\n densityy=2,%\n densityz=2,%\n dimx=4,%\n dimy=3,%\n dimz=3,%\n linefront=purple!75!black,%\n linetop=purple!50!black,%\n lineright=purple!25!black,%\n fillfront=purple!25!white,%\n filltop=purple!50!white,%\n fillright=purple!75!white,%\n emphedge=Y,%\n emphstyle=very thick,\n }\n \\tikzcuboid{%\n shiftx=26.2cm,%\n shifty=8cm,%\n scale=1.00,%\n rotation=0,%\n densityx=10000,%\n densityy=2,%\n densityz=2,%\n dimx=0,%\n dimy=3,%\n dimz=3,%\n linefront=orange!75!black,%\n linetop=orange!50!black,%\n lineright=orange!25!black,%\n fillfront=orange!25!white,%\n filltop=orange!50!white,%\n fillright=orange!100!white,%\n emphedge=Y,%\n emphstyle=very thick,\n }\n \\tikzcuboid{%\n shiftx=27.6cm,%\n shifty=8cm,%\n scale=1.00,%\n rotation=0,%\n densityx=10000,%\n densityy=2,%\n densityz=2,%\n dimx=0,%\n dimy=3,%\n dimz=3,%\n linefront=purple!75!black,%\n linetop=purple!50!black,%\n lineright=purple!25!black,%\n fillfront=purple!25!white,%\n filltop=purple!50!white,%\n fillright=red!75!white,%\n emphedge=Y,%\n emphstyle=very thick,\n }\n 
\\tikzcuboid{%\n shiftx=28cm,%\n shifty=6.5cm,%\n scale=1.00,%\n rotation=0,%\n densityx=2,%\n densityx=2,%\n densityy=100,%\n densityz=100,%\n dimx=4,%\n dimy=0,%\n dimz=0,%\n emphedge=Y,%\n emphstyle=very thick,\n }\n \\tikzcuboid{%\n shiftx=28cm,%\n shifty=6.5cm,%\n scale=1.00,%\n rotation=0,%\n densityx=100,%\n densityy=2,%\n densityz=100,%\n dimx=0,%\n dimy=3,%\n dimz=0,%\n emphedge=Y,%\n emphstyle=very thick,\n }\n \\tikzcuboid{%\n shiftx=28cm,%\n shifty=6.5cm,%\n scale=1.00,%\n rotation=180,%\n densityx=100,%\n densityy=100,%\n densityz=2,%\n dimx=0,%\n dimy=0,%\n dimz=3,%\n emphedge=Y,%\n emphstyle=very thick,\n }\n \\node [font=\\fontsize{11}{11}\\selectfont] at (18,11.5) {temperature};\n \\node [font=\\fontsize{11}{11}\\selectfont] at (23,11.5) {precipitation};\n \\node [font=\\fontsize{11}{11}\\selectfont] at (25.8,11.5) {latitude};\n \\node [font=\\fontsize{11}{11}\\selectfont] at (27.5,11.47) {longitude};\n \\node [font=\\fontsize{11}{11}\\selectfont] at (28,10) {x};\n \\node [font=\\fontsize{11}{11}\\selectfont] at (29.5,8.5) {y};\n \\node [font=\\fontsize{11}{11}\\selectfont] at (32,7) {t};\n \\node [font=\\fontsize{11}{11}\\selectfont] at (31,10) {reference\\_time};\n \\fill (31,9.5) circle[radius=2pt];\n\\end{tikzpicture}\n\n\\end{document}\n"},{"id":2244,"name":"dataset-diagram-build.sh","nodeType":"TextFile","path":"doc/_static","text":"#!/usr/bin/env bash\npdflatex -shell-escape dataset-diagram.tex\n"},{"fileName":"indexing.py","filePath":"asv_bench/benchmarks","id":2245,"nodeType":"File","text":"import numpy as np\nimport pandas as pd\n\nimport xarray as xr\n\nfrom . import randint, randn, requires_dask\n\nnx = 3000\nny = 2000\nnt = 1000\n\nbasic_indexes = {\n \"1slice\": {\"x\": slice(0, 3)},\n \"1slice-1scalar\": {\"x\": 0, \"y\": slice(None, None, 3)},\n \"2slicess-1scalar\": {\"x\": slice(3, -3, 3), \"y\": 1, \"t\": slice(None, -3, 3)},\n}\n\nbasic_assignment_values = {\n \"1slice\": xr.DataArray(randn((3, ny), frac_nan=0.1), dims=[\"x\", \"y\"]),\n \"1slice-1scalar\": xr.DataArray(randn(int(ny / 3) + 1, frac_nan=0.1), dims=[\"y\"]),\n \"2slicess-1scalar\": xr.DataArray(\n randn(int((nx - 6) / 3), frac_nan=0.1), dims=[\"x\"]\n ),\n}\n\nouter_indexes = {\n \"1d\": {\"x\": randint(0, nx, 400)},\n \"2d\": {\"x\": randint(0, nx, 500), \"y\": randint(0, ny, 400)},\n \"2d-1scalar\": {\"x\": randint(0, nx, 100), \"y\": 1, \"t\": randint(0, nt, 400)},\n}\n\nouter_assignment_values = {\n \"1d\": xr.DataArray(randn((400, ny), frac_nan=0.1), dims=[\"x\", \"y\"]),\n \"2d\": xr.DataArray(randn((500, 400), frac_nan=0.1), dims=[\"x\", \"y\"]),\n \"2d-1scalar\": xr.DataArray(randn(100, frac_nan=0.1), dims=[\"x\"]),\n}\n\nvectorized_indexes = {\n \"1-1d\": {\"x\": xr.DataArray(randint(0, nx, 400), dims=\"a\")},\n \"2-1d\": {\n \"x\": xr.DataArray(randint(0, nx, 400), dims=\"a\"),\n \"y\": xr.DataArray(randint(0, ny, 400), dims=\"a\"),\n },\n \"3-2d\": {\n \"x\": xr.DataArray(randint(0, nx, 400).reshape(4, 100), dims=[\"a\", \"b\"]),\n \"y\": xr.DataArray(randint(0, ny, 400).reshape(4, 100), dims=[\"a\", \"b\"]),\n \"t\": xr.DataArray(randint(0, nt, 400).reshape(4, 100), dims=[\"a\", \"b\"]),\n },\n}\n\nvectorized_assignment_values = {\n \"1-1d\": xr.DataArray(randn((400, 2000)), dims=[\"a\", \"y\"], coords={\"a\": randn(400)}),\n \"2-1d\": xr.DataArray(randn(400), dims=[\"a\"], coords={\"a\": randn(400)}),\n \"3-2d\": xr.DataArray(\n randn((4, 100)), dims=[\"a\", \"b\"], coords={\"a\": randn(4), \"b\": randn(100)}\n ),\n}\n\n\nclass Base:\n def setup(self, key):\n self.ds = 
xr.Dataset(\n {\n \"var1\": ((\"x\", \"y\"), randn((nx, ny), frac_nan=0.1)),\n \"var2\": ((\"x\", \"t\"), randn((nx, nt))),\n \"var3\": ((\"t\",), randn(nt)),\n },\n coords={\n \"x\": np.arange(nx),\n \"y\": np.linspace(0, 1, ny),\n \"t\": pd.date_range(\"1970-01-01\", periods=nt, freq=\"D\"),\n \"x_coords\": (\"x\", np.linspace(1.1, 2.1, nx)),\n },\n )\n\n\nclass Indexing(Base):\n def time_indexing_basic(self, key):\n self.ds.isel(**basic_indexes[key]).load()\n\n time_indexing_basic.param_names = [\"key\"]\n time_indexing_basic.params = [list(basic_indexes.keys())]\n\n def time_indexing_outer(self, key):\n self.ds.isel(**outer_indexes[key]).load()\n\n time_indexing_outer.param_names = [\"key\"]\n time_indexing_outer.params = [list(outer_indexes.keys())]\n\n def time_indexing_vectorized(self, key):\n self.ds.isel(**vectorized_indexes[key]).load()\n\n time_indexing_vectorized.param_names = [\"key\"]\n time_indexing_vectorized.params = [list(vectorized_indexes.keys())]\n\n\nclass Assignment(Base):\n def time_assignment_basic(self, key):\n ind = basic_indexes[key]\n val = basic_assignment_values[key]\n self.ds[\"var1\"][ind.get(\"x\", slice(None)), ind.get(\"y\", slice(None))] = val\n\n time_assignment_basic.param_names = [\"key\"]\n time_assignment_basic.params = [list(basic_indexes.keys())]\n\n def time_assignment_outer(self, key):\n ind = outer_indexes[key]\n val = outer_assignment_values[key]\n self.ds[\"var1\"][ind.get(\"x\", slice(None)), ind.get(\"y\", slice(None))] = val\n\n time_assignment_outer.param_names = [\"key\"]\n time_assignment_outer.params = [list(outer_indexes.keys())]\n\n def time_assignment_vectorized(self, key):\n ind = vectorized_indexes[key]\n val = vectorized_assignment_values[key]\n self.ds[\"var1\"][ind.get(\"x\", slice(None)), ind.get(\"y\", slice(None))] = val\n\n time_assignment_vectorized.param_names = [\"key\"]\n time_assignment_vectorized.params = [list(vectorized_indexes.keys())]\n\n\nclass IndexingDask(Indexing):\n def setup(self, key):\n requires_dask()\n super().setup(key)\n self.ds = self.ds.chunk({\"x\": 100, \"y\": 50, \"t\": 50})\n\n\nclass BooleanIndexing:\n # https://github.com/pydata/xarray/issues/2227\n def setup(self):\n self.ds = xr.Dataset(\n {\"a\": (\"time\", np.arange(10_000_000))},\n coords={\"time\": np.arange(10_000_000)},\n )\n self.time_filter = self.ds.time > 50_000\n\n def time_indexing(self):\n self.ds.isel(time=self.time_filter)\n"},{"col":4,"comment":"\n Extract element from indexable in each element in the array.\n\n Parameters\n ----------\n i : int\n Position of element to extract.\n default : optional\n Value for out-of-range index. If not specified (None) defaults to\n an empty string.\n\n Returns\n -------\n items : array of objects\n ","endLoc":124,"header":"def get(self, i)","id":2246,"name":"get","nodeType":"Function","startLoc":107,"text":"def get(self, i):\n \"\"\"\n Extract element from indexable in each element in the array.\n\n Parameters\n ----------\n i : int\n Position of element to extract.\n default : optional\n Value for out-of-range index. 
If not specified (None) defaults to\n an empty string.\n\n Returns\n -------\n items : array of objects\n \"\"\"\n obj = slice(-1, None) if i == -1 else slice(i, i + 1)\n return self._apply(lambda x: x[obj])"},{"col":4,"comment":"Compute the qth quantile of the data along the specified dimension.\n\n Returns the qth quantiles(s) of the array elements.\n\n Parameters\n ----------\n q : float in range of [0,1] or array-like of floats\n Quantile to compute, which must be between 0 and 1 inclusive.\n dim : hashable or sequence of hashable, optional\n Dimension(s) over which to apply quantile.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to\n use when the desired quantile lies between two data points\n ``i < j``:\n\n - linear: ``i + (j - i) * fraction``, where ``fraction`` is\n the fractional part of the index surrounded by ``i`` and\n ``j``.\n - lower: ``i``.\n - higher: ``j``.\n - nearest: ``i`` or ``j``, whichever is nearest.\n - midpoint: ``(i + j) / 2``.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n\n Returns\n -------\n quantiles : DataArray\n If `q` is a single quantile, then the result\n is a scalar. If multiple percentiles are given, first axis of\n the result corresponds to the quantile and a quantile dimension\n is added to the return array. The other dimensions are the\n dimensions that remain after the reduction of the array.\n\n See Also\n --------\n numpy.nanpercentile, pandas.Series.quantile, Dataset.quantile\n ","endLoc":2897,"header":"def quantile(\n self,\n q: Any,\n dim: Union[Hashable, Sequence[Hashable], None] = None,\n interpolation: str = \"linear\",\n keep_attrs: bool = None,\n ) -> \"DataArray\"","id":2247,"name":"quantile","nodeType":"Function","startLoc":2846,"text":"def quantile(\n self,\n q: Any,\n dim: Union[Hashable, Sequence[Hashable], None] = None,\n interpolation: str = \"linear\",\n keep_attrs: bool = None,\n ) -> \"DataArray\":\n \"\"\"Compute the qth quantile of the data along the specified dimension.\n\n Returns the qth quantiles(s) of the array elements.\n\n Parameters\n ----------\n q : float in range of [0,1] or array-like of floats\n Quantile to compute, which must be between 0 and 1 inclusive.\n dim : hashable or sequence of hashable, optional\n Dimension(s) over which to apply quantile.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to\n use when the desired quantile lies between two data points\n ``i < j``:\n\n - linear: ``i + (j - i) * fraction``, where ``fraction`` is\n the fractional part of the index surrounded by ``i`` and\n ``j``.\n - lower: ``i``.\n - higher: ``j``.\n - nearest: ``i`` or ``j``, whichever is nearest.\n - midpoint: ``(i + j) / 2``.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n\n Returns\n -------\n quantiles : DataArray\n If `q` is a single quantile, then the result\n is a scalar. If multiple percentiles are given, first axis of\n the result corresponds to the quantile and a quantile dimension\n is added to the return array. 
The other dimensions are the\n dimensions that remain after the reduction of the array.\n\n See Also\n --------\n numpy.nanpercentile, pandas.Series.quantile, Dataset.quantile\n \"\"\"\n\n ds = self._to_temp_dataset().quantile(\n q, dim=dim, keep_attrs=keep_attrs, interpolation=interpolation\n )\n return self._from_temp_dataset(ds)"},{"col":27,"endLoc":124,"id":2248,"nodeType":"Lambda","startLoc":124,"text":"lambda x: x[obj]"},{"col":4,"comment":"\n Replace a positional slice of a string with another value.\n\n Parameters\n ----------\n start : int, optional\n Left index position to use for the slice. If not specified (None),\n the slice is unbounded on the left, i.e. slice from the start\n of the string.\n stop : int, optional\n Right index position to use for the slice. If not specified (None),\n the slice is unbounded on the right, i.e. slice until the\n end of the string.\n repl : str, optional\n String for replacement. If not specified, the sliced region\n is replaced with an empty string.\n\n Returns\n -------\n replaced : same type as values\n ","endLoc":184,"header":"def slice_replace(self, start=None, stop=None, repl=\"\")","id":2249,"name":"slice_replace","nodeType":"Function","startLoc":147,"text":"def slice_replace(self, start=None, stop=None, repl=\"\"):\n \"\"\"\n Replace a positional slice of a string with another value.\n\n Parameters\n ----------\n start : int, optional\n Left index position to use for the slice. If not specified (None),\n the slice is unbounded on the left, i.e. slice from the start\n of the string.\n stop : int, optional\n Right index position to use for the slice. If not specified (None),\n the slice is unbounded on the right, i.e. slice until the\n end of the string.\n repl : str, optional\n String for replacement. If not specified, the sliced region\n is replaced with an empty string.\n\n Returns\n -------\n replaced : same type as values\n \"\"\"\n repl = self._obj.dtype.type(repl)\n\n def f(x):\n if len(x[start:stop]) == 0:\n local_stop = start\n else:\n local_stop = stop\n y = self._obj.dtype.type(\"\")\n if start is not None:\n y += x[:start]\n y += repl\n if stop is not None:\n y += x[local_stop:]\n return y\n\n return self._apply(f)"},{"col":4,"comment":"Ranks the data.\n\n Equal values are assigned a rank that is the average of the ranks that\n would have been otherwise assigned to all of the values within that\n set. Ranks begin at 1, not 0. If pct, computes percentage ranks.\n\n NaNs in the input array are returned as NaNs.\n\n The `bottleneck` library is required.\n\n Parameters\n ----------\n dim : hashable\n Dimension over which to compute rank.\n pct : bool, optional\n If True, compute percentage ranks, otherwise compute integer ranks.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. 
If False (default), the new\n object will be returned without attributes.\n\n Returns\n -------\n ranked : DataArray\n DataArray with the same coordinates and dtype 'float64'.\n\n Examples\n --------\n\n >>> arr = xr.DataArray([5, 6, 7], dims='x')\n >>> arr.rank('x')\n \n array([ 1., 2., 3.])\n Dimensions without coordinates: x\n ","endLoc":2939,"header":"def rank(\n self, dim: Hashable, pct: bool = False, keep_attrs: bool = None\n ) -> \"DataArray\"","id":2250,"name":"rank","nodeType":"Function","startLoc":2899,"text":"def rank(\n self, dim: Hashable, pct: bool = False, keep_attrs: bool = None\n ) -> \"DataArray\":\n \"\"\"Ranks the data.\n\n Equal values are assigned a rank that is the average of the ranks that\n would have been otherwise assigned to all of the values within that\n set. Ranks begin at 1, not 0. If pct, computes percentage ranks.\n\n NaNs in the input array are returned as NaNs.\n\n The `bottleneck` library is required.\n\n Parameters\n ----------\n dim : hashable\n Dimension over which to compute rank.\n pct : bool, optional\n If True, compute percentage ranks, otherwise compute integer ranks.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n\n Returns\n -------\n ranked : DataArray\n DataArray with the same coordinates and dtype 'float64'.\n\n Examples\n --------\n\n >>> arr = xr.DataArray([5, 6, 7], dims='x')\n >>> arr.rank('x')\n \n array([ 1., 2., 3.])\n Dimensions without coordinates: x\n \"\"\"\n\n ds = self._to_temp_dataset().rank(dim, pct=pct, keep_attrs=keep_attrs)\n return self._from_temp_dataset(ds)"},{"className":"Base","col":0,"comment":"null","endLoc":74,"id":2251,"nodeType":"Class","startLoc":60,"text":"class Base:\n def setup(self, key):\n self.ds = xr.Dataset(\n {\n \"var1\": ((\"x\", \"y\"), randn((nx, ny), frac_nan=0.1)),\n \"var2\": ((\"x\", \"t\"), randn((nx, nt))),\n \"var3\": ((\"t\",), randn(nt)),\n },\n coords={\n \"x\": np.arange(nx),\n \"y\": np.linspace(0, 1, ny),\n \"t\": pd.date_range(\"1970-01-01\", periods=nt, freq=\"D\"),\n \"x_coords\": (\"x\", np.linspace(1.1, 2.1, nx)),\n },\n )"},{"col":4,"comment":"null","endLoc":74,"header":"def setup(self, key)","id":2252,"name":"setup","nodeType":"Function","startLoc":61,"text":"def setup(self, key):\n self.ds = xr.Dataset(\n {\n \"var1\": ((\"x\", \"y\"), randn((nx, ny), frac_nan=0.1)),\n \"var2\": ((\"x\", \"t\"), randn((nx, nt))),\n \"var3\": ((\"t\",), randn(nt)),\n },\n coords={\n \"x\": np.arange(nx),\n \"y\": np.linspace(0, 1, ny),\n \"t\": pd.date_range(\"1970-01-01\", periods=nt, freq=\"D\"),\n \"x_coords\": (\"x\", np.linspace(1.1, 2.1, nx)),\n },\n )"},{"col":0,"comment":"null","endLoc":49,"header":"def time_ffill(shape, chunks, limit)","id":2253,"name":"time_ffill","nodeType":"Function","startLoc":43,"text":"def time_ffill(shape, chunks, limit):\n\n da = make_bench_data(shape, 0.1, chunks=chunks)\n actual = da.ffill(dim=\"time\", limit=limit)\n\n if chunks is not None:\n actual = actual.compute()"},{"fileName":"combine.py","filePath":"asv_bench/benchmarks","id":2254,"nodeType":"File","text":"import numpy as np\n\nimport xarray as xr\n\n\nclass Combine:\n \"\"\"Benchmark concatenating and merging large datasets\"\"\"\n\n def setup(self):\n \"\"\"Create 4 datasets with two different variables\"\"\"\n\n t_size, x_size, y_size = 100, 900, 800\n t = np.arange(t_size)\n data = np.random.randn(t_size, x_size, y_size)\n\n self.dsA0 = 
xr.Dataset(\n {\"A\": xr.DataArray(data, coords={\"T\": t}, dims=(\"T\", \"X\", \"Y\"))}\n )\n self.dsA1 = xr.Dataset(\n {\"A\": xr.DataArray(data, coords={\"T\": t + t_size}, dims=(\"T\", \"X\", \"Y\"))}\n )\n self.dsB0 = xr.Dataset(\n {\"B\": xr.DataArray(data, coords={\"T\": t}, dims=(\"T\", \"X\", \"Y\"))}\n )\n self.dsB1 = xr.Dataset(\n {\"B\": xr.DataArray(data, coords={\"T\": t + t_size}, dims=(\"T\", \"X\", \"Y\"))}\n )\n\n def time_combine_manual(self):\n datasets = [[self.dsA0, self.dsA1], [self.dsB0, self.dsB1]]\n\n xr.combine_manual(datasets, concat_dim=[None, \"t\"])\n\n def time_auto_combine(self):\n \"\"\"Also has to load and arrange t coordinate\"\"\"\n datasets = [self.dsA0, self.dsA1, self.dsB0, self.dsB1]\n\n xr.combine_auto(datasets)\n"},{"col":0,"comment":"null","endLoc":252,"header":"@contextlib.contextmanager\ndef _ignore_warnings_if(condition)","id":2255,"name":"_ignore_warnings_if","nodeType":"Function","startLoc":245,"text":"@contextlib.contextmanager\ndef _ignore_warnings_if(condition):\n if condition:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n yield\n else:\n yield"},{"className":"Combine","col":0,"comment":"Benchmark concatenating and merging large datasets","endLoc":38,"id":2256,"nodeType":"Class","startLoc":6,"text":"class Combine:\n \"\"\"Benchmark concatenating and merging large datasets\"\"\"\n\n def setup(self):\n \"\"\"Create 4 datasets with two different variables\"\"\"\n\n t_size, x_size, y_size = 100, 900, 800\n t = np.arange(t_size)\n data = np.random.randn(t_size, x_size, y_size)\n\n self.dsA0 = xr.Dataset(\n {\"A\": xr.DataArray(data, coords={\"T\": t}, dims=(\"T\", \"X\", \"Y\"))}\n )\n self.dsA1 = xr.Dataset(\n {\"A\": xr.DataArray(data, coords={\"T\": t + t_size}, dims=(\"T\", \"X\", \"Y\"))}\n )\n self.dsB0 = xr.Dataset(\n {\"B\": xr.DataArray(data, coords={\"T\": t}, dims=(\"T\", \"X\", \"Y\"))}\n )\n self.dsB1 = xr.Dataset(\n {\"B\": xr.DataArray(data, coords={\"T\": t + t_size}, dims=(\"T\", \"X\", \"Y\"))}\n )\n\n def time_combine_manual(self):\n datasets = [[self.dsA0, self.dsA1], [self.dsB0, self.dsB1]]\n\n xr.combine_manual(datasets, concat_dim=[None, \"t\"])\n\n def time_auto_combine(self):\n \"\"\"Also has to load and arrange t coordinate\"\"\"\n datasets = [self.dsA0, self.dsA1, self.dsB0, self.dsB1]\n\n xr.combine_auto(datasets)"},{"col":4,"comment":"Create 4 datasets with two different variables","endLoc":27,"header":"def setup(self)","id":2257,"name":"setup","nodeType":"Function","startLoc":9,"text":"def setup(self):\n \"\"\"Create 4 datasets with two different variables\"\"\"\n\n t_size, x_size, y_size = 100, 900, 800\n t = np.arange(t_size)\n data = np.random.randn(t_size, x_size, y_size)\n\n self.dsA0 = xr.Dataset(\n {\"A\": xr.DataArray(data, coords={\"T\": t}, dims=(\"T\", \"X\", \"Y\"))}\n )\n self.dsA1 = xr.Dataset(\n {\"A\": xr.DataArray(data, coords={\"T\": t + t_size}, dims=(\"T\", \"X\", \"Y\"))}\n )\n self.dsB0 = xr.Dataset(\n {\"B\": xr.DataArray(data, coords={\"T\": t}, dims=(\"T\", \"X\", \"Y\"))}\n )\n self.dsB1 = xr.Dataset(\n {\"B\": xr.DataArray(data, coords={\"T\": t + t_size}, dims=(\"T\", \"X\", \"Y\"))}\n )"},{"col":0,"comment":"null","endLoc":292,"header":"def _create_nan_agg_method(name, coerce_strings=False)","id":2258,"name":"_create_nan_agg_method","nodeType":"Function","startLoc":255,"text":"def _create_nan_agg_method(name, coerce_strings=False):\n from . 
import nanops\n\n def f(values, axis=None, skipna=None, **kwargs):\n if kwargs.pop(\"out\", None) is not None:\n raise TypeError(\"`out` is not valid for {}\".format(name))\n\n values = asarray(values)\n\n if coerce_strings and values.dtype.kind in \"SU\":\n values = values.astype(object)\n\n func = None\n if skipna or (skipna is None and values.dtype.kind in \"cfO\"):\n nanname = \"nan\" + name\n func = getattr(nanops, nanname)\n else:\n func = _dask_or_eager_func(name)\n\n try:\n return func(values, axis=axis, **kwargs)\n except AttributeError:\n if isinstance(values, dask_array_type):\n try: # dask/dask#3133 dask sometimes needs dtype argument\n # if func does not accept dtype, then raises TypeError\n return func(values, axis=axis, dtype=values.dtype, **kwargs)\n except (AttributeError, TypeError):\n msg = \"%s is not yet implemented on dask arrays\" % name\n else:\n msg = (\n \"%s is not available with skipna=False with the \"\n \"installed version of numpy; upgrade to numpy 1.12 \"\n \"or newer to use skipna=True or skipna=None\" % name\n )\n raise NotImplementedError(msg)\n\n f.__name__ = name\n return f"},{"col":4,"comment":"\n Convert strings in the array to be capitalized.\n\n Returns\n -------\n capitalized : same type as values\n ","endLoc":194,"header":"def capitalize(self)","id":2259,"name":"capitalize","nodeType":"Function","startLoc":186,"text":"def capitalize(self):\n \"\"\"\n Convert strings in the array to be capitalized.\n\n Returns\n -------\n capitalized : same type as values\n \"\"\"\n return self._apply(lambda x: x.capitalize())"},{"col":27,"endLoc":194,"id":2260,"nodeType":"Lambda","startLoc":194,"text":"lambda x: x.capitalize()"},{"col":4,"comment":" Differentiate the array with the second order accurate central\n differences.\n\n .. note::\n This feature is limited to simple cartesian geometry, i.e. coord\n must be one dimensional.\n\n Parameters\n ----------\n coord: hashable\n The coordinate to be used to compute the gradient.\n edge_order: 1 or 2. Default 1\n N-th order accurate differences at the boundaries.\n datetime_unit: None or any of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',\n 'us', 'ns', 'ps', 'fs', 'as'}\n Unit to compute gradient. Only valid for datetime coordinate.\n\n Returns\n -------\n differentiated: DataArray\n\n See also\n --------\n numpy.gradient: corresponding numpy function\n\n Examples\n --------\n\n >>> da = xr.DataArray(np.arange(12).reshape(4, 3), dims=['x', 'y'],\n ... coords={'x': [0, 0.1, 1.1, 1.2]})\n >>> da\n \n array([[ 0, 1, 2],\n [ 3, 4, 5],\n [ 6, 7, 8],\n [ 9, 10, 11]])\n Coordinates:\n * x (x) float64 0.0 0.1 1.1 1.2\n Dimensions without coordinates: y\n >>>\n >>> da.differentiate('x')\n \n array([[30. , 30. , 30. ],\n [27.545455, 27.545455, 27.545455],\n [27.545455, 27.545455, 27.545455],\n [30. , 30. , 30. ]])\n Coordinates:\n * x (x) float64 0.0 0.1 1.1 1.2\n Dimensions without coordinates: y\n ","endLoc":2995,"header":"def differentiate(\n self, coord: Hashable, edge_order: int = 1, datetime_unit: str = None\n ) -> \"DataArray\"","id":2261,"name":"differentiate","nodeType":"Function","startLoc":2941,"text":"def differentiate(\n self, coord: Hashable, edge_order: int = 1, datetime_unit: str = None\n ) -> \"DataArray\":\n \"\"\" Differentiate the array with the second order accurate central\n differences.\n\n .. note::\n This feature is limited to simple cartesian geometry, i.e. 
coord\n must be one dimensional.\n\n Parameters\n ----------\n coord: hashable\n The coordinate to be used to compute the gradient.\n edge_order: 1 or 2. Default 1\n N-th order accurate differences at the boundaries.\n datetime_unit: None or any of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',\n 'us', 'ns', 'ps', 'fs', 'as'}\n Unit to compute gradient. Only valid for datetime coordinate.\n\n Returns\n -------\n differentiated: DataArray\n\n See also\n --------\n numpy.gradient: corresponding numpy function\n\n Examples\n --------\n\n >>> da = xr.DataArray(np.arange(12).reshape(4, 3), dims=['x', 'y'],\n ... coords={'x': [0, 0.1, 1.1, 1.2]})\n >>> da\n \n array([[ 0, 1, 2],\n [ 3, 4, 5],\n [ 6, 7, 8],\n [ 9, 10, 11]])\n Coordinates:\n * x (x) float64 0.0 0.1 1.1 1.2\n Dimensions without coordinates: y\n >>>\n >>> da.differentiate('x')\n \n array([[30. , 30. , 30. ],\n [27.545455, 27.545455, 27.545455],\n [27.545455, 27.545455, 27.545455],\n [30. , 30. , 30. ]])\n Coordinates:\n * x (x) float64 0.0 0.1 1.1 1.2\n Dimensions without coordinates: y\n \"\"\"\n ds = self._to_temp_dataset().differentiate(coord, edge_order, datetime_unit)\n return self._from_temp_dataset(ds)"},{"col":4,"comment":"\n Convert strings in the array to lowercase.\n\n Returns\n -------\n lowerd : same type as values\n ","endLoc":204,"header":"def lower(self)","id":2262,"name":"lower","nodeType":"Function","startLoc":196,"text":"def lower(self):\n \"\"\"\n Convert strings in the array to lowercase.\n\n Returns\n -------\n lowerd : same type as values\n \"\"\"\n return self._apply(lambda x: x.lower())"},{"col":27,"endLoc":204,"id":2263,"nodeType":"Lambda","startLoc":204,"text":"lambda x: x.lower()"},{"col":4,"comment":"\n Convert strings in the array to be swapcased.\n\n Returns\n -------\n swapcased : same type as values\n ","endLoc":214,"header":"def swapcase(self)","id":2264,"name":"swapcase","nodeType":"Function","startLoc":206,"text":"def swapcase(self):\n \"\"\"\n Convert strings in the array to be swapcased.\n\n Returns\n -------\n swapcased : same type as values\n \"\"\"\n return self._apply(lambda x: x.swapcase())"},{"col":27,"endLoc":214,"id":2265,"nodeType":"Lambda","startLoc":214,"text":"lambda x: x.swapcase()"},{"col":4,"comment":"\n Convert strings in the array to titlecase.\n\n Returns\n -------\n titled : same type as values\n ","endLoc":224,"header":"def title(self)","id":2266,"name":"title","nodeType":"Function","startLoc":216,"text":"def title(self):\n \"\"\"\n Convert strings in the array to titlecase.\n\n Returns\n -------\n titled : same type as values\n \"\"\"\n return self._apply(lambda x: x.title())"},{"col":27,"endLoc":224,"id":2267,"nodeType":"Lambda","startLoc":224,"text":"lambda x: x.title()"},{"col":4,"comment":"\n Convert strings in the array to uppercase.\n\n Returns\n -------\n uppered : same type as values\n ","endLoc":234,"header":"def upper(self)","id":2268,"name":"upper","nodeType":"Function","startLoc":226,"text":"def upper(self):\n \"\"\"\n Convert strings in the array to uppercase.\n\n Returns\n -------\n uppered : same type as values\n \"\"\"\n return self._apply(lambda x: x.upper())"},{"col":27,"endLoc":234,"id":2269,"nodeType":"Lambda","startLoc":234,"text":"lambda x: x.upper()"},{"col":4,"comment":"\n Check whether all characters in each string are alphanumeric.\n\n Returns\n -------\n isalnum : array of bool\n Array of boolean values with the same shape as the original array.\n ","endLoc":245,"header":"def 
isalnum(self)","id":2270,"name":"isalnum","nodeType":"Function","startLoc":236,"text":"def isalnum(self):\n \"\"\"\n Check whether all characters in each string are alphanumeric.\n\n Returns\n -------\n isalnum : array of bool\n Array of boolean values with the same shape as the original array.\n \"\"\"\n return self._apply(lambda x: x.isalnum(), dtype=bool)"},{"col":27,"endLoc":245,"id":2271,"nodeType":"Lambda","startLoc":245,"text":"lambda x: x.isalnum()"},{"col":4,"comment":" integrate the array with the trapezoidal rule.\n\n .. note::\n This feature is limited to simple cartesian geometry, i.e. coord\n must be one dimensional.\n\n Parameters\n ----------\n dim: hashable, or a sequence of hashable\n Coordinate(s) used for the integration.\n datetime_unit: str, optional\n Can be used to specify the unit if datetime coordinate is used.\n One of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns',\n 'ps', 'fs', 'as'}\n\n Returns\n -------\n integrated: DataArray\n\n See also\n --------\n numpy.trapz: corresponding numpy function\n\n Examples\n --------\n\n >>> da = xr.DataArray(np.arange(12).reshape(4, 3), dims=['x', 'y'],\n ... coords={'x': [0, 0.1, 1.1, 1.2]})\n >>> da\n \n array([[ 0, 1, 2],\n [ 3, 4, 5],\n [ 6, 7, 8],\n [ 9, 10, 11]])\n Coordinates:\n * x (x) float64 0.0 0.1 1.1 1.2\n Dimensions without coordinates: y\n >>>\n >>> da.integrate('x')\n \n array([5.4, 6.6, 7.8])\n Dimensions without coordinates: y\n ","endLoc":3044,"header":"def integrate(\n self, dim: Union[Hashable, Sequence[Hashable]], datetime_unit: str = None\n ) -> \"DataArray\"","id":2272,"name":"integrate","nodeType":"Function","startLoc":2997,"text":"def integrate(\n self, dim: Union[Hashable, Sequence[Hashable]], datetime_unit: str = None\n ) -> \"DataArray\":\n \"\"\" integrate the array with the trapezoidal rule.\n\n .. note::\n This feature is limited to simple cartesian geometry, i.e. coord\n must be one dimensional.\n\n Parameters\n ----------\n dim: hashable, or a sequence of hashable\n Coordinate(s) used for the integration.\n datetime_unit: str, optional\n Can be used to specify the unit if datetime coordinate is used.\n One of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns',\n 'ps', 'fs', 'as'}\n\n Returns\n -------\n integrated: DataArray\n\n See also\n --------\n numpy.trapz: corresponding numpy function\n\n Examples\n --------\n\n >>> da = xr.DataArray(np.arange(12).reshape(4, 3), dims=['x', 'y'],\n ... 
coords={'x': [0, 0.1, 1.1, 1.2]})\n >>> da\n \n array([[ 0, 1, 2],\n [ 3, 4, 5],\n [ 6, 7, 8],\n [ 9, 10, 11]])\n Coordinates:\n * x (x) float64 0.0 0.1 1.1 1.2\n Dimensions without coordinates: y\n >>>\n >>> da.integrate('x')\n \n array([5.4, 6.6, 7.8])\n Dimensions without coordinates: y\n \"\"\"\n ds = self._to_temp_dataset().integrate(dim, datetime_unit)\n return self._from_temp_dataset(ds)"},{"col":4,"comment":"\n Check whether all characters in each string are alphabetic.\n\n Returns\n -------\n isalpha : array of bool\n Array of boolean values with the same shape as the original array.\n ","endLoc":256,"header":"def isalpha(self)","id":2273,"name":"isalpha","nodeType":"Function","startLoc":247,"text":"def isalpha(self):\n \"\"\"\n Check whether all characters in each string are alphabetic.\n\n Returns\n -------\n isalpha : array of bool\n Array of boolean values with the same shape as the original array.\n \"\"\"\n return self._apply(lambda x: x.isalpha(), dtype=bool)"},{"attributeType":"Dataset","col":8,"comment":"null","endLoc":62,"id":2274,"name":"ds","nodeType":"Attribute","startLoc":62,"text":"self.ds"},{"col":0,"comment":"null","endLoc":362,"header":"def _to_pytimedelta(array, unit=\"us\")","id":2275,"name":"_to_pytimedelta","nodeType":"Function","startLoc":360,"text":"def _to_pytimedelta(array, unit=\"us\"):\n index = pd.TimedeltaIndex(array.ravel(), unit=unit)\n return index.to_pytimedelta().reshape(array.shape)"},{"col":0,"comment":"null","endLoc":66,"header":"def time_bfill(shape, chunks, limit)","id":2276,"name":"time_bfill","nodeType":"Function","startLoc":60,"text":"def time_bfill(shape, chunks, limit):\n\n da = make_bench_data(shape, 0.1, chunks=chunks)\n actual = da.bfill(dim=\"time\", limit=limit)\n\n if chunks is not None:\n actual = actual.compute()"},{"col":27,"endLoc":256,"id":2277,"nodeType":"Lambda","startLoc":256,"text":"lambda x: x.isalpha()"},{"col":0,"comment":"inhouse mean that can handle np.datetime64 or cftime.datetime\n dtypes","endLoc":393,"header":"def mean(array, axis=None, skipna=None, **kwargs)","id":2278,"name":"mean","nodeType":"Function","startLoc":365,"text":"def mean(array, axis=None, skipna=None, **kwargs):\n \"\"\"inhouse mean that can handle np.datetime64 or cftime.datetime\n dtypes\"\"\"\n from .common import _contains_cftime_datetimes\n\n array = asarray(array)\n if array.dtype.kind in \"Mm\":\n offset = min(array)\n # xarray always uses np.datetime64[ns] for np.datetime64 data\n dtype = \"timedelta64[ns]\"\n return (\n _mean(\n datetime_to_numeric(array, offset), axis=axis, skipna=skipna, **kwargs\n ).astype(dtype)\n + offset\n )\n elif _contains_cftime_datetimes(array):\n if isinstance(array, dask_array_type):\n raise NotImplementedError(\n \"Computing the mean of an array containing \"\n \"cftime.datetime objects is not yet implemented on \"\n \"dask arrays.\"\n )\n offset = min(array)\n timedeltas = datetime_to_numeric(array, offset, datetime_unit=\"us\")\n mean_timedeltas = _mean(timedeltas, axis=axis, skipna=skipna, **kwargs)\n return _to_pytimedelta(mean_timedeltas, unit=\"us\") + offset\n else:\n return _mean(array, axis=axis, skipna=skipna, **kwargs)"},{"col":4,"comment":" Unify chunk size along all chunked dimensions of this DataArray.\n\n Returns\n -------\n\n DataArray with consistent chunk sizes for all dask-array variables\n\n See Also\n --------\n\n dask.array.core.unify_chunks\n ","endLoc":3060,"header":"def unify_chunks(self) -> 
\"DataArray\"","id":2279,"name":"unify_chunks","nodeType":"Function","startLoc":3046,"text":"def unify_chunks(self) -> \"DataArray\":\n \"\"\" Unify chunk size along all chunked dimensions of this DataArray.\n\n Returns\n -------\n\n DataArray with consistent chunk sizes for all dask-array variables\n\n See Also\n --------\n\n dask.array.core.unify_chunks\n \"\"\"\n ds = self._to_temp_dataset().unify_chunks()\n return self._from_temp_dataset(ds)"},{"className":"Indexing","col":0,"comment":"null","endLoc":94,"id":2280,"nodeType":"Class","startLoc":77,"text":"class Indexing(Base):\n def time_indexing_basic(self, key):\n self.ds.isel(**basic_indexes[key]).load()\n\n time_indexing_basic.param_names = [\"key\"]\n time_indexing_basic.params = [list(basic_indexes.keys())]\n\n def time_indexing_outer(self, key):\n self.ds.isel(**outer_indexes[key]).load()\n\n time_indexing_outer.param_names = [\"key\"]\n time_indexing_outer.params = [list(outer_indexes.keys())]\n\n def time_indexing_vectorized(self, key):\n self.ds.isel(**vectorized_indexes[key]).load()\n\n time_indexing_vectorized.param_names = [\"key\"]\n time_indexing_vectorized.params = [list(vectorized_indexes.keys())]"},{"col":4,"comment":"\n Apply a function to each chunk of this DataArray. This method is experimental\n and its signature may change.\n\n Parameters\n ----------\n func: callable\n User-provided function that accepts a DataArray as its first parameter. The\n function will receive a subset of this DataArray, corresponding to one chunk\n along each chunked dimension. ``func`` will be executed as\n ``func(obj_subset, *args, **kwargs)``.\n\n The function will be first run on mocked-up data, that looks like this array\n but has sizes 0, to determine properties of the returned object such as\n dtype, variable names, new dimensions and new indexes (if any).\n\n This function must return either a single DataArray or a single Dataset.\n\n This function cannot change size of existing dimensions, or add new chunked\n dimensions.\n args: Sequence\n Passed verbatim to func after unpacking, after the sliced DataArray. xarray\n objects, if any, will not be split by chunks. Passing dask collections is\n not allowed.\n kwargs: Mapping\n Passed verbatim to func after unpacking. xarray objects, if any, will not be\n split by chunks. Passing dask collections is not allowed.\n\n Returns\n -------\n A single DataArray or Dataset with dask backend, reassembled from the outputs of\n the function.\n\n Notes\n -----\n This method is designed for when one needs to manipulate a whole xarray object\n within each chunk. In the more common case where one can work on numpy arrays,\n it is recommended to use apply_ufunc.\n\n If none of the variables in this DataArray is backed by dask, calling this\n method is equivalent to calling ``func(self, *args, **kwargs)``.\n\n See Also\n --------\n dask.array.map_blocks, xarray.apply_ufunc, xarray.map_blocks,\n xarray.Dataset.map_blocks\n ","endLoc":3117,"header":"def map_blocks(\n self,\n func: \"Callable[..., T_DSorDA]\",\n args: Sequence[Any] = (),\n kwargs","id":2281,"name":"map_blocks","nodeType":"Function","startLoc":3062,"text":"def map_blocks(\n self,\n func: \"Callable[..., T_DSorDA]\",\n args: Sequence[Any] = (),\n kwargs: Mapping[str, Any] = None,\n ) -> \"T_DSorDA\":\n \"\"\"\n Apply a function to each chunk of this DataArray. This method is experimental\n and its signature may change.\n\n Parameters\n ----------\n func: callable\n User-provided function that accepts a DataArray as its first parameter. 
The\n function will receive a subset of this DataArray, corresponding to one chunk\n along each chunked dimension. ``func`` will be executed as\n ``func(obj_subset, *args, **kwargs)``.\n\n The function will be first run on mocked-up data, that looks like this array\n but has sizes 0, to determine properties of the returned object such as\n dtype, variable names, new dimensions and new indexes (if any).\n\n This function must return either a single DataArray or a single Dataset.\n\n This function cannot change size of existing dimensions, or add new chunked\n dimensions.\n args: Sequence\n Passed verbatim to func after unpacking, after the sliced DataArray. xarray\n objects, if any, will not be split by chunks. Passing dask collections is\n not allowed.\n kwargs: Mapping\n Passed verbatim to func after unpacking. xarray objects, if any, will not be\n split by chunks. Passing dask collections is not allowed.\n\n Returns\n -------\n A single DataArray or Dataset with dask backend, reassembled from the outputs of\n the function.\n\n Notes\n -----\n This method is designed for when one needs to manipulate a whole xarray object\n within each chunk. In the more common case where one can work on numpy arrays,\n it is recommended to use apply_ufunc.\n\n If none of the variables in this DataArray is backed by dask, calling this\n method is equivalent to calling ``func(self, *args, **kwargs)``.\n\n See Also\n --------\n dask.array.map_blocks, xarray.apply_ufunc, xarray.map_blocks,\n xarray.Dataset.map_blocks\n \"\"\"\n from .parallel import map_blocks\n\n return map_blocks(func, self, args, kwargs)"},{"attributeType":"null","col":4,"comment":"null","endLoc":252,"id":2282,"name":"_accessors","nodeType":"Attribute","startLoc":252,"text":"_accessors"},{"col":4,"comment":"null","endLoc":79,"header":"def time_indexing_basic(self, key)","id":2283,"name":"time_indexing_basic","nodeType":"Function","startLoc":78,"text":"def time_indexing_basic(self, key):\n self.ds.isel(**basic_indexes[key]).load()"},{"attributeType":"null","col":17,"comment":"null","endLoc":1,"id":2284,"name":"pd","nodeType":"Attribute","startLoc":1,"text":"pd"},{"attributeType":"null","col":17,"comment":"null","endLoc":3,"id":2285,"name":"xr","nodeType":"Attribute","startLoc":3,"text":"xr"},{"attributeType":"null","col":0,"comment":"null","endLoc":34,"id":2286,"name":"param_names","nodeType":"Attribute","startLoc":34,"text":"time_interpolate_na.param_names"},{"attributeType":"null","col":0,"comment":"null","endLoc":35,"id":2287,"name":"params","nodeType":"Attribute","startLoc":35,"text":"time_interpolate_na.params"},{"attributeType":"null","col":0,"comment":"null","endLoc":52,"id":2288,"name":"param_names","nodeType":"Attribute","startLoc":52,"text":"time_ffill.param_names"},{"attributeType":"null","col":0,"comment":"null","endLoc":53,"id":2289,"name":"params","nodeType":"Attribute","startLoc":53,"text":"time_ffill.params"},{"attributeType":"null","col":0,"comment":"null","endLoc":69,"id":2290,"name":"param_names","nodeType":"Attribute","startLoc":69,"text":"time_bfill.param_names"},{"attributeType":"null","col":0,"comment":"null","endLoc":70,"id":2291,"name":"params","nodeType":"Attribute","startLoc":70,"text":"time_bfill.params"},{"col":0,"comment":"","endLoc":1,"header":"dataarray_missing.py#","id":2292,"name":"","nodeType":"Function","startLoc":1,"text":"try:\n import dask # noqa: F401\nexcept ImportError:\n pass\n\ntime_interpolate_na.param_names = [\"shape\", \"chunks\", \"method\", \"limit\"]\n\ntime_interpolate_na.params = (\n 
[(3650, 200, 400), (100, 25, 25)],\n [None, {\"x\": 25, \"y\": 25}],\n [\"linear\", \"spline\", \"quadratic\", \"cubic\"],\n [None, 3],\n)\n\ntime_ffill.param_names = [\"shape\", \"chunks\", \"limit\"]\n\ntime_ffill.params = (\n [(3650, 200, 400), (100, 25, 25)],\n [None, {\"x\": 25, \"y\": 25}],\n [None, 3],\n)\n\ntime_bfill.param_names = [\"shape\", \"chunks\", \"limit\"]\n\ntime_bfill.params = (\n [(3650, 200, 400), (100, 25, 25)],\n [None, {\"x\": 25, \"y\": 25}],\n [None, 3],\n)"},{"col":4,"comment":"\n Check whether all characters in each string are decimal.\n\n Returns\n -------\n isdecimal : array of bool\n Array of boolean values with the same shape as the original array.\n ","endLoc":267,"header":"def isdecimal(self)","id":2293,"name":"isdecimal","nodeType":"Function","startLoc":258,"text":"def isdecimal(self):\n \"\"\"\n Check whether all characters in each string are decimal.\n\n Returns\n -------\n isdecimal : array of bool\n Array of boolean values with the same shape as the original array.\n \"\"\"\n return self._apply(lambda x: x.isdecimal(), dtype=bool)"},{"id":2295,"name":"asv_bench","nodeType":"Package"},{"id":2296,"name":"asv.conf.json","nodeType":"TextFile","path":"asv_bench","text":"{\n // The version of the config file format. Do not change, unless\n // you know what you are doing.\n \"version\": 1,\n\n // The name of the project being benchmarked\n \"project\": \"xarray\",\n\n // The project's homepage\n \"project_url\": \"http://xarray.pydata.org/\",\n\n // The URL or local path of the source code repository for the\n // project being benchmarked\n \"repo\": \"..\",\n\n // List of branches to benchmark. If not provided, defaults to \"master\"\n // (for git) or \"default\" (for mercurial).\n \"branches\": [\"master\"], // for git\n // \"branches\": [\"default\"], // for mercurial\n\n // The DVCS being used. If not set, it will be automatically\n // determined from \"repo\" by looking at the protocol in the URL\n // (if remote), or by looking for special directories, such as\n // \".git\" (if local).\n \"dvcs\": \"git\",\n\n // The tool to use to create environments. May be \"conda\",\n // \"virtualenv\" or other value depending on the plugins in use.\n // If missing or the empty string, the tool will be automatically\n // determined by looking for tools on the PATH environment\n // variable.\n \"environment_type\": \"conda\",\n\n // timeout in seconds for installing any dependencies in environment\n // defaults to 10 min\n \"install_timeout\": 600,\n\n // the base URL to show a commit for the project.\n \"show_commit_url\": \"https://github.com/pydata/xarray/commit/\",\n\n // The Pythons you'd like to test against. If not provided, defaults\n // to the current version of Python used to run `asv`.\n \"pythons\": [\"3.6\"],\n\n // The matrix of dependencies to test. Each key is the name of a\n // package (in PyPI) and the values are version numbers. An empty\n // list or empty string indicates to just test against the default\n // (latest) version. null indicates that the package is to not be\n // installed. 
If the package to be tested is only available from\n // PyPi, and the 'environment_type' is conda, then you can preface\n // the package name by 'pip+', and the package will be installed via\n // pip (with all the conda available packages installed first,\n // followed by the pip installed packages).\n //\n // \"matrix\": {\n // \"numpy\": [\"1.6\", \"1.7\"],\n // \"six\": [\"\", null], // test with and without six installed\n // \"pip+emcee\": [\"\"], // emcee is only available for install with pip.\n // },\n \"matrix\": {\n \"numpy\": [\"\"],\n \"pandas\": [\"\"],\n \"netcdf4\": [\"\"],\n \"scipy\": [\"\"],\n \"bottleneck\": [\"\", null],\n \"dask\": [\"\"],\n \"distributed\": [\"\"],\n },\n\n\n // Combinations of libraries/python versions can be excluded/included\n // from the set to test. Each entry is a dictionary containing additional\n // key-value pairs to include/exclude.\n //\n // An exclude entry excludes entries where all values match. The\n // values are regexps that should match the whole string.\n //\n // An include entry adds an environment. Only the packages listed\n // are installed. The 'python' key is required. The exclude rules\n // do not apply to includes.\n //\n // In addition to package names, the following keys are available:\n //\n // - python\n // Python version, as in the *pythons* variable above.\n // - environment_type\n // Environment type, as above.\n // - sys_platform\n // Platform, as in sys.platform. Possible values for the common\n // cases: 'linux2', 'win32', 'cygwin', 'darwin'.\n //\n // \"exclude\": [\n // {\"python\": \"3.2\", \"sys_platform\": \"win32\"}, // skip py3.2 on windows\n // {\"environment_type\": \"conda\", \"six\": null}, // don't run without six on conda\n // ],\n //\n // \"include\": [\n // // additional env for python2.7\n // {\"python\": \"2.7\", \"numpy\": \"1.8\"},\n // // additional env if run on windows+conda\n // {\"platform\": \"win32\", \"environment_type\": \"conda\", \"python\": \"2.7\", \"libpython\": \"\"},\n // ],\n\n // The directory (relative to the current directory) that benchmarks are\n // stored in. If not provided, defaults to \"benchmarks\"\n \"benchmark_dir\": \"benchmarks\",\n\n // The directory (relative to the current directory) to cache the Python\n // environments in. If not provided, defaults to \"env\"\n \"env_dir\": \".asv/env\",\n\n // The directory (relative to the current directory) that raw benchmark\n // results are stored in. If not provided, defaults to \"results\".\n \"results_dir\": \".asv/results\",\n\n // The directory (relative to the current directory) that the html tree\n // should be written to. If not provided, defaults to \"html\".\n \"html_dir\": \".asv/html\",\n\n // The number of characters to retain in the commit hashes.\n // \"hash_length\": 8,\n\n // `asv` will cache wheels of the recent builds in each\n // environment, making them faster to install next time. This is\n // number of builds to keep, per environment.\n // \"wheel_cache_size\": 0\n\n // The commits after which the regression search in `asv publish`\n // should start looking for regressions. Dictionary whose keys are\n // regexps matching to benchmark names, and values corresponding to\n // the commit (exclusive) after which to start looking for\n // regressions. The default is to start from the first commit\n // with results. 
If the commit is `null`, regression detection is\n // skipped for the matching benchmark.\n //\n // \"regressions_first_commits\": {\n // \"some_benchmark\": \"352cdf\", // Consider regressions only after this commit\n // \"another_benchmark\": null, // Skip regression detection altogether\n // }\n\n // The thresholds for relative change in results, after which `asv\n // publish` starts reporting regressions. Dictionary of the same\n // form as in ``regressions_first_commits``, with values\n // indicating the thresholds. If multiple entries match, the\n // maximum is taken. If no entry matches, the default is 5%.\n //\n // \"regressions_thresholds\": {\n // \"some_benchmark\": 0.01, // Threshold of 1%\n // \"another_benchmark\": 0.5, // Threshold of 50%\n // }\n}\n"},{"col":27,"endLoc":267,"id":2297,"nodeType":"Lambda","startLoc":267,"text":"lambda x: x.isdecimal()"},{"id":2298,"name":"NUMPY_LICENSE","nodeType":"TextFile","path":"licenses","text":"Copyright (c) 2005-2011, NumPy Developers.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above\n copyright notice, this list of conditions and the following\n disclaimer in the documentation and/or other materials provided\n with the distribution.\n\n * Neither the name of the NumPy Developers nor the names of any\n contributors may be used to endorse or promote products derived\n from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"},{"fileName":"rolling.py","filePath":"asv_bench/benchmarks","id":2299,"nodeType":"File","text":"import numpy as np\nimport pandas as pd\n\nimport xarray as xr\n\nfrom . 
import parameterized, randn, requires_dask\n\nnx = 3000\nlong_nx = 30000000\nny = 2000\nnt = 1000\nwindow = 20\n\nrandn_xy = randn((nx, ny), frac_nan=0.1)\nrandn_xt = randn((nx, nt))\nrandn_t = randn((nt,))\nrandn_long = randn((long_nx,), frac_nan=0.1)\n\n\nclass Rolling:\n def setup(self, *args, **kwargs):\n self.ds = xr.Dataset(\n {\n \"var1\": ((\"x\", \"y\"), randn_xy),\n \"var2\": ((\"x\", \"t\"), randn_xt),\n \"var3\": ((\"t\",), randn_t),\n },\n coords={\n \"x\": np.arange(nx),\n \"y\": np.linspace(0, 1, ny),\n \"t\": pd.date_range(\"1970-01-01\", periods=nt, freq=\"D\"),\n \"x_coords\": (\"x\", np.linspace(1.1, 2.1, nx)),\n },\n )\n self.da_long = xr.DataArray(\n randn_long, dims=\"x\", coords={\"x\": np.arange(long_nx) * 0.1}\n )\n\n @parameterized([\"func\", \"center\"], ([\"mean\", \"count\"], [True, False]))\n def time_rolling(self, func, center):\n getattr(self.ds.rolling(x=window, center=center), func)().load()\n\n @parameterized([\"func\", \"pandas\"], ([\"mean\", \"count\"], [True, False]))\n def time_rolling_long(self, func, pandas):\n if pandas:\n se = self.da_long.to_series()\n getattr(se.rolling(window=window), func)()\n else:\n getattr(self.da_long.rolling(x=window), func)().load()\n\n @parameterized([\"window_\", \"min_periods\"], ([20, 40], [5, None]))\n def time_rolling_np(self, window_, min_periods):\n self.ds.rolling(x=window_, center=False, min_periods=min_periods).reduce(\n getattr(np, \"nanmean\")\n ).load()\n\n @parameterized([\"center\", \"stride\"], ([True, False], [1, 200]))\n def time_rolling_construct(self, center, stride):\n self.ds.rolling(x=window, center=center).construct(\n \"window_dim\", stride=stride\n ).mean(dim=\"window_dim\").load()\n\n\nclass RollingDask(Rolling):\n def setup(self, *args, **kwargs):\n requires_dask()\n super().setup(**kwargs)\n self.ds = self.ds.chunk({\"x\": 100, \"y\": 50, \"t\": 50})\n self.da_long = self.da_long.chunk({\"x\": 10000})\n"},{"col":0,"comment":"null","endLoc":14,"header":"def parameterized(names, params)","id":2300,"name":"parameterized","nodeType":"Function","startLoc":8,"text":"def parameterized(names, params):\n def decorator(func):\n func.param_names = names\n func.params = params\n return func\n\n return decorator"},{"className":"Rolling","col":0,"comment":"null","endLoc":61,"id":2301,"nodeType":"Class","startLoc":20,"text":"class Rolling:\n def setup(self, *args, **kwargs):\n self.ds = xr.Dataset(\n {\n \"var1\": ((\"x\", \"y\"), randn_xy),\n \"var2\": ((\"x\", \"t\"), randn_xt),\n \"var3\": ((\"t\",), randn_t),\n },\n coords={\n \"x\": np.arange(nx),\n \"y\": np.linspace(0, 1, ny),\n \"t\": pd.date_range(\"1970-01-01\", periods=nt, freq=\"D\"),\n \"x_coords\": (\"x\", np.linspace(1.1, 2.1, nx)),\n },\n )\n self.da_long = xr.DataArray(\n randn_long, dims=\"x\", coords={\"x\": np.arange(long_nx) * 0.1}\n )\n\n @parameterized([\"func\", \"center\"], ([\"mean\", \"count\"], [True, False]))\n def time_rolling(self, func, center):\n getattr(self.ds.rolling(x=window, center=center), func)().load()\n\n @parameterized([\"func\", \"pandas\"], ([\"mean\", \"count\"], [True, False]))\n def time_rolling_long(self, func, pandas):\n if pandas:\n se = self.da_long.to_series()\n getattr(se.rolling(window=window), func)()\n else:\n getattr(self.da_long.rolling(x=window), func)().load()\n\n @parameterized([\"window_\", \"min_periods\"], ([20, 40], [5, None]))\n def time_rolling_np(self, window_, min_periods):\n self.ds.rolling(x=window_, center=False, min_periods=min_periods).reduce(\n getattr(np, \"nanmean\")\n 
).load()\n\n @parameterized([\"center\", \"stride\"], ([True, False], [1, 200]))\n def time_rolling_construct(self, center, stride):\n self.ds.rolling(x=window, center=center).construct(\n \"window_dim\", stride=stride\n ).mean(dim=\"window_dim\").load()"},{"col":4,"comment":"null","endLoc":37,"header":"def setup(self, *args, **kwargs)","id":2302,"name":"setup","nodeType":"Function","startLoc":21,"text":"def setup(self, *args, **kwargs):\n self.ds = xr.Dataset(\n {\n \"var1\": ((\"x\", \"y\"), randn_xy),\n \"var2\": ((\"x\", \"t\"), randn_xt),\n \"var3\": ((\"t\",), randn_t),\n },\n coords={\n \"x\": np.arange(nx),\n \"y\": np.linspace(0, 1, ny),\n \"t\": pd.date_range(\"1970-01-01\", periods=nt, freq=\"D\"),\n \"x_coords\": (\"x\", np.linspace(1.1, 2.1, nx)),\n },\n )\n self.da_long = xr.DataArray(\n randn_long, dims=\"x\", coords={\"x\": np.arange(long_nx) * 0.1}\n )"},{"col":4,"comment":"\n Check whether all characters in each string are digits.\n\n Returns\n -------\n isdigit : array of bool\n Array of boolean values with the same shape as the original array.\n ","endLoc":278,"header":"def isdigit(self)","id":2303,"name":"isdigit","nodeType":"Function","startLoc":269,"text":"def isdigit(self):\n \"\"\"\n Check whether all characters in each string are digits.\n\n Returns\n -------\n isdigit : array of bool\n Array of boolean values with the same shape as the original array.\n \"\"\"\n return self._apply(lambda x: x.isdigit(), dtype=bool)"},{"col":27,"endLoc":278,"id":2304,"nodeType":"Lambda","startLoc":278,"text":"lambda x: x.isdigit()"},{"col":0,"comment":"null","endLoc":409,"header":"def _nd_cum_func(cum_func, array, axis, **kwargs)","id":2305,"name":"_nd_cum_func","nodeType":"Function","startLoc":399,"text":"def _nd_cum_func(cum_func, array, axis, **kwargs):\n array = asarray(array)\n if axis is None:\n axis = tuple(range(array.ndim))\n if isinstance(axis, int):\n axis = (axis,)\n\n out = array\n for ax in axis:\n out = cum_func(out, axis=ax, **kwargs)\n return out"},{"col":4,"comment":"\n Check whether all characters in each string are lowercase.\n\n Returns\n -------\n islower : array of bool\n Array of boolean values with the same shape as the original array.\n ","endLoc":289,"header":"def islower(self)","id":2306,"name":"islower","nodeType":"Function","startLoc":280,"text":"def islower(self):\n \"\"\"\n Check whether all characters in each string are lowercase.\n\n Returns\n -------\n islower : array of bool\n Array of boolean values with the same shape as the original array.\n \"\"\"\n return self._apply(lambda x: x.islower(), dtype=bool)"},{"col":27,"endLoc":289,"id":2307,"nodeType":"Lambda","startLoc":289,"text":"lambda x: x.islower()"},{"col":4,"comment":"null","endLoc":85,"header":"def time_indexing_outer(self, key)","id":2308,"name":"time_indexing_outer","nodeType":"Function","startLoc":84,"text":"def time_indexing_outer(self, key):\n 
self.ds.isel(**outer_indexes[key]).load()"},{"attributeType":"null","col":4,"comment":"null","endLoc":253,"id":2309,"name":"_coords","nodeType":"Attribute","startLoc":253,"text":"_coords"},{"attributeType":"null","col":4,"comment":"null","endLoc":254,"id":2310,"name":"_indexes","nodeType":"Attribute","startLoc":254,"text":"_indexes"},{"attributeType":"null","col":4,"comment":"null","endLoc":255,"id":2311,"name":"_name","nodeType":"Attribute","startLoc":255,"text":"_name"},{"attributeType":"Variable","col":4,"comment":"null","endLoc":256,"id":2312,"name":"_variable","nodeType":"Attribute","startLoc":256,"text":"_variable"},{"attributeType":"null","col":4,"comment":"null","endLoc":258,"id":2313,"name":"__slots__","nodeType":"Attribute","startLoc":258,"text":"__slots__"},{"col":0,"comment":"N-dimensional version of cumprod.","endLoc":414,"header":"def cumprod(array, axis=None, **kwargs)","id":2314,"name":"cumprod","nodeType":"Function","startLoc":412,"text":"def cumprod(array, axis=None, **kwargs):\n \"\"\"N-dimensional version of cumprod.\"\"\"\n return _nd_cum_func(cumprod_1d, array, axis, **kwargs)"},{"attributeType":"null","col":4,"comment":"null","endLoc":268,"id":2315,"name":"_groupby_cls","nodeType":"Attribute","startLoc":268,"text":"_groupby_cls"},{"attributeType":"null","col":4,"comment":"null","endLoc":269,"id":2316,"name":"_rolling_cls","nodeType":"Attribute","startLoc":269,"text":"_rolling_cls"},{"col":4,"comment":"\n Check whether all characters in each string are numeric.\n\n Returns\n -------\n isnumeric : array of bool\n Array of boolean values with the same shape as the original array.\n ","endLoc":300,"header":"def isnumeric(self)","id":2317,"name":"isnumeric","nodeType":"Function","startLoc":291,"text":"def isnumeric(self):\n \"\"\"\n Check whether all characters in each string are numeric.\n\n Returns\n -------\n isnumeric : array of bool\n Array of boolean values with the same shape as the original array.\n \"\"\"\n return self._apply(lambda x: x.isnumeric(), dtype=bool)"},{"col":27,"endLoc":300,"id":2318,"nodeType":"Lambda","startLoc":300,"text":"lambda x: x.isnumeric()"},{"col":0,"comment":"N-dimensional version of cumsum.","endLoc":419,"header":"def cumsum(array, axis=None, **kwargs)","id":2319,"name":"cumsum","nodeType":"Function","startLoc":417,"text":"def cumsum(array, axis=None, **kwargs):\n \"\"\"N-dimensional version of cumsum.\"\"\"\n return _nd_cum_func(cumsum_1d, array, axis, **kwargs)"},{"col":4,"comment":"\n Check whether all characters in each string are spaces.\n\n Returns\n -------\n isspace : array of bool\n Array of boolean values with the same shape as the original array.\n ","endLoc":311,"header":"def isspace(self)","id":2320,"name":"isspace","nodeType":"Function","startLoc":302,"text":"def isspace(self):\n \"\"\"\n Check whether all characters in each string are spaces.\n\n Returns\n -------\n isspace : array of bool\n Array of boolean values with the same shape as the original array.\n \"\"\"\n return self._apply(lambda x: x.isspace(), dtype=bool)"},{"col":27,"endLoc":311,"id":2321,"nodeType":"Lambda","startLoc":311,"text":"lambda x: x.isspace()"},{"col":4,"comment":"\n Check whether all characters in each string are titlecase.\n\n Returns\n -------\n istitle : array of bool\n Array of boolean values with the same shape as the original array.\n ","endLoc":322,"header":"def istitle(self)","id":2322,"name":"istitle","nodeType":"Function","startLoc":313,"text":"def istitle(self):\n \"\"\"\n Check whether all characters in each string are titlecase.\n\n 
Returns\n -------\n istitle : array of bool\n Array of boolean values with the same shape as the original array.\n \"\"\"\n return self._apply(lambda x: x.istitle(), dtype=bool)"},{"col":27,"endLoc":322,"id":2323,"nodeType":"Lambda","startLoc":322,"text":"lambda x: x.istitle()"},{"attributeType":"null","col":4,"comment":"null","endLoc":270,"id":2324,"name":"_coarsen_cls","nodeType":"Attribute","startLoc":270,"text":"_coarsen_cls"},{"col":4,"comment":"null","endLoc":91,"header":"def time_indexing_vectorized(self, key)","id":2325,"name":"time_indexing_vectorized","nodeType":"Function","startLoc":90,"text":"def time_indexing_vectorized(self, key):\n self.ds.isel(**vectorized_indexes[key]).load()"},{"col":4,"comment":"\n Check whether all characters in each string are uppercase.\n\n Returns\n -------\n isupper : array of bool\n Array of boolean values with the same shape as the original array.\n ","endLoc":333,"header":"def isupper(self)","id":2326,"name":"isupper","nodeType":"Function","startLoc":324,"text":"def isupper(self):\n \"\"\"\n Check whether all characters in each string are uppercase.\n\n Returns\n -------\n isupper : array of bool\n Array of boolean values with the same shape as the original array.\n \"\"\"\n return self._apply(lambda x: x.isupper(), dtype=bool)"},{"col":0,"comment":"Return the first non-NA elements in this array along the given axis\n ","endLoc":435,"header":"def first(values, axis, skipna=None)","id":2327,"name":"first","nodeType":"Function","startLoc":428,"text":"def first(values, axis, skipna=None):\n \"\"\"Return the first non-NA elements in this array along the given axis\n \"\"\"\n if (skipna or skipna is None) and values.dtype.kind not in \"iSU\":\n # only bother for dtypes that can hold NaN\n _fail_on_dask_array_input_skipna(values)\n return nanfirst(values, axis)\n return take(values, 0, axis=axis)"},{"col":27,"endLoc":333,"id":2328,"nodeType":"Lambda","startLoc":333,"text":"lambda x: x.isupper()"},{"col":4,"comment":"\n Count occurrences of pattern in each string of the array.\n\n This function is used to count the number of times a particular regex\n pattern is repeated in each of the string elements of the\n :class:`~xarray.DatArray`.\n\n Parameters\n ----------\n pat : str\n Valid regular expression.\n flags : int, default 0, meaning no flags\n Flags for the `re` module. For a complete list, `see here\n `_.\n\n Returns\n -------\n counts : array of int\n ","endLoc":358,"header":"def count(self, pat, flags=0)","id":2329,"name":"count","nodeType":"Function","startLoc":335,"text":"def count(self, pat, flags=0):\n \"\"\"\n Count occurrences of pattern in each string of the array.\n\n This function is used to count the number of times a particular regex\n pattern is repeated in each of the string elements of the\n :class:`~xarray.DatArray`.\n\n Parameters\n ----------\n pat : str\n Valid regular expression.\n flags : int, default 0, meaning no flags\n Flags for the `re` module. 
For a complete list, `see here\n `_.\n\n Returns\n -------\n counts : array of int\n \"\"\"\n pat = self._obj.dtype.type(pat)\n regex = re.compile(pat, flags=flags)\n f = lambda x: len(regex.findall(x))\n return self._apply(f, dtype=int)"},{"attributeType":"null","col":4,"comment":"null","endLoc":81,"id":2330,"name":"param_names","nodeType":"Attribute","startLoc":81,"text":"time_indexing_basic.param_names"},{"attributeType":"null","col":4,"comment":"null","endLoc":82,"id":2331,"name":"params","nodeType":"Attribute","startLoc":82,"text":"time_indexing_basic.params"},{"attributeType":"null","col":4,"comment":"null","endLoc":87,"id":2332,"name":"param_names","nodeType":"Attribute","startLoc":87,"text":"time_indexing_outer.param_names"},{"attributeType":"null","col":4,"comment":"null","endLoc":88,"id":2333,"name":"params","nodeType":"Attribute","startLoc":88,"text":"time_indexing_outer.params"},{"attributeType":"null","col":4,"comment":"null","endLoc":93,"id":2334,"name":"param_names","nodeType":"Attribute","startLoc":93,"text":"time_indexing_vectorized.param_names"},{"col":12,"endLoc":357,"id":2335,"nodeType":"Lambda","startLoc":357,"text":"lambda x: len(regex.findall(x))"},{"attributeType":"null","col":4,"comment":"null","endLoc":94,"id":2336,"name":"params","nodeType":"Attribute","startLoc":94,"text":"time_indexing_vectorized.params"},{"className":"Assignment","col":0,"comment":"null","endLoc":120,"id":2337,"nodeType":"Class","startLoc":97,"text":"class Assignment(Base):\n def time_assignment_basic(self, key):\n ind = basic_indexes[key]\n val = basic_assignment_values[key]\n self.ds[\"var1\"][ind.get(\"x\", slice(None)), ind.get(\"y\", slice(None))] = val\n\n time_assignment_basic.param_names = [\"key\"]\n time_assignment_basic.params = [list(basic_indexes.keys())]\n\n def time_assignment_outer(self, key):\n ind = outer_indexes[key]\n val = outer_assignment_values[key]\n self.ds[\"var1\"][ind.get(\"x\", slice(None)), ind.get(\"y\", slice(None))] = val\n\n time_assignment_outer.param_names = [\"key\"]\n time_assignment_outer.params = [list(outer_indexes.keys())]\n\n def time_assignment_vectorized(self, key):\n ind = vectorized_indexes[key]\n val = vectorized_assignment_values[key]\n self.ds[\"var1\"][ind.get(\"x\", slice(None)), ind.get(\"y\", slice(None))] = val\n\n time_assignment_vectorized.param_names = [\"key\"]\n time_assignment_vectorized.params = [list(vectorized_indexes.keys())]"},{"col":4,"comment":"null","endLoc":101,"header":"def time_assignment_basic(self, key)","id":2338,"name":"time_assignment_basic","nodeType":"Function","startLoc":98,"text":"def time_assignment_basic(self, key):\n ind = basic_indexes[key]\n val = basic_assignment_values[key]\n self.ds[\"var1\"][ind.get(\"x\", slice(None)), ind.get(\"y\", slice(None))] = val"},{"attributeType":"null","col":4,"comment":"null","endLoc":271,"id":2339,"name":"_resample_cls","nodeType":"Attribute","startLoc":271,"text":"_resample_cls"},{"col":4,"comment":"\n Test if the start of each string element matches a pattern.\n\n Parameters\n ----------\n pat : str\n Character sequence. 
Regular expressions are not accepted.\n\n Returns\n -------\n startswith : array of bool\n An array of booleans indicating whether the given pattern matches\n the start of each string element.\n ","endLoc":377,"header":"def startswith(self, pat)","id":2340,"name":"startswith","nodeType":"Function","startLoc":360,"text":"def startswith(self, pat):\n \"\"\"\n Test if the start of each string element matches a pattern.\n\n Parameters\n ----------\n pat : str\n Character sequence. Regular expressions are not accepted.\n\n Returns\n -------\n startswith : array of bool\n An array of booleans indicating whether the given pattern matches\n the start of each string element.\n \"\"\"\n pat = self._obj.dtype.type(pat)\n f = lambda x: x.startswith(pat)\n return self._apply(f, dtype=bool)"},{"col":4,"comment":"null","endLoc":109,"header":"def time_assignment_outer(self, key)","id":2341,"name":"time_assignment_outer","nodeType":"Function","startLoc":106,"text":"def time_assignment_outer(self, key):\n ind = outer_indexes[key]\n val = outer_assignment_values[key]\n self.ds[\"var1\"][ind.get(\"x\", slice(None)), ind.get(\"y\", slice(None))] = val"},{"col":12,"endLoc":376,"id":2342,"nodeType":"Lambda","startLoc":376,"text":"lambda x: x.startswith(pat)"},{"col":4,"comment":"\n Test if the end of each string element matches a pattern.\n\n Parameters\n ----------\n pat : str\n Character sequence. Regular expressions are not accepted.\n\n Returns\n -------\n endswith : array of bool\n A Series of booleans indicating whether the given pattern matches\n the end of each string element.\n ","endLoc":396,"header":"def endswith(self, pat)","id":2343,"name":"endswith","nodeType":"Function","startLoc":379,"text":"def endswith(self, pat):\n \"\"\"\n Test if the end of each string element matches a pattern.\n\n Parameters\n ----------\n pat : str\n Character sequence. 
Regular expressions are not accepted.\n\n Returns\n -------\n endswith : array of bool\n A Series of booleans indicating whether the given pattern matches\n the end of each string element.\n \"\"\"\n pat = self._obj.dtype.type(pat)\n f = lambda x: x.endswith(pat)\n return self._apply(f, dtype=bool)"},{"col":4,"comment":"null","endLoc":117,"header":"def time_assignment_vectorized(self, key)","id":2344,"name":"time_assignment_vectorized","nodeType":"Function","startLoc":114,"text":"def time_assignment_vectorized(self, key):\n ind = vectorized_indexes[key]\n val = vectorized_assignment_values[key]\n self.ds[\"var1\"][ind.get(\"x\", slice(None)), ind.get(\"y\", slice(None))] = val"},{"col":0,"comment":"Return the last non-NA elements in this array along the given axis\n ","endLoc":445,"header":"def last(values, axis, skipna=None)","id":2345,"name":"last","nodeType":"Function","startLoc":438,"text":"def last(values, axis, skipna=None):\n \"\"\"Return the last non-NA elements in this array along the given axis\n \"\"\"\n if (skipna or skipna is None) and values.dtype.kind not in \"iSU\":\n # only bother for dtypes that can hold NaN\n _fail_on_dask_array_input_skipna(values)\n return nanlast(values, axis)\n return take(values, -1, axis=axis)"},{"attributeType":"null","col":4,"comment":"null","endLoc":273,"id":2346,"name":"__default","nodeType":"Attribute","startLoc":273,"text":"__default"},{"attributeType":"null","col":4,"comment":"null","endLoc":275,"id":2347,"name":"dt","nodeType":"Attribute","startLoc":275,"text":"dt"},{"col":0,"comment":"\n Make an ndarray with a rolling window of axis-th dimension.\n The rolling dimension will be placed at the last dimension.\n ","endLoc":456,"header":"def rolling_window(array, axis, window, center, fill_value)","id":2348,"name":"rolling_window","nodeType":"Function","startLoc":448,"text":"def rolling_window(array, axis, window, center, fill_value):\n \"\"\"\n Make an ndarray with a rolling window of axis-th dimension.\n The rolling dimension will be placed at the last dimension.\n \"\"\"\n if isinstance(array, dask_array_type):\n return dask_array_ops.rolling_window(array, axis, window, center, fill_value)\n else: # np.ndarray\n return nputils.rolling_window(array, axis, window, center, fill_value)"},{"attributeType":"null","col":4,"comment":"null","endLoc":938,"id":2349,"name":"__hash__","nodeType":"Attribute","startLoc":938,"text":"__hash__"},{"attributeType":"function","col":0,"comment":"null","endLoc":68,"id":2350,"name":"moveaxis","nodeType":"Attribute","startLoc":68,"text":"moveaxis"},{"attributeType":"null","col":4,"comment":"null","endLoc":2458,"id":2351,"name":"__default_name","nodeType":"Attribute","startLoc":2458,"text":"__default_name"},{"col":12,"endLoc":395,"id":2352,"nodeType":"Lambda","startLoc":395,"text":"lambda x: x.endswith(pat)"},{"attributeType":"null","col":4,"comment":"null","endLoc":3121,"id":2353,"name":"str","nodeType":"Attribute","startLoc":3121,"text":"str"},{"attributeType":"Variable","col":8,"comment":"null","endLoc":374,"id":2354,"name":"_variable","nodeType":"Attribute","startLoc":374,"text":"self._variable"},{"attributeType":"null","col":8,"comment":"null","endLoc":384,"id":2355,"name":"_file_obj","nodeType":"Attribute","startLoc":384,"text":"self._file_obj"},{"attributeType":"null","col":4,"comment":"null","endLoc":103,"id":2356,"name":"param_names","nodeType":"Attribute","startLoc":103,"text":"time_assignment_basic.param_names"},{"col":4,"comment":"null","endLoc":41,"header":"@parameterized([\"func\", \"center\"], 
([\"mean\", \"count\"], [True, False]))\n def time_rolling(self, func, center)","id":2357,"name":"time_rolling","nodeType":"Function","startLoc":39,"text":"@parameterized([\"func\", \"center\"], ([\"mean\", \"count\"], [True, False]))\n def time_rolling(self, func, center):\n getattr(self.ds.rolling(x=window, center=center), func)().load()"},{"attributeType":"null","col":4,"comment":"null","endLoc":104,"id":2358,"name":"params","nodeType":"Attribute","startLoc":104,"text":"time_assignment_basic.params"},{"attributeType":"null","col":4,"comment":"null","endLoc":111,"id":2359,"name":"param_names","nodeType":"Attribute","startLoc":111,"text":"time_assignment_outer.param_names"},{"attributeType":"null","col":4,"comment":"null","endLoc":112,"id":2360,"name":"params","nodeType":"Attribute","startLoc":112,"text":"time_assignment_outer.params"},{"attributeType":"null","col":4,"comment":"null","endLoc":119,"id":2361,"name":"param_names","nodeType":"Attribute","startLoc":119,"text":"time_assignment_vectorized.param_names"},{"attributeType":"null","col":4,"comment":"null","endLoc":120,"id":2362,"name":"params","nodeType":"Attribute","startLoc":120,"text":"time_assignment_vectorized.params"},{"className":"IndexingDask","col":0,"comment":"null","endLoc":127,"id":2363,"nodeType":"Class","startLoc":123,"text":"class IndexingDask(Indexing):\n def setup(self, key):\n requires_dask()\n super().setup(key)\n self.ds = self.ds.chunk({\"x\": 100, \"y\": 50, \"t\": 50})"},{"col":4,"comment":"null","endLoc":127,"header":"def setup(self, key)","id":2364,"name":"setup","nodeType":"Function","startLoc":124,"text":"def setup(self, key):\n requires_dask()\n super().setup(key)\n self.ds = self.ds.chunk({\"x\": 100, \"y\": 50, \"t\": 50})"},{"attributeType":"None","col":8,"comment":"null","endLoc":382,"id":2365,"name":"_indexes","nodeType":"Attribute","startLoc":382,"text":"self._indexes"},{"col":4,"comment":"\n Pad strings in the array up to width.\n\n Parameters\n ----------\n width : int\n Minimum width of resulting string; additional characters will be\n filled with character defined in `fillchar`.\n side : {'left', 'right', 'both'}, default 'left'\n Side from which to fill resulting string.\n fillchar : str, default ' '\n Additional character for filling, default is whitespace.\n\n Returns\n -------\n filled : same type as values\n Array with a minimum number of char in each element.\n ","endLoc":431,"header":"def pad(self, width, side=\"left\", fillchar=\" \")","id":2366,"name":"pad","nodeType":"Function","startLoc":398,"text":"def pad(self, width, side=\"left\", fillchar=\" \"):\n \"\"\"\n Pad strings in the array up to width.\n\n Parameters\n ----------\n width : int\n Minimum width of resulting string; additional characters will be\n filled with character defined in `fillchar`.\n side : {'left', 'right', 'both'}, default 'left'\n Side from which to fill resulting string.\n fillchar : str, default ' '\n Additional character for filling, default is whitespace.\n\n Returns\n -------\n filled : same type as values\n Array with a minimum number of char in each element.\n \"\"\"\n width = int(width)\n fillchar = self._obj.dtype.type(fillchar)\n if len(fillchar) != 1:\n raise TypeError(\"fillchar must be a character, not str\")\n\n if side == \"left\":\n f = lambda s: s.rjust(width, fillchar)\n elif side == \"right\":\n f = lambda s: s.ljust(width, fillchar)\n elif side == \"both\":\n f = lambda s: s.center(width, fillchar)\n else: # pragma: no cover\n raise ValueError(\"Invalid side\")\n\n return 
self._apply(f)"},{"attributeType":"None","col":8,"comment":"null","endLoc":377,"id":2367,"name":"_name","nodeType":"Attribute","startLoc":377,"text":"self._name"},{"attributeType":"function | function","col":0,"comment":"null","endLoc":70,"id":2368,"name":"around","nodeType":"Attribute","startLoc":70,"text":"around"},{"attributeType":"None","col":8,"comment":"null","endLoc":376,"id":2369,"name":"_coords","nodeType":"Attribute","startLoc":376,"text":"self._coords"},{"col":16,"endLoc":423,"id":2370,"nodeType":"Lambda","startLoc":423,"text":"lambda s: s.rjust(width, fillchar)"},{"attributeType":"null","col":8,"comment":"null","endLoc":378,"id":2371,"name":"_accessors","nodeType":"Attribute","startLoc":378,"text":"self._accessors"},{"col":16,"endLoc":425,"id":2372,"nodeType":"Lambda","startLoc":425,"text":"lambda s: s.ljust(width, fillchar)"},{"col":16,"endLoc":427,"id":2373,"nodeType":"Lambda","startLoc":427,"text":"lambda s: s.center(width, fillchar)"},{"attributeType":"function | function","col":0,"comment":"null","endLoc":71,"id":2374,"name":"isclose","nodeType":"Attribute","startLoc":71,"text":"isclose"},{"attributeType":"null","col":4,"comment":"null","endLoc":79,"id":2375,"name":"isnat","nodeType":"Attribute","startLoc":79,"text":"isnat"},{"attributeType":"function | function","col":4,"comment":"null","endLoc":81,"id":2376,"name":"isnat","nodeType":"Attribute","startLoc":81,"text":"isnat"},{"attributeType":"function | function","col":0,"comment":"null","endLoc":82,"id":2377,"name":"isnan","nodeType":"Attribute","startLoc":82,"text":"isnan"},{"attributeType":"function | function","col":0,"comment":"null","endLoc":83,"id":2378,"name":"zeros_like","nodeType":"Attribute","startLoc":83,"text":"zeros_like"},{"attributeType":"function | function","col":0,"comment":"null","endLoc":86,"id":2379,"name":"pandas_isnull","nodeType":"Attribute","startLoc":86,"text":"pandas_isnull"},{"attributeType":"function | function","col":0,"comment":"null","endLoc":119,"id":2380,"name":"transpose","nodeType":"Attribute","startLoc":119,"text":"transpose"},{"col":4,"comment":"\n Filling left and right side of strings in the array with an\n additional character.\n\n Parameters\n ----------\n width : int\n Minimum width of resulting string; additional characters will be\n filled with ``fillchar``\n fillchar : str\n Additional character for filling, default is whitespace\n\n Returns\n -------\n filled : same type as values\n ","endLoc":450,"header":"def center(self, width, fillchar=\" \")","id":2381,"name":"center","nodeType":"Function","startLoc":433,"text":"def center(self, width, fillchar=\" \"):\n \"\"\"\n Filling left and right side of strings in the array with an\n additional character.\n\n Parameters\n ----------\n width : int\n Minimum width of resulting string; additional characters will be\n filled with ``fillchar``\n fillchar : str\n Additional character for filling, default is whitespace\n\n Returns\n -------\n filled : same type as values\n \"\"\"\n return self.pad(width, side=\"both\", fillchar=fillchar)"},{"attributeType":"function | function","col":0,"comment":"null","endLoc":120,"id":2382,"name":"_where","nodeType":"Attribute","startLoc":120,"text":"_where"},{"col":4,"comment":"\n Filling right side of strings in the array with an additional\n character.\n\n Parameters\n ----------\n width : int\n Minimum width of resulting string; additional characters will be\n filled with ``fillchar``\n fillchar : str\n Additional character for filling, default is whitespace\n\n Returns\n -------\n filled : same 
type as values\n ","endLoc":469,"header":"def ljust(self, width, fillchar=\" \")","id":2383,"name":"ljust","nodeType":"Function","startLoc":452,"text":"def ljust(self, width, fillchar=\" \"):\n \"\"\"\n Filling right side of strings in the array with an additional\n character.\n\n Parameters\n ----------\n width : int\n Minimum width of resulting string; additional characters will be\n filled with ``fillchar``\n fillchar : str\n Additional character for filling, default is whitespace\n\n Returns\n -------\n filled : same type as values\n \"\"\"\n return self.pad(width, side=\"right\", fillchar=fillchar)"},{"attributeType":"Dataset","col":8,"comment":"null","endLoc":127,"id":2384,"name":"ds","nodeType":"Attribute","startLoc":127,"text":"self.ds"},{"attributeType":"null","col":8,"comment":"null","endLoc":2541,"id":2385,"name":"attrs","nodeType":"Attribute","startLoc":2541,"text":"self.attrs"},{"className":"BooleanIndexing","col":0,"comment":"null","endLoc":140,"id":2386,"nodeType":"Class","startLoc":130,"text":"class BooleanIndexing:\n # https://github.com/pydata/xarray/issues/2227\n def setup(self):\n self.ds = xr.Dataset(\n {\"a\": (\"time\", np.arange(10_000_000))},\n coords={\"time\": np.arange(10_000_000)},\n )\n self.time_filter = self.ds.time > 50_000\n\n def time_indexing(self):\n self.ds.isel(time=self.time_filter)"},{"col":4,"comment":"null","endLoc":137,"header":"def setup(self)","id":2387,"name":"setup","nodeType":"Function","startLoc":132,"text":"def setup(self):\n self.ds = xr.Dataset(\n {\"a\": (\"time\", np.arange(10_000_000))},\n coords={\"time\": np.arange(10_000_000)},\n )\n self.time_filter = self.ds.time > 50_000"},{"attributeType":"function | function","col":0,"comment":"null","endLoc":121,"id":2388,"name":"isin","nodeType":"Attribute","startLoc":121,"text":"isin"},{"className":"AccessorRegistrationWarning","col":0,"comment":"Warning for conflicts in accessor registration.","endLoc":8,"id":2389,"nodeType":"Class","startLoc":7,"text":"class AccessorRegistrationWarning(Warning):\n \"\"\"Warning for conflicts in accessor registration.\"\"\""},{"className":"_CachedAccessor","col":0,"comment":"Custom property-like object (descriptor) for caching accessors.","endLoc":39,"id":2390,"nodeType":"Class","startLoc":11,"text":"class _CachedAccessor:\n \"\"\"Custom property-like object (descriptor) for caching accessors.\"\"\"\n\n def __init__(self, name, accessor):\n self._name = name\n self._accessor = accessor\n\n def __get__(self, obj, cls):\n if obj is None:\n # we're accessing the attribute of the class, i.e., Dataset.geo\n return self._accessor\n\n try:\n return obj._accessors[self._name]\n except TypeError:\n obj._accessors = {}\n except KeyError:\n pass\n\n try:\n accessor_obj = self._accessor(obj)\n except AttributeError:\n # __getattr__ on data object will swallow any AttributeErrors\n # raised when initializing the accessor, so we need to raise as\n # something else (GH933):\n raise RuntimeError(\"error initializing %r accessor.\" % self._name)\n\n obj._accessors[self._name] = accessor_obj\n return accessor_obj"},{"col":4,"comment":"null","endLoc":39,"header":"def __get__(self, obj, cls)","id":2391,"name":"__get__","nodeType":"Function","startLoc":18,"text":"def __get__(self, obj, cls):\n if obj is None:\n # we're accessing the attribute of the class, i.e., Dataset.geo\n return self._accessor\n\n try:\n return obj._accessors[self._name]\n except TypeError:\n obj._accessors = {}\n except KeyError:\n pass\n\n try:\n accessor_obj = self._accessor(obj)\n except 
AttributeError:\n # __getattr__ on data object will swallow any AttributeErrors\n # raised when initializing the accessor, so we need to raise as\n # something else (GH933):\n raise RuntimeError(\"error initializing %r accessor.\" % self._name)\n\n obj._accessors[self._name] = accessor_obj\n return accessor_obj"},{"col":4,"comment":"null","endLoc":49,"header":"@parameterized([\"func\", \"pandas\"], ([\"mean\", \"count\"], [True, False]))\n def time_rolling_long(self, func, pandas)","id":2392,"name":"time_rolling_long","nodeType":"Function","startLoc":43,"text":"@parameterized([\"func\", \"pandas\"], ([\"mean\", \"count\"], [True, False]))\n def time_rolling_long(self, func, pandas):\n if pandas:\n se = self.da_long.to_series()\n getattr(se.rolling(window=window), func)()\n else:\n getattr(self.da_long.rolling(x=window), func)().load()"},{"attributeType":"function | function","col":0,"comment":"null","endLoc":122,"id":2393,"name":"take","nodeType":"Attribute","startLoc":122,"text":"take"},{"attributeType":"function | function","col":0,"comment":"null","endLoc":123,"id":2394,"name":"broadcast_to","nodeType":"Attribute","startLoc":123,"text":"broadcast_to"},{"col":4,"comment":"null","endLoc":140,"header":"def time_indexing(self)","id":2395,"name":"time_indexing","nodeType":"Function","startLoc":139,"text":"def time_indexing(self):\n self.ds.isel(time=self.time_filter)"},{"attributeType":"null","col":8,"comment":"null","endLoc":16,"id":2396,"name":"_accessor","nodeType":"Attribute","startLoc":16,"text":"self._accessor"},{"attributeType":"null","col":8,"comment":"null","endLoc":15,"id":2397,"name":"_name","nodeType":"Attribute","startLoc":15,"text":"self._name"},{"attributeType":"function | function","col":0,"comment":"null","endLoc":125,"id":2398,"name":"_concatenate","nodeType":"Attribute","startLoc":125,"text":"_concatenate"},{"id":2399,"name":"SEABORN_LICENSE","nodeType":"TextFile","path":"licenses","text":"Copyright (c) 2012-2013, Michael L. Waskom\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n* Neither the name of the {organization} nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"},{"attributeType":"null","col":8,"comment":"null","endLoc":137,"id":2400,"name":"time_filter","nodeType":"Attribute","startLoc":137,"text":"self.time_filter"},{"fileName":"resample_cftime.py","filePath":"xarray/core","id":2401,"nodeType":"File","text":"\"\"\"Resampling for CFTimeIndex. Does not support non-integer freq.\"\"\"\n# The mechanisms for resampling CFTimeIndex was copied and adapted from\n# the source code defined in pandas.core.resample\n#\n# For reference, here is a copy of the pandas copyright notice:\n#\n# BSD 3-Clause License\n#\n# Copyright (c) 2008-2012, AQR Capital Management, LLC, Lambda Foundry, Inc.\n# and PyData Development Team\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nimport datetime\n\nimport numpy as np\nimport pandas as pd\n\nfrom ..coding.cftime_offsets import (\n CFTIME_TICKS,\n Day,\n MonthEnd,\n QuarterEnd,\n YearEnd,\n cftime_range,\n normalize_date,\n to_offset,\n)\nfrom ..coding.cftimeindex import CFTimeIndex\n\n\nclass CFTimeGrouper:\n \"\"\"This is a simple container for the grouping parameters that implements a\n single method, the only one required for resampling in xarray. 
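Aside (editor's illustration, not part of the dump): CFTimeGrouper.first_items, defined in the resample_cftime.py text that follows, is what backs resample() on cftime-indexed coordinates. A rough sketch of the public code path that exercises it, assuming the optional cftime package is installed; the data values are illustrative:

    import numpy as np
    import xarray as xr

    # Daily data on a non-standard (noleap) calendar -> CFTimeIndex coordinate
    times = xr.cftime_range("2000-01-01", periods=120, freq="D", calendar="noleap")
    da = xr.DataArray(np.arange(120), coords=[("time", times)])

    # Month-end resampling; bin edges and labels come from CFTimeGrouper internally
    monthly = da.resample(time="M").mean()
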
It cannot\n be used in a call to groupby like a pandas.Grouper object can.\"\"\"\n\n def __init__(self, freq, closed=None, label=None, base=0, loffset=None):\n self.freq = to_offset(freq)\n self.closed = closed\n self.label = label\n self.base = base\n self.loffset = loffset\n\n if isinstance(self.freq, (MonthEnd, QuarterEnd, YearEnd)):\n if self.closed is None:\n self.closed = \"right\"\n if self.label is None:\n self.label = \"right\"\n else:\n if self.closed is None:\n self.closed = \"left\"\n if self.label is None:\n self.label = \"left\"\n\n def first_items(self, index):\n \"\"\"Meant to reproduce the results of the following\n\n grouper = pandas.Grouper(...)\n first_items = pd.Series(np.arange(len(index)),\n index).groupby(grouper).first()\n\n with index being a CFTimeIndex instead of a DatetimeIndex.\n \"\"\"\n\n datetime_bins, labels = _get_time_bins(\n index, self.freq, self.closed, self.label, self.base\n )\n if self.loffset is not None:\n if isinstance(self.loffset, datetime.timedelta):\n labels = labels + self.loffset\n else:\n labels = labels + to_offset(self.loffset)\n\n # check binner fits data\n if index[0] < datetime_bins[0]:\n raise ValueError(\"Value falls before first bin\")\n if index[-1] > datetime_bins[-1]:\n raise ValueError(\"Value falls after last bin\")\n\n integer_bins = np.searchsorted(index, datetime_bins, side=self.closed)[:-1]\n first_items = pd.Series(integer_bins, labels)\n\n # Mask duplicate values with NaNs, preserving the last values\n non_duplicate = ~first_items.duplicated(\"last\")\n return first_items.where(non_duplicate)\n\n\ndef _get_time_bins(index, freq, closed, label, base):\n \"\"\"Obtain the bins and their respective labels for resampling operations.\n\n Parameters\n ----------\n index : CFTimeIndex\n Index object to be resampled (e.g., CFTimeIndex named 'time').\n freq : xarray.coding.cftime_offsets.BaseCFTimeOffset\n The offset object representing target conversion a.k.a. resampling\n frequency (e.g., 'MS', '2D', 'H', or '3T' with\n coding.cftime_offsets.to_offset() applied to it).\n closed : 'left' or 'right', optional\n Which side of bin interval is closed.\n The default is 'left' for all frequency offsets except for 'M' and 'A',\n which have a default of 'right'.\n label : 'left' or 'right', optional\n Which bin edge label to label bucket with.\n The default is 'left' for all frequency offsets except for 'M' and 'A',\n which have a default of 'right'.\n base : int, optional\n For frequencies that evenly subdivide 1 day, the \"origin\" of the\n aggregated intervals. For example, for '5min' frequency, base could\n range from 0 through 4. 
Defaults to 0.\n\n Returns\n -------\n datetime_bins : CFTimeIndex\n Defines the edge of resampling bins by which original index values will\n be grouped into.\n labels : CFTimeIndex\n Define what the user actually sees the bins labeled as.\n \"\"\"\n\n if not isinstance(index, CFTimeIndex):\n raise TypeError(\n \"index must be a CFTimeIndex, but got \"\n \"an instance of %r\" % type(index).__name__\n )\n if len(index) == 0:\n datetime_bins = labels = CFTimeIndex(data=[], name=index.name)\n return datetime_bins, labels\n\n first, last = _get_range_edges(\n index.min(), index.max(), freq, closed=closed, base=base\n )\n datetime_bins = labels = cftime_range(\n freq=freq, start=first, end=last, name=index.name\n )\n\n datetime_bins, labels = _adjust_bin_edges(\n datetime_bins, freq, closed, index, labels\n )\n\n if label == \"right\":\n labels = labels[1:]\n else:\n labels = labels[:-1]\n\n # TODO: when CFTimeIndex supports missing values, if the reference index\n # contains missing values, insert the appropriate NaN value at the\n # beginning of the datetime_bins and labels indexes.\n\n return datetime_bins, labels\n\n\ndef _adjust_bin_edges(datetime_bins, offset, closed, index, labels):\n \"\"\"This is required for determining the bin edges resampling with\n daily frequencies greater than one day, month end, and year end\n frequencies.\n\n Consider the following example. Let's say you want to downsample the\n time series with the following coordinates to month end frequency:\n\n CFTimeIndex([2000-01-01 12:00:00, 2000-01-31 12:00:00,\n 2000-02-01 12:00:00], dtype='object')\n\n Without this adjustment, _get_time_bins with month-end frequency will\n return the following index for the bin edges (default closed='right' and\n label='right' in this case):\n\n CFTimeIndex([1999-12-31 00:00:00, 2000-01-31 00:00:00,\n 2000-02-29 00:00:00], dtype='object')\n\n If 2000-01-31 is used as a bound for a bin, the value on\n 2000-01-31T12:00:00 (at noon on January 31st), will not be included in the\n month of January. To account for this, pandas adds a day minus one worth\n of microseconds to the bin edges generated by cftime range, so that we do\n bin the value at noon on January 31st in the January bin. 
This results in\n an index with bin edges like the following:\n\n CFTimeIndex([1999-12-31 23:59:59, 2000-01-31 23:59:59,\n 2000-02-29 23:59:59], dtype='object')\n\n The labels are still:\n\n CFTimeIndex([2000-01-31 00:00:00, 2000-02-29 00:00:00], dtype='object')\n\n This is also required for daily frequencies longer than one day and\n year-end frequencies.\n \"\"\"\n is_super_daily = isinstance(offset, (MonthEnd, QuarterEnd, YearEnd)) or (\n isinstance(offset, Day) and offset.n > 1\n )\n if is_super_daily:\n if closed == \"right\":\n datetime_bins = datetime_bins + datetime.timedelta(days=1, microseconds=-1)\n if datetime_bins[-2] > index.max():\n datetime_bins = datetime_bins[:-1]\n labels = labels[:-1]\n\n return datetime_bins, labels\n\n\ndef _get_range_edges(first, last, offset, closed=\"left\", base=0):\n \"\"\" Get the correct starting and ending datetimes for the resampled\n CFTimeIndex range.\n\n Parameters\n ----------\n first : cftime.datetime\n Uncorrected starting datetime object for resampled CFTimeIndex range.\n Usually the min of the original CFTimeIndex.\n last : cftime.datetime\n Uncorrected ending datetime object for resampled CFTimeIndex range.\n Usually the max of the original CFTimeIndex.\n offset : xarray.coding.cftime_offsets.BaseCFTimeOffset\n The offset object representing target conversion a.k.a. resampling\n frequency. Contains information on offset type (e.g. Day or 'D') and\n offset magnitude (e.g., n = 3).\n closed : 'left' or 'right', optional\n Which side of bin interval is closed. Defaults to 'left'.\n base : int, optional\n For frequencies that evenly subdivide 1 day, the \"origin\" of the\n aggregated intervals. For example, for '5min' frequency, base could\n range from 0 through 4. Defaults to 0.\n\n Returns\n -------\n first : cftime.datetime\n Corrected starting datetime object for resampled CFTimeIndex range.\n last : cftime.datetime\n Corrected ending datetime object for resampled CFTimeIndex range.\n \"\"\"\n if isinstance(offset, CFTIME_TICKS):\n first, last = _adjust_dates_anchored(\n first, last, offset, closed=closed, base=base\n )\n return first, last\n else:\n first = normalize_date(first)\n last = normalize_date(last)\n\n if closed == \"left\":\n first = offset.rollback(first)\n else:\n first = first - offset\n\n last = last + offset\n return first, last\n\n\ndef _adjust_dates_anchored(first, last, offset, closed=\"right\", base=0):\n \"\"\" First and last offsets should be calculated from the start day to fix\n an error cause by resampling across multiple days when a one day period is\n not a multiple of the frequency.\n See https://github.com/pandas-dev/pandas/issues/8683\n\n Parameters\n ----------\n first : cftime.datetime\n A datetime object representing the start of a CFTimeIndex range.\n last : cftime.datetime\n A datetime object representing the end of a CFTimeIndex range.\n offset : xarray.coding.cftime_offsets.BaseCFTimeOffset\n The offset object representing target conversion a.k.a. resampling\n frequency. Contains information on offset type (e.g. Day or 'D') and\n offset magnitude (e.g., n = 3).\n closed : 'left' or 'right', optional\n Which side of bin interval is closed. Defaults to 'right'.\n base : int, optional\n For frequencies that evenly subdivide 1 day, the \"origin\" of the\n aggregated intervals. For example, for '5min' frequency, base could\n range from 0 through 4. 
Defaults to 0.\n\n Returns\n -------\n fresult : cftime.datetime\n A datetime object representing the start of a date range that has been\n adjusted to fix resampling errors.\n lresult : cftime.datetime\n A datetime object representing the end of a date range that has been\n adjusted to fix resampling errors.\n \"\"\"\n\n base = base % offset.n\n start_day = normalize_date(first)\n base_td = type(offset)(n=base).as_timedelta()\n start_day += base_td\n foffset = exact_cftime_datetime_difference(start_day, first) % offset.as_timedelta()\n loffset = exact_cftime_datetime_difference(start_day, last) % offset.as_timedelta()\n if closed == \"right\":\n if foffset.total_seconds() > 0:\n fresult = first - foffset\n else:\n fresult = first - offset.as_timedelta()\n\n if loffset.total_seconds() > 0:\n lresult = last + (offset.as_timedelta() - loffset)\n else:\n lresult = last\n else:\n if foffset.total_seconds() > 0:\n fresult = first - foffset\n else:\n fresult = first\n\n if loffset.total_seconds() > 0:\n lresult = last + (offset.as_timedelta() - loffset)\n else:\n lresult = last + offset.as_timedelta()\n return fresult, lresult\n\n\ndef exact_cftime_datetime_difference(a, b):\n \"\"\"Exact computation of b - a\n\n Assumes:\n\n a = a_0 + a_m\n b = b_0 + b_m\n\n Here a_0, and b_0 represent the input dates rounded\n down to the nearest second, and a_m, and b_m represent\n the remaining microseconds associated with date a and\n date b.\n\n We can then express the value of b - a as:\n\n b - a = (b_0 + b_m) - (a_0 + a_m) = b_0 - a_0 + b_m - a_m\n\n By construction, we know that b_0 - a_0 must be a round number\n of seconds. Therefore we can take the result of b_0 - a_0 using\n ordinary cftime.datetime arithmetic and round to the nearest\n second. b_m - a_m is the remainder, in microseconds, and we\n can simply add this to the rounded timedelta.\n\n Parameters\n ----------\n a : cftime.datetime\n Input datetime\n b : cftime.datetime\n Input datetime\n\n Returns\n -------\n datetime.timedelta\n \"\"\"\n seconds = b.replace(microsecond=0) - a.replace(microsecond=0)\n seconds = int(round(seconds.total_seconds()))\n microseconds = b.microsecond - a.microsecond\n return datetime.timedelta(seconds=seconds, microseconds=microseconds)\n"},{"attributeType":"null","col":0,"comment":"null","endLoc":646,"id":2402,"name":"CFTIME_TICKS","nodeType":"Attribute","startLoc":646,"text":"CFTIME_TICKS"},{"className":"Day","col":0,"comment":"null","endLoc":542,"id":2403,"nodeType":"Class","startLoc":535,"text":"class Day(BaseCFTimeOffset):\n _freq = \"D\"\n\n def as_timedelta(self):\n return timedelta(days=self.n)\n\n def __apply__(self, other):\n return other + self.as_timedelta()"},{"attributeType":"function | function","col":0,"comment":"null","endLoc":126,"id":2404,"name":"_stack","nodeType":"Attribute","startLoc":126,"text":"_stack"},{"col":4,"comment":"null","endLoc":32,"header":"def time_combine_manual(self)","id":2405,"name":"time_combine_manual","nodeType":"Function","startLoc":29,"text":"def time_combine_manual(self):\n datasets = [[self.dsA0, self.dsA1], [self.dsB0, self.dsB1]]\n\n xr.combine_manual(datasets, concat_dim=[None, \"t\"])"},{"attributeType":"function | function","col":0,"comment":"null","endLoc":128,"id":2406,"name":"array_all","nodeType":"Attribute","startLoc":128,"text":"array_all"},{"className":"BaseCFTimeOffset","col":0,"comment":"null","endLoc":157,"id":2407,"nodeType":"Class","startLoc":76,"text":"class BaseCFTimeOffset:\n _freq: ClassVar[Optional[str]] = None\n _day_option: 
ClassVar[Optional[str]] = None\n\n def __init__(self, n=1):\n if not isinstance(n, int):\n raise TypeError(\n \"The provided multiple 'n' must be an integer. \"\n \"Instead a value of type {!r} was provided.\".format(type(n))\n )\n self.n = n\n\n def rule_code(self):\n return self._freq\n\n def __eq__(self, other):\n return self.n == other.n and self.rule_code() == other.rule_code()\n\n def __ne__(self, other):\n return not self == other\n\n def __add__(self, other):\n return self.__apply__(other)\n\n def __sub__(self, other):\n import cftime\n\n if isinstance(other, cftime.datetime):\n raise TypeError(\"Cannot subtract a cftime.datetime \" \"from a time offset.\")\n elif type(other) == type(self):\n return type(self)(self.n - other.n)\n else:\n return NotImplemented\n\n def __mul__(self, other):\n return type(self)(n=other * self.n)\n\n def __neg__(self):\n return self * -1\n\n def __rmul__(self, other):\n return self.__mul__(other)\n\n def __radd__(self, other):\n return self.__add__(other)\n\n def __rsub__(self, other):\n if isinstance(other, BaseCFTimeOffset) and type(self) != type(other):\n raise TypeError(\"Cannot subtract cftime offsets of differing \" \"types\")\n return -self + other\n\n def __apply__(self):\n return NotImplemented\n\n def onOffset(self, date):\n \"\"\"Check if the given date is in the set of possible dates created\n using a length-one version of this offset class.\"\"\"\n test_date = (self + date) - self\n return date == test_date\n\n def rollforward(self, date):\n if self.onOffset(date):\n return date\n else:\n return date + type(self)()\n\n def rollback(self, date):\n if self.onOffset(date):\n return date\n else:\n return date - type(self)()\n\n def __str__(self):\n return \"<{}: n={}>\".format(type(self).__name__, self.n)\n\n def __repr__(self):\n return str(self)\n\n def _get_offset_day(self, other):\n # subclass must implement `_day_option`; calling from the base class\n # will raise NotImplementedError.\n return _get_day_of_month(other, self._day_option)"},{"col":4,"comment":"null","endLoc":89,"header":"def rule_code(self)","id":2408,"name":"rule_code","nodeType":"Function","startLoc":88,"text":"def rule_code(self):\n return self._freq"},{"col":4,"comment":"null","endLoc":92,"header":"def __eq__(self, other)","id":2409,"name":"__eq__","nodeType":"Function","startLoc":91,"text":"def __eq__(self, other):\n return self.n == other.n and self.rule_code() == other.rule_code()"},{"col":4,"comment":"null","endLoc":95,"header":"def __ne__(self, other)","id":2410,"name":"__ne__","nodeType":"Function","startLoc":94,"text":"def __ne__(self, other):\n return not self == other"},{"col":4,"comment":"null","endLoc":98,"header":"def __add__(self, other)","id":2411,"name":"__add__","nodeType":"Function","startLoc":97,"text":"def __add__(self, other):\n return self.__apply__(other)"},{"col":4,"comment":"Also has to load and arrange t coordinate","endLoc":38,"header":"def time_auto_combine(self)","id":2412,"name":"time_auto_combine","nodeType":"Function","startLoc":34,"text":"def time_auto_combine(self):\n \"\"\"Also has to load and arrange t coordinate\"\"\"\n datasets = [self.dsA0, self.dsA1, self.dsB0, self.dsB1]\n\n xr.combine_auto(datasets)"},{"col":4,"comment":"null","endLoc":128,"header":"def __apply__(self)","id":2413,"name":"__apply__","nodeType":"Function","startLoc":127,"text":"def __apply__(self):\n return NotImplemented"},{"col":4,"comment":"null","endLoc":108,"header":"def __sub__(self, 
other)","id":2414,"name":"__sub__","nodeType":"Function","startLoc":100,"text":"def __sub__(self, other):\n import cftime\n\n if isinstance(other, cftime.datetime):\n raise TypeError(\"Cannot subtract a cftime.datetime \" \"from a time offset.\")\n elif type(other) == type(self):\n return type(self)(self.n - other.n)\n else:\n return NotImplemented"},{"attributeType":"Dataset","col":8,"comment":"null","endLoc":19,"id":2415,"name":"dsA1","nodeType":"Attribute","startLoc":19,"text":"self.dsA1"},{"col":4,"comment":"null","endLoc":55,"header":"@parameterized([\"window_\", \"min_periods\"], ([20, 40], [5, None]))\n def time_rolling_np(self, window_, min_periods)","id":2416,"name":"time_rolling_np","nodeType":"Function","startLoc":51,"text":"@parameterized([\"window_\", \"min_periods\"], ([20, 40], [5, None]))\n def time_rolling_np(self, window_, min_periods):\n self.ds.rolling(x=window_, center=False, min_periods=min_periods).reduce(\n getattr(np, \"nanmean\")\n ).load()"},{"col":4,"comment":"null","endLoc":111,"header":"def __mul__(self, other)","id":2417,"name":"__mul__","nodeType":"Function","startLoc":110,"text":"def __mul__(self, other):\n return type(self)(n=other * self.n)"},{"col":4,"comment":"null","endLoc":114,"header":"def __neg__(self)","id":2418,"name":"__neg__","nodeType":"Function","startLoc":113,"text":"def __neg__(self):\n return self * -1"},{"col":4,"comment":"null","endLoc":117,"header":"def __rmul__(self, other)","id":2419,"name":"__rmul__","nodeType":"Function","startLoc":116,"text":"def __rmul__(self, other):\n return self.__mul__(other)"},{"attributeType":"Dataset","col":8,"comment":"null","endLoc":16,"id":2420,"name":"dsA0","nodeType":"Attribute","startLoc":16,"text":"self.dsA0"},{"attributeType":"Dataset","col":8,"comment":"null","endLoc":25,"id":2421,"name":"dsB1","nodeType":"Attribute","startLoc":25,"text":"self.dsB1"},{"attributeType":"Dataset","col":8,"comment":"null","endLoc":133,"id":2422,"name":"ds","nodeType":"Attribute","startLoc":133,"text":"self.ds"},{"attributeType":"null","col":16,"comment":"null","endLoc":1,"id":2423,"name":"np","nodeType":"Attribute","startLoc":1,"text":"np"},{"attributeType":"null","col":17,"comment":"null","endLoc":2,"id":2424,"name":"pd","nodeType":"Attribute","startLoc":2,"text":"pd"},{"attributeType":"null","col":17,"comment":"null","endLoc":4,"id":2425,"name":"xr","nodeType":"Attribute","startLoc":4,"text":"xr"},{"attributeType":"null","col":0,"comment":"null","endLoc":8,"id":2426,"name":"nx","nodeType":"Attribute","startLoc":8,"text":"nx"},{"attributeType":"Dataset","col":8,"comment":"null","endLoc":22,"id":2427,"name":"dsB0","nodeType":"Attribute","startLoc":22,"text":"self.dsB0"},{"attributeType":"function | function","col":0,"comment":"null","endLoc":129,"id":2428,"name":"array_any","nodeType":"Attribute","startLoc":129,"text":"array_any"},{"col":4,"comment":"\n Filling left side of strings in the array with an additional character.\n\n Parameters\n ----------\n width : int\n Minimum width of resulting string; additional characters will be\n filled with ``fillchar``\n fillchar : str\n Additional character for filling, default is whitespace\n\n Returns\n -------\n filled : same type as values\n ","endLoc":487,"header":"def rjust(self, width, fillchar=\" \")","id":2429,"name":"rjust","nodeType":"Function","startLoc":471,"text":"def rjust(self, width, fillchar=\" \"):\n \"\"\"\n Filling left side of strings in the array with an additional character.\n\n Parameters\n ----------\n width : int\n Minimum width of resulting string; 
additional characters will be\n filled with ``fillchar``\n fillchar : str\n Additional character for filling, default is whitespace\n\n Returns\n -------\n filled : same type as values\n \"\"\"\n return self.pad(width, side=\"left\", fillchar=fillchar)"},{"col":4,"comment":"null","endLoc":120,"header":"def __radd__(self, other)","id":2430,"name":"__radd__","nodeType":"Function","startLoc":119,"text":"def __radd__(self, other):\n return self.__add__(other)"},{"col":4,"comment":"null","endLoc":125,"header":"def __rsub__(self, other)","id":2431,"name":"__rsub__","nodeType":"Function","startLoc":122,"text":"def __rsub__(self, other):\n if isinstance(other, BaseCFTimeOffset) and type(self) != type(other):\n raise TypeError(\"Cannot subtract cftime offsets of differing \" \"types\")\n return -self + other"},{"attributeType":"null","col":16,"comment":"null","endLoc":1,"id":2432,"name":"np","nodeType":"Attribute","startLoc":1,"text":"np"},{"col":4,"comment":"null","endLoc":149,"header":"def __str__(self)","id":2433,"name":"__str__","nodeType":"Function","startLoc":148,"text":"def __str__(self):\n return \"<{}: n={}>\".format(type(self).__name__, self.n)"},{"attributeType":"function | function","col":0,"comment":"null","endLoc":131,"id":2434,"name":"tensordot","nodeType":"Attribute","startLoc":131,"text":"tensordot"},{"attributeType":"null","col":17,"comment":"null","endLoc":3,"id":2435,"name":"xr","nodeType":"Attribute","startLoc":3,"text":"xr"},{"col":4,"comment":"null","endLoc":152,"header":"def __repr__(self)","id":2436,"name":"__repr__","nodeType":"Function","startLoc":151,"text":"def __repr__(self):\n return str(self)"},{"col":4,"comment":"null","endLoc":157,"header":"def _get_offset_day(self, other)","id":2437,"name":"_get_offset_day","nodeType":"Function","startLoc":154,"text":"def _get_offset_day(self, other):\n # subclass must implement `_day_option`; calling from the base class\n # will raise NotImplementedError.\n return _get_day_of_month(other, self._day_option)"},{"col":0,"comment":"Find the day in `other`'s month that satisfies a BaseCFTimeOffset's\n onOffset policy, as described by the `day_option` argument.\n\n Parameters\n ----------\n other : cftime.datetime\n day_option : 'start', 'end'\n 'start': returns 1\n 'end': returns last day of the month\n\n Returns\n -------\n day_of_month : int\n\n ","endLoc":187,"header":"def _get_day_of_month(other, day_option)","id":2438,"name":"_get_day_of_month","nodeType":"Function","startLoc":160,"text":"def _get_day_of_month(other, day_option):\n \"\"\"Find the day in `other`'s month that satisfies a BaseCFTimeOffset's\n onOffset policy, as described by the `day_option` argument.\n\n Parameters\n ----------\n other : cftime.datetime\n day_option : 'start', 'end'\n 'start': returns 1\n 'end': returns last day of the month\n\n Returns\n -------\n day_of_month : int\n\n \"\"\"\n\n if day_option == \"start\":\n return 1\n elif day_option == \"end\":\n days_in_month = _days_in_month(other)\n return days_in_month\n elif day_option is None:\n # Note: unlike `_shift_month`, _get_day_of_month does not\n # allow day_option = None\n raise NotImplementedError()\n else:\n raise ValueError(day_option)"},{"attributeType":"function | function","col":0,"comment":"null","endLoc":132,"id":2439,"name":"einsum","nodeType":"Attribute","startLoc":132,"text":"einsum"},{"col":0,"comment":"The number of days in the month of the given date","endLoc":196,"header":"def _days_in_month(date)","id":2440,"name":"_days_in_month","nodeType":"Function","startLoc":190,"text":"def 
_days_in_month(date):\n \"\"\"The number of days in the month of the given date\"\"\"\n if date.month == 12:\n reference = type(date)(date.year + 1, 1, 1)\n else:\n reference = type(date)(date.year, date.month + 1, 1)\n return (reference - timedelta(days=1)).day"},{"attributeType":"null","col":4,"comment":"null","endLoc":77,"id":2441,"name":"_freq","nodeType":"Attribute","startLoc":77,"text":"_freq"},{"attributeType":"function | function","col":0,"comment":"null","endLoc":153,"id":2442,"name":"masked_invalid","nodeType":"Attribute","startLoc":153,"text":"masked_invalid"},{"attributeType":"function","col":0,"comment":"null","endLoc":297,"id":2443,"name":"argmax","nodeType":"Attribute","startLoc":297,"text":"argmax"},{"fileName":"conventions.py","filePath":"xarray","id":2444,"nodeType":"File","text":"import warnings\nfrom collections import defaultdict\n\nimport numpy as np\nimport pandas as pd\n\nfrom .coding import strings, times, variables\nfrom .coding.variables import SerializationWarning\nfrom .core import duck_array_ops, indexing\nfrom .core.common import contains_cftime_datetimes\nfrom .core.pycompat import dask_array_type\nfrom .core.variable import IndexVariable, Variable, as_variable\n\n\nclass NativeEndiannessArray(indexing.ExplicitlyIndexedNDArrayMixin):\n \"\"\"Decode arrays on the fly from non-native to native endianness\n\n This is useful for decoding arrays from netCDF3 files (which are all\n big endian) into native endianness, so they can be used with Cython\n functions, such as those found in bottleneck and pandas.\n\n >>> x = np.arange(5, dtype='>i2')\n\n >>> x.dtype\n dtype('>i2')\n\n >>> NativeEndianArray(x).dtype\n dtype('int16')\n\n >>> NativeEndianArray(x)[:].dtype\n dtype('int16')\n \"\"\"\n\n __slots__ = (\"array\",)\n\n def __init__(self, array):\n self.array = indexing.as_indexable(array)\n\n @property\n def dtype(self):\n return np.dtype(self.array.dtype.kind + str(self.array.dtype.itemsize))\n\n def __getitem__(self, key):\n return np.asarray(self.array[key], dtype=self.dtype)\n\n\nclass BoolTypeArray(indexing.ExplicitlyIndexedNDArrayMixin):\n \"\"\"Decode arrays on the fly from integer to boolean datatype\n\n This is useful for decoding boolean arrays from integer typed netCDF\n variables.\n\n >>> x = np.array([1, 0, 1, 1, 0], dtype='i1')\n\n >>> x.dtype\n dtype('>i2')\n\n >>> BoolTypeArray(x).dtype\n dtype('bool')\n\n >>> BoolTypeArray(x)[:].dtype\n dtype('bool')\n \"\"\"\n\n __slots__ = (\"array\",)\n\n def __init__(self, array):\n self.array = indexing.as_indexable(array)\n\n @property\n def dtype(self):\n return np.dtype(\"bool\")\n\n def __getitem__(self, key):\n return np.asarray(self.array[key], dtype=self.dtype)\n\n\ndef _var_as_tuple(var):\n return var.dims, var.data, var.attrs.copy(), var.encoding.copy()\n\n\ndef maybe_encode_nonstring_dtype(var, name=None):\n if \"dtype\" in var.encoding and var.encoding[\"dtype\"] not in (\"S1\", str):\n dims, data, attrs, encoding = _var_as_tuple(var)\n dtype = np.dtype(encoding.pop(\"dtype\"))\n if dtype != var.dtype:\n if np.issubdtype(dtype, np.integer):\n if (\n np.issubdtype(var.dtype, np.floating)\n and \"_FillValue\" not in var.attrs\n and \"missing_value\" not in var.attrs\n ):\n warnings.warn(\n \"saving variable %s with floating \"\n \"point data as an integer dtype without \"\n \"any _FillValue to use for NaNs\" % name,\n SerializationWarning,\n stacklevel=10,\n )\n data = duck_array_ops.around(data)[...]\n data = data.astype(dtype=dtype)\n var = Variable(dims, data, attrs, encoding)\n return 
var\n\n\ndef maybe_default_fill_value(var):\n # make NaN the fill value for float types:\n if (\n \"_FillValue\" not in var.attrs\n and \"_FillValue\" not in var.encoding\n and np.issubdtype(var.dtype, np.floating)\n ):\n var.attrs[\"_FillValue\"] = var.dtype.type(np.nan)\n return var\n\n\ndef maybe_encode_bools(var):\n if (\n (var.dtype == np.bool)\n and (\"dtype\" not in var.encoding)\n and (\"dtype\" not in var.attrs)\n ):\n dims, data, attrs, encoding = _var_as_tuple(var)\n attrs[\"dtype\"] = \"bool\"\n data = data.astype(dtype=\"i1\", copy=True)\n var = Variable(dims, data, attrs, encoding)\n return var\n\n\ndef _infer_dtype(array, name=None):\n \"\"\"Given an object array with no missing values, infer its dtype from its\n first element\n \"\"\"\n if array.dtype.kind != \"O\":\n raise TypeError(\"infer_type must be called on a dtype=object array\")\n\n if array.size == 0:\n return np.dtype(float)\n\n element = array[(0,) * array.ndim]\n if isinstance(element, (bytes, str)):\n return strings.create_vlen_dtype(type(element))\n\n dtype = np.array(element).dtype\n if dtype.kind != \"O\":\n return dtype\n\n raise ValueError(\n \"unable to infer dtype on variable {!r}; xarray \"\n \"cannot serialize arbitrary Python objects\".format(name)\n )\n\n\ndef ensure_not_multiindex(var, name=None):\n if isinstance(var, IndexVariable) and isinstance(var.to_index(), pd.MultiIndex):\n raise NotImplementedError(\n \"variable {!r} is a MultiIndex, which cannot yet be \"\n \"serialized to netCDF files \"\n \"(https://github.com/pydata/xarray/issues/1077). Use \"\n \"reset_index() to convert MultiIndex levels into coordinate \"\n \"variables instead.\".format(name)\n )\n\n\ndef _copy_with_dtype(data, dtype):\n \"\"\"Create a copy of an array with the given dtype.\n\n We use this instead of np.array() to ensure that custom object dtypes end\n up on the resulting array.\n \"\"\"\n result = np.empty(data.shape, dtype)\n result[...] = data\n return result\n\n\ndef ensure_dtype_not_object(var, name=None):\n # TODO: move this from conventions to backends? (it's not CF related)\n if var.dtype.kind == \"O\":\n dims, data, attrs, encoding = _var_as_tuple(var)\n\n if isinstance(data, dask_array_type):\n warnings.warn(\n \"variable {} has data in the form of a dask array with \"\n \"dtype=object, which means it is being loaded into memory \"\n \"to determine a data type that can be safely stored on disk. \"\n \"To avoid this, coerce this variable to a fixed-size dtype \"\n \"with astype() before saving it.\".format(name),\n SerializationWarning,\n )\n data = data.compute()\n\n missing = pd.isnull(data)\n if missing.any():\n # nb. this will fail for dask.array data\n non_missing_values = data[~missing]\n inferred_dtype = _infer_dtype(non_missing_values, name)\n\n # There is no safe bit-pattern for NA in typical binary string\n # formats, we so can't set a fill_value. 
Unfortunately, this means\n # we can't distinguish between missing values and empty strings.\n if strings.is_bytes_dtype(inferred_dtype):\n fill_value = b\"\"\n elif strings.is_unicode_dtype(inferred_dtype):\n fill_value = \"\"\n else:\n # insist on using float for numeric values\n if not np.issubdtype(inferred_dtype, np.floating):\n inferred_dtype = np.dtype(float)\n fill_value = inferred_dtype.type(np.nan)\n\n data = _copy_with_dtype(data, dtype=inferred_dtype)\n data[missing] = fill_value\n else:\n data = _copy_with_dtype(data, dtype=_infer_dtype(data, name))\n\n assert data.dtype.kind != \"O\" or data.dtype.metadata\n var = Variable(dims, data, attrs, encoding)\n return var\n\n\ndef encode_cf_variable(var, needs_copy=True, name=None):\n \"\"\"\n Converts an Variable into an Variable which follows some\n of the CF conventions:\n\n - Nans are masked using _FillValue (or the deprecated missing_value)\n - Rescaling via: scale_factor and add_offset\n - datetimes are converted to the CF 'units since time' format\n - dtype encodings are enforced.\n\n Parameters\n ----------\n var : xarray.Variable\n A variable holding un-encoded data.\n\n Returns\n -------\n out : xarray.Variable\n A variable which has been encoded as described above.\n \"\"\"\n ensure_not_multiindex(var, name=name)\n\n for coder in [\n times.CFDatetimeCoder(),\n times.CFTimedeltaCoder(),\n variables.CFScaleOffsetCoder(),\n variables.CFMaskCoder(),\n variables.UnsignedIntegerCoder(),\n ]:\n var = coder.encode(var, name=name)\n\n # TODO(shoyer): convert all of these to use coders, too:\n var = maybe_encode_nonstring_dtype(var, name=name)\n var = maybe_default_fill_value(var)\n var = maybe_encode_bools(var)\n var = ensure_dtype_not_object(var, name=name)\n return var\n\n\ndef decode_cf_variable(\n name,\n var,\n concat_characters=True,\n mask_and_scale=True,\n decode_times=True,\n decode_endianness=True,\n stack_char_dim=True,\n use_cftime=None,\n):\n \"\"\"\n Decodes a variable which may hold CF encoded information.\n\n This includes variables that have been masked and scaled, which\n hold CF style time variables (this is almost always the case if\n the dataset has been serialized) and which have strings encoded\n as character arrays.\n\n Parameters\n ----------\n name: str\n Name of the variable. Used for better error messages.\n var : Variable\n A variable holding potentially CF encoded information.\n concat_characters : bool\n Should character arrays be concatenated to strings, for\n example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'\n mask_and_scale: bool\n Lazily scale (using scale_factor and add_offset) and mask\n (using _FillValue). If the _Unsigned attribute is present\n treat integer arrays as unsigned.\n decode_times : bool\n Decode cf times ('hours since 2000-01-01') to np.datetime64.\n decode_endianness : bool\n Decode arrays from non-native to native endianness.\n stack_char_dim : bool\n Whether to stack characters into bytes along the last dimension of this\n array. Passed as an argument because we need to look at the full\n dataset to figure out if this is appropriate.\n use_cftime: bool, optional\n Only relevant if encoded dates come from a standard calendar\n (e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not\n specified). If None (default), attempt to decode times to\n ``np.datetime64[ns]`` objects; if this is not possible, decode times to\n ``cftime.datetime`` objects. 
If True, always decode times to\n ``cftime.datetime`` objects, regardless of whether or not they can be\n represented using ``np.datetime64[ns]`` objects. If False, always\n decode times to ``np.datetime64[ns]`` objects; if this is not possible\n raise an error.\n\n Returns\n -------\n out : Variable\n A variable holding the decoded equivalent of var.\n \"\"\"\n var = as_variable(var)\n original_dtype = var.dtype\n\n if concat_characters:\n if stack_char_dim:\n var = strings.CharacterArrayCoder().decode(var, name=name)\n var = strings.EncodedStringCoder().decode(var)\n\n if mask_and_scale:\n for coder in [\n variables.UnsignedIntegerCoder(),\n variables.CFMaskCoder(),\n variables.CFScaleOffsetCoder(),\n ]:\n var = coder.decode(var, name=name)\n\n if decode_times:\n for coder in [\n times.CFTimedeltaCoder(),\n times.CFDatetimeCoder(use_cftime=use_cftime),\n ]:\n var = coder.decode(var, name=name)\n\n dimensions, data, attributes, encoding = variables.unpack_for_decoding(var)\n # TODO(shoyer): convert everything below to use coders\n\n if decode_endianness and not data.dtype.isnative:\n # do this last, so it's only done if we didn't already unmask/scale\n data = NativeEndiannessArray(data)\n original_dtype = data.dtype\n\n encoding.setdefault(\"dtype\", original_dtype)\n\n if \"dtype\" in attributes and attributes[\"dtype\"] == \"bool\":\n del attributes[\"dtype\"]\n data = BoolTypeArray(data)\n\n if not isinstance(data, dask_array_type):\n data = indexing.LazilyOuterIndexedArray(data)\n\n return Variable(dimensions, data, attributes, encoding=encoding)\n\n\ndef _update_bounds_attributes(variables):\n \"\"\"Adds time attributes to time bounds variables.\n\n Variables handling time bounds (\"Cell boundaries\" in the CF\n conventions) do not necessarily carry the necessary attributes to be\n decoded. This copies the attributes from the time variable to the\n associated boundaries.\n\n See Also:\n\n http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/\n cf-conventions.html#cell-boundaries\n\n https://github.com/pydata/xarray/issues/2565\n \"\"\"\n\n # For all time variables with bounds\n for v in variables.values():\n attrs = v.attrs\n has_date_units = \"units\" in attrs and \"since\" in attrs[\"units\"]\n if has_date_units and \"bounds\" in attrs:\n if attrs[\"bounds\"] in variables:\n bounds_attrs = variables[attrs[\"bounds\"]].attrs\n bounds_attrs.setdefault(\"units\", attrs[\"units\"])\n if \"calendar\" in attrs:\n bounds_attrs.setdefault(\"calendar\", attrs[\"calendar\"])\n\n\ndef _update_bounds_encoding(variables):\n \"\"\"Adds time encoding to time bounds variables.\n\n Variables handling time bounds (\"Cell boundaries\" in the CF\n conventions) do not necessarily carry the necessary attributes to be\n decoded. 
This copies the encoding from the time variable to the\n associated bounds variable so that we write CF-compliant files.\n\n See Also:\n\n http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/\n cf-conventions.html#cell-boundaries\n\n https://github.com/pydata/xarray/issues/2565\n \"\"\"\n\n # For all time variables with bounds\n for v in variables.values():\n attrs = v.attrs\n encoding = v.encoding\n has_date_units = \"units\" in encoding and \"since\" in encoding[\"units\"]\n is_datetime_type = np.issubdtype(\n v.dtype, np.datetime64\n ) or contains_cftime_datetimes(v)\n\n if (\n is_datetime_type\n and not has_date_units\n and \"bounds\" in attrs\n and attrs[\"bounds\"] in variables\n ):\n warnings.warn(\n \"Variable '{0}' has datetime type and a \"\n \"bounds variable but {0}.encoding does not have \"\n \"units specified. The units encodings for '{0}' \"\n \"and '{1}' will be determined independently \"\n \"and may not be equal, counter to CF-conventions. \"\n \"If this is a concern, specify a units encoding for \"\n \"'{0}' before writing to a file.\".format(v.name, attrs[\"bounds\"]),\n UserWarning,\n )\n\n if has_date_units and \"bounds\" in attrs:\n if attrs[\"bounds\"] in variables:\n bounds_encoding = variables[attrs[\"bounds\"]].encoding\n bounds_encoding.setdefault(\"units\", encoding[\"units\"])\n if \"calendar\" in encoding:\n bounds_encoding.setdefault(\"calendar\", encoding[\"calendar\"])\n\n\ndef decode_cf_variables(\n variables,\n attributes,\n concat_characters=True,\n mask_and_scale=True,\n decode_times=True,\n decode_coords=True,\n drop_variables=None,\n use_cftime=None,\n):\n \"\"\"\n Decode several CF encoded variables.\n\n See: decode_cf_variable\n \"\"\"\n dimensions_used_by = defaultdict(list)\n for v in variables.values():\n for d in v.dims:\n dimensions_used_by[d].append(v)\n\n def stackable(dim):\n # figure out if a dimension can be concatenated over\n if dim in variables:\n return False\n for v in dimensions_used_by[dim]:\n if v.dtype.kind != \"S\" or dim != v.dims[-1]:\n return False\n return True\n\n coord_names = set()\n\n if isinstance(drop_variables, str):\n drop_variables = [drop_variables]\n elif drop_variables is None:\n drop_variables = []\n drop_variables = set(drop_variables)\n\n # Time bounds coordinates might miss the decoding attributes\n if decode_times:\n _update_bounds_attributes(variables)\n\n new_vars = {}\n for k, v in variables.items():\n if k in drop_variables:\n continue\n stack_char_dim = (\n concat_characters\n and v.dtype == \"S1\"\n and v.ndim > 0\n and stackable(v.dims[-1])\n )\n new_vars[k] = decode_cf_variable(\n k,\n v,\n concat_characters=concat_characters,\n mask_and_scale=mask_and_scale,\n decode_times=decode_times,\n stack_char_dim=stack_char_dim,\n use_cftime=use_cftime,\n )\n if decode_coords:\n var_attrs = new_vars[k].attrs\n if \"coordinates\" in var_attrs:\n coord_str = var_attrs[\"coordinates\"]\n var_coord_names = coord_str.split()\n if all(k in variables for k in var_coord_names):\n new_vars[k].encoding[\"coordinates\"] = coord_str\n del var_attrs[\"coordinates\"]\n coord_names.update(var_coord_names)\n\n if decode_coords and \"coordinates\" in attributes:\n attributes = dict(attributes)\n coord_names.update(attributes.pop(\"coordinates\").split())\n\n return new_vars, attributes, coord_names\n\n\ndef decode_cf(\n obj,\n concat_characters=True,\n mask_and_scale=True,\n decode_times=True,\n decode_coords=True,\n drop_variables=None,\n use_cftime=None,\n):\n \"\"\"Decode the given Dataset or Datastore 
according to CF conventions into\n a new Dataset.\n\n Parameters\n ----------\n obj : Dataset or DataStore\n Object to decode.\n concat_characters : bool, optional\n Should character arrays be concatenated to strings, for\n example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'\n mask_and_scale: bool, optional\n Lazily scale (using scale_factor and add_offset) and mask\n (using _FillValue).\n decode_times : bool, optional\n Decode cf times (e.g., integers since 'hours since 2000-01-01') to\n np.datetime64.\n decode_coords : bool, optional\n Use the 'coordinates' attribute on variable (or the dataset itself) to\n identify coordinates.\n drop_variables: string or iterable, optional\n A variable or list of variables to exclude from being parsed from the\n dataset. This may be useful to drop variables with problems or\n inconsistent values.\n use_cftime: bool, optional\n Only relevant if encoded dates come from a standard calendar\n (e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not\n specified). If None (default), attempt to decode times to\n ``np.datetime64[ns]`` objects; if this is not possible, decode times to\n ``cftime.datetime`` objects. If True, always decode times to\n ``cftime.datetime`` objects, regardless of whether or not they can be\n represented using ``np.datetime64[ns]`` objects. If False, always\n decode times to ``np.datetime64[ns]`` objects; if this is not possible\n raise an error.\n\n Returns\n -------\n decoded : Dataset\n \"\"\"\n from .core.dataset import Dataset\n from .backends.common import AbstractDataStore\n\n if isinstance(obj, Dataset):\n vars = obj._variables\n attrs = obj.attrs\n extra_coords = set(obj.coords)\n file_obj = obj._file_obj\n encoding = obj.encoding\n elif isinstance(obj, AbstractDataStore):\n vars, attrs = obj.load()\n extra_coords = set()\n file_obj = obj\n encoding = obj.get_encoding()\n else:\n raise TypeError(\"can only decode Dataset or DataStore objects\")\n\n vars, attrs, coord_names = decode_cf_variables(\n vars,\n attrs,\n concat_characters,\n mask_and_scale,\n decode_times,\n decode_coords,\n drop_variables=drop_variables,\n use_cftime=use_cftime,\n )\n ds = Dataset(vars, attrs=attrs)\n ds = ds.set_coords(coord_names.union(extra_coords).intersection(vars))\n ds._file_obj = file_obj\n ds.encoding = encoding\n\n return ds\n\n\ndef cf_decoder(\n variables,\n attributes,\n concat_characters=True,\n mask_and_scale=True,\n decode_times=True,\n):\n \"\"\"\n Decode a set of CF encoded variables and attributes.\n\n Parameters\n ----------\n variables : dict\n A dictionary mapping from variable name to xarray.Variable\n attributes : dict\n A dictionary mapping from attribute name to value\n concat_characters : bool\n Should character arrays be concatenated to strings, for\n example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'\n mask_and_scale: bool\n Lazily scale (using scale_factor and add_offset) and mask\n (using _FillValue).\n decode_times : bool\n Decode cf times ('hours since 2000-01-01') to np.datetime64.\n\n Returns\n -------\n decoded_variables : dict\n A dictionary mapping from variable name to xarray.Variable objects.\n decoded_attributes : dict\n A dictionary mapping from attribute name to values.\n\n See also\n --------\n decode_cf_variable\n \"\"\"\n variables, attributes, _ = decode_cf_variables(\n variables, attributes, concat_characters, mask_and_scale, decode_times\n )\n return variables, attributes\n\n\ndef _encode_coordinates(variables, attributes, non_dim_coord_names):\n # calculate global and variable specific coordinates\n 
non_dim_coord_names = set(non_dim_coord_names)\n\n for name in list(non_dim_coord_names):\n if isinstance(name, str) and \" \" in name:\n warnings.warn(\n \"coordinate {!r} has a space in its name, which means it \"\n \"cannot be marked as a coordinate on disk and will be \"\n \"saved as a data variable instead\".format(name),\n SerializationWarning,\n stacklevel=6,\n )\n non_dim_coord_names.discard(name)\n\n global_coordinates = non_dim_coord_names.copy()\n variable_coordinates = defaultdict(set)\n for coord_name in non_dim_coord_names:\n target_dims = variables[coord_name].dims\n for k, v in variables.items():\n if (\n k not in non_dim_coord_names\n and k not in v.dims\n and set(target_dims) <= set(v.dims)\n ):\n variable_coordinates[k].add(coord_name)\n global_coordinates.discard(coord_name)\n\n variables = {k: v.copy(deep=False) for k, v in variables.items()}\n\n # These coordinates are saved according to CF conventions\n for var_name, coord_names in variable_coordinates.items():\n attrs = variables[var_name].attrs\n if \"coordinates\" in attrs:\n raise ValueError(\n \"cannot serialize coordinates because variable \"\n \"%s already has an attribute 'coordinates'\" % var_name\n )\n attrs[\"coordinates\"] = \" \".join(map(str, coord_names))\n\n # These coordinates are not associated with any particular variables, so we\n # save them under a global 'coordinates' attribute so xarray can roundtrip\n # the dataset faithfully. Because this serialization goes beyond CF\n # conventions, only do it if necessary.\n # Reference discussion:\n # http://mailman.cgd.ucar.edu/pipermail/cf-metadata/2014/057771.html\n if global_coordinates:\n attributes = dict(attributes)\n if \"coordinates\" in attributes:\n raise ValueError(\n \"cannot serialize coordinates because the global \"\n \"attribute 'coordinates' already exists\"\n )\n attributes[\"coordinates\"] = \" \".join(map(str, global_coordinates))\n\n return variables, attributes\n\n\ndef encode_dataset_coordinates(dataset):\n \"\"\"Encode coordinates on the given dataset object into variable specific\n and global attributes.\n\n When possible, this is done according to CF conventions.\n\n Parameters\n ----------\n dataset : Dataset\n Object to encode.\n\n Returns\n -------\n variables : dict\n attrs : dict\n \"\"\"\n non_dim_coord_names = set(dataset.coords) - set(dataset.dims)\n return _encode_coordinates(\n dataset._variables, dataset.attrs, non_dim_coord_names=non_dim_coord_names\n )\n\n\ndef cf_encoder(variables, attributes):\n \"\"\"\n Encode a set of CF encoded variables and attributes.\n Takes a dicts of variables and attributes and encodes them\n to conform to CF conventions as much as possible.\n This includes masking, scaling, character array handling,\n and CF-time encoding.\n\n\n Parameters\n ----------\n variables : dict\n A dictionary mapping from variable name to xarray.Variable\n attributes : dict\n A dictionary mapping from attribute name to value\n\n Returns\n -------\n encoded_variables : dict\n A dictionary mapping from variable name to xarray.Variable,\n encoded_attributes : dict\n A dictionary mapping from attribute name to value\n\n See also\n --------\n decode_cf_variable, encode_cf_variable\n \"\"\"\n\n # add encoding for time bounds variables if present.\n _update_bounds_encoding(variables)\n\n new_vars = {k: encode_cf_variable(v, name=k) for k, v in variables.items()}\n\n # Remove attrs from bounds variables (issue #2921)\n for var in new_vars.values():\n bounds = var.attrs[\"bounds\"] if \"bounds\" in var.attrs else None\n 
if bounds and bounds in new_vars:\n # see http://cfconventions.org/cf-conventions/cf-conventions.html#cell-boundaries\n for attr in [\n \"units\",\n \"standard_name\",\n \"axis\",\n \"positive\",\n \"calendar\",\n \"long_name\",\n \"leap_month\",\n \"leap_year\",\n \"month_lengths\",\n ]:\n if attr in new_vars[bounds].attrs and attr in var.attrs:\n if new_vars[bounds].attrs[attr] == var.attrs[attr]:\n new_vars[bounds].attrs.pop(attr)\n\n return new_vars, attributes\n"},{"className":"NativeEndiannessArray","col":0,"comment":"Decode arrays on the fly from non-native to native endianness\n\n This is useful for decoding arrays from netCDF3 files (which are all\n big endian) into native endianness, so they can be used with Cython\n functions, such as those found in bottleneck and pandas.\n\n >>> x = np.arange(5, dtype='>i2')\n\n >>> x.dtype\n dtype('>i2')\n\n >>> NativeEndianArray(x).dtype\n dtype('int16')\n\n >>> NativeEndianArray(x)[:].dtype\n dtype('int16')\n ","endLoc":44,"id":2445,"nodeType":"Class","startLoc":15,"text":"class NativeEndiannessArray(indexing.ExplicitlyIndexedNDArrayMixin):\n \"\"\"Decode arrays on the fly from non-native to native endianness\n\n This is useful for decoding arrays from netCDF3 files (which are all\n big endian) into native endianness, so they can be used with Cython\n functions, such as those found in bottleneck and pandas.\n\n >>> x = np.arange(5, dtype='>i2')\n\n >>> x.dtype\n dtype('>i2')\n\n >>> NativeEndianArray(x).dtype\n dtype('int16')\n\n >>> NativeEndianArray(x)[:].dtype\n dtype('int16')\n \"\"\"\n\n __slots__ = (\"array\",)\n\n def __init__(self, array):\n self.array = indexing.as_indexable(array)\n\n @property\n def dtype(self):\n return np.dtype(self.array.dtype.kind + str(self.array.dtype.itemsize))\n\n def __getitem__(self, key):\n return np.asarray(self.array[key], dtype=self.dtype)"},{"col":4,"comment":"null","endLoc":41,"header":"@property\n def dtype(self)","id":2446,"name":"dtype","nodeType":"Function","startLoc":39,"text":"@property\n def dtype(self):\n return np.dtype(self.array.dtype.kind + str(self.array.dtype.itemsize))"},{"col":4,"comment":"null","endLoc":44,"header":"def __getitem__(self, key)","id":2447,"name":"__getitem__","nodeType":"Function","startLoc":43,"text":"def __getitem__(self, key):\n return np.asarray(self.array[key], dtype=self.dtype)"},{"attributeType":"null","col":4,"comment":"null","endLoc":34,"id":2448,"name":"__slots__","nodeType":"Attribute","startLoc":34,"text":"__slots__"},{"attributeType":"function","col":0,"comment":"null","endLoc":298,"id":2449,"name":"argmin","nodeType":"Attribute","startLoc":298,"text":"argmin"},{"col":4,"comment":"\n Pad strings in the array by prepending '0' characters.\n\n Strings in the array are padded with '0' characters on the\n left of the string to reach a total string length `width`. Strings\n in the array with length greater or equal to `width` are unchanged.\n\n Parameters\n ----------\n width : int\n Minimum length of resulting string; strings with length less\n than `width` be prepended with '0' characters.\n\n Returns\n -------\n filled : same type as values\n ","endLoc":507,"header":"def zfill(self, width)","id":2450,"name":"zfill","nodeType":"Function","startLoc":489,"text":"def zfill(self, width):\n \"\"\"\n Pad strings in the array by prepending '0' characters.\n\n Strings in the array are padded with '0' characters on the\n left of the string to reach a total string length `width`. 
Strings\n in the array with length greater or equal to `width` are unchanged.\n\n Parameters\n ----------\n width : int\n Minimum length of resulting string; strings with length less\n than `width` be prepended with '0' characters.\n\n Returns\n -------\n filled : same type as values\n \"\"\"\n return self.pad(width, side=\"left\", fillchar=\"0\")"},{"attributeType":"null","col":8,"comment":"null","endLoc":37,"id":2451,"name":"array","nodeType":"Attribute","startLoc":37,"text":"self.array"},{"attributeType":"null","col":4,"comment":"null","endLoc":78,"id":2452,"name":"_day_option","nodeType":"Attribute","startLoc":78,"text":"_day_option"},{"className":"BoolTypeArray","col":0,"comment":"Decode arrays on the fly from integer to boolean datatype\n\n This is useful for decoding boolean arrays from integer typed netCDF\n variables.\n\n >>> x = np.array([1, 0, 1, 1, 0], dtype='i1')\n\n >>> x.dtype\n dtype('>i2')\n\n >>> BoolTypeArray(x).dtype\n dtype('bool')\n\n >>> BoolTypeArray(x)[:].dtype\n dtype('bool')\n ","endLoc":75,"id":2453,"nodeType":"Class","startLoc":47,"text":"class BoolTypeArray(indexing.ExplicitlyIndexedNDArrayMixin):\n \"\"\"Decode arrays on the fly from integer to boolean datatype\n\n This is useful for decoding boolean arrays from integer typed netCDF\n variables.\n\n >>> x = np.array([1, 0, 1, 1, 0], dtype='i1')\n\n >>> x.dtype\n dtype('>i2')\n\n >>> BoolTypeArray(x).dtype\n dtype('bool')\n\n >>> BoolTypeArray(x)[:].dtype\n dtype('bool')\n \"\"\"\n\n __slots__ = (\"array\",)\n\n def __init__(self, array):\n self.array = indexing.as_indexable(array)\n\n @property\n def dtype(self):\n return np.dtype(\"bool\")\n\n def __getitem__(self, key):\n return np.asarray(self.array[key], dtype=self.dtype)"},{"col":4,"comment":"null","endLoc":72,"header":"@property\n def dtype(self)","id":2454,"name":"dtype","nodeType":"Function","startLoc":70,"text":"@property\n def dtype(self):\n return np.dtype(\"bool\")"},{"col":4,"comment":"null","endLoc":75,"header":"def __getitem__(self, key)","id":2455,"name":"__getitem__","nodeType":"Function","startLoc":74,"text":"def __getitem__(self, key):\n return np.asarray(self.array[key], dtype=self.dtype)"},{"col":4,"comment":"null","endLoc":61,"header":"@parameterized([\"center\", \"stride\"], ([True, False], [1, 200]))\n def time_rolling_construct(self, center, stride)","id":2456,"name":"time_rolling_construct","nodeType":"Function","startLoc":57,"text":"@parameterized([\"center\", \"stride\"], ([True, False], [1, 200]))\n def time_rolling_construct(self, center, stride):\n self.ds.rolling(x=window, center=center).construct(\n \"window_dim\", stride=stride\n ).mean(dim=\"window_dim\").load()"},{"attributeType":"null","col":4,"comment":"null","endLoc":65,"id":2457,"name":"__slots__","nodeType":"Attribute","startLoc":65,"text":"__slots__"},{"attributeType":"null","col":8,"comment":"null","endLoc":86,"id":2458,"name":"n","nodeType":"Attribute","startLoc":86,"text":"self.n"},{"attributeType":"null","col":8,"comment":"null","endLoc":68,"id":2459,"name":"array","nodeType":"Attribute","startLoc":68,"text":"self.array"},{"col":4,"comment":"null","endLoc":539,"header":"def as_timedelta(self)","id":2460,"name":"as_timedelta","nodeType":"Function","startLoc":538,"text":"def as_timedelta(self):\n return timedelta(days=self.n)"},{"attributeType":"function","col":0,"comment":"null","endLoc":299,"id":2461,"name":"max","nodeType":"Attribute","startLoc":299,"text":"max"},{"col":4,"comment":"null","endLoc":542,"header":"def __apply__(self, 
other)","id":2462,"name":"__apply__","nodeType":"Function","startLoc":541,"text":"def __apply__(self, other):\n return other + self.as_timedelta()"},{"attributeType":"null","col":4,"comment":"null","endLoc":536,"id":2463,"name":"_freq","nodeType":"Attribute","startLoc":536,"text":"_freq"},{"className":"MonthEnd","col":0,"comment":"null","endLoc":331,"id":2464,"nodeType":"Class","startLoc":321,"text":"class MonthEnd(BaseCFTimeOffset):\n _freq = \"M\"\n\n def __apply__(self, other):\n n = _adjust_n_months(other.day, self.n, _days_in_month(other))\n return _shift_month(other, n, \"end\")\n\n def onOffset(self, date):\n \"\"\"Check if the given date is in the set of possible dates created\n using a length-one version of this offset class.\"\"\"\n return date.day == _days_in_month(date)"},{"col":0,"comment":"\n Decode a set of CF encoded variables and attributes.\n\n Parameters\n ----------\n variables : dict\n A dictionary mapping from variable name to xarray.Variable\n attributes : dict\n A dictionary mapping from attribute name to value\n concat_characters : bool\n Should character arrays be concatenated to strings, for\n example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'\n mask_and_scale: bool\n Lazily scale (using scale_factor and add_offset) and mask\n (using _FillValue).\n decode_times : bool\n Decode cf times ('hours since 2000-01-01') to np.datetime64.\n\n Returns\n -------\n decoded_variables : dict\n A dictionary mapping from variable name to xarray.Variable objects.\n decoded_attributes : dict\n A dictionary mapping from attribute name to values.\n\n See also\n --------\n decode_cf_variable\n ","endLoc":634,"header":"def cf_decoder(\n variables,\n attributes,\n concat_characters=True,\n mask_and_scale=True,\n decode_times=True,\n)","id":2465,"name":"cf_decoder","nodeType":"Function","startLoc":595,"text":"def cf_decoder(\n variables,\n attributes,\n concat_characters=True,\n mask_and_scale=True,\n decode_times=True,\n):\n \"\"\"\n Decode a set of CF encoded variables and attributes.\n\n Parameters\n ----------\n variables : dict\n A dictionary mapping from variable name to xarray.Variable\n attributes : dict\n A dictionary mapping from attribute name to value\n concat_characters : bool\n Should character arrays be concatenated to strings, for\n example: ['h', 'e', 'l', 'l', 'o'] -> 'hello'\n mask_and_scale: bool\n Lazily scale (using scale_factor and add_offset) and mask\n (using _FillValue).\n decode_times : bool\n Decode cf times ('hours since 2000-01-01') to np.datetime64.\n\n Returns\n -------\n decoded_variables : dict\n A dictionary mapping from variable name to xarray.Variable objects.\n decoded_attributes : dict\n A dictionary mapping from attribute name to values.\n\n See also\n --------\n decode_cf_variable\n \"\"\"\n variables, attributes, _ = decode_cf_variables(\n variables, attributes, concat_characters, mask_and_scale, decode_times\n )\n return variables, attributes"},{"col":4,"comment":"\n Test if pattern or regex is contained within a string of the array.\n\n Return boolean array based on whether a given pattern or regex is\n contained within a string of the array.\n\n Parameters\n ----------\n pat : str\n Character sequence or regular expression.\n case : bool, default True\n If True, case sensitive.\n flags : int, default 0 (no flags)\n Flags to pass through to the re module, e.g. 
re.IGNORECASE.\n regex : bool, default True\n If True, assumes the pat is a regular expression.\n If False, treats the pat as a literal string.\n\n Returns\n -------\n contains : array of bool\n An array of boolean values indicating whether the\n given pattern is contained within the string of each element\n of the array.\n ","endLoc":553,"header":"def contains(self, pat, case=True, flags=0, regex=True)","id":2466,"name":"contains","nodeType":"Function","startLoc":509,"text":"def contains(self, pat, case=True, flags=0, regex=True):\n \"\"\"\n Test if pattern or regex is contained within a string of the array.\n\n Return boolean array based on whether a given pattern or regex is\n contained within a string of the array.\n\n Parameters\n ----------\n pat : str\n Character sequence or regular expression.\n case : bool, default True\n If True, case sensitive.\n flags : int, default 0 (no flags)\n Flags to pass through to the re module, e.g. re.IGNORECASE.\n regex : bool, default True\n If True, assumes the pat is a regular expression.\n If False, treats the pat as a literal string.\n\n Returns\n -------\n contains : array of bool\n An array of boolean values indicating whether the\n given pattern is contained within the string of each element\n of the array.\n \"\"\"\n pat = self._obj.dtype.type(pat)\n if regex:\n if not case:\n flags |= re.IGNORECASE\n\n regex = re.compile(pat, flags=flags)\n\n if regex.groups > 0: # pragma: no cover\n raise ValueError(\"This pattern has match groups.\")\n\n f = lambda x: bool(regex.search(x))\n else:\n if case:\n f = lambda x: pat in x\n else:\n uppered = self._obj.str.upper()\n return uppered.str.contains(pat.upper(), regex=False)\n\n return self._apply(f, dtype=bool)"},{"col":4,"comment":"null","endLoc":326,"header":"def __apply__(self, other)","id":2467,"name":"__apply__","nodeType":"Function","startLoc":324,"text":"def __apply__(self, other):\n n = _adjust_n_months(other.day, self.n, _days_in_month(other))\n return _shift_month(other, n, \"end\")"},{"fileName":"file_manager.py","filePath":"xarray/backends","id":2468,"nodeType":"File","text":"import contextlib\nimport io\nimport threading\nimport warnings\nfrom typing import Any, Dict, cast\n\nfrom ..core import utils\nfrom ..core.options import OPTIONS\nfrom .locks import acquire\nfrom .lru_cache import LRUCache\n\n# Global cache for storing open files.\nFILE_CACHE: LRUCache[str, io.IOBase] = LRUCache(\n maxsize=cast(int, OPTIONS[\"file_cache_maxsize\"]), on_evict=lambda k, v: v.close()\n)\nassert FILE_CACHE.maxsize, \"file cache must be at least size one\"\n\n\nREF_COUNTS: Dict[Any, int] = {}\n\n_DEFAULT_MODE = utils.ReprObject(\"\")\n\n\nclass FileManager:\n \"\"\"Manager for acquiring and closing a file object.\n\n Use FileManager subclasses (CachingFileManager in particular) on backend\n storage classes to automatically handle issues related to keeping track of\n many open files and transferring them between multiple processes.\n \"\"\"\n\n def acquire(self, needs_lock=True):\n \"\"\"Acquire the file object from this manager.\"\"\"\n raise NotImplementedError()\n\n def acquire_context(self, needs_lock=True):\n \"\"\"Context manager for acquiring a file. Yields a file object.\n\n The context manager unwinds any actions taken as part of acquisition\n (i.e., removes it from any cache) if an exception is raised from the\n context. 
It *does not* automatically close the file.\n \"\"\"\n raise NotImplementedError()\n\n def close(self, needs_lock=True):\n \"\"\"Close the file object associated with this manager, if needed.\"\"\"\n raise NotImplementedError()\n\n\nclass CachingFileManager(FileManager):\n \"\"\"Wrapper for automatically opening and closing file objects.\n\n Unlike files, CachingFileManager objects can be safely pickled and passed\n between processes. They should be explicitly closed to release resources,\n but a per-process least-recently-used cache for open files ensures that you\n can safely create arbitrarily large numbers of FileManager objects.\n\n Don't directly close files acquired from a FileManager. Instead, call\n FileManager.close(), which ensures that closed files are removed from the\n cache as well.\n\n Example usage:\n\n manager = FileManager(open, 'example.txt', mode='w')\n f = manager.acquire()\n f.write(...)\n manager.close() # ensures file is closed\n\n Note that as long as previous files are still cached, acquiring a file\n multiple times from the same FileManager is essentially free:\n\n f1 = manager.acquire()\n f2 = manager.acquire()\n assert f1 is f2\n\n \"\"\"\n\n def __init__(\n self,\n opener,\n *args,\n mode=_DEFAULT_MODE,\n kwargs=None,\n lock=None,\n cache=None,\n ref_counts=None\n ):\n \"\"\"Initialize a FileManager.\n\n The cache and ref_counts arguments exist solely to facilitate\n dependency injection, and should only be set for tests.\n\n Parameters\n ----------\n opener : callable\n Function that when called like ``opener(*args, **kwargs)`` returns\n an open file object. The file object must implement a ``close()``\n method.\n *args\n Positional arguments for opener. A ``mode`` argument should be\n provided as a keyword argument (see below). All arguments must be\n hashable.\n mode : optional\n If provided, passed as a keyword argument to ``opener`` along with\n ``**kwargs``. ``mode='w' `` has special treatment: after the first\n call it is replaced by ``mode='a'`` in all subsequent function to\n avoid overriding the newly created file.\n kwargs : dict, optional\n Keyword arguments for opener, excluding ``mode``. All values must\n be hashable.\n lock : duck-compatible threading.Lock, optional\n Lock to use when modifying the cache inside acquire() and close().\n By default, uses a new threading.Lock() object. If set, this object\n should be pickleable.\n cache : MutableMapping, optional\n Mapping to use as a cache for open files. By default, uses xarray's\n global LRU file cache. Because ``cache`` typically points to a\n global variable and contains non-picklable file objects, an\n unpickled FileManager objects will be restored with the default\n cache.\n ref_counts : dict, optional\n Optional dict to use for keeping track the number of references to\n the same file.\n \"\"\"\n self._opener = opener\n self._args = args\n self._mode = mode\n self._kwargs = {} if kwargs is None else dict(kwargs)\n\n self._default_lock = lock is None or lock is False\n self._lock = threading.Lock() if self._default_lock else lock\n\n # cache[self._key] stores the file associated with this object.\n if cache is None:\n cache = FILE_CACHE\n self._cache = cache\n self._key = self._make_key()\n\n # ref_counts[self._key] stores the number of CachingFileManager objects\n # in memory referencing this same file. 
We use this to know if we can\n # close a file when the manager is deallocated.\n if ref_counts is None:\n ref_counts = REF_COUNTS\n self._ref_counter = _RefCounter(ref_counts)\n self._ref_counter.increment(self._key)\n\n def _make_key(self):\n \"\"\"Make a key for caching files in the LRU cache.\"\"\"\n value = (\n self._opener,\n self._args,\n \"a\" if self._mode == \"w\" else self._mode,\n tuple(sorted(self._kwargs.items())),\n )\n return _HashedSequence(value)\n\n @contextlib.contextmanager\n def _optional_lock(self, needs_lock):\n \"\"\"Context manager for optionally acquiring a lock.\"\"\"\n if needs_lock:\n with self._lock:\n yield\n else:\n yield\n\n def acquire(self, needs_lock=True):\n \"\"\"Acquire a file object from the manager.\n\n A new file is only opened if it has expired from the\n least-recently-used cache.\n\n This method uses a lock, which ensures that it is thread-safe. You can\n safely acquire a file in multiple threads at the same time, as long as\n the underlying file object is thread-safe.\n\n Returns\n -------\n An open file object, as returned by ``opener(*args, **kwargs)``.\n \"\"\"\n file, _ = self._acquire_with_cache_info(needs_lock)\n return file\n\n @contextlib.contextmanager\n def acquire_context(self, needs_lock=True):\n \"\"\"Context manager for acquiring a file.\"\"\"\n file, cached = self._acquire_with_cache_info(needs_lock)\n try:\n yield file\n except Exception:\n if not cached:\n self.close(needs_lock)\n raise\n\n def _acquire_with_cache_info(self, needs_lock=True):\n \"\"\"Acquire a file, returning the file and whether it was cached.\"\"\"\n with self._optional_lock(needs_lock):\n try:\n file = self._cache[self._key]\n except KeyError:\n kwargs = self._kwargs\n if self._mode is not _DEFAULT_MODE:\n kwargs = kwargs.copy()\n kwargs[\"mode\"] = self._mode\n file = self._opener(*self._args, **kwargs)\n if self._mode == \"w\":\n # ensure file doesn't get overriden when opened again\n self._mode = \"a\"\n self._cache[self._key] = file\n return file, False\n else:\n return file, True\n\n def close(self, needs_lock=True):\n \"\"\"Explicitly close any associated file object (if necessary).\"\"\"\n # TODO: remove needs_lock if/when we have a reentrant lock in\n # dask.distributed: https://github.com/dask/dask/issues/3832\n with self._optional_lock(needs_lock):\n default = None\n file = self._cache.pop(self._key, default)\n if file is not None:\n file.close()\n\n def __del__(self):\n # If we're the only CachingFileManger referencing a unclosed file, we\n # should remove it from the cache upon garbage collection.\n #\n # Keeping our own count of file references might seem like overkill,\n # but it's actually pretty common to reopen files with the same\n # variable name in a notebook or command line environment, e.g., to\n # fix the parameters used when opening a file:\n # >>> ds = xarray.open_dataset('myfile.nc')\n # >>> ds = xarray.open_dataset('myfile.nc', decode_times=False)\n # This second assignment to \"ds\" drops CPython's ref-count on the first\n # \"ds\" argument to zero, which can trigger garbage collections. 
So if\n # we didn't check whether another object is referencing 'myfile.nc',\n # the newly opened file would actually be immediately closed!\n ref_count = self._ref_counter.decrement(self._key)\n\n if not ref_count and self._key in self._cache:\n if acquire(self._lock, blocking=False):\n # Only close files if we can do so immediately.\n try:\n self.close(needs_lock=False)\n finally:\n self._lock.release()\n\n if OPTIONS[\"warn_for_unclosed_files\"]:\n warnings.warn(\n \"deallocating {}, but file is not already closed. \"\n \"This may indicate a bug.\".format(self),\n RuntimeWarning,\n stacklevel=2,\n )\n\n def __getstate__(self):\n \"\"\"State for pickling.\"\"\"\n # cache and ref_counts are intentionally omitted: we don't want to try\n # to serialize these global objects.\n lock = None if self._default_lock else self._lock\n return (self._opener, self._args, self._mode, self._kwargs, lock)\n\n def __setstate__(self, state):\n \"\"\"Restore from a pickle.\"\"\"\n opener, args, mode, kwargs, lock = state\n self.__init__(opener, *args, mode=mode, kwargs=kwargs, lock=lock)\n\n def __repr__(self):\n args_string = \", \".join(map(repr, self._args))\n if self._mode is not _DEFAULT_MODE:\n args_string += \", mode={!r}\".format(self._mode)\n return \"{}({!r}, {}, kwargs={})\".format(\n type(self).__name__, self._opener, args_string, self._kwargs\n )\n\n\nclass _RefCounter:\n \"\"\"Class for keeping track of reference counts.\"\"\"\n\n def __init__(self, counts):\n self._counts = counts\n self._lock = threading.Lock()\n\n def increment(self, name):\n with self._lock:\n count = self._counts[name] = self._counts.get(name, 0) + 1\n return count\n\n def decrement(self, name):\n with self._lock:\n count = self._counts[name] - 1\n if count:\n self._counts[name] = count\n else:\n del self._counts[name]\n return count\n\n\nclass _HashedSequence(list):\n \"\"\"Speedup repeated look-ups by caching hash values.\n\n Based on what Python uses internally in functools.lru_cache.\n\n Python doesn't perform this optimization automatically:\n https://bugs.python.org/issue1462796\n \"\"\"\n\n def __init__(self, tuple_value):\n self[:] = tuple_value\n self.hashvalue = hash(tuple_value)\n\n def __hash__(self):\n return self.hashvalue\n\n\nclass DummyFileManager(FileManager):\n \"\"\"FileManager that simply wraps an open file in the FileManager interface.\n \"\"\"\n\n def __init__(self, value):\n self._value = value\n\n def acquire(self, needs_lock=True):\n del needs_lock # ignored\n return self._value\n\n @contextlib.contextmanager\n def acquire_context(self, needs_lock=True):\n del needs_lock\n yield self._value\n\n def close(self, needs_lock=True):\n del needs_lock # ignored\n self._value.close()\n"},{"className":"_RefCounter","col":0,"comment":"Class for keeping track of reference counts.","endLoc":295,"id":2469,"nodeType":"Class","startLoc":276,"text":"class _RefCounter:\n \"\"\"Class for keeping track of reference counts.\"\"\"\n\n def __init__(self, counts):\n self._counts = counts\n self._lock = threading.Lock()\n\n def increment(self, name):\n with self._lock:\n count = self._counts[name] = self._counts.get(name, 0) + 1\n return count\n\n def decrement(self, name):\n with self._lock:\n count = self._counts[name] - 1\n if count:\n self._counts[name] = count\n else:\n del self._counts[name]\n return count"},{"col":4,"comment":"null","endLoc":286,"header":"def increment(self, name)","id":2470,"name":"increment","nodeType":"Function","startLoc":283,"text":"def increment(self, name):\n with self._lock:\n count = 
self._counts[name] = self._counts.get(name, 0) + 1\n return count"},{"attributeType":"null","col":0,"comment":"null","endLoc":9,"id":2471,"name":"ny","nodeType":"Attribute","startLoc":9,"text":"ny"},{"attributeType":"null","col":0,"comment":"null","endLoc":10,"id":2472,"name":"nt","nodeType":"Attribute","startLoc":10,"text":"nt"},{"attributeType":"null","col":0,"comment":"null","endLoc":12,"id":2473,"name":"basic_indexes","nodeType":"Attribute","startLoc":12,"text":"basic_indexes"},{"attributeType":"null","col":0,"comment":"null","endLoc":18,"id":2474,"name":"basic_assignment_values","nodeType":"Attribute","startLoc":18,"text":"basic_assignment_values"},{"attributeType":"function","col":0,"comment":"null","endLoc":300,"id":2475,"name":"min","nodeType":"Attribute","startLoc":300,"text":"min"},{"attributeType":"function","col":0,"comment":"null","endLoc":301,"id":2476,"name":"sum","nodeType":"Attribute","startLoc":301,"text":"sum"},{"col":16,"endLoc":545,"id":2477,"nodeType":"Lambda","startLoc":545,"text":"lambda x: bool(regex.search(x))"},{"col":0,"comment":"Adjust the number of times a monthly offset is applied based\n on the day of a given date, and the reference day provided.\n ","endLoc":207,"header":"def _adjust_n_months(other_day, n, reference_day)","id":2478,"name":"_adjust_n_months","nodeType":"Function","startLoc":199,"text":"def _adjust_n_months(other_day, n, reference_day):\n \"\"\"Adjust the number of times a monthly offset is applied based\n on the day of a given date, and the reference day provided.\n \"\"\"\n if n > 0 and other_day < reference_day:\n n = n - 1\n elif n <= 0 and other_day > reference_day:\n n = n + 1\n return n"},{"col":20,"endLoc":548,"id":2479,"nodeType":"Lambda","startLoc":548,"text":"lambda x: pat in x"},{"attributeType":"function","col":0,"comment":"null","endLoc":304,"id":2480,"name":"std","nodeType":"Attribute","startLoc":304,"text":"std"},{"col":0,"comment":"Shift the date to a month start or end a given number of months away.\n ","endLoc":244,"header":"def _shift_month(date, months, day_option=\"start\")","id":2481,"name":"_shift_month","nodeType":"Function","startLoc":222,"text":"def _shift_month(date, months, day_option=\"start\"):\n \"\"\"Shift the date to a month start or end a given number of months away.\n \"\"\"\n delta_year = (date.month + months) // 12\n month = (date.month + months) % 12\n\n if month == 0:\n month = 12\n delta_year = delta_year - 1\n year = date.year + delta_year\n\n if day_option == \"start\":\n day = 1\n elif day_option == \"end\":\n reference = type(date)(year, month, 1)\n day = _days_in_month(reference)\n else:\n raise ValueError(day_option)\n # dayofwk=-1 is required to update the dayofwk and dayofyr attributes of\n # the returned date object in versions of cftime between 1.0.2 and\n # 1.0.3.4. 
It can be removed for versions of cftime greater than\n # 1.0.3.4.\n return date.replace(year=year, month=month, day=day, dayofwk=-1)"},{"attributeType":"null","col":0,"comment":"null","endLoc":26,"id":2482,"name":"outer_indexes","nodeType":"Attribute","startLoc":26,"text":"outer_indexes"},{"attributeType":"null","col":0,"comment":"null","endLoc":32,"id":2483,"name":"outer_assignment_values","nodeType":"Attribute","startLoc":32,"text":"outer_assignment_values"},{"attributeType":"function","col":0,"comment":"null","endLoc":306,"id":2484,"name":"var","nodeType":"Attribute","startLoc":306,"text":"var"},{"attributeType":"null","col":0,"comment":"null","endLoc":38,"id":2485,"name":"vectorized_indexes","nodeType":"Attribute","startLoc":38,"text":"vectorized_indexes"},{"attributeType":"null","col":0,"comment":"null","endLoc":51,"id":2486,"name":"vectorized_assignment_values","nodeType":"Attribute","startLoc":51,"text":"vectorized_assignment_values"},{"col":4,"comment":"\n Determine if each string matches a regular expression.\n\n Parameters\n ----------\n pat : string\n Character sequence or regular expression\n case : boolean, default True\n If True, case sensitive\n flags : int, default 0 (no flags)\n re module flags, e.g. re.IGNORECASE\n\n Returns\n -------\n matched : array of bool\n ","endLoc":578,"header":"def match(self, pat, case=True, flags=0)","id":2487,"name":"match","nodeType":"Function","startLoc":555,"text":"def match(self, pat, case=True, flags=0):\n \"\"\"\n Determine if each string matches a regular expression.\n\n Parameters\n ----------\n pat : string\n Character sequence or regular expression\n case : boolean, default True\n If True, case sensitive\n flags : int, default 0 (no flags)\n re module flags, e.g. re.IGNORECASE\n\n Returns\n -------\n matched : array of bool\n \"\"\"\n if not case:\n flags |= re.IGNORECASE\n\n pat = self._obj.dtype.type(pat)\n regex = re.compile(pat, flags=flags)\n f = lambda x: bool(regex.match(x))\n return self._apply(f, dtype=bool)"},{"col":0,"comment":"","endLoc":1,"header":"indexing.py#","id":2488,"name":"","nodeType":"Function","startLoc":1,"text":"nx = 3000\n\nny = 2000\n\nnt = 1000\n\nbasic_indexes = {\n \"1slice\": {\"x\": slice(0, 3)},\n \"1slice-1scalar\": {\"x\": 0, \"y\": slice(None, None, 3)},\n \"2slicess-1scalar\": {\"x\": slice(3, -3, 3), \"y\": 1, \"t\": slice(None, -3, 3)},\n}\n\nbasic_assignment_values = {\n \"1slice\": xr.DataArray(randn((3, ny), frac_nan=0.1), dims=[\"x\", \"y\"]),\n \"1slice-1scalar\": xr.DataArray(randn(int(ny / 3) + 1, frac_nan=0.1), dims=[\"y\"]),\n \"2slicess-1scalar\": xr.DataArray(\n randn(int((nx - 6) / 3), frac_nan=0.1), dims=[\"x\"]\n ),\n}\n\nouter_indexes = {\n \"1d\": {\"x\": randint(0, nx, 400)},\n \"2d\": {\"x\": randint(0, nx, 500), \"y\": randint(0, ny, 400)},\n \"2d-1scalar\": {\"x\": randint(0, nx, 100), \"y\": 1, \"t\": randint(0, nt, 400)},\n}\n\nouter_assignment_values = {\n \"1d\": xr.DataArray(randn((400, ny), frac_nan=0.1), dims=[\"x\", \"y\"]),\n \"2d\": xr.DataArray(randn((500, 400), frac_nan=0.1), dims=[\"x\", \"y\"]),\n \"2d-1scalar\": xr.DataArray(randn(100, frac_nan=0.1), dims=[\"x\"]),\n}\n\nvectorized_indexes = {\n \"1-1d\": {\"x\": xr.DataArray(randint(0, nx, 400), dims=\"a\")},\n \"2-1d\": {\n \"x\": xr.DataArray(randint(0, nx, 400), dims=\"a\"),\n \"y\": xr.DataArray(randint(0, ny, 400), dims=\"a\"),\n },\n \"3-2d\": {\n \"x\": xr.DataArray(randint(0, nx, 400).reshape(4, 100), dims=[\"a\", \"b\"]),\n \"y\": xr.DataArray(randint(0, ny, 400).reshape(4, 100), dims=[\"a\", 
\"b\"]),\n \"t\": xr.DataArray(randint(0, nt, 400).reshape(4, 100), dims=[\"a\", \"b\"]),\n },\n}\n\nvectorized_assignment_values = {\n \"1-1d\": xr.DataArray(randn((400, 2000)), dims=[\"a\", \"y\"], coords={\"a\": randn(400)}),\n \"2-1d\": xr.DataArray(randn(400), dims=[\"a\"], coords={\"a\": randn(400)}),\n \"3-2d\": xr.DataArray(\n randn((4, 100)), dims=[\"a\", \"b\"], coords={\"a\": randn(4), \"b\": randn(100)}\n ),\n}"},{"attributeType":"function","col":0,"comment":"null","endLoc":308,"id":2489,"name":"median","nodeType":"Attribute","startLoc":308,"text":"median"},{"attributeType":"function","col":0,"comment":"null","endLoc":310,"id":2490,"name":"prod","nodeType":"Attribute","startLoc":310,"text":"prod"},{"col":4,"comment":"Check if the given date is in the set of possible dates created\n using a length-one version of this offset class.","endLoc":331,"header":"def onOffset(self, date)","id":2491,"name":"onOffset","nodeType":"Function","startLoc":328,"text":"def onOffset(self, date):\n \"\"\"Check if the given date is in the set of possible dates created\n using a length-one version of this offset class.\"\"\"\n return date.day == _days_in_month(date)"},{"attributeType":"function","col":0,"comment":"null","endLoc":313,"id":2492,"name":"cumprod_1d","nodeType":"Attribute","startLoc":313,"text":"cumprod_1d"},{"attributeType":"function","col":0,"comment":"null","endLoc":315,"id":2493,"name":"cumsum_1d","nodeType":"Attribute","startLoc":315,"text":"cumsum_1d"},{"col":12,"endLoc":577,"id":2494,"nodeType":"Lambda","startLoc":577,"text":"lambda x: bool(regex.match(x))"},{"attributeType":"function","col":0,"comment":"null","endLoc":319,"id":2495,"name":"_mean","nodeType":"Attribute","startLoc":319,"text":"_mean"},{"attributeType":"null","col":0,"comment":"null","endLoc":422,"id":2496,"name":"_fail_on_dask_array_input_skipna","nodeType":"Attribute","startLoc":422,"text":"_fail_on_dask_array_input_skipna"},{"col":0,"comment":"","endLoc":5,"header":"duck_array_ops.py#","id":2497,"name":"","nodeType":"Function","startLoc":1,"text":"\"\"\"Compatibility module defining operations on duck numpy-arrays.\n\nCurrently, this means Dask or NumPy arrays. 
None of these functions should\naccept or return xarray objects.\n\"\"\"\n\ntry:\n import dask.array as dask_array\nexcept ImportError:\n dask_array = None # type: ignore\n\nmoveaxis = npcompat.moveaxis\n\naround = _dask_or_eager_func(\"around\")\n\nisclose = _dask_or_eager_func(\"isclose\")\n\nif hasattr(np, \"isnat\") and (\n dask_array is None or hasattr(dask_array_type, \"__array_ufunc__\")\n):\n # np.isnat is available since NumPy 1.13, so __array_ufunc__ is always\n # supported.\n isnat = np.isnat\nelse:\n isnat = _dask_or_eager_func(\"isnull\", eager_module=pd)\n\nisnan = _dask_or_eager_func(\"isnan\")\n\nzeros_like = _dask_or_eager_func(\"zeros_like\")\n\npandas_isnull = _dask_or_eager_func(\"isnull\", eager_module=pd)\n\ntranspose = _dask_or_eager_func(\"transpose\")\n\n_where = _dask_or_eager_func(\"where\", array_args=slice(3))\n\nisin = _dask_or_eager_func(\"isin\", array_args=slice(2))\n\ntake = _dask_or_eager_func(\"take\")\n\nbroadcast_to = _dask_or_eager_func(\"broadcast_to\")\n\n_concatenate = _dask_or_eager_func(\"concatenate\", list_of_args=True)\n\n_stack = _dask_or_eager_func(\"stack\", list_of_args=True)\n\narray_all = _dask_or_eager_func(\"all\")\n\narray_any = _dask_or_eager_func(\"any\")\n\ntensordot = _dask_or_eager_func(\"tensordot\", array_args=slice(2))\n\neinsum = _dask_or_eager_func(\"einsum\", array_args=slice(1, None))\n\nmasked_invalid = _dask_or_eager_func(\n \"masked_invalid\", eager_module=np.ma, dask_module=getattr(dask_array, \"ma\", None)\n)\n\nargmax = _create_nan_agg_method(\"argmax\", coerce_strings=True)\n\nargmin = _create_nan_agg_method(\"argmin\", coerce_strings=True)\n\nmax = _create_nan_agg_method(\"max\", coerce_strings=True)\n\nmin = _create_nan_agg_method(\"min\", coerce_strings=True)\n\nsum = _create_nan_agg_method(\"sum\")\n\nsum.numeric_only = True\n\nsum.available_min_count = True\n\nstd = _create_nan_agg_method(\"std\")\n\nstd.numeric_only = True\n\nvar = _create_nan_agg_method(\"var\")\n\nvar.numeric_only = True\n\nmedian = _create_nan_agg_method(\"median\")\n\nmedian.numeric_only = True\n\nprod = _create_nan_agg_method(\"prod\")\n\nprod.numeric_only = True\n\nsum.available_min_count = True\n\ncumprod_1d = _create_nan_agg_method(\"cumprod\")\n\ncumprod_1d.numeric_only = True\n\ncumsum_1d = _create_nan_agg_method(\"cumsum\")\n\ncumsum_1d.numeric_only = True\n\n_mean = _create_nan_agg_method(\"mean\")\n\nmean.numeric_only = True # type: ignore\n\n_fail_on_dask_array_input_skipna = partial(\n fail_on_dask_array_input,\n msg=\"%r with skipna=True is not yet implemented on dask arrays\",\n)"},{"attributeType":"null","col":4,"comment":"null","endLoc":322,"id":2498,"name":"_freq","nodeType":"Attribute","startLoc":322,"text":"_freq"},{"col":4,"comment":"\n Remove leading and trailing characters.\n\n Strip whitespaces (including newlines) or a set of specified characters\n from each string in the array from left and/or right sides.\n\n Parameters\n ----------\n to_strip : str or None, default None\n Specifying the set of characters to be removed.\n All combinations of this set of characters will be stripped.\n If None then whitespaces are removed.\n side : {'left', 'right', 'both'}, default 'left'\n Side from which to strip.\n\n Returns\n -------\n stripped : same type as values\n ","endLoc":612,"header":"def strip(self, to_strip=None, side=\"both\")","id":2499,"name":"strip","nodeType":"Function","startLoc":580,"text":"def strip(self, to_strip=None, side=\"both\"):\n \"\"\"\n Remove leading and trailing characters.\n\n Strip whitespaces 
(including newlines) or a set of specified characters\n from each string in the array from left and/or right sides.\n\n Parameters\n ----------\n to_strip : str or None, default None\n Specifying the set of characters to be removed.\n All combinations of this set of characters will be stripped.\n If None then whitespaces are removed.\n side : {'left', 'right', 'both'}, default 'left'\n Side from which to strip.\n\n Returns\n -------\n stripped : same type as values\n \"\"\"\n if to_strip is not None:\n to_strip = self._obj.dtype.type(to_strip)\n\n if side == \"both\":\n f = lambda x: x.strip(to_strip)\n elif side == \"left\":\n f = lambda x: x.lstrip(to_strip)\n elif side == \"right\":\n f = lambda x: x.rstrip(to_strip)\n else: # pragma: no cover\n raise ValueError(\"Invalid side\")\n\n return self._apply(f)"},{"className":"QuarterEnd","col":0,"comment":"null","endLoc":447,"id":2500,"nodeType":"Class","startLoc":425,"text":"class QuarterEnd(QuarterOffset):\n # When converting a string to an offset, pandas converts\n # 'Q' to a QuarterEnd offset starting in the month of\n # December. When creating a QuarterEnd offset directly\n # from the constructor, however, the default month is March.\n # We follow that behavior here.\n _default_month = 3\n _freq = \"Q\"\n _day_option = \"end\"\n\n def rollforward(self, date):\n \"\"\"Roll date forward to nearest end of quarter\"\"\"\n if self.onOffset(date):\n return date\n else:\n return date + QuarterEnd(month=self.month)\n\n def rollback(self, date):\n \"\"\"Roll date backward to nearest end of quarter\"\"\"\n if self.onOffset(date):\n return date\n else:\n return date - QuarterEnd(month=self.month)"},{"className":"QuarterOffset","col":0,"comment":"Quarter representation copied off of pandas/tseries/offsets.py\n ","endLoc":397,"id":2501,"nodeType":"Class","startLoc":350,"text":"class QuarterOffset(BaseCFTimeOffset):\n \"\"\"Quarter representation copied off of pandas/tseries/offsets.py\n \"\"\"\n\n _freq: ClassVar[str]\n _default_month: ClassVar[int]\n\n def __init__(self, n=1, month=None):\n BaseCFTimeOffset.__init__(self, n)\n self.month = _validate_month(month, self._default_month)\n\n def __apply__(self, other):\n # months_since: find the calendar quarter containing other.month,\n # e.g. if other.month == 8, the calendar quarter is [Jul, Aug, Sep].\n # Then find the month in that quarter containing an onOffset date for\n # self. 
`months_since` is the number of months to shift other.month\n # to get to this on-offset month.\n months_since = other.month % 3 - self.month % 3\n qtrs = roll_qtrday(\n other, self.n, self.month, day_option=self._day_option, modby=3\n )\n months = qtrs * 3 - months_since\n return _shift_month(other, months, self._day_option)\n\n def onOffset(self, date):\n \"\"\"Check if the given date is in the set of possible dates created\n using a length-one version of this offset class.\"\"\"\n mod_month = (date.month - self.month) % 3\n return mod_month == 0 and date.day == self._get_offset_day(date)\n\n def __sub__(self, other):\n import cftime\n\n if isinstance(other, cftime.datetime):\n raise TypeError(\"Cannot subtract cftime.datetime from offset.\")\n elif type(other) == type(self) and other.month == self.month:\n return type(self)(self.n - other.n, month=self.month)\n else:\n return NotImplemented\n\n def __mul__(self, other):\n return type(self)(n=other * self.n, month=self.month)\n\n def rule_code(self):\n return \"{}-{}\".format(self._freq, _MONTH_ABBREVIATIONS[self.month])\n\n def __str__(self):\n return \"<{}: n={}, month={}>\".format(type(self).__name__, self.n, self.month)"},{"col":4,"comment":"null","endLoc":359,"header":"def __init__(self, n=1, month=None)","id":2502,"name":"__init__","nodeType":"Function","startLoc":357,"text":"def __init__(self, n=1, month=None):\n BaseCFTimeOffset.__init__(self, n)\n self.month = _validate_month(month, self._default_month)"},{"col":0,"comment":"null","endLoc":305,"header":"def _validate_month(month, default_month)","id":2503,"name":"_validate_month","nodeType":"Function","startLoc":288,"text":"def _validate_month(month, default_month):\n if month is None:\n result_month = default_month\n else:\n result_month = month\n if not isinstance(result_month, int):\n raise TypeError(\n \"'self.month' must be an integer value between 1 \"\n \"and 12. Instead, it was set to a value of \"\n \"{!r}\".format(result_month)\n )\n elif not (1 <= result_month <= 12):\n raise ValueError(\n \"'self.month' must be an integer value between 1 \"\n \"and 12. Instead, it was set to a value of \"\n \"{!r}\".format(result_month)\n )\n return result_month"},{"id":2504,"name":"contributing.rst","nodeType":"TextFile","path":"doc","text":".. _contributing:\n\n**********************\nContributing to xarray\n**********************\n\n.. contents:: Table of contents:\n :local:\n\n.. note::\n\n Large parts of this document came from the `Pandas Contributing\n Guide `_.\n\nWhere to start?\n===============\n\nAll contributions, bug reports, bug fixes, documentation improvements,\nenhancements, and ideas are welcome.\n\nIf you are brand new to *xarray* or open-source development, we recommend going\nthrough the `GitHub \"issues\" tab `_\nto find issues that interest you. There are a number of issues listed under\n`Documentation `_\nand `good first issue\n`_\nwhere you could start out. Once you've found an interesting issue, you can\nreturn here to get your development environment setup.\n\nFeel free to ask questions on the `mailing list\n`_.\n\n.. _contributing.bug_reports:\n\nBug reports and enhancement requests\n====================================\n\nBug reports are an important part of making *xarray* more stable. Having a complete bug\nreport will allow others to reproduce the bug and provide insight into fixing. 
See\n`this stackoverflow article `_ for tips on\nwriting a good bug report.\n\nTrying the bug-producing code out on the *master* branch is often a worthwhile exercise\nto confirm the bug still exists. It is also worth searching existing bug reports and\npull requests to see if the issue has already been reported and/or fixed.\n\nBug reports must:\n\n#. Include a short, self-contained Python snippet reproducing the problem.\n You can format the code nicely by using `GitHub Flavored Markdown\n `_::\n\n ```python\n >>> from xarray import Dataset\n >>> df = Dataset(...)\n ...\n ```\n\n#. Include the full version string of *xarray* and its dependencies. You can use the\n built in function::\n\n >>> import xarray as xr\n >>> xr.show_versions()\n\n#. Explain why the current behavior is wrong/not desired and what you expect instead.\n\nThe issue will then show up to the *xarray* community and be open to comments/ideas\nfrom others.\n\n.. _contributing.github:\n\nWorking with the code\n=====================\n\nNow that you have an issue you want to fix, enhancement to add, or documentation\nto improve, you need to learn how to work with GitHub and the *xarray* code base.\n\n.. _contributing.version_control:\n\nVersion control, Git, and GitHub\n--------------------------------\n\nTo the new user, working with Git is one of the more daunting aspects of contributing\nto *xarray*. It can very quickly become overwhelming, but sticking to the guidelines\nbelow will help keep the process straightforward and mostly trouble free. As always,\nif you are having difficulties please feel free to ask for help.\n\nThe code is hosted on `GitHub `_. To\ncontribute you will need to sign up for a `free GitHub account\n`_. We use `Git `_ for\nversion control to allow many people to work together on the project.\n\nSome great resources for learning Git:\n\n* the `GitHub help pages `_.\n* the `NumPy's documentation `_.\n* Matthew Brett's `Pydagogue `_.\n\nGetting started with Git\n------------------------\n\n`GitHub has instructions `__ for installing git,\nsetting up your SSH key, and configuring git. All these steps need to be completed before\nyou can work seamlessly between your local repository and GitHub.\n\n.. _contributing.forking:\n\nForking\n-------\n\nYou will need your own fork to work on the code. Go to the `xarray project\npage `_ and hit the ``Fork`` button. You will\nwant to clone your fork to your machine::\n\n git clone https://github.com/your-user-name/xarray.git\n cd xarray\n git remote add upstream https://github.com/pydata/xarray.git\n\nThis creates the directory `xarray` and connects your repository to\nthe upstream (main project) *xarray* repository.\n\n.. _contributing.dev_env:\n\nCreating a development environment\n----------------------------------\n\nTo test out code changes, you'll need to build *xarray* from source, which\nrequires a Python environment. If you're making documentation changes, you can\nskip to :ref:`contributing.documentation` but you won't be able to build the\ndocumentation locally before pushing your changes.\n\n.. _contributiong.dev_python:\n\nCreating a Python Environment\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nBefore starting any development, you'll need to create an isolated xarray\ndevelopment environment:\n\n- Install either `Anaconda `_ or `miniconda\n `_\n- Make sure your conda is up to date (``conda update conda``)\n- Make sure that you have :ref:`cloned the repository `\n- ``cd`` to the *xarray* source directory\n\nWe'll now kick off a two-step process:\n\n1. 
Install the build dependencies\n2. Build and install xarray\n\n.. code-block:: none\n\n # Create and activate the build environment\n conda env create -f ci/requirements/py36.yml\n conda activate xarray-tests\n\n # or with older versions of Anaconda:\n source activate xarray-tests\n\n # Build and install xarray\n pip install -e .\n\nAt this point you should be able to import *xarray* from your locally built version::\n\n $ python # start an interpreter\n >>> import xarray\n >>> xarray.__version__\n '0.10.0+dev46.g015daca'\n\nThis will create the new environment, and not touch any of your existing environments,\nnor any existing Python installation.\n\nTo view your environments::\n\n conda info -e\n\nTo return to your root environment::\n\n conda deactivate\n\nSee the full conda docs `here `__.\n\nCreating a branch\n-----------------\n\nYou want your master branch to reflect only production-ready code, so create a\nfeature branch for making your changes. For example::\n\n git branch shiny-new-feature\n git checkout shiny-new-feature\n\nThe above can be simplified to::\n\n git checkout -b shiny-new-feature\n\nThis changes your working directory to the shiny-new-feature branch. Keep any\nchanges in this branch specific to one bug or feature so it is clear\nwhat the branch brings to *xarray*. You can have many \"shiny-new-features\"\nand switch in between them using the ``git checkout`` command.\n\nTo update this branch, you need to retrieve the changes from the master branch::\n\n git fetch upstream\n git rebase upstream/master\n\nThis will replay your commits on top of the latest *xarray* git master. If this\nleads to merge conflicts, you must resolve these before submitting your pull\nrequest. If you have uncommitted changes, you will need to ``git stash`` them\nprior to updating. This will effectively store your changes and they can be\nreapplied after updating.\n\n.. _contributing.documentation:\n\nContributing to the documentation\n=================================\n\nIf you're not the developer type, contributing to the documentation is still of\nhuge value. You don't even have to be an expert on *xarray* to do so! In fact,\nthere are sections of the docs that are worse off after being written by\nexperts. If something in the docs doesn't make sense to you, updating the\nrelevant section after you figure it out is a great way to ensure it will help\nthe next person.\n\n.. contents:: Documentation:\n :local:\n\n\nAbout the *xarray* documentation\n--------------------------------\n\nThe documentation is written in **reStructuredText**, which is almost like writing\nin plain English, and built using `Sphinx `__. The\nSphinx Documentation has an excellent `introduction to reST\n`__. Review the Sphinx docs to perform more\ncomplex changes to the documentation as well.\n\nSome other important things to know about the docs:\n\n- The *xarray* documentation consists of two parts: the docstrings in the code\n itself and the docs in this folder ``xarray/doc/``.\n\n The docstrings are meant to provide a clear explanation of the usage of the\n individual functions, while the documentation in this folder consists of\n tutorial-like overviews per topic together with some other information\n (what's new, installation, etc).\n\n- The docstrings follow the **Numpy Docstring Standard**, which is used widely\n in the Scientific Python community. This standard specifies the format of\n the different sections of the docstring. 
See `this document\n `_\n for a detailed explanation, or look at some of the existing functions to\n extend it in a similar manner.\n\n- The tutorials make heavy use of the `ipython directive\n `_ sphinx extension.\n This directive lets you put code in the documentation which will be run\n during the doc build. For example::\n\n .. ipython:: python\n\n x = 2\n x**3\n\n will be rendered as::\n\n In [1]: x = 2\n\n In [2]: x**3\n Out[2]: 8\n\n Almost all code examples in the docs are run (and the output saved) during the\n doc build. This approach means that code examples will always be up to date,\n but it does make the doc building a bit more complex.\n\n- Our API documentation in ``doc/api.rst`` houses the auto-generated\n documentation from the docstrings. For classes, there are a few subtleties\n around controlling which methods and attributes have pages auto-generated.\n\n Every method should be included in a ``toctree`` in ``api.rst``, else Sphinx\n will emit a warning.\n\n\nHow to build the *xarray* documentation\n---------------------------------------\n\nRequirements\n~~~~~~~~~~~~\nMake sure to follow the instructions on :ref:`creating a development environment above `, but\nto build the docs you need to use the environment file ``doc/environment.yml``.\n\n.. code-block:: none\n\n # Create and activate the docs environment\n conda env create -f doc/environment.yml\n conda activate xarray-docs\n\n # or with older versions of Anaconda:\n source activate xarray-docs\n\n # Build and install xarray\n pip install -e .\n\nBuilding the documentation\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nNavigate to your local ``xarray/doc/`` directory in the console and run::\n\n make html\n\nThen you can find the HTML output in the folder ``xarray/doc/_build/html/``.\n\nThe first time you build the docs, it will take quite a while because it has to run\nall the code examples and build all the generated docstring pages. In subsequent\nevocations, sphinx will try to only build the pages that have been modified.\n\nIf you want to do a full clean build, do::\n\n make clean\n make html\n\n.. _contributing.code:\n\nContributing to the code base\n=============================\n\n.. contents:: Code Base:\n :local:\n\nCode standards\n--------------\n\nWriting good code is not just about what you write. It is also about *how* you\nwrite it. During :ref:`Continuous Integration ` testing, several\ntools will be run to check your code for stylistic errors.\nGenerating any warnings will cause the test to fail.\nThus, good style is a requirement for submitting code to *xarray*.\n\nIn addition, because a lot of people use our library, it is important that we\ndo not make sudden changes to the code that could have the potential to break\na lot of user code as a result, that is, we need it to be as *backwards compatible*\nas possible to avoid mass breakages.\n\nCode Formatting\n~~~~~~~~~~~~~~~\n\nXarray uses `Black `_ and\n`Flake8 `_ to ensure a consistent code\nformat throughout the project. ``black`` and ``flake8`` can be installed with\n``pip``::\n\n pip install black flake8\n\nand then run from the root of the Xarray repository::\n\n black .\n flake8\n\nto auto-format your code. Additionally, many editors have plugins that will\napply ``black`` as you edit files.\n\nOther recommended but optional tools for checking code quality (not currently\nenforced in CI):\n\n- `mypy `_ performs static type checking, which can\n make it easier to catch bugs. 
Please run ``mypy xarray`` if you annotate any\n code with `type hints `_.\n- `isort `_ will highlight\n incorrectly sorted imports. ``isort -y`` will automatically fix them. See\n also `flake8-isort `_.\n\nOptionally, you may wish to setup `pre-commit hooks `_\nto automatically run ``black`` and ``flake8`` when you make a git commit. This\ncan be done by installing ``pre-commit``::\n\n pip install pre-commit\n\nand then running::\n\n pre-commit install\n\nfrom the root of the Xarray repository. Now ``black`` and ``flake8`` will be run\neach time you commit changes. You can skip these checks with\n``git commit --no-verify``.\n\n.. note::\n\n If you were working on a branch *prior* to the code being reformatted with black,\n you will likely face some merge conflicts. These steps can eliminate many of those\n conflicts. Because they have had limited testing, please reach out to the core devs\n on your pull request if you face any issues, and we'll help with the merge:\n\n - Merge the commit on master prior to the ``black`` commit into your branch\n ``git merge f172c673``. If you have conflicts here, resolve and commit.\n - Apply ``black .`` to your branch and commit ``git commit -am \"black\"``\n - Apply a patch of other changes we made on that commit: ``curl https://gist.githubusercontent.com/max-sixty/3cceb8472ed4ea806353999ca43aed52/raw/03cbee4e386156bddb61acaa250c0bfc726f596d/xarray%2520black%2520diff | git apply -``\n - Commit (``git commit -am \"black2\"``)\n - Merge master at the ``black`` commit, resolving in favor of 'our' changes:\n ``git merge d089df38 -X ours``. You shouldn't have any merge conflicts\n - Merge current master ``git merge master``; resolve and commit any conflicts\n\nBackwards Compatibility\n~~~~~~~~~~~~~~~~~~~~~~~\n\nPlease try to maintain backward compatibility. *xarray* has growing number of users with\nlots of existing code, so don't break it if at all possible. If you think breakage is\nrequired, clearly state why as part of the pull request. Also, be careful when changing\nmethod signatures and add deprecation warnings where needed.\n\n.. _contributing.ci:\n\nTesting With Continuous Integration\n-----------------------------------\n\nThe *xarray* test suite runs automatically the\n`Azure Pipelines `__,\ncontinuous integration service, once your pull request is submitted. However,\nif you wish to run the test suite on a branch prior to submitting the pull\nrequest, then Azure Pipelines\n`needs to be configured `_\nfor your GitHub repository.\n\nA pull-request will be considered for merging when you have an all 'green' build. If any\ntests are failing, then you will get a red 'X', where you can click through to see the\nindividual failed tests. This is an example of a green build.\n\n.. image:: _static/ci.png\n\n.. note::\n\n Each time you push to your PR branch, a new run of the tests will be\n triggered on the CI. If they haven't already finished, tests for any older\n commits on the same branch will be automatically cancelled.\n\n.. 
_contributing.tdd:\n\n\nTest-driven development/code writing\n------------------------------------\n\n*xarray* is serious about testing and strongly encourages contributors to embrace\n`test-driven development (TDD) `_.\nThis development process \"relies on the repetition of a very short development cycle:\nfirst the developer writes an (initially failing) automated test case that defines a desired\nimprovement or new function, then produces the minimum amount of code to pass that test.\"\nSo, before actually writing any code, you should write your tests. Often the test can be\ntaken from the original GitHub issue. However, it is always worth considering additional\nuse cases and writing corresponding tests.\n\nAdding tests is one of the most common requests after code is pushed to *xarray*. Therefore,\nit is worth getting in the habit of writing tests ahead of time so this is never an issue.\n\nLike many packages, *xarray* uses `pytest\n`_ and the convenient\nextensions in `numpy.testing\n`_.\n\nWriting tests\n~~~~~~~~~~~~~\n\nAll tests should go into the ``tests`` subdirectory of the specific package.\nThis folder contains many current examples of tests, and we suggest looking to these for\ninspiration. If your test requires working with files or\nnetwork connectivity, there is more information on the `testing page\n`_ of the wiki.\n\nThe ``xarray.testing`` module has many special ``assert`` functions that\nmake it easier to make statements about whether DataArray or Dataset objects are\nequivalent. The easiest way to verify that your code is correct is to\nexplicitly construct the result you expect, then compare the actual result to\nthe expected correct result::\n\n def test_constructor_from_0d():\n expected = Dataset({None: ([], 0)})[None]\n actual = DataArray(0)\n assert_identical(expected, actual)\n\nTransitioning to ``pytest``\n~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n*xarray* existing test structure is *mostly* classed based, meaning that you will\ntypically find tests wrapped in a class.\n\n.. code-block:: python\n\n class TestReallyCoolFeature:\n ....\n\nGoing forward, we are moving to a more *functional* style using the\n`pytest `__ framework, which offers a richer\ntesting framework that will facilitate testing and developing. Thus, instead of\nwriting test classes, we will write test functions like this:\n\n.. code-block:: python\n\n def test_really_cool_feature():\n ....\n\nUsing ``pytest``\n~~~~~~~~~~~~~~~~\n\nHere is an example of a self-contained set of tests that illustrate multiple\nfeatures that we like to use.\n\n- functional style: tests are like ``test_*`` and *only* take arguments that are either\n fixtures or parameters\n- ``pytest.mark`` can be used to set metadata on test functions, e.g. ``skip`` or ``xfail``.\n- using ``parametrize``: allow testing of multiple cases\n- to set a mark on a parameter, ``pytest.param(..., marks=...)`` syntax should be used\n- ``fixture``, code for object construction, on a per-test basis\n- using bare ``assert`` for scalars and truth-testing\n- ``assert_equal`` and ``assert_identical`` from the ``xarray.testing`` module for xarray object comparisons.\n- the typical pattern of constructing an ``expected`` and comparing versus the ``result``\n\nWe would name this file ``test_cool_feature.py`` and put in an appropriate place in the\n``xarray/tests/`` structure.\n\n.. TODO: confirm that this actually works\n\n.. 
code-block:: python\n\n import pytest\n import numpy as np\n import xarray as xr\n from xarray.testing import assert_equal\n\n\n @pytest.mark.parametrize('dtype', ['int8', 'int16', 'int32', 'int64'])\n def test_dtypes(dtype):\n assert str(np.dtype(dtype)) == dtype\n\n\n @pytest.mark.parametrize('dtype', ['float32',\n pytest.param('int16', marks=pytest.mark.skip),\n pytest.param('int32', marks=pytest.mark.xfail(\n reason='to show how it works'))])\n def test_mark(dtype):\n assert str(np.dtype(dtype)) == 'float32'\n\n\n @pytest.fixture\n def dataarray():\n return xr.DataArray([1, 2, 3])\n\n\n @pytest.fixture(params=['int8', 'int16', 'int32', 'int64'])\n def dtype(request):\n return request.param\n\n\n def test_series(dataarray, dtype):\n result = dataarray.astype(dtype)\n assert result.dtype == dtype\n\n expected = xr.DataArray(np.array([1, 2, 3], dtype=dtype))\n assert_equal(result, expected)\n\n\n\nA test run of this yields\n\n.. code-block:: shell\n\n ((xarray) $ pytest test_cool_feature.py -v\n =============================== test session starts ================================\n platform darwin -- Python 3.6.4, pytest-3.2.1, py-1.4.34, pluggy-0.4.0 --\n cachedir: ../../.cache\n plugins: cov-2.5.1, hypothesis-3.23.0\n collected 11 items\n\n test_cool_feature.py::test_dtypes[int8] PASSED\n test_cool_feature.py::test_dtypes[int16] PASSED\n test_cool_feature.py::test_dtypes[int32] PASSED\n test_cool_feature.py::test_dtypes[int64] PASSED\n test_cool_feature.py::test_mark[float32] PASSED\n test_cool_feature.py::test_mark[int16] SKIPPED\n test_cool_feature.py::test_mark[int32] xfail\n test_cool_feature.py::test_series[int8] PASSED\n test_cool_feature.py::test_series[int16] PASSED\n test_cool_feature.py::test_series[int32] PASSED\n test_cool_feature.py::test_series[int64] PASSED\n\n ================== 9 passed, 1 skipped, 1 xfailed in 1.83 seconds ==================\n\nTests that we have ``parametrized`` are now accessible via the test name, for\nexample we could run these with ``-k int8`` to sub-select *only* those tests\nwhich match ``int8``.\n\n\n.. code-block:: shell\n\n ((xarray) bash-3.2$ pytest test_cool_feature.py -v -k int8\n =========================== test session starts ===========================\n platform darwin -- Python 3.6.2, pytest-3.2.1, py-1.4.31, pluggy-0.4.0\n collected 11 items\n\n test_cool_feature.py::test_dtypes[int8] PASSED\n test_cool_feature.py::test_series[int8] PASSED\n\n\nRunning the test suite\n----------------------\n\nThe tests can then be run directly inside your Git clone (without having to\ninstall *xarray*) by typing::\n\n pytest xarray\n\nThe tests suite is exhaustive and takes a few minutes. Often it is\nworth running only a subset of tests first around your changes before running the\nentire suite.\n\nThe easiest way to do this is with::\n\n pytest xarray/path/to/test.py -k regex_matching_test_name\n\nOr with one of the following constructs::\n\n pytest xarray/tests/[test-module].py\n pytest xarray/tests/[test-module].py::[TestClass]\n pytest xarray/tests/[test-module].py::[TestClass]::[test_method]\n\nUsing `pytest-xdist `_, one can\nspeed up local testing on multicore machines. 
To use this feature, you will\nneed to install `pytest-xdist` via::\n\n pip install pytest-xdist\n\n\nThen, run pytest with the optional -n argument:\n\n pytest xarray -n 4\n\nThis can significantly reduce the time it takes to locally run tests before\nsubmitting a pull request.\n\nFor more, see the `pytest `_ documentation.\n\nRunning the performance test suite\n----------------------------------\n\nPerformance matters and it is worth considering whether your code has introduced\nperformance regressions. *xarray* is starting to write a suite of benchmarking tests\nusing `asv `__\nto enable easy monitoring of the performance of critical *xarray* operations.\nThese benchmarks are all found in the ``xarray/asv_bench`` directory. asv\nsupports both python2 and python3.\n\nTo use all features of asv, you will need either ``conda`` or\n``virtualenv``. For more details please check the `asv installation\nwebpage `_.\n\nTo install asv::\n\n pip install git+https://github.com/spacetelescope/asv\n\nIf you need to run a benchmark, change your directory to ``asv_bench/`` and run::\n\n asv continuous -f 1.1 upstream/master HEAD\n\nYou can replace ``HEAD`` with the name of the branch you are working on,\nand report benchmarks that changed by more than 10%.\nThe command uses ``conda`` by default for creating the benchmark\nenvironments. If you want to use virtualenv instead, write::\n\n asv continuous -f 1.1 -E virtualenv upstream/master HEAD\n\nThe ``-E virtualenv`` option should be added to all ``asv`` commands\nthat run benchmarks. The default value is defined in ``asv.conf.json``.\n\nRunning the full benchmark suite can take up to one hour and use up a few GBs of RAM.\nUsually it is sufficient to paste only a subset of the results into the pull\nrequest to show that the committed changes do not cause unexpected performance\nregressions. You can run specific benchmarks using the ``-b`` flag, which\ntakes a regular expression. For example, this will only run tests from a\n``xarray/asv_bench/benchmarks/groupby.py`` file::\n\n asv continuous -f 1.1 upstream/master HEAD -b ^groupby\n\nIf you want to only run a specific group of tests from a file, you can do it\nusing ``.`` as a separator. For example::\n\n asv continuous -f 1.1 upstream/master HEAD -b groupby.GroupByMethods\n\nwill only run the ``GroupByMethods`` benchmark defined in ``groupby.py``.\n\nYou can also run the benchmark suite using the version of *xarray*\nalready installed in your current Python environment. This can be\nuseful if you do not have ``virtualenv`` or ``conda``, or are using the\n``setup.py develop`` approach discussed above; for the in-place build\nyou need to set ``PYTHONPATH``, e.g.\n``PYTHONPATH=\"$PWD/..\" asv [remaining arguments]``.\nYou can run benchmarks using an existing Python\nenvironment by::\n\n asv run -e -E existing\n\nor, to use a specific Python interpreter,::\n\n asv run -e -E existing:python3.6\n\nThis will display stderr from the benchmarks, and use your local\n``python`` that comes from your ``$PATH``.\n\nInformation on how to write a benchmark and how to use asv can be found in the\n`asv documentation `_.\n\nThe *xarray* benchmarking suite is run remotely and the results are\navailable `here `_.\n\nDocumenting your code\n---------------------\n\nChanges should be reflected in the release notes located in ``doc/whats-new.rst``.\nThis file contains an ongoing change log for each release. Add an entry to this file to\ndocument your fix, enhancement or (unavoidable) breaking change. 
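As a companion to the benchmarking instructions above, the following sketch shows
roughly what an ``asv`` benchmark module looks like. The class, method, and variable
names are illustrative only and are not taken from ``xarray/asv_bench/benchmarks/``;
``asv`` calls ``setup`` before timing and times any method whose name starts with
``time_``.

.. code-block:: python

    import numpy as np
    import xarray as xr


    class RollingMean:
        def setup(self):
            # Built before timing starts; not included in the measured time.
            self.da = xr.DataArray(
                np.random.randn(1000, 100), dims=("time", "x")
            )

        def time_rolling_mean(self):
            # Only the body of this ``time_*`` method is timed by asv.
            self.da.rolling(time=10).mean()

A file like this would live under ``asv_bench/benchmarks/`` and be picked up by the
``asv continuous`` commands described above.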
Make sure to include the\nGitHub issue number when adding your entry (using ``:issue:`1234```, where ``1234`` is the\nissue/pull request number).\n\nIf your code is an enhancement, it is most likely necessary to add usage\nexamples to the existing documentation. This can be done following the section\nregarding documentation :ref:`above `.\n\nContributing your changes to *xarray*\n=====================================\n\nCommitting your code\n--------------------\n\nKeep style fixes to a separate commit to make your pull request more readable.\n\nOnce you've made changes, you can see them by typing::\n\n git status\n\nIf you have created a new file, it is not being tracked by git. Add it by typing::\n\n git add path/to/file-to-be-added.py\n\nDoing 'git status' again should give something like::\n\n # On branch shiny-new-feature\n #\n # modified: /relative/path/to/file-you-added.py\n #\n\nThe following defines how a commit message should be structured:\n\n * A subject line with `< 72` chars.\n * One blank line.\n * Optionally, a commit message body.\n\nPlease reference the relevant GitHub issues in your commit message using ``GH1234`` or\n``#1234``. Either style is fine, but the former is generally preferred.\n\nNow you can commit your changes in your local repository::\n\n git commit -m\n\nPushing your changes\n--------------------\n\nWhen you want your changes to appear publicly on your GitHub page, push your\nforked feature branch's commits::\n\n git push origin shiny-new-feature\n\nHere ``origin`` is the default name given to your remote repository on GitHub.\nYou can see the remote repositories::\n\n git remote -v\n\nIf you added the upstream repository as described above you will see something\nlike::\n\n origin git@github.com:yourname/xarray.git (fetch)\n origin git@github.com:yourname/xarray.git (push)\n upstream git://github.com/pydata/xarray.git (fetch)\n upstream git://github.com/pydata/xarray.git (push)\n\nNow your code is on GitHub, but it is not yet a part of the *xarray* project. For that to\nhappen, a pull request needs to be submitted on GitHub.\n\nReview your code\n----------------\n\nWhen you're ready to ask for a code review, file a pull request. Before you do, once\nagain make sure that you have followed all the guidelines outlined in this document\nregarding code style, tests, performance tests, and documentation. You should also\ndouble check your branch changes against the branch it was based on:\n\n#. Navigate to your repository on GitHub -- https://github.com/your-user-name/xarray\n#. Click on ``Branches``\n#. Click on the ``Compare`` button for your feature branch\n#. Select the ``base`` and ``compare`` branches, if necessary. This will be ``master`` and\n ``shiny-new-feature``, respectively.\n\nFinally, make the pull request\n------------------------------\n\nIf everything looks good, you are ready to make a pull request. A pull request is how\ncode from a local repository becomes available to the GitHub community and can be looked\nat and eventually merged into the master version. This pull request and its associated\nchanges will eventually be committed to the master branch and available in the next\nrelease. To submit a pull request:\n\n#. Navigate to your repository on GitHub\n#. Click on the ``Pull Request`` button\n#. You can then click on ``Commits`` and ``Files Changed`` to make sure everything looks\n okay one last time\n#. Write a description of your changes in the ``Preview Discussion`` tab\n#. 
Click ``Send Pull Request``.\n\nThis request then goes to the repository maintainers, and they will review\nthe code. If you need to make more changes, you can make them in\nyour branch, add them to a new commit, push them to GitHub, and the pull request\nwill be automatically updated. Pushing them to GitHub again is done by::\n\n git push origin shiny-new-feature\n\nThis will automatically update your pull request with the latest code and restart the\n:ref:`Continuous Integration ` tests.\n\n\nDelete your merged branch (optional)\n------------------------------------\n\nOnce your feature branch is accepted into upstream, you'll probably want to get rid of\nthe branch. First, merge upstream master into your branch so git knows it is safe to\ndelete your branch::\n\n git fetch upstream\n git checkout master\n git merge upstream/master\n\nThen you can do::\n\n git branch -d shiny-new-feature\n\nMake sure you use a lower-case ``-d``, or else git won't warn you if your feature\nbranch has not actually been merged.\n\nThe branch will still exist on GitHub, so to delete it there do::\n\n git push origin --delete shiny-new-feature\n\n\nPR checklist\n------------\n\n- **Properly comment and document your code.** See `\"Documenting your code\" `_.\n- **Test that the documentation builds correctly** by typing ``make html`` in the ``doc`` directory. This is not strictly necessary, but this may be easier than waiting for CI to catch a mistake. See `\"Contributing to the documentation\" `_.\n- **Test your code**.\n\n - Write new tests if needed. See `\"Test-driven development/code writing\" `_.\n - Test the code using `Pytest `_. Running all tests (type ``pytest`` in the root directory) takes a while, so feel free to only run the tests you think are needed based on your PR (example: ``pytest xarray/tests/test_dataarray.py``). CI will catch any failing tests.\n\n- **Properly format your code** and verify that it passes the formatting guidelines set by `Black `_ and `Flake8 `_. See `\"Code formatting\" `_. You can use `pre-commit `_ to run these automatically on each commit.\n\n - Run ``black .`` in the root directory. This may modify some files. Confirm and commit any formatting changes.\n - Run ``flake8`` in the root directory. If this fails, it will log an error message.\n\n- **Push your code and** `create a PR on GitHub `_.\n- **Use a helpful title for your pull request** by summarizing the main contributions rather than using the latest commit message. If this addresses an `issue `_, please `reference it `_.\n"},{"col":16,"endLoc":604,"id":2505,"nodeType":"Lambda","startLoc":604,"text":"lambda x: x.strip(to_strip)"},{"col":16,"endLoc":606,"id":2506,"nodeType":"Lambda","startLoc":606,"text":"lambda x: x.lstrip(to_strip)"},{"col":16,"endLoc":608,"id":2507,"nodeType":"Lambda","startLoc":608,"text":"lambda x: x.rstrip(to_strip)"},{"col":4,"comment":"null","endLoc":372,"header":"def __apply__(self, other)","id":2508,"name":"__apply__","nodeType":"Function","startLoc":361,"text":"def __apply__(self, other):\n # months_since: find the calendar quarter containing other.month,\n # e.g. if other.month == 8, the calendar quarter is [Jul, Aug, Sep].\n # Then find the month in that quarter containing an onOffset date for\n # self. 
`months_since` is the number of months to shift other.month\n # to get to this on-offset month.\n months_since = other.month % 3 - self.month % 3\n qtrs = roll_qtrday(\n other, self.n, self.month, day_option=self._day_option, modby=3\n )\n months = qtrs * 3 - months_since\n return _shift_month(other, months, self._day_option)"},{"attributeType":"DataArray","col":8,"comment":"null","endLoc":35,"id":2509,"name":"da_long","nodeType":"Attribute","startLoc":35,"text":"self.da_long"},{"attributeType":"Dataset","col":8,"comment":"null","endLoc":22,"id":2510,"name":"ds","nodeType":"Attribute","startLoc":22,"text":"self.ds"},{"className":"RollingDask","col":0,"comment":"null","endLoc":69,"id":2511,"nodeType":"Class","startLoc":64,"text":"class RollingDask(Rolling):\n def setup(self, *args, **kwargs):\n requires_dask()\n super().setup(**kwargs)\n self.ds = self.ds.chunk({\"x\": 100, \"y\": 50, \"t\": 50})\n self.da_long = self.da_long.chunk({\"x\": 10000})"},{"col":0,"comment":"Possibly increment or decrement the number of periods to shift\n based on rollforward/rollbackward conventions.\n\n Parameters\n ----------\n other : cftime.datetime\n n : number of periods to increment, before adjusting for rolling\n month : int reference month giving the first month of the year\n day_option : 'start', 'end'\n The convention to use in finding the day in a given month against\n which to compare for rollforward/rollbackward decisions.\n modby : int 3 for quarters, 12 for years\n\n Returns\n -------\n n : int number of periods to increment\n\n See Also\n --------\n _get_day_of_month : Find the day in a month provided an offset.\n ","endLoc":285,"header":"def roll_qtrday(other, n, month, day_option, modby=3)","id":2512,"name":"roll_qtrday","nodeType":"Function","startLoc":247,"text":"def roll_qtrday(other, n, month, day_option, modby=3):\n \"\"\"Possibly increment or decrement the number of periods to shift\n based on rollforward/rollbackward conventions.\n\n Parameters\n ----------\n other : cftime.datetime\n n : number of periods to increment, before adjusting for rolling\n month : int reference month giving the first month of the year\n day_option : 'start', 'end'\n The convention to use in finding the day in a given month against\n which to compare for rollforward/rollbackward decisions.\n modby : int 3 for quarters, 12 for years\n\n Returns\n -------\n n : int number of periods to increment\n\n See Also\n --------\n _get_day_of_month : Find the day in a month provided an offset.\n \"\"\"\n\n months_since = other.month % modby - month % modby\n\n if n > 0:\n if months_since < 0 or (\n months_since == 0 and other.day < _get_day_of_month(other, day_option)\n ):\n # pretend to roll back if on same month but\n # before compare_day\n n -= 1\n else:\n if months_since > 0 or (\n months_since == 0 and other.day > _get_day_of_month(other, day_option)\n ):\n # make sure to roll forward, so negate\n n += 1\n return n"},{"col":4,"comment":"null","endLoc":69,"header":"def setup(self, *args, **kwargs)","id":2513,"name":"setup","nodeType":"Function","startLoc":65,"text":"def setup(self, *args, **kwargs):\n requires_dask()\n super().setup(**kwargs)\n self.ds = self.ds.chunk({\"x\": 100, \"y\": 50, \"t\": 50})\n self.da_long = self.da_long.chunk({\"x\": 10000})"},{"col":4,"comment":"\n Remove leading and trailing characters.\n\n Strip whitespaces (including newlines) or a set of specified characters\n from each string in the array from the left side.\n\n Parameters\n ----------\n to_strip : str or None, default None\n 
Specifying the set of characters to be removed.\n All combinations of this set of characters will be stripped.\n If None then whitespaces are removed.\n\n Returns\n -------\n stripped : same type as values\n ","endLoc":632,"header":"def lstrip(self, to_strip=None)","id":2514,"name":"lstrip","nodeType":"Function","startLoc":614,"text":"def lstrip(self, to_strip=None):\n \"\"\"\n Remove leading and trailing characters.\n\n Strip whitespaces (including newlines) or a set of specified characters\n from each string in the array from the left side.\n\n Parameters\n ----------\n to_strip : str or None, default None\n Specifying the set of characters to be removed.\n All combinations of this set of characters will be stripped.\n If None then whitespaces are removed.\n\n Returns\n -------\n stripped : same type as values\n \"\"\"\n return self.strip(to_strip, side=\"left\")"},{"id":2515,"name":"interpolation.rst","nodeType":"TextFile","path":"doc","text":".. _interp:\n\nInterpolating data\n==================\n\n.. ipython:: python\n :suppress:\n\n import numpy as np\n import pandas as pd\n import xarray as xr\n np.random.seed(123456)\n\nxarray offers flexible interpolation routines, which have a similar interface\nto our :ref:`indexing `.\n\n.. note::\n\n ``interp`` requires `scipy` installed.\n\n\nScalar and 1-dimensional interpolation\n--------------------------------------\n\nInterpolating a :py:class:`~xarray.DataArray` works mostly like labeled\nindexing of a :py:class:`~xarray.DataArray`,\n\n.. ipython:: python\n\n da = xr.DataArray(np.sin(0.3 * np.arange(12).reshape(4, 3)),\n [('time', np.arange(4)),\n ('space', [0.1, 0.2, 0.3])])\n # label lookup\n da.sel(time=3)\n\n # interpolation\n da.interp(time=2.5)\n\n\nSimilar to the indexing, :py:meth:`~xarray.DataArray.interp` also accepts an\narray-like, which gives the interpolated result as an array.\n\n.. ipython:: python\n\n # label lookup\n da.sel(time=[2, 3])\n\n # interpolation\n da.interp(time=[2.5, 3.5])\n\nTo interpolate data with a :py:func:`numpy.datetime64` coordinate you can pass a string.\n\n.. ipython:: python\n\n da_dt64 = xr.DataArray([1, 3],\n [('time', pd.date_range('1/1/2000', '1/3/2000', periods=2))])\n da_dt64.interp(time='2000-01-02')\n\nThe interpolated data can be merged into the original :py:class:`~xarray.DataArray`\nby specifying the time periods required.\n\n.. ipython:: python\n\n da_dt64.interp(time=pd.date_range('1/1/2000', '1/3/2000', periods=3))\n\nInterpolation of data indexed by a :py:class:`~xarray.CFTimeIndex` is also\nallowed. See :ref:`CFTimeIndex` for examples.\n \n.. note::\n\n Currently, our interpolation only works for regular grids.\n Therefore, similarly to :py:meth:`~xarray.DataArray.sel`,\n only 1D coordinates along a dimension can be used as the\n original coordinate to be interpolated.\n\n\nMulti-dimensional Interpolation\n-------------------------------\n\nLike :py:meth:`~xarray.DataArray.sel`, :py:meth:`~xarray.DataArray.interp`\naccepts multiple coordinates. In this case, multidimensional interpolation\nis carried out.\n\n.. ipython:: python\n\n # label lookup\n da.sel(time=2, space=0.1)\n\n # interpolation\n da.interp(time=2.5, space=0.15)\n\nArray-like coordinates are also accepted:\n\n.. ipython:: python\n\n # label lookup\n da.sel(time=[2, 3], space=[0.1, 0.2])\n\n # interpolation\n da.interp(time=[1.5, 2.5], space=[0.15, 0.25])\n\n\n:py:meth:`~xarray.DataArray.interp_like` method is a useful shortcut. 
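As a compact, self-contained sketch of that shortcut (it reuses the same ``da`` and
``other`` as the worked example that follows, and requires ``scipy`` to be installed):

.. code-block:: python

    import numpy as np
    import xarray as xr

    # Two arrays defined on slightly different coordinates.
    da = xr.DataArray(np.sin(0.3 * np.arange(12).reshape(4, 3)),
                      [('time', np.arange(4)),
                       ('space', [0.1, 0.2, 0.3])])
    other = xr.DataArray(np.sin(0.4 * np.arange(9).reshape(3, 3)),
                         [('time', [0.9, 1.9, 2.9]),
                          ('space', [0.15, 0.25, 0.35])])

    # Interpolate ``da`` onto ``other``'s coordinates; the difference is
    # then defined on a common grid.
    interpolated = da.interp_like(other)
    difference = other - interpolated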
This\nmethod interpolates an xarray object onto the coordinates of another xarray\nobject. For example, if we want to compute the difference between\ntwo :py:class:`~xarray.DataArray` s (``da`` and ``other``) staying on slightly\ndifferent coordinates,\n\n.. ipython:: python\n\n other = xr.DataArray(np.sin(0.4 * np.arange(9).reshape(3, 3)),\n [('time', [0.9, 1.9, 2.9]),\n ('space', [0.15, 0.25, 0.35])])\n\nit might be a good idea to first interpolate ``da`` so that it will stay on the\nsame coordinates of ``other``, and then subtract it.\n:py:meth:`~xarray.DataArray.interp_like` can be used for such a case,\n\n.. ipython:: python\n\n # interpolate da along other's coordinates\n interpolated = da.interp_like(other)\n interpolated\n\nIt is now possible to safely compute the difference ``other - interpolated``.\n\n\nInterpolation methods\n---------------------\n\nWe use :py:func:`scipy.interpolate.interp1d` for 1-dimensional interpolation and\n:py:func:`scipy.interpolate.interpn` for multi-dimensional interpolation.\n\nThe interpolation method can be specified by the optional ``method`` argument.\n\n.. ipython:: python\n\n da = xr.DataArray(np.sin(np.linspace(0, 2 * np.pi, 10)), dims='x',\n coords={'x': np.linspace(0, 1, 10)})\n\n da.plot.line('o', label='original')\n da.interp(x=np.linspace(0, 1, 100)).plot.line(label='linear (default)')\n da.interp(x=np.linspace(0, 1, 100), method='cubic').plot.line(label='cubic')\n @savefig interpolation_sample1.png width=4in\n plt.legend()\n\nAdditional keyword arguments can be passed to scipy's functions.\n\n.. ipython:: python\n\n # fill 0 for the outside of the original coordinates.\n da.interp(x=np.linspace(-0.5, 1.5, 10), kwargs={'fill_value': 0.0})\n # extrapolation\n da.interp(x=np.linspace(-0.5, 1.5, 10), kwargs={'fill_value': 'extrapolate'})\n\n\nAdvanced Interpolation\n----------------------\n\n:py:meth:`~xarray.DataArray.interp` accepts :py:class:`~xarray.DataArray`\nas similar to :py:meth:`~xarray.DataArray.sel`, which enables us more advanced interpolation.\nBased on the dimension of the new coordinate passed to :py:meth:`~xarray.DataArray.interp`, the dimension of the result are determined.\n\nFor example, if you want to interpolate a two dimensional array along a particular dimension, as illustrated below,\nyou can pass two 1-dimensional :py:class:`~xarray.DataArray` s with\na common dimension as new coordinate.\n\n.. image:: _static/advanced_selection_interpolation.svg\n :height: 200px\n :width: 400 px\n :alt: advanced indexing and interpolation\n :align: center\n\nFor example:\n\n.. ipython:: python\n\n da = xr.DataArray(np.sin(0.3 * np.arange(20).reshape(5, 4)),\n [('x', np.arange(5)),\n ('y', [0.1, 0.2, 0.3, 0.4])])\n # advanced indexing\n x = xr.DataArray([0, 2, 4], dims='z')\n y = xr.DataArray([0.1, 0.2, 0.3], dims='z')\n da.sel(x=x, y=y)\n\n # advanced interpolation\n x = xr.DataArray([0.5, 1.5, 2.5], dims='z')\n y = xr.DataArray([0.15, 0.25, 0.35], dims='z')\n da.interp(x=x, y=y)\n\nwhere values on the original coordinates\n``(x, y) = ((0.5, 0.15), (1.5, 0.25), (2.5, 0.35))`` are obtained by the\n2-dimensional interpolation and mapped along a new dimension ``z``.\n\nIf you want to add a coordinate to the new dimension ``z``, you can supply\n:py:class:`~xarray.DataArray` s with a coordinate,\n\n.. 
ipython:: python\n\n x = xr.DataArray([0.5, 1.5, 2.5], dims='z', coords={'z': ['a', 'b','c']})\n y = xr.DataArray([0.15, 0.25, 0.35], dims='z',\n coords={'z': ['a', 'b','c']})\n da.interp(x=x, y=y)\n\nFor the details of the advanced indexing,\nsee :ref:`more advanced indexing `.\n\n\nInterpolating arrays with NaN\n-----------------------------\n\nOur :py:meth:`~xarray.DataArray.interp` works with arrays with NaN\nthe same way that\n`scipy.interpolate.interp1d `_ and\n`scipy.interpolate.interpn `_ do.\n``linear`` and ``nearest`` methods return arrays including NaN,\nwhile other methods such as ``cubic`` or ``quadratic`` return all NaN arrays.\n\n.. ipython:: python\n\n da = xr.DataArray([0, 2, np.nan, 3, 3.25], dims='x',\n coords={'x': range(5)})\n da.interp(x=[0.5, 1.5, 2.5])\n da.interp(x=[0.5, 1.5, 2.5], method='cubic')\n\nTo avoid this, you can drop NaN by :py:meth:`~xarray.DataArray.dropna`, and\nthen make the interpolation\n\n.. ipython:: python\n\n dropped = da.dropna('x')\n dropped\n dropped.interp(x=[0.5, 1.5, 2.5], method='cubic')\n\nIf NaNs are distributed randomly in your multidimensional array,\ndropping all the columns containing more than one NaNs by\n:py:meth:`~xarray.DataArray.dropna` may lose a significant amount of information.\nIn such a case, you can fill NaN by :py:meth:`~xarray.DataArray.interpolate_na`,\nwhich is similar to :py:meth:`pandas.Series.interpolate`.\n\n.. ipython:: python\n\n filled = da.interpolate_na(dim='x')\n filled\n\nThis fills NaN by interpolating along the specified dimension.\nAfter filling NaNs, you can interpolate:\n\n.. ipython:: python\n\n filled.interp(x=[0.5, 1.5, 2.5], method='cubic')\n\nFor the details of :py:meth:`~xarray.DataArray.interpolate_na`,\nsee :ref:`Missing values `.\n\n\nExample\n-------\n\nLet's see how :py:meth:`~xarray.DataArray.interp` works on real data.\n\n.. ipython:: python\n\n # Raw data\n ds = xr.tutorial.open_dataset('air_temperature').isel(time=0)\n fig, axes = plt.subplots(ncols=2, figsize=(10, 4))\n ds.air.plot(ax=axes[0])\n axes[0].set_title('Raw data')\n\n # Interpolated data\n new_lon = np.linspace(ds.lon[0], ds.lon[-1], ds.dims['lon'] * 4)\n new_lat = np.linspace(ds.lat[0], ds.lat[-1], ds.dims['lat'] * 4)\n dsi = ds.interp(lat=new_lat, lon=new_lon)\n dsi.air.plot(ax=axes[1])\n @savefig interpolation_sample3.png width=8in\n axes[1].set_title('Interpolated data')\n\nOur advanced interpolation can be used to remap the data to the new coordinate.\nConsider the new coordinates x and z on the two dimensional plane.\nThe remapping can be done as follows\n\n.. 
ipython:: python\n\n # new coordinate\n x = np.linspace(240, 300, 100)\n z = np.linspace(20, 70, 100)\n # relation between new and original coordinates\n lat = xr.DataArray(z, dims=['z'], coords={'z': z})\n lon = xr.DataArray((x[:, np.newaxis]-270)/np.cos(z*np.pi/180)+270,\n dims=['x', 'z'], coords={'x': x, 'z': z})\n\n fig, axes = plt.subplots(ncols=2, figsize=(10, 4))\n ds.air.plot(ax=axes[0])\n # draw the new coordinate on the original coordinates.\n for idx in [0, 33, 66, 99]:\n axes[0].plot(lon.isel(x=idx), lat, '--k')\n for idx in [0, 33, 66, 99]:\n axes[0].plot(*xr.broadcast(lon.isel(z=idx), lat.isel(z=idx)), '--k')\n axes[0].set_title('Raw data')\n\n dsi = ds.interp(lon=lon, lat=lat)\n dsi.air.plot(ax=axes[1])\n @savefig interpolation_sample4.png width=8in\n axes[1].set_title('Remapped data')\n"},{"id":2516,"name":"dataset-diagram-square-logo.tex","nodeType":"TextFile","path":"doc/_static","text":"\\documentclass[class=minimal,border=0pt,convert={size=600,outext=.png}]{standalone}\n% \\documentclass[class=minimal,border=0pt]{standalone}\n\\usepackage[scaled]{helvet}\n\\renewcommand*\\familydefault{\\sfdefault}\n\n% ===========================================================================\n% The code below (used to define the \\tikzcuboid command) is copied,\n% unmodified, from a tex.stackexchange.com answer by the user \"Tom Bombadil\":\n% http://tex.stackexchange.com/a/29882/8335\n%\n% It is licensed under the Creative Commons Attribution-ShareAlike 3.0\n% Unported license: http://creativecommons.org/licenses/by-sa/3.0/\n% ===========================================================================\n\n\\usepackage[usenames,dvipsnames]{color}\n\\usepackage{tikz}\n\\usepackage{keyval}\n\\usepackage{ifthen}\n\n%====================================\n%emphasize vertices --> switch and emph style (e.g. 
thick,black)\n%====================================\n\\makeatletter\n% Standard Values for Parameters\n\\newcommand{\\tikzcuboid@shiftx}{0}\n\\newcommand{\\tikzcuboid@shifty}{0}\n\\newcommand{\\tikzcuboid@dimx}{3}\n\\newcommand{\\tikzcuboid@dimy}{3}\n\\newcommand{\\tikzcuboid@dimz}{3}\n\\newcommand{\\tikzcuboid@scale}{1}\n\\newcommand{\\tikzcuboid@densityx}{1}\n\\newcommand{\\tikzcuboid@densityy}{1}\n\\newcommand{\\tikzcuboid@densityz}{1}\n\\newcommand{\\tikzcuboid@rotation}{0}\n\\newcommand{\\tikzcuboid@anglex}{0}\n\\newcommand{\\tikzcuboid@angley}{90}\n\\newcommand{\\tikzcuboid@anglez}{225}\n\\newcommand{\\tikzcuboid@scalex}{1}\n\\newcommand{\\tikzcuboid@scaley}{1}\n\\newcommand{\\tikzcuboid@scalez}{sqrt(0.5)}\n\\newcommand{\\tikzcuboid@linefront}{black}\n\\newcommand{\\tikzcuboid@linetop}{black}\n\\newcommand{\\tikzcuboid@lineright}{black}\n\\newcommand{\\tikzcuboid@fillfront}{white}\n\\newcommand{\\tikzcuboid@filltop}{white}\n\\newcommand{\\tikzcuboid@fillright}{white}\n\\newcommand{\\tikzcuboid@shaded}{N}\n\\newcommand{\\tikzcuboid@shadecolor}{black}\n\\newcommand{\\tikzcuboid@shadeperc}{25}\n\\newcommand{\\tikzcuboid@emphedge}{N}\n\\newcommand{\\tikzcuboid@emphstyle}{thick}\n\n% Definition of Keys\n\\define@key{tikzcuboid}{shiftx}[\\tikzcuboid@shiftx]{\\renewcommand{\\tikzcuboid@shiftx}{#1}}\n\\define@key{tikzcuboid}{shifty}[\\tikzcuboid@shifty]{\\renewcommand{\\tikzcuboid@shifty}{#1}}\n\\define@key{tikzcuboid}{dimx}[\\tikzcuboid@dimx]{\\renewcommand{\\tikzcuboid@dimx}{#1}}\n\\define@key{tikzcuboid}{dimy}[\\tikzcuboid@dimy]{\\renewcommand{\\tikzcuboid@dimy}{#1}}\n\\define@key{tikzcuboid}{dimz}[\\tikzcuboid@dimz]{\\renewcommand{\\tikzcuboid@dimz}{#1}}\n\\define@key{tikzcuboid}{scale}[\\tikzcuboid@scale]{\\renewcommand{\\tikzcuboid@scale}{#1}}\n\\define@key{tikzcuboid}{densityx}[\\tikzcuboid@densityx]{\\renewcommand{\\tikzcuboid@densityx}{#1}}\n\\define@key{tikzcuboid}{densityy}[\\tikzcuboid@densityy]{\\renewcommand{\\tikzcuboid@densityy}{#1}}\n\\define@key{tikzcuboid}{densityz}[\\tikzcuboid@densityz]{\\renewcommand{\\tikzcuboid@densityz}{#1}}\n\\define@key{tikzcuboid}{rotation}[\\tikzcuboid@rotation]{\\renewcommand{\\tikzcuboid@rotation}{#1}}\n\\define@key{tikzcuboid}{anglex}[\\tikzcuboid@anglex]{\\renewcommand{\\tikzcuboid@anglex}{#1}}\n\\define@key{tikzcuboid}{angley}[\\tikzcuboid@angley]{\\renewcommand{\\tikzcuboid@angley}{#1}}\n\\define@key{tikzcuboid}{anglez}[\\tikzcuboid@anglez]{\\renewcommand{\\tikzcuboid@anglez}{#1}}\n\\define@key{tikzcuboid}{scalex}[\\tikzcuboid@scalex]{\\renewcommand{\\tikzcuboid@scalex}{#1}}\n\\define@key{tikzcuboid}{scaley}[\\tikzcuboid@scaley]{\\renewcommand{\\tikzcuboid@scaley}{#1}}\n\\define@key{tikzcuboid}{scalez}[\\tikzcuboid@scalez]{\\renewcommand{\\tikzcuboid@scalez}{#1}}\n\\define@key{tikzcuboid}{linefront}[\\tikzcuboid@linefront]{\\renewcommand{\\tikzcuboid@linefront}{#1}}\n\\define@key{tikzcuboid}{linetop}[\\tikzcuboid@linetop]{\\renewcommand{\\tikzcuboid@linetop}{#1}}\n\\define@key{tikzcuboid}{lineright}[\\tikzcuboid@lineright]{\\renewcommand{\\tikzcuboid@lineright}{#1}}\n\\define@key{tikzcuboid}{fillfront}[\\tikzcuboid@fillfront]{\\renewcommand{\\tikzcuboid@fillfront}{#1}}\n\\define@key{tikzcuboid}{filltop}[\\tikzcuboid@filltop]{\\renewcommand{\\tikzcuboid@filltop}{#1}}\n\\define@key{tikzcuboid}{fillright}[\\tikzcuboid@fillright]{\\renewcommand{\\tikzcuboid@fillright}{#1}}\n\\define@key{tikzcuboid}{shaded}[\\tikzcuboid@shaded]{\\renewcommand{\\tikzcuboid@shaded}{#1}}\n\\define@key{tikzcuboid}{shadecolor}[\\tikzcuboid@shadecolor]{\\renewcommand{\
\tikzcuboid@shadecolor}{#1}}\n\\define@key{tikzcuboid}{shadeperc}[\\tikzcuboid@shadeperc]{\\renewcommand{\\tikzcuboid@shadeperc}{#1}}\n\\define@key{tikzcuboid}{emphedge}[\\tikzcuboid@emphedge]{\\renewcommand{\\tikzcuboid@emphedge}{#1}}\n\\define@key{tikzcuboid}{emphstyle}[\\tikzcuboid@emphstyle]{\\renewcommand{\\tikzcuboid@emphstyle}{#1}}\n% Commands\n\\newcommand{\\tikzcuboid}[1]{\n \\setkeys{tikzcuboid}{#1} % Process Keys passed to command\n \\pgfmathsetmacro{\\vectorxx}{\\tikzcuboid@scalex*cos(\\tikzcuboid@anglex)}\n \\pgfmathsetmacro{\\vectorxy}{\\tikzcuboid@scalex*sin(\\tikzcuboid@anglex)}\n \\pgfmathsetmacro{\\vectoryx}{\\tikzcuboid@scaley*cos(\\tikzcuboid@angley)}\n \\pgfmathsetmacro{\\vectoryy}{\\tikzcuboid@scaley*sin(\\tikzcuboid@angley)}\n \\pgfmathsetmacro{\\vectorzx}{\\tikzcuboid@scalez*cos(\\tikzcuboid@anglez)}\n \\pgfmathsetmacro{\\vectorzy}{\\tikzcuboid@scalez*sin(\\tikzcuboid@anglez)}\n \\begin{scope}[xshift=\\tikzcuboid@shiftx, yshift=\\tikzcuboid@shifty, scale=\\tikzcuboid@scale, rotate=\\tikzcuboid@rotation, x={(\\vectorxx,\\vectorxy)}, y={(\\vectoryx,\\vectoryy)}, z={(\\vectorzx,\\vectorzy)}]\n \\pgfmathsetmacro{\\steppingx}{1/\\tikzcuboid@densityx}\n \\pgfmathsetmacro{\\steppingy}{1/\\tikzcuboid@densityy}\n \\pgfmathsetmacro{\\steppingz}{1/\\tikzcuboid@densityz}\n \\newcommand{\\dimx}{\\tikzcuboid@dimx}\n \\newcommand{\\dimy}{\\tikzcuboid@dimy}\n \\newcommand{\\dimz}{\\tikzcuboid@dimz}\n \\pgfmathsetmacro{\\secondx}{2*\\steppingx}\n \\pgfmathsetmacro{\\secondy}{2*\\steppingy}\n \\pgfmathsetmacro{\\secondz}{2*\\steppingz}\n \\foreach \\x in {\\steppingx,\\secondx,...,\\dimx}\n { \\foreach \\y in {\\steppingy,\\secondy,...,\\dimy}\n { \\pgfmathsetmacro{\\lowx}{(\\x-\\steppingx)}\n \\pgfmathsetmacro{\\lowy}{(\\y-\\steppingy)}\n \\filldraw[fill=\\tikzcuboid@fillfront,draw=\\tikzcuboid@linefront] (\\lowx,\\lowy,\\dimz) -- (\\lowx,\\y,\\dimz) -- (\\x,\\y,\\dimz) -- (\\x,\\lowy,\\dimz) -- cycle;\n\n }\n }\n \\foreach \\x in {\\steppingx,\\secondx,...,\\dimx}\n { \\foreach \\z in {\\steppingz,\\secondz,...,\\dimz}\n { \\pgfmathsetmacro{\\lowx}{(\\x-\\steppingx)}\n \\pgfmathsetmacro{\\lowz}{(\\z-\\steppingz)}\n \\filldraw[fill=\\tikzcuboid@filltop,draw=\\tikzcuboid@linetop] (\\lowx,\\dimy,\\lowz) -- (\\lowx,\\dimy,\\z) -- (\\x,\\dimy,\\z) -- (\\x,\\dimy,\\lowz) -- cycle;\n }\n }\n \\foreach \\y in {\\steppingy,\\secondy,...,\\dimy}\n { \\foreach \\z in {\\steppingz,\\secondz,...,\\dimz}\n { \\pgfmathsetmacro{\\lowy}{(\\y-\\steppingy)}\n \\pgfmathsetmacro{\\lowz}{(\\z-\\steppingz)}\n \\filldraw[fill=\\tikzcuboid@fillright,draw=\\tikzcuboid@lineright] (\\dimx,\\lowy,\\lowz) -- (\\dimx,\\lowy,\\z) -- (\\dimx,\\y,\\z) -- (\\dimx,\\y,\\lowz) -- cycle;\n }\n }\n \\ifthenelse{\\equal{\\tikzcuboid@emphedge}{Y}}%\n {\\draw[\\tikzcuboid@emphstyle](0,\\dimy,0) -- (\\dimx,\\dimy,0) -- (\\dimx,\\dimy,\\dimz) -- (0,\\dimy,\\dimz) -- cycle;%\n \\draw[\\tikzcuboid@emphstyle] (0,0,\\dimz) -- (0,\\dimy,\\dimz) -- (\\dimx,\\dimy,\\dimz) -- (\\dimx,0,\\dimz) -- cycle;%\n \\draw[\\tikzcuboid@emphstyle](\\dimx,0,0) -- (\\dimx,\\dimy,0) -- (\\dimx,\\dimy,\\dimz) -- (\\dimx,0,\\dimz) -- cycle;%\n }%\n {}\n \\end{scope}\n}\n\n\\makeatother\n\n\\begin{document}\n\n\\begin{tikzpicture}\n \\tikzcuboid{%\n shiftx=21cm,%\n shifty=8cm,%\n scale=1.00,%\n rotation=0,%\n densityx=2,%\n densityy=2,%\n densityz=2,%\n dimx=4,%\n dimy=3,%\n dimz=3,%\n linefront=purple!75!black,%\n linetop=purple!50!black,%\n lineright=purple!25!black,%\n fillfront=purple!25!white,%\n filltop=purple!50!white,%\n 
fillright=purple!75!white,%\n emphedge=Y,%\n emphstyle=ultra thick,\n }\n \\tikzcuboid{%\n shiftx=21cm,%\n shifty=11.6cm,%\n scale=1.00,%\n rotation=0,%\n densityx=2,%\n densityy=2,%\n densityz=2,%\n dimx=4,%\n dimy=3,%\n dimz=3,%\n linefront=teal!75!black,%\n linetop=teal!50!black,%\n lineright=teal!25!black,%\n fillfront=teal!25!white,%\n filltop=teal!50!white,%\n fillright=teal!75!white,%\n emphedge=Y,%\n emphstyle=ultra thick,\n }\n \\tikzcuboid{%\n shiftx=26.8cm,%\n shifty=8cm,%\n scale=1.00,%\n rotation=0,%\n densityx=10000,%\n densityy=2,%\n densityz=2,%\n dimx=0,%\n dimy=3,%\n dimz=3,%\n linefront=orange!75!black,%\n linetop=orange!50!black,%\n lineright=orange!25!black,%\n fillfront=orange!25!white,%\n filltop=orange!50!white,%\n fillright=orange!100!white,%\n emphedge=Y,%\n emphstyle=ultra thick,\n }\n \\tikzcuboid{%\n shiftx=28.6cm,%\n shifty=8cm,%\n scale=1.00,%\n rotation=0,%\n densityx=10000,%\n densityy=2,%\n densityz=2,%\n dimx=0,%\n dimy=3,%\n dimz=3,%\n linefront=purple!75!black,%\n linetop=purple!50!black,%\n lineright=purple!25!black,%\n fillfront=purple!25!white,%\n filltop=purple!50!white,%\n fillright=red!75!white,%\n emphedge=Y,%\n emphstyle=ultra thick,\n }\n % \\tikzcuboid{%\n % shiftx=27.1cm,%\n % shifty=10.1cm,%\n % scale=1.00,%\n % rotation=0,%\n % densityx=100,%\n % densityy=2,%\n % densityz=100,%\n % dimx=0,%\n % dimy=3,%\n % dimz=0,%\n % emphedge=Y,%\n % emphstyle=ultra thick,\n % }\n % \\tikzcuboid{%\n % shiftx=27.1cm,%\n % shifty=10.1cm,%\n % scale=1.00,%\n % rotation=180,%\n % densityx=100,%\n % densityy=100,%\n % densityz=2,%\n % dimx=0,%\n % dimy=0,%\n % dimz=3,%\n % emphedge=Y,%\n % emphstyle=ultra thick,\n % }\n \\tikzcuboid{%\n shiftx=26.8cm,%\n shifty=11.4cm,%\n scale=1.00,%\n rotation=0,%\n densityx=100,%\n densityy=2,%\n densityz=100,%\n dimx=0,%\n dimy=3,%\n dimz=0,%\n emphedge=Y,%\n emphstyle=ultra thick,\n }\n \\tikzcuboid{%\n shiftx=25.3cm,%\n shifty=12.9cm,%\n scale=1.00,%\n rotation=180,%\n densityx=100,%\n densityy=100,%\n densityz=2,%\n dimx=0,%\n dimy=0,%\n dimz=3,%\n emphedge=Y,%\n emphstyle=ultra thick,\n }\n % \\fill (27.1,10.1) circle[radius=2pt];\n \\node [font=\\fontsize{130}{100}\\fontfamily{phv}\\selectfont, anchor=east, text width=2cm, align=right, color=white!50!black] at (19.8,4.4) {\\textbf{\\emph{x}}};\n \\node [font=\\fontsize{130}{100}\\fontfamily{phv}\\selectfont, anchor=west, text width=10cm, align=left] at (20.3,4) {{array}};\n\\end{tikzpicture}\n\n\\end{document}\n"},{"id":2517,"name":"weather-climate.rst","nodeType":"TextFile","path":"doc","text":".. _weather-climate:\n\nWeather and climate data\n========================\n\n.. ipython:: python\n :suppress:\n\n import xarray as xr\n\n``xarray`` can leverage metadata that follows the `Climate and Forecast (CF) conventions`_ if present. Examples include automatic labelling of plots with descriptive names and units if proper metadata is present (see :ref:`plotting`) and support for non-standard calendars used in climate science through the ``cftime`` module (see :ref:`CFTimeIndex`). There are also a number of geosciences-focused projects that build on xarray (see :ref:`related-projects`).\n\n.. _Climate and Forecast (CF) conventions: http://cfconventions.org\n\n.. _metpy_accessor:\n\nCF-compliant coordinate variables\n---------------------------------\n\n`MetPy`_ adds a\t``metpy`` accessor that allows accessing coordinates with appropriate CF metadata using generic names ``x``, ``y``, ``vertical`` and ``time``. 
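The CF metadata that such tools rely on is ordinary attribute metadata attached to
xarray objects. A minimal, invented illustration (the data are random; only the
attribute names and values follow the CF conventions):

.. code-block:: python

    import numpy as np
    import xarray as xr

    # Coordinate attributes such as ``standard_name`` and ``units`` are what
    # CF-aware tools use to recognise latitude/longitude axes; xarray's own
    # plotting uses ``units`` (and ``long_name``) to label axes and colorbars.
    temp = xr.DataArray(
        np.random.randn(4, 3),
        dims=('lat', 'lon'),
        coords={
            'lat': ('lat', [10.0, 20.0, 30.0, 40.0],
                    {'standard_name': 'latitude', 'units': 'degrees_north'}),
            'lon': ('lon', [100.0, 110.0, 120.0],
                    {'standard_name': 'longitude', 'units': 'degrees_east'}),
        },
        attrs={'standard_name': 'air_temperature', 'units': 'K'},
        name='temperature',
    )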
There is also a `cartopy_crs` attribute that provides projection information, parsed from the appropriate CF metadata, as a `Cartopy`_ projection object. See `their documentation`_ for more information.\n\n.. _`MetPy`: https://unidata.github.io/MetPy/dev/index.html\n.. _`their documentation`:\thttps://unidata.github.io/MetPy/dev/tutorials/xarray_tutorial.html#coordinates\n.. _`Cartopy`: https://scitools.org.uk/cartopy/docs/latest/crs/projections.html\n\n.. _CFTimeIndex:\n\nNon-standard calendars and dates outside the Timestamp-valid range\n------------------------------------------------------------------\n\nThrough the standalone ``cftime`` library and a custom subclass of\n:py:class:`pandas.Index`, xarray supports a subset of the indexing\nfunctionality enabled through the standard :py:class:`pandas.DatetimeIndex` for\ndates from non-standard calendars commonly used in climate science or dates\nusing a standard calendar, but outside the `Timestamp-valid range`_\n(approximately between years 1678 and 2262).\n\n.. note::\n\n As of xarray version 0.11, by default, :py:class:`cftime.datetime` objects\n will be used to represent times (either in indexes, as a\n :py:class:`~xarray.CFTimeIndex`, or in data arrays with dtype object) if\n any of the following are true:\n\n - The dates are from a non-standard calendar\n - Any dates are outside the Timestamp-valid range.\n\n Otherwise pandas-compatible dates from a standard calendar will be\n represented with the ``np.datetime64[ns]`` data type, enabling the use of a\n :py:class:`pandas.DatetimeIndex` or arrays with dtype ``np.datetime64[ns]``\n and their full set of associated features.\n\nFor example, you can create a DataArray indexed by a time\ncoordinate with dates from a no-leap calendar and a\n:py:class:`~xarray.CFTimeIndex` will automatically be used:\n\n.. ipython:: python\n\n from itertools import product\n from cftime import DatetimeNoLeap\n dates = [DatetimeNoLeap(year, month, 1) for year, month in\n product(range(1, 3), range(1, 13))]\n da = xr.DataArray(np.arange(24), coords=[dates], dims=['time'], name='foo')\n\nxarray also includes a :py:func:`~xarray.cftime_range` function, which enables\ncreating a :py:class:`~xarray.CFTimeIndex` with regularly-spaced dates. For\ninstance, we can create the same dates and DataArray we created above using:\n\n.. ipython:: python\n\n dates = xr.cftime_range(start='0001', periods=24, freq='MS', calendar='noleap')\n da = xr.DataArray(np.arange(24), coords=[dates], dims=['time'], name='foo')\n\nWith :py:meth:`~xarray.CFTimeIndex.strftime` we can also easily generate formatted strings from\nthe datetime values of a :py:class:`~xarray.CFTimeIndex` directly or through the\n:py:meth:`~xarray.DataArray.dt` accessor for a :py:class:`~xarray.DataArray`\nusing the same formatting as the standard `datetime.strftime`_ convention .\n\n.. _datetime.strftime: https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior\n\n.. ipython:: python\n\n dates.strftime('%c')\n da['time'].dt.strftime('%Y%m%d')\n\nFor data indexed by a :py:class:`~xarray.CFTimeIndex` xarray currently supports:\n\n- `Partial datetime string indexing`_ using strictly `ISO 8601-format`_ partial\n datetime strings:\n\n.. ipython:: python\n\n da.sel(time='0001')\n da.sel(time=slice('0001-05', '0002-02'))\n\n- Access of basic datetime components via the ``dt`` accessor (in this case\n just \"year\", \"month\", \"day\", \"hour\", \"minute\", \"second\", \"microsecond\",\n \"season\", \"dayofyear\", and \"dayofweek\"):\n\n.. 
ipython:: python\n\n da.time.dt.year\n da.time.dt.month\n da.time.dt.season\n da.time.dt.dayofyear\n da.time.dt.dayofweek\n\n- Group-by operations based on datetime accessor attributes (e.g. by month of\n the year):\n\n.. ipython:: python\n\n da.groupby('time.month').sum()\n\n- Interpolation using :py:class:`cftime.datetime` objects:\n\n.. ipython:: python\n\n da.interp(time=[DatetimeNoLeap(1, 1, 15), DatetimeNoLeap(1, 2, 15)])\n\n- Interpolation using datetime strings:\n\n.. ipython:: python\n\n da.interp(time=['0001-01-15', '0001-02-15'])\n\n- Differentiation:\n\n.. ipython:: python\n\n da.differentiate('time')\n\n- Serialization:\n\n.. ipython:: python\n\n da.to_netcdf('example-no-leap.nc')\n xr.open_dataset('example-no-leap.nc')\n\n.. ipython:: python\n :suppress:\n\n import os\n os.remove('example-no-leap.nc')\n\n- And resampling along the time dimension for data indexed by a :py:class:`~xarray.CFTimeIndex`:\n\n.. ipython:: python\n\n da.resample(time='81T', closed='right', label='right', base=3).mean()\n\n.. note::\n\n\n For some use-cases it may still be useful to convert from\n a :py:class:`~xarray.CFTimeIndex` to a :py:class:`pandas.DatetimeIndex`,\n despite the difference in calendar types. The recommended way of doing this\n is to use the built-in :py:meth:`~xarray.CFTimeIndex.to_datetimeindex`\n method:\n\n .. ipython:: python\n :okwarning:\n\n modern_times = xr.cftime_range('2000', periods=24, freq='MS', calendar='noleap')\n da = xr.DataArray(range(24), [('time', modern_times)])\n da\n datetimeindex = da.indexes['time'].to_datetimeindex()\n da['time'] = datetimeindex\n\n However in this case one should use caution to only perform operations which\n do not depend on differences between dates (e.g. differentiation,\n interpolation, or upsampling with resample), as these could introduce subtle\n and silent errors due to the difference in calendar types between the dates\n encoded in your data and the dates stored in memory.\n\n.. _Timestamp-valid range: https://pandas.pydata.org/pandas-docs/stable/timeseries.html#timestamp-limitations\n.. _ISO 8601-format: https://en.wikipedia.org/wiki/ISO_8601\n.. _partial datetime string indexing: https://pandas.pydata.org/pandas-docs/stable/timeseries.html#partial-string-indexing\n"},{"fileName":"rolling.py","filePath":"xarray/core","id":2518,"nodeType":"File","text":"import functools\n\nimport numpy as np\n\nfrom . 
import dtypes, duck_array_ops, utils\nfrom .dask_array_ops import dask_rolling_wrapper\nfrom .ops import inject_coarsen_methods\nfrom .pycompat import dask_array_type\n\ntry:\n import bottleneck\nexcept ImportError:\n # use numpy methods instead\n bottleneck = None\n\n\n_ROLLING_REDUCE_DOCSTRING_TEMPLATE = \"\"\"\\\nReduce this object's data windows by applying `{name}` along its dimension.\n\nParameters\n----------\n**kwargs : dict\n Additional keyword arguments passed on to `{name}`.\n\nReturns\n-------\nreduced : same type as caller\n New object with `{name}` applied along its rolling dimnension.\n\"\"\"\n\n\nclass Rolling:\n \"\"\"A object that implements the moving window pattern.\n\n See Also\n --------\n Dataset.groupby\n DataArray.groupby\n Dataset.rolling\n DataArray.rolling\n \"\"\"\n\n __slots__ = (\"obj\", \"window\", \"min_periods\", \"center\", \"dim\")\n _attributes = (\"window\", \"min_periods\", \"center\", \"dim\")\n\n def __init__(self, obj, windows, min_periods=None, center=False):\n \"\"\"\n Moving window object.\n\n Parameters\n ----------\n obj : Dataset or DataArray\n Object to window.\n windows : A mapping from a dimension name to window size\n dim : str\n Name of the dimension to create the rolling iterator\n along (e.g., `time`).\n window : int\n Size of the moving window.\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA). The default, None, is equivalent to\n setting min_periods equal to the size of the window.\n center : boolean, default False\n Set the labels at the center of the window.\n\n Returns\n -------\n rolling : type of input argument\n \"\"\"\n if len(windows) != 1:\n raise ValueError(\"exactly one dim/window should be provided\")\n\n dim, window = next(iter(windows.items()))\n\n if window <= 0:\n raise ValueError(\"window must be > 0\")\n\n self.obj = obj\n\n # attributes\n self.window = window\n if min_periods is not None and min_periods <= 0:\n raise ValueError(\"min_periods must be greater than zero or None\")\n self.min_periods = min_periods\n\n self.center = center\n self.dim = dim\n\n @property\n def _min_periods(self):\n return self.min_periods if self.min_periods is not None else self.window\n\n def __repr__(self):\n \"\"\"provide a nice str repr of our rolling object\"\"\"\n\n attrs = [\n \"{k}->{v}\".format(k=k, v=getattr(self, k))\n for k in self._attributes\n if getattr(self, k, None) is not None\n ]\n return \"{klass} [{attrs}]\".format(\n klass=self.__class__.__name__, attrs=\",\".join(attrs)\n )\n\n def __len__(self):\n return self.obj.sizes[self.dim]\n\n def _reduce_method(name):\n array_agg_func = getattr(duck_array_ops, name)\n bottleneck_move_func = getattr(bottleneck, \"move_\" + name, None)\n\n def method(self, **kwargs):\n return self._numpy_or_bottleneck_reduce(\n array_agg_func, bottleneck_move_func, **kwargs\n )\n\n method.__name__ = name\n method.__doc__ = _ROLLING_REDUCE_DOCSTRING_TEMPLATE.format(name=name)\n return method\n\n argmax = _reduce_method(\"argmax\")\n argmin = _reduce_method(\"argmin\")\n max = _reduce_method(\"max\")\n min = _reduce_method(\"min\")\n mean = _reduce_method(\"mean\")\n prod = _reduce_method(\"prod\")\n sum = _reduce_method(\"sum\")\n std = _reduce_method(\"std\")\n var = _reduce_method(\"var\")\n median = _reduce_method(\"median\")\n\n def count(self):\n rolling_count = self._counts()\n enough_periods = rolling_count >= self._min_periods\n return rolling_count.where(enough_periods)\n\n count.__doc__ = 
_ROLLING_REDUCE_DOCSTRING_TEMPLATE.format(name=\"count\")\n\n\nclass DataArrayRolling(Rolling):\n __slots__ = (\"window_labels\",)\n\n def __init__(self, obj, windows, min_periods=None, center=False):\n \"\"\"\n Moving window object for DataArray.\n You should use DataArray.rolling() method to construct this object\n instead of the class constructor.\n\n Parameters\n ----------\n obj : DataArray\n Object to window.\n windows : A mapping from a dimension name to window size\n dim : str\n Name of the dimension to create the rolling iterator\n along (e.g., `time`).\n window : int\n Size of the moving window.\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA). The default, None, is equivalent to\n setting min_periods equal to the size of the window.\n center : boolean, default False\n Set the labels at the center of the window.\n\n Returns\n -------\n rolling : type of input argument\n\n See Also\n --------\n DataArray.rolling\n DataArray.groupby\n Dataset.rolling\n Dataset.groupby\n \"\"\"\n super().__init__(obj, windows, min_periods=min_periods, center=center)\n\n self.window_labels = self.obj[self.dim]\n\n def __iter__(self):\n stops = np.arange(1, len(self.window_labels) + 1)\n starts = stops - int(self.window)\n starts[: int(self.window)] = 0\n for (label, start, stop) in zip(self.window_labels, starts, stops):\n window = self.obj.isel(**{self.dim: slice(start, stop)})\n\n counts = window.count(dim=self.dim)\n window = window.where(counts >= self._min_periods)\n\n yield (label, window)\n\n def construct(self, window_dim, stride=1, fill_value=dtypes.NA):\n \"\"\"\n Convert this rolling object to xr.DataArray,\n where the window dimension is stacked as a new dimension\n\n Parameters\n ----------\n window_dim: str\n New name of the window dimension.\n stride: integer, optional\n Size of stride for the rolling window.\n fill_value: optional. Default dtypes.NA\n Filling value to match the dimension size.\n\n Returns\n -------\n DataArray that is a view of the original array. 
The returned array is\n not writeable.\n\n Examples\n --------\n >>> da = DataArray(np.arange(8).reshape(2, 4), dims=('a', 'b'))\n >>>\n >>> rolling = da.rolling(b=3)\n >>> rolling.construct('window_dim')\n \n array([[[np.nan, np.nan, 0], [np.nan, 0, 1], [0, 1, 2], [1, 2, 3]],\n [[np.nan, np.nan, 4], [np.nan, 4, 5], [4, 5, 6], [5, 6, 7]]])\n Dimensions without coordinates: a, b, window_dim\n >>>\n >>> rolling = da.rolling(b=3, center=True)\n >>> rolling.construct('window_dim')\n \n array([[[np.nan, 0, 1], [0, 1, 2], [1, 2, 3], [2, 3, np.nan]],\n [[np.nan, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, np.nan]]])\n Dimensions without coordinates: a, b, window_dim\n \"\"\"\n\n from .dataarray import DataArray\n\n window = self.obj.variable.rolling_window(\n self.dim, self.window, window_dim, self.center, fill_value=fill_value\n )\n result = DataArray(\n window, dims=self.obj.dims + (window_dim,), coords=self.obj.coords\n )\n return result.isel(**{self.dim: slice(None, None, stride)})\n\n def reduce(self, func, **kwargs):\n \"\"\"Reduce the items in this group by applying `func` along some\n dimension(s).\n\n Parameters\n ----------\n func : function\n Function which can be called in the form\n `func(x, **kwargs)` to return the result of collapsing an\n np.ndarray over an the rolling dimension.\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : DataArray\n Array with summarized data.\n\n Examples\n --------\n >>> da = DataArray(np.arange(8).reshape(2, 4), dims=('a', 'b'))\n >>>\n >>> rolling = da.rolling(b=3)\n >>> rolling.construct('window_dim')\n \n array([[[np.nan, np.nan, 0], [np.nan, 0, 1], [0, 1, 2], [1, 2, 3]],\n [[np.nan, np.nan, 4], [np.nan, 4, 5], [4, 5, 6], [5, 6, 7]]])\n Dimensions without coordinates: a, b, window_dim\n >>>\n >>> rolling.reduce(np.sum)\n \n array([[nan, nan, 3., 6.],\n [nan, nan, 15., 18.]])\n Dimensions without coordinates: a, b\n >>>\n >>> rolling = da.rolling(b=3, min_periods=1)\n >>> rolling.reduce(np.nansum)\n \n array([[ 0., 1., 3., 6.],\n [ 4., 9., 15., 18.]])\n \"\"\"\n rolling_dim = utils.get_temp_dimname(self.obj.dims, \"_rolling_dim\")\n windows = self.construct(rolling_dim)\n result = windows.reduce(func, dim=rolling_dim, **kwargs)\n\n # Find valid windows based on count.\n counts = self._counts()\n return result.where(counts >= self._min_periods)\n\n def _counts(self):\n \"\"\" Number of non-nan entries in each rolling window. 
\"\"\"\n\n rolling_dim = utils.get_temp_dimname(self.obj.dims, \"_rolling_dim\")\n # We use False as the fill_value instead of np.nan, since boolean\n # array is faster to be reduced than object array.\n # The use of skipna==False is also faster since it does not need to\n # copy the strided array.\n counts = (\n self.obj.notnull()\n .rolling(center=self.center, **{self.dim: self.window})\n .construct(rolling_dim, fill_value=False)\n .sum(dim=rolling_dim, skipna=False)\n )\n return counts\n\n def _bottleneck_reduce(self, func, **kwargs):\n from .dataarray import DataArray\n\n # bottleneck doesn't allow min_count to be 0, although it should\n # work the same as if min_count = 1\n if self.min_periods is not None and self.min_periods == 0:\n min_count = 1\n else:\n min_count = self.min_periods\n\n axis = self.obj.get_axis_num(self.dim)\n\n padded = self.obj.variable\n if self.center:\n if isinstance(padded.data, dask_array_type):\n # Workaround to make the padded chunk size is larger than\n # self.window-1\n shift = -(self.window + 1) // 2\n offset = (self.window - 1) // 2\n valid = (slice(None),) * axis + (\n slice(offset, offset + self.obj.shape[axis]),\n )\n else:\n shift = (-self.window // 2) + 1\n valid = (slice(None),) * axis + (slice(-shift, None),)\n padded = padded.pad_with_fill_value({self.dim: (0, -shift)})\n\n if isinstance(padded.data, dask_array_type):\n raise AssertionError(\"should not be reachable\")\n values = dask_rolling_wrapper(\n func, padded.data, window=self.window, min_count=min_count, axis=axis\n )\n else:\n values = func(\n padded.data, window=self.window, min_count=min_count, axis=axis\n )\n\n if self.center:\n values = values[valid]\n result = DataArray(values, self.obj.coords)\n\n return result\n\n def _numpy_or_bottleneck_reduce(\n self, array_agg_func, bottleneck_move_func, **kwargs\n ):\n if bottleneck_move_func is not None and not isinstance(\n self.obj.data, dask_array_type\n ):\n # TODO: renable bottleneck with dask after the issues\n # underlying https://github.com/pydata/xarray/issues/2940 are\n # fixed.\n return self._bottleneck_reduce(bottleneck_move_func, **kwargs)\n else:\n return self.reduce(array_agg_func, **kwargs)\n\n\nclass DatasetRolling(Rolling):\n __slots__ = (\"rollings\",)\n\n def __init__(self, obj, windows, min_periods=None, center=False):\n \"\"\"\n Moving window object for Dataset.\n You should use Dataset.rolling() method to construct this object\n instead of the class constructor.\n\n Parameters\n ----------\n obj : Dataset\n Object to window.\n windows : A mapping from a dimension name to window size\n dim : str\n Name of the dimension to create the rolling iterator\n along (e.g., `time`).\n window : int\n Size of the moving window.\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA). 
The default, None, is equivalent to\n setting min_periods equal to the size of the window.\n center : boolean, default False\n Set the labels at the center of the window.\n\n Returns\n -------\n rolling : type of input argument\n\n See Also\n --------\n Dataset.rolling\n DataArray.rolling\n Dataset.groupby\n DataArray.groupby\n \"\"\"\n super().__init__(obj, windows, min_periods, center)\n if self.dim not in self.obj.dims:\n raise KeyError(self.dim)\n # Keep each Rolling object as a dictionary\n self.rollings = {}\n for key, da in self.obj.data_vars.items():\n # keeps rollings only for the dataset depending on slf.dim\n if self.dim in da.dims:\n self.rollings[key] = DataArrayRolling(da, windows, min_periods, center)\n\n def _dataset_implementation(self, func, **kwargs):\n from .dataset import Dataset\n\n reduced = {}\n for key, da in self.obj.data_vars.items():\n if self.dim in da.dims:\n reduced[key] = func(self.rollings[key], **kwargs)\n else:\n reduced[key] = self.obj[key]\n return Dataset(reduced, coords=self.obj.coords)\n\n def reduce(self, func, **kwargs):\n \"\"\"Reduce the items in this group by applying `func` along some\n dimension(s).\n\n Parameters\n ----------\n func : function\n Function which can be called in the form\n `func(x, **kwargs)` to return the result of collapsing an\n np.ndarray over an the rolling dimension.\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : DataArray\n Array with summarized data.\n \"\"\"\n return self._dataset_implementation(\n functools.partial(DataArrayRolling.reduce, func=func), **kwargs\n )\n\n def _counts(self):\n return self._dataset_implementation(DataArrayRolling._counts)\n\n def _numpy_or_bottleneck_reduce(\n self, array_agg_func, bottleneck_move_func, **kwargs\n ):\n return self._dataset_implementation(\n functools.partial(\n DataArrayRolling._numpy_or_bottleneck_reduce,\n array_agg_func=array_agg_func,\n bottleneck_move_func=bottleneck_move_func,\n ),\n **kwargs\n )\n\n def construct(self, window_dim, stride=1, fill_value=dtypes.NA):\n \"\"\"\n Convert this rolling object to xr.Dataset,\n where the window dimension is stacked as a new dimension\n\n Parameters\n ----------\n window_dim: str\n New name of the window dimension.\n stride: integer, optional\n size of stride for the rolling window.\n fill_value: optional. 
Default dtypes.NA\n Filling value to match the dimension size.\n\n Returns\n -------\n Dataset with variables converted from rolling object.\n \"\"\"\n\n from .dataset import Dataset\n\n dataset = {}\n for key, da in self.obj.data_vars.items():\n if self.dim in da.dims:\n dataset[key] = self.rollings[key].construct(\n window_dim, fill_value=fill_value\n )\n else:\n dataset[key] = da\n return Dataset(dataset, coords=self.obj.coords).isel(\n **{self.dim: slice(None, None, stride)}\n )\n\n\nclass Coarsen:\n \"\"\"A object that implements the coarsen.\n\n See Also\n --------\n Dataset.coarsen\n DataArray.coarsen\n \"\"\"\n\n __slots__ = (\"obj\", \"boundary\", \"coord_func\", \"windows\", \"side\", \"trim_excess\")\n _attributes = (\"windows\", \"side\", \"trim_excess\")\n\n def __init__(self, obj, windows, boundary, side, coord_func):\n \"\"\"\n Moving window object.\n\n Parameters\n ----------\n obj : Dataset or DataArray\n Object to window.\n windows : A mapping from a dimension name to window size\n dim : str\n Name of the dimension to create the rolling iterator\n along (e.g., `time`).\n window : int\n Size of the moving window.\n boundary : 'exact' | 'trim' | 'pad'\n If 'exact', a ValueError will be raised if dimension size is not a\n multiple of window size. If 'trim', the excess indexes are trimed.\n If 'pad', NA will be padded.\n side : 'left' or 'right' or mapping from dimension to 'left' or 'right'\n coord_func: mapping from coordinate name to func.\n\n Returns\n -------\n coarsen\n \"\"\"\n self.obj = obj\n self.windows = windows\n self.side = side\n self.boundary = boundary\n\n if not utils.is_dict_like(coord_func):\n coord_func = {d: coord_func for d in self.obj.dims}\n for c in self.obj.coords:\n if c not in coord_func:\n coord_func[c] = duck_array_ops.mean\n self.coord_func = coord_func\n\n def __repr__(self):\n \"\"\"provide a nice str repr of our coarsen object\"\"\"\n\n attrs = [\n \"{k}->{v}\".format(k=k, v=getattr(self, k))\n for k in self._attributes\n if getattr(self, k, None) is not None\n ]\n return \"{klass} [{attrs}]\".format(\n klass=self.__class__.__name__, attrs=\",\".join(attrs)\n )\n\n\nclass DataArrayCoarsen(Coarsen):\n __slots__ = ()\n\n @classmethod\n def _reduce_method(cls, func):\n \"\"\"\n Return a wrapped function for injecting numpy methods.\n see ops.inject_coarsen_methods\n \"\"\"\n\n def wrapped_func(self, **kwargs):\n from .dataarray import DataArray\n\n reduced = self.obj.variable.coarsen(\n self.windows, func, self.boundary, self.side\n )\n coords = {}\n for c, v in self.obj.coords.items():\n if c == self.obj.name:\n coords[c] = reduced\n else:\n if any(d in self.windows for d in v.dims):\n coords[c] = v.variable.coarsen(\n self.windows, self.coord_func[c], self.boundary, self.side\n )\n else:\n coords[c] = v\n return DataArray(reduced, dims=self.obj.dims, coords=coords)\n\n return wrapped_func\n\n\nclass DatasetCoarsen(Coarsen):\n __slots__ = ()\n\n @classmethod\n def _reduce_method(cls, func):\n \"\"\"\n Return a wrapped function for injecting numpy methods.\n see ops.inject_coarsen_methods\n \"\"\"\n\n def wrapped_func(self, **kwargs):\n from .dataset import Dataset\n\n reduced = {}\n for key, da in self.obj.data_vars.items():\n reduced[key] = da.variable.coarsen(\n self.windows, func, self.boundary, self.side\n )\n\n coords = {}\n for c, v in self.obj.coords.items():\n if any(d in self.windows for d in v.dims):\n coords[c] = v.variable.coarsen(\n self.windows, self.coord_func[c], self.boundary, self.side\n )\n else:\n coords[c] = v.variable\n 
return Dataset(reduced, coords=coords)\n\n return wrapped_func\n\n\ninject_coarsen_methods(DataArrayCoarsen)\ninject_coarsen_methods(DatasetCoarsen)\n"},{"className":"Rolling","col":0,"comment":"A object that implements the moving window pattern.\n\n See Also\n --------\n Dataset.groupby\n DataArray.groupby\n Dataset.rolling\n DataArray.rolling\n ","endLoc":138,"id":2519,"nodeType":"Class","startLoc":32,"text":"class Rolling:\n \"\"\"A object that implements the moving window pattern.\n\n See Also\n --------\n Dataset.groupby\n DataArray.groupby\n Dataset.rolling\n DataArray.rolling\n \"\"\"\n\n __slots__ = (\"obj\", \"window\", \"min_periods\", \"center\", \"dim\")\n _attributes = (\"window\", \"min_periods\", \"center\", \"dim\")\n\n def __init__(self, obj, windows, min_periods=None, center=False):\n \"\"\"\n Moving window object.\n\n Parameters\n ----------\n obj : Dataset or DataArray\n Object to window.\n windows : A mapping from a dimension name to window size\n dim : str\n Name of the dimension to create the rolling iterator\n along (e.g., `time`).\n window : int\n Size of the moving window.\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA). The default, None, is equivalent to\n setting min_periods equal to the size of the window.\n center : boolean, default False\n Set the labels at the center of the window.\n\n Returns\n -------\n rolling : type of input argument\n \"\"\"\n if len(windows) != 1:\n raise ValueError(\"exactly one dim/window should be provided\")\n\n dim, window = next(iter(windows.items()))\n\n if window <= 0:\n raise ValueError(\"window must be > 0\")\n\n self.obj = obj\n\n # attributes\n self.window = window\n if min_periods is not None and min_periods <= 0:\n raise ValueError(\"min_periods must be greater than zero or None\")\n self.min_periods = min_periods\n\n self.center = center\n self.dim = dim\n\n @property\n def _min_periods(self):\n return self.min_periods if self.min_periods is not None else self.window\n\n def __repr__(self):\n \"\"\"provide a nice str repr of our rolling object\"\"\"\n\n attrs = [\n \"{k}->{v}\".format(k=k, v=getattr(self, k))\n for k in self._attributes\n if getattr(self, k, None) is not None\n ]\n return \"{klass} [{attrs}]\".format(\n klass=self.__class__.__name__, attrs=\",\".join(attrs)\n )\n\n def __len__(self):\n return self.obj.sizes[self.dim]\n\n def _reduce_method(name):\n array_agg_func = getattr(duck_array_ops, name)\n bottleneck_move_func = getattr(bottleneck, \"move_\" + name, None)\n\n def method(self, **kwargs):\n return self._numpy_or_bottleneck_reduce(\n array_agg_func, bottleneck_move_func, **kwargs\n )\n\n method.__name__ = name\n method.__doc__ = _ROLLING_REDUCE_DOCSTRING_TEMPLATE.format(name=name)\n return method\n\n argmax = _reduce_method(\"argmax\")\n argmin = _reduce_method(\"argmin\")\n max = _reduce_method(\"max\")\n min = _reduce_method(\"min\")\n mean = _reduce_method(\"mean\")\n prod = _reduce_method(\"prod\")\n sum = _reduce_method(\"sum\")\n std = _reduce_method(\"std\")\n var = _reduce_method(\"var\")\n median = _reduce_method(\"median\")\n\n def count(self):\n rolling_count = self._counts()\n enough_periods = rolling_count >= self._min_periods\n return rolling_count.where(enough_periods)\n\n count.__doc__ = _ROLLING_REDUCE_DOCSTRING_TEMPLATE.format(name=\"count\")"},{"col":4,"comment":"\n Moving window object.\n\n Parameters\n ----------\n obj : Dataset or DataArray\n Object to window.\n windows : A mapping from a dimension 
name to window size\n dim : str\n Name of the dimension to create the rolling iterator\n along (e.g., `time`).\n window : int\n Size of the moving window.\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA). The default, None, is equivalent to\n setting min_periods equal to the size of the window.\n center : boolean, default False\n Set the labels at the center of the window.\n\n Returns\n -------\n rolling : type of input argument\n ","endLoc":88,"header":"def __init__(self, obj, windows, min_periods=None, center=False)","id":2520,"name":"__init__","nodeType":"Function","startLoc":46,"text":"def __init__(self, obj, windows, min_periods=None, center=False):\n \"\"\"\n Moving window object.\n\n Parameters\n ----------\n obj : Dataset or DataArray\n Object to window.\n windows : A mapping from a dimension name to window size\n dim : str\n Name of the dimension to create the rolling iterator\n along (e.g., `time`).\n window : int\n Size of the moving window.\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA). The default, None, is equivalent to\n setting min_periods equal to the size of the window.\n center : boolean, default False\n Set the labels at the center of the window.\n\n Returns\n -------\n rolling : type of input argument\n \"\"\"\n if len(windows) != 1:\n raise ValueError(\"exactly one dim/window should be provided\")\n\n dim, window = next(iter(windows.items()))\n\n if window <= 0:\n raise ValueError(\"window must be > 0\")\n\n self.obj = obj\n\n # attributes\n self.window = window\n if min_periods is not None and min_periods <= 0:\n raise ValueError(\"min_periods must be greater than zero or None\")\n self.min_periods = min_periods\n\n self.center = center\n self.dim = dim"},{"col":4,"comment":"null","endLoc":295,"header":"def decrement(self, name)","id":2521,"name":"decrement","nodeType":"Function","startLoc":288,"text":"def decrement(self, name):\n with self._lock:\n count = self._counts[name] - 1\n if count:\n self._counts[name] = count\n else:\n del self._counts[name]\n return 
count"},{"attributeType":"DataArray","col":8,"comment":"null","endLoc":69,"id":2522,"name":"da_long","nodeType":"Attribute","startLoc":69,"text":"self.da_long"},{"attributeType":"Dataset","col":8,"comment":"null","endLoc":68,"id":2523,"name":"ds","nodeType":"Attribute","startLoc":68,"text":"self.ds"},{"attributeType":"null","col":16,"comment":"null","endLoc":1,"id":2524,"name":"np","nodeType":"Attribute","startLoc":1,"text":"np"},{"attributeType":"null","col":17,"comment":"null","endLoc":2,"id":2525,"name":"pd","nodeType":"Attribute","startLoc":2,"text":"pd"},{"attributeType":"null","col":17,"comment":"null","endLoc":4,"id":2526,"name":"xr","nodeType":"Attribute","startLoc":4,"text":"xr"},{"attributeType":"null","col":0,"comment":"null","endLoc":8,"id":2527,"name":"nx","nodeType":"Attribute","startLoc":8,"text":"nx"},{"attributeType":"null","col":0,"comment":"null","endLoc":9,"id":2528,"name":"long_nx","nodeType":"Attribute","startLoc":9,"text":"long_nx"},{"attributeType":"null","col":0,"comment":"null","endLoc":10,"id":2529,"name":"ny","nodeType":"Attribute","startLoc":10,"text":"ny"},{"attributeType":"null","col":0,"comment":"null","endLoc":11,"id":2530,"name":"nt","nodeType":"Attribute","startLoc":11,"text":"nt"},{"attributeType":"null","col":0,"comment":"null","endLoc":12,"id":2531,"name":"window","nodeType":"Attribute","startLoc":12,"text":"window"},{"attributeType":"null","col":0,"comment":"null","endLoc":14,"id":2532,"name":"randn_xy","nodeType":"Attribute","startLoc":14,"text":"randn_xy"},{"attributeType":"null","col":8,"comment":"null","endLoc":280,"id":2533,"name":"_counts","nodeType":"Attribute","startLoc":280,"text":"self._counts"},{"attributeType":"null","col":8,"comment":"null","endLoc":281,"id":2534,"name":"_lock","nodeType":"Attribute","startLoc":281,"text":"self._lock"},{"col":4,"comment":"\n Remove leading and trailing characters.\n\n Strip whitespaces (including newlines) or a set of specified characters\n from each string in the array from the right side.\n\n Parameters\n ----------\n to_strip : str or None, default None\n Specifying the set of characters to be removed.\n All combinations of this set of characters will be stripped.\n If None then whitespaces are removed.\n\n Returns\n -------\n stripped : same type as values\n ","endLoc":652,"header":"def rstrip(self, to_strip=None)","id":2535,"name":"rstrip","nodeType":"Function","startLoc":634,"text":"def rstrip(self, to_strip=None):\n \"\"\"\n Remove leading and trailing characters.\n\n Strip whitespaces (including newlines) or a set of specified characters\n from each string in the array from the right side.\n\n Parameters\n ----------\n to_strip : str or None, default None\n Specifying the set of characters to be removed.\n All combinations of this set of characters will be stripped.\n If None then whitespaces are removed.\n\n Returns\n -------\n stripped : same type as values\n \"\"\"\n return self.strip(to_strip, side=\"right\")"},{"col":4,"comment":"null","endLoc":92,"header":"@property\n def _min_periods(self)","id":2536,"name":"_min_periods","nodeType":"Function","startLoc":90,"text":"@property\n def _min_periods(self):\n return self.min_periods if self.min_periods is not None else self.window"},{"col":4,"comment":"provide a nice str repr of our rolling object","endLoc":104,"header":"def __repr__(self)","id":2537,"name":"__repr__","nodeType":"Function","startLoc":94,"text":"def __repr__(self):\n \"\"\"provide a nice str repr of our rolling object\"\"\"\n\n attrs = [\n \"{k}->{v}\".format(k=k, v=getattr(self, 
k))\n for k in self._attributes\n if getattr(self, k, None) is not None\n ]\n return \"{klass} [{attrs}]\".format(\n klass=self.__class__.__name__, attrs=\",\".join(attrs)\n )"},{"col":4,"comment":"null","endLoc":107,"header":"def __len__(self)","id":2538,"name":"__len__","nodeType":"Function","startLoc":106,"text":"def __len__(self):\n return self.obj.sizes[self.dim]"},{"col":4,"comment":"null","endLoc":120,"header":"def _reduce_method(name)","id":2539,"name":"_reduce_method","nodeType":"Function","startLoc":109,"text":"def _reduce_method(name):\n array_agg_func = getattr(duck_array_ops, name)\n bottleneck_move_func = getattr(bottleneck, \"move_\" + name, None)\n\n def method(self, **kwargs):\n return self._numpy_or_bottleneck_reduce(\n array_agg_func, bottleneck_move_func, **kwargs\n )\n\n method.__name__ = name\n method.__doc__ = _ROLLING_REDUCE_DOCSTRING_TEMPLATE.format(name=name)\n return method"},{"className":"_HashedSequence","col":0,"comment":"Speedup repeated look-ups by caching hash values.\n\n Based on what Python uses internally in functools.lru_cache.\n\n Python doesn't perform this optimization automatically:\n https://bugs.python.org/issue1462796\n ","endLoc":312,"id":2540,"nodeType":"Class","startLoc":298,"text":"class _HashedSequence(list):\n \"\"\"Speedup repeated look-ups by caching hash values.\n\n Based on what Python uses internally in functools.lru_cache.\n\n Python doesn't perform this optimization automatically:\n https://bugs.python.org/issue1462796\n \"\"\"\n\n def __init__(self, tuple_value):\n self[:] = tuple_value\n self.hashvalue = hash(tuple_value)\n\n def __hash__(self):\n return self.hashvalue"},{"col":4,"comment":"\n Wrap long strings in the array to be formatted in paragraphs with\n length less than a given width.\n\n This method has the same keyword parameters and defaults as\n :class:`textwrap.TextWrapper`.\n\n Parameters\n ----------\n width : int\n Maximum line-width\n expand_tabs : bool, optional\n If true, tab characters will be expanded to spaces (default: True)\n replace_whitespace : bool, optional\n If true, each whitespace character (as defined by\n string.whitespace) remaining after tab expansion will be replaced\n by a single space (default: True)\n drop_whitespace : bool, optional\n If true, whitespace that, after wrapping, happens to end up at the\n beginning or end of a line is dropped (default: True)\n break_long_words : bool, optional\n If true, then words longer than width will be broken in order to\n ensure that no lines are longer than width. If it is false, long\n words will not be broken, and some lines may be longer than width.\n (default: True)\n break_on_hyphens : bool, optional\n If true, wrapping will occur preferably on whitespace and right\n after hyphens in compound words, as it is customary in English. If\n false, only whitespaces will be considered as potentially good\n places for line breaks, but you need to set break_long_words to\n false if you want truly insecable words. 
(default: True)\n\n Returns\n -------\n wrapped : same type as values\n ","endLoc":693,"header":"def wrap(self, width, **kwargs)","id":2541,"name":"wrap","nodeType":"Function","startLoc":654,"text":"def wrap(self, width, **kwargs):\n \"\"\"\n Wrap long strings in the array to be formatted in paragraphs with\n length less than a given width.\n\n This method has the same keyword parameters and defaults as\n :class:`textwrap.TextWrapper`.\n\n Parameters\n ----------\n width : int\n Maximum line-width\n expand_tabs : bool, optional\n If true, tab characters will be expanded to spaces (default: True)\n replace_whitespace : bool, optional\n If true, each whitespace character (as defined by\n string.whitespace) remaining after tab expansion will be replaced\n by a single space (default: True)\n drop_whitespace : bool, optional\n If true, whitespace that, after wrapping, happens to end up at the\n beginning or end of a line is dropped (default: True)\n break_long_words : bool, optional\n If true, then words longer than width will be broken in order to\n ensure that no lines are longer than width. If it is false, long\n words will not be broken, and some lines may be longer than width.\n (default: True)\n break_on_hyphens : bool, optional\n If true, wrapping will occur preferably on whitespace and right\n after hyphens in compound words, as it is customary in English. If\n false, only whitespaces will be considered as potentially good\n places for line breaks, but you need to set break_long_words to\n false if you want truly insecable words. (default: True)\n\n Returns\n -------\n wrapped : same type as values\n \"\"\"\n tw = textwrap.TextWrapper(width=width)\n f = lambda x: \"\\n\".join(tw.wrap(x))\n return self._apply(f)"},{"col":4,"comment":"null","endLoc":312,"header":"def __hash__(self)","id":2542,"name":"__hash__","nodeType":"Function","startLoc":311,"text":"def __hash__(self):\n return self.hashvalue"},{"attributeType":"null","col":8,"comment":"null","endLoc":309,"id":2543,"name":"hashvalue","nodeType":"Attribute","startLoc":309,"text":"self.hashvalue"},{"col":4,"comment":"null","endLoc":136,"header":"def count(self)","id":2544,"name":"count","nodeType":"Function","startLoc":133,"text":"def count(self):\n rolling_count = self._counts()\n enough_periods = rolling_count >= self._min_periods\n return rolling_count.where(enough_periods)"},{"col":12,"endLoc":692,"id":2545,"nodeType":"Lambda","startLoc":692,"text":"lambda x: \"\\n\".join(tw.wrap(x))"},{"attributeType":"LRUCache","col":0,"comment":"null","endLoc":13,"id":2546,"name":"FILE_CACHE","nodeType":"Attribute","startLoc":13,"text":"FILE_CACHE"},{"attributeType":"null","col":4,"comment":"null","endLoc":43,"id":2547,"name":"__slots__","nodeType":"Attribute","startLoc":43,"text":"__slots__"},{"attributeType":"null","col":4,"comment":"null","endLoc":44,"id":2548,"name":"_attributes","nodeType":"Attribute","startLoc":44,"text":"_attributes"},{"col":4,"comment":"Check if the given date is in the set of possible dates created\n using a length-one version of this offset class.","endLoc":378,"header":"def onOffset(self, date)","id":2549,"name":"onOffset","nodeType":"Function","startLoc":374,"text":"def onOffset(self, date):\n \"\"\"Check if the given date is in the set of possible dates created\n using a length-one version of this offset class.\"\"\"\n mod_month = (date.month - self.month) % 3\n return mod_month == 0 and date.day == 
self._get_offset_day(date)"},{"attributeType":"function","col":4,"comment":"null","endLoc":122,"id":2550,"name":"argmax","nodeType":"Attribute","startLoc":122,"text":"argmax"},{"attributeType":"function","col":4,"comment":"null","endLoc":123,"id":2551,"name":"argmin","nodeType":"Attribute","startLoc":123,"text":"argmin"},{"col":4,"comment":"\n Map all characters in the string through the given mapping table.\n\n Parameters\n ----------\n table : dict\n A a mapping of Unicode ordinals to Unicode ordinals, strings,\n or None. Unmapped characters are left untouched. Characters mapped\n to None are deleted. :meth:`str.maketrans` is a helper function for\n making translation tables.\n\n Returns\n -------\n translated : same type as values\n ","endLoc":712,"header":"def translate(self, table)","id":2552,"name":"translate","nodeType":"Function","startLoc":695,"text":"def translate(self, table):\n \"\"\"\n Map all characters in the string through the given mapping table.\n\n Parameters\n ----------\n table : dict\n A a mapping of Unicode ordinals to Unicode ordinals, strings,\n or None. Unmapped characters are left untouched. Characters mapped\n to None are deleted. :meth:`str.maketrans` is a helper function for\n making translation tables.\n\n Returns\n -------\n translated : same type as values\n \"\"\"\n f = lambda x: x.translate(table)\n return self._apply(f)"},{"col":12,"endLoc":711,"id":2553,"nodeType":"Lambda","startLoc":711,"text":"lambda x: x.translate(table)"},{"attributeType":"function","col":4,"comment":"null","endLoc":124,"id":2554,"name":"max","nodeType":"Attribute","startLoc":124,"text":"max"},{"col":4,"comment":"\n Duplicate each string in the array.\n\n Parameters\n ----------\n repeats : int\n Number of repetitions.\n\n Returns\n -------\n repeated : same type as values\n Array of repeated string objects.\n ","endLoc":729,"header":"def repeat(self, repeats)","id":2555,"name":"repeat","nodeType":"Function","startLoc":714,"text":"def repeat(self, repeats):\n \"\"\"\n Duplicate each string in the array.\n\n Parameters\n ----------\n repeats : int\n Number of repetitions.\n\n Returns\n -------\n repeated : same type as values\n Array of repeated string objects.\n \"\"\"\n f = lambda x: repeats * x\n return self._apply(f)"},{"col":12,"endLoc":728,"id":2556,"nodeType":"Lambda","startLoc":728,"text":"lambda x: repeats * x"},{"attributeType":"function","col":4,"comment":"null","endLoc":125,"id":2557,"name":"min","nodeType":"Attribute","startLoc":125,"text":"min"},{"col":4,"comment":"\n Return lowest or highest indexes in each strings in the array\n where the substring is fully contained between [start:end].\n Return -1 on failure.\n\n Parameters\n ----------\n sub : str\n Substring being searched\n start : int\n Left edge index\n end : int\n Right edge index\n side : {'left', 'right'}, default 'left'\n Starting side for search.\n\n Returns\n -------\n found : array of integer values\n ","endLoc":766,"header":"def find(self, sub, start=0, end=None, side=\"left\")","id":2558,"name":"find","nodeType":"Function","startLoc":731,"text":"def find(self, sub, start=0, end=None, side=\"left\"):\n \"\"\"\n Return lowest or highest indexes in each strings in the array\n where the substring is fully contained between [start:end].\n Return -1 on failure.\n\n Parameters\n ----------\n sub : str\n Substring being searched\n start : int\n Left edge index\n end : int\n Right edge index\n side : {'left', 'right'}, default 'left'\n Starting side for search.\n\n Returns\n -------\n found : array of integer 
values\n \"\"\"\n sub = self._obj.dtype.type(sub)\n\n if side == \"left\":\n method = \"find\"\n elif side == \"right\":\n method = \"rfind\"\n else: # pragma: no cover\n raise ValueError(\"Invalid side\")\n\n if end is None:\n f = lambda x: getattr(x, method)(sub, start)\n else:\n f = lambda x: getattr(x, method)(sub, start, end)\n\n return self._apply(f, dtype=int)"},{"attributeType":"function","col":4,"comment":"null","endLoc":126,"id":2559,"name":"mean","nodeType":"Attribute","startLoc":126,"text":"mean"},{"attributeType":"function","col":4,"comment":"null","endLoc":127,"id":2560,"name":"prod","nodeType":"Attribute","startLoc":127,"text":"prod"},{"col":16,"endLoc":762,"id":2561,"nodeType":"Lambda","startLoc":762,"text":"lambda x: getattr(x, method)(sub, start)"},{"col":16,"endLoc":764,"id":2562,"nodeType":"Lambda","startLoc":764,"text":"lambda x: getattr(x, method)(sub, start, end)"},{"attributeType":"function","col":4,"comment":"null","endLoc":128,"id":2563,"name":"sum","nodeType":"Attribute","startLoc":128,"text":"sum"},{"attributeType":"function","col":4,"comment":"null","endLoc":129,"id":2564,"name":"std","nodeType":"Attribute","startLoc":129,"text":"std"},{"attributeType":"function","col":4,"comment":"null","endLoc":130,"id":2565,"name":"var","nodeType":"Attribute","startLoc":130,"text":"var"},{"attributeType":"function","col":4,"comment":"null","endLoc":131,"id":2566,"name":"median","nodeType":"Attribute","startLoc":131,"text":"median"},{"col":4,"comment":"\n Return highest indexes in each strings in the array\n where the substring is fully contained between [start:end].\n Return -1 on failure.\n\n Parameters\n ----------\n sub : str\n Substring being searched\n start : int\n Left edge index\n end : int\n Right edge index\n\n Returns\n -------\n found : array of integer values\n ","endLoc":787,"header":"def rfind(self, sub, start=0, end=None)","id":2567,"name":"rfind","nodeType":"Function","startLoc":768,"text":"def rfind(self, sub, start=0, end=None):\n \"\"\"\n Return highest indexes in each strings in the array\n where the substring is fully contained between [start:end].\n Return -1 on failure.\n\n Parameters\n ----------\n sub : str\n Substring being searched\n start : int\n Left edge index\n end : int\n Right edge index\n\n Returns\n -------\n found : array of integer values\n \"\"\"\n return self.find(sub, start=start, end=end, side=\"right\")"},{"attributeType":"null","col":4,"comment":"null","endLoc":138,"id":2568,"name":"__doc__","nodeType":"Attribute","startLoc":138,"text":"count.__doc__"},{"attributeType":"null","col":8,"comment":"null","endLoc":79,"id":2569,"name":"obj","nodeType":"Attribute","startLoc":79,"text":"self.obj"},{"col":4,"comment":"\n Return lowest or highest indexes in each strings where the substring is\n fully contained between [start:end]. This is the same as\n ``str.find`` except instead of returning -1, it raises a ValueError\n when the substring is not found.\n\n Parameters\n ----------\n sub : str\n Substring being searched\n start : int\n Left edge index\n end : int\n Right edge index\n side : {'left', 'right'}, default 'left'\n Starting side for search.\n\n Returns\n -------\n found : array of integer values\n ","endLoc":825,"header":"def index(self, sub, start=0, end=None, side=\"left\")","id":2570,"name":"index","nodeType":"Function","startLoc":789,"text":"def index(self, sub, start=0, end=None, side=\"left\"):\n \"\"\"\n Return lowest or highest indexes in each strings where the substring is\n fully contained between [start:end]. 
This is the same as\n ``str.find`` except instead of returning -1, it raises a ValueError\n when the substring is not found.\n\n Parameters\n ----------\n sub : str\n Substring being searched\n start : int\n Left edge index\n end : int\n Right edge index\n side : {'left', 'right'}, default 'left'\n Starting side for search.\n\n Returns\n -------\n found : array of integer values\n \"\"\"\n sub = self._obj.dtype.type(sub)\n\n if side == \"left\":\n method = \"index\"\n elif side == \"right\":\n method = \"rindex\"\n else: # pragma: no cover\n raise ValueError(\"Invalid side\")\n\n if end is None:\n f = lambda x: getattr(x, method)(sub, start)\n else:\n f = lambda x: getattr(x, method)(sub, start, end)\n\n return self._apply(f, dtype=int)"},{"col":4,"comment":"null","endLoc":388,"header":"def __sub__(self, other)","id":2571,"name":"__sub__","nodeType":"Function","startLoc":380,"text":"def __sub__(self, other):\n import cftime\n\n if isinstance(other, cftime.datetime):\n raise TypeError(\"Cannot subtract cftime.datetime from offset.\")\n elif type(other) == type(self) and other.month == self.month:\n return type(self)(self.n - other.n, month=self.month)\n else:\n return NotImplemented"},{"attributeType":"None","col":8,"comment":"null","endLoc":85,"id":2572,"name":"min_periods","nodeType":"Attribute","startLoc":85,"text":"self.min_periods"},{"col":16,"endLoc":821,"id":2573,"nodeType":"Lambda","startLoc":821,"text":"lambda x: getattr(x, method)(sub, start)"},{"col":16,"endLoc":823,"id":2574,"nodeType":"Lambda","startLoc":823,"text":"lambda x: getattr(x, method)(sub, start, end)"},{"attributeType":"null","col":8,"comment":"null","endLoc":87,"id":2575,"name":"center","nodeType":"Attribute","startLoc":87,"text":"self.center"},{"col":4,"comment":"\n Return highest indexes in each strings where the substring is\n fully contained between [start:end]. This is the same as\n ``str.rfind`` except instead of returning -1, it raises a ValueError\n when the substring is not found.\n\n Parameters\n ----------\n sub : str\n Substring being searched\n start : int\n Left edge index\n end : int\n Right edge index\n\n Returns\n -------\n found : array of integer values\n ","endLoc":847,"header":"def rindex(self, sub, start=0, end=None)","id":2576,"name":"rindex","nodeType":"Function","startLoc":827,"text":"def rindex(self, sub, start=0, end=None):\n \"\"\"\n Return highest indexes in each strings where the substring is\n fully contained between [start:end]. This is the same as\n ``str.rfind`` except instead of returning -1, it raises a ValueError\n when the substring is not found.\n\n Parameters\n ----------\n sub : str\n Substring being searched\n start : int\n Left edge index\n end : int\n Right edge index\n\n Returns\n -------\n found : array of integer values\n \"\"\"\n return self.index(sub, start=start, end=end, side=\"right\")"},{"attributeType":"null","col":8,"comment":"null","endLoc":88,"id":2577,"name":"dim","nodeType":"Attribute","startLoc":88,"text":"self.dim"},{"col":4,"comment":"\n Replace occurrences of pattern/regex in the array with some string.\n\n Parameters\n ----------\n pat : string or compiled regex\n String can be a character sequence or regular expression.\n\n repl : string or callable\n Replacement string or a callable. 
The callable is passed the regex\n match object and must return a replacement string to be used.\n See :func:`re.sub`.\n\n n : int, default -1 (all)\n Number of replacements to make from start\n case : boolean, default None\n - If True, case sensitive (the default if `pat` is a string)\n - Set to False for case insensitive\n - Cannot be set if `pat` is a compiled regex\n flags : int, default 0 (no flags)\n - re module flags, e.g. re.IGNORECASE\n - Cannot be set if `pat` is a compiled regex\n regex : boolean, default True\n - If True, assumes the passed-in pattern is a regular expression.\n - If False, treats the pattern as a literal string\n - Cannot be set to False if `pat` is a compiled regex or `repl` is\n a callable.\n\n Returns\n -------\n replaced : same type as values\n A copy of the object with all matching occurrences of `pat`\n replaced by `repl`.\n ","endLoc":926,"header":"def replace(self, pat, repl, n=-1, case=None, flags=0, regex=True)","id":2578,"name":"replace","nodeType":"Function","startLoc":849,"text":"def replace(self, pat, repl, n=-1, case=None, flags=0, regex=True):\n \"\"\"\n Replace occurrences of pattern/regex in the array with some string.\n\n Parameters\n ----------\n pat : string or compiled regex\n String can be a character sequence or regular expression.\n\n repl : string or callable\n Replacement string or a callable. The callable is passed the regex\n match object and must return a replacement string to be used.\n See :func:`re.sub`.\n\n n : int, default -1 (all)\n Number of replacements to make from start\n case : boolean, default None\n - If True, case sensitive (the default if `pat` is a string)\n - Set to False for case insensitive\n - Cannot be set if `pat` is a compiled regex\n flags : int, default 0 (no flags)\n - re module flags, e.g. 
re.IGNORECASE\n - Cannot be set if `pat` is a compiled regex\n regex : boolean, default True\n - If True, assumes the passed-in pattern is a regular expression.\n - If False, treats the pattern as a literal string\n - Cannot be set to False if `pat` is a compiled regex or `repl` is\n a callable.\n\n Returns\n -------\n replaced : same type as values\n A copy of the object with all matching occurrences of `pat`\n replaced by `repl`.\n \"\"\"\n if not (_is_str_like(repl) or callable(repl)): # pragma: no cover\n raise TypeError(\"repl must be a string or callable\")\n\n if _is_str_like(pat):\n pat = self._obj.dtype.type(pat)\n\n if _is_str_like(repl):\n repl = self._obj.dtype.type(repl)\n\n is_compiled_re = isinstance(pat, type(re.compile(\"\")))\n if regex:\n if is_compiled_re:\n if (case is not None) or (flags != 0):\n raise ValueError(\n \"case and flags cannot be set\" \" when pat is a compiled regex\"\n )\n else:\n # not a compiled regex\n # set default case\n if case is None:\n case = True\n\n # add case flag, if provided\n if case is False:\n flags |= re.IGNORECASE\n if is_compiled_re or len(pat) > 1 or flags or callable(repl):\n n = n if n >= 0 else 0\n compiled = re.compile(pat, flags=flags)\n f = lambda x: compiled.sub(repl=repl, string=x, count=n)\n else:\n f = lambda x: x.replace(pat, repl, n)\n else:\n if is_compiled_re:\n raise ValueError(\n \"Cannot use a compiled regex as replacement \"\n \"pattern with regex=False\"\n )\n if callable(repl):\n raise ValueError(\n \"Cannot use a callable replacement when \" \"regex=False\"\n )\n f = lambda x: x.replace(pat, repl, n)\n return self._apply(f)"},{"attributeType":"null","col":8,"comment":"null","endLoc":82,"id":2579,"name":"window","nodeType":"Attribute","startLoc":82,"text":"self.window"},{"col":0,"comment":"null","endLoc":61,"header":"def _is_str_like(x)","id":2580,"name":"_is_str_like","nodeType":"Function","startLoc":60,"text":"def _is_str_like(x):\n return isinstance(x, str) or isinstance(x, bytes)"},{"col":4,"comment":"null","endLoc":391,"header":"def __mul__(self, other)","id":2581,"name":"__mul__","nodeType":"Function","startLoc":390,"text":"def __mul__(self, other):\n return type(self)(n=other * self.n, month=self.month)"},{"className":"DataArrayRolling","col":0,"comment":"null","endLoc":361,"id":2582,"nodeType":"Class","startLoc":141,"text":"class DataArrayRolling(Rolling):\n __slots__ = (\"window_labels\",)\n\n def __init__(self, obj, windows, min_periods=None, center=False):\n \"\"\"\n Moving window object for DataArray.\n You should use DataArray.rolling() method to construct this object\n instead of the class constructor.\n\n Parameters\n ----------\n obj : DataArray\n Object to window.\n windows : A mapping from a dimension name to window size\n dim : str\n Name of the dimension to create the rolling iterator\n along (e.g., `time`).\n window : int\n Size of the moving window.\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA). 
The default, None, is equivalent to\n setting min_periods equal to the size of the window.\n center : boolean, default False\n Set the labels at the center of the window.\n\n Returns\n -------\n rolling : type of input argument\n\n See Also\n --------\n DataArray.rolling\n DataArray.groupby\n Dataset.rolling\n Dataset.groupby\n \"\"\"\n super().__init__(obj, windows, min_periods=min_periods, center=center)\n\n self.window_labels = self.obj[self.dim]\n\n def __iter__(self):\n stops = np.arange(1, len(self.window_labels) + 1)\n starts = stops - int(self.window)\n starts[: int(self.window)] = 0\n for (label, start, stop) in zip(self.window_labels, starts, stops):\n window = self.obj.isel(**{self.dim: slice(start, stop)})\n\n counts = window.count(dim=self.dim)\n window = window.where(counts >= self._min_periods)\n\n yield (label, window)\n\n def construct(self, window_dim, stride=1, fill_value=dtypes.NA):\n \"\"\"\n Convert this rolling object to xr.DataArray,\n where the window dimension is stacked as a new dimension\n\n Parameters\n ----------\n window_dim: str\n New name of the window dimension.\n stride: integer, optional\n Size of stride for the rolling window.\n fill_value: optional. Default dtypes.NA\n Filling value to match the dimension size.\n\n Returns\n -------\n DataArray that is a view of the original array. The returned array is\n not writeable.\n\n Examples\n --------\n >>> da = DataArray(np.arange(8).reshape(2, 4), dims=('a', 'b'))\n >>>\n >>> rolling = da.rolling(b=3)\n >>> rolling.construct('window_dim')\n \n array([[[np.nan, np.nan, 0], [np.nan, 0, 1], [0, 1, 2], [1, 2, 3]],\n [[np.nan, np.nan, 4], [np.nan, 4, 5], [4, 5, 6], [5, 6, 7]]])\n Dimensions without coordinates: a, b, window_dim\n >>>\n >>> rolling = da.rolling(b=3, center=True)\n >>> rolling.construct('window_dim')\n \n array([[[np.nan, 0, 1], [0, 1, 2], [1, 2, 3], [2, 3, np.nan]],\n [[np.nan, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, np.nan]]])\n Dimensions without coordinates: a, b, window_dim\n \"\"\"\n\n from .dataarray import DataArray\n\n window = self.obj.variable.rolling_window(\n self.dim, self.window, window_dim, self.center, fill_value=fill_value\n )\n result = DataArray(\n window, dims=self.obj.dims + (window_dim,), coords=self.obj.coords\n )\n return result.isel(**{self.dim: slice(None, None, stride)})\n\n def reduce(self, func, **kwargs):\n \"\"\"Reduce the items in this group by applying `func` along some\n dimension(s).\n\n Parameters\n ----------\n func : function\n Function which can be called in the form\n `func(x, **kwargs)` to return the result of collapsing an\n np.ndarray over an the rolling dimension.\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : DataArray\n Array with summarized data.\n\n Examples\n --------\n >>> da = DataArray(np.arange(8).reshape(2, 4), dims=('a', 'b'))\n >>>\n >>> rolling = da.rolling(b=3)\n >>> rolling.construct('window_dim')\n \n array([[[np.nan, np.nan, 0], [np.nan, 0, 1], [0, 1, 2], [1, 2, 3]],\n [[np.nan, np.nan, 4], [np.nan, 4, 5], [4, 5, 6], [5, 6, 7]]])\n Dimensions without coordinates: a, b, window_dim\n >>>\n >>> rolling.reduce(np.sum)\n \n array([[nan, nan, 3., 6.],\n [nan, nan, 15., 18.]])\n Dimensions without coordinates: a, b\n >>>\n >>> rolling = da.rolling(b=3, min_periods=1)\n >>> rolling.reduce(np.nansum)\n \n array([[ 0., 1., 3., 6.],\n [ 4., 9., 15., 18.]])\n \"\"\"\n rolling_dim = utils.get_temp_dimname(self.obj.dims, \"_rolling_dim\")\n windows = self.construct(rolling_dim)\n result = 
windows.reduce(func, dim=rolling_dim, **kwargs)\n\n # Find valid windows based on count.\n counts = self._counts()\n return result.where(counts >= self._min_periods)\n\n def _counts(self):\n \"\"\" Number of non-nan entries in each rolling window. \"\"\"\n\n rolling_dim = utils.get_temp_dimname(self.obj.dims, \"_rolling_dim\")\n # We use False as the fill_value instead of np.nan, since boolean\n # array is faster to be reduced than object array.\n # The use of skipna==False is also faster since it does not need to\n # copy the strided array.\n counts = (\n self.obj.notnull()\n .rolling(center=self.center, **{self.dim: self.window})\n .construct(rolling_dim, fill_value=False)\n .sum(dim=rolling_dim, skipna=False)\n )\n return counts\n\n def _bottleneck_reduce(self, func, **kwargs):\n from .dataarray import DataArray\n\n # bottleneck doesn't allow min_count to be 0, although it should\n # work the same as if min_count = 1\n if self.min_periods is not None and self.min_periods == 0:\n min_count = 1\n else:\n min_count = self.min_periods\n\n axis = self.obj.get_axis_num(self.dim)\n\n padded = self.obj.variable\n if self.center:\n if isinstance(padded.data, dask_array_type):\n # Workaround to make the padded chunk size is larger than\n # self.window-1\n shift = -(self.window + 1) // 2\n offset = (self.window - 1) // 2\n valid = (slice(None),) * axis + (\n slice(offset, offset + self.obj.shape[axis]),\n )\n else:\n shift = (-self.window // 2) + 1\n valid = (slice(None),) * axis + (slice(-shift, None),)\n padded = padded.pad_with_fill_value({self.dim: (0, -shift)})\n\n if isinstance(padded.data, dask_array_type):\n raise AssertionError(\"should not be reachable\")\n values = dask_rolling_wrapper(\n func, padded.data, window=self.window, min_count=min_count, axis=axis\n )\n else:\n values = func(\n padded.data, window=self.window, min_count=min_count, axis=axis\n )\n\n if self.center:\n values = values[valid]\n result = DataArray(values, self.obj.coords)\n\n return result\n\n def _numpy_or_bottleneck_reduce(\n self, array_agg_func, bottleneck_move_func, **kwargs\n ):\n if bottleneck_move_func is not None and not isinstance(\n self.obj.data, dask_array_type\n ):\n # TODO: renable bottleneck with dask after the issues\n # underlying https://github.com/pydata/xarray/issues/2940 are\n # fixed.\n return self._bottleneck_reduce(bottleneck_move_func, **kwargs)\n else:\n return self.reduce(array_agg_func, **kwargs)"},{"col":4,"comment":"\n Moving window object for DataArray.\n You should use DataArray.rolling() method to construct this object\n instead of the class constructor.\n\n Parameters\n ----------\n obj : DataArray\n Object to window.\n windows : A mapping from a dimension name to window size\n dim : str\n Name of the dimension to create the rolling iterator\n along (e.g., `time`).\n window : int\n Size of the moving window.\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA). 
The default, None, is equivalent to\n setting min_periods equal to the size of the window.\n center : boolean, default False\n Set the labels at the center of the window.\n\n Returns\n -------\n rolling : type of input argument\n\n See Also\n --------\n DataArray.rolling\n DataArray.groupby\n Dataset.rolling\n Dataset.groupby\n ","endLoc":180,"header":"def __init__(self, obj, windows, min_periods=None, center=False)","id":2583,"name":"__init__","nodeType":"Function","startLoc":144,"text":"def __init__(self, obj, windows, min_periods=None, center=False):\n \"\"\"\n Moving window object for DataArray.\n You should use DataArray.rolling() method to construct this object\n instead of the class constructor.\n\n Parameters\n ----------\n obj : DataArray\n Object to window.\n windows : A mapping from a dimension name to window size\n dim : str\n Name of the dimension to create the rolling iterator\n along (e.g., `time`).\n window : int\n Size of the moving window.\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA). The default, None, is equivalent to\n setting min_periods equal to the size of the window.\n center : boolean, default False\n Set the labels at the center of the window.\n\n Returns\n -------\n rolling : type of input argument\n\n See Also\n --------\n DataArray.rolling\n DataArray.groupby\n Dataset.rolling\n Dataset.groupby\n \"\"\"\n super().__init__(obj, windows, min_periods=min_periods, center=center)\n\n self.window_labels = self.obj[self.dim]"},{"col":4,"comment":"null","endLoc":394,"header":"def rule_code(self)","id":2584,"name":"rule_code","nodeType":"Function","startLoc":393,"text":"def rule_code(self):\n return \"{}-{}\".format(self._freq, _MONTH_ABBREVIATIONS[self.month])"},{"col":4,"comment":"null","endLoc":397,"header":"def __str__(self)","id":2585,"name":"__str__","nodeType":"Function","startLoc":396,"text":"def __str__(self):\n return \"<{}: n={}, month={}>\".format(type(self).__name__, self.n, self.month)"},{"attributeType":"null","col":4,"comment":"null","endLoc":354,"id":2586,"name":"_freq","nodeType":"Attribute","startLoc":354,"text":"_freq"},{"attributeType":"null","col":4,"comment":"null","endLoc":355,"id":2587,"name":"_default_month","nodeType":"Attribute","startLoc":355,"text":"_default_month"},{"col":4,"comment":"null","endLoc":192,"header":"def __iter__(self)","id":2588,"name":"__iter__","nodeType":"Function","startLoc":182,"text":"def __iter__(self):\n stops = np.arange(1, len(self.window_labels) + 1)\n starts = stops - int(self.window)\n starts[: int(self.window)] = 0\n for (label, start, stop) in zip(self.window_labels, starts, stops):\n window = self.obj.isel(**{self.dim: slice(start, stop)})\n\n counts = window.count(dim=self.dim)\n window = window.where(counts >= self._min_periods)\n\n yield (label, window)"},{"attributeType":"null","col":8,"comment":"null","endLoc":359,"id":2589,"name":"month","nodeType":"Attribute","startLoc":359,"text":"self.month"},{"col":4,"comment":"Roll date forward to nearest end of quarter","endLoc":440,"header":"def rollforward(self, date)","id":2590,"name":"rollforward","nodeType":"Function","startLoc":435,"text":"def rollforward(self, date):\n \"\"\"Roll date forward to nearest end of quarter\"\"\"\n if self.onOffset(date):\n return date\n else:\n return date + 
QuarterEnd(month=self.month)"},{"attributeType":"null","col":0,"comment":"null","endLoc":15,"id":2591,"name":"randn_xt","nodeType":"Attribute","startLoc":15,"text":"randn_xt"},{"attributeType":"null","col":0,"comment":"null","endLoc":16,"id":2592,"name":"randn_t","nodeType":"Attribute","startLoc":16,"text":"randn_t"},{"col":4,"comment":"Roll date backward to nearest end of quarter","endLoc":447,"header":"def rollback(self, date)","id":2593,"name":"rollback","nodeType":"Function","startLoc":442,"text":"def rollback(self, date):\n \"\"\"Roll date backward to nearest end of quarter\"\"\"\n if self.onOffset(date):\n return date\n else:\n return date - QuarterEnd(month=self.month)"},{"attributeType":"null","col":4,"comment":"null","endLoc":431,"id":2594,"name":"_default_month","nodeType":"Attribute","startLoc":431,"text":"_default_month"},{"attributeType":"null","col":4,"comment":"null","endLoc":432,"id":2595,"name":"_freq","nodeType":"Attribute","startLoc":432,"text":"_freq"},{"attributeType":"null","col":4,"comment":"null","endLoc":433,"id":2596,"name":"_day_option","nodeType":"Attribute","startLoc":433,"text":"_day_option"},{"className":"YearEnd","col":0,"comment":"null","endLoc":532,"id":2597,"nodeType":"Class","startLoc":510,"text":"class YearEnd(YearOffset):\n _freq = \"A\"\n _day_option = \"end\"\n _default_month = 12\n\n def onOffset(self, date):\n \"\"\"Check if the given date is in the set of possible dates created\n using a length-one version of this offset class.\"\"\"\n return date.day == _days_in_month(date) and date.month == self.month\n\n def rollforward(self, date):\n \"\"\"Roll date forward to nearest end of year\"\"\"\n if self.onOffset(date):\n return date\n else:\n return date + YearEnd(month=self.month)\n\n def rollback(self, date):\n \"\"\"Roll date backward to nearest end of year\"\"\"\n if self.onOffset(date):\n return date\n else:\n return date - YearEnd(month=self.month)"},{"className":"YearOffset","col":0,"comment":"null","endLoc":482,"id":2598,"nodeType":"Class","startLoc":450,"text":"class YearOffset(BaseCFTimeOffset):\n _freq: ClassVar[str]\n _day_option: ClassVar[str]\n _default_month: ClassVar[int]\n\n def __init__(self, n=1, month=None):\n BaseCFTimeOffset.__init__(self, n)\n self.month = _validate_month(month, self._default_month)\n\n def __apply__(self, other):\n reference_day = _get_day_of_month(other, self._day_option)\n years = _adjust_n_years(other, self.n, self.month, reference_day)\n months = years * 12 + (self.month - other.month)\n return _shift_month(other, months, self._day_option)\n\n def __sub__(self, other):\n import cftime\n\n if isinstance(other, cftime.datetime):\n raise TypeError(\"Cannot subtract cftime.datetime from offset.\")\n elif type(other) == type(self) and other.month == self.month:\n return type(self)(self.n - other.n, month=self.month)\n else:\n return NotImplemented\n\n def __mul__(self, other):\n return type(self)(n=other * self.n, month=self.month)\n\n def rule_code(self):\n return \"{}-{}\".format(self._freq, _MONTH_ABBREVIATIONS[self.month])\n\n def __str__(self):\n return \"<{}: n={}, month={}>\".format(type(self).__name__, self.n, self.month)"},{"col":4,"comment":"null","endLoc":457,"header":"def __init__(self, n=1, month=None)","id":2599,"name":"__init__","nodeType":"Function","startLoc":455,"text":"def __init__(self, n=1, month=None):\n BaseCFTimeOffset.__init__(self, n)\n self.month = _validate_month(month, self._default_month)"},{"col":4,"comment":"null","endLoc":463,"header":"def __apply__(self, 
other)","id":2600,"name":"__apply__","nodeType":"Function","startLoc":459,"text":"def __apply__(self, other):\n reference_day = _get_day_of_month(other, self._day_option)\n years = _adjust_n_years(other, self.n, self.month, reference_day)\n months = years * 12 + (self.month - other.month)\n return _shift_month(other, months, self._day_option)"},{"col":4,"comment":"\n Convert this rolling object to xr.DataArray,\n where the window dimension is stacked as a new dimension\n\n Parameters\n ----------\n window_dim: str\n New name of the window dimension.\n stride: integer, optional\n Size of stride for the rolling window.\n fill_value: optional. Default dtypes.NA\n Filling value to match the dimension size.\n\n Returns\n -------\n DataArray that is a view of the original array. The returned array is\n not writeable.\n\n Examples\n --------\n >>> da = DataArray(np.arange(8).reshape(2, 4), dims=('a', 'b'))\n >>>\n >>> rolling = da.rolling(b=3)\n >>> rolling.construct('window_dim')\n \n array([[[np.nan, np.nan, 0], [np.nan, 0, 1], [0, 1, 2], [1, 2, 3]],\n [[np.nan, np.nan, 4], [np.nan, 4, 5], [4, 5, 6], [5, 6, 7]]])\n Dimensions without coordinates: a, b, window_dim\n >>>\n >>> rolling = da.rolling(b=3, center=True)\n >>> rolling.construct('window_dim')\n \n array([[[np.nan, 0, 1], [0, 1, 2], [1, 2, 3], [2, 3, np.nan]],\n [[np.nan, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, np.nan]]])\n Dimensions without coordinates: a, b, window_dim\n ","endLoc":240,"header":"def construct(self, window_dim, stride=1, fill_value=dtypes.NA)","id":2601,"name":"construct","nodeType":"Function","startLoc":194,"text":"def construct(self, window_dim, stride=1, fill_value=dtypes.NA):\n \"\"\"\n Convert this rolling object to xr.DataArray,\n where the window dimension is stacked as a new dimension\n\n Parameters\n ----------\n window_dim: str\n New name of the window dimension.\n stride: integer, optional\n Size of stride for the rolling window.\n fill_value: optional. Default dtypes.NA\n Filling value to match the dimension size.\n\n Returns\n -------\n DataArray that is a view of the original array. 
The returned array is\n not writeable.\n\n Examples\n --------\n >>> da = DataArray(np.arange(8).reshape(2, 4), dims=('a', 'b'))\n >>>\n >>> rolling = da.rolling(b=3)\n >>> rolling.construct('window_dim')\n \n array([[[np.nan, np.nan, 0], [np.nan, 0, 1], [0, 1, 2], [1, 2, 3]],\n [[np.nan, np.nan, 4], [np.nan, 4, 5], [4, 5, 6], [5, 6, 7]]])\n Dimensions without coordinates: a, b, window_dim\n >>>\n >>> rolling = da.rolling(b=3, center=True)\n >>> rolling.construct('window_dim')\n \n array([[[np.nan, 0, 1], [0, 1, 2], [1, 2, 3], [2, 3, np.nan]],\n [[np.nan, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, np.nan]]])\n Dimensions without coordinates: a, b, window_dim\n \"\"\"\n\n from .dataarray import DataArray\n\n window = self.obj.variable.rolling_window(\n self.dim, self.window, window_dim, self.center, fill_value=fill_value\n )\n result = DataArray(\n window, dims=self.obj.dims + (window_dim,), coords=self.obj.coords\n )\n return result.isel(**{self.dim: slice(None, None, stride)})"},{"attributeType":"null","col":0,"comment":"null","endLoc":17,"id":2602,"name":"randn_long","nodeType":"Attribute","startLoc":17,"text":"randn_long"},{"col":0,"comment":"Adjust the number of times an annual offset is applied based on\n another date, and the reference day provided","endLoc":219,"header":"def _adjust_n_years(other, n, month, reference_day)","id":2603,"name":"_adjust_n_years","nodeType":"Function","startLoc":210,"text":"def _adjust_n_years(other, n, month, reference_day):\n \"\"\"Adjust the number of times an annual offset is applied based on\n another date, and the reference day provided\"\"\"\n if n > 0:\n if other.month < month or (other.month == month and other.day < reference_day):\n n -= 1\n else:\n if other.month > month or (other.month == month and other.day > reference_day):\n n += 1\n return n"},{"col":0,"comment":"","endLoc":1,"header":"rolling.py#","id":2604,"name":"","nodeType":"Function","startLoc":1,"text":"nx = 3000\n\nlong_nx = 30000000\n\nny = 2000\n\nnt = 1000\n\nwindow = 20\n\nrandn_xy = randn((nx, ny), frac_nan=0.1)\n\nrandn_xt = randn((nx, nt))\n\nrandn_t = randn((nt,))\n\nrandn_long = randn((long_nx,), frac_nan=0.1)"},{"col":4,"comment":"null","endLoc":473,"header":"def __sub__(self, other)","id":2605,"name":"__sub__","nodeType":"Function","startLoc":465,"text":"def __sub__(self, other):\n import cftime\n\n if isinstance(other, cftime.datetime):\n raise TypeError(\"Cannot subtract cftime.datetime from offset.\")\n elif type(other) == type(self) and other.month == self.month:\n return type(self)(self.n - other.n, month=self.month)\n else:\n return NotImplemented"},{"col":4,"comment":"null","endLoc":476,"header":"def __mul__(self, other)","id":2606,"name":"__mul__","nodeType":"Function","startLoc":475,"text":"def __mul__(self, other):\n return type(self)(n=other * self.n, month=self.month)"},{"fileName":"coordinates.py","filePath":"xarray/core","id":2607,"nodeType":"File","text":"from contextlib import contextmanager\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Dict,\n Hashable,\n Iterator,\n Mapping,\n Sequence,\n Set,\n Tuple,\n Union,\n cast,\n)\n\nimport pandas as pd\n\nfrom . 
import formatting, indexing\nfrom .indexes import Indexes\nfrom .merge import merge_coordinates_without_align, merge_coords\nfrom .utils import Frozen, ReprObject, either_dict_or_kwargs\nfrom .variable import Variable\n\nif TYPE_CHECKING:\n from .dataarray import DataArray\n from .dataset import Dataset\n\n# Used as the key corresponding to a DataArray's variable when converting\n# arbitrary DataArray objects to datasets\n_THIS_ARRAY = ReprObject(\"\")\n\n\nclass Coordinates(Mapping[Hashable, \"DataArray\"]):\n __slots__ = ()\n\n def __getitem__(self, key: Hashable) -> \"DataArray\":\n raise NotImplementedError()\n\n def __setitem__(self, key: Hashable, value: Any) -> None:\n self.update({key: value})\n\n @property\n def _names(self) -> Set[Hashable]:\n raise NotImplementedError()\n\n @property\n def dims(self) -> Union[Mapping[Hashable, int], Tuple[Hashable, ...]]:\n raise NotImplementedError()\n\n @property\n def indexes(self) -> Indexes:\n return self._data.indexes # type: ignore\n\n @property\n def variables(self):\n raise NotImplementedError()\n\n def _update_coords(self, coords, indexes):\n raise NotImplementedError()\n\n def __iter__(self) -> Iterator[\"Hashable\"]:\n # needs to be in the same order as the dataset variables\n for k in self.variables:\n if k in self._names:\n yield k\n\n def __len__(self) -> int:\n return len(self._names)\n\n def __contains__(self, key: Hashable) -> bool:\n return key in self._names\n\n def __repr__(self) -> str:\n return formatting.coords_repr(self)\n\n def to_dataset(self) -> \"Dataset\":\n raise NotImplementedError()\n\n def to_index(self, ordered_dims: Sequence[Hashable] = None) -> pd.Index:\n \"\"\"Convert all index coordinates into a :py:class:`pandas.Index`.\n\n Parameters\n ----------\n ordered_dims : sequence of hashable, optional\n Possibly reordered version of this object's dimensions indicating\n the order in which dimensions should appear on the result.\n\n Returns\n -------\n pandas.Index\n Index subclass corresponding to the outer-product of all dimension\n coordinates. 
This will be a MultiIndex if this object is has more\n than more dimension.\n \"\"\"\n if ordered_dims is None:\n ordered_dims = list(self.dims)\n elif set(ordered_dims) != set(self.dims):\n raise ValueError(\n \"ordered_dims must match dims, but does not: \"\n \"{} vs {}\".format(ordered_dims, self.dims)\n )\n\n if len(ordered_dims) == 0:\n raise ValueError(\"no valid index for a 0-dimensional object\")\n elif len(ordered_dims) == 1:\n (dim,) = ordered_dims\n return self._data.get_index(dim) # type: ignore\n else:\n indexes = [self._data.get_index(k) for k in ordered_dims] # type: ignore\n names = list(ordered_dims)\n return pd.MultiIndex.from_product(indexes, names=names)\n\n def update(self, other: Mapping[Hashable, Any]) -> None:\n other_vars = getattr(other, \"variables\", other)\n coords, indexes = merge_coords(\n [self.variables, other_vars], priority_arg=1, indexes=self.indexes\n )\n self._update_coords(coords, indexes)\n\n def _merge_raw(self, other):\n \"\"\"For use with binary arithmetic.\"\"\"\n if other is None:\n variables = dict(self.variables)\n indexes = dict(self.indexes)\n else:\n variables, indexes = merge_coordinates_without_align([self, other])\n return variables, indexes\n\n @contextmanager\n def _merge_inplace(self, other):\n \"\"\"For use with in-place binary arithmetic.\"\"\"\n if other is None:\n yield\n else:\n # don't include indexes in prioritized, because we didn't align\n # first and we want indexes to be checked\n prioritized = {\n k: (v, None) for k, v in self.variables.items() if k not in self.indexes\n }\n variables, indexes = merge_coordinates_without_align(\n [self, other], prioritized\n )\n yield\n self._update_coords(variables, indexes)\n\n def merge(self, other: \"Coordinates\") -> \"Dataset\":\n \"\"\"Merge two sets of coordinates to create a new Dataset\n\n The method implements the logic used for joining coordinates in the\n result of a binary operation performed on xarray objects:\n\n - If two index coordinates conflict (are not equal), an exception is\n raised. 
You must align your data before passing it to this method.\n - If an index coordinate and a non-index coordinate conflict, the non-\n index coordinate is dropped.\n - If two non-index coordinates conflict, both are dropped.\n\n Parameters\n ----------\n other : DatasetCoordinates or DataArrayCoordinates\n The coordinates from another dataset or data array.\n\n Returns\n -------\n merged : Dataset\n A new Dataset with merged coordinates.\n \"\"\"\n from .dataset import Dataset\n\n if other is None:\n return self.to_dataset()\n\n if not isinstance(other, Coordinates):\n other = Dataset(coords=other).coords\n\n coords, indexes = merge_coordinates_without_align([self, other])\n coord_names = set(coords)\n merged = Dataset._construct_direct(\n variables=coords, coord_names=coord_names, indexes=indexes\n )\n return merged\n\n\nclass DatasetCoordinates(Coordinates):\n \"\"\"Dictionary like container for Dataset coordinates.\n\n Essentially an immutable dictionary with keys given by the array's\n dimensions and the values given by the corresponding xarray.Coordinate\n objects.\n \"\"\"\n\n __slots__ = (\"_data\",)\n\n def __init__(self, dataset: \"Dataset\"):\n self._data = dataset\n\n @property\n def _names(self) -> Set[Hashable]:\n return self._data._coord_names\n\n @property\n def dims(self) -> Mapping[Hashable, int]:\n return self._data.dims\n\n @property\n def variables(self) -> Mapping[Hashable, Variable]:\n return Frozen(\n {k: v for k, v in self._data.variables.items() if k in self._names}\n )\n\n def __getitem__(self, key: Hashable) -> \"DataArray\":\n if key in self._data.data_vars:\n raise KeyError(key)\n return cast(\"DataArray\", self._data[key])\n\n def to_dataset(self) -> \"Dataset\":\n \"\"\"Convert these coordinates into a new Dataset\n \"\"\"\n return self._data._copy_listed(self._names)\n\n def _update_coords(\n self, coords: Dict[Hashable, Variable], indexes: Mapping[Hashable, pd.Index]\n ) -> None:\n from .dataset import calculate_dimensions\n\n variables = self._data._variables.copy()\n variables.update(coords)\n\n # check for inconsistent state *before* modifying anything in-place\n dims = calculate_dimensions(variables)\n new_coord_names = set(coords)\n for dim, size in dims.items():\n if dim in variables:\n new_coord_names.add(dim)\n\n self._data._variables = variables\n self._data._coord_names.update(new_coord_names)\n self._data._dims = dims\n\n # TODO(shoyer): once ._indexes is always populated by a dict, modify\n # it to update inplace instead.\n original_indexes = dict(self._data.indexes)\n original_indexes.update(indexes)\n self._data._indexes = original_indexes\n\n def __delitem__(self, key: Hashable) -> None:\n if key in self:\n del self._data[key]\n else:\n raise KeyError(key)\n\n def _ipython_key_completions_(self):\n \"\"\"Provide method for the key-autocompletions in IPython. 
\"\"\"\n return [\n key\n for key in self._data._ipython_key_completions_()\n if key not in self._data.data_vars\n ]\n\n\nclass DataArrayCoordinates(Coordinates):\n \"\"\"Dictionary like container for DataArray coordinates.\n\n Essentially a dict with keys given by the array's\n dimensions and the values given by corresponding DataArray objects.\n \"\"\"\n\n __slots__ = (\"_data\",)\n\n def __init__(self, dataarray: \"DataArray\"):\n self._data = dataarray\n\n @property\n def dims(self) -> Tuple[Hashable, ...]:\n return self._data.dims\n\n @property\n def _names(self) -> Set[Hashable]:\n return set(self._data._coords)\n\n def __getitem__(self, key: Hashable) -> \"DataArray\":\n return self._data._getitem_coord(key)\n\n def _update_coords(\n self, coords: Dict[Hashable, Variable], indexes: Mapping[Hashable, pd.Index]\n ) -> None:\n from .dataset import calculate_dimensions\n\n coords_plus_data = coords.copy()\n coords_plus_data[_THIS_ARRAY] = self._data.variable\n dims = calculate_dimensions(coords_plus_data)\n if not set(dims) <= set(self.dims):\n raise ValueError(\n \"cannot add coordinates with new dimensions to \" \"a DataArray\"\n )\n self._data._coords = coords\n\n # TODO(shoyer): once ._indexes is always populated by a dict, modify\n # it to update inplace instead.\n original_indexes = dict(self._data.indexes)\n original_indexes.update(indexes)\n self._data._indexes = original_indexes\n\n @property\n def variables(self):\n return Frozen(self._data._coords)\n\n def to_dataset(self) -> \"Dataset\":\n from .dataset import Dataset\n\n coords = {k: v.copy(deep=False) for k, v in self._data._coords.items()}\n return Dataset._from_vars_and_coord_names(coords, set(coords))\n\n def __delitem__(self, key: Hashable) -> None:\n del self._data._coords[key]\n\n def _ipython_key_completions_(self):\n \"\"\"Provide method for the key-autocompletions in IPython. \"\"\"\n return self._data._ipython_key_completions_()\n\n\nclass LevelCoordinatesSource(Mapping[Hashable, Any]):\n \"\"\"Iterator for MultiIndex level coordinates.\n\n Used for attribute style lookup with AttrAccessMixin. 
Not returned directly\n by any public methods.\n \"\"\"\n\n __slots__ = (\"_data\",)\n\n def __init__(self, data_object: \"Union[DataArray, Dataset]\"):\n self._data = data_object\n\n def __getitem__(self, key):\n # not necessary -- everything here can already be found in coords.\n raise KeyError()\n\n def __iter__(self) -> Iterator[Hashable]:\n return iter(self._data._level_coords)\n\n def __len__(self) -> int:\n return len(self._data._level_coords)\n\n\ndef assert_coordinate_consistent(\n obj: Union[\"DataArray\", \"Dataset\"], coords: Mapping[Hashable, Variable]\n) -> None:\n \"\"\"Make sure the dimension coordinate of obj is consistent with coords.\n\n obj: DataArray or Dataset\n coords: Dict-like of variables\n \"\"\"\n for k in obj.dims:\n # make sure there are no conflict in dimension coordinates\n if k in coords and k in obj.coords:\n if not coords[k].equals(obj[k].variable):\n raise IndexError(\n \"dimension coordinate {!r} conflicts between \"\n \"indexed and indexing objects:\\n{}\\nvs.\\n{}\".format(\n k, obj[k], coords[k]\n )\n )\n\n\ndef remap_label_indexers(\n obj: Union[\"DataArray\", \"Dataset\"],\n indexers: Mapping[Hashable, Any] = None,\n method: str = None,\n tolerance=None,\n **indexers_kwargs: Any\n) -> Tuple[dict, dict]: # TODO more precise return type after annotations in indexing\n \"\"\"Remap indexers from obj.coords.\n If indexer is an instance of DataArray and it has coordinate, then this coordinate\n will be attached to pos_indexers.\n\n Returns\n -------\n pos_indexers: Same type of indexers.\n np.ndarray or Variable or DataArray\n new_indexes: mapping of new dimensional-coordinate.\n \"\"\"\n from .dataarray import DataArray\n\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"remap_label_indexers\")\n\n v_indexers = {\n k: v.variable.data if isinstance(v, DataArray) else v\n for k, v in indexers.items()\n }\n\n pos_indexers, new_indexes = indexing.remap_label_indexers(\n obj, v_indexers, method=method, tolerance=tolerance\n )\n # attach indexer's coordinate to pos_indexers\n for k, v in indexers.items():\n if isinstance(v, Variable):\n pos_indexers[k] = Variable(v.dims, pos_indexers[k])\n elif isinstance(v, DataArray):\n # drop coordinates found in indexers since .sel() already\n # ensures alignments\n coords = {k: var for k, var in v._coords.items() if k not in indexers}\n pos_indexers[k] = DataArray(pos_indexers[k], coords=coords, dims=v.dims)\n return pos_indexers, new_indexes\n"},{"className":"Indexes","col":0,"comment":"Immutable proxy for Dataset or DataArrary indexes.","endLoc":39,"id":2608,"nodeType":"Class","startLoc":11,"text":"class Indexes(collections.abc.Mapping):\n \"\"\"Immutable proxy for Dataset or DataArrary indexes.\"\"\"\n\n __slots__ = (\"_indexes\",)\n\n def __init__(self, indexes):\n \"\"\"Not for public consumption.\n\n Parameters\n ----------\n indexes : Dict[Any, pandas.Index]\n Indexes held by this object.\n \"\"\"\n self._indexes = indexes\n\n def __iter__(self):\n return iter(self._indexes)\n\n def __len__(self):\n return len(self._indexes)\n\n def __contains__(self, key):\n return key in self._indexes\n\n def __getitem__(self, key):\n return self._indexes[key]\n\n def __repr__(self):\n return formatting.indexes_repr(self)"},{"attributeType":"null","col":0,"comment":"null","endLoc":19,"id":2609,"name":"REF_COUNTS","nodeType":"Attribute","startLoc":19,"text":"REF_COUNTS"},{"col":4,"comment":"null","endLoc":479,"header":"def rule_code(self)","id":2610,"name":"rule_code","nodeType":"Function","startLoc":478,"text":"def 
rule_code(self):\n return \"{}-{}\".format(self._freq, _MONTH_ABBREVIATIONS[self.month])"},{"col":4,"comment":"null","endLoc":482,"header":"def __str__(self)","id":2611,"name":"__str__","nodeType":"Function","startLoc":481,"text":"def __str__(self):\n return \"<{}: n={}, month={}>\".format(type(self).__name__, self.n, self.month)"},{"attributeType":"null","col":4,"comment":"null","endLoc":451,"id":2612,"name":"_freq","nodeType":"Attribute","startLoc":451,"text":"_freq"},{"attributeType":"ReprObject","col":0,"comment":"null","endLoc":21,"id":2613,"name":"_DEFAULT_MODE","nodeType":"Attribute","startLoc":21,"text":"_DEFAULT_MODE"},{"col":4,"comment":"null","endLoc":27,"header":"def __iter__(self)","id":2614,"name":"__iter__","nodeType":"Function","startLoc":26,"text":"def __iter__(self):\n return iter(self._indexes)"},{"attributeType":"null","col":4,"comment":"null","endLoc":452,"id":2615,"name":"_day_option","nodeType":"Attribute","startLoc":452,"text":"_day_option"},{"col":4,"comment":"null","endLoc":30,"header":"def __len__(self)","id":2616,"name":"__len__","nodeType":"Function","startLoc":29,"text":"def __len__(self):\n return len(self._indexes)"},{"col":4,"comment":"null","endLoc":33,"header":"def __contains__(self, key)","id":2617,"name":"__contains__","nodeType":"Function","startLoc":32,"text":"def __contains__(self, key):\n return key in self._indexes"},{"col":4,"comment":"null","endLoc":36,"header":"def __getitem__(self, key)","id":2618,"name":"__getitem__","nodeType":"Function","startLoc":35,"text":"def __getitem__(self, key):\n return self._indexes[key]"},{"col":4,"comment":"null","endLoc":39,"header":"def __repr__(self)","id":2619,"name":"__repr__","nodeType":"Function","startLoc":38,"text":"def __repr__(self):\n return formatting.indexes_repr(self)"},{"attributeType":"null","col":4,"comment":"null","endLoc":453,"id":2620,"name":"_default_month","nodeType":"Attribute","startLoc":453,"text":"_default_month"},{"attributeType":"null","col":8,"comment":"null","endLoc":457,"id":2621,"name":"month","nodeType":"Attribute","startLoc":457,"text":"self.month"},{"col":4,"comment":"Check if the given date is in the set of possible dates created\n using a length-one version of this offset class.","endLoc":518,"header":"def onOffset(self, date)","id":2622,"name":"onOffset","nodeType":"Function","startLoc":515,"text":"def onOffset(self, date):\n \"\"\"Check if the given date is in the set of possible dates created\n using a length-one version of this offset class.\"\"\"\n return date.day == _days_in_month(date) and date.month == self.month"},{"attributeType":"null","col":4,"comment":"null","endLoc":14,"id":2623,"name":"__slots__","nodeType":"Attribute","startLoc":14,"text":"__slots__"},{"col":4,"comment":"Roll date forward to nearest end of year","endLoc":525,"header":"def rollforward(self, date)","id":2624,"name":"rollforward","nodeType":"Function","startLoc":520,"text":"def rollforward(self, date):\n \"\"\"Roll date forward to nearest end of year\"\"\"\n if self.onOffset(date):\n return date\n else:\n return date + YearEnd(month=self.month)"},{"attributeType":"null","col":8,"comment":"null","endLoc":24,"id":2625,"name":"_indexes","nodeType":"Attribute","startLoc":24,"text":"self._indexes"},{"col":0,"comment":"","endLoc":1,"header":"file_manager.py#","id":2626,"name":"","nodeType":"Function","startLoc":1,"text":"FILE_CACHE: LRUCache[str, io.IOBase] = LRUCache(\n maxsize=cast(int, OPTIONS[\"file_cache_maxsize\"]), on_evict=lambda k, v: v.close()\n)\n\nassert FILE_CACHE.maxsize, \"file cache must 
be at least size one\"\n\nREF_COUNTS: Dict[Any, int] = {}\n\n_DEFAULT_MODE = utils.ReprObject(\"\")"},{"col":20,"endLoc":912,"id":2627,"nodeType":"Lambda","startLoc":912,"text":"lambda x: compiled.sub(repl=repl, string=x, count=n)"},{"col":63,"endLoc":14,"id":2628,"nodeType":"Lambda","startLoc":14,"text":"lambda k, v: v.close()"},{"col":20,"endLoc":914,"id":2629,"nodeType":"Lambda","startLoc":914,"text":"lambda x: x.replace(pat, repl, n)"},{"col":16,"endLoc":925,"id":2630,"nodeType":"Lambda","startLoc":925,"text":"lambda x: x.replace(pat, repl, n)"},{"col":4,"comment":"\n Decode character string in the array using indicated encoding.\n\n Parameters\n ----------\n encoding : str\n errors : str, optional\n\n Returns\n -------\n decoded : same type as values\n ","endLoc":946,"header":"def decode(self, encoding, errors=\"strict\")","id":2631,"name":"decode","nodeType":"Function","startLoc":928,"text":"def decode(self, encoding, errors=\"strict\"):\n \"\"\"\n Decode character string in the array using indicated encoding.\n\n Parameters\n ----------\n encoding : str\n errors : str, optional\n\n Returns\n -------\n decoded : same type as values\n \"\"\"\n if encoding in _cpython_optimized_decoders:\n f = lambda x: x.decode(encoding, errors)\n else:\n decoder = codecs.getdecoder(encoding)\n f = lambda x: decoder(x, errors)[0]\n return self._apply(f, dtype=np.str_)"},{"col":16,"endLoc":942,"id":2632,"nodeType":"Lambda","startLoc":942,"text":"lambda x: x.decode(encoding, errors)"},{"id":2633,"name":".gitignore","nodeType":"TextFile","path":"","text":"*.py[cod]\n__pycache__\n\n# example caches from Hypothesis\n.hypothesis/\n\n# temp files from docs build\ndoc/auto_gallery\ndoc/example.nc\ndoc/savefig\n\n# C extensions\n*.so\n\n# Packages\n*.egg\n*.egg-info\ndist\nbuild\neggs\nparts\nbin\nvar\nsdist\ndevelop-eggs\n.installed.cfg\nlib\nlib64\n\n# Installer logs\npip-log.txt\n\n# Unit test / coverage reports\n.coverage\n.coverage.*\n.tox\nnosetests.xml\n.cache\n.dmypy.json\n.mypy_cache\n.ropeproject/\n.tags*\n.testmon*\n.tmontmp/\n.pytest_cache\ndask-worker-space/\n\n# asv environments\n.asv\n\n# Translations\n*.mo\n\n# Mr Developer\n.mr.developer.cfg\n.project\n.pydevproject\n\n# IDEs\n.idea\n*.swp\n.DS_Store\n.vscode/\n\n# xarray specific\ndoc/_build\ndoc/generated\nxarray/version.py\nxarray/tests/data/*.grib.*.idx\n\n# Sync tools\nIcon*\n\n.ipynb_checkpoints\n"},{"col":4,"comment":"Roll date backward to nearest end of year","endLoc":532,"header":"def rollback(self, date)","id":2634,"name":"rollback","nodeType":"Function","startLoc":527,"text":"def rollback(self, date):\n \"\"\"Roll date backward to nearest end of year\"\"\"\n if self.onOffset(date):\n return date\n else:\n return date - YearEnd(month=self.month)"},{"col":16,"endLoc":945,"id":2635,"nodeType":"Lambda","startLoc":945,"text":"lambda x: decoder(x, errors)[0]"},{"fileName":"__init__.py","filePath":"xarray/backends","id":2636,"nodeType":"File","text":"\"\"\"Backend objects for saving and loading data\n\nDataStores provide a uniform interface for saving and loading data in different\nformats. 
They should not be used directly, but rather through Dataset objects.\n\"\"\"\nfrom .cfgrib_ import CfGribDataStore\nfrom .common import AbstractDataStore\nfrom .file_manager import CachingFileManager, DummyFileManager, FileManager\nfrom .h5netcdf_ import H5NetCDFStore\nfrom .memory import InMemoryDataStore\nfrom .netCDF4_ import NetCDF4DataStore\nfrom .pseudonetcdf_ import PseudoNetCDFDataStore\nfrom .pydap_ import PydapDataStore\nfrom .pynio_ import NioDataStore\nfrom .scipy_ import ScipyDataStore\nfrom .zarr import ZarrStore\n\n__all__ = [\n \"AbstractDataStore\",\n \"FileManager\",\n \"CachingFileManager\",\n \"CfGribDataStore\",\n \"DummyFileManager\",\n \"InMemoryDataStore\",\n \"NetCDF4DataStore\",\n \"PydapDataStore\",\n \"NioDataStore\",\n \"ScipyDataStore\",\n \"H5NetCDFStore\",\n \"ZarrStore\",\n \"PseudoNetCDFDataStore\",\n]\n"},{"attributeType":"null","col":4,"comment":"null","endLoc":511,"id":2637,"name":"_freq","nodeType":"Attribute","startLoc":511,"text":"_freq"},{"attributeType":"null","col":4,"comment":"null","endLoc":512,"id":2638,"name":"_day_option","nodeType":"Attribute","startLoc":512,"text":"_day_option"},{"attributeType":"null","col":4,"comment":"null","endLoc":513,"id":2639,"name":"_default_month","nodeType":"Attribute","startLoc":513,"text":"_default_month"},{"className":"PydapDataStore","col":0,"comment":"Store for accessing OpenDAP datasets with pydap.\n\n This store provides an alternative way to access OpenDAP datasets that may\n be useful if the netCDF4 library is not available.\n ","endLoc":94,"id":2640,"nodeType":"Class","startLoc":59,"text":"class PydapDataStore(AbstractDataStore):\n \"\"\"Store for accessing OpenDAP datasets with pydap.\n\n This store provides an alternative way to access OpenDAP datasets that may\n be useful if the netCDF4 library is not available.\n \"\"\"\n\n def __init__(self, ds):\n \"\"\"\n Parameters\n ----------\n ds : pydap DatasetType\n \"\"\"\n self.ds = ds\n\n @classmethod\n def open(cls, url, session=None):\n import pydap.client\n\n ds = pydap.client.open_url(url, session=session)\n return cls(ds)\n\n def open_store_variable(self, var):\n data = indexing.LazilyOuterIndexedArray(PydapArrayWrapper(var))\n return Variable(var.dimensions, data, _fix_attributes(var.attributes))\n\n def get_variables(self):\n return FrozenDict(\n (k, self.open_store_variable(self.ds[k])) for k in self.ds.keys()\n )\n\n def get_attrs(self):\n return Frozen(_fix_attributes(self.ds.attributes))\n\n def get_dimensions(self):\n return Frozen(self.ds.dimensions)"},{"className":"CFTimeGrouper","col":0,"comment":"This is a simple container for the grouping parameters that implements a\n single method, the only one required for resampling in xarray. It cannot\n be used in a call to groupby like a pandas.Grouper object can.","endLoc":110,"id":2641,"nodeType":"Class","startLoc":57,"text":"class CFTimeGrouper:\n \"\"\"This is a simple container for the grouping parameters that implements a\n single method, the only one required for resampling in xarray. 
It cannot\n be used in a call to groupby like a pandas.Grouper object can.\"\"\"\n\n def __init__(self, freq, closed=None, label=None, base=0, loffset=None):\n self.freq = to_offset(freq)\n self.closed = closed\n self.label = label\n self.base = base\n self.loffset = loffset\n\n if isinstance(self.freq, (MonthEnd, QuarterEnd, YearEnd)):\n if self.closed is None:\n self.closed = \"right\"\n if self.label is None:\n self.label = \"right\"\n else:\n if self.closed is None:\n self.closed = \"left\"\n if self.label is None:\n self.label = \"left\"\n\n def first_items(self, index):\n \"\"\"Meant to reproduce the results of the following\n\n grouper = pandas.Grouper(...)\n first_items = pd.Series(np.arange(len(index)),\n index).groupby(grouper).first()\n\n with index being a CFTimeIndex instead of a DatetimeIndex.\n \"\"\"\n\n datetime_bins, labels = _get_time_bins(\n index, self.freq, self.closed, self.label, self.base\n )\n if self.loffset is not None:\n if isinstance(self.loffset, datetime.timedelta):\n labels = labels + self.loffset\n else:\n labels = labels + to_offset(self.loffset)\n\n # check binner fits data\n if index[0] < datetime_bins[0]:\n raise ValueError(\"Value falls before first bin\")\n if index[-1] > datetime_bins[-1]:\n raise ValueError(\"Value falls after last bin\")\n\n integer_bins = np.searchsorted(index, datetime_bins, side=self.closed)[:-1]\n first_items = pd.Series(integer_bins, labels)\n\n # Mask duplicate values with NaNs, preserving the last values\n non_duplicate = ~first_items.duplicated(\"last\")\n return first_items.where(non_duplicate)"},{"col":4,"comment":"\n Encode character string in the array using indicated encoding.\n\n Parameters\n ----------\n encoding : str\n errors : str, optional\n\n Returns\n -------\n encoded : same type as values\n ","endLoc":966,"header":"def encode(self, encoding, errors=\"strict\")","id":2642,"name":"encode","nodeType":"Function","startLoc":948,"text":"def encode(self, encoding, errors=\"strict\"):\n \"\"\"\n Encode character string in the array using indicated encoding.\n\n Parameters\n ----------\n encoding : str\n errors : str, optional\n\n Returns\n -------\n encoded : same type as values\n \"\"\"\n if encoding in _cpython_optimized_encoders:\n f = lambda x: x.encode(encoding, errors)\n else:\n encoder = codecs.getencoder(encoding)\n f = lambda x: encoder(x, errors)[0]\n return self._apply(f, dtype=np.bytes_)"},{"className":"DatasetCoordinates","col":0,"comment":"Dictionary like container for Dataset coordinates.\n\n Essentially an immutable dictionary with keys given by the array's\n dimensions and the values given by the corresponding xarray.Coordinate\n objects.\n ","endLoc":258,"id":2643,"nodeType":"Class","startLoc":184,"text":"class DatasetCoordinates(Coordinates):\n \"\"\"Dictionary like container for Dataset coordinates.\n\n Essentially an immutable dictionary with keys given by the array's\n dimensions and the values given by the corresponding xarray.Coordinate\n objects.\n \"\"\"\n\n __slots__ = (\"_data\",)\n\n def __init__(self, dataset: \"Dataset\"):\n self._data = dataset\n\n @property\n def _names(self) -> Set[Hashable]:\n return self._data._coord_names\n\n @property\n def dims(self) -> Mapping[Hashable, int]:\n return self._data.dims\n\n @property\n def variables(self) -> Mapping[Hashable, Variable]:\n return Frozen(\n {k: v for k, v in self._data.variables.items() if k in self._names}\n )\n\n def __getitem__(self, key: Hashable) -> \"DataArray\":\n if key in self._data.data_vars:\n raise KeyError(key)\n 
return cast(\"DataArray\", self._data[key])\n\n def to_dataset(self) -> \"Dataset\":\n \"\"\"Convert these coordinates into a new Dataset\n \"\"\"\n return self._data._copy_listed(self._names)\n\n def _update_coords(\n self, coords: Dict[Hashable, Variable], indexes: Mapping[Hashable, pd.Index]\n ) -> None:\n from .dataset import calculate_dimensions\n\n variables = self._data._variables.copy()\n variables.update(coords)\n\n # check for inconsistent state *before* modifying anything in-place\n dims = calculate_dimensions(variables)\n new_coord_names = set(coords)\n for dim, size in dims.items():\n if dim in variables:\n new_coord_names.add(dim)\n\n self._data._variables = variables\n self._data._coord_names.update(new_coord_names)\n self._data._dims = dims\n\n # TODO(shoyer): once ._indexes is always populated by a dict, modify\n # it to update inplace instead.\n original_indexes = dict(self._data.indexes)\n original_indexes.update(indexes)\n self._data._indexes = original_indexes\n\n def __delitem__(self, key: Hashable) -> None:\n if key in self:\n del self._data[key]\n else:\n raise KeyError(key)\n\n def _ipython_key_completions_(self):\n \"\"\"Provide method for the key-autocompletions in IPython. \"\"\"\n return [\n key\n for key in self._data._ipython_key_completions_()\n if key not in self._data.data_vars\n ]"},{"col":16,"endLoc":962,"id":2644,"nodeType":"Lambda","startLoc":962,"text":"lambda x: x.encode(encoding, errors)"},{"col":4,"comment":"Meant to reproduce the results of the following\n\n grouper = pandas.Grouper(...)\n first_items = pd.Series(np.arange(len(index)),\n index).groupby(grouper).first()\n\n with index being a CFTimeIndex instead of a DatetimeIndex.\n ","endLoc":110,"header":"def first_items(self, index)","id":2645,"name":"first_items","nodeType":"Function","startLoc":80,"text":"def first_items(self, index):\n \"\"\"Meant to reproduce the results of the following\n\n grouper = pandas.Grouper(...)\n first_items = pd.Series(np.arange(len(index)),\n index).groupby(grouper).first()\n\n with index being a CFTimeIndex instead of a DatetimeIndex.\n \"\"\"\n\n datetime_bins, labels = _get_time_bins(\n index, self.freq, self.closed, self.label, self.base\n )\n if self.loffset is not None:\n if isinstance(self.loffset, datetime.timedelta):\n labels = labels + self.loffset\n else:\n labels = labels + to_offset(self.loffset)\n\n # check binner fits data\n if index[0] < datetime_bins[0]:\n raise ValueError(\"Value falls before first bin\")\n if index[-1] > datetime_bins[-1]:\n raise ValueError(\"Value falls after last bin\")\n\n integer_bins = np.searchsorted(index, datetime_bins, side=self.closed)[:-1]\n first_items = pd.Series(integer_bins, labels)\n\n # Mask duplicate values with NaNs, preserving the last values\n non_duplicate = ~first_items.duplicated(\"last\")\n return first_items.where(non_duplicate)"},{"col":4,"comment":"null","endLoc":199,"header":"@property\n def _names(self) -> Set[Hashable]","id":2646,"name":"_names","nodeType":"Function","startLoc":197,"text":"@property\n def _names(self) -> Set[Hashable]:\n return self._data._coord_names"},{"col":4,"comment":"null","endLoc":203,"header":"@property\n def dims(self) -> Mapping[Hashable, int]","id":2647,"name":"dims","nodeType":"Function","startLoc":201,"text":"@property\n def dims(self) -> Mapping[Hashable, int]:\n return self._data.dims"},{"col":4,"comment":"null","endLoc":83,"header":"def open_store_variable(self, var)","id":2648,"name":"open_store_variable","nodeType":"Function","startLoc":81,"text":"def 
open_store_variable(self, var):\n data = indexing.LazilyOuterIndexedArray(PydapArrayWrapper(var))\n return Variable(var.dimensions, data, _fix_attributes(var.attributes))"},{"col":4,"comment":"null","endLoc":209,"header":"@property\n def variables(self) -> Mapping[Hashable, Variable]","id":2649,"name":"variables","nodeType":"Function","startLoc":205,"text":"@property\n def variables(self) -> Mapping[Hashable, Variable]:\n return Frozen(\n {k: v for k, v in self._data.variables.items() if k in self._names}\n )"},{"col":16,"endLoc":965,"id":2650,"nodeType":"Lambda","startLoc":965,"text":"lambda x: encoder(x, errors)[0]"},{"attributeType":"null","col":4,"comment":"null","endLoc":78,"id":2651,"name":"__slots__","nodeType":"Attribute","startLoc":78,"text":"__slots__"},{"attributeType":"null","col":8,"comment":"null","endLoc":81,"id":2652,"name":"_obj","nodeType":"Attribute","startLoc":81,"text":"self._obj"},{"attributeType":"null","col":16,"comment":"null","endLoc":44,"id":2653,"name":"np","nodeType":"Attribute","startLoc":44,"text":"np"},{"attributeType":"null","col":0,"comment":"null","endLoc":48,"id":2654,"name":"_cpython_optimized_encoders","nodeType":"Attribute","startLoc":48,"text":"_cpython_optimized_encoders"},{"attributeType":"null","col":0,"comment":"null","endLoc":57,"id":2655,"name":"_cpython_optimized_decoders","nodeType":"Attribute","startLoc":57,"text":"_cpython_optimized_decoders"},{"col":4,"comment":"null","endLoc":12,"header":"def __init__(self, array)","id":2656,"name":"__init__","nodeType":"Function","startLoc":11,"text":"def __init__(self, array):\n self.array = array"},{"col":0,"comment":"","endLoc":40,"header":"accessor_str.py#","id":2657,"name":"","nodeType":"Function","startLoc":40,"text":"_cpython_optimized_encoders = (\n \"utf-8\",\n \"utf8\",\n \"latin-1\",\n \"latin1\",\n \"iso-8859-1\",\n \"mbcs\",\n \"ascii\",\n)\n\n_cpython_optimized_decoders = _cpython_optimized_encoders + (\"utf-16\", \"utf-32\")"},{"col":0,"comment":"Obtain the bins and their respective labels for resampling operations.\n\n Parameters\n ----------\n index : CFTimeIndex\n Index object to be resampled (e.g., CFTimeIndex named 'time').\n freq : xarray.coding.cftime_offsets.BaseCFTimeOffset\n The offset object representing target conversion a.k.a. resampling\n frequency (e.g., 'MS', '2D', 'H', or '3T' with\n coding.cftime_offsets.to_offset() applied to it).\n closed : 'left' or 'right', optional\n Which side of bin interval is closed.\n The default is 'left' for all frequency offsets except for 'M' and 'A',\n which have a default of 'right'.\n label : 'left' or 'right', optional\n Which bin edge label to label bucket with.\n The default is 'left' for all frequency offsets except for 'M' and 'A',\n which have a default of 'right'.\n base : int, optional\n For frequencies that evenly subdivide 1 day, the \"origin\" of the\n aggregated intervals. For example, for '5min' frequency, base could\n range from 0 through 4. 
Defaults to 0.\n\n Returns\n -------\n datetime_bins : CFTimeIndex\n Defines the edge of resampling bins by which original index values will\n be grouped into.\n labels : CFTimeIndex\n Define what the user actually sees the bins labeled as.\n ","endLoc":175,"header":"def _get_time_bins(index, freq, closed, label, base)","id":2658,"name":"_get_time_bins","nodeType":"Function","startLoc":113,"text":"def _get_time_bins(index, freq, closed, label, base):\n \"\"\"Obtain the bins and their respective labels for resampling operations.\n\n Parameters\n ----------\n index : CFTimeIndex\n Index object to be resampled (e.g., CFTimeIndex named 'time').\n freq : xarray.coding.cftime_offsets.BaseCFTimeOffset\n The offset object representing target conversion a.k.a. resampling\n frequency (e.g., 'MS', '2D', 'H', or '3T' with\n coding.cftime_offsets.to_offset() applied to it).\n closed : 'left' or 'right', optional\n Which side of bin interval is closed.\n The default is 'left' for all frequency offsets except for 'M' and 'A',\n which have a default of 'right'.\n label : 'left' or 'right', optional\n Which bin edge label to label bucket with.\n The default is 'left' for all frequency offsets except for 'M' and 'A',\n which have a default of 'right'.\n base : int, optional\n For frequencies that evenly subdivide 1 day, the \"origin\" of the\n aggregated intervals. For example, for '5min' frequency, base could\n range from 0 through 4. Defaults to 0.\n\n Returns\n -------\n datetime_bins : CFTimeIndex\n Defines the edge of resampling bins by which original index values will\n be grouped into.\n labels : CFTimeIndex\n Define what the user actually sees the bins labeled as.\n \"\"\"\n\n if not isinstance(index, CFTimeIndex):\n raise TypeError(\n \"index must be a CFTimeIndex, but got \"\n \"an instance of %r\" % type(index).__name__\n )\n if len(index) == 0:\n datetime_bins = labels = CFTimeIndex(data=[], name=index.name)\n return datetime_bins, labels\n\n first, last = _get_range_edges(\n index.min(), index.max(), freq, closed=closed, base=base\n )\n datetime_bins = labels = cftime_range(\n freq=freq, start=first, end=last, name=index.name\n )\n\n datetime_bins, labels = _adjust_bin_edges(\n datetime_bins, freq, closed, index, labels\n )\n\n if label == \"right\":\n labels = labels[1:]\n else:\n labels = labels[:-1]\n\n # TODO: when CFTimeIndex supports missing values, if the reference index\n # contains missing values, insert the appropriate NaN value at the\n # beginning of the datetime_bins and labels indexes.\n\n return datetime_bins, labels"},{"col":0,"comment":"null","endLoc":56,"header":"def _fix_attributes(attributes)","id":2659,"name":"_fix_attributes","nodeType":"Function","startLoc":40,"text":"def _fix_attributes(attributes):\n attributes = dict(attributes)\n for k in list(attributes):\n if k.lower() == \"global\" or k.lower().endswith(\"_global\"):\n # move global attributes to the top level, like the netcdf-C\n # DAP client\n attributes.update(attributes.pop(k))\n elif is_dict_like(attributes[k]):\n # Make Hierarchical attributes to a single level with a\n # dot-separated key\n attributes.update(\n {\n \"{}.{}\".format(k, k_child): v_child\n for k_child, v_child in attributes.pop(k).items()\n }\n )\n return attributes"},{"col":0,"comment":" Get the correct starting and ending datetimes for the resampled\n CFTimeIndex range.\n\n Parameters\n ----------\n first : cftime.datetime\n Uncorrected starting datetime object for resampled CFTimeIndex range.\n Usually the min of the original CFTimeIndex.\n 
last : cftime.datetime\n Uncorrected ending datetime object for resampled CFTimeIndex range.\n Usually the max of the original CFTimeIndex.\n offset : xarray.coding.cftime_offsets.BaseCFTimeOffset\n The offset object representing target conversion a.k.a. resampling\n frequency. Contains information on offset type (e.g. Day or 'D') and\n offset magnitude (e.g., n = 3).\n closed : 'left' or 'right', optional\n Which side of bin interval is closed. Defaults to 'left'.\n base : int, optional\n For frequencies that evenly subdivide 1 day, the \"origin\" of the\n aggregated intervals. For example, for '5min' frequency, base could\n range from 0 through 4. Defaults to 0.\n\n Returns\n -------\n first : cftime.datetime\n Corrected starting datetime object for resampled CFTimeIndex range.\n last : cftime.datetime\n Corrected ending datetime object for resampled CFTimeIndex range.\n ","endLoc":271,"header":"def _get_range_edges(first, last, offset, closed=\"left\", base=0)","id":2660,"name":"_get_range_edges","nodeType":"Function","startLoc":226,"text":"def _get_range_edges(first, last, offset, closed=\"left\", base=0):\n \"\"\" Get the correct starting and ending datetimes for the resampled\n CFTimeIndex range.\n\n Parameters\n ----------\n first : cftime.datetime\n Uncorrected starting datetime object for resampled CFTimeIndex range.\n Usually the min of the original CFTimeIndex.\n last : cftime.datetime\n Uncorrected ending datetime object for resampled CFTimeIndex range.\n Usually the max of the original CFTimeIndex.\n offset : xarray.coding.cftime_offsets.BaseCFTimeOffset\n The offset object representing target conversion a.k.a. resampling\n frequency. Contains information on offset type (e.g. Day or 'D') and\n offset magnitude (e.g., n = 3).\n closed : 'left' or 'right', optional\n Which side of bin interval is closed. Defaults to 'left'.\n base : int, optional\n For frequencies that evenly subdivide 1 day, the \"origin\" of the\n aggregated intervals. For example, for '5min' frequency, base could\n range from 0 through 4. 
Defaults to 0.\n\n Returns\n -------\n first : cftime.datetime\n Corrected starting datetime object for resampled CFTimeIndex range.\n last : cftime.datetime\n Corrected ending datetime object for resampled CFTimeIndex range.\n \"\"\"\n if isinstance(offset, CFTIME_TICKS):\n first, last = _adjust_dates_anchored(\n first, last, offset, closed=closed, base=base\n )\n return first, last\n else:\n first = normalize_date(first)\n last = normalize_date(last)\n\n if closed == \"left\":\n first = offset.rollback(first)\n else:\n first = first - offset\n\n last = last + offset\n return first, last"},{"fileName":"strings.py","filePath":"xarray/coding","id":2661,"nodeType":"File","text":"\"\"\"Coders for strings.\"\"\"\nfrom functools import partial\n\nimport numpy as np\n\nfrom ..core import indexing\nfrom ..core.pycompat import dask_array_type\nfrom ..core.variable import Variable\nfrom .variables import (\n VariableCoder,\n lazy_elemwise_func,\n pop_to,\n safe_setitem,\n unpack_for_decoding,\n unpack_for_encoding,\n)\n\n\ndef create_vlen_dtype(element_type):\n # based on h5py.special_dtype\n return np.dtype(\"O\", metadata={\"element_type\": element_type})\n\n\ndef check_vlen_dtype(dtype):\n if dtype.kind != \"O\" or dtype.metadata is None:\n return None\n else:\n return dtype.metadata.get(\"element_type\")\n\n\ndef is_unicode_dtype(dtype):\n return dtype.kind == \"U\" or check_vlen_dtype(dtype) == str\n\n\ndef is_bytes_dtype(dtype):\n return dtype.kind == \"S\" or check_vlen_dtype(dtype) == bytes\n\n\nclass EncodedStringCoder(VariableCoder):\n \"\"\"Transforms between unicode strings and fixed-width UTF-8 bytes.\"\"\"\n\n def __init__(self, allows_unicode=True):\n self.allows_unicode = allows_unicode\n\n def encode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_encoding(variable)\n\n contains_unicode = is_unicode_dtype(data.dtype)\n encode_as_char = encoding.get(\"dtype\") == \"S1\"\n\n if encode_as_char:\n del encoding[\"dtype\"] # no longer relevant\n\n if contains_unicode and (encode_as_char or not self.allows_unicode):\n if \"_FillValue\" in attrs:\n raise NotImplementedError(\n \"variable {!r} has a _FillValue specified, but \"\n \"_FillValue is not yet supported on unicode strings: \"\n \"https://github.com/pydata/xarray/issues/1647\".format(name)\n )\n\n string_encoding = encoding.pop(\"_Encoding\", \"utf-8\")\n safe_setitem(attrs, \"_Encoding\", string_encoding, name=name)\n # TODO: figure out how to handle this in a lazy way with dask\n data = encode_string_array(data, string_encoding)\n\n return Variable(dims, data, attrs, encoding)\n\n def decode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_decoding(variable)\n\n if \"_Encoding\" in attrs:\n string_encoding = pop_to(attrs, encoding, \"_Encoding\")\n func = partial(decode_bytes_array, encoding=string_encoding)\n data = lazy_elemwise_func(data, func, np.dtype(object))\n\n return Variable(dims, data, attrs, encoding)\n\n\ndef decode_bytes_array(bytes_array, encoding=\"utf-8\"):\n # This is faster than using np.char.decode() or np.vectorize()\n bytes_array = np.asarray(bytes_array)\n decoded = [x.decode(encoding) for x in bytes_array.ravel()]\n return np.array(decoded, dtype=object).reshape(bytes_array.shape)\n\n\ndef encode_string_array(string_array, encoding=\"utf-8\"):\n string_array = np.asarray(string_array)\n encoded = [x.encode(encoding) for x in string_array.ravel()]\n return np.array(encoded, dtype=bytes).reshape(string_array.shape)\n\n\ndef ensure_fixed_length_bytes(var):\n 
\"\"\"Ensure that a variable with vlen bytes is converted to fixed width.\"\"\"\n dims, data, attrs, encoding = unpack_for_encoding(var)\n if check_vlen_dtype(data.dtype) == bytes:\n # TODO: figure out how to handle this with dask\n data = np.asarray(data, dtype=np.string_)\n return Variable(dims, data, attrs, encoding)\n\n\nclass CharacterArrayCoder(VariableCoder):\n \"\"\"Transforms between arrays containing bytes and character arrays.\"\"\"\n\n def encode(self, variable, name=None):\n variable = ensure_fixed_length_bytes(variable)\n\n dims, data, attrs, encoding = unpack_for_encoding(variable)\n if data.dtype.kind == \"S\" and encoding.get(\"dtype\") is not str:\n data = bytes_to_char(data)\n if \"char_dim_name\" in encoding.keys():\n char_dim_name = encoding.pop(\"char_dim_name\")\n else:\n char_dim_name = \"string%s\" % data.shape[-1]\n dims = dims + (char_dim_name,)\n return Variable(dims, data, attrs, encoding)\n\n def decode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_decoding(variable)\n\n if data.dtype == \"S1\" and dims:\n encoding[\"char_dim_name\"] = dims[-1]\n dims = dims[:-1]\n data = char_to_bytes(data)\n return Variable(dims, data, attrs, encoding)\n\n\ndef bytes_to_char(arr):\n \"\"\"Convert numpy/dask arrays from fixed width bytes to characters.\"\"\"\n if arr.dtype.kind != \"S\":\n raise ValueError(\"argument must have a fixed-width bytes dtype\")\n\n if isinstance(arr, dask_array_type):\n import dask.array as da\n\n return da.map_blocks(\n _numpy_bytes_to_char,\n arr,\n dtype=\"S1\",\n chunks=arr.chunks + ((arr.dtype.itemsize,)),\n new_axis=[arr.ndim],\n )\n else:\n return _numpy_bytes_to_char(arr)\n\n\ndef _numpy_bytes_to_char(arr):\n \"\"\"Like netCDF4.stringtochar, but faster and more flexible.\n \"\"\"\n # ensure the array is contiguous\n arr = np.array(arr, copy=False, order=\"C\", dtype=np.string_)\n return arr.reshape(arr.shape + (1,)).view(\"S1\")\n\n\ndef char_to_bytes(arr):\n \"\"\"Convert numpy/dask arrays from characters to fixed width bytes.\"\"\"\n if arr.dtype != \"S1\":\n raise ValueError(\"argument must have dtype='S1'\")\n\n if not arr.ndim:\n # no dimension to concatenate along\n return arr\n\n size = arr.shape[-1]\n\n if not size:\n # can't make an S0 dtype\n return np.zeros(arr.shape[:-1], dtype=np.string_)\n\n if isinstance(arr, dask_array_type):\n import dask.array as da\n\n if len(arr.chunks[-1]) > 1:\n raise ValueError(\n \"cannot stacked dask character array with \"\n \"multiple chunks in the last dimension: {}\".format(arr)\n )\n\n dtype = np.dtype(\"S\" + str(arr.shape[-1]))\n return da.map_blocks(\n _numpy_char_to_bytes,\n arr,\n dtype=dtype,\n chunks=arr.chunks[:-1],\n drop_axis=[arr.ndim - 1],\n )\n else:\n return StackedBytesArray(arr)\n\n\ndef _numpy_char_to_bytes(arr):\n \"\"\"Like netCDF4.chartostring, but faster and more flexible.\n \"\"\"\n # based on: http://stackoverflow.com/a/10984878/809705\n arr = np.array(arr, copy=False, order=\"C\")\n dtype = \"S\" + str(arr.shape[-1])\n return arr.view(dtype).reshape(arr.shape[:-1])\n\n\nclass StackedBytesArray(indexing.ExplicitlyIndexedNDArrayMixin):\n \"\"\"Wrapper around array-like objects to create a new indexable object where\n values, when accessed, are automatically stacked along the last dimension.\n\n >>> StackedBytesArray(np.array(['a', 'b', 'c']))[:]\n array('abc',\n dtype='|S3')\n \"\"\"\n\n def __init__(self, array):\n \"\"\"\n Parameters\n ----------\n array : array-like\n Original array of values to wrap.\n \"\"\"\n if array.dtype != \"S1\":\n raise 
ValueError(\n \"can only use StackedBytesArray if argument has dtype='S1'\"\n )\n self.array = indexing.as_indexable(array)\n\n @property\n def dtype(self):\n return np.dtype(\"S\" + str(self.array.shape[-1]))\n\n @property\n def shape(self):\n return self.array.shape[:-1]\n\n def __repr__(self):\n return \"%s(%r)\" % (type(self).__name__, self.array)\n\n def __getitem__(self, key):\n # require slicing the last dimension completely\n key = type(key)(indexing.expanded_indexer(key.tuple, self.array.ndim))\n if key.tuple[-1] != slice(None):\n raise IndexError(\"too many indices\")\n return _numpy_char_to_bytes(self.array[key])\n"},{"className":"EncodedStringCoder","col":0,"comment":"Transforms between unicode strings and fixed-width UTF-8 bytes.","endLoc":77,"id":2662,"nodeType":"Class","startLoc":39,"text":"class EncodedStringCoder(VariableCoder):\n \"\"\"Transforms between unicode strings and fixed-width UTF-8 bytes.\"\"\"\n\n def __init__(self, allows_unicode=True):\n self.allows_unicode = allows_unicode\n\n def encode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_encoding(variable)\n\n contains_unicode = is_unicode_dtype(data.dtype)\n encode_as_char = encoding.get(\"dtype\") == \"S1\"\n\n if encode_as_char:\n del encoding[\"dtype\"] # no longer relevant\n\n if contains_unicode and (encode_as_char or not self.allows_unicode):\n if \"_FillValue\" in attrs:\n raise NotImplementedError(\n \"variable {!r} has a _FillValue specified, but \"\n \"_FillValue is not yet supported on unicode strings: \"\n \"https://github.com/pydata/xarray/issues/1647\".format(name)\n )\n\n string_encoding = encoding.pop(\"_Encoding\", \"utf-8\")\n safe_setitem(attrs, \"_Encoding\", string_encoding, name=name)\n # TODO: figure out how to handle this in a lazy way with dask\n data = encode_string_array(data, string_encoding)\n\n return Variable(dims, data, attrs, encoding)\n\n def decode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_decoding(variable)\n\n if \"_Encoding\" in attrs:\n string_encoding = pop_to(attrs, encoding, \"_Encoding\")\n func = partial(decode_bytes_array, encoding=string_encoding)\n data = lazy_elemwise_func(data, func, np.dtype(object))\n\n return Variable(dims, data, attrs, encoding)"},{"col":4,"comment":"null","endLoc":67,"header":"def encode(self, variable, name=None)","id":2663,"name":"encode","nodeType":"Function","startLoc":45,"text":"def encode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_encoding(variable)\n\n contains_unicode = is_unicode_dtype(data.dtype)\n encode_as_char = encoding.get(\"dtype\") == \"S1\"\n\n if encode_as_char:\n del encoding[\"dtype\"] # no longer relevant\n\n if contains_unicode and (encode_as_char or not self.allows_unicode):\n if \"_FillValue\" in attrs:\n raise NotImplementedError(\n \"variable {!r} has a _FillValue specified, but \"\n \"_FillValue is not yet supported on unicode strings: \"\n \"https://github.com/pydata/xarray/issues/1647\".format(name)\n )\n\n string_encoding = encoding.pop(\"_Encoding\", \"utf-8\")\n safe_setitem(attrs, \"_Encoding\", string_encoding, name=name)\n # TODO: figure out how to handle this in a lazy way with dask\n data = encode_string_array(data, string_encoding)\n\n return Variable(dims, data, attrs, encoding)"},{"col":4,"comment":"null","endLoc":88,"header":"def get_variables(self)","id":2664,"name":"get_variables","nodeType":"Function","startLoc":85,"text":"def get_variables(self):\n return FrozenDict(\n (k, self.open_store_variable(self.ds[k])) for k 
in self.ds.keys()\n )"},{"col":0,"comment":" First and last offsets should be calculated from the start day to fix\n an error cause by resampling across multiple days when a one day period is\n not a multiple of the frequency.\n See https://github.com/pandas-dev/pandas/issues/8683\n\n Parameters\n ----------\n first : cftime.datetime\n A datetime object representing the start of a CFTimeIndex range.\n last : cftime.datetime\n A datetime object representing the end of a CFTimeIndex range.\n offset : xarray.coding.cftime_offsets.BaseCFTimeOffset\n The offset object representing target conversion a.k.a. resampling\n frequency. Contains information on offset type (e.g. Day or 'D') and\n offset magnitude (e.g., n = 3).\n closed : 'left' or 'right', optional\n Which side of bin interval is closed. Defaults to 'right'.\n base : int, optional\n For frequencies that evenly subdivide 1 day, the \"origin\" of the\n aggregated intervals. For example, for '5min' frequency, base could\n range from 0 through 4. Defaults to 0.\n\n Returns\n -------\n fresult : cftime.datetime\n A datetime object representing the start of a date range that has been\n adjusted to fix resampling errors.\n lresult : cftime.datetime\n A datetime object representing the end of a date range that has been\n adjusted to fix resampling errors.\n ","endLoc":333,"header":"def _adjust_dates_anchored(first, last, offset, closed=\"right\", base=0)","id":2665,"name":"_adjust_dates_anchored","nodeType":"Function","startLoc":274,"text":"def _adjust_dates_anchored(first, last, offset, closed=\"right\", base=0):\n \"\"\" First and last offsets should be calculated from the start day to fix\n an error cause by resampling across multiple days when a one day period is\n not a multiple of the frequency.\n See https://github.com/pandas-dev/pandas/issues/8683\n\n Parameters\n ----------\n first : cftime.datetime\n A datetime object representing the start of a CFTimeIndex range.\n last : cftime.datetime\n A datetime object representing the end of a CFTimeIndex range.\n offset : xarray.coding.cftime_offsets.BaseCFTimeOffset\n The offset object representing target conversion a.k.a. resampling\n frequency. Contains information on offset type (e.g. Day or 'D') and\n offset magnitude (e.g., n = 3).\n closed : 'left' or 'right', optional\n Which side of bin interval is closed. Defaults to 'right'.\n base : int, optional\n For frequencies that evenly subdivide 1 day, the \"origin\" of the\n aggregated intervals. For example, for '5min' frequency, base could\n range from 0 through 4. 
Defaults to 0.\n\n Returns\n -------\n fresult : cftime.datetime\n A datetime object representing the start of a date range that has been\n adjusted to fix resampling errors.\n lresult : cftime.datetime\n A datetime object representing the end of a date range that has been\n adjusted to fix resampling errors.\n \"\"\"\n\n base = base % offset.n\n start_day = normalize_date(first)\n base_td = type(offset)(n=base).as_timedelta()\n start_day += base_td\n foffset = exact_cftime_datetime_difference(start_day, first) % offset.as_timedelta()\n loffset = exact_cftime_datetime_difference(start_day, last) % offset.as_timedelta()\n if closed == \"right\":\n if foffset.total_seconds() > 0:\n fresult = first - foffset\n else:\n fresult = first - offset.as_timedelta()\n\n if loffset.total_seconds() > 0:\n lresult = last + (offset.as_timedelta() - loffset)\n else:\n lresult = last\n else:\n if foffset.total_seconds() > 0:\n fresult = first - foffset\n else:\n fresult = first\n\n if loffset.total_seconds() > 0:\n lresult = last + (offset.as_timedelta() - loffset)\n else:\n lresult = last + offset.as_timedelta()\n return fresult, lresult"},{"col":0,"comment":"Exact computation of b - a\n\n Assumes:\n\n a = a_0 + a_m\n b = b_0 + b_m\n\n Here a_0, and b_0 represent the input dates rounded\n down to the nearest second, and a_m, and b_m represent\n the remaining microseconds associated with date a and\n date b.\n\n We can then express the value of b - a as:\n\n b - a = (b_0 + b_m) - (a_0 + a_m) = b_0 - a_0 + b_m - a_m\n\n By construction, we know that b_0 - a_0 must be a round number\n of seconds. Therefore we can take the result of b_0 - a_0 using\n ordinary cftime.datetime arithmetic and round to the nearest\n second. b_m - a_m is the remainder, in microseconds, and we\n can simply add this to the rounded timedelta.\n\n Parameters\n ----------\n a : cftime.datetime\n Input datetime\n b : cftime.datetime\n Input datetime\n\n Returns\n -------\n datetime.timedelta\n ","endLoc":373,"header":"def exact_cftime_datetime_difference(a, b)","id":2666,"name":"exact_cftime_datetime_difference","nodeType":"Function","startLoc":336,"text":"def exact_cftime_datetime_difference(a, b):\n \"\"\"Exact computation of b - a\n\n Assumes:\n\n a = a_0 + a_m\n b = b_0 + b_m\n\n Here a_0, and b_0 represent the input dates rounded\n down to the nearest second, and a_m, and b_m represent\n the remaining microseconds associated with date a and\n date b.\n\n We can then express the value of b - a as:\n\n b - a = (b_0 + b_m) - (a_0 + a_m) = b_0 - a_0 + b_m - a_m\n\n By construction, we know that b_0 - a_0 must be a round number\n of seconds. Therefore we can take the result of b_0 - a_0 using\n ordinary cftime.datetime arithmetic and round to the nearest\n second. 
b_m - a_m is the remainder, in microseconds, and we\n can simply add this to the rounded timedelta.\n\n Parameters\n ----------\n a : cftime.datetime\n Input datetime\n b : cftime.datetime\n Input datetime\n\n Returns\n -------\n datetime.timedelta\n \"\"\"\n seconds = b.replace(microsecond=0) - a.replace(microsecond=0)\n seconds = int(round(seconds.total_seconds()))\n microseconds = b.microsecond - a.microsecond\n return datetime.timedelta(seconds=seconds, microseconds=microseconds)"},{"col":4,"comment":"null","endLoc":91,"header":"def get_attrs(self)","id":2667,"name":"get_attrs","nodeType":"Function","startLoc":90,"text":"def get_attrs(self):\n return Frozen(_fix_attributes(self.ds.attributes))"},{"col":4,"comment":"Reduce the items in this group by applying `func` along some\n dimension(s).\n\n Parameters\n ----------\n func : function\n Function which can be called in the form\n `func(x, **kwargs)` to return the result of collapsing an\n np.ndarray over an the rolling dimension.\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : DataArray\n Array with summarized data.\n\n Examples\n --------\n >>> da = DataArray(np.arange(8).reshape(2, 4), dims=('a', 'b'))\n >>>\n >>> rolling = da.rolling(b=3)\n >>> rolling.construct('window_dim')\n \n array([[[np.nan, np.nan, 0], [np.nan, 0, 1], [0, 1, 2], [1, 2, 3]],\n [[np.nan, np.nan, 4], [np.nan, 4, 5], [4, 5, 6], [5, 6, 7]]])\n Dimensions without coordinates: a, b, window_dim\n >>>\n >>> rolling.reduce(np.sum)\n \n array([[nan, nan, 3., 6.],\n [nan, nan, 15., 18.]])\n Dimensions without coordinates: a, b\n >>>\n >>> rolling = da.rolling(b=3, min_periods=1)\n >>> rolling.reduce(np.nansum)\n \n array([[ 0., 1., 3., 6.],\n [ 4., 9., 15., 18.]])\n ","endLoc":289,"header":"def reduce(self, func, **kwargs)","id":2668,"name":"reduce","nodeType":"Function","startLoc":242,"text":"def reduce(self, func, **kwargs):\n \"\"\"Reduce the items in this group by applying `func` along some\n dimension(s).\n\n Parameters\n ----------\n func : function\n Function which can be called in the form\n `func(x, **kwargs)` to return the result of collapsing an\n np.ndarray over an the rolling dimension.\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : DataArray\n Array with summarized data.\n\n Examples\n --------\n >>> da = DataArray(np.arange(8).reshape(2, 4), dims=('a', 'b'))\n >>>\n >>> rolling = da.rolling(b=3)\n >>> rolling.construct('window_dim')\n \n array([[[np.nan, np.nan, 0], [np.nan, 0, 1], [0, 1, 2], [1, 2, 3]],\n [[np.nan, np.nan, 4], [np.nan, 4, 5], [4, 5, 6], [5, 6, 7]]])\n Dimensions without coordinates: a, b, window_dim\n >>>\n >>> rolling.reduce(np.sum)\n \n array([[nan, nan, 3., 6.],\n [nan, nan, 15., 18.]])\n Dimensions without coordinates: a, b\n >>>\n >>> rolling = da.rolling(b=3, min_periods=1)\n >>> rolling.reduce(np.nansum)\n \n array([[ 0., 1., 3., 6.],\n [ 4., 9., 15., 18.]])\n \"\"\"\n rolling_dim = utils.get_temp_dimname(self.obj.dims, \"_rolling_dim\")\n windows = self.construct(rolling_dim)\n result = windows.reduce(func, dim=rolling_dim, **kwargs)\n\n # Find valid windows based on count.\n counts = self._counts()\n return result.where(counts >= self._min_periods)"},{"col":4,"comment":"null","endLoc":94,"header":"def get_dimensions(self)","id":2669,"name":"get_dimensions","nodeType":"Function","startLoc":93,"text":"def get_dimensions(self):\n return Frozen(self.ds.dimensions)"},{"col":0,"comment":"null","endLoc":90,"header":"def 
encode_string_array(string_array, encoding=\"utf-8\")","id":2670,"name":"encode_string_array","nodeType":"Function","startLoc":87,"text":"def encode_string_array(string_array, encoding=\"utf-8\"):\n string_array = np.asarray(string_array)\n encoded = [x.encode(encoding) for x in string_array.ravel()]\n return np.array(encoded, dtype=bytes).reshape(string_array.shape)"},{"attributeType":"null","col":8,"comment":"null","endLoc":72,"id":2671,"name":"ds","nodeType":"Attribute","startLoc":72,"text":"self.ds"},{"className":"ZarrStore","col":0,"comment":"Store for reading and writing data via zarr\n ","endLoc":454,"id":2672,"nodeType":"Class","startLoc":230,"text":"class ZarrStore(AbstractWritableDataStore):\n \"\"\"Store for reading and writing data via zarr\n \"\"\"\n\n __slots__ = (\n \"append_dim\",\n \"ds\",\n \"_consolidate_on_close\",\n \"_group\",\n \"_read_only\",\n \"_synchronizer\",\n )\n\n @classmethod\n def open_group(\n cls,\n store,\n mode=\"r\",\n synchronizer=None,\n group=None,\n consolidated=False,\n consolidate_on_close=False,\n ):\n import zarr\n\n open_kwargs = dict(mode=mode, synchronizer=synchronizer, path=group)\n if consolidated:\n # TODO: an option to pass the metadata_key keyword\n zarr_group = zarr.open_consolidated(store, **open_kwargs)\n else:\n zarr_group = zarr.open_group(store, **open_kwargs)\n return cls(zarr_group, consolidate_on_close)\n\n def __init__(self, zarr_group, consolidate_on_close=False):\n self.ds = zarr_group\n self._read_only = self.ds.read_only\n self._synchronizer = self.ds.synchronizer\n self._group = self.ds.path\n self._consolidate_on_close = consolidate_on_close\n self.append_dim = None\n\n def open_store_variable(self, name, zarr_array):\n data = indexing.LazilyOuterIndexedArray(ZarrArrayWrapper(name, self))\n dimensions, attributes = _get_zarr_dims_and_attrs(zarr_array, _DIMENSION_KEY)\n attributes = dict(attributes)\n encoding = {\n \"chunks\": zarr_array.chunks,\n \"compressor\": zarr_array.compressor,\n \"filters\": zarr_array.filters,\n }\n # _FillValue needs to be in attributes, not encoding, so it will get\n # picked up by decode_cf\n if getattr(zarr_array, \"fill_value\") is not None:\n attributes[\"_FillValue\"] = zarr_array.fill_value\n\n return Variable(dimensions, data, attributes, encoding)\n\n def get_variables(self):\n return FrozenDict(\n (k, self.open_store_variable(k, v)) for k, v in self.ds.arrays()\n )\n\n def get_attrs(self):\n attributes = dict(self.ds.attrs.asdict())\n return attributes\n\n def get_dimensions(self):\n dimensions = {}\n for k, v in self.ds.arrays():\n try:\n for d, s in zip(v.attrs[_DIMENSION_KEY], v.shape):\n if d in dimensions and dimensions[d] != s:\n raise ValueError(\n \"found conflicting lengths for dimension %s \"\n \"(%d != %d)\" % (d, s, dimensions[d])\n )\n dimensions[d] = s\n\n except KeyError:\n raise KeyError(\n \"Zarr object is missing the attribute `%s`, \"\n \"which is required for xarray to determine \"\n \"variable dimensions.\" % (_DIMENSION_KEY)\n )\n return dimensions\n\n def set_dimensions(self, variables, unlimited_dims=None):\n if unlimited_dims is not None:\n raise NotImplementedError(\n \"Zarr backend doesn't know how to handle unlimited dimensions\"\n )\n\n def set_attributes(self, attributes):\n self.ds.attrs.put(attributes)\n\n def encode_variable(self, variable):\n variable = encode_zarr_variable(variable)\n return variable\n\n def encode_attribute(self, a):\n return _encode_zarr_attr_value(a)\n\n def store(\n self,\n variables,\n attributes,\n 
check_encoding_set=frozenset(),\n writer=None,\n unlimited_dims=None,\n ):\n \"\"\"\n Top level method for putting data on this store, this method:\n - encodes variables/attributes\n - sets dimensions\n - sets variables\n\n Parameters\n ----------\n variables : dict-like\n Dictionary of key/value (variable name / xr.Variable) pairs\n attributes : dict-like\n Dictionary of key/value (attribute name / attribute) pairs\n check_encoding_set : list-like\n List of variables that should be checked for invalid encoding\n values\n writer : ArrayWriter\n unlimited_dims : list-like\n List of dimension names that should be treated as unlimited\n dimensions.\n dimension on which the zarray will be appended\n only needed in append mode\n \"\"\"\n\n existing_variables = {\n vn for vn in variables if _encode_variable_name(vn) in self.ds\n }\n new_variables = set(variables) - existing_variables\n variables_without_encoding = {vn: variables[vn] for vn in new_variables}\n variables_encoded, attributes = self.encode(\n variables_without_encoding, attributes\n )\n\n if len(existing_variables) > 0:\n # there are variables to append\n # their encoding must be the same as in the store\n ds = open_zarr(self.ds.store, chunks=None)\n variables_with_encoding = {}\n for vn in existing_variables:\n variables_with_encoding[vn] = variables[vn].copy(deep=False)\n variables_with_encoding[vn].encoding = ds[vn].encoding\n variables_with_encoding, _ = self.encode(variables_with_encoding, {})\n variables_encoded.update(variables_with_encoding)\n\n self.set_attributes(attributes)\n self.set_dimensions(variables_encoded, unlimited_dims=unlimited_dims)\n self.set_variables(\n variables_encoded, check_encoding_set, writer, unlimited_dims=unlimited_dims\n )\n\n def sync(self):\n pass\n\n def set_variables(self, variables, check_encoding_set, writer, unlimited_dims=None):\n \"\"\"\n This provides a centralized method to set the variables on the data\n store.\n\n Parameters\n ----------\n variables : dict-like\n Dictionary of key/value (variable name / xr.Variable) pairs\n check_encoding_set : list-like\n List of variables that should be checked for invalid encoding\n values\n writer :\n unlimited_dims : list-like\n List of dimension names that should be treated as unlimited\n dimensions.\n \"\"\"\n\n for vn, v in variables.items():\n name = _encode_variable_name(vn)\n check = vn in check_encoding_set\n attrs = v.attrs.copy()\n dims = v.dims\n dtype = v.dtype\n shape = v.shape\n\n fill_value = attrs.pop(\"_FillValue\", None)\n if v.encoding == {\"_FillValue\": None} and fill_value is None:\n v.encoding = {}\n if name in self.ds:\n zarr_array = self.ds[name]\n if self.append_dim in dims:\n # this is the DataArray that has append_dim as a\n # dimension\n append_axis = dims.index(self.append_dim)\n new_shape = list(zarr_array.shape)\n new_shape[append_axis] += v.shape[append_axis]\n new_region = [slice(None)] * len(new_shape)\n new_region[append_axis] = slice(zarr_array.shape[append_axis], None)\n zarr_array.resize(new_shape)\n writer.add(v.data, zarr_array, region=tuple(new_region))\n else:\n # new variable\n encoding = _extract_zarr_variable_encoding(v, raise_on_invalid=check)\n encoded_attrs = {}\n # the magic for storing the hidden dimension data\n encoded_attrs[_DIMENSION_KEY] = dims\n for k2, v2 in attrs.items():\n encoded_attrs[k2] = self.encode_attribute(v2)\n\n if coding.strings.check_vlen_dtype(dtype) == str:\n dtype = str\n zarr_array = self.ds.create(\n name, shape=shape, dtype=dtype, fill_value=fill_value, **encoding\n )\n 
zarr_array.attrs.put(encoded_attrs)\n writer.add(v.data, zarr_array)\n\n def close(self):\n if self._consolidate_on_close:\n import zarr\n\n zarr.consolidate_metadata(self.ds.store)"},{"col":4,"comment":"null","endLoc":285,"header":"def open_store_variable(self, name, zarr_array)","id":2673,"name":"open_store_variable","nodeType":"Function","startLoc":271,"text":"def open_store_variable(self, name, zarr_array):\n data = indexing.LazilyOuterIndexedArray(ZarrArrayWrapper(name, self))\n dimensions, attributes = _get_zarr_dims_and_attrs(zarr_array, _DIMENSION_KEY)\n attributes = dict(attributes)\n encoding = {\n \"chunks\": zarr_array.chunks,\n \"compressor\": zarr_array.compressor,\n \"filters\": zarr_array.filters,\n }\n # _FillValue needs to be in attributes, not encoding, so it will get\n # picked up by decode_cf\n if getattr(zarr_array, \"fill_value\") is not None:\n attributes[\"_FillValue\"] = zarr_array.fill_value\n\n return Variable(dimensions, data, attributes, encoding)"},{"col":4,"comment":"null","endLoc":40,"header":"def __init__(self, variable_name, datastore)","id":2674,"name":"__init__","nodeType":"Function","startLoc":32,"text":"def __init__(self, variable_name, datastore):\n self.datastore = datastore\n self.variable_name = variable_name\n\n array = self.get_array()\n self.shape = array.shape\n\n dtype = array.dtype\n self.dtype = dtype"},{"col":4,"comment":"null","endLoc":43,"header":"def get_array(self)","id":2675,"name":"get_array","nodeType":"Function","startLoc":42,"text":"def get_array(self):\n return self.datastore.ds[self.variable_name]"},{"col":0,"comment":"null","endLoc":169,"header":"def _get_zarr_dims_and_attrs(zarr_obj, dimension_key)","id":2676,"name":"_get_zarr_dims_and_attrs","nodeType":"Function","startLoc":156,"text":"def _get_zarr_dims_and_attrs(zarr_obj, dimension_key):\n # Zarr arrays do not have dimenions. To get around this problem, we add\n # an attribute that specifies the dimension. We have to hide this attribute\n # when we send the attributes to the user.\n # zarr_obj can be either a zarr group or zarr array\n try:\n dimensions = zarr_obj.attrs[dimension_key]\n except KeyError:\n raise KeyError(\n \"Zarr object is missing the attribute `%s`, which is \"\n \"required for xarray to determine variable dimensions.\" % (dimension_key)\n )\n attributes = HiddenKeyDict(zarr_obj.attrs, [dimension_key])\n return dimensions, attributes"},{"col":4,"comment":" Number of non-nan entries in each rolling window. ","endLoc":305,"header":"def _counts(self)","id":2677,"name":"_counts","nodeType":"Function","startLoc":291,"text":"def _counts(self):\n \"\"\" Number of non-nan entries in each rolling window. 
\"\"\"\n\n rolling_dim = utils.get_temp_dimname(self.obj.dims, \"_rolling_dim\")\n # We use False as the fill_value instead of np.nan, since boolean\n # array is faster to be reduced than object array.\n # The use of skipna==False is also faster since it does not need to\n # copy the strided array.\n counts = (\n self.obj.notnull()\n .rolling(center=self.center, **{self.dim: self.window})\n .construct(rolling_dim, fill_value=False)\n .sum(dim=rolling_dim, skipna=False)\n )\n return counts"},{"col":4,"comment":"null","endLoc":348,"header":"def _bottleneck_reduce(self, func, **kwargs)","id":2678,"name":"_bottleneck_reduce","nodeType":"Function","startLoc":307,"text":"def _bottleneck_reduce(self, func, **kwargs):\n from .dataarray import DataArray\n\n # bottleneck doesn't allow min_count to be 0, although it should\n # work the same as if min_count = 1\n if self.min_periods is not None and self.min_periods == 0:\n min_count = 1\n else:\n min_count = self.min_periods\n\n axis = self.obj.get_axis_num(self.dim)\n\n padded = self.obj.variable\n if self.center:\n if isinstance(padded.data, dask_array_type):\n # Workaround to make the padded chunk size is larger than\n # self.window-1\n shift = -(self.window + 1) // 2\n offset = (self.window - 1) // 2\n valid = (slice(None),) * axis + (\n slice(offset, offset + self.obj.shape[axis]),\n )\n else:\n shift = (-self.window // 2) + 1\n valid = (slice(None),) * axis + (slice(-shift, None),)\n padded = padded.pad_with_fill_value({self.dim: (0, -shift)})\n\n if isinstance(padded.data, dask_array_type):\n raise AssertionError(\"should not be reachable\")\n values = dask_rolling_wrapper(\n func, padded.data, window=self.window, min_count=min_count, axis=axis\n )\n else:\n values = func(\n padded.data, window=self.window, min_count=min_count, axis=axis\n )\n\n if self.center:\n values = values[valid]\n result = DataArray(values, self.obj.coords)\n\n return result"},{"col":4,"comment":"null","endLoc":290,"header":"def get_variables(self)","id":2679,"name":"get_variables","nodeType":"Function","startLoc":287,"text":"def get_variables(self):\n return FrozenDict(\n (k, self.open_store_variable(k, v)) for k, v in self.ds.arrays()\n )"},{"col":4,"comment":"null","endLoc":294,"header":"def get_attrs(self)","id":2680,"name":"get_attrs","nodeType":"Function","startLoc":292,"text":"def get_attrs(self):\n attributes = dict(self.ds.attrs.asdict())\n return attributes"},{"col":4,"comment":"null","endLoc":314,"header":"def get_dimensions(self)","id":2681,"name":"get_dimensions","nodeType":"Function","startLoc":296,"text":"def get_dimensions(self):\n dimensions = {}\n for k, v in self.ds.arrays():\n try:\n for d, s in zip(v.attrs[_DIMENSION_KEY], v.shape):\n if d in dimensions and dimensions[d] != s:\n raise ValueError(\n \"found conflicting lengths for dimension %s \"\n \"(%d != %d)\" % (d, s, dimensions[d])\n )\n dimensions[d] = s\n\n except KeyError:\n raise KeyError(\n \"Zarr object is missing the attribute `%s`, \"\n \"which is required for xarray to determine \"\n \"variable dimensions.\" % (_DIMENSION_KEY)\n )\n return dimensions"},{"col":4,"comment":"null","endLoc":214,"header":"def __getitem__(self, key: Hashable) -> \"DataArray\"","id":2682,"name":"__getitem__","nodeType":"Function","startLoc":211,"text":"def __getitem__(self, key: Hashable) -> \"DataArray\":\n if key in self._data.data_vars:\n raise KeyError(key)\n return cast(\"DataArray\", self._data[key])"},{"col":4,"comment":"Convert these coordinates into a new Dataset\n ","endLoc":219,"header":"def 
to_dataset(self) -> \"Dataset\"","id":2683,"name":"to_dataset","nodeType":"Function","startLoc":216,"text":"def to_dataset(self) -> \"Dataset\":\n \"\"\"Convert these coordinates into a new Dataset\n \"\"\"\n return self._data._copy_listed(self._names)"},{"col":4,"comment":"null","endLoc":320,"header":"def set_dimensions(self, variables, unlimited_dims=None)","id":2684,"name":"set_dimensions","nodeType":"Function","startLoc":316,"text":"def set_dimensions(self, variables, unlimited_dims=None):\n if unlimited_dims is not None:\n raise NotImplementedError(\n \"Zarr backend doesn't know how to handle unlimited dimensions\"\n )"},{"col":4,"comment":"null","endLoc":323,"header":"def set_attributes(self, attributes)","id":2685,"name":"set_attributes","nodeType":"Function","startLoc":322,"text":"def set_attributes(self, attributes):\n self.ds.attrs.put(attributes)"},{"col":4,"comment":"null","endLoc":327,"header":"def encode_variable(self, variable)","id":2686,"name":"encode_variable","nodeType":"Function","startLoc":325,"text":"def encode_variable(self, variable):\n variable = encode_zarr_variable(variable)\n return variable"},{"col":0,"comment":"\n Converts an Variable into an Variable which follows some\n of the CF conventions:\n\n - Nans are masked using _FillValue (or the deprecated missing_value)\n - Rescaling via: scale_factor and add_offset\n - datetimes are converted to the CF 'units since time' format\n - dtype encodings are enforced.\n\n Parameters\n ----------\n var : xarray.Variable\n A variable holding un-encoded data.\n\n Returns\n -------\n out : xarray.Variable\n A variable which has been encoded as described above.\n ","endLoc":227,"header":"def encode_zarr_variable(var, needs_copy=True, name=None)","id":2687,"name":"encode_zarr_variable","nodeType":"Function","startLoc":197,"text":"def encode_zarr_variable(var, needs_copy=True, name=None):\n \"\"\"\n Converts an Variable into an Variable which follows some\n of the CF conventions:\n\n - Nans are masked using _FillValue (or the deprecated missing_value)\n - Rescaling via: scale_factor and add_offset\n - datetimes are converted to the CF 'units since time' format\n - dtype encodings are enforced.\n\n Parameters\n ----------\n var : xarray.Variable\n A variable holding un-encoded data.\n\n Returns\n -------\n out : xarray.Variable\n A variable which has been encoded as described above.\n \"\"\"\n\n var = conventions.encode_cf_variable(var, name=name)\n\n # zarr allows unicode, but not variable-length strings, so it's both\n # simpler and more compact to always encode as UTF-8 explicitly.\n # TODO: allow toggling this explicitly via dtype in encoding.\n coder = coding.strings.EncodedStringCoder(allows_unicode=True)\n var = coder.encode(var, name=name)\n var = coding.strings.ensure_fixed_length_bytes(var)\n\n return var"},{"col":4,"comment":"null","endLoc":330,"header":"def encode_attribute(self, a)","id":2688,"name":"encode_attribute","nodeType":"Function","startLoc":329,"text":"def encode_attribute(self, a):\n return _encode_zarr_attr_value(a)"},{"col":0,"comment":"null","endLoc":26,"header":"def _encode_zarr_attr_value(value)","id":2689,"name":"_encode_zarr_attr_value","nodeType":"Function","startLoc":18,"text":"def _encode_zarr_attr_value(value):\n if isinstance(value, np.ndarray):\n encoded = value.tolist()\n # this checks if it's a scalar number\n elif isinstance(value, np.generic):\n encoded = value.item()\n else:\n encoded = value\n return encoded"},{"col":4,"comment":"\n Top level method for putting data on this store, this 
method:\n - encodes variables/attributes\n - sets dimensions\n - sets variables\n\n Parameters\n ----------\n variables : dict-like\n Dictionary of key/value (variable name / xr.Variable) pairs\n attributes : dict-like\n Dictionary of key/value (attribute name / attribute) pairs\n check_encoding_set : list-like\n List of variables that should be checked for invalid encoding\n values\n writer : ArrayWriter\n unlimited_dims : list-like\n List of dimension names that should be treated as unlimited\n dimensions.\n dimension on which the zarray will be appended\n only needed in append mode\n ","endLoc":387,"header":"def store(\n self,\n variables,\n attributes,\n check_encoding_set=frozenset(),\n writer=None,\n unlimited_dims=None,\n )","id":2690,"name":"store","nodeType":"Function","startLoc":332,"text":"def store(\n self,\n variables,\n attributes,\n check_encoding_set=frozenset(),\n writer=None,\n unlimited_dims=None,\n ):\n \"\"\"\n Top level method for putting data on this store, this method:\n - encodes variables/attributes\n - sets dimensions\n - sets variables\n\n Parameters\n ----------\n variables : dict-like\n Dictionary of key/value (variable name / xr.Variable) pairs\n attributes : dict-like\n Dictionary of key/value (attribute name / attribute) pairs\n check_encoding_set : list-like\n List of variables that should be checked for invalid encoding\n values\n writer : ArrayWriter\n unlimited_dims : list-like\n List of dimension names that should be treated as unlimited\n dimensions.\n dimension on which the zarray will be appended\n only needed in append mode\n \"\"\"\n\n existing_variables = {\n vn for vn in variables if _encode_variable_name(vn) in self.ds\n }\n new_variables = set(variables) - existing_variables\n variables_without_encoding = {vn: variables[vn] for vn in new_variables}\n variables_encoded, attributes = self.encode(\n variables_without_encoding, attributes\n )\n\n if len(existing_variables) > 0:\n # there are variables to append\n # their encoding must be the same as in the store\n ds = open_zarr(self.ds.store, chunks=None)\n variables_with_encoding = {}\n for vn in existing_variables:\n variables_with_encoding[vn] = variables[vn].copy(deep=False)\n variables_with_encoding[vn].encoding = ds[vn].encoding\n variables_with_encoding, _ = self.encode(variables_with_encoding, {})\n variables_encoded.update(variables_with_encoding)\n\n self.set_attributes(attributes)\n self.set_dimensions(variables_encoded, unlimited_dims=unlimited_dims)\n self.set_variables(\n variables_encoded, check_encoding_set, writer, unlimited_dims=unlimited_dims\n )"},{"attributeType":"null","col":8,"comment":"null","endLoc":43,"id":2691,"name":"allows_unicode","nodeType":"Attribute","startLoc":43,"text":"self.allows_unicode"},{"className":"CharacterArrayCoder","col":0,"comment":"Transforms between arrays containing bytes and character arrays.","endLoc":125,"id":2692,"nodeType":"Class","startLoc":102,"text":"class CharacterArrayCoder(VariableCoder):\n \"\"\"Transforms between arrays containing bytes and character arrays.\"\"\"\n\n def encode(self, variable, name=None):\n variable = ensure_fixed_length_bytes(variable)\n\n dims, data, attrs, encoding = unpack_for_encoding(variable)\n if data.dtype.kind == \"S\" and encoding.get(\"dtype\") is not str:\n data = bytes_to_char(data)\n if \"char_dim_name\" in encoding.keys():\n char_dim_name = encoding.pop(\"char_dim_name\")\n else:\n char_dim_name = \"string%s\" % data.shape[-1]\n dims = dims + (char_dim_name,)\n return Variable(dims, data, attrs, 
encoding)\n\n def decode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_decoding(variable)\n\n if data.dtype == \"S1\" and dims:\n encoding[\"char_dim_name\"] = dims[-1]\n dims = dims[:-1]\n data = char_to_bytes(data)\n return Variable(dims, data, attrs, encoding)"},{"col":4,"comment":"null","endLoc":116,"header":"def encode(self, variable, name=None)","id":2693,"name":"encode","nodeType":"Function","startLoc":105,"text":"def encode(self, variable, name=None):\n variable = ensure_fixed_length_bytes(variable)\n\n dims, data, attrs, encoding = unpack_for_encoding(variable)\n if data.dtype.kind == \"S\" and encoding.get(\"dtype\") is not str:\n data = bytes_to_char(data)\n if \"char_dim_name\" in encoding.keys():\n char_dim_name = encoding.pop(\"char_dim_name\")\n else:\n char_dim_name = \"string%s\" % data.shape[-1]\n dims = dims + (char_dim_name,)\n return Variable(dims, data, attrs, encoding)"},{"col":4,"comment":"null","endLoc":244,"header":"def _update_coords(\n self, coords: Dict[Hashable, Variable], indexes: Mapping[Hashable, pd.Index]\n ) -> None","id":2694,"name":"_update_coords","nodeType":"Function","startLoc":221,"text":"def _update_coords(\n self, coords: Dict[Hashable, Variable], indexes: Mapping[Hashable, pd.Index]\n ) -> None:\n from .dataset import calculate_dimensions\n\n variables = self._data._variables.copy()\n variables.update(coords)\n\n # check for inconsistent state *before* modifying anything in-place\n dims = calculate_dimensions(variables)\n new_coord_names = set(coords)\n for dim, size in dims.items():\n if dim in variables:\n new_coord_names.add(dim)\n\n self._data._variables = variables\n self._data._coord_names.update(new_coord_names)\n self._data._dims = dims\n\n # TODO(shoyer): once ._indexes is always populated by a dict, modify\n # it to update inplace instead.\n original_indexes = dict(self._data.indexes)\n original_indexes.update(indexes)\n self._data._indexes = original_indexes"},{"col":0,"comment":"Ensure that a variable with vlen bytes is converted to fixed width.","endLoc":99,"header":"def ensure_fixed_length_bytes(var)","id":2695,"name":"ensure_fixed_length_bytes","nodeType":"Function","startLoc":93,"text":"def ensure_fixed_length_bytes(var):\n \"\"\"Ensure that a variable with vlen bytes is converted to fixed width.\"\"\"\n dims, data, attrs, encoding = unpack_for_encoding(var)\n if check_vlen_dtype(data.dtype) == bytes:\n # TODO: figure out how to handle this with dask\n data = np.asarray(data, dtype=np.string_)\n return Variable(dims, data, attrs, encoding)"},{"col":4,"comment":"null","endLoc":361,"header":"def _numpy_or_bottleneck_reduce(\n self, array_agg_func, bottleneck_move_func, **kwargs\n )","id":2696,"name":"_numpy_or_bottleneck_reduce","nodeType":"Function","startLoc":350,"text":"def _numpy_or_bottleneck_reduce(\n self, array_agg_func, bottleneck_move_func, **kwargs\n ):\n if bottleneck_move_func is not None and not isinstance(\n self.obj.data, dask_array_type\n ):\n # TODO: renable bottleneck with dask after the issues\n # underlying https://github.com/pydata/xarray/issues/2940 are\n # fixed.\n return self._bottleneck_reduce(bottleneck_move_func, **kwargs)\n else:\n return self.reduce(array_agg_func, **kwargs)"},{"col":4,"comment":"\n This provides a centralized method to set the variables on the data\n store.\n\n Parameters\n ----------\n variables : dict-like\n Dictionary of key/value (variable name / xr.Variable) pairs\n check_encoding_set : list-like\n List of variables that should be checked for invalid 
encoding\n values\n writer :\n unlimited_dims : list-like\n List of dimension names that should be treated as unlimited\n dimensions.\n ","endLoc":448,"header":"def set_variables(self, variables, check_encoding_set, writer, unlimited_dims=None)","id":2697,"name":"set_variables","nodeType":"Function","startLoc":392,"text":"def set_variables(self, variables, check_encoding_set, writer, unlimited_dims=None):\n \"\"\"\n This provides a centralized method to set the variables on the data\n store.\n\n Parameters\n ----------\n variables : dict-like\n Dictionary of key/value (variable name / xr.Variable) pairs\n check_encoding_set : list-like\n List of variables that should be checked for invalid encoding\n values\n writer :\n unlimited_dims : list-like\n List of dimension names that should be treated as unlimited\n dimensions.\n \"\"\"\n\n for vn, v in variables.items():\n name = _encode_variable_name(vn)\n check = vn in check_encoding_set\n attrs = v.attrs.copy()\n dims = v.dims\n dtype = v.dtype\n shape = v.shape\n\n fill_value = attrs.pop(\"_FillValue\", None)\n if v.encoding == {\"_FillValue\": None} and fill_value is None:\n v.encoding = {}\n if name in self.ds:\n zarr_array = self.ds[name]\n if self.append_dim in dims:\n # this is the DataArray that has append_dim as a\n # dimension\n append_axis = dims.index(self.append_dim)\n new_shape = list(zarr_array.shape)\n new_shape[append_axis] += v.shape[append_axis]\n new_region = [slice(None)] * len(new_shape)\n new_region[append_axis] = slice(zarr_array.shape[append_axis], None)\n zarr_array.resize(new_shape)\n writer.add(v.data, zarr_array, region=tuple(new_region))\n else:\n # new variable\n encoding = _extract_zarr_variable_encoding(v, raise_on_invalid=check)\n encoded_attrs = {}\n # the magic for storing the hidden dimension data\n encoded_attrs[_DIMENSION_KEY] = dims\n for k2, v2 in attrs.items():\n encoded_attrs[k2] = self.encode_attribute(v2)\n\n if coding.strings.check_vlen_dtype(dtype) == str:\n dtype = str\n zarr_array = self.ds.create(\n name, shape=shape, dtype=dtype, fill_value=fill_value, **encoding\n )\n zarr_array.attrs.put(encoded_attrs)\n writer.add(v.data, zarr_array)"},{"attributeType":"null","col":4,"comment":"null","endLoc":142,"id":2698,"name":"__slots__","nodeType":"Attribute","startLoc":142,"text":"__slots__"},{"attributeType":"null","col":8,"comment":"null","endLoc":180,"id":2699,"name":"window_labels","nodeType":"Attribute","startLoc":180,"text":"self.window_labels"},{"col":0,"comment":"This is required for determining the bin edges resampling with\n daily frequencies greater than one day, month end, and year end\n frequencies.\n\n Consider the following example. Let's say you want to downsample the\n time series with the following coordinates to month end frequency:\n\n CFTimeIndex([2000-01-01 12:00:00, 2000-01-31 12:00:00,\n 2000-02-01 12:00:00], dtype='object')\n\n Without this adjustment, _get_time_bins with month-end frequency will\n return the following index for the bin edges (default closed='right' and\n label='right' in this case):\n\n CFTimeIndex([1999-12-31 00:00:00, 2000-01-31 00:00:00,\n 2000-02-29 00:00:00], dtype='object')\n\n If 2000-01-31 is used as a bound for a bin, the value on\n 2000-01-31T12:00:00 (at noon on January 31st), will not be included in the\n month of January. To account for this, pandas adds a day minus one worth\n of microseconds to the bin edges generated by cftime range, so that we do\n bin the value at noon on January 31st in the January bin. 
This results in\n an index with bin edges like the following:\n\n CFTimeIndex([1999-12-31 23:59:59, 2000-01-31 23:59:59,\n 2000-02-29 23:59:59], dtype='object')\n\n The labels are still:\n\n CFTimeIndex([2000-01-31 00:00:00, 2000-02-29 00:00:00], dtype='object')\n\n This is also required for daily frequencies longer than one day and\n year-end frequencies.\n ","endLoc":223,"header":"def _adjust_bin_edges(datetime_bins, offset, closed, index, labels)","id":2700,"name":"_adjust_bin_edges","nodeType":"Function","startLoc":178,"text":"def _adjust_bin_edges(datetime_bins, offset, closed, index, labels):\n \"\"\"This is required for determining the bin edges resampling with\n daily frequencies greater than one day, month end, and year end\n frequencies.\n\n Consider the following example. Let's say you want to downsample the\n time series with the following coordinates to month end frequency:\n\n CFTimeIndex([2000-01-01 12:00:00, 2000-01-31 12:00:00,\n 2000-02-01 12:00:00], dtype='object')\n\n Without this adjustment, _get_time_bins with month-end frequency will\n return the following index for the bin edges (default closed='right' and\n label='right' in this case):\n\n CFTimeIndex([1999-12-31 00:00:00, 2000-01-31 00:00:00,\n 2000-02-29 00:00:00], dtype='object')\n\n If 2000-01-31 is used as a bound for a bin, the value on\n 2000-01-31T12:00:00 (at noon on January 31st), will not be included in the\n month of January. To account for this, pandas adds a day minus one worth\n of microseconds to the bin edges generated by cftime range, so that we do\n bin the value at noon on January 31st in the January bin. This results in\n an index with bin edges like the following:\n\n CFTimeIndex([1999-12-31 23:59:59, 2000-01-31 23:59:59,\n 2000-02-29 23:59:59], dtype='object')\n\n The labels are still:\n\n CFTimeIndex([2000-01-31 00:00:00, 2000-02-29 00:00:00], dtype='object')\n\n This is also required for daily frequencies longer than one day and\n year-end frequencies.\n \"\"\"\n is_super_daily = isinstance(offset, (MonthEnd, QuarterEnd, YearEnd)) or (\n isinstance(offset, Day) and offset.n > 1\n )\n if is_super_daily:\n if closed == \"right\":\n datetime_bins = datetime_bins + datetime.timedelta(days=1, microseconds=-1)\n if datetime_bins[-2] > index.max():\n datetime_bins = datetime_bins[:-1]\n labels = labels[:-1]\n\n return datetime_bins, labels"},{"className":"DatasetRolling","col":0,"comment":"null","endLoc":490,"id":2701,"nodeType":"Class","startLoc":364,"text":"class DatasetRolling(Rolling):\n __slots__ = (\"rollings\",)\n\n def __init__(self, obj, windows, min_periods=None, center=False):\n \"\"\"\n Moving window object for Dataset.\n You should use Dataset.rolling() method to construct this object\n instead of the class constructor.\n\n Parameters\n ----------\n obj : Dataset\n Object to window.\n windows : A mapping from a dimension name to window size\n dim : str\n Name of the dimension to create the rolling iterator\n along (e.g., `time`).\n window : int\n Size of the moving window.\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA). 
The default, None, is equivalent to\n setting min_periods equal to the size of the window.\n center : boolean, default False\n Set the labels at the center of the window.\n\n Returns\n -------\n rolling : type of input argument\n\n See Also\n --------\n Dataset.rolling\n DataArray.rolling\n Dataset.groupby\n DataArray.groupby\n \"\"\"\n super().__init__(obj, windows, min_periods, center)\n if self.dim not in self.obj.dims:\n raise KeyError(self.dim)\n # Keep each Rolling object as a dictionary\n self.rollings = {}\n for key, da in self.obj.data_vars.items():\n # keeps rollings only for the dataset depending on slf.dim\n if self.dim in da.dims:\n self.rollings[key] = DataArrayRolling(da, windows, min_periods, center)\n\n def _dataset_implementation(self, func, **kwargs):\n from .dataset import Dataset\n\n reduced = {}\n for key, da in self.obj.data_vars.items():\n if self.dim in da.dims:\n reduced[key] = func(self.rollings[key], **kwargs)\n else:\n reduced[key] = self.obj[key]\n return Dataset(reduced, coords=self.obj.coords)\n\n def reduce(self, func, **kwargs):\n \"\"\"Reduce the items in this group by applying `func` along some\n dimension(s).\n\n Parameters\n ----------\n func : function\n Function which can be called in the form\n `func(x, **kwargs)` to return the result of collapsing an\n np.ndarray over an the rolling dimension.\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : DataArray\n Array with summarized data.\n \"\"\"\n return self._dataset_implementation(\n functools.partial(DataArrayRolling.reduce, func=func), **kwargs\n )\n\n def _counts(self):\n return self._dataset_implementation(DataArrayRolling._counts)\n\n def _numpy_or_bottleneck_reduce(\n self, array_agg_func, bottleneck_move_func, **kwargs\n ):\n return self._dataset_implementation(\n functools.partial(\n DataArrayRolling._numpy_or_bottleneck_reduce,\n array_agg_func=array_agg_func,\n bottleneck_move_func=bottleneck_move_func,\n ),\n **kwargs\n )\n\n def construct(self, window_dim, stride=1, fill_value=dtypes.NA):\n \"\"\"\n Convert this rolling object to xr.Dataset,\n where the window dimension is stacked as a new dimension\n\n Parameters\n ----------\n window_dim: str\n New name of the window dimension.\n stride: integer, optional\n size of stride for the rolling window.\n fill_value: optional. 
Default dtypes.NA\n Filling value to match the dimension size.\n\n Returns\n -------\n Dataset with variables converted from rolling object.\n \"\"\"\n\n from .dataset import Dataset\n\n dataset = {}\n for key, da in self.obj.data_vars.items():\n if self.dim in da.dims:\n dataset[key] = self.rollings[key].construct(\n window_dim, fill_value=fill_value\n )\n else:\n dataset[key] = da\n return Dataset(dataset, coords=self.obj.coords).isel(\n **{self.dim: slice(None, None, stride)}\n )"},{"col":0,"comment":"Convert numpy/dask arrays from fixed width bytes to characters.","endLoc":144,"header":"def bytes_to_char(arr)","id":2702,"name":"bytes_to_char","nodeType":"Function","startLoc":128,"text":"def bytes_to_char(arr):\n \"\"\"Convert numpy/dask arrays from fixed width bytes to characters.\"\"\"\n if arr.dtype.kind != \"S\":\n raise ValueError(\"argument must have a fixed-width bytes dtype\")\n\n if isinstance(arr, dask_array_type):\n import dask.array as da\n\n return da.map_blocks(\n _numpy_bytes_to_char,\n arr,\n dtype=\"S1\",\n chunks=arr.chunks + ((arr.dtype.itemsize,)),\n new_axis=[arr.ndim],\n )\n else:\n return _numpy_bytes_to_char(arr)"},{"col":4,"comment":"\n Moving window object for Dataset.\n You should use Dataset.rolling() method to construct this object\n instead of the class constructor.\n\n Parameters\n ----------\n obj : Dataset\n Object to window.\n windows : A mapping from a dimension name to window size\n dim : str\n Name of the dimension to create the rolling iterator\n along (e.g., `time`).\n window : int\n Size of the moving window.\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA). The default, None, is equivalent to\n setting min_periods equal to the size of the window.\n center : boolean, default False\n Set the labels at the center of the window.\n\n Returns\n -------\n rolling : type of input argument\n\n See Also\n --------\n Dataset.rolling\n DataArray.rolling\n Dataset.groupby\n DataArray.groupby\n ","endLoc":409,"header":"def __init__(self, obj, windows, min_periods=None, center=False)","id":2703,"name":"__init__","nodeType":"Function","startLoc":367,"text":"def __init__(self, obj, windows, min_periods=None, center=False):\n \"\"\"\n Moving window object for Dataset.\n You should use Dataset.rolling() method to construct this object\n instead of the class constructor.\n\n Parameters\n ----------\n obj : Dataset\n Object to window.\n windows : A mapping from a dimension name to window size\n dim : str\n Name of the dimension to create the rolling iterator\n along (e.g., `time`).\n window : int\n Size of the moving window.\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA). 
The default, None, is equivalent to\n setting min_periods equal to the size of the window.\n center : boolean, default False\n Set the labels at the center of the window.\n\n Returns\n -------\n rolling : type of input argument\n\n See Also\n --------\n Dataset.rolling\n DataArray.rolling\n Dataset.groupby\n DataArray.groupby\n \"\"\"\n super().__init__(obj, windows, min_periods, center)\n if self.dim not in self.obj.dims:\n raise KeyError(self.dim)\n # Keep each Rolling object as a dictionary\n self.rollings = {}\n for key, da in self.obj.data_vars.items():\n # keeps rollings only for the dataset depending on slf.dim\n if self.dim in da.dims:\n self.rollings[key] = DataArrayRolling(da, windows, min_periods, center)"},{"fileName":"combine.py","filePath":"xarray/core","id":2704,"nodeType":"File","text":"import itertools\nimport warnings\nfrom collections import Counter\nfrom textwrap import dedent\n\nimport pandas as pd\n\nfrom . import dtypes\nfrom .concat import concat\nfrom .dataarray import DataArray\nfrom .dataset import Dataset\nfrom .merge import merge\n\n\ndef _infer_concat_order_from_positions(datasets):\n combined_ids = dict(_infer_tile_ids_from_nested_list(datasets, ()))\n return combined_ids\n\n\ndef _infer_tile_ids_from_nested_list(entry, current_pos):\n \"\"\"\n Given a list of lists (of lists...) of objects, returns a iterator\n which returns a tuple containing the index of each object in the nested\n list structure as the key, and the object. This can then be called by the\n dict constructor to create a dictionary of the objects organised by their\n position in the original nested list.\n\n Recursively traverses the given structure, while keeping track of the\n current position. Should work for any type of object which isn't a list.\n\n Parameters\n ----------\n entry : list[list[obj, obj, ...], ...]\n List of lists of arbitrary depth, containing objects in the order\n they are to be concatenated.\n\n Returns\n -------\n combined_tile_ids : dict[tuple(int, ...), obj]\n \"\"\"\n\n if isinstance(entry, list):\n for i, item in enumerate(entry):\n yield from _infer_tile_ids_from_nested_list(item, current_pos + (i,))\n else:\n yield current_pos, entry\n\n\ndef _infer_concat_order_from_coords(datasets):\n\n concat_dims = []\n tile_ids = [() for ds in datasets]\n\n # All datasets have same variables because they've been grouped as such\n ds0 = datasets[0]\n for dim in ds0.dims:\n\n # Check if dim is a coordinate dimension\n if dim in ds0:\n\n # Need to read coordinate values to do ordering\n indexes = [ds.indexes.get(dim) for ds in datasets]\n if any(index is None for index in indexes):\n raise ValueError(\n \"Every dimension needs a coordinate for \"\n \"inferring concatenation order\"\n )\n\n # If dimension coordinate values are same on every dataset then\n # should be leaving this dimension alone (it's just a \"bystander\")\n if not all(index.equals(indexes[0]) for index in indexes[1:]):\n\n # Infer order datasets should be arranged in along this dim\n concat_dims.append(dim)\n\n if all(index.is_monotonic_increasing for index in indexes):\n ascending = True\n elif all(index.is_monotonic_decreasing for index in indexes):\n ascending = False\n else:\n raise ValueError(\n \"Coordinate variable {} is neither \"\n \"monotonically increasing nor \"\n \"monotonically decreasing on all datasets\".format(dim)\n )\n\n # Assume that any two datasets whose coord along dim starts\n # with the same value have the same coord values throughout.\n if any(index.size == 0 for index in indexes):\n 
raise ValueError(\"Cannot handle size zero dimensions\")\n first_items = pd.Index([index.take([0]) for index in indexes])\n\n # Sort datasets along dim\n # We want rank but with identical elements given identical\n # position indices - they should be concatenated along another\n # dimension, not along this one\n series = first_items.to_series()\n rank = series.rank(method=\"dense\", ascending=ascending)\n order = rank.astype(int).values - 1\n\n # Append positions along extra dimension to structure which\n # encodes the multi-dimensional concatentation order\n tile_ids = [\n tile_id + (position,) for tile_id, position in zip(tile_ids, order)\n ]\n\n if len(datasets) > 1 and not concat_dims:\n raise ValueError(\n \"Could not find any dimension coordinates to use to \"\n \"order the datasets for concatenation\"\n )\n\n combined_ids = dict(zip(tile_ids, datasets))\n\n return combined_ids, concat_dims\n\n\ndef _check_shape_tile_ids(combined_tile_ids):\n tile_ids = combined_tile_ids.keys()\n\n # Check all tuples are the same length\n # i.e. check that all lists are nested to the same depth\n nesting_depths = [len(tile_id) for tile_id in tile_ids]\n if not nesting_depths:\n nesting_depths = [0]\n if not set(nesting_depths) == {nesting_depths[0]}:\n raise ValueError(\n \"The supplied objects do not form a hypercube because\"\n \" sub-lists do not have consistent depths\"\n )\n\n # Check all lists along one dimension are same length\n for dim in range(nesting_depths[0]):\n indices_along_dim = [tile_id[dim] for tile_id in tile_ids]\n occurrences = Counter(indices_along_dim)\n if len(set(occurrences.values())) != 1:\n raise ValueError(\n \"The supplied objects do not form a hypercube \"\n \"because sub-lists do not have consistent \"\n \"lengths along dimension\" + str(dim)\n )\n\n\ndef _combine_nd(\n combined_ids,\n concat_dims,\n data_vars=\"all\",\n coords=\"different\",\n compat=\"no_conflicts\",\n fill_value=dtypes.NA,\n join=\"outer\",\n):\n \"\"\"\n Combines an N-dimensional structure of datasets into one by applying a\n series of either concat and merge operations along each dimension.\n\n No checks are performed on the consistency of the datasets, concat_dims or\n tile_IDs, because it is assumed that this has already been done.\n\n Parameters\n ----------\n combined_ids : Dict[Tuple[int, ...]], xarray.Dataset]\n Structure containing all datasets to be concatenated with \"tile_IDs\" as\n keys, which specify position within the desired final combined result.\n concat_dims : sequence of str\n The dimensions along which the datasets should be concatenated. Must be\n in order, and the length must match the length of the tuples used as\n keys in combined_ids. If the string is a dimension name then concat\n along that dimension, if it is None then merge.\n\n Returns\n -------\n combined_ds : xarray.Dataset\n \"\"\"\n\n example_tile_id = next(iter(combined_ids.keys()))\n\n n_dims = len(example_tile_id)\n if len(concat_dims) != n_dims:\n raise ValueError(\n \"concat_dims has length {} but the datasets \"\n \"passed are nested in a {}-dimensional structure\".format(\n len(concat_dims), n_dims\n )\n )\n\n # Each iteration of this loop reduces the length of the tile_ids tuples\n # by one. 
It always combines along the first dimension, removing the first\n # element of the tuple\n for concat_dim in concat_dims:\n combined_ids = _combine_all_along_first_dim(\n combined_ids,\n dim=concat_dim,\n data_vars=data_vars,\n coords=coords,\n compat=compat,\n fill_value=fill_value,\n join=join,\n )\n (combined_ds,) = combined_ids.values()\n return combined_ds\n\n\ndef _combine_all_along_first_dim(\n combined_ids, dim, data_vars, coords, compat, fill_value=dtypes.NA, join=\"outer\"\n):\n\n # Group into lines of datasets which must be combined along dim\n # need to sort by _new_tile_id first for groupby to work\n # TODO: is the sorted need?\n combined_ids = dict(sorted(combined_ids.items(), key=_new_tile_id))\n grouped = itertools.groupby(combined_ids.items(), key=_new_tile_id)\n\n # Combine all of these datasets along dim\n new_combined_ids = {}\n for new_id, group in grouped:\n combined_ids = dict(sorted(group))\n datasets = combined_ids.values()\n new_combined_ids[new_id] = _combine_1d(\n datasets, dim, compat, data_vars, coords, fill_value, join\n )\n return new_combined_ids\n\n\ndef _combine_1d(\n datasets,\n concat_dim,\n compat=\"no_conflicts\",\n data_vars=\"all\",\n coords=\"different\",\n fill_value=dtypes.NA,\n join=\"outer\",\n):\n \"\"\"\n Applies either concat or merge to 1D list of datasets depending on value\n of concat_dim\n \"\"\"\n\n if concat_dim is not None:\n try:\n combined = concat(\n datasets,\n dim=concat_dim,\n data_vars=data_vars,\n coords=coords,\n compat=compat,\n fill_value=fill_value,\n join=join,\n )\n except ValueError as err:\n if \"encountered unexpected variable\" in str(err):\n raise ValueError(\n \"These objects cannot be combined using only \"\n \"xarray.combine_nested, instead either use \"\n \"xarray.combine_by_coords, or do it manually \"\n \"with xarray.concat, xarray.merge and \"\n \"xarray.align\"\n )\n else:\n raise\n else:\n combined = merge(datasets, compat=compat, fill_value=fill_value, join=join)\n\n return combined\n\n\ndef _new_tile_id(single_id_ds_pair):\n tile_id, ds = single_id_ds_pair\n return tile_id[1:]\n\n\ndef _nested_combine(\n datasets,\n concat_dims,\n compat,\n data_vars,\n coords,\n ids,\n fill_value=dtypes.NA,\n join=\"outer\",\n):\n\n if len(datasets) == 0:\n return Dataset()\n\n # Arrange datasets for concatenation\n # Use information from the shape of the user input\n if not ids:\n # Determine tile_IDs by structure of input in N-D\n # (i.e. ordering in list-of-lists)\n combined_ids = _infer_concat_order_from_positions(datasets)\n else:\n # Already sorted so just use the ids already passed\n combined_ids = dict(zip(ids, datasets))\n\n # Check that the inferred shape is combinable\n _check_shape_tile_ids(combined_ids)\n\n # Apply series of concatenate or merge operations along each dimension\n combined = _combine_nd(\n combined_ids,\n concat_dims,\n compat=compat,\n data_vars=data_vars,\n coords=coords,\n fill_value=fill_value,\n join=join,\n )\n return combined\n\n\ndef combine_nested(\n datasets,\n concat_dim,\n compat=\"no_conflicts\",\n data_vars=\"all\",\n coords=\"different\",\n fill_value=dtypes.NA,\n join=\"outer\",\n):\n \"\"\"\n Explicitly combine an N-dimensional grid of datasets into one by using a\n succession of concat and merge operations along each dimension of the grid.\n\n Does not sort the supplied datasets under any circumstances, so the\n datasets must be passed in the order you wish them to be concatenated. 
It\n does align coordinates, but different variables on datasets can cause it to\n fail under some scenarios. In complex cases, you may need to clean up your\n data and use concat/merge explicitly.\n\n To concatenate along multiple dimensions the datasets must be passed as a\n nested list-of-lists, with a depth equal to the length of ``concat_dims``.\n ``manual_combine`` will concatenate along the top-level list first.\n\n Useful for combining datasets from a set of nested directories, or for\n collecting the output of a simulation parallelized along multiple\n dimensions.\n\n Parameters\n ----------\n datasets : list or nested list of xarray.Dataset objects.\n Dataset objects to combine.\n If concatenation or merging along more than one dimension is desired,\n then datasets must be supplied in a nested list-of-lists.\n concat_dim : str, or list of str, DataArray, Index or None\n Dimensions along which to concatenate variables, as used by\n :py:func:`xarray.concat`.\n Set ``concat_dim=[..., None, ...]`` explicitly to disable concatenation\n and merge instead along a particular dimension.\n The position of ``None`` in the list specifies the dimension of the\n nested-list input along which to merge.\n Must be the same length as the depth of the list passed to\n ``datasets``.\n compat : {'identical', 'equals', 'broadcast_equals',\n 'no_conflicts', 'override'}, optional\n String indicating how to compare variables of the same name for\n potential merge conflicts:\n\n - 'broadcast_equals': all values must be equal when variables are\n broadcast against each other to ensure common dimensions.\n - 'equals': all values and dimensions must be the same.\n - 'identical': all values, dimensions and attributes must be the\n same.\n - 'no_conflicts': only values which are not null in both datasets\n must be equal. The returned dataset then contains the combination\n of all non-null values.\n - 'override': skip comparing and pick variable from first dataset\n data_vars : {'minimal', 'different', 'all' or list of str}, optional\n Details are in the documentation of concat\n coords : {'minimal', 'different', 'all' or list of str}, optional\n Details are in the documentation of concat\n fill_value : scalar, optional\n Value to use for newly missing values\n join : {'outer', 'inner', 'left', 'right', 'exact'}, optional\n String indicating how to combine differing indexes\n (excluding concat_dim) in objects\n\n - 'outer': use the union of object indexes\n - 'inner': use the intersection of object indexes\n - 'left': use indexes from the first object with each dimension\n - 'right': use indexes from the last object with each dimension\n - 'exact': instead of aligning, raise `ValueError` when indexes to be\n aligned are not equal\n - 'override': if indexes are of same size, rewrite indexes to be\n those of the first object with that dimension. Indexes for the same\n dimension must have the same size in all objects.\n\n Returns\n -------\n combined : xarray.Dataset\n\n Examples\n --------\n\n A common task is collecting data from a parallelized simulation in which\n each process wrote out to a separate file. 
A domain which was decomposed\n into 4 parts, 2 each along both the x and y axes, requires organising the\n datasets into a doubly-nested list, e.g:\n\n >>> x1y1\n \n Dimensions: (x: 2, y: 2)\n Dimensions without coordinates: x, y\n Data variables:\n temperature (x, y) float64 11.04 23.57 20.77 ...\n precipitation (x, y) float64 5.904 2.453 3.404 ...\n\n >>> ds_grid = [[x1y1, x1y2], [x2y1, x2y2]]\n >>> combined = xr.combine_nested(ds_grid, concat_dim=['x', 'y'])\n \n Dimensions: (x: 4, y: 4)\n Dimensions without coordinates: x, y\n Data variables:\n temperature (x, y) float64 11.04 23.57 20.77 ...\n precipitation (x, y) float64 5.904 2.453 3.404 ...\n\n ``manual_combine`` can also be used to explicitly merge datasets with\n different variables. For example if we have 4 datasets, which are divided\n along two times, and contain two different variables, we can pass ``None``\n to ``concat_dim`` to specify the dimension of the nested list over which\n we wish to use ``merge`` instead of ``concat``:\n\n >>> t1temp\n \n Dimensions: (t: 5)\n Dimensions without coordinates: t\n Data variables:\n temperature (t) float64 11.04 23.57 20.77 ...\n\n >>> t1precip\n \n Dimensions: (t: 5)\n Dimensions without coordinates: t\n Data variables:\n precipitation (t) float64 5.904 2.453 3.404 ...\n\n >>> ds_grid = [[t1temp, t1precip], [t2temp, t2precip]]\n >>> combined = xr.combine_nested(ds_grid, concat_dim=['t', None])\n \n Dimensions: (t: 10)\n Dimensions without coordinates: t\n Data variables:\n temperature (t) float64 11.04 23.57 20.77 ...\n precipitation (t) float64 5.904 2.453 3.404 ...\n\n See also\n --------\n concat\n merge\n auto_combine\n \"\"\"\n if isinstance(concat_dim, (str, DataArray)) or concat_dim is None:\n concat_dim = [concat_dim]\n\n # The IDs argument tells _manual_combine that datasets aren't yet sorted\n return _nested_combine(\n datasets,\n concat_dims=concat_dim,\n compat=compat,\n data_vars=data_vars,\n coords=coords,\n ids=False,\n fill_value=fill_value,\n join=join,\n )\n\n\ndef vars_as_keys(ds):\n return tuple(sorted(ds))\n\n\ndef combine_by_coords(\n datasets,\n compat=\"no_conflicts\",\n data_vars=\"all\",\n coords=\"different\",\n fill_value=dtypes.NA,\n join=\"outer\",\n):\n \"\"\"\n Attempt to auto-magically combine the given datasets into one by using\n dimension coordinates.\n\n This method attempts to combine a group of datasets along any number of\n dimensions into a single entity by inspecting coords and metadata and using\n a combination of concat and merge.\n\n Will attempt to order the datasets such that the values in their dimension\n coordinates are monotonic along all dimensions. If it cannot determine the\n order in which to concatenate the datasets, it will raise a ValueError.\n Non-coordinate dimensions will be ignored, as will any coordinate\n dimensions which do not vary between each dataset.\n\n Aligns coordinates, but different variables on datasets can cause it\n to fail under some scenarios. In complex cases, you may need to clean up\n your data and use concat/merge explicitly (also see `manual_combine`).\n\n Works well if, for example, you have N years of data and M data variables,\n and each combination of a distinct time period and set of data variables is\n saved as its own dataset. 
Also useful for if you have a simulation which is\n parallelized in multiple dimensions, but has global coordinates saved in\n each file specifying the positions of points within the global domain.\n\n Parameters\n ----------\n datasets : sequence of xarray.Dataset\n Dataset objects to combine.\n compat : {'identical', 'equals', 'broadcast_equals', 'no_conflicts', 'override'}, optional\n String indicating how to compare variables of the same name for\n potential conflicts:\n\n - 'broadcast_equals': all values must be equal when variables are\n broadcast against each other to ensure common dimensions.\n - 'equals': all values and dimensions must be the same.\n - 'identical': all values, dimensions and attributes must be the\n same.\n - 'no_conflicts': only values which are not null in both datasets\n must be equal. The returned dataset then contains the combination\n of all non-null values.\n - 'override': skip comparing and pick variable from first dataset\n data_vars : {'minimal', 'different', 'all' or list of str}, optional\n These data variables will be concatenated together:\n\n * 'minimal': Only data variables in which the dimension already\n appears are included.\n * 'different': Data variables which are not equal (ignoring\n attributes) across all datasets are also concatenated (as well as\n all for which dimension already appears). Beware: this option may\n load the data payload of data variables into memory if they are not\n already loaded.\n * 'all': All data variables will be concatenated.\n * list of str: The listed data variables will be concatenated, in\n addition to the 'minimal' data variables.\n If objects are DataArrays, `data_vars` must be 'all'.\n coords : {'minimal', 'different', 'all' or list of str}, optional\n As per the 'data_vars' kwarg, but for coordinate variables.\n fill_value : scalar, optional\n Value to use for newly missing values\n join : {'outer', 'inner', 'left', 'right', 'exact'}, optional\n String indicating how to combine differing indexes\n (excluding concat_dim) in objects\n\n - 'outer': use the union of object indexes\n - 'inner': use the intersection of object indexes\n - 'left': use indexes from the first object with each dimension\n - 'right': use indexes from the last object with each dimension\n - 'exact': instead of aligning, raise `ValueError` when indexes to be\n aligned are not equal\n - 'override': if indexes are of same size, rewrite indexes to be\n those of the first object with that dimension. Indexes for the same\n dimension must have the same size in all objects.\n\n Returns\n -------\n combined : xarray.Dataset\n\n See also\n --------\n concat\n merge\n combine_nested\n\n Examples\n --------\n\n Combining two datasets using their common dimension coordinates. Notice\n they are concatenated based on the values in their dimension coordinates,\n not on their position in the list passed to `combine_by_coords`.\n\n >>> import numpy as np\n >>> import xarray as xr\n\n >>> x1 = xr.Dataset(\n ... {\n ... \"temperature\": ((\"y\", \"x\"), 20 * np.random.rand(6).reshape(2, 3)),\n ... \"precipitation\": ((\"y\", \"x\"), np.random.rand(6).reshape(2, 3)),\n ... },\n ... coords={\"y\": [0, 1], \"x\": [10, 20, 30]},\n ... )\n >>> x2 = xr.Dataset(\n ... {\n ... \"temperature\": ((\"y\", \"x\"), 20 * np.random.rand(6).reshape(2, 3)),\n ... \"precipitation\": ((\"y\", \"x\"), np.random.rand(6).reshape(2, 3)),\n ... },\n ... coords={\"y\": [2, 3], \"x\": [10, 20, 30]},\n ... )\n >>> x3 = xr.Dataset(\n ... {\n ... 
\"temperature\": ((\"y\", \"x\"), 20 * np.random.rand(6).reshape(2, 3)),\n ... \"precipitation\": ((\"y\", \"x\"), np.random.rand(6).reshape(2, 3)),\n ... },\n ... coords={\"y\": [2, 3], \"x\": [40, 50, 60]},\n ... )\n\n >>> x1\n \n Dimensions: (x: 3, y: 2)\n Coordinates:\n * y (y) int64 0 1\n * x (x) int64 10 20 30\n Data variables:\n temperature (y, x) float64 1.654 10.63 7.015 2.543 13.93 9.436\n precipitation (y, x) float64 0.2136 0.9974 0.7603 0.4679 0.3115 0.945\n\n >>> x2\n \n Dimensions: (x: 3, y: 2)\n Coordinates:\n * y (y) int64 2 3\n * x (x) int64 10 20 30\n Data variables:\n temperature (y, x) float64 9.341 0.1251 6.269 7.709 8.82 2.316\n precipitation (y, x) float64 0.1728 0.1178 0.03018 0.6509 0.06938 0.3792\n\n >>> x3\n \n Dimensions: (x: 3, y: 2)\n Coordinates:\n * y (y) int64 2 3\n * x (x) int64 40 50 60\n Data variables:\n temperature (y, x) float64 2.789 2.446 6.551 12.46 2.22 15.96\n precipitation (y, x) float64 0.4804 0.1902 0.2457 0.6125 0.4654 0.5953\n\n >>> xr.combine_by_coords([x2, x1])\n \n Dimensions: (x: 3, y: 4)\n Coordinates:\n * x (x) int64 10 20 30\n * y (y) int64 0 1 2 3\n Data variables:\n temperature (y, x) float64 1.654 10.63 7.015 2.543 ... 7.709 8.82 2.316\n precipitation (y, x) float64 0.2136 0.9974 0.7603 ... 0.6509 0.06938 0.3792\n\n >>> xr.combine_by_coords([x3, x1])\n \n Dimensions: (x: 6, y: 4)\n Coordinates:\n * x (x) int64 10 20 30 40 50 60\n * y (y) int64 0 1 2 3\n Data variables:\n temperature (y, x) float64 1.654 10.63 7.015 nan ... nan 12.46 2.22 15.96\n precipitation (y, x) float64 0.2136 0.9974 0.7603 ... 0.6125 0.4654 0.5953\n\n >>> xr.combine_by_coords([x3, x1], join='override')\n \n Dimensions: (x: 3, y: 4)\n Coordinates:\n * x (x) int64 10 20 30\n * y (y) int64 0 1 2 3\n Data variables:\n temperature (y, x) float64 1.654 10.63 7.015 2.543 ... 12.46 2.22 15.96\n precipitation (y, x) float64 0.2136 0.9974 0.7603 ... 
0.6125 0.4654 0.5953\n\n \"\"\"\n\n # Group by data vars\n sorted_datasets = sorted(datasets, key=vars_as_keys)\n grouped_by_vars = itertools.groupby(sorted_datasets, key=vars_as_keys)\n\n # Perform the multidimensional combine on each group of data variables\n # before merging back together\n concatenated_grouped_by_data_vars = []\n for vars, datasets_with_same_vars in grouped_by_vars:\n combined_ids, concat_dims = _infer_concat_order_from_coords(\n list(datasets_with_same_vars)\n )\n\n _check_shape_tile_ids(combined_ids)\n\n # Concatenate along all of concat_dims one by one to create single ds\n concatenated = _combine_nd(\n combined_ids,\n concat_dims=concat_dims,\n data_vars=data_vars,\n coords=coords,\n compat=compat,\n fill_value=fill_value,\n join=join,\n )\n\n # Check the overall coordinates are monotonically increasing\n for dim in concat_dims:\n indexes = concatenated.indexes.get(dim)\n if not (indexes.is_monotonic_increasing or indexes.is_monotonic_decreasing):\n raise ValueError(\n \"Resulting object does not have monotonic\"\n \" global indexes along dimension {}\".format(dim)\n )\n concatenated_grouped_by_data_vars.append(concatenated)\n\n return merge(\n concatenated_grouped_by_data_vars,\n compat=compat,\n fill_value=fill_value,\n join=join,\n )\n\n\n# Everything beyond here is only needed until the deprecation cycle in #2616\n# is completed\n\n\n_CONCAT_DIM_DEFAULT = \"__infer_concat_dim__\"\n\n\ndef auto_combine(\n datasets,\n concat_dim=\"_not_supplied\",\n compat=\"no_conflicts\",\n data_vars=\"all\",\n coords=\"different\",\n fill_value=dtypes.NA,\n join=\"outer\",\n from_openmfds=False,\n):\n \"\"\"\n Attempt to auto-magically combine the given datasets into one.\n\n This entire function is deprecated in favour of ``combine_nested`` and\n ``combine_by_coords``.\n\n This method attempts to combine a list of datasets into a single entity by\n inspecting metadata and using a combination of concat and merge.\n It does not concatenate along more than one dimension or sort data under\n any circumstances. It does align coordinates, but different variables on\n datasets can cause it to fail under some scenarios. In complex cases, you\n may need to clean up your data and use ``concat``/``merge`` explicitly.\n ``auto_combine`` works well if you have N years of data and M data\n variables, and each combination of a distinct time period and set of data\n variables is saved its own dataset.\n\n Parameters\n ----------\n datasets : sequence of xarray.Dataset\n Dataset objects to merge.\n concat_dim : str or DataArray or Index, optional\n Dimension along which to concatenate variables, as used by\n :py:func:`xarray.concat`. You only need to provide this argument if\n the dimension along which you want to concatenate is not a dimension\n in the original datasets, e.g., if you want to stack a collection of\n 2D arrays along a third dimension.\n By default, xarray attempts to infer this argument by examining\n component files. 
Set ``concat_dim=None`` explicitly to disable\n concatenation.\n compat : {'identical', 'equals', 'broadcast_equals',\n 'no_conflicts', 'override'}, optional\n String indicating how to compare variables of the same name for\n potential conflicts:\n - 'broadcast_equals': all values must be equal when variables are\n broadcast against each other to ensure common dimensions.\n - 'equals': all values and dimensions must be the same.\n - 'identical': all values, dimensions and attributes must be the\n same.\n - 'no_conflicts': only values which are not null in both datasets\n must be equal. The returned dataset then contains the combination\n of all non-null values.\n - 'override': skip comparing and pick variable from first dataset\n data_vars : {'minimal', 'different', 'all' or list of str}, optional\n Details are in the documentation of concat\n coords : {'minimal', 'different', 'all' o list of str}, optional\n Details are in the documentation of concat\n fill_value : scalar, optional\n Value to use for newly missing values\n join : {'outer', 'inner', 'left', 'right', 'exact'}, optional\n String indicating how to combine differing indexes\n (excluding concat_dim) in objects\n\n - 'outer': use the union of object indexes\n - 'inner': use the intersection of object indexes\n - 'left': use indexes from the first object with each dimension\n - 'right': use indexes from the last object with each dimension\n - 'exact': instead of aligning, raise `ValueError` when indexes to be\n aligned are not equal\n - 'override': if indexes are of same size, rewrite indexes to be\n those of the first object with that dimension. Indexes for the same\n dimension must have the same size in all objects.\n\n Returns\n -------\n combined : xarray.Dataset\n\n See also\n --------\n concat\n Dataset.merge\n \"\"\"\n\n if not from_openmfds:\n basic_msg = dedent(\n \"\"\"\\\n In xarray version 0.15 `auto_combine` will be deprecated. See\n http://xarray.pydata.org/en/stable/combining.html#combining-multi\"\"\"\n )\n warnings.warn(basic_msg, FutureWarning, stacklevel=2)\n\n if concat_dim == \"_not_supplied\":\n concat_dim = _CONCAT_DIM_DEFAULT\n message = \"\"\n else:\n message = dedent(\n \"\"\"\\\n Also `open_mfdataset` will no longer accept a `concat_dim` argument.\n To get equivalent behaviour from now on please use the new\n `combine_nested` function instead (or the `combine='nested'` option to\n `open_mfdataset`).\"\"\"\n )\n\n if _dimension_coords_exist(datasets):\n message += dedent(\n \"\"\"\\\n The datasets supplied have global dimension coordinates. You may want\n to use the new `combine_by_coords` function (or the\n `combine='by_coords'` option to `open_mfdataset`) to order the datasets\n before concatenation. Alternatively, to continue concatenating based\n on the order the datasets are supplied in future, please use the new\n `combine_nested` function (or the `combine='nested'` option to\n open_mfdataset).\"\"\"\n )\n else:\n message += dedent(\n \"\"\"\\\n The datasets supplied do not have global dimension coordinates. In\n future, to continue concatenating without supplying dimension\n coordinates, please use the new `combine_nested` function (or the\n `combine='nested'` option to open_mfdataset.\"\"\"\n )\n\n if _requires_concat_and_merge(datasets):\n manual_dims = [concat_dim].append(None)\n message += dedent(\n \"\"\"\\\n The datasets supplied require both concatenation and merging. 
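The deprecation messages above steer users from ``auto_combine`` toward ``combine_nested`` and ``combine_by_coords``. A hedged usage sketch of the two replacements, assuming a list of datasets that tile a single ``time`` dimension:

import numpy as np
import xarray as xr

parts = [
    xr.Dataset({"t2m": ("time", np.zeros(2))}, coords={"time": [0, 1]}),
    xr.Dataset({"t2m": ("time", np.ones(2))}, coords={"time": [2, 3]}),
]

# order taken from the list you supply (the "nested" replacement):
nested = xr.combine_nested(parts, concat_dim="time")

# order inferred from the global "time" coordinate values:
by_coords = xr.combine_by_coords(parts)

# here both give time = [0, 1, 2, 3]
assert nested.identical(by_coords)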
From\n xarray version 0.15 this will operation will require either using the\n new `combine_nested` function (or the `combine='nested'` option to\n open_mfdataset), with a nested list structure such that you can combine\n along the dimensions {}. Alternatively if your datasets have global\n dimension coordinates then you can use the new `combine_by_coords`\n function.\"\"\".format(\n manual_dims\n )\n )\n\n warnings.warn(message, FutureWarning, stacklevel=2)\n\n return _old_auto_combine(\n datasets,\n concat_dim=concat_dim,\n compat=compat,\n data_vars=data_vars,\n coords=coords,\n fill_value=fill_value,\n join=join,\n )\n\n\ndef _dimension_coords_exist(datasets):\n \"\"\"\n Check if the datasets have consistent global dimension coordinates\n which would in future be used by `auto_combine` for concatenation ordering.\n \"\"\"\n\n # Group by data vars\n sorted_datasets = sorted(datasets, key=vars_as_keys)\n grouped_by_vars = itertools.groupby(sorted_datasets, key=vars_as_keys)\n\n # Simulates performing the multidimensional combine on each group of data\n # variables before merging back together\n try:\n for vars, datasets_with_same_vars in grouped_by_vars:\n _infer_concat_order_from_coords(list(datasets_with_same_vars))\n return True\n except ValueError:\n # ValueError means datasets don't have global dimension coordinates\n # Or something else went wrong in trying to determine them\n return False\n\n\ndef _requires_concat_and_merge(datasets):\n \"\"\"\n Check if the datasets require the use of both xarray.concat and\n xarray.merge, which in future might require the user to use\n `manual_combine` instead.\n \"\"\"\n # Group by data vars\n sorted_datasets = sorted(datasets, key=vars_as_keys)\n grouped_by_vars = itertools.groupby(sorted_datasets, key=vars_as_keys)\n\n return len(list(grouped_by_vars)) > 1\n\n\ndef _old_auto_combine(\n datasets,\n concat_dim=_CONCAT_DIM_DEFAULT,\n compat=\"no_conflicts\",\n data_vars=\"all\",\n coords=\"different\",\n fill_value=dtypes.NA,\n join=\"outer\",\n):\n if concat_dim is not None:\n dim = None if concat_dim is _CONCAT_DIM_DEFAULT else concat_dim\n\n sorted_datasets = sorted(datasets, key=vars_as_keys)\n grouped = itertools.groupby(sorted_datasets, key=vars_as_keys)\n\n concatenated = [\n _auto_concat(\n list(datasets),\n dim=dim,\n data_vars=data_vars,\n coords=coords,\n compat=compat,\n fill_value=fill_value,\n join=join,\n )\n for vars, datasets in grouped\n ]\n else:\n concatenated = datasets\n merged = merge(concatenated, compat=compat, fill_value=fill_value, join=join)\n return merged\n\n\ndef _auto_concat(\n datasets,\n dim=None,\n data_vars=\"all\",\n coords=\"different\",\n fill_value=dtypes.NA,\n join=\"outer\",\n compat=\"no_conflicts\",\n):\n if len(datasets) == 1 and dim is None:\n # There is nothing more to combine, so kick out early.\n return datasets[0]\n else:\n if dim is None:\n ds0 = datasets[0]\n ds1 = datasets[1]\n concat_dims = set(ds0.dims)\n if ds0.dims != ds1.dims:\n dim_tuples = set(ds0.dims.items()) - set(ds1.dims.items())\n concat_dims = {i for i, _ in dim_tuples}\n if len(concat_dims) > 1:\n concat_dims = {d for d in concat_dims if not ds0[d].equals(ds1[d])}\n if len(concat_dims) > 1:\n raise ValueError(\n \"too many different dimensions to \" \"concatenate: %s\" % concat_dims\n )\n elif len(concat_dims) == 0:\n raise ValueError(\n \"cannot infer dimension to concatenate: \"\n \"supply the ``concat_dim`` argument \"\n \"explicitly\"\n )\n dim, = concat_dims\n return concat(\n datasets,\n dim=dim,\n data_vars=data_vars,\n 
coords=coords,\n fill_value=fill_value,\n compat=compat,\n )\n"},{"col":0,"comment":"Like netCDF4.stringtochar, but faster and more flexible.\n ","endLoc":152,"header":"def _numpy_bytes_to_char(arr)","id":2705,"name":"_numpy_bytes_to_char","nodeType":"Function","startLoc":147,"text":"def _numpy_bytes_to_char(arr):\n \"\"\"Like netCDF4.stringtochar, but faster and more flexible.\n \"\"\"\n # ensure the array is contiguous\n arr = np.array(arr, copy=False, order=\"C\", dtype=np.string_)\n return arr.reshape(arr.shape + (1,)).view(\"S1\")"},{"col":0,"comment":"null","endLoc":269,"header":"def _new_tile_id(single_id_ds_pair)","id":2706,"name":"_new_tile_id","nodeType":"Function","startLoc":267,"text":"def _new_tile_id(single_id_ds_pair):\n tile_id, ds = single_id_ds_pair\n return tile_id[1:]"},{"col":0,"comment":"null","endLoc":469,"header":"def vars_as_keys(ds)","id":2707,"name":"vars_as_keys","nodeType":"Function","startLoc":468,"text":"def vars_as_keys(ds):\n return tuple(sorted(ds))"},{"attributeType":"null","col":0,"comment":"null","endLoc":704,"id":2708,"name":"_CONCAT_DIM_DEFAULT","nodeType":"Attribute","startLoc":704,"text":"_CONCAT_DIM_DEFAULT"},{"col":0,"comment":"","endLoc":1,"header":"combine.py#","id":2709,"name":"","nodeType":"Function","startLoc":1,"text":"_CONCAT_DIM_DEFAULT = \"__infer_concat_dim__\""},{"col":0,"comment":"null","endLoc":192,"header":"def _extract_zarr_variable_encoding(variable, raise_on_invalid=False)","id":2710,"name":"_extract_zarr_variable_encoding","nodeType":"Function","startLoc":172,"text":"def _extract_zarr_variable_encoding(variable, raise_on_invalid=False):\n encoding = variable.encoding.copy()\n\n valid_encodings = {\"chunks\", \"compressor\", \"filters\", \"cache_metadata\"}\n\n if raise_on_invalid:\n invalid = [k for k in encoding if k not in valid_encodings]\n if invalid:\n raise ValueError(\n \"unexpected encoding parameters for zarr \" \"backend: %r\" % invalid\n )\n else:\n for k in list(encoding):\n if k not in valid_encodings:\n del encoding[k]\n\n chunks = _determine_zarr_chunks(\n encoding.get(\"chunks\"), variable.chunks, variable.ndim\n )\n encoding[\"chunks\"] = chunks\n return encoding"},{"col":4,"comment":"null","endLoc":420,"header":"def _dataset_implementation(self, func, **kwargs)","id":2711,"name":"_dataset_implementation","nodeType":"Function","startLoc":411,"text":"def _dataset_implementation(self, func, **kwargs):\n from .dataset import Dataset\n\n reduced = {}\n for key, da in self.obj.data_vars.items():\n if self.dim in da.dims:\n reduced[key] = func(self.rollings[key], **kwargs)\n else:\n reduced[key] = self.obj[key]\n return Dataset(reduced, coords=self.obj.coords)"},{"id":2712,"name":"py.typed","nodeType":"TextFile","path":"xarray","text":""},{"col":0,"comment":"\n Given encoding chunks (possibly None) and variable chunks (possibly None)\n ","endLoc":153,"header":"def _determine_zarr_chunks(enc_chunks, var_chunks, ndim)","id":2713,"name":"_determine_zarr_chunks","nodeType":"Function","startLoc":60,"text":"def _determine_zarr_chunks(enc_chunks, var_chunks, ndim):\n \"\"\"\n Given encoding chunks (possibly None) and variable chunks (possibly None)\n \"\"\"\n\n # zarr chunk spec:\n # chunks : int or tuple of ints, optional\n # Chunk shape. 
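``_determine_zarr_chunks`` above only accepts dask chunkings that are uniform apart from a possibly smaller final block, so the zarr chunk size can be read off the first block of each dimension. A small illustration of a chunking that satisfies that rule (assumes dask is installed; the write itself is left commented out):

import numpy as np
import xarray as xr

ds = xr.Dataset({"a": ("x", np.arange(10))}).chunk({"x": 4})
print(ds["a"].data.chunks)    # ((4, 4, 2),): uniform except the last block,
                              # so zarr can use chunks=(4,) taken from the first block
# ds.to_zarr("out.zarr")      # a ragged chunking such as (3, 4, 3) would raise instead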
If not provided, will be guessed from shape and dtype.\n\n # if there are no chunks in encoding and the variable data is a numpy\n # array, then we let zarr use its own heuristics to pick the chunks\n if var_chunks is None and enc_chunks is None:\n return None\n\n # if there are no chunks in encoding but there are dask chunks, we try to\n # use the same chunks in zarr\n # However, zarr chunks needs to be uniform for each array\n # http://zarr.readthedocs.io/en/latest/spec/v1.html#chunks\n # while dask chunks can be variable sized\n # http://dask.pydata.org/en/latest/array-design.html#chunks\n if var_chunks and enc_chunks is None:\n if any(len(set(chunks[:-1])) > 1 for chunks in var_chunks):\n raise ValueError(\n \"Zarr requires uniform chunk sizes except for final chunk.\"\n \" Variable dask chunks %r are incompatible. Consider \"\n \"rechunking using `chunk()`.\" % (var_chunks,)\n )\n if any((chunks[0] < chunks[-1]) for chunks in var_chunks):\n raise ValueError(\n \"Final chunk of Zarr array must be the same size or smaller \"\n \"than the first. Variable Dask chunks %r are incompatible. \"\n \"Consider rechunking using `chunk()`.\" % var_chunks\n )\n # return the first chunk for each dimension\n return tuple(chunk[0] for chunk in var_chunks)\n\n # from here on, we are dealing with user-specified chunks in encoding\n # zarr allows chunks to be an integer, in which case it uses the same chunk\n # size on each dimension.\n # Here we re-implement this expansion ourselves. That makes the logic of\n # checking chunk compatibility easier\n\n if isinstance(enc_chunks, integer_types):\n enc_chunks_tuple = ndim * (enc_chunks,)\n else:\n enc_chunks_tuple = tuple(enc_chunks)\n\n if len(enc_chunks_tuple) != ndim:\n # throw away encoding chunks, start over\n return _determine_zarr_chunks(None, var_chunks, ndim)\n\n for x in enc_chunks_tuple:\n if not isinstance(x, int):\n raise TypeError(\n \"zarr chunks must be an int or a tuple of ints. \"\n \"Instead found %r\" % (enc_chunks_tuple,)\n )\n\n # if there are chunks in encoding and the variable data is a numpy array,\n # we use the specified chunks\n if var_chunks is None:\n return enc_chunks_tuple\n\n # the hard case\n # DESIGN CHOICE: do not allow multiple dask chunks on a single zarr chunk\n # this avoids the need to get involved in zarr synchronization / locking\n # From zarr docs:\n # \"If each worker in a parallel computation is writing to a separate\n # region of the array, and if region boundaries are perfectly aligned\n # with chunk boundaries, then no synchronization is required.\"\n # TODO: incorporate synchronizer to allow writes from multiple dask\n # threads\n if var_chunks and enc_chunks_tuple:\n for zchunk, dchunks in zip(enc_chunks_tuple, var_chunks):\n for dchunk in dchunks[:-1]:\n if dchunk % zchunk:\n raise NotImplementedError(\n \"Specified zarr chunks %r would overlap multiple dask \"\n \"chunks %r. This is not implemented in xarray yet. \"\n \" Consider rechunking the data using \"\n \"`chunk()` or specifying different chunks in encoding.\"\n % (enc_chunks_tuple, var_chunks)\n )\n if dchunks[-1] > zchunk:\n raise ValueError(\n \"Final chunk of Zarr array must be the same size or \"\n \"smaller than the first. The specified Zarr chunk \"\n \"encoding is %r, but %r in variable Dask chunks %r is \"\n \"incompatible. Consider rechunking using `chunk()`.\"\n % (enc_chunks_tuple, dchunks, var_chunks)\n )\n return enc_chunks_tuple\n\n raise AssertionError(\"We should never get here. 
Function logic must be wrong.\")"},{"attributeType":"null","col":8,"comment":"null","endLoc":67,"id":2714,"name":"loffset","nodeType":"Attribute","startLoc":67,"text":"self.loffset"},{"attributeType":"null","col":8,"comment":"null","endLoc":63,"id":2715,"name":"freq","nodeType":"Attribute","startLoc":63,"text":"self.freq"},{"attributeType":"null","col":16,"comment":"null","endLoc":76,"id":2716,"name":"closed","nodeType":"Attribute","startLoc":76,"text":"self.closed"},{"fileName":"_version.py","filePath":"xarray","id":2717,"nodeType":"File","text":"# This file helps to compute a version number in source trees obtained from\n# git-archive tarball (such as those provided by githubs download-from-tag\n# feature). Distribution tarballs (built by setup.py sdist) and build\n# directories (produced by setup.py build) will contain a much shorter file\n# that just contains the computed version number.\n\n# This file is released into the public domain. Generated by\n# versioneer-0.18 (https://github.com/warner/python-versioneer)\n\n\"\"\"Git implementation of _version.py.\"\"\"\n\nimport errno\nimport os\nimport re\nimport subprocess\nimport sys\n\n\ndef get_keywords():\n \"\"\"Get the keywords needed to look up the version information.\"\"\"\n # these strings will be replaced by git during git-archive.\n # setup.py/versioneer.py will grep for the variable names, so they must\n # each be defined on a line of their own. _version.py will just call\n # get_keywords().\n git_refnames = \"\"\n git_full = \"863e49066ca4d61c9adfe62aca3bf21b90e1af8c\"\n git_date = \"2019-10-12 17:33:33 -0400\"\n keywords = {\"refnames\": git_refnames, \"full\": git_full, \"date\": git_date}\n return keywords\n\n\nclass VersioneerConfig:\n \"\"\"Container for Versioneer configuration parameters.\"\"\"\n\n\ndef get_config():\n \"\"\"Create, populate and return the VersioneerConfig() object.\"\"\"\n # these strings are filled in when 'setup.py versioneer' creates\n # _version.py\n cfg = VersioneerConfig()\n cfg.VCS = \"git\"\n cfg.style = \"pep440\"\n cfg.tag_prefix = \"v\"\n cfg.parentdir_prefix = \"xarray-\"\n cfg.versionfile_source = \"xarray/_version.py\"\n cfg.verbose = False\n return cfg\n\n\nclass NotThisMethod(Exception):\n \"\"\"Exception raised if a method is not valid for the current scenario.\"\"\"\n\n\nLONG_VERSION_PY = {}\nHANDLERS = {}\n\n\ndef register_vcs_handler(vcs, method): # decorator\n \"\"\"Decorator to mark a method as the handler for a particular VCS.\"\"\"\n\n def decorate(f):\n \"\"\"Store f in HANDLERS[vcs][method].\"\"\"\n if vcs not in HANDLERS:\n HANDLERS[vcs] = {}\n HANDLERS[vcs][method] = f\n return f\n\n return decorate\n\n\ndef run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):\n \"\"\"Call the given command(s).\"\"\"\n assert isinstance(commands, list)\n p = None\n for c in commands:\n try:\n dispcmd = str([c] + args)\n # remember shell=False, so use git.cmd on windows, not just git\n p = subprocess.Popen(\n [c] + args,\n cwd=cwd,\n env=env,\n stdout=subprocess.PIPE,\n stderr=(subprocess.PIPE if hide_stderr else None),\n )\n break\n except OSError:\n e = sys.exc_info()[1]\n if e.errno == errno.ENOENT:\n continue\n if verbose:\n print(\"unable to run %s\" % dispcmd)\n print(e)\n return None, None\n else:\n if verbose:\n print(\"unable to find command, tried %s\" % (commands,))\n return None, None\n stdout = p.communicate()[0].strip()\n if sys.version_info[0] >= 3:\n stdout = stdout.decode()\n if p.returncode != 0:\n if verbose:\n print(\"unable to run %s 
(error)\" % dispcmd)\n print(\"stdout was %s\" % stdout)\n return None, p.returncode\n return stdout, p.returncode\n\n\ndef versions_from_parentdir(parentdir_prefix, root, verbose):\n \"\"\"Try to determine the version from the parent directory name.\n\n Source tarballs conventionally unpack into a directory that includes both\n the project name and a version string. We will also support searching up\n two directory levels for an appropriately named parent directory\n \"\"\"\n rootdirs = []\n\n for i in range(3):\n dirname = os.path.basename(root)\n if dirname.startswith(parentdir_prefix):\n return {\n \"version\": dirname[len(parentdir_prefix) :],\n \"full-revisionid\": None,\n \"dirty\": False,\n \"error\": None,\n \"date\": None,\n }\n else:\n rootdirs.append(root)\n root = os.path.dirname(root) # up a level\n\n if verbose:\n print(\n \"Tried directories %s but none started with prefix %s\"\n % (str(rootdirs), parentdir_prefix)\n )\n raise NotThisMethod(\"rootdir doesn't start with parentdir_prefix\")\n\n\n@register_vcs_handler(\"git\", \"get_keywords\")\ndef git_get_keywords(versionfile_abs):\n \"\"\"Extract version information from the given file.\"\"\"\n # the code embedded in _version.py can just fetch the value of these\n # keywords. When used from setup.py, we don't want to import _version.py,\n # so we do it with a regexp instead. This function is not used from\n # _version.py.\n keywords = {}\n try:\n f = open(versionfile_abs, \"r\")\n for line in f.readlines():\n if line.strip().startswith(\"git_refnames =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"refnames\"] = mo.group(1)\n if line.strip().startswith(\"git_full =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"full\"] = mo.group(1)\n if line.strip().startswith(\"git_date =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"date\"] = mo.group(1)\n f.close()\n except OSError:\n pass\n return keywords\n\n\n@register_vcs_handler(\"git\", \"keywords\")\ndef git_versions_from_keywords(keywords, tag_prefix, verbose):\n \"\"\"Get version information from git keywords.\"\"\"\n if not keywords:\n raise NotThisMethod(\"no keywords at all, weird\")\n date = keywords.get(\"date\")\n if date is not None:\n # git-2.2.0 added \"%cI\", which expands to an ISO-8601 -compliant\n # datestamp. However we prefer \"%ci\" (which expands to an \"ISO-8601\n # -like\" string, which we must then edit to make compliant), because\n # it's been around since git-1.5.3, and it's too difficult to\n # discover which version we're using, or to work around using an\n # older one.\n date = date.strip().replace(\" \", \"T\", 1).replace(\" \", \"\", 1)\n refnames = keywords[\"refnames\"].strip()\n if refnames.startswith(\"$Format\"):\n if verbose:\n print(\"keywords are unexpanded, not using\")\n raise NotThisMethod(\"unexpanded keywords, not a git-archive tarball\")\n refs = {r.strip() for r in refnames.strip(\"()\").split(\",\")}\n # starting in git-1.8.3, tags are listed as \"tag: foo-1.0\" instead of\n # just \"foo-1.0\". If we see a \"tag: \" prefix, prefer those.\n TAG = \"tag: \"\n tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}\n if not tags:\n # Either we're using git < 1.8.3, or there really are no tags. We use\n # a heuristic: assume all version tags have a digit. The old git %d\n # expansion behaves like git log --decorate=short and strips out the\n # refs/heads/ and refs/tags/ prefixes that would let us distinguish\n # between branches and tags. 
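A sketch of the keyword-extraction approach used by ``git_get_keywords`` above: the expanded ``git_refnames`` / ``git_full`` / ``git_date`` values are pulled out of a ``_version.py``-style file with a regular expression rather than by importing it. The file contents are inlined as a string here purely for illustration.

import re

sample = '''
git_refnames = " (tag: v0.14.0)"
git_full = "863e49066ca4d61c9adfe62aca3bf21b90e1af8c"
git_date = "2019-10-12 17:33:33 -0400"
'''

keywords = {}
for line in sample.splitlines():
    for name in ("refnames", "full", "date"):
        if line.strip().startswith("git_%s =" % name):
            mo = re.search(r'=\s*"(.*)"', line)
            if mo:
                keywords[name] = mo.group(1)

print(keywords["refnames"])   #  (tag: v0.14.0) -- the "tag: " entry yields version 0.14.0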
By ignoring refnames without digits, we\n # filter out many common branch names like \"release\" and\n # \"stabilization\", as well as \"HEAD\" and \"master\".\n tags = {r for r in refs if re.search(r\"\\d\", r)}\n if verbose:\n print(\"discarding '%s', no digits\" % \",\".join(refs - tags))\n if verbose:\n print(\"likely tags: %s\" % \",\".join(sorted(tags)))\n for ref in sorted(tags):\n # sorting will prefer e.g. \"2.0\" over \"2.0rc1\"\n if ref.startswith(tag_prefix):\n r = ref[len(tag_prefix) :]\n if verbose:\n print(\"picking %s\" % r)\n return {\n \"version\": r,\n \"full-revisionid\": keywords[\"full\"].strip(),\n \"dirty\": False,\n \"error\": None,\n \"date\": date,\n }\n # no suitable tags, so version is \"0+unknown\", but full hex is still there\n if verbose:\n print(\"no suitable tags, using unknown + full revision id\")\n return {\n \"version\": \"0+unknown\",\n \"full-revisionid\": keywords[\"full\"].strip(),\n \"dirty\": False,\n \"error\": \"no suitable tags\",\n \"date\": None,\n }\n\n\n@register_vcs_handler(\"git\", \"pieces_from_vcs\")\ndef git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):\n \"\"\"Get version from 'git describe' in the root of the source tree.\n\n This only gets called if the git-archive 'subst' keywords were *not*\n expanded, and _version.py hasn't already been rewritten with a short\n version string, meaning we're inside a checked out source tree.\n \"\"\"\n GITS = [\"git\"]\n if sys.platform == \"win32\":\n GITS = [\"git.cmd\", \"git.exe\"]\n\n out, rc = run_command(GITS, [\"rev-parse\", \"--git-dir\"], cwd=root, hide_stderr=True)\n if rc != 0:\n if verbose:\n print(\"Directory %s not under git control\" % root)\n raise NotThisMethod(\"'git rev-parse --git-dir' returned error\")\n\n # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]\n # if there isn't one, this yields HEX[-dirty] (no NUM)\n describe_out, rc = run_command(\n GITS,\n [\n \"describe\",\n \"--tags\",\n \"--dirty\",\n \"--always\",\n \"--long\",\n \"--match\",\n \"%s*\" % tag_prefix,\n ],\n cwd=root,\n )\n # --long was added in git-1.5.5\n if describe_out is None:\n raise NotThisMethod(\"'git describe' failed\")\n describe_out = describe_out.strip()\n full_out, rc = run_command(GITS, [\"rev-parse\", \"HEAD\"], cwd=root)\n if full_out is None:\n raise NotThisMethod(\"'git rev-parse' failed\")\n full_out = full_out.strip()\n\n pieces = {}\n pieces[\"long\"] = full_out\n pieces[\"short\"] = full_out[:7] # maybe improved later\n pieces[\"error\"] = None\n\n # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]\n # TAG might have hyphens.\n git_describe = describe_out\n\n # look for -dirty suffix\n dirty = git_describe.endswith(\"-dirty\")\n pieces[\"dirty\"] = dirty\n if dirty:\n git_describe = git_describe[: git_describe.rindex(\"-dirty\")]\n\n # now we have TAG-NUM-gHEX or HEX\n\n if \"-\" in git_describe:\n # TAG-NUM-gHEX\n mo = re.search(r\"^(.+)-(\\d+)-g([0-9a-f]+)$\", git_describe)\n if not mo:\n # unparseable. 
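A sketch of the ``git describe`` parsing performed above: a TAG-NUM-gHEX[-dirty] string is split into its pieces, assuming the "v" tag prefix used by this project. The example string is invented for illustration.

import re

describe_out = "v0.13.0-83-g863e4906-dirty"

pieces = {"dirty": describe_out.endswith("-dirty")}
if pieces["dirty"]:
    describe_out = describe_out[: describe_out.rindex("-dirty")]

mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", describe_out)
pieces["closest-tag"] = mo.group(1)[len("v"):]   # "0.13.0"
pieces["distance"] = int(mo.group(2))            # 83 commits since that tag
pieces["short"] = mo.group(3)                    # "863e4906"
print(pieces)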
Maybe git-describe is misbehaving?\n pieces[\"error\"] = \"unable to parse git-describe output: '%s'\" % describe_out\n return pieces\n\n # tag\n full_tag = mo.group(1)\n if not full_tag.startswith(tag_prefix):\n if verbose:\n fmt = \"tag '%s' doesn't start with prefix '%s'\"\n print(fmt % (full_tag, tag_prefix))\n pieces[\"error\"] = \"tag '%s' doesn't start with prefix '%s'\" % (\n full_tag,\n tag_prefix,\n )\n return pieces\n pieces[\"closest-tag\"] = full_tag[len(tag_prefix) :]\n\n # distance: number of commits since tag\n pieces[\"distance\"] = int(mo.group(2))\n\n # commit: short hex revision ID\n pieces[\"short\"] = mo.group(3)\n\n else:\n # HEX: no tags\n pieces[\"closest-tag\"] = None\n count_out, rc = run_command(GITS, [\"rev-list\", \"HEAD\", \"--count\"], cwd=root)\n pieces[\"distance\"] = int(count_out) # total number of commits\n\n # commit date: see ISO-8601 comment in git_versions_from_keywords()\n date = run_command(GITS, [\"show\", \"-s\", \"--format=%ci\", \"HEAD\"], cwd=root)[\n 0\n ].strip()\n pieces[\"date\"] = date.strip().replace(\" \", \"T\", 1).replace(\" \", \"\", 1)\n\n return pieces\n\n\ndef plus_or_dot(pieces):\n \"\"\"Return a + if we don't already have one, else return a .\"\"\"\n if \"+\" in pieces.get(\"closest-tag\", \"\"):\n return \".\"\n return \"+\"\n\n\ndef render_pep440(pieces):\n \"\"\"Build up version string, with post-release \"local version identifier\".\n\n Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n Exceptions:\n 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += plus_or_dot(pieces)\n rendered += \"%d.g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n if pieces[\"dirty\"]:\n rendered += \".dirty\"\n else:\n # exception #1\n rendered = \"0+untagged.%d.g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n if pieces[\"dirty\"]:\n rendered += \".dirty\"\n return rendered\n\n\ndef render_pep440_pre(pieces):\n \"\"\"TAG[.post.devDISTANCE] -- No -dirty.\n\n Exceptions:\n 1: no tags. 0.post.devDISTANCE\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"]:\n rendered += \".post.dev%d\" % pieces[\"distance\"]\n else:\n # exception #1\n rendered = \"0.post.dev%d\" % pieces[\"distance\"]\n return rendered\n\n\ndef render_pep440_post(pieces):\n \"\"\"TAG[.postDISTANCE[.dev0]+gHEX] .\n\n The \".dev0\" means dirty. Note that .dev0 sorts backwards\n (a dirty tree will appear \"older\" than the corresponding clean one),\n but you shouldn't be releasing software with -dirty anyways.\n\n Exceptions:\n 1: no tags. 0.postDISTANCE[.dev0]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += \".post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n rendered += plus_or_dot(pieces)\n rendered += \"g%s\" % pieces[\"short\"]\n else:\n # exception #1\n rendered = \"0.post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n rendered += \"+g%s\" % pieces[\"short\"]\n return rendered\n\n\ndef render_pep440_old(pieces):\n \"\"\"TAG[.postDISTANCE[.dev0]] .\n\n The \".dev0\" means dirty.\n\n Eexceptions:\n 1: no tags. 
0.postDISTANCE[.dev0]\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += \".post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n else:\n # exception #1\n rendered = \"0.post%d\" % pieces[\"distance\"]\n if pieces[\"dirty\"]:\n rendered += \".dev0\"\n return rendered\n\n\ndef render_git_describe(pieces):\n \"\"\"TAG[-DISTANCE-gHEX][-dirty].\n\n Like 'git describe --tags --dirty --always'.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"]:\n rendered += \"-%d-g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n else:\n # exception #1\n rendered = pieces[\"short\"]\n if pieces[\"dirty\"]:\n rendered += \"-dirty\"\n return rendered\n\n\ndef render_git_describe_long(pieces):\n \"\"\"TAG-DISTANCE-gHEX[-dirty].\n\n Like 'git describe --tags --dirty --always -long'.\n The distance/hash is unconditional.\n\n Exceptions:\n 1: no tags. HEX[-dirty] (note: no 'g' prefix)\n \"\"\"\n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n rendered += \"-%d-g%s\" % (pieces[\"distance\"], pieces[\"short\"])\n else:\n # exception #1\n rendered = pieces[\"short\"]\n if pieces[\"dirty\"]:\n rendered += \"-dirty\"\n return rendered\n\n\ndef render(pieces, style):\n \"\"\"Render the given version pieces into the requested style.\"\"\"\n if pieces[\"error\"]:\n return {\n \"version\": \"unknown\",\n \"full-revisionid\": pieces.get(\"long\"),\n \"dirty\": None,\n \"error\": pieces[\"error\"],\n \"date\": None,\n }\n\n if not style or style == \"default\":\n style = \"pep440\" # the default\n\n if style == \"pep440\":\n rendered = render_pep440(pieces)\n elif style == \"pep440-pre\":\n rendered = render_pep440_pre(pieces)\n elif style == \"pep440-post\":\n rendered = render_pep440_post(pieces)\n elif style == \"pep440-old\":\n rendered = render_pep440_old(pieces)\n elif style == \"git-describe\":\n rendered = render_git_describe(pieces)\n elif style == \"git-describe-long\":\n rendered = render_git_describe_long(pieces)\n else:\n raise ValueError(\"unknown style '%s'\" % style)\n\n return {\n \"version\": rendered,\n \"full-revisionid\": pieces[\"long\"],\n \"dirty\": pieces[\"dirty\"],\n \"error\": None,\n \"date\": pieces.get(\"date\"),\n }\n\n\ndef get_versions():\n \"\"\"Get version information or return default if unable to do so.\"\"\"\n # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have\n # __file__, we can work backwards from there to the root. Some\n # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which\n # case we can only use expanded keywords.\n\n cfg = get_config()\n verbose = cfg.verbose\n\n try:\n return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)\n except NotThisMethod:\n pass\n\n try:\n root = os.path.realpath(__file__)\n # versionfile_source is the relative path from the top of the source\n # tree (where the .git directory might live) to this file. 
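A sketch of the PEP 440 rendering described above, covering only the common tagged case TAG[+DISTANCE.gHEX[.dirty]]; the input pieces mirror those produced in the previous step, with invented values for illustration.

def render_pep440_sketch(pieces):
    # tagged case only: TAG[+DISTANCE.gHEX[.dirty]]
    rendered = pieces["closest-tag"]
    if pieces["distance"] or pieces["dirty"]:
        rendered += "." if "+" in rendered else "+"
        rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered

print(render_pep440_sketch(
    {"closest-tag": "0.13.0", "distance": 83, "short": "863e4906", "dirty": True}
))
# 0.13.0+83.g863e4906.dirty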
Invert\n # this to find the root from __file__.\n for i in cfg.versionfile_source.split(\"/\"):\n root = os.path.dirname(root)\n except NameError:\n return {\n \"version\": \"0+unknown\",\n \"full-revisionid\": None,\n \"dirty\": None,\n \"error\": \"unable to find root of source tree\",\n \"date\": None,\n }\n\n try:\n pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)\n return render(pieces, cfg.style)\n except NotThisMethod:\n pass\n\n try:\n if cfg.parentdir_prefix:\n return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)\n except NotThisMethod:\n pass\n\n return {\n \"version\": \"0+unknown\",\n \"full-revisionid\": None,\n \"dirty\": None,\n \"error\": \"unable to compute version\",\n \"date\": None,\n }\n"},{"attributeType":"null","col":16,"comment":"null","endLoc":78,"id":2718,"name":"label","nodeType":"Attribute","startLoc":78,"text":"self.label"},{"attributeType":"null","col":8,"comment":"null","endLoc":66,"id":2719,"name":"base","nodeType":"Attribute","startLoc":66,"text":"self.base"},{"className":"VersioneerConfig","col":0,"comment":"Container for Versioneer configuration parameters.","endLoc":33,"id":2720,"nodeType":"Class","startLoc":32,"text":"class VersioneerConfig:\n \"\"\"Container for Versioneer configuration parameters.\"\"\""},{"className":"NotThisMethod","col":0,"comment":"Exception raised if a method is not valid for the current scenario.","endLoc":51,"id":2721,"nodeType":"Class","startLoc":50,"text":"class NotThisMethod(Exception):\n \"\"\"Exception raised if a method is not valid for the current scenario.\"\"\""},{"col":0,"comment":"Call the given command(s).","endLoc":107,"header":"def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None)","id":2722,"name":"run_command","nodeType":"Function","startLoc":71,"text":"def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):\n \"\"\"Call the given command(s).\"\"\"\n assert isinstance(commands, list)\n p = None\n for c in commands:\n try:\n dispcmd = str([c] + args)\n # remember shell=False, so use git.cmd on windows, not just git\n p = subprocess.Popen(\n [c] + args,\n cwd=cwd,\n env=env,\n stdout=subprocess.PIPE,\n stderr=(subprocess.PIPE if hide_stderr else None),\n )\n break\n except OSError:\n e = sys.exc_info()[1]\n if e.errno == errno.ENOENT:\n continue\n if verbose:\n print(\"unable to run %s\" % dispcmd)\n print(e)\n return None, None\n else:\n if verbose:\n print(\"unable to find command, tried %s\" % (commands,))\n return None, None\n stdout = p.communicate()[0].strip()\n if sys.version_info[0] >= 3:\n stdout = stdout.decode()\n if p.returncode != 0:\n if verbose:\n print(\"unable to run %s (error)\" % dispcmd)\n print(\"stdout was %s\" % stdout)\n return None, p.returncode\n return stdout, p.returncode"},{"col":0,"comment":"","endLoc":1,"header":"resample_cftime.py#","id":2723,"name":"","nodeType":"Function","startLoc":1,"text":"\"\"\"Resampling for CFTimeIndex. 
Does not support non-integer freq.\"\"\""},{"col":4,"comment":"Reduce the items in this group by applying `func` along some\n dimension(s).\n\n Parameters\n ----------\n func : function\n Function which can be called in the form\n `func(x, **kwargs)` to return the result of collapsing an\n np.ndarray over an the rolling dimension.\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : DataArray\n Array with summarized data.\n ","endLoc":442,"header":"def reduce(self, func, **kwargs)","id":2724,"name":"reduce","nodeType":"Function","startLoc":422,"text":"def reduce(self, func, **kwargs):\n \"\"\"Reduce the items in this group by applying `func` along some\n dimension(s).\n\n Parameters\n ----------\n func : function\n Function which can be called in the form\n `func(x, **kwargs)` to return the result of collapsing an\n np.ndarray over an the rolling dimension.\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : DataArray\n Array with summarized data.\n \"\"\"\n return self._dataset_implementation(\n functools.partial(DataArrayRolling.reduce, func=func), **kwargs\n )"},{"fileName":"times.py","filePath":"xarray/coding","id":2725,"nodeType":"File","text":"import re\nimport warnings\nfrom datetime import datetime\nfrom distutils.version import LooseVersion\nfrom functools import partial\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.errors import OutOfBoundsDatetime\n\nfrom ..core import indexing\nfrom ..core.common import contains_cftime_datetimes\nfrom ..core.formatting import first_n_items, format_timestamp, last_item\nfrom ..core.variable import Variable\nfrom .variables import (\n SerializationWarning,\n VariableCoder,\n lazy_elemwise_func,\n pop_to,\n safe_setitem,\n unpack_for_decoding,\n unpack_for_encoding,\n)\n\n# standard calendars recognized by cftime\n_STANDARD_CALENDARS = {\"standard\", \"gregorian\", \"proleptic_gregorian\"}\n\n_NS_PER_TIME_DELTA = {\n \"us\": int(1e3),\n \"ms\": int(1e6),\n \"s\": int(1e9),\n \"m\": int(1e9) * 60,\n \"h\": int(1e9) * 60 * 60,\n \"D\": int(1e9) * 60 * 60 * 24,\n}\n\nTIME_UNITS = frozenset(\n [\"days\", \"hours\", \"minutes\", \"seconds\", \"milliseconds\", \"microseconds\"]\n)\n\n\ndef _import_cftime():\n \"\"\"\n helper function handle the transition to netcdftime/cftime\n as a stand-alone package\n \"\"\"\n try:\n import cftime\n except ImportError:\n # in netCDF4 the num2date/date2num function are top-level api\n try:\n import netCDF4 as cftime\n except ImportError:\n raise ImportError(\"Failed to import cftime\")\n return cftime\n\n\ndef _require_standalone_cftime():\n \"\"\"Raises an ImportError if the standalone cftime is not found\"\"\"\n try:\n import cftime # noqa: F401\n except ImportError:\n raise ImportError(\n \"Decoding times with non-standard calendars \"\n \"or outside the pandas.Timestamp-valid range \"\n \"requires the standalone cftime package.\"\n )\n\n\ndef _netcdf_to_numpy_timeunit(units):\n units = units.lower()\n if not units.endswith(\"s\"):\n units = \"%ss\" % units\n return {\n \"microseconds\": \"us\",\n \"milliseconds\": \"ms\",\n \"seconds\": \"s\",\n \"minutes\": \"m\",\n \"hours\": \"h\",\n \"days\": \"D\",\n }[units]\n\n\ndef _unpack_netcdf_time_units(units):\n # CF datetime units follow the format: \"UNIT since DATE\"\n # this parses out the unit and date allowing for extraneous\n # whitespace.\n matches = re.match(\"(.+) since (.+)\", units)\n if not matches:\n raise ValueError(\"invalid time units: %s\" 
% units)\n delta_units, ref_date = [s.strip() for s in matches.groups()]\n return delta_units, ref_date\n\n\ndef _decode_cf_datetime_dtype(data, units, calendar, use_cftime):\n # Verify that at least the first and last date can be decoded\n # successfully. Otherwise, tracebacks end up swallowed by\n # Dataset.__repr__ when users try to view their lazily decoded array.\n values = indexing.ImplicitToExplicitIndexingAdapter(indexing.as_indexable(data))\n example_value = np.concatenate(\n [first_n_items(values, 1) or [0], last_item(values) or [0]]\n )\n\n try:\n result = decode_cf_datetime(example_value, units, calendar, use_cftime)\n except Exception:\n calendar_msg = (\n \"the default calendar\" if calendar is None else \"calendar %r\" % calendar\n )\n msg = (\n \"unable to decode time units %r with %s. Try \"\n \"opening your dataset with decode_times=False.\" % (units, calendar_msg)\n )\n raise ValueError(msg)\n else:\n dtype = getattr(result, \"dtype\", np.dtype(\"object\"))\n\n return dtype\n\n\ndef _decode_datetime_with_cftime(num_dates, units, calendar):\n cftime = _import_cftime()\n\n if cftime.__name__ == \"cftime\":\n return np.asarray(\n cftime.num2date(num_dates, units, calendar, only_use_cftime_datetimes=True)\n )\n else:\n # Must be using num2date from an old version of netCDF4 which\n # does not have the only_use_cftime_datetimes option.\n return np.asarray(cftime.num2date(num_dates, units, calendar))\n\n\ndef _decode_datetime_with_pandas(flat_num_dates, units, calendar):\n if calendar not in _STANDARD_CALENDARS:\n raise OutOfBoundsDatetime(\n \"Cannot decode times from a non-standard calendar, {!r}, using \"\n \"pandas.\".format(calendar)\n )\n\n delta, ref_date = _unpack_netcdf_time_units(units)\n delta = _netcdf_to_numpy_timeunit(delta)\n try:\n ref_date = pd.Timestamp(ref_date)\n except ValueError:\n # ValueError is raised by pd.Timestamp for non-ISO timestamp\n # strings, in which case we fall back to using cftime\n raise OutOfBoundsDatetime\n\n # fixes: https://github.com/pydata/pandas/issues/14068\n # these lines check if the the lowest or the highest value in dates\n # cause an OutOfBoundsDatetime (Overflow) error\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \"invalid value encountered\", RuntimeWarning)\n pd.to_timedelta(flat_num_dates.min(), delta) + ref_date\n pd.to_timedelta(flat_num_dates.max(), delta) + ref_date\n\n # Cast input dates to integers of nanoseconds because `pd.to_datetime`\n # works much faster when dealing with integers\n # make _NS_PER_TIME_DELTA an array to ensure type upcasting\n flat_num_dates_ns_int = (\n flat_num_dates.astype(np.float64) * _NS_PER_TIME_DELTA[delta]\n ).astype(np.int64)\n\n return (pd.to_timedelta(flat_num_dates_ns_int, \"ns\") + ref_date).values\n\n\ndef decode_cf_datetime(num_dates, units, calendar=None, use_cftime=None):\n \"\"\"Given an array of numeric dates in netCDF format, convert it into a\n numpy array of date time objects.\n\n For standard (Gregorian) calendars, this function uses vectorized\n operations, which makes it much faster than cftime.num2date. 
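A sketch of the fast pandas path described above for ``decode_cf_datetime``: numeric values with "<unit> since <date>" units become datetime64[ns] by adding timedeltas to the parsed reference date. The variable names and attributes below are assumptions for illustration.

import numpy as np
import pandas as pd
import xarray as xr

num_dates = np.array([0, 1, 2.5])
ref_date = pd.Timestamp("2000-01-01")
print(ref_date + pd.to_timedelta(num_dates, unit="D"))
# DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03 12:00:00'], ...)

# the public entry point applies the same conversion from CF attributes:
ds = xr.Dataset({"t": ("obs", num_dates, {"units": "days since 2000-01-01"})})
print(xr.decode_cf(ds)["t"].dtype)   # datetime64[ns]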
In such a\n case, the returned array will be of type np.datetime64.\n\n Note that time unit in `units` must not be smaller than microseconds and\n not larger than days.\n\n See also\n --------\n cftime.num2date\n \"\"\"\n num_dates = np.asarray(num_dates)\n flat_num_dates = num_dates.ravel()\n if calendar is None:\n calendar = \"standard\"\n\n if use_cftime is None:\n try:\n dates = _decode_datetime_with_pandas(flat_num_dates, units, calendar)\n except (OutOfBoundsDatetime, OverflowError):\n dates = _decode_datetime_with_cftime(\n flat_num_dates.astype(np.float), units, calendar\n )\n\n if (\n dates[np.nanargmin(num_dates)].year < 1678\n or dates[np.nanargmax(num_dates)].year >= 2262\n ):\n if calendar in _STANDARD_CALENDARS:\n warnings.warn(\n \"Unable to decode time axis into full \"\n \"numpy.datetime64 objects, continuing using \"\n \"cftime.datetime objects instead, reason: dates out \"\n \"of range\",\n SerializationWarning,\n stacklevel=3,\n )\n else:\n if calendar in _STANDARD_CALENDARS:\n dates = cftime_to_nptime(dates)\n elif use_cftime:\n dates = _decode_datetime_with_cftime(\n flat_num_dates.astype(np.float), units, calendar\n )\n else:\n dates = _decode_datetime_with_pandas(flat_num_dates, units, calendar)\n\n return dates.reshape(num_dates.shape)\n\n\ndef to_timedelta_unboxed(value, **kwargs):\n if LooseVersion(pd.__version__) < \"0.25.0\":\n result = pd.to_timedelta(value, **kwargs, box=False)\n else:\n result = pd.to_timedelta(value, **kwargs).to_numpy()\n assert result.dtype == \"timedelta64[ns]\"\n return result\n\n\ndef to_datetime_unboxed(value, **kwargs):\n if LooseVersion(pd.__version__) < \"0.25.0\":\n result = pd.to_datetime(value, **kwargs, box=False)\n else:\n result = pd.to_datetime(value, **kwargs).to_numpy()\n assert result.dtype == \"datetime64[ns]\"\n return result\n\n\ndef decode_cf_timedelta(num_timedeltas, units):\n \"\"\"Given an array of numeric timedeltas in netCDF format, convert it into a\n numpy timedelta64[ns] array.\n \"\"\"\n num_timedeltas = np.asarray(num_timedeltas)\n units = _netcdf_to_numpy_timeunit(units)\n result = to_timedelta_unboxed(num_timedeltas.ravel(), unit=units)\n return result.reshape(num_timedeltas.shape)\n\n\ndef _infer_time_units_from_diff(unique_timedeltas):\n for time_unit in [\"days\", \"hours\", \"minutes\", \"seconds\"]:\n delta_ns = _NS_PER_TIME_DELTA[_netcdf_to_numpy_timeunit(time_unit)]\n unit_delta = np.timedelta64(delta_ns, \"ns\")\n diffs = unique_timedeltas / unit_delta\n if np.all(diffs == diffs.astype(int)):\n return time_unit\n return \"seconds\"\n\n\ndef infer_calendar_name(dates):\n \"\"\"Given an array of datetimes, infer the CF calendar name\"\"\"\n if np.asarray(dates).dtype == \"datetime64[ns]\":\n return \"proleptic_gregorian\"\n else:\n return np.asarray(dates).ravel()[0].calendar\n\n\ndef infer_datetime_units(dates):\n \"\"\"Given an array of datetimes, returns a CF compatible time-unit string of\n the form \"{time_unit} since {date[0]}\", where `time_unit` is 'days',\n 'hours', 'minutes' or 'seconds' (the first one that can evenly divide all\n unique time deltas in `dates`)\n \"\"\"\n dates = np.asarray(dates).ravel()\n if np.asarray(dates).dtype == \"datetime64[ns]\":\n dates = to_datetime_unboxed(dates)\n dates = dates[pd.notnull(dates)]\n reference_date = dates[0] if len(dates) > 0 else \"1970-01-01\"\n reference_date = pd.Timestamp(reference_date)\n else:\n reference_date = dates[0] if len(dates) > 0 else \"1970-01-01\"\n reference_date = format_cftime_datetime(reference_date)\n unique_timedeltas = 
np.unique(np.diff(dates))\n if unique_timedeltas.dtype == np.dtype(\"O\"):\n # Convert to np.timedelta64 objects using pandas to work around a\n # NumPy casting bug: https://github.com/numpy/numpy/issues/11096\n unique_timedeltas = to_timedelta_unboxed(unique_timedeltas)\n units = _infer_time_units_from_diff(unique_timedeltas)\n return \"%s since %s\" % (units, reference_date)\n\n\ndef format_cftime_datetime(date):\n \"\"\"Converts a cftime.datetime object to a string with the format:\n YYYY-MM-DD HH:MM:SS.UUUUUU\n \"\"\"\n return \"{:04d}-{:02d}-{:02d} {:02d}:{:02d}:{:02d}.{:06d}\".format(\n date.year,\n date.month,\n date.day,\n date.hour,\n date.minute,\n date.second,\n date.microsecond,\n )\n\n\ndef infer_timedelta_units(deltas):\n \"\"\"Given an array of timedeltas, returns a CF compatible time-unit from\n {'days', 'hours', 'minutes' 'seconds'} (the first one that can evenly\n divide all unique time deltas in `deltas`)\n \"\"\"\n deltas = to_timedelta_unboxed(np.asarray(deltas).ravel())\n unique_timedeltas = np.unique(deltas[pd.notnull(deltas)])\n units = _infer_time_units_from_diff(unique_timedeltas)\n return units\n\n\ndef cftime_to_nptime(times):\n \"\"\"Given an array of cftime.datetime objects, return an array of\n numpy.datetime64 objects of the same size\"\"\"\n times = np.asarray(times)\n new = np.empty(times.shape, dtype=\"M8[ns]\")\n for i, t in np.ndenumerate(times):\n try:\n # Use pandas.Timestamp in place of datetime.datetime, because\n # NumPy casts it safely it np.datetime64[ns] for dates outside\n # 1678 to 2262 (this is not currently the case for\n # datetime.datetime).\n dt = pd.Timestamp(\n t.year, t.month, t.day, t.hour, t.minute, t.second, t.microsecond\n )\n except ValueError as e:\n raise ValueError(\n \"Cannot convert date {} to a date in the \"\n \"standard calendar. 
Reason: {}.\".format(t, e)\n )\n new[i] = np.datetime64(dt)\n return new\n\n\ndef _cleanup_netcdf_time_units(units):\n delta, ref_date = _unpack_netcdf_time_units(units)\n try:\n units = \"%s since %s\" % (delta, format_timestamp(ref_date))\n except OutOfBoundsDatetime:\n # don't worry about reifying the units if they're out of bounds\n pass\n return units\n\n\ndef _encode_datetime_with_cftime(dates, units, calendar):\n \"\"\"Fallback method for encoding dates using cftime.\n\n This method is more flexible than xarray's parsing using datetime64[ns]\n arrays but also slower because it loops over each element.\n \"\"\"\n cftime = _import_cftime()\n\n if np.issubdtype(dates.dtype, np.datetime64):\n # numpy's broken datetime conversion only works for us precision\n dates = dates.astype(\"M8[us]\").astype(datetime)\n\n def encode_datetime(d):\n return np.nan if d is None else cftime.date2num(d, units, calendar)\n\n return np.vectorize(encode_datetime)(dates)\n\n\ndef cast_to_int_if_safe(num):\n int_num = np.array(num, dtype=np.int64)\n if (num == int_num).all():\n num = int_num\n return num\n\n\ndef encode_cf_datetime(dates, units=None, calendar=None):\n \"\"\"Given an array of datetime objects, returns the tuple `(num, units,\n calendar)` suitable for a CF compliant time variable.\n\n Unlike `date2num`, this function can handle datetime64 arrays.\n\n See also\n --------\n cftime.date2num\n \"\"\"\n dates = np.asarray(dates)\n\n if units is None:\n units = infer_datetime_units(dates)\n else:\n units = _cleanup_netcdf_time_units(units)\n\n if calendar is None:\n calendar = infer_calendar_name(dates)\n\n delta, ref_date = _unpack_netcdf_time_units(units)\n try:\n if calendar not in _STANDARD_CALENDARS or dates.dtype.kind == \"O\":\n # parse with cftime instead\n raise OutOfBoundsDatetime\n assert dates.dtype == \"datetime64[ns]\"\n\n delta_units = _netcdf_to_numpy_timeunit(delta)\n time_delta = np.timedelta64(1, delta_units).astype(\"timedelta64[ns]\")\n ref_date = pd.Timestamp(ref_date)\n\n # If the ref_date Timestamp is timezone-aware, convert to UTC and\n # make it timezone-naive (GH 2649).\n if ref_date.tz is not None:\n ref_date = ref_date.tz_convert(None)\n\n # Wrap the dates in a DatetimeIndex to do the subtraction to ensure\n # an OverflowError is raised if the ref_date is too far away from\n # dates to be encoded (GH 2272).\n num = (pd.DatetimeIndex(dates.ravel()) - ref_date) / time_delta\n num = num.values.reshape(dates.shape)\n\n except (OutOfBoundsDatetime, OverflowError):\n num = _encode_datetime_with_cftime(dates, units, calendar)\n\n num = cast_to_int_if_safe(num)\n return (num, units, calendar)\n\n\ndef encode_cf_timedelta(timedeltas, units=None):\n if units is None:\n units = infer_timedelta_units(timedeltas)\n\n np_unit = _netcdf_to_numpy_timeunit(units)\n num = 1.0 * timedeltas / np.timedelta64(1, np_unit)\n num = np.where(pd.isnull(timedeltas), np.nan, num)\n num = cast_to_int_if_safe(num)\n return (num, units)\n\n\nclass CFDatetimeCoder(VariableCoder):\n def __init__(self, use_cftime=None):\n self.use_cftime = use_cftime\n\n def encode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_encoding(variable)\n if np.issubdtype(data.dtype, np.datetime64) or contains_cftime_datetimes(\n variable\n ):\n (data, units, calendar) = encode_cf_datetime(\n data, encoding.pop(\"units\", None), encoding.pop(\"calendar\", None)\n )\n safe_setitem(attrs, \"units\", units, name=name)\n safe_setitem(attrs, \"calendar\", calendar, name=name)\n\n return Variable(dims, data, 
attrs, encoding)\n\n def decode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_decoding(variable)\n\n if \"units\" in attrs and \"since\" in attrs[\"units\"]:\n units = pop_to(attrs, encoding, \"units\")\n calendar = pop_to(attrs, encoding, \"calendar\")\n dtype = _decode_cf_datetime_dtype(data, units, calendar, self.use_cftime)\n transform = partial(\n decode_cf_datetime,\n units=units,\n calendar=calendar,\n use_cftime=self.use_cftime,\n )\n data = lazy_elemwise_func(data, transform, dtype)\n\n return Variable(dims, data, attrs, encoding)\n\n\nclass CFTimedeltaCoder(VariableCoder):\n def encode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_encoding(variable)\n\n if np.issubdtype(data.dtype, np.timedelta64):\n data, units = encode_cf_timedelta(data, encoding.pop(\"units\", None))\n safe_setitem(attrs, \"units\", units, name=name)\n\n return Variable(dims, data, attrs, encoding)\n\n def decode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_decoding(variable)\n\n if \"units\" in attrs and attrs[\"units\"] in TIME_UNITS:\n units = pop_to(attrs, encoding, \"units\")\n transform = partial(decode_cf_timedelta, units=units)\n dtype = np.dtype(\"timedelta64[ns]\")\n data = lazy_elemwise_func(data, transform, dtype=dtype)\n\n return Variable(dims, data, attrs, encoding)\n"},{"col":4,"comment":"null","endLoc":445,"header":"def _counts(self)","id":2726,"name":"_counts","nodeType":"Function","startLoc":444,"text":"def _counts(self):\n return self._dataset_implementation(DataArrayRolling._counts)"},{"col":4,"comment":"null","endLoc":457,"header":"def _numpy_or_bottleneck_reduce(\n self, array_agg_func, bottleneck_move_func, **kwargs\n )","id":2727,"name":"_numpy_or_bottleneck_reduce","nodeType":"Function","startLoc":447,"text":"def _numpy_or_bottleneck_reduce(\n self, array_agg_func, bottleneck_move_func, **kwargs\n ):\n return self._dataset_implementation(\n functools.partial(\n DataArrayRolling._numpy_or_bottleneck_reduce,\n array_agg_func=array_agg_func,\n bottleneck_move_func=bottleneck_move_func,\n ),\n **kwargs\n )"},{"col":4,"comment":"\n Convert this rolling object to xr.Dataset,\n where the window dimension is stacked as a new dimension\n\n Parameters\n ----------\n window_dim: str\n New name of the window dimension.\n stride: integer, optional\n size of stride for the rolling window.\n fill_value: optional. Default dtypes.NA\n Filling value to match the dimension size.\n\n Returns\n -------\n Dataset with variables converted from rolling object.\n ","endLoc":490,"header":"def construct(self, window_dim, stride=1, fill_value=dtypes.NA)","id":2728,"name":"construct","nodeType":"Function","startLoc":459,"text":"def construct(self, window_dim, stride=1, fill_value=dtypes.NA):\n \"\"\"\n Convert this rolling object to xr.Dataset,\n where the window dimension is stacked as a new dimension\n\n Parameters\n ----------\n window_dim: str\n New name of the window dimension.\n stride: integer, optional\n size of stride for the rolling window.\n fill_value: optional. 
Default dtypes.NA\n Filling value to match the dimension size.\n\n Returns\n -------\n Dataset with variables converted from rolling object.\n \"\"\"\n\n from .dataset import Dataset\n\n dataset = {}\n for key, da in self.obj.data_vars.items():\n if self.dim in da.dims:\n dataset[key] = self.rollings[key].construct(\n window_dim, fill_value=fill_value\n )\n else:\n dataset[key] = da\n return Dataset(dataset, coords=self.obj.coords).isel(\n **{self.dim: slice(None, None, stride)}\n )"},{"className":"CFDatetimeCoder","col":0,"comment":"null","endLoc":468,"id":2729,"nodeType":"Class","startLoc":436,"text":"class CFDatetimeCoder(VariableCoder):\n def __init__(self, use_cftime=None):\n self.use_cftime = use_cftime\n\n def encode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_encoding(variable)\n if np.issubdtype(data.dtype, np.datetime64) or contains_cftime_datetimes(\n variable\n ):\n (data, units, calendar) = encode_cf_datetime(\n data, encoding.pop(\"units\", None), encoding.pop(\"calendar\", None)\n )\n safe_setitem(attrs, \"units\", units, name=name)\n safe_setitem(attrs, \"calendar\", calendar, name=name)\n\n return Variable(dims, data, attrs, encoding)\n\n def decode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_decoding(variable)\n\n if \"units\" in attrs and \"since\" in attrs[\"units\"]:\n units = pop_to(attrs, encoding, \"units\")\n calendar = pop_to(attrs, encoding, \"calendar\")\n dtype = _decode_cf_datetime_dtype(data, units, calendar, self.use_cftime)\n transform = partial(\n decode_cf_datetime,\n units=units,\n calendar=calendar,\n use_cftime=self.use_cftime,\n )\n data = lazy_elemwise_func(data, transform, dtype)\n\n return Variable(dims, data, attrs, encoding)"},{"className":"StackedBytesArray","col":0,"comment":"Wrapper around array-like objects to create a new indexable object where\n values, when accessed, are automatically stacked along the last dimension.\n\n >>> StackedBytesArray(np.array(['a', 'b', 'c']))[:]\n array('abc',\n dtype='|S3')\n ","endLoc":238,"id":2730,"nodeType":"Class","startLoc":200,"text":"class StackedBytesArray(indexing.ExplicitlyIndexedNDArrayMixin):\n \"\"\"Wrapper around array-like objects to create a new indexable object where\n values, when accessed, are automatically stacked along the last dimension.\n\n >>> StackedBytesArray(np.array(['a', 'b', 'c']))[:]\n array('abc',\n dtype='|S3')\n \"\"\"\n\n def __init__(self, array):\n \"\"\"\n Parameters\n ----------\n array : array-like\n Original array of values to wrap.\n \"\"\"\n if array.dtype != \"S1\":\n raise ValueError(\n \"can only use StackedBytesArray if argument has dtype='S1'\"\n )\n self.array = indexing.as_indexable(array)\n\n @property\n def dtype(self):\n return np.dtype(\"S\" + str(self.array.shape[-1]))\n\n @property\n def shape(self):\n return self.array.shape[:-1]\n\n def __repr__(self):\n return \"%s(%r)\" % (type(self).__name__, self.array)\n\n def __getitem__(self, key):\n # require slicing the last dimension completely\n key = type(key)(indexing.expanded_indexer(key.tuple, self.array.ndim))\n if key.tuple[-1] != slice(None):\n raise IndexError(\"too many indices\")\n return _numpy_char_to_bytes(self.array[key])"},{"col":4,"comment":"null","endLoc":224,"header":"@property\n def dtype(self)","id":2731,"name":"dtype","nodeType":"Function","startLoc":222,"text":"@property\n def dtype(self):\n return np.dtype(\"S\" + str(self.array.shape[-1]))"},{"col":4,"comment":"null","endLoc":228,"header":"@property\n def 
shape(self)","id":2732,"name":"shape","nodeType":"Function","startLoc":226,"text":"@property\n def shape(self):\n return self.array.shape[:-1]"},{"col":4,"comment":"null","endLoc":231,"header":"def __repr__(self)","id":2733,"name":"__repr__","nodeType":"Function","startLoc":230,"text":"def __repr__(self):\n return \"%s(%r)\" % (type(self).__name__, self.array)"},{"col":4,"comment":"null","endLoc":238,"header":"def __getitem__(self, key)","id":2734,"name":"__getitem__","nodeType":"Function","startLoc":233,"text":"def __getitem__(self, key):\n # require slicing the last dimension completely\n key = type(key)(indexing.expanded_indexer(key.tuple, self.array.ndim))\n if key.tuple[-1] != slice(None):\n raise IndexError(\"too many indices\")\n return _numpy_char_to_bytes(self.array[key])"},{"col":4,"comment":"null","endLoc":468,"header":"def decode(self, variable, name=None)","id":2735,"name":"decode","nodeType":"Function","startLoc":453,"text":"def decode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_decoding(variable)\n\n if \"units\" in attrs and \"since\" in attrs[\"units\"]:\n units = pop_to(attrs, encoding, \"units\")\n calendar = pop_to(attrs, encoding, \"calendar\")\n dtype = _decode_cf_datetime_dtype(data, units, calendar, self.use_cftime)\n transform = partial(\n decode_cf_datetime,\n units=units,\n calendar=calendar,\n use_cftime=self.use_cftime,\n )\n data = lazy_elemwise_func(data, transform, dtype)\n\n return Variable(dims, data, attrs, encoding)"},{"col":4,"comment":"null","endLoc":250,"header":"def __delitem__(self, key: Hashable) -> None","id":2736,"name":"__delitem__","nodeType":"Function","startLoc":246,"text":"def __delitem__(self, key: Hashable) -> None:\n if key in self:\n del self._data[key]\n else:\n raise KeyError(key)"},{"col":4,"comment":"Provide method for the key-autocompletions in IPython. ","endLoc":258,"header":"def _ipython_key_completions_(self)","id":2737,"name":"_ipython_key_completions_","nodeType":"Function","startLoc":252,"text":"def _ipython_key_completions_(self):\n \"\"\"Provide method for the key-autocompletions in IPython. 
\"\"\"\n return [\n key\n for key in self._data._ipython_key_completions_()\n if key not in self._data.data_vars\n ]"},{"col":0,"comment":"Like netCDF4.chartostring, but faster and more flexible.\n ","endLoc":197,"header":"def _numpy_char_to_bytes(arr)","id":2738,"name":"_numpy_char_to_bytes","nodeType":"Function","startLoc":191,"text":"def _numpy_char_to_bytes(arr):\n \"\"\"Like netCDF4.chartostring, but faster and more flexible.\n \"\"\"\n # based on: http://stackoverflow.com/a/10984878/809705\n arr = np.array(arr, copy=False, order=\"C\")\n dtype = \"S\" + str(arr.shape[-1])\n return arr.view(dtype).reshape(arr.shape[:-1])"},{"attributeType":"null","col":8,"comment":"null","endLoc":220,"id":2739,"name":"array","nodeType":"Attribute","startLoc":220,"text":"self.array"},{"attributeType":"null","col":4,"comment":"null","endLoc":192,"id":2740,"name":"__slots__","nodeType":"Attribute","startLoc":192,"text":"__slots__"},{"attributeType":"Dataset","col":8,"comment":"null","endLoc":195,"id":2741,"name":"_data","nodeType":"Attribute","startLoc":195,"text":"self._data"},{"className":"DataArrayCoordinates","col":0,"comment":"Dictionary like container for DataArray coordinates.\n\n Essentially a dict with keys given by the array's\n dimensions and the values given by corresponding DataArray objects.\n ","endLoc":319,"id":2742,"nodeType":"Class","startLoc":261,"text":"class DataArrayCoordinates(Coordinates):\n \"\"\"Dictionary like container for DataArray coordinates.\n\n Essentially a dict with keys given by the array's\n dimensions and the values given by corresponding DataArray objects.\n \"\"\"\n\n __slots__ = (\"_data\",)\n\n def __init__(self, dataarray: \"DataArray\"):\n self._data = dataarray\n\n @property\n def dims(self) -> Tuple[Hashable, ...]:\n return self._data.dims\n\n @property\n def _names(self) -> Set[Hashable]:\n return set(self._data._coords)\n\n def __getitem__(self, key: Hashable) -> \"DataArray\":\n return self._data._getitem_coord(key)\n\n def _update_coords(\n self, coords: Dict[Hashable, Variable], indexes: Mapping[Hashable, pd.Index]\n ) -> None:\n from .dataset import calculate_dimensions\n\n coords_plus_data = coords.copy()\n coords_plus_data[_THIS_ARRAY] = self._data.variable\n dims = calculate_dimensions(coords_plus_data)\n if not set(dims) <= set(self.dims):\n raise ValueError(\n \"cannot add coordinates with new dimensions to \" \"a DataArray\"\n )\n self._data._coords = coords\n\n # TODO(shoyer): once ._indexes is always populated by a dict, modify\n # it to update inplace instead.\n original_indexes = dict(self._data.indexes)\n original_indexes.update(indexes)\n self._data._indexes = original_indexes\n\n @property\n def variables(self):\n return Frozen(self._data._coords)\n\n def to_dataset(self) -> \"Dataset\":\n from .dataset import Dataset\n\n coords = {k: v.copy(deep=False) for k, v in self._data._coords.items()}\n return Dataset._from_vars_and_coord_names(coords, set(coords))\n\n def __delitem__(self, key: Hashable) -> None:\n del self._data._coords[key]\n\n def _ipython_key_completions_(self):\n \"\"\"Provide method for the key-autocompletions in IPython. 
\"\"\"\n return self._data._ipython_key_completions_()"},{"col":0,"comment":"Extract version information from the given file.","endLoc":167,"header":"@register_vcs_handler(\"git\", \"get_keywords\")\ndef git_get_keywords(versionfile_abs)","id":2743,"name":"git_get_keywords","nodeType":"Function","startLoc":141,"text":"@register_vcs_handler(\"git\", \"get_keywords\")\ndef git_get_keywords(versionfile_abs):\n \"\"\"Extract version information from the given file.\"\"\"\n # the code embedded in _version.py can just fetch the value of these\n # keywords. When used from setup.py, we don't want to import _version.py,\n # so we do it with a regexp instead. This function is not used from\n # _version.py.\n keywords = {}\n try:\n f = open(versionfile_abs, \"r\")\n for line in f.readlines():\n if line.strip().startswith(\"git_refnames =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"refnames\"] = mo.group(1)\n if line.strip().startswith(\"git_full =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"full\"] = mo.group(1)\n if line.strip().startswith(\"git_date =\"):\n mo = re.search(r'=\\s*\"(.*)\"', line)\n if mo:\n keywords[\"date\"] = mo.group(1)\n f.close()\n except OSError:\n pass\n return keywords"},{"col":4,"comment":"null","endLoc":275,"header":"@property\n def dims(self) -> Tuple[Hashable, ...]","id":2744,"name":"dims","nodeType":"Function","startLoc":273,"text":"@property\n def dims(self) -> Tuple[Hashable, ...]:\n return self._data.dims"},{"col":4,"comment":"null","endLoc":279,"header":"@property\n def _names(self) -> Set[Hashable]","id":2745,"name":"_names","nodeType":"Function","startLoc":277,"text":"@property\n def _names(self) -> Set[Hashable]:\n return set(self._data._coords)"},{"col":4,"comment":"null","endLoc":282,"header":"def __getitem__(self, key: Hashable) -> \"DataArray\"","id":2746,"name":"__getitem__","nodeType":"Function","startLoc":281,"text":"def __getitem__(self, key: Hashable) -> \"DataArray\":\n return self._data._getitem_coord(key)"},{"col":0,"comment":"null","endLoc":84,"header":"def decode_bytes_array(bytes_array, encoding=\"utf-8\")","id":2747,"name":"decode_bytes_array","nodeType":"Function","startLoc":80,"text":"def decode_bytes_array(bytes_array, encoding=\"utf-8\"):\n # This is faster than using np.char.decode() or np.vectorize()\n bytes_array = np.asarray(bytes_array)\n decoded = [x.decode(encoding) for x in bytes_array.ravel()]\n return np.array(decoded, dtype=object).reshape(bytes_array.shape)"},{"attributeType":"null","col":0,"comment":"null","endLoc":54,"id":2748,"name":"LONG_VERSION_PY","nodeType":"Attribute","startLoc":54,"text":"LONG_VERSION_PY"},{"attributeType":"null","col":0,"comment":"null","endLoc":55,"id":2749,"name":"HANDLERS","nodeType":"Attribute","startLoc":55,"text":"HANDLERS"},{"col":0,"comment":"","endLoc":1,"header":"strings.py#","id":2750,"name":"","nodeType":"Function","startLoc":1,"text":"\"\"\"Coders for strings.\"\"\""},{"col":0,"comment":"","endLoc":10,"header":"_version.py#","id":2751,"name":"","nodeType":"Function","startLoc":10,"text":"\"\"\"Git implementation of _version.py.\"\"\"\n\nLONG_VERSION_PY = {}\n\nHANDLERS = {}"},{"fileName":"plot_rasterio_rgb.py","filePath":"doc/gallery","id":2752,"nodeType":"File","text":"# -*- coding: utf-8 -*-\n\"\"\"\n.. 
_recipes.rasterio_rgb:\n\n============================\nimshow() and map projections\n============================\n\nUsing rasterio's projection information for more accurate plots.\n\nThis example extends :ref:`recipes.rasterio` and plots the image in the\noriginal map projection instead of relying on pcolormesh and a map\ntransformation.\n\"\"\"\n\nimport cartopy.crs as ccrs\nimport matplotlib.pyplot as plt\n\nimport xarray as xr\n\n# Read the data\nurl = \"https://github.com/mapbox/rasterio/raw/master/tests/data/RGB.byte.tif\"\nda = xr.open_rasterio(url)\n\n# The data is in UTM projection. We have to set it manually until\n# https://github.com/SciTools/cartopy/issues/813 is implemented\ncrs = ccrs.UTM(\"18N\")\n\n# Plot on a map\nax = plt.subplot(projection=crs)\nda.plot.imshow(ax=ax, rgb=\"band\", transform=crs)\nax.coastlines(\"10m\", color=\"r\")\nplt.show()\n"},{"attributeType":"null","col":22,"comment":"null","endLoc":16,"id":2753,"name":"ccrs","nodeType":"Attribute","startLoc":16,"text":"ccrs"},{"attributeType":"null","col":28,"comment":"null","endLoc":17,"id":2754,"name":"plt","nodeType":"Attribute","startLoc":17,"text":"plt"},{"attributeType":"null","col":17,"comment":"null","endLoc":19,"id":2755,"name":"xr","nodeType":"Attribute","startLoc":19,"text":"xr"},{"attributeType":"null","col":0,"comment":"null","endLoc":22,"id":2756,"name":"url","nodeType":"Attribute","startLoc":22,"text":"url"},{"attributeType":"DataArray","col":0,"comment":"null","endLoc":23,"id":2757,"name":"da","nodeType":"Attribute","startLoc":23,"text":"da"},{"id":2758,"name":"dask.rst","nodeType":"TextFile","path":"doc","text":".. _dask:\n\nParallel computing with Dask\n============================\n\nxarray integrates with `Dask `__ to support parallel\ncomputations and streaming computation on datasets that don't fit into memory.\nCurrently, Dask is an entirely optional feature for xarray. However, the\nbenefits of using Dask are sufficiently strong that Dask may become a required\ndependency in a future version of xarray.\n\nFor a full example of how to use xarray's Dask integration, read the\n`blog post introducing xarray and Dask`_. More up-to-date examples\nmay be found at the `Pangeo project's use-cases `_\nand at the `Dask examples website `_.\n\n.. _blog post introducing xarray and Dask: http://stephanhoyer.com/2015/06/11/xray-dask-out-of-core-labeled-arrays/\n\nWhat is a Dask array?\n---------------------\n\n.. image:: _static/dask_array.png\n :width: 40 %\n :align: right\n :alt: A Dask array\n\nDask divides arrays into many small pieces, called *chunks*, each of which is\npresumed to be small enough to fit into memory.\n\nUnlike NumPy, which has eager evaluation, operations on Dask arrays are lazy.\nOperations queue up a series of tasks mapped over blocks, and no computation is\nperformed until you actually ask values to be computed (e.g., to print results\nto your screen or write to disk). At that point, data is loaded into memory\nand computation proceeds in a streaming fashion, block-by-block.\n\nThe actual computation is controlled by a multi-processing or thread pool,\nwhich allows Dask to take full advantage of multiple processors available on\nmost modern computers.\n\nFor more details on Dask, read `its documentation `__.\nNote that xarray only makes use of ``dask.array`` and ``dask.delayed``.\n\n.. _dask.io:\n\nReading and writing data\n------------------------\n\nThe usual way to create a ``Dataset`` filled with Dask arrays is to load the\ndata from a netCDF file or files. 
You can do this by supplying a ``chunks``\nargument to :py:func:`~xarray.open_dataset` or using the\n:py:func:`~xarray.open_mfdataset` function.\n\n.. ipython:: python\n :suppress:\n\n import numpy as np\n import pandas as pd\n import xarray as xr\n np.random.seed(123456)\n np.set_printoptions(precision=3, linewidth=100, threshold=100, edgeitems=3)\n\n ds = xr.Dataset({'temperature': (('time', 'latitude', 'longitude'),\n np.random.randn(30, 180, 180)),\n 'time': pd.date_range('2015-01-01', periods=30),\n 'longitude': np.arange(180),\n 'latitude': np.arange(89.5, -90.5, -1)})\n ds.to_netcdf('example-data.nc')\n\n.. ipython:: python\n\n ds = xr.open_dataset('example-data.nc', chunks={'time': 10})\n ds\n\nIn this example ``latitude`` and ``longitude`` do not appear in the ``chunks``\ndict, so only one chunk will be used along those dimensions. It is also\nentirely equivalent to opening a dataset using :py:meth:`~xarray.open_dataset`\nand then chunking the data using the ``chunk`` method, e.g.,\n``xr.open_dataset('example-data.nc').chunk({'time': 10})``.\n\nTo open multiple files simultaneously in parallel using Dask delayed,\nuse :py:func:`~xarray.open_mfdataset`::\n\n xr.open_mfdataset('my/files/*.nc', parallel=True)\n\nThis function will automatically concatenate and merge datasets into one in\nthe simple cases that it understands (see :py:func:`~xarray.auto_combine`\nfor the full disclaimer). By default, :py:meth:`~xarray.open_mfdataset` will chunk each\nnetCDF file into a single Dask array; again, supply the ``chunks`` argument to\ncontrol the size of the resulting Dask arrays. In more complex cases, you can\nopen each file individually using :py:meth:`~xarray.open_dataset` and merge the result, as\ndescribed in :ref:`combining data`. Passing the keyword argument ``parallel=True`` to :py:meth:`~xarray.open_mfdataset` will speed up the reading of large multi-file datasets by\nexecuting those read tasks in parallel using ``dask.delayed``.\n\nYou'll notice that printing a dataset still shows a preview of array values,\neven if they are actually Dask arrays. We can do this quickly with Dask because\nwe only need to compute the first few values (typically from the first block).\nTo reveal the true nature of an array, print a DataArray:\n\n.. ipython:: python\n\n ds.temperature\n\nOnce you've manipulated a Dask array, you can still write a dataset too big to\nfit into memory back to disk by using :py:meth:`~xarray.Dataset.to_netcdf` in the\nusual way.\n\n.. ipython:: python\n\n ds.to_netcdf('manipulated-example-data.nc')\n\nBy setting the ``compute`` argument to ``False``, :py:meth:`~xarray.Dataset.to_netcdf`\nwill return a ``dask.delayed`` object that can be computed later.\n\n.. ipython:: python\n\n from dask.diagnostics import ProgressBar\n # or distributed.progress when using the distributed scheduler\n delayed_obj = ds.to_netcdf('manipulated-example-data.nc', compute=False)\n with ProgressBar():\n results = delayed_obj.compute()\n\n.. note::\n\n When using Dask's distributed scheduler to write NETCDF4 files,\n it may be necessary to set the environment variable `HDF5_USE_FILE_LOCKING=FALSE`\n to avoid competing locks within the HDF5 SWMR file locking scheme. Note that\n writing netCDF files with Dask's distributed scheduler is only supported for\n the `netcdf4` backend.\n\nA dataset can also be converted to a Dask DataFrame using :py:meth:`~xarray.Dataset.to_dask_dataframe`.\n\n.. 
ipython:: python\n\n df = ds.to_dask_dataframe()\n df\n\nDask DataFrames do not support multi-indexes so the coordinate variables from the dataset are included as columns in the Dask DataFrame.\n\n.. ipython:: python\n :suppress:\n\n import os\n os.remove('example-data.nc')\n os.remove('manipulated-example-data.nc')\n\nUsing Dask with xarray\n----------------------\n\nNearly all existing xarray methods (including those for indexing, computation,\nconcatenating and grouped operations) have been extended to work automatically\nwith Dask arrays. When you load data as a Dask array in an xarray data\nstructure, almost all xarray operations will keep it as a Dask array; when this\nis not possible, they will raise an exception rather than unexpectedly loading\ndata into memory. Converting a Dask array into memory generally requires an\nexplicit conversion step. One notable exception is indexing operations: to\nenable label based indexing, xarray will automatically load coordinate labels\ninto memory.\n\n.. tip::\n\n By default, dask uses its multi-threaded scheduler, which distributes work across\n multiple cores and allows for processing some datasets that do not fit into memory.\n For running across a cluster, `setup the distributed scheduler `_.\n\nThe easiest way to convert an xarray data structure from lazy Dask arrays into\n*eager*, in-memory NumPy arrays is to use the :py:meth:`~xarray.Dataset.load` method:\n\n.. ipython:: python\n\n ds.load()\n\nYou can also access :py:attr:`~xarray.DataArray.values`, which will always be a\nNumPy array:\n\n.. ipython::\n :verbatim:\n\n In [5]: ds.temperature.values\n Out[5]:\n array([[[ 4.691e-01, -2.829e-01, ..., -5.577e-01, 3.814e-01],\n [ 1.337e+00, -1.531e+00, ..., 8.726e-01, -1.538e+00],\n ...\n # truncated for brevity\n\nExplicit conversion by wrapping a DataArray with ``np.asarray`` also works:\n\n.. ipython::\n :verbatim:\n\n In [5]: np.asarray(ds.temperature)\n Out[5]:\n array([[[ 4.691e-01, -2.829e-01, ..., -5.577e-01, 3.814e-01],\n [ 1.337e+00, -1.531e+00, ..., 8.726e-01, -1.538e+00],\n ...\n\nAlternatively you can load the data into memory but keep the arrays as\nDask arrays using the :py:meth:`~xarray.Dataset.persist` method:\n\n.. ipython:: python\n\n ds = ds.persist()\n\n:py:meth:`~xarray.Dataset.persist` is particularly useful when using a\ndistributed cluster because the data will be loaded into distributed memory\nacross your machines and be much faster to use than reading repeatedly from\ndisk.\n\n.. warning::\n\n On a single machine :py:meth:`~xarray.Dataset.persist` will try to load all of\n your data into memory. You should make sure that your dataset is not larger than\n available memory.\n\n.. note::\n For more on the differences between :py:meth:`~xarray.Dataset.persist` and\n :py:meth:`~xarray.Dataset.compute` see this `Stack Overflow answer `_ and the `Dask documentation `_.\n\nFor performance you may wish to consider chunk sizes. The correct choice of\nchunk size depends both on your data and on the operations you want to perform.\nWith xarray, both converting data to a Dask arrays and converting the chunk\nsizes of Dask arrays is done with the :py:meth:`~xarray.Dataset.chunk` method:\n\n.. ipython:: python\n :suppress:\n\n ds = ds.chunk({'time': 10})\n\n.. ipython:: python\n\n rechunked = ds.chunk({'latitude': 100, 'longitude': 100})\n\nYou can view the size of existing chunks on an array by viewing the\n:py:attr:`~xarray.Dataset.chunks` attribute:\n\n.. 
ipython:: python\n\n rechunked.chunks\n\nIf there are not consistent chunksizes between all the arrays in a dataset\nalong a particular dimension, an exception is raised when you try to access\n``.chunks``.\n\n.. note::\n\n In the future, we would like to enable automatic alignment of Dask\n chunksizes (but not the other way around). We might also require that all\n arrays in a dataset share the same chunking alignment. Neither of these\n are currently done.\n\nNumPy ufuncs like ``np.sin`` currently only work on eagerly evaluated arrays\n(this will change with the next major NumPy release). We have provided\nreplacements that also work on all xarray objects, including those that store\nlazy Dask arrays, in the :ref:`xarray.ufuncs ` module:\n\n.. ipython:: python\n\n import xarray.ufuncs as xu\n xu.sin(rechunked)\n\nTo access Dask arrays directly, use the new\n:py:attr:`DataArray.data ` attribute. This attribute exposes\narray data either as a Dask array or as a NumPy array, depending on whether it has been\nloaded into Dask or not:\n\n.. ipython:: python\n\n ds.temperature.data\n\n.. note::\n\n In the future, we may extend ``.data`` to support other \"computable\" array\n backends beyond Dask and NumPy (e.g., to support sparse arrays).\n\n.. _dask.automatic-parallelization:\n\nAutomatic parallelization\n-------------------------\n\nAlmost all of xarray's built-in operations work on Dask arrays. If you want to\nuse a function that isn't wrapped by xarray, one option is to extract Dask\narrays from xarray objects (``.data``) and use Dask directly.\n\nAnother option is to use xarray's :py:func:`~xarray.apply_ufunc`, which can\nautomate `embarrassingly parallel\n`__ \"map\" type operations\nwhere a function written for processing NumPy arrays should be repeatedly\napplied to xarray objects containing Dask arrays. It works similarly to\n:py:func:`dask.array.map_blocks` and :py:func:`dask.array.atop`, but without\nrequiring an intermediate layer of abstraction.\n\nFor the best performance when using Dask's multi-threaded scheduler, wrap a\nfunction that already releases the global interpreter lock, which fortunately\nalready includes most NumPy and Scipy functions. Here we show an example\nusing NumPy operations and a fast function from\n`bottleneck `__, which\nwe use to calculate `Spearman's rank-correlation coefficient `__:\n\n.. code-block:: python\n\n import numpy as np\n import xarray as xr\n import bottleneck\n\n def covariance_gufunc(x, y):\n return ((x - x.mean(axis=-1, keepdims=True))\n * (y - y.mean(axis=-1, keepdims=True))).mean(axis=-1)\n\n def pearson_correlation_gufunc(x, y):\n return covariance_gufunc(x, y) / (x.std(axis=-1) * y.std(axis=-1))\n\n def spearman_correlation_gufunc(x, y):\n x_ranks = bottleneck.rankdata(x, axis=-1)\n y_ranks = bottleneck.rankdata(y, axis=-1)\n return pearson_correlation_gufunc(x_ranks, y_ranks)\n\n def spearman_correlation(x, y, dim):\n return xr.apply_ufunc(\n spearman_correlation_gufunc, x, y,\n input_core_dims=[[dim], [dim]],\n dask='parallelized',\n output_dtypes=[float])\n\nThe only aspect of this example that is different from standard usage of\n``apply_ufunc()`` is that we needed to supply the ``output_dtypes`` arguments.\n(Read up on :ref:`comput.wrapping-custom` for an explanation of the\n\"core dimensions\" listed in ``input_core_dims``.)\n\nOur new ``spearman_correlation()`` function achieves near linear speedup\nwhen run on large arrays across the four cores on my laptop. 
It would also\nwork as a streaming operation, when run on arrays loaded from disk:\n\n.. ipython::\n :verbatim:\n\n In [56]: rs = np.random.RandomState(0)\n\n In [57]: array1 = xr.DataArray(rs.randn(1000, 100000), dims=['place', 'time']) # 800MB\n\n In [58]: array2 = array1 + 0.5 * rs.randn(1000, 100000)\n\n # using one core, on NumPy arrays\n In [61]: %time _ = spearman_correlation(array1, array2, 'time')\n CPU times: user 21.6 s, sys: 2.84 s, total: 24.5 s\n Wall time: 24.9 s\n\n In [8]: chunked1 = array1.chunk({'place': 10})\n\n In [9]: chunked2 = array2.chunk({'place': 10})\n\n # using all my laptop's cores, with Dask\n In [63]: r = spearman_correlation(chunked1, chunked2, 'time').compute()\n\n In [64]: %time _ = r.compute()\n CPU times: user 30.9 s, sys: 1.74 s, total: 32.6 s\n Wall time: 4.59 s\n\nOne limitation of ``apply_ufunc()`` is that it cannot be applied to arrays with\nmultiple chunks along a core dimension:\n\n.. ipython::\n :verbatim:\n\n In [63]: spearman_correlation(chunked1, chunked2, 'place')\n ValueError: dimension 'place' on 0th function argument to apply_ufunc with\n dask='parallelized' consists of multiple chunks, but is also a core\n dimension. To fix, rechunk into a single Dask array chunk along this\n dimension, i.e., ``.rechunk({'place': -1})``, but beware that this may\n significantly increase memory usage.\n\nThis reflects the nature of core dimensions, in contrast to broadcast (non-core)\ndimensions that allow operations to be split into arbitrary chunks for\napplication.\n\n.. tip::\n\n For the majority of NumPy functions that are already wrapped by Dask, it's\n usually a better idea to use the pre-existing ``dask.array`` function, by\n using either a pre-existing xarray methods or\n :py:func:`~xarray.apply_ufunc()` with ``dask='allowed'``. Dask can often\n have a more efficient implementation that makes use of the specialized\n structure of a problem, unlike the generic speedups offered by\n ``dask='parallelized'``.\n\nChunking and performance\n------------------------\n\nThe ``chunks`` parameter has critical performance implications when using Dask\narrays. If your chunks are too small, queueing up operations will be extremely\nslow, because Dask will translate each operation into a huge number of\noperations mapped across chunks. Computation on Dask arrays with small chunks\ncan also be slow, because each operation on a chunk has some fixed overhead from\nthe Python interpreter and the Dask task executor.\n\nConversely, if your chunks are too big, some of your computation may be wasted,\nbecause Dask only computes results one chunk at a time.\n\nA good rule of thumb is to create arrays with a minimum chunksize of at least\none million elements (e.g., a 1000x1000 matrix). With large arrays (10+ GB), the\ncost of queueing up Dask operations can be noticeable, and you may need even\nlarger chunksizes.\n\n.. tip::\n\n Check out the dask documentation on `chunks `_.\n\n\nOptimization Tips\n-----------------\n\nWith analysis pipelines involving both spatial subsetting and temporal resampling, Dask performance can become very slow in certain cases. Here are some optimization tips we have found through experience:\n\n1. Do your spatial and temporal indexing (e.g. ``.sel()`` or ``.isel()``) early in the pipeline, especially before calling ``resample()`` or ``groupby()``. Grouping and resampling triggers some computation on all the blocks, which in theory should commute with indexing, but this optimization hasn't been implemented in Dask yet. 
(See `Dask issue #746 `_).\n\n2. Save intermediate results to disk as a netCDF files (using ``to_netcdf()``) and then load them again with ``open_dataset()`` for further computations. For example, if subtracting temporal mean from a dataset, save the temporal mean to disk before subtracting. Again, in theory, Dask should be able to do the computation in a streaming fashion, but in practice this is a fail case for the Dask scheduler, because it tries to keep every chunk of an array that it computes in memory. (See `Dask issue #874 `_)\n\n3. Specify smaller chunks across space when using :py:meth:`~xarray.open_mfdataset` (e.g., ``chunks={'latitude': 10, 'longitude': 10}``). This makes spatial subsetting easier, because there's no risk you will load chunks of data referring to different chunks (probably not necessary if you follow suggestion 1).\n\n4. Using the h5netcdf package by passing ``engine='h5netcdf'`` to :py:meth:`~xarray.open_mfdataset`\n can be quicker than the default ``engine='netcdf4'`` that uses the netCDF4 package.\n\n5. Some dask-specific tips may be found `here `_.\n\n6. The dask `diagnostics `_ can be\n useful in identifying performance bottlenecks.\n"},{"id":2759,"name":"setup.cfg","nodeType":"TextFile","path":"","text":"[tool:pytest]\npython_files=test_*.py\ntestpaths=xarray/tests properties\n# Fixed upstream in https://github.com/kwgoodman/bottleneck/pull/199\nfilterwarnings =\n ignore:Using a non-tuple sequence for multidimensional indexing is deprecated:FutureWarning\nenv =\n UVCDAT_ANONYMOUS_LOG=no\nmarkers =\n flaky: flaky tests\n network: tests requiring a network connection\n slow: slow tests\n\n[flake8]\nignore=\n # whitespace before ':' - doesn't work well with black\n E203\n E402\n # line too long - let black worry about that\n E501\n # do not assign a lambda expression, use a def\n E731\n # line break before binary operator\n W503\nexclude=\n doc\n\n[isort]\ndefault_section=THIRDPARTY\nknown_first_party=xarray\nmulti_line_output=3\ninclude_trailing_comma=True\nforce_grid_wrap=0\nuse_parentheses=True\nline_length=88\n\n# Most of the numerical computing stack doesn't have type annotations yet.\n[mypy-affine.*]\nignore_missing_imports = True\n[mypy-bottleneck.*]\nignore_missing_imports = True\n[mypy-cdms2.*]\nignore_missing_imports = True\n[mypy-cf_units.*]\nignore_missing_imports = True\n[mypy-cfgrib.*]\nignore_missing_imports = True\n[mypy-cftime.*]\nignore_missing_imports = True\n[mypy-dask.*]\nignore_missing_imports = True\n[mypy-distributed.*]\nignore_missing_imports = True\n[mypy-h5netcdf.*]\nignore_missing_imports = True\n[mypy-h5py.*]\nignore_missing_imports = True\n[mypy-iris.*]\nignore_missing_imports = True\n[mypy-matplotlib.*]\nignore_missing_imports = True\n[mypy-Nio.*]\nignore_missing_imports = True\n[mypy-nc_time_axis.*]\nignore_missing_imports = True\n[mypy-numbagg.*]\nignore_missing_imports = True\n[mypy-numpy.*]\nignore_missing_imports = True\n[mypy-netCDF4.*]\nignore_missing_imports = True\n[mypy-netcdftime.*]\nignore_missing_imports = True\n[mypy-pandas.*]\nignore_missing_imports = True\n[mypy-PseudoNetCDF.*]\nignore_missing_imports = True\n[mypy-pydap.*]\nignore_missing_imports = True\n[mypy-pytest.*]\nignore_missing_imports = True\n[mypy-rasterio.*]\nignore_missing_imports = True\n[mypy-scipy.*]\nignore_missing_imports = True\n[mypy-seaborn.*]\nignore_missing_imports = True\n[mypy-sparse.*]\nignore_missing_imports = True\n[mypy-toolz.*]\nignore_missing_imports = True\n[mypy-zarr.*]\nignore_missing_imports = True\n\n# setuptools is not 
typed\n[mypy-setup]\nignore_errors = True\n# versioneer code\n[mypy-versioneer.*]\nignore_errors = True\n# written by versioneer\n[mypy-xarray._version]\nignore_errors = True\n# version spanning code is hard to type annotate (and most of this module will\n# be going away soon anyways)\n[mypy-xarray.core.pycompat]\nignore_errors = True\n\n[versioneer]\nVCS = git\nstyle = pep440\nversionfile_source = xarray/_version.py\nversionfile_build = xarray/_version.py\ntag_prefix = v\nparentdir_prefix = xarray-\n\n[aliases]\ntest = pytest"},{"id":2760,"name":"io.rst","nodeType":"TextFile","path":"doc","text":".. _io:\n\nReading and writing files\n=========================\n\nxarray supports direct serialization and IO to several file formats, from\nsimple :ref:`io.pickle` files to the more flexible :ref:`io.netcdf`\nformat (recommended).\n\n.. ipython:: python\n :suppress:\n\n import numpy as np\n import pandas as pd\n import xarray as xr\n np.random.seed(123456)\n\n.. _io.netcdf:\n\nnetCDF\n------\n\nThe recommended way to store xarray data structures is `netCDF`__, which\nis a binary file format for self-described datasets that originated\nin the geosciences. xarray is based on the netCDF data model, so netCDF files\non disk directly correspond to :py:class:`~xarray.Dataset` objects (more accurately,\na group in a netCDF file directly corresponds to a to :py:class:`~xarray.Dataset` object.\nSee :ref:`io.netcdf_groups` for more.)\n\nNetCDF is supported on almost all platforms, and parsers exist\nfor the vast majority of scientific programming languages. Recent versions of\nnetCDF are based on the even more widely used HDF5 file-format.\n\n__ http://www.unidata.ucar.edu/software/netcdf/\n\n.. tip::\n\n If you aren't familiar with this data format, the `netCDF FAQ`_ is a good\n place to start.\n\n.. _netCDF FAQ: http://www.unidata.ucar.edu/software/netcdf/docs/faq.html#What-Is-netCDF\n\nReading and writing netCDF files with xarray requires scipy or the\n`netCDF4-Python`__ library to be installed (the later is required to\nread/write netCDF V4 files and use the compression options described below).\n\n__ https://github.com/Unidata/netcdf4-python\n\nWe can save a Dataset to disk using the\n:py:meth:`~Dataset.to_netcdf` method:\n\n.. ipython:: python\n\n ds = xr.Dataset({'foo': (('x', 'y'), np.random.rand(4, 5))},\n coords={'x': [10, 20, 30, 40],\n 'y': pd.date_range('2000-01-01', periods=5),\n 'z': ('x', list('abcd'))})\n\n ds.to_netcdf('saved_on_disk.nc')\n\nBy default, the file is saved as netCDF4 (assuming netCDF4-Python is\ninstalled). You can control the format and engine used to write the file with\nthe ``format`` and ``engine`` arguments.\n\n.. tip::\n\n Using the `h5netcdf `_ package\n by passing ``engine='h5netcdf'`` to :py:meth:`~xarray.open_dataset` can\n sometimes be quicker than the default ``engine='netcdf4'`` that uses the\n `netCDF4 `_ package.\n\n\nWe can load netCDF files to create a new Dataset using\n:py:func:`~xarray.open_dataset`:\n\n.. ipython:: python\n\n ds_disk = xr.open_dataset('saved_on_disk.nc')\n ds_disk\n\nSimilarly, a DataArray can be saved to disk using the\n:py:attr:`DataArray.to_netcdf ` method, and loaded\nfrom disk using the :py:func:`~xarray.open_dataarray` function. 
As netCDF files\ncorrespond to :py:class:`~xarray.Dataset` objects, these functions internally\nconvert the ``DataArray`` to a ``Dataset`` before saving, and then convert back\nwhen loading, ensuring that the ``DataArray`` that is loaded is always exactly\nthe same as the one that was saved.\n\nA dataset can also be loaded or written to a specific group within a netCDF\nfile. To load from a group, pass a ``group`` keyword argument to the\n``open_dataset`` function. The group can be specified as a path-like\nstring, e.g., to access subgroup 'bar' within group 'foo' pass\n'/foo/bar' as the ``group`` argument. When writing multiple groups in one file,\npass ``mode='a'`` to ``to_netcdf`` to ensure that each call does not delete the\nfile.\n\nData is *always* loaded lazily from netCDF files. You can manipulate, slice and subset\nDataset and DataArray objects, and no array values are loaded into memory until\nyou try to perform some sort of actual computation. For an example of how these\nlazy arrays work, see the OPeNDAP section below.\n\nIt is important to note that when you modify values of a Dataset, even one\nlinked to files on disk, only the in-memory copy you are manipulating in xarray\nis modified: the original file on disk is never touched.\n\n.. tip::\n\n xarray's lazy loading of remote or on-disk datasets is often but not always\n desirable. Before performing computationally intense operations, it is\n often a good idea to load a Dataset (or DataArray) entirely into memory by\n invoking the :py:meth:`~xarray.Dataset.load` method.\n\nDatasets have a :py:meth:`~xarray.Dataset.close` method to close the associated\nnetCDF file. However, it's often cleaner to use a ``with`` statement:\n\n.. ipython:: python\n\n # this automatically closes the dataset after use\n with xr.open_dataset('saved_on_disk.nc') as ds:\n print(ds.keys())\n\nAlthough xarray provides reasonable support for incremental reads of files on\ndisk, it does not support incremental writes, which can be a useful strategy\nfor dealing with datasets too big to fit into memory. Instead, xarray integrates\nwith dask.array (see :ref:`dask`), which provides a fully featured engine for\nstreaming computation.\n\nIt is possible to append or overwrite netCDF variables using the ``mode='a'``\nargument. When using this option, all variables in the dataset will be written\nto the original netCDF file, regardless if they exist in the original dataset.\n\n\n.. _io.netcdf_groups:\n\nGroups\n~~~~~~\n\nNetCDF groups are not supported as part of the :py:class:`~xarray.Dataset` data model.\nInstead, groups can be loaded individually as Dataset objects.\nTo do so, pass a ``group`` keyword argument to the\n:py:func:`~xarray.open_dataset` function. The group can be specified as a path-like\nstring, e.g., to access subgroup ``'bar'`` within group ``'foo'`` pass\n``'/foo/bar'`` as the ``group`` argument.\nIn a similar way, the ``group`` keyword argument can be given to the\n:py:meth:`~xarray.Dataset.to_netcdf` method to write to a group\nin a netCDF file.\nWhen writing multiple groups in one file, pass ``mode='a'`` to\n:py:meth:`~xarray.Dataset.to_netcdf` to ensure that each call does not delete the file.\n\n.. _io.encoding:\n\nReading encoded data\n~~~~~~~~~~~~~~~~~~~~\n\nNetCDF files follow some conventions for encoding datetime arrays (as numbers\nwith a \"units\" attribute) and for packing and unpacking data (as\ndescribed by the \"scale_factor\" and \"add_offset\" attributes). 
If the argument\n``decode_cf=True`` (default) is given to :py:func:`~xarray.open_dataset`, xarray will attempt\nto automatically decode the values in the netCDF objects according to\n`CF conventions`_. Sometimes this will fail, for example, if a variable\nhas an invalid \"units\" or \"calendar\" attribute. For these cases, you can\nturn this decoding off manually.\n\n.. _CF conventions: http://cfconventions.org/\n\nYou can view this encoding information (among others) in the\n:py:attr:`DataArray.encoding ` and\n:py:attr:`DataArray.encoding ` attributes:\n\n.. ipython::\n :verbatim:\n\n In [1]: ds_disk['y'].encoding\n Out[1]:\n {'zlib': False,\n 'shuffle': False,\n 'complevel': 0,\n 'fletcher32': False,\n 'contiguous': True,\n 'chunksizes': None,\n 'source': 'saved_on_disk.nc',\n 'original_shape': (5,),\n 'dtype': dtype('int64'),\n 'units': 'days since 2000-01-01 00:00:00',\n 'calendar': 'proleptic_gregorian'}\n\n In [9]: ds_disk.encoding\n Out[9]:\n {'unlimited_dims': set(),\n 'source': 'saved_on_disk.nc'}\n\nNote that all operations that manipulate variables other than indexing\nwill remove encoding information.\n\n.. ipython:: python\n :suppress:\n\n ds_disk.close()\n\n\n.. _combining multiple files:\n\nReading multi-file datasets\n...........................\n\nNetCDF files are often encountered in collections, e.g., with different files\ncorresponding to different model runs or one file per timestamp.\nxarray can straightforwardly combine such files into a single Dataset by making use of\n:py:func:`~xarray.concat`, :py:func:`~xarray.merge`, :py:func:`~xarray.combine_nested` and\n:py:func:`~xarray.combine_by_coords`. For details on the difference between these\nfunctions see :ref:`combining data`.\n\nXarray includes support for manipulating datasets that don't fit into memory\nwith dask_. If you have dask installed, you can open multiple files\nsimultaneously in parallel using :py:func:`~xarray.open_mfdataset`::\n\n xr.open_mfdataset('my/files/*.nc', parallel=True)\n\nThis function automatically concatenates and merges multiple files into a\nsingle xarray dataset.\nIt is the recommended way to open multiple files with xarray.\nFor more details on parallel reading, see :ref:`combining.multi`, :ref:`dask.io` and a\n`blog post`_ by Stephan Hoyer.\n:py:func:`~xarray.open_mfdataset` takes many kwargs that allow you to\ncontrol its behaviour (for e.g. ``parallel``, ``combine``, ``compat``, ``join``, ``concat_dim``).\nSee its docstring for more details.\n\n\n.. note::\n\n A common use-case involves a dataset distributed across a large number of files with\n each file containing a large number of variables. Commonly a few of these variables\n need to be concatenated along a dimension (say ``\"time\"``), while the rest are equal\n across the datasets (ignoring floating point differences). The following command\n with suitable modifications (such as ``parallel=True``) works well with such datasets::\n\n xr.open_mfdataset('my/files/*.nc', concat_dim=\"time\",\n \t \t data_vars='minimal', coords='minimal', compat='override')\n\n This command concatenates variables along the ``\"time\"`` dimension, but only those that\n already contain the ``\"time\"`` dimension (``data_vars='minimal', coords='minimal'``).\n Variables that lack the ``\"time\"`` dimension are taken from the first dataset\n (``compat='override'``).\n\n\n.. _dask: http://dask.pydata.org\n.. 
_blog post: http://stephanhoyer.com/2015/06/11/xray-dask-out-of-core-labeled-arrays/\n\nSometimes multi-file datasets are not conveniently organized for easy use of :py:func:`~xarray.open_mfdataset`.\nOne can use the ``preprocess`` argument to provide a function that takes a dataset\nand returns a modified Dataset.\n:py:func:`~xarray.open_mfdataset` will call ``preprocess`` on every dataset\n(corresponding to each file) prior to combining them.\n\n\nIf :py:func:`~xarray.open_mfdataset` does not meet your needs, other approaches are possible.\nThe general pattern for parallel reading of multiple files\nusing dask, modifying those datasets and then combining into a single ``Dataset`` is::\n\n def modify(ds):\n # modify ds here\n return ds\n\n\n # this is basically what open_mfdataset does\n open_kwargs = dict(decode_cf=True, decode_times=False)\n open_tasks = [dask.delayed(xr.open_dataset)(f, **open_kwargs) for f in file_names]\n tasks = [dask.delayed(modify)(task) for task in open_tasks]\n datasets = dask.compute(tasks) # get a list of xarray.Datasets\n combined = xr.combine_nested(datasets) # or some combination of concat, merge\n\n\nAs an example, here's how we could approximate ``MFDataset`` from the netCDF4\nlibrary::\n\n from glob import glob\n import xarray as xr\n\n def read_netcdfs(files, dim):\n # glob expands paths with * to a list of files, like the unix shell\n paths = sorted(glob(files))\n datasets = [xr.open_dataset(p) for p in paths]\n combined = xr.concat(dataset, dim)\n return combined\n\n combined = read_netcdfs('/all/my/files/*.nc', dim='time')\n\nThis function will work in many cases, but it's not very robust. First, it\nnever closes files, which means it will fail one you need to load more than\na few thousands file. Second, it assumes that you want all the data from each\nfile and that it can all fit into memory. In many situations, you only need\na small subset or an aggregated summary of the data from each file.\n\nHere's a slightly more sophisticated example of how to remedy these\ndeficiencies::\n\n def read_netcdfs(files, dim, transform_func=None):\n def process_one_path(path):\n # use a context manager, to ensure the file gets closed after use\n with xr.open_dataset(path) as ds:\n # transform_func should do some sort of selection or\n # aggregation\n if transform_func is not None:\n ds = transform_func(ds)\n # load all data from the transformed dataset, to ensure we can\n # use it after closing each original file\n ds.load()\n return ds\n\n paths = sorted(glob(files))\n datasets = [process_one_path(p) for p in paths]\n combined = xr.concat(datasets, dim)\n return combined\n\n # here we suppose we only care about the combined mean of each file;\n # you might also use indexing operations like .sel to subset datasets\n combined = read_netcdfs('/all/my/files/*.nc', dim='time',\n transform_func=lambda ds: ds.mean())\n\nThis pattern works well and is very robust. We've used similar code to process\ntens of thousands of files constituting 100s of GB of data.\n\n\n.. _io.netcdf.writing_encoded:\n\nWriting encoded data\n~~~~~~~~~~~~~~~~~~~~\n\nConversely, you can customize how xarray writes netCDF files on disk by\nproviding explicit encodings for each dataset variable. The ``encoding``\nargument takes a dictionary with variable names as keys and variable specific\nencodings as values. 
These encodings are saved as attributes on the netCDF\nvariables on disk, which allows xarray to faithfully read encoded data back into\nmemory.\n\nIt is important to note that using encodings is entirely optional: if you do not\nsupply any of these encoding options, xarray will write data to disk using a\ndefault encoding, or the options in the ``encoding`` attribute, if set.\nThis works perfectly fine in most cases, but encoding can be useful for\nadditional control, especially for enabling compression.\n\nIn the file on disk, these encodings as saved as attributes on each variable, which\nallow xarray and other CF-compliant tools for working with netCDF files to correctly\nread the data.\n\nScaling and type conversions\n............................\n\nThese encoding options work on any version of the netCDF file format:\n\n- ``dtype``: Any valid NumPy dtype or string convertable to a dtype, e.g., ``'int16'``\n or ``'float32'``. This controls the type of the data written on disk.\n- ``_FillValue``: Values of ``NaN`` in xarray variables are remapped to this value when\n saved on disk. This is important when converting floating point with missing values\n to integers on disk, because ``NaN`` is not a valid value for integer dtypes. As a\n default, variables with float types are attributed a ``_FillValue`` of ``NaN`` in the\n output file, unless explicitly disabled with an encoding ``{'_FillValue': None}``.\n- ``scale_factor`` and ``add_offset``: Used to convert from encoded data on disk to\n to the decoded data in memory, according to the formula\n ``decoded = scale_factor * encoded + add_offset``.\n\nThese parameters can be fruitfully combined to compress discretized data on disk. For\nexample, to save the variable ``foo`` with a precision of 0.1 in 16-bit integers while\nconverting ``NaN`` to ``-9999``, we would use\n``encoding={'foo': {'dtype': 'int16', 'scale_factor': 0.1, '_FillValue': -9999}}``.\nCompression and decompression with such discretization is extremely fast.\n\n.. _io.string-encoding:\n\nString encoding\n...............\n\nxarray can write unicode strings to netCDF files in two ways:\n\n- As variable length strings. This is only supported on netCDF4 (HDF5) files.\n- By encoding strings into bytes, and writing encoded bytes as a character\n array. The default encoding is UTF-8.\n\nBy default, we use variable length strings for compatible files and fall-back\nto using encoded character arrays. Character arrays can be selected even for\nnetCDF4 files by setting the ``dtype`` field in ``encoding`` to ``S1``\n(corresponding to NumPy's single-character bytes dtype).\n\nIf character arrays are used:\n\n- The string encoding that was used is stored on\n disk in the ``_Encoding`` attribute, which matches an ad-hoc convention\n `adopted by the netCDF4-Python library `_.\n At the time of this writing (October 2017), a standard convention for indicating\n string encoding for character arrays in netCDF files was\n `still under discussion `_.\n Technically, you can use\n `any string encoding recognized by Python `_ if you feel the need to deviate from UTF-8,\n by setting the ``_Encoding`` field in ``encoding``. But\n `we don't recommend it `_.\n- The character dimension name can be specifed by the ``char_dim_name`` field of a variable's\n ``encoding``. If this is not specified the default name for the character dimension is\n ``'string%s' % data.shape[-1]``. 
When decoding character arrays from existing files, the\n ``char_dim_name`` is added to the variables ``encoding`` to preserve if encoding happens, but\n the field can be edited by the user.\n\n.. warning::\n\n Missing values in bytes or unicode string arrays (represented by ``NaN`` in\n xarray) are currently written to disk as empty strings ``''``. This means\n missing values will not be restored when data is loaded from disk.\n This behavior is likely to change in the future (:issue:`1647`).\n Unfortunately, explicitly setting a ``_FillValue`` for string arrays to handle\n missing values doesn't work yet either, though we also hope to fix this in the\n future.\n\nChunk based compression\n.......................\n\n``zlib``, ``complevel``, ``fletcher32``, ``continguous`` and ``chunksizes``\ncan be used for enabling netCDF4/HDF5's chunk based compression, as described\nin the `documentation for createVariable`_ for netCDF4-Python. This only works\nfor netCDF4 files and thus requires using ``format='netCDF4'`` and either\n``engine='netcdf4'`` or ``engine='h5netcdf'``.\n\n.. _documentation for createVariable: http://unidata.github.io/netcdf4-python/#netCDF4.Dataset.createVariable\n\nChunk based gzip compression can yield impressive space savings, especially\nfor sparse data, but it comes with significant performance overhead. HDF5\nlibraries can only read complete chunks back into memory, and maximum\ndecompression speed is in the range of 50-100 MB/s. Worse, HDF5's compression\nand decompression currently cannot be parallelized with dask. For these reasons, we\nrecommend trying discretization based compression (described above) first.\n\nTime units\n..........\n\nThe ``units`` and ``calendar`` attributes control how xarray serializes ``datetime64`` and\n``timedelta64`` arrays to datasets on disk as numeric values. The ``units`` encoding\nshould be a string like ``'days since 1900-01-01'`` for ``datetime64`` data or a string\nlike ``'days'`` for ``timedelta64`` data. ``calendar`` should be one of the calendar types\nsupported by netCDF4-python: 'standard', 'gregorian', 'proleptic_gregorian' 'noleap',\n'365_day', '360_day', 'julian', 'all_leap', '366_day'.\n\nBy default, xarray uses the 'proleptic_gregorian' calendar and units of the smallest time\ndifference between values, with a reference time of the first time value.\n\nInvalid netCDF files\n~~~~~~~~~~~~~~~~~~~~\n\nThe library ``h5netcdf`` allows writing some dtypes (booleans, complex, ...) that aren't \nallowed in netCDF4 (see\n`h5netcdf documentation `_.\nThis feature is availabe through :py:func:`DataArray.to_netcdf` and\n:py:func:`Dataset.to_netcdf` when used with ``engine=\"h5netcdf\"``\nand currently raises a warning unless ``invalid_netcdf=True`` is set:\n\n.. ipython:: python\n :okwarning:\n\n # Writing complex valued data\n da = xr.DataArray([1.+1.j, 2.+2.j, 3.+3.j])\n da.to_netcdf(\"complex.nc\", engine=\"h5netcdf\", invalid_netcdf=True)\n\n # Reading it back\n xr.open_dataarray(\"complex.nc\", engine=\"h5netcdf\")\n\n.. ipython:: python\n :suppress:\n\n import os\n os.remove('complex.nc')\n\n.. warning::\n\n Note that this produces a file that is likely to be not readable by other netCDF\n libraries!\n\n.. _io.iris:\n\nIris\n----\n\nThe Iris_ tool allows easy reading of common meteorological and climate model formats\n(including GRIB and UK MetOffice PP files) into ``Cube`` objects which are in many ways very\nsimilar to ``DataArray`` objects, while enforcing a CF-compliant data model. 
If iris is\ninstalled xarray can convert a ``DataArray`` into a ``Cube`` using\n:py:meth:`~xarray.DataArray.to_iris`:\n\n.. ipython:: python\n\n da = xr.DataArray(np.random.rand(4, 5), dims=['x', 'y'],\n coords=dict(x=[10, 20, 30, 40],\n y=pd.date_range('2000-01-01', periods=5)))\n\n cube = da.to_iris()\n cube\n\nConversely, we can create a new ``DataArray`` object from a ``Cube`` using\n:py:meth:`~xarray.DataArray.from_iris`:\n\n.. ipython:: python\n\n da_cube = xr.DataArray.from_iris(cube)\n da_cube\n\n\n.. _Iris: http://scitools.org.uk/iris\n\n\nOPeNDAP\n-------\n\nxarray includes support for `OPeNDAP`__ (via the netCDF4 library or Pydap), which\nlets us access large datasets over HTTP.\n\n__ http://www.opendap.org/\n\nFor example, we can open a connection to GBs of weather data produced by the\n`PRISM`__ project, and hosted by `IRI`__ at Columbia:\n\n__ http://www.prism.oregonstate.edu/\n__ http://iri.columbia.edu/\n\n.. ipython source code for this section\n we don't use this to avoid hitting the DAP server on every doc build.\n\n remote_data = xr.open_dataset(\n 'http://iridl.ldeo.columbia.edu/SOURCES/.OSU/.PRISM/.monthly/dods',\n decode_times=False)\n tmax = remote_data.tmax[:500, ::3, ::3]\n tmax\n\n @savefig opendap-prism-tmax.png\n tmax[0].plot()\n\n.. ipython::\n :verbatim:\n\n In [3]: remote_data = xr.open_dataset(\n ...: 'http://iridl.ldeo.columbia.edu/SOURCES/.OSU/.PRISM/.monthly/dods',\n ...: decode_times=False)\n\n In [4]: remote_data\n Out[4]:\n \n Dimensions: (T: 1422, X: 1405, Y: 621)\n Coordinates:\n * X (X) float32 -125.0 -124.958 -124.917 -124.875 -124.833 -124.792 -124.75 ...\n * T (T) float32 -779.5 -778.5 -777.5 -776.5 -775.5 -774.5 -773.5 -772.5 -771.5 ...\n * Y (Y) float32 49.9167 49.875 49.8333 49.7917 49.75 49.7083 49.6667 49.625 ...\n Data variables:\n ppt (T, Y, X) float64 ...\n tdmean (T, Y, X) float64 ...\n tmax (T, Y, X) float64 ...\n tmin (T, Y, X) float64 ...\n Attributes:\n Conventions: IRIDL\n expires: 1375315200\n\n.. TODO: update this example to show off decode_cf?\n\n.. note::\n\n Like many real-world datasets, this dataset does not entirely follow\n `CF conventions`_. Unexpected formats will usually cause xarray's automatic\n decoding to fail. The way to work around this is to either set\n ``decode_cf=False`` in ``open_dataset`` to turn off all use of CF\n conventions, or by only disabling the troublesome parser.\n In this case, we set ``decode_times=False`` because the time axis here\n provides the calendar attribute in a format that xarray does not expect\n (the integer ``360`` instead of a string like ``'360_day'``).\n\nWe can select and slice this data any number of times, and nothing is loaded\nover the network until we look at particular values:\n\n.. ipython::\n :verbatim:\n\n In [4]: tmax = remote_data['tmax'][:500, ::3, ::3]\n\n In [5]: tmax\n Out[5]:\n \n [48541500 values with dtype=float64]\n Coordinates:\n * Y (Y) float32 49.9167 49.7917 49.6667 49.5417 49.4167 49.2917 ...\n * X (X) float32 -125.0 -124.875 -124.75 -124.625 -124.5 -124.375 ...\n * T (T) float32 -779.5 -778.5 -777.5 -776.5 -775.5 -774.5 -773.5 ...\n Attributes:\n pointwidth: 120\n standard_name: air_temperature\n units: Celsius_scale\n expires: 1443657600\n\n # the data is downloaded automatically when we make the plot\n In [6]: tmax[0].plot()\n\n.. image:: _static/opendap-prism-tmax.png\n\nSome servers require authentication before we can access the data. 
For this\npurpose we can explicitly create a :py:class:`~xarray.backends.PydapDataStore`\nand pass in a `Requests`__ session object. For example for\nHTTP Basic authentication::\n\n import xarray as xr\n import requests\n\n session = requests.Session()\n session.auth = ('username', 'password')\n\n store = xr.backends.PydapDataStore.open('http://example.com/data',\n session=session)\n ds = xr.open_dataset(store)\n\n`Pydap's cas module`__ has functions that generate custom sessions for\nservers that use CAS single sign-on. For example, to connect to servers\nthat require NASA's URS authentication::\n\n import xarray as xr\n from pydata.cas.urs import setup_session\n\n ds_url = 'https://gpm1.gesdisc.eosdis.nasa.gov/opendap/hyrax/example.nc'\n\n session = setup_session('username', 'password', check_url=ds_url)\n store = xr.backends.PydapDataStore.open(ds_url, session=session)\n\n ds = xr.open_dataset(store)\n\n__ http://docs.python-requests.org\n__ http://pydap.readthedocs.io/en/latest/client.html#authentication\n\n.. _io.pickle:\n\nPickle\n------\n\nThe simplest way to serialize an xarray object is to use Python's built-in pickle\nmodule:\n\n.. ipython:: python\n\n import pickle\n\n # use the highest protocol (-1) because it is way faster than the default\n # text based pickle format\n pkl = pickle.dumps(ds, protocol=-1)\n\n pickle.loads(pkl)\n\nPickling is important because it doesn't require any external libraries\nand lets you use xarray objects with Python modules like\n:py:mod:`multiprocessing` or :ref:`Dask `. However, pickling is\n**not recommended for long-term storage**.\n\nRestoring a pickle requires that the internal structure of the types for the\npickled data remain unchanged. Because the internal design of xarray is still\nbeing refined, we make no guarantees (at this point) that objects pickled with\nthis version of xarray will work in future versions.\n\n.. note::\n\n When pickling an object opened from a NetCDF file, the pickle file will\n contain a reference to the file on disk. If you want to store the actual\n array values, load it into memory first with :py:meth:`~xarray.Dataset.load`\n or :py:meth:`~xarray.Dataset.compute`.\n\n.. _dictionary io:\n\nDictionary\n----------\n\nWe can convert a ``Dataset`` (or a ``DataArray``) to a dict using\n:py:meth:`~xarray.Dataset.to_dict`:\n\n.. ipython:: python\n\n d = ds.to_dict()\n d\n\nWe can create a new xarray object from a dict using\n:py:meth:`~xarray.Dataset.from_dict`:\n\n.. ipython:: python\n\n ds_dict = xr.Dataset.from_dict(d)\n ds_dict\n\nDictionary support allows for flexible use of xarray objects. It doesn't\nrequire external libraries and dicts can easily be pickled, or converted to\njson, or geojson. All the values are converted to lists, so dicts might\nbe quite large.\n\nTo export just the dataset schema, without the data itself, use the\n``data=False`` option:\n\n.. ipython:: python\n\n ds.to_dict(data=False)\n\nThis can be useful for generating indices of dataset contents to expose to\nsearch indices or other automated data discovery tools.\n\n.. ipython:: python\n :suppress:\n\n import os\n os.remove('saved_on_disk.nc')\n\n.. _io.rasterio:\n\nRasterio\n--------\n\nGeoTIFFs and other gridded raster datasets can be opened using `rasterio`_, if\nrasterio is installed. Here is an example of how to use\n:py:func:`~xarray.open_rasterio` to read one of rasterio's `test files`_:\n\n.. 
ipython::\n :verbatim:\n\n In [7]: rio = xr.open_rasterio('RGB.byte.tif')\n\n In [8]: rio\n Out[8]:\n \n [1703814 values with dtype=uint8]\n Coordinates:\n * band (band) int64 1 2 3\n * y (y) float64 2.827e+06 2.826e+06 2.826e+06 2.826e+06 2.826e+06 ...\n * x (x) float64 1.021e+05 1.024e+05 1.027e+05 1.03e+05 1.033e+05 ...\n Attributes:\n res: (300.0379266750948, 300.041782729805)\n transform: (300.0379266750948, 0.0, 101985.0, 0.0, -300.041782729805, 28...\n is_tiled: 0\n crs: +init=epsg:32618\n\n\nThe ``x`` and ``y`` coordinates are generated out of the file's metadata\n(``bounds``, ``width``, ``height``), and they can be understood as cartesian\ncoordinates defined in the file's projection provided by the ``crs`` attribute.\n``crs`` is a PROJ4 string which can be parsed by e.g. `pyproj`_ or rasterio.\nSee :ref:`recipes.rasterio` for an example of how to convert these to\nlongitudes and latitudes.\n\n.. warning::\n\n This feature has been added in xarray v0.9.6 and should still be\n considered as being experimental. Please report any bug you may find\n on xarray's github repository.\n\n.. _rasterio: https://rasterio.readthedocs.io/en/latest/\n.. _test files: https://github.com/mapbox/rasterio/blob/master/tests/data/RGB.byte.tif\n.. _pyproj: https://github.com/jswhit/pyproj\n\n.. _io.zarr:\n\nZarr\n----\n\n`Zarr`_ is a Python package providing an implementation of chunked, compressed,\nN-dimensional arrays.\nZarr has the ability to store arrays in a range of ways, including in memory,\nin files, and in cloud-based object storage such as `Amazon S3`_ and\n`Google Cloud Storage`_.\nXarray's Zarr backend allows xarray to leverage these capabilities.\n\n.. warning::\n\n Zarr support is still an experimental feature. Please report any bugs or\n unexepected behavior via github issues.\n\nXarray can't open just any zarr dataset, because xarray requires special\nmetadata (attributes) describing the dataset dimensions and coordinates.\nAt this time, xarray can only open zarr datasets that have been written by\nxarray. To write a dataset with zarr, we use the\n:py:attr:`Dataset.to_zarr ` method.\nTo write to a local directory, we pass a path to a directory\n\n.. ipython:: python\n :suppress:\n\n ! rm -rf path/to/directory.zarr\n\n.. ipython:: python\n\n ds = xr.Dataset({'foo': (('x', 'y'), np.random.rand(4, 5))},\n coords={'x': [10, 20, 30, 40],\n 'y': pd.date_range('2000-01-01', periods=5),\n 'z': ('x', list('abcd'))})\n ds.to_zarr('path/to/directory.zarr')\n\n(The suffix ``.zarr`` is optional--just a reminder that a zarr store lives\nthere.) If the directory does not exist, it will be created. If a zarr\nstore is already present at that path, an error will be raised, preventing it\nfrom being overwritten. To override this behavior and overwrite an existing\nstore, add ``mode='w'`` when invoking ``to_zarr``.\n\nIt is also possible to append to an existing store. For that, set\n``append_dim`` to the name of the dimension along which to append. ``mode``\ncan be omitted as it will internally be set to ``'a'``.\n\n.. ipython:: python\n :suppress:\n\n ! rm -rf path/to/directory.zarr\n\n.. 
ipython:: python\n\n ds1 = xr.Dataset({'foo': (('x', 'y', 't'), np.random.rand(4, 5, 2))},\n coords={'x': [10, 20, 30, 40],\n 'y': [1,2,3,4,5],\n 't': pd.date_range('2001-01-01', periods=2)})\n ds1.to_zarr('path/to/directory.zarr')\n ds2 = xr.Dataset({'foo': (('x', 'y', 't'), np.random.rand(4, 5, 2))},\n coords={'x': [10, 20, 30, 40],\n 'y': [1,2,3,4,5],\n 't': pd.date_range('2001-01-03', periods=2)})\n ds2.to_zarr('path/to/directory.zarr', append_dim='t')\n\nTo store variable length strings use ``dtype=object``.\n\nTo read back a zarr dataset that has been created this way, we use the\n:py:func:`~xarray.open_zarr` method:\n\n.. ipython:: python\n\n ds_zarr = xr.open_zarr('path/to/directory.zarr')\n ds_zarr\n\nCloud Storage Buckets\n~~~~~~~~~~~~~~~~~~~~~\n\nIt is possible to read and write xarray datasets directly from / to cloud\nstorage buckets using zarr. This example uses the `gcsfs`_ package to provide\na ``MutableMapping`` interface to `Google Cloud Storage`_, which we can then\npass to xarray::\n\n import gcsfs\n fs = gcsfs.GCSFileSystem(project='', token=None)\n gcsmap = gcsfs.mapping.GCSMap('', gcs=fs, check=True, create=False)\n # write to the bucket\n ds.to_zarr(store=gcsmap)\n # read it back\n ds_gcs = xr.open_zarr(gcsmap)\n\n.. _Zarr: http://zarr.readthedocs.io/\n.. _Amazon S3: https://aws.amazon.com/s3/\n.. _Google Cloud Storage: https://cloud.google.com/storage/\n.. _gcsfs: https://github.com/dask/gcsfs\n\nZarr Compressors and Filters\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nThere are many different options for compression and filtering possible with\nzarr. These are described in the\n`zarr documentation `_.\nThese options can be passed to the ``to_zarr`` method as variable encoding.\nFor example:\n\n.. ipython:: python\n :suppress:\n\n ! rm -rf foo.zarr\n\n.. ipython:: python\n\n import zarr\n compressor = zarr.Blosc(cname='zstd', clevel=3, shuffle=2)\n ds.to_zarr('foo.zarr', encoding={'foo': {'compressor': compressor}})\n\n.. note::\n\n Not all native zarr compression and filtering options have been tested with\n xarray.\n\nConsolidated Metadata\n~~~~~~~~~~~~~~~~~~~~~\n\nXarray needs to read all of the zarr metadata when it opens a dataset.\nIn some storage mediums, such as with cloud object storage (e.g. amazon S3),\nthis can introduce significant overhead, because two separate HTTP calls to the\nobject store must be made for each variable in the dataset.\nWith version 2.3, zarr will support a feature called *consolidated metadata*,\nwhich allows all metadata for the entire dataset to be stored with a single\nkey (by default called ``.zmetadata``). This can drastically speed up\nopening the store. (For more information on this feature, consult the\n`zarr docs `_.)\n\nIf you have zarr version 2.3 or greater, xarray can write and read stores\nwith consolidated metadata. To write consolidated metadata, pass the\n``consolidated=True`` option to the\n:py:attr:`Dataset.to_zarr ` method::\n\n ds.to_zarr('foo.zarr', consolidated=True)\n\nTo read a consolidated store, pass the ``consolidated=True`` option to\n:py:func:`~xarray.open_zarr`::\n\n ds = xr.open_zarr('foo.zarr', consolidated=True)\n\nXarray can't perform consolidation on pre-existing zarr datasets. This should\nbe done directly from zarr, as described in the\n`zarr docs `_.\n\n.. _io.cfgrib:\n\n.. 
ipython:: python\n :suppress:\n\n import shutil\n shutil.rmtree('foo.zarr')\n shutil.rmtree('path/to/directory.zarr')\n\nGRIB format via cfgrib\n----------------------\n\nxarray supports reading GRIB files via ECMWF cfgrib_ python driver and ecCodes_\nC-library, if they are installed. To open a GRIB file supply ``engine='cfgrib'``\nto :py:func:`~xarray.open_dataset`:\n\n.. ipython::\n :verbatim:\n\n In [1]: ds_grib = xr.open_dataset('example.grib', engine='cfgrib')\n\nWe recommend installing ecCodes via conda::\n\n conda install -c conda-forge eccodes\n pip install cfgrib\n\n.. _cfgrib: https://github.com/ecmwf/cfgrib\n.. _ecCodes: https://confluence.ecmwf.int/display/ECC/ecCodes+Home\n\n.. _io.pynio:\n\nFormats supported by PyNIO\n--------------------------\n\nxarray can also read GRIB, HDF4 and other file formats supported by PyNIO_,\nif PyNIO is installed. To use PyNIO to read such files, supply\n``engine='pynio'`` to :py:func:`~xarray.open_dataset`.\n\nWe recommend installing PyNIO via conda::\n\n conda install -c conda-forge pynio\n\n.. _PyNIO: https://www.pyngl.ucar.edu/Nio.shtml\n\n.. _io.PseudoNetCDF:\n\nFormats supported by PseudoNetCDF\n---------------------------------\n\nxarray can also read CAMx, BPCH, ARL PACKED BIT, and many other file\nformats supported by PseudoNetCDF_, if PseudoNetCDF is installed.\nPseudoNetCDF can also provide Climate Forecasting Conventions to\nCMAQ files. In addition, PseudoNetCDF can automatically register custom\nreaders that subclass PseudoNetCDF.PseudoNetCDFFile. PseudoNetCDF can\nidentify readers heuristically, or format can be specified via a key in\n`backend_kwargs`.\n\nTo use PseudoNetCDF to read such files, supply\n``engine='pseudonetcdf'`` to :py:func:`~xarray.open_dataset`.\n\nAdd ``backend_kwargs={'format': ''}`` where ``\noptions are listed on the PseudoNetCDF page.\n\n.. _PseudoNetCDF: http://github.com/barronh/PseudoNetCDF\n\n\nCSV and other formats supported by Pandas\n-----------------------------------------\n\nFor more options (tabular formats and CSV files in particular), consider\nexporting your objects to pandas and using its broad range of `IO tools`_.\nFor CSV files, one might also consider `xarray_extras`_.\n\n.. _xarray_extras: https://xarray-extras.readthedocs.io/en/latest/api/csv.html\n\n.. _IO tools: http://pandas.pydata.org/pandas-docs/stable/io.html\n"},{"fileName":"indexes.py","filePath":"xarray/core","id":2761,"nodeType":"File","text":"import collections.abc\nfrom typing import Any, Dict, Hashable, Iterable, Mapping, Optional, Tuple, Union\n\nimport numpy as np\nimport pandas as pd\n\nfrom . 
import formatting\nfrom .variable import Variable\n\n\nclass Indexes(collections.abc.Mapping):\n \"\"\"Immutable proxy for Dataset or DataArrary indexes.\"\"\"\n\n __slots__ = (\"_indexes\",)\n\n def __init__(self, indexes):\n \"\"\"Not for public consumption.\n\n Parameters\n ----------\n indexes : Dict[Any, pandas.Index]\n Indexes held by this object.\n \"\"\"\n self._indexes = indexes\n\n def __iter__(self):\n return iter(self._indexes)\n\n def __len__(self):\n return len(self._indexes)\n\n def __contains__(self, key):\n return key in self._indexes\n\n def __getitem__(self, key):\n return self._indexes[key]\n\n def __repr__(self):\n return formatting.indexes_repr(self)\n\n\ndef default_indexes(\n coords: Mapping[Any, Variable], dims: Iterable\n) -> Dict[Hashable, pd.Index]:\n \"\"\"Default indexes for a Dataset/DataArray.\n\n Parameters\n ----------\n coords : Mapping[Any, xarray.Variable]\n Coordinate variables from which to draw default indexes.\n dims : iterable\n Iterable of dimension names.\n\n Returns\n -------\n Mapping from indexing keys (levels/dimension names) to indexes used for\n indexing along that dimension.\n \"\"\"\n return {key: coords[key].to_index() for key in dims if key in coords}\n\n\ndef isel_variable_and_index(\n name: Hashable,\n variable: Variable,\n index: pd.Index,\n indexers: Mapping[Hashable, Union[int, slice, np.ndarray, Variable]],\n) -> Tuple[Variable, Optional[pd.Index]]:\n \"\"\"Index a Variable and pandas.Index together.\"\"\"\n if not indexers:\n # nothing to index\n return variable.copy(deep=False), index\n\n if len(variable.dims) > 1:\n raise NotImplementedError(\n \"indexing multi-dimensional variable with indexes is not \" \"supported yet\"\n )\n\n new_variable = variable.isel(indexers)\n\n if new_variable.dims != (name,):\n # can't preserve a index if result has new dimensions\n return new_variable, None\n\n # we need to compute the new index\n (dim,) = variable.dims\n indexer = indexers[dim]\n if isinstance(indexer, Variable):\n indexer = indexer.data\n new_index = index[indexer]\n return new_variable, new_index\n\n\ndef roll_index(index: pd.Index, count: int, axis: int = 0) -> pd.Index:\n \"\"\"Roll an pandas.Index.\"\"\"\n count %= index.shape[0]\n if count != 0:\n return index[-count:].append(index[:-count])\n else:\n return index[:]\n"},{"attributeType":"null","col":0,"comment":"null","endLoc":27,"id":2762,"name":"crs","nodeType":"Attribute","startLoc":27,"text":"crs"},{"attributeType":"null","col":0,"comment":"null","endLoc":30,"id":2763,"name":"ax","nodeType":"Attribute","startLoc":30,"text":"ax"},{"col":0,"comment":"","endLoc":14,"header":"plot_rasterio_rgb.py#","id":2764,"name":"","nodeType":"Function","startLoc":2,"text":"\"\"\"\n.. 
_recipes.rasterio_rgb:\n\n============================\nimshow() and map projections\n============================\n\nUsing rasterio's projection information for more accurate plots.\n\nThis example extends :ref:`recipes.rasterio` and plots the image in the\noriginal map projection instead of relying on pcolormesh and a map\ntransformation.\n\"\"\"\n\nurl = \"https://github.com/mapbox/rasterio/raw/master/tests/data/RGB.byte.tif\"\n\nda = xr.open_rasterio(url)\n\ncrs = ccrs.UTM(\"18N\")\n\nax = plt.subplot(projection=crs)\n\nda.plot.imshow(ax=ax, rgb=\"band\", transform=crs)\n\nax.coastlines(\"10m\", color=\"r\")\n\nplt.show()"},{"col":4,"comment":"null","endLoc":302,"header":"def _update_coords(\n self, coords: Dict[Hashable, Variable], indexes: Mapping[Hashable, pd.Index]\n ) -> None","id":2765,"name":"_update_coords","nodeType":"Function","startLoc":284,"text":"def _update_coords(\n self, coords: Dict[Hashable, Variable], indexes: Mapping[Hashable, pd.Index]\n ) -> None:\n from .dataset import calculate_dimensions\n\n coords_plus_data = coords.copy()\n coords_plus_data[_THIS_ARRAY] = self._data.variable\n dims = calculate_dimensions(coords_plus_data)\n if not set(dims) <= set(self.dims):\n raise ValueError(\n \"cannot add coordinates with new dimensions to \" \"a DataArray\"\n )\n self._data._coords = coords\n\n # TODO(shoyer): once ._indexes is always populated by a dict, modify\n # it to update inplace instead.\n original_indexes = dict(self._data.indexes)\n original_indexes.update(indexes)\n self._data._indexes = original_indexes"},{"col":0,"comment":"null","endLoc":118,"header":"def _decode_cf_datetime_dtype(data, units, calendar, use_cftime)","id":2766,"name":"_decode_cf_datetime_dtype","nodeType":"Function","startLoc":95,"text":"def _decode_cf_datetime_dtype(data, units, calendar, use_cftime):\n # Verify that at least the first and last date can be decoded\n # successfully. Otherwise, tracebacks end up swallowed by\n # Dataset.__repr__ when users try to view their lazily decoded array.\n values = indexing.ImplicitToExplicitIndexingAdapter(indexing.as_indexable(data))\n example_value = np.concatenate(\n [first_n_items(values, 1) or [0], last_item(values) or [0]]\n )\n\n try:\n result = decode_cf_datetime(example_value, units, calendar, use_cftime)\n except Exception:\n calendar_msg = (\n \"the default calendar\" if calendar is None else \"calendar %r\" % calendar\n )\n msg = (\n \"unable to decode time units %r with %s. 
Try \"\n \"opening your dataset with decode_times=False.\" % (units, calendar_msg)\n )\n raise ValueError(msg)\n else:\n dtype = getattr(result, \"dtype\", np.dtype(\"object\"))\n\n return dtype"},{"fileName":"convert.py","filePath":"xarray","id":2767,"nodeType":"File","text":"\"\"\"Functions for converting to and from xarray objects\n\"\"\"\nfrom collections import Counter\n\nimport numpy as np\nimport pandas as pd\n\nfrom .coding.times import CFDatetimeCoder, CFTimedeltaCoder\nfrom .conventions import decode_cf\nfrom .core import duck_array_ops\nfrom .core.dataarray import DataArray\nfrom .core.dtypes import get_fill_value\n\ncdms2_ignored_attrs = {\"name\", \"tileIndex\"}\niris_forbidden_keys = {\n \"standard_name\",\n \"long_name\",\n \"units\",\n \"bounds\",\n \"axis\",\n \"calendar\",\n \"leap_month\",\n \"leap_year\",\n \"month_lengths\",\n \"coordinates\",\n \"grid_mapping\",\n \"climatology\",\n \"cell_methods\",\n \"formula_terms\",\n \"compress\",\n \"missing_value\",\n \"add_offset\",\n \"scale_factor\",\n \"valid_max\",\n \"valid_min\",\n \"valid_range\",\n \"_FillValue\",\n}\ncell_methods_strings = {\n \"point\",\n \"sum\",\n \"maximum\",\n \"median\",\n \"mid_range\",\n \"minimum\",\n \"mean\",\n \"mode\",\n \"standard_deviation\",\n \"variance\",\n}\n\n\ndef encode(var):\n return CFTimedeltaCoder().encode(CFDatetimeCoder().encode(var.variable))\n\n\ndef _filter_attrs(attrs, ignored_attrs):\n \"\"\" Return attrs that are not in ignored_attrs\n \"\"\"\n return {k: v for k, v in attrs.items() if k not in ignored_attrs}\n\n\ndef from_cdms2(variable):\n \"\"\"Convert a cdms2 variable into an DataArray\n \"\"\"\n values = np.asarray(variable)\n name = variable.id\n dims = variable.getAxisIds()\n coords = {}\n for axis in variable.getAxisList():\n coords[axis.id] = DataArray(\n np.asarray(axis),\n dims=[axis.id],\n attrs=_filter_attrs(axis.attributes, cdms2_ignored_attrs),\n )\n grid = variable.getGrid()\n if grid is not None:\n ids = [a.id for a in grid.getAxisList()]\n for axis in grid.getLongitude(), grid.getLatitude():\n if axis.id not in variable.getAxisIds():\n coords[axis.id] = DataArray(\n np.asarray(axis[:]),\n dims=ids,\n attrs=_filter_attrs(axis.attributes, cdms2_ignored_attrs),\n )\n attrs = _filter_attrs(variable.attributes, cdms2_ignored_attrs)\n dataarray = DataArray(values, dims=dims, coords=coords, name=name, attrs=attrs)\n return decode_cf(dataarray.to_dataset())[dataarray.name]\n\n\ndef to_cdms2(dataarray, copy=True):\n \"\"\"Convert a DataArray into a cdms2 variable\n \"\"\"\n # we don't want cdms2 to be a hard dependency\n import cdms2\n\n def set_cdms2_attrs(var, attrs):\n for k, v in attrs.items():\n setattr(var, k, v)\n\n # 1D axes\n axes = []\n for dim in dataarray.dims:\n coord = encode(dataarray.coords[dim])\n axis = cdms2.createAxis(coord.values, id=dim)\n set_cdms2_attrs(axis, coord.attrs)\n axes.append(axis)\n\n # Data\n var = encode(dataarray)\n cdms2_var = cdms2.createVariable(\n var.values, axes=axes, id=dataarray.name, mask=pd.isnull(var.values), copy=copy\n )\n\n # Attributes\n set_cdms2_attrs(cdms2_var, var.attrs)\n\n # Curvilinear and unstructured grids\n if dataarray.name not in dataarray.coords:\n\n cdms2_axes = {}\n for coord_name in set(dataarray.coords.keys()) - set(dataarray.dims):\n\n coord_array = dataarray.coords[coord_name].to_cdms2()\n\n cdms2_axis_cls = (\n cdms2.coord.TransientAxis2D\n if coord_array.ndim\n else cdms2.auxcoord.TransientAuxAxis1D\n )\n cdms2_axis = cdms2_axis_cls(coord_array)\n if cdms2_axis.isLongitude():\n 
cdms2_axes[\"lon\"] = cdms2_axis\n elif cdms2_axis.isLatitude():\n cdms2_axes[\"lat\"] = cdms2_axis\n\n if \"lon\" in cdms2_axes and \"lat\" in cdms2_axes:\n if len(cdms2_axes[\"lon\"].shape) == 2:\n cdms2_grid = cdms2.hgrid.TransientCurveGrid(\n cdms2_axes[\"lat\"], cdms2_axes[\"lon\"]\n )\n else:\n cdms2_grid = cdms2.gengrid.AbstractGenericGrid(\n cdms2_axes[\"lat\"], cdms2_axes[\"lon\"]\n )\n for axis in cdms2_grid.getAxisList():\n cdms2_var.setAxis(cdms2_var.getAxisIds().index(axis.id), axis)\n cdms2_var.setGrid(cdms2_grid)\n\n return cdms2_var\n\n\ndef _pick_attrs(attrs, keys):\n \"\"\" Return attrs with keys in keys list\n \"\"\"\n return {k: v for k, v in attrs.items() if k in keys}\n\n\ndef _get_iris_args(attrs):\n \"\"\" Converts the xarray attrs into args that can be passed into Iris\n \"\"\"\n # iris.unit is deprecated in Iris v1.9\n import cf_units\n\n args = {\"attributes\": _filter_attrs(attrs, iris_forbidden_keys)}\n args.update(_pick_attrs(attrs, (\"standard_name\", \"long_name\")))\n unit_args = _pick_attrs(attrs, (\"calendar\",))\n if \"units\" in attrs:\n args[\"units\"] = cf_units.Unit(attrs[\"units\"], **unit_args)\n return args\n\n\n# TODO: Add converting bounds from xarray to Iris and back\ndef to_iris(dataarray):\n \"\"\" Convert a DataArray into a Iris Cube\n \"\"\"\n # Iris not a hard dependency\n import iris\n from iris.fileformats.netcdf import parse_cell_methods\n\n dim_coords = []\n aux_coords = []\n\n for coord_name in dataarray.coords:\n coord = encode(dataarray.coords[coord_name])\n coord_args = _get_iris_args(coord.attrs)\n coord_args[\"var_name\"] = coord_name\n axis = None\n if coord.dims:\n axis = dataarray.get_axis_num(coord.dims)\n if coord_name in dataarray.dims:\n try:\n iris_coord = iris.coords.DimCoord(coord.values, **coord_args)\n dim_coords.append((iris_coord, axis))\n except ValueError:\n iris_coord = iris.coords.AuxCoord(coord.values, **coord_args)\n aux_coords.append((iris_coord, axis))\n else:\n iris_coord = iris.coords.AuxCoord(coord.values, **coord_args)\n aux_coords.append((iris_coord, axis))\n\n args = _get_iris_args(dataarray.attrs)\n args[\"var_name\"] = dataarray.name\n args[\"dim_coords_and_dims\"] = dim_coords\n args[\"aux_coords_and_dims\"] = aux_coords\n if \"cell_methods\" in dataarray.attrs:\n args[\"cell_methods\"] = parse_cell_methods(dataarray.attrs[\"cell_methods\"])\n\n masked_data = duck_array_ops.masked_invalid(dataarray.data)\n cube = iris.cube.Cube(masked_data, **args)\n\n return cube\n\n\ndef _iris_obj_to_attrs(obj):\n \"\"\" Return a dictionary of attrs when given a Iris object\n \"\"\"\n attrs = {\"standard_name\": obj.standard_name, \"long_name\": obj.long_name}\n if obj.units.calendar:\n attrs[\"calendar\"] = obj.units.calendar\n if obj.units.origin != \"1\" and not obj.units.is_unknown():\n attrs[\"units\"] = obj.units.origin\n attrs.update(obj.attributes)\n return {k: v for k, v in attrs.items() if v is not None}\n\n\ndef _iris_cell_methods_to_str(cell_methods_obj):\n \"\"\" Converts a Iris cell methods into a string\n \"\"\"\n cell_methods = []\n for cell_method in cell_methods_obj:\n names = \"\".join([\"{}: \".format(n) for n in cell_method.coord_names])\n intervals = \" \".join(\n [\"interval: {}\".format(interval) for interval in cell_method.intervals]\n )\n comments = \" \".join(\n [\"comment: {}\".format(comment) for comment in cell_method.comments]\n )\n extra = \" \".join([intervals, comments]).strip()\n if extra:\n extra = \" ({})\".format(extra)\n cell_methods.append(names + cell_method.method + 
extra)\n return \" \".join(cell_methods)\n\n\ndef _name(iris_obj, default=\"unknown\"):\n \"\"\" Mimicks `iris_obj.name()` but with different name resolution order.\n\n Similar to iris_obj.name() method, but using iris_obj.var_name first to\n enable roundtripping.\n \"\"\"\n return iris_obj.var_name or iris_obj.standard_name or iris_obj.long_name or default\n\n\ndef from_iris(cube):\n \"\"\" Convert a Iris cube into an DataArray\n \"\"\"\n import iris.exceptions\n from xarray.core.pycompat import dask_array_type\n\n name = _name(cube)\n if name == \"unknown\":\n name = None\n dims = []\n for i in range(cube.ndim):\n try:\n dim_coord = cube.coord(dim_coords=True, dimensions=(i,))\n dims.append(_name(dim_coord))\n except iris.exceptions.CoordinateNotFoundError:\n dims.append(\"dim_{}\".format(i))\n\n if len(set(dims)) != len(dims):\n duplicates = [k for k, v in Counter(dims).items() if v > 1]\n raise ValueError(\"Duplicate coordinate name {}.\".format(duplicates))\n\n coords = {}\n\n for coord in cube.coords():\n coord_attrs = _iris_obj_to_attrs(coord)\n coord_dims = [dims[i] for i in cube.coord_dims(coord)]\n if coord_dims:\n coords[_name(coord)] = (coord_dims, coord.points, coord_attrs)\n else:\n coords[_name(coord)] = ((), coord.points.item(), coord_attrs)\n\n array_attrs = _iris_obj_to_attrs(cube)\n cell_methods = _iris_cell_methods_to_str(cube.cell_methods)\n if cell_methods:\n array_attrs[\"cell_methods\"] = cell_methods\n\n # Deal with iris 1.* and 2.*\n cube_data = cube.core_data() if hasattr(cube, \"core_data\") else cube.data\n\n # Deal with dask and numpy masked arrays\n if isinstance(cube_data, dask_array_type):\n from dask.array import ma as dask_ma\n\n filled_data = dask_ma.filled(cube_data, get_fill_value(cube.dtype))\n elif isinstance(cube_data, np.ma.MaskedArray):\n filled_data = np.ma.filled(cube_data, get_fill_value(cube.dtype))\n else:\n filled_data = cube_data\n\n dataarray = DataArray(\n filled_data, coords=coords, name=name, attrs=array_attrs, dims=dims\n )\n decoded_ds = decode_cf(dataarray._to_temp_dataset())\n return dataarray._from_temp_dataset(decoded_ds)\n"},{"className":"CFTimedeltaCoder","col":0,"comment":"null","endLoc":490,"id":2768,"nodeType":"Class","startLoc":471,"text":"class CFTimedeltaCoder(VariableCoder):\n def encode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_encoding(variable)\n\n if np.issubdtype(data.dtype, np.timedelta64):\n data, units = encode_cf_timedelta(data, encoding.pop(\"units\", None))\n safe_setitem(attrs, \"units\", units, name=name)\n\n return Variable(dims, data, attrs, encoding)\n\n def decode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_decoding(variable)\n\n if \"units\" in attrs and attrs[\"units\"] in TIME_UNITS:\n units = pop_to(attrs, encoding, \"units\")\n transform = partial(decode_cf_timedelta, units=units)\n dtype = np.dtype(\"timedelta64[ns]\")\n data = lazy_elemwise_func(data, transform, dtype=dtype)\n\n return Variable(dims, data, attrs, encoding)"},{"col":4,"comment":"null","endLoc":490,"header":"def decode(self, variable, name=None)","id":2769,"name":"decode","nodeType":"Function","startLoc":481,"text":"def decode(self, variable, name=None):\n dims, data, attrs, encoding = unpack_for_decoding(variable)\n\n if \"units\" in attrs and attrs[\"units\"] in TIME_UNITS:\n units = pop_to(attrs, encoding, \"units\")\n transform = partial(decode_cf_timedelta, units=units)\n dtype = np.dtype(\"timedelta64[ns]\")\n data = lazy_elemwise_func(data, transform, dtype=dtype)\n\n 
return Variable(dims, data, attrs, encoding)"},{"fileName":"interp.py","filePath":"asv_bench/benchmarks","id":2770,"nodeType":"File","text":"import numpy as np\nimport pandas as pd\n\nimport xarray as xr\n\nfrom . import parameterized, randn, requires_dask\n\nnx = 3000\nlong_nx = 30000000\nny = 2000\nnt = 1000\nwindow = 20\n\nrandn_xy = randn((nx, ny), frac_nan=0.1)\nrandn_xt = randn((nx, nt))\nrandn_t = randn((nt,))\nrandn_long = randn((long_nx,), frac_nan=0.1)\n\n\nnew_x_short = np.linspace(0.3 * nx, 0.7 * nx, 100)\nnew_x_long = np.linspace(0.3 * nx, 0.7 * nx, 1000)\nnew_y_long = np.linspace(0.1, 0.9, 1000)\n\n\nclass Interpolation:\n def setup(self, *args, **kwargs):\n self.ds = xr.Dataset(\n {\n \"var1\": ((\"x\", \"y\"), randn_xy),\n \"var2\": ((\"x\", \"t\"), randn_xt),\n \"var3\": ((\"t\",), randn_t),\n },\n coords={\n \"x\": np.arange(nx),\n \"y\": np.linspace(0, 1, ny),\n \"t\": pd.date_range(\"1970-01-01\", periods=nt, freq=\"D\"),\n \"x_coords\": (\"x\", np.linspace(1.1, 2.1, nx)),\n },\n )\n\n @parameterized([\"method\", \"is_short\"], ([\"linear\", \"cubic\"], [True, False]))\n def time_interpolation(self, method, is_short):\n new_x = new_x_short if is_short else new_x_long\n self.ds.interp(x=new_x, method=method).load()\n\n @parameterized([\"method\"], ([\"linear\", \"nearest\"]))\n def time_interpolation_2d(self, method):\n self.ds.interp(x=new_x_long, y=new_y_long, method=method).load()\n\n\nclass InterpolationDask(Interpolation):\n def setup(self, *args, **kwargs):\n requires_dask()\n super().setup(**kwargs)\n self.ds = self.ds.chunk({\"t\": 50})\n"},{"col":0,"comment":"Given an array of numeric dates in netCDF format, convert it into a\n numpy array of date time objects.\n\n For standard (Gregorian) calendars, this function uses vectorized\n operations, which makes it much faster than cftime.num2date. In such a\n case, the returned array will be of type np.datetime64.\n\n Note that time unit in `units` must not be smaller than microseconds and\n not larger than days.\n\n See also\n --------\n cftime.num2date\n ","endLoc":219,"header":"def decode_cf_datetime(num_dates, units, calendar=None, use_cftime=None)","id":2771,"name":"decode_cf_datetime","nodeType":"Function","startLoc":168,"text":"def decode_cf_datetime(num_dates, units, calendar=None, use_cftime=None):\n \"\"\"Given an array of numeric dates in netCDF format, convert it into a\n numpy array of date time objects.\n\n For standard (Gregorian) calendars, this function uses vectorized\n operations, which makes it much faster than cftime.num2date. 
In such a\n case, the returned array will be of type np.datetime64.\n\n Note that time unit in `units` must not be smaller than microseconds and\n not larger than days.\n\n See also\n --------\n cftime.num2date\n \"\"\"\n num_dates = np.asarray(num_dates)\n flat_num_dates = num_dates.ravel()\n if calendar is None:\n calendar = \"standard\"\n\n if use_cftime is None:\n try:\n dates = _decode_datetime_with_pandas(flat_num_dates, units, calendar)\n except (OutOfBoundsDatetime, OverflowError):\n dates = _decode_datetime_with_cftime(\n flat_num_dates.astype(np.float), units, calendar\n )\n\n if (\n dates[np.nanargmin(num_dates)].year < 1678\n or dates[np.nanargmax(num_dates)].year >= 2262\n ):\n if calendar in _STANDARD_CALENDARS:\n warnings.warn(\n \"Unable to decode time axis into full \"\n \"numpy.datetime64 objects, continuing using \"\n \"cftime.datetime objects instead, reason: dates out \"\n \"of range\",\n SerializationWarning,\n stacklevel=3,\n )\n else:\n if calendar in _STANDARD_CALENDARS:\n dates = cftime_to_nptime(dates)\n elif use_cftime:\n dates = _decode_datetime_with_cftime(\n flat_num_dates.astype(np.float), units, calendar\n )\n else:\n dates = _decode_datetime_with_pandas(flat_num_dates, units, calendar)\n\n return dates.reshape(num_dates.shape)"},{"className":"Interpolation","col":0,"comment":"null","endLoc":48,"id":2772,"nodeType":"Class","startLoc":25,"text":"class Interpolation:\n def setup(self, *args, **kwargs):\n self.ds = xr.Dataset(\n {\n \"var1\": ((\"x\", \"y\"), randn_xy),\n \"var2\": ((\"x\", \"t\"), randn_xt),\n \"var3\": ((\"t\",), randn_t),\n },\n coords={\n \"x\": np.arange(nx),\n \"y\": np.linspace(0, 1, ny),\n \"t\": pd.date_range(\"1970-01-01\", periods=nt, freq=\"D\"),\n \"x_coords\": (\"x\", np.linspace(1.1, 2.1, nx)),\n },\n )\n\n @parameterized([\"method\", \"is_short\"], ([\"linear\", \"cubic\"], [True, False]))\n def time_interpolation(self, method, is_short):\n new_x = new_x_short if is_short else new_x_long\n self.ds.interp(x=new_x, method=method).load()\n\n @parameterized([\"method\"], ([\"linear\", \"nearest\"]))\n def time_interpolation_2d(self, method):\n self.ds.interp(x=new_x_long, y=new_y_long, method=method).load()"},{"col":4,"comment":"null","endLoc":39,"header":"def setup(self, *args, **kwargs)","id":2773,"name":"setup","nodeType":"Function","startLoc":26,"text":"def setup(self, *args, **kwargs):\n self.ds = xr.Dataset(\n {\n \"var1\": ((\"x\", \"y\"), randn_xy),\n \"var2\": ((\"x\", \"t\"), randn_xt),\n \"var3\": ((\"t\",), randn_t),\n },\n coords={\n \"x\": np.arange(nx),\n \"y\": np.linspace(0, 1, ny),\n \"t\": pd.date_range(\"1970-01-01\", periods=nt, freq=\"D\"),\n \"x_coords\": (\"x\", np.linspace(1.1, 2.1, nx)),\n },\n )"},{"col":0,"comment":"null","endLoc":165,"header":"def _decode_datetime_with_pandas(flat_num_dates, units, calendar)","id":2774,"name":"_decode_datetime_with_pandas","nodeType":"Function","startLoc":134,"text":"def _decode_datetime_with_pandas(flat_num_dates, units, calendar):\n if calendar not in _STANDARD_CALENDARS:\n raise OutOfBoundsDatetime(\n \"Cannot decode times from a non-standard calendar, {!r}, using \"\n \"pandas.\".format(calendar)\n )\n\n delta, ref_date = _unpack_netcdf_time_units(units)\n delta = _netcdf_to_numpy_timeunit(delta)\n try:\n ref_date = pd.Timestamp(ref_date)\n except ValueError:\n # ValueError is raised by pd.Timestamp for non-ISO timestamp\n # strings, in which case we fall back to using cftime\n raise OutOfBoundsDatetime\n\n # fixes: https://github.com/pydata/pandas/issues/14068\n # 
these lines check if the the lowest or the highest value in dates\n # cause an OutOfBoundsDatetime (Overflow) error\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \"invalid value encountered\", RuntimeWarning)\n pd.to_timedelta(flat_num_dates.min(), delta) + ref_date\n pd.to_timedelta(flat_num_dates.max(), delta) + ref_date\n\n # Cast input dates to integers of nanoseconds because `pd.to_datetime`\n # works much faster when dealing with integers\n # make _NS_PER_TIME_DELTA an array to ensure type upcasting\n flat_num_dates_ns_int = (\n flat_num_dates.astype(np.float64) * _NS_PER_TIME_DELTA[delta]\n ).astype(np.int64)\n\n return (pd.to_timedelta(flat_num_dates_ns_int, \"ns\") + ref_date).values"},{"col":4,"comment":"null","endLoc":390,"header":"def sync(self)","id":2775,"name":"sync","nodeType":"Function","startLoc":389,"text":"def sync(self):\n pass"},{"col":4,"comment":"null","endLoc":454,"header":"def close(self)","id":2776,"name":"close","nodeType":"Function","startLoc":450,"text":"def close(self):\n if self._consolidate_on_close:\n import zarr\n\n zarr.consolidate_metadata(self.ds.store)"},{"attributeType":"null","col":4,"comment":"null","endLoc":234,"id":2777,"name":"__slots__","nodeType":"Attribute","startLoc":234,"text":"__slots__"},{"attributeType":"null","col":0,"comment":"null","endLoc":14,"id":2778,"name":"cdms2_ignored_attrs","nodeType":"Attribute","startLoc":14,"text":"cdms2_ignored_attrs"},{"attributeType":"null","col":0,"comment":"null","endLoc":15,"id":2779,"name":"iris_forbidden_keys","nodeType":"Attribute","startLoc":15,"text":"iris_forbidden_keys"},{"attributeType":"null","col":8,"comment":"null","endLoc":268,"id":2780,"name":"_consolidate_on_close","nodeType":"Attribute","startLoc":268,"text":"self._consolidate_on_close"},{"attributeType":"null","col":0,"comment":"null","endLoc":39,"id":2781,"name":"cell_methods_strings","nodeType":"Attribute","startLoc":39,"text":"cell_methods_strings"},{"col":0,"comment":"","endLoc":2,"header":"convert.py#","id":2782,"name":"","nodeType":"Function","startLoc":1,"text":"\"\"\"Functions for converting to and from xarray objects\n\"\"\"\n\ncdms2_ignored_attrs = {\"name\", \"tileIndex\"}\n\niris_forbidden_keys = {\n \"standard_name\",\n \"long_name\",\n \"units\",\n \"bounds\",\n \"axis\",\n \"calendar\",\n \"leap_month\",\n \"leap_year\",\n \"month_lengths\",\n \"coordinates\",\n \"grid_mapping\",\n \"climatology\",\n \"cell_methods\",\n \"formula_terms\",\n \"compress\",\n \"missing_value\",\n \"add_offset\",\n \"scale_factor\",\n \"valid_max\",\n \"valid_min\",\n \"valid_range\",\n \"_FillValue\",\n}\n\ncell_methods_strings = {\n \"point\",\n \"sum\",\n \"maximum\",\n \"median\",\n \"mid_range\",\n \"minimum\",\n \"mean\",\n \"mode\",\n \"standard_deviation\",\n \"variance\",\n}"},{"col":4,"comment":"null","endLoc":306,"header":"@property\n def variables(self)","id":2783,"name":"variables","nodeType":"Function","startLoc":304,"text":"@property\n def variables(self):\n return Frozen(self._data._coords)"},{"col":4,"comment":"null","endLoc":312,"header":"def to_dataset(self) -> \"Dataset\"","id":2784,"name":"to_dataset","nodeType":"Function","startLoc":308,"text":"def to_dataset(self) -> \"Dataset\":\n from .dataset import Dataset\n\n coords = {k: v.copy(deep=False) for k, v in self._data._coords.items()}\n return Dataset._from_vars_and_coord_names(coords, 
set(coords))"},{"attributeType":"null","col":8,"comment":"null","endLoc":269,"id":2785,"name":"append_dim","nodeType":"Attribute","startLoc":269,"text":"self.append_dim"},{"attributeType":"null","col":8,"comment":"null","endLoc":266,"id":2786,"name":"_synchronizer","nodeType":"Attribute","startLoc":266,"text":"self._synchronizer"},{"col":0,"comment":"null","endLoc":131,"header":"def _decode_datetime_with_cftime(num_dates, units, calendar)","id":2787,"name":"_decode_datetime_with_cftime","nodeType":"Function","startLoc":121,"text":"def _decode_datetime_with_cftime(num_dates, units, calendar):\n cftime = _import_cftime()\n\n if cftime.__name__ == \"cftime\":\n return np.asarray(\n cftime.num2date(num_dates, units, calendar, only_use_cftime_datetimes=True)\n )\n else:\n # Must be using num2date from an old version of netCDF4 which\n # does not have the only_use_cftime_datetimes option.\n return np.asarray(cftime.num2date(num_dates, units, calendar))"},{"id":2788,"name":"properties","nodeType":"Package"},{"id":2789,"name":"README.md","nodeType":"TextFile","path":"properties","text":"# Property-based tests using Hypothesis\n\nThis directory contains property-based tests using a library\ncalled [Hypothesis](https://github.com/HypothesisWorks/hypothesis-python).\n\nThe property tests for xarray are a work in progress - more are always welcome.\nThey are stored in a separate directory because they tend to run more examples\nand thus take longer, and so that local development can run a test suite\nwithout needing to `pip install hypothesis`.\n\n## Hang on, \"property-based\" tests?\n\nInstead of making assertions about operations on a particular piece of\ndata, you use Hypothesis to describe a *kind* of data, then make assertions\nthat should hold for *any* example of this kind.\n\nFor example: \"given a 2d ndarray of dtype uint8 `arr`,\n`xr.DataArray(arr).plot.imshow()` never raises an exception\".\n\nHypothesis will then try many random examples, and report a minimised\nfailing input for each error it finds.\n[See the docs for more info.](https://hypothesis.readthedocs.io/en/master/)\n"},{"attributeType":"null","col":8,"comment":"null","endLoc":265,"id":2790,"name":"_read_only","nodeType":"Attribute","startLoc":265,"text":"self._read_only"},{"fileName":"plot_control_colorbar.py","filePath":"doc/gallery","id":2791,"nodeType":"File","text":"# -*- coding: utf-8 -*-\n\"\"\"\n===========================\nControl the plot's colorbar\n===========================\n\nUse ``cbar_kwargs`` keyword to specify the number of ticks.\nThe ``spacing`` kwarg can be used to draw proportional ticks.\n\"\"\"\nimport matplotlib.pyplot as plt\n\nimport xarray as xr\n\n# Load the data\nair_temp = xr.tutorial.load_dataset(\"air_temperature\")\nair2d = air_temp.air.isel(time=500)\n\n# Prepare the figure\nf, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(14, 4))\n\n# Irregular levels to illustrate the use of a proportional colorbar\nlevels = [245, 250, 255, 260, 265, 270, 275, 280, 285, 290, 310, 340]\n\n# Plot data\nair2d.plot(ax=ax1, levels=levels)\nair2d.plot(ax=ax2, levels=levels, cbar_kwargs={\"ticks\": levels})\nair2d.plot(\n ax=ax3, levels=levels, cbar_kwargs={\"ticks\": levels, \"spacing\": \"proportional\"}\n)\n\n# Show 
plots\nplt.tight_layout()\nplt.show()\n"},{"attributeType":"null","col":8,"comment":"null","endLoc":267,"id":2792,"name":"_group","nodeType":"Attribute","startLoc":267,"text":"self._group"},{"attributeType":"null","col":8,"comment":"null","endLoc":264,"id":2793,"name":"ds","nodeType":"Attribute","startLoc":264,"text":"self.ds"},{"attributeType":"null","col":0,"comment":"null","endLoc":18,"id":2794,"name":"__all__","nodeType":"Attribute","startLoc":18,"text":"__all__"},{"col":0,"comment":"","endLoc":5,"header":"__init__.py#","id":2795,"name":"","nodeType":"Function","startLoc":1,"text":"\"\"\"Backend objects for saving and loading data\n\nDataStores provide a uniform interface for saving and loading data in different\nformats. They should not be used directly, but rather through Dataset objects.\n\"\"\"\n\n__all__ = [\n \"AbstractDataStore\",\n \"FileManager\",\n \"CachingFileManager\",\n \"CfGribDataStore\",\n \"DummyFileManager\",\n \"InMemoryDataStore\",\n \"NetCDF4DataStore\",\n \"PydapDataStore\",\n \"NioDataStore\",\n \"ScipyDataStore\",\n \"H5NetCDFStore\",\n \"ZarrStore\",\n \"PseudoNetCDFDataStore\",\n]"},{"attributeType":"null","col":28,"comment":"null","endLoc":10,"id":2796,"name":"plt","nodeType":"Attribute","startLoc":10,"text":"plt"},{"id":2797,"name":"howdoi.rst","nodeType":"TextFile","path":"doc","text":".. currentmodule:: xarray\n\n.. _howdoi:\n\nHow do I ...\n============\n\n.. list-table::\n :header-rows: 1\n :widths: 40 60\n\n * - How do I...\n - Solution\n * - add variables from other datasets to my dataset\n - :py:meth:`Dataset.merge`\n * - add a new dimension and/or coordinate\n - :py:meth:`DataArray.expand_dims`, :py:meth:`Dataset.expand_dims`\n * - add a new coordinate variable\n - :py:meth:`DataArray.assign_coords`\n * - change a data variable to a coordinate variable\n - :py:meth:`Dataset.set_coords`\n * - change the order of dimensions\n - :py:meth:`DataArray.transpose`, :py:meth:`Dataset.transpose`\n * - remove a variable from my object\n - :py:meth:`Dataset.drop`, :py:meth:`DataArray.drop`\n * - remove dimensions of length 1 or 0\n - :py:meth:`DataArray.squeeze`, :py:meth:`Dataset.squeeze`\n * - remove all variables with a particular dimension\n - :py:meth:`Dataset.drop_dims`\n * - convert non-dimension coordinates to data variables or remove them\n - :py:meth:`DataArray.reset_coords`, :py:meth:`Dataset.reset_coords`\n * - rename a variable, dimension or coordinate\n - :py:meth:`Dataset.rename`, :py:meth:`DataArray.rename`, :py:meth:`Dataset.rename_vars`, :py:meth:`Dataset.rename_dims`,\n * - convert a DataArray to Dataset or vice versa\n - :py:meth:`DataArray.to_dataset`, :py:meth:`Dataset.to_array`\n * - extract the underlying array (e.g. 
numpy or Dask arrays)\n - :py:attr:`DataArray.data`\n * - convert to and extract the underlying numpy array\n - :py:attr:`DataArray.values`\n * - find out if my xarray object is wrapping a Dask Array\n - :py:func:`dask.is_dask_collection`\n * - know how much memory my object requires\n - :py:attr:`DataArray.nbytes`, :py:attr:`Dataset.nbytes`\n * - convert a possibly irregularly sampled timeseries to a regularly sampled timeseries\n - :py:meth:`DataArray.resample`, :py:meth:`Dataset.resample` (see :ref:`resampling` for more)\n * - apply a function on all data variables in a Dataset\n - :py:meth:`Dataset.apply`\n * - write xarray objects with complex values to a netCDF file\n - :py:func:`Dataset.to_netcdf`, :py:func:`DataArray.to_netcdf` specifying ``engine=\"h5netcdf\", invalid_netcdf=True``\n * - make xarray objects look like other xarray objects\n - :py:func:`~xarray.ones_like`, :py:func:`~xarray.zeros_like`, :py:func:`~xarray.full_like`, :py:meth:`Dataset.reindex_like`, :py:meth:`Dataset.interpolate_like`, :py:meth:`Dataset.broadcast_like`, :py:meth:`DataArray.reindex_like`, :py:meth:`DataArray.interpolate_like`, :py:meth:`DataArray.broadcast_like`\n * - replace NaNs with other values\n - :py:meth:`Dataset.fillna`, :py:meth:`Dataset.ffill`, :py:meth:`Dataset.bfill`, :py:meth:`Dataset.interpolate_na`, :py:meth:`DataArray.fillna`, :py:meth:`DataArray.ffill`, :py:meth:`DataArray.bfill`, :py:meth:`DataArray.interpolate_na`\n * - extract the year, month, day or similar from a DataArray of time values\n - ``obj.dt.month`` for example where ``obj`` is a :py:class:`~xarray.DataArray` containing ``datetime64`` or ``cftime`` values. See :ref:`dt_accessor` for more.\n * - round off time values to a specified frequency\n - ``obj.dt.ceil``, ``obj.dt.floor``, ``obj.dt.round``. 
See :ref:`dt_accessor` for more.\n * - make a mask that is ``True`` where an object contains any of the values in a array\n - :py:meth:`Dataset.isin`, :py:meth:`DataArray.isin`\n"},{"col":4,"comment":"null","endLoc":44,"header":"@parameterized([\"method\", \"is_short\"], ([\"linear\", \"cubic\"], [True, False]))\n def time_interpolation(self, method, is_short)","id":2798,"name":"time_interpolation","nodeType":"Function","startLoc":41,"text":"@parameterized([\"method\", \"is_short\"], ([\"linear\", \"cubic\"], [True, False]))\n def time_interpolation(self, method, is_short):\n new_x = new_x_short if is_short else new_x_long\n self.ds.interp(x=new_x, method=method).load()"},{"attributeType":"null","col":17,"comment":"null","endLoc":12,"id":2799,"name":"xr","nodeType":"Attribute","startLoc":12,"text":"xr"},{"fileName":"setup.py","filePath":"","id":2800,"nodeType":"File","text":"#!/usr/bin/env python\nimport sys\n\nimport versioneer\nfrom setuptools import find_packages, setup\n\nDISTNAME = \"xarray\"\nLICENSE = \"Apache\"\nAUTHOR = \"xarray Developers\"\nAUTHOR_EMAIL = \"xarray@googlegroups.com\"\nURL = \"https://github.com/pydata/xarray\"\nCLASSIFIERS = [\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Science/Research\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering\",\n]\n\nPYTHON_REQUIRES = \">=3.6\"\nINSTALL_REQUIRES = [\"numpy >= 1.14\", \"pandas >= 0.24\"]\nneeds_pytest = {\"pytest\", \"test\", \"ptr\"}.intersection(sys.argv)\nSETUP_REQUIRES = [\"pytest-runner >= 4.2\"] if needs_pytest else []\nTESTS_REQUIRE = [\"pytest >= 2.7.1\"]\n\nDESCRIPTION = \"N-D labeled arrays and datasets in Python\"\nLONG_DESCRIPTION = \"\"\"\n**xarray** (formerly **xray**) is an open source project and Python package\nthat makes working with labelled multi-dimensional arrays simple,\nefficient, and fun!\n\nXarray introduces labels in the form of dimensions, coordinates and\nattributes on top of raw NumPy_-like arrays, which allows for a more\nintuitive, more concise, and less error-prone developer experience.\nThe package includes a large and growing library of domain-agnostic functions\nfor advanced analytics and visualization with these data structures.\n\nXarray was inspired by and borrows heavily from pandas_, the popular data\nanalysis package focused on labelled tabular data.\nIt is particularly tailored to working with netCDF_ files, which were the\nsource of xarray's data model, and integrates tightly with dask_ for parallel\ncomputing.\n\n.. _NumPy: https://www.numpy.org\n.. _pandas: https://pandas.pydata.org\n.. _dask: https://dask.org\n.. _netCDF: https://www.unidata.ucar.edu/software/netcdf\n\nWhy xarray?\n-----------\n\nMulti-dimensional (a.k.a. 
N-dimensional, ND) arrays (sometimes called\n\"tensors\") are an essential part of computational science.\nThey are encountered in a wide range of fields, including physics, astronomy,\ngeoscience, bioinformatics, engineering, finance, and deep learning.\nIn Python, NumPy_ provides the fundamental data structure and API for\nworking with raw ND arrays.\nHowever, real-world datasets are usually more than just raw numbers;\nthey have labels which encode information about how the array values map\nto locations in space, time, etc.\n\nXarray doesn't just keep track of labels on arrays -- it uses them to provide a\npowerful and concise interface. For example:\n\n- Apply operations over dimensions by name: ``x.sum('time')``.\n- Select values by label instead of integer location:\n ``x.loc['2014-01-01']`` or ``x.sel(time='2014-01-01')``.\n- Mathematical operations (e.g., ``x - y``) vectorize across multiple\n dimensions (array broadcasting) based on dimension names, not shape.\n- Flexible split-apply-combine operations with groupby:\n ``x.groupby('time.dayofyear').mean()``.\n- Database like alignment based on coordinate labels that smoothly\n handles missing values: ``x, y = xr.align(x, y, join='outer')``.\n- Keep track of arbitrary metadata in the form of a Python dictionary:\n ``x.attrs``.\n\nLearn more\n----------\n\n- Documentation: http://xarray.pydata.org\n- Issue tracker: http://github.com/pydata/xarray/issues\n- Source code: http://github.com/pydata/xarray\n- SciPy2015 talk: https://www.youtube.com/watch?v=X0pAhJgySxk\n\"\"\"\n\n\nsetup(\n name=DISTNAME,\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n license=LICENSE,\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n classifiers=CLASSIFIERS,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n python_requires=PYTHON_REQUIRES,\n install_requires=INSTALL_REQUIRES,\n setup_requires=SETUP_REQUIRES,\n tests_require=TESTS_REQUIRE,\n url=URL,\n packages=find_packages(),\n package_data={\"xarray\": [\"py.typed\", 
\"tests/data/*\"]},\n)\n"},{"attributeType":"null","col":0,"comment":"null","endLoc":15,"id":2801,"name":"air_temp","nodeType":"Attribute","startLoc":15,"text":"air_temp"},{"attributeType":"null","col":0,"comment":"null","endLoc":7,"id":2802,"name":"DISTNAME","nodeType":"Attribute","startLoc":7,"text":"DISTNAME"},{"attributeType":"null","col":0,"comment":"null","endLoc":8,"id":2803,"name":"LICENSE","nodeType":"Attribute","startLoc":8,"text":"LICENSE"},{"attributeType":"null","col":0,"comment":"null","endLoc":9,"id":2804,"name":"AUTHOR","nodeType":"Attribute","startLoc":9,"text":"AUTHOR"},{"attributeType":"null","col":0,"comment":"null","endLoc":10,"id":2805,"name":"AUTHOR_EMAIL","nodeType":"Attribute","startLoc":10,"text":"AUTHOR_EMAIL"},{"attributeType":"null","col":0,"comment":"null","endLoc":11,"id":2806,"name":"URL","nodeType":"Attribute","startLoc":11,"text":"URL"},{"attributeType":"null","col":0,"comment":"null","endLoc":12,"id":2807,"name":"CLASSIFIERS","nodeType":"Attribute","startLoc":12,"text":"CLASSIFIERS"},{"attributeType":"null","col":0,"comment":"null","endLoc":24,"id":2808,"name":"PYTHON_REQUIRES","nodeType":"Attribute","startLoc":24,"text":"PYTHON_REQUIRES"},{"attributeType":"null","col":0,"comment":"null","endLoc":25,"id":2809,"name":"INSTALL_REQUIRES","nodeType":"Attribute","startLoc":25,"text":"INSTALL_REQUIRES"},{"attributeType":"null","col":0,"comment":"null","endLoc":26,"id":2810,"name":"needs_pytest","nodeType":"Attribute","startLoc":26,"text":"needs_pytest"},{"attributeType":"null","col":0,"comment":"null","endLoc":27,"id":2811,"name":"SETUP_REQUIRES","nodeType":"Attribute","startLoc":27,"text":"SETUP_REQUIRES"},{"attributeType":"null","col":0,"comment":"null","endLoc":28,"id":2812,"name":"TESTS_REQUIRE","nodeType":"Attribute","startLoc":28,"text":"TESTS_REQUIRE"},{"attributeType":"null","col":0,"comment":"null","endLoc":30,"id":2813,"name":"DESCRIPTION","nodeType":"Attribute","startLoc":30,"text":"DESCRIPTION"},{"attributeType":"null","col":0,"comment":"null","endLoc":31,"id":2814,"name":"LONG_DESCRIPTION","nodeType":"Attribute","startLoc":31,"text":"LONG_DESCRIPTION"},{"col":0,"comment":"","endLoc":2,"header":"setup.py#","id":2815,"name":"","nodeType":"Function","startLoc":2,"text":"DISTNAME = \"xarray\"\n\nLICENSE = \"Apache\"\n\nAUTHOR = \"xarray Developers\"\n\nAUTHOR_EMAIL = \"xarray@googlegroups.com\"\n\nURL = \"https://github.com/pydata/xarray\"\n\nCLASSIFIERS = [\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Science/Research\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering\",\n]\n\nPYTHON_REQUIRES = \">=3.6\"\n\nINSTALL_REQUIRES = [\"numpy >= 1.14\", \"pandas >= 0.24\"]\n\nneeds_pytest = {\"pytest\", \"test\", \"ptr\"}.intersection(sys.argv)\n\nSETUP_REQUIRES = [\"pytest-runner >= 4.2\"] if needs_pytest else []\n\nTESTS_REQUIRE = [\"pytest >= 2.7.1\"]\n\nDESCRIPTION = \"N-D labeled arrays and datasets in Python\"\n\nLONG_DESCRIPTION = \"\"\"\n**xarray** (formerly **xray**) is an open source project and Python package\nthat makes working with labelled multi-dimensional arrays simple,\nefficient, and fun!\n\nXarray introduces labels in the form of dimensions, coordinates and\nattributes on top of raw NumPy_-like arrays, which allows for a more\nintuitive, more concise, and 
less error-prone developer experience.\nThe package includes a large and growing library of domain-agnostic functions\nfor advanced analytics and visualization with these data structures.\n\nXarray was inspired by and borrows heavily from pandas_, the popular data\nanalysis package focused on labelled tabular data.\nIt is particularly tailored to working with netCDF_ files, which were the\nsource of xarray's data model, and integrates tightly with dask_ for parallel\ncomputing.\n\n.. _NumPy: https://www.numpy.org\n.. _pandas: https://pandas.pydata.org\n.. _dask: https://dask.org\n.. _netCDF: https://www.unidata.ucar.edu/software/netcdf\n\nWhy xarray?\n-----------\n\nMulti-dimensional (a.k.a. N-dimensional, ND) arrays (sometimes called\n\"tensors\") are an essential part of computational science.\nThey are encountered in a wide range of fields, including physics, astronomy,\ngeoscience, bioinformatics, engineering, finance, and deep learning.\nIn Python, NumPy_ provides the fundamental data structure and API for\nworking with raw ND arrays.\nHowever, real-world datasets are usually more than just raw numbers;\nthey have labels which encode information about how the array values map\nto locations in space, time, etc.\n\nXarray doesn't just keep track of labels on arrays -- it uses them to provide a\npowerful and concise interface. For example:\n\n- Apply operations over dimensions by name: ``x.sum('time')``.\n- Select values by label instead of integer location:\n ``x.loc['2014-01-01']`` or ``x.sel(time='2014-01-01')``.\n- Mathematical operations (e.g., ``x - y``) vectorize across multiple\n dimensions (array broadcasting) based on dimension names, not shape.\n- Flexible split-apply-combine operations with groupby:\n ``x.groupby('time.dayofyear').mean()``.\n- Database like alignment based on coordinate labels that smoothly\n handles missing values: ``x, y = xr.align(x, y, join='outer')``.\n- Keep track of arbitrary metadata in the form of a Python dictionary:\n ``x.attrs``.\n\nLearn more\n----------\n\n- Documentation: http://xarray.pydata.org\n- Issue tracker: http://github.com/pydata/xarray/issues\n- Source code: http://github.com/pydata/xarray\n- SciPy2015 talk: https://www.youtube.com/watch?v=X0pAhJgySxk\n\"\"\"\n\nsetup(\n name=DISTNAME,\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n license=LICENSE,\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n classifiers=CLASSIFIERS,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n python_requires=PYTHON_REQUIRES,\n install_requires=INSTALL_REQUIRES,\n setup_requires=SETUP_REQUIRES,\n tests_require=TESTS_REQUIRE,\n url=URL,\n packages=find_packages(),\n package_data={\"xarray\": [\"py.typed\", \"tests/data/*\"]},\n)"},{"attributeType":"null","col":0,"comment":"null","endLoc":16,"id":2816,"name":"air2d","nodeType":"Attribute","startLoc":16,"text":"air2d"},{"fileName":"npcompat.py","filePath":"xarray/core","id":2817,"nodeType":"File","text":"# Copyright (c) 2005-2011, NumPy Developers.\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the 
distribution.\n\n# * Neither the name of the NumPy Developers nor the names of any\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\nimport builtins\nimport operator\nfrom typing import Union\n\nimport numpy as np\n\n\n# Vendored from NumPy 1.12; we need a version that support duck typing, even\n# on dask arrays with __array_function__ enabled.\ndef _validate_axis(axis, ndim, argname):\n try:\n axis = [operator.index(axis)]\n except TypeError:\n axis = list(axis)\n axis = [a + ndim if a < 0 else a for a in axis]\n if not builtins.all(0 <= a < ndim for a in axis):\n raise ValueError(\"invalid axis for this array in `%s` argument\" % argname)\n if len(set(axis)) != len(axis):\n raise ValueError(\"repeated axis in `%s` argument\" % argname)\n return axis\n\n\ndef moveaxis(a, source, destination):\n try:\n # allow duck-array types if they define transpose\n transpose = a.transpose\n except AttributeError:\n a = np.asarray(a)\n transpose = a.transpose\n\n source = _validate_axis(source, a.ndim, \"source\")\n destination = _validate_axis(destination, a.ndim, \"destination\")\n if len(source) != len(destination):\n raise ValueError(\n \"`source` and `destination` arguments must have \"\n \"the same number of elements\"\n )\n\n order = [n for n in range(a.ndim) if n not in source]\n\n for dest, src in sorted(zip(destination, source)):\n order.insert(dest, src)\n\n result = transpose(order)\n return result\n\n\n# Type annotations stubs. 
See also / to be replaced by:\n# https://github.com/numpy/numpy/issues/7370\n# https://github.com/numpy/numpy-stubs/\nDTypeLike = Union[np.dtype, str]\n\n\n# from dask/array/utils.py\ndef _is_nep18_active():\n class A:\n def __array_function__(self, *args, **kwargs):\n return True\n\n try:\n return np.concatenate([A()])\n except ValueError:\n return False\n\n\nIS_NEP18_ACTIVE = _is_nep18_active()\n"},{"col":0,"comment":"null","endLoc":50,"header":"def _validate_axis(axis, ndim, argname)","id":2818,"name":"_validate_axis","nodeType":"Function","startLoc":40,"text":"def _validate_axis(axis, ndim, argname):\n try:\n axis = [operator.index(axis)]\n except TypeError:\n axis = list(axis)\n axis = [a + ndim if a < 0 else a for a in axis]\n if not builtins.all(0 <= a < ndim for a in axis):\n raise ValueError(\"invalid axis for this array in `%s` argument\" % argname)\n if len(set(axis)) != len(axis):\n raise ValueError(\"repeated axis in `%s` argument\" % argname)\n return axis"},{"col":0,"comment":"null","endLoc":75,"header":"def moveaxis(a, source, destination)","id":2819,"name":"moveaxis","nodeType":"Function","startLoc":53,"text":"def moveaxis(a, source, destination):\n try:\n # allow duck-array types if they define transpose\n transpose = a.transpose\n except AttributeError:\n a = np.asarray(a)\n transpose = a.transpose\n\n source = _validate_axis(source, a.ndim, \"source\")\n destination = _validate_axis(destination, a.ndim, \"destination\")\n if len(source) != len(destination):\n raise ValueError(\n \"`source` and `destination` arguments must have \"\n \"the same number of elements\"\n )\n\n order = [n for n in range(a.ndim) if n not in source]\n\n for dest, src in sorted(zip(destination, source)):\n order.insert(dest, src)\n\n result = transpose(order)\n return result"},{"attributeType":"null","col":0,"comment":"null","endLoc":19,"id":2820,"name":"f","nodeType":"Attribute","startLoc":19,"text":"f"},{"attributeType":"null","col":4,"comment":"null","endLoc":19,"id":2821,"name":"ax1","nodeType":"Attribute","startLoc":19,"text":"ax1"},{"col":0,"comment":"null","endLoc":93,"header":"def _is_nep18_active()","id":2822,"name":"_is_nep18_active","nodeType":"Function","startLoc":85,"text":"def _is_nep18_active():\n class A:\n def __array_function__(self, *args, **kwargs):\n return True\n\n try:\n return np.concatenate([A()])\n except ValueError:\n return False"},{"attributeType":"null","col":9,"comment":"null","endLoc":19,"id":2823,"name":"ax2","nodeType":"Attribute","startLoc":19,"text":"ax2"},{"attributeType":"null","col":14,"comment":"null","endLoc":19,"id":2824,"name":"ax3","nodeType":"Attribute","startLoc":19,"text":"ax3"},{"attributeType":"null","col":0,"comment":"null","endLoc":96,"id":2825,"name":"IS_NEP18_ACTIVE","nodeType":"Attribute","startLoc":96,"text":"IS_NEP18_ACTIVE"},{"col":4,"comment":"null","endLoc":315,"header":"def __delitem__(self, key: Hashable) -> None","id":2826,"name":"__delitem__","nodeType":"Function","startLoc":314,"text":"def __delitem__(self, key: Hashable) -> None:\n del self._data._coords[key]"},{"col":4,"comment":"Provide method for the key-autocompletions in IPython. ","endLoc":319,"header":"def _ipython_key_completions_(self)","id":2827,"name":"_ipython_key_completions_","nodeType":"Function","startLoc":317,"text":"def _ipython_key_completions_(self):\n \"\"\"Provide method for the key-autocompletions in IPython. 
\"\"\"\n return self._data._ipython_key_completions_()"},{"attributeType":"null","col":0,"comment":"null","endLoc":22,"id":2828,"name":"levels","nodeType":"Attribute","startLoc":22,"text":"levels"},{"col":0,"comment":"","endLoc":9,"header":"plot_control_colorbar.py#","id":2829,"name":"","nodeType":"Function","startLoc":2,"text":"\"\"\"\n===========================\nControl the plot's colorbar\n===========================\n\nUse ``cbar_kwargs`` keyword to specify the number of ticks.\nThe ``spacing`` kwarg can be used to draw proportional ticks.\n\"\"\"\n\nair_temp = xr.tutorial.load_dataset(\"air_temperature\")\n\nair2d = air_temp.air.isel(time=500)\n\nf, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(14, 4))\n\nlevels = [245, 250, 255, 260, 265, 270, 275, 280, 285, 290, 310, 340]\n\nair2d.plot(ax=ax1, levels=levels)\n\nair2d.plot(ax=ax2, levels=levels, cbar_kwargs={\"ticks\": levels})\n\nair2d.plot(\n ax=ax3, levels=levels, cbar_kwargs={\"ticks\": levels, \"spacing\": \"proportional\"}\n)\n\nplt.tight_layout()\n\nplt.show()"},{"attributeType":"null","col":4,"comment":"null","endLoc":365,"id":2830,"name":"__slots__","nodeType":"Attribute","startLoc":365,"text":"__slots__"},{"attributeType":"null","col":8,"comment":"null","endLoc":405,"id":2831,"name":"rollings","nodeType":"Attribute","startLoc":405,"text":"self.rollings"},{"className":"Coarsen","col":0,"comment":"A object that implements the coarsen.\n\n See Also\n --------\n Dataset.coarsen\n DataArray.coarsen\n ","endLoc":552,"id":2832,"nodeType":"Class","startLoc":493,"text":"class Coarsen:\n \"\"\"A object that implements the coarsen.\n\n See Also\n --------\n Dataset.coarsen\n DataArray.coarsen\n \"\"\"\n\n __slots__ = (\"obj\", \"boundary\", \"coord_func\", \"windows\", \"side\", \"trim_excess\")\n _attributes = (\"windows\", \"side\", \"trim_excess\")\n\n def __init__(self, obj, windows, boundary, side, coord_func):\n \"\"\"\n Moving window object.\n\n Parameters\n ----------\n obj : Dataset or DataArray\n Object to window.\n windows : A mapping from a dimension name to window size\n dim : str\n Name of the dimension to create the rolling iterator\n along (e.g., `time`).\n window : int\n Size of the moving window.\n boundary : 'exact' | 'trim' | 'pad'\n If 'exact', a ValueError will be raised if dimension size is not a\n multiple of window size. 
If 'trim', the excess indexes are trimed.\n If 'pad', NA will be padded.\n side : 'left' or 'right' or mapping from dimension to 'left' or 'right'\n coord_func: mapping from coordinate name to func.\n\n Returns\n -------\n coarsen\n \"\"\"\n self.obj = obj\n self.windows = windows\n self.side = side\n self.boundary = boundary\n\n if not utils.is_dict_like(coord_func):\n coord_func = {d: coord_func for d in self.obj.dims}\n for c in self.obj.coords:\n if c not in coord_func:\n coord_func[c] = duck_array_ops.mean\n self.coord_func = coord_func\n\n def __repr__(self):\n \"\"\"provide a nice str repr of our coarsen object\"\"\"\n\n attrs = [\n \"{k}->{v}\".format(k=k, v=getattr(self, k))\n for k in self._attributes\n if getattr(self, k, None) is not None\n ]\n return \"{klass} [{attrs}]\".format(\n klass=self.__class__.__name__, attrs=\",\".join(attrs)\n )"},{"col":4,"comment":"\n Moving window object.\n\n Parameters\n ----------\n obj : Dataset or DataArray\n Object to window.\n windows : A mapping from a dimension name to window size\n dim : str\n Name of the dimension to create the rolling iterator\n along (e.g., `time`).\n window : int\n Size of the moving window.\n boundary : 'exact' | 'trim' | 'pad'\n If 'exact', a ValueError will be raised if dimension size is not a\n multiple of window size. If 'trim', the excess indexes are trimed.\n If 'pad', NA will be padded.\n side : 'left' or 'right' or mapping from dimension to 'left' or 'right'\n coord_func: mapping from coordinate name to func.\n\n Returns\n -------\n coarsen\n ","endLoc":540,"header":"def __init__(self, obj, windows, boundary, side, coord_func)","id":2833,"name":"__init__","nodeType":"Function","startLoc":505,"text":"def __init__(self, obj, windows, boundary, side, coord_func):\n \"\"\"\n Moving window object.\n\n Parameters\n ----------\n obj : Dataset or DataArray\n Object to window.\n windows : A mapping from a dimension name to window size\n dim : str\n Name of the dimension to create the rolling iterator\n along (e.g., `time`).\n window : int\n Size of the moving window.\n boundary : 'exact' | 'trim' | 'pad'\n If 'exact', a ValueError will be raised if dimension size is not a\n multiple of window size. If 'trim', the excess indexes are trimed.\n If 'pad', NA will be padded.\n side : 'left' or 'right' or mapping from dimension to 'left' or 'right'\n coord_func: mapping from coordinate name to func.\n\n Returns\n -------\n coarsen\n \"\"\"\n self.obj = obj\n self.windows = windows\n self.side = side\n self.boundary = boundary\n\n if not utils.is_dict_like(coord_func):\n coord_func = {d: coord_func for d in self.obj.dims}\n for c in self.obj.coords:\n if c not in coord_func:\n coord_func[c] = duck_array_ops.mean\n self.coord_func = coord_func"},{"col":0,"comment":"","endLoc":31,"header":"npcompat.py#","id":2834,"name":"","nodeType":"Function","startLoc":31,"text":"DTypeLike = Union[np.dtype, str]\n\nIS_NEP18_ACTIVE = _is_nep18_active()"},{"attributeType":"null","col":4,"comment":"null","endLoc":268,"id":2835,"name":"__slots__","nodeType":"Attribute","startLoc":268,"text":"__slots__"},{"attributeType":"DataArray","col":8,"comment":"null","endLoc":271,"id":2836,"name":"_data","nodeType":"Attribute","startLoc":271,"text":"self._data"},{"className":"LevelCoordinatesSource","col":0,"comment":"Iterator for MultiIndex level coordinates.\n\n Used for attribute style lookup with AttrAccessMixin. 
Not returned directly\n by any public methods.\n ","endLoc":342,"id":2837,"nodeType":"Class","startLoc":322,"text":"class LevelCoordinatesSource(Mapping[Hashable, Any]):\n \"\"\"Iterator for MultiIndex level coordinates.\n\n Used for attribute style lookup with AttrAccessMixin. Not returned directly\n by any public methods.\n \"\"\"\n\n __slots__ = (\"_data\",)\n\n def __init__(self, data_object: \"Union[DataArray, Dataset]\"):\n self._data = data_object\n\n def __getitem__(self, key):\n # not necessary -- everything here can already be found in coords.\n raise KeyError()\n\n def __iter__(self) -> Iterator[Hashable]:\n return iter(self._data._level_coords)\n\n def __len__(self) -> int:\n return len(self._data._level_coords)"},{"col":4,"comment":"null","endLoc":336,"header":"def __getitem__(self, key)","id":2838,"name":"__getitem__","nodeType":"Function","startLoc":334,"text":"def __getitem__(self, key):\n # not necessary -- everything here can already be found in coords.\n raise KeyError()"},{"col":4,"comment":"null","endLoc":339,"header":"def __iter__(self) -> Iterator[Hashable]","id":2839,"name":"__iter__","nodeType":"Function","startLoc":338,"text":"def __iter__(self) -> Iterator[Hashable]:\n return iter(self._data._level_coords)"},{"col":4,"comment":"null","endLoc":342,"header":"def __len__(self) -> int","id":2840,"name":"__len__","nodeType":"Function","startLoc":341,"text":"def __len__(self) -> int:\n return len(self._data._level_coords)"},{"attributeType":"null","col":4,"comment":"null","endLoc":329,"id":2841,"name":"__slots__","nodeType":"Attribute","startLoc":329,"text":"__slots__"},{"attributeType":"null","col":8,"comment":"null","endLoc":438,"id":2842,"name":"use_cftime","nodeType":"Attribute","startLoc":438,"text":"self.use_cftime"},{"col":0,"comment":"Raises an ImportError if the standalone cftime is not found","endLoc":67,"header":"def _require_standalone_cftime()","id":2843,"name":"_require_standalone_cftime","nodeType":"Function","startLoc":58,"text":"def _require_standalone_cftime():\n \"\"\"Raises an ImportError if the standalone cftime is not found\"\"\"\n try:\n import cftime # noqa: F401\n except ImportError:\n raise ImportError(\n \"Decoding times with non-standard calendars \"\n \"or outside the pandas.Timestamp-valid range \"\n \"requires the standalone cftime package.\"\n )"},{"col":0,"comment":"Given an array of numeric timedeltas in netCDF format, convert it into a\n numpy timedelta64[ns] array.\n ","endLoc":247,"header":"def decode_cf_timedelta(num_timedeltas, units)","id":2844,"name":"decode_cf_timedelta","nodeType":"Function","startLoc":240,"text":"def decode_cf_timedelta(num_timedeltas, units):\n \"\"\"Given an array of numeric timedeltas in netCDF format, convert it into a\n numpy timedelta64[ns] array.\n \"\"\"\n num_timedeltas = np.asarray(num_timedeltas)\n units = _netcdf_to_numpy_timeunit(units)\n result = to_timedelta_unboxed(num_timedeltas.ravel(), unit=units)\n return result.reshape(num_timedeltas.shape)"},{"col":4,"comment":"null","endLoc":48,"header":"@parameterized([\"method\"], ([\"linear\", \"nearest\"]))\n def time_interpolation_2d(self, method)","id":2845,"name":"time_interpolation_2d","nodeType":"Function","startLoc":46,"text":"@parameterized([\"method\"], ([\"linear\", \"nearest\"]))\n def time_interpolation_2d(self, method):\n self.ds.interp(x=new_x_long, y=new_y_long, 
method=method).load()"},{"attributeType":"Dataset","col":8,"comment":"null","endLoc":27,"id":2846,"name":"ds","nodeType":"Attribute","startLoc":27,"text":"self.ds"},{"className":"InterpolationDask","col":0,"comment":"null","endLoc":55,"id":2847,"nodeType":"Class","startLoc":51,"text":"class InterpolationDask(Interpolation):\n def setup(self, *args, **kwargs):\n requires_dask()\n super().setup(**kwargs)\n self.ds = self.ds.chunk({\"t\": 50})"},{"attributeType":"null","col":0,"comment":"null","endLoc":28,"id":2848,"name":"_NS_PER_TIME_DELTA","nodeType":"Attribute","startLoc":28,"text":"_NS_PER_TIME_DELTA"},{"attributeType":"null","col":0,"comment":"null","endLoc":37,"id":2849,"name":"TIME_UNITS","nodeType":"Attribute","startLoc":37,"text":"TIME_UNITS"},{"col":4,"comment":"null","endLoc":55,"header":"def setup(self, *args, **kwargs)","id":2850,"name":"setup","nodeType":"Function","startLoc":52,"text":"def setup(self, *args, **kwargs):\n requires_dask()\n super().setup(**kwargs)\n self.ds = self.ds.chunk({\"t\": 50})"},{"col":0,"comment":"","endLoc":1,"header":"times.py#","id":2851,"name":"","nodeType":"Function","startLoc":1,"text":"_STANDARD_CALENDARS = {\"standard\", \"gregorian\", \"proleptic_gregorian\"}\n\n_NS_PER_TIME_DELTA = {\n \"us\": int(1e3),\n \"ms\": int(1e6),\n \"s\": int(1e9),\n \"m\": int(1e9) * 60,\n \"h\": int(1e9) * 60 * 60,\n \"D\": int(1e9) * 60 * 60 * 24,\n}\n\nTIME_UNITS = frozenset(\n [\"days\", \"hours\", \"minutes\", \"seconds\", \"milliseconds\", \"microseconds\"]\n)"},{"fileName":"rasterio_.py","filePath":"xarray/backends","id":2852,"nodeType":"File","text":"import os\nimport warnings\n\nimport numpy as np\n\nfrom .. import DataArray\nfrom ..core import indexing\nfrom ..core.utils import is_scalar\nfrom .common import BackendArray\nfrom .file_manager import CachingFileManager\nfrom .locks import SerializableLock\n\n# TODO: should this be GDAL_LOCK instead?\nRASTERIO_LOCK = SerializableLock()\n\n_ERROR_MSG = (\n \"The kind of indexing operation you are trying to do is not \"\n \"valid on rasterio files. Try to load your data with ds.load()\"\n \"first.\"\n)\n\n\nclass RasterioArrayWrapper(BackendArray):\n \"\"\"A wrapper around rasterio dataset objects\"\"\"\n\n def __init__(self, manager, lock, vrt_params=None):\n from rasterio.vrt import WarpedVRT\n\n self.manager = manager\n self.lock = lock\n\n # cannot save riods as an attribute: this would break pickleability\n riods = manager.acquire()\n if vrt_params is not None:\n riods = WarpedVRT(riods, **vrt_params)\n self.vrt_params = vrt_params\n self._shape = (riods.count, riods.height, riods.width)\n\n dtypes = riods.dtypes\n if not np.all(np.asarray(dtypes) == dtypes[0]):\n raise ValueError(\"All bands should have the same dtype\")\n self._dtype = np.dtype(dtypes[0])\n\n @property\n def dtype(self):\n return self._dtype\n\n @property\n def shape(self):\n return self._shape\n\n def _get_indexer(self, key):\n \"\"\" Get indexer for rasterio array.\n\n Parameter\n ---------\n key: tuple of int\n\n Returns\n -------\n band_key: an indexer for the 1st dimension\n window: two tuples. 
Each consists of (start, stop).\n squeeze_axis: axes to be squeezed\n np_ind: indexer for loaded numpy array\n\n See also\n --------\n indexing.decompose_indexer\n \"\"\"\n assert len(key) == 3, \"rasterio datasets should always be 3D\"\n\n # bands cannot be windowed but they can be listed\n band_key = key[0]\n np_inds = []\n # bands (axis=0) cannot be windowed but they can be listed\n if isinstance(band_key, slice):\n start, stop, step = band_key.indices(self.shape[0])\n band_key = np.arange(start, stop, step)\n # be sure we give out a list\n band_key = (np.asarray(band_key) + 1).tolist()\n if isinstance(band_key, list): # if band_key is not a scalar\n np_inds.append(slice(None))\n\n # but other dims can only be windowed\n window = []\n squeeze_axis = []\n for i, (k, n) in enumerate(zip(key[1:], self.shape[1:])):\n if isinstance(k, slice):\n # step is always positive. see indexing.decompose_indexer\n start, stop, step = k.indices(n)\n np_inds.append(slice(None, None, step))\n elif is_scalar(k):\n # windowed operations will always return an array\n # we will have to squeeze it later\n squeeze_axis.append(-(2 - i))\n start = k\n stop = k + 1\n else:\n start, stop = np.min(k), np.max(k) + 1\n np_inds.append(k - start)\n window.append((start, stop))\n\n if isinstance(key[1], np.ndarray) and isinstance(key[2], np.ndarray):\n # do outer-style indexing\n np_inds[-2:] = np.ix_(*np_inds[-2:])\n\n return band_key, tuple(window), tuple(squeeze_axis), tuple(np_inds)\n\n def _getitem(self, key):\n from rasterio.vrt import WarpedVRT\n\n band_key, window, squeeze_axis, np_inds = self._get_indexer(key)\n\n if not band_key or any(start == stop for (start, stop) in window):\n # no need to do IO\n shape = (len(band_key),) + tuple(stop - start for (start, stop) in window)\n out = np.zeros(shape, dtype=self.dtype)\n else:\n with self.lock:\n riods = self.manager.acquire(needs_lock=False)\n if self.vrt_params is not None:\n riods = WarpedVRT(riods, **self.vrt_params)\n out = riods.read(band_key, window=window)\n\n if squeeze_axis:\n out = np.squeeze(out, axis=squeeze_axis)\n return out[np_inds]\n\n def __getitem__(self, key):\n return indexing.explicit_indexing_adapter(\n key, self.shape, indexing.IndexingSupport.OUTER, self._getitem\n )\n\n\ndef _parse_envi(meta):\n \"\"\"Parse ENVI metadata into Python data structures.\n\n See the link for information on the ENVI header file format:\n http://www.harrisgeospatial.com/docs/enviheaderfiles.html\n\n Parameters\n ----------\n meta : dict\n Dictionary of keys and str values to parse, as returned by the rasterio\n tags(ns='ENVI') call.\n\n Returns\n -------\n parsed_meta : dict\n Dictionary containing the original keys and the parsed values\n\n \"\"\"\n\n def parsevec(s):\n return np.fromstring(s.strip(\"{}\"), dtype=\"float\", sep=\",\")\n\n def default(s):\n return s.strip(\"{}\")\n\n parse = {\"wavelength\": parsevec, \"fwhm\": parsevec}\n parsed_meta = {k: parse.get(k, default)(v) for k, v in meta.items()}\n return parsed_meta\n\n\ndef open_rasterio(filename, parse_coordinates=None, chunks=None, cache=None, lock=None):\n \"\"\"Open a file with rasterio (experimental).\n\n This should work with any file that rasterio can open (most often:\n geoTIFF). 
The x and y coordinates are generated automatically from the\n file's geoinformation, shifted to the center of each pixel (see\n `\"PixelIsArea\" Raster Space\n `_\n for more information).\n\n You can generate 2D coordinates from the file's attributes with::\n\n from affine import Affine\n da = xr.open_rasterio('path_to_file.tif')\n transform = Affine.from_gdal(*da.attrs['transform'])\n nx, ny = da.sizes['x'], da.sizes['y']\n x, y = np.meshgrid(np.arange(nx)+0.5, np.arange(ny)+0.5) * transform\n\n\n Parameters\n ----------\n filename : str, rasterio.DatasetReader, or rasterio.WarpedVRT\n Path to the file to open. Or already open rasterio dataset.\n parse_coordinates : bool, optional\n Whether to parse the x and y coordinates out of the file's\n ``transform`` attribute or not. The default is to automatically\n parse the coordinates only if they are rectilinear (1D).\n It can be useful to set ``parse_coordinates=False``\n if your files are very large or if you don't need the coordinates.\n chunks : int, tuple or dict, optional\n Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or\n ``{'x': 5, 'y': 5}``. If chunks is provided, it used to load the new\n DataArray into a dask array.\n cache : bool, optional\n If True, cache data loaded from the underlying datastore in memory as\n NumPy arrays when accessed to avoid reading from the underlying data-\n store multiple times. Defaults to True unless you specify the `chunks`\n argument to use dask, in which case it defaults to False.\n lock : False, True or threading.Lock, optional\n If chunks is provided, this argument is passed on to\n :py:func:`dask.array.from_array`. By default, a global lock is\n used to avoid issues with concurrent access to the same file when using\n dask's multithreaded backend.\n\n Returns\n -------\n data : DataArray\n The newly created DataArray.\n \"\"\"\n import rasterio\n from rasterio.vrt import WarpedVRT\n\n vrt_params = None\n if isinstance(filename, rasterio.io.DatasetReader):\n filename = filename.name\n elif isinstance(filename, rasterio.vrt.WarpedVRT):\n vrt = filename\n filename = vrt.src_dataset.name\n vrt_params = dict(\n crs=vrt.crs.to_string(),\n resampling=vrt.resampling,\n src_nodata=vrt.src_nodata,\n dst_nodata=vrt.dst_nodata,\n tolerance=vrt.tolerance,\n transform=vrt.transform,\n width=vrt.width,\n height=vrt.height,\n warp_extras=vrt.warp_extras,\n )\n\n if lock is None:\n lock = RASTERIO_LOCK\n\n manager = CachingFileManager(rasterio.open, filename, lock=lock, mode=\"r\")\n riods = manager.acquire()\n if vrt_params is not None:\n riods = WarpedVRT(riods, **vrt_params)\n\n if cache is None:\n cache = chunks is None\n\n coords = {}\n\n # Get bands\n if riods.count < 1:\n raise ValueError(\"Unknown dims\")\n coords[\"band\"] = np.asarray(riods.indexes)\n\n # Get coordinates\n if riods.transform.is_rectilinear:\n # 1d coordinates\n parse = True if parse_coordinates is None else parse_coordinates\n if parse:\n nx, ny = riods.width, riods.height\n # xarray coordinates are pixel centered\n x, _ = (np.arange(nx) + 0.5, np.zeros(nx) + 0.5) * riods.transform\n _, y = (np.zeros(ny) + 0.5, np.arange(ny) + 0.5) * riods.transform\n coords[\"y\"] = y\n coords[\"x\"] = x\n else:\n # 2d coordinates\n parse = False if (parse_coordinates is None) else parse_coordinates\n if parse:\n warnings.warn(\n \"The file coordinates' transformation isn't \"\n \"rectilinear: xarray won't parse the coordinates \"\n \"in this case. 
Set `parse_coordinates=False` to \"\n \"suppress this warning.\",\n RuntimeWarning,\n stacklevel=3,\n )\n\n # Attributes\n attrs = {}\n # Affine transformation matrix (always available)\n # This describes coefficients mapping pixel coordinates to CRS\n # For serialization store as tuple of 6 floats, the last row being\n # always (0, 0, 1) per definition (see\n # https://github.com/sgillies/affine)\n attrs[\"transform\"] = tuple(riods.transform)[:6]\n if hasattr(riods, \"crs\") and riods.crs:\n # CRS is a dict-like object specific to rasterio\n # If CRS is not None, we convert it back to a PROJ4 string using\n # rasterio itself\n try:\n attrs[\"crs\"] = riods.crs.to_proj4()\n except AttributeError:\n attrs[\"crs\"] = riods.crs.to_string()\n if hasattr(riods, \"res\"):\n # (width, height) tuple of pixels in units of CRS\n attrs[\"res\"] = riods.res\n if hasattr(riods, \"is_tiled\"):\n # Is the TIF tiled? (bool)\n # We cast it to an int for netCDF compatibility\n attrs[\"is_tiled\"] = np.uint8(riods.is_tiled)\n if hasattr(riods, \"nodatavals\"):\n # The nodata values for the raster bands\n attrs[\"nodatavals\"] = tuple(\n np.nan if nodataval is None else nodataval for nodataval in riods.nodatavals\n )\n if hasattr(riods, \"scales\"):\n # The scale values for the raster bands\n attrs[\"scales\"] = riods.scales\n if hasattr(riods, \"offsets\"):\n # The offset values for the raster bands\n attrs[\"offsets\"] = riods.offsets\n if hasattr(riods, \"descriptions\") and any(riods.descriptions):\n # Descriptions for each dataset band\n attrs[\"descriptions\"] = riods.descriptions\n if hasattr(riods, \"units\") and any(riods.units):\n # A list of units string for each dataset band\n attrs[\"units\"] = riods.units\n\n # Parse extra metadata from tags, if supported\n parsers = {\"ENVI\": _parse_envi, \"GTiff\": lambda m: m}\n\n driver = riods.driver\n if driver in parsers:\n if driver == \"GTiff\":\n meta = parsers[driver](riods.tags())\n else:\n meta = parsers[driver](riods.tags(ns=driver))\n\n for k, v in meta.items():\n # Add values as coordinates if they match the band count,\n # as attributes otherwise\n if isinstance(v, (list, np.ndarray)) and len(v) == riods.count:\n coords[k] = (\"band\", np.asarray(v))\n else:\n attrs[k] = v\n\n data = indexing.LazilyOuterIndexedArray(\n RasterioArrayWrapper(manager, lock, vrt_params)\n )\n\n # this lets you write arrays loaded with rasterio\n data = indexing.CopyOnWriteArray(data)\n if cache and chunks is None:\n data = indexing.MemoryCachedArray(data)\n\n result = DataArray(data=data, dims=(\"band\", \"y\", \"x\"), coords=coords, attrs=attrs)\n\n if chunks is not None:\n from dask.base import tokenize\n\n # augment the token with the file modification time\n try:\n mtime = os.path.getmtime(filename)\n except OSError:\n # the filename is probably an s3 bucket rather than a regular file\n mtime = None\n token = tokenize(filename, mtime, chunks)\n name_prefix = \"open_rasterio-%s\" % token\n result = result.chunk(chunks, name_prefix=name_prefix, token=token)\n\n # Make the file closeable\n result._file_obj = manager\n\n return result\n"},{"className":"RasterioArrayWrapper","col":0,"comment":"A wrapper around rasterio dataset objects","endLoc":132,"id":2853,"nodeType":"Class","startLoc":23,"text":"class RasterioArrayWrapper(BackendArray):\n \"\"\"A wrapper around rasterio dataset objects\"\"\"\n\n def __init__(self, manager, lock, vrt_params=None):\n from rasterio.vrt import WarpedVRT\n\n self.manager = manager\n self.lock = lock\n\n # cannot save riods as an 
attribute: this would break pickleability\n riods = manager.acquire()\n if vrt_params is not None:\n riods = WarpedVRT(riods, **vrt_params)\n self.vrt_params = vrt_params\n self._shape = (riods.count, riods.height, riods.width)\n\n dtypes = riods.dtypes\n if not np.all(np.asarray(dtypes) == dtypes[0]):\n raise ValueError(\"All bands should have the same dtype\")\n self._dtype = np.dtype(dtypes[0])\n\n @property\n def dtype(self):\n return self._dtype\n\n @property\n def shape(self):\n return self._shape\n\n def _get_indexer(self, key):\n \"\"\" Get indexer for rasterio array.\n\n Parameter\n ---------\n key: tuple of int\n\n Returns\n -------\n band_key: an indexer for the 1st dimension\n window: two tuples. Each consists of (start, stop).\n squeeze_axis: axes to be squeezed\n np_ind: indexer for loaded numpy array\n\n See also\n --------\n indexing.decompose_indexer\n \"\"\"\n assert len(key) == 3, \"rasterio datasets should always be 3D\"\n\n # bands cannot be windowed but they can be listed\n band_key = key[0]\n np_inds = []\n # bands (axis=0) cannot be windowed but they can be listed\n if isinstance(band_key, slice):\n start, stop, step = band_key.indices(self.shape[0])\n band_key = np.arange(start, stop, step)\n # be sure we give out a list\n band_key = (np.asarray(band_key) + 1).tolist()\n if isinstance(band_key, list): # if band_key is not a scalar\n np_inds.append(slice(None))\n\n # but other dims can only be windowed\n window = []\n squeeze_axis = []\n for i, (k, n) in enumerate(zip(key[1:], self.shape[1:])):\n if isinstance(k, slice):\n # step is always positive. see indexing.decompose_indexer\n start, stop, step = k.indices(n)\n np_inds.append(slice(None, None, step))\n elif is_scalar(k):\n # windowed operations will always return an array\n # we will have to squeeze it later\n squeeze_axis.append(-(2 - i))\n start = k\n stop = k + 1\n else:\n start, stop = np.min(k), np.max(k) + 1\n np_inds.append(k - start)\n window.append((start, stop))\n\n if isinstance(key[1], np.ndarray) and isinstance(key[2], np.ndarray):\n # do outer-style indexing\n np_inds[-2:] = np.ix_(*np_inds[-2:])\n\n return band_key, tuple(window), tuple(squeeze_axis), tuple(np_inds)\n\n def _getitem(self, key):\n from rasterio.vrt import WarpedVRT\n\n band_key, window, squeeze_axis, np_inds = self._get_indexer(key)\n\n if not band_key or any(start == stop for (start, stop) in window):\n # no need to do IO\n shape = (len(band_key),) + tuple(stop - start for (start, stop) in window)\n out = np.zeros(shape, dtype=self.dtype)\n else:\n with self.lock:\n riods = self.manager.acquire(needs_lock=False)\n if self.vrt_params is not None:\n riods = WarpedVRT(riods, **self.vrt_params)\n out = riods.read(band_key, window=window)\n\n if squeeze_axis:\n out = np.squeeze(out, axis=squeeze_axis)\n return out[np_inds]\n\n def __getitem__(self, key):\n return indexing.explicit_indexing_adapter(\n key, self.shape, indexing.IndexingSupport.OUTER, self._getitem\n )"},{"col":4,"comment":"null","endLoc":46,"header":"@property\n def dtype(self)","id":2854,"name":"dtype","nodeType":"Function","startLoc":44,"text":"@property\n def dtype(self):\n return self._dtype"},{"col":4,"comment":"null","endLoc":50,"header":"@property\n def shape(self)","id":2855,"name":"shape","nodeType":"Function","startLoc":48,"text":"@property\n def shape(self):\n return self._shape"},{"col":4,"comment":" Get indexer for rasterio array.\n\n Parameter\n ---------\n key: tuple of int\n\n Returns\n -------\n band_key: an indexer for the 1st dimension\n window: two 
tuples. Each consists of (start, stop).\n squeeze_axis: axes to be squeezed\n np_ind: indexer for loaded numpy array\n\n See also\n --------\n indexing.decompose_indexer\n ","endLoc":107,"header":"def _get_indexer(self, key)","id":2856,"name":"_get_indexer","nodeType":"Function","startLoc":52,"text":"def _get_indexer(self, key):\n \"\"\" Get indexer for rasterio array.\n\n Parameter\n ---------\n key: tuple of int\n\n Returns\n -------\n band_key: an indexer for the 1st dimension\n window: two tuples. Each consists of (start, stop).\n squeeze_axis: axes to be squeezed\n np_ind: indexer for loaded numpy array\n\n See also\n --------\n indexing.decompose_indexer\n \"\"\"\n assert len(key) == 3, \"rasterio datasets should always be 3D\"\n\n # bands cannot be windowed but they can be listed\n band_key = key[0]\n np_inds = []\n # bands (axis=0) cannot be windowed but they can be listed\n if isinstance(band_key, slice):\n start, stop, step = band_key.indices(self.shape[0])\n band_key = np.arange(start, stop, step)\n # be sure we give out a list\n band_key = (np.asarray(band_key) + 1).tolist()\n if isinstance(band_key, list): # if band_key is not a scalar\n np_inds.append(slice(None))\n\n # but other dims can only be windowed\n window = []\n squeeze_axis = []\n for i, (k, n) in enumerate(zip(key[1:], self.shape[1:])):\n if isinstance(k, slice):\n # step is always positive. see indexing.decompose_indexer\n start, stop, step = k.indices(n)\n np_inds.append(slice(None, None, step))\n elif is_scalar(k):\n # windowed operations will always return an array\n # we will have to squeeze it later\n squeeze_axis.append(-(2 - i))\n start = k\n stop = k + 1\n else:\n start, stop = np.min(k), np.max(k) + 1\n np_inds.append(k - start)\n window.append((start, stop))\n\n if isinstance(key[1], np.ndarray) and isinstance(key[2], np.ndarray):\n # do outer-style indexing\n np_inds[-2:] = np.ix_(*np_inds[-2:])\n\n return band_key, tuple(window), tuple(squeeze_axis), tuple(np_inds)"},{"id":2857,"name":"indexing.rst","nodeType":"TextFile","path":"doc","text":".. _indexing:\n\nIndexing and selecting data\n===========================\n\n.. ipython:: python\n :suppress:\n\n import numpy as np\n import pandas as pd\n import xarray as xr\n np.random.seed(123456)\n\nxarray offers extremely flexible indexing routines that combine the best\nfeatures of NumPy and pandas for data selection.\n\nThe most basic way to access elements of a :py:class:`~xarray.DataArray`\nobject is to use Python's ``[]`` syntax, such as ``array[i, j]``, where\n``i`` and ``j`` are both integers.\nAs xarray objects can store coordinates corresponding to each dimension of an\narray, label-based indexing similar to ``pandas.DataFrame.loc`` is also possible.\nIn label-based indexing, the element position ``i`` is automatically\nlooked-up from the coordinate values.\n\nDimensions of xarray objects have names, so you can also lookup the dimensions\nby name, instead of remembering their positional order.\n\nThus in total, xarray supports four different kinds of indexing, as described\nbelow and summarized in this table:\n\n.. |br| raw:: html\n\n

\n\n+------------------+--------------+---------------------------------+--------------------------------+\n| Dimension lookup | Index lookup | ``DataArray`` syntax | ``Dataset`` syntax |\n+==================+==============+=================================+================================+\n| Positional | By integer | ``da[:, 0]`` | *not available* |\n+------------------+--------------+---------------------------------+--------------------------------+\n| Positional | By label | ``da.loc[:, 'IA']`` | *not available* |\n+------------------+--------------+---------------------------------+--------------------------------+\n| By name | By integer | ``da.isel(space=0)`` or |br| | ``ds.isel(space=0)`` or |br| |\n| | | ``da[dict(space=0)]`` | ``ds[dict(space=0)]`` |\n+------------------+--------------+---------------------------------+--------------------------------+\n| By name | By label | ``da.sel(space='IA')`` or |br| | ``ds.sel(space='IA')`` or |br| |\n| | | ``da.loc[dict(space='IA')]`` | ``ds.loc[dict(space='IA')]`` |\n+------------------+--------------+---------------------------------+--------------------------------+\n\nMore advanced indexing is also possible for all the methods by\nsupplying :py:class:`~xarray.DataArray` objects as indexer.\nSee :ref:`vectorized_indexing` for the details.\n\n\nPositional indexing\n-------------------\n\nIndexing a :py:class:`~xarray.DataArray` directly works (mostly) just like it\ndoes for numpy arrays, except that the returned object is always another\nDataArray:\n\n.. ipython:: python\n\n da = xr.DataArray(np.random.rand(4, 3),\n [('time', pd.date_range('2000-01-01', periods=4)),\n ('space', ['IA', 'IL', 'IN'])])\n da[:2]\n da[0, 0]\n da[:, [2, 1]]\n\nAttributes are persisted in all indexing operations.\n\n.. warning::\n\n Positional indexing deviates from the NumPy when indexing with multiple\n arrays like ``da[[0, 1], [0, 1]]``, as described in\n :ref:`vectorized_indexing`.\n\nxarray also supports label-based indexing, just like pandas. Because\nwe use a :py:class:`pandas.Index` under the hood, label based indexing is very\nfast. To do label based indexing, use the :py:attr:`~xarray.DataArray.loc` attribute:\n\n.. ipython:: python\n\n da.loc['2000-01-01':'2000-01-02', 'IA']\n\nIn this example, the selected is a subpart of the array\nin the range '2000-01-01':'2000-01-02' along the first coordinate `time`\nand with 'IA' value from the second coordinate `space`.\n\nYou can perform any of the label indexing operations `supported by pandas`__,\nincluding indexing with individual, slices and arrays of labels, as well as\nindexing with boolean arrays. Like pandas, label based indexing in xarray is\n*inclusive* of both the start and stop bounds.\n\n__ http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-label\n\nSetting values with label based indexing is also supported:\n\n.. ipython:: python\n\n da.loc['2000-01-01', ['IL', 'IN']] = -10\n da\n\n\nIndexing with dimension names\n-----------------------------\n\nWith the dimension names, we do not have to rely on dimension order and can\nuse them explicitly to slice data. There are two ways to do this:\n\n1. Use a dictionary as the argument for array positional or label based array\n indexing:\n\n .. ipython:: python\n\n # index by integer array indices\n da[dict(space=0, time=slice(None, 2))]\n\n # index by dimension coordinate labels\n da.loc[dict(time=slice('2000-01-01', '2000-01-02'))]\n\n2. 
Use the :py:meth:`~xarray.DataArray.sel` and :py:meth:`~xarray.DataArray.isel`\n convenience methods:\n\n .. ipython:: python\n\n # index by integer array indices\n da.isel(space=0, time=slice(None, 2))\n\n # index by dimension coordinate labels\n da.sel(time=slice('2000-01-01', '2000-01-02'))\n\nThe arguments to these methods can be any objects that could index the array\nalong the dimension given by the keyword, e.g., labels for an individual value,\nPython :py:func:`slice` objects or 1-dimensional arrays.\n\n.. note::\n\n We would love to be able to do indexing with labeled dimension names inside\n brackets, but unfortunately, Python `does yet not support`__ indexing with\n keyword arguments like ``da[space=0]``\n\n__ http://legacy.python.org/dev/peps/pep-0472/\n\n\n.. _nearest neighbor lookups:\n\nNearest neighbor lookups\n------------------------\n\nThe label based selection methods :py:meth:`~xarray.Dataset.sel`,\n:py:meth:`~xarray.Dataset.reindex` and :py:meth:`~xarray.Dataset.reindex_like` all\nsupport ``method`` and ``tolerance`` keyword argument. The method parameter allows for\nenabling nearest neighbor (inexact) lookups by use of the methods ``'pad'``,\n``'backfill'`` or ``'nearest'``:\n\n.. ipython:: python\n\n da = xr.DataArray([1, 2, 3], [('x', [0, 1, 2])])\n da.sel(x=[1.1, 1.9], method='nearest')\n da.sel(x=0.1, method='backfill')\n da.reindex(x=[0.5, 1, 1.5, 2, 2.5], method='pad')\n\nTolerance limits the maximum distance for valid matches with an inexact lookup:\n\n.. ipython:: python\n\n da.reindex(x=[1.1, 1.5], method='nearest', tolerance=0.2)\n\nThe method parameter is not yet supported if any of the arguments\nto ``.sel()`` is a ``slice`` object:\n\n.. ipython::\n :verbatim:\n\n In [1]: da.sel(x=slice(1, 3), method='nearest')\n NotImplementedError\n\nHowever, you don't need to use ``method`` to do inexact slicing. Slicing\nalready returns all values inside the range (inclusive), as long as the index\nlabels are monotonic increasing:\n\n.. ipython:: python\n\n da.sel(x=slice(0.9, 3.1))\n\nIndexing axes with monotonic decreasing labels also works, as long as the\n``slice`` or ``.loc`` arguments are also decreasing:\n\n.. ipython:: python\n\n reversed_da = da[::-1]\n reversed_da.loc[3.1:0.9]\n\n\n.. note::\n\n If you want to interpolate along coordinates rather than looking up the\n nearest neighbors, use :py:meth:`~xarray.Dataset.interp` and\n :py:meth:`~xarray.Dataset.interp_like`.\n See :ref:`interpolation ` for the details.\n\n\nDataset indexing\n----------------\n\nWe can also use these methods to index all variables in a dataset\nsimultaneously, returning a new dataset:\n\n.. ipython:: python\n\n da = xr.DataArray(np.random.rand(4, 3),\n [('time', pd.date_range('2000-01-01', periods=4)),\n ('space', ['IA', 'IL', 'IN'])])\n ds = da.to_dataset(name='foo')\n ds.isel(space=[0], time=[0])\n ds.sel(time='2000-01-01')\n\nPositional indexing on a dataset is not supported because the ordering of\ndimensions in a dataset is somewhat ambiguous (it can vary between different\narrays). However, you can do normal indexing with dimension names:\n\n.. ipython:: python\n\n\n ds[dict(space=[0], time=[0])]\n ds.loc[dict(time='2000-01-01')]\n\nUsing indexing to *assign* values to a subset of dataset (e.g.,\n``ds[dict(space=0)] = 1``) is not yet supported.\n\nDropping labels and dimensions\n------------------------------\n\nThe :py:meth:`~xarray.Dataset.drop` method returns a new object with the listed\nindex labels along a dimension dropped:\n\n.. 
ipython:: python\n\n ds.drop(space=['IN', 'IL'])\n\n``drop`` is both a ``Dataset`` and ``DataArray`` method.\n\nUse :py:meth:`~xarray.Dataset.drop_dims` to drop a full dimension from a Dataset.\nAny variables with these dimensions are also dropped:\n\n.. ipython:: python\n\n ds.drop_dims('time')\n\n\n.. _masking with where:\n\nMasking with ``where``\n----------------------\n\nIndexing methods on xarray objects generally return a subset of the original data.\nHowever, it is sometimes useful to select an object with the same shape as the\noriginal data, but with some elements masked. To do this type of selection in\nxarray, use :py:meth:`~xarray.DataArray.where`:\n\n.. ipython:: python\n\n da = xr.DataArray(np.arange(16).reshape(4, 4), dims=['x', 'y'])\n da.where(da.x + da.y < 4)\n\nThis is particularly useful for ragged indexing of multi-dimensional data,\ne.g., to apply a 2D mask to an image. Note that ``where`` follows all the\nusual xarray broadcasting and alignment rules for binary operations (e.g.,\n``+``) between the object being indexed and the condition, as described in\n:ref:`comput`:\n\n.. ipython:: python\n\n da.where(da.y < 2)\n\nBy default ``where`` maintains the original size of the data. For cases\nwhere the selected data size is much smaller than the original data,\nuse of the option ``drop=True`` clips coordinate\nelements that are fully masked:\n\n.. ipython:: python\n\n da.where(da.y < 2, drop=True)\n\n.. _selecting values with isin:\n\nSelecting values with ``isin``\n------------------------------\n\nTo check whether elements of an xarray object contain a single object, you can\ncompare with the equality operator ``==`` (e.g., ``arr == 3``). To check\nmultiple values, use :py:meth:`~xarray.DataArray.isin`:\n\n.. ipython:: python\n\n da = xr.DataArray([1, 2, 3, 4, 5], dims=['x'])\n da.isin([2, 4])\n\n:py:meth:`~xarray.DataArray.isin` works particularly well with\n:py:meth:`~xarray.DataArray.where` to support indexing by arrays that are not\nalready labels of an array:\n\n.. ipython:: python\n\n lookup = xr.DataArray([-1, -2, -3, -4, -5], dims=['x'])\n da.where(lookup.isin([-2, -4]), drop=True)\n\nHowever, some caution is in order: when done repeatedly, this type of indexing\nis significantly slower than using :py:meth:`~xarray.DataArray.sel`.\n\n.. _vectorized_indexing:\n\nVectorized Indexing\n-------------------\n\nLike numpy and pandas, xarray supports indexing many array elements at once in a\n`vectorized` manner.\n\nIf you only provide integers, slices, or unlabeled arrays (array without\ndimension names, such as ``np.ndarray``, ``list``, but not\n:py:meth:`~xarray.DataArray` or :py:meth:`~xarray.Variable`) indexing can be\nunderstood as orthogonally. Each indexer component selects independently along\nthe corresponding dimension, similar to how vector indexing works in Fortran or\nMATLAB, or after using the :py:func:`numpy.ix_` helper:\n\n.. ipython:: python\n\n da = xr.DataArray(np.arange(12).reshape((3, 4)), dims=['x', 'y'],\n coords={'x': [0, 1, 2], 'y': ['a', 'b', 'c', 'd']})\n da\n da[[0, 1], [1, 1]]\n\nFor more flexibility, you can supply :py:meth:`~xarray.DataArray` objects\nas indexers.\nDimensions on resultant arrays are given by the ordered union of the indexers'\ndimensions:\n\n.. 
ipython:: python\n\n ind_x = xr.DataArray([0, 1], dims=['x'])\n ind_y = xr.DataArray([0, 1], dims=['y'])\n da[ind_x, ind_y] # orthogonal indexing\n da[ind_x, ind_x] # vectorized indexing\n\nSlices or sequences/arrays without named-dimensions are treated as if they have\nthe same dimension which is indexed along:\n\n.. ipython:: python\n\n # Because [0, 1] is used to index along dimension 'x',\n # it is assumed to have dimension 'x'\n da[[0, 1], ind_x]\n\nFurthermore, you can use multi-dimensional :py:meth:`~xarray.DataArray`\nas indexers, where the resultant array dimension is also determined by\nindexers' dimension:\n\n.. ipython:: python\n\n ind = xr.DataArray([[0, 1], [0, 1]], dims=['a', 'b'])\n da[ind]\n\nSimilar to how NumPy's `advanced indexing`_ works, vectorized\nindexing for xarray is based on our\n:ref:`broadcasting rules `.\nSee :ref:`indexing.rules` for the complete specification.\n\n.. _advanced indexing: https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.indexing.html\n\nVectorized indexing also works with ``isel``, ``loc``, and ``sel``:\n\n.. ipython:: python\n\n ind = xr.DataArray([[0, 1], [0, 1]], dims=['a', 'b'])\n da.isel(y=ind) # same as da[:, ind]\n\n ind = xr.DataArray([['a', 'b'], ['b', 'a']], dims=['a', 'b'])\n da.loc[:, ind] # same as da.sel(y=ind)\n\nThese methods may also be applied to ``Dataset`` objects\n\n.. ipython:: python\n\n ds = da.to_dataset(name='bar')\n ds.isel(x=xr.DataArray([0, 1, 2], dims=['points']))\n\n.. tip::\n\n If you are lazily loading your data from disk, not every form of vectorized\n indexing is supported (or if supported, may not be supported efficiently).\n You may find increased performance by loading your data into memory first,\n e.g., with :py:meth:`~xarray.Dataset.load`.\n\n.. note::\n\n If an indexer is a :py:meth:`~xarray.DataArray`, its coordinates should not\n conflict with the selected subpart of the target array (except for the\n explicitly indexed dimensions with ``.loc``/``.sel``).\n Otherwise, ``IndexError`` will be raised.\n\n\n.. _assigning_values:\n\nAssigning values with indexing\n------------------------------\n\nTo select and assign values to a portion of a :py:meth:`~xarray.DataArray` you\ncan use indexing with ``.loc`` :\n\n.. ipython:: python\n\n ds = xr.tutorial.open_dataset('air_temperature')\n\n #add an empty 2D dataarray\n ds['empty']= xr.full_like(ds.air.mean('time'),fill_value=0)\n\n #modify one grid point using loc()\n ds['empty'].loc[dict(lon=260, lat=30)] = 100\n\n #modify a 2D region using loc()\n lc = ds.coords['lon']\n la = ds.coords['lat']\n ds['empty'].loc[dict(lon=lc[(lc>220)&(lc<260)], lat=la[(la>20)&(la<60)])] = 100\n\nor :py:meth:`~xarray.where`:\n\n.. ipython:: python\n\n #modify one grid point using xr.where()\n ds['empty'] = xr.where((ds.coords['lat']==20)&(ds.coords['lon']==260), 100, ds['empty'])\n\n #or modify a 2D region using xr.where()\n mask = (ds.coords['lat']>20)&(ds.coords['lat']<60)&(ds.coords['lon']>220)&(ds.coords['lon']<260)\n ds['empty'] = xr.where(mask, 100, ds['empty'])\n\n\nVectorized indexing can also be used to assign values to xarray object.\n\n.. 
ipython:: python\n\n da = xr.DataArray(np.arange(12).reshape((3, 4)), dims=['x', 'y'],\n coords={'x': [0, 1, 2], 'y': ['a', 'b', 'c', 'd']})\n da\n da[0] = -1 # assignment with broadcasting\n da\n\n ind_x = xr.DataArray([0, 1], dims=['x'])\n ind_y = xr.DataArray([0, 1], dims=['y'])\n da[ind_x, ind_y] = -2 # assign -2 to (ix, iy) = (0, 0) and (1, 1)\n da\n\n da[ind_x, ind_y] += 100 # increment is also possible\n da\n\nLike ``numpy.ndarray``, value assignment sometimes works differently from what one may expect.\n\n.. ipython:: python\n\n da = xr.DataArray([0, 1, 2, 3], dims=['x'])\n ind = xr.DataArray([0, 0, 0], dims=['x'])\n da[ind] -= 1\n da\n\nWhere the 0th element will be subtracted 1 only once.\nThis is because ``v[0] = v[0] - 1`` is called three times, rather than\n``v[0] = v[0] - 1 - 1 - 1``.\nSee `Assigning values to indexed arrays`__ for the details.\n\n__ https://docs.scipy.org/doc/numpy/user/basics.indexing.html#assigning-values-to-indexed-arrays\n\n\n.. note::\n Dask array does not support value assignment\n (see :ref:`dask` for the details).\n\n.. note::\n\n Coordinates in both the left- and right-hand-side arrays should not\n conflict with each other.\n Otherwise, ``IndexError`` will be raised.\n\n.. warning::\n\n Do not try to assign values when using any of the indexing methods ``isel``\n or ``sel``::\n\n # DO NOT do this\n da.isel(space=0) = 0\n\n Assigning values with the chained indexing using ``.sel`` or ``.isel`` fails silently.\n\n .. ipython:: python\n\n da = xr.DataArray([0, 1, 2, 3], dims=['x'])\n # DO NOT do this\n da.isel(x=[0, 1, 2])[1] = -1\n da\n\n\n.. _more_advanced_indexing:\n\nMore advanced indexing\n-----------------------\n\nThe use of :py:meth:`~xarray.DataArray` objects as indexers enables very\nflexible indexing. The following is an example of the pointwise indexing:\n\n.. ipython:: python\n\n da = xr.DataArray(np.arange(56).reshape((7, 8)), dims=['x', 'y'])\n da\n da.isel(x=xr.DataArray([0, 1, 6], dims='z'),\n y=xr.DataArray([0, 1, 0], dims='z'))\n\nwhere three elements at ``(ix, iy) = ((0, 0), (1, 1), (6, 0))`` are selected\nand mapped along a new dimension ``z``.\n\nIf you want to add a coordinate to the new dimension ``z``,\nyou can supply a :py:class:`~xarray.DataArray` with a coordinate,\n\n.. ipython:: python\n\n da.isel(x=xr.DataArray([0, 1, 6], dims='z',\n coords={'z': ['a', 'b', 'c']}),\n y=xr.DataArray([0, 1, 0], dims='z'))\n\nAnalogously, label-based pointwise-indexing is also possible by the ``.sel``\nmethod:\n\n.. ipython:: python\n\n da = xr.DataArray(np.random.rand(4, 3),\n [('time', pd.date_range('2000-01-01', periods=4)),\n ('space', ['IA', 'IL', 'IN'])])\n times = xr.DataArray(pd.to_datetime(['2000-01-03', '2000-01-02', '2000-01-01']),\n dims='new_time')\n da.sel(space=xr.DataArray(['IA', 'IL', 'IN'], dims=['new_time']),\n time=times)\n\n\n.. _align and reindex:\n\nAlign and reindex\n-----------------\n\nxarray's ``reindex``, ``reindex_like`` and ``align`` impose a ``DataArray`` or\n``Dataset`` onto a new set of coordinates corresponding to dimensions. The\noriginal values are subset to the index labels still found in the new labels,\nand values corresponding to new labels not found in the original object are\nin-filled with `NaN`.\n\nxarray operations that combine multiple objects generally automatically align\ntheir arguments to share the same indexes. However, manual alignment can be\nuseful for greater control and for increased performance.\n\nTo reindex a particular dimension, use :py:meth:`~xarray.DataArray.reindex`:\n\n.. 
ipython:: python\n\n da.reindex(space=['IA', 'CA'])\n\nThe :py:meth:`~xarray.DataArray.reindex_like` method is a useful shortcut.\nTo demonstrate, we will make a subset DataArray with new values:\n\n.. ipython:: python\n\n foo = da.rename('foo')\n baz = (10 * da[:2, :2]).rename('baz')\n baz\n\nReindexing ``foo`` with ``baz`` selects out the first two values along each\ndimension:\n\n.. ipython:: python\n\n foo.reindex_like(baz)\n\nThe opposite operation asks us to reindex to a larger shape, so we fill in\nthe missing values with `NaN`:\n\n.. ipython:: python\n\n baz.reindex_like(foo)\n\nThe :py:func:`~xarray.align` function lets us perform more flexible database-like\n``'inner'``, ``'outer'``, ``'left'`` and ``'right'`` joins:\n\n.. ipython:: python\n\n xr.align(foo, baz, join='inner')\n xr.align(foo, baz, join='outer')\n\nBoth ``reindex_like`` and ``align`` work interchangeably between\n:py:class:`~xarray.DataArray` and :py:class:`~xarray.Dataset` objects, and with any number of matching dimension names:\n\n.. ipython:: python\n\n ds\n ds.reindex_like(baz)\n other = xr.DataArray(['a', 'b', 'c'], dims='other')\n # this is a no-op, because there are no shared dimension names\n ds.reindex_like(other)\n\n.. _indexing.missing_coordinates:\n\nMissing coordinate labels\n-------------------------\n\nCoordinate labels for each dimension are optional (as of xarray v0.9). Label\nbased indexing with ``.sel`` and ``.loc`` uses standard positional,\ninteger-based indexing as a fallback for dimensions without a coordinate label:\n\n.. ipython:: python\n\n da = xr.DataArray([1, 2, 3], dims='x')\n da.sel(x=[0, -1])\n\nAlignment between xarray objects where one or both do not have coordinate labels\nsucceeds only if all dimensions of the same name have the same length.\nOtherwise, it raises an informative error:\n\n.. ipython::\n :verbatim:\n\n In [62]: xr.align(da, da[:2])\n ValueError: arguments without labels along dimension 'x' cannot be aligned because they have different dimension sizes: {2, 3}\n\nUnderlying Indexes\n------------------\n\nxarray uses the :py:class:`pandas.Index` internally to perform indexing\noperations. If you need to access the underlying indexes, they are available\nthrough the :py:attr:`~xarray.DataArray.indexes` attribute.\n\n.. ipython:: python\n\n da = xr.DataArray(np.random.rand(4, 3),\n [('time', pd.date_range('2000-01-01', periods=4)),\n ('space', ['IA', 'IL', 'IN'])])\n da\n da.indexes\n da.indexes['time']\n\nUse :py:meth:`~xarray.DataArray.get_index` to get an index for a dimension,\nfalling back to a default :py:class:`pandas.RangeIndex` if it has no coordinate\nlabels:\n\n.. ipython:: python\n\n da = xr.DataArray([1, 2, 3], dims='x')\n da\n da.get_index('x')\n\n\n.. _copies_vs_views:\n\nCopies vs. Views\n----------------\n\nWhether array indexing returns a view or a copy of the underlying\ndata depends on the nature of the labels.\n\nFor positional (integer)\nindexing, xarray follows the same rules as NumPy:\n\n* Positional indexing with only integers and slices returns a view.\n* Positional indexing with arrays or lists returns a copy.\n\nThe rules for label based indexing are more complex:\n\n* Label-based indexing with only slices returns a view.\n* Label-based indexing with arrays returns a copy.\n* Label-based indexing with scalars returns a view or a copy, depending\n upon if the corresponding positional indexer can be represented as an\n integer or a slice object. 
The exact rules are determined by pandas.\n\nWhether data is a copy or a view is more predictable in xarray than in pandas, so\nunlike pandas, xarray does not produce `SettingWithCopy warnings`_. However, you\nshould still avoid assignment with chained indexing.\n\n.. _SettingWithCopy warnings: http://pandas.pydata.org/pandas-docs/stable/indexing.html#returning-a-view-versus-a-copy\n\n\n.. _multi-level indexing:\n\nMulti-level indexing\n--------------------\n\nJust like pandas, advanced indexing on multi-level indexes is possible with\n``loc`` and ``sel``. You can slice a multi-index by providing multiple indexers,\ni.e., a tuple of slices, labels, list of labels, or any selector allowed by\npandas:\n\n.. ipython:: python\n\n midx = pd.MultiIndex.from_product([list('abc'), [0, 1]],\n names=('one', 'two'))\n mda = xr.DataArray(np.random.rand(6, 3),\n [('x', midx), ('y', range(3))])\n mda\n mda.sel(x=(list('ab'), [0]))\n\nYou can also select multiple elements by providing a list of labels or tuples or\na slice of tuples:\n\n.. ipython:: python\n\n mda.sel(x=[('a', 0), ('b', 1)])\n\nAdditionally, xarray supports dictionaries:\n\n.. ipython:: python\n\n mda.sel(x={'one': 'a', 'two': 0})\n\nFor convenience, ``sel`` also accepts multi-index levels directly\nas keyword arguments:\n\n.. ipython:: python\n\n mda.sel(one='a', two=0)\n\nNote that using ``sel`` it is not possible to mix a dimension\nindexer with level indexers for that dimension\n(e.g., ``mda.sel(x={'one': 'a'}, two=0)`` will raise a ``ValueError``).\n\nLike pandas, xarray handles partial selection on multi-index (level drop).\nAs shown below, it also renames the dimension / coordinate when the\nmulti-index is reduced to a single index.\n\n.. ipython:: python\n\n mda.loc[{'one': 'a'}, ...]\n\nUnlike pandas, xarray does not guess whether you provide index levels or\ndimensions when using ``loc`` in some ambiguous cases. For example, for\n``mda.loc[{'one': 'a', 'two': 0}]`` and ``mda.loc['a', 0]`` xarray\nalways interprets ('one', 'two') and ('a', 0) as the names and\nlabels of the 1st and 2nd dimension, respectively. You must specify all\ndimensions or use the ellipsis in the ``loc`` specifier, e.g. in the example\nabove, ``mda.loc[{'one': 'a', 'two': 0}, :]`` or ``mda.loc[('a', 0), ...]``.\n\n\n.. _indexing.rules:\n\nIndexing rules\n--------------\n\nHere we describe the full rules xarray uses for vectorized indexing. Note that\nthis is for the purposes of explanation: for the sake of efficiency and to\nsupport various backends, the actual implementation is different.\n\n0. (Only for label based indexing.) Look up positional indexes along each\n dimension from the corresponding :py:class:`pandas.Index`.\n\n1. A full slice object ``:`` is inserted for each dimension without an indexer.\n\n2. ``slice`` objects are converted into arrays, given by\n ``np.arange(*slice.indices(...))``.\n\n3. Assume dimension names for array indexers without dimensions, such as\n ``np.ndarray`` and ``list``, from the dimensions to be indexed along.\n For example, ``v.isel(x=[0, 1])`` is understood as\n ``v.isel(x=xr.DataArray([0, 1], dims=['x']))``.\n\n4. For each variable in a ``Dataset`` or ``DataArray`` (the array and its\n coordinates):\n\n a. Broadcast all relevant indexers based on their dimension names\n (see :ref:`compute.broadcasting` for full details).\n\n b. Index the underling array by the broadcast indexers, using NumPy's\n advanced indexing rules.\n\n5. 
If any indexer DataArray has coordinates and no coordinate with the\n same name exists, attach them to the indexed object.\n\n.. note::\n\n Only 1-dimensional boolean arrays can be used as indexers.\n"},{"fileName":"zarr.py","filePath":"xarray/backends","id":2858,"nodeType":"File","text":"import warnings\n\nimport numpy as np\n\nfrom .. import Variable, coding, conventions\nfrom ..core import indexing\nfrom ..core.pycompat import integer_types\nfrom ..core.utils import FrozenDict, HiddenKeyDict\nfrom .common import AbstractWritableDataStore, BackendArray, _encode_variable_name\n\n# need some special secret attributes to tell us the dimensions\n_DIMENSION_KEY = \"_ARRAY_DIMENSIONS\"\n\n\n# zarr attributes have to be serializable as json\n# many xarray datasets / variables have numpy arrays and values\n# these functions handle encoding / decoding of such items\ndef _encode_zarr_attr_value(value):\n if isinstance(value, np.ndarray):\n encoded = value.tolist()\n # this checks if it's a scalar number\n elif isinstance(value, np.generic):\n encoded = value.item()\n else:\n encoded = value\n return encoded\n\n\nclass ZarrArrayWrapper(BackendArray):\n __slots__ = (\"datastore\", \"dtype\", \"shape\", \"variable_name\")\n\n def __init__(self, variable_name, datastore):\n self.datastore = datastore\n self.variable_name = variable_name\n\n array = self.get_array()\n self.shape = array.shape\n\n dtype = array.dtype\n self.dtype = dtype\n\n def get_array(self):\n return self.datastore.ds[self.variable_name]\n\n def __getitem__(self, key):\n array = self.get_array()\n if isinstance(key, indexing.BasicIndexer):\n return array[key.tuple]\n elif isinstance(key, indexing.VectorizedIndexer):\n return array.vindex[\n indexing._arrayize_vectorized_indexer(key.tuple, self.shape).tuple\n ]\n else:\n assert isinstance(key, indexing.OuterIndexer)\n return array.oindex[key.tuple]\n # if self.ndim == 0:\n # could possibly have a work-around for 0d data here\n\n\ndef _determine_zarr_chunks(enc_chunks, var_chunks, ndim):\n \"\"\"\n Given encoding chunks (possibly None) and variable chunks (possibly None)\n \"\"\"\n\n # zarr chunk spec:\n # chunks : int or tuple of ints, optional\n # Chunk shape. If not provided, will be guessed from shape and dtype.\n\n # if there are no chunks in encoding and the variable data is a numpy\n # array, then we let zarr use its own heuristics to pick the chunks\n if var_chunks is None and enc_chunks is None:\n return None\n\n # if there are no chunks in encoding but there are dask chunks, we try to\n # use the same chunks in zarr\n # However, zarr chunks needs to be uniform for each array\n # http://zarr.readthedocs.io/en/latest/spec/v1.html#chunks\n # while dask chunks can be variable sized\n # http://dask.pydata.org/en/latest/array-design.html#chunks\n if var_chunks and enc_chunks is None:\n if any(len(set(chunks[:-1])) > 1 for chunks in var_chunks):\n raise ValueError(\n \"Zarr requires uniform chunk sizes except for final chunk.\"\n \" Variable dask chunks %r are incompatible. Consider \"\n \"rechunking using `chunk()`.\" % (var_chunks,)\n )\n if any((chunks[0] < chunks[-1]) for chunks in var_chunks):\n raise ValueError(\n \"Final chunk of Zarr array must be the same size or smaller \"\n \"than the first. Variable Dask chunks %r are incompatible. 
\"\n \"Consider rechunking using `chunk()`.\" % var_chunks\n )\n # return the first chunk for each dimension\n return tuple(chunk[0] for chunk in var_chunks)\n\n # from here on, we are dealing with user-specified chunks in encoding\n # zarr allows chunks to be an integer, in which case it uses the same chunk\n # size on each dimension.\n # Here we re-implement this expansion ourselves. That makes the logic of\n # checking chunk compatibility easier\n\n if isinstance(enc_chunks, integer_types):\n enc_chunks_tuple = ndim * (enc_chunks,)\n else:\n enc_chunks_tuple = tuple(enc_chunks)\n\n if len(enc_chunks_tuple) != ndim:\n # throw away encoding chunks, start over\n return _determine_zarr_chunks(None, var_chunks, ndim)\n\n for x in enc_chunks_tuple:\n if not isinstance(x, int):\n raise TypeError(\n \"zarr chunks must be an int or a tuple of ints. \"\n \"Instead found %r\" % (enc_chunks_tuple,)\n )\n\n # if there are chunks in encoding and the variable data is a numpy array,\n # we use the specified chunks\n if var_chunks is None:\n return enc_chunks_tuple\n\n # the hard case\n # DESIGN CHOICE: do not allow multiple dask chunks on a single zarr chunk\n # this avoids the need to get involved in zarr synchronization / locking\n # From zarr docs:\n # \"If each worker in a parallel computation is writing to a separate\n # region of the array, and if region boundaries are perfectly aligned\n # with chunk boundaries, then no synchronization is required.\"\n # TODO: incorporate synchronizer to allow writes from multiple dask\n # threads\n if var_chunks and enc_chunks_tuple:\n for zchunk, dchunks in zip(enc_chunks_tuple, var_chunks):\n for dchunk in dchunks[:-1]:\n if dchunk % zchunk:\n raise NotImplementedError(\n \"Specified zarr chunks %r would overlap multiple dask \"\n \"chunks %r. This is not implemented in xarray yet. \"\n \" Consider rechunking the data using \"\n \"`chunk()` or specifying different chunks in encoding.\"\n % (enc_chunks_tuple, var_chunks)\n )\n if dchunks[-1] > zchunk:\n raise ValueError(\n \"Final chunk of Zarr array must be the same size or \"\n \"smaller than the first. The specified Zarr chunk \"\n \"encoding is %r, but %r in variable Dask chunks %r is \"\n \"incompatible. Consider rechunking using `chunk()`.\"\n % (enc_chunks_tuple, dchunks, var_chunks)\n )\n return enc_chunks_tuple\n\n raise AssertionError(\"We should never get here. Function logic must be wrong.\")\n\n\ndef _get_zarr_dims_and_attrs(zarr_obj, dimension_key):\n # Zarr arrays do not have dimenions. To get around this problem, we add\n # an attribute that specifies the dimension. 
We have to hide this attribute\n # when we send the attributes to the user.\n # zarr_obj can be either a zarr group or zarr array\n try:\n dimensions = zarr_obj.attrs[dimension_key]\n except KeyError:\n raise KeyError(\n \"Zarr object is missing the attribute `%s`, which is \"\n \"required for xarray to determine variable dimensions.\" % (dimension_key)\n )\n attributes = HiddenKeyDict(zarr_obj.attrs, [dimension_key])\n return dimensions, attributes\n\n\ndef _extract_zarr_variable_encoding(variable, raise_on_invalid=False):\n encoding = variable.encoding.copy()\n\n valid_encodings = {\"chunks\", \"compressor\", \"filters\", \"cache_metadata\"}\n\n if raise_on_invalid:\n invalid = [k for k in encoding if k not in valid_encodings]\n if invalid:\n raise ValueError(\n \"unexpected encoding parameters for zarr \" \"backend: %r\" % invalid\n )\n else:\n for k in list(encoding):\n if k not in valid_encodings:\n del encoding[k]\n\n chunks = _determine_zarr_chunks(\n encoding.get(\"chunks\"), variable.chunks, variable.ndim\n )\n encoding[\"chunks\"] = chunks\n return encoding\n\n\n# Function below is copied from conventions.encode_cf_variable.\n# The only change is to raise an error for object dtypes.\ndef encode_zarr_variable(var, needs_copy=True, name=None):\n \"\"\"\n Converts an Variable into an Variable which follows some\n of the CF conventions:\n\n - Nans are masked using _FillValue (or the deprecated missing_value)\n - Rescaling via: scale_factor and add_offset\n - datetimes are converted to the CF 'units since time' format\n - dtype encodings are enforced.\n\n Parameters\n ----------\n var : xarray.Variable\n A variable holding un-encoded data.\n\n Returns\n -------\n out : xarray.Variable\n A variable which has been encoded as described above.\n \"\"\"\n\n var = conventions.encode_cf_variable(var, name=name)\n\n # zarr allows unicode, but not variable-length strings, so it's both\n # simpler and more compact to always encode as UTF-8 explicitly.\n # TODO: allow toggling this explicitly via dtype in encoding.\n coder = coding.strings.EncodedStringCoder(allows_unicode=True)\n var = coder.encode(var, name=name)\n var = coding.strings.ensure_fixed_length_bytes(var)\n\n return var\n\n\nclass ZarrStore(AbstractWritableDataStore):\n \"\"\"Store for reading and writing data via zarr\n \"\"\"\n\n __slots__ = (\n \"append_dim\",\n \"ds\",\n \"_consolidate_on_close\",\n \"_group\",\n \"_read_only\",\n \"_synchronizer\",\n )\n\n @classmethod\n def open_group(\n cls,\n store,\n mode=\"r\",\n synchronizer=None,\n group=None,\n consolidated=False,\n consolidate_on_close=False,\n ):\n import zarr\n\n open_kwargs = dict(mode=mode, synchronizer=synchronizer, path=group)\n if consolidated:\n # TODO: an option to pass the metadata_key keyword\n zarr_group = zarr.open_consolidated(store, **open_kwargs)\n else:\n zarr_group = zarr.open_group(store, **open_kwargs)\n return cls(zarr_group, consolidate_on_close)\n\n def __init__(self, zarr_group, consolidate_on_close=False):\n self.ds = zarr_group\n self._read_only = self.ds.read_only\n self._synchronizer = self.ds.synchronizer\n self._group = self.ds.path\n self._consolidate_on_close = consolidate_on_close\n self.append_dim = None\n\n def open_store_variable(self, name, zarr_array):\n data = indexing.LazilyOuterIndexedArray(ZarrArrayWrapper(name, self))\n dimensions, attributes = _get_zarr_dims_and_attrs(zarr_array, _DIMENSION_KEY)\n attributes = dict(attributes)\n encoding = {\n \"chunks\": zarr_array.chunks,\n \"compressor\": zarr_array.compressor,\n 
\"filters\": zarr_array.filters,\n }\n # _FillValue needs to be in attributes, not encoding, so it will get\n # picked up by decode_cf\n if getattr(zarr_array, \"fill_value\") is not None:\n attributes[\"_FillValue\"] = zarr_array.fill_value\n\n return Variable(dimensions, data, attributes, encoding)\n\n def get_variables(self):\n return FrozenDict(\n (k, self.open_store_variable(k, v)) for k, v in self.ds.arrays()\n )\n\n def get_attrs(self):\n attributes = dict(self.ds.attrs.asdict())\n return attributes\n\n def get_dimensions(self):\n dimensions = {}\n for k, v in self.ds.arrays():\n try:\n for d, s in zip(v.attrs[_DIMENSION_KEY], v.shape):\n if d in dimensions and dimensions[d] != s:\n raise ValueError(\n \"found conflicting lengths for dimension %s \"\n \"(%d != %d)\" % (d, s, dimensions[d])\n )\n dimensions[d] = s\n\n except KeyError:\n raise KeyError(\n \"Zarr object is missing the attribute `%s`, \"\n \"which is required for xarray to determine \"\n \"variable dimensions.\" % (_DIMENSION_KEY)\n )\n return dimensions\n\n def set_dimensions(self, variables, unlimited_dims=None):\n if unlimited_dims is not None:\n raise NotImplementedError(\n \"Zarr backend doesn't know how to handle unlimited dimensions\"\n )\n\n def set_attributes(self, attributes):\n self.ds.attrs.put(attributes)\n\n def encode_variable(self, variable):\n variable = encode_zarr_variable(variable)\n return variable\n\n def encode_attribute(self, a):\n return _encode_zarr_attr_value(a)\n\n def store(\n self,\n variables,\n attributes,\n check_encoding_set=frozenset(),\n writer=None,\n unlimited_dims=None,\n ):\n \"\"\"\n Top level method for putting data on this store, this method:\n - encodes variables/attributes\n - sets dimensions\n - sets variables\n\n Parameters\n ----------\n variables : dict-like\n Dictionary of key/value (variable name / xr.Variable) pairs\n attributes : dict-like\n Dictionary of key/value (attribute name / attribute) pairs\n check_encoding_set : list-like\n List of variables that should be checked for invalid encoding\n values\n writer : ArrayWriter\n unlimited_dims : list-like\n List of dimension names that should be treated as unlimited\n dimensions.\n dimension on which the zarray will be appended\n only needed in append mode\n \"\"\"\n\n existing_variables = {\n vn for vn in variables if _encode_variable_name(vn) in self.ds\n }\n new_variables = set(variables) - existing_variables\n variables_without_encoding = {vn: variables[vn] for vn in new_variables}\n variables_encoded, attributes = self.encode(\n variables_without_encoding, attributes\n )\n\n if len(existing_variables) > 0:\n # there are variables to append\n # their encoding must be the same as in the store\n ds = open_zarr(self.ds.store, chunks=None)\n variables_with_encoding = {}\n for vn in existing_variables:\n variables_with_encoding[vn] = variables[vn].copy(deep=False)\n variables_with_encoding[vn].encoding = ds[vn].encoding\n variables_with_encoding, _ = self.encode(variables_with_encoding, {})\n variables_encoded.update(variables_with_encoding)\n\n self.set_attributes(attributes)\n self.set_dimensions(variables_encoded, unlimited_dims=unlimited_dims)\n self.set_variables(\n variables_encoded, check_encoding_set, writer, unlimited_dims=unlimited_dims\n )\n\n def sync(self):\n pass\n\n def set_variables(self, variables, check_encoding_set, writer, unlimited_dims=None):\n \"\"\"\n This provides a centralized method to set the variables on the data\n store.\n\n Parameters\n ----------\n variables : dict-like\n Dictionary of 
key/value (variable name / xr.Variable) pairs\n check_encoding_set : list-like\n List of variables that should be checked for invalid encoding\n values\n writer :\n unlimited_dims : list-like\n List of dimension names that should be treated as unlimited\n dimensions.\n \"\"\"\n\n for vn, v in variables.items():\n name = _encode_variable_name(vn)\n check = vn in check_encoding_set\n attrs = v.attrs.copy()\n dims = v.dims\n dtype = v.dtype\n shape = v.shape\n\n fill_value = attrs.pop(\"_FillValue\", None)\n if v.encoding == {\"_FillValue\": None} and fill_value is None:\n v.encoding = {}\n if name in self.ds:\n zarr_array = self.ds[name]\n if self.append_dim in dims:\n # this is the DataArray that has append_dim as a\n # dimension\n append_axis = dims.index(self.append_dim)\n new_shape = list(zarr_array.shape)\n new_shape[append_axis] += v.shape[append_axis]\n new_region = [slice(None)] * len(new_shape)\n new_region[append_axis] = slice(zarr_array.shape[append_axis], None)\n zarr_array.resize(new_shape)\n writer.add(v.data, zarr_array, region=tuple(new_region))\n else:\n # new variable\n encoding = _extract_zarr_variable_encoding(v, raise_on_invalid=check)\n encoded_attrs = {}\n # the magic for storing the hidden dimension data\n encoded_attrs[_DIMENSION_KEY] = dims\n for k2, v2 in attrs.items():\n encoded_attrs[k2] = self.encode_attribute(v2)\n\n if coding.strings.check_vlen_dtype(dtype) == str:\n dtype = str\n zarr_array = self.ds.create(\n name, shape=shape, dtype=dtype, fill_value=fill_value, **encoding\n )\n zarr_array.attrs.put(encoded_attrs)\n writer.add(v.data, zarr_array)\n\n def close(self):\n if self._consolidate_on_close:\n import zarr\n\n zarr.consolidate_metadata(self.ds.store)\n\n\ndef open_zarr(\n store,\n group=None,\n synchronizer=None,\n chunks=\"auto\",\n decode_cf=True,\n mask_and_scale=True,\n decode_times=True,\n concat_characters=True,\n decode_coords=True,\n drop_variables=None,\n consolidated=False,\n overwrite_encoded_chunks=False,\n **kwargs\n):\n \"\"\"Load and decode a dataset from a Zarr store.\n\n .. note:: Experimental\n The Zarr backend is new and experimental. Please report any\n unexpected behavior via github issues.\n\n The `store` object should be a valid store for a Zarr group. `store`\n variables must contain dimension metadata encoded in the\n `_ARRAY_DIMENSIONS` attribute.\n\n Parameters\n ----------\n store : MutableMapping or str\n A MutableMapping where a Zarr Group has been stored or a path to a\n directory in file system where a Zarr DirectoryStore has been stored.\n synchronizer : object, optional\n Array synchronizer provided to zarr\n group : str, obtional\n Group path. (a.k.a. `path` in zarr terminology.)\n chunks : int or dict or tuple or {None, 'auto'}, optional\n Chunk sizes along each dimension, e.g., ``5`` or\n ``{'x': 5, 'y': 5}``. If `chunks='auto'`, dask chunks are created\n based on the variable's zarr chunks. If `chunks=None`, zarr array\n data will lazily convert to numpy arrays upon access. 
This accepts\n all the chunk specifications as Dask does.\n overwrite_encoded_chunks: bool, optional\n Whether to drop the zarr chunks encoded for each variable when a\n dataset is loaded with specified chunk sizes (default: False)\n decode_cf : bool, optional\n Whether to decode these variables, assuming they were saved according\n to CF conventions.\n mask_and_scale : bool, optional\n If True, replace array values equal to `_FillValue` with NA and scale\n values according to the formula `original_values * scale_factor +\n add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are\n taken from variable attributes (if they exist). If the `_FillValue` or\n `missing_value` attribute contains multiple values a warning will be\n issued and all array values matching one of the multiple values will\n be replaced by NA.\n decode_times : bool, optional\n If True, decode times encoded in the standard NetCDF datetime format\n into datetime objects. Otherwise, leave them encoded as numbers.\n concat_characters : bool, optional\n If True, concatenate along the last dimension of character arrays to\n form string arrays. Dimensions will only be concatenated over (and\n removed) if they have no corresponding variable and if they are only\n used as the last dimension of character arrays.\n decode_coords : bool, optional\n If True, decode the 'coordinates' attribute to identify coordinates in\n the resulting dataset.\n drop_variables : string or iterable, optional\n A variable or list of variables to exclude from being parsed from the\n dataset. This may be useful to drop variables with problems or\n inconsistent values.\n consolidated : bool, optional\n Whether to open the store using zarr's consolidated metadata\n capability. Only works for stores that have already been consolidated.\n\n Returns\n -------\n dataset : Dataset\n The newly created dataset.\n\n See Also\n --------\n open_dataset\n\n References\n ----------\n http://zarr.readthedocs.io/\n \"\"\"\n if \"auto_chunk\" in kwargs:\n auto_chunk = kwargs.pop(\"auto_chunk\")\n if auto_chunk:\n chunks = \"auto\" # maintain backwards compatibility\n else:\n chunks = None\n\n warnings.warn(\n \"auto_chunk is deprecated. Use chunks='auto' instead.\",\n FutureWarning,\n stacklevel=2,\n )\n\n if kwargs:\n raise TypeError(\n \"open_zarr() got unexpected keyword arguments \" + \",\".join(kwargs.keys())\n )\n\n if not isinstance(chunks, (int, dict)):\n if chunks != \"auto\" and chunks is not None:\n raise ValueError(\n \"chunks must be an int, dict, 'auto', or None. \"\n \"Instead found %s. \" % chunks\n )\n\n if not decode_cf:\n mask_and_scale = False\n decode_times = False\n concat_characters = False\n decode_coords = False\n\n def maybe_decode_store(store, lock=False):\n ds = conventions.decode_cf(\n store,\n mask_and_scale=mask_and_scale,\n decode_times=decode_times,\n concat_characters=concat_characters,\n decode_coords=decode_coords,\n drop_variables=drop_variables,\n )\n\n # TODO: this is where we would apply caching\n\n return ds\n\n # Zarr supports a wide range of access modes, but for now xarray either\n # reads or writes from a store, never both. 
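[editor's sketch, not part of the indexed zarr.py source] A minimal round trip through this backend, assuming zarr is installed; the store path "example.zarr" and the variable name are hypothetical.

    import numpy as np
    import xarray as xr

    ds = xr.Dataset({"temperature": (("x", "y"), np.random.rand(4, 5))})
    ds.to_zarr("example.zarr")                        # writes through ZarrStore

    lazy = xr.open_zarr("example.zarr", chunks=None)  # lazily indexed numpy-backed variables
    dasky = xr.open_zarr("example.zarr")              # chunks="auto": dask chunks taken from zarr chunks
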
For open_zarr, we only read\n mode = \"r\"\n zarr_store = ZarrStore.open_group(\n store,\n mode=mode,\n synchronizer=synchronizer,\n group=group,\n consolidated=consolidated,\n )\n ds = maybe_decode_store(zarr_store)\n\n # auto chunking needs to be here and not in ZarrStore because variable\n # chunks do not survive decode_cf\n # return trivial case\n if not chunks:\n return ds\n\n # adapted from Dataset.Chunk()\n if isinstance(chunks, int):\n chunks = dict.fromkeys(ds.dims, chunks)\n\n if isinstance(chunks, tuple) and len(chunks) == len(ds.dims):\n chunks = dict(zip(ds.dims, chunks))\n\n def get_chunk(name, var, chunks):\n chunk_spec = dict(zip(var.dims, var.encoding.get(\"chunks\")))\n\n # Coordinate labels aren't chunked\n if var.ndim == 1 and var.dims[0] == name:\n return chunk_spec\n\n if chunks == \"auto\":\n return chunk_spec\n\n for dim in var.dims:\n if dim in chunks:\n spec = chunks[dim]\n if isinstance(spec, int):\n spec = (spec,)\n if isinstance(spec, (tuple, list)) and chunk_spec[dim]:\n if any(s % chunk_spec[dim] for s in spec):\n warnings.warn(\n \"Specified Dask chunks %r would \"\n \"separate Zarr chunk shape %r for \"\n \"dimension %r. This significantly \"\n \"degrades performance. Consider \"\n \"rechunking after loading instead.\"\n % (chunks[dim], chunk_spec[dim], dim),\n stacklevel=2,\n )\n chunk_spec[dim] = chunks[dim]\n return chunk_spec\n\n def maybe_chunk(name, var, chunks):\n from dask.base import tokenize\n\n chunk_spec = get_chunk(name, var, chunks)\n\n if (var.ndim > 0) and (chunk_spec is not None):\n # does this cause any data to be read?\n token2 = tokenize(name, var._data)\n name2 = \"zarr-%s\" % token2\n var = var.chunk(chunk_spec, name=name2, lock=None)\n if overwrite_encoded_chunks and var.chunks is not None:\n var.encoding[\"chunks\"] = tuple(x[0] for x in var.chunks)\n return var\n else:\n return var\n\n variables = {k: maybe_chunk(k, v, chunks) for k, v in ds.variables.items()}\n return ds._replace_vars_and_dims(variables)\n"},{"attributeType":"Dataset","col":8,"comment":"null","endLoc":55,"id":2859,"name":"ds","nodeType":"Attribute","startLoc":55,"text":"self.ds"},{"className":"ZarrArrayWrapper","col":0,"comment":"null","endLoc":57,"id":2860,"nodeType":"Class","startLoc":29,"text":"class ZarrArrayWrapper(BackendArray):\n __slots__ = (\"datastore\", \"dtype\", \"shape\", \"variable_name\")\n\n def __init__(self, variable_name, datastore):\n self.datastore = datastore\n self.variable_name = variable_name\n\n array = self.get_array()\n self.shape = array.shape\n\n dtype = array.dtype\n self.dtype = dtype\n\n def get_array(self):\n return self.datastore.ds[self.variable_name]\n\n def __getitem__(self, key):\n array = self.get_array()\n if isinstance(key, indexing.BasicIndexer):\n return array[key.tuple]\n elif isinstance(key, indexing.VectorizedIndexer):\n return array.vindex[\n indexing._arrayize_vectorized_indexer(key.tuple, self.shape).tuple\n ]\n else:\n assert isinstance(key, indexing.OuterIndexer)\n return array.oindex[key.tuple]\n # if self.ndim == 0:\n # could possibly have a work-around for 0d data here"},{"attributeType":"null","col":16,"comment":"null","endLoc":1,"id":2861,"name":"np","nodeType":"Attribute","startLoc":1,"text":"np"},{"col":4,"comment":"null","endLoc":57,"header":"def __getitem__(self, key)","id":2862,"name":"__getitem__","nodeType":"Function","startLoc":45,"text":"def __getitem__(self, key):\n array = self.get_array()\n if isinstance(key, indexing.BasicIndexer):\n return array[key.tuple]\n elif isinstance(key, 
indexing.VectorizedIndexer):\n return array.vindex[\n indexing._arrayize_vectorized_indexer(key.tuple, self.shape).tuple\n ]\n else:\n assert isinstance(key, indexing.OuterIndexer)\n return array.oindex[key.tuple]\n # if self.ndim == 0:\n # could possibly have a work-around for 0d data here"},{"attributeType":"null","col":17,"comment":"null","endLoc":2,"id":2863,"name":"pd","nodeType":"Attribute","startLoc":2,"text":"pd"},{"attributeType":"null","col":17,"comment":"null","endLoc":4,"id":2864,"name":"xr","nodeType":"Attribute","startLoc":4,"text":"xr"},{"attributeType":"null","col":0,"comment":"null","endLoc":8,"id":2865,"name":"nx","nodeType":"Attribute","startLoc":8,"text":"nx"},{"attributeType":"null","col":0,"comment":"null","endLoc":9,"id":2866,"name":"long_nx","nodeType":"Attribute","startLoc":9,"text":"long_nx"},{"attributeType":"null","col":0,"comment":"null","endLoc":10,"id":2867,"name":"ny","nodeType":"Attribute","startLoc":10,"text":"ny"},{"attributeType":"null","col":0,"comment":"null","endLoc":11,"id":2868,"name":"nt","nodeType":"Attribute","startLoc":11,"text":"nt"},{"attributeType":"null","col":0,"comment":"null","endLoc":12,"id":2869,"name":"window","nodeType":"Attribute","startLoc":12,"text":"window"},{"attributeType":"null","col":0,"comment":"null","endLoc":14,"id":2870,"name":"randn_xy","nodeType":"Attribute","startLoc":14,"text":"randn_xy"},{"attributeType":"null","col":0,"comment":"null","endLoc":15,"id":2871,"name":"randn_xt","nodeType":"Attribute","startLoc":15,"text":"randn_xt"},{"attributeType":"null","col":0,"comment":"null","endLoc":16,"id":2872,"name":"randn_t","nodeType":"Attribute","startLoc":16,"text":"randn_t"},{"attributeType":"null","col":0,"comment":"null","endLoc":17,"id":2873,"name":"randn_long","nodeType":"Attribute","startLoc":17,"text":"randn_long"},{"attributeType":"null","col":0,"comment":"null","endLoc":20,"id":2874,"name":"new_x_short","nodeType":"Attribute","startLoc":20,"text":"new_x_short"},{"attributeType":"null","col":0,"comment":"null","endLoc":21,"id":2875,"name":"new_x_long","nodeType":"Attribute","startLoc":21,"text":"new_x_long"},{"attributeType":"null","col":0,"comment":"null","endLoc":22,"id":2876,"name":"new_y_long","nodeType":"Attribute","startLoc":22,"text":"new_y_long"},{"col":0,"comment":"","endLoc":1,"header":"interp.py#","id":2877,"name":"","nodeType":"Function","startLoc":1,"text":"nx = 3000\n\nlong_nx = 30000000\n\nny = 2000\n\nnt = 1000\n\nwindow = 20\n\nrandn_xy = randn((nx, ny), frac_nan=0.1)\n\nrandn_xt = randn((nx, nt))\n\nrandn_t = randn((nt,))\n\nrandn_long = randn((long_nx,), frac_nan=0.1)\n\nnew_x_short = np.linspace(0.3 * nx, 0.7 * nx, 100)\n\nnew_x_long = np.linspace(0.3 * nx, 0.7 * nx, 1000)\n\nnew_y_long = np.linspace(0.1, 0.9, 1000)"},{"attributeType":"null","col":4,"comment":"null","endLoc":30,"id":2878,"name":"__slots__","nodeType":"Attribute","startLoc":30,"text":"__slots__"},{"attributeType":"null","col":8,"comment":"null","endLoc":37,"id":2879,"name":"shape","nodeType":"Attribute","startLoc":37,"text":"self.shape"},{"fileName":"missing.py","filePath":"xarray/core","id":2880,"nodeType":"File","text":"import warnings\nfrom functools import partial\nfrom typing import Any, Callable, Dict, Sequence\n\nimport numpy as np\nimport pandas as pd\n\nfrom . 
import utils\nfrom .common import _contains_datetime_like_objects\nfrom .computation import apply_ufunc\nfrom .duck_array_ops import dask_array_type\nfrom .utils import OrderedSet, is_scalar\nfrom .variable import Variable, broadcast_variables\n\n\nclass BaseInterpolator:\n \"\"\"Generic interpolator class for normalizing interpolation methods\n \"\"\"\n\n cons_kwargs: Dict[str, Any]\n call_kwargs: Dict[str, Any]\n f: Callable\n method: str\n\n def __call__(self, x):\n return self.f(x, **self.call_kwargs)\n\n def __repr__(self):\n return \"{type}: method={method}\".format(\n type=self.__class__.__name__, method=self.method\n )\n\n\nclass NumpyInterpolator(BaseInterpolator):\n \"\"\"One-dimensional linear interpolation.\n\n See Also\n --------\n numpy.interp\n \"\"\"\n\n def __init__(self, xi, yi, method=\"linear\", fill_value=None, period=None):\n\n if method != \"linear\":\n raise ValueError(\"only method `linear` is valid for the NumpyInterpolator\")\n\n self.method = method\n self.f = np.interp\n self.cons_kwargs = {}\n self.call_kwargs = {\"period\": period}\n\n self._xi = xi\n self._yi = yi\n\n if fill_value is None:\n self._left = np.nan\n self._right = np.nan\n elif isinstance(fill_value, Sequence) and len(fill_value) == 2:\n self._left = fill_value[0]\n self._right = fill_value[1]\n elif is_scalar(fill_value):\n self._left = fill_value\n self._right = fill_value\n else:\n raise ValueError(\"%s is not a valid fill_value\" % fill_value)\n\n def __call__(self, x):\n return self.f(\n x,\n self._xi,\n self._yi,\n left=self._left,\n right=self._right,\n **self.call_kwargs\n )\n\n\nclass ScipyInterpolator(BaseInterpolator):\n \"\"\"Interpolate a 1-D function using Scipy interp1d\n\n See Also\n --------\n scipy.interpolate.interp1d\n \"\"\"\n\n def __init__(\n self,\n xi,\n yi,\n method=None,\n fill_value=None,\n assume_sorted=True,\n copy=False,\n bounds_error=False,\n order=None,\n **kwargs\n ):\n from scipy.interpolate import interp1d\n\n if method is None:\n raise ValueError(\n \"method is a required argument, please supply a \"\n \"valid scipy.inter1d method (kind)\"\n )\n\n if method == \"polynomial\":\n if order is None:\n raise ValueError(\"order is required when method=polynomial\")\n method = order\n\n self.method = method\n\n self.cons_kwargs = kwargs\n self.call_kwargs = {}\n\n if fill_value is None and method == \"linear\":\n fill_value = np.nan, np.nan\n elif fill_value is None:\n fill_value = np.nan\n\n self.f = interp1d(\n xi,\n yi,\n kind=self.method,\n fill_value=fill_value,\n bounds_error=False,\n assume_sorted=assume_sorted,\n copy=copy,\n **self.cons_kwargs\n )\n\n\nclass SplineInterpolator(BaseInterpolator):\n \"\"\"One-dimensional smoothing spline fit to a given set of data points.\n\n See Also\n --------\n scipy.interpolate.UnivariateSpline\n \"\"\"\n\n def __init__(\n self,\n xi,\n yi,\n method=\"spline\",\n fill_value=None,\n order=3,\n nu=0,\n ext=None,\n **kwargs\n ):\n from scipy.interpolate import UnivariateSpline\n\n if method != \"spline\":\n raise ValueError(\"only method `spline` is valid for the SplineInterpolator\")\n\n self.method = method\n self.cons_kwargs = kwargs\n self.call_kwargs = {\"nu\": nu, \"ext\": ext}\n\n if fill_value is not None:\n raise ValueError(\"SplineInterpolator does not support fill_value\")\n\n self.f = UnivariateSpline(xi, yi, k=order, **self.cons_kwargs)\n\n\ndef _apply_over_vars_with_dim(func, self, dim=None, **kwargs):\n \"\"\"Wrapper for datasets\n \"\"\"\n ds = type(self)(coords=self.coords, attrs=self.attrs)\n\n for name, 
var in self.data_vars.items():\n if dim in var.dims:\n ds[name] = func(var, dim=dim, **kwargs)\n else:\n ds[name] = var\n\n return ds\n\n\ndef get_clean_interp_index(arr, dim, use_coordinate=True):\n \"\"\"get index to use for x values in interpolation.\n\n If use_coordinate is True, the coordinate that shares the name of the\n dimension along which interpolation is being performed will be used as the\n x values.\n\n If use_coordinate is False, the x values are set as an equally spaced\n sequence.\n \"\"\"\n if use_coordinate:\n if use_coordinate is True:\n index = arr.get_index(dim)\n else:\n index = arr.coords[use_coordinate]\n if index.ndim != 1:\n raise ValueError(\n \"Coordinates used for interpolation must be 1D, \"\n \"%s is %dD.\" % (use_coordinate, index.ndim)\n )\n\n # raise if index cannot be cast to a float (e.g. MultiIndex)\n try:\n index = index.values.astype(np.float64)\n except (TypeError, ValueError):\n # pandas raises a TypeError\n # xarray/nuppy raise a ValueError\n raise TypeError(\n \"Index must be castable to float64 to support\"\n \"interpolation, got: %s\" % type(index)\n )\n # check index sorting now so we can skip it later\n if not (np.diff(index) > 0).all():\n raise ValueError(\"Index must be monotonicly increasing\")\n else:\n axis = arr.get_axis_num(dim)\n index = np.arange(arr.shape[axis], dtype=np.float64)\n\n return index\n\n\ndef interp_na(\n self, dim=None, use_coordinate=True, method=\"linear\", limit=None, **kwargs\n):\n \"\"\"Interpolate values according to different methods.\n \"\"\"\n if dim is None:\n raise NotImplementedError(\"dim is a required argument\")\n\n if limit is not None:\n valids = _get_valid_fill_mask(self, dim, limit)\n\n # method\n index = get_clean_interp_index(self, dim, use_coordinate=use_coordinate)\n interp_class, kwargs = _get_interpolator(method, **kwargs)\n interpolator = partial(func_interpolate_na, interp_class, **kwargs)\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \"overflow\", RuntimeWarning)\n warnings.filterwarnings(\"ignore\", \"invalid value\", RuntimeWarning)\n arr = apply_ufunc(\n interpolator,\n index,\n self,\n input_core_dims=[[dim], [dim]],\n output_core_dims=[[dim]],\n output_dtypes=[self.dtype],\n dask=\"parallelized\",\n vectorize=True,\n keep_attrs=True,\n ).transpose(*self.dims)\n\n if limit is not None:\n arr = arr.where(valids)\n\n return arr\n\n\ndef func_interpolate_na(interpolator, x, y, **kwargs):\n \"\"\"helper function to apply interpolation along 1 dimension\"\"\"\n # it would be nice if this wasn't necessary, works around:\n # \"ValueError: assignment destination is read-only\" in assignment below\n out = y.copy()\n\n nans = pd.isnull(y)\n nonans = ~nans\n\n # fast track for no-nans and all-nans cases\n n_nans = nans.sum()\n if n_nans == 0 or n_nans == len(y):\n return y\n\n f = interpolator(x[nonans], y[nonans], **kwargs)\n out[nans] = f(x[nans])\n return out\n\n\ndef _bfill(arr, n=None, axis=-1):\n \"\"\"inverse of ffill\"\"\"\n import bottleneck as bn\n\n arr = np.flip(arr, axis=axis)\n\n # fill\n arr = bn.push(arr, axis=axis, n=n)\n\n # reverse back to original\n return np.flip(arr, axis=axis)\n\n\ndef ffill(arr, dim=None, limit=None):\n \"\"\"forward fill missing values\"\"\"\n import bottleneck as bn\n\n axis = arr.get_axis_num(dim)\n\n # work around for bottleneck 178\n _limit = limit if limit is not None else arr.shape[axis]\n\n return apply_ufunc(\n bn.push,\n arr,\n dask=\"parallelized\",\n keep_attrs=True,\n output_dtypes=[arr.dtype],\n kwargs=dict(n=_limit, 
axis=axis),\n ).transpose(*arr.dims)\n\n\ndef bfill(arr, dim=None, limit=None):\n \"\"\"backfill missing values\"\"\"\n axis = arr.get_axis_num(dim)\n\n # work around for bottleneck 178\n _limit = limit if limit is not None else arr.shape[axis]\n\n return apply_ufunc(\n _bfill,\n arr,\n dask=\"parallelized\",\n keep_attrs=True,\n output_dtypes=[arr.dtype],\n kwargs=dict(n=_limit, axis=axis),\n ).transpose(*arr.dims)\n\n\ndef _get_interpolator(method, vectorizeable_only=False, **kwargs):\n \"\"\"helper function to select the appropriate interpolator class\n\n returns interpolator class and keyword arguments for the class\n \"\"\"\n interp1d_methods = [\n \"linear\",\n \"nearest\",\n \"zero\",\n \"slinear\",\n \"quadratic\",\n \"cubic\",\n \"polynomial\",\n ]\n valid_methods = interp1d_methods + [\n \"barycentric\",\n \"krog\",\n \"pchip\",\n \"spline\",\n \"akima\",\n ]\n\n has_scipy = True\n try:\n from scipy import interpolate\n except ImportError:\n has_scipy = False\n\n # prioritize scipy.interpolate\n if (\n method == \"linear\"\n and not kwargs.get(\"fill_value\", None) == \"extrapolate\"\n and not vectorizeable_only\n ):\n kwargs.update(method=method)\n interp_class = NumpyInterpolator\n\n elif method in valid_methods:\n if not has_scipy:\n raise ImportError(\"Interpolation with method `%s` requires scipy\" % method)\n\n if method in interp1d_methods:\n kwargs.update(method=method)\n interp_class = ScipyInterpolator\n elif vectorizeable_only:\n raise ValueError(\n \"{} is not a vectorizeable interpolator. \"\n \"Available methods are {}\".format(method, interp1d_methods)\n )\n elif method == \"barycentric\":\n interp_class = interpolate.BarycentricInterpolator\n elif method == \"krog\":\n interp_class = interpolate.KroghInterpolator\n elif method == \"pchip\":\n interp_class = interpolate.PchipInterpolator\n elif method == \"spline\":\n kwargs.update(method=method)\n interp_class = SplineInterpolator\n elif method == \"akima\":\n interp_class = interpolate.Akima1DInterpolator\n else:\n raise ValueError(\"%s is not a valid scipy interpolator\" % method)\n else:\n raise ValueError(\"%s is not a valid interpolator\" % method)\n\n return interp_class, kwargs\n\n\ndef _get_interpolator_nd(method, **kwargs):\n \"\"\"helper function to select the appropriate interpolator class\n\n returns interpolator class and keyword arguments for the class\n \"\"\"\n valid_methods = [\"linear\", \"nearest\"]\n\n try:\n from scipy import interpolate\n except ImportError:\n raise ImportError(\"Interpolation with method `%s` requires scipy\" % method)\n\n if method in valid_methods:\n kwargs.update(method=method)\n interp_class = interpolate.interpn\n else:\n raise ValueError(\n \"%s is not a valid interpolator for interpolating \"\n \"over multiple dimensions.\" % method\n )\n\n return interp_class, kwargs\n\n\ndef _get_valid_fill_mask(arr, dim, limit):\n \"\"\"helper function to determine values that can be filled when limit is not\n None\"\"\"\n kw = {dim: limit + 1}\n # we explicitly use construct method to avoid copy.\n new_dim = utils.get_temp_dimname(arr.dims, \"_window\")\n return (\n arr.isnull()\n .rolling(min_periods=1, **kw)\n .construct(new_dim, fill_value=False)\n .sum(new_dim, skipna=False)\n ) <= limit\n\n\ndef _assert_single_chunk(var, axes):\n for axis in axes:\n if len(var.chunks[axis]) > 1 or var.chunks[axis][0] < var.shape[axis]:\n raise NotImplementedError(\n \"Chunking along the dimension to be interpolated \"\n \"({}) is not yet supported.\".format(axis)\n )\n\n\ndef _localize(var, 
indexes_coords):\n \"\"\" Speed up for linear and nearest neighbor method.\n Only consider a subspace that is needed for the interpolation\n \"\"\"\n indexes = {}\n for dim, [x, new_x] in indexes_coords.items():\n index = x.to_index()\n imin = index.get_loc(np.min(new_x.values), method=\"nearest\")\n imax = index.get_loc(np.max(new_x.values), method=\"nearest\")\n\n indexes[dim] = slice(max(imin - 2, 0), imax + 2)\n indexes_coords[dim] = (x[indexes[dim]], new_x)\n return var.isel(**indexes), indexes_coords\n\n\ndef _floatize_x(x, new_x):\n \"\"\" Make x and new_x float.\n This is particulary useful for datetime dtype.\n x, new_x: tuple of np.ndarray\n \"\"\"\n x = list(x)\n new_x = list(new_x)\n for i in range(len(x)):\n if _contains_datetime_like_objects(x[i]):\n # Scipy casts coordinates to np.float64, which is not accurate\n # enough for datetime64 (uses 64bit integer).\n # We assume that the most of the bits are used to represent the\n # offset (min(x)) and the variation (x - min(x)) can be\n # represented by float.\n xmin = x[i].values.min()\n x[i] = x[i]._to_numeric(offset=xmin, dtype=np.float64)\n new_x[i] = new_x[i]._to_numeric(offset=xmin, dtype=np.float64)\n return x, new_x\n\n\ndef interp(var, indexes_coords, method, **kwargs):\n \"\"\" Make an interpolation of Variable\n\n Parameters\n ----------\n var: Variable\n index_coords:\n Mapping from dimension name to a pair of original and new coordinates.\n Original coordinates should be sorted in strictly ascending order.\n Note that all the coordinates should be Variable objects.\n method: string\n One of {'linear', 'nearest', 'zero', 'slinear', 'quadratic',\n 'cubic'}. For multidimensional interpolation, only\n {'linear', 'nearest'} can be used.\n **kwargs:\n keyword arguments to be passed to scipy.interpolate\n\n Returns\n -------\n Interpolated Variable\n\n See Also\n --------\n DataArray.interp\n Dataset.interp\n \"\"\"\n if not indexes_coords:\n return var.copy()\n\n # simple speed up for the local interpolation\n if method in [\"linear\", \"nearest\"]:\n var, indexes_coords = _localize(var, indexes_coords)\n\n # default behavior\n kwargs[\"bounds_error\"] = kwargs.get(\"bounds_error\", False)\n\n # target dimensions\n dims = list(indexes_coords)\n x, new_x = zip(*[indexes_coords[d] for d in dims])\n destination = broadcast_variables(*new_x)\n\n # transpose to make the interpolated axis to the last position\n broadcast_dims = [d for d in var.dims if d not in dims]\n original_dims = broadcast_dims + dims\n new_dims = broadcast_dims + list(destination[0].dims)\n interped = interp_func(\n var.transpose(*original_dims).data, x, destination, method, kwargs\n )\n\n result = Variable(new_dims, interped, attrs=var.attrs)\n\n # dimension of the output array\n out_dims = OrderedSet()\n for d in var.dims:\n if d in dims:\n out_dims.update(indexes_coords[d][1].dims)\n else:\n out_dims.add(d)\n return result.transpose(*tuple(out_dims))\n\n\ndef interp_func(var, x, new_x, method, kwargs):\n \"\"\"\n multi-dimensional interpolation for array-like. Interpolated axes should be\n located in the last position.\n\n Parameters\n ----------\n var: np.ndarray or dask.array.Array\n Array to be interpolated. The final dimension is interpolated.\n x: a list of 1d array.\n Original coordinates. Should not contain NaN.\n new_x: a list of 1d array\n New coordinates. 
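[editor's sketch, not part of the indexed missing.py source] The public entry point that funnels into interp()/interp_func() above is DataArray.interp; a short sketch with made-up data (scipy is required):

    import numpy as np
    import xarray as xr

    da = xr.DataArray(np.sin(np.linspace(0, 2 * np.pi, 10)),
                      dims="x", coords={"x": np.linspace(0.0, 1.0, 10)})

    # 1-D target coordinates -> _interp1d with an interpolator chosen by _get_interpolator.
    out = da.interp(x=[0.05, 0.15, 0.25], method="linear")

    # Multi-dimensional targets instead go through _get_interpolator_nd /
    # scipy.interpolate.interpn, so only 'linear' and 'nearest' are allowed there.
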
Should not contain NaN.\n method: string\n {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} for\n 1-dimensional itnterpolation.\n {'linear', 'nearest'} for multidimensional interpolation\n **kwargs:\n Optional keyword arguments to be passed to scipy.interpolator\n\n Returns\n -------\n interpolated: array\n Interpolated array\n\n Note\n ----\n This requiers scipy installed.\n\n See Also\n --------\n scipy.interpolate.interp1d\n \"\"\"\n if not x:\n return var.copy()\n\n if len(x) == 1:\n func, kwargs = _get_interpolator(method, vectorizeable_only=True, **kwargs)\n else:\n func, kwargs = _get_interpolator_nd(method, **kwargs)\n\n if isinstance(var, dask_array_type):\n import dask.array as da\n\n _assert_single_chunk(var, range(var.ndim - len(x), var.ndim))\n chunks = var.chunks[: -len(x)] + new_x[0].shape\n drop_axis = range(var.ndim - len(x), var.ndim)\n new_axis = range(var.ndim - len(x), var.ndim - len(x) + new_x[0].ndim)\n return da.map_blocks(\n _interpnd,\n var,\n x,\n new_x,\n func,\n kwargs,\n dtype=var.dtype,\n chunks=chunks,\n new_axis=new_axis,\n drop_axis=drop_axis,\n )\n\n return _interpnd(var, x, new_x, func, kwargs)\n\n\ndef _interp1d(var, x, new_x, func, kwargs):\n # x, new_x are tuples of size 1.\n x, new_x = x[0], new_x[0]\n rslt = func(x, var, assume_sorted=True, **kwargs)(np.ravel(new_x))\n if new_x.ndim > 1:\n return rslt.reshape(var.shape[:-1] + new_x.shape)\n if new_x.ndim == 0:\n return rslt[..., -1]\n return rslt\n\n\ndef _interpnd(var, x, new_x, func, kwargs):\n x, new_x = _floatize_x(x, new_x)\n\n if len(x) == 1:\n return _interp1d(var, x, new_x, func, kwargs)\n\n # move the interpolation axes to the start position\n var = var.transpose(range(-len(x), var.ndim - len(x)))\n # stack new_x to 1 vector, with reshape\n xi = np.stack([x1.values.ravel() for x1 in new_x], axis=-1)\n rslt = func(x, var, xi, **kwargs)\n # move back the interpolation axes to the last position\n rslt = rslt.transpose(range(-rslt.ndim + 1, 1))\n return rslt.reshape(rslt.shape[:-1] + new_x[0].shape)\n"},{"attributeType":"null","col":8,"comment":"null","endLoc":33,"id":2881,"name":"datastore","nodeType":"Attribute","startLoc":33,"text":"self.datastore"},{"attributeType":"null","col":8,"comment":"null","endLoc":40,"id":2882,"name":"dtype","nodeType":"Attribute","startLoc":40,"text":"self.dtype"},{"className":"BaseInterpolator","col":0,"comment":"Generic interpolator class for normalizing interpolation methods\n ","endLoc":31,"id":2883,"nodeType":"Class","startLoc":16,"text":"class BaseInterpolator:\n \"\"\"Generic interpolator class for normalizing interpolation methods\n \"\"\"\n\n cons_kwargs: Dict[str, Any]\n call_kwargs: Dict[str, Any]\n f: Callable\n method: str\n\n def __call__(self, x):\n return self.f(x, **self.call_kwargs)\n\n def __repr__(self):\n return \"{type}: method={method}\".format(\n type=self.__class__.__name__, method=self.method\n )"},{"col":4,"comment":"null","endLoc":26,"header":"def __call__(self, x)","id":2884,"name":"__call__","nodeType":"Function","startLoc":25,"text":"def __call__(self, x):\n return self.f(x, **self.call_kwargs)"},{"attributeType":"null","col":8,"comment":"null","endLoc":34,"id":2885,"name":"variable_name","nodeType":"Attribute","startLoc":34,"text":"self.variable_name"},{"fileName":"common.py","filePath":"xarray/core","id":2886,"nodeType":"File","text":"import warnings\nfrom contextlib import suppress\nfrom textwrap import dedent\nfrom typing import (\n Any,\n Callable,\n Dict,\n Hashable,\n Iterable,\n Iterator,\n List,\n Mapping,\n 
Tuple,\n TypeVar,\n Union,\n)\n\nimport numpy as np\nimport pandas as pd\n\nfrom . import dtypes, duck_array_ops, formatting, ops\nfrom .arithmetic import SupportsArithmetic\nfrom .npcompat import DTypeLike\nfrom .options import _get_keep_attrs\nfrom .pycompat import dask_array_type\nfrom .rolling_exp import RollingExp\nfrom .utils import Frozen, ReprObject, either_dict_or_kwargs\n\n# Used as a sentinel value to indicate a all dimensions\nALL_DIMS = ReprObject(\"\")\n\n\nC = TypeVar(\"C\")\nT = TypeVar(\"T\")\n\n\nclass ImplementsArrayReduce:\n __slots__ = ()\n\n @classmethod\n def _reduce_method(cls, func: Callable, include_skipna: bool, numeric_only: bool):\n if include_skipna:\n\n def wrapped_func(self, dim=None, axis=None, skipna=None, **kwargs):\n return self.reduce(\n func, dim, axis, skipna=skipna, allow_lazy=True, **kwargs\n )\n\n else:\n\n def wrapped_func(self, dim=None, axis=None, **kwargs): # type: ignore\n return self.reduce(func, dim, axis, allow_lazy=True, **kwargs)\n\n return wrapped_func\n\n _reduce_extra_args_docstring = dedent(\n \"\"\"\\\n dim : str or sequence of str, optional\n Dimension(s) over which to apply `{name}`.\n axis : int or sequence of int, optional\n Axis(es) over which to apply `{name}`. Only one of the 'dim'\n and 'axis' arguments can be supplied. If neither are supplied, then\n `{name}` is calculated over axes.\"\"\"\n )\n\n _cum_extra_args_docstring = dedent(\n \"\"\"\\\n dim : str or sequence of str, optional\n Dimension over which to apply `{name}`.\n axis : int or sequence of int, optional\n Axis over which to apply `{name}`. Only one of the 'dim'\n and 'axis' arguments can be supplied.\"\"\"\n )\n\n\nclass ImplementsDatasetReduce:\n __slots__ = ()\n\n @classmethod\n def _reduce_method(cls, func: Callable, include_skipna: bool, numeric_only: bool):\n if include_skipna:\n\n def wrapped_func(self, dim=None, skipna=None, **kwargs):\n return self.reduce(\n func,\n dim,\n skipna=skipna,\n numeric_only=numeric_only,\n allow_lazy=True,\n **kwargs\n )\n\n else:\n\n def wrapped_func(self, dim=None, **kwargs): # type: ignore\n return self.reduce(\n func, dim, numeric_only=numeric_only, allow_lazy=True, **kwargs\n )\n\n return wrapped_func\n\n _reduce_extra_args_docstring = \"\"\"dim : str or sequence of str, optional\n Dimension(s) over which to apply `{name}`. By default `{name}` is\n applied over all dimensions.\"\"\"\n\n _cum_extra_args_docstring = \"\"\"dim : str or sequence of str, optional\n Dimension over which to apply `{name}`.\n axis : int or sequence of int, optional\n Axis over which to apply `{name}`. 
Only one of the 'dim'\n and 'axis' arguments can be supplied.\"\"\"\n\n\nclass AbstractArray(ImplementsArrayReduce):\n \"\"\"Shared base class for DataArray and Variable.\n \"\"\"\n\n __slots__ = ()\n\n def __bool__(self: Any) -> bool:\n return bool(self.values)\n\n def __float__(self: Any) -> float:\n return float(self.values)\n\n def __int__(self: Any) -> int:\n return int(self.values)\n\n def __complex__(self: Any) -> complex:\n return complex(self.values)\n\n def __array__(self: Any, dtype: DTypeLike = None) -> np.ndarray:\n return np.asarray(self.values, dtype=dtype)\n\n def __repr__(self) -> str:\n return formatting.array_repr(self)\n\n def _iter(self: Any) -> Iterator[Any]:\n for n in range(len(self)):\n yield self[n]\n\n def __iter__(self: Any) -> Iterator[Any]:\n if self.ndim == 0:\n raise TypeError(\"iteration over a 0-d array\")\n return self._iter()\n\n def get_axis_num(\n self, dim: Union[Hashable, Iterable[Hashable]]\n ) -> Union[int, Tuple[int, ...]]:\n \"\"\"Return axis number(s) corresponding to dimension(s) in this array.\n\n Parameters\n ----------\n dim : str or iterable of str\n Dimension name(s) for which to lookup axes.\n\n Returns\n -------\n int or tuple of int\n Axis number or numbers corresponding to the given dimensions.\n \"\"\"\n if isinstance(dim, Iterable) and not isinstance(dim, str):\n return tuple(self._get_axis_num(d) for d in dim)\n else:\n return self._get_axis_num(dim)\n\n def _get_axis_num(self: Any, dim: Hashable) -> int:\n try:\n return self.dims.index(dim)\n except ValueError:\n raise ValueError(\"%r not found in array dimensions %r\" % (dim, self.dims))\n\n @property\n def sizes(self: Any) -> Mapping[Hashable, int]:\n \"\"\"Ordered mapping from dimension names to lengths.\n\n Immutable.\n\n See also\n --------\n Dataset.sizes\n \"\"\"\n return Frozen(dict(zip(self.dims, self.shape)))\n\n\nclass AttrAccessMixin:\n \"\"\"Mixin class that allows getting keys with attribute access\n \"\"\"\n\n __slots__ = ()\n\n def __init_subclass__(cls):\n \"\"\"Verify that all subclasses explicitly define ``__slots__``. 
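[editor's sketch, not part of the indexed common.py source] The dimension/axis helpers defined on AbstractArray above behave as follows on a made-up array:

    import numpy as np
    import xarray as xr

    da = xr.DataArray(np.zeros((2, 3, 4)), dims=("time", "y", "x"))

    da.get_axis_num("y")            # 1
    da.get_axis_num(("x", "time"))  # (2, 0) -- an iterable of names returns a tuple
    da.sizes                        # frozen mapping: {'time': 2, 'y': 3, 'x': 4}
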
If they don't,\n raise error in the core xarray module and a FutureWarning in third-party\n extensions.\n \"\"\"\n if not hasattr(object.__new__(cls), \"__dict__\"):\n pass\n elif cls.__module__.startswith(\"xarray.\"):\n raise AttributeError(\"%s must explicitly define __slots__\" % cls.__name__)\n else:\n cls.__setattr__ = cls._setattr_dict\n warnings.warn(\n \"xarray subclass %s should explicitly define __slots__\" % cls.__name__,\n FutureWarning,\n stacklevel=2,\n )\n\n @property\n def _attr_sources(self) -> List[Mapping[Hashable, Any]]:\n \"\"\"List of places to look-up items for attribute-style access\n \"\"\"\n return []\n\n @property\n def _item_sources(self) -> List[Mapping[Hashable, Any]]:\n \"\"\"List of places to look-up items for key-autocompletion\n \"\"\"\n return []\n\n def __getattr__(self, name: str) -> Any:\n if name not in {\"__dict__\", \"__setstate__\"}:\n # this avoids an infinite loop when pickle looks for the\n # __setstate__ attribute before the xarray object is initialized\n for source in self._attr_sources:\n with suppress(KeyError):\n return source[name]\n raise AttributeError(\n \"%r object has no attribute %r\" % (type(self).__name__, name)\n )\n\n # This complicated two-method design boosts overall performance of simple operations\n # - particularly DataArray methods that perform a _to_temp_dataset() round-trip - by\n # a whopping 8% compared to a single method that checks hasattr(self, \"__dict__\") at\n # runtime before every single assignment. All of this is just temporary until the\n # FutureWarning can be changed into a hard crash.\n def _setattr_dict(self, name: str, value: Any) -> None:\n \"\"\"Deprecated third party subclass (see ``__init_subclass__`` above)\n \"\"\"\n object.__setattr__(self, name, value)\n if name in self.__dict__:\n # Custom, non-slotted attr, or improperly assigned variable?\n warnings.warn(\n \"Setting attribute %r on a %r object. Explicitly define __slots__ \"\n \"to suppress this warning for legitimate custom attributes and \"\n \"raise an error when attempting variables assignments.\"\n % (name, type(self).__name__),\n FutureWarning,\n stacklevel=2,\n )\n\n def __setattr__(self, name: str, value: Any) -> None:\n \"\"\"Objects with ``__slots__`` raise AttributeError if you try setting an\n undeclared attribute. This is desirable, but the error message could use some\n improvement.\n \"\"\"\n try:\n object.__setattr__(self, name, value)\n except AttributeError as e:\n # Don't accidentally shadow custom AttributeErrors, e.g.\n # DataArray.dims.setter\n if str(e) != \"%r object has no attribute %r\" % (type(self).__name__, name):\n raise\n raise AttributeError(\n \"cannot set attribute %r on a %r object. Use __setitem__ style\"\n \"assignment (e.g., `ds['name'] = ...`) instead of assigning variables.\"\n % (name, type(self).__name__)\n ) from e\n\n def __dir__(self) -> List[str]:\n \"\"\"Provide method name lookup and completion. 
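[editor's sketch, not part of the indexed common.py source] What the attribute-access machinery above means in practice, on a hypothetical dataset: lookup works through __getattr__, but new variables must be assigned with item syntax, as the __setattr__ error message advises.

    import numpy as np
    import xarray as xr

    ds = xr.Dataset({"air": ("x", np.arange(3))})

    ds.air                     # same lookup as ds["air"], resolved via _attr_sources
    ds["scaled"] = ds.air * 2  # adding a variable must use item assignment
    # ds.scaled = ds.air * 2   # raises the AttributeError constructed in __setattr__
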
Only provide 'public'\n methods.\n \"\"\"\n extra_attrs = [\n item\n for sublist in self._attr_sources\n for item in sublist\n if isinstance(item, str)\n ]\n return sorted(set(dir(type(self)) + extra_attrs))\n\n def _ipython_key_completions_(self) -> List[str]:\n \"\"\"Provide method for the key-autocompletions in IPython.\n See http://ipython.readthedocs.io/en/stable/config/integrating.html#tab-completion\n For the details.\n \"\"\"\n item_lists = [\n item\n for sublist in self._item_sources\n for item in sublist\n if isinstance(item, str)\n ]\n return list(set(item_lists))\n\n\ndef get_squeeze_dims(\n xarray_obj,\n dim: Union[Hashable, Iterable[Hashable], None] = None,\n axis: Union[int, Iterable[int], None] = None,\n) -> List[Hashable]:\n \"\"\"Get a list of dimensions to squeeze out.\n \"\"\"\n if dim is not None and axis is not None:\n raise ValueError(\"cannot use both parameters `axis` and `dim`\")\n if dim is None and axis is None:\n return [d for d, s in xarray_obj.sizes.items() if s == 1]\n\n if isinstance(dim, Iterable) and not isinstance(dim, str):\n dim = list(dim)\n elif dim is not None:\n dim = [dim]\n else:\n assert axis is not None\n if isinstance(axis, int):\n axis = [axis]\n axis = list(axis)\n if any(not isinstance(a, int) for a in axis):\n raise TypeError(\"parameter `axis` must be int or iterable of int.\")\n alldims = list(xarray_obj.sizes.keys())\n dim = [alldims[a] for a in axis]\n\n if any(xarray_obj.sizes[k] > 1 for k in dim):\n raise ValueError(\n \"cannot select a dimension to squeeze out \"\n \"which has length greater than one\"\n )\n return dim\n\n\nclass DataWithCoords(SupportsArithmetic, AttrAccessMixin):\n \"\"\"Shared base class for Dataset and DataArray.\"\"\"\n\n __slots__ = ()\n\n _rolling_exp_cls = RollingExp\n\n def squeeze(\n self,\n dim: Union[Hashable, Iterable[Hashable], None] = None,\n drop: bool = False,\n axis: Union[int, Iterable[int], None] = None,\n ):\n \"\"\"Return a new object with squeezed data.\n\n Parameters\n ----------\n dim : None or Hashable or iterable of Hashable, optional\n Selects a subset of the length one dimensions. If a dimension is\n selected with length greater than one, an error is raised. 
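[editor's sketch, not part of the indexed common.py source] A brief illustration of get_squeeze_dims/squeeze above on a made-up array:

    import numpy as np
    import xarray as xr

    da = xr.DataArray(np.zeros((1, 3, 1)), dims=("band", "x", "sample"))

    da.squeeze().dims        # ('x',)           -- all length-1 dimensions dropped
    da.squeeze("band").dims  # ('x', 'sample')
    da.squeeze(axis=2).dims  # ('band', 'x')    -- axis is the positional form of dim
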
If\n None, all length one dimensions are squeezed.\n drop : bool, optional\n If ``drop=True``, drop squeezed coordinates instead of making them\n scalar.\n axis : None or int or iterable of int, optional\n Like dim, but positional.\n\n Returns\n -------\n squeezed : same type as caller\n This object, but with with all or a subset of the dimensions of\n length 1 removed.\n\n See Also\n --------\n numpy.squeeze\n \"\"\"\n dims = get_squeeze_dims(self, dim, axis)\n return self.isel(drop=drop, **{d: 0 for d in dims})\n\n def get_index(self, key: Hashable) -> pd.Index:\n \"\"\"Get an index for a dimension, with fall-back to a default RangeIndex\n \"\"\"\n if key not in self.dims:\n raise KeyError(key)\n\n try:\n return self.indexes[key]\n except KeyError:\n # need to ensure dtype=int64 in case range is empty on Python 2\n return pd.Index(range(self.sizes[key]), name=key, dtype=np.int64)\n\n def _calc_assign_results(\n self: C, kwargs: Mapping[Hashable, Union[T, Callable[[C], T]]]\n ) -> Dict[Hashable, T]:\n return {k: v(self) if callable(v) else v for k, v in kwargs.items()}\n\n def assign_coords(self, coords=None, **coords_kwargs):\n \"\"\"Assign new coordinates to this object.\n\n Returns a new object with all the original data in addition to the new\n coordinates.\n\n Parameters\n ----------\n coords : dict, optional\n A dict with keys which are variables names. If the values are\n callable, they are computed on this object and assigned to new\n coordinate variables. If the values are not callable,\n (e.g. a ``DataArray``, scalar, or array), they are simply assigned.\n\n **coords_kwargs : keyword, value pairs, optional\n The keyword arguments form of ``coords``.\n One of ``coords`` or ``coords_kwargs`` must be provided.\n\n Returns\n -------\n assigned : same type as caller\n A new object with the new coordinates in addition to the existing\n data.\n\n Examples\n --------\n Convert longitude coordinates from 0-359 to -180-179:\n\n >>> da = xr.DataArray(np.random.rand(4),\n ... coords=[np.array([358, 359, 0, 1])],\n ... dims='lon')\n >>> da\n \n array([0.28298 , 0.667347, 0.657938, 0.177683])\n Coordinates:\n * lon (lon) int64 358 359 0 1\n >>> da.assign_coords(lon=(((da.lon + 180) % 360) - 180))\n \n array([0.28298 , 0.667347, 0.657938, 0.177683])\n Coordinates:\n * lon (lon) int64 -2 -1 0 1\n\n The function also accepts dictionary arguments:\n\n >>> da.assign_coords({'lon': (((da.lon + 180) % 360) - 180)})\n \n array([0.28298 , 0.667347, 0.657938, 0.177683])\n Coordinates:\n * lon (lon) int64 -2 -1 0 1\n\n Notes\n -----\n Since ``coords_kwargs`` is a dictionary, the order of your arguments may\n not be preserved, and so the order of the new variables is not well\n defined. 
Assigning multiple variables within the same ``assign_coords``\n is possible, but you cannot reference other variables created within\n the same ``assign_coords`` call.\n\n See also\n --------\n Dataset.assign\n Dataset.swap_dims\n \"\"\"\n coords_kwargs = either_dict_or_kwargs(coords, coords_kwargs, \"assign_coords\")\n data = self.copy(deep=False)\n results = self._calc_assign_results(coords_kwargs)\n data.coords.update(results)\n return data\n\n def assign_attrs(self, *args, **kwargs):\n \"\"\"Assign new attrs to this object.\n\n Returns a new object equivalent to self.attrs.update(*args, **kwargs).\n\n Parameters\n ----------\n args : positional arguments passed into ``attrs.update``.\n kwargs : keyword arguments passed into ``attrs.update``.\n\n Returns\n -------\n assigned : same type as caller\n A new object with the new attrs in addition to the existing data.\n\n See also\n --------\n Dataset.assign\n \"\"\"\n out = self.copy(deep=False)\n out.attrs.update(*args, **kwargs)\n return out\n\n def pipe(\n self,\n func: Union[Callable[..., T], Tuple[Callable[..., T], str]],\n *args,\n **kwargs\n ) -> T:\n \"\"\"\n Apply func(self, *args, **kwargs)\n\n This method replicates the pandas method of the same name.\n\n Parameters\n ----------\n func : function\n function to apply to this xarray object (Dataset/DataArray).\n ``args``, and ``kwargs`` are passed into ``func``.\n Alternatively a ``(callable, data_keyword)`` tuple where\n ``data_keyword`` is a string indicating the keyword of\n ``callable`` that expects the xarray object.\n args : positional arguments passed into ``func``.\n kwargs : a dictionary of keyword arguments passed into ``func``.\n\n Returns\n -------\n object : the return type of ``func``.\n\n Notes\n -----\n\n Use ``.pipe`` when chaining together functions that expect\n xarray or pandas objects, e.g., instead of writing\n\n >>> f(g(h(ds), arg1=a), arg2=b, arg3=c)\n\n You can write\n\n >>> (ds.pipe(h)\n ... .pipe(g, arg1=a)\n ... .pipe(f, arg2=b, arg3=c)\n ... )\n\n If you have a function that takes the data as (say) the second\n argument, pass a tuple indicating which keyword expects the\n data. For example, suppose ``f`` takes its data as ``arg2``:\n\n >>> (ds.pipe(h)\n ... .pipe(g, arg1=a)\n ... .pipe((f, 'arg2'), arg1=a, arg3=c)\n ... )\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> x = xr.Dataset(\n ... {\n ... \"temperature_c\": ((\"lat\", \"lon\"), 20 * np.random.rand(4).reshape(2, 2)),\n ... \"precipitation\": ((\"lat\", \"lon\"), np.random.rand(4).reshape(2, 2)),\n ... },\n ... coords={\"lat\": [10, 20], \"lon\": [150, 160]},\n ... )\n >>> x\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 14.53 11.85 19.27 16.37\n precipitation (lat, lon) float64 0.7315 0.7189 0.8481 0.4671\n\n >>> def adder(data, arg):\n ... return data + arg\n ...\n >>> def div(data, arg):\n ... return data / arg\n ...\n >>> def sub_mult(data, sub_arg, mult_arg):\n ... 
return (data * mult_arg) - sub_arg\n ...\n >>> x.pipe(adder, 2)\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lon (lon) int64 150 160\n * lat (lat) int64 10 20\n Data variables:\n temperature_c (lat, lon) float64 16.53 13.85 21.27 18.37\n precipitation (lat, lon) float64 2.731 2.719 2.848 2.467\n\n >>> x.pipe(adder, arg=2)\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lon (lon) int64 150 160\n * lat (lat) int64 10 20\n Data variables:\n temperature_c (lat, lon) float64 16.53 13.85 21.27 18.37\n precipitation (lat, lon) float64 2.731 2.719 2.848 2.467\n\n >>> (\n ... x\n ... .pipe(adder, arg=2)\n ... .pipe(div, arg=2)\n ... .pipe(sub_mult, sub_arg=2, mult_arg=2)\n ... )\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lon (lon) int64 150 160\n * lat (lat) int64 10 20\n Data variables:\n temperature_c (lat, lon) float64 14.53 11.85 19.27 16.37\n precipitation (lat, lon) float64 0.7315 0.7189 0.8481 0.4671\n\n See Also\n --------\n pandas.DataFrame.pipe\n \"\"\"\n if isinstance(func, tuple):\n func, target = func\n if target in kwargs:\n raise ValueError(\n \"%s is both the pipe target and a keyword \" \"argument\" % target\n )\n kwargs[target] = self\n return func(*args, **kwargs)\n else:\n return func(self, *args, **kwargs)\n\n def groupby(self, group, squeeze: bool = True, restore_coord_dims: bool = None):\n \"\"\"Returns a GroupBy object for performing grouped operations.\n\n Parameters\n ----------\n group : str, DataArray or IndexVariable\n Array whose unique values should be used to group this array. If a\n string, must be the name of a variable contained in this dataset.\n squeeze : boolean, optional\n If \"group\" is a dimension of any arrays in this dataset, `squeeze`\n controls whether the subarrays have a dimension of length 1 along\n that dimension or if the dimension is squeezed out.\n restore_coord_dims : bool, optional\n If True, also restore the dimension order of multi-dimensional\n coordinates.\n\n Returns\n -------\n grouped : GroupBy\n A `GroupBy` object patterned after `pandas.GroupBy` that can be\n iterated over in the form of `(unique_value, grouped_array)` pairs.\n\n Examples\n --------\n Calculate daily anomalies for daily data:\n\n >>> da = xr.DataArray(np.linspace(0, 1826, num=1827),\n ... coords=[pd.date_range('1/1/2000', '31/12/2004',\n ... freq='D')],\n ... 
dims='time')\n >>> da\n \n array([0.000e+00, 1.000e+00, 2.000e+00, ..., 1.824e+03, 1.825e+03, 1.826e+03])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...\n >>> da.groupby('time.dayofyear') - da.groupby('time.dayofyear').mean('time')\n \n array([-730.8, -730.8, -730.8, ..., 730.2, 730.2, 730.5])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...\n dayofyear (time) int64 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 ...\n\n See Also\n --------\n core.groupby.DataArrayGroupBy\n core.groupby.DatasetGroupBy\n \"\"\"\n return self._groupby_cls(\n self, group, squeeze=squeeze, restore_coord_dims=restore_coord_dims\n )\n\n def groupby_bins(\n self,\n group,\n bins,\n right: bool = True,\n labels=None,\n precision: int = 3,\n include_lowest: bool = False,\n squeeze: bool = True,\n restore_coord_dims: bool = None,\n ):\n \"\"\"Returns a GroupBy object for performing grouped operations.\n\n Rather than using all unique values of `group`, the values are discretized\n first by applying `pandas.cut` [1]_ to `group`.\n\n Parameters\n ----------\n group : str, DataArray or IndexVariable\n Array whose binned values should be used to group this array. If a\n string, must be the name of a variable contained in this dataset.\n bins : int or array of scalars\n If bins is an int, it defines the number of equal-width bins in the\n range of x. However, in this case, the range of x is extended by .1%\n on each side to include the min or max values of x. If bins is a\n sequence it defines the bin edges allowing for non-uniform bin\n width. No extension of the range of x is done in this case.\n right : boolean, optional\n Indicates whether the bins include the rightmost edge or not. If\n right == True (the default), then the bins [1,2,3,4] indicate\n (1,2], (2,3], (3,4].\n labels : array or boolean, default None\n Used as labels for the resulting bins. Must be of the same length as\n the resulting bins. If False, string bin labels are assigned by\n `pandas.cut`.\n precision : int\n The precision at which to store and display the bins labels.\n include_lowest : bool\n Whether the first interval should be left-inclusive or not.\n squeeze : boolean, optional\n If \"group\" is a dimension of any arrays in this dataset, `squeeze`\n controls whether the subarrays have a dimension of length 1 along\n that dimension or if the dimension is squeezed out.\n restore_coord_dims : bool, optional\n If True, also restore the dimension order of multi-dimensional\n coordinates.\n\n Returns\n -------\n grouped : GroupBy\n A `GroupBy` object patterned after `pandas.GroupBy` that can be\n iterated over in the form of `(unique_value, grouped_array)` pairs.\n The name of the group has the added suffix `_bins` in order to\n distinguish it from the original variable.\n\n References\n ----------\n .. [1] http://pandas.pydata.org/pandas-docs/stable/generated/pandas.cut.html\n \"\"\"\n return self._groupby_cls(\n self,\n group,\n squeeze=squeeze,\n bins=bins,\n restore_coord_dims=restore_coord_dims,\n cut_kwargs={\n \"right\": right,\n \"labels\": labels,\n \"precision\": precision,\n \"include_lowest\": include_lowest,\n },\n )\n\n def rolling(\n self,\n dim: Mapping[Hashable, int] = None,\n min_periods: int = None,\n center: bool = False,\n **window_kwargs: int\n ):\n \"\"\"\n Rolling window object.\n\n Parameters\n ----------\n dim: dict, optional\n Mapping from the dimension name to create the rolling iterator\n along (e.g. 
`time`) to its moving window size.\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA). The default, None, is equivalent to\n setting min_periods equal to the size of the window.\n center : boolean, default False\n Set the labels at the center of the window.\n **window_kwargs : optional\n The keyword arguments form of ``dim``.\n One of dim or window_kwargs must be provided.\n\n Returns\n -------\n Rolling object (core.rolling.DataArrayRolling for DataArray,\n core.rolling.DatasetRolling for Dataset.)\n\n Examples\n --------\n Create rolling seasonal average of monthly data e.g. DJF, JFM, ..., SON:\n\n >>> da = xr.DataArray(np.linspace(0, 11, num=12),\n ... coords=[pd.date_range('15/12/1999',\n ... periods=12, freq=pd.DateOffset(months=1))],\n ... dims='time')\n >>> da\n \n array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ...\n >>> da.rolling(time=3, center=True).mean()\n \n array([nan, 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., nan])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ...\n\n Remove the NaNs using ``dropna()``:\n\n >>> da.rolling(time=3, center=True).mean().dropna('time')\n \n array([ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-15 2000-02-15 2000-03-15 ...\n\n See Also\n --------\n core.rolling.DataArrayRolling\n core.rolling.DatasetRolling\n \"\"\"\n dim = either_dict_or_kwargs(dim, window_kwargs, \"rolling\")\n return self._rolling_cls(self, dim, min_periods=min_periods, center=center)\n\n def rolling_exp(\n self,\n window: Mapping[Hashable, int] = None,\n window_type: str = \"span\",\n **window_kwargs\n ):\n \"\"\"\n Exponentially-weighted moving window.\n Similar to EWM in pandas\n\n Requires the optional Numbagg dependency.\n\n Parameters\n ----------\n window : A single mapping from a dimension name to window value,\n optional\n dim : str\n Name of the dimension to create the rolling exponential window\n along (e.g., `time`).\n window : int\n Size of the moving window. The type of this is specified in\n `window_type`\n window_type : str, one of ['span', 'com', 'halflife', 'alpha'],\n default 'span'\n The format of the previously supplied window. Each is a simple\n numerical transformation of the others. Described in detail:\n https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.ewm.html\n **window_kwargs : optional\n The keyword arguments form of ``window``.\n One of window or window_kwargs must be provided.\n\n See Also\n --------\n core.rolling_exp.RollingExp\n \"\"\"\n window = either_dict_or_kwargs(window, window_kwargs, \"rolling_exp\")\n\n return self._rolling_exp_cls(self, window, window_type)\n\n def coarsen(\n self,\n dim: Mapping[Hashable, int] = None,\n boundary: str = \"exact\",\n side: Union[str, Mapping[Hashable, str]] = \"left\",\n coord_func: str = \"mean\",\n **window_kwargs: int\n ):\n \"\"\"\n Coarsen object.\n\n Parameters\n ----------\n dim: dict, optional\n Mapping from the dimension name to the window size.\n dim : str\n Name of the dimension to create the rolling iterator\n along (e.g., `time`).\n window : int\n Size of the moving window.\n boundary : 'exact' | 'trim' | 'pad'\n If 'exact', a ValueError will be raised if dimension size is not a\n multiple of the window size. If 'trim', the excess entries are\n dropped. 
If 'pad', NA will be padded.\n side : 'left' or 'right' or mapping from dimension to 'left' or 'right'\n coord_func: function (name) that is applied to the coordintes,\n or a mapping from coordinate name to function (name).\n\n Returns\n -------\n Coarsen object (core.rolling.DataArrayCoarsen for DataArray,\n core.rolling.DatasetCoarsen for Dataset.)\n\n Examples\n --------\n Coarsen the long time series by averaging over every four days.\n\n >>> da = xr.DataArray(np.linspace(0, 364, num=364),\n ... dims='time',\n ... coords={'time': pd.date_range(\n ... '15/12/1999', periods=364)})\n >>> da\n \n array([ 0. , 1.002755, 2.00551 , ..., 361.99449 , 362.997245,\n 364. ])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 1999-12-16 ... 2000-12-12\n >>>\n >>> da.coarsen(time=3, boundary='trim').mean()\n \n array([ 1.002755, 4.011019, 7.019284, ..., 358.986226,\n 361.99449 ])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-16 1999-12-19 ... 2000-12-10\n >>>\n\n See Also\n --------\n core.rolling.DataArrayCoarsen\n core.rolling.DatasetCoarsen\n \"\"\"\n dim = either_dict_or_kwargs(dim, window_kwargs, \"coarsen\")\n return self._coarsen_cls(\n self, dim, boundary=boundary, side=side, coord_func=coord_func\n )\n\n def resample(\n self,\n indexer: Mapping[Hashable, str] = None,\n skipna=None,\n closed: str = None,\n label: str = None,\n base: int = 0,\n keep_attrs: bool = None,\n loffset=None,\n restore_coord_dims: bool = None,\n **indexer_kwargs: str\n ):\n \"\"\"Returns a Resample object for performing resampling operations.\n\n Handles both downsampling and upsampling. If any intervals contain no\n values from the original object, they will be given the value ``NaN``.\n\n Parameters\n ----------\n indexer : {dim: freq}, optional\n Mapping from the dimension name to resample frequency.\n skipna : bool, optional\n Whether to skip missing values when aggregating in downsampling.\n closed : 'left' or 'right', optional\n Side of each interval to treat as closed.\n label : 'left or 'right', optional\n Side of each interval to use for labeling.\n base : int, optional\n For frequencies that evenly subdivide 1 day, the \"origin\" of the\n aggregated intervals. For example, for '24H' frequency, base could\n range from 0 through 23.\n loffset : timedelta or str, optional\n Offset used to adjust the resampled time labels. Some pandas date\n offset strings are supported.\n keep_attrs : bool, optional\n If True, the object's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n restore_coord_dims : bool, optional\n If True, also restore the dimension order of multi-dimensional\n coordinates.\n **indexer_kwargs : {dim: freq}\n The keyword arguments form of ``indexer``.\n One of indexer or indexer_kwargs must be provided.\n\n Returns\n -------\n resampled : same type as caller\n This object resampled.\n\n Examples\n --------\n Downsample monthly time-series data to seasonal data:\n\n >>> da = xr.DataArray(np.linspace(0, 11, num=12),\n ... coords=[pd.date_range('15/12/1999',\n ... periods=12, freq=pd.DateOffset(months=1))],\n ... 
dims='time')\n >>> da\n \n array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 2000-01-15 2000-02-15 ...\n >>> da.resample(time=\"QS-DEC\").mean()\n \n array([ 1., 4., 7., 10.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-01 2000-03-01 2000-06-01 2000-09-01\n\n Upsample monthly time-series data to daily data:\n\n >>> da.resample(time='1D').interpolate('linear')\n \n array([ 0. , 0.032258, 0.064516, ..., 10.935484, 10.967742, 11. ])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 1999-12-16 1999-12-17 ...\n\n Limit scope of upsampling method\n >>> da.resample(time='1D').nearest(tolerance='1D')\n \n array([ 0., 0., nan, ..., nan, 11., 11.])\n Coordinates:\n * time (time) datetime64[ns] 1999-12-15 1999-12-16 ... 2000-11-15\n\n References\n ----------\n\n .. [1] http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases\n \"\"\"\n # TODO support non-string indexer after removing the old API.\n\n from .dataarray import DataArray\n from .resample import RESAMPLE_DIM\n from ..coding.cftimeindex import CFTimeIndex\n\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n\n # note: the second argument (now 'skipna') use to be 'dim'\n if (\n (skipna is not None and not isinstance(skipna, bool))\n or (\"how\" in indexer_kwargs and \"how\" not in self.dims)\n or (\"dim\" in indexer_kwargs and \"dim\" not in self.dims)\n ):\n raise TypeError(\n \"resample() no longer supports the `how` or \"\n \"`dim` arguments. Instead call methods on resample \"\n \"objects, e.g., data.resample(time='1D').mean()\"\n )\n\n indexer = either_dict_or_kwargs(indexer, indexer_kwargs, \"resample\")\n if len(indexer) != 1:\n raise ValueError(\"Resampling only supported along single dimensions.\")\n dim, freq = next(iter(indexer.items()))\n\n dim_name = dim\n dim_coord = self[dim]\n\n if isinstance(self.indexes[dim_name], CFTimeIndex):\n from .resample_cftime import CFTimeGrouper\n\n grouper = CFTimeGrouper(freq, closed, label, base, loffset)\n else:\n grouper = pd.Grouper(\n freq=freq, closed=closed, label=label, base=base, loffset=loffset\n )\n group = DataArray(\n dim_coord, coords=dim_coord.coords, dims=dim_coord.dims, name=RESAMPLE_DIM\n )\n resampler = self._resample_cls(\n self,\n group=group,\n dim=dim_name,\n grouper=grouper,\n resample_dim=RESAMPLE_DIM,\n restore_coord_dims=restore_coord_dims,\n )\n\n return resampler\n\n def where(self, cond, other=dtypes.NA, drop: bool = False):\n \"\"\"Filter elements from this object according to a condition.\n\n This operation follows the normal broadcasting and alignment rules that\n xarray uses for binary arithmetic.\n\n Parameters\n ----------\n cond : DataArray or Dataset with boolean dtype\n Locations at which to preserve this object's values.\n other : scalar, DataArray or Dataset, optional\n Value to use for locations in this object where ``cond`` is False.\n By default, these locations filled with NA.\n drop : boolean, optional\n If True, coordinate labels that only correspond to False values of\n the condition are dropped from the result. 
Mutually exclusive with\n ``other``.\n\n Returns\n -------\n Same type as caller.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> a = xr.DataArray(np.arange(25).reshape(5, 5), dims=('x', 'y'))\n >>> a.where(a.x + a.y < 4)\n \n array([[ 0., 1., 2., 3., nan],\n [ 5., 6., 7., nan, nan],\n [ 10., 11., nan, nan, nan],\n [ 15., nan, nan, nan, nan],\n [ nan, nan, nan, nan, nan]])\n Dimensions without coordinates: x, y\n >>> a.where(a.x + a.y < 5, -1)\n \n array([[ 0, 1, 2, 3, 4],\n [ 5, 6, 7, 8, -1],\n [10, 11, 12, -1, -1],\n [15, 16, -1, -1, -1],\n [20, -1, -1, -1, -1]])\n Dimensions without coordinates: x, y\n >>> a.where(a.x + a.y < 4, drop=True)\n \n array([[ 0., 1., 2., 3.],\n [ 5., 6., 7., nan],\n [ 10., 11., nan, nan],\n [ 15., nan, nan, nan]])\n Dimensions without coordinates: x, y\n\n See also\n --------\n numpy.where : corresponding numpy function\n where : equivalent function\n \"\"\"\n from .alignment import align\n from .dataarray import DataArray\n from .dataset import Dataset\n\n if drop:\n if other is not dtypes.NA:\n raise ValueError(\"cannot set `other` if drop=True\")\n\n if not isinstance(cond, (Dataset, DataArray)):\n raise TypeError(\n \"cond argument is %r but must be a %r or %r\"\n % (cond, Dataset, DataArray)\n )\n\n # align so we can use integer indexing\n self, cond = align(self, cond)\n\n # get cond with the minimal size needed for the Dataset\n if isinstance(cond, Dataset):\n clipcond = cond.to_array().any(\"variable\")\n else:\n clipcond = cond\n\n # clip the data corresponding to coordinate dims that are not used\n nonzeros = zip(clipcond.dims, np.nonzero(clipcond.values))\n indexers = {k: np.unique(v) for k, v in nonzeros}\n\n self = self.isel(**indexers)\n cond = cond.isel(**indexers)\n\n return ops.where_method(self, cond, other)\n\n def close(self: Any) -> None:\n \"\"\"Close any files linked to this object\n \"\"\"\n if self._file_obj is not None:\n self._file_obj.close()\n self._file_obj = None\n\n def isin(self, test_elements):\n \"\"\"Tests each value in the array for whether it is in test elements.\n\n Parameters\n ----------\n test_elements : array_like\n The values against which to test each value of `element`.\n This argument is flattened if an array or array_like.\n See numpy notes for behavior with non-array-like parameters.\n\n Returns\n -------\n isin : same as object, bool\n Has the same shape as this object.\n\n Examples\n --------\n\n >>> array = xr.DataArray([1, 2, 3], dims='x')\n >>> array.isin([1, 3])\n \n array([ True, False, True])\n Dimensions without coordinates: x\n\n See also\n --------\n numpy.isin\n \"\"\"\n from .computation import apply_ufunc\n from .dataset import Dataset\n from .dataarray import DataArray\n from .variable import Variable\n\n if isinstance(test_elements, Dataset):\n raise TypeError(\n \"isin() argument must be convertible to an array: {}\".format(\n test_elements\n )\n )\n elif isinstance(test_elements, (Variable, DataArray)):\n # need to explicitly pull out data to support dask arrays as the\n # second argument\n test_elements = test_elements.data\n\n return apply_ufunc(\n duck_array_ops.isin,\n self,\n kwargs=dict(test_elements=test_elements),\n dask=\"allowed\",\n )\n\n def __enter__(self: T) -> T:\n return self\n\n def __exit__(self, exc_type, exc_value, traceback) -> None:\n self.close()\n\n def __getitem__(self, value):\n # implementations of this class should implement this method\n raise NotImplementedError()\n\n\ndef full_like(other, fill_value, dtype: DTypeLike = None):\n \"\"\"Return a new object 
with the same shape and type as a given object.\n\n Parameters\n ----------\n other : DataArray, Dataset, or Variable\n The reference object in input\n fill_value : scalar\n Value to fill the new object with before returning it.\n dtype : dtype, optional\n dtype of the new array. If omitted, it defaults to other.dtype.\n\n Returns\n -------\n out : same as object\n New object with the same shape and type as other, with the data\n filled with fill_value. Coords will be copied from other.\n If other is based on dask, the new one will be as well, and will be\n split in the same chunks.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> x = xr.DataArray(np.arange(6).reshape(2, 3),\n ... dims=['lat', 'lon'],\n ... coords={'lat': [1, 2], 'lon': [0, 1, 2]})\n >>> x\n \n array([[0, 1, 2],\n [3, 4, 5]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.full_like(x, 1)\n \n array([[1, 1, 1],\n [1, 1, 1]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.full_like(x, 0.5)\n \n array([[0, 0, 0],\n [0, 0, 0]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.full_like(x, 0.5, dtype=np.double)\n \n array([[0.5, 0.5, 0.5],\n [0.5, 0.5, 0.5]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.full_like(x, np.nan, dtype=np.double)\n \n array([[nan, nan, nan],\n [nan, nan, nan]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n See also\n --------\n\n zeros_like\n ones_like\n\n \"\"\"\n from .dataarray import DataArray\n from .dataset import Dataset\n from .variable import Variable\n\n if isinstance(other, Dataset):\n data_vars = {\n k: _full_like_variable(v, fill_value, dtype)\n for k, v in other.data_vars.items()\n }\n return Dataset(data_vars, coords=other.coords, attrs=other.attrs)\n elif isinstance(other, DataArray):\n return DataArray(\n _full_like_variable(other.variable, fill_value, dtype),\n dims=other.dims,\n coords=other.coords,\n attrs=other.attrs,\n name=other.name,\n )\n elif isinstance(other, Variable):\n return _full_like_variable(other, fill_value, dtype)\n else:\n raise TypeError(\"Expected DataArray, Dataset, or Variable\")\n\n\ndef _full_like_variable(other, fill_value, dtype: DTypeLike = None):\n \"\"\"Inner function of full_like, where other must be a variable\n \"\"\"\n from .variable import Variable\n\n if isinstance(other.data, dask_array_type):\n import dask.array\n\n if dtype is None:\n dtype = other.dtype\n data = dask.array.full(\n other.shape, fill_value, dtype=dtype, chunks=other.data.chunks\n )\n else:\n data = np.full_like(other, fill_value, dtype=dtype)\n\n return Variable(dims=other.dims, data=data, attrs=other.attrs)\n\n\ndef zeros_like(other, dtype: DTypeLike = None):\n \"\"\"Return a new object of zeros with the same shape and\n type as a given dataarray or dataset.\n\n Parameters\n ----------\n other : DataArray, Dataset, or Variable\n The reference object. The output will have the same dimensions and coordinates as this object.\n dtype : dtype, optional\n dtype of the new array. If omitted, it defaults to other.dtype.\n\n Returns\n -------\n out : same as object\n New object of zeros with the same shape and type as other.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> x = xr.DataArray(np.arange(6).reshape(2, 3),\n ... dims=['lat', 'lon'],\n ... 
coords={'lat': [1, 2], 'lon': [0, 1, 2]})\n >>> x\n \n array([[0, 1, 2],\n [3, 4, 5]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.zeros_like(x)\n \n array([[0, 0, 0],\n [0, 0, 0]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> xr.zeros_like(x, dtype=np.float)\n \n array([[0., 0., 0.],\n [0., 0., 0.]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n See also\n --------\n\n ones_like\n full_like\n\n \"\"\"\n return full_like(other, 0, dtype)\n\n\ndef ones_like(other, dtype: DTypeLike = None):\n \"\"\"Return a new object of ones with the same shape and\n type as a given dataarray or dataset.\n\n Parameters\n ----------\n other : DataArray, Dataset, or Variable\n The reference object. The output will have the same dimensions and coordinates as this object.\n dtype : dtype, optional\n dtype of the new array. If omitted, it defaults to other.dtype.\n\n Returns\n -------\n out : same as object\n New object of ones with the same shape and type as other.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> x = xr.DataArray(np.arange(6).reshape(2, 3),\n ... dims=['lat', 'lon'],\n ... coords={'lat': [1, 2], 'lon': [0, 1, 2]})\n >>> x\n \n array([[0, 1, 2],\n [3, 4, 5]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n >>> >>> xr.ones_like(x)\n \n array([[1, 1, 1],\n [1, 1, 1]])\n Coordinates:\n * lat (lat) int64 1 2\n * lon (lon) int64 0 1 2\n\n See also\n --------\n\n zeros_like\n full_like\n\n \"\"\"\n return full_like(other, 1, dtype)\n\n\ndef is_np_datetime_like(dtype: DTypeLike) -> bool:\n \"\"\"Check if a dtype is a subclass of the numpy datetime types\n \"\"\"\n return np.issubdtype(dtype, np.datetime64) or np.issubdtype(dtype, np.timedelta64)\n\n\ndef _contains_cftime_datetimes(array) -> bool:\n \"\"\"Check if an array contains cftime.datetime objects\n \"\"\"\n try:\n from cftime import datetime as cftime_datetime\n except ImportError:\n return False\n else:\n if array.dtype == np.dtype(\"O\") and array.size > 0:\n sample = array.ravel()[0]\n if isinstance(sample, dask_array_type):\n sample = sample.compute()\n if isinstance(sample, np.ndarray):\n sample = sample.item()\n return isinstance(sample, cftime_datetime)\n else:\n return False\n\n\ndef contains_cftime_datetimes(var) -> bool:\n \"\"\"Check if an xarray.Variable contains cftime.datetime objects\n \"\"\"\n return _contains_cftime_datetimes(var.data)\n\n\ndef _contains_datetime_like_objects(var) -> bool:\n \"\"\"Check if a variable contains datetime like objects (either\n np.datetime64, np.timedelta64, or cftime.datetime)\n \"\"\"\n return is_np_datetime_like(var.dtype) or contains_cftime_datetimes(var)\n"},{"col":4,"comment":"provide a nice str repr of our coarsen object","endLoc":552,"header":"def __repr__(self)","id":2887,"name":"__repr__","nodeType":"Function","startLoc":542,"text":"def __repr__(self):\n \"\"\"provide a nice str repr of our coarsen object\"\"\"\n\n attrs = [\n \"{k}->{v}\".format(k=k, v=getattr(self, k))\n for k in self._attributes\n if getattr(self, k, None) is not None\n ]\n return \"{klass} [{attrs}]\".format(\n klass=self.__class__.__name__, attrs=\",\".join(attrs)\n 
)"},{"attributeType":"null","col":8,"comment":"null","endLoc":332,"id":2888,"name":"_data","nodeType":"Attribute","startLoc":332,"text":"self._data"},{"attributeType":"null","col":0,"comment":"null","endLoc":12,"id":2889,"name":"_DIMENSION_KEY","nodeType":"Attribute","startLoc":12,"text":"_DIMENSION_KEY"},{"attributeType":"null","col":4,"comment":"null","endLoc":502,"id":2890,"name":"__slots__","nodeType":"Attribute","startLoc":502,"text":"__slots__"},{"attributeType":"null","col":4,"comment":"null","endLoc":503,"id":2891,"name":"_attributes","nodeType":"Attribute","startLoc":503,"text":"_attributes"},{"attributeType":"null","col":0,"comment":"null","endLoc":33,"id":2892,"name":"C","nodeType":"Attribute","startLoc":33,"text":"C"},{"col":0,"comment":"","endLoc":1,"header":"zarr.py#","id":2893,"name":"","nodeType":"Function","startLoc":1,"text":"_DIMENSION_KEY = \"_ARRAY_DIMENSIONS\""},{"attributeType":"null","col":0,"comment":"null","endLoc":34,"id":2894,"name":"T","nodeType":"Attribute","startLoc":34,"text":"T"},{"attributeType":"null","col":8,"comment":"null","endLoc":533,"id":2895,"name":"boundary","nodeType":"Attribute","startLoc":533,"text":"self.boundary"},{"attributeType":"ReprObject","col":0,"comment":"null","endLoc":30,"id":2896,"name":"_THIS_ARRAY","nodeType":"Attribute","startLoc":30,"text":"_THIS_ARRAY"},{"col":0,"comment":"","endLoc":1,"header":"common.py#","id":2897,"name":"","nodeType":"Function","startLoc":1,"text":"ALL_DIMS = ReprObject(\"\")\n\nC = TypeVar(\"C\")\n\nT = TypeVar(\"T\")"},{"col":0,"comment":"","endLoc":1,"header":"coordinates.py#","id":2898,"name":"","nodeType":"Function","startLoc":1,"text":"if TYPE_CHECKING:\n from .dataarray import DataArray\n from .dataset import Dataset\n\n_THIS_ARRAY = ReprObject(\"\")"},{"attributeType":"null","col":8,"comment":"null","endLoc":532,"id":2899,"name":"side","nodeType":"Attribute","startLoc":532,"text":"self.side"},{"fileName":"facetgrid.py","filePath":"xarray/plot","id":2900,"nodeType":"File","text":"import functools\nimport itertools\nimport warnings\n\nimport numpy as np\n\nfrom ..core.formatting import format_item\nfrom .utils import (\n _infer_xy_labels,\n _process_cmap_cbar_kwargs,\n import_matplotlib_pyplot,\n label_from_attrs,\n)\n\n# Overrides axes.labelsize, xtick.major.size, ytick.major.size\n# from mpl.rcParams\n_FONTSIZE = \"small\"\n# For major ticks on x, y axes\n_NTICKS = 5\n\n\ndef _nicetitle(coord, value, maxchar, template):\n \"\"\"\n Put coord, value in template and truncate at maxchar\n \"\"\"\n prettyvalue = format_item(value, quote_strings=False)\n title = template.format(coord=coord, value=prettyvalue)\n\n if len(title) > maxchar:\n title = title[: (maxchar - 3)] + \"...\"\n\n return title\n\n\nclass FacetGrid:\n \"\"\"\n Initialize the matplotlib figure and FacetGrid object.\n\n The :class:`FacetGrid` is an object that links a xarray DataArray to\n a matplotlib figure with a particular structure.\n\n In particular, :class:`FacetGrid` is used to draw plots with multiple\n Axes where each Axes shows the same relationship conditioned on\n different levels of some dimension. 
It's possible to condition on up to\n two variables by assigning variables to the rows and columns of the\n grid.\n\n The general approach to plotting here is called \"small multiples\",\n where the same kind of plot is repeated multiple times, and the\n specific use of small multiples to display the same relationship\n conditioned on one ore more other variables is often called a \"trellis\n plot\".\n\n The basic workflow is to initialize the :class:`FacetGrid` object with\n the DataArray and the variable names that are used to structure the grid.\n Then plotting functions can be applied to each subset by calling\n :meth:`FacetGrid.map_dataarray` or :meth:`FacetGrid.map`.\n\n Attributes\n ----------\n axes : numpy object array\n Contains axes in corresponding position, as returned from\n plt.subplots\n fig : matplotlib.Figure\n The figure containing all the axes\n name_dicts : numpy object array\n Contains dictionaries mapping coordinate names to values. None is\n used as a sentinel value for axes which should remain empty, ie.\n sometimes the bottom right grid\n \"\"\"\n\n def __init__(\n self,\n data,\n col=None,\n row=None,\n col_wrap=None,\n sharex=True,\n sharey=True,\n figsize=None,\n aspect=1,\n size=3,\n subplot_kws=None,\n ):\n \"\"\"\n Parameters\n ----------\n data : DataArray\n xarray DataArray to be plotted\n row, col : strings\n Dimesion names that define subsets of the data, which will be drawn\n on separate facets in the grid.\n col_wrap : int, optional\n \"Wrap\" the column variable at this width, so that the column facets\n sharex : bool, optional\n If true, the facets will share x axes\n sharey : bool, optional\n If true, the facets will share y axes\n figsize : tuple, optional\n A tuple (width, height) of the figure in inches.\n If set, overrides ``size`` and ``aspect``.\n aspect : scalar, optional\n Aspect ratio of each facet, so that ``aspect * size`` gives the\n width of each facet in inches\n size : scalar, optional\n Height (in inches) of each facet. 
See also: ``aspect``\n subplot_kws : dict, optional\n Dictionary of keyword arguments for matplotlib subplots\n\n \"\"\"\n\n plt = import_matplotlib_pyplot()\n\n # Handle corner case of nonunique coordinates\n rep_col = col is not None and not data[col].to_index().is_unique\n rep_row = row is not None and not data[row].to_index().is_unique\n if rep_col or rep_row:\n raise ValueError(\n \"Coordinates used for faceting cannot \"\n \"contain repeated (nonunique) values.\"\n )\n\n # single_group is the grouping variable, if there is exactly one\n if col and row:\n single_group = False\n nrow = len(data[row])\n ncol = len(data[col])\n nfacet = nrow * ncol\n if col_wrap is not None:\n warnings.warn(\"Ignoring col_wrap since both col and row \" \"were passed\")\n elif row and not col:\n single_group = row\n elif not row and col:\n single_group = col\n else:\n raise ValueError(\"Pass a coordinate name as an argument for row or col\")\n\n # Compute grid shape\n if single_group:\n nfacet = len(data[single_group])\n if col:\n # idea - could add heuristic for nice shapes like 3x4\n ncol = nfacet\n if row:\n ncol = 1\n if col_wrap is not None:\n # Overrides previous settings\n ncol = col_wrap\n nrow = int(np.ceil(nfacet / ncol))\n\n # Set the subplot kwargs\n subplot_kws = {} if subplot_kws is None else subplot_kws\n\n if figsize is None:\n # Calculate the base figure size with extra horizontal space for a\n # colorbar\n cbar_space = 1\n figsize = (ncol * size * aspect + cbar_space, nrow * size)\n\n fig, axes = plt.subplots(\n nrow,\n ncol,\n sharex=sharex,\n sharey=sharey,\n squeeze=False,\n figsize=figsize,\n subplot_kw=subplot_kws,\n )\n\n # Set up the lists of names for the row and column facet variables\n col_names = list(data[col].values) if col else []\n row_names = list(data[row].values) if row else []\n\n if single_group:\n full = [{single_group: x} for x in data[single_group].values]\n empty = [None for x in range(nrow * ncol - len(full))]\n name_dicts = full + empty\n else:\n rowcols = itertools.product(row_names, col_names)\n name_dicts = [{row: r, col: c} for r, c in rowcols]\n\n name_dicts = np.array(name_dicts).reshape(nrow, ncol)\n\n # Set up the class attributes\n # ---------------------------\n\n # First the public API\n self.data = data\n self.name_dicts = name_dicts\n self.fig = fig\n self.axes = axes\n self.row_names = row_names\n self.col_names = col_names\n self.figlegend = None\n\n # Next the private variables\n self._single_group = single_group\n self._nrow = nrow\n self._row_var = row\n self._ncol = ncol\n self._col_var = col\n self._col_wrap = col_wrap\n self._x_var = None\n self._y_var = None\n self._cmap_extend = None\n self._mappables = []\n self._finalized = False\n\n @property\n def _left_axes(self):\n return self.axes[:, 0]\n\n @property\n def _bottom_axes(self):\n return self.axes[-1, :]\n\n def map_dataarray(self, func, x, y, **kwargs):\n \"\"\"\n Apply a plotting function to a 2d facet's subset of the data.\n\n This is more convenient and less general than ``FacetGrid.map``\n\n Parameters\n ----------\n func : callable\n A plotting function with the same signature as a 2d xarray\n plotting method such as `xarray.plot.imshow`\n x, y : string\n Names of the coordinates to plot on x, y axes\n kwargs :\n additional keyword arguments to func\n\n Returns\n -------\n self : FacetGrid object\n\n \"\"\"\n\n if kwargs.get(\"cbar_ax\", None) is not None:\n raise ValueError(\"cbar_ax not supported by FacetGrid.\")\n\n cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs(\n func, 
self.data.values, **kwargs\n )\n\n self._cmap_extend = cmap_params.get(\"extend\")\n\n # Order is important\n func_kwargs = {\n k: v\n for k, v in kwargs.items()\n if k not in {\"cmap\", \"colors\", \"cbar_kwargs\", \"levels\"}\n }\n func_kwargs.update(cmap_params)\n func_kwargs.update({\"add_colorbar\": False, \"add_labels\": False})\n\n # Get x, y labels for the first subplot\n x, y = _infer_xy_labels(\n darray=self.data.loc[self.name_dicts.flat[0]],\n x=x,\n y=y,\n imshow=func.__name__ == \"imshow\",\n rgb=kwargs.get(\"rgb\", None),\n )\n\n for d, ax in zip(self.name_dicts.flat, self.axes.flat):\n # None is the sentinel value\n if d is not None:\n subset = self.data.loc[d]\n mappable = func(subset, x=x, y=y, ax=ax, **func_kwargs)\n self._mappables.append(mappable)\n\n self._finalize_grid(x, y)\n\n if kwargs.get(\"add_colorbar\", True):\n self.add_colorbar(**cbar_kwargs)\n\n return self\n\n def map_dataarray_line(\n self, func, x, y, hue, add_legend=True, _labels=None, **kwargs\n ):\n from .plot import _infer_line_data\n\n for d, ax in zip(self.name_dicts.flat, self.axes.flat):\n # None is the sentinel value\n if d is not None:\n subset = self.data.loc[d]\n mappable = func(\n subset,\n x=x,\n y=y,\n ax=ax,\n hue=hue,\n add_legend=False,\n _labels=False,\n **kwargs\n )\n self._mappables.append(mappable)\n\n _, _, hueplt, xlabel, ylabel, huelabel = _infer_line_data(\n darray=self.data.loc[self.name_dicts.flat[0]], x=x, y=y, hue=hue\n )\n\n self._hue_var = hueplt\n self._hue_label = huelabel\n self._finalize_grid(xlabel, ylabel)\n\n if add_legend and hueplt is not None and huelabel is not None:\n self.add_legend()\n\n return self\n\n def map_dataset(\n self, func, x=None, y=None, hue=None, hue_style=None, add_guide=None, **kwargs\n ):\n from .dataset_plot import _infer_meta_data, _parse_size\n\n kwargs[\"add_guide\"] = False\n kwargs[\"_is_facetgrid\"] = True\n\n if kwargs.get(\"markersize\", None):\n kwargs[\"size_mapping\"] = _parse_size(\n self.data[kwargs[\"markersize\"]], kwargs.pop(\"size_norm\", None)\n )\n\n meta_data = _infer_meta_data(self.data, x, y, hue, hue_style, add_guide)\n kwargs[\"meta_data\"] = meta_data\n\n if hue and meta_data[\"hue_style\"] == \"continuous\":\n cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs(\n func, self.data[hue].values, **kwargs\n )\n kwargs[\"meta_data\"][\"cmap_params\"] = cmap_params\n kwargs[\"meta_data\"][\"cbar_kwargs\"] = cbar_kwargs\n\n for d, ax in zip(self.name_dicts.flat, self.axes.flat):\n # None is the sentinel value\n if d is not None:\n subset = self.data.loc[d]\n maybe_mappable = func(\n ds=subset, x=x, y=y, hue=hue, hue_style=hue_style, ax=ax, **kwargs\n )\n # TODO: this is needed to get legends to work.\n # but maybe_mappable is a list in that case :/\n self._mappables.append(maybe_mappable)\n\n self._finalize_grid(meta_data[\"xlabel\"], meta_data[\"ylabel\"])\n\n if hue:\n self._hue_label = meta_data.pop(\"hue_label\", None)\n if meta_data[\"add_legend\"]:\n self._hue_var = meta_data[\"hue\"]\n self.add_legend()\n elif meta_data[\"add_colorbar\"]:\n self.add_colorbar(label=self._hue_label, **cbar_kwargs)\n\n return self\n\n def _finalize_grid(self, *axlabels):\n \"\"\"Finalize the annotations and layout.\"\"\"\n if not self._finalized:\n self.set_axis_labels(*axlabels)\n self.set_titles()\n self.fig.tight_layout()\n\n for ax, namedict in zip(self.axes.flat, self.name_dicts.flat):\n if namedict is None:\n ax.set_visible(False)\n\n self._finalized = True\n\n def add_legend(self, **kwargs):\n figlegend = self.fig.legend(\n 
handles=self._mappables[-1],\n labels=list(self._hue_var.values),\n title=self._hue_label,\n loc=\"center right\",\n **kwargs\n )\n\n self.figlegend = figlegend\n # Draw the plot to set the bounding boxes correctly\n self.fig.draw(self.fig.canvas.get_renderer())\n\n # Calculate and set the new width of the figure so the legend fits\n legend_width = figlegend.get_window_extent().width / self.fig.dpi\n figure_width = self.fig.get_figwidth()\n self.fig.set_figwidth(figure_width + legend_width)\n\n # Draw the plot again to get the new transformations\n self.fig.draw(self.fig.canvas.get_renderer())\n\n # Now calculate how much space we need on the right side\n legend_width = figlegend.get_window_extent().width / self.fig.dpi\n space_needed = legend_width / (figure_width + legend_width) + 0.02\n # margin = .01\n # _space_needed = margin + space_needed\n right = 1 - space_needed\n\n # Place the subplot axes to give space for the legend\n self.fig.subplots_adjust(right=right)\n\n def add_colorbar(self, **kwargs):\n \"\"\"Draw a colorbar\n \"\"\"\n kwargs = kwargs.copy()\n if self._cmap_extend is not None:\n kwargs.setdefault(\"extend\", self._cmap_extend)\n if \"label\" not in kwargs:\n kwargs.setdefault(\"label\", label_from_attrs(self.data))\n self.cbar = self.fig.colorbar(\n self._mappables[-1], ax=list(self.axes.flat), **kwargs\n )\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n if x_var in self.data.coords:\n self._x_var = x_var\n self.set_xlabels(label_from_attrs(self.data[x_var]))\n else:\n # x_var is a string\n self.set_xlabels(x_var)\n\n if y_var is not None:\n if y_var in self.data.coords:\n self._y_var = y_var\n self.set_ylabels(label_from_attrs(self.data[y_var]))\n else:\n self.set_ylabels(y_var)\n return self\n\n def set_xlabels(self, label=None, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = label_from_attrs(self.data[self._x_var])\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n return self\n\n def set_ylabels(self, label=None, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = label_from_attrs(self.data[self._y_var])\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n return self\n\n def set_titles(self, template=\"{coord} = {value}\", maxchar=30, size=None, **kwargs):\n \"\"\"\n Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for plot titles containing {coord} and {value}\n maxchar : int\n Truncate titles at maxchar\n kwargs : keyword args\n additional arguments to matplotlib.text\n\n Returns\n -------\n self: FacetGrid object\n\n \"\"\"\n import matplotlib as mpl\n\n if size is None:\n size = mpl.rcParams[\"axes.labelsize\"]\n\n nicetitle = functools.partial(_nicetitle, maxchar=maxchar, template=template)\n\n if self._single_group:\n for d, ax in zip(self.name_dicts.flat, self.axes.flat):\n # Only label the ones with data\n if d is not None:\n coord, value = list(d.items()).pop()\n title = nicetitle(coord, value, maxchar=maxchar)\n ax.set_title(title, size=size, **kwargs)\n else:\n # The row titles on the right edge of the grid\n for ax, row_name in zip(self.axes[:, -1], self.row_names):\n title = nicetitle(coord=self._row_var, value=row_name, maxchar=maxchar)\n ax.annotate(\n title,\n xy=(1.02, 0.5),\n xycoords=\"axes fraction\",\n rotation=270,\n 
ha=\"left\",\n va=\"center\",\n **kwargs\n )\n\n # The column titles on the top row\n for ax, col_name in zip(self.axes[0, :], self.col_names):\n title = nicetitle(coord=self._col_var, value=col_name, maxchar=maxchar)\n ax.set_title(title, size=size, **kwargs)\n\n return self\n\n def set_ticks(self, max_xticks=_NTICKS, max_yticks=_NTICKS, fontsize=_FONTSIZE):\n \"\"\"\n Set and control tick behavior\n\n Parameters\n ----------\n max_xticks, max_yticks : int, optional\n Maximum number of labeled ticks to plot on x, y axes\n fontsize : string or int\n Font size as used by matplotlib text\n\n Returns\n -------\n self : FacetGrid object\n\n \"\"\"\n from matplotlib.ticker import MaxNLocator\n\n # Both are necessary\n x_major_locator = MaxNLocator(nbins=max_xticks)\n y_major_locator = MaxNLocator(nbins=max_yticks)\n\n for ax in self.axes.flat:\n ax.xaxis.set_major_locator(x_major_locator)\n ax.yaxis.set_major_locator(y_major_locator)\n for tick in itertools.chain(\n ax.xaxis.get_major_ticks(), ax.yaxis.get_major_ticks()\n ):\n tick.label1.set_fontsize(fontsize)\n\n return self\n\n def map(self, func, *args, **kwargs):\n \"\"\"\n Apply a plotting function to each facet's subset of the data.\n\n Parameters\n ----------\n func : callable\n A plotting function that takes data and keyword arguments. It\n must plot to the currently active matplotlib Axes and take a\n `color` keyword argument. If faceting on the `hue` dimension,\n it must also take a `label` keyword argument.\n args : strings\n Column names in self.data that identify variables with data to\n plot. The data for each variable is passed to `func` in the\n order the variables are specified in the call.\n kwargs : keyword arguments\n All keyword arguments are passed to the plotting function.\n\n Returns\n -------\n self : FacetGrid object\n\n \"\"\"\n plt = import_matplotlib_pyplot()\n\n for ax, namedict in zip(self.axes.flat, self.name_dicts.flat):\n if namedict is not None:\n data = self.data.loc[namedict]\n plt.sca(ax)\n innerargs = [data[a].values for a in args]\n maybe_mappable = func(*innerargs, **kwargs)\n # TODO: better way to verify that an artist is mappable?\n # https://stackoverflow.com/questions/33023036/is-it-possible-to-detect-if-a-matplotlib-artist-is-a-mappable-suitable-for-use-w#33023522\n if maybe_mappable and hasattr(maybe_mappable, \"autoscale_None\"):\n self._mappables.append(maybe_mappable)\n\n self._finalize_grid(*args[:2])\n\n return self\n\n\ndef _easy_facetgrid(\n data,\n plotfunc,\n kind,\n x=None,\n y=None,\n row=None,\n col=None,\n col_wrap=None,\n sharex=True,\n sharey=True,\n aspect=None,\n size=None,\n subplot_kws=None,\n ax=None,\n figsize=None,\n **kwargs\n):\n \"\"\"\n Convenience method to call xarray.plot.FacetGrid from 2d plotting methods\n\n kwargs are the arguments to 2d plotting method\n \"\"\"\n if ax is not None:\n raise ValueError(\"Can't use axes when making faceted plots.\")\n if aspect is None:\n aspect = 1\n if size is None:\n size = 3\n elif figsize is not None:\n raise ValueError(\"cannot provide both `figsize` and `size` arguments\")\n\n g = FacetGrid(\n data=data,\n col=col,\n row=row,\n col_wrap=col_wrap,\n sharex=sharex,\n sharey=sharey,\n figsize=figsize,\n aspect=aspect,\n size=size,\n subplot_kws=subplot_kws,\n )\n\n if kind == \"line\":\n return g.map_dataarray_line(plotfunc, x, y, **kwargs)\n\n if kind == \"dataarray\":\n return g.map_dataarray(plotfunc, x, y, **kwargs)\n\n if kind == \"dataset\":\n return g.map_dataset(plotfunc, x, y, **kwargs)\n"},{"col":0,"comment":"\n Put 
coord, value in template and truncate at maxchar\n ","endLoc":32,"header":"def _nicetitle(coord, value, maxchar, template)","id":2901,"name":"_nicetitle","nodeType":"Function","startLoc":22,"text":"def _nicetitle(coord, value, maxchar, template):\n \"\"\"\n Put coord, value in template and truncate at maxchar\n \"\"\"\n prettyvalue = format_item(value, quote_strings=False)\n title = template.format(coord=coord, value=prettyvalue)\n\n if len(title) > maxchar:\n title = title[: (maxchar - 3)] + \"...\"\n\n return title"},{"id":2902,"name":"stale.yml","nodeType":"TextFile","path":".github","text":"# Configuration for probot-stale - https://github.com/probot/stale\n\n# Number of days of inactivity before an Issue or Pull Request becomes stale\ndaysUntilStale: 700 # start with a large number and reduce shortly\n\n# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.\n# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.\ndaysUntilClose: 30\n\n# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable\nexemptLabels:\n - pinned\n - security\n - \"[Status] Maybe Later\"\n\n# Set to true to ignore issues in a project (defaults to false)\nexemptProjects: false\n\n# Set to true to ignore issues in a milestone (defaults to false)\nexemptMilestones: false\n\n# Set to true to ignore issues with an assignee (defaults to false)\nexemptAssignees: true\n\n# Label to use when marking as stale\nstaleLabel: stale\n\n# Comment to post when marking as stale. Set to `false` to disable\nmarkComment: |\n In order to maintain a list of currently relevant issues, we mark issues as stale after a period of inactivity\n\n If this issue remains relevant, please comment here or remove the `stale` label; otherwise it will be marked as closed automatically\n\n# Comment to post when removing the stale label.\n# unmarkComment: >\n# Your comment here.\n\n# Comment to post when closing a stale Issue or Pull Request.\n# closeComment: >\n# Your comment here.\n\n# Limit the number of actions per hour, from 1-30. Default is 30\nlimitPerRun: 1 # start with a small number\n\n\n# Limit to only `issues` or `pulls`\n# only: issues\n\n# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':\n# pulls:\n# daysUntilStale: 30\n# markComment: >\n# This pull request has been automatically marked as stale because it has not had\n# recent activity. It will be closed if no further activity occurs. 
Thank you\n# for your contributions.\n\n# issues:\n# exemptLabels:\n# - confirmed"},{"col":4,"comment":"null","endLoc":31,"header":"def __repr__(self)","id":2903,"name":"__repr__","nodeType":"Function","startLoc":28,"text":"def __repr__(self):\n return \"{type}: method={method}\".format(\n type=self.__class__.__name__, method=self.method\n )"},{"attributeType":"null","col":4,"comment":"null","endLoc":20,"id":2904,"name":"cons_kwargs","nodeType":"Attribute","startLoc":20,"text":"cons_kwargs"},{"attributeType":"null","col":4,"comment":"null","endLoc":21,"id":2905,"name":"call_kwargs","nodeType":"Attribute","startLoc":21,"text":"call_kwargs"},{"attributeType":"null","col":0,"comment":"null","endLoc":17,"id":2906,"name":"_FONTSIZE","nodeType":"Attribute","startLoc":17,"text":"_FONTSIZE"},{"attributeType":"null","col":0,"comment":"null","endLoc":19,"id":2907,"name":"_NTICKS","nodeType":"Attribute","startLoc":19,"text":"_NTICKS"},{"col":0,"comment":"","endLoc":1,"header":"facetgrid.py#","id":2908,"name":"","nodeType":"Function","startLoc":1,"text":"_FONTSIZE = \"small\"\n\n_NTICKS = 5"},{"attributeType":"null","col":4,"comment":"null","endLoc":22,"id":2909,"name":"f","nodeType":"Attribute","startLoc":22,"text":"f"},{"attributeType":"null","col":4,"comment":"null","endLoc":23,"id":2910,"name":"method","nodeType":"Attribute","startLoc":23,"text":"method"},{"className":"NumpyInterpolator","col":0,"comment":"One-dimensional linear interpolation.\n\n See Also\n --------\n numpy.interp\n ","endLoc":75,"id":2911,"nodeType":"Class","startLoc":34,"text":"class NumpyInterpolator(BaseInterpolator):\n \"\"\"One-dimensional linear interpolation.\n\n See Also\n --------\n numpy.interp\n \"\"\"\n\n def __init__(self, xi, yi, method=\"linear\", fill_value=None, period=None):\n\n if method != \"linear\":\n raise ValueError(\"only method `linear` is valid for the NumpyInterpolator\")\n\n self.method = method\n self.f = np.interp\n self.cons_kwargs = {}\n self.call_kwargs = {\"period\": period}\n\n self._xi = xi\n self._yi = yi\n\n if fill_value is None:\n self._left = np.nan\n self._right = np.nan\n elif isinstance(fill_value, Sequence) and len(fill_value) == 2:\n self._left = fill_value[0]\n self._right = fill_value[1]\n elif is_scalar(fill_value):\n self._left = fill_value\n self._right = fill_value\n else:\n raise ValueError(\"%s is not a valid fill_value\" % fill_value)\n\n def __call__(self, x):\n return self.f(\n x,\n self._xi,\n self._yi,\n left=self._left,\n right=self._right,\n **self.call_kwargs\n )"},{"attributeType":"null","col":8,"comment":"null","endLoc":540,"id":2912,"name":"coord_func","nodeType":"Attribute","startLoc":540,"text":"self.coord_func"},{"col":4,"comment":"null","endLoc":65,"header":"def __init__(self, xi, yi, method=\"linear\", fill_value=None, period=None)","id":2913,"name":"__init__","nodeType":"Function","startLoc":42,"text":"def __init__(self, xi, yi, method=\"linear\", fill_value=None, period=None):\n\n if method != \"linear\":\n raise ValueError(\"only method `linear` is valid for the NumpyInterpolator\")\n\n self.method = method\n self.f = np.interp\n self.cons_kwargs = {}\n self.call_kwargs = {\"period\": period}\n\n self._xi = xi\n self._yi = yi\n\n if fill_value is None:\n self._left = np.nan\n self._right = np.nan\n elif isinstance(fill_value, Sequence) and len(fill_value) == 2:\n self._left = fill_value[0]\n self._right = fill_value[1]\n elif is_scalar(fill_value):\n self._left = fill_value\n self._right = fill_value\n else:\n raise ValueError(\"%s is not a valid 
fill_value\" % fill_value)"},{"id":2914,"name":"MANIFEST.in","nodeType":"TextFile","path":"","text":"include LICENSE\nrecursive-include licenses *\nrecursive-include doc *\nprune doc/_build\nprune doc/generated\nglobal-exclude .DS_Store\ninclude versioneer.py\ninclude xarray/_version.py\n"},{"col":4,"comment":"null","endLoc":75,"header":"def __call__(self, x)","id":2915,"name":"__call__","nodeType":"Function","startLoc":67,"text":"def __call__(self, x):\n return self.f(\n x,\n self._xi,\n self._yi,\n left=self._left,\n right=self._right,\n **self.call_kwargs\n )"},{"attributeType":"null","col":8,"comment":"null","endLoc":530,"id":2916,"name":"obj","nodeType":"Attribute","startLoc":530,"text":"self.obj"},{"attributeType":"null","col":8,"comment":"null","endLoc":531,"id":2917,"name":"windows","nodeType":"Attribute","startLoc":531,"text":"self.windows"},{"className":"DataArrayCoarsen","col":0,"comment":"null","endLoc":584,"id":2918,"nodeType":"Class","startLoc":555,"text":"class DataArrayCoarsen(Coarsen):\n __slots__ = ()\n\n @classmethod\n def _reduce_method(cls, func):\n \"\"\"\n Return a wrapped function for injecting numpy methods.\n see ops.inject_coarsen_methods\n \"\"\"\n\n def wrapped_func(self, **kwargs):\n from .dataarray import DataArray\n\n reduced = self.obj.variable.coarsen(\n self.windows, func, self.boundary, self.side\n )\n coords = {}\n for c, v in self.obj.coords.items():\n if c == self.obj.name:\n coords[c] = reduced\n else:\n if any(d in self.windows for d in v.dims):\n coords[c] = v.variable.coarsen(\n self.windows, self.coord_func[c], self.boundary, self.side\n )\n else:\n coords[c] = v\n return DataArray(reduced, dims=self.obj.dims, coords=coords)\n\n return wrapped_func"},{"col":4,"comment":"\n Return a wrapped function for injecting numpy methods.\n see ops.inject_coarsen_methods\n ","endLoc":584,"header":"@classmethod\n def _reduce_method(cls, func)","id":2919,"name":"_reduce_method","nodeType":"Function","startLoc":558,"text":"@classmethod\n def _reduce_method(cls, func):\n \"\"\"\n Return a wrapped function for injecting numpy methods.\n see ops.inject_coarsen_methods\n \"\"\"\n\n def wrapped_func(self, **kwargs):\n from .dataarray import DataArray\n\n reduced = self.obj.variable.coarsen(\n self.windows, func, self.boundary, self.side\n )\n coords = {}\n for c, v in self.obj.coords.items():\n if c == self.obj.name:\n coords[c] = reduced\n else:\n if any(d in self.windows for d in v.dims):\n coords[c] = v.variable.coarsen(\n self.windows, self.coord_func[c], self.boundary, self.side\n )\n else:\n coords[c] = v\n return DataArray(reduced, dims=self.obj.dims, coords=coords)\n\n return wrapped_func"},{"fileName":"__init__.py","filePath":"asv_bench/benchmarks","id":2920,"nodeType":"File","text":"import itertools\n\nimport numpy as np\n\n_counter = itertools.count()\n\n\ndef parameterized(names, params):\n def decorator(func):\n func.param_names = names\n func.params = params\n return func\n\n return decorator\n\n\ndef requires_dask():\n try:\n import dask # noqa: F401\n except ImportError:\n raise NotImplementedError()\n\n\ndef randn(shape, frac_nan=None, chunks=None, seed=0):\n rng = np.random.RandomState(seed)\n if chunks is None:\n x = rng.standard_normal(shape)\n else:\n import dask.array as da\n\n rng = da.random.RandomState(seed)\n x = rng.standard_normal(shape, chunks=chunks)\n\n if frac_nan is not None:\n inds = rng.choice(range(x.size), int(x.size * frac_nan))\n x.flat[inds] = np.nan\n\n return x\n\n\ndef randint(low, high=None, size=None, frac_minus=None, 
seed=0):\n rng = np.random.RandomState(seed)\n x = rng.randint(low, high, size)\n if frac_minus is not None:\n inds = rng.choice(range(x.size), int(x.size * frac_minus))\n x.flat[inds] = -1\n\n return x\n"},{"attributeType":"null","col":0,"comment":"null","endLoc":5,"id":2921,"name":"_counter","nodeType":"Attribute","startLoc":5,"text":"_counter"},{"col":4,"comment":"null","endLoc":127,"header":"def _getitem(self, key)","id":2922,"name":"_getitem","nodeType":"Function","startLoc":109,"text":"def _getitem(self, key):\n from rasterio.vrt import WarpedVRT\n\n band_key, window, squeeze_axis, np_inds = self._get_indexer(key)\n\n if not band_key or any(start == stop for (start, stop) in window):\n # no need to do IO\n shape = (len(band_key),) + tuple(stop - start for (start, stop) in window)\n out = np.zeros(shape, dtype=self.dtype)\n else:\n with self.lock:\n riods = self.manager.acquire(needs_lock=False)\n if self.vrt_params is not None:\n riods = WarpedVRT(riods, **self.vrt_params)\n out = riods.read(band_key, window=window)\n\n if squeeze_axis:\n out = np.squeeze(out, axis=squeeze_axis)\n return out[np_inds]"},{"col":0,"comment":"","endLoc":1,"header":"__init__.py#","id":2923,"name":"","nodeType":"Function","startLoc":1,"text":"_counter = itertools.count()"},{"attributeType":"null","col":8,"comment":"null","endLoc":52,"id":2924,"name":"_xi","nodeType":"Attribute","startLoc":52,"text":"self._xi"},{"attributeType":"{__ne__}","col":8,"comment":"null","endLoc":47,"id":2925,"name":"method","nodeType":"Attribute","startLoc":47,"text":"self.method"},{"attributeType":"null","col":8,"comment":"null","endLoc":48,"id":2926,"name":"f","nodeType":"Attribute","startLoc":48,"text":"self.f"},{"attributeType":"null","col":12,"comment":"null","endLoc":63,"id":2927,"name":"_right","nodeType":"Attribute","startLoc":63,"text":"self._right"},{"attributeType":"null","col":4,"comment":"null","endLoc":556,"id":2928,"name":"__slots__","nodeType":"Attribute","startLoc":556,"text":"__slots__"},{"className":"DatasetCoarsen","col":0,"comment":"null","endLoc":616,"id":2929,"nodeType":"Class","startLoc":587,"text":"class DatasetCoarsen(Coarsen):\n __slots__ = ()\n\n @classmethod\n def _reduce_method(cls, func):\n \"\"\"\n Return a wrapped function for injecting numpy methods.\n see ops.inject_coarsen_methods\n \"\"\"\n\n def wrapped_func(self, **kwargs):\n from .dataset import Dataset\n\n reduced = {}\n for key, da in self.obj.data_vars.items():\n reduced[key] = da.variable.coarsen(\n self.windows, func, self.boundary, self.side\n )\n\n coords = {}\n for c, v in self.obj.coords.items():\n if any(d in self.windows for d in v.dims):\n coords[c] = v.variable.coarsen(\n self.windows, self.coord_func[c], self.boundary, self.side\n )\n else:\n coords[c] = v.variable\n return Dataset(reduced, coords=coords)\n\n return wrapped_func"},{"col":4,"comment":"\n Return a wrapped function for injecting numpy methods.\n see ops.inject_coarsen_methods\n ","endLoc":616,"header":"@classmethod\n def _reduce_method(cls, func)","id":2930,"name":"_reduce_method","nodeType":"Function","startLoc":590,"text":"@classmethod\n def _reduce_method(cls, func):\n \"\"\"\n Return a wrapped function for injecting numpy methods.\n see ops.inject_coarsen_methods\n \"\"\"\n\n def wrapped_func(self, **kwargs):\n from .dataset import Dataset\n\n reduced = {}\n for key, da in self.obj.data_vars.items():\n reduced[key] = da.variable.coarsen(\n self.windows, func, self.boundary, self.side\n )\n\n coords = {}\n for c, v in self.obj.coords.items():\n if any(d in 
self.windows for d in v.dims):\n coords[c] = v.variable.coarsen(\n self.windows, self.coord_func[c], self.boundary, self.side\n )\n else:\n coords[c] = v.variable\n return Dataset(reduced, coords=coords)\n\n return wrapped_func"},{"id":2931,"name":"PYTHON_LICENSE","nodeType":"TextFile","path":"licenses","text":"A. HISTORY OF THE SOFTWARE\n==========================\n\nPython was created in the early 1990s by Guido van Rossum at Stichting\nMathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands\nas a successor of a language called ABC. Guido remains Python's\nprincipal author, although it includes many contributions from others.\n\nIn 1995, Guido continued his work on Python at the Corporation for\nNational Research Initiatives (CNRI, see http://www.cnri.reston.va.us)\nin Reston, Virginia where he released several versions of the\nsoftware.\n\nIn May 2000, Guido and the Python core development team moved to\nBeOpen.com to form the BeOpen PythonLabs team. In October of the same\nyear, the PythonLabs team moved to Digital Creations (now Zope\nCorporation, see http://www.zope.com). In 2001, the Python Software\nFoundation (PSF, see http://www.python.org/psf/) was formed, a\nnon-profit organization created specifically to own Python-related\nIntellectual Property. Zope Corporation is a sponsoring member of\nthe PSF.\n\nAll Python releases are Open Source (see http://www.opensource.org for\nthe Open Source Definition). Historically, most, but not all, Python\nreleases have also been GPL-compatible; the table below summarizes\nthe various releases.\n\n Release Derived Year Owner GPL-\n from compatible? (1)\n\n 0.9.0 thru 1.2 1991-1995 CWI yes\n 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes\n 1.6 1.5.2 2000 CNRI no\n 2.0 1.6 2000 BeOpen.com no\n 1.6.1 1.6 2001 CNRI yes (2)\n 2.1 2.0+1.6.1 2001 PSF no\n 2.0.1 2.0+1.6.1 2001 PSF yes\n 2.1.1 2.1+2.0.1 2001 PSF yes\n 2.1.2 2.1.1 2002 PSF yes\n 2.1.3 2.1.2 2002 PSF yes\n 2.2 and above 2.1.1 2001-now PSF yes\n\nFootnotes:\n\n(1) GPL-compatible doesn't mean that we're distributing Python under\n the GPL. All Python licenses, unlike the GPL, let you distribute\n a modified version without making your changes open source. The\n GPL-compatible licenses make it possible to combine Python with\n other software that is released under the GPL; the others don't.\n\n(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,\n because its license has a choice of law clause. According to\n CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1\n is \"not incompatible\" with the GPL.\n\nThanks to the many outside volunteers who have worked under Guido's\ndirection to make these releases possible.\n\n\nB. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON\n===============================================================\n\nPYTHON SOFTWARE FOUNDATION LICENSE VERSION 2\n--------------------------------------------\n\n1. This LICENSE AGREEMENT is between the Python Software Foundation\n(\"PSF\"), and the Individual or Organization (\"Licensee\") accessing and\notherwise using this software (\"Python\") in source or binary form and\nits associated documentation.\n\n2. 
Subject to the terms and conditions of this License Agreement, PSF hereby\ngrants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,\nanalyze, test, perform and/or display publicly, prepare derivative works,\ndistribute, and otherwise use Python alone or in any derivative version,\nprovided, however, that PSF's License Agreement and PSF's notice of copyright,\ni.e., \"Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,\n2011, 2012, 2013, 2014, 2015 Python Software Foundation; All Rights Reserved\"\nare retained in Python alone or in any derivative version prepared by Licensee.\n\n3. In the event Licensee prepares a derivative work that is based on\nor incorporates Python or any part thereof, and wants to make\nthe derivative work available to others as provided herein, then\nLicensee hereby agrees to include in any such work a brief summary of\nthe changes made to Python.\n\n4. PSF is making Python available to Licensee on an \"AS IS\"\nbasis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\nIMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND\nDISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\nFOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT\nINFRINGE ANY THIRD PARTY RIGHTS.\n\n5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON\nFOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS\nA RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,\nOR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\n\n6. This License Agreement will automatically terminate upon a material\nbreach of its terms and conditions.\n\n7. Nothing in this License Agreement shall be deemed to create any\nrelationship of agency, partnership, or joint venture between PSF and\nLicensee. This License Agreement does not grant permission to use PSF\ntrademarks or trade name in a trademark sense to endorse or promote\nproducts or services of Licensee, or any third party.\n\n8. By copying, installing or otherwise using Python, Licensee\nagrees to be bound by the terms and conditions of this License\nAgreement.\n\n\nBEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0\n-------------------------------------------\n\nBEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1\n\n1. This LICENSE AGREEMENT is between BeOpen.com (\"BeOpen\"), having an\noffice at 160 Saratoga Avenue, Santa Clara, CA 95051, and the\nIndividual or Organization (\"Licensee\") accessing and otherwise using\nthis software in source or binary form and its associated\ndocumentation (\"the Software\").\n\n2. Subject to the terms and conditions of this BeOpen Python License\nAgreement, BeOpen hereby grants Licensee a non-exclusive,\nroyalty-free, world-wide license to reproduce, analyze, test, perform\nand/or display publicly, prepare derivative works, distribute, and\notherwise use the Software alone or in any derivative version,\nprovided, however, that the BeOpen Python License is retained in the\nSoftware, alone or in any derivative version prepared by Licensee.\n\n3. BeOpen is making the Software available to Licensee on an \"AS IS\"\nbasis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\nIMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND\nDISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\nFOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT\nINFRINGE ANY THIRD PARTY RIGHTS.\n\n4. 
BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE\nSOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS\nAS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY\nDERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\n\n5. This License Agreement will automatically terminate upon a material\nbreach of its terms and conditions.\n\n6. This License Agreement shall be governed by and interpreted in all\nrespects by the law of the State of California, excluding conflict of\nlaw provisions. Nothing in this License Agreement shall be deemed to\ncreate any relationship of agency, partnership, or joint venture\nbetween BeOpen and Licensee. This License Agreement does not grant\npermission to use BeOpen trademarks or trade names in a trademark\nsense to endorse or promote products or services of Licensee, or any\nthird party. As an exception, the \"BeOpen Python\" logos available at\nhttp://www.pythonlabs.com/logos.html may be used according to the\npermissions granted on that web page.\n\n7. By copying, installing or otherwise using the software, Licensee\nagrees to be bound by the terms and conditions of this License\nAgreement.\n\n\nCNRI LICENSE AGREEMENT FOR PYTHON 1.6.1\n---------------------------------------\n\n1. This LICENSE AGREEMENT is between the Corporation for National\nResearch Initiatives, having an office at 1895 Preston White Drive,\nReston, VA 20191 (\"CNRI\"), and the Individual or Organization\n(\"Licensee\") accessing and otherwise using Python 1.6.1 software in\nsource or binary form and its associated documentation.\n\n2. Subject to the terms and conditions of this License Agreement, CNRI\nhereby grants Licensee a nonexclusive, royalty-free, world-wide\nlicense to reproduce, analyze, test, perform and/or display publicly,\nprepare derivative works, distribute, and otherwise use Python 1.6.1\nalone or in any derivative version, provided, however, that CNRI's\nLicense Agreement and CNRI's notice of copyright, i.e., \"Copyright (c)\n1995-2001 Corporation for National Research Initiatives; All Rights\nReserved\" are retained in Python 1.6.1 alone or in any derivative\nversion prepared by Licensee. Alternately, in lieu of CNRI's License\nAgreement, Licensee may substitute the following text (omitting the\nquotes): \"Python 1.6.1 is made available subject to the terms and\nconditions in CNRI's License Agreement. This Agreement together with\nPython 1.6.1 may be located on the Internet using the following\nunique, persistent identifier (known as a handle): 1895.22/1013. This\nAgreement may also be obtained from a proxy server on the Internet\nusing the following URL: http://hdl.handle.net/1895.22/1013\".\n\n3. In the event Licensee prepares a derivative work that is based on\nor incorporates Python 1.6.1 or any part thereof, and wants to make\nthe derivative work available to others as provided herein, then\nLicensee hereby agrees to include in any such work a brief summary of\nthe changes made to Python 1.6.1.\n\n4. CNRI is making Python 1.6.1 available to Licensee on an \"AS IS\"\nbasis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR\nIMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND\nDISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS\nFOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT\nINFRINGE ANY THIRD PARTY RIGHTS.\n\n5. 
CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON\n1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS\nA RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,\nOR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.\n\n6. This License Agreement will automatically terminate upon a material\nbreach of its terms and conditions.\n\n7. This License Agreement shall be governed by the federal\nintellectual property law of the United States, including without\nlimitation the federal copyright law, and, to the extent such\nU.S. federal law does not apply, by the law of the Commonwealth of\nVirginia, excluding Virginia's conflict of law provisions.\nNotwithstanding the foregoing, with regard to derivative works based\non Python 1.6.1 that incorporate non-separable material that was\npreviously distributed under the GNU General Public License (GPL), the\nlaw of the Commonwealth of Virginia shall govern this License\nAgreement only as to issues arising under or with respect to\nParagraphs 4, 5, and 7 of this License Agreement. Nothing in this\nLicense Agreement shall be deemed to create any relationship of\nagency, partnership, or joint venture between CNRI and Licensee. This\nLicense Agreement does not grant permission to use CNRI trademarks or\ntrade name in a trademark sense to endorse or promote products or\nservices of Licensee, or any third party.\n\n8. By clicking on the \"ACCEPT\" button where indicated, or by copying,\ninstalling or otherwise using Python 1.6.1, Licensee agrees to be\nbound by the terms and conditions of this License Agreement.\n\n ACCEPT\n\n\nCWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2\n--------------------------------------------------\n\nCopyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,\nThe Netherlands. 
All rights reserved.\n\nPermission to use, copy, modify, and distribute this software and its\ndocumentation for any purpose and without fee is hereby granted,\nprovided that the above copyright notice appear in all copies and that\nboth that copyright notice and this permission notice appear in\nsupporting documentation, and that the name of Stichting Mathematisch\nCentrum or CWI not be used in advertising or publicity pertaining to\ndistribution of the software without specific, written prior\npermission.\n\nSTICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO\nTHIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND\nFITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE\nFOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT\nOF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE."},{"fileName":"utils.py","filePath":"xarray/plot","id":2932,"nodeType":"File","text":"import itertools\nimport textwrap\nimport warnings\nfrom datetime import datetime\nfrom inspect import getfullargspec\nfrom typing import Any, Iterable, Mapping, Tuple, Union\n\nimport numpy as np\nimport pandas as pd\n\nfrom ..core.options import OPTIONS\nfrom ..core.utils import is_scalar\n\ntry:\n import nc_time_axis # noqa: F401\n\n nc_time_axis_available = True\nexcept ImportError:\n nc_time_axis_available = False\n\nROBUST_PERCENTILE = 2.0\n\n\ndef import_seaborn():\n \"\"\"import seaborn and handle deprecation of apionly module\"\"\"\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n try:\n import seaborn.apionly as sns\n\n if (\n w\n and issubclass(w[-1].category, UserWarning)\n and (\"seaborn.apionly module\" in str(w[-1].message))\n ):\n raise ImportError\n except ImportError:\n import seaborn as sns\n finally:\n warnings.resetwarnings()\n return sns\n\n\n_registered = False\n\n\ndef register_pandas_datetime_converter_if_needed():\n # based on https://github.com/pandas-dev/pandas/pull/17710\n global _registered\n if not _registered:\n pd.plotting.register_matplotlib_converters()\n _registered = True\n\n\ndef import_matplotlib_pyplot():\n \"\"\"Import pyplot as register appropriate converters.\"\"\"\n register_pandas_datetime_converter_if_needed()\n import matplotlib.pyplot as plt\n\n return plt\n\n\ndef _determine_extend(calc_data, vmin, vmax):\n extend_min = calc_data.min() < vmin\n extend_max = calc_data.max() > vmax\n if extend_min and extend_max:\n extend = \"both\"\n elif extend_min:\n extend = \"min\"\n elif extend_max:\n extend = \"max\"\n else:\n extend = \"neither\"\n return extend\n\n\ndef _build_discrete_cmap(cmap, levels, extend, filled):\n \"\"\"\n Build a discrete colormap and normalization of the data.\n \"\"\"\n import matplotlib as mpl\n\n if not filled:\n # non-filled contour plots\n extend = \"max\"\n\n if extend == \"both\":\n ext_n = 2\n elif extend in [\"min\", \"max\"]:\n ext_n = 1\n else:\n ext_n = 0\n\n n_colors = len(levels) + ext_n - 1\n pal = _color_palette(cmap, n_colors)\n\n new_cmap, cnorm = mpl.colors.from_levels_and_colors(levels, pal, extend=extend)\n # copy the old cmap name, for easier testing\n new_cmap.name = getattr(cmap, \"name\", cmap)\n\n return new_cmap, cnorm\n\n\ndef _color_palette(cmap, n_colors):\n import matplotlib.pyplot as plt\n from matplotlib.colors import ListedColormap\n\n colors_i = np.linspace(0, 1.0, n_colors)\n if 
isinstance(cmap, (list, tuple)):\n # we have a list of colors\n cmap = ListedColormap(cmap, N=n_colors)\n pal = cmap(colors_i)\n elif isinstance(cmap, str):\n # we have some sort of named palette\n try:\n # is this a matplotlib cmap?\n cmap = plt.get_cmap(cmap)\n pal = cmap(colors_i)\n except ValueError:\n # ValueError happens when mpl doesn't like a colormap, try seaborn\n try:\n from seaborn.apionly import color_palette\n\n pal = color_palette(cmap, n_colors=n_colors)\n except (ValueError, ImportError):\n # or maybe we just got a single color as a string\n cmap = ListedColormap([cmap], N=n_colors)\n pal = cmap(colors_i)\n else:\n # cmap better be a LinearSegmentedColormap (e.g. viridis)\n pal = cmap(colors_i)\n\n return pal\n\n\n# _determine_cmap_params is adapted from Seaborn:\n# https://github.com/mwaskom/seaborn/blob/v0.6/seaborn/matrix.py#L158\n# Used under the terms of Seaborn's license, see licenses/SEABORN_LICENSE.\n\n\ndef _determine_cmap_params(\n plot_data,\n vmin=None,\n vmax=None,\n cmap=None,\n center=None,\n robust=False,\n extend=None,\n levels=None,\n filled=True,\n norm=None,\n):\n \"\"\"\n Use some heuristics to set good defaults for colorbar and range.\n\n Parameters\n ==========\n plot_data: Numpy array\n Doesn't handle xarray objects\n\n Returns\n =======\n cmap_params : dict\n Use depends on the type of the plotting function\n \"\"\"\n import matplotlib as mpl\n\n calc_data = np.ravel(plot_data[np.isfinite(plot_data)])\n\n # Handle all-NaN input data gracefully\n if calc_data.size == 0:\n # Arbitrary default for when all values are NaN\n calc_data = np.array(0.0)\n\n # Setting center=False prevents a divergent cmap\n possibly_divergent = center is not False\n\n # Set center to 0 so math below makes sense but remember its state\n center_is_none = False\n if center is None:\n center = 0\n center_is_none = True\n\n # Setting both vmin and vmax prevents a divergent cmap\n if (vmin is not None) and (vmax is not None):\n possibly_divergent = False\n\n # Setting vmin or vmax implies linspaced levels\n user_minmax = (vmin is not None) or (vmax is not None)\n\n # vlim might be computed below\n vlim = None\n\n # save state; needed later\n vmin_was_none = vmin is None\n vmax_was_none = vmax is None\n\n if vmin is None:\n if robust:\n vmin = np.percentile(calc_data, ROBUST_PERCENTILE)\n else:\n vmin = calc_data.min()\n elif possibly_divergent:\n vlim = abs(vmin - center)\n\n if vmax is None:\n if robust:\n vmax = np.percentile(calc_data, 100 - ROBUST_PERCENTILE)\n else:\n vmax = calc_data.max()\n elif possibly_divergent:\n vlim = abs(vmax - center)\n\n if possibly_divergent:\n # kwargs not specific about divergent or not: infer defaults from data\n divergent = ((vmin < 0) and (vmax > 0)) or not center_is_none\n else:\n divergent = False\n\n # A divergent map should be symmetric around the center value\n if divergent:\n if vlim is None:\n vlim = max(abs(vmin - center), abs(vmax - center))\n vmin, vmax = -vlim, vlim\n\n # Now add in the centering value and set the limits\n vmin += center\n vmax += center\n\n # now check norm and harmonize with vmin, vmax\n if norm is not None:\n if norm.vmin is None:\n norm.vmin = vmin\n else:\n if not vmin_was_none and vmin != norm.vmin:\n raise ValueError(\n \"Cannot supply vmin and a norm\" + \" with a different vmin.\"\n )\n vmin = norm.vmin\n\n if norm.vmax is None:\n norm.vmax = vmax\n else:\n if not vmax_was_none and vmax != norm.vmax:\n raise ValueError(\n \"Cannot supply vmax and a norm\" + \" with a different vmax.\"\n )\n vmax = 
norm.vmax\n\n # if BoundaryNorm, then set levels\n if isinstance(norm, mpl.colors.BoundaryNorm):\n levels = norm.boundaries\n\n # Choose default colormaps if not provided\n if cmap is None:\n if divergent:\n cmap = OPTIONS[\"cmap_divergent\"]\n else:\n cmap = OPTIONS[\"cmap_sequential\"]\n\n # Handle discrete levels\n if levels is not None and norm is None:\n if is_scalar(levels):\n if user_minmax:\n levels = np.linspace(vmin, vmax, levels)\n elif levels == 1:\n levels = np.asarray([(vmin + vmax) / 2])\n else:\n # N in MaxNLocator refers to bins, not ticks\n ticker = mpl.ticker.MaxNLocator(levels - 1)\n levels = ticker.tick_values(vmin, vmax)\n vmin, vmax = levels[0], levels[-1]\n\n if extend is None:\n extend = _determine_extend(calc_data, vmin, vmax)\n\n if levels is not None or isinstance(norm, mpl.colors.BoundaryNorm):\n cmap, newnorm = _build_discrete_cmap(cmap, levels, extend, filled)\n norm = newnorm if norm is None else norm\n\n return dict(\n vmin=vmin, vmax=vmax, cmap=cmap, extend=extend, levels=levels, norm=norm\n )\n\n\ndef _infer_xy_labels_3d(darray, x, y, rgb):\n \"\"\"\n Determine x and y labels for showing RGB images.\n\n Attempts to infer which dimension is RGB/RGBA by size and order of dims.\n\n \"\"\"\n assert rgb is None or rgb != x\n assert rgb is None or rgb != y\n # Start by detecting and reporting invalid combinations of arguments\n assert darray.ndim == 3\n not_none = [a for a in (x, y, rgb) if a is not None]\n if len(set(not_none)) < len(not_none):\n raise ValueError(\n \"Dimension names must be None or unique strings, but imshow was \"\n \"passed x=%r, y=%r, and rgb=%r.\" % (x, y, rgb)\n )\n for label in not_none:\n if label not in darray.dims:\n raise ValueError(\"%r is not a dimension\" % (label,))\n\n # Then calculate rgb dimension if certain and check validity\n could_be_color = [\n label\n for label in darray.dims\n if darray[label].size in (3, 4) and label not in (x, y)\n ]\n if rgb is None and not could_be_color:\n raise ValueError(\n \"A 3-dimensional array was passed to imshow(), but there is no \"\n \"dimension that could be color. At least one dimension must be \"\n \"of size 3 (RGB) or 4 (RGBA), and not given as x or y.\"\n )\n if rgb is None and len(could_be_color) == 1:\n rgb = could_be_color[0]\n if rgb is not None and darray[rgb].size not in (3, 4):\n raise ValueError(\n \"Cannot interpret dim %r of size %s as RGB or RGBA.\"\n % (rgb, darray[rgb].size)\n )\n\n # If rgb dimension is still unknown, there must be two or three dimensions\n # in could_be_color. We therefore warn, and use a heuristic to break ties.\n if rgb is None:\n assert len(could_be_color) in (2, 3)\n rgb = could_be_color[-1]\n warnings.warn(\n \"Several dimensions of this array could be colors. Xarray \"\n \"will use the last possible dimension (%r) to match \"\n \"matplotlib.pyplot.imshow. You can pass names of x, y, \"\n \"and/or rgb dimensions to override this guess.\" % rgb\n )\n assert rgb is not None\n\n # Finally, we pick out the red slice and delegate to the 2D version:\n return _infer_xy_labels(darray.isel(**{rgb: 0}), x, y)\n\n\ndef _infer_xy_labels(darray, x, y, imshow=False, rgb=None):\n \"\"\"\n Determine x and y labels. 
For use in _plot2d\n\n darray must be a 2 dimensional data array, or 3d for imshow only.\n \"\"\"\n assert x is None or x != y\n if imshow and darray.ndim == 3:\n return _infer_xy_labels_3d(darray, x, y, rgb)\n\n if x is None and y is None:\n if darray.ndim != 2:\n raise ValueError(\"DataArray must be 2d\")\n y, x = darray.dims\n elif x is None:\n if y not in darray.dims and y not in darray.coords:\n raise ValueError(\"y must be a dimension name if x is not supplied\")\n x = darray.dims[0] if y == darray.dims[1] else darray.dims[1]\n elif y is None:\n if x not in darray.dims and x not in darray.coords:\n raise ValueError(\"x must be a dimension name if y is not supplied\")\n y = darray.dims[0] if x == darray.dims[1] else darray.dims[1]\n elif any(k not in darray.coords and k not in darray.dims for k in (x, y)):\n raise ValueError(\"x and y must be coordinate variables\")\n return x, y\n\n\ndef get_axis(figsize, size, aspect, ax):\n import matplotlib as mpl\n import matplotlib.pyplot as plt\n\n if figsize is not None:\n if ax is not None:\n raise ValueError(\"cannot provide both `figsize` and \" \"`ax` arguments\")\n if size is not None:\n raise ValueError(\"cannot provide both `figsize` and \" \"`size` arguments\")\n _, ax = plt.subplots(figsize=figsize)\n elif size is not None:\n if ax is not None:\n raise ValueError(\"cannot provide both `size` and `ax` arguments\")\n if aspect is None:\n width, height = mpl.rcParams[\"figure.figsize\"]\n aspect = width / height\n figsize = (size * aspect, size)\n _, ax = plt.subplots(figsize=figsize)\n elif aspect is not None:\n raise ValueError(\"cannot provide `aspect` argument without `size`\")\n\n if ax is None:\n ax = plt.gca()\n\n return ax\n\n\ndef label_from_attrs(da, extra=\"\"):\n \"\"\" Makes informative labels if variable metadata (attrs) follows\n CF conventions. \"\"\"\n\n if da.attrs.get(\"long_name\"):\n name = da.attrs[\"long_name\"]\n elif da.attrs.get(\"standard_name\"):\n name = da.attrs[\"standard_name\"]\n elif da.name is not None:\n name = da.name\n else:\n name = \"\"\n\n if da.attrs.get(\"units\"):\n units = \" [{}]\".format(da.attrs[\"units\"])\n else:\n units = \"\"\n\n return \"\\n\".join(textwrap.wrap(name + extra + units, 30))\n\n\ndef _interval_to_mid_points(array):\n \"\"\"\n Helper function which returns an array\n with the Intervals' mid points.\n \"\"\"\n\n return np.array([x.mid for x in array])\n\n\ndef _interval_to_bound_points(array):\n \"\"\"\n Helper function which returns an array\n with the Intervals' boundaries.\n \"\"\"\n\n array_boundaries = np.array([x.left for x in array])\n array_boundaries = np.concatenate((array_boundaries, np.array([array[-1].right])))\n\n return array_boundaries\n\n\ndef _interval_to_double_bound_points(xarray, yarray):\n \"\"\"\n Helper function to deal with a xarray consisting of pd.Intervals. Each\n interval is replaced with both boundaries. I.e. the length of xarray\n doubles. 
yarray is modified so it matches the new shape of xarray.\n \"\"\"\n\n xarray1 = np.array([x.left for x in xarray])\n xarray2 = np.array([x.right for x in xarray])\n\n xarray = list(itertools.chain.from_iterable(zip(xarray1, xarray2)))\n yarray = list(itertools.chain.from_iterable(zip(yarray, yarray)))\n\n return xarray, yarray\n\n\ndef _resolve_intervals_2dplot(val, func_name):\n \"\"\"\n Helper function to replace the values of a coordinate array containing\n pd.Interval with their mid-points or - for pcolormesh - boundaries which\n increases length by 1.\n \"\"\"\n label_extra = \"\"\n if _valid_other_type(val, [pd.Interval]):\n if func_name == \"pcolormesh\":\n val = _interval_to_bound_points(val)\n else:\n val = _interval_to_mid_points(val)\n label_extra = \"_center\"\n\n return val, label_extra\n\n\ndef _valid_other_type(x, types):\n \"\"\"\n Do all elements of x have a type from types?\n \"\"\"\n return all(any(isinstance(el, t) for t in types) for el in np.ravel(x))\n\n\ndef _valid_numpy_subdtype(x, numpy_types):\n \"\"\"\n Is any dtype from numpy_types superior to the dtype of x?\n \"\"\"\n # If any of the types given in numpy_types is understood as numpy.generic,\n # all possible x will be considered valid. This is probably unwanted.\n for t in numpy_types:\n assert not np.issubdtype(np.generic, t)\n\n return any(np.issubdtype(x.dtype, t) for t in numpy_types)\n\n\ndef _ensure_plottable(*args):\n \"\"\"\n Raise exception if there is anything in args that can't be plotted on an\n axis by matplotlib.\n \"\"\"\n numpy_types = [np.floating, np.integer, np.timedelta64, np.datetime64]\n other_types = [datetime]\n try:\n import cftime\n\n cftime_datetime = [cftime.datetime]\n except ImportError:\n cftime_datetime = []\n other_types = other_types + cftime_datetime\n for x in args:\n if not (\n _valid_numpy_subdtype(np.array(x), numpy_types)\n or _valid_other_type(np.array(x), other_types)\n ):\n raise TypeError(\n \"Plotting requires coordinates to be numeric \"\n \"or dates of type np.datetime64, \"\n \"datetime.datetime, cftime.datetime or \"\n \"pd.Interval.\"\n )\n if (\n _valid_other_type(np.array(x), cftime_datetime)\n and not nc_time_axis_available\n ):\n raise ImportError(\n \"Plotting of arrays of cftime.datetime \"\n \"objects or arrays indexed by \"\n \"cftime.datetime objects requires the \"\n \"optional `nc-time-axis` (v1.2.0 or later) \"\n \"package.\"\n )\n\n\ndef _is_numeric(arr):\n numpy_types = [np.floating, np.integer]\n return _valid_numpy_subdtype(arr, numpy_types)\n\n\ndef _add_colorbar(primitive, ax, cbar_ax, cbar_kwargs, cmap_params):\n plt = import_matplotlib_pyplot()\n cbar_kwargs.setdefault(\"extend\", cmap_params[\"extend\"])\n if cbar_ax is None:\n cbar_kwargs.setdefault(\"ax\", ax)\n else:\n cbar_kwargs.setdefault(\"cax\", cbar_ax)\n\n cbar = plt.colorbar(primitive, **cbar_kwargs)\n\n return cbar\n\n\ndef _rescale_imshow_rgb(darray, vmin, vmax, robust):\n assert robust or vmin is not None or vmax is not None\n # TODO: remove when min numpy version is bumped to 1.13\n # There's a cyclic dependency via DataArray, so we can't import from\n # xarray.ufuncs in global scope.\n from xarray.ufuncs import maximum, minimum\n\n # Calculate vmin and vmax automatically for `robust=True`\n if robust:\n if vmax is None:\n vmax = np.nanpercentile(darray, 100 - ROBUST_PERCENTILE)\n if vmin is None:\n vmin = np.nanpercentile(darray, ROBUST_PERCENTILE)\n # If not robust and one bound is None, calculate the default other bound\n # and check that an interval between them 
exists.\n elif vmax is None:\n vmax = 255 if np.issubdtype(darray.dtype, np.integer) else 1\n if vmax < vmin:\n raise ValueError(\n \"vmin=%r is less than the default vmax (%r) - you must supply \"\n \"a vmax > vmin in this case.\" % (vmin, vmax)\n )\n elif vmin is None:\n vmin = 0\n if vmin > vmax:\n raise ValueError(\n \"vmax=%r is less than the default vmin (0) - you must supply \"\n \"a vmin < vmax in this case.\" % vmax\n )\n # Scale interval [vmin .. vmax] to [0 .. 1], with darray as 64-bit float\n # to avoid precision loss, integer over/underflow, etc with extreme inputs.\n # After scaling, downcast to 32-bit float. This substantially reduces\n # memory usage after we hand `darray` off to matplotlib.\n darray = ((darray.astype(\"f8\") - vmin) / (vmax - vmin)).astype(\"f4\")\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \"xarray.ufuncs\", PendingDeprecationWarning)\n return minimum(maximum(darray, 0), 1)\n\n\ndef _update_axes(\n ax,\n xincrease,\n yincrease,\n xscale=None,\n yscale=None,\n xticks=None,\n yticks=None,\n xlim=None,\n ylim=None,\n):\n \"\"\"\n Update axes with provided parameters\n \"\"\"\n if xincrease is None:\n pass\n elif xincrease and ax.xaxis_inverted():\n ax.invert_xaxis()\n elif not xincrease and not ax.xaxis_inverted():\n ax.invert_xaxis()\n\n if yincrease is None:\n pass\n elif yincrease and ax.yaxis_inverted():\n ax.invert_yaxis()\n elif not yincrease and not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # The default xscale, yscale needs to be None.\n # If we set a scale it resets the axes formatters,\n # This means that set_xscale('linear') on a datetime axis\n # will remove the date labels. So only set the scale when explicitly\n # asked to. https://github.com/matplotlib/matplotlib/issues/8740\n if xscale is not None:\n ax.set_xscale(xscale)\n if yscale is not None:\n ax.set_yscale(yscale)\n\n if xticks is not None:\n ax.set_xticks(xticks)\n if yticks is not None:\n ax.set_yticks(yticks)\n\n if xlim is not None:\n ax.set_xlim(xlim)\n if ylim is not None:\n ax.set_ylim(ylim)\n\n\ndef _is_monotonic(coord, axis=0):\n \"\"\"\n >>> _is_monotonic(np.array([0, 1, 2]))\n True\n >>> _is_monotonic(np.array([2, 1, 0]))\n True\n >>> _is_monotonic(np.array([0, 2, 1]))\n False\n \"\"\"\n if coord.shape[axis] < 3:\n return True\n else:\n n = coord.shape[axis]\n delta_pos = coord.take(np.arange(1, n), axis=axis) >= coord.take(\n np.arange(0, n - 1), axis=axis\n )\n delta_neg = coord.take(np.arange(1, n), axis=axis) <= coord.take(\n np.arange(0, n - 1), axis=axis\n )\n return np.all(delta_pos) or np.all(delta_neg)\n\n\ndef _infer_interval_breaks(coord, axis=0, check_monotonic=False):\n \"\"\"\n >>> _infer_interval_breaks(np.arange(5))\n array([-0.5, 0.5, 1.5, 2.5, 3.5, 4.5])\n >>> _infer_interval_breaks([[0, 1], [3, 4]], axis=1)\n array([[-0.5, 0.5, 1.5],\n [ 2.5, 3.5, 4.5]])\n \"\"\"\n coord = np.asarray(coord)\n\n if check_monotonic and not _is_monotonic(coord, axis=axis):\n raise ValueError(\n \"The input coordinate is not sorted in increasing \"\n \"order along axis %d. This can lead to unexpected \"\n \"results. Consider calling the `sortby` method on \"\n \"the input DataArray. 
To plot data with categorical \"\n \"axes, consider using the `heatmap` function from \"\n \"the `seaborn` statistical plotting library.\" % axis\n )\n\n deltas = 0.5 * np.diff(coord, axis=axis)\n if deltas.size == 0:\n deltas = np.array(0.0)\n first = np.take(coord, [0], axis=axis) - np.take(deltas, [0], axis=axis)\n last = np.take(coord, [-1], axis=axis) + np.take(deltas, [-1], axis=axis)\n trim_last = tuple(\n slice(None, -1) if n == axis else slice(None) for n in range(coord.ndim)\n )\n return np.concatenate([first, coord[trim_last] + deltas, last], axis=axis)\n\n\ndef _process_cmap_cbar_kwargs(\n func,\n data,\n cmap=None,\n colors=None,\n cbar_kwargs: Union[Iterable[Tuple[str, Any]], Mapping[str, Any]] = None,\n levels=None,\n **kwargs\n):\n \"\"\"\n Parameters\n ==========\n func : plotting function\n data : ndarray,\n Data values\n\n Returns\n =======\n cmap_params\n\n cbar_kwargs\n \"\"\"\n cbar_kwargs = {} if cbar_kwargs is None else dict(cbar_kwargs)\n\n if \"contour\" in func.__name__ and levels is None:\n levels = 7 # this is the matplotlib default\n\n # colors is mutually exclusive with cmap\n if cmap and colors:\n raise ValueError(\"Can't specify both cmap and colors.\")\n\n # colors is only valid when levels is supplied or the plot is of type\n # contour or contourf\n if colors and ((\"contour\" not in func.__name__) and (levels is None)):\n raise ValueError(\"Can only specify colors with contour or levels\")\n\n # we should not be getting a list of colors in cmap anymore\n # is there a better way to do this test?\n if isinstance(cmap, (list, tuple)):\n raise ValueError(\n \"Specifying a list of colors in cmap is deprecated. \"\n \"Use colors keyword instead.\"\n )\n\n cmap_kwargs = {\n \"plot_data\": data,\n \"levels\": levels,\n \"cmap\": colors if colors else cmap,\n \"filled\": func.__name__ != \"contour\",\n }\n\n cmap_args = getfullargspec(_determine_cmap_params).args\n cmap_kwargs.update((a, kwargs[a]) for a in cmap_args if a in kwargs)\n cmap_params = _determine_cmap_params(**cmap_kwargs)\n\n return cmap_params, cbar_kwargs\n"},{"id":2933,"name":"why-xarray.rst","nodeType":"TextFile","path":"doc","text":"Overview: Why xarray?\n=====================\n\nXarray introduces labels in the form of dimensions, coordinates and attributes on top of\nraw NumPy-like multidimensional arrays, which allows for a more intuitive, more concise,\nand less error-prone developer experience.\n\nWhat labels enable\n------------------\n\nMulti-dimensional (a.k.a. N-dimensional, ND) arrays (sometimes called\n\"tensors\") are an essential part of computational science.\nThey are encountered in a wide range of fields, including physics, astronomy,\ngeoscience, bioinformatics, engineering, finance, and deep learning.\nIn Python, NumPy_ provides the fundamental data structure and API for\nworking with raw ND arrays.\nHowever, real-world datasets are usually more than just raw numbers;\nthey have labels which encode information about how the array values map\nto locations in space, time, etc.\n\nXarray doesn't just keep track of labels on arrays -- it uses them to provide a\npowerful and concise interface. 
For example:\n\n- Apply operations over dimensions by name: ``x.sum('time')``.\n- Select values by label (or logical location) instead of integer location:\n ``x.loc['2014-01-01']`` or ``x.sel(time='2014-01-01')``.\n- Mathematical operations (e.g., ``x - y``) vectorize across multiple\n dimensions (array broadcasting) based on dimension names, not shape.\n- Easily use the `split-apply-combine `_\n paradigm with ``groupby``:\n ``x.groupby('time.dayofyear').mean()``.\n- Database-like alignment based on coordinate labels that smoothly\n handles missing values: ``x, y = xr.align(x, y, join='outer')``.\n- Keep track of arbitrary metadata in the form of a Python dictionary:\n ``x.attrs``.\n\nThe N-dimensional nature of xarray's data structures makes it suitable for dealing\nwith multi-dimensional scientific data, and its use of dimension names\ninstead of axis labels (``dim='time'`` instead of ``axis=0``) makes such\narrays much more manageable than the raw numpy ndarray: with xarray, you don't\nneed to keep track of the order of an array's dimensions or insert dummy dimensions of\nsize 1 to align arrays (e.g., using ``np.newaxis``).\n\nThe immediate payoff of using xarray is that you'll write less code. The\nlong-term payoff is that you'll understand what you were thinking when you come\nback to look at it weeks or months later.\n\nCore data structures\n--------------------\n\nxarray has two core data structures, which build upon and extend the core\nstrengths of NumPy_ and pandas_. Both data structures are fundamentally N-dimensional:\n\n- :py:class:`~xarray.DataArray` is our implementation of a labeled, N-dimensional\n array. It is an N-D generalization of a :py:class:`pandas.Series`. The name\n ``DataArray`` itself is borrowed from Fernando Perez's datarray_ project,\n which prototyped a similar data structure.\n- :py:class:`~xarray.Dataset` is a multi-dimensional, in-memory array database.\n It is a dict-like container of ``DataArray`` objects aligned along any number of\n shared dimensions, and serves a similar purpose in xarray to the\n :py:class:`pandas.DataFrame`.\n\nThe value of attaching labels to numpy's :py:class:`numpy.ndarray` may be\nfairly obvious, but the dataset may need more motivation.\n\nThe power of the dataset over a plain dictionary is that, in addition to\npulling out arrays by name, it is possible to select or combine data along a\ndimension across all arrays simultaneously. Like a\n:py:class:`~pandas.DataFrame`, datasets facilitate array operations with\nheterogeneous data -- the difference is that the arrays in a dataset can have \nnot only different data types, but also different numbers of dimensions.\n\nThis data model is borrowed from the netCDF_ file format, which also provides\nxarray with a natural and portable serialization format. NetCDF is very popular\nin the geosciences, and there are existing libraries for reading and writing\nnetCDF in many programming languages, including Python.\n\nxarray distinguishes itself from many tools for working with netCDF data\nin-so-far as it provides data structures for in-memory analytics that both\nutilize and preserve labels. 
You only need to do the tedious work of adding\nmetadata once, not every time you save a file.\n\nGoals and aspirations\n---------------------\n\nXarray contributes domain-agnostic data-structures and tools for labeled\nmulti-dimensional arrays to Python's SciPy_ ecosystem for numerical computing.\nIn particular, xarray builds upon and integrates with NumPy_ and pandas_:\n\n- Our user-facing interfaces aim to be more explicit verisons of those found in\n NumPy/pandas.\n- Compatibility with the broader ecosystem is a major goal: it should be easy\n to get your data in and out.\n- We try to keep a tight focus on functionality and interfaces related to\n labeled data, and leverage other Python libraries for everything else, e.g.,\n NumPy/pandas for fast arrays/indexing (xarray itself contains no compiled\n code), Dask_ for parallel computing, matplotlib_ for plotting, etc.\n\nXarray is a collaborative and community driven project, run entirely on\nvolunteer effort (see :ref:`contributing`).\nOur target audience is anyone who needs N-dimensional labeled arrays in Python.\nOriginally, development was driven by the data analysis needs of physical\nscientists (especially geoscientists who already know and love\nnetCDF_), but it has become a much more broadly useful tool, and is still\nunder active development.\nSee our technical :ref:`roadmap` for more details, and feel free to reach out\nwith questions about whether xarray is the right tool for your needs.\n\n.. _datarray: https://github.com/fperez/datarray\n.. _Dask: http://dask.org\n.. _matplotlib: http://matplotlib.org\n.. _netCDF: http://www.unidata.ucar.edu/software/netcdf\n.. _NumPy: http://www.numpy.org\n.. _pandas: http://pandas.pydata.org\n.. _SciPy: http://www.scipy.org\n"},{"id":2934,"name":"style.css","nodeType":"TextFile","path":"doc/_static","text":"@import url(\"theme.css\");\n\n.wy-side-nav-search>a img.logo,\n.wy-side-nav-search .wy-dropdown>a img.logo {\n width: 12rem\n}\n\n.wy-side-nav-search {\n background-color: #eee;\n}\n\n.wy-side-nav-search>div.version {\n display: none;\n}\n\n.wy-nav-top {\n background-color: #555;\n}\n\ntable.colwidths-given {\n table-layout: fixed;\n width: 100%;\n}\ntable.docutils td {\n white-space: unset;\n word-wrap: break-word;\n}\n"},{"fileName":"plot.py","filePath":"xarray/plot","id":2935,"nodeType":"File","text":"\"\"\"\nUse this module directly:\n import xarray.plot as xplt\n\nOr use the methods on a DataArray or Dataset:\n DataArray.plot._____\n Dataset.plot._____\n\"\"\"\nimport functools\n\nimport numpy as np\nimport pandas as pd\n\nfrom .facetgrid import _easy_facetgrid\nfrom .utils import (\n _add_colorbar,\n _ensure_plottable,\n _infer_interval_breaks,\n _infer_xy_labels,\n _interval_to_double_bound_points,\n _interval_to_mid_points,\n _process_cmap_cbar_kwargs,\n _rescale_imshow_rgb,\n _resolve_intervals_2dplot,\n _update_axes,\n _valid_other_type,\n get_axis,\n import_matplotlib_pyplot,\n label_from_attrs,\n)\n\n\ndef _infer_line_data(darray, x, y, hue):\n error_msg = \"must be either None or one of ({:s})\".format(\n \", \".join([repr(dd) for dd in darray.dims])\n )\n ndims = len(darray.dims)\n\n if x is not None and x not in darray.dims and x not in darray.coords:\n raise ValueError(\"x \" + error_msg)\n\n if y is not None and y not in darray.dims and y not in darray.coords:\n raise ValueError(\"y \" + error_msg)\n\n if x is not None and y is not None:\n raise ValueError(\"You cannot specify both x and y kwargs\" \"for line plots.\")\n\n if ndims == 1:\n huename = None\n hueplt = 
None\n huelabel = \"\"\n\n if x is not None:\n xplt = darray[x]\n yplt = darray\n\n elif y is not None:\n xplt = darray\n yplt = darray[y]\n\n else: # Both x & y are None\n dim = darray.dims[0]\n xplt = darray[dim]\n yplt = darray\n\n else:\n if x is None and y is None and hue is None:\n raise ValueError(\"For 2D inputs, please\" \"specify either hue, x or y.\")\n\n if y is None:\n xname, huename = _infer_xy_labels(darray=darray, x=x, y=hue)\n xplt = darray[xname]\n if xplt.ndim > 1:\n if huename in darray.dims:\n otherindex = 1 if darray.dims.index(huename) == 0 else 0\n otherdim = darray.dims[otherindex]\n yplt = darray.transpose(otherdim, huename, transpose_coords=False)\n xplt = xplt.transpose(otherdim, huename, transpose_coords=False)\n else:\n raise ValueError(\n \"For 2D inputs, hue must be a dimension\"\n \" i.e. one of \" + repr(darray.dims)\n )\n\n else:\n xdim, = darray[xname].dims\n huedim, = darray[huename].dims\n yplt = darray.transpose(xdim, huedim)\n\n else:\n yname, huename = _infer_xy_labels(darray=darray, x=y, y=hue)\n yplt = darray[yname]\n if yplt.ndim > 1:\n if huename in darray.dims:\n otherindex = 1 if darray.dims.index(huename) == 0 else 0\n otherdim = darray.dims[otherindex]\n xplt = darray.transpose(otherdim, huename, transpose_coords=False)\n else:\n raise ValueError(\n \"For 2D inputs, hue must be a dimension\"\n \" i.e. one of \" + repr(darray.dims)\n )\n\n else:\n ydim, = darray[yname].dims\n huedim, = darray[huename].dims\n xplt = darray.transpose(ydim, huedim)\n\n huelabel = label_from_attrs(darray[huename])\n hueplt = darray[huename]\n\n xlabel = label_from_attrs(xplt)\n ylabel = label_from_attrs(yplt)\n\n return xplt, yplt, hueplt, xlabel, ylabel, huelabel\n\n\ndef plot(\n darray,\n row=None,\n col=None,\n col_wrap=None,\n ax=None,\n hue=None,\n rtol=0.01,\n subplot_kws=None,\n **kwargs\n):\n \"\"\"\n Default plot of DataArray using matplotlib.pyplot.\n\n Calls xarray plotting function based on the dimensions of\n darray.squeeze()\n\n =============== ===========================\n Dimensions Plotting function\n --------------- ---------------------------\n 1 :py:func:`xarray.plot.line`\n 2 :py:func:`xarray.plot.pcolormesh`\n Anything else :py:func:`xarray.plot.hist`\n =============== ===========================\n\n Parameters\n ----------\n darray : DataArray\n row : string, optional\n If passed, make row faceted plots on this dimension name\n col : string, optional\n If passed, make column faceted plots on this dimension name\n hue : string, optional\n If passed, make faceted line plots with hue on this dimension name\n col_wrap : integer, optional\n Use together with ``col`` to wrap faceted plots\n ax : matplotlib axes, optional\n If None, uses the current axis. Not applicable when using facets.\n rtol : number, optional\n Relative tolerance used to determine if the indexes\n are uniformly spaced. Usually a small positive number.\n subplot_kws : dict, optional\n Dictionary of keyword arguments for matplotlib subplots. Only applies\n to FacetGrid plotting.\n **kwargs : optional\n Additional keyword arguments to matplotlib\n\n \"\"\"\n darray = darray.squeeze().compute()\n\n plot_dims = set(darray.dims)\n plot_dims.discard(row)\n plot_dims.discard(col)\n plot_dims.discard(hue)\n\n ndims = len(plot_dims)\n\n error_msg = (\n \"Only 1d and 2d plots are supported for facets in xarray. 
\"\n \"See the package `Seaborn` for more options.\"\n )\n\n if ndims in [1, 2]:\n if row or col:\n kwargs[\"row\"] = row\n kwargs[\"col\"] = col\n kwargs[\"col_wrap\"] = col_wrap\n kwargs[\"subplot_kws\"] = subplot_kws\n if ndims == 1:\n plotfunc = line\n kwargs[\"hue\"] = hue\n elif ndims == 2:\n if hue:\n plotfunc = line\n kwargs[\"hue\"] = hue\n else:\n plotfunc = pcolormesh\n else:\n if row or col or hue:\n raise ValueError(error_msg)\n plotfunc = hist\n\n kwargs[\"ax\"] = ax\n\n return plotfunc(darray, **kwargs)\n\n\n# This function signature should not change so that it can use\n# matplotlib format strings\ndef line(\n darray,\n *args,\n row=None,\n col=None,\n figsize=None,\n aspect=None,\n size=None,\n ax=None,\n hue=None,\n x=None,\n y=None,\n xincrease=None,\n yincrease=None,\n xscale=None,\n yscale=None,\n xticks=None,\n yticks=None,\n xlim=None,\n ylim=None,\n add_legend=True,\n _labels=True,\n **kwargs\n):\n \"\"\"\n Line plot of DataArray index against values\n\n Wraps :func:`matplotlib:matplotlib.pyplot.plot`\n\n Parameters\n ----------\n darray : DataArray\n Must be 1 dimensional\n figsize : tuple, optional\n A tuple (width, height) of the figure in inches.\n Mutually exclusive with ``size`` and ``ax``.\n aspect : scalar, optional\n Aspect ratio of plot, so that ``aspect * size`` gives the width in\n inches. Only used if a ``size`` is provided.\n size : scalar, optional\n If provided, create a new figure for the plot with the given size.\n Height (in inches) of each plot. See also: ``aspect``.\n ax : matplotlib axes object, optional\n Axis on which to plot this figure. By default, use the current axis.\n Mutually exclusive with ``size`` and ``figsize``.\n hue : string, optional\n Dimension or coordinate for which you want multiple lines plotted.\n If plotting against a 2D coordinate, ``hue`` must be a dimension.\n x, y : string, optional\n Dimensions or coordinates for x, y axis.\n Only one of these may be specified.\n The other coordinate plots values from the DataArray on which this\n plot method is called.\n xscale, yscale : 'linear', 'symlog', 'log', 'logit', optional\n Specifies scaling for the x- and y-axes respectively\n xticks, yticks : Specify tick locations for x- and y-axes\n xlim, ylim : Specify x- and y-axes limits\n xincrease : None, True, or False, optional\n Should the values on the x axes be increasing from left to right?\n if None, use the default for the matplotlib function.\n yincrease : None, True, or False, optional\n Should the values on the y axes be increasing from top to bottom?\n if None, use the default for the matplotlib function.\n add_legend : boolean, optional\n Add legend with y axis coordinates (2D inputs only).\n *args, **kwargs : optional\n Additional arguments to matplotlib.pyplot.plot\n \"\"\"\n # Handle facetgrids first\n if row or col:\n allargs = locals().copy()\n allargs.update(allargs.pop(\"kwargs\"))\n allargs.pop(\"darray\")\n return _easy_facetgrid(darray, line, kind=\"line\", **allargs)\n\n ndims = len(darray.dims)\n if ndims > 2:\n raise ValueError(\n \"Line plots are for 1- or 2-dimensional DataArrays. 
\"\n \"Passed DataArray has {ndims} \"\n \"dimensions\".format(ndims=ndims)\n )\n\n # The allargs dict passed to _easy_facetgrid above contains args\n if args is ():\n args = kwargs.pop(\"args\", ())\n else:\n assert \"args\" not in kwargs\n\n ax = get_axis(figsize, size, aspect, ax)\n xplt, yplt, hueplt, xlabel, ylabel, hue_label = _infer_line_data(darray, x, y, hue)\n\n # Remove pd.Intervals if contained in xplt.values.\n if _valid_other_type(xplt.values, [pd.Interval]):\n # Is it a step plot? (see matplotlib.Axes.step)\n if kwargs.get(\"linestyle\", \"\").startswith(\"steps-\"):\n xplt_val, yplt_val = _interval_to_double_bound_points(\n xplt.values, yplt.values\n )\n # Remove steps-* to be sure that matplotlib is not confused\n kwargs[\"linestyle\"] = (\n kwargs[\"linestyle\"]\n .replace(\"steps-pre\", \"\")\n .replace(\"steps-post\", \"\")\n .replace(\"steps-mid\", \"\")\n )\n if kwargs[\"linestyle\"] == \"\":\n del kwargs[\"linestyle\"]\n else:\n xplt_val = _interval_to_mid_points(xplt.values)\n yplt_val = yplt.values\n xlabel += \"_center\"\n else:\n xplt_val = xplt.values\n yplt_val = yplt.values\n\n _ensure_plottable(xplt_val, yplt_val)\n\n primitive = ax.plot(xplt_val, yplt_val, *args, **kwargs)\n\n if _labels:\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n\n ax.set_title(darray._title_for_slice())\n\n if darray.ndim == 2 and add_legend:\n ax.legend(handles=primitive, labels=list(hueplt.values), title=hue_label)\n\n # Rotate dates on xlabels\n # Do this without calling autofmt_xdate so that x-axes ticks\n # on other subplots (if any) are not deleted.\n # https://stackoverflow.com/questions/17430105/autofmt-xdate-deletes-x-axis-labels-of-all-subplots\n if np.issubdtype(xplt.dtype, np.datetime64):\n for xlabels in ax.get_xticklabels():\n xlabels.set_rotation(30)\n xlabels.set_ha(\"right\")\n\n _update_axes(ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim)\n\n return primitive\n\n\ndef step(darray, *args, where=\"pre\", linestyle=None, ls=None, **kwargs):\n \"\"\"\n Step plot of DataArray index against values\n\n Similar to :func:`matplotlib:matplotlib.pyplot.step`\n\n Parameters\n ----------\n where : {'pre', 'post', 'mid'}, optional, default 'pre'\n Define where the steps should be placed:\n - 'pre': The y value is continued constantly to the left from\n every *x* position, i.e. the interval ``(x[i-1], x[i]]`` has the\n value ``y[i]``.\n - 'post': The y value is continued constantly to the right from\n every *x* position, i.e. the interval ``[x[i], x[i+1])`` has the\n value ``y[i]``.\n - 'mid': Steps occur half-way between the *x* positions.\n Note that this parameter is ignored if the x coordinate consists of\n :py:func:`pandas.Interval` values, e.g. as a result of\n :py:func:`xarray.Dataset.groupby_bins`. 
In this case, the actual\n boundaries of the interval are used.\n\n *args, **kwargs : optional\n Additional arguments following :py:func:`xarray.plot.line`\n \"\"\"\n if where not in {\"pre\", \"post\", \"mid\"}:\n raise ValueError(\"'where' argument to step must be \" \"'pre', 'post' or 'mid'\")\n\n if ls is not None:\n if linestyle is None:\n linestyle = ls\n else:\n raise TypeError(\"ls and linestyle are mutually exclusive\")\n if linestyle is None:\n linestyle = \"\"\n linestyle = \"steps-\" + where + linestyle\n\n return line(darray, *args, linestyle=linestyle, **kwargs)\n\n\ndef hist(\n darray,\n figsize=None,\n size=None,\n aspect=None,\n ax=None,\n xincrease=None,\n yincrease=None,\n xscale=None,\n yscale=None,\n xticks=None,\n yticks=None,\n xlim=None,\n ylim=None,\n **kwargs\n):\n \"\"\"\n Histogram of DataArray\n\n Wraps :func:`matplotlib:matplotlib.pyplot.hist`\n\n Plots N dimensional arrays by first flattening the array.\n\n Parameters\n ----------\n darray : DataArray\n Can be any dimension\n figsize : tuple, optional\n A tuple (width, height) of the figure in inches.\n Mutually exclusive with ``size`` and ``ax``.\n aspect : scalar, optional\n Aspect ratio of plot, so that ``aspect * size`` gives the width in\n inches. Only used if a ``size`` is provided.\n size : scalar, optional\n If provided, create a new figure for the plot with the given size.\n Height (in inches) of each plot. See also: ``aspect``.\n ax : matplotlib axes object, optional\n Axis on which to plot this figure. By default, use the current axis.\n Mutually exclusive with ``size`` and ``figsize``.\n **kwargs : optional\n Additional keyword arguments to matplotlib.pyplot.hist\n\n \"\"\"\n ax = get_axis(figsize, size, aspect, ax)\n\n no_nan = np.ravel(darray.values)\n no_nan = no_nan[pd.notnull(no_nan)]\n\n primitive = ax.hist(no_nan, **kwargs)\n\n ax.set_title(\"Histogram\")\n ax.set_xlabel(label_from_attrs(darray))\n\n _update_axes(ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim)\n\n return primitive\n\n\n# MUST run before any 2d plotting functions are defined since\n# _plot2d decorator adds them as methods here.\nclass _PlotMethods:\n \"\"\"\n Enables use of xarray.plot functions as attributes on a DataArray.\n For example, DataArray.plot.imshow\n \"\"\"\n\n __slots__ = (\"_da\",)\n\n def __init__(self, darray):\n self._da = darray\n\n def __call__(self, **kwargs):\n return plot(self._da, **kwargs)\n\n @functools.wraps(hist)\n def hist(self, ax=None, **kwargs):\n return hist(self._da, ax=ax, **kwargs)\n\n @functools.wraps(line)\n def line(self, *args, **kwargs):\n return line(self._da, *args, **kwargs)\n\n @functools.wraps(step)\n def step(self, *args, **kwargs):\n return step(self._da, *args, **kwargs)\n\n\ndef _plot2d(plotfunc):\n \"\"\"\n Decorator for common 2d plotting logic\n\n Also adds the 2d plot method to class _PlotMethods\n \"\"\"\n commondoc = \"\"\"\n Parameters\n ----------\n darray : DataArray\n Must be 2 dimensional, unless creating faceted plots\n x : string, optional\n Coordinate for x axis. If None use darray.dims[1]\n y : string, optional\n Coordinate for y axis. If None use darray.dims[0]\n figsize : tuple, optional\n A tuple (width, height) of the figure in inches.\n Mutually exclusive with ``size`` and ``ax``.\n aspect : scalar, optional\n Aspect ratio of plot, so that ``aspect * size`` gives the width in\n inches. 
Only used if a ``size`` is provided.\n size : scalar, optional\n If provided, create a new figure for the plot with the given size.\n Height (in inches) of each plot. See also: ``aspect``.\n ax : matplotlib axes object, optional\n Axis on which to plot this figure. By default, use the current axis.\n Mutually exclusive with ``size`` and ``figsize``.\n row : string, optional\n If passed, make row faceted plots on this dimension name\n col : string, optional\n If passed, make column faceted plots on this dimension name\n col_wrap : integer, optional\n Use together with ``col`` to wrap faceted plots\n xscale, yscale : 'linear', 'symlog', 'log', 'logit', optional\n Specifies scaling for the x- and y-axes respectively\n xticks, yticks : Specify tick locations for x- and y-axes\n xlim, ylim : Specify x- and y-axes limits\n xincrease : None, True, or False, optional\n Should the values on the x axes be increasing from left to right?\n if None, use the default for the matplotlib function.\n yincrease : None, True, or False, optional\n Should the values on the y axes be increasing from top to bottom?\n if None, use the default for the matplotlib function.\n add_colorbar : Boolean, optional\n Adds colorbar to axis\n add_labels : Boolean, optional\n Use xarray metadata to label axes\n norm : ``matplotlib.colors.Normalize`` instance, optional\n If the ``norm`` has vmin or vmax specified, the corresponding kwarg\n must be None.\n vmin, vmax : floats, optional\n Values to anchor the colormap, otherwise they are inferred from the\n data and other keyword arguments. When a diverging dataset is inferred,\n setting one of these values will fix the other by symmetry around\n ``center``. Setting both values prevents use of a diverging colormap.\n If discrete levels are provided as an explicit list, both of these\n values are ignored.\n cmap : matplotlib colormap name or object, optional\n The mapping from data values to color space. If not provided, this\n will be either be ``viridis`` (if the function infers a sequential\n dataset) or ``RdBu_r`` (if the function infers a diverging dataset).\n When `Seaborn` is installed, ``cmap`` may also be a `seaborn`\n color palette. If ``cmap`` is seaborn color palette and the plot type\n is not ``contour`` or ``contourf``, ``levels`` must also be specified.\n colors : discrete colors to plot, optional\n A single color or a list of colors. If the plot type is not ``contour``\n or ``contourf``, the ``levels`` argument is required.\n center : float, optional\n The value at which to center the colormap. Passing this value implies\n use of a diverging colormap. Setting it to ``False`` prevents use of a\n diverging colormap.\n robust : bool, optional\n If True and ``vmin`` or ``vmax`` are absent, the colormap range is\n computed with 2nd and 98th percentiles instead of the extreme values.\n extend : {'neither', 'both', 'min', 'max'}, optional\n How to draw arrows extending the colorbar beyond its limits. If not\n provided, extend is inferred from vmin, vmax and the data limits.\n levels : int or list-like object, optional\n Split the colormap (cmap) into discrete color intervals. If an integer\n is provided, \"nice\" levels are chosen based on the data range: this can\n imply that the final number of levels is not exactly the expected one.\n Setting ``vmin`` and/or ``vmax`` with ``levels=N`` is equivalent to\n setting ``levels=np.linspace(vmin, vmax, N)``.\n infer_intervals : bool, optional\n Only applies to pcolormesh. 
If True, the coordinate intervals are\n passed to pcolormesh. If False, the original coordinates are used\n (this can be useful for certain map projections). The default is to\n always infer intervals, unless the mesh is irregular and plotted on\n a map projection.\n subplot_kws : dict, optional\n Dictionary of keyword arguments for matplotlib subplots. Only applies\n to FacetGrid plotting.\n cbar_ax : matplotlib Axes, optional\n Axes in which to draw the colorbar.\n cbar_kwargs : dict, optional\n Dictionary of keyword arguments to pass to the colorbar.\n **kwargs : optional\n Additional arguments to wrapped matplotlib function\n\n Returns\n -------\n artist :\n The same type of primitive artist that the wrapped matplotlib\n function returns\n \"\"\"\n\n # Build on the original docstring\n plotfunc.__doc__ = \"%s\\n%s\" % (plotfunc.__doc__, commondoc)\n\n @functools.wraps(plotfunc)\n def newplotfunc(\n darray,\n x=None,\n y=None,\n figsize=None,\n size=None,\n aspect=None,\n ax=None,\n row=None,\n col=None,\n col_wrap=None,\n xincrease=True,\n yincrease=True,\n add_colorbar=None,\n add_labels=True,\n vmin=None,\n vmax=None,\n cmap=None,\n center=None,\n robust=False,\n extend=None,\n levels=None,\n infer_intervals=None,\n colors=None,\n subplot_kws=None,\n cbar_ax=None,\n cbar_kwargs=None,\n xscale=None,\n yscale=None,\n xticks=None,\n yticks=None,\n xlim=None,\n ylim=None,\n norm=None,\n **kwargs\n ):\n # All 2d plots in xarray share this function signature.\n # Method signature below should be consistent.\n\n # Decide on a default for the colorbar before facetgrids\n if add_colorbar is None:\n add_colorbar = plotfunc.__name__ != \"contour\"\n imshow_rgb = plotfunc.__name__ == \"imshow\" and darray.ndim == (\n 3 + (row is not None) + (col is not None)\n )\n if imshow_rgb:\n # Don't add a colorbar when showing an image with explicit colors\n add_colorbar = False\n # Matplotlib does not support normalising RGB data, so do it here.\n # See eg. https://github.com/matplotlib/matplotlib/pull/10220\n if robust or vmax is not None or vmin is not None:\n darray = _rescale_imshow_rgb(darray, vmin, vmax, robust)\n vmin, vmax, robust = None, None, False\n\n # Handle facetgrids first\n if row or col:\n allargs = locals().copy()\n del allargs[\"darray\"]\n del allargs[\"imshow_rgb\"]\n allargs.update(allargs.pop(\"kwargs\"))\n # Need the decorated plotting function\n allargs[\"plotfunc\"] = globals()[plotfunc.__name__]\n return _easy_facetgrid(darray, kind=\"dataarray\", **allargs)\n\n plt = import_matplotlib_pyplot()\n\n rgb = kwargs.pop(\"rgb\", None)\n if rgb is not None and plotfunc.__name__ != \"imshow\":\n raise ValueError('The \"rgb\" keyword is only valid for imshow()')\n elif rgb is not None and not imshow_rgb:\n raise ValueError(\n 'The \"rgb\" keyword is only valid for imshow()'\n \"with a three-dimensional array (per facet)\"\n )\n\n xlab, ylab = _infer_xy_labels(\n darray=darray, x=x, y=y, imshow=imshow_rgb, rgb=rgb\n )\n\n # better to pass the ndarrays directly to plotting functions\n xval = darray[xlab].values\n yval = darray[ylab].values\n\n # check if we need to broadcast one dimension\n if xval.ndim < yval.ndim:\n xval = np.broadcast_to(xval, yval.shape)\n\n if yval.ndim < xval.ndim:\n yval = np.broadcast_to(yval, xval.shape)\n\n # May need to transpose for correct x, y labels\n # xlab may be the name of a coord, we have to check for dim names\n if imshow_rgb:\n # For RGB[A] images, matplotlib requires the color dimension\n # to be last. 
In Xarray the order should be unimportant, so\n # we transpose to (y, x, color) to make this work.\n yx_dims = (ylab, xlab)\n dims = yx_dims + tuple(d for d in darray.dims if d not in yx_dims)\n if dims != darray.dims:\n darray = darray.transpose(*dims, transpose_coords=True)\n elif darray[xlab].dims[-1] == darray.dims[0]:\n darray = darray.transpose(transpose_coords=True)\n\n # Pass the data as a masked ndarray too\n zval = darray.to_masked_array(copy=False)\n\n # Replace pd.Intervals if contained in xval or yval.\n xplt, xlab_extra = _resolve_intervals_2dplot(xval, plotfunc.__name__)\n yplt, ylab_extra = _resolve_intervals_2dplot(yval, plotfunc.__name__)\n\n _ensure_plottable(xplt, yplt)\n\n cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs(\n plotfunc, zval.data, **locals()\n )\n\n if \"contour\" in plotfunc.__name__:\n # extend is a keyword argument only for contour and contourf, but\n # passing it to the colorbar is sufficient for imshow and\n # pcolormesh\n kwargs[\"extend\"] = cmap_params[\"extend\"]\n kwargs[\"levels\"] = cmap_params[\"levels\"]\n # if colors == a single color, matplotlib draws dashed negative\n # contours. we lose this feature if we pass cmap and not colors\n if isinstance(colors, str):\n cmap_params[\"cmap\"] = None\n kwargs[\"colors\"] = colors\n\n if \"pcolormesh\" == plotfunc.__name__:\n kwargs[\"infer_intervals\"] = infer_intervals\n\n if \"imshow\" == plotfunc.__name__ and isinstance(aspect, str):\n # forbid usage of mpl strings\n raise ValueError(\n \"plt.imshow's `aspect` kwarg is not available \" \"in xarray\"\n )\n\n ax = get_axis(figsize, size, aspect, ax)\n primitive = plotfunc(\n xplt,\n yplt,\n zval,\n ax=ax,\n cmap=cmap_params[\"cmap\"],\n vmin=cmap_params[\"vmin\"],\n vmax=cmap_params[\"vmax\"],\n norm=cmap_params[\"norm\"],\n **kwargs\n )\n\n # Label the plot with metadata\n if add_labels:\n ax.set_xlabel(label_from_attrs(darray[xlab], xlab_extra))\n ax.set_ylabel(label_from_attrs(darray[ylab], ylab_extra))\n ax.set_title(darray._title_for_slice())\n\n if add_colorbar:\n if add_labels and \"label\" not in cbar_kwargs:\n cbar_kwargs[\"label\"] = label_from_attrs(darray)\n cbar = _add_colorbar(primitive, ax, cbar_ax, cbar_kwargs, cmap_params)\n elif cbar_ax is not None or cbar_kwargs:\n # inform the user about keywords which aren't used\n raise ValueError(\n \"cbar_ax and cbar_kwargs can't be used with \" \"add_colorbar=False.\"\n )\n\n # origin kwarg overrides yincrease\n if \"origin\" in kwargs:\n yincrease = None\n\n _update_axes(\n ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim\n )\n\n # Rotate dates on xlabels\n # Do this without calling autofmt_xdate so that x-axes ticks\n # on other subplots (if any) are not deleted.\n # https://stackoverflow.com/questions/17430105/autofmt-xdate-deletes-x-axis-labels-of-all-subplots\n if np.issubdtype(xplt.dtype, np.datetime64):\n for xlabels in ax.get_xticklabels():\n xlabels.set_rotation(30)\n xlabels.set_ha(\"right\")\n\n return primitive\n\n # For use as DataArray.plot.plotmethod\n @functools.wraps(newplotfunc)\n def plotmethod(\n _PlotMethods_obj,\n x=None,\n y=None,\n figsize=None,\n size=None,\n aspect=None,\n ax=None,\n row=None,\n col=None,\n col_wrap=None,\n xincrease=True,\n yincrease=True,\n add_colorbar=None,\n add_labels=True,\n vmin=None,\n vmax=None,\n cmap=None,\n colors=None,\n center=None,\n robust=False,\n extend=None,\n levels=None,\n infer_intervals=None,\n subplot_kws=None,\n cbar_ax=None,\n cbar_kwargs=None,\n xscale=None,\n yscale=None,\n xticks=None,\n 
yticks=None,\n xlim=None,\n ylim=None,\n norm=None,\n **kwargs\n ):\n \"\"\"\n The method should have the same signature as the function.\n\n This just makes the method work on Plotmethods objects,\n and passes all the other arguments straight through.\n \"\"\"\n allargs = locals()\n allargs[\"darray\"] = _PlotMethods_obj._da\n allargs.update(kwargs)\n for arg in [\"_PlotMethods_obj\", \"newplotfunc\", \"kwargs\"]:\n del allargs[arg]\n return newplotfunc(**allargs)\n\n # Add to class _PlotMethods\n setattr(_PlotMethods, plotmethod.__name__, plotmethod)\n\n return newplotfunc\n\n\n@_plot2d\ndef imshow(x, y, z, ax, **kwargs):\n \"\"\"\n Image plot of 2d DataArray using matplotlib.pyplot\n\n Wraps :func:`matplotlib:matplotlib.pyplot.imshow`\n\n While other plot methods require the DataArray to be strictly\n two-dimensional, ``imshow`` also accepts a 3D array where some\n dimension can be interpreted as RGB or RGBA color channels and\n allows this dimension to be specified via the kwarg ``rgb=``.\n\n Unlike matplotlib, Xarray can apply ``vmin`` and ``vmax`` to RGB or RGBA\n data, by applying a single scaling factor and offset to all bands.\n Passing ``robust=True`` infers ``vmin`` and ``vmax``\n :ref:`in the usual way `.\n\n .. note::\n This function needs uniformly spaced coordinates to\n properly label the axes. Call DataArray.plot() to check.\n\n The pixels are centered on the coordinates values. Ie, if the coordinate\n value is 3.2 then the pixels for those coordinates will be centered on 3.2.\n \"\"\"\n\n if x.ndim != 1 or y.ndim != 1:\n raise ValueError(\n \"imshow requires 1D coordinates, try using \" \"pcolormesh or contour(f)\"\n )\n\n # Centering the pixels- Assumes uniform spacing\n try:\n xstep = (x[1] - x[0]) / 2.0\n except IndexError:\n # Arbitrary default value, similar to matplotlib behaviour\n xstep = 0.1\n try:\n ystep = (y[1] - y[0]) / 2.0\n except IndexError:\n ystep = 0.1\n left, right = x[0] - xstep, x[-1] + xstep\n bottom, top = y[-1] + ystep, y[0] - ystep\n\n defaults = {\"origin\": \"upper\", \"interpolation\": \"nearest\"}\n\n if not hasattr(ax, \"projection\"):\n # not for cartopy geoaxes\n defaults[\"aspect\"] = \"auto\"\n\n # Allow user to override these defaults\n defaults.update(kwargs)\n\n if defaults[\"origin\"] == \"upper\":\n defaults[\"extent\"] = [left, right, bottom, top]\n else:\n defaults[\"extent\"] = [left, right, top, bottom]\n\n if z.ndim == 3:\n # matplotlib imshow uses black for missing data, but Xarray makes\n # missing data transparent. 
We therefore add an alpha channel if\n # there isn't one, and set it to transparent where data is masked.\n if z.shape[-1] == 3:\n alpha = np.ma.ones(z.shape[:2] + (1,), dtype=z.dtype)\n if np.issubdtype(z.dtype, np.integer):\n alpha *= 255\n z = np.ma.concatenate((z, alpha), axis=2)\n else:\n z = z.copy()\n z[np.any(z.mask, axis=-1), -1] = 0\n\n primitive = ax.imshow(z, **defaults)\n\n return primitive\n\n\n@_plot2d\ndef contour(x, y, z, ax, **kwargs):\n \"\"\"\n Contour plot of 2d DataArray\n\n Wraps :func:`matplotlib:matplotlib.pyplot.contour`\n \"\"\"\n primitive = ax.contour(x, y, z, **kwargs)\n return primitive\n\n\n@_plot2d\ndef contourf(x, y, z, ax, **kwargs):\n \"\"\"\n Filled contour plot of 2d DataArray\n\n Wraps :func:`matplotlib:matplotlib.pyplot.contourf`\n \"\"\"\n primitive = ax.contourf(x, y, z, **kwargs)\n return primitive\n\n\n@_plot2d\ndef pcolormesh(x, y, z, ax, infer_intervals=None, **kwargs):\n \"\"\"\n Pseudocolor plot of 2d DataArray\n\n Wraps :func:`matplotlib:matplotlib.pyplot.pcolormesh`\n \"\"\"\n\n # decide on a default for infer_intervals (GH781)\n x = np.asarray(x)\n if infer_intervals is None:\n if hasattr(ax, \"projection\"):\n if len(x.shape) == 1:\n infer_intervals = True\n else:\n infer_intervals = False\n else:\n infer_intervals = True\n\n if infer_intervals and (\n (np.shape(x)[0] == np.shape(z)[1])\n or ((x.ndim > 1) and (np.shape(x)[1] == np.shape(z)[1]))\n ):\n if len(x.shape) == 1:\n x = _infer_interval_breaks(x, check_monotonic=True)\n else:\n # we have to infer the intervals on both axes\n x = _infer_interval_breaks(x, axis=1)\n x = _infer_interval_breaks(x, axis=0)\n\n if infer_intervals and (np.shape(y)[0] == np.shape(z)[0]):\n if len(y.shape) == 1:\n y = _infer_interval_breaks(y, check_monotonic=True)\n else:\n # we have to infer the intervals on both axes\n y = _infer_interval_breaks(y, axis=1)\n y = _infer_interval_breaks(y, axis=0)\n\n primitive = ax.pcolormesh(x, y, z, **kwargs)\n\n # by default, pcolormesh picks \"round\" values for bounds\n # this results in ugly looking plots with lots of surrounding whitespace\n if not hasattr(ax, \"projection\") and x.ndim == 1 and y.ndim == 1:\n # not a cartopy geoaxis\n ax.set_xlim(x[0], x[-1])\n ax.set_ylim(y[0], y[-1])\n\n return primitive\n"},{"col":0,"comment":"null","endLoc":584,"header":"def _rescale_imshow_rgb(darray, vmin, vmax, robust)","id":2936,"name":"_rescale_imshow_rgb","nodeType":"Function","startLoc":548,"text":"def _rescale_imshow_rgb(darray, vmin, vmax, robust):\n assert robust or vmin is not None or vmax is not None\n # TODO: remove when min numpy version is bumped to 1.13\n # There's a cyclic dependency via DataArray, so we can't import from\n # xarray.ufuncs in global scope.\n from xarray.ufuncs import maximum, minimum\n\n # Calculate vmin and vmax automatically for `robust=True`\n if robust:\n if vmax is None:\n vmax = np.nanpercentile(darray, 100 - ROBUST_PERCENTILE)\n if vmin is None:\n vmin = np.nanpercentile(darray, ROBUST_PERCENTILE)\n # If not robust and one bound is None, calculate the default other bound\n # and check that an interval between them exists.\n elif vmax is None:\n vmax = 255 if np.issubdtype(darray.dtype, np.integer) else 1\n if vmax < vmin:\n raise ValueError(\n \"vmin=%r is less than the default vmax (%r) - you must supply \"\n \"a vmax > vmin in this case.\" % (vmin, vmax)\n )\n elif vmin is None:\n vmin = 0\n if vmin > vmax:\n raise ValueError(\n \"vmax=%r is less than the default vmin (0) - you must supply \"\n \"a vmin < vmax in this case.\" % 
vmax\n )\n # Scale interval [vmin .. vmax] to [0 .. 1], with darray as 64-bit float\n # to avoid precision loss, integer over/underflow, etc with extreme inputs.\n # After scaling, downcast to 32-bit float. This substantially reduces\n # memory usage after we hand `darray` off to matplotlib.\n darray = ((darray.astype(\"f8\") - vmin) / (vmax - vmin)).astype(\"f4\")\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \"xarray.ufuncs\", PendingDeprecationWarning)\n return minimum(maximum(darray, 0), 1)"},{"col":0,"comment":"import seaborn and handle deprecation of apionly module","endLoc":41,"header":"def import_seaborn()","id":2937,"name":"import_seaborn","nodeType":"Function","startLoc":24,"text":"def import_seaborn():\n \"\"\"import seaborn and handle deprecation of apionly module\"\"\"\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n try:\n import seaborn.apionly as sns\n\n if (\n w\n and issubclass(w[-1].category, UserWarning)\n and (\"seaborn.apionly module\" in str(w[-1].message))\n ):\n raise ImportError\n except ImportError:\n import seaborn as sns\n finally:\n warnings.resetwarnings()\n return sns"},{"col":4,"comment":"null","endLoc":132,"header":"def __getitem__(self, key)","id":2938,"name":"__getitem__","nodeType":"Function","startLoc":129,"text":"def __getitem__(self, key):\n return indexing.explicit_indexing_adapter(\n key, self.shape, indexing.IndexingSupport.OUTER, self._getitem\n )"},{"attributeType":"null","col":8,"comment":"null","endLoc":37,"id":2939,"name":"_shape","nodeType":"Attribute","startLoc":37,"text":"self._shape"},{"attributeType":"null","col":4,"comment":"null","endLoc":588,"id":2940,"name":"__slots__","nodeType":"Attribute","startLoc":588,"text":"__slots__"},{"id":2941,"name":"advanced_selection_interpolation.svg","nodeType":"TextFile","path":"doc/_static","text":"\n\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n image/svg+xml\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n y\n x\n \n \n \n \n z\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n y\n x\n \n \n \n \n z\n \n \n \n \n \n \n \n \n Advanced indexing\n Advanced interpolation\n \n \n \n\n"},{"attributeType":"null","col":16,"comment":"null","endLoc":3,"id":2942,"name":"np","nodeType":"Attribute","startLoc":3,"text":"np"},{"attributeType":"None","col":4,"comment":"null","endLoc":14,"id":2943,"name":"bottleneck","nodeType":"Attribute","startLoc":14,"text":"bottleneck"},{"attributeType":"null","col":0,"comment":"null","endLoc":17,"id":2944,"name":"_ROLLING_REDUCE_DOCSTRING_TEMPLATE","nodeType":"Attribute","startLoc":17,"text":"_ROLLING_REDUCE_DOCSTRING_TEMPLATE"},{"col":0,"comment":"","endLoc":1,"header":"rolling.py#","id":2945,"name":"","nodeType":"Function","startLoc":1,"text":"try:\n import bottleneck\nexcept ImportError:\n # use numpy methods instead\n bottleneck = None\n\n_ROLLING_REDUCE_DOCSTRING_TEMPLATE = \"\"\"\\\nReduce this object's data windows by applying `{name}` along its dimension.\n\nParameters\n----------\n**kwargs : dict\n Additional keyword arguments passed on to `{name}`.\n\nReturns\n-------\nreduced : same type as caller\n New object with `{name}` applied along its rolling dimnension.\n\"\"\"\n\ninject_coarsen_methods(DataArrayCoarsen)\n\ninject_coarsen_methods(DatasetCoarsen)"},{"col":0,"comment":"\n Helper function to replace the values of a coordinate array containing\n 
pd.Interval with their mid-points or - for pcolormesh - boundaries which\n increases length by 1.\n ","endLoc":470,"header":"def _resolve_intervals_2dplot(val, func_name)","id":2946,"name":"_resolve_intervals_2dplot","nodeType":"Function","startLoc":456,"text":"def _resolve_intervals_2dplot(val, func_name):\n \"\"\"\n Helper function to replace the values of a coordinate array containing\n pd.Interval with their mid-points or - for pcolormesh - boundaries which\n increases length by 1.\n \"\"\"\n label_extra = \"\"\n if _valid_other_type(val, [pd.Interval]):\n if func_name == \"pcolormesh\":\n val = _interval_to_bound_points(val)\n else:\n val = _interval_to_mid_points(val)\n label_extra = \"_center\"\n\n return val, label_extra"},{"id":2947,"name":"dataset-diagram-logo.tex","nodeType":"TextFile","path":"doc/_static","text":"\\documentclass[class=minimal,border=0pt,convert={size=600,outext=.png}]{standalone}\n% \\documentclass[class=minimal,border=0pt]{standalone}\n\\usepackage[scaled]{helvet}\n\\renewcommand*\\familydefault{\\sfdefault}\n\n% ===========================================================================\n% The code below (used to define the \\tikzcuboid command) is copied,\n% unmodified, from a tex.stackexchange.com answer by the user \"Tom Bombadil\":\n% http://tex.stackexchange.com/a/29882/8335\n%\n% It is licensed under the Creative Commons Attribution-ShareAlike 3.0\n% Unported license: http://creativecommons.org/licenses/by-sa/3.0/\n% ===========================================================================\n\n\\usepackage[usenames,dvipsnames]{color}\n\\usepackage{tikz}\n\\usepackage{keyval}\n\\usepackage{ifthen}\n\n%====================================\n%emphasize vertices --> switch and emph style (e.g. thick,black)\n%====================================\n\\makeatletter\n% Standard Values for Parameters\n\\newcommand{\\tikzcuboid@shiftx}{0}\n\\newcommand{\\tikzcuboid@shifty}{0}\n\\newcommand{\\tikzcuboid@dimx}{3}\n\\newcommand{\\tikzcuboid@dimy}{3}\n\\newcommand{\\tikzcuboid@dimz}{3}\n\\newcommand{\\tikzcuboid@scale}{1}\n\\newcommand{\\tikzcuboid@densityx}{1}\n\\newcommand{\\tikzcuboid@densityy}{1}\n\\newcommand{\\tikzcuboid@densityz}{1}\n\\newcommand{\\tikzcuboid@rotation}{0}\n\\newcommand{\\tikzcuboid@anglex}{0}\n\\newcommand{\\tikzcuboid@angley}{90}\n\\newcommand{\\tikzcuboid@anglez}{225}\n\\newcommand{\\tikzcuboid@scalex}{1}\n\\newcommand{\\tikzcuboid@scaley}{1}\n\\newcommand{\\tikzcuboid@scalez}{sqrt(0.5)}\n\\newcommand{\\tikzcuboid@linefront}{black}\n\\newcommand{\\tikzcuboid@linetop}{black}\n\\newcommand{\\tikzcuboid@lineright}{black}\n\\newcommand{\\tikzcuboid@fillfront}{white}\n\\newcommand{\\tikzcuboid@filltop}{white}\n\\newcommand{\\tikzcuboid@fillright}{white}\n\\newcommand{\\tikzcuboid@shaded}{N}\n\\newcommand{\\tikzcuboid@shadecolor}{black}\n\\newcommand{\\tikzcuboid@shadeperc}{25}\n\\newcommand{\\tikzcuboid@emphedge}{N}\n\\newcommand{\\tikzcuboid@emphstyle}{thick}\n\n% Definition of 
Keys\n\\define@key{tikzcuboid}{shiftx}[\\tikzcuboid@shiftx]{\\renewcommand{\\tikzcuboid@shiftx}{#1}}\n\\define@key{tikzcuboid}{shifty}[\\tikzcuboid@shifty]{\\renewcommand{\\tikzcuboid@shifty}{#1}}\n\\define@key{tikzcuboid}{dimx}[\\tikzcuboid@dimx]{\\renewcommand{\\tikzcuboid@dimx}{#1}}\n\\define@key{tikzcuboid}{dimy}[\\tikzcuboid@dimy]{\\renewcommand{\\tikzcuboid@dimy}{#1}}\n\\define@key{tikzcuboid}{dimz}[\\tikzcuboid@dimz]{\\renewcommand{\\tikzcuboid@dimz}{#1}}\n\\define@key{tikzcuboid}{scale}[\\tikzcuboid@scale]{\\renewcommand{\\tikzcuboid@scale}{#1}}\n\\define@key{tikzcuboid}{densityx}[\\tikzcuboid@densityx]{\\renewcommand{\\tikzcuboid@densityx}{#1}}\n\\define@key{tikzcuboid}{densityy}[\\tikzcuboid@densityy]{\\renewcommand{\\tikzcuboid@densityy}{#1}}\n\\define@key{tikzcuboid}{densityz}[\\tikzcuboid@densityz]{\\renewcommand{\\tikzcuboid@densityz}{#1}}\n\\define@key{tikzcuboid}{rotation}[\\tikzcuboid@rotation]{\\renewcommand{\\tikzcuboid@rotation}{#1}}\n\\define@key{tikzcuboid}{anglex}[\\tikzcuboid@anglex]{\\renewcommand{\\tikzcuboid@anglex}{#1}}\n\\define@key{tikzcuboid}{angley}[\\tikzcuboid@angley]{\\renewcommand{\\tikzcuboid@angley}{#1}}\n\\define@key{tikzcuboid}{anglez}[\\tikzcuboid@anglez]{\\renewcommand{\\tikzcuboid@anglez}{#1}}\n\\define@key{tikzcuboid}{scalex}[\\tikzcuboid@scalex]{\\renewcommand{\\tikzcuboid@scalex}{#1}}\n\\define@key{tikzcuboid}{scaley}[\\tikzcuboid@scaley]{\\renewcommand{\\tikzcuboid@scaley}{#1}}\n\\define@key{tikzcuboid}{scalez}[\\tikzcuboid@scalez]{\\renewcommand{\\tikzcuboid@scalez}{#1}}\n\\define@key{tikzcuboid}{linefront}[\\tikzcuboid@linefront]{\\renewcommand{\\tikzcuboid@linefront}{#1}}\n\\define@key{tikzcuboid}{linetop}[\\tikzcuboid@linetop]{\\renewcommand{\\tikzcuboid@linetop}{#1}}\n\\define@key{tikzcuboid}{lineright}[\\tikzcuboid@lineright]{\\renewcommand{\\tikzcuboid@lineright}{#1}}\n\\define@key{tikzcuboid}{fillfront}[\\tikzcuboid@fillfront]{\\renewcommand{\\tikzcuboid@fillfront}{#1}}\n\\define@key{tikzcuboid}{filltop}[\\tikzcuboid@filltop]{\\renewcommand{\\tikzcuboid@filltop}{#1}}\n\\define@key{tikzcuboid}{fillright}[\\tikzcuboid@fillright]{\\renewcommand{\\tikzcuboid@fillright}{#1}}\n\\define@key{tikzcuboid}{shaded}[\\tikzcuboid@shaded]{\\renewcommand{\\tikzcuboid@shaded}{#1}}\n\\define@key{tikzcuboid}{shadecolor}[\\tikzcuboid@shadecolor]{\\renewcommand{\\tikzcuboid@shadecolor}{#1}}\n\\define@key{tikzcuboid}{shadeperc}[\\tikzcuboid@shadeperc]{\\renewcommand{\\tikzcuboid@shadeperc}{#1}}\n\\define@key{tikzcuboid}{emphedge}[\\tikzcuboid@emphedge]{\\renewcommand{\\tikzcuboid@emphedge}{#1}}\n\\define@key{tikzcuboid}{emphstyle}[\\tikzcuboid@emphstyle]{\\renewcommand{\\tikzcuboid@emphstyle}{#1}}\n% Commands\n\\newcommand{\\tikzcuboid}[1]{\n \\setkeys{tikzcuboid}{#1} % Process Keys passed to command\n \\pgfmathsetmacro{\\vectorxx}{\\tikzcuboid@scalex*cos(\\tikzcuboid@anglex)}\n \\pgfmathsetmacro{\\vectorxy}{\\tikzcuboid@scalex*sin(\\tikzcuboid@anglex)}\n \\pgfmathsetmacro{\\vectoryx}{\\tikzcuboid@scaley*cos(\\tikzcuboid@angley)}\n \\pgfmathsetmacro{\\vectoryy}{\\tikzcuboid@scaley*sin(\\tikzcuboid@angley)}\n \\pgfmathsetmacro{\\vectorzx}{\\tikzcuboid@scalez*cos(\\tikzcuboid@anglez)}\n \\pgfmathsetmacro{\\vectorzy}{\\tikzcuboid@scalez*sin(\\tikzcuboid@anglez)}\n \\begin{scope}[xshift=\\tikzcuboid@shiftx, yshift=\\tikzcuboid@shifty, scale=\\tikzcuboid@scale, rotate=\\tikzcuboid@rotation, x={(\\vectorxx,\\vectorxy)}, y={(\\vectoryx,\\vectoryy)}, z={(\\vectorzx,\\vectorzy)}]\n \\pgfmathsetmacro{\\steppingx}{1/\\tikzcuboid@densityx}\n 
\\pgfmathsetmacro{\\steppingy}{1/\\tikzcuboid@densityy}\n \\pgfmathsetmacro{\\steppingz}{1/\\tikzcuboid@densityz}\n \\newcommand{\\dimx}{\\tikzcuboid@dimx}\n \\newcommand{\\dimy}{\\tikzcuboid@dimy}\n \\newcommand{\\dimz}{\\tikzcuboid@dimz}\n \\pgfmathsetmacro{\\secondx}{2*\\steppingx}\n \\pgfmathsetmacro{\\secondy}{2*\\steppingy}\n \\pgfmathsetmacro{\\secondz}{2*\\steppingz}\n \\foreach \\x in {\\steppingx,\\secondx,...,\\dimx}\n { \\foreach \\y in {\\steppingy,\\secondy,...,\\dimy}\n { \\pgfmathsetmacro{\\lowx}{(\\x-\\steppingx)}\n \\pgfmathsetmacro{\\lowy}{(\\y-\\steppingy)}\n \\filldraw[fill=\\tikzcuboid@fillfront,draw=\\tikzcuboid@linefront] (\\lowx,\\lowy,\\dimz) -- (\\lowx,\\y,\\dimz) -- (\\x,\\y,\\dimz) -- (\\x,\\lowy,\\dimz) -- cycle;\n\n }\n }\n \\foreach \\x in {\\steppingx,\\secondx,...,\\dimx}\n { \\foreach \\z in {\\steppingz,\\secondz,...,\\dimz}\n { \\pgfmathsetmacro{\\lowx}{(\\x-\\steppingx)}\n \\pgfmathsetmacro{\\lowz}{(\\z-\\steppingz)}\n \\filldraw[fill=\\tikzcuboid@filltop,draw=\\tikzcuboid@linetop] (\\lowx,\\dimy,\\lowz) -- (\\lowx,\\dimy,\\z) -- (\\x,\\dimy,\\z) -- (\\x,\\dimy,\\lowz) -- cycle;\n }\n }\n \\foreach \\y in {\\steppingy,\\secondy,...,\\dimy}\n { \\foreach \\z in {\\steppingz,\\secondz,...,\\dimz}\n { \\pgfmathsetmacro{\\lowy}{(\\y-\\steppingy)}\n \\pgfmathsetmacro{\\lowz}{(\\z-\\steppingz)}\n \\filldraw[fill=\\tikzcuboid@fillright,draw=\\tikzcuboid@lineright] (\\dimx,\\lowy,\\lowz) -- (\\dimx,\\lowy,\\z) -- (\\dimx,\\y,\\z) -- (\\dimx,\\y,\\lowz) -- cycle;\n }\n }\n \\ifthenelse{\\equal{\\tikzcuboid@emphedge}{Y}}%\n {\\draw[\\tikzcuboid@emphstyle](0,\\dimy,0) -- (\\dimx,\\dimy,0) -- (\\dimx,\\dimy,\\dimz) -- (0,\\dimy,\\dimz) -- cycle;%\n \\draw[\\tikzcuboid@emphstyle] (0,0,\\dimz) -- (0,\\dimy,\\dimz) -- (\\dimx,\\dimy,\\dimz) -- (\\dimx,0,\\dimz) -- cycle;%\n \\draw[\\tikzcuboid@emphstyle](\\dimx,0,0) -- (\\dimx,\\dimy,0) -- (\\dimx,\\dimy,\\dimz) -- (\\dimx,0,\\dimz) -- cycle;%\n }%\n {}\n \\end{scope}\n}\n\n\\makeatother\n\n\\begin{document}\n\n\\begin{tikzpicture}\n \\tikzcuboid{%\n shiftx=21cm,%\n shifty=8cm,%\n scale=1.00,%\n rotation=0,%\n densityx=2,%\n densityy=2,%\n densityz=2,%\n dimx=4,%\n dimy=3,%\n dimz=3,%\n linefront=purple!75!black,%\n linetop=purple!50!black,%\n lineright=purple!25!black,%\n fillfront=purple!25!white,%\n filltop=purple!50!white,%\n fillright=purple!75!white,%\n emphedge=Y,%\n emphstyle=ultra thick,\n }\n \\tikzcuboid{%\n shiftx=21cm,%\n shifty=11.6cm,%\n scale=1.00,%\n rotation=0,%\n densityx=2,%\n densityy=2,%\n densityz=2,%\n dimx=4,%\n dimy=3,%\n dimz=3,%\n linefront=teal!75!black,%\n linetop=teal!50!black,%\n lineright=teal!25!black,%\n fillfront=teal!25!white,%\n filltop=teal!50!white,%\n fillright=teal!75!white,%\n emphedge=Y,%\n emphstyle=ultra thick,\n }\n \\tikzcuboid{%\n shiftx=26.8cm,%\n shifty=8cm,%\n scale=1.00,%\n rotation=0,%\n densityx=10000,%\n densityy=2,%\n densityz=2,%\n dimx=0,%\n dimy=3,%\n dimz=3,%\n linefront=orange!75!black,%\n linetop=orange!50!black,%\n lineright=orange!25!black,%\n fillfront=orange!25!white,%\n filltop=orange!50!white,%\n fillright=orange!100!white,%\n emphedge=Y,%\n emphstyle=ultra thick,\n }\n \\tikzcuboid{%\n shiftx=28.6cm,%\n shifty=8cm,%\n scale=1.00,%\n rotation=0,%\n densityx=10000,%\n densityy=2,%\n densityz=2,%\n dimx=0,%\n dimy=3,%\n dimz=3,%\n linefront=purple!75!black,%\n linetop=purple!50!black,%\n lineright=purple!25!black,%\n fillfront=purple!25!white,%\n filltop=purple!50!white,%\n fillright=red!75!white,%\n emphedge=Y,%\n emphstyle=ultra thick,\n }\n % 
\\tikzcuboid{%\n % shiftx=27.1cm,%\n % shifty=10.1cm,%\n % scale=1.00,%\n % rotation=0,%\n % densityx=100,%\n % densityy=2,%\n % densityz=100,%\n % dimx=0,%\n % dimy=3,%\n % dimz=0,%\n % emphedge=Y,%\n % emphstyle=ultra thick,\n % }\n % \\tikzcuboid{%\n % shiftx=27.1cm,%\n % shifty=10.1cm,%\n % scale=1.00,%\n % rotation=180,%\n % densityx=100,%\n % densityy=100,%\n % densityz=2,%\n % dimx=0,%\n % dimy=0,%\n % dimz=3,%\n % emphedge=Y,%\n % emphstyle=ultra thick,\n % }\n \\tikzcuboid{%\n shiftx=26.8cm,%\n shifty=11.4cm,%\n scale=1.00,%\n rotation=0,%\n densityx=100,%\n densityy=2,%\n densityz=100,%\n dimx=0,%\n dimy=3,%\n dimz=0,%\n emphedge=Y,%\n emphstyle=ultra thick,\n }\n \\tikzcuboid{%\n shiftx=25.3cm,%\n shifty=12.9cm,%\n scale=1.00,%\n rotation=180,%\n densityx=100,%\n densityy=100,%\n densityz=2,%\n dimx=0,%\n dimy=0,%\n dimz=3,%\n emphedge=Y,%\n emphstyle=ultra thick,\n }\n % \\fill (27.1,10.1) circle[radius=2pt];\n \\node [font=\\fontsize{100}{100}\\fontfamily{phv}\\selectfont, anchor=west, text width=9cm, color=white!50!black] at (30,10.6) {\\textbf{\\emph{x}}};\n \\node [font=\\fontsize{100}{100}\\fontfamily{phv}\\selectfont, anchor=west, text width=9cm] at (32,10.25) {{array}};\n\\end{tikzpicture}\n\n\\end{document}\n"},{"col":0,"comment":"\n Helper function which returns an array\n with the Intervals' boundaries.\n ","endLoc":437,"header":"def _interval_to_bound_points(array)","id":2948,"name":"_interval_to_bound_points","nodeType":"Function","startLoc":428,"text":"def _interval_to_bound_points(array):\n \"\"\"\n Helper function which returns an array\n with the Intervals' boundaries.\n \"\"\"\n\n array_boundaries = np.array([x.left for x in array])\n array_boundaries = np.concatenate((array_boundaries, np.array([array[-1].right])))\n\n return array_boundaries"},{"id":2949,"name":"reshaping.rst","nodeType":"TextFile","path":"doc","text":".. _reshape:\n\n###############################\nReshaping and reorganizing data\n###############################\n\nThese methods allow you to reorganize\n\n.. ipython:: python\n :suppress:\n\n import numpy as np\n import pandas as pd\n import xarray as xr\n np.random.seed(123456)\n\nReordering dimensions\n---------------------\n\nTo reorder dimensions on a :py:class:`~xarray.DataArray` or across all variables\non a :py:class:`~xarray.Dataset`, use :py:meth:`~xarray.DataArray.transpose`:\n\n.. ipython:: python\n\n ds = xr.Dataset({'foo': (('x', 'y', 'z'), [[[42]]]), 'bar': (('y', 'z'), [[24]])})\n ds.transpose('y', 'z', 'x')\n ds.transpose() # reverses all dimensions\n\nExpand and squeeze dimensions\n-----------------------------\n\nTo expand a :py:class:`~xarray.DataArray` or all\nvariables on a :py:class:`~xarray.Dataset` along a new dimension,\nuse :py:meth:`~xarray.DataArray.expand_dims`\n\n.. ipython:: python\n\n expanded = ds.expand_dims('w')\n expanded\n\nThis method attaches a new dimension with size 1 to all data variables.\n\nTo remove such a size-1 dimension from the :py:class:`~xarray.DataArray`\nor :py:class:`~xarray.Dataset`,\nuse :py:meth:`~xarray.DataArray.squeeze`\n\n.. ipython:: python\n\n expanded.squeeze('w')\n\nConverting between datasets and arrays\n--------------------------------------\n\nTo convert from a Dataset to a DataArray, use :py:meth:`~xarray.Dataset.to_array`:\n\n.. 
ipython:: python\n\n arr = ds.to_array()\n arr\n\nThis method broadcasts all data variables in the dataset against each other,\nthen concatenates them along a new dimension into a new array while preserving\ncoordinates.\n\nTo convert back from a DataArray to a Dataset, use\n:py:meth:`~xarray.DataArray.to_dataset`:\n\n.. ipython:: python\n\n arr.to_dataset(dim='variable')\n\nThe broadcasting behavior of ``to_array`` means that the resulting array\nincludes the union of data variable dimensions:\n\n.. ipython:: python\n\n ds2 = xr.Dataset({'a': 0, 'b': ('x', [3, 4, 5])})\n\n # the input dataset has 4 elements\n ds2\n\n # the resulting array has 6 elements\n ds2.to_array()\n\nOtherwise, the result could not be represented as an orthogonal array.\n\nIf you use ``to_dataset`` without supplying the ``dim`` argument, the DataArray will be converted into a Dataset of one variable:\n\n.. ipython:: python\n\n arr.to_dataset(name='combined')\n\n.. _reshape.stack:\n\nStack and unstack\n-----------------\n\nAs part of xarray's nascent support for :py:class:`pandas.MultiIndex`, we have\nimplemented :py:meth:`~xarray.DataArray.stack` and\n:py:meth:`~xarray.DataArray.unstack` method, for combining or splitting dimensions:\n\n.. ipython:: python\n\n array = xr.DataArray(np.random.randn(2, 3),\n coords=[('x', ['a', 'b']), ('y', [0, 1, 2])])\n stacked = array.stack(z=('x', 'y'))\n stacked\n stacked.unstack('z')\n\nThese methods are modeled on the :py:class:`pandas.DataFrame` methods of the\nsame name, although in xarray they always create new dimensions rather than\nadding to the existing index or columns.\n\nLike :py:meth:`DataFrame.unstack`, xarray's ``unstack``\nalways succeeds, even if the multi-index being unstacked does not contain all\npossible levels. Missing levels are filled in with ``NaN`` in the resulting object:\n\n.. ipython:: python\n\n stacked2 = stacked[::2]\n stacked2\n stacked2.unstack('z')\n\nHowever, xarray's ``stack`` has an important difference from pandas: unlike\npandas, it does not automatically drop missing values. Compare:\n\n.. ipython:: python\n\n array = xr.DataArray([[np.nan, 1], [2, 3]], dims=['x', 'y'])\n array.stack(z=('x', 'y'))\n array.to_pandas().stack()\n\nWe departed from pandas's behavior here because predictable shapes for new\narray dimensions is necessary for :ref:`dask`.\n\n.. _reshape.stacking_different:\n\nStacking different variables together\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nThese stacking and unstacking operations are particularly useful for reshaping\nxarray objects for use in machine learning packages, such as `scikit-learn\n`_, that usually require two-dimensional numpy\narrays as inputs. For datasets with only one variable, we only need ``stack``\nand ``unstack``, but combining multiple variables in a\n:py:class:`xarray.Dataset` is more complicated. If the variables in the dataset\nhave matching numbers of dimensions, we can call\n:py:meth:`~xarray.Dataset.to_array` and then stack along the the new coordinate.\nBut :py:meth:`~xarray.Dataset.to_array` will broadcast the dataarrays together,\nwhich will effectively tile the lower dimensional variable along the missing\ndimensions. The method :py:meth:`xarray.Dataset.to_stacked_array` allows\ncombining variables of differing dimensions without this wasteful copying while\n:py:meth:`xarray.DataArray.to_unstacked_dataset` reverses this operation.\nJust as with :py:meth:`xarray.Dataset.stack` the stacked coordinate is\nrepresented by a :py:class:`pandas.MultiIndex` object. 
These methods are used\nlike this:\n\n.. ipython:: python\n\n data = xr.Dataset(\n data_vars={'a': (('x', 'y'), [[0, 1, 2], [3, 4, 5]]),\n 'b': ('x', [6, 7])},\n coords={'y': ['u', 'v', 'w']}\n )\n stacked = data.to_stacked_array(\"z\", sample_dims=['x'])\n stacked\n unstacked = stacked.to_unstacked_dataset(\"z\")\n unstacked\n\nIn this example, ``stacked`` is a two dimensional array that we can easily pass to a scikit-learn or another generic\nnumerical method.\n\n.. note::\n\n Unlike with ``stack``, in ``to_stacked_array``, the user specifies the dimensions they **do not** want stacked.\n For a machine learning task, these unstacked dimensions can be interpreted as the dimensions over which samples are\n drawn, whereas the stacked coordinates are the features. Naturally, all variables should possess these sampling\n dimensions.\n\n\n.. _reshape.set_index:\n\nSet and reset index\n-------------------\n\nComplementary to stack / unstack, xarray's ``.set_index``, ``.reset_index`` and\n``.reorder_levels`` allow easy manipulation of ``DataArray`` or ``Dataset``\nmulti-indexes without modifying the data and its dimensions.\n\nYou can create a multi-index from several 1-dimensional variables and/or\ncoordinates using :py:meth:`~xarray.DataArray.set_index`:\n\n.. ipython:: python\n\n da = xr.DataArray(np.random.rand(4),\n coords={'band': ('x', ['a', 'a', 'b', 'b']),\n 'wavenumber': ('x', np.linspace(200, 400, 4))},\n dims='x')\n da\n mda = da.set_index(x=['band', 'wavenumber'])\n mda\n\nThese coordinates can now be used for indexing, e.g.,\n\n.. ipython:: python\n\n mda.sel(band='a')\n\nConversely, you can use :py:meth:`~xarray.DataArray.reset_index`\nto extract multi-index levels as coordinates (this is mainly useful\nfor serialization):\n\n.. ipython:: python\n\n mda.reset_index('x')\n\n:py:meth:`~xarray.DataArray.reorder_levels` allows changing the order\nof multi-index levels:\n\n.. ipython:: python\n\n mda.reorder_levels(x=['wavenumber', 'band'])\n\nAs of xarray v0.9 coordinate labels for each dimension are optional.\nYou can also use ``.set_index`` / ``.reset_index`` to add / remove\nlabels for one or several dimensions:\n\n.. ipython:: python\n\n array = xr.DataArray([1, 2, 3], dims='x')\n array\n array['c'] = ('x', ['a', 'b', 'c'])\n array.set_index(x='c')\n array = array.set_index(x='c')\n array = array.reset_index('x', drop=True)\n\n.. _reshape.shift_and_roll:\n\nShift and roll\n--------------\n\nTo adjust coordinate labels, you can use the :py:meth:`~xarray.Dataset.shift` and\n:py:meth:`~xarray.Dataset.roll` methods:\n\n.. ipython:: python\n\n\tarray = xr.DataArray([1, 2, 3, 4], dims='x')\n\tarray.shift(x=2)\n\tarray.roll(x=2, roll_coords=True)\n\n.. _reshape.sort:\n\nSort\n----\n\nOne may sort a DataArray/Dataset via :py:meth:`~xarray.DataArray.sortby` and\n:py:meth:`~xarray.DataArray.sortby`. The input can be an individual or list of\n1D ``DataArray`` objects:\n\n.. ipython:: python\n\n ds = xr.Dataset({'A': (('x', 'y'), [[1, 2], [3, 4]]),\n 'B': (('x', 'y'), [[5, 6], [7, 8]])},\n coords={'x': ['b', 'a'], 'y': [1, 0]})\n dax = xr.DataArray([100, 99], [('x', [0, 1])])\n day = xr.DataArray([90, 80], [('y', [0, 1])])\n ds.sortby([day, dax])\n\nAs a shortcut, you can refer to existing coordinates by name:\n\n.. 
ipython:: python\n\n ds.sortby('x')\n ds.sortby(['y', 'x'])\n ds.sortby(['y', 'x'], ascending=False)\n"},{"attributeType":"null","col":12,"comment":"null","endLoc":62,"id":2950,"name":"_left","nodeType":"Attribute","startLoc":62,"text":"self._left"},{"id":2951,"name":"README.txt","nodeType":"TextFile","path":"doc/gallery","text":".. _recipes:\n\nGallery\n=======\n\n"},{"attributeType":"null","col":4,"comment":"null","endLoc":17,"id":2952,"name":"nc_time_axis_available","nodeType":"Attribute","startLoc":17,"text":"nc_time_axis_available"},{"attributeType":"null","col":0,"comment":"null","endLoc":21,"id":2953,"name":"ROBUST_PERCENTILE","nodeType":"Attribute","startLoc":21,"text":"ROBUST_PERCENTILE"},{"id":2954,"name":"time-series.rst","nodeType":"TextFile","path":"doc","text":".. _time-series:\n\n================\nTime series data\n================\n\nA major use case for xarray is multi-dimensional time-series data.\nAccordingly, we've copied many of features that make working with time-series\ndata in pandas such a joy to xarray. In most cases, we rely on pandas for the\ncore functionality.\n\n.. ipython:: python\n :suppress:\n\n import numpy as np\n import pandas as pd\n import xarray as xr\n np.random.seed(123456)\n\nCreating datetime64 data\n------------------------\n\nxarray uses the numpy dtypes ``datetime64[ns]`` and ``timedelta64[ns]`` to\nrepresent datetime data, which offer vectorized (if sometimes buggy) operations\nwith numpy and smooth integration with pandas.\n\nTo convert to or create regular arrays of ``datetime64`` data, we recommend\nusing :py:func:`pandas.to_datetime` and :py:func:`pandas.date_range`:\n\n.. ipython:: python\n\n pd.to_datetime(['2000-01-01', '2000-02-02'])\n pd.date_range('2000-01-01', periods=365)\n\nAlternatively, you can supply arrays of Python ``datetime`` objects. These get\nconverted automatically when used as arguments in xarray objects:\n\n.. ipython:: python\n\n import datetime\n xr.Dataset({'time': datetime.datetime(2000, 1, 1)})\n\nWhen reading or writing netCDF files, xarray automatically decodes datetime and\ntimedelta arrays using `CF conventions`_ (that is, by using a ``units``\nattribute like ``'days since 2000-01-01'``).\n\n.. _CF conventions: http://cfconventions.org\n\n.. note::\n\n When decoding/encoding datetimes for non-standard calendars or for dates\n before year 1678 or after year 2262, xarray uses the `cftime`_ library.\n It was previously packaged with the ``netcdf4-python`` package under the\n name ``netcdftime`` but is now distributed separately. ``cftime`` is an\n :ref:`optional dependency` of xarray.\n\n.. _cftime: https://unidata.github.io/cftime\n\n\nYou can manual decode arrays in this form by passing a dataset to\n:py:func:`~xarray.decode_cf`:\n\n.. ipython:: python\n\n attrs = {'units': 'hours since 2000-01-01'}\n ds = xr.Dataset({'time': ('time', [0, 1, 2, 3], attrs)})\n xr.decode_cf(ds)\n\nOne unfortunate limitation of using ``datetime64[ns]`` is that it limits the\nnative representation of dates to those that fall between the years 1678 and\n2262. When a netCDF file contains dates outside of these bounds, dates will be\nreturned as arrays of :py:class:`cftime.datetime` objects and a :py:class:`~xarray.CFTimeIndex`\nwill be used for indexing. :py:class:`~xarray.CFTimeIndex` enables a subset of\nthe indexing functionality of a :py:class:`pandas.DatetimeIndex` and is only\nfully compatible with the standalone version of ``cftime`` (not the version\npackaged with earlier versions ``netCDF4``). 
See :ref:`CFTimeIndex` for more\ninformation.\n\nDatetime indexing\n-----------------\n\nxarray borrows powerful indexing machinery from pandas (see :ref:`indexing`).\n\nThis allows for several useful and succinct forms of indexing, particularly for\n`datetime64` data. For example, we support indexing with strings for single\nitems and with the `slice` object:\n\n.. ipython:: python\n\n time = pd.date_range('2000-01-01', freq='H', periods=365 * 24)\n ds = xr.Dataset({'foo': ('time', np.arange(365 * 24)), 'time': time})\n ds.sel(time='2000-01')\n ds.sel(time=slice('2000-06-01', '2000-06-10'))\n\nYou can also select a particular time by indexing with a\n:py:class:`datetime.time` object:\n\n.. ipython:: python\n\n ds.sel(time=datetime.time(12))\n\nFor more details, read the pandas documentation.\n\n.. _dt_accessor:\n\nDatetime components\n-------------------\n\nSimilar `to pandas`_, the components of datetime objects contained in a\ngiven ``DataArray`` can be quickly computed using a special ``.dt`` accessor.\n\n.. _to pandas: http://pandas.pydata.org/pandas-docs/stable/basics.html#basics-dt-accessors\n\n.. ipython:: python\n\n time = pd.date_range('2000-01-01', freq='6H', periods=365 * 4)\n ds = xr.Dataset({'foo': ('time', np.arange(365 * 4)), 'time': time})\n ds.time.dt.hour\n ds.time.dt.dayofweek\n\nThe ``.dt`` accessor works on both coordinate dimensions as well as\nmulti-dimensional data.\n\nxarray also supports a notion of \"virtual\" or \"derived\" coordinates for\n`datetime components`__ implemented by pandas, including \"year\", \"month\",\n\"day\", \"hour\", \"minute\", \"second\", \"dayofyear\", \"week\", \"dayofweek\", \"weekday\"\nand \"quarter\":\n\n__ http://pandas.pydata.org/pandas-docs/stable/api.html#time-date-components\n\n.. ipython:: python\n\n ds['time.month']\n ds['time.dayofyear']\n\nFor use as a derived coordinate, xarray adds ``'season'`` to the list of\ndatetime components supported by pandas:\n\n.. ipython:: python\n\n ds['time.season']\n ds['time'].dt.season\n\nThe set of valid seasons consists of 'DJF', 'MAM', 'JJA' and 'SON', labeled by\nthe first letters of the corresponding months.\n\nYou can use these shortcuts with both Datasets and DataArray coordinates.\n\nIn addition, xarray supports rounding operations ``floor``, ``ceil``, and ``round``. These operations require that you supply a `rounding frequency as a string argument.`__\n\n__ http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases\n\n.. ipython:: python\n\n ds['time'].dt.floor('D')\n\nThe ``.dt`` accessor can also be used to generate formatted datetime strings\nfor arrays utilising the same formatting as the standard `datetime.strftime`_.\n\n.. _datetime.strftime: https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior\n\n.. ipython:: python\n\n ds['time'].dt.strftime('%a, %b %d %H:%M')\n\n.. _resampling:\n\nResampling and grouped operations\n---------------------------------\n\nDatetime components couple particularly well with grouped operations (see\n:ref:`groupby`) for analyzing features that repeat over time. Here's how to\ncalculate the mean by time of day:\n\n.. ipython:: python\n :okwarning:\n\n ds.groupby('time.hour').mean()\n\nFor upsampling or downsampling temporal resolutions, xarray offers a\n:py:meth:`~xarray.Dataset.resample` method building on the core functionality\noffered by the pandas method of the same name. Resample uses essentially the\nsame api as ``resample`` `in pandas`_.\n\n.. 
_in pandas: http://pandas.pydata.org/pandas-docs/stable/timeseries.html#up-and-downsampling\n\nFor example, we can downsample our dataset from hourly to 6-hourly:\n\n.. ipython:: python\n :okwarning:\n\n ds.resample(time='6H')\n\nThis will create a specialized ``Resample`` object which saves information\nnecessary for resampling. All of the reduction methods which work with\n``Resample`` objects can also be used for resampling:\n\n.. ipython:: python\n :okwarning:\n\n ds.resample(time='6H').mean()\n\nYou can also supply an arbitrary reduction function to aggregate over each\nresampling group:\n\n.. ipython:: python\n\n ds.resample(time='6H').reduce(np.mean)\n\nFor upsampling, xarray provides six methods: ``asfreq``, ``ffill``, ``bfill``, ``pad``,\n``nearest`` and ``interpolate``. ``interpolate`` extends ``scipy.interpolate.interp1d``\nand supports all of its schemes. All of these resampling operations work on both\nDataset and DataArray objects with an arbitrary number of dimensions.\n\nIn order to limit the scope of the methods ``ffill``, ``bfill``, ``pad`` and\n``nearest`` the ``tolerance`` argument can be set in coordinate units.\nData that has indices outside of the given ``tolerance`` are set to ``NaN``.\n\n.. ipython:: python\n\n ds.resample(time='1H').nearest(tolerance='1H')\n\n\nFor more examples of using grouped operations on a time dimension, see\n:ref:`toy weather data`.\n"},{"attributeType":"null","col":0,"comment":"null","endLoc":44,"id":2955,"name":"_registered","nodeType":"Attribute","startLoc":44,"text":"_registered"},{"attributeType":"null","col":8,"comment":"null","endLoc":49,"id":2956,"name":"cons_kwargs","nodeType":"Attribute","startLoc":49,"text":"self.cons_kwargs"},{"col":0,"comment":"","endLoc":1,"header":"utils.py#","id":2957,"name":"","nodeType":"Function","startLoc":1,"text":"try:\n import nc_time_axis # noqa: F401\n\n nc_time_axis_available = True\nexcept ImportError:\n nc_time_axis_available = False\n\nROBUST_PERCENTILE = 2.0\n\n_registered = False"},{"className":"_PlotMethods","col":0,"comment":"\n Enables use of xarray.plot functions as attributes on a DataArray.\n For example, DataArray.plot.imshow\n ","endLoc":477,"id":2958,"nodeType":"Class","startLoc":453,"text":"class _PlotMethods:\n \"\"\"\n Enables use of xarray.plot functions as attributes on a DataArray.\n For example, DataArray.plot.imshow\n \"\"\"\n\n __slots__ = (\"_da\",)\n\n def __init__(self, darray):\n self._da = darray\n\n def __call__(self, **kwargs):\n return plot(self._da, **kwargs)\n\n @functools.wraps(hist)\n def hist(self, ax=None, **kwargs):\n return hist(self._da, ax=ax, **kwargs)\n\n @functools.wraps(line)\n def line(self, *args, **kwargs):\n return line(self._da, *args, **kwargs)\n\n @functools.wraps(step)\n def step(self, *args, **kwargs):\n return step(self._da, *args, **kwargs)"},{"col":4,"comment":"null","endLoc":465,"header":"def __call__(self, **kwargs)","id":2959,"name":"__call__","nodeType":"Function","startLoc":464,"text":"def __call__(self, **kwargs):\n return plot(self._da, **kwargs)"},{"attributeType":"null","col":8,"comment":"null","endLoc":50,"id":2960,"name":"call_kwargs","nodeType":"Attribute","startLoc":50,"text":"self.call_kwargs"},{"id":2961,"name":"plotting.rst","nodeType":"TextFile","path":"doc","text":".. _plotting:\n\nPlotting\n========\n\nIntroduction\n------------\n\nLabeled data enables expressive computations. 
These same\nlabels can also be used to easily create informative plots.\n\nxarray's plotting capabilities are centered around\n:py:class:`xarray.DataArray` objects.\nTo plot :py:class:`xarray.Dataset` objects\nsimply access the relevant DataArrays, ie ``dset['var1']``.\nDataset specific plotting routines are also available (see :ref:`plot-dataset`).\nHere we focus mostly on arrays 2d or larger. If your data fits\nnicely into a pandas DataFrame then you're better off using one of the more\ndeveloped tools there.\n\nxarray plotting functionality is a thin wrapper around the popular\n`matplotlib `_ library.\nMatplotlib syntax and function names were copied as much as possible, which\nmakes for an easy transition between the two.\nMatplotlib must be installed before xarray can plot.\n\nTo use xarray's plotting capabilities with time coordinates containing\n``cftime.datetime`` objects\n`nc-time-axis `_ v1.2.0 or later\nneeds to be installed.\n\nFor more extensive plotting applications consider the following projects:\n\n- `Seaborn `_: \"provides\n a high-level interface for drawing attractive statistical graphics.\"\n Integrates well with pandas.\n\n- `HoloViews `_\n and `GeoViews `_: \"Composable, declarative\n data structures for building even complex visualizations easily.\" Includes\n native support for xarray objects.\n\n- `hvplot `_: ``hvplot`` makes it very easy to produce\n dynamic plots (backed by ``Holoviews`` or ``Geoviews``) by adding a ``hvplot``\n accessor to DataArrays.\n\n- `Cartopy `_: Provides cartographic\n tools.\n\nImports\n~~~~~~~\n\n.. ipython:: python\n :suppress:\n\n # Use defaults so we don't get gridlines in generated docs\n import matplotlib as mpl\n mpl.rcdefaults()\n\nThe following imports are necessary for all of the examples.\n\n.. ipython:: python\n\n import numpy as np\n import pandas as pd\n import matplotlib.pyplot as plt\n import xarray as xr\n\nFor these examples we'll use the North American air temperature dataset.\n\n.. ipython:: python\n\n airtemps = xr.tutorial.open_dataset('air_temperature')\n airtemps\n\n # Convert to celsius\n air = airtemps.air - 273.15\n\n # copy attributes to get nice figure labels and change Kelvin to Celsius\n air.attrs = airtemps.air.attrs\n air.attrs['units'] = 'deg C'\n\n.. note::\n Until :issue:`1614` is solved, you might need to copy over the metadata in ``attrs`` to get informative figure labels (as was done above).\n\n\nDataArrays\n----------\n\nOne Dimension\n~~~~~~~~~~~~~\n\n================\n Simple Example\n================\n\nThe simplest way to make a plot is to call the :py:func:`xarray.DataArray.plot()` method.\n\n.. ipython:: python\n\n air1d = air.isel(lat=10, lon=10)\n\n @savefig plotting_1d_simple.png width=4in\n air1d.plot()\n\nxarray uses the coordinate name along with metadata ``attrs.long_name``, ``attrs.standard_name``, ``DataArray.name`` and ``attrs.units`` (if available) to label the axes. The names ``long_name``, ``standard_name`` and ``units`` are copied from the `CF-conventions spec `_. When choosing names, the order of precedence is ``long_name``, ``standard_name`` and finally ``DataArray.name``. The y-axis label in the above plot was constructed from the ``long_name`` and ``units`` attributes of ``air1d``.\n\n.. 
ipython:: python\n\n air1d.attrs\n\n======================\n Additional Arguments\n======================\n\nAdditional arguments are passed directly to the matplotlib function which\ndoes the work.\nFor example, :py:func:`xarray.plot.line` calls\nmatplotlib.pyplot.plot_ passing in the index and the array values as x and y, respectively.\nSo to make a line plot with blue triangles a matplotlib format string\ncan be used:\n\n.. _matplotlib.pyplot.plot: http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot\n\n.. ipython:: python\n\n @savefig plotting_1d_additional_args.png width=4in\n air1d[:200].plot.line('b-^')\n\n.. note::\n Not all xarray plotting methods support passing positional arguments\n to the wrapped matplotlib functions, but they do all\n support keyword arguments.\n\nKeyword arguments work the same way, and are more explicit.\n\n.. ipython:: python\n\n @savefig plotting_example_sin3.png width=4in\n air1d[:200].plot.line(color='purple', marker='o')\n\n=========================\n Adding to Existing Axis\n=========================\n\nTo add the plot to an existing axis pass in the axis as a keyword argument\n``ax``. This works for all xarray plotting methods.\nIn this example ``axes`` is an array consisting of the left and right\naxes created by ``plt.subplots``.\n\n.. ipython:: python\n\n fig, axes = plt.subplots(ncols=2)\n\n axes\n\n air1d.plot(ax=axes[0])\n air1d.plot.hist(ax=axes[1])\n\n plt.tight_layout()\n\n @savefig plotting_example_existing_axes.png width=6in\n plt.draw()\n\nOn the right is a histogram created by :py:func:`xarray.plot.hist`.\n\n.. _plotting.figsize:\n\n=============================\n Controlling the figure size\n=============================\n\nYou can pass a ``figsize`` argument to all xarray's plotting methods to\ncontrol the figure size. For convenience, xarray's plotting methods also\nsupport the ``aspect`` and ``size`` arguments which control the size of the\nresulting image via the formula ``figsize = (aspect * size, size)``:\n\n.. ipython:: python\n\n air1d.plot(aspect=2, size=3)\n @savefig plotting_example_size_and_aspect.png\n plt.tight_layout()\n\n.. ipython:: python\n :suppress:\n\n # create a dummy figure so sphinx plots everything below normally\n plt.figure()\n\nThis feature also works with :ref:`plotting.faceting`. For facet plots,\n``size`` and ``aspect`` refer to a single panel (so that ``aspect * size``\ngives the width of each facet in inches), while ``figsize`` refers to the\nentire figure (as for matplotlib's ``figsize`` argument).\n\n.. note::\n\n If ``figsize`` or ``size`` are used, a new figure is created,\n so this is mutually exclusive with the ``ax`` argument.\n\n.. note::\n\n The convention used by xarray (``figsize = (aspect * size, size)``) is\n borrowed from seaborn: it is therefore `not equivalent to matplotlib's`_.\n\n.. _not equivalent to matplotlib's: https://github.com/mwaskom/seaborn/issues/746\n\n\n.. _plotting.multiplelines:\n\n====================================================\n Multiple lines showing variation along a dimension\n====================================================\n\nIt is possible to make line plots of two-dimensional data by calling :py:func:`xarray.plot.line`\nwith appropriate arguments. Consider the 3D variable ``air`` defined above. We can use line\nplots to check the variation of air temperature at three different latitudes along a longitude line:\n\n.. 
ipython:: python\n\n @savefig plotting_example_multiple_lines_x_kwarg.png\n air.isel(lon=10, lat=[19,21,22]).plot.line(x='time')\n\nIt is required to explicitly specify either\n\n1. ``x``: the dimension to be used for the x-axis, or\n2. ``hue``: the dimension you want to represent by multiple lines.\n\nThus, we could have made the previous plot by specifying ``hue='lat'`` instead of ``x='time'``.\nIf required, the automatic legend can be turned off using ``add_legend=False``. Alternatively,\n``hue`` can be passed directly to :py:func:`xarray.plot` as `air.isel(lon=10, lat=[19,21,22]).plot(hue='lat')`.\n\n\n========================\n Dimension along y-axis\n========================\n\nIt is also possible to make line plots such that the data are on the x-axis and a dimension is on the y-axis. This can be done by specifying the appropriate ``y`` keyword argument.\n\n.. ipython:: python\n\n @savefig plotting_example_xy_kwarg.png\n air.isel(time=10, lon=[10, 11]).plot(y='lat', hue='lon')\n\n============\n Step plots\n============\n\nAs an alternative, also a step plot similar to matplotlib's ``plt.step`` can be\nmade using 1D data.\n\n.. ipython:: python\n :okwarning:\n\n @savefig plotting_example_step.png width=4in\n air1d[:20].plot.step(where='mid')\n\nThe argument ``where`` defines where the steps should be placed, options are\n``'pre'`` (default), ``'post'``, and ``'mid'``. This is particularly handy\nwhen plotting data grouped with :py:func:`xarray.Dataset.groupby_bins`.\n\n.. ipython:: python\n\n air_grp = air.mean(['time','lon']).groupby_bins('lat',[0,23.5,66.5,90])\n air_mean = air_grp.mean()\n air_std = air_grp.std()\n air_mean.plot.step()\n (air_mean + air_std).plot.step(ls=':')\n (air_mean - air_std).plot.step(ls=':')\n plt.ylim(-20,30)\n @savefig plotting_example_step_groupby.png width=4in\n plt.title('Zonal mean temperature')\n\nIn this case, the actual boundaries of the bins are used and the ``where`` argument\nis ignored.\n\n\nOther axes kwargs\n~~~~~~~~~~~~~~~~~\n\n\nThe keyword arguments ``xincrease`` and ``yincrease`` let you control the axes direction.\n\n.. ipython:: python\n\n @savefig plotting_example_xincrease_yincrease_kwarg.png\n air.isel(time=10, lon=[10, 11]).plot.line(y='lat', hue='lon', xincrease=False, yincrease=False)\n\nIn addition, one can use ``xscale, yscale`` to set axes scaling; ``xticks, yticks`` to set axes ticks and ``xlim, ylim`` to set axes limits. These accept the same values as the matplotlib methods ``Axes.set_(x,y)scale()``, ``Axes.set_(x,y)ticks()``, ``Axes.set_(x,y)lim()`` respectively.\n\n\nTwo Dimensions\n~~~~~~~~~~~~~~\n\n================\n Simple Example\n================\n\nThe default method :py:meth:`xarray.DataArray.plot` calls :py:func:`xarray.plot.pcolormesh` by default when the data is two-dimensional.\n\n.. ipython:: python\n\n air2d = air.isel(time=500)\n\n @savefig 2d_simple.png width=4in\n air2d.plot()\n\nAll 2d plots in xarray allow the use of the keyword arguments ``yincrease``\nand ``xincrease``.\n\n.. ipython:: python\n\n @savefig 2d_simple_yincrease.png width=4in\n air2d.plot(yincrease=False)\n\n.. 
note::\n\n We use :py:func:`xarray.plot.pcolormesh` as the default two-dimensional plot\n method because it is more flexible than :py:func:`xarray.plot.imshow`.\n However, for large arrays, ``imshow`` can be much faster than ``pcolormesh``.\n If speed is important to you and you are plotting a regular mesh, consider\n using ``imshow``.\n\n================\n Missing Values\n================\n\nxarray plots data with :ref:`missing_values`.\n\n.. ipython:: python\n\n bad_air2d = air2d.copy()\n\n bad_air2d[dict(lat=slice(0, 10), lon=slice(0, 25))] = np.nan\n\n @savefig plotting_missing_values.png width=4in\n bad_air2d.plot()\n\n========================\n Nonuniform Coordinates\n========================\n\nIt's not necessary for the coordinates to be evenly spaced. Both\n:py:func:`xarray.plot.pcolormesh` (default) and :py:func:`xarray.plot.contourf` can\nproduce plots with nonuniform coordinates.\n\n.. ipython:: python\n\n b = air2d.copy()\n # Apply a nonlinear transformation to one of the coords\n b.coords['lat'] = np.log(b.coords['lat'])\n\n @savefig plotting_nonuniform_coords.png width=4in\n b.plot()\n\n====================\n Calling Matplotlib\n====================\n\nSince this is a thin wrapper around matplotlib, all the functionality of\nmatplotlib is available.\n\n.. ipython:: python\n\n air2d.plot(cmap=plt.cm.Blues)\n plt.title('These colors prove North America\\nhas fallen in the ocean')\n plt.ylabel('latitude')\n plt.xlabel('longitude')\n plt.tight_layout()\n\n @savefig plotting_2d_call_matplotlib.png width=4in\n plt.draw()\n\n.. note::\n\n xarray methods update label information and generally play around with the\n axes. So any kind of updates to the plot\n should be done *after* the call to the xarray's plot.\n In the example below, ``plt.xlabel`` effectively does nothing, since\n ``d_ylog.plot()`` updates the xlabel.\n\n .. ipython:: python\n\n plt.xlabel('Never gonna see this.')\n air2d.plot()\n\n @savefig plotting_2d_call_matplotlib2.png width=4in\n plt.draw()\n\n===========\n Colormaps\n===========\n\nxarray borrows logic from Seaborn to infer what kind of color map to use. For\nexample, consider the original data in Kelvins rather than Celsius:\n\n.. ipython:: python\n\n @savefig plotting_kelvin.png width=4in\n airtemps.air.isel(time=0).plot()\n\nThe Celsius data contain 0, so a diverging color map was used. The\nKelvins do not have 0, so the default color map was used.\n\n.. _robust-plotting:\n\n========\n Robust\n========\n\nOutliers often have an extreme effect on the output of the plot.\nHere we add two bad data points. This affects the color scale,\nwashing out the plot.\n\n.. ipython:: python\n\n air_outliers = airtemps.air.isel(time=0).copy()\n air_outliers[0, 0] = 100\n air_outliers[-1, -1] = 400\n\n @savefig plotting_robust1.png width=4in\n air_outliers.plot()\n\nThis plot shows that we have outliers. The easy way to visualize\nthe data without the outliers is to pass the parameter\n``robust=True``.\nThis will use the 2nd and 98th\npercentiles of the data to compute the color limits.\n\n.. ipython:: python\n\n @savefig plotting_robust2.png width=4in\n air_outliers.plot(robust=True)\n\nObserve that the ranges of the color bar have changed. The arrows on the\ncolor bar indicate\nthat the colors include data points outside the bounds.\n\n====================\n Discrete Colormaps\n====================\n\nIt is often useful, when visualizing 2d data, to use a discrete colormap,\nrather than the default continuous colormaps that matplotlib uses. 
The\n``levels`` keyword argument can be used to generate plots with discrete\ncolormaps. For example, to make a plot with 8 discrete color intervals:\n\n.. ipython:: python\n\n @savefig plotting_discrete_levels.png width=4in\n air2d.plot(levels=8)\n\nIt is also possible to use a list of levels to specify the boundaries of the\ndiscrete colormap:\n\n.. ipython:: python\n\n @savefig plotting_listed_levels.png width=4in\n air2d.plot(levels=[0, 12, 18, 30])\n\nYou can also specify a list of discrete colors through the ``colors`` argument:\n\n.. ipython:: python\n\n flatui = [\"#9b59b6\", \"#3498db\", \"#95a5a6\", \"#e74c3c\", \"#34495e\", \"#2ecc71\"]\n @savefig plotting_custom_colors_levels.png width=4in\n air2d.plot(levels=[0, 12, 18, 30], colors=flatui)\n\nFinally, if you have `Seaborn `_\ninstalled, you can also specify a seaborn color palette to the ``cmap``\nargument. Note that ``levels`` *must* be specified with seaborn color palettes\nif using ``imshow`` or ``pcolormesh`` (but not with ``contour`` or ``contourf``,\nsince levels are chosen automatically).\n\n.. ipython:: python\n :okwarning:\n\n @savefig plotting_seaborn_palette.png width=4in\n air2d.plot(levels=10, cmap='husl')\n plt.draw()\n\n.. _plotting.faceting:\n\nFaceting\n~~~~~~~~\n\nFaceting here refers to splitting an array along one or two dimensions and\nplotting each group.\nxarray's basic plotting is useful for plotting two dimensional arrays. What\nabout three or four dimensional arrays? That's where facets become helpful.\n\nConsider the temperature data set. There are 4 observations per day for two\nyears which makes for 2920 values along the time dimension.\nOne way to visualize this data is to make a\nseparate plot for each time period.\n\nThe faceted dimension should not have too many values;\nfaceting on the time dimension will produce 2920 plots. That's\ntoo much to be helpful. To handle this situation try performing\nan operation that reduces the size of the data in some way. For example, we\ncould compute the average air temperature for each month and reduce the\nsize of this dimension from 2920 -> 12. A simpler way is\nto just take a slice on that dimension.\nSo let's use a slice to pick 6 times throughout the first year.\n\n.. ipython:: python\n\n t = air.isel(time=slice(0, 365 * 4, 250))\n t.coords\n\n================\n Simple Example\n================\n\nThe easiest way to create faceted plots is to pass in ``row`` or ``col``\narguments to the xarray plotting methods/functions. This returns a\n:py:class:`xarray.plot.FacetGrid` object.\n\n.. ipython:: python\n\n @savefig plot_facet_dataarray.png\n g_simple = t.plot(x='lon', y='lat', col='time', col_wrap=3)\n\nFaceting also works for line plots.\n\n.. ipython:: python\n\n @savefig plot_facet_dataarray_line.png\n g_simple_line = t.isel(lat=slice(0,None,4)).plot(x='lon', hue='lat', col='time', col_wrap=3)\n\n===============\n 4 dimensional\n===============\n\nFor 4 dimensional arrays we can use the rows and columns of the grids.\nHere we create a 4 dimensional array by taking the original data and adding\na fixed amount. Now we can see how the temperature maps would compare if\none were much hotter.\n\n.. 
ipython:: python\n\n t2 = t.isel(time=slice(0, 2))\n t4d = xr.concat([t2, t2 + 40], pd.Index(['normal', 'hot'], name='fourth_dim'))\n # This is a 4d array\n t4d.coords\n\n @savefig plot_facet_4d.png\n t4d.plot(x='lon', y='lat', col='time', row='fourth_dim')\n\n================\n Other features\n================\n\nFaceted plotting supports other arguments common to xarray 2d plots.\n\n.. ipython:: python\n :suppress:\n\n plt.close('all')\n\n.. ipython:: python\n\n hasoutliers = t.isel(time=slice(0, 5)).copy()\n hasoutliers[0, 0, 0] = -100\n hasoutliers[-1, -1, -1] = 400\n\n @savefig plot_facet_robust.png\n g = hasoutliers.plot.pcolormesh('lon', 'lat', col='time', col_wrap=3,\n robust=True, cmap='viridis',\n\t\t\t\t cbar_kwargs={'label': 'this has outliers'})\n\n===================\n FacetGrid Objects\n===================\n\n:py:class:`xarray.plot.FacetGrid` is used to control the behavior of the\nmultiple plots.\nIt borrows an API and code from `Seaborn's FacetGrid\n`_.\nThe structure is contained within the ``axes`` and ``name_dicts``\nattributes, both 2d Numpy object arrays.\n\n.. ipython:: python\n\n g.axes\n\n g.name_dicts\n\nIt's possible to select the :py:class:`xarray.DataArray` or\n:py:class:`xarray.Dataset` corresponding to the FacetGrid through the\n``name_dicts``.\n\n.. ipython:: python\n\n g.data.loc[g.name_dicts[0, 0]]\n\nHere is an example of using the lower level API and then modifying the axes after\nthey have been plotted.\n\n.. ipython:: python\n\n g = t.plot.imshow('lon', 'lat', col='time', col_wrap=3, robust=True)\n\n for i, ax in enumerate(g.axes.flat):\n ax.set_title('Air Temperature %d' % i)\n\n bottomright = g.axes[-1, -1]\n bottomright.annotate('bottom right', (240, 40))\n\n @savefig plot_facet_iterator.png\n plt.draw()\n\nTODO: add an example of using the ``map`` method to plot dataset variables\n(e.g., with ``plt.quiver``).\n\n.. _plot-dataset:\n\nDatasets\n--------\n\n``xarray`` has limited support for plotting Dataset variables against each other.\nConsider this dataset\n\n.. ipython:: python\n\n ds = xr.tutorial.scatter_example_dataset()\n ds\n\n\nSuppose we want to scatter ``A`` against ``B``\n\n.. ipython:: python\n\n @savefig ds_simple_scatter.png\n ds.plot.scatter(x='A', y='B')\n\nThe ``hue`` kwarg lets you vary the color by variable value\n\n.. ipython:: python\n\n @savefig ds_hue_scatter.png\n ds.plot.scatter(x='A', y='B', hue='w')\n\nWhen ``hue`` is specified, a colorbar is added for numeric ``hue`` DataArrays by\ndefault and a legend is added for non-numeric ``hue`` DataArrays (as above).\nYou can force a legend instead of a colorbar by setting ``hue_style='discrete'``.\nAdditionally, the boolean kwarg ``add_guide`` can be used to prevent the display of a legend or colorbar (as appropriate).\n\n.. ipython:: python\n\n ds.w.values = [1, 2, 3, 5]\n @savefig ds_discrete_legend_hue_scatter.png\n ds.plot.scatter(x='A', y='B', hue='w', hue_style='discrete')\n\nThe ``markersize`` kwarg lets you vary the point's size by variable value. You can additionally pass ``size_norm`` to control how the variable's values are mapped to point sizes.\n\n.. ipython:: python\n\n @savefig ds_hue_size_scatter.png\n ds.plot.scatter(x='A', y='B', hue='z', hue_style='discrete', markersize='z')\n\nFaceting is also possible\n\n.. 
ipython:: python\n\n @savefig ds_facet_scatter.png\n ds.plot.scatter(x='A', y='B', col='x', row='z', hue='w', hue_style='discrete')\n\n\nFor more advanced scatter plots, we recommend converting the relevant data variables to a pandas DataFrame and using the extensive plotting capabilities of ``seaborn``.\n\n\n.. _plot-maps:\n\nMaps\n----\n\nTo follow this section you'll need to have Cartopy installed and working.\n\nThis script will plot the air temperature on a map.\n\n.. ipython:: python\n\n import cartopy.crs as ccrs\n air = xr.tutorial.open_dataset('air_temperature').air\n ax = plt.axes(projection=ccrs.Orthographic(-80, 35))\n air.isel(time=0).plot.contourf(ax=ax, transform=ccrs.PlateCarree());\n @savefig plotting_maps_cartopy.png width=100%\n ax.set_global(); ax.coastlines();\n\nWhen faceting on maps, the projection can be transferred to the ``plot``\nfunction using the ``subplot_kws`` keyword. The axes for the subplots created\nby faceting are accessible in the object returned by ``plot``:\n\n.. ipython:: python\n\n p = air.isel(time=[0, 4]).plot(transform=ccrs.PlateCarree(), col='time',\n subplot_kws={'projection': ccrs.Orthographic(-80, 35)})\n for ax in p.axes.flat:\n ax.coastlines()\n ax.gridlines()\n @savefig plotting_maps_cartopy_facetting.png width=100%\n plt.draw();\n\n\nDetails\n-------\n\nWays to Use\n~~~~~~~~~~~\n\nThere are three ways to use the xarray plotting functionality:\n\n1. Use ``plot`` as a convenience method for a DataArray.\n\n2. Access a specific plotting method from the ``plot`` attribute of a\n DataArray.\n\n3. Directly from the xarray plot submodule.\n\nThese are provided for user convenience; they all call the same code.\n\n.. ipython:: python\n\n import xarray.plot as xplt\n da = xr.DataArray(range(5))\n fig, axes = plt.subplots(ncols=2, nrows=2)\n da.plot(ax=axes[0, 0])\n da.plot.line(ax=axes[0, 1])\n xplt.plot(da, ax=axes[1, 0])\n xplt.line(da, ax=axes[1, 1])\n plt.tight_layout()\n @savefig plotting_ways_to_use.png width=6in\n plt.draw()\n\nHere the output is the same. Since the data is 1 dimensional the line plot\nwas used.\n\nThe convenience method :py:meth:`xarray.DataArray.plot` dispatches to an appropriate\nplotting function based on the dimensions of the ``DataArray`` and whether\nthe coordinates are sorted and uniformly spaced. This table\ndescribes what gets plotted:\n\n=============== ===========================\nDimensions Plotting function\n--------------- ---------------------------\n1 :py:func:`xarray.plot.line`\n2 :py:func:`xarray.plot.pcolormesh`\nAnything else :py:func:`xarray.plot.hist`\n=============== ===========================\n\nCoordinates\n~~~~~~~~~~~\n\nIf you'd like to find out what's really going on in the coordinate system,\nread on.\n\n.. ipython:: python\n\n a0 = xr.DataArray(np.zeros((4, 3, 2)), dims=('y', 'x', 'z'),\n name='temperature')\n a0[0, 0, 0] = 1\n a = a0.isel(z=0)\n a\n\nThe plot will produce an image corresponding to the values of the array.\nHence the top left pixel will be a different color than the others.\nBefore reading on, you may want to look at the coordinates and\nthink carefully about what the limits, labels, and orientation for\neach of the axes should be.\n\n.. ipython:: python\n\n @savefig plotting_example_2d_simple.png width=4in\n a.plot()\n\nIt may seem strange that\nthe values on the y axis are decreasing with -0.5 on the top. 
This is because\nthe pixels are centered over their coordinates, and the\naxis labels and ranges correspond to the values of the\ncoordinates.\n\nMultidimensional coordinates\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nSee also: :ref:`examples.multidim`.\n\nYou can plot irregular grids defined by multidimensional coordinates with\nxarray, but you'll have to tell the plot function to use these coordinates\ninstead of the default ones:\n\n.. ipython:: python\n\n lon, lat = np.meshgrid(np.linspace(-20, 20, 5), np.linspace(0, 30, 4))\n lon += lat/10\n lat += lon/10\n da = xr.DataArray(np.arange(20).reshape(4, 5), dims=['y', 'x'],\n coords = {'lat': (('y', 'x'), lat),\n 'lon': (('y', 'x'), lon)})\n\n @savefig plotting_example_2d_irreg.png width=4in\n da.plot.pcolormesh('lon', 'lat');\n\nNote that in this case, xarray still follows the pixel centered convention.\nThis might be undesirable in some cases, for example when your data is defined\non a polar projection (:issue:`781`). This is why the default is to not follow\nthis convention when plotting on a map:\n\n.. ipython:: python\n\n import cartopy.crs as ccrs\n ax = plt.subplot(projection=ccrs.PlateCarree());\n da.plot.pcolormesh('lon', 'lat', ax=ax);\n ax.scatter(lon, lat, transform=ccrs.PlateCarree());\n @savefig plotting_example_2d_irreg_map.png width=4in\n ax.coastlines(); ax.gridlines(draw_labels=True);\n\nYou can however decide to infer the cell boundaries and use the\n``infer_intervals`` keyword:\n\n.. ipython:: python\n\n ax = plt.subplot(projection=ccrs.PlateCarree());\n da.plot.pcolormesh('lon', 'lat', ax=ax, infer_intervals=True);\n ax.scatter(lon, lat, transform=ccrs.PlateCarree());\n @savefig plotting_example_2d_irreg_map_infer.png width=4in\n ax.coastlines(); ax.gridlines(draw_labels=True);\n\n.. note::\n The data model of xarray does not support datasets with `cell boundaries`_\n yet. If you want to use these coordinates, you'll have to make the plots\n outside the xarray framework.\n\n.. _cell boundaries: http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#cell-boundaries\n\nOne can also make line plots with multidimensional coordinates. In this case, ``hue`` must be a dimension name, not a coordinate name.\n\n.. ipython:: python\n\n f, ax = plt.subplots(2, 1)\n da.plot.line(x='lon', hue='y', ax=ax[0]);\n @savefig plotting_example_2d_hue_xy.png\n da.plot.line(x='lon', hue='x', ax=ax[1]);\n"},{"fileName":"api.py","filePath":"xarray/backends","id":2962,"nodeType":"File","text":"import os.path\nimport warnings\nfrom glob import glob\nfrom io import BytesIO\nfrom numbers import Number\nfrom pathlib import Path\nfrom textwrap import dedent\nfrom typing import (\n TYPE_CHECKING,\n Callable,\n Dict,\n Hashable,\n Iterable,\n Mapping,\n Tuple,\n Union,\n)\n\nimport numpy as np\n\nfrom .. 
import DataArray, Dataset, auto_combine, backends, coding, conventions\nfrom ..core import indexing\nfrom ..core.combine import (\n _infer_concat_order_from_positions,\n _nested_combine,\n combine_by_coords,\n)\nfrom ..core.utils import close_on_error, is_grib_path, is_remote_uri\nfrom .common import AbstractDataStore, ArrayWriter\nfrom .locks import _get_scheduler\n\nif TYPE_CHECKING:\n try:\n from dask.delayed import Delayed\n except ImportError:\n Delayed = None\n\n\nDATAARRAY_NAME = \"__xarray_dataarray_name__\"\nDATAARRAY_VARIABLE = \"__xarray_dataarray_variable__\"\n\n\ndef _get_default_engine_remote_uri():\n try:\n import netCDF4 # noqa: F401\n\n engine = \"netcdf4\"\n except ImportError: # pragma: no cover\n try:\n import pydap # noqa: F401\n\n engine = \"pydap\"\n except ImportError:\n raise ValueError(\n \"netCDF4 or pydap is required for accessing \"\n \"remote datasets via OPeNDAP\"\n )\n return engine\n\n\ndef _get_default_engine_grib():\n msgs = []\n try:\n import Nio # noqa: F401\n\n msgs += [\"set engine='pynio' to access GRIB files with PyNIO\"]\n except ImportError: # pragma: no cover\n pass\n try:\n import cfgrib # noqa: F401\n\n msgs += [\"set engine='cfgrib' to access GRIB files with cfgrib\"]\n except ImportError: # pragma: no cover\n pass\n if msgs:\n raise ValueError(\" or\\n\".join(msgs))\n else:\n raise ValueError(\"PyNIO or cfgrib is required for accessing \" \"GRIB files\")\n\n\ndef _get_default_engine_gz():\n try:\n import scipy # noqa: F401\n\n engine = \"scipy\"\n except ImportError: # pragma: no cover\n raise ValueError(\"scipy is required for accessing .gz files\")\n return engine\n\n\ndef _get_default_engine_netcdf():\n try:\n import netCDF4 # noqa: F401\n\n engine = \"netcdf4\"\n except ImportError: # pragma: no cover\n try:\n import scipy.io.netcdf # noqa: F401\n\n engine = \"scipy\"\n except ImportError:\n raise ValueError(\n \"cannot read or write netCDF files without \"\n \"netCDF4-python or scipy installed\"\n )\n return engine\n\n\ndef _get_engine_from_magic_number(filename_or_obj):\n # check byte header to determine file type\n if isinstance(filename_or_obj, bytes):\n magic_number = filename_or_obj[:8]\n else:\n if filename_or_obj.tell() != 0:\n raise ValueError(\n \"file-like object read/write pointer not at zero \"\n \"please close and reopen, or use a context \"\n \"manager\"\n )\n magic_number = filename_or_obj.read(8)\n filename_or_obj.seek(0)\n\n if magic_number.startswith(b\"CDF\"):\n engine = \"scipy\"\n elif magic_number.startswith(b\"\\211HDF\\r\\n\\032\\n\"):\n engine = \"h5netcdf\"\n if isinstance(filename_or_obj, bytes):\n raise ValueError(\n \"can't open netCDF4/HDF5 as bytes \"\n \"try passing a path or file-like object\"\n )\n else:\n if isinstance(filename_or_obj, bytes) and len(filename_or_obj) > 80:\n filename_or_obj = filename_or_obj[:80] + b\"...\"\n raise ValueError(\n \"{} is not a valid netCDF file \"\n \"did you mean to pass a string for a path instead?\".format(filename_or_obj)\n )\n return engine\n\n\ndef _get_default_engine(path, allow_remote=False):\n if allow_remote and is_remote_uri(path):\n engine = _get_default_engine_remote_uri()\n elif is_grib_path(path):\n engine = _get_default_engine_grib()\n elif path.endswith(\".gz\"):\n engine = _get_default_engine_gz()\n else:\n engine = _get_default_engine_netcdf()\n return engine\n\n\ndef _normalize_path(path):\n if is_remote_uri(path):\n return path\n else:\n return os.path.abspath(os.path.expanduser(path))\n\n\ndef _validate_dataset_names(dataset):\n \"\"\"DataArray.name 
and Dataset keys must be a string or None\"\"\"\n\n def check_name(name):\n if isinstance(name, str):\n if not name:\n raise ValueError(\n \"Invalid name for DataArray or Dataset key: \"\n \"string must be length 1 or greater for \"\n \"serialization to netCDF files\"\n )\n elif name is not None:\n raise TypeError(\n \"DataArray.name or Dataset key must be either a \"\n \"string or None for serialization to netCDF files\"\n )\n\n for k in dataset.variables:\n check_name(k)\n\n\ndef _validate_attrs(dataset):\n \"\"\"`attrs` must have a string key and a value which is either: a number,\n a string, an ndarray or a list/tuple of numbers/strings.\n \"\"\"\n\n def check_attr(name, value):\n if isinstance(name, str):\n if not name:\n raise ValueError(\n \"Invalid name for attr: string must be \"\n \"length 1 or greater for serialization to \"\n \"netCDF files\"\n )\n else:\n raise TypeError(\n \"Invalid name for attr: {} must be a string for \"\n \"serialization to netCDF files\".format(name)\n )\n\n if not isinstance(value, (str, Number, np.ndarray, np.number, list, tuple)):\n raise TypeError(\n \"Invalid value for attr: {} must be a number, \"\n \"a string, an ndarray or a list/tuple of \"\n \"numbers/strings for serialization to netCDF \"\n \"files\".format(value)\n )\n\n # Check attrs on the dataset itself\n for k, v in dataset.attrs.items():\n check_attr(k, v)\n\n # Check attrs on each variable within the dataset\n for variable in dataset.variables.values():\n for k, v in variable.attrs.items():\n check_attr(k, v)\n\n\ndef _protect_dataset_variables_inplace(dataset, cache):\n for name, variable in dataset.variables.items():\n if name not in variable.dims:\n # no need to protect IndexVariable objects\n data = indexing.CopyOnWriteArray(variable._data)\n if cache:\n data = indexing.MemoryCachedArray(data)\n variable.data = data\n\n\ndef _finalize_store(write, store):\n \"\"\" Finalize this store by explicitly syncing and closing\"\"\"\n del write # ensure writing is done first\n store.close()\n\n\ndef load_dataset(filename_or_obj, **kwargs):\n \"\"\"Open, load into memory, and close a Dataset from a file or file-like\n object.\n\n This is a thin wrapper around :py:meth:`~xarray.open_dataset`. It differs\n from `open_dataset` in that it loads the Dataset into memory, closes the\n file, and returns the Dataset. In contrast, `open_dataset` keeps the file\n handle open and lazy loads its contents. All parameters are passed directly\n to `open_dataset`. See that documentation for further details.\n\n Returns\n -------\n dataset : Dataset\n The newly created Dataset.\n\n See Also\n --------\n open_dataset\n \"\"\"\n if \"cache\" in kwargs:\n raise TypeError(\"cache has no effect in this context\")\n\n with open_dataset(filename_or_obj, **kwargs) as ds:\n return ds.load()\n\n\ndef load_dataarray(filename_or_obj, **kwargs):\n \"\"\"Open, load into memory, and close a DataArray from a file or file-like\n object containing a single data variable.\n\n This is a thin wrapper around :py:meth:`~xarray.open_dataarray`. It differs\n from `open_dataarray` in that it loads the Dataset into memory, closes the\n file, and returns the Dataset. In contrast, `open_dataarray` keeps the file\n handle open and lazy loads its contents. All parameters are passed directly\n to `open_dataarray`. 
See that documentation for further details.\n\n Returns\n -------\n datarray : DataArray\n The newly created DataArray.\n\n See Also\n --------\n open_dataarray\n \"\"\"\n if \"cache\" in kwargs:\n raise TypeError(\"cache has no effect in this context\")\n\n with open_dataarray(filename_or_obj, **kwargs) as da:\n return da.load()\n\n\ndef open_dataset(\n filename_or_obj,\n group=None,\n decode_cf=True,\n mask_and_scale=None,\n decode_times=True,\n autoclose=None,\n concat_characters=True,\n decode_coords=True,\n engine=None,\n chunks=None,\n lock=None,\n cache=None,\n drop_variables=None,\n backend_kwargs=None,\n use_cftime=None,\n):\n \"\"\"Open and decode a dataset from a file or file-like object.\n\n Parameters\n ----------\n filename_or_obj : str, Path, file or xarray.backends.*DataStore\n Strings and Path objects are interpreted as a path to a netCDF file\n or an OpenDAP URL and opened with python-netCDF4, unless the filename\n ends with .gz, in which case the file is gunzipped and opened with\n scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like\n objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).\n group : str, optional\n Path to the netCDF4 group in the given file to open (only works for\n netCDF4 files).\n decode_cf : bool, optional\n Whether to decode these variables, assuming they were saved according\n to CF conventions.\n mask_and_scale : bool, optional\n If True, replace array values equal to `_FillValue` with NA and scale\n values according to the formula `original_values * scale_factor +\n add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are\n taken from variable attributes (if they exist). If the `_FillValue` or\n `missing_value` attribute contains multiple values a warning will be\n issued and all array values matching one of the multiple values will\n be replaced by NA. mask_and_scale defaults to True except for the\n pseudonetcdf backend.\n decode_times : bool, optional\n If True, decode times encoded in the standard NetCDF datetime format\n into datetime objects. Otherwise, leave them encoded as numbers.\n autoclose : bool, optional\n If True, automatically close files to avoid OS Error of too many files\n being open. However, this option doesn't work with streams, e.g.,\n BytesIO.\n concat_characters : bool, optional\n If True, concatenate along the last dimension of character arrays to\n form string arrays. Dimensions will only be concatenated over (and\n removed) if they have no corresponding variable and if they are only\n used as the last dimension of character arrays.\n decode_coords : bool, optional\n If True, decode the 'coordinates' attribute to identify coordinates in\n the resulting dataset.\n engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib', \\\n 'pseudonetcdf'}, optional\n Engine to use when reading files. If not provided, the default engine\n is chosen based on available dependencies, with a preference for\n 'netcdf4'.\n chunks : int or dict, optional\n If chunks is provided, it used to load the new dataset into dask\n arrays. ``chunks={}`` loads the dataset with dask using a single\n chunk for all arrays.\n lock : False or duck threading.Lock, optional\n Resource lock to use when reading data from disk. Only relevant when\n using dask or another form of parallelism. 
By default, appropriate\n locks are chosen to safely read and write files with the currently\n active dask scheduler.\n cache : bool, optional\n If True, cache data loaded from the underlying datastore in memory as\n NumPy arrays when accessed to avoid reading from the underlying data-\n store multiple times. Defaults to True unless you specify the `chunks`\n argument to use dask, in which case it defaults to False. Does not\n change the behavior of coordinates corresponding to dimensions, which\n always load their data from disk into a ``pandas.Index``.\n drop_variables: string or iterable, optional\n A variable or list of variables to exclude from being parsed from the\n dataset. This may be useful to drop variables with problems or\n inconsistent values.\n backend_kwargs: dictionary, optional\n A dictionary of keyword arguments to pass on to the backend. This\n may be useful when backend options would improve performance or\n allow user control of dataset processing.\n use_cftime: bool, optional\n Only relevant if encoded dates come from a standard calendar\n (e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not\n specified). If None (default), attempt to decode times to\n ``np.datetime64[ns]`` objects; if this is not possible, decode times to\n ``cftime.datetime`` objects. If True, always decode times to\n ``cftime.datetime`` objects, regardless of whether or not they can be\n represented using ``np.datetime64[ns]`` objects. If False, always\n decode times to ``np.datetime64[ns]`` objects; if this is not possible\n raise an error.\n\n Returns\n -------\n dataset : Dataset\n The newly created dataset.\n\n Notes\n -----\n ``open_dataset`` opens the file with read-only access. When you modify\n values of a Dataset, even one linked to files on disk, only the in-memory\n copy you are manipulating in xarray is modified: the original file on disk\n is never touched.\n\n See Also\n --------\n open_mfdataset\n \"\"\"\n engines = [\n None,\n \"netcdf4\",\n \"scipy\",\n \"pydap\",\n \"h5netcdf\",\n \"pynio\",\n \"cfgrib\",\n \"pseudonetcdf\",\n ]\n if engine not in engines:\n raise ValueError(\n \"unrecognized engine for open_dataset: {}\\n\"\n \"must be one of: {}\".format(engine, engines)\n )\n\n if autoclose is not None:\n warnings.warn(\n \"The autoclose argument is no longer used by \"\n \"xarray.open_dataset() and is now ignored; it will be removed in \"\n \"a future version of xarray. 
If necessary, you can control the \"\n \"maximum number of simultaneous open files with \"\n \"xarray.set_options(file_cache_maxsize=...).\",\n FutureWarning,\n stacklevel=2,\n )\n\n if mask_and_scale is None:\n mask_and_scale = not engine == \"pseudonetcdf\"\n\n if not decode_cf:\n mask_and_scale = False\n decode_times = False\n concat_characters = False\n decode_coords = False\n\n if cache is None:\n cache = chunks is None\n\n if backend_kwargs is None:\n backend_kwargs = {}\n\n def maybe_decode_store(store, lock=False):\n ds = conventions.decode_cf(\n store,\n mask_and_scale=mask_and_scale,\n decode_times=decode_times,\n concat_characters=concat_characters,\n decode_coords=decode_coords,\n drop_variables=drop_variables,\n use_cftime=use_cftime,\n )\n\n _protect_dataset_variables_inplace(ds, cache)\n\n if chunks is not None:\n from dask.base import tokenize\n\n # if passed an actual file path, augment the token with\n # the file modification time\n if isinstance(filename_or_obj, str) and not is_remote_uri(filename_or_obj):\n mtime = os.path.getmtime(filename_or_obj)\n else:\n mtime = None\n token = tokenize(\n filename_or_obj,\n mtime,\n group,\n decode_cf,\n mask_and_scale,\n decode_times,\n concat_characters,\n decode_coords,\n engine,\n chunks,\n drop_variables,\n use_cftime,\n )\n name_prefix = \"open_dataset-%s\" % token\n ds2 = ds.chunk(chunks, name_prefix=name_prefix, token=token)\n ds2._file_obj = ds._file_obj\n else:\n ds2 = ds\n\n return ds2\n\n if isinstance(filename_or_obj, Path):\n filename_or_obj = str(filename_or_obj)\n\n if isinstance(filename_or_obj, AbstractDataStore):\n store = filename_or_obj\n\n elif isinstance(filename_or_obj, str):\n filename_or_obj = _normalize_path(filename_or_obj)\n\n if engine is None:\n engine = _get_default_engine(filename_or_obj, allow_remote=True)\n if engine == \"netcdf4\":\n store = backends.NetCDF4DataStore.open(\n filename_or_obj, group=group, lock=lock, **backend_kwargs\n )\n elif engine == \"scipy\":\n store = backends.ScipyDataStore(filename_or_obj, **backend_kwargs)\n elif engine == \"pydap\":\n store = backends.PydapDataStore.open(filename_or_obj, **backend_kwargs)\n elif engine == \"h5netcdf\":\n store = backends.H5NetCDFStore(\n filename_or_obj, group=group, lock=lock, **backend_kwargs\n )\n elif engine == \"pynio\":\n store = backends.NioDataStore(filename_or_obj, lock=lock, **backend_kwargs)\n elif engine == \"pseudonetcdf\":\n store = backends.PseudoNetCDFDataStore.open(\n filename_or_obj, lock=lock, **backend_kwargs\n )\n elif engine == \"cfgrib\":\n store = backends.CfGribDataStore(\n filename_or_obj, lock=lock, **backend_kwargs\n )\n\n else:\n if engine not in [None, \"scipy\", \"h5netcdf\"]:\n raise ValueError(\n \"can only read bytes or file-like objects \"\n \"with engine='scipy' or 'h5netcdf'\"\n )\n engine = _get_engine_from_magic_number(filename_or_obj)\n if engine == \"scipy\":\n store = backends.ScipyDataStore(filename_or_obj, **backend_kwargs)\n elif engine == \"h5netcdf\":\n store = backends.H5NetCDFStore(\n filename_or_obj, group=group, lock=lock, **backend_kwargs\n )\n\n with close_on_error(store):\n ds = maybe_decode_store(store)\n\n # Ensure source filename always stored in dataset object (GH issue #2550)\n if \"source\" not in ds.encoding:\n if isinstance(filename_or_obj, str):\n ds.encoding[\"source\"] = filename_or_obj\n\n return ds\n\n\ndef open_dataarray(\n filename_or_obj,\n group=None,\n decode_cf=True,\n mask_and_scale=None,\n decode_times=True,\n autoclose=None,\n concat_characters=True,\n 
decode_coords=True,\n engine=None,\n chunks=None,\n lock=None,\n cache=None,\n drop_variables=None,\n backend_kwargs=None,\n use_cftime=None,\n):\n \"\"\"Open an DataArray from a file or file-like object containing a single\n data variable.\n\n This is designed to read netCDF files with only one data variable. If\n multiple variables are present then a ValueError is raised.\n\n Parameters\n ----------\n filename_or_obj : str, Path, file or xarray.backends.*DataStore\n Strings and Paths are interpreted as a path to a netCDF file or an\n OpenDAP URL and opened with python-netCDF4, unless the filename ends\n with .gz, in which case the file is gunzipped and opened with\n scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like\n objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).\n group : str, optional\n Path to the netCDF4 group in the given file to open (only works for\n netCDF4 files).\n decode_cf : bool, optional\n Whether to decode these variables, assuming they were saved according\n to CF conventions.\n mask_and_scale : bool, optional\n If True, replace array values equal to `_FillValue` with NA and scale\n values according to the formula `original_values * scale_factor +\n add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are\n taken from variable attributes (if they exist). If the `_FillValue` or\n `missing_value` attribute contains multiple values a warning will be\n issued and all array values matching one of the multiple values will\n be replaced by NA. mask_and_scale defaults to True except for the\n pseudonetcdf backend.\n decode_times : bool, optional\n If True, decode times encoded in the standard NetCDF datetime format\n into datetime objects. Otherwise, leave them encoded as numbers.\n concat_characters : bool, optional\n If True, concatenate along the last dimension of character arrays to\n form string arrays. Dimensions will only be concatenated over (and\n removed) if they have no corresponding variable and if they are only\n used as the last dimension of character arrays.\n decode_coords : bool, optional\n If True, decode the 'coordinates' attribute to identify coordinates in\n the resulting dataset.\n engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib'}, \\\n optional\n Engine to use when reading files. If not provided, the default engine\n is chosen based on available dependencies, with a preference for\n 'netcdf4'.\n chunks : int or dict, optional\n If chunks is provided, it used to load the new dataset into dask\n arrays.\n lock : False or duck threading.Lock, optional\n Resource lock to use when reading data from disk. Only relevant when\n using dask or another form of parallelism. By default, appropriate\n locks are chosen to safely read and write files with the currently\n active dask scheduler.\n cache : bool, optional\n If True, cache data loaded from the underlying datastore in memory as\n NumPy arrays when accessed to avoid reading from the underlying data-\n store multiple times. Defaults to True unless you specify the `chunks`\n argument to use dask, in which case it defaults to False. Does not\n change the behavior of coordinates corresponding to dimensions, which\n always load their data from disk into a ``pandas.Index``.\n drop_variables: string or iterable, optional\n A variable or list of variables to exclude from being parsed from the\n dataset. 
This may be useful to drop variables with problems or\n inconsistent values.\n backend_kwargs: dictionary, optional\n A dictionary of keyword arguments to pass on to the backend. This\n may be useful when backend options would improve performance or\n allow user control of dataset processing.\n use_cftime: bool, optional\n Only relevant if encoded dates come from a standard calendar\n (e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not\n specified). If None (default), attempt to decode times to\n ``np.datetime64[ns]`` objects; if this is not possible, decode times to\n ``cftime.datetime`` objects. If True, always decode times to\n ``cftime.datetime`` objects, regardless of whether or not they can be\n represented using ``np.datetime64[ns]`` objects. If False, always\n decode times to ``np.datetime64[ns]`` objects; if this is not possible\n raise an error.\n\n Notes\n -----\n This is designed to be fully compatible with `DataArray.to_netcdf`. Saving\n using `DataArray.to_netcdf` and then loading with this function will\n produce an identical result.\n\n All parameters are passed directly to `xarray.open_dataset`. See that\n documentation for further details.\n\n See also\n --------\n open_dataset\n \"\"\"\n\n dataset = open_dataset(\n filename_or_obj,\n group=group,\n decode_cf=decode_cf,\n mask_and_scale=mask_and_scale,\n decode_times=decode_times,\n autoclose=autoclose,\n concat_characters=concat_characters,\n decode_coords=decode_coords,\n engine=engine,\n chunks=chunks,\n lock=lock,\n cache=cache,\n drop_variables=drop_variables,\n backend_kwargs=backend_kwargs,\n use_cftime=use_cftime,\n )\n\n if len(dataset.data_vars) != 1:\n raise ValueError(\n \"Given file dataset contains more than one data \"\n \"variable. Please read with xarray.open_dataset and \"\n \"then select the variable you want.\"\n )\n else:\n data_array, = dataset.data_vars.values()\n\n data_array._file_obj = dataset._file_obj\n\n # Reset names if they were changed during saving\n # to ensure that we can 'roundtrip' perfectly\n if DATAARRAY_NAME in dataset.attrs:\n data_array.name = dataset.attrs[DATAARRAY_NAME]\n del dataset.attrs[DATAARRAY_NAME]\n\n if data_array.name == DATAARRAY_VARIABLE:\n data_array.name = None\n\n return data_array\n\n\nclass _MultiFileCloser:\n __slots__ = (\"file_objs\",)\n\n def __init__(self, file_objs):\n self.file_objs = file_objs\n\n def close(self):\n for f in self.file_objs:\n f.close()\n\n\ndef open_mfdataset(\n paths,\n chunks=None,\n concat_dim=\"_not_supplied\",\n compat=\"no_conflicts\",\n preprocess=None,\n engine=None,\n lock=None,\n data_vars=\"all\",\n coords=\"different\",\n combine=\"_old_auto\",\n autoclose=None,\n parallel=False,\n join=\"outer\",\n **kwargs\n):\n \"\"\"Open multiple files as a single dataset.\n\n If combine='by_coords' then the function ``combine_by_coords`` is used to combine\n the datasets into one before returning the result, and if combine='nested' then\n ``combine_nested`` is used. The filepaths must be structured according to which\n combining function is used, the details of which are given in the documentation for\n ``combine_by_coords`` and ``combine_nested``. By default the old (now deprecated)\n ``auto_combine`` will be used, please specify either ``combine='by_coords'`` or\n ``combine='nested'`` in future. Requires dask to be installed. See documentation for\n details on dask [1]. 
Attributes from the first dataset file are used for the\n combined dataset.\n\n Parameters\n ----------\n paths : str or sequence\n Either a string glob in the form \"path/to/my/files/*.nc\" or an explicit list of\n files to open. Paths can be given as strings or as pathlib Paths. If\n concatenation along more than one dimension is desired, then ``paths`` must be a\n nested list-of-lists (see ``manual_combine`` for details). (A string glob will\n be expanded to a 1-dimensional list.)\n chunks : int or dict, optional\n Dictionary with keys given by dimension names and values given by chunk sizes.\n In general, these should divide the dimensions of each dataset. If int, chunk\n each dimension by ``chunks``. By default, chunks will be chosen to load entire\n input files into memory at once. This has a major impact on performance: please\n see the full documentation for more details [2].\n concat_dim : str, or list of str, DataArray, Index or None, optional\n Dimensions to concatenate files along. You only need to provide this argument\n if any of the dimensions along which you want to concatenate is not a dimension\n in the original datasets, e.g., if you want to stack a collection of 2D arrays\n along a third dimension. Set ``concat_dim=[..., None, ...]`` explicitly to\n disable concatenation along a particular dimension.\n combine : {'by_coords', 'nested'}, optional\n Whether ``xarray.combine_by_coords`` or ``xarray.combine_nested`` is used to\n combine all the data. If this argument is not provided, `xarray.auto_combine` is\n used, but in the future this behavior will switch to use\n `xarray.combine_by_coords` by default.\n compat : {'identical', 'equals', 'broadcast_equals',\n 'no_conflicts', 'override'}, optional\n String indicating how to compare variables of the same name for\n potential conflicts when merging:\n * 'broadcast_equals': all values must be equal when variables are\n broadcast against each other to ensure common dimensions.\n * 'equals': all values and dimensions must be the same.\n * 'identical': all values, dimensions and attributes must be the\n same.\n * 'no_conflicts': only values which are not null in both datasets\n must be equal. The returned dataset then contains the combination\n of all non-null values.\n * 'override': skip comparing and pick variable from first dataset\n preprocess : callable, optional\n If provided, call this function on each dataset prior to concatenation.\n You can find the file-name from which each dataset was loaded in\n ``ds.encoding['source']``.\n engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib'}, \\\n optional\n Engine to use when reading files. If not provided, the default engine\n is chosen based on available dependencies, with a preference for\n 'netcdf4'.\n lock : False or duck threading.Lock, optional\n Resource lock to use when reading data from disk. Only relevant when\n using dask or another form of parallelism. By default, appropriate\n locks are chosen to safely read and write files with the currently\n active dask scheduler.\n data_vars : {'minimal', 'different', 'all' or list of str}, optional\n These data variables will be concatenated together:\n * 'minimal': Only data variables in which the dimension already\n appears are included.\n * 'different': Data variables which are not equal (ignoring\n attributes) across all datasets are also concatenated (as well as\n all for which dimension already appears). 
Beware: this option may\n load the data payload of data variables into memory if they are not\n already loaded.\n * 'all': All data variables will be concatenated.\n * list of str: The listed data variables will be concatenated, in\n addition to the 'minimal' data variables.\n coords : {'minimal', 'different', 'all' or list of str}, optional\n These coordinate variables will be concatenated together:\n * 'minimal': Only coordinates in which the dimension already appears\n are included.\n * 'different': Coordinates which are not equal (ignoring attributes)\n across all datasets are also concatenated (as well as all for which\n dimension already appears). Beware: this option may load the data\n payload of coordinate variables into memory if they are not already\n loaded.\n * 'all': All coordinate variables will be concatenated, except\n those corresponding to other dimensions.\n * list of str: The listed coordinate variables will be concatenated,\n in addition the 'minimal' coordinates.\n parallel : bool, optional\n If True, the open and preprocess steps of this function will be\n performed in parallel using ``dask.delayed``. Default is False.\n join : {'outer', 'inner', 'left', 'right', 'exact, 'override'}, optional\n String indicating how to combine differing indexes\n (excluding concat_dim) in objects\n\n - 'outer': use the union of object indexes\n - 'inner': use the intersection of object indexes\n - 'left': use indexes from the first object with each dimension\n - 'right': use indexes from the last object with each dimension\n - 'exact': instead of aligning, raise `ValueError` when indexes to be\n aligned are not equal\n - 'override': if indexes are of same size, rewrite indexes to be\n those of the first object with that dimension. Indexes for the same\n dimension must have the same size in all objects.\n **kwargs : optional\n Additional arguments passed on to :py:func:`xarray.open_dataset`.\n\n Returns\n -------\n xarray.Dataset\n\n Notes\n -----\n ``open_mfdataset`` opens files with read-only access. When you modify values\n of a Dataset, even one linked to files on disk, only the in-memory copy you\n are manipulating in xarray is modified: the original file on disk is never\n touched.\n\n See Also\n --------\n combine_by_coords\n combine_nested\n auto_combine\n open_dataset\n\n References\n ----------\n\n .. [1] http://xarray.pydata.org/en/stable/dask.html\n .. [2] http://xarray.pydata.org/en/stable/dask.html#chunking-and-performance\n \"\"\"\n if isinstance(paths, str):\n if is_remote_uri(paths):\n raise ValueError(\n \"cannot do wild-card matching for paths that are remote URLs: \"\n \"{!r}. 
Instead, supply paths as an explicit list of strings.\".format(\n paths\n )\n )\n paths = sorted(glob(paths))\n else:\n paths = [str(p) if isinstance(p, Path) else p for p in paths]\n\n if not paths:\n raise OSError(\"no files to open\")\n\n # If combine='by_coords' then this is unnecessary, but quick.\n # If combine='nested' then this creates a flat list which is easier to\n # iterate over, while saving the originally-supplied structure as \"ids\"\n if combine == \"nested\":\n if str(concat_dim) == \"_not_supplied\":\n raise ValueError(\"Must supply concat_dim when using \" \"combine='nested'\")\n else:\n if isinstance(concat_dim, (str, DataArray)) or concat_dim is None:\n concat_dim = [concat_dim]\n combined_ids_paths = _infer_concat_order_from_positions(paths)\n ids, paths = (list(combined_ids_paths.keys()), list(combined_ids_paths.values()))\n\n open_kwargs = dict(\n engine=engine, chunks=chunks or {}, lock=lock, autoclose=autoclose, **kwargs\n )\n\n if parallel:\n import dask\n\n # wrap the open_dataset, getattr, and preprocess with delayed\n open_ = dask.delayed(open_dataset)\n getattr_ = dask.delayed(getattr)\n if preprocess is not None:\n preprocess = dask.delayed(preprocess)\n else:\n open_ = open_dataset\n getattr_ = getattr\n\n datasets = [open_(p, **open_kwargs) for p in paths]\n file_objs = [getattr_(ds, \"_file_obj\") for ds in datasets]\n if preprocess is not None:\n datasets = [preprocess(ds) for ds in datasets]\n\n if parallel:\n # calling compute here will return the datasets/file_objs lists,\n # the underlying datasets will still be stored as dask arrays\n datasets, file_objs = dask.compute(datasets, file_objs)\n\n # Combine all datasets, closing them in case of a ValueError\n try:\n if combine == \"_old_auto\":\n # Use the old auto_combine for now\n # Remove this after deprecation cycle from #2616 is complete\n basic_msg = dedent(\n \"\"\"\\\n In xarray version 0.15 the default behaviour of `open_mfdataset`\n will change. To retain the existing behavior, pass\n combine='nested'. To use future default behavior, pass\n combine='by_coords'. 
See\n http://xarray.pydata.org/en/stable/combining.html#combining-multi\n \"\"\"\n )\n warnings.warn(basic_msg, FutureWarning, stacklevel=2)\n\n combined = auto_combine(\n datasets,\n concat_dim=concat_dim,\n compat=compat,\n data_vars=data_vars,\n coords=coords,\n join=join,\n from_openmfds=True,\n )\n elif combine == \"nested\":\n # Combined nested list by successive concat and merge operations\n # along each dimension, using structure given by \"ids\"\n combined = _nested_combine(\n datasets,\n concat_dims=concat_dim,\n compat=compat,\n data_vars=data_vars,\n coords=coords,\n ids=ids,\n join=join,\n )\n elif combine == \"by_coords\":\n # Redo ordering from coordinates, ignoring how they were ordered\n # previously\n combined = combine_by_coords(\n datasets, compat=compat, data_vars=data_vars, coords=coords, join=join\n )\n else:\n raise ValueError(\n \"{} is an invalid option for the keyword argument\"\n \" ``combine``\".format(combine)\n )\n except ValueError:\n for ds in datasets:\n ds.close()\n raise\n\n combined._file_obj = _MultiFileCloser(file_objs)\n combined.attrs = datasets[0].attrs\n return combined\n\n\nWRITEABLE_STORES: Dict[str, Callable] = {\n \"netcdf4\": backends.NetCDF4DataStore.open,\n \"scipy\": backends.ScipyDataStore,\n \"h5netcdf\": backends.H5NetCDFStore,\n}\n\n\ndef to_netcdf(\n dataset: Dataset,\n path_or_file=None,\n mode: str = \"w\",\n format: str = None,\n group: str = None,\n engine: str = None,\n encoding: Mapping = None,\n unlimited_dims: Iterable[Hashable] = None,\n compute: bool = True,\n multifile: bool = False,\n invalid_netcdf: bool = False,\n) -> Union[Tuple[ArrayWriter, AbstractDataStore], bytes, \"Delayed\", None]:\n \"\"\"This function creates an appropriate datastore for writing a dataset to\n disk as a netCDF file\n\n See `Dataset.to_netcdf` for full API docs.\n\n The ``multifile`` argument is only for the private use of save_mfdataset.\n \"\"\"\n if isinstance(path_or_file, Path):\n path_or_file = str(path_or_file)\n\n if encoding is None:\n encoding = {}\n\n if path_or_file is None:\n if engine is None:\n engine = \"scipy\"\n elif engine != \"scipy\":\n raise ValueError(\n \"invalid engine for creating bytes with \"\n \"to_netcdf: %r. 
Only the default engine \"\n \"or engine='scipy' is supported\" % engine\n )\n if not compute:\n raise NotImplementedError(\n \"to_netcdf() with compute=False is not yet implemented when \"\n \"returning bytes\"\n )\n elif isinstance(path_or_file, str):\n if engine is None:\n engine = _get_default_engine(path_or_file)\n path_or_file = _normalize_path(path_or_file)\n else: # file-like object\n engine = \"scipy\"\n\n # validate Dataset keys, DataArray names, and attr keys/values\n _validate_dataset_names(dataset)\n _validate_attrs(dataset)\n\n try:\n store_open = WRITEABLE_STORES[engine]\n except KeyError:\n raise ValueError(\"unrecognized engine for to_netcdf: %r\" % engine)\n\n if format is not None:\n format = format.upper()\n\n # handle scheduler specific logic\n scheduler = _get_scheduler()\n have_chunks = any(v.chunks for v in dataset.variables.values())\n\n autoclose = have_chunks and scheduler in [\"distributed\", \"multiprocessing\"]\n if autoclose and engine == \"scipy\":\n raise NotImplementedError(\n \"Writing netCDF files with the %s backend \"\n \"is not currently supported with dask's %s \"\n \"scheduler\" % (engine, scheduler)\n )\n\n target = path_or_file if path_or_file is not None else BytesIO()\n kwargs = dict(autoclose=True) if autoclose else {}\n if invalid_netcdf:\n if engine == \"h5netcdf\":\n kwargs[\"invalid_netcdf\"] = invalid_netcdf\n else:\n raise ValueError(\n \"unrecognized option 'invalid_netcdf' for engine %s\" % engine\n )\n store = store_open(target, mode, format, group, **kwargs)\n\n if unlimited_dims is None:\n unlimited_dims = dataset.encoding.get(\"unlimited_dims\", None)\n if unlimited_dims is not None:\n if isinstance(unlimited_dims, str) or not isinstance(unlimited_dims, Iterable):\n unlimited_dims = [unlimited_dims]\n else:\n unlimited_dims = list(unlimited_dims)\n\n writer = ArrayWriter()\n\n # TODO: figure out how to refactor this logic (here and in save_mfdataset)\n # to avoid this mess of conditionals\n try:\n # TODO: allow this work (setting up the file for writing array data)\n # to be parallelized with dask\n dump_to_store(\n dataset, store, writer, encoding=encoding, unlimited_dims=unlimited_dims\n )\n if autoclose:\n store.close()\n\n if multifile:\n return writer, store\n\n writes = writer.sync(compute=compute)\n\n if path_or_file is None:\n store.sync()\n return target.getvalue()\n finally:\n if not multifile and compute:\n store.close()\n\n if not compute:\n import dask\n\n return dask.delayed(_finalize_store)(writes, store)\n return None\n\n\ndef dump_to_store(\n dataset, store, writer=None, encoder=None, encoding=None, unlimited_dims=None\n):\n \"\"\"Store dataset contents to a backends.*DataStore object.\"\"\"\n if writer is None:\n writer = ArrayWriter()\n\n if encoding is None:\n encoding = {}\n\n variables, attrs = conventions.encode_dataset_coordinates(dataset)\n\n check_encoding = set()\n for k, enc in encoding.items():\n # no need to shallow copy the variable again; that already happened\n # in encode_dataset_coordinates\n variables[k].encoding = enc\n check_encoding.add(k)\n\n if encoder:\n variables, attrs = encoder(variables, attrs)\n\n store.store(variables, attrs, check_encoding, writer, unlimited_dims=unlimited_dims)\n\n\ndef save_mfdataset(\n datasets, paths, mode=\"w\", format=None, groups=None, engine=None, compute=True\n):\n \"\"\"Write multiple datasets to disk as netCDF files simultaneously.\n\n This function is intended for use with datasets consisting of dask.array\n objects, in which case it can write the multiple 
datasets to disk\n simultaneously using a shared thread pool.\n\n When not using dask, it is no different than calling ``to_netcdf``\n repeatedly.\n\n Parameters\n ----------\n datasets : list of xarray.Dataset\n List of datasets to save.\n paths : list of str or list of Paths\n List of paths to which to save each corresponding dataset.\n mode : {'w', 'a'}, optional\n Write ('w') or append ('a') mode. If mode='w', any existing file at\n these locations will be overwritten.\n format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT',\n 'NETCDF3_CLASSIC'}, optional\n\n File format for the resulting netCDF file:\n\n * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API\n features.\n * NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only\n netCDF 3 compatible API features.\n * NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,\n which fully supports 2+ GB files, but is only compatible with\n clients linked against netCDF version 3.6.0 or later.\n * NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not\n handle 2+ GB files very well.\n\n All formats are supported by the netCDF4-python library.\n scipy.io.netcdf only supports the last two formats.\n\n The default format is NETCDF4 if you are saving a file to disk and\n have the netCDF4-python library available. Otherwise, xarray falls\n back to using scipy to write netCDF files and defaults to the\n NETCDF3_64BIT format (scipy does not support netCDF4).\n groups : list of str, optional\n Paths to the netCDF4 group in each corresponding file to which to save\n datasets (only works for format='NETCDF4'). The groups will be created\n if necessary.\n engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional\n Engine to use when writing netCDF files. If not provided, the\n default engine is chosen based on available dependencies, with a\n preference for 'netcdf4' if writing to a file on disk.\n See `Dataset.to_netcdf` for additional information.\n compute: boolean\n If true compute immediately, otherwise return a\n ``dask.delayed.Delayed`` object that can be computed later.\n\n Examples\n --------\n\n Save a dataset into one netCDF per year of data:\n\n >>> years, datasets = zip(*ds.groupby('time.year'))\n >>> paths = ['%s.nc' % y for y in years]\n >>> xr.save_mfdataset(datasets, paths)\n \"\"\"\n if mode == \"w\" and len(set(paths)) < len(paths):\n raise ValueError(\n \"cannot use mode='w' when writing multiple \" \"datasets to the same path\"\n )\n\n for obj in datasets:\n if not isinstance(obj, Dataset):\n raise TypeError(\n \"save_mfdataset only supports writing Dataset \"\n \"objects, received type %s\" % type(obj)\n )\n\n if groups is None:\n groups = [None] * len(datasets)\n\n if len({len(datasets), len(paths), len(groups)}) > 1:\n raise ValueError(\n \"must supply lists of the same length for the \"\n \"datasets, paths and groups arguments to \"\n \"save_mfdataset\"\n )\n\n writers, stores = zip(\n *[\n to_netcdf(\n ds, path, mode, format, group, engine, compute=compute, multifile=True\n )\n for ds, path, group in zip(datasets, paths, groups)\n ]\n )\n\n try:\n writes = [w.sync(compute=compute) for w in writers]\n finally:\n if compute:\n for store in stores:\n store.close()\n\n if not compute:\n import dask\n\n return dask.delayed(\n [dask.delayed(_finalize_store)(w, s) for w, s in zip(writes, stores)]\n )\n\n\ndef _validate_datatypes_for_zarr_append(dataset):\n \"\"\"DataArray.name and Dataset keys must be a string or None\"\"\"\n\n def check_dtype(var):\n if (\n not np.issubdtype(var.dtype, np.number)\n and 
not coding.strings.is_unicode_dtype(var.dtype)\n and not var.dtype == object\n ):\n # and not re.match('^bytes[1-9]+$', var.dtype.name)):\n raise ValueError(\n \"Invalid dtype for data variable: {} \"\n \"dtype must be a subtype of number, \"\n \"a fixed sized string, a fixed size \"\n \"unicode string or an object\".format(var)\n )\n\n for k in dataset.data_vars.values():\n check_dtype(k)\n\n\ndef _validate_append_dim_and_encoding(\n ds_to_append, store, append_dim, encoding, **open_kwargs\n):\n try:\n ds = backends.zarr.open_zarr(store, **open_kwargs)\n except ValueError: # store empty\n return\n if append_dim:\n if append_dim not in ds.dims:\n raise ValueError(\n \"{} not a valid dimension in the Dataset\".format(append_dim)\n )\n for data_var in ds_to_append:\n if data_var in ds:\n if append_dim is None:\n raise ValueError(\n \"variable '{}' already exists, but append_dim \"\n \"was not set\".format(data_var)\n )\n if data_var in encoding.keys():\n raise ValueError(\n \"variable '{}' already exists, but encoding was\"\n \"provided\".format(data_var)\n )\n\n\ndef to_zarr(\n dataset,\n store=None,\n mode=None,\n synchronizer=None,\n group=None,\n encoding=None,\n compute=True,\n consolidated=False,\n append_dim=None,\n):\n \"\"\"This function creates an appropriate datastore for writing a dataset to\n a zarr ztore\n\n See `Dataset.to_zarr` for full API docs.\n \"\"\"\n if isinstance(store, Path):\n store = str(store)\n if encoding is None:\n encoding = {}\n\n # validate Dataset keys, DataArray names, and attr keys/values\n _validate_dataset_names(dataset)\n _validate_attrs(dataset)\n\n if mode == \"a\":\n _validate_datatypes_for_zarr_append(dataset)\n _validate_append_dim_and_encoding(\n dataset,\n store,\n append_dim,\n group=group,\n consolidated=consolidated,\n encoding=encoding,\n )\n\n zstore = backends.ZarrStore.open_group(\n store=store,\n mode=mode,\n synchronizer=synchronizer,\n group=group,\n consolidate_on_close=consolidated,\n )\n zstore.append_dim = append_dim\n writer = ArrayWriter()\n # TODO: figure out how to properly handle unlimited_dims\n dump_to_store(dataset, zstore, writer, encoding=encoding)\n writes = writer.sync(compute=compute)\n\n if compute:\n _finalize_store(writes, zstore)\n else:\n import dask\n\n return dask.delayed(_finalize_store)(writes, zstore)\n\n return zstore\n"},{"fileName":"pydap_.py","filePath":"xarray/backends","id":2963,"nodeType":"File","text":"import numpy as np\n\nfrom .. 
import Variable\nfrom ..core import indexing\nfrom ..core.pycompat import integer_types\nfrom ..core.utils import Frozen, FrozenDict, is_dict_like\nfrom .common import AbstractDataStore, BackendArray, robust_getitem\n\n\nclass PydapArrayWrapper(BackendArray):\n def __init__(self, array):\n self.array = array\n\n @property\n def shape(self):\n return self.array.shape\n\n @property\n def dtype(self):\n return self.array.dtype\n\n def __getitem__(self, key):\n return indexing.explicit_indexing_adapter(\n key, self.shape, indexing.IndexingSupport.BASIC, self._getitem\n )\n\n def _getitem(self, key):\n # pull the data from the array attribute if possible, to avoid\n # downloading coordinate data twice\n array = getattr(self.array, \"array\", self.array)\n result = robust_getitem(array, key, catch=ValueError)\n # in some cases, pydap doesn't squeeze axes automatically like numpy\n axis = tuple(n for n, k in enumerate(key) if isinstance(k, integer_types))\n if result.ndim + len(axis) != array.ndim and len(axis) > 0:\n result = np.squeeze(result, axis)\n\n return result\n\n\ndef _fix_attributes(attributes):\n attributes = dict(attributes)\n for k in list(attributes):\n if k.lower() == \"global\" or k.lower().endswith(\"_global\"):\n # move global attributes to the top level, like the netcdf-C\n # DAP client\n attributes.update(attributes.pop(k))\n elif is_dict_like(attributes[k]):\n # Make Hierarchical attributes to a single level with a\n # dot-separated key\n attributes.update(\n {\n \"{}.{}\".format(k, k_child): v_child\n for k_child, v_child in attributes.pop(k).items()\n }\n )\n return attributes\n\n\nclass PydapDataStore(AbstractDataStore):\n \"\"\"Store for accessing OpenDAP datasets with pydap.\n\n This store provides an alternative way to access OpenDAP datasets that may\n be useful if the netCDF4 library is not available.\n \"\"\"\n\n def __init__(self, ds):\n \"\"\"\n Parameters\n ----------\n ds : pydap DatasetType\n \"\"\"\n self.ds = ds\n\n @classmethod\n def open(cls, url, session=None):\n import pydap.client\n\n ds = pydap.client.open_url(url, session=session)\n return cls(ds)\n\n def open_store_variable(self, var):\n data = indexing.LazilyOuterIndexedArray(PydapArrayWrapper(var))\n return Variable(var.dimensions, data, _fix_attributes(var.attributes))\n\n def get_variables(self):\n return FrozenDict(\n (k, self.open_store_variable(self.ds[k])) for k in self.ds.keys()\n )\n\n def get_attrs(self):\n return Frozen(_fix_attributes(self.ds.attributes))\n\n def get_dimensions(self):\n return Frozen(self.ds.dimensions)\n"},{"attributeType":"null","col":8,"comment":"null","endLoc":53,"id":2964,"name":"_yi","nodeType":"Attribute","startLoc":53,"text":"self._yi"},{"col":4,"comment":"null","endLoc":469,"header":"@functools.wraps(hist)\n def hist(self, ax=None, **kwargs)","id":2965,"name":"hist","nodeType":"Function","startLoc":467,"text":"@functools.wraps(hist)\n def hist(self, ax=None, **kwargs):\n return hist(self._da, ax=ax, **kwargs)"},{"className":"PydapArrayWrapper","col":0,"comment":"null","endLoc":37,"id":2966,"nodeType":"Class","startLoc":10,"text":"class PydapArrayWrapper(BackendArray):\n def __init__(self, array):\n self.array = array\n\n @property\n def shape(self):\n return self.array.shape\n\n @property\n def dtype(self):\n return self.array.dtype\n\n def __getitem__(self, key):\n return indexing.explicit_indexing_adapter(\n key, self.shape, indexing.IndexingSupport.BASIC, self._getitem\n )\n\n def _getitem(self, key):\n # pull the data from the array attribute if possible, to 
avoid\n # downloading coordinate data twice\n array = getattr(self.array, \"array\", self.array)\n result = robust_getitem(array, key, catch=ValueError)\n # in some cases, pydap doesn't squeeze axes automatically like numpy\n axis = tuple(n for n, k in enumerate(key) if isinstance(k, integer_types))\n if result.ndim + len(axis) != array.ndim and len(axis) > 0:\n result = np.squeeze(result, axis)\n\n return result"},{"className":"ScipyInterpolator","col":0,"comment":"Interpolate a 1-D function using Scipy interp1d\n\n See Also\n --------\n scipy.interpolate.interp1d\n ","endLoc":130,"id":2967,"nodeType":"Class","startLoc":78,"text":"class ScipyInterpolator(BaseInterpolator):\n \"\"\"Interpolate a 1-D function using Scipy interp1d\n\n See Also\n --------\n scipy.interpolate.interp1d\n \"\"\"\n\n def __init__(\n self,\n xi,\n yi,\n method=None,\n fill_value=None,\n assume_sorted=True,\n copy=False,\n bounds_error=False,\n order=None,\n **kwargs\n ):\n from scipy.interpolate import interp1d\n\n if method is None:\n raise ValueError(\n \"method is a required argument, please supply a \"\n \"valid scipy.inter1d method (kind)\"\n )\n\n if method == \"polynomial\":\n if order is None:\n raise ValueError(\"order is required when method=polynomial\")\n method = order\n\n self.method = method\n\n self.cons_kwargs = kwargs\n self.call_kwargs = {}\n\n if fill_value is None and method == \"linear\":\n fill_value = np.nan, np.nan\n elif fill_value is None:\n fill_value = np.nan\n\n self.f = interp1d(\n xi,\n yi,\n kind=self.method,\n fill_value=fill_value,\n bounds_error=False,\n assume_sorted=assume_sorted,\n copy=copy,\n **self.cons_kwargs\n )"},{"col":4,"comment":"null","endLoc":473,"header":"@functools.wraps(line)\n def line(self, *args, **kwargs)","id":2968,"name":"line","nodeType":"Function","startLoc":471,"text":"@functools.wraps(line)\n def line(self, *args, **kwargs):\n return line(self._da, *args, **kwargs)"},{"col":4,"comment":"null","endLoc":16,"header":"@property\n def shape(self)","id":2969,"name":"shape","nodeType":"Function","startLoc":14,"text":"@property\n def shape(self):\n return self.array.shape"},{"col":4,"comment":"null","endLoc":20,"header":"@property\n def dtype(self)","id":2970,"name":"dtype","nodeType":"Function","startLoc":18,"text":"@property\n def dtype(self):\n return self.array.dtype"},{"col":4,"comment":"null","endLoc":25,"header":"def __getitem__(self, key)","id":2971,"name":"__getitem__","nodeType":"Function","startLoc":22,"text":"def __getitem__(self, key):\n return indexing.explicit_indexing_adapter(\n key, self.shape, indexing.IndexingSupport.BASIC, self._getitem\n )"},{"col":4,"comment":"null","endLoc":130,"header":"def __init__(\n self,\n xi,\n yi,\n method=None,\n fill_value=None,\n assume_sorted=True,\n copy=False,\n bounds_error=False,\n order=None,\n **kwargs\n )","id":2972,"name":"__init__","nodeType":"Function","startLoc":86,"text":"def __init__(\n self,\n xi,\n yi,\n method=None,\n fill_value=None,\n assume_sorted=True,\n copy=False,\n bounds_error=False,\n order=None,\n **kwargs\n ):\n from scipy.interpolate import interp1d\n\n if method is None:\n raise ValueError(\n \"method is a required argument, please supply a \"\n \"valid scipy.inter1d method (kind)\"\n )\n\n if method == \"polynomial\":\n if order is None:\n raise ValueError(\"order is required when method=polynomial\")\n method = order\n\n self.method = method\n\n self.cons_kwargs = kwargs\n self.call_kwargs = {}\n\n if fill_value is None and method == \"linear\":\n fill_value = np.nan, np.nan\n 
elif fill_value is None:\n fill_value = np.nan\n\n self.f = interp1d(\n xi,\n yi,\n kind=self.method,\n fill_value=fill_value,\n bounds_error=False,\n assume_sorted=assume_sorted,\n copy=copy,\n **self.cons_kwargs\n )"},{"col":4,"comment":"null","endLoc":37,"header":"def _getitem(self, key)","id":2973,"name":"_getitem","nodeType":"Function","startLoc":27,"text":"def _getitem(self, key):\n # pull the data from the array attribute if possible, to avoid\n # downloading coordinate data twice\n array = getattr(self.array, \"array\", self.array)\n result = robust_getitem(array, key, catch=ValueError)\n # in some cases, pydap doesn't squeeze axes automatically like numpy\n axis = tuple(n for n, k in enumerate(key) if isinstance(k, integer_types))\n if result.ndim + len(axis) != array.ndim and len(axis) > 0:\n result = np.squeeze(result, axis)\n\n return result"},{"col":4,"comment":"null","endLoc":477,"header":"@functools.wraps(step)\n def step(self, *args, **kwargs)","id":2974,"name":"step","nodeType":"Function","startLoc":475,"text":"@functools.wraps(step)\n def step(self, *args, **kwargs):\n return step(self._da, *args, **kwargs)"},{"id":2975,"name":"api.rst","nodeType":"TextFile","path":"doc","text":".. currentmodule:: xarray\n\n#############\nAPI reference\n#############\n\nThis page provides an auto-generated summary of xarray's API. For more details\nand examples, refer to the relevant chapters in the main part of the\ndocumentation.\n\nSee also: :ref:`public api`\n\nTop-level functions\n===================\n\n.. autosummary::\n :toctree: generated/\n\n apply_ufunc\n align\n broadcast\n concat\n merge\n auto_combine\n combine_by_coords\n combine_nested\n where\n set_options\n full_like\n zeros_like\n ones_like\n dot\n map_blocks\n\nDataset\n=======\n\nCreating a dataset\n------------------\n\n.. autosummary::\n :toctree: generated/\n\n Dataset\n decode_cf\n\nAttributes\n----------\n\n.. autosummary::\n :toctree: generated/\n\n Dataset.dims\n Dataset.sizes\n Dataset.data_vars\n Dataset.coords\n Dataset.attrs\n Dataset.encoding\n Dataset.indexes\n Dataset.get_index\n Dataset.chunks\n Dataset.nbytes\n\nDictionary interface\n--------------------\n\nDatasets implement the mapping interface with keys given by variable names\nand values given by ``DataArray`` objects.\n\n.. autosummary::\n :toctree: generated/\n\n Dataset.__getitem__\n Dataset.__setitem__\n Dataset.__delitem__\n Dataset.update\n Dataset.items\n Dataset.values\n\nDataset contents\n----------------\n\n.. autosummary::\n :toctree: generated/\n\n Dataset.copy\n Dataset.assign\n Dataset.assign_coords\n Dataset.assign_attrs\n Dataset.pipe\n Dataset.merge\n Dataset.rename\n Dataset.rename_vars\n Dataset.rename_dims\n Dataset.swap_dims\n Dataset.expand_dims\n Dataset.drop\n Dataset.drop_dims\n Dataset.set_coords\n Dataset.reset_coords\n\nComparisons\n-----------\n\n.. autosummary::\n :toctree: generated/\n\n Dataset.equals\n Dataset.identical\n Dataset.broadcast_equals\n\nIndexing\n--------\n\n.. autosummary::\n :toctree: generated/\n\n Dataset.loc\n Dataset.isel\n Dataset.sel\n Dataset.head\n Dataset.tail\n Dataset.thin\n Dataset.squeeze\n Dataset.interp\n Dataset.interp_like\n Dataset.reindex\n Dataset.reindex_like\n Dataset.set_index\n Dataset.reset_index\n Dataset.reorder_levels\n\nMissing value handling\n----------------------\n\n.. 
autosummary::\n :toctree: generated/\n\n Dataset.isnull\n Dataset.notnull\n Dataset.combine_first\n Dataset.count\n Dataset.dropna\n Dataset.fillna\n Dataset.ffill\n Dataset.bfill\n Dataset.interpolate_na\n Dataset.where\n Dataset.isin\n\nComputation\n-----------\n\n.. autosummary::\n :toctree: generated/\n\n Dataset.apply\n Dataset.reduce\n Dataset.groupby\n Dataset.groupby_bins\n Dataset.rolling\n Dataset.rolling_exp\n Dataset.coarsen\n Dataset.resample\n Dataset.diff\n Dataset.quantile\n Dataset.differentiate\n Dataset.integrate\n\n**Aggregation**:\n:py:attr:`~Dataset.all`\n:py:attr:`~Dataset.any`\n:py:attr:`~Dataset.argmax`\n:py:attr:`~Dataset.argmin`\n:py:attr:`~Dataset.max`\n:py:attr:`~Dataset.mean`\n:py:attr:`~Dataset.median`\n:py:attr:`~Dataset.min`\n:py:attr:`~Dataset.prod`\n:py:attr:`~Dataset.sum`\n:py:attr:`~Dataset.std`\n:py:attr:`~Dataset.var`\n\n**ndarray methods**:\n:py:attr:`~Dataset.astype`\n:py:attr:`~Dataset.argsort`\n:py:attr:`~Dataset.clip`\n:py:attr:`~Dataset.conj`\n:py:attr:`~Dataset.conjugate`\n:py:attr:`~Dataset.imag`\n:py:attr:`~Dataset.round`\n:py:attr:`~Dataset.real`\n:py:attr:`~Dataset.cumsum`\n:py:attr:`~Dataset.cumprod`\n:py:attr:`~Dataset.rank`\n\n**Grouped operations**:\n:py:attr:`~core.groupby.DatasetGroupBy.assign`\n:py:attr:`~core.groupby.DatasetGroupBy.assign_coords`\n:py:attr:`~core.groupby.DatasetGroupBy.first`\n:py:attr:`~core.groupby.DatasetGroupBy.last`\n:py:attr:`~core.groupby.DatasetGroupBy.fillna`\n:py:attr:`~core.groupby.DatasetGroupBy.where`\n:py:attr:`~core.groupby.DatasetGroupBy.quantile`\n\nReshaping and reorganizing\n--------------------------\n\n.. autosummary::\n :toctree: generated/\n\n Dataset.transpose\n Dataset.stack\n Dataset.unstack\n Dataset.to_stacked_array\n Dataset.shift\n Dataset.roll\n Dataset.sortby\n Dataset.broadcast_like\n\nDataArray\n=========\n\n.. autosummary::\n :toctree: generated/\n\n DataArray\n\nAttributes\n----------\n\n.. autosummary::\n :toctree: generated/\n\n DataArray.values\n DataArray.data\n DataArray.coords\n DataArray.dims\n DataArray.sizes\n DataArray.name\n DataArray.attrs\n DataArray.encoding\n DataArray.indexes\n DataArray.get_index\n\n**ndarray attributes**:\n:py:attr:`~DataArray.ndim`\n:py:attr:`~DataArray.shape`\n:py:attr:`~DataArray.size`\n:py:attr:`~DataArray.dtype`\n:py:attr:`~DataArray.nbytes`\n:py:attr:`~DataArray.chunks`\n\nDataArray contents\n------------------\n\n.. autosummary::\n :toctree: generated/\n\n DataArray.assign_coords\n DataArray.assign_attrs\n DataArray.pipe\n DataArray.rename\n DataArray.swap_dims\n DataArray.expand_dims\n DataArray.drop\n DataArray.reset_coords\n DataArray.copy\n\n**ndarray methods**:\n:py:attr:`~DataArray.astype`\n:py:attr:`~DataArray.item`\n\n\nIndexing\n--------\n\n.. autosummary::\n :toctree: generated/\n\n DataArray.__getitem__\n DataArray.__setitem__\n DataArray.loc\n DataArray.isel\n DataArray.sel\n DataArray.head\n DataArray.tail\n DataArray.thin\n DataArray.squeeze\n DataArray.interp\n DataArray.interp_like\n DataArray.reindex\n DataArray.reindex_like\n DataArray.set_index\n DataArray.reset_index\n DataArray.reorder_levels\n\nMissing value handling\n----------------------\n\n.. autosummary::\n :toctree: generated/\n\n DataArray.isnull\n DataArray.notnull\n DataArray.combine_first\n DataArray.count\n DataArray.dropna\n DataArray.fillna\n DataArray.ffill\n DataArray.bfill\n DataArray.interpolate_na\n DataArray.where\n DataArray.isin\n\nComparisons\n-----------\n\n.. 
autosummary::\n :toctree: generated/\n\n DataArray.equals\n DataArray.identical\n DataArray.broadcast_equals\n\nComputation\n-----------\n\n.. autosummary::\n :toctree: generated/\n\n DataArray.reduce\n DataArray.groupby\n DataArray.groupby_bins\n DataArray.rolling\n DataArray.rolling_exp\n DataArray.coarsen\n DataArray.dt\n DataArray.resample\n DataArray.get_axis_num\n DataArray.diff\n DataArray.dot\n DataArray.quantile\n DataArray.differentiate\n DataArray.integrate\n DataArray.str\n\n**Aggregation**:\n:py:attr:`~DataArray.all`\n:py:attr:`~DataArray.any`\n:py:attr:`~DataArray.argmax`\n:py:attr:`~DataArray.argmin`\n:py:attr:`~DataArray.max`\n:py:attr:`~DataArray.mean`\n:py:attr:`~DataArray.median`\n:py:attr:`~DataArray.min`\n:py:attr:`~DataArray.prod`\n:py:attr:`~DataArray.sum`\n:py:attr:`~DataArray.std`\n:py:attr:`~DataArray.var`\n\n**ndarray methods**:\n:py:attr:`~DataArray.argsort`\n:py:attr:`~DataArray.clip`\n:py:attr:`~DataArray.conj`\n:py:attr:`~DataArray.conjugate`\n:py:attr:`~DataArray.imag`\n:py:attr:`~DataArray.searchsorted`\n:py:attr:`~DataArray.round`\n:py:attr:`~DataArray.real`\n:py:attr:`~DataArray.T`\n:py:attr:`~DataArray.cumsum`\n:py:attr:`~DataArray.cumprod`\n:py:attr:`~DataArray.rank`\n\n**Grouped operations**:\n:py:attr:`~core.groupby.DataArrayGroupBy.assign_coords`\n:py:attr:`~core.groupby.DataArrayGroupBy.first`\n:py:attr:`~core.groupby.DataArrayGroupBy.last`\n:py:attr:`~core.groupby.DataArrayGroupBy.fillna`\n:py:attr:`~core.groupby.DataArrayGroupBy.where`\n:py:attr:`~core.groupby.DataArrayGroupBy.quantile`\n\nReshaping and reorganizing\n--------------------------\n\n.. autosummary::\n :toctree: generated/\n\n DataArray.transpose\n DataArray.stack\n DataArray.unstack\n DataArray.to_unstacked_dataset\n DataArray.shift\n DataArray.roll\n DataArray.sortby\n DataArray.broadcast_like\n\n.. _api.ufuncs:\n\nUniversal functions\n===================\n\n.. warning::\n\n With recent versions of numpy, dask and xarray, NumPy ufuncs are now\n supported directly on all xarray and dask objects. This obviates the need\n for the ``xarray.ufuncs`` module, which should not be used for new code\n unless compatibility with versions of NumPy prior to v1.13 is required.\n\nThis functions are copied from NumPy, but extended to work on NumPy arrays,\ndask arrays and all xarray objects. 
You can find them in the ``xarray.ufuncs``\nmodule:\n\n:py:attr:`~ufuncs.angle`\n:py:attr:`~ufuncs.arccos`\n:py:attr:`~ufuncs.arccosh`\n:py:attr:`~ufuncs.arcsin`\n:py:attr:`~ufuncs.arcsinh`\n:py:attr:`~ufuncs.arctan`\n:py:attr:`~ufuncs.arctan2`\n:py:attr:`~ufuncs.arctanh`\n:py:attr:`~ufuncs.ceil`\n:py:attr:`~ufuncs.conj`\n:py:attr:`~ufuncs.copysign`\n:py:attr:`~ufuncs.cos`\n:py:attr:`~ufuncs.cosh`\n:py:attr:`~ufuncs.deg2rad`\n:py:attr:`~ufuncs.degrees`\n:py:attr:`~ufuncs.exp`\n:py:attr:`~ufuncs.expm1`\n:py:attr:`~ufuncs.fabs`\n:py:attr:`~ufuncs.fix`\n:py:attr:`~ufuncs.floor`\n:py:attr:`~ufuncs.fmax`\n:py:attr:`~ufuncs.fmin`\n:py:attr:`~ufuncs.fmod`\n:py:attr:`~ufuncs.fmod`\n:py:attr:`~ufuncs.frexp`\n:py:attr:`~ufuncs.hypot`\n:py:attr:`~ufuncs.imag`\n:py:attr:`~ufuncs.iscomplex`\n:py:attr:`~ufuncs.isfinite`\n:py:attr:`~ufuncs.isinf`\n:py:attr:`~ufuncs.isnan`\n:py:attr:`~ufuncs.isreal`\n:py:attr:`~ufuncs.ldexp`\n:py:attr:`~ufuncs.log`\n:py:attr:`~ufuncs.log10`\n:py:attr:`~ufuncs.log1p`\n:py:attr:`~ufuncs.log2`\n:py:attr:`~ufuncs.logaddexp`\n:py:attr:`~ufuncs.logaddexp2`\n:py:attr:`~ufuncs.logical_and`\n:py:attr:`~ufuncs.logical_not`\n:py:attr:`~ufuncs.logical_or`\n:py:attr:`~ufuncs.logical_xor`\n:py:attr:`~ufuncs.maximum`\n:py:attr:`~ufuncs.minimum`\n:py:attr:`~ufuncs.nextafter`\n:py:attr:`~ufuncs.rad2deg`\n:py:attr:`~ufuncs.radians`\n:py:attr:`~ufuncs.real`\n:py:attr:`~ufuncs.rint`\n:py:attr:`~ufuncs.sign`\n:py:attr:`~ufuncs.signbit`\n:py:attr:`~ufuncs.sin`\n:py:attr:`~ufuncs.sinh`\n:py:attr:`~ufuncs.sqrt`\n:py:attr:`~ufuncs.square`\n:py:attr:`~ufuncs.tan`\n:py:attr:`~ufuncs.tanh`\n:py:attr:`~ufuncs.trunc`\n\nIO / Conversion\n===============\n\nDataset methods\n---------------\n\n.. autosummary::\n :toctree: generated/\n\n open_dataset\n load_dataset\n open_mfdataset\n open_rasterio\n open_zarr\n Dataset.to_netcdf\n Dataset.to_zarr\n save_mfdataset\n Dataset.to_array\n Dataset.to_dataframe\n Dataset.to_dask_dataframe\n Dataset.to_dict\n Dataset.from_dataframe\n Dataset.from_dict\n Dataset.close\n Dataset.compute\n Dataset.persist\n Dataset.load\n Dataset.chunk\n Dataset.unify_chunks\n Dataset.map_blocks\n Dataset.filter_by_attrs\n Dataset.info\n\nDataArray methods\n-----------------\n\n.. autosummary::\n :toctree: generated/\n\n open_dataarray\n load_dataarray\n DataArray.to_dataset\n DataArray.to_netcdf\n DataArray.to_pandas\n DataArray.to_series\n DataArray.to_dataframe\n DataArray.to_index\n DataArray.to_masked_array\n DataArray.to_cdms2\n DataArray.to_iris\n DataArray.from_iris\n DataArray.to_dict\n DataArray.from_series\n DataArray.from_cdms2\n DataArray.from_dict\n DataArray.close\n DataArray.compute\n DataArray.persist\n DataArray.load\n DataArray.chunk\n DataArray.unify_chunks\n DataArray.map_blocks\n\nGroupBy objects\n===============\n\n.. autosummary::\n :toctree: generated/\n\n core.groupby.DataArrayGroupBy\n core.groupby.DataArrayGroupBy.apply\n core.groupby.DataArrayGroupBy.reduce\n core.groupby.DatasetGroupBy\n core.groupby.DatasetGroupBy.apply\n core.groupby.DatasetGroupBy.reduce\n\nRolling objects\n===============\n\n.. autosummary::\n :toctree: generated/\n\n core.rolling.DataArrayRolling\n core.rolling.DataArrayRolling.construct\n core.rolling.DataArrayRolling.reduce\n core.rolling.DatasetRolling\n core.rolling.DatasetRolling.construct\n core.rolling.DatasetRolling.reduce\n core.rolling_exp.RollingExp\n\nResample objects\n================\n\nResample objects also implement the GroupBy interface\n(methods like ``apply()``, ``reduce()``, ``mean()``, ``sum()``, etc.).\n\n.. 
autosummary::\n :toctree: generated/\n\n core.resample.DataArrayResample\n core.resample.DataArrayResample.asfreq\n core.resample.DataArrayResample.backfill\n core.resample.DataArrayResample.interpolate\n core.resample.DataArrayResample.nearest\n core.resample.DataArrayResample.pad\n core.resample.DatasetResample\n core.resample.DatasetResample.asfreq\n core.resample.DatasetResample.backfill\n core.resample.DatasetResample.interpolate\n core.resample.DatasetResample.nearest\n core.resample.DatasetResample.pad\n\nAccessors\n=========\n\n.. autosummary::\n :toctree: generated/\n\n core.accessor_dt.DatetimeAccessor\n core.accessor_str.StringAccessor\n\nCustom Indexes\n==============\n.. autosummary::\n :toctree: generated/\n\n CFTimeIndex\n\nCreating custom indexes\n-----------------------\n.. autosummary::\n :toctree: generated/\n\n cftime_range\n\nPlotting\n========\n\n.. autosummary::\n :toctree: generated/\n\n Dataset.plot\n DataArray.plot\n Dataset.plot.scatter\n plot.plot\n plot.contourf\n plot.contour\n plot.hist\n plot.imshow\n plot.line\n plot.pcolormesh\n plot.FacetGrid\n\nTesting\n=======\n\n.. autosummary::\n :toctree: generated/\n\n testing.assert_equal\n testing.assert_identical\n testing.assert_allclose\n testing.assert_chunks_equal\n\nExceptions\n==========\n\n.. autosummary::\n :toctree: generated/\n\n MergeError\n SerializationWarning\n\nAdvanced API\n============\n\n.. autosummary::\n :toctree: generated/\n\n Dataset.variables\n DataArray.variable\n Variable\n IndexVariable\n as_variable\n register_dataset_accessor\n register_dataarray_accessor\n\nThese backends provide a low-level interface for lazily loading data from\nexternal file-formats or protocols, and can be manually invoked to create\narguments for the ``from_store`` and ``dump_to_store`` Dataset methods:\n\n.. autosummary::\n :toctree: generated/\n\n backends.NetCDF4DataStore\n backends.H5NetCDFStore\n backends.PydapDataStore\n backends.ScipyDataStore\n backends.FileManager\n backends.CachingFileManager\n backends.DummyFileManager\n"},{"attributeType":"null","col":4,"comment":"null","endLoc":459,"id":2976,"name":"__slots__","nodeType":"Attribute","startLoc":459,"text":"__slots__"},{"attributeType":"null","col":8,"comment":"null","endLoc":462,"id":2977,"name":"_da","nodeType":"Attribute","startLoc":462,"text":"self._da"},{"className":"_MultiFileCloser","col":0,"comment":"null","endLoc":704,"id":2978,"nodeType":"Class","startLoc":696,"text":"class _MultiFileCloser:\n __slots__ = (\"file_objs\",)\n\n def __init__(self, file_objs):\n self.file_objs = file_objs\n\n def close(self):\n for f in self.file_objs:\n f.close()"},{"col":4,"comment":"null","endLoc":704,"header":"def close(self)","id":2979,"name":"close","nodeType":"Function","startLoc":702,"text":"def close(self):\n for f in self.file_objs:\n f.close()"},{"attributeType":"{__eq__} | None","col":8,"comment":"null","endLoc":111,"id":2980,"name":"method","nodeType":"Attribute","startLoc":111,"text":"self.method"},{"col":0,"comment":"\n Decorator for common 2d plotting logic\n\n Also adds the 2d plot method to class _PlotMethods\n ","endLoc":829,"header":"def _plot2d(plotfunc)","id":2981,"name":"_plot2d","nodeType":"Function","startLoc":480,"text":"def _plot2d(plotfunc):\n \"\"\"\n Decorator for common 2d plotting logic\n\n Also adds the 2d plot method to class _PlotMethods\n \"\"\"\n commondoc = \"\"\"\n Parameters\n ----------\n darray : DataArray\n Must be 2 dimensional, unless creating faceted plots\n x : string, optional\n Coordinate for x axis. 
If None use darray.dims[1]\n y : string, optional\n Coordinate for y axis. If None use darray.dims[0]\n figsize : tuple, optional\n A tuple (width, height) of the figure in inches.\n Mutually exclusive with ``size`` and ``ax``.\n aspect : scalar, optional\n Aspect ratio of plot, so that ``aspect * size`` gives the width in\n inches. Only used if a ``size`` is provided.\n size : scalar, optional\n If provided, create a new figure for the plot with the given size.\n Height (in inches) of each plot. See also: ``aspect``.\n ax : matplotlib axes object, optional\n Axis on which to plot this figure. By default, use the current axis.\n Mutually exclusive with ``size`` and ``figsize``.\n row : string, optional\n If passed, make row faceted plots on this dimension name\n col : string, optional\n If passed, make column faceted plots on this dimension name\n col_wrap : integer, optional\n Use together with ``col`` to wrap faceted plots\n xscale, yscale : 'linear', 'symlog', 'log', 'logit', optional\n Specifies scaling for the x- and y-axes respectively\n xticks, yticks : Specify tick locations for x- and y-axes\n xlim, ylim : Specify x- and y-axes limits\n xincrease : None, True, or False, optional\n Should the values on the x axes be increasing from left to right?\n if None, use the default for the matplotlib function.\n yincrease : None, True, or False, optional\n Should the values on the y axes be increasing from top to bottom?\n if None, use the default for the matplotlib function.\n add_colorbar : Boolean, optional\n Adds colorbar to axis\n add_labels : Boolean, optional\n Use xarray metadata to label axes\n norm : ``matplotlib.colors.Normalize`` instance, optional\n If the ``norm`` has vmin or vmax specified, the corresponding kwarg\n must be None.\n vmin, vmax : floats, optional\n Values to anchor the colormap, otherwise they are inferred from the\n data and other keyword arguments. When a diverging dataset is inferred,\n setting one of these values will fix the other by symmetry around\n ``center``. Setting both values prevents use of a diverging colormap.\n If discrete levels are provided as an explicit list, both of these\n values are ignored.\n cmap : matplotlib colormap name or object, optional\n The mapping from data values to color space. If not provided, this\n will be either be ``viridis`` (if the function infers a sequential\n dataset) or ``RdBu_r`` (if the function infers a diverging dataset).\n When `Seaborn` is installed, ``cmap`` may also be a `seaborn`\n color palette. If ``cmap`` is seaborn color palette and the plot type\n is not ``contour`` or ``contourf``, ``levels`` must also be specified.\n colors : discrete colors to plot, optional\n A single color or a list of colors. If the plot type is not ``contour``\n or ``contourf``, the ``levels`` argument is required.\n center : float, optional\n The value at which to center the colormap. Passing this value implies\n use of a diverging colormap. Setting it to ``False`` prevents use of a\n diverging colormap.\n robust : bool, optional\n If True and ``vmin`` or ``vmax`` are absent, the colormap range is\n computed with 2nd and 98th percentiles instead of the extreme values.\n extend : {'neither', 'both', 'min', 'max'}, optional\n How to draw arrows extending the colorbar beyond its limits. If not\n provided, extend is inferred from vmin, vmax and the data limits.\n levels : int or list-like object, optional\n Split the colormap (cmap) into discrete color intervals. 
If an integer\n is provided, \"nice\" levels are chosen based on the data range: this can\n imply that the final number of levels is not exactly the expected one.\n Setting ``vmin`` and/or ``vmax`` with ``levels=N`` is equivalent to\n setting ``levels=np.linspace(vmin, vmax, N)``.\n infer_intervals : bool, optional\n Only applies to pcolormesh. If True, the coordinate intervals are\n passed to pcolormesh. If False, the original coordinates are used\n (this can be useful for certain map projections). The default is to\n always infer intervals, unless the mesh is irregular and plotted on\n a map projection.\n subplot_kws : dict, optional\n Dictionary of keyword arguments for matplotlib subplots. Only applies\n to FacetGrid plotting.\n cbar_ax : matplotlib Axes, optional\n Axes in which to draw the colorbar.\n cbar_kwargs : dict, optional\n Dictionary of keyword arguments to pass to the colorbar.\n **kwargs : optional\n Additional arguments to wrapped matplotlib function\n\n Returns\n -------\n artist :\n The same type of primitive artist that the wrapped matplotlib\n function returns\n \"\"\"\n\n # Build on the original docstring\n plotfunc.__doc__ = \"%s\\n%s\" % (plotfunc.__doc__, commondoc)\n\n @functools.wraps(plotfunc)\n def newplotfunc(\n darray,\n x=None,\n y=None,\n figsize=None,\n size=None,\n aspect=None,\n ax=None,\n row=None,\n col=None,\n col_wrap=None,\n xincrease=True,\n yincrease=True,\n add_colorbar=None,\n add_labels=True,\n vmin=None,\n vmax=None,\n cmap=None,\n center=None,\n robust=False,\n extend=None,\n levels=None,\n infer_intervals=None,\n colors=None,\n subplot_kws=None,\n cbar_ax=None,\n cbar_kwargs=None,\n xscale=None,\n yscale=None,\n xticks=None,\n yticks=None,\n xlim=None,\n ylim=None,\n norm=None,\n **kwargs\n ):\n # All 2d plots in xarray share this function signature.\n # Method signature below should be consistent.\n\n # Decide on a default for the colorbar before facetgrids\n if add_colorbar is None:\n add_colorbar = plotfunc.__name__ != \"contour\"\n imshow_rgb = plotfunc.__name__ == \"imshow\" and darray.ndim == (\n 3 + (row is not None) + (col is not None)\n )\n if imshow_rgb:\n # Don't add a colorbar when showing an image with explicit colors\n add_colorbar = False\n # Matplotlib does not support normalising RGB data, so do it here.\n # See eg. 
https://github.com/matplotlib/matplotlib/pull/10220\n if robust or vmax is not None or vmin is not None:\n darray = _rescale_imshow_rgb(darray, vmin, vmax, robust)\n vmin, vmax, robust = None, None, False\n\n # Handle facetgrids first\n if row or col:\n allargs = locals().copy()\n del allargs[\"darray\"]\n del allargs[\"imshow_rgb\"]\n allargs.update(allargs.pop(\"kwargs\"))\n # Need the decorated plotting function\n allargs[\"plotfunc\"] = globals()[plotfunc.__name__]\n return _easy_facetgrid(darray, kind=\"dataarray\", **allargs)\n\n plt = import_matplotlib_pyplot()\n\n rgb = kwargs.pop(\"rgb\", None)\n if rgb is not None and plotfunc.__name__ != \"imshow\":\n raise ValueError('The \"rgb\" keyword is only valid for imshow()')\n elif rgb is not None and not imshow_rgb:\n raise ValueError(\n 'The \"rgb\" keyword is only valid for imshow()'\n \"with a three-dimensional array (per facet)\"\n )\n\n xlab, ylab = _infer_xy_labels(\n darray=darray, x=x, y=y, imshow=imshow_rgb, rgb=rgb\n )\n\n # better to pass the ndarrays directly to plotting functions\n xval = darray[xlab].values\n yval = darray[ylab].values\n\n # check if we need to broadcast one dimension\n if xval.ndim < yval.ndim:\n xval = np.broadcast_to(xval, yval.shape)\n\n if yval.ndim < xval.ndim:\n yval = np.broadcast_to(yval, xval.shape)\n\n # May need to transpose for correct x, y labels\n # xlab may be the name of a coord, we have to check for dim names\n if imshow_rgb:\n # For RGB[A] images, matplotlib requires the color dimension\n # to be last. In Xarray the order should be unimportant, so\n # we transpose to (y, x, color) to make this work.\n yx_dims = (ylab, xlab)\n dims = yx_dims + tuple(d for d in darray.dims if d not in yx_dims)\n if dims != darray.dims:\n darray = darray.transpose(*dims, transpose_coords=True)\n elif darray[xlab].dims[-1] == darray.dims[0]:\n darray = darray.transpose(transpose_coords=True)\n\n # Pass the data as a masked ndarray too\n zval = darray.to_masked_array(copy=False)\n\n # Replace pd.Intervals if contained in xval or yval.\n xplt, xlab_extra = _resolve_intervals_2dplot(xval, plotfunc.__name__)\n yplt, ylab_extra = _resolve_intervals_2dplot(yval, plotfunc.__name__)\n\n _ensure_plottable(xplt, yplt)\n\n cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs(\n plotfunc, zval.data, **locals()\n )\n\n if \"contour\" in plotfunc.__name__:\n # extend is a keyword argument only for contour and contourf, but\n # passing it to the colorbar is sufficient for imshow and\n # pcolormesh\n kwargs[\"extend\"] = cmap_params[\"extend\"]\n kwargs[\"levels\"] = cmap_params[\"levels\"]\n # if colors == a single color, matplotlib draws dashed negative\n # contours. 
we lose this feature if we pass cmap and not colors\n if isinstance(colors, str):\n cmap_params[\"cmap\"] = None\n kwargs[\"colors\"] = colors\n\n if \"pcolormesh\" == plotfunc.__name__:\n kwargs[\"infer_intervals\"] = infer_intervals\n\n if \"imshow\" == plotfunc.__name__ and isinstance(aspect, str):\n # forbid usage of mpl strings\n raise ValueError(\n \"plt.imshow's `aspect` kwarg is not available \" \"in xarray\"\n )\n\n ax = get_axis(figsize, size, aspect, ax)\n primitive = plotfunc(\n xplt,\n yplt,\n zval,\n ax=ax,\n cmap=cmap_params[\"cmap\"],\n vmin=cmap_params[\"vmin\"],\n vmax=cmap_params[\"vmax\"],\n norm=cmap_params[\"norm\"],\n **kwargs\n )\n\n # Label the plot with metadata\n if add_labels:\n ax.set_xlabel(label_from_attrs(darray[xlab], xlab_extra))\n ax.set_ylabel(label_from_attrs(darray[ylab], ylab_extra))\n ax.set_title(darray._title_for_slice())\n\n if add_colorbar:\n if add_labels and \"label\" not in cbar_kwargs:\n cbar_kwargs[\"label\"] = label_from_attrs(darray)\n cbar = _add_colorbar(primitive, ax, cbar_ax, cbar_kwargs, cmap_params)\n elif cbar_ax is not None or cbar_kwargs:\n # inform the user about keywords which aren't used\n raise ValueError(\n \"cbar_ax and cbar_kwargs can't be used with \" \"add_colorbar=False.\"\n )\n\n # origin kwarg overrides yincrease\n if \"origin\" in kwargs:\n yincrease = None\n\n _update_axes(\n ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim\n )\n\n # Rotate dates on xlabels\n # Do this without calling autofmt_xdate so that x-axes ticks\n # on other subplots (if any) are not deleted.\n # https://stackoverflow.com/questions/17430105/autofmt-xdate-deletes-x-axis-labels-of-all-subplots\n if np.issubdtype(xplt.dtype, np.datetime64):\n for xlabels in ax.get_xticklabels():\n xlabels.set_rotation(30)\n xlabels.set_ha(\"right\")\n\n return primitive\n\n # For use as DataArray.plot.plotmethod\n @functools.wraps(newplotfunc)\n def plotmethod(\n _PlotMethods_obj,\n x=None,\n y=None,\n figsize=None,\n size=None,\n aspect=None,\n ax=None,\n row=None,\n col=None,\n col_wrap=None,\n xincrease=True,\n yincrease=True,\n add_colorbar=None,\n add_labels=True,\n vmin=None,\n vmax=None,\n cmap=None,\n colors=None,\n center=None,\n robust=False,\n extend=None,\n levels=None,\n infer_intervals=None,\n subplot_kws=None,\n cbar_ax=None,\n cbar_kwargs=None,\n xscale=None,\n yscale=None,\n xticks=None,\n yticks=None,\n xlim=None,\n ylim=None,\n norm=None,\n **kwargs\n ):\n \"\"\"\n The method should have the same signature as the function.\n\n This just makes the method work on Plotmethods objects,\n and passes all the other arguments straight through.\n \"\"\"\n allargs = locals()\n allargs[\"darray\"] = _PlotMethods_obj._da\n allargs.update(kwargs)\n for arg in [\"_PlotMethods_obj\", \"newplotfunc\", \"kwargs\"]:\n del allargs[arg]\n return newplotfunc(**allargs)\n\n # Add to class _PlotMethods\n setattr(_PlotMethods, plotmethod.__name__, plotmethod)\n\n return newplotfunc"},{"fileName":"computation.py","filePath":"xarray/core","id":2982,"nodeType":"File","text":"\"\"\"\nFunctions for applying functions that act on arrays to xarray's labeled data.\n\"\"\"\nimport functools\nimport itertools\nimport operator\nfrom collections import Counter\nfrom typing import (\n TYPE_CHECKING,\n AbstractSet,\n Any,\n Callable,\n Dict,\n Hashable,\n Iterable,\n List,\n Mapping,\n Optional,\n Sequence,\n Tuple,\n Union,\n)\n\nimport numpy as np\n\nfrom . 
import duck_array_ops, utils\nfrom .alignment import deep_align\nfrom .merge import merge_coordinates_without_align\nfrom .pycompat import dask_array_type\nfrom .utils import is_dict_like\nfrom .variable import Variable\n\nif TYPE_CHECKING:\n from .coordinates import Coordinates # noqa\n from .dataset import Dataset\n\n_NO_FILL_VALUE = utils.ReprObject(\"\")\n_DEFAULT_NAME = utils.ReprObject(\"\")\n_JOINS_WITHOUT_FILL_VALUES = frozenset({\"inner\", \"exact\"})\n\n\nclass _UFuncSignature:\n \"\"\"Core dimensions signature for a given function.\n\n Based on the signature provided by generalized ufuncs in NumPy.\n\n Attributes\n ----------\n input_core_dims : tuple[tuple]\n Core dimension names on each input variable.\n output_core_dims : tuple[tuple]\n Core dimension names on each output variable.\n \"\"\"\n\n __slots__ = (\n \"input_core_dims\",\n \"output_core_dims\",\n \"_all_input_core_dims\",\n \"_all_output_core_dims\",\n \"_all_core_dims\",\n )\n\n def __init__(self, input_core_dims, output_core_dims=((),)):\n self.input_core_dims = tuple(tuple(a) for a in input_core_dims)\n self.output_core_dims = tuple(tuple(a) for a in output_core_dims)\n self._all_input_core_dims = None\n self._all_output_core_dims = None\n self._all_core_dims = None\n\n @property\n def all_input_core_dims(self):\n if self._all_input_core_dims is None:\n self._all_input_core_dims = frozenset(\n dim for dims in self.input_core_dims for dim in dims\n )\n return self._all_input_core_dims\n\n @property\n def all_output_core_dims(self):\n if self._all_output_core_dims is None:\n self._all_output_core_dims = frozenset(\n dim for dims in self.output_core_dims for dim in dims\n )\n return self._all_output_core_dims\n\n @property\n def all_core_dims(self):\n if self._all_core_dims is None:\n self._all_core_dims = self.all_input_core_dims | self.all_output_core_dims\n return self._all_core_dims\n\n @property\n def num_inputs(self):\n return len(self.input_core_dims)\n\n @property\n def num_outputs(self):\n return len(self.output_core_dims)\n\n def __eq__(self, other):\n try:\n return (\n self.input_core_dims == other.input_core_dims\n and self.output_core_dims == other.output_core_dims\n )\n except AttributeError:\n return False\n\n def __ne__(self, other):\n return not self == other\n\n def __repr__(self):\n return \"%s(%r, %r)\" % (\n type(self).__name__,\n list(self.input_core_dims),\n list(self.output_core_dims),\n )\n\n def __str__(self):\n lhs = \",\".join(\"({})\".format(\",\".join(dims)) for dims in self.input_core_dims)\n rhs = \",\".join(\"({})\".format(\",\".join(dims)) for dims in self.output_core_dims)\n return \"{}->{}\".format(lhs, rhs)\n\n def to_gufunc_string(self):\n \"\"\"Create an equivalent signature string for a NumPy gufunc.\n\n Unlike __str__, handles dimensions that don't map to Python\n identifiers.\n \"\"\"\n all_dims = self.all_core_dims\n dims_map = dict(zip(sorted(all_dims), range(len(all_dims))))\n input_core_dims = [\n [\"dim%d\" % dims_map[dim] for dim in core_dims]\n for core_dims in self.input_core_dims\n ]\n output_core_dims = [\n [\"dim%d\" % dims_map[dim] for dim in core_dims]\n for core_dims in self.output_core_dims\n ]\n alt_signature = type(self)(input_core_dims, output_core_dims)\n return str(alt_signature)\n\n\ndef result_name(objects: list) -> Any:\n # use the same naming heuristics as pandas:\n # https://github.com/blaze/blaze/issues/458#issuecomment-51936356\n names = {getattr(obj, \"name\", _DEFAULT_NAME) for obj in objects}\n names.discard(_DEFAULT_NAME)\n if len(names) == 
1:\n name, = names\n else:\n name = None\n return name\n\n\ndef _get_coords_list(args) -> List[\"Coordinates\"]:\n coords_list = []\n for arg in args:\n try:\n coords = arg.coords\n except AttributeError:\n pass # skip this argument\n else:\n coords_list.append(coords)\n return coords_list\n\n\ndef build_output_coords(\n args: list, signature: _UFuncSignature, exclude_dims: AbstractSet = frozenset()\n) -> \"List[Dict[Any, Variable]]\":\n \"\"\"Build output coordinates for an operation.\n\n Parameters\n ----------\n args : list\n List of raw operation arguments. Any valid types for xarray operations\n are OK, e.g., scalars, Variable, DataArray, Dataset.\n signature : _UfuncSignature\n Core dimensions signature for the operation.\n exclude_dims : optional set\n Dimensions excluded from the operation. Coordinates along these\n dimensions are dropped.\n\n Returns\n -------\n Dictionary of Variable objects with merged coordinates.\n \"\"\"\n coords_list = _get_coords_list(args)\n\n if len(coords_list) == 1 and not exclude_dims:\n # we can skip the expensive merge\n unpacked_coords, = coords_list\n merged_vars = dict(unpacked_coords.variables)\n else:\n # TODO: save these merged indexes, instead of re-computing them later\n merged_vars, unused_indexes = merge_coordinates_without_align(\n coords_list, exclude_dims=exclude_dims\n )\n\n output_coords = []\n for output_dims in signature.output_core_dims:\n dropped_dims = signature.all_input_core_dims - set(output_dims)\n if dropped_dims:\n filtered = {\n k: v for k, v in merged_vars.items() if dropped_dims.isdisjoint(v.dims)\n }\n else:\n filtered = merged_vars\n output_coords.append(filtered)\n\n return output_coords\n\n\ndef apply_dataarray_vfunc(\n func, *args, signature, join=\"inner\", exclude_dims=frozenset(), keep_attrs=False\n):\n \"\"\"Apply a variable level function over DataArray, Variable and/or ndarray\n objects.\n \"\"\"\n from .dataarray import DataArray\n\n if len(args) > 1:\n args = deep_align(\n args, join=join, copy=False, exclude=exclude_dims, raise_on_invalid=False\n )\n\n if keep_attrs and hasattr(args[0], \"name\"):\n name = args[0].name\n else:\n name = result_name(args)\n result_coords = build_output_coords(args, signature, exclude_dims)\n\n data_vars = [getattr(a, \"variable\", a) for a in args]\n result_var = func(*data_vars)\n\n if signature.num_outputs > 1:\n out = tuple(\n DataArray(variable, coords, name=name, fastpath=True)\n for variable, coords in zip(result_var, result_coords)\n )\n else:\n coords, = result_coords\n out = DataArray(result_var, coords, name=name, fastpath=True)\n\n return out\n\n\ndef ordered_set_union(all_keys: List[Iterable]) -> Iterable:\n return {key: None for keys in all_keys for key in keys}.keys()\n\n\ndef ordered_set_intersection(all_keys: List[Iterable]) -> Iterable:\n intersection = set(all_keys[0])\n for keys in all_keys[1:]:\n intersection.intersection_update(keys)\n return [key for key in all_keys[0] if key in intersection]\n\n\ndef assert_and_return_exact_match(all_keys):\n first_keys = all_keys[0]\n for keys in all_keys[1:]:\n if keys != first_keys:\n raise ValueError(\n \"exact match required for all data variable names, \"\n \"but %r != %r\" % (keys, first_keys)\n )\n return first_keys\n\n\n_JOINERS = {\n \"inner\": ordered_set_intersection,\n \"outer\": ordered_set_union,\n \"left\": operator.itemgetter(0),\n \"right\": operator.itemgetter(-1),\n \"exact\": assert_and_return_exact_match,\n}\n\n\ndef join_dict_keys(\n objects: Iterable[Union[Mapping, Any]], how: str = \"inner\"\n) 
-> Iterable:\n joiner = _JOINERS[how]\n all_keys = [obj.keys() for obj in objects if hasattr(obj, \"keys\")]\n return joiner(all_keys)\n\n\ndef collect_dict_values(\n objects: Iterable[Union[Mapping, Any]], keys: Iterable, fill_value: object = None\n) -> List[list]:\n return [\n [obj.get(key, fill_value) if is_dict_like(obj) else obj for obj in objects]\n for key in keys\n ]\n\n\ndef _as_variables_or_variable(arg):\n try:\n return arg.variables\n except AttributeError:\n try:\n return arg.variable\n except AttributeError:\n return arg\n\n\ndef _unpack_dict_tuples(\n result_vars: Mapping[Hashable, Tuple[Variable, ...]], num_outputs: int\n) -> Tuple[Dict[Hashable, Variable], ...]:\n out = tuple({} for _ in range(num_outputs)) # type: ignore\n for name, values in result_vars.items():\n for value, results_dict in zip(values, out):\n results_dict[name] = value\n return out\n\n\ndef apply_dict_of_variables_vfunc(\n func, *args, signature, join=\"inner\", fill_value=None\n):\n \"\"\"Apply a variable level function over dicts of DataArray, DataArray,\n Variable and ndarray objects.\n \"\"\"\n args = [_as_variables_or_variable(arg) for arg in args]\n names = join_dict_keys(args, how=join)\n grouped_by_name = collect_dict_values(args, names, fill_value)\n\n result_vars = {}\n for name, variable_args in zip(names, grouped_by_name):\n result_vars[name] = func(*variable_args)\n\n if signature.num_outputs > 1:\n return _unpack_dict_tuples(result_vars, signature.num_outputs)\n else:\n return result_vars\n\n\ndef _fast_dataset(\n variables: Dict[Hashable, Variable], coord_variables: Mapping[Hashable, Variable]\n) -> \"Dataset\":\n \"\"\"Create a dataset as quickly as possible.\n\n Beware: the `variables` dict is modified INPLACE.\n \"\"\"\n from .dataset import Dataset\n\n variables.update(coord_variables)\n coord_names = set(coord_variables)\n return Dataset._from_vars_and_coord_names(variables, coord_names)\n\n\ndef apply_dataset_vfunc(\n func,\n *args,\n signature,\n join=\"inner\",\n dataset_join=\"exact\",\n fill_value=_NO_FILL_VALUE,\n exclude_dims=frozenset(),\n keep_attrs=False\n):\n \"\"\"Apply a variable level function over Dataset, dict of DataArray,\n DataArray, Variable and/or ndarray objects.\n \"\"\"\n from .dataset import Dataset\n\n first_obj = args[0] # we'll copy attrs from this in case keep_attrs=True\n\n if dataset_join not in _JOINS_WITHOUT_FILL_VALUES and fill_value is _NO_FILL_VALUE:\n raise TypeError(\n \"to apply an operation to datasets with different \"\n \"data variables with apply_ufunc, you must supply the \"\n \"dataset_fill_value argument.\"\n )\n\n if len(args) > 1:\n args = deep_align(\n args, join=join, copy=False, exclude=exclude_dims, raise_on_invalid=False\n )\n\n list_of_coords = build_output_coords(args, signature, exclude_dims)\n args = [getattr(arg, \"data_vars\", arg) for arg in args]\n\n result_vars = apply_dict_of_variables_vfunc(\n func, *args, signature=signature, join=dataset_join, fill_value=fill_value\n )\n\n if signature.num_outputs > 1:\n out = tuple(_fast_dataset(*args) for args in zip(result_vars, list_of_coords))\n else:\n coord_vars, = list_of_coords\n out = _fast_dataset(result_vars, coord_vars)\n\n if keep_attrs and isinstance(first_obj, Dataset):\n if isinstance(out, tuple):\n out = tuple(ds._copy_attrs_from(first_obj) for ds in out)\n else:\n out._copy_attrs_from(first_obj)\n return out\n\n\ndef _iter_over_selections(obj, dim, values):\n \"\"\"Iterate over selections of an xarray object in the provided order.\"\"\"\n from .groupby import 
_dummy_copy\n\n dummy = None\n for value in values:\n try:\n obj_sel = obj.sel(**{dim: value})\n except (KeyError, IndexError):\n if dummy is None:\n dummy = _dummy_copy(obj)\n obj_sel = dummy\n yield obj_sel\n\n\ndef apply_groupby_func(func, *args):\n \"\"\"Apply a dataset or datarray level function over GroupBy, Dataset,\n DataArray, Variable and/or ndarray objects.\n \"\"\"\n from .groupby import GroupBy, peek_at\n from .variable import Variable\n\n groupbys = [arg for arg in args if isinstance(arg, GroupBy)]\n assert groupbys, \"must have at least one groupby to iterate over\"\n first_groupby = groupbys[0]\n if any(not first_groupby._group.equals(gb._group) for gb in groupbys[1:]):\n raise ValueError(\n \"apply_ufunc can only perform operations over \"\n \"multiple GroupBy objets at once if they are all \"\n \"grouped the same way\"\n )\n\n grouped_dim = first_groupby._group.name\n unique_values = first_groupby._unique_coord.values\n\n iterators = []\n for arg in args:\n if isinstance(arg, GroupBy):\n iterator = (value for _, value in arg)\n elif hasattr(arg, \"dims\") and grouped_dim in arg.dims:\n if isinstance(arg, Variable):\n raise ValueError(\n \"groupby operations cannot be performed with \"\n \"xarray.Variable objects that share a dimension with \"\n \"the grouped dimension\"\n )\n iterator = _iter_over_selections(arg, grouped_dim, unique_values)\n else:\n iterator = itertools.repeat(arg)\n iterators.append(iterator)\n\n applied = (func(*zipped_args) for zipped_args in zip(*iterators))\n applied_example, applied = peek_at(applied)\n combine = first_groupby._combine\n if isinstance(applied_example, tuple):\n combined = tuple(combine(output) for output in zip(*applied))\n else:\n combined = combine(applied)\n return combined\n\n\ndef unified_dim_sizes(\n variables: Iterable[Variable], exclude_dims: AbstractSet = frozenset()\n) -> Dict[Hashable, int]:\n\n dim_sizes: Dict[Hashable, int] = {}\n\n for var in variables:\n if len(set(var.dims)) < len(var.dims):\n raise ValueError(\n \"broadcasting cannot handle duplicate \"\n \"dimensions on a variable: %r\" % list(var.dims)\n )\n for dim, size in zip(var.dims, var.shape):\n if dim not in exclude_dims:\n if dim not in dim_sizes:\n dim_sizes[dim] = size\n elif dim_sizes[dim] != size:\n raise ValueError(\n \"operands cannot be broadcast together \"\n \"with mismatched lengths for dimension \"\n \"%r: %s vs %s\" % (dim, dim_sizes[dim], size)\n )\n return dim_sizes\n\n\nSLICE_NONE = slice(None)\n\n\ndef broadcast_compat_data(\n variable: Variable,\n broadcast_dims: Tuple[Hashable, ...],\n core_dims: Tuple[Hashable, ...],\n) -> Any:\n data = variable.data\n\n old_dims = variable.dims\n new_dims = broadcast_dims + core_dims\n\n if new_dims == old_dims:\n # optimize for the typical case\n return data\n\n set_old_dims = set(old_dims)\n missing_core_dims = [d for d in core_dims if d not in set_old_dims]\n if missing_core_dims:\n raise ValueError(\n \"operand to apply_ufunc has required core dimensions {}, but \"\n \"some of these dimensions are absent on an input variable: {}\".format(\n list(core_dims), missing_core_dims\n )\n )\n\n set_new_dims = set(new_dims)\n unexpected_dims = [d for d in old_dims if d not in set_new_dims]\n if unexpected_dims:\n raise ValueError(\n \"operand to apply_ufunc encountered unexpected \"\n \"dimensions %r on an input variable: these are core \"\n \"dimensions on other input or output variables\" % unexpected_dims\n )\n\n # for consistency with numpy, keep broadcast dimensions to the left\n old_broadcast_dims = 
tuple(d for d in broadcast_dims if d in set_old_dims)\n reordered_dims = old_broadcast_dims + core_dims\n if reordered_dims != old_dims:\n order = tuple(old_dims.index(d) for d in reordered_dims)\n data = duck_array_ops.transpose(data, order)\n\n if new_dims != reordered_dims:\n key_parts = []\n for dim in new_dims:\n if dim in set_old_dims:\n key_parts.append(SLICE_NONE)\n elif key_parts:\n # no need to insert new axes at the beginning that are already\n # handled by broadcasting\n key_parts.append(np.newaxis)\n data = data[tuple(key_parts)]\n\n return data\n\n\ndef apply_variable_ufunc(\n func,\n *args,\n signature,\n exclude_dims=frozenset(),\n dask=\"forbidden\",\n output_dtypes=None,\n output_sizes=None,\n keep_attrs=False\n):\n \"\"\"Apply a ndarray level function over Variable and/or ndarray objects.\n \"\"\"\n from .variable import Variable, as_compatible_data\n\n dim_sizes = unified_dim_sizes(\n (a for a in args if hasattr(a, \"dims\")), exclude_dims=exclude_dims\n )\n broadcast_dims = tuple(\n dim for dim in dim_sizes if dim not in signature.all_core_dims\n )\n output_dims = [broadcast_dims + out for out in signature.output_core_dims]\n\n input_data = [\n broadcast_compat_data(arg, broadcast_dims, core_dims)\n if isinstance(arg, Variable)\n else arg\n for arg, core_dims in zip(args, signature.input_core_dims)\n ]\n\n if any(isinstance(array, dask_array_type) for array in input_data):\n if dask == \"forbidden\":\n raise ValueError(\n \"apply_ufunc encountered a dask array on an \"\n \"argument, but handling for dask arrays has not \"\n \"been enabled. Either set the ``dask`` argument \"\n \"or load your data into memory first with \"\n \"``.load()`` or ``.compute()``\"\n )\n elif dask == \"parallelized\":\n input_dims = [broadcast_dims + dims for dims in signature.input_core_dims]\n numpy_func = func\n\n def func(*arrays):\n return _apply_blockwise(\n numpy_func,\n arrays,\n input_dims,\n output_dims,\n signature,\n output_dtypes,\n output_sizes,\n )\n\n elif dask == \"allowed\":\n pass\n else:\n raise ValueError(\n \"unknown setting for dask array handling in \"\n \"apply_ufunc: {}\".format(dask)\n )\n result_data = func(*input_data)\n\n if signature.num_outputs == 1:\n result_data = (result_data,)\n elif (\n not isinstance(result_data, tuple) or len(result_data) != signature.num_outputs\n ):\n raise ValueError(\n \"applied function does not have the number of \"\n \"outputs specified in the ufunc signature. \"\n \"Result is not a tuple of {} elements: {!r}\".format(\n signature.num_outputs, result_data\n )\n )\n\n output = []\n for dims, data in zip(output_dims, result_data):\n data = as_compatible_data(data)\n if data.ndim != len(dims):\n raise ValueError(\n \"applied function returned data with unexpected \"\n \"number of dimensions: {} vs {}, for dimensions {}\".format(\n data.ndim, len(dims), dims\n )\n )\n\n var = Variable(dims, data, fastpath=True)\n for dim, new_size in var.sizes.items():\n if dim in dim_sizes and new_size != dim_sizes[dim]:\n raise ValueError(\n \"size of dimension {!r} on inputs was unexpectedly \"\n \"changed by applied function from {} to {}. 
Only \"\n \"dimensions specified in ``exclude_dims`` with \"\n \"xarray.apply_ufunc are allowed to change size.\".format(\n dim, dim_sizes[dim], new_size\n )\n )\n\n if keep_attrs and isinstance(args[0], Variable):\n var.attrs.update(args[0].attrs)\n output.append(var)\n\n if signature.num_outputs == 1:\n return output[0]\n else:\n return tuple(output)\n\n\ndef _apply_blockwise(\n func, args, input_dims, output_dims, signature, output_dtypes, output_sizes=None\n):\n import dask.array\n\n if signature.num_outputs > 1:\n raise NotImplementedError(\n \"multiple outputs from apply_ufunc not yet \"\n \"supported with dask='parallelized'\"\n )\n\n if output_dtypes is None:\n raise ValueError(\n \"output dtypes (output_dtypes) must be supplied to \"\n \"apply_func when using dask='parallelized'\"\n )\n if not isinstance(output_dtypes, list):\n raise TypeError(\n \"output_dtypes must be a list of objects coercible to \"\n \"numpy dtypes, got {}\".format(output_dtypes)\n )\n if len(output_dtypes) != signature.num_outputs:\n raise ValueError(\n \"apply_ufunc arguments output_dtypes and \"\n \"output_core_dims must have the same length: {} vs {}\".format(\n len(output_dtypes), signature.num_outputs\n )\n )\n (dtype,) = output_dtypes\n\n if output_sizes is None:\n output_sizes = {}\n\n new_dims = signature.all_output_core_dims - signature.all_input_core_dims\n if any(dim not in output_sizes for dim in new_dims):\n raise ValueError(\n \"when using dask='parallelized' with apply_ufunc, \"\n \"output core dimensions not found on inputs must \"\n \"have explicitly set sizes with ``output_sizes``: {}\".format(new_dims)\n )\n\n for n, (data, core_dims) in enumerate(zip(args, signature.input_core_dims)):\n if isinstance(data, dask_array_type):\n # core dimensions cannot span multiple chunks\n for axis, dim in enumerate(core_dims, start=-len(core_dims)):\n if len(data.chunks[axis]) != 1:\n raise ValueError(\n \"dimension {!r} on {}th function argument to \"\n \"apply_ufunc with dask='parallelized' consists of \"\n \"multiple chunks, but is also a core dimension. To \"\n \"fix, rechunk into a single dask array chunk along \"\n \"this dimension, i.e., ``.chunk({})``, but beware \"\n \"that this may significantly increase memory usage.\".format(\n dim, n, {dim: -1}\n )\n )\n\n (out_ind,) = output_dims\n\n blockwise_args = []\n for arg, dims in zip(args, input_dims):\n # skip leading dimensions that are implicitly added by broadcasting\n ndim = getattr(arg, \"ndim\", 0)\n trimmed_dims = dims[-ndim:] if ndim else ()\n blockwise_args.extend([arg, trimmed_dims])\n\n return dask.array.blockwise(\n func,\n out_ind,\n *blockwise_args,\n dtype=dtype,\n concatenate=True,\n new_axes=output_sizes\n )\n\n\ndef apply_array_ufunc(func, *args, dask=\"forbidden\"):\n \"\"\"Apply a ndarray level function over ndarray objects.\"\"\"\n if any(isinstance(arg, dask_array_type) for arg in args):\n if dask == \"forbidden\":\n raise ValueError(\n \"apply_ufunc encountered a dask array on an \"\n \"argument, but handling for dask arrays has not \"\n \"been enabled. 
Either set the ``dask`` argument \"\n \"or load your data into memory first with \"\n \"``.load()`` or ``.compute()``\"\n )\n elif dask == \"parallelized\":\n raise ValueError(\n \"cannot use dask='parallelized' for apply_ufunc \"\n \"unless at least one input is an xarray object\"\n )\n elif dask == \"allowed\":\n pass\n else:\n raise ValueError(\"unknown setting for dask array handling: {}\".format(dask))\n return func(*args)\n\n\ndef apply_ufunc(\n func: Callable,\n *args: Any,\n input_core_dims: Sequence[Sequence] = None,\n output_core_dims: Optional[Sequence[Sequence]] = ((),),\n exclude_dims: AbstractSet = frozenset(),\n vectorize: bool = False,\n join: str = \"exact\",\n dataset_join: str = \"exact\",\n dataset_fill_value: object = _NO_FILL_VALUE,\n keep_attrs: bool = False,\n kwargs: Mapping = None,\n dask: str = \"forbidden\",\n output_dtypes: Sequence = None,\n output_sizes: Mapping[Any, int] = None\n) -> Any:\n \"\"\"Apply a vectorized function for unlabeled arrays on xarray objects.\n\n The function will be mapped over the data variable(s) of the input\n arguments using xarray's standard rules for labeled computation, including\n alignment, broadcasting, looping over GroupBy/Dataset variables, and\n merging of coordinates.\n\n Parameters\n ----------\n func : callable\n Function to call like ``func(*args, **kwargs)`` on unlabeled arrays\n (``.data``) that returns an array or tuple of arrays. If multiple\n arguments with non-matching dimensions are supplied, this function is\n expected to vectorize (broadcast) over axes of positional arguments in\n the style of NumPy universal functions [1]_ (if this is not the case,\n set ``vectorize=True``). If this function returns multiple outputs, you\n must set ``output_core_dims`` as well.\n *args : Dataset, DataArray, GroupBy, Variable, numpy/dask arrays or scalars\n Mix of labeled and/or unlabeled arrays to which to apply the function.\n input_core_dims : Sequence[Sequence], optional\n List of the same length as ``args`` giving the list of core dimensions\n on each input argument that should not be broadcast. By default, we\n assume there are no core dimensions on any input arguments.\n\n For example, ``input_core_dims=[[], ['time']]`` indicates that all\n dimensions on the first argument and all dimensions other than 'time'\n on the second argument should be broadcast.\n\n Core dimensions are automatically moved to the last axes of input\n variables before applying ``func``, which facilitates using NumPy style\n generalized ufuncs [2]_.\n output_core_dims : List[tuple], optional\n List of the same length as the number of output arguments from\n ``func``, giving the list of core dimensions on each output that were\n not broadcast on the inputs. By default, we assume that ``func``\n outputs exactly one array, with axes corresponding to each broadcast\n dimension.\n\n Core dimensions are assumed to appear as the last dimensions of each\n output in the provided order.\n exclude_dims : set, optional\n Core dimensions on the inputs to exclude from alignment and\n broadcasting entirely. Any input coordinates along these dimensions\n will be dropped. Each excluded dimension must also appear in\n ``input_core_dims`` for at least one argument. Only dimensions listed\n here are allowed to change size between input and output objects.\n vectorize : bool, optional\n If True, then assume ``func`` only takes arrays defined over core\n dimensions as input and vectorize it automatically with\n :py:func:`numpy.vectorize`. 
This option exists for convenience, but is\n almost always slower than supplying a pre-vectorized function.\n Using this option requires NumPy version 1.12 or newer.\n join : {'outer', 'inner', 'left', 'right', 'exact'}, optional\n Method for joining the indexes of the passed objects along each\n dimension, and the variables of Dataset objects with mismatched\n data variables:\n\n - 'outer': use the union of object indexes\n - 'inner': use the intersection of object indexes\n - 'left': use indexes from the first object with each dimension\n - 'right': use indexes from the last object with each dimension\n - 'exact': raise `ValueError` instead of aligning when indexes to be\n aligned are not equal\n dataset_join : {'outer', 'inner', 'left', 'right', 'exact'}, optional\n Method for joining variables of Dataset objects with mismatched\n data variables.\n\n - 'outer': take variables from both Dataset objects\n - 'inner': take only overlapped variables\n - 'left': take only variables from the first object\n - 'right': take only variables from the last object\n - 'exact': data variables on all Dataset objects must match exactly\n dataset_fill_value : optional\n Value used in place of missing variables on Dataset inputs when the\n datasets do not share the exact same ``data_vars``. Required if\n ``dataset_join not in {'inner', 'exact'}``, otherwise ignored.\n keep_attrs: boolean, Optional\n Whether to copy attributes from the first argument to the output.\n kwargs: dict, optional\n Optional keyword arguments passed directly on to call ``func``.\n dask: 'forbidden', 'allowed' or 'parallelized', optional\n How to handle applying to objects containing lazy data in the form of\n dask arrays:\n\n - 'forbidden' (default): raise an error if a dask array is encountered.\n - 'allowed': pass dask arrays directly on to ``func``.\n - 'parallelized': automatically parallelize ``func`` if any of the\n inputs are a dask array. If used, the ``output_dtypes`` argument must\n also be provided. Multiple output arguments are not yet supported.\n output_dtypes : list of dtypes, optional\n Optional list of output dtypes. Only used if dask='parallelized'.\n output_sizes : dict, optional\n Optional mapping from dimension names to sizes for outputs. Only used\n if dask='parallelized' and new dimensions (not found on inputs) appear\n on outputs.\n\n Returns\n -------\n Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or\n numpy.ndarray, the first type on that list to appear on an input.\n\n Examples\n --------\n\n Calculate the vector magnitude of two arguments:\n\n >>> def magnitude(a, b):\n ... func = lambda x, y: np.sqrt(x ** 2 + y ** 2)\n ... 
return xr.apply_ufunc(func, a, b)\n\n You can now apply ``magnitude()`` to ``xr.DataArray`` and ``xr.Dataset``\n objects, with automatically preserved dimensions and coordinates, e.g.,\n\n >>> array = xr.DataArray([1, 2, 3], coords=[('x', [0.1, 0.2, 0.3])])\n >>> magnitude(array, -array)\n \n array([1.414214, 2.828427, 4.242641])\n Coordinates:\n * x (x) float64 0.1 0.2 0.3\n\n Plain scalars, numpy arrays and a mix of these with xarray objects is also\n supported:\n\n >>> magnitude(4, 5)\n 5.0\n >>> magnitude(3, np.array([0, 4]))\n array([3., 5.])\n >>> magnitude(array, 0)\n \n array([1., 2., 3.])\n Coordinates:\n * x (x) float64 0.1 0.2 0.3\n\n Other examples of how you could use ``apply_ufunc`` to write functions to\n (very nearly) replicate existing xarray functionality:\n\n Compute the mean (``.mean``) over one dimension::\n\n def mean(obj, dim):\n # note: apply always moves core dimensions to the end\n return apply_ufunc(np.mean, obj,\n input_core_dims=[[dim]],\n kwargs={'axis': -1})\n\n Inner product over a specific dimension (like ``xr.dot``)::\n\n def _inner(x, y):\n result = np.matmul(x[..., np.newaxis, :], y[..., :, np.newaxis])\n return result[..., 0, 0]\n\n def inner_product(a, b, dim):\n return apply_ufunc(_inner, a, b, input_core_dims=[[dim], [dim]])\n\n Stack objects along a new dimension (like ``xr.concat``)::\n\n def stack(objects, dim, new_coord):\n # note: this version does not stack coordinates\n func = lambda *x: np.stack(x, axis=-1)\n result = apply_ufunc(func, *objects,\n output_core_dims=[[dim]],\n join='outer',\n dataset_fill_value=np.nan)\n result[dim] = new_coord\n return result\n\n If your function is not vectorized but can be applied only to core\n dimensions, you can use ``vectorize=True`` to turn into a vectorized\n function. This wraps :py:func:`numpy.vectorize`, so the operation isn't\n terribly fast. Here we'll use it to calculate the distance between\n empirical samples from two probability distributions, using a scipy\n function that needs to be applied to vectors::\n\n import scipy.stats\n\n def earth_mover_distance(first_samples,\n second_samples,\n dim='ensemble'):\n return apply_ufunc(scipy.stats.wasserstein_distance,\n first_samples, second_samples,\n input_core_dims=[[dim], [dim]],\n vectorize=True)\n\n Most of NumPy's builtin functions already broadcast their inputs\n appropriately for use in `apply`. You may find helper functions such as\n numpy.broadcast_arrays helpful in writing your function. `apply_ufunc` also\n works well with numba's vectorize and guvectorize. Further explanation with\n examples are provided in the xarray documentation [3].\n\n See also\n --------\n numpy.broadcast_arrays\n numba.vectorize\n numba.guvectorize\n\n References\n ----------\n .. [1] http://docs.scipy.org/doc/numpy/reference/ufuncs.html\n .. [2] http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html\n .. [3] http://xarray.pydata.org/en/stable/computation.html#wrapping-custom-computation\n \"\"\"\n from .groupby import GroupBy\n from .dataarray import DataArray\n from .variable import Variable\n\n if input_core_dims is None:\n input_core_dims = ((),) * (len(args))\n elif len(input_core_dims) != len(args):\n raise ValueError(\n \"input_core_dims must be None or a tuple with the length same to \"\n \"the number of arguments. 
Given input_core_dims: {}, \"\n \"number of args: {}.\".format(input_core_dims, len(args))\n )\n\n if kwargs is None:\n kwargs = {}\n\n signature = _UFuncSignature(input_core_dims, output_core_dims)\n\n if exclude_dims and not exclude_dims <= signature.all_core_dims:\n raise ValueError(\n \"each dimension in `exclude_dims` must also be a \"\n \"core dimension in the function signature\"\n )\n\n if kwargs:\n func = functools.partial(func, **kwargs)\n\n if vectorize:\n if signature.all_core_dims:\n func = np.vectorize(\n func, otypes=output_dtypes, signature=signature.to_gufunc_string()\n )\n else:\n func = np.vectorize(func, otypes=output_dtypes)\n\n variables_vfunc = functools.partial(\n apply_variable_ufunc,\n func,\n signature=signature,\n exclude_dims=exclude_dims,\n keep_attrs=keep_attrs,\n dask=dask,\n output_dtypes=output_dtypes,\n output_sizes=output_sizes,\n )\n\n if any(isinstance(a, GroupBy) for a in args):\n this_apply = functools.partial(\n apply_ufunc,\n func,\n input_core_dims=input_core_dims,\n output_core_dims=output_core_dims,\n exclude_dims=exclude_dims,\n join=join,\n dataset_join=dataset_join,\n dataset_fill_value=dataset_fill_value,\n keep_attrs=keep_attrs,\n dask=dask,\n )\n return apply_groupby_func(this_apply, *args)\n elif any(is_dict_like(a) for a in args):\n return apply_dataset_vfunc(\n variables_vfunc,\n *args,\n signature=signature,\n join=join,\n exclude_dims=exclude_dims,\n dataset_join=dataset_join,\n fill_value=dataset_fill_value,\n keep_attrs=keep_attrs\n )\n elif any(isinstance(a, DataArray) for a in args):\n return apply_dataarray_vfunc(\n variables_vfunc,\n *args,\n signature=signature,\n join=join,\n exclude_dims=exclude_dims,\n keep_attrs=keep_attrs\n )\n elif any(isinstance(a, Variable) for a in args):\n return variables_vfunc(*args)\n else:\n return apply_array_ufunc(func, *args, dask=dask)\n\n\ndef dot(*arrays, dims=None, **kwargs):\n \"\"\"Generalized dot product for xarray objects. Like np.einsum, but\n provides a simpler interface based on array dimensions.\n\n Parameters\n ----------\n arrays: DataArray (or Variable) objects\n Arrays to compute.\n dims: str or tuple of strings, optional\n Which dimensions to sum over.\n If not speciified, then all the common dimensions are summed over.\n **kwargs: dict\n Additional keyword arguments passed to numpy.einsum or\n dask.array.einsum\n\n Returns\n -------\n dot: DataArray\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xp\n >>> da_a = xr.DataArray(np.arange(3 * 2).reshape(3, 2), dims=['a', 'b'])\n >>> da_b = xr.DataArray(np.arange(3 * 2 * 2).reshape(3, 2, 2),\n ... 
dims=['a', 'b', 'c'])\n >>> da_c = xr.DataArray(np.arange(2 * 3).reshape(2, 3), dims=['c', 'd'])\n\n >>> da_a\n \n array([[0, 1],\n [2, 3],\n [4, 5]])\n Dimensions without coordinates: a, b\n\n >>> da_b\n \n array([[[ 0, 1],\n [ 2, 3]],\n [[ 4, 5],\n [ 6, 7]],\n [[ 8, 9],\n [10, 11]]])\n Dimensions without coordinates: a, b, c\n\n >>> da_c\n \n array([[0, 1, 2],\n [3, 4, 5]])\n Dimensions without coordinates: c, d\n\n >>> xr.dot(da_a, da_b, dims=['a', 'b'])\n \n array([110, 125])\n Dimensions without coordinates: c\n\n >>> xr.dot(da_a, da_b, dims=['a'])\n \n array([[40, 46],\n [70, 79]])\n Dimensions without coordinates: b, c\n\n >>> xr.dot(da_a, da_b, da_c, dims=['b', 'c'])\n \n array([[ 9, 14, 19],\n [ 93, 150, 207],\n [273, 446, 619]])\n Dimensions without coordinates: a, d\n\n \"\"\"\n from .dataarray import DataArray\n from .variable import Variable\n\n if any(not isinstance(arr, (Variable, DataArray)) for arr in arrays):\n raise TypeError(\n \"Only xr.DataArray and xr.Variable are supported.\"\n \"Given {}.\".format([type(arr) for arr in arrays])\n )\n\n if len(arrays) == 0:\n raise TypeError(\"At least one array should be given.\")\n\n if isinstance(dims, str):\n dims = (dims,)\n\n common_dims = set.intersection(*[set(arr.dims) for arr in arrays])\n all_dims = []\n for arr in arrays:\n all_dims += [d for d in arr.dims if d not in all_dims]\n\n einsum_axes = \"abcdefghijklmnopqrstuvwxyz\"\n dim_map = {d: einsum_axes[i] for i, d in enumerate(all_dims)}\n\n if dims is None:\n # find dimensions that occur more than one times\n dim_counts = Counter()\n for arr in arrays:\n dim_counts.update(arr.dims)\n dims = tuple(d for d, c in dim_counts.items() if c > 1)\n\n dims = tuple(dims) # make dims a tuple\n\n # dimensions to be parallelized\n broadcast_dims = tuple(d for d in all_dims if d in common_dims and d not in dims)\n input_core_dims = [\n [d for d in arr.dims if d not in broadcast_dims] for arr in arrays\n ]\n output_core_dims = [tuple(d for d in all_dims if d not in dims + broadcast_dims)]\n\n # construct einsum subscripts, such as '...abc,...ab->...c'\n # Note: input_core_dims are always moved to the last position\n subscripts_list = [\n \"...\" + \"\".join([dim_map[d] for d in ds]) for ds in input_core_dims\n ]\n subscripts = \",\".join(subscripts_list)\n subscripts += \"->...\" + \"\".join([dim_map[d] for d in output_core_dims[0]])\n\n # subscripts should be passed to np.einsum as arg, not as kwargs. We need\n # to construct a partial function for apply_ufunc to work.\n func = functools.partial(duck_array_ops.einsum, subscripts, **kwargs)\n result = apply_ufunc(\n func,\n *arrays,\n input_core_dims=input_core_dims,\n output_core_dims=output_core_dims,\n dask=\"allowed\"\n )\n return result.transpose(*[d for d in all_dims if d in result.dims])\n\n\ndef where(cond, x, y):\n \"\"\"Return elements from `x` or `y` depending on `cond`.\n\n Performs xarray-like broadcasting across input arguments.\n\n Parameters\n ----------\n cond : scalar, array, Variable, DataArray or Dataset with boolean dtype\n When True, return values from `x`, otherwise returns values from `y`.\n x, y : scalar, array, Variable, DataArray or Dataset\n Values from which to choose. 
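# --- Illustrative sketch, not part of the recorded xarray source ---
# The dot() implementation above builds an einsum subscript string from the
# requested dimensions, so its result agrees with the equivalent numpy.einsum
# call. The arrays reuse the shapes from the docstring examples.
import numpy as np
import xarray as xr

da_a = xr.DataArray(np.arange(3 * 2).reshape(3, 2), dims=["a", "b"])
da_b = xr.DataArray(np.arange(3 * 2 * 2).reshape(3, 2, 2), dims=["a", "b", "c"])

xr_result = xr.dot(da_a, da_b, dims=["a", "b"])            # dims: ("c",)
np_result = np.einsum("ab,abc->c", da_a.values, da_b.values)

assert (xr_result.values == np_result).all()               # both give [110, 125]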
All dimension coordinates on these objects\n must be aligned with each other and with `cond`.\n\n Returns\n -------\n In priority order: Dataset, DataArray, Variable or array, whichever\n type appears as an input argument.\n\n Examples\n --------\n >>> import xarray as xr\n >>> import numpy as np\n >>> x = xr.DataArray(0.1 * np.arange(10), dims=['lat'],\n ... coords={'lat': np.arange(10)}, name='sst')\n >>> x\n \n array([0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])\n Coordinates:\n * lat (lat) int64 0 1 2 3 4 5 6 7 8 9\n\n >>> xr.where(x < 0.5, x, 100*x)\n \n array([ 0. , 0.1, 0.2, 0.3, 0.4, 50. , 60. , 70. , 80. , 90. ])\n Coordinates:\n * lat (lat) int64 0 1 2 3 4 5 6 7 8 9\n\n >>> >>> y = xr.DataArray(\n ... 0.1 * np.arange(9).reshape(3, 3),\n ... dims=[\"lat\", \"lon\"],\n ... coords={\"lat\": np.arange(3), \"lon\": 10 + np.arange(3)},\n ... name=\"sst\",\n ... )\n >>> y\n \n array([[0. , 0.1, 0.2],\n [0.3, 0.4, 0.5],\n [0.6, 0.7, 0.8]])\n Coordinates:\n * lat (lat) int64 0 1 2\n * lon (lon) int64 10 11 12\n\n >>> xr.where(y.lat < 1, y, -1)\n \n array([[ 0. , 0.1, 0.2],\n [-1. , -1. , -1. ],\n [-1. , -1. , -1. ]])\n Coordinates:\n * lat (lat) int64 0 1 2\n * lon (lon) int64 10 11 12\n\n >>> cond = xr.DataArray([True, False], dims=['x'])\n >>> x = xr.DataArray([1, 2], dims=['y'])\n >>> xr.where(cond, x, 0)\n \n array([[1, 2],\n [0, 0]])\n Dimensions without coordinates: x, y\n\n See also\n --------\n numpy.where : corresponding numpy function\n Dataset.where, DataArray.where : equivalent methods\n \"\"\"\n # alignment for three arguments is complicated, so don't support it yet\n return apply_ufunc(\n duck_array_ops.where,\n cond,\n x,\n y,\n join=\"exact\",\n dataset_join=\"exact\",\n dask=\"allowed\",\n )\n"},{"attributeType":"null","col":8,"comment":"null","endLoc":12,"id":2983,"name":"array","nodeType":"Attribute","startLoc":12,"text":"self.array"},{"attributeType":"{acquire}","col":8,"comment":"null","endLoc":29,"id":2984,"name":"manager","nodeType":"Attribute","startLoc":29,"text":"self.manager"},{"attributeType":"null","col":8,"comment":"null","endLoc":30,"id":2985,"name":"lock","nodeType":"Attribute","startLoc":30,"text":"self.lock"},{"attributeType":"null","col":8,"comment":"null","endLoc":121,"id":2986,"name":"f","nodeType":"Attribute","startLoc":121,"text":"self.f"},{"attributeType":"null","col":4,"comment":"null","endLoc":697,"id":2987,"name":"__slots__","nodeType":"Attribute","startLoc":697,"text":"__slots__"},{"attributeType":"null","col":8,"comment":"null","endLoc":700,"id":2988,"name":"file_objs","nodeType":"Attribute","startLoc":700,"text":"self.file_objs"},{"attributeType":"null","col":0,"comment":"null","endLoc":39,"id":2989,"name":"DATAARRAY_NAME","nodeType":"Attribute","startLoc":39,"text":"DATAARRAY_NAME"},{"attributeType":"None","col":8,"comment":"null","endLoc":36,"id":2990,"name":"vrt_params","nodeType":"Attribute","startLoc":36,"text":"self.vrt_params"},{"attributeType":"null","col":0,"comment":"null","endLoc":40,"id":2991,"name":"DATAARRAY_VARIABLE","nodeType":"Attribute","startLoc":40,"text":"DATAARRAY_VARIABLE"},{"attributeType":"null","col":8,"comment":"null","endLoc":113,"id":2992,"name":"cons_kwargs","nodeType":"Attribute","startLoc":113,"text":"self.cons_kwargs"},{"attributeType":"null","col":0,"comment":"null","endLoc":966,"id":2993,"name":"WRITEABLE_STORES","nodeType":"Attribute","startLoc":966,"text":"WRITEABLE_STORES"},{"attributeType":"null","col":8,"comment":"null","endLoc":114,"id":2994,"name":"call_kwargs","nodeType":"Attribute","startLoc":11
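# --- Illustrative sketch, not part of the recorded xarray source ---
# Restates the last docstring example of where() above as runnable code:
# `cond`, `x` and `y` are broadcast against each other, so a 1D condition on
# dim "x" combined with 1D values on dim "y" yields a 2D result.
import xarray as xr

cond = xr.DataArray([True, False], dims=["x"])
x = xr.DataArray([1, 2], dims=["y"])
result = xr.where(cond, x, 0)
# result has dims ("x", "y"):
# [[1, 2],
#  [0, 0]]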
4,"text":"self.call_kwargs"},{"className":"SplineInterpolator","col":0,"comment":"One-dimensional smoothing spline fit to a given set of data points.\n\n See Also\n --------\n scipy.interpolate.UnivariateSpline\n ","endLoc":164,"id":2995,"nodeType":"Class","startLoc":133,"text":"class SplineInterpolator(BaseInterpolator):\n \"\"\"One-dimensional smoothing spline fit to a given set of data points.\n\n See Also\n --------\n scipy.interpolate.UnivariateSpline\n \"\"\"\n\n def __init__(\n self,\n xi,\n yi,\n method=\"spline\",\n fill_value=None,\n order=3,\n nu=0,\n ext=None,\n **kwargs\n ):\n from scipy.interpolate import UnivariateSpline\n\n if method != \"spline\":\n raise ValueError(\"only method `spline` is valid for the SplineInterpolator\")\n\n self.method = method\n self.cons_kwargs = kwargs\n self.call_kwargs = {\"nu\": nu, \"ext\": ext}\n\n if fill_value is not None:\n raise ValueError(\"SplineInterpolator does not support fill_value\")\n\n self.f = UnivariateSpline(xi, yi, k=order, **self.cons_kwargs)"},{"attributeType":"null","col":8,"comment":"null","endLoc":42,"id":2996,"name":"_dtype","nodeType":"Attribute","startLoc":42,"text":"self._dtype"},{"col":4,"comment":"null","endLoc":164,"header":"def __init__(\n self,\n xi,\n yi,\n method=\"spline\",\n fill_value=None,\n order=3,\n nu=0,\n ext=None,\n **kwargs\n )","id":2997,"name":"__init__","nodeType":"Function","startLoc":141,"text":"def __init__(\n self,\n xi,\n yi,\n method=\"spline\",\n fill_value=None,\n order=3,\n nu=0,\n ext=None,\n **kwargs\n ):\n from scipy.interpolate import UnivariateSpline\n\n if method != \"spline\":\n raise ValueError(\"only method `spline` is valid for the SplineInterpolator\")\n\n self.method = method\n self.cons_kwargs = kwargs\n self.call_kwargs = {\"nu\": nu, \"ext\": ext}\n\n if fill_value is not None:\n raise ValueError(\"SplineInterpolator does not support fill_value\")\n\n self.f = UnivariateSpline(xi, yi, k=order, **self.cons_kwargs)"},{"col":0,"comment":"","endLoc":1,"header":"api.py#","id":2998,"name":"","nodeType":"Function","startLoc":1,"text":"if TYPE_CHECKING:\n try:\n from dask.delayed import Delayed\n except ImportError:\n Delayed = None\n\nDATAARRAY_NAME = \"__xarray_dataarray_name__\"\n\nDATAARRAY_VARIABLE = \"__xarray_dataarray_variable__\"\n\nWRITEABLE_STORES: Dict[str, Callable] = {\n \"netcdf4\": backends.NetCDF4DataStore.open,\n \"scipy\": backends.ScipyDataStore,\n \"h5netcdf\": backends.H5NetCDFStore,\n}"},{"attributeType":"{__ne__}","col":8,"comment":"null","endLoc":157,"id":2999,"name":"method","nodeType":"Attribute","startLoc":157,"text":"self.method"},{"className":"_UFuncSignature","col":0,"comment":"Core dimensions signature for a given function.\n\n Based on the signature provided by generalized ufuncs in NumPy.\n\n Attributes\n ----------\n input_core_dims : tuple[tuple]\n Core dimension names on each input variable.\n output_core_dims : tuple[tuple]\n Core dimension names on each output variable.\n ","endLoc":141,"id":3000,"nodeType":"Class","startLoc":42,"text":"class _UFuncSignature:\n \"\"\"Core dimensions signature for a given function.\n\n Based on the signature provided by generalized ufuncs in NumPy.\n\n Attributes\n ----------\n input_core_dims : tuple[tuple]\n Core dimension names on each input variable.\n output_core_dims : tuple[tuple]\n Core dimension names on each output variable.\n \"\"\"\n\n __slots__ = (\n \"input_core_dims\",\n \"output_core_dims\",\n \"_all_input_core_dims\",\n \"_all_output_core_dims\",\n \"_all_core_dims\",\n )\n\n def 
__init__(self, input_core_dims, output_core_dims=((),)):\n self.input_core_dims = tuple(tuple(a) for a in input_core_dims)\n self.output_core_dims = tuple(tuple(a) for a in output_core_dims)\n self._all_input_core_dims = None\n self._all_output_core_dims = None\n self._all_core_dims = None\n\n @property\n def all_input_core_dims(self):\n if self._all_input_core_dims is None:\n self._all_input_core_dims = frozenset(\n dim for dims in self.input_core_dims for dim in dims\n )\n return self._all_input_core_dims\n\n @property\n def all_output_core_dims(self):\n if self._all_output_core_dims is None:\n self._all_output_core_dims = frozenset(\n dim for dims in self.output_core_dims for dim in dims\n )\n return self._all_output_core_dims\n\n @property\n def all_core_dims(self):\n if self._all_core_dims is None:\n self._all_core_dims = self.all_input_core_dims | self.all_output_core_dims\n return self._all_core_dims\n\n @property\n def num_inputs(self):\n return len(self.input_core_dims)\n\n @property\n def num_outputs(self):\n return len(self.output_core_dims)\n\n def __eq__(self, other):\n try:\n return (\n self.input_core_dims == other.input_core_dims\n and self.output_core_dims == other.output_core_dims\n )\n except AttributeError:\n return False\n\n def __ne__(self, other):\n return not self == other\n\n def __repr__(self):\n return \"%s(%r, %r)\" % (\n type(self).__name__,\n list(self.input_core_dims),\n list(self.output_core_dims),\n )\n\n def __str__(self):\n lhs = \",\".join(\"({})\".format(\",\".join(dims)) for dims in self.input_core_dims)\n rhs = \",\".join(\"({})\".format(\",\".join(dims)) for dims in self.output_core_dims)\n return \"{}->{}\".format(lhs, rhs)\n\n def to_gufunc_string(self):\n \"\"\"Create an equivalent signature string for a NumPy gufunc.\n\n Unlike __str__, handles dimensions that don't map to Python\n identifiers.\n \"\"\"\n all_dims = self.all_core_dims\n dims_map = dict(zip(sorted(all_dims), range(len(all_dims))))\n input_core_dims = [\n [\"dim%d\" % dims_map[dim] for dim in core_dims]\n for core_dims in self.input_core_dims\n ]\n output_core_dims = [\n [\"dim%d\" % dims_map[dim] for dim in core_dims]\n for core_dims in self.output_core_dims\n ]\n alt_signature = type(self)(input_core_dims, output_core_dims)\n return str(alt_signature)"},{"col":4,"comment":"null","endLoc":76,"header":"@property\n def all_input_core_dims(self)","id":3001,"name":"all_input_core_dims","nodeType":"Function","startLoc":70,"text":"@property\n def all_input_core_dims(self):\n if self._all_input_core_dims is None:\n self._all_input_core_dims = frozenset(\n dim for dims in self.input_core_dims for dim in dims\n )\n return self._all_input_core_dims"},{"col":4,"comment":"null","endLoc":84,"header":"@property\n def all_output_core_dims(self)","id":3002,"name":"all_output_core_dims","nodeType":"Function","startLoc":78,"text":"@property\n def all_output_core_dims(self):\n if self._all_output_core_dims is None:\n self._all_output_core_dims = frozenset(\n dim for dims in self.output_core_dims for dim in dims\n )\n return self._all_output_core_dims"},{"attributeType":"null","col":8,"comment":"null","endLoc":164,"id":3003,"name":"f","nodeType":"Attribute","startLoc":164,"text":"self.f"},{"col":4,"comment":"null","endLoc":90,"header":"@property\n def all_core_dims(self)","id":3004,"name":"all_core_dims","nodeType":"Function","startLoc":86,"text":"@property\n def all_core_dims(self):\n if self._all_core_dims is None:\n self._all_core_dims = self.all_input_core_dims | self.all_output_core_dims\n 
return self._all_core_dims"},{"col":4,"comment":"null","endLoc":94,"header":"@property\n def num_inputs(self)","id":3005,"name":"num_inputs","nodeType":"Function","startLoc":92,"text":"@property\n def num_inputs(self):\n return len(self.input_core_dims)"},{"attributeType":"null","col":8,"comment":"null","endLoc":158,"id":3006,"name":"cons_kwargs","nodeType":"Attribute","startLoc":158,"text":"self.cons_kwargs"},{"fileName":"accessor_example.py","filePath":"doc/examples/_code","id":3007,"nodeType":"File","text":"import xarray as xr\n\n\n@xr.register_dataset_accessor(\"geo\")\nclass GeoAccessor:\n def __init__(self, xarray_obj):\n self._obj = xarray_obj\n self._center = None\n\n @property\n def center(self):\n \"\"\"Return the geographic center point of this dataset.\"\"\"\n if self._center is None:\n # we can use a cache on our accessor objects, because accessors\n # themselves are cached on instances that access them.\n lon = self._obj.latitude\n lat = self._obj.longitude\n self._center = (float(lon.mean()), float(lat.mean()))\n return self._center\n\n def plot(self):\n \"\"\"Plot data on a map.\"\"\"\n return \"plotting!\"\n"},{"col":0,"comment":"Parse ENVI metadata into Python data structures.\n\n See the link for information on the ENVI header file format:\n http://www.harrisgeospatial.com/docs/enviheaderfiles.html\n\n Parameters\n ----------\n meta : dict\n Dictionary of keys and str values to parse, as returned by the rasterio\n tags(ns='ENVI') call.\n\n Returns\n -------\n parsed_meta : dict\n Dictionary containing the original keys and the parsed values\n\n ","endLoc":162,"header":"def _parse_envi(meta)","id":3008,"name":"_parse_envi","nodeType":"Function","startLoc":135,"text":"def _parse_envi(meta):\n \"\"\"Parse ENVI metadata into Python data structures.\n\n See the link for information on the ENVI header file format:\n http://www.harrisgeospatial.com/docs/enviheaderfiles.html\n\n Parameters\n ----------\n meta : dict\n Dictionary of keys and str values to parse, as returned by the rasterio\n tags(ns='ENVI') call.\n\n Returns\n -------\n parsed_meta : dict\n Dictionary containing the original keys and the parsed values\n\n \"\"\"\n\n def parsevec(s):\n return np.fromstring(s.strip(\"{}\"), dtype=\"float\", sep=\",\")\n\n def default(s):\n return s.strip(\"{}\")\n\n parse = {\"wavelength\": parsevec, \"fwhm\": parsevec}\n parsed_meta = {k: parse.get(k, default)(v) for k, v in meta.items()}\n return parsed_meta"},{"col":4,"comment":"null","endLoc":98,"header":"@property\n def num_outputs(self)","id":3009,"name":"num_outputs","nodeType":"Function","startLoc":96,"text":"@property\n def num_outputs(self):\n return len(self.output_core_dims)"},{"attributeType":"null","col":8,"comment":"null","endLoc":159,"id":3010,"name":"call_kwargs","nodeType":"Attribute","startLoc":159,"text":"self.call_kwargs"},{"className":"GeoAccessor","col":0,"comment":"null","endLoc":23,"id":3011,"nodeType":"Class","startLoc":4,"text":"@xr.register_dataset_accessor(\"geo\")\nclass GeoAccessor:\n def __init__(self, xarray_obj):\n self._obj = xarray_obj\n self._center = None\n\n @property\n def center(self):\n \"\"\"Return the geographic center point of this dataset.\"\"\"\n if self._center is None:\n # we can use a cache on our accessor objects, because accessors\n # themselves are cached on instances that access them.\n lon = self._obj.latitude\n lat = self._obj.longitude\n self._center = (float(lon.mean()), float(lat.mean()))\n return self._center\n\n def plot(self):\n \"\"\"Plot data on a map.\"\"\"\n return 
\"plotting!\""},{"col":4,"comment":"null","endLoc":8,"header":"def __init__(self, xarray_obj)","id":3012,"name":"__init__","nodeType":"Function","startLoc":6,"text":"def __init__(self, xarray_obj):\n self._obj = xarray_obj\n self._center = None"},{"col":4,"comment":"Return the geographic center point of this dataset.","endLoc":19,"header":"@property\n def center(self)","id":3013,"name":"center","nodeType":"Function","startLoc":10,"text":"@property\n def center(self):\n \"\"\"Return the geographic center point of this dataset.\"\"\"\n if self._center is None:\n # we can use a cache on our accessor objects, because accessors\n # themselves are cached on instances that access them.\n lon = self._obj.latitude\n lat = self._obj.longitude\n self._center = (float(lon.mean()), float(lat.mean()))\n return self._center"},{"col":0,"comment":"helper function to apply interpolation along 1 dimension","endLoc":275,"header":"def func_interpolate_na(interpolator, x, y, **kwargs)","id":3014,"name":"func_interpolate_na","nodeType":"Function","startLoc":259,"text":"def func_interpolate_na(interpolator, x, y, **kwargs):\n \"\"\"helper function to apply interpolation along 1 dimension\"\"\"\n # it would be nice if this wasn't necessary, works around:\n # \"ValueError: assignment destination is read-only\" in assignment below\n out = y.copy()\n\n nans = pd.isnull(y)\n nonans = ~nans\n\n # fast track for no-nans and all-nans cases\n n_nans = nans.sum()\n if n_nans == 0 or n_nans == len(y):\n return y\n\n f = interpolator(x[nonans], y[nonans], **kwargs)\n out[nans] = f(x[nans])\n return out"},{"fileName":"variable.py","filePath":"xarray/core","id":3015,"nodeType":"File","text":"import functools\nimport itertools\nfrom collections import defaultdict\nfrom datetime import timedelta\nfrom distutils.version import LooseVersion\nfrom typing import Any, Dict, Hashable, Mapping, TypeVar, Union\n\nimport numpy as np\nimport pandas as pd\n\nimport xarray as xr # only for Dataset and DataArray\n\nfrom . import arithmetic, common, dtypes, duck_array_ops, indexing, nputils, ops, utils\nfrom .indexing import (\n BasicIndexer,\n OuterIndexer,\n PandasIndexAdapter,\n VectorizedIndexer,\n as_indexable,\n)\nfrom .npcompat import IS_NEP18_ACTIVE\nfrom .options import _get_keep_attrs\nfrom .pycompat import dask_array_type, integer_types\nfrom .utils import (\n OrderedSet,\n decode_numpy_dict_values,\n either_dict_or_kwargs,\n ensure_us_time_resolution,\n)\n\ntry:\n import dask.array as da\nexcept ImportError:\n pass\n\n\nNON_NUMPY_SUPPORTED_ARRAY_TYPES = (\n indexing.ExplicitlyIndexed,\n pd.Index,\n) + dask_array_type\n# https://github.com/python/mypy/issues/224\nBASIC_INDEXING_TYPES = integer_types + (slice,) # type: ignore\n\nVariableType = TypeVar(\"VariableType\", bound=\"Variable\")\n\"\"\"Type annotation to be used when methods of Variable return self or a copy of self.\nWhen called from an instance of a subclass, e.g. IndexVariable, mypy identifies the\noutput as an instance of the subclass.\n\nUsage::\n\n class Variable:\n def f(self: VariableType, ...) 
-> VariableType:\n ...\n\"\"\"\n\n\nclass MissingDimensionsError(ValueError):\n \"\"\"Error class used when we can't safely guess a dimension name.\n \"\"\"\n\n # inherits from ValueError for backward compatibility\n # TODO: move this to an xarray.exceptions module?\n\n\ndef as_variable(obj, name=None) -> \"Union[Variable, IndexVariable]\":\n \"\"\"Convert an object into a Variable.\n\n Parameters\n ----------\n obj : object\n Object to convert into a Variable.\n\n - If the object is already a Variable, return a shallow copy.\n - Otherwise, if the object has 'dims' and 'data' attributes, convert\n it into a new Variable.\n - If all else fails, attempt to convert the object into a Variable by\n unpacking it into the arguments for creating a new Variable.\n name : str, optional\n If provided:\n\n - `obj` can be a 1D array, which is assumed to label coordinate values\n along a dimension of this given name.\n - Variables with name matching one of their dimensions are converted\n into `IndexVariable` objects.\n\n Returns\n -------\n var : Variable\n The newly created variable.\n\n \"\"\"\n from .dataarray import DataArray\n\n # TODO: consider extending this method to automatically handle Iris and\n if isinstance(obj, DataArray):\n # extract the primary Variable from DataArrays\n obj = obj.variable\n\n if isinstance(obj, Variable):\n obj = obj.copy(deep=False)\n elif isinstance(obj, tuple):\n try:\n obj = Variable(*obj)\n except (TypeError, ValueError) as error:\n # use .format() instead of % because it handles tuples consistently\n raise error.__class__(\n \"Could not convert tuple of form \"\n \"(dims, data[, attrs, encoding]): \"\n \"{} to Variable.\".format(obj)\n )\n elif utils.is_scalar(obj):\n obj = Variable([], obj)\n elif isinstance(obj, (pd.Index, IndexVariable)) and obj.name is not None:\n obj = Variable(obj.name, obj)\n elif isinstance(obj, (set, dict)):\n raise TypeError(\"variable %r has invalid type %r\" % (name, type(obj)))\n elif name is not None:\n data = as_compatible_data(obj)\n if data.ndim != 1:\n raise MissingDimensionsError(\n \"cannot set variable %r with %r-dimensional data \"\n \"without explicit dimension names. Pass a tuple of \"\n \"(dims, data) instead.\" % (name, data.ndim)\n )\n obj = Variable(name, data, fastpath=True)\n else:\n raise TypeError(\n \"unable to convert object into a variable without an \"\n \"explicit list of dimensions: %r\" % obj\n )\n\n if name is not None and name in obj.dims:\n # convert the Variable into an Index\n if obj.ndim != 1:\n raise MissingDimensionsError(\n \"%r has more than 1-dimension and the same name as one of its \"\n \"dimensions %r. 
xarray disallows such variables because they \"\n \"conflict with the coordinates used to label \"\n \"dimensions.\" % (name, obj.dims)\n )\n obj = obj.to_index_variable()\n\n return obj\n\n\ndef _maybe_wrap_data(data):\n \"\"\"\n Put pandas.Index and numpy.ndarray arguments in adapter objects to ensure\n they can be indexed properly.\n\n NumpyArrayAdapter, PandasIndexAdapter and LazilyOuterIndexedArray should\n all pass through unmodified.\n \"\"\"\n if isinstance(data, pd.Index):\n return PandasIndexAdapter(data)\n return data\n\n\ndef _possibly_convert_objects(values):\n \"\"\"Convert arrays of datetime.datetime and datetime.timedelta objects into\n datetime64 and timedelta64, according to the pandas convention.\n \"\"\"\n return np.asarray(pd.Series(values.ravel())).reshape(values.shape)\n\n\ndef as_compatible_data(data, fastpath=False):\n \"\"\"Prepare and wrap data to put in a Variable.\n\n - If data does not have the necessary attributes, convert it to ndarray.\n - If data has dtype=datetime64, ensure that it has ns precision. If it's a\n pandas.Timestamp, convert it to datetime64.\n - If data is already a pandas or xarray object (other than an Index), just\n use the values.\n\n Finally, wrap it up with an adapter if necessary.\n \"\"\"\n if fastpath and getattr(data, \"ndim\", 0) > 0:\n # can't use fastpath (yet) for scalars\n return _maybe_wrap_data(data)\n\n if isinstance(data, Variable):\n return data.data\n\n if isinstance(data, NON_NUMPY_SUPPORTED_ARRAY_TYPES):\n return _maybe_wrap_data(data)\n\n if isinstance(data, tuple):\n data = utils.to_0d_object_array(data)\n\n if isinstance(data, pd.Timestamp):\n # TODO: convert, handle datetime objects, too\n data = np.datetime64(data.value, \"ns\")\n\n if isinstance(data, timedelta):\n data = np.timedelta64(getattr(data, \"value\", data), \"ns\")\n\n # we don't want nested self-described arrays\n data = getattr(data, \"values\", data)\n\n if isinstance(data, np.ma.MaskedArray):\n mask = np.ma.getmaskarray(data)\n if mask.any():\n dtype, fill_value = dtypes.maybe_promote(data.dtype)\n data = np.asarray(data, dtype=dtype)\n data[mask] = fill_value\n else:\n data = np.asarray(data)\n\n if not isinstance(data, np.ndarray):\n if hasattr(data, \"__array_function__\"):\n if IS_NEP18_ACTIVE:\n return data\n else:\n raise TypeError(\n \"Got an NumPy-like array type providing the \"\n \"__array_function__ protocol but NEP18 is not enabled. 
\"\n \"Check that numpy >= v1.16 and that the environment \"\n 'variable \"NUMPY_EXPERIMENTAL_ARRAY_FUNCTION\" is set to '\n '\"1\"'\n )\n\n # validate whether the data is valid data types\n data = np.asarray(data)\n\n if isinstance(data, np.ndarray):\n if data.dtype.kind == \"O\":\n data = _possibly_convert_objects(data)\n elif data.dtype.kind == \"M\":\n data = np.asarray(data, \"datetime64[ns]\")\n elif data.dtype.kind == \"m\":\n data = np.asarray(data, \"timedelta64[ns]\")\n\n return _maybe_wrap_data(data)\n\n\ndef _as_array_or_item(data):\n \"\"\"Return the given values as a numpy array, or as an individual item if\n it's a 0d datetime64 or timedelta64 array.\n\n Importantly, this function does not copy data if it is already an ndarray -\n otherwise, it will not be possible to update Variable values in place.\n\n This function mostly exists because 0-dimensional ndarrays with\n dtype=datetime64 are broken :(\n https://github.com/numpy/numpy/issues/4337\n https://github.com/numpy/numpy/issues/7619\n\n TODO: remove this (replace with np.asarray) once these issues are fixed\n \"\"\"\n data = np.asarray(data)\n if data.ndim == 0:\n if data.dtype.kind == \"M\":\n data = np.datetime64(data, \"ns\")\n elif data.dtype.kind == \"m\":\n data = np.timedelta64(data, \"ns\")\n return data\n\n\nclass Variable(\n common.AbstractArray, arithmetic.SupportsArithmetic, utils.NdimSizeLenMixin\n):\n \"\"\"A netcdf-like variable consisting of dimensions, data and attributes\n which describe a single Array. A single Variable object is not fully\n described outside the context of its parent Dataset (if you want such a\n fully described object, use a DataArray instead).\n\n The main functional difference between Variables and numpy arrays is that\n numerical operations on Variables implement array broadcasting by dimension\n name. For example, adding an Variable with dimensions `('time',)` to\n another Variable with dimensions `('space',)` results in a new Variable\n with dimensions `('time', 'space')`. Furthermore, numpy reduce operations\n like ``mean`` or ``sum`` are overwritten to take a \"dimension\" argument\n instead of an \"axis\".\n\n Variables are light-weight objects used as the building block for datasets.\n They are more primitive objects, so operations with them provide marginally\n higher performance than using DataArrays. However, manipulating data in the\n form of a Dataset or DataArray should almost always be preferred, because\n they can use more complete metadata in context of coordinate labels.\n \"\"\"\n\n __slots__ = (\"_dims\", \"_data\", \"_attrs\", \"_encoding\")\n\n def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):\n \"\"\"\n Parameters\n ----------\n dims : str or sequence of str\n Name(s) of the the data dimension(s). Must be either a string (only\n for 1D data) or a sequence of strings with length equal to the\n number of dimensions.\n data : array_like\n Data array which supports numpy-like data access.\n attrs : dict_like or None, optional\n Attributes to assign to the new variable. If None (default), an\n empty attribute dictionary is initialized.\n encoding : dict_like or None, optional\n Dictionary specifying how to encode this array's data into a\n serialized format like netCDF4. 
Currently used keys (for netCDF)\n include '_FillValue', 'scale_factor', 'add_offset' and 'dtype'.\n Well-behaved code to serialize a Variable should ignore\n unrecognized encoding items.\n \"\"\"\n self._data = as_compatible_data(data, fastpath=fastpath)\n self._dims = self._parse_dimensions(dims)\n self._attrs = None\n self._encoding = None\n if attrs is not None:\n self.attrs = attrs\n if encoding is not None:\n self.encoding = encoding\n\n @property\n def dtype(self):\n return self._data.dtype\n\n @property\n def shape(self):\n return self._data.shape\n\n @property\n def nbytes(self):\n return self.size * self.dtype.itemsize\n\n @property\n def _in_memory(self):\n return isinstance(self._data, (np.ndarray, np.number, PandasIndexAdapter)) or (\n isinstance(self._data, indexing.MemoryCachedArray)\n and isinstance(self._data.array, indexing.NumpyIndexingAdapter)\n )\n\n @property\n def data(self):\n if hasattr(self._data, \"__array_function__\") or isinstance(\n self._data, dask_array_type\n ):\n return self._data\n else:\n return self.values\n\n @data.setter\n def data(self, data):\n data = as_compatible_data(data)\n if data.shape != self.shape:\n raise ValueError(\"replacement data must match the Variable's shape\")\n self._data = data\n\n def load(self, **kwargs):\n \"\"\"Manually trigger loading of this variable's data from disk or a\n remote source into memory and return this variable.\n\n Normally, it should not be necessary to call this method in user code,\n because all xarray functions should either work on deferred data or\n load data automatically.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.array.compute``.\n\n See Also\n --------\n dask.array.compute\n \"\"\"\n if isinstance(self._data, dask_array_type):\n self._data = as_compatible_data(self._data.compute(**kwargs))\n elif not hasattr(self._data, \"__array_function__\"):\n self._data = np.asarray(self._data)\n return self\n\n def compute(self, **kwargs):\n \"\"\"Manually trigger loading of this variable's data from disk or a\n remote source into memory and return a new variable. 
The original is\n left unaltered.\n\n Normally, it should not be necessary to call this method in user code,\n because all xarray functions should either work on deferred data or\n load data automatically.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.array.compute``.\n\n See Also\n --------\n dask.array.compute\n \"\"\"\n new = self.copy(deep=False)\n return new.load(**kwargs)\n\n def __dask_graph__(self):\n if isinstance(self._data, dask_array_type):\n return self._data.__dask_graph__()\n else:\n return None\n\n def __dask_keys__(self):\n return self._data.__dask_keys__()\n\n def __dask_layers__(self):\n return self._data.__dask_layers__()\n\n @property\n def __dask_optimize__(self):\n return self._data.__dask_optimize__\n\n @property\n def __dask_scheduler__(self):\n return self._data.__dask_scheduler__\n\n def __dask_postcompute__(self):\n array_func, array_args = self._data.__dask_postcompute__()\n return (\n self._dask_finalize,\n (array_func, array_args, self._dims, self._attrs, self._encoding),\n )\n\n def __dask_postpersist__(self):\n array_func, array_args = self._data.__dask_postpersist__()\n return (\n self._dask_finalize,\n (array_func, array_args, self._dims, self._attrs, self._encoding),\n )\n\n @staticmethod\n def _dask_finalize(results, array_func, array_args, dims, attrs, encoding):\n if isinstance(results, dict): # persist case\n name = array_args[0]\n results = {k: v for k, v in results.items() if k[0] == name}\n data = array_func(results, *array_args)\n return Variable(dims, data, attrs=attrs, encoding=encoding)\n\n @property\n def values(self):\n \"\"\"The variable's data as a numpy.ndarray\"\"\"\n return _as_array_or_item(self._data)\n\n @values.setter\n def values(self, values):\n self.data = values\n\n def to_base_variable(self):\n \"\"\"Return this variable as a base xarray.Variable\"\"\"\n return Variable(\n self.dims, self._data, self._attrs, encoding=self._encoding, fastpath=True\n )\n\n to_variable = utils.alias(to_base_variable, \"to_variable\")\n\n def to_index_variable(self):\n \"\"\"Return this variable as an xarray.IndexVariable\"\"\"\n return IndexVariable(\n self.dims, self._data, self._attrs, encoding=self._encoding, fastpath=True\n )\n\n to_coord = utils.alias(to_index_variable, \"to_coord\")\n\n def to_index(self):\n \"\"\"Convert this variable to a pandas.Index\"\"\"\n return self.to_index_variable().to_index()\n\n def to_dict(self, data=True):\n \"\"\"Dictionary representation of variable.\"\"\"\n item = {\"dims\": self.dims, \"attrs\": decode_numpy_dict_values(self.attrs)}\n if data:\n item[\"data\"] = ensure_us_time_resolution(self.values).tolist()\n else:\n item.update({\"dtype\": str(self.dtype), \"shape\": self.shape})\n return item\n\n @property\n def dims(self):\n \"\"\"Tuple of dimension names with which this variable is associated.\n \"\"\"\n return self._dims\n\n @dims.setter\n def dims(self, value):\n self._dims = self._parse_dimensions(value)\n\n def _parse_dimensions(self, dims):\n if isinstance(dims, str):\n dims = (dims,)\n dims = tuple(dims)\n if len(dims) != self.ndim:\n raise ValueError(\n \"dimensions %s must have the same length as the \"\n \"number of data dimensions, ndim=%s\" % (dims, self.ndim)\n )\n return dims\n\n def _item_key_to_tuple(self, key):\n if utils.is_dict_like(key):\n return tuple(key.get(dim, slice(None)) for dim in self.dims)\n else:\n return key\n\n def _broadcast_indexes(self, key):\n \"\"\"Prepare an indexing key for an indexing operation.\n\n Parameters\n 
-----------\n key: int, slice, array, dict or tuple of integer, slices and arrays\n Any valid input for indexing.\n\n Returns\n -------\n dims: tuple\n Dimension of the resultant variable.\n indexers: IndexingTuple subclass\n Tuple of integer, array-like, or slices to use when indexing\n self._data. The type of this argument indicates the type of\n indexing to perform, either basic, outer or vectorized.\n new_order : Optional[Sequence[int]]\n Optional reordering to do on the result of indexing. If not None,\n the first len(new_order) indexing should be moved to these\n positions.\n \"\"\"\n key = self._item_key_to_tuple(key) # key is a tuple\n # key is a tuple of full size\n key = indexing.expanded_indexer(key, self.ndim)\n # Convert a scalar Variable to an integer\n key = tuple(\n k.data.item() if isinstance(k, Variable) and k.ndim == 0 else k for k in key\n )\n # Convert a 0d-array to an integer\n key = tuple(\n k.item() if isinstance(k, np.ndarray) and k.ndim == 0 else k for k in key\n )\n\n if all(isinstance(k, BASIC_INDEXING_TYPES) for k in key):\n return self._broadcast_indexes_basic(key)\n\n self._validate_indexers(key)\n # Detect it can be mapped as an outer indexer\n # If all key is unlabeled, or\n # key can be mapped as an OuterIndexer.\n if all(not isinstance(k, Variable) for k in key):\n return self._broadcast_indexes_outer(key)\n\n # If all key is 1-dimensional and there are no duplicate labels,\n # key can be mapped as an OuterIndexer.\n dims = []\n for k, d in zip(key, self.dims):\n if isinstance(k, Variable):\n if len(k.dims) > 1:\n return self._broadcast_indexes_vectorized(key)\n dims.append(k.dims[0])\n elif not isinstance(k, integer_types):\n dims.append(d)\n if len(set(dims)) == len(dims):\n return self._broadcast_indexes_outer(key)\n\n return self._broadcast_indexes_vectorized(key)\n\n def _broadcast_indexes_basic(self, key):\n dims = tuple(\n dim for k, dim in zip(key, self.dims) if not isinstance(k, integer_types)\n )\n return dims, BasicIndexer(key), None\n\n def _validate_indexers(self, key):\n \"\"\" Make sanity checks \"\"\"\n for dim, k in zip(self.dims, key):\n if isinstance(k, BASIC_INDEXING_TYPES):\n pass\n else:\n if not isinstance(k, Variable):\n k = np.asarray(k)\n if k.ndim > 1:\n raise IndexError(\n \"Unlabeled multi-dimensional array cannot be \"\n \"used for indexing: {}\".format(k)\n )\n if k.dtype.kind == \"b\":\n if self.shape[self.get_axis_num(dim)] != len(k):\n raise IndexError(\n \"Boolean array size {:d} is used to index array \"\n \"with shape {:s}.\".format(len(k), str(self.shape))\n )\n if k.ndim > 1:\n raise IndexError(\n \"{}-dimensional boolean indexing is \"\n \"not supported. \".format(k.ndim)\n )\n if getattr(k, \"dims\", (dim,)) != (dim,):\n raise IndexError(\n \"Boolean indexer should be unlabeled or on the \"\n \"same dimension to the indexed array. Indexer is \"\n \"on {:s} but the target dimension is {:s}.\".format(\n str(k.dims), dim\n )\n )\n\n def _broadcast_indexes_outer(self, key):\n dims = tuple(\n k.dims[0] if isinstance(k, Variable) else dim\n for k, dim in zip(key, self.dims)\n if not isinstance(k, integer_types)\n )\n\n new_key = []\n for k in key:\n if isinstance(k, Variable):\n k = k.data\n if not isinstance(k, BASIC_INDEXING_TYPES):\n k = np.asarray(k)\n if k.dtype.kind == \"b\":\n (k,) = np.nonzero(k)\n new_key.append(k)\n\n return dims, OuterIndexer(tuple(new_key)), None\n\n def _nonzero(self):\n \"\"\" Equivalent numpy's nonzero but returns a tuple of Varibles. 
\"\"\"\n # TODO we should replace dask's native nonzero\n # after https://github.com/dask/dask/issues/1076 is implemented.\n nonzeros = np.nonzero(self.data)\n return tuple(Variable((dim), nz) for nz, dim in zip(nonzeros, self.dims))\n\n def _broadcast_indexes_vectorized(self, key):\n variables = []\n out_dims_set = OrderedSet()\n for dim, value in zip(self.dims, key):\n if isinstance(value, slice):\n out_dims_set.add(dim)\n else:\n variable = (\n value\n if isinstance(value, Variable)\n else as_variable(value, name=dim)\n )\n if variable.dtype.kind == \"b\": # boolean indexing case\n (variable,) = variable._nonzero()\n\n variables.append(variable)\n out_dims_set.update(variable.dims)\n\n variable_dims = set()\n for variable in variables:\n variable_dims.update(variable.dims)\n\n slices = []\n for i, (dim, value) in enumerate(zip(self.dims, key)):\n if isinstance(value, slice):\n if dim in variable_dims:\n # We only convert slice objects to variables if they share\n # a dimension with at least one other variable. Otherwise,\n # we can equivalently leave them as slices aknd transpose\n # the result. This is significantly faster/more efficient\n # for most array backends.\n values = np.arange(*value.indices(self.sizes[dim]))\n variables.insert(i - len(slices), Variable((dim,), values))\n else:\n slices.append((i, value))\n\n try:\n variables = _broadcast_compat_variables(*variables)\n except ValueError:\n raise IndexError(\"Dimensions of indexers mismatch: {}\".format(key))\n\n out_key = [variable.data for variable in variables]\n out_dims = tuple(out_dims_set)\n slice_positions = set()\n for i, value in slices:\n out_key.insert(i, value)\n new_position = out_dims.index(self.dims[i])\n slice_positions.add(new_position)\n\n if slice_positions:\n new_order = [i for i in range(len(out_dims)) if i not in slice_positions]\n else:\n new_order = None\n\n return out_dims, VectorizedIndexer(tuple(out_key)), new_order\n\n def __getitem__(self: VariableType, key) -> VariableType:\n \"\"\"Return a new Variable object whose contents are consistent with\n getting the provided key from the underlying data.\n\n NB. __getitem__ and __setitem__ implement xarray-style indexing,\n where if keys are unlabeled arrays, we index the array orthogonally\n with them. If keys are labeled array (such as Variables), they are\n broadcasted with our usual scheme and then the array is indexed with\n the broadcasted key, like numpy's fancy indexing.\n\n If you really want to do indexing like `x[x > 0]`, manipulate the numpy\n array `x.values` directly.\n \"\"\"\n dims, indexer, new_order = self._broadcast_indexes(key)\n data = as_indexable(self._data)[indexer]\n if new_order:\n data = duck_array_ops.moveaxis(data, range(len(new_order)), new_order)\n return self._finalize_indexing_result(dims, data)\n\n def _finalize_indexing_result(self: VariableType, dims, data) -> VariableType:\n \"\"\"Used by IndexVariable to return IndexVariable objects when possible.\n \"\"\"\n return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)\n\n def _getitem_with_mask(self, key, fill_value=dtypes.NA):\n \"\"\"Index this Variable with -1 remapped to fill_value.\"\"\"\n # TODO(shoyer): expose this method in public API somewhere (isel?) 
and\n # use it for reindex.\n # TODO(shoyer): add a sanity check that all other integers are\n # non-negative\n # TODO(shoyer): add an optimization, remapping -1 to an adjacent value\n # that is actually indexed rather than mapping it to the last value\n # along each axis.\n\n if fill_value is dtypes.NA:\n fill_value = dtypes.get_fill_value(self.dtype)\n\n dims, indexer, new_order = self._broadcast_indexes(key)\n\n if self.size:\n if isinstance(self._data, dask_array_type):\n # dask's indexing is faster this way; also vindex does not\n # support negative indices yet:\n # https://github.com/dask/dask/pull/2967\n actual_indexer = indexing.posify_mask_indexer(indexer)\n else:\n actual_indexer = indexer\n\n data = as_indexable(self._data)[actual_indexer]\n mask = indexing.create_mask(indexer, self.shape, data)\n data = duck_array_ops.where(mask, fill_value, data)\n else:\n # array cannot be indexed along dimensions of size 0, so just\n # build the mask directly instead.\n mask = indexing.create_mask(indexer, self.shape)\n data = np.broadcast_to(fill_value, getattr(mask, \"shape\", ()))\n\n if new_order:\n data = duck_array_ops.moveaxis(data, range(len(new_order)), new_order)\n return self._finalize_indexing_result(dims, data)\n\n def __setitem__(self, key, value):\n \"\"\"__setitem__ is overloaded to access the underlying numpy values with\n orthogonal indexing.\n\n See __getitem__ for more details.\n \"\"\"\n dims, index_tuple, new_order = self._broadcast_indexes(key)\n\n if not isinstance(value, Variable):\n value = as_compatible_data(value)\n if value.ndim > len(dims):\n raise ValueError(\n \"shape mismatch: value array of shape %s could not be \"\n \"broadcast to indexing result with %s dimensions\"\n % (value.shape, len(dims))\n )\n if value.ndim == 0:\n value = Variable((), value)\n else:\n value = Variable(dims[-value.ndim :], value)\n # broadcast to become assignable\n value = value.set_dims(dims).data\n\n if new_order:\n value = duck_array_ops.asarray(value)\n value = value[(len(dims) - value.ndim) * (np.newaxis,) + (Ellipsis,)]\n value = duck_array_ops.moveaxis(value, new_order, range(len(new_order)))\n\n indexable = as_indexable(self._data)\n indexable[index_tuple] = value\n\n @property\n def attrs(self) -> Dict[Hashable, Any]:\n \"\"\"Dictionary of local attributes on this variable.\n \"\"\"\n if self._attrs is None:\n self._attrs = {}\n return self._attrs\n\n @attrs.setter\n def attrs(self, value: Mapping[Hashable, Any]) -> None:\n self._attrs = dict(value)\n\n @property\n def encoding(self):\n \"\"\"Dictionary of encodings on this variable.\n \"\"\"\n if self._encoding is None:\n self._encoding = {}\n return self._encoding\n\n @encoding.setter\n def encoding(self, value):\n try:\n self._encoding = dict(value)\n except ValueError:\n raise ValueError(\"encoding must be castable to a dictionary\")\n\n def copy(self, deep=True, data=None):\n \"\"\"Returns a copy of this object.\n\n If `deep=True`, the data array is loaded into memory and copied onto\n the new object. Dimensions, attributes and encodings are always copied.\n\n Use `data` to create a new object with the same structure as\n original but entirely new data.\n\n Parameters\n ----------\n deep : bool, optional\n Whether the data array is loaded into memory and copied onto\n the new object. Default is True.\n data : array_like, optional\n Data to use in the new object. 
Must have same shape as original.\n When `data` is used, `deep` is ignored.\n\n Returns\n -------\n object : Variable\n New object with dimensions, attributes, encodings, and optionally\n data copied from original.\n\n Examples\n --------\n\n Shallow copy versus deep copy\n\n >>> var = xr.Variable(data=[1, 2, 3], dims='x')\n >>> var.copy()\n \n array([1, 2, 3])\n >>> var_0 = var.copy(deep=False)\n >>> var_0[0] = 7\n >>> var_0\n \n array([7, 2, 3])\n >>> var\n \n array([7, 2, 3])\n\n Changing the data using the ``data`` argument maintains the\n structure of the original object, but with the new data. Original\n object is unaffected.\n\n >>> var.copy(data=[0.1, 0.2, 0.3])\n \n array([ 0.1, 0.2, 0.3])\n >>> var\n \n array([7, 2, 3])\n\n See Also\n --------\n pandas.DataFrame.copy\n \"\"\"\n if data is None:\n data = self._data\n\n if isinstance(data, indexing.MemoryCachedArray):\n # don't share caching between copies\n data = indexing.MemoryCachedArray(data.array)\n\n if deep:\n if hasattr(data, \"__array_function__\") or isinstance(\n data, dask_array_type\n ):\n data = data.copy()\n elif not isinstance(data, PandasIndexAdapter):\n # pandas.Index is immutable\n data = np.array(data)\n else:\n data = as_compatible_data(data)\n if self.shape != data.shape:\n raise ValueError(\n \"Data shape {} must match shape of object {}\".format(\n data.shape, self.shape\n )\n )\n\n # note:\n # dims is already an immutable tuple\n # attributes and encoding will be copied when the new Array is created\n return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True)\n\n def __copy__(self):\n return self.copy(deep=False)\n\n def __deepcopy__(self, memo=None):\n # memo does nothing but is required for compatibility with\n # copy.deepcopy\n return self.copy(deep=True)\n\n # mutable objects should not be hashable\n # https://github.com/python/mypy/issues/4266\n __hash__ = None # type: ignore\n\n @property\n def chunks(self):\n \"\"\"Block dimensions for this array's data or None if it's not a dask\n array.\n \"\"\"\n return getattr(self._data, \"chunks\", None)\n\n _array_counter = itertools.count()\n\n def chunk(self, chunks=None, name=None, lock=False):\n \"\"\"Coerce this array's data into a dask arrays with the given chunks.\n\n If this variable is a non-dask array, it will be converted to dask\n array. If it's a dask array, it will be rechunked to the given chunk\n sizes.\n\n If neither chunks is not provided for one or more dimensions, chunk\n sizes along that dimension will not be updated; non-dask arrays will be\n converted into dask arrays with a single block.\n\n Parameters\n ----------\n chunks : int, tuple or dict, optional\n Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or\n ``{'x': 5, 'y': 5}``.\n name : str, optional\n Used to generate the name for this array in the internal dask\n graph. Does not need not be unique.\n lock : optional\n Passed on to :py:func:`dask.array.from_array`, if the array is not\n already as dask array.\n\n Returns\n -------\n chunked : xarray.Variable\n \"\"\"\n import dask\n import dask.array as da\n\n if utils.is_dict_like(chunks):\n chunks = {self.get_axis_num(dim): chunk for dim, chunk in chunks.items()}\n\n if chunks is None:\n chunks = self.chunks or self.shape\n\n data = self._data\n if isinstance(data, da.Array):\n data = data.rechunk(chunks)\n else:\n if isinstance(data, indexing.ExplicitlyIndexed):\n # Unambiguously handle array storage backends (like NetCDF4 and h5py)\n # that can't handle general array indexing. 
For example, in netCDF4 you\n # can do \"outer\" indexing along two dimensions independent, which works\n # differently from how NumPy handles it.\n # da.from_array works by using lazy indexing with a tuple of slices.\n # Using OuterIndexer is a pragmatic choice: dask does not yet handle\n # different indexing types in an explicit way:\n # https://github.com/dask/dask/issues/2883\n data = indexing.ImplicitToExplicitIndexingAdapter(\n data, indexing.OuterIndexer\n )\n if LooseVersion(dask.__version__) < \"2.0.0\":\n kwargs = {}\n else:\n # All of our lazily loaded backend array classes should use NumPy\n # array operations.\n kwargs = {\"meta\": np.ndarray}\n else:\n kwargs = {}\n\n if utils.is_dict_like(chunks):\n chunks = tuple(chunks.get(n, s) for n, s in enumerate(self.shape))\n\n data = da.from_array(data, chunks, name=name, lock=lock, **kwargs)\n\n return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True)\n\n def isel(\n self: VariableType,\n indexers: Mapping[Hashable, Any] = None,\n **indexers_kwargs: Any\n ) -> VariableType:\n \"\"\"Return a new array indexed along the specified dimension(s).\n\n Parameters\n ----------\n **indexers : {dim: indexer, ...}\n Keyword arguments with names matching dimensions and values given\n by integers, slice objects or arrays.\n\n Returns\n -------\n obj : Array object\n A new Array with the selected data and dimensions. In general,\n the new variable's data will be a view of this variable's data,\n unless numpy fancy indexing was triggered by using an array\n indexer, in which case the data will be a copy.\n \"\"\"\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"isel\")\n\n invalid = indexers.keys() - set(self.dims)\n if invalid:\n raise ValueError(\"dimensions %r do not exist\" % invalid)\n\n key = tuple(indexers.get(dim, slice(None)) for dim in self.dims)\n return self[key]\n\n def squeeze(self, dim=None):\n \"\"\"Return a new object with squeezed data.\n\n Parameters\n ----------\n dim : None or str or tuple of str, optional\n Selects a subset of the length one dimensions. If a dimension is\n selected with length greater than one, an error is raised. 
If\n None, all length one dimensions are squeezed.\n\n Returns\n -------\n squeezed : same type as caller\n This object, but with with all or a subset of the dimensions of\n length 1 removed.\n\n See Also\n --------\n numpy.squeeze\n \"\"\"\n dims = common.get_squeeze_dims(self, dim)\n return self.isel({d: 0 for d in dims})\n\n def _shift_one_dim(self, dim, count, fill_value=dtypes.NA):\n axis = self.get_axis_num(dim)\n\n if count > 0:\n keep = slice(None, -count)\n elif count < 0:\n keep = slice(-count, None)\n else:\n keep = slice(None)\n\n trimmed_data = self[(slice(None),) * axis + (keep,)].data\n\n if fill_value is dtypes.NA:\n dtype, fill_value = dtypes.maybe_promote(self.dtype)\n else:\n dtype = self.dtype\n\n shape = list(self.shape)\n shape[axis] = min(abs(count), shape[axis])\n\n if isinstance(trimmed_data, dask_array_type):\n chunks = list(trimmed_data.chunks)\n chunks[axis] = (shape[axis],)\n full = functools.partial(da.full, chunks=chunks)\n else:\n full = np.full\n\n filler = full(shape, fill_value, dtype=dtype)\n\n if count > 0:\n arrays = [filler, trimmed_data]\n else:\n arrays = [trimmed_data, filler]\n\n data = duck_array_ops.concatenate(arrays, axis)\n\n if isinstance(data, dask_array_type):\n # chunked data should come out with the same chunks; this makes\n # it feasible to combine shifted and unshifted data\n # TODO: remove this once dask.array automatically aligns chunks\n data = data.rechunk(self.data.chunks)\n\n return type(self)(self.dims, data, self._attrs, fastpath=True)\n\n def shift(self, shifts=None, fill_value=dtypes.NA, **shifts_kwargs):\n \"\"\"\n Return a new Variable with shifted data.\n\n Parameters\n ----------\n shifts : mapping of the form {dim: offset}\n Integer offset to shift along each of the given dimensions.\n Positive offsets shift to the right; negative offsets shift to the\n left.\n fill_value: scalar, optional\n Value to use for newly missing values\n **shifts_kwargs:\n The keyword arguments form of ``shifts``.\n One of shifts or shifts_kwarg must be provided.\n\n Returns\n -------\n shifted : Variable\n Variable with the same dimensions and attributes but shifted data.\n \"\"\"\n shifts = either_dict_or_kwargs(shifts, shifts_kwargs, \"shift\")\n result = self\n for dim, count in shifts.items():\n result = result._shift_one_dim(dim, count, fill_value=fill_value)\n return result\n\n def pad_with_fill_value(\n self, pad_widths=None, fill_value=dtypes.NA, **pad_widths_kwargs\n ):\n \"\"\"\n Return a new Variable with paddings.\n\n Parameters\n ----------\n pad_width: Mapping of the form {dim: (before, after)}\n Number of values padded to the edges of each dimension.\n **pad_widths_kwargs:\n Keyword argument for pad_widths\n \"\"\"\n pad_widths = either_dict_or_kwargs(pad_widths, pad_widths_kwargs, \"pad\")\n\n if fill_value is dtypes.NA:\n dtype, fill_value = dtypes.maybe_promote(self.dtype)\n else:\n dtype = self.dtype\n\n if isinstance(self.data, dask_array_type):\n array = self.data\n\n # Dask does not yet support pad. 
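# --- Illustrative sketch, not part of the recorded xarray source ---
# Demonstrates the Variable.shift behaviour implemented in _shift_one_dim
# above: vacated positions are filled, and the default NaN fill promotes
# integer data to float, while an explicit fill_value keeps the dtype.
import xarray as xr

var = xr.Variable(dims=("x",), data=[1, 2, 3, 4])
var.shift(x=1)                # -> [nan, 1., 2., 3.]  (promoted to float64)
var.shift(x=1, fill_value=0)  # -> [0, 1, 2, 3]       (dtype unchanged)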
We manually implement it.\n # https://github.com/dask/dask/issues/1926\n for d, pad in pad_widths.items():\n axis = self.get_axis_num(d)\n before_shape = list(array.shape)\n before_shape[axis] = pad[0]\n before_chunks = list(array.chunks)\n before_chunks[axis] = (pad[0],)\n after_shape = list(array.shape)\n after_shape[axis] = pad[1]\n after_chunks = list(array.chunks)\n after_chunks[axis] = (pad[1],)\n\n arrays = []\n if pad[0] > 0:\n arrays.append(\n da.full(\n before_shape, fill_value, dtype=dtype, chunks=before_chunks\n )\n )\n arrays.append(array)\n if pad[1] > 0:\n arrays.append(\n da.full(\n after_shape, fill_value, dtype=dtype, chunks=after_chunks\n )\n )\n if len(arrays) > 1:\n array = da.concatenate(arrays, axis=axis)\n else:\n pads = [(0, 0) if d not in pad_widths else pad_widths[d] for d in self.dims]\n array = np.pad(\n self.data.astype(dtype, copy=False),\n pads,\n mode=\"constant\",\n constant_values=fill_value,\n )\n return type(self)(self.dims, array)\n\n def _roll_one_dim(self, dim, count):\n axis = self.get_axis_num(dim)\n\n count %= self.shape[axis]\n if count != 0:\n indices = [slice(-count, None), slice(None, -count)]\n else:\n indices = [slice(None)]\n\n arrays = [self[(slice(None),) * axis + (idx,)].data for idx in indices]\n\n data = duck_array_ops.concatenate(arrays, axis)\n\n if isinstance(data, dask_array_type):\n # chunked data should come out with the same chunks; this makes\n # it feasible to combine shifted and unshifted data\n # TODO: remove this once dask.array automatically aligns chunks\n data = data.rechunk(self.data.chunks)\n\n return type(self)(self.dims, data, self._attrs, fastpath=True)\n\n def roll(self, shifts=None, **shifts_kwargs):\n \"\"\"\n Return a new Variable with rolld data.\n\n Parameters\n ----------\n shifts : mapping of the form {dim: offset}\n Integer offset to roll along each of the given dimensions.\n Positive offsets roll to the right; negative offsets roll to the\n left.\n **shifts_kwargs:\n The keyword arguments form of ``shifts``.\n One of shifts or shifts_kwarg must be provided.\n\n Returns\n -------\n shifted : Variable\n Variable with the same dimensions and attributes but rolled data.\n \"\"\"\n shifts = either_dict_or_kwargs(shifts, shifts_kwargs, \"roll\")\n\n result = self\n for dim, count in shifts.items():\n result = result._roll_one_dim(dim, count)\n return result\n\n def transpose(self, *dims) -> \"Variable\":\n \"\"\"Return a new Variable object with transposed dimensions.\n\n Parameters\n ----------\n *dims : str, optional\n By default, reverse the dimensions. Otherwise, reorder the\n dimensions to this order.\n\n Returns\n -------\n transposed : Variable\n The returned object has transposed data and dimensions with the\n same attributes as the original.\n\n Notes\n -----\n This operation returns a view of this variable's data. 
It is\n lazy for dask-backed Variables but not for numpy-backed Variables.\n\n See Also\n --------\n numpy.transpose\n \"\"\"\n if len(dims) == 0:\n dims = self.dims[::-1]\n axes = self.get_axis_num(dims)\n if len(dims) < 2: # no need to transpose if only one dimension\n return self.copy(deep=False)\n\n data = as_indexable(self._data).transpose(axes)\n return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)\n\n @property\n def T(self) -> \"Variable\":\n return self.transpose()\n\n def set_dims(self, dims, shape=None):\n \"\"\"Return a new variable with given set of dimensions.\n This method might be used to attach new dimension(s) to variable.\n\n When possible, this operation does not copy this variable's data.\n\n Parameters\n ----------\n dims : str or sequence of str or dict\n Dimensions to include on the new variable. If a dict, values are\n used to provide the sizes of new dimensions; otherwise, new\n dimensions are inserted with length 1.\n\n Returns\n -------\n Variable\n \"\"\"\n if isinstance(dims, str):\n dims = [dims]\n\n if shape is None and utils.is_dict_like(dims):\n shape = dims.values()\n\n missing_dims = set(self.dims) - set(dims)\n if missing_dims:\n raise ValueError(\n \"new dimensions %r must be a superset of \"\n \"existing dimensions %r\" % (dims, self.dims)\n )\n\n self_dims = set(self.dims)\n expanded_dims = tuple(d for d in dims if d not in self_dims) + self.dims\n\n if self.dims == expanded_dims:\n # don't use broadcast_to unless necessary so the result remains\n # writeable if possible\n expanded_data = self.data\n elif shape is not None:\n dims_map = dict(zip(dims, shape))\n tmp_shape = tuple(dims_map[d] for d in expanded_dims)\n expanded_data = duck_array_ops.broadcast_to(self.data, tmp_shape)\n else:\n expanded_data = self.data[(None,) * (len(expanded_dims) - self.ndim)]\n\n expanded_var = Variable(\n expanded_dims, expanded_data, self._attrs, self._encoding, fastpath=True\n )\n return expanded_var.transpose(*dims)\n\n def _stack_once(self, dims, new_dim):\n if not set(dims) <= set(self.dims):\n raise ValueError(\"invalid existing dimensions: %s\" % dims)\n\n if new_dim in self.dims:\n raise ValueError(\n \"cannot create a new dimension with the same \"\n \"name as an existing dimension\"\n )\n\n if len(dims) == 0:\n # don't stack\n return self.copy(deep=False)\n\n other_dims = [d for d in self.dims if d not in dims]\n dim_order = other_dims + list(dims)\n reordered = self.transpose(*dim_order)\n\n new_shape = reordered.shape[: len(other_dims)] + (-1,)\n new_data = reordered.data.reshape(new_shape)\n new_dims = reordered.dims[: len(other_dims)] + (new_dim,)\n\n return Variable(new_dims, new_data, self._attrs, self._encoding, fastpath=True)\n\n def stack(self, dimensions=None, **dimensions_kwargs):\n \"\"\"\n Stack any number of existing dimensions into a single new dimension.\n\n New dimensions will be added at the end, and the order of the data\n along each new dimension will be in contiguous (C) order.\n\n Parameters\n ----------\n dimensions : Mapping of form new_name=(dim1, dim2, ...)\n Names of new dimensions, and the existing dimensions that they\n replace.\n **dimensions_kwargs:\n The keyword arguments form of ``dimensions``.\n One of dimensions or dimensions_kwargs must be provided.\n\n Returns\n -------\n stacked : Variable\n Variable with the same attributes but stacked data.\n\n See also\n --------\n Variable.unstack\n \"\"\"\n dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, \"stack\")\n result = self\n for 
new_dim, dims in dimensions.items():\n result = result._stack_once(dims, new_dim)\n return result\n\n def _unstack_once(self, dims, old_dim):\n new_dim_names = tuple(dims.keys())\n new_dim_sizes = tuple(dims.values())\n\n if old_dim not in self.dims:\n raise ValueError(\"invalid existing dimension: %s\" % old_dim)\n\n if set(new_dim_names).intersection(self.dims):\n raise ValueError(\n \"cannot create a new dimension with the same \"\n \"name as an existing dimension\"\n )\n\n if np.prod(new_dim_sizes) != self.sizes[old_dim]:\n raise ValueError(\n \"the product of the new dimension sizes must \"\n \"equal the size of the old dimension\"\n )\n\n other_dims = [d for d in self.dims if d != old_dim]\n dim_order = other_dims + [old_dim]\n reordered = self.transpose(*dim_order)\n\n new_shape = reordered.shape[: len(other_dims)] + new_dim_sizes\n new_data = reordered.data.reshape(new_shape)\n new_dims = reordered.dims[: len(other_dims)] + new_dim_names\n\n return Variable(new_dims, new_data, self._attrs, self._encoding, fastpath=True)\n\n def unstack(self, dimensions=None, **dimensions_kwargs):\n \"\"\"\n Unstack an existing dimension into multiple new dimensions.\n\n New dimensions will be added at the end, and the order of the data\n along each new dimension will be in contiguous (C) order.\n\n Parameters\n ----------\n dimensions : mapping of the form old_dim={dim1: size1, ...}\n Names of existing dimensions, and the new dimensions and sizes\n that they map to.\n **dimensions_kwargs:\n The keyword arguments form of ``dimensions``.\n One of dimensions or dimensions_kwargs must be provided.\n\n Returns\n -------\n unstacked : Variable\n Variable with the same attributes but unstacked data.\n\n See also\n --------\n Variable.stack\n \"\"\"\n dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, \"unstack\")\n result = self\n for old_dim, dims in dimensions.items():\n result = result._unstack_once(dims, old_dim)\n return result\n\n def fillna(self, value):\n return ops.fillna(self, value)\n\n def where(self, cond, other=dtypes.NA):\n return ops.where_method(self, cond, other)\n\n def reduce(\n self,\n func,\n dim=None,\n axis=None,\n keep_attrs=None,\n keepdims=False,\n allow_lazy=False,\n **kwargs\n ):\n \"\"\"Reduce this array by applying `func` along some dimension(s).\n\n Parameters\n ----------\n func : function\n Function which can be called in the form\n `func(x, axis=axis, **kwargs)` to return the result of reducing an\n np.ndarray over an integer valued axis.\n dim : str or sequence of str, optional\n Dimension(s) over which to apply `func`.\n axis : int or sequence of int, optional\n Axis(es) over which to apply `func`. Only one of the 'dim'\n and 'axis' arguments can be supplied. If neither are supplied, then\n the reduction is calculated over the flattened array (by calling\n `func(x)` without an axis argument).\n keep_attrs : bool, optional\n If True, the variable's attributes (`attrs`) will be copied from\n the original object to the new one. 
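A minimal sketch of the stack/unstack round trip described in the docstrings above, using hypothetical dimension names; stacking flattens in C order, and unstacking restores the original layout when the new sizes multiply out to the old dimension's size:

import numpy as np
import xarray as xr

v = xr.Variable(("x", "y"), np.arange(6).reshape(2, 3))
stacked = v.stack(z=("x", "y"))                 # dims ("z",), shape (6,)
restored = stacked.unstack(z={"x": 2, "y": 3})  # dims ("x", "y"), shape (2, 3)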
If False (default), the new\n object will be returned without attributes.\n keepdims : bool, default False\n If True, the dimensions which are reduced are left in the result\n as dimensions of size one\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : Array\n Array with summarized data and the indicated dimension(s)\n removed.\n \"\"\"\n if dim is common.ALL_DIMS:\n dim = None\n if dim is not None and axis is not None:\n raise ValueError(\"cannot supply both 'axis' and 'dim' arguments\")\n\n if dim is not None:\n axis = self.get_axis_num(dim)\n input_data = self.data if allow_lazy else self.values\n if axis is not None:\n data = func(input_data, axis=axis, **kwargs)\n else:\n data = func(input_data, **kwargs)\n\n if getattr(data, \"shape\", ()) == self.shape:\n dims = self.dims\n else:\n removed_axes = (\n range(self.ndim) if axis is None else np.atleast_1d(axis) % self.ndim\n )\n if keepdims:\n # Insert np.newaxis for removed dims\n slices = tuple(\n np.newaxis if i in removed_axes else slice(None, None)\n for i in range(self.ndim)\n )\n if getattr(data, \"shape\", None) is None:\n # Reduce has produced a scalar value, not an array-like\n data = np.asanyarray(data)[slices]\n else:\n data = data[slices]\n dims = self.dims\n else:\n dims = [\n adim for n, adim in enumerate(self.dims) if n not in removed_axes\n ]\n\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n attrs = self._attrs if keep_attrs else None\n\n return Variable(dims, data, attrs=attrs)\n\n @classmethod\n def concat(cls, variables, dim=\"concat_dim\", positions=None, shortcut=False):\n \"\"\"Concatenate variables along a new or existing dimension.\n\n Parameters\n ----------\n variables : iterable of Array\n Arrays to stack together. Each variable is expected to have\n matching dimensions and shape except for along the stacked\n dimension.\n dim : str or DataArray, optional\n Name of the dimension to stack along. This can either be a new\n dimension name, in which case it is added along axis=0, or an\n existing dimension name, in which case the location of the\n dimension is unchanged. 
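As a sketch of the reduce signature documented above (data and dimension names are hypothetical), any function of the form func(x, axis=axis) can be applied:

import numpy as np
import xarray as xr

v = xr.Variable(("x", "y"), np.arange(6.0).reshape(2, 3))
row_sums = v.reduce(np.sum, dim="y")  # dims ("x",), values [3., 12.]
total = v.reduce(np.sum)              # no dim/axis: reduces the flattened array to a scalar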
Where to insert the new dimension is\n determined by the first variable.\n positions : None or list of integer arrays, optional\n List of integer arrays which specifies the integer positions to\n which to assign each dataset along the concatenated dimension.\n If not supplied, objects are concatenated in the provided order.\n shortcut : bool, optional\n This option is used internally to speed-up groupby operations.\n If `shortcut` is True, some checks of internal consistency between\n arrays to concatenate are skipped.\n\n Returns\n -------\n stacked : Variable\n Concatenated Variable formed by stacking all the supplied variables\n along the given dimension.\n \"\"\"\n if not isinstance(dim, str):\n dim, = dim.dims\n\n # can't do this lazily: we need to loop through variables at least\n # twice\n variables = list(variables)\n first_var = variables[0]\n\n arrays = [v.data for v in variables]\n\n if dim in first_var.dims:\n axis = first_var.get_axis_num(dim)\n dims = first_var.dims\n data = duck_array_ops.concatenate(arrays, axis=axis)\n if positions is not None:\n # TODO: deprecate this option -- we don't need it for groupby\n # any more.\n indices = nputils.inverse_permutation(np.concatenate(positions))\n data = duck_array_ops.take(data, indices, axis=axis)\n else:\n axis = 0\n dims = (dim,) + first_var.dims\n data = duck_array_ops.stack(arrays, axis=axis)\n\n attrs = dict(first_var.attrs)\n encoding = dict(first_var.encoding)\n if not shortcut:\n for var in variables:\n if var.dims != first_var.dims:\n raise ValueError(\"inconsistent dimensions\")\n utils.remove_incompatible_items(attrs, var.attrs)\n\n return cls(dims, data, attrs, encoding)\n\n def equals(self, other, equiv=duck_array_ops.array_equiv):\n \"\"\"True if two Variables have the same dimensions and values;\n otherwise False.\n\n Variables can still be equal (like pandas objects) if they have NaN\n values in the same locations.\n\n This method is necessary because `v1 == v2` for Variables\n does element-wise comparisons (like numpy.ndarrays).\n \"\"\"\n other = getattr(other, \"variable\", other)\n try:\n return self.dims == other.dims and (\n self._data is other._data or equiv(self.data, other.data)\n )\n except (TypeError, AttributeError):\n return False\n\n def broadcast_equals(self, other, equiv=duck_array_ops.array_equiv):\n \"\"\"True if two Variables have the values after being broadcast against\n each other; otherwise False.\n\n Variables can still be equal (like pandas objects) if they have NaN\n values in the same locations.\n \"\"\"\n try:\n self, other = broadcast_variables(self, other)\n except (ValueError, AttributeError):\n return False\n return self.equals(other, equiv=equiv)\n\n def identical(self, other):\n \"\"\"Like equals, but also checks attributes.\n \"\"\"\n try:\n return utils.dict_equiv(self.attrs, other.attrs) and self.equals(other)\n except (TypeError, AttributeError):\n return False\n\n def no_conflicts(self, other):\n \"\"\"True if the intersection of two Variable's non-null data is\n equal; otherwise false.\n\n Variables can thus still be equal if there are locations where either,\n or both, contain NaN values.\n \"\"\"\n return self.broadcast_equals(other, equiv=duck_array_ops.array_notnull_equiv)\n\n def quantile(self, q, dim=None, interpolation=\"linear\", keep_attrs=None):\n \"\"\"Compute the qth quantile of the data along the specified dimension.\n\n Returns the qth quantiles(s) of the array elements.\n\n Parameters\n ----------\n q : float in range of [0,1] (or sequence of floats)\n 
Quantile to compute, which must be between 0 and 1\n inclusive.\n dim : str or sequence of str, optional\n Dimension(s) over which to apply quantile.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to\n use when the desired quantile lies between two data points\n ``i < j``:\n * linear: ``i + (j - i) * fraction``, where ``fraction`` is\n the fractional part of the index surrounded by ``i`` and\n ``j``.\n * lower: ``i``.\n * higher: ``j``.\n * nearest: ``i`` or ``j``, whichever is nearest.\n * midpoint: ``(i + j) / 2``.\n keep_attrs : bool, optional\n If True, the variable's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n\n Returns\n -------\n quantiles : Variable\n If `q` is a single quantile, then the result\n is a scalar. If multiple percentiles are given, first axis of\n the result corresponds to the quantile and a quantile dimension\n is added to the return array. The other dimensions are the\n dimensions that remain after the reduction of the array.\n\n See Also\n --------\n numpy.nanpercentile, pandas.Series.quantile, Dataset.quantile,\n DataArray.quantile\n \"\"\"\n if isinstance(self.data, dask_array_type):\n raise TypeError(\n \"quantile does not work for arrays stored as dask \"\n \"arrays. Load the data via .compute() or .load() \"\n \"prior to calling this method.\"\n )\n\n q = np.asarray(q, dtype=np.float64)\n\n new_dims = list(self.dims)\n if dim is not None:\n axis = self.get_axis_num(dim)\n if utils.is_scalar(dim):\n new_dims.remove(dim)\n else:\n for d in dim:\n new_dims.remove(d)\n else:\n axis = None\n new_dims = []\n\n # Only add the quantile dimension if q is array-like\n if q.ndim != 0:\n new_dims = [\"quantile\"] + new_dims\n\n qs = np.nanpercentile(\n self.data, q * 100.0, axis=axis, interpolation=interpolation\n )\n\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n attrs = self._attrs if keep_attrs else None\n\n return Variable(new_dims, qs, attrs)\n\n def rank(self, dim, pct=False):\n \"\"\"Ranks the data.\n\n Equal values are assigned a rank that is the average of the ranks that\n would have been otherwise assigned to all of the values within that\n set. Ranks begin at 1, not 0. If `pct`, computes percentage ranks.\n\n NaNs in the input array are returned as NaNs.\n\n The `bottleneck` library is required.\n\n Parameters\n ----------\n dim : str\n Dimension over which to compute rank.\n pct : bool, optional\n If True, compute percentage ranks, otherwise compute integer ranks.\n\n Returns\n -------\n ranked : Variable\n\n See Also\n --------\n Dataset.rank, DataArray.rank\n \"\"\"\n import bottleneck as bn\n\n data = self.data\n\n if isinstance(data, dask_array_type):\n raise TypeError(\n \"rank does not work for arrays stored as dask \"\n \"arrays. 
Load the data via .compute() or .load() \"\n \"prior to calling this method.\"\n )\n elif not isinstance(data, np.ndarray):\n raise TypeError(\n \"rank is not implemented for {} objects.\".format(type(data))\n )\n\n axis = self.get_axis_num(dim)\n func = bn.nanrankdata if self.dtype.kind == \"f\" else bn.rankdata\n ranked = func(data, axis=axis)\n if pct:\n count = np.sum(~np.isnan(data), axis=axis, keepdims=True)\n ranked /= count\n return Variable(self.dims, ranked)\n\n def rolling_window(\n self, dim, window, window_dim, center=False, fill_value=dtypes.NA\n ):\n \"\"\"\n Make a rolling_window along dim and add a new_dim to the last place.\n\n Parameters\n ----------\n dim: str\n Dimension over which to compute rolling_window\n window: int\n Window size of the rolling\n window_dim: str\n New name of the window dimension.\n center: boolean. default False.\n If True, pad fill_value for both ends. Otherwise, pad in the head\n of the axis.\n fill_value:\n value to be filled.\n\n Returns\n -------\n Variable that is a view of the original array with a added dimension of\n size w.\n The return dim: self.dims + (window_dim, )\n The return shape: self.shape + (window, )\n\n Examples\n --------\n >>> v=Variable(('a', 'b'), np.arange(8).reshape((2,4)))\n >>> v.rolling_window(x, 'b', 3, 'window_dim')\n \n array([[[nan, nan, 0], [nan, 0, 1], [0, 1, 2], [1, 2, 3]],\n [[nan, nan, 4], [nan, 4, 5], [4, 5, 6], [5, 6, 7]]])\n\n >>> v.rolling_window(x, 'b', 3, 'window_dim', center=True)\n \n array([[[nan, 0, 1], [0, 1, 2], [1, 2, 3], [2, 3, nan]],\n [[nan, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, nan]]])\n \"\"\"\n if fill_value is dtypes.NA: # np.nan is passed\n dtype, fill_value = dtypes.maybe_promote(self.dtype)\n array = self.astype(dtype, copy=False).data\n else:\n dtype = self.dtype\n array = self.data\n\n new_dims = self.dims + (window_dim,)\n return Variable(\n new_dims,\n duck_array_ops.rolling_window(\n array,\n axis=self.get_axis_num(dim),\n window=window,\n center=center,\n fill_value=fill_value,\n ),\n )\n\n def coarsen(self, windows, func, boundary=\"exact\", side=\"left\"):\n \"\"\"\n Apply\n \"\"\"\n windows = {k: v for k, v in windows.items() if k in self.dims}\n if not windows:\n return self.copy()\n\n reshaped, axes = self._coarsen_reshape(windows, boundary, side)\n if isinstance(func, str):\n name = func\n func = getattr(duck_array_ops, name, None)\n if func is None:\n raise NameError(\"{} is not a valid method.\".format(name))\n return type(self)(self.dims, func(reshaped, axis=axes), self._attrs)\n\n def _coarsen_reshape(self, windows, boundary, side):\n \"\"\"\n Construct a reshaped-array for corsen\n \"\"\"\n if not utils.is_dict_like(boundary):\n boundary = {d: boundary for d in windows.keys()}\n\n if not utils.is_dict_like(side):\n side = {d: side for d in windows.keys()}\n\n # remove unrelated dimensions\n boundary = {k: v for k, v in boundary.items() if k in windows}\n side = {k: v for k, v in side.items() if k in windows}\n\n for d, window in windows.items():\n if window <= 0:\n raise ValueError(\"window must be > 0. 
Given {}\".format(window))\n\n variable = self\n for d, window in windows.items():\n # trim or pad the object\n size = variable.shape[self._get_axis_num(d)]\n n = int(size / window)\n if boundary[d] == \"exact\":\n if n * window != size:\n raise ValueError(\n \"Could not coarsen a dimension of size {} with \"\n \"window {}\".format(size, window)\n )\n elif boundary[d] == \"trim\":\n if side[d] == \"left\":\n variable = variable.isel({d: slice(0, window * n)})\n else:\n excess = size - window * n\n variable = variable.isel({d: slice(excess, None)})\n elif boundary[d] == \"pad\": # pad\n pad = window * n - size\n if pad < 0:\n pad += window\n if side[d] == \"left\":\n pad_widths = {d: (0, pad)}\n else:\n pad_widths = {d: (pad, 0)}\n variable = variable.pad_with_fill_value(pad_widths)\n else:\n raise TypeError(\n \"{} is invalid for boundary. Valid option is 'exact', \"\n \"'trim' and 'pad'\".format(boundary[d])\n )\n\n shape = []\n axes = []\n axis_count = 0\n for i, d in enumerate(variable.dims):\n if d in windows:\n size = variable.shape[i]\n shape.append(int(size / windows[d]))\n shape.append(windows[d])\n axis_count += 1\n axes.append(i + axis_count)\n else:\n shape.append(variable.shape[i])\n\n return variable.data.reshape(shape), tuple(axes)\n\n @property\n def real(self):\n return type(self)(self.dims, self.data.real, self._attrs)\n\n @property\n def imag(self):\n return type(self)(self.dims, self.data.imag, self._attrs)\n\n def __array_wrap__(self, obj, context=None):\n return Variable(self.dims, obj)\n\n @staticmethod\n def _unary_op(f):\n @functools.wraps(f)\n def func(self, *args, **kwargs):\n with np.errstate(all=\"ignore\"):\n return self.__array_wrap__(f(self.data, *args, **kwargs))\n\n return func\n\n @staticmethod\n def _binary_op(f, reflexive=False, **ignored_kwargs):\n @functools.wraps(f)\n def func(self, other):\n if isinstance(other, (xr.DataArray, xr.Dataset)):\n return NotImplemented\n self_data, other_data, dims = _broadcast_compat_data(self, other)\n keep_attrs = _get_keep_attrs(default=False)\n attrs = self._attrs if keep_attrs else None\n with np.errstate(all=\"ignore\"):\n new_data = (\n f(self_data, other_data)\n if not reflexive\n else f(other_data, self_data)\n )\n result = Variable(dims, new_data, attrs=attrs)\n return result\n\n return func\n\n @staticmethod\n def _inplace_binary_op(f):\n @functools.wraps(f)\n def func(self, other):\n if isinstance(other, xr.Dataset):\n raise TypeError(\"cannot add a Dataset to a Variable in-place\")\n self_data, other_data, dims = _broadcast_compat_data(self, other)\n if dims != self.dims:\n raise ValueError(\"dimensions cannot change for in-place \" \"operations\")\n with np.errstate(all=\"ignore\"):\n self.values = f(self_data, other_data)\n return self\n\n return func\n\n def _to_numeric(self, offset=None, datetime_unit=None, dtype=float):\n \"\"\" A (private) method to convert datetime array to numeric dtype\n See duck_array_ops.datetime_to_numeric\n \"\"\"\n numeric_array = duck_array_ops.datetime_to_numeric(\n self.data, offset, datetime_unit, dtype\n )\n return type(self)(self.dims, numeric_array, self._attrs)\n\n\nops.inject_all_ops_and_reduce_methods(Variable)\n\n\nclass IndexVariable(Variable):\n \"\"\"Wrapper for accommodating a pandas.Index in an xarray.Variable.\n\n IndexVariable preserve loaded values in the form of a pandas.Index instead\n of a NumPy array. 
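The coarsen/_coarsen_reshape pair above block-reduces along the given windows. A minimal sketch with a hypothetical variable; note this is the low-level Variable helper that backs the public DataArray/Dataset coarsen:

import numpy as np
import xarray as xr

v = xr.Variable(("t",), np.arange(6.0))
# boundary="exact" (the default): size 6 divides evenly into 3 windows of width 2
coarse = v.coarsen({"t": 2}, func="mean")  # values [0.5, 2.5, 4.5]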
Hence, their values are immutable and must always be one-\n dimensional.\n\n They also have a name property, which is the name of their sole dimension\n unless another name is given.\n \"\"\"\n\n __slots__ = ()\n\n def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):\n super().__init__(dims, data, attrs, encoding, fastpath)\n if self.ndim != 1:\n raise ValueError(\"%s objects must be 1-dimensional\" % type(self).__name__)\n\n # Unlike in Variable, always eagerly load values into memory\n if not isinstance(self._data, PandasIndexAdapter):\n self._data = PandasIndexAdapter(self._data)\n\n def load(self):\n # data is already loaded into memory for IndexVariable\n return self\n\n # https://github.com/python/mypy/issues/1465\n @Variable.data.setter # type: ignore\n def data(self, data):\n Variable.data.fset(self, data)\n if not isinstance(self._data, PandasIndexAdapter):\n self._data = PandasIndexAdapter(self._data)\n\n def chunk(self, chunks=None, name=None, lock=False):\n # Dummy - do not chunk. This method is invoked e.g. by Dataset.chunk()\n return self.copy(deep=False)\n\n def _finalize_indexing_result(self, dims, data):\n if getattr(data, \"ndim\", 0) != 1:\n # returns Variable rather than IndexVariable if multi-dimensional\n return Variable(dims, data, self._attrs, self._encoding)\n else:\n return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)\n\n def __setitem__(self, key, value):\n raise TypeError(\"%s values cannot be modified\" % type(self).__name__)\n\n @classmethod\n def concat(cls, variables, dim=\"concat_dim\", positions=None, shortcut=False):\n \"\"\"Specialized version of Variable.concat for IndexVariable objects.\n\n This exists because we want to avoid converting Index objects to NumPy\n arrays, if possible.\n \"\"\"\n if not isinstance(dim, str):\n dim, = dim.dims\n\n variables = list(variables)\n first_var = variables[0]\n\n if any(not isinstance(v, cls) for v in variables):\n raise TypeError(\n \"IndexVariable.concat requires that all input \"\n \"variables be IndexVariable objects\"\n )\n\n indexes = [v._data.array for v in variables]\n\n if not indexes:\n data = []\n else:\n data = indexes[0].append(indexes[1:])\n\n if positions is not None:\n indices = nputils.inverse_permutation(np.concatenate(positions))\n data = data.take(indices)\n\n attrs = dict(first_var.attrs)\n if not shortcut:\n for var in variables:\n if var.dims != first_var.dims:\n raise ValueError(\"inconsistent dimensions\")\n utils.remove_incompatible_items(attrs, var.attrs)\n\n return cls(first_var.dims, data, attrs)\n\n def copy(self, deep=True, data=None):\n \"\"\"Returns a copy of this object.\n\n `deep` is ignored since data is stored in the form of\n pandas.Index, which is already immutable. Dimensions, attributes\n and encodings are always copied.\n\n Use `data` to create a new object with the same structure as\n original but entirely new data.\n\n Parameters\n ----------\n deep : bool, optional\n Deep is ignored when data is given. Whether the data array is\n loaded into memory and copied onto the new object. Default is True.\n data : array_like, optional\n Data to use in the new object. 
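A brief sketch of the IndexVariable behaviour described above, with hypothetical values: data is held as a pandas.Index, loads eagerly, and rejects in-place modification:

import pandas as pd
import xarray as xr

idx = xr.IndexVariable("x", pd.Index([10, 20, 30]))
idx.to_index()   # pandas.Index([10, 20, 30], dtype='int64', name='x')
# idx[0] = 0     # would raise TypeError: IndexVariable values cannot be modified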
Must have same shape as original.\n\n Returns\n -------\n object : Variable\n New object with dimensions, attributes, encodings, and optionally\n data copied from original.\n \"\"\"\n if data is None:\n data = self._data.copy(deep=deep)\n else:\n data = as_compatible_data(data)\n if self.shape != data.shape:\n raise ValueError(\n \"Data shape {} must match shape of object {}\".format(\n data.shape, self.shape\n )\n )\n return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True)\n\n def equals(self, other, equiv=None):\n # if equiv is specified, super up\n if equiv is not None:\n return super().equals(other, equiv)\n\n # otherwise use the native index equals, rather than looking at _data\n other = getattr(other, \"variable\", other)\n try:\n return self.dims == other.dims and self._data_equals(other)\n except (TypeError, AttributeError):\n return False\n\n def _data_equals(self, other):\n return self.to_index().equals(other.to_index())\n\n def to_index_variable(self):\n \"\"\"Return this variable as an xarray.IndexVariable\"\"\"\n return self\n\n to_coord = utils.alias(to_index_variable, \"to_coord\")\n\n def to_index(self):\n \"\"\"Convert this variable to a pandas.Index\"\"\"\n # n.b. creating a new pandas.Index from an old pandas.Index is\n # basically free as pandas.Index objects are immutable\n assert self.ndim == 1\n index = self._data.array\n if isinstance(index, pd.MultiIndex):\n # set default names for multi-index unnamed levels so that\n # we can safely rename dimension / coordinate later\n valid_level_names = [\n name or \"{}_level_{}\".format(self.dims[0], i)\n for i, name in enumerate(index.names)\n ]\n index = index.set_names(valid_level_names)\n else:\n index = index.set_names(self.name)\n return index\n\n @property\n def level_names(self):\n \"\"\"Return MultiIndex level names or None if this IndexVariable has no\n MultiIndex.\n \"\"\"\n index = self.to_index()\n if isinstance(index, pd.MultiIndex):\n return index.names\n else:\n return None\n\n def get_level_variable(self, level):\n \"\"\"Return a new IndexVariable from a given MultiIndex level.\"\"\"\n if self.level_names is None:\n raise ValueError(\"IndexVariable %r has no MultiIndex\" % self.name)\n index = self.to_index()\n return type(self)(self.dims, index.get_level_values(level))\n\n @property\n def name(self):\n return self.dims[0]\n\n @name.setter\n def name(self, value):\n raise AttributeError(\"cannot modify name of IndexVariable in-place\")\n\n\n# for backwards compatibility\nCoordinate = utils.alias(IndexVariable, \"Coordinate\")\n\n\ndef _unified_dims(variables):\n # validate dimensions\n all_dims = {}\n for var in variables:\n var_dims = var.dims\n if len(set(var_dims)) < len(var_dims):\n raise ValueError(\n \"broadcasting cannot handle duplicate \"\n \"dimensions: %r\" % list(var_dims)\n )\n for d, s in zip(var_dims, var.shape):\n if d not in all_dims:\n all_dims[d] = s\n elif all_dims[d] != s:\n raise ValueError(\n \"operands cannot be broadcast together \"\n \"with mismatched lengths for dimension %r: %s\"\n % (d, (all_dims[d], s))\n )\n return all_dims\n\n\ndef _broadcast_compat_variables(*variables):\n \"\"\"Create broadcast compatible variables, with the same dimensions.\n\n Unlike the result of broadcast_variables(), some variables may have\n dimensions of size 1 instead of the the size of the broadcast dimension.\n \"\"\"\n dims = tuple(_unified_dims(variables))\n return tuple(var.set_dims(dims) if var.dims != dims else var for var in variables)\n\n\ndef 
broadcast_variables(*variables):\n \"\"\"Given any number of variables, return variables with matching dimensions\n and broadcast data.\n\n The data on the returned variables will be a view of the data on the\n corresponding original arrays, but dimensions will be reordered and\n inserted so that both broadcast arrays have the same dimensions. The new\n dimensions are sorted in order of appearance in the first variable's\n dimensions followed by the second variable's dimensions.\n \"\"\"\n dims_map = _unified_dims(variables)\n dims_tuple = tuple(dims_map)\n return tuple(\n var.set_dims(dims_map) if var.dims != dims_tuple else var for var in variables\n )\n\n\ndef _broadcast_compat_data(self, other):\n if all(hasattr(other, attr) for attr in [\"dims\", \"data\", \"shape\", \"encoding\"]):\n # `other` satisfies the necessary Variable API for broadcast_variables\n new_self, new_other = _broadcast_compat_variables(self, other)\n self_data = new_self.data\n other_data = new_other.data\n dims = new_self.dims\n else:\n # rely on numpy broadcasting rules\n self_data = self.data\n other_data = other\n dims = self.dims\n return self_data, other_data, dims\n\n\ndef concat(variables, dim=\"concat_dim\", positions=None, shortcut=False):\n \"\"\"Concatenate variables along a new or existing dimension.\n\n Parameters\n ----------\n variables : iterable of Array\n Arrays to stack together. Each variable is expected to have\n matching dimensions and shape except for along the stacked\n dimension.\n dim : str or DataArray, optional\n Name of the dimension to stack along. This can either be a new\n dimension name, in which case it is added along axis=0, or an\n existing dimension name, in which case the location of the\n dimension is unchanged. Where to insert the new dimension is\n determined by the first variable.\n positions : None or list of integer arrays, optional\n List of integer arrays which specifies the integer positions to which\n to assign each dataset along the concatenated dimension. If not\n supplied, objects are concatenated in the provided order.\n shortcut : bool, optional\n This option is used internally to speed-up groupby operations.\n If `shortcut` is True, some checks of internal consistency between\n arrays to concatenate are skipped.\n\n Returns\n -------\n stacked : Variable\n Concatenated Variable formed by stacking all the supplied variables\n along the given dimension.\n \"\"\"\n variables = list(variables)\n if all(isinstance(v, IndexVariable) for v in variables):\n return IndexVariable.concat(variables, dim, positions, shortcut)\n else:\n return Variable.concat(variables, dim, positions, shortcut)\n\n\ndef assert_unique_multiindex_level_names(variables):\n \"\"\"Check for uniqueness of MultiIndex level names in all given\n variables.\n\n Not public API. 
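A minimal sketch of broadcast_variables as documented above; these helpers live in xarray.core.variable and are not public API, and the inputs here are hypothetical:

import numpy as np
from xarray.core.variable import Variable, broadcast_variables

a = Variable(("x",), np.arange(3))
b = Variable(("y",), np.arange(2))
a2, b2 = broadcast_variables(a, b)
# Both results have dims ("x", "y") and shape (3, 2); the data are broadcast views.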
Used for checking consistency of DataArray and Dataset\n objects.\n \"\"\"\n level_names = defaultdict(list)\n all_level_names = set()\n for var_name, var in variables.items():\n if isinstance(var._data, PandasIndexAdapter):\n idx_level_names = var.to_index_variable().level_names\n if idx_level_names is not None:\n for n in idx_level_names:\n level_names[n].append(\"%r (%s)\" % (n, var_name))\n if idx_level_names:\n all_level_names.update(idx_level_names)\n\n for k, v in level_names.items():\n if k in variables:\n v.append(\"(%s)\" % k)\n\n duplicate_names = [v for v in level_names.values() if len(v) > 1]\n if duplicate_names:\n conflict_str = \"\\n\".join([\", \".join(v) for v in duplicate_names])\n raise ValueError(\"conflicting MultiIndex level name(s):\\n%s\" % conflict_str)\n # Check confliction between level names and dimensions GH:2299\n for k, v in variables.items():\n for d in v.dims:\n if d in all_level_names:\n raise ValueError(\n \"conflicting level / dimension names. {} \"\n \"already exists as a level name.\".format(d)\n )\n"},{"col":4,"comment":"null","endLoc":107,"header":"def __eq__(self, other)","id":3016,"name":"__eq__","nodeType":"Function","startLoc":100,"text":"def __eq__(self, other):\n try:\n return (\n self.input_core_dims == other.input_core_dims\n and self.output_core_dims == other.output_core_dims\n )\n except AttributeError:\n return False"},{"col":4,"comment":"null","endLoc":110,"header":"def __ne__(self, other)","id":3017,"name":"__ne__","nodeType":"Function","startLoc":109,"text":"def __ne__(self, other):\n return not self == other"},{"col":4,"comment":"null","endLoc":117,"header":"def __repr__(self)","id":3018,"name":"__repr__","nodeType":"Function","startLoc":112,"text":"def __repr__(self):\n return \"%s(%r, %r)\" % (\n type(self).__name__,\n list(self.input_core_dims),\n list(self.output_core_dims),\n )"},{"attributeType":"null","col":0,"comment":"null","endLoc":14,"id":3019,"name":"RASTERIO_LOCK","nodeType":"Attribute","startLoc":14,"text":"RASTERIO_LOCK"},{"className":"MissingDimensionsError","col":0,"comment":"Error class used when we can't safely guess a dimension name.\n ","endLoc":62,"id":3020,"nodeType":"Class","startLoc":57,"text":"class MissingDimensionsError(ValueError):\n \"\"\"Error class used when we can't safely guess a dimension name.\n \"\"\"\n\n # inherits from ValueError for backward compatibility\n # TODO: move this to an xarray.exceptions module?"},{"col":0,"comment":"Return the given values as a numpy array, or as an individual item if\n it's a 0d datetime64 or timedelta64 array.\n\n Importantly, this function does not copy data if it is already an ndarray -\n otherwise, it will not be possible to update Variable values in place.\n\n This function mostly exists because 0-dimensional ndarrays with\n dtype=datetime64 are broken :(\n https://github.com/numpy/numpy/issues/4337\n https://github.com/numpy/numpy/issues/7619\n\n TODO: remove this (replace with np.asarray) once these issues are fixed\n ","endLoc":256,"header":"def _as_array_or_item(data)","id":3021,"name":"_as_array_or_item","nodeType":"Function","startLoc":236,"text":"def _as_array_or_item(data):\n \"\"\"Return the given values as a numpy array, or as an individual item if\n it's a 0d datetime64 or timedelta64 array.\n\n Importantly, this function does not copy data if it is already an ndarray -\n otherwise, it will not be possible to update Variable values in place.\n\n This function mostly exists because 0-dimensional ndarrays with\n dtype=datetime64 are broken :(\n 
https://github.com/numpy/numpy/issues/4337\n https://github.com/numpy/numpy/issues/7619\n\n TODO: remove this (replace with np.asarray) once these issues are fixed\n \"\"\"\n data = np.asarray(data)\n if data.ndim == 0:\n if data.dtype.kind == \"M\":\n data = np.datetime64(data, \"ns\")\n elif data.dtype.kind == \"m\":\n data = np.timedelta64(data, \"ns\")\n return data"},{"col":4,"comment":"null","endLoc":122,"header":"def __str__(self)","id":3022,"name":"__str__","nodeType":"Function","startLoc":119,"text":"def __str__(self):\n lhs = \",\".join(\"({})\".format(\",\".join(dims)) for dims in self.input_core_dims)\n rhs = \",\".join(\"({})\".format(\",\".join(dims)) for dims in self.output_core_dims)\n return \"{}->{}\".format(lhs, rhs)"},{"col":4,"comment":"Create an equivalent signature string for a NumPy gufunc.\n\n Unlike __str__, handles dimensions that don't map to Python\n identifiers.\n ","endLoc":141,"header":"def to_gufunc_string(self)","id":3023,"name":"to_gufunc_string","nodeType":"Function","startLoc":124,"text":"def to_gufunc_string(self):\n \"\"\"Create an equivalent signature string for a NumPy gufunc.\n\n Unlike __str__, handles dimensions that don't map to Python\n identifiers.\n \"\"\"\n all_dims = self.all_core_dims\n dims_map = dict(zip(sorted(all_dims), range(len(all_dims))))\n input_core_dims = [\n [\"dim%d\" % dims_map[dim] for dim in core_dims]\n for core_dims in self.input_core_dims\n ]\n output_core_dims = [\n [\"dim%d\" % dims_map[dim] for dim in core_dims]\n for core_dims in self.output_core_dims\n ]\n alt_signature = type(self)(input_core_dims, output_core_dims)\n return str(alt_signature)"},{"col":0,"comment":"inverse of ffill","endLoc":288,"header":"def _bfill(arr, n=None, axis=-1)","id":3024,"name":"_bfill","nodeType":"Function","startLoc":278,"text":"def _bfill(arr, n=None, axis=-1):\n \"\"\"inverse of ffill\"\"\"\n import bottleneck as bn\n\n arr = np.flip(arr, axis=axis)\n\n # fill\n arr = bn.push(arr, axis=axis, n=n)\n\n # reverse back to original\n return np.flip(arr, axis=axis)"},{"col":4,"comment":"Plot data on a map.","endLoc":23,"header":"def plot(self)","id":3025,"name":"plot","nodeType":"Function","startLoc":21,"text":"def plot(self):\n \"\"\"Plot data on a map.\"\"\"\n return \"plotting!\""},{"attributeType":"null","col":8,"comment":"null","endLoc":7,"id":3026,"name":"_obj","nodeType":"Attribute","startLoc":7,"text":"self._obj"},{"attributeType":"None","col":8,"comment":"null","endLoc":8,"id":3027,"name":"_center","nodeType":"Attribute","startLoc":8,"text":"self._center"},{"attributeType":"null","col":17,"comment":"null","endLoc":1,"id":3028,"name":"xr","nodeType":"Attribute","startLoc":1,"text":"xr"},{"col":0,"comment":"Create broadcast compatible variables, with the same dimensions.\n\n Unlike the result of broadcast_variables(), some variables may have\n dimensions of size 1 instead of the the size of the broadcast dimension.\n ","endLoc":2163,"header":"def _broadcast_compat_variables(*variables)","id":3029,"name":"_broadcast_compat_variables","nodeType":"Function","startLoc":2156,"text":"def _broadcast_compat_variables(*variables):\n \"\"\"Create broadcast compatible variables, with the same dimensions.\n\n Unlike the result of broadcast_variables(), some variables may have\n dimensions of size 1 instead of the the size of the broadcast dimension.\n \"\"\"\n dims = tuple(_unified_dims(variables))\n return tuple(var.set_dims(dims) if var.dims != dims else var for var in 
variables)"},{"attributeType":"null","col":0,"comment":"null","endLoc":16,"id":3030,"name":"_ERROR_MSG","nodeType":"Attribute","startLoc":16,"text":"_ERROR_MSG"},{"col":0,"comment":"","endLoc":1,"header":"rasterio_.py#","id":3031,"name":"","nodeType":"Function","startLoc":1,"text":"RASTERIO_LOCK = SerializableLock()\n\n_ERROR_MSG = (\n \"The kind of indexing operation you are trying to do is not \"\n \"valid on rasterio files. Try to load your data with ds.load()\"\n \"first.\"\n)"},{"attributeType":"null","col":4,"comment":"null","endLoc":55,"id":3032,"name":"__slots__","nodeType":"Attribute","startLoc":55,"text":"__slots__"},{"col":0,"comment":"null","endLoc":2195,"header":"def _broadcast_compat_data(self, other)","id":3033,"name":"_broadcast_compat_data","nodeType":"Function","startLoc":2183,"text":"def _broadcast_compat_data(self, other):\n if all(hasattr(other, attr) for attr in [\"dims\", \"data\", \"shape\", \"encoding\"]):\n # `other` satisfies the necessary Variable API for broadcast_variables\n new_self, new_other = _broadcast_compat_variables(self, other)\n self_data = new_self.data\n other_data = new_other.data\n dims = new_self.dims\n else:\n # rely on numpy broadcasting rules\n self_data = self.data\n other_data = other\n dims = self.dims\n return self_data, other_data, dims"},{"attributeType":"null","col":8,"comment":"null","endLoc":65,"id":3034,"name":"output_core_dims","nodeType":"Attribute","startLoc":65,"text":"self.output_core_dims"},{"fileName":"parallel.py","filePath":"xarray/core","id":3035,"nodeType":"File","text":"try:\n import dask\n import dask.array\n from dask.highlevelgraph import HighLevelGraph\n from .dask_array_compat import meta_from_array\n\nexcept ImportError:\n pass\n\nimport itertools\nimport operator\nfrom typing import (\n Any,\n Callable,\n Dict,\n Hashable,\n Mapping,\n Sequence,\n Tuple,\n TypeVar,\n Union,\n)\n\nimport numpy as np\n\nfrom .dataarray import DataArray\nfrom .dataset import Dataset\n\nT_DSorDA = TypeVar(\"T_DSorDA\", DataArray, Dataset)\n\n\ndef dataset_to_dataarray(obj: Dataset) -> DataArray:\n if not isinstance(obj, Dataset):\n raise TypeError(\"Expected Dataset, got %s\" % type(obj))\n\n if len(obj.data_vars) > 1:\n raise TypeError(\n \"Trying to convert Dataset with more than one data variable to DataArray\"\n )\n\n return next(iter(obj.data_vars.values()))\n\n\ndef make_meta(obj):\n \"\"\"If obj is a DataArray or Dataset, return a new object of the same type and with\n the same variables and dtypes, but where all variables have size 0 and numpy\n backend.\n If obj is neither a DataArray nor Dataset, return it unaltered.\n \"\"\"\n if isinstance(obj, DataArray):\n obj_array = obj\n obj = obj._to_temp_dataset()\n elif isinstance(obj, Dataset):\n obj_array = None\n else:\n return obj\n\n meta = Dataset()\n for name, variable in obj.variables.items():\n meta_obj = meta_from_array(variable.data, ndim=variable.ndim)\n meta[name] = (variable.dims, meta_obj, variable.attrs)\n meta.attrs = obj.attrs\n meta = meta.set_coords(obj.coords)\n\n if obj_array is not None:\n return obj_array._from_temp_dataset(meta)\n return meta\n\n\ndef infer_template(\n func: Callable[..., T_DSorDA], obj: Union[DataArray, Dataset], *args, **kwargs\n) -> T_DSorDA:\n \"\"\"Infer return object by running the function on meta objects.\n \"\"\"\n meta_args = [make_meta(arg) for arg in (obj,) + args]\n\n try:\n template = func(*meta_args, **kwargs)\n except Exception as e:\n raise Exception(\n \"Cannot infer object returned from running user provided function.\"\n 
) from e\n\n if not isinstance(template, (Dataset, DataArray)):\n raise TypeError(\n \"Function must return an xarray DataArray or Dataset. Instead it returned \"\n f\"{type(template)}\"\n )\n\n return template\n\n\ndef make_dict(x: Union[DataArray, Dataset]) -> Dict[Hashable, Any]:\n \"\"\"Map variable name to numpy(-like) data\n (Dataset.to_dict() is too complicated).\n \"\"\"\n if isinstance(x, DataArray):\n x = x._to_temp_dataset()\n\n return {k: v.data for k, v in x.variables.items()}\n\n\ndef map_blocks(\n func: Callable[..., T_DSorDA],\n obj: Union[DataArray, Dataset],\n args: Sequence[Any] = (),\n kwargs: Mapping[str, Any] = None,\n) -> T_DSorDA:\n \"\"\"Apply a function to each chunk of a DataArray or Dataset. This function is\n experimental and its signature may change.\n\n Parameters\n ----------\n func: callable\n User-provided function that accepts a DataArray or Dataset as its first\n parameter. The function will receive a subset of 'obj' (see below),\n corresponding to one chunk along each chunked dimension. ``func`` will be\n executed as ``func(obj_subset, *args, **kwargs)``.\n\n The function will be first run on mocked-up data, that looks like 'obj' but\n has sizes 0, to determine properties of the returned object such as dtype,\n variable names, new dimensions and new indexes (if any).\n\n This function must return either a single DataArray or a single Dataset.\n\n This function cannot change size of existing dimensions, or add new chunked\n dimensions.\n obj: DataArray, Dataset\n Passed to the function as its first argument, one dask chunk at a time.\n args: Sequence\n Passed verbatim to func after unpacking, after the sliced obj. xarray objects,\n if any, will not be split by chunks. Passing dask collections is not allowed.\n kwargs: Mapping\n Passed verbatim to func after unpacking. xarray objects, if any, will not be\n split by chunks. Passing dask collections is not allowed.\n\n Returns\n -------\n A single DataArray or Dataset with dask backend, reassembled from the outputs of the\n function.\n\n Notes\n -----\n This function is designed for when one needs to manipulate a whole xarray object\n within each chunk. In the more common case where one can work on numpy arrays, it is\n recommended to use apply_ufunc.\n\n If none of the variables in obj is backed by dask, calling this function is\n equivalent to calling ``func(obj, *args, **kwargs)``.\n\n See Also\n --------\n dask.array.map_blocks, xarray.apply_ufunc, xarray.Dataset.map_blocks,\n xarray.DataArray.map_blocks\n \"\"\"\n\n def _wrapper(func, obj, to_array, args, kwargs):\n if to_array:\n obj = dataset_to_dataarray(obj)\n\n result = func(obj, *args, **kwargs)\n\n for name, index in result.indexes.items():\n if name in obj.indexes:\n if len(index) != len(obj.indexes[name]):\n raise ValueError(\n \"Length of the %r dimension has changed. This is not allowed.\"\n % name\n )\n\n return make_dict(result)\n\n if not isinstance(args, Sequence):\n raise TypeError(\"args must be a sequence (for example, a list or tuple).\")\n if kwargs is None:\n kwargs = {}\n elif not isinstance(kwargs, Mapping):\n raise TypeError(\"kwargs must be a mapping (for example, a dict)\")\n\n for value in list(args) + list(kwargs.values()):\n if dask.is_dask_collection(value):\n raise TypeError(\n \"Cannot pass dask collections in args or kwargs yet. 
Please compute or \"\n \"load values before passing to map_blocks.\"\n )\n\n if not dask.is_dask_collection(obj):\n return func(obj, *args, **kwargs)\n\n if isinstance(obj, DataArray):\n # only using _to_temp_dataset would break\n # func = lambda x: x.to_dataset()\n # since that relies on preserving name.\n if obj.name is None:\n dataset = obj._to_temp_dataset()\n else:\n dataset = obj.to_dataset()\n input_is_array = True\n else:\n dataset = obj\n input_is_array = False\n\n input_chunks = dataset.chunks\n\n template: Union[DataArray, Dataset] = infer_template(func, obj, *args, **kwargs)\n if isinstance(template, DataArray):\n result_is_array = True\n template_name = template.name\n template = template._to_temp_dataset()\n elif isinstance(template, Dataset):\n result_is_array = False\n else:\n raise TypeError(\n f\"func output must be DataArray or Dataset; got {type(template)}\"\n )\n\n template_indexes = set(template.indexes)\n dataset_indexes = set(dataset.indexes)\n preserved_indexes = template_indexes & dataset_indexes\n new_indexes = template_indexes - dataset_indexes\n indexes = {dim: dataset.indexes[dim] for dim in preserved_indexes}\n indexes.update({k: template.indexes[k] for k in new_indexes})\n\n graph: Dict[Any, Any] = {}\n gname = \"%s-%s\" % (\n dask.utils.funcname(func),\n dask.base.tokenize(dataset, args, kwargs),\n )\n\n # map dims to list of chunk indexes\n ichunk = {dim: range(len(chunks_v)) for dim, chunks_v in input_chunks.items()}\n # mapping from chunk index to slice bounds\n chunk_index_bounds = {\n dim: np.cumsum((0,) + chunks_v) for dim, chunks_v in input_chunks.items()\n }\n\n # iterate over all possible chunk combinations\n for v in itertools.product(*ichunk.values()):\n chunk_index_dict = dict(zip(dataset.dims, v))\n\n # this will become [[name1, variable1],\n # [name2, variable2],\n # ...]\n # which is passed to dict and then to Dataset\n data_vars = []\n coords = []\n\n for name, variable in dataset.variables.items():\n # make a task that creates tuple of (dims, chunk)\n if dask.is_dask_collection(variable.data):\n # recursively index into dask_keys nested list to get chunk\n chunk = variable.__dask_keys__()\n for dim in variable.dims:\n chunk = chunk[chunk_index_dict[dim]]\n\n chunk_variable_task = (\"%s-%s\" % (gname, chunk[0]),) + v\n graph[chunk_variable_task] = (\n tuple,\n [variable.dims, chunk, variable.attrs],\n )\n else:\n # non-dask array with possibly chunked dimensions\n # index into variable appropriately\n subsetter = {}\n for dim in variable.dims:\n if dim in chunk_index_dict:\n which_chunk = chunk_index_dict[dim]\n subsetter[dim] = slice(\n chunk_index_bounds[dim][which_chunk],\n chunk_index_bounds[dim][which_chunk + 1],\n )\n\n subset = variable.isel(subsetter)\n chunk_variable_task = (\n \"%s-%s\" % (gname, dask.base.tokenize(subset)),\n ) + v\n graph[chunk_variable_task] = (\n tuple,\n [subset.dims, subset, subset.attrs],\n )\n\n # this task creates dict mapping variable name to above tuple\n if name in dataset._coord_names:\n coords.append([name, chunk_variable_task])\n else:\n data_vars.append([name, chunk_variable_task])\n\n from_wrapper = (gname,) + v\n graph[from_wrapper] = (\n _wrapper,\n func,\n (Dataset, (dict, data_vars), (dict, coords), dataset.attrs),\n input_is_array,\n args,\n kwargs,\n )\n\n # mapping from variable name to dask graph key\n var_key_map: Dict[Hashable, str] = {}\n for name, variable in template.variables.items():\n if name in indexes:\n continue\n gname_l = \"%s-%s\" % (gname, name)\n var_key_map[name] = 
gname_l\n\n key: Tuple[Any, ...] = (gname_l,)\n for dim in variable.dims:\n if dim in chunk_index_dict:\n key += (chunk_index_dict[dim],)\n else:\n # unchunked dimensions in the input have one chunk in the result\n key += (0,)\n\n graph[key] = (operator.getitem, from_wrapper, name)\n\n graph = HighLevelGraph.from_collections(gname, graph, dependencies=[dataset])\n\n result = Dataset(coords=indexes, attrs=template.attrs)\n for name, gname_l in var_key_map.items():\n dims = template[name].dims\n var_chunks = []\n for dim in dims:\n if dim in input_chunks:\n var_chunks.append(input_chunks[dim])\n elif dim in indexes:\n var_chunks.append((len(indexes[dim]),))\n\n data = dask.array.Array(\n graph, name=gname_l, chunks=var_chunks, dtype=template[name].dtype\n )\n result[name] = (dims, data, template[name].attrs)\n\n result = result.set_coords(template._coord_names)\n\n if result_is_array:\n da = dataset_to_dataarray(result)\n da.name = template_name\n return da # type: ignore\n return result # type: ignore\n"},{"attributeType":"None","col":8,"comment":"null","endLoc":67,"id":3036,"name":"_all_output_core_dims","nodeType":"Attribute","startLoc":67,"text":"self._all_output_core_dims"},{"id":3037,"name":"xarray/tests","nodeType":"Package"},{"fileName":"__init__.py","filePath":"xarray/tests","id":3038,"nodeType":"File","text":"import importlib\nimport platform\nimport re\nimport warnings\nfrom contextlib import contextmanager\nfrom distutils import version\nfrom unittest import mock # noqa: F401\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_equal # noqa: F401\nfrom pandas.testing import assert_frame_equal # noqa: F401\n\nimport xarray.testing\nfrom xarray.core import utils\nfrom xarray.core.duck_array_ops import allclose_or_equiv # noqa: F401\nfrom xarray.core.indexing import ExplicitlyIndexed\nfrom xarray.core.options import set_options\nfrom xarray.plot.utils import import_seaborn\n\n# import mpl and change the backend before other mpl imports\ntry:\n import matplotlib as mpl\n\n # Order of imports is important here.\n # Using a different backend makes Travis CI work\n mpl.use(\"Agg\")\nexcept ImportError:\n pass\n\n\narm_xfail = pytest.mark.xfail(\n platform.machine() == \"aarch64\" or \"arm\" in platform.machine(),\n reason=\"expected failure on ARM\",\n)\n\n\ndef _importorskip(modname, minversion=None):\n try:\n mod = importlib.import_module(modname)\n has = True\n if minversion is not None:\n if LooseVersion(mod.__version__) < LooseVersion(minversion):\n raise ImportError(\"Minimum version not satisfied\")\n except ImportError:\n has = False\n func = pytest.mark.skipif(not has, reason=\"requires {}\".format(modname))\n return has, func\n\n\ndef LooseVersion(vstring):\n # Our development version is something like '0.10.9+aac7bfc'\n # This function just ignored the git commit id.\n vstring = vstring.split(\"+\")[0]\n return version.LooseVersion(vstring)\n\n\nhas_matplotlib, requires_matplotlib = _importorskip(\"matplotlib\")\nhas_scipy, requires_scipy = _importorskip(\"scipy\")\nhas_pydap, requires_pydap = _importorskip(\"pydap.client\")\nhas_netCDF4, requires_netCDF4 = _importorskip(\"netCDF4\")\nhas_h5netcdf, requires_h5netcdf = _importorskip(\"h5netcdf\")\nhas_pynio, requires_pynio = _importorskip(\"Nio\")\nhas_pseudonetcdf, requires_pseudonetcdf = _importorskip(\"PseudoNetCDF\")\nhas_cftime, requires_cftime = _importorskip(\"cftime\")\nhas_dask, requires_dask = _importorskip(\"dask\")\nhas_bottleneck, requires_bottleneck = 
_importorskip(\"bottleneck\")\nhas_nc_time_axis, requires_nc_time_axis = _importorskip(\"nc_time_axis\")\nhas_rasterio, requires_rasterio = _importorskip(\"rasterio\")\nhas_zarr, requires_zarr = _importorskip(\"zarr\")\nhas_iris, requires_iris = _importorskip(\"iris\")\nhas_cfgrib, requires_cfgrib = _importorskip(\"cfgrib\")\nhas_numbagg, requires_numbagg = _importorskip(\"numbagg\")\nhas_sparse, requires_sparse = _importorskip(\"sparse\")\n\n# some special cases\nhas_scipy_or_netCDF4 = has_scipy or has_netCDF4\nrequires_scipy_or_netCDF4 = pytest.mark.skipif(\n not has_scipy_or_netCDF4, reason=\"requires scipy or netCDF4\"\n)\nhas_cftime_or_netCDF4 = has_cftime or has_netCDF4\nrequires_cftime_or_netCDF4 = pytest.mark.skipif(\n not has_cftime_or_netCDF4, reason=\"requires cftime or netCDF4\"\n)\ntry:\n import_seaborn()\n has_seaborn = True\nexcept ImportError:\n has_seaborn = False\nrequires_seaborn = pytest.mark.skipif(not has_seaborn, reason=\"requires seaborn\")\n\n# change some global options for tests\nset_options(warn_for_unclosed_files=True)\n\nif has_dask:\n import dask\n\n dask.config.set(scheduler=\"single-threaded\")\n\nflaky = pytest.mark.flaky\nnetwork = pytest.mark.network\n\n\n@contextmanager\ndef raises_regex(error, pattern):\n __tracebackhide__ = True\n with pytest.raises(error) as excinfo:\n yield\n message = str(excinfo.value)\n if not re.search(pattern, message):\n raise AssertionError(\n \"exception %r did not match pattern %r\" % (excinfo.value, pattern)\n )\n\n\nclass UnexpectedDataAccess(Exception):\n pass\n\n\nclass InaccessibleArray(utils.NDArrayMixin, ExplicitlyIndexed):\n def __init__(self, array):\n self.array = array\n\n def __getitem__(self, key):\n raise UnexpectedDataAccess(\"Tried accessing data\")\n\n\nclass ReturnItem:\n def __getitem__(self, key):\n return key\n\n\nclass IndexerMaker:\n def __init__(self, indexer_cls):\n self._indexer_cls = indexer_cls\n\n def __getitem__(self, key):\n if not isinstance(key, tuple):\n key = (key,)\n return self._indexer_cls(key)\n\n\ndef source_ndarray(array):\n \"\"\"Given an ndarray, return the base object which holds its memory, or the\n object itself.\n \"\"\"\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \"DatetimeIndex.base\")\n warnings.filterwarnings(\"ignore\", \"TimedeltaIndex.base\")\n base = getattr(array, \"base\", np.asarray(array).base)\n if base is None:\n base = array\n return base\n\n\n# Internal versions of xarray's test functions that validate additional\n# invariants\n\n\ndef assert_equal(a, b):\n xarray.testing.assert_equal(a, b)\n xarray.testing._assert_internal_invariants(a)\n xarray.testing._assert_internal_invariants(b)\n\n\ndef assert_identical(a, b):\n xarray.testing.assert_identical(a, b)\n xarray.testing._assert_internal_invariants(a)\n xarray.testing._assert_internal_invariants(b)\n\n\ndef assert_allclose(a, b, **kwargs):\n xarray.testing.assert_allclose(a, b, **kwargs)\n xarray.testing._assert_internal_invariants(a)\n xarray.testing._assert_internal_invariants(b)\n"},{"attributeType":"null","col":8,"comment":"null","endLoc":64,"id":3039,"name":"input_core_dims","nodeType":"Attribute","startLoc":64,"text":"self.input_core_dims"},{"attributeType":"null","col":0,"comment":"null","endLoc":29,"id":3040,"name":"T_DSorDA","nodeType":"Attribute","startLoc":29,"text":"T_DSorDA"},{"fileName":"cftime_offsets.py","filePath":"xarray/coding","id":3041,"nodeType":"File","text":"\"\"\"Time offset classes for use with cftime.datetime objects\"\"\"\n# The offset classes and 
mechanisms for generating time ranges defined in\n# this module were copied/adapted from those defined in pandas. See in\n# particular the objects and methods defined in pandas.tseries.offsets\n# and pandas.core.indexes.datetimes.\n\n# For reference, here is a copy of the pandas copyright notice:\n\n# (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team\n# All rights reserved.\n\n# Copyright (c) 2008-2011 AQR Capital Management, LLC\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n\n# * Neither the name of the copyright holder nor the names of any\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport re\nfrom datetime import timedelta\nfrom functools import partial\nfrom typing import ClassVar, Optional\n\nimport numpy as np\n\nfrom ..core.pdcompat import count_not_none\nfrom .cftimeindex import CFTimeIndex, _parse_iso8601_with_reso\nfrom .times import format_cftime_datetime\n\n\ndef get_date_type(calendar):\n \"\"\"Return the cftime date type for a given calendar name.\"\"\"\n try:\n import cftime\n except ImportError:\n raise ImportError(\"cftime is required for dates with non-standard calendars\")\n else:\n calendars = {\n \"noleap\": cftime.DatetimeNoLeap,\n \"360_day\": cftime.Datetime360Day,\n \"365_day\": cftime.DatetimeNoLeap,\n \"366_day\": cftime.DatetimeAllLeap,\n \"gregorian\": cftime.DatetimeGregorian,\n \"proleptic_gregorian\": cftime.DatetimeProlepticGregorian,\n \"julian\": cftime.DatetimeJulian,\n \"all_leap\": cftime.DatetimeAllLeap,\n \"standard\": cftime.DatetimeGregorian,\n }\n return calendars[calendar]\n\n\nclass BaseCFTimeOffset:\n _freq: ClassVar[Optional[str]] = None\n _day_option: ClassVar[Optional[str]] = None\n\n def __init__(self, n=1):\n if not isinstance(n, int):\n raise TypeError(\n \"The provided multiple 'n' must be an integer. 
\"\n \"Instead a value of type {!r} was provided.\".format(type(n))\n )\n self.n = n\n\n def rule_code(self):\n return self._freq\n\n def __eq__(self, other):\n return self.n == other.n and self.rule_code() == other.rule_code()\n\n def __ne__(self, other):\n return not self == other\n\n def __add__(self, other):\n return self.__apply__(other)\n\n def __sub__(self, other):\n import cftime\n\n if isinstance(other, cftime.datetime):\n raise TypeError(\"Cannot subtract a cftime.datetime \" \"from a time offset.\")\n elif type(other) == type(self):\n return type(self)(self.n - other.n)\n else:\n return NotImplemented\n\n def __mul__(self, other):\n return type(self)(n=other * self.n)\n\n def __neg__(self):\n return self * -1\n\n def __rmul__(self, other):\n return self.__mul__(other)\n\n def __radd__(self, other):\n return self.__add__(other)\n\n def __rsub__(self, other):\n if isinstance(other, BaseCFTimeOffset) and type(self) != type(other):\n raise TypeError(\"Cannot subtract cftime offsets of differing \" \"types\")\n return -self + other\n\n def __apply__(self):\n return NotImplemented\n\n def onOffset(self, date):\n \"\"\"Check if the given date is in the set of possible dates created\n using a length-one version of this offset class.\"\"\"\n test_date = (self + date) - self\n return date == test_date\n\n def rollforward(self, date):\n if self.onOffset(date):\n return date\n else:\n return date + type(self)()\n\n def rollback(self, date):\n if self.onOffset(date):\n return date\n else:\n return date - type(self)()\n\n def __str__(self):\n return \"<{}: n={}>\".format(type(self).__name__, self.n)\n\n def __repr__(self):\n return str(self)\n\n def _get_offset_day(self, other):\n # subclass must implement `_day_option`; calling from the base class\n # will raise NotImplementedError.\n return _get_day_of_month(other, self._day_option)\n\n\ndef _get_day_of_month(other, day_option):\n \"\"\"Find the day in `other`'s month that satisfies a BaseCFTimeOffset's\n onOffset policy, as described by the `day_option` argument.\n\n Parameters\n ----------\n other : cftime.datetime\n day_option : 'start', 'end'\n 'start': returns 1\n 'end': returns last day of the month\n\n Returns\n -------\n day_of_month : int\n\n \"\"\"\n\n if day_option == \"start\":\n return 1\n elif day_option == \"end\":\n days_in_month = _days_in_month(other)\n return days_in_month\n elif day_option is None:\n # Note: unlike `_shift_month`, _get_day_of_month does not\n # allow day_option = None\n raise NotImplementedError()\n else:\n raise ValueError(day_option)\n\n\ndef _days_in_month(date):\n \"\"\"The number of days in the month of the given date\"\"\"\n if date.month == 12:\n reference = type(date)(date.year + 1, 1, 1)\n else:\n reference = type(date)(date.year, date.month + 1, 1)\n return (reference - timedelta(days=1)).day\n\n\ndef _adjust_n_months(other_day, n, reference_day):\n \"\"\"Adjust the number of times a monthly offset is applied based\n on the day of a given date, and the reference day provided.\n \"\"\"\n if n > 0 and other_day < reference_day:\n n = n - 1\n elif n <= 0 and other_day > reference_day:\n n = n + 1\n return n\n\n\ndef _adjust_n_years(other, n, month, reference_day):\n \"\"\"Adjust the number of times an annual offset is applied based on\n another date, and the reference day provided\"\"\"\n if n > 0:\n if other.month < month or (other.month == month and other.day < reference_day):\n n -= 1\n else:\n if other.month > month or (other.month == month and other.day > reference_day):\n n += 1\n return 
n\n\n\ndef _shift_month(date, months, day_option=\"start\"):\n \"\"\"Shift the date to a month start or end a given number of months away.\n \"\"\"\n delta_year = (date.month + months) // 12\n month = (date.month + months) % 12\n\n if month == 0:\n month = 12\n delta_year = delta_year - 1\n year = date.year + delta_year\n\n if day_option == \"start\":\n day = 1\n elif day_option == \"end\":\n reference = type(date)(year, month, 1)\n day = _days_in_month(reference)\n else:\n raise ValueError(day_option)\n # dayofwk=-1 is required to update the dayofwk and dayofyr attributes of\n # the returned date object in versions of cftime between 1.0.2 and\n # 1.0.3.4. It can be removed for versions of cftime greater than\n # 1.0.3.4.\n return date.replace(year=year, month=month, day=day, dayofwk=-1)\n\n\ndef roll_qtrday(other, n, month, day_option, modby=3):\n \"\"\"Possibly increment or decrement the number of periods to shift\n based on rollforward/rollbackward conventions.\n\n Parameters\n ----------\n other : cftime.datetime\n n : number of periods to increment, before adjusting for rolling\n month : int reference month giving the first month of the year\n day_option : 'start', 'end'\n The convention to use in finding the day in a given month against\n which to compare for rollforward/rollbackward decisions.\n modby : int 3 for quarters, 12 for years\n\n Returns\n -------\n n : int number of periods to increment\n\n See Also\n --------\n _get_day_of_month : Find the day in a month provided an offset.\n \"\"\"\n\n months_since = other.month % modby - month % modby\n\n if n > 0:\n if months_since < 0 or (\n months_since == 0 and other.day < _get_day_of_month(other, day_option)\n ):\n # pretend to roll back if on same month but\n # before compare_day\n n -= 1\n else:\n if months_since > 0 or (\n months_since == 0 and other.day > _get_day_of_month(other, day_option)\n ):\n # make sure to roll forward, so negate\n n += 1\n return n\n\n\ndef _validate_month(month, default_month):\n if month is None:\n result_month = default_month\n else:\n result_month = month\n if not isinstance(result_month, int):\n raise TypeError(\n \"'self.month' must be an integer value between 1 \"\n \"and 12. Instead, it was set to a value of \"\n \"{!r}\".format(result_month)\n )\n elif not (1 <= result_month <= 12):\n raise ValueError(\n \"'self.month' must be an integer value between 1 \"\n \"and 12. 
Instead, it was set to a value of \"\n \"{!r}\".format(result_month)\n )\n return result_month\n\n\nclass MonthBegin(BaseCFTimeOffset):\n _freq = \"MS\"\n\n def __apply__(self, other):\n n = _adjust_n_months(other.day, self.n, 1)\n return _shift_month(other, n, \"start\")\n\n def onOffset(self, date):\n \"\"\"Check if the given date is in the set of possible dates created\n using a length-one version of this offset class.\"\"\"\n return date.day == 1\n\n\nclass MonthEnd(BaseCFTimeOffset):\n _freq = \"M\"\n\n def __apply__(self, other):\n n = _adjust_n_months(other.day, self.n, _days_in_month(other))\n return _shift_month(other, n, \"end\")\n\n def onOffset(self, date):\n \"\"\"Check if the given date is in the set of possible dates created\n using a length-one version of this offset class.\"\"\"\n return date.day == _days_in_month(date)\n\n\n_MONTH_ABBREVIATIONS = {\n 1: \"JAN\",\n 2: \"FEB\",\n 3: \"MAR\",\n 4: \"APR\",\n 5: \"MAY\",\n 6: \"JUN\",\n 7: \"JUL\",\n 8: \"AUG\",\n 9: \"SEP\",\n 10: \"OCT\",\n 11: \"NOV\",\n 12: \"DEC\",\n}\n\n\nclass QuarterOffset(BaseCFTimeOffset):\n \"\"\"Quarter representation copied off of pandas/tseries/offsets.py\n \"\"\"\n\n _freq: ClassVar[str]\n _default_month: ClassVar[int]\n\n def __init__(self, n=1, month=None):\n BaseCFTimeOffset.__init__(self, n)\n self.month = _validate_month(month, self._default_month)\n\n def __apply__(self, other):\n # months_since: find the calendar quarter containing other.month,\n # e.g. if other.month == 8, the calendar quarter is [Jul, Aug, Sep].\n # Then find the month in that quarter containing an onOffset date for\n # self. `months_since` is the number of months to shift other.month\n # to get to this on-offset month.\n months_since = other.month % 3 - self.month % 3\n qtrs = roll_qtrday(\n other, self.n, self.month, day_option=self._day_option, modby=3\n )\n months = qtrs * 3 - months_since\n return _shift_month(other, months, self._day_option)\n\n def onOffset(self, date):\n \"\"\"Check if the given date is in the set of possible dates created\n using a length-one version of this offset class.\"\"\"\n mod_month = (date.month - self.month) % 3\n return mod_month == 0 and date.day == self._get_offset_day(date)\n\n def __sub__(self, other):\n import cftime\n\n if isinstance(other, cftime.datetime):\n raise TypeError(\"Cannot subtract cftime.datetime from offset.\")\n elif type(other) == type(self) and other.month == self.month:\n return type(self)(self.n - other.n, month=self.month)\n else:\n return NotImplemented\n\n def __mul__(self, other):\n return type(self)(n=other * self.n, month=self.month)\n\n def rule_code(self):\n return \"{}-{}\".format(self._freq, _MONTH_ABBREVIATIONS[self.month])\n\n def __str__(self):\n return \"<{}: n={}, month={}>\".format(type(self).__name__, self.n, self.month)\n\n\nclass QuarterBegin(QuarterOffset):\n # When converting a string to an offset, pandas converts\n # 'QS' to a QuarterBegin offset starting in the month of\n # January. 
When creating a QuarterBegin offset directly\n # from the constructor, however, the default month is March.\n # We follow that behavior here.\n _default_month = 3\n _freq = \"QS\"\n _day_option = \"start\"\n\n def rollforward(self, date):\n \"\"\"Roll date forward to nearest start of quarter\"\"\"\n if self.onOffset(date):\n return date\n else:\n return date + QuarterBegin(month=self.month)\n\n def rollback(self, date):\n \"\"\"Roll date backward to nearest start of quarter\"\"\"\n if self.onOffset(date):\n return date\n else:\n return date - QuarterBegin(month=self.month)\n\n\nclass QuarterEnd(QuarterOffset):\n # When converting a string to an offset, pandas converts\n # 'Q' to a QuarterEnd offset starting in the month of\n # December. When creating a QuarterEnd offset directly\n # from the constructor, however, the default month is March.\n # We follow that behavior here.\n _default_month = 3\n _freq = \"Q\"\n _day_option = \"end\"\n\n def rollforward(self, date):\n \"\"\"Roll date forward to nearest end of quarter\"\"\"\n if self.onOffset(date):\n return date\n else:\n return date + QuarterEnd(month=self.month)\n\n def rollback(self, date):\n \"\"\"Roll date backward to nearest end of quarter\"\"\"\n if self.onOffset(date):\n return date\n else:\n return date - QuarterEnd(month=self.month)\n\n\nclass YearOffset(BaseCFTimeOffset):\n _freq: ClassVar[str]\n _day_option: ClassVar[str]\n _default_month: ClassVar[int]\n\n def __init__(self, n=1, month=None):\n BaseCFTimeOffset.__init__(self, n)\n self.month = _validate_month(month, self._default_month)\n\n def __apply__(self, other):\n reference_day = _get_day_of_month(other, self._day_option)\n years = _adjust_n_years(other, self.n, self.month, reference_day)\n months = years * 12 + (self.month - other.month)\n return _shift_month(other, months, self._day_option)\n\n def __sub__(self, other):\n import cftime\n\n if isinstance(other, cftime.datetime):\n raise TypeError(\"Cannot subtract cftime.datetime from offset.\")\n elif type(other) == type(self) and other.month == self.month:\n return type(self)(self.n - other.n, month=self.month)\n else:\n return NotImplemented\n\n def __mul__(self, other):\n return type(self)(n=other * self.n, month=self.month)\n\n def rule_code(self):\n return \"{}-{}\".format(self._freq, _MONTH_ABBREVIATIONS[self.month])\n\n def __str__(self):\n return \"<{}: n={}, month={}>\".format(type(self).__name__, self.n, self.month)\n\n\nclass YearBegin(YearOffset):\n _freq = \"AS\"\n _day_option = \"start\"\n _default_month = 1\n\n def onOffset(self, date):\n \"\"\"Check if the given date is in the set of possible dates created\n using a length-one version of this offset class.\"\"\"\n return date.day == 1 and date.month == self.month\n\n def rollforward(self, date):\n \"\"\"Roll date forward to nearest start of year\"\"\"\n if self.onOffset(date):\n return date\n else:\n return date + YearBegin(month=self.month)\n\n def rollback(self, date):\n \"\"\"Roll date backward to nearest start of year\"\"\"\n if self.onOffset(date):\n return date\n else:\n return date - YearBegin(month=self.month)\n\n\nclass YearEnd(YearOffset):\n _freq = \"A\"\n _day_option = \"end\"\n _default_month = 12\n\n def onOffset(self, date):\n \"\"\"Check if the given date is in the set of possible dates created\n using a length-one version of this offset class.\"\"\"\n return date.day == _days_in_month(date) and date.month == self.month\n\n def rollforward(self, date):\n \"\"\"Roll date forward to nearest end of year\"\"\"\n if self.onOffset(date):\n 
return date\n else:\n return date + YearEnd(month=self.month)\n\n def rollback(self, date):\n \"\"\"Roll date backward to nearest end of year\"\"\"\n if self.onOffset(date):\n return date\n else:\n return date - YearEnd(month=self.month)\n\n\nclass Day(BaseCFTimeOffset):\n _freq = \"D\"\n\n def as_timedelta(self):\n return timedelta(days=self.n)\n\n def __apply__(self, other):\n return other + self.as_timedelta()\n\n\nclass Hour(BaseCFTimeOffset):\n _freq = \"H\"\n\n def as_timedelta(self):\n return timedelta(hours=self.n)\n\n def __apply__(self, other):\n return other + self.as_timedelta()\n\n\nclass Minute(BaseCFTimeOffset):\n _freq = \"T\"\n\n def as_timedelta(self):\n return timedelta(minutes=self.n)\n\n def __apply__(self, other):\n return other + self.as_timedelta()\n\n\nclass Second(BaseCFTimeOffset):\n _freq = \"S\"\n\n def as_timedelta(self):\n return timedelta(seconds=self.n)\n\n def __apply__(self, other):\n return other + self.as_timedelta()\n\n\n_FREQUENCIES = {\n \"A\": YearEnd,\n \"AS\": YearBegin,\n \"Y\": YearEnd,\n \"YS\": YearBegin,\n \"Q\": partial(QuarterEnd, month=12),\n \"QS\": partial(QuarterBegin, month=1),\n \"M\": MonthEnd,\n \"MS\": MonthBegin,\n \"D\": Day,\n \"H\": Hour,\n \"T\": Minute,\n \"min\": Minute,\n \"S\": Second,\n \"AS-JAN\": partial(YearBegin, month=1),\n \"AS-FEB\": partial(YearBegin, month=2),\n \"AS-MAR\": partial(YearBegin, month=3),\n \"AS-APR\": partial(YearBegin, month=4),\n \"AS-MAY\": partial(YearBegin, month=5),\n \"AS-JUN\": partial(YearBegin, month=6),\n \"AS-JUL\": partial(YearBegin, month=7),\n \"AS-AUG\": partial(YearBegin, month=8),\n \"AS-SEP\": partial(YearBegin, month=9),\n \"AS-OCT\": partial(YearBegin, month=10),\n \"AS-NOV\": partial(YearBegin, month=11),\n \"AS-DEC\": partial(YearBegin, month=12),\n \"A-JAN\": partial(YearEnd, month=1),\n \"A-FEB\": partial(YearEnd, month=2),\n \"A-MAR\": partial(YearEnd, month=3),\n \"A-APR\": partial(YearEnd, month=4),\n \"A-MAY\": partial(YearEnd, month=5),\n \"A-JUN\": partial(YearEnd, month=6),\n \"A-JUL\": partial(YearEnd, month=7),\n \"A-AUG\": partial(YearEnd, month=8),\n \"A-SEP\": partial(YearEnd, month=9),\n \"A-OCT\": partial(YearEnd, month=10),\n \"A-NOV\": partial(YearEnd, month=11),\n \"A-DEC\": partial(YearEnd, month=12),\n \"QS-JAN\": partial(QuarterBegin, month=1),\n \"QS-FEB\": partial(QuarterBegin, month=2),\n \"QS-MAR\": partial(QuarterBegin, month=3),\n \"QS-APR\": partial(QuarterBegin, month=4),\n \"QS-MAY\": partial(QuarterBegin, month=5),\n \"QS-JUN\": partial(QuarterBegin, month=6),\n \"QS-JUL\": partial(QuarterBegin, month=7),\n \"QS-AUG\": partial(QuarterBegin, month=8),\n \"QS-SEP\": partial(QuarterBegin, month=9),\n \"QS-OCT\": partial(QuarterBegin, month=10),\n \"QS-NOV\": partial(QuarterBegin, month=11),\n \"QS-DEC\": partial(QuarterBegin, month=12),\n \"Q-JAN\": partial(QuarterEnd, month=1),\n \"Q-FEB\": partial(QuarterEnd, month=2),\n \"Q-MAR\": partial(QuarterEnd, month=3),\n \"Q-APR\": partial(QuarterEnd, month=4),\n \"Q-MAY\": partial(QuarterEnd, month=5),\n \"Q-JUN\": partial(QuarterEnd, month=6),\n \"Q-JUL\": partial(QuarterEnd, month=7),\n \"Q-AUG\": partial(QuarterEnd, month=8),\n \"Q-SEP\": partial(QuarterEnd, month=9),\n \"Q-OCT\": partial(QuarterEnd, month=10),\n \"Q-NOV\": partial(QuarterEnd, month=11),\n \"Q-DEC\": partial(QuarterEnd, month=12),\n}\n\n\n_FREQUENCY_CONDITION = \"|\".join(_FREQUENCIES.keys())\n_PATTERN = r\"^((?P<multiple>\d+)|())(?P<freq>({}))$\".format(_FREQUENCY_CONDITION)\n\n\n# pandas defines these offsets as \"Tick\" objects, which for 
instance have\n# distinct behavior from monthly or longer frequencies in resample.\nCFTIME_TICKS = (Day, Hour, Minute, Second)\n\n\ndef to_offset(freq):\n \"\"\"Convert a frequency string to the appropriate subclass of\n BaseCFTimeOffset.\"\"\"\n if isinstance(freq, BaseCFTimeOffset):\n return freq\n else:\n try:\n freq_data = re.match(_PATTERN, freq).groupdict()\n except AttributeError:\n raise ValueError(\"Invalid frequency string provided\")\n\n freq = freq_data[\"freq\"]\n multiples = freq_data[\"multiple\"]\n if multiples is None:\n multiples = 1\n else:\n multiples = int(multiples)\n\n return _FREQUENCIES[freq](n=multiples)\n\n\ndef to_cftime_datetime(date_str_or_date, calendar=None):\n import cftime\n\n if isinstance(date_str_or_date, str):\n if calendar is None:\n raise ValueError(\n \"If converting a string to a cftime.datetime object, \"\n \"a calendar type must be provided\"\n )\n date, _ = _parse_iso8601_with_reso(get_date_type(calendar), date_str_or_date)\n return date\n elif isinstance(date_str_or_date, cftime.datetime):\n return date_str_or_date\n else:\n raise TypeError(\n \"date_str_or_date must be a string or a \"\n \"subclass of cftime.datetime. Instead got \"\n \"{!r}.\".format(date_str_or_date)\n )\n\n\ndef normalize_date(date):\n \"\"\"Round datetime down to midnight.\"\"\"\n return date.replace(hour=0, minute=0, second=0, microsecond=0)\n\n\ndef _maybe_normalize_date(date, normalize):\n \"\"\"Round datetime down to midnight if normalize is True.\"\"\"\n if normalize:\n return normalize_date(date)\n else:\n return date\n\n\ndef _generate_linear_range(start, end, periods):\n \"\"\"Generate an equally-spaced sequence of cftime.datetime objects between\n and including two dates (whose length equals the number of periods).\"\"\"\n import cftime\n\n total_seconds = (end - start).total_seconds()\n values = np.linspace(0.0, total_seconds, periods, endpoint=True)\n units = \"seconds since {}\".format(format_cftime_datetime(start))\n calendar = start.calendar\n return cftime.num2date(\n values, units=units, calendar=calendar, only_use_cftime_datetimes=True\n )\n\n\ndef _generate_range(start, end, periods, offset):\n \"\"\"Generate a regular range of cftime.datetime objects with a\n given time offset.\n\n Adapted from pandas.tseries.offsets.generate_range.\n\n Parameters\n ----------\n start : cftime.datetime, or None\n Start of range\n end : cftime.datetime, or None\n End of range\n periods : int, or None\n Number of elements in the sequence\n offset : BaseCFTimeOffset\n An offset class designed for working with cftime.datetime objects\n\n Returns\n -------\n A generator object\n \"\"\"\n if start:\n start = offset.rollforward(start)\n\n if end:\n end = offset.rollback(end)\n\n if periods is None and end < start:\n end = None\n periods = 0\n\n if end is None:\n end = start + (periods - 1) * offset\n\n if start is None:\n start = end - (periods - 1) * offset\n\n current = start\n if offset.n >= 0:\n while current <= end:\n yield current\n\n next_date = current + offset\n if next_date <= current:\n raise ValueError(\n \"Offset {offset} did not increment date\".format(offset=offset)\n )\n current = next_date\n else:\n while current >= end:\n yield current\n\n next_date = current + offset\n if next_date >= current:\n raise ValueError(\n \"Offset {offset} did not decrement date\".format(offset=offset)\n )\n current = next_date\n\n\ndef cftime_range(\n start=None,\n end=None,\n periods=None,\n freq=\"D\",\n normalize=False,\n name=None,\n closed=None,\n calendar=\"standard\",\n):\n 
\"\"\"Return a fixed frequency CFTimeIndex.\n\n Parameters\n ----------\n start : str or cftime.datetime, optional\n Left bound for generating dates.\n end : str or cftime.datetime, optional\n Right bound for generating dates.\n periods : integer, optional\n Number of periods to generate.\n freq : str, default 'D', BaseCFTimeOffset, or None\n Frequency strings can have multiples, e.g. '5H'.\n normalize : bool, default False\n Normalize start/end dates to midnight before generating date range.\n name : str, default None\n Name of the resulting index\n closed : {None, 'left', 'right'}, optional\n Make the interval closed with respect to the given frequency to the\n 'left', 'right', or both sides (None, the default).\n calendar : str\n Calendar type for the datetimes (default 'standard').\n\n Returns\n -------\n CFTimeIndex\n\n Notes\n -----\n\n This function is an analog of ``pandas.date_range`` for use in generating\n sequences of ``cftime.datetime`` objects. It supports most of the\n features of ``pandas.date_range`` (e.g. specifying how the index is\n ``closed`` on either side, or whether or not to ``normalize`` the start and\n end bounds); however, there are some notable exceptions:\n\n - You cannot specify a ``tz`` (time zone) argument.\n - Start or end dates specified as partial-datetime strings must use the\n `ISO-8601 format `_.\n - It supports many, but not all, frequencies supported by\n ``pandas.date_range``. For example it does not currently support any of\n the business-related, semi-monthly, or sub-second frequencies.\n - Compound sub-monthly frequencies are not supported, e.g. '1H1min', as\n these can easily be written in terms of the finest common resolution,\n e.g. '61min'.\n\n Valid simple frequency strings for use with ``cftime``-calendars include\n any multiples of the following.\n\n +--------+--------------------------+\n | Alias | Description |\n +========+==========================+\n | A, Y | Year-end frequency |\n +--------+--------------------------+\n | AS, YS | Year-start frequency |\n +--------+--------------------------+\n | Q | Quarter-end frequency |\n +--------+--------------------------+\n | QS | Quarter-start frequency |\n +--------+--------------------------+\n | M | Month-end frequency |\n +--------+--------------------------+\n | MS | Month-start frequency |\n +--------+--------------------------+\n | D | Day frequency |\n +--------+--------------------------+\n | H | Hour frequency |\n +--------+--------------------------+\n | T, min | Minute frequency |\n +--------+--------------------------+\n | S | Second frequency |\n +--------+--------------------------+\n\n Any multiples of the following anchored offsets are also supported.\n\n +----------+--------------------------------------------------------------------+\n | Alias | Description |\n +==========+====================================================================+\n | A(S)-JAN | Annual frequency, anchored at the end (or beginning) of January |\n +----------+--------------------------------------------------------------------+\n | A(S)-FEB | Annual frequency, anchored at the end (or beginning) of February |\n +----------+--------------------------------------------------------------------+\n | A(S)-MAR | Annual frequency, anchored at the end (or beginning) of March |\n +----------+--------------------------------------------------------------------+\n | A(S)-APR | Annual frequency, anchored at the end (or beginning) of April |\n 
+----------+--------------------------------------------------------------------+\n | A(S)-MAY | Annual frequency, anchored at the end (or beginning) of May |\n +----------+--------------------------------------------------------------------+\n | A(S)-JUN | Annual frequency, anchored at the end (or beginning) of June |\n +----------+--------------------------------------------------------------------+\n | A(S)-JUL | Annual frequency, anchored at the end (or beginning) of July |\n +----------+--------------------------------------------------------------------+\n | A(S)-AUG | Annual frequency, anchored at the end (or beginning) of August |\n +----------+--------------------------------------------------------------------+\n | A(S)-SEP | Annual frequency, anchored at the end (or beginning) of September |\n +----------+--------------------------------------------------------------------+\n | A(S)-OCT | Annual frequency, anchored at the end (or beginning) of October |\n +----------+--------------------------------------------------------------------+\n | A(S)-NOV | Annual frequency, anchored at the end (or beginning) of November |\n +----------+--------------------------------------------------------------------+\n | A(S)-DEC | Annual frequency, anchored at the end (or beginning) of December |\n +----------+--------------------------------------------------------------------+\n | Q(S)-JAN | Quarter frequency, anchored at the end (or beginning) of January |\n +----------+--------------------------------------------------------------------+\n | Q(S)-FEB | Quarter frequency, anchored at the end (or beginning) of February |\n +----------+--------------------------------------------------------------------+\n | Q(S)-MAR | Quarter frequency, anchored at the end (or beginning) of March |\n +----------+--------------------------------------------------------------------+\n | Q(S)-APR | Quarter frequency, anchored at the end (or beginning) of April |\n +----------+--------------------------------------------------------------------+\n | Q(S)-MAY | Quarter frequency, anchored at the end (or beginning) of May |\n +----------+--------------------------------------------------------------------+\n | Q(S)-JUN | Quarter frequency, anchored at the end (or beginning) of June |\n +----------+--------------------------------------------------------------------+\n | Q(S)-JUL | Quarter frequency, anchored at the end (or beginning) of July |\n +----------+--------------------------------------------------------------------+\n | Q(S)-AUG | Quarter frequency, anchored at the end (or beginning) of August |\n +----------+--------------------------------------------------------------------+\n | Q(S)-SEP | Quarter frequency, anchored at the end (or beginning) of September |\n +----------+--------------------------------------------------------------------+\n | Q(S)-OCT | Quarter frequency, anchored at the end (or beginning) of October |\n +----------+--------------------------------------------------------------------+\n | Q(S)-NOV | Quarter frequency, anchored at the end (or beginning) of November |\n +----------+--------------------------------------------------------------------+\n | Q(S)-DEC | Quarter frequency, anchored at the end (or beginning) of December |\n +----------+--------------------------------------------------------------------+\n\n\n Finally, the following calendar aliases are supported.\n\n +--------------------------------+---------------------------------------+\n | Alias | Date type |\n 
+================================+=======================================+\n | standard, gregorian | ``cftime.DatetimeGregorian`` |\n +--------------------------------+---------------------------------------+\n | proleptic_gregorian | ``cftime.DatetimeProlepticGregorian`` |\n +--------------------------------+---------------------------------------+\n | noleap, 365_day | ``cftime.DatetimeNoLeap`` |\n +--------------------------------+---------------------------------------+\n | all_leap, 366_day | ``cftime.DatetimeAllLeap`` |\n +--------------------------------+---------------------------------------+\n | 360_day | ``cftime.Datetime360Day`` |\n +--------------------------------+---------------------------------------+\n | julian | ``cftime.DatetimeJulian`` |\n +--------------------------------+---------------------------------------+\n\n Examples\n --------\n\n This function returns a ``CFTimeIndex``, populated with ``cftime.datetime``\n objects associated with the specified calendar type, e.g.\n\n >>> xr.cftime_range(start='2000', periods=6, freq='2MS', calendar='noleap')\n CFTimeIndex([2000-01-01 00:00:00, 2000-03-01 00:00:00, 2000-05-01 00:00:00,\n 2000-07-01 00:00:00, 2000-09-01 00:00:00, 2000-11-01 00:00:00],\n dtype='object')\n\n As in the standard pandas function, three of the ``start``, ``end``,\n ``periods``, or ``freq`` arguments must be specified at a given time, with\n the other set to ``None``. See the `pandas documentation\n `_\n for more examples of the behavior of ``date_range`` with each of the\n parameters.\n\n See Also\n --------\n pandas.date_range\n \"\"\"\n # Adapted from pandas.core.indexes.datetimes._generate_range.\n if count_not_none(start, end, periods, freq) != 3:\n raise ValueError(\n \"Of the arguments 'start', 'end', 'periods', and 'freq', three \"\n \"must be specified at a time.\"\n )\n\n if start is not None:\n start = to_cftime_datetime(start, calendar)\n start = _maybe_normalize_date(start, normalize)\n if end is not None:\n end = to_cftime_datetime(end, calendar)\n end = _maybe_normalize_date(end, normalize)\n\n if freq is None:\n dates = _generate_linear_range(start, end, periods)\n else:\n offset = to_offset(freq)\n dates = np.array(list(_generate_range(start, end, periods, offset)))\n\n left_closed = False\n right_closed = False\n\n if closed is None:\n left_closed = True\n right_closed = True\n elif closed == \"left\":\n left_closed = True\n elif closed == \"right\":\n right_closed = True\n else:\n raise ValueError(\"Closed must be either 'left', 'right' or None\")\n\n if not left_closed and len(dates) and start is not None and dates[0] == start:\n dates = dates[1:]\n if not right_closed and len(dates) and end is not None and dates[-1] == end:\n dates = dates[:-1]\n\n return CFTimeIndex(dates, name=name)\n"},{"col":0,"comment":"","endLoc":8,"header":"parallel.py#","id":3042,"name":"","nodeType":"Function","startLoc":1,"text":"try:\n import dask\n import dask.array\n from dask.highlevelgraph import HighLevelGraph\n from .dask_array_compat import meta_from_array\n\nexcept ImportError:\n pass\n\nT_DSorDA = TypeVar(\"T_DSorDA\", DataArray, 
Dataset)"},{"attributeType":"null","col":0,"comment":"null","endLoc":37,"id":3043,"name":"NON_NUMPY_SUPPORTED_ARRAY_TYPES","nodeType":"Attribute","startLoc":37,"text":"NON_NUMPY_SUPPORTED_ARRAY_TYPES"},{"attributeType":"None","col":8,"comment":"null","endLoc":68,"id":3044,"name":"_all_core_dims","nodeType":"Attribute","startLoc":68,"text":"self._all_core_dims"},{"attributeType":"None","col":8,"comment":"null","endLoc":66,"id":3045,"name":"_all_input_core_dims","nodeType":"Attribute","startLoc":66,"text":"self._all_input_core_dims"},{"className":"MonthBegin","col":0,"comment":"null","endLoc":318,"id":3046,"nodeType":"Class","startLoc":308,"text":"class MonthBegin(BaseCFTimeOffset):\n _freq = \"MS\"\n\n def __apply__(self, other):\n n = _adjust_n_months(other.day, self.n, 1)\n return _shift_month(other, n, \"start\")\n\n def onOffset(self, date):\n \"\"\"Check if the given date is in the set of possible dates created\n using a length-one version of this offset class.\"\"\"\n return date.day == 1"},{"col":4,"comment":"null","endLoc":313,"header":"def __apply__(self, other)","id":3047,"name":"__apply__","nodeType":"Function","startLoc":311,"text":"def __apply__(self, other):\n n = _adjust_n_months(other.day, self.n, 1)\n return _shift_month(other, n, \"start\")"},{"col":0,"comment":"null","endLoc":249,"header":"def ordered_set_union(all_keys: List[Iterable]) -> Iterable","id":3048,"name":"ordered_set_union","nodeType":"Function","startLoc":248,"text":"def ordered_set_union(all_keys: List[Iterable]) -> Iterable:\n return {key: None for keys in all_keys for key in keys}.keys()"},{"col":0,"comment":"null","endLoc":256,"header":"def ordered_set_intersection(all_keys: List[Iterable]) -> Iterable","id":3049,"name":"ordered_set_intersection","nodeType":"Function","startLoc":252,"text":"def ordered_set_intersection(all_keys: List[Iterable]) -> Iterable:\n intersection = set(all_keys[0])\n for keys in all_keys[1:]:\n intersection.intersection_update(keys)\n return [key for key in all_keys[0] if key in intersection]"},{"fileName":"dataarray.py","filePath":"xarray/core","id":3050,"nodeType":"File","text":"import functools\nimport warnings\nfrom numbers import Number\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Dict,\n Hashable,\n Iterable,\n List,\n Mapping,\n Optional,\n Sequence,\n Tuple,\n TypeVar,\n Union,\n cast,\n overload,\n)\n\nimport numpy as np\nimport pandas as pd\n\nfrom ..plot.plot import _PlotMethods\nfrom . 
import (\n computation,\n dtypes,\n groupby,\n indexing,\n ops,\n pdcompat,\n resample,\n rolling,\n utils,\n)\nfrom .accessor_dt import DatetimeAccessor\nfrom .accessor_str import StringAccessor\nfrom .alignment import (\n _broadcast_helper,\n _get_broadcast_dims_map_common_coords,\n align,\n reindex_like_indexers,\n)\nfrom .common import AbstractArray, DataWithCoords\nfrom .coordinates import (\n DataArrayCoordinates,\n LevelCoordinatesSource,\n assert_coordinate_consistent,\n remap_label_indexers,\n)\nfrom .dataset import Dataset, merge_indexes, split_indexes\nfrom .formatting import format_item\nfrom .indexes import Indexes, default_indexes\nfrom .options import OPTIONS\nfrom .utils import ReprObject, _check_inplace, either_dict_or_kwargs\nfrom .variable import (\n IndexVariable,\n Variable,\n as_compatible_data,\n as_variable,\n assert_unique_multiindex_level_names,\n)\n\nif TYPE_CHECKING:\n T_DSorDA = TypeVar(\"T_DSorDA\", \"DataArray\", Dataset)\n\n try:\n from dask.delayed import Delayed\n except ImportError:\n Delayed = None\n try:\n from cdms2 import Variable as cdms2_Variable\n except ImportError:\n cdms2_Variable = None\n try:\n from iris.cube import Cube as iris_Cube\n except ImportError:\n iris_Cube = None\n\n\ndef _infer_coords_and_dims(\n shape, coords, dims\n) -> \"Tuple[Dict[Any, Variable], Tuple[Hashable, ...]]\":\n \"\"\"All the logic for creating a new DataArray\"\"\"\n\n if (\n coords is not None\n and not utils.is_dict_like(coords)\n and len(coords) != len(shape)\n ):\n raise ValueError(\n \"coords is not dict-like, but it has %s items, \"\n \"which does not match the %s dimensions of the \"\n \"data\" % (len(coords), len(shape))\n )\n\n if isinstance(dims, str):\n dims = (dims,)\n\n if dims is None:\n dims = [\"dim_%s\" % n for n in range(len(shape))]\n if coords is not None and len(coords) == len(shape):\n # try to infer dimensions from coords\n if utils.is_dict_like(coords):\n # deprecated in GH993, removed in GH1539\n raise ValueError(\n \"inferring DataArray dimensions from \"\n \"dictionary like ``coords`` is no longer \"\n \"supported. 
Use an explicit list of \"\n \"``dims`` instead.\"\n )\n for n, (dim, coord) in enumerate(zip(dims, coords)):\n coord = as_variable(coord, name=dims[n]).to_index_variable()\n dims[n] = coord.name\n dims = tuple(dims)\n elif len(dims) != len(shape):\n raise ValueError(\n \"different number of dimensions on data \"\n \"and dims: %s vs %s\" % (len(shape), len(dims))\n )\n else:\n for d in dims:\n if not isinstance(d, str):\n raise TypeError(\"dimension %s is not a string\" % d)\n\n new_coords: Dict[Any, Variable] = {}\n\n if utils.is_dict_like(coords):\n for k, v in coords.items():\n new_coords[k] = as_variable(v, name=k)\n elif coords is not None:\n for dim, coord in zip(dims, coords):\n var = as_variable(coord, name=dim)\n var.dims = (dim,)\n new_coords[dim] = var.to_index_variable()\n\n sizes = dict(zip(dims, shape))\n for k, v in new_coords.items():\n if any(d not in dims for d in v.dims):\n raise ValueError(\n \"coordinate %s has dimensions %s, but these \"\n \"are not a subset of the DataArray \"\n \"dimensions %s\" % (k, v.dims, dims)\n )\n\n for d, s in zip(v.dims, v.shape):\n if s != sizes[d]:\n raise ValueError(\n \"conflicting sizes for dimension %r: \"\n \"length %s on the data but length %s on \"\n \"coordinate %r\" % (d, sizes[d], s, k)\n )\n\n if k in sizes and v.shape != (sizes[k],):\n raise ValueError(\n \"coordinate %r is a DataArray dimension, but \"\n \"it has shape %r rather than expected shape %r \"\n \"matching the dimension size\" % (k, v.shape, (sizes[k],))\n )\n\n assert_unique_multiindex_level_names(new_coords)\n\n return new_coords, dims\n\n\ndef _check_data_shape(data, coords, dims):\n if data is dtypes.NA:\n data = np.nan\n if coords is not None and utils.is_scalar(data, include_0d=False):\n if utils.is_dict_like(coords):\n if dims is None:\n return data\n else:\n data_shape = tuple(\n as_variable(coords[k], k).size if k in coords.keys() else 1\n for k in dims\n )\n else:\n data_shape = tuple(as_variable(coord, \"foo\").size for coord in coords)\n data = np.full(data_shape, data)\n return data\n\n\nclass _LocIndexer:\n __slots__ = (\"data_array\",)\n\n def __init__(self, data_array: \"DataArray\"):\n self.data_array = data_array\n\n def __getitem__(self, key) -> \"DataArray\":\n if not utils.is_dict_like(key):\n # expand the indexer so we can handle Ellipsis\n labels = indexing.expanded_indexer(key, self.data_array.ndim)\n key = dict(zip(self.data_array.dims, labels))\n return self.data_array.sel(**key)\n\n def __setitem__(self, key, value) -> None:\n if not utils.is_dict_like(key):\n # expand the indexer so we can handle Ellipsis\n labels = indexing.expanded_indexer(key, self.data_array.ndim)\n key = dict(zip(self.data_array.dims, labels))\n\n pos_indexers, _ = remap_label_indexers(self.data_array, key)\n self.data_array[pos_indexers] = value\n\n\n# Used as the key corresponding to a DataArray's variable when converting\n# arbitrary DataArray objects to datasets\n_THIS_ARRAY = ReprObject(\"\")\n\n\nclass DataArray(AbstractArray, DataWithCoords):\n \"\"\"N-dimensional array with labeled coordinates and dimensions.\n\n DataArray provides a wrapper around numpy ndarrays that uses labeled\n dimensions and coordinates to support metadata aware operations. 
The API is\n similar to that for the pandas Series or DataFrame, but DataArray objects\n can have any number of dimensions, and their contents have fixed data\n types.\n\n Additional features over raw numpy arrays:\n\n - Apply operations over dimensions by name: ``x.sum('time')``.\n - Select or assign values by integer location (like numpy): ``x[:10]``\n or by label (like pandas): ``x.loc['2014-01-01']`` or\n ``x.sel(time='2014-01-01')``.\n - Mathematical operations (e.g., ``x - y``) vectorize across multiple\n dimensions (known in numpy as \"broadcasting\") based on dimension names,\n regardless of their original order.\n - Keep track of arbitrary metadata in the form of a Python dictionary:\n ``x.attrs``\n - Convert to a pandas Series: ``x.to_series()``.\n\n Getting items from or doing mathematical operations with a DataArray\n always returns another DataArray.\n\n Attributes\n ----------\n dims : tuple\n Dimension names associated with this array.\n values : np.ndarray\n Access or modify DataArray values as a numpy array.\n coords : dict-like\n Dictionary of DataArray objects that label values along each dimension.\n name : str or None\n Name of this array.\n attrs : dict\n Dictionary for holding arbitrary metadata.\n \"\"\"\n\n _accessors: Optional[Dict[str, Any]]\n _coords: Dict[Any, Variable]\n _indexes: Optional[Dict[Hashable, pd.Index]]\n _name: Optional[Hashable]\n _variable: Variable\n\n __slots__ = (\n \"_accessors\",\n \"_coords\",\n \"_file_obj\",\n \"_indexes\",\n \"_name\",\n \"_variable\",\n \"__weakref__\",\n )\n\n _groupby_cls = groupby.DataArrayGroupBy\n _rolling_cls = rolling.DataArrayRolling\n _coarsen_cls = rolling.DataArrayCoarsen\n _resample_cls = resample.DataArrayResample\n\n __default = ReprObject(\"\")\n\n dt = property(DatetimeAccessor)\n\n def __init__(\n self,\n data: Any = dtypes.NA,\n coords: Union[Sequence[Tuple], Mapping[Hashable, Any], None] = None,\n dims: Union[Hashable, Sequence[Hashable], None] = None,\n name: Hashable = None,\n attrs: Mapping = None,\n # deprecated parameters\n encoding=None,\n # internal parameters\n indexes: Dict[Hashable, pd.Index] = None,\n fastpath: bool = False,\n ):\n \"\"\"\n Parameters\n ----------\n data : array_like\n Values for this array. Must be an ``numpy.ndarray``, ndarray like,\n or castable to an ``ndarray``. If a self-described xarray or pandas\n object, attempts are made to use this array's metadata to fill in\n other unspecified arguments. A view of the array's data is used\n instead of a copy if possible.\n coords : sequence or dict of array_like objects, optional\n Coordinates (tick labels) to use for indexing along each dimension.\n The following notations are accepted:\n\n - mapping {dimension name: array-like}\n - sequence of tuples that are valid arguments for xarray.Variable()\n - (dims, data)\n - (dims, data, attrs)\n - (dims, data, attrs, encoding)\n\n Additionally, it is possible to define a coord whose name\n does not match the dimension name, or a coord based on multiple\n dimensions, with one of the following notations:\n\n - mapping {coord name: DataArray}\n - mapping {coord name: Variable}\n - mapping {coord name: (dimension name, array-like)}\n - mapping {coord name: (tuple of dimension names, array-like)}\n\n dims : hashable or sequence of hashable, optional\n Name(s) of the data dimension(s). Must be either a hashable (only\n for 1D data) or a sequence of hashables with length equal to the\n number of dimensions. 
If this argument is omitted, dimension names\n are taken from ``coords`` (if possible) and otherwise default to\n ``['dim_0', ... 'dim_n']``.\n name : str or None, optional\n Name of this array.\n attrs : dict_like or None, optional\n Attributes to assign to the new instance. By default, an empty\n attribute dictionary is initialized.\n \"\"\"\n if encoding is not None:\n warnings.warn(\n \"The `encoding` argument to `DataArray` is deprecated, and . \"\n \"will be removed in 0.15. \"\n \"Instead, specify the encoding when writing to disk or \"\n \"set the `encoding` attribute directly.\",\n FutureWarning,\n stacklevel=2,\n )\n if fastpath:\n variable = data\n assert dims is None\n assert attrs is None\n assert encoding is None\n else:\n # try to fill in arguments from data if they weren't supplied\n if coords is None:\n\n if isinstance(data, DataArray):\n coords = data.coords\n elif isinstance(data, pd.Series):\n coords = [data.index]\n elif isinstance(data, pd.DataFrame):\n coords = [data.index, data.columns]\n elif isinstance(data, (pd.Index, IndexVariable)):\n coords = [data]\n elif isinstance(data, pdcompat.Panel):\n coords = [data.items, data.major_axis, data.minor_axis]\n\n if dims is None:\n dims = getattr(data, \"dims\", getattr(coords, \"dims\", None))\n if name is None:\n name = getattr(data, \"name\", None)\n if attrs is None:\n attrs = getattr(data, \"attrs\", None)\n if encoding is None:\n encoding = getattr(data, \"encoding\", None)\n\n data = _check_data_shape(data, coords, dims)\n data = as_compatible_data(data)\n coords, dims = _infer_coords_and_dims(data.shape, coords, dims)\n variable = Variable(dims, data, attrs, encoding, fastpath=True)\n\n # These fully describe a DataArray\n self._variable = variable\n assert isinstance(coords, dict)\n self._coords = coords\n self._name = name\n self._accessors = None\n\n # TODO(shoyer): document this argument, once it becomes part of the\n # public interface.\n self._indexes = indexes\n\n self._file_obj = None\n\n def _replace(\n self,\n variable: Variable = None,\n coords=None,\n name: Optional[Hashable] = __default,\n ) -> \"DataArray\":\n if variable is None:\n variable = self.variable\n if coords is None:\n coords = self._coords\n if name is self.__default:\n name = self.name\n return type(self)(variable, coords, name=name, fastpath=True)\n\n def _replace_maybe_drop_dims(\n self, variable: Variable, name: Optional[Hashable] = __default\n ) -> \"DataArray\":\n if variable.dims == self.dims and variable.shape == self.shape:\n coords = self._coords.copy()\n elif variable.dims == self.dims:\n # Shape has changed (e.g. 
from reduce(..., keepdims=True)\n new_sizes = dict(zip(self.dims, variable.shape))\n coords = {\n k: v\n for k, v in self._coords.items()\n if v.shape == tuple(new_sizes[d] for d in v.dims)\n }\n else:\n allowed_dims = set(variable.dims)\n coords = {\n k: v for k, v in self._coords.items() if set(v.dims) <= allowed_dims\n }\n return self._replace(variable, coords, name)\n\n def _overwrite_indexes(self, indexes: Mapping[Hashable, Any]) -> \"DataArray\":\n if not len(indexes):\n return self\n coords = self._coords.copy()\n for name, idx in indexes.items():\n coords[name] = IndexVariable(name, idx)\n obj = self._replace(coords=coords)\n\n # switch from dimension to level names, if necessary\n dim_names: Dict[Any, str] = {}\n for dim, idx in indexes.items():\n if not isinstance(idx, pd.MultiIndex) and idx.name != dim:\n dim_names[dim] = idx.name\n if dim_names:\n obj = obj.rename(dim_names)\n return obj\n\n def _to_temp_dataset(self) -> Dataset:\n return self._to_dataset_whole(name=_THIS_ARRAY, shallow_copy=False)\n\n def _from_temp_dataset(\n self, dataset: Dataset, name: Hashable = __default\n ) -> \"DataArray\":\n variable = dataset._variables.pop(_THIS_ARRAY)\n coords = dataset._variables\n return self._replace(variable, coords, name)\n\n def _to_dataset_split(self, dim: Hashable) -> Dataset:\n def subset(dim, label):\n array = self.loc[{dim: label}]\n if dim in array.coords:\n del array.coords[dim]\n array.attrs = {}\n return array\n\n variables = {label: subset(dim, label) for label in self.get_index(dim)}\n\n coords = self.coords.to_dataset()\n if dim in coords:\n del coords[dim]\n return Dataset(variables, coords, self.attrs)\n\n def _to_dataset_whole(\n self, name: Hashable = None, shallow_copy: bool = True\n ) -> Dataset:\n if name is None:\n name = self.name\n if name is None:\n raise ValueError(\n \"unable to convert unnamed DataArray to a \"\n \"Dataset without providing an explicit name\"\n )\n if name in self.coords:\n raise ValueError(\n \"cannot create a Dataset from a DataArray with \"\n \"the same name as one of its coordinates\"\n )\n # use private APIs for speed: this is called by _to_temp_dataset(),\n # which is used in the guts of a lot of operations (e.g., reindex)\n variables = self._coords.copy()\n variables[name] = self.variable\n if shallow_copy:\n for k in variables:\n variables[k] = variables[k].copy(deep=False)\n coord_names = set(self._coords)\n dataset = Dataset._from_vars_and_coord_names(variables, coord_names)\n return dataset\n\n def to_dataset(self, dim: Hashable = None, *, name: Hashable = None) -> Dataset:\n \"\"\"Convert a DataArray to a Dataset.\n\n Parameters\n ----------\n dim : hashable, optional\n Name of the dimension on this array along which to split this array\n into separate variables. If not provided, this array is converted\n into a Dataset of one variable.\n name : hashable, optional\n Name to substitute for this array's name. Only valid if ``dim`` is\n not provided.\n\n Returns\n -------\n dataset : Dataset\n \"\"\"\n if dim is not None and dim not in self.dims:\n raise TypeError(\n \"{} is not a dim. 
If supplying a ``name``, pass as a kwarg.\".format(dim)\n )\n\n if dim is not None:\n if name is not None:\n raise TypeError(\"cannot supply both dim and name arguments\")\n return self._to_dataset_split(dim)\n else:\n return self._to_dataset_whole(name)\n\n @property\n def name(self) -> Optional[Hashable]:\n \"\"\"The name of this array.\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, value: Optional[Hashable]) -> None:\n self._name = value\n\n @property\n def variable(self) -> Variable:\n \"\"\"Low level interface to the Variable object for this DataArray.\"\"\"\n return self._variable\n\n @property\n def dtype(self) -> np.dtype:\n return self.variable.dtype\n\n @property\n def shape(self) -> Tuple[int, ...]:\n return self.variable.shape\n\n @property\n def size(self) -> int:\n return self.variable.size\n\n @property\n def nbytes(self) -> int:\n return self.variable.nbytes\n\n @property\n def ndim(self) -> int:\n return self.variable.ndim\n\n def __len__(self) -> int:\n return len(self.variable)\n\n @property\n def data(self) -> Any:\n \"\"\"The array's data as a dask or numpy array\n \"\"\"\n return self.variable.data\n\n @data.setter\n def data(self, value: Any) -> None:\n self.variable.data = value\n\n @property\n def values(self) -> np.ndarray:\n \"\"\"The array's data as a numpy.ndarray\"\"\"\n return self.variable.values\n\n @values.setter\n def values(self, value: Any) -> None:\n self.variable.values = value\n\n @property\n def _in_memory(self) -> bool:\n return self.variable._in_memory\n\n def to_index(self) -> pd.Index:\n \"\"\"Convert this variable to a pandas.Index. Only possible for 1D\n arrays.\n \"\"\"\n return self.variable.to_index()\n\n @property\n def dims(self) -> Tuple[Hashable, ...]:\n \"\"\"Tuple of dimension names associated with this array.\n\n Note that the type of this property is inconsistent with\n `Dataset.dims`. See `Dataset.sizes` and `DataArray.sizes` for\n consistently named properties.\n \"\"\"\n return self.variable.dims\n\n @dims.setter\n def dims(self, value):\n raise AttributeError(\n \"you cannot assign dims on a DataArray. 
Use \"\n \".rename() or .swap_dims() instead.\"\n )\n\n def _item_key_to_dict(self, key: Any) -> Mapping[Hashable, Any]:\n if utils.is_dict_like(key):\n return key\n else:\n key = indexing.expanded_indexer(key, self.ndim)\n return dict(zip(self.dims, key))\n\n @property\n def _level_coords(self) -> Dict[Hashable, Hashable]:\n \"\"\"Return a mapping of all MultiIndex levels and their corresponding\n coordinate name.\n \"\"\"\n level_coords: Dict[Hashable, Hashable] = {}\n\n for cname, var in self._coords.items():\n if var.ndim == 1 and isinstance(var, IndexVariable):\n level_names = var.level_names\n if level_names is not None:\n dim, = var.dims\n level_coords.update({lname: dim for lname in level_names})\n return level_coords\n\n def _getitem_coord(self, key):\n from .dataset import _get_virtual_variable\n\n try:\n var = self._coords[key]\n except KeyError:\n dim_sizes = dict(zip(self.dims, self.shape))\n _, key, var = _get_virtual_variable(\n self._coords, key, self._level_coords, dim_sizes\n )\n\n return self._replace_maybe_drop_dims(var, name=key)\n\n def __getitem__(self, key: Any) -> \"DataArray\":\n if isinstance(key, str):\n return self._getitem_coord(key)\n else:\n # xarray-style array indexing\n return self.isel(indexers=self._item_key_to_dict(key))\n\n def __setitem__(self, key: Any, value: Any) -> None:\n if isinstance(key, str):\n self.coords[key] = value\n else:\n # Coordinates in key, value and self[key] should be consistent.\n # TODO Coordinate consistency in key is checked here, but it\n # causes unnecessary indexing. It should be optimized.\n obj = self[key]\n if isinstance(value, DataArray):\n assert_coordinate_consistent(value, obj.coords.variables)\n # DataArray key -> Variable key\n key = {\n k: v.variable if isinstance(v, DataArray) else v\n for k, v in self._item_key_to_dict(key).items()\n }\n self.variable[key] = value\n\n def __delitem__(self, key: Any) -> None:\n del self.coords[key]\n\n @property\n def _attr_sources(self) -> List[Mapping[Hashable, Any]]:\n \"\"\"List of places to look-up items for attribute-style access\n \"\"\"\n return self._item_sources + [self.attrs]\n\n @property\n def _item_sources(self) -> List[Mapping[Hashable, Any]]:\n \"\"\"List of places to look-up items for key-completion\n \"\"\"\n return [\n self.coords,\n {d: self.coords[d] for d in self.dims},\n LevelCoordinatesSource(self),\n ]\n\n def __contains__(self, key: Any) -> bool:\n return key in self.data\n\n @property\n def loc(self) -> _LocIndexer:\n \"\"\"Attribute for location based indexing like pandas.\n \"\"\"\n return _LocIndexer(self)\n\n @property\n def attrs(self) -> Dict[Hashable, Any]:\n \"\"\"Dictionary storing arbitrary metadata with this array.\"\"\"\n return self.variable.attrs\n\n @attrs.setter\n def attrs(self, value: Mapping[Hashable, Any]) -> None:\n # Disable type checking to work around mypy bug - see mypy#4167\n self.variable.attrs = value # type: ignore\n\n @property\n def encoding(self) -> Dict[Hashable, Any]:\n \"\"\"Dictionary of format-specific settings for how this array should be\n serialized.\"\"\"\n return self.variable.encoding\n\n @encoding.setter\n def encoding(self, value: Mapping[Hashable, Any]) -> None:\n self.variable.encoding = value\n\n @property\n def indexes(self) -> Indexes:\n \"\"\"Mapping of pandas.Index objects used for label based indexing\n \"\"\"\n if self._indexes is None:\n self._indexes = default_indexes(self._coords, self.dims)\n return Indexes(self._indexes)\n\n @property\n def coords(self) -> DataArrayCoordinates:\n 
\"\"\"Dictionary-like container of coordinate arrays.\n \"\"\"\n return DataArrayCoordinates(self)\n\n def reset_coords(\n self,\n names: Union[Iterable[Hashable], Hashable, None] = None,\n drop: bool = False,\n inplace: bool = None,\n ) -> Union[None, \"DataArray\", Dataset]:\n \"\"\"Given names of coordinates, reset them to become variables.\n\n Parameters\n ----------\n names : hashable or iterable of hashables, optional\n Name(s) of non-index coordinates in this dataset to reset into\n variables. By default, all non-index coordinates are reset.\n drop : bool, optional\n If True, remove coordinates instead of converting them into\n variables.\n\n Returns\n -------\n Dataset, or DataArray if ``drop == True``\n \"\"\"\n _check_inplace(inplace)\n if names is None:\n names = set(self.coords) - set(self.dims)\n dataset = self.coords.to_dataset().reset_coords(names, drop)\n if drop:\n return self._replace(coords=dataset._variables)\n else:\n if self.name is None:\n raise ValueError(\n \"cannot reset_coords with drop=False on an unnamed DataArrray\"\n )\n dataset[self.name] = self.variable\n return dataset\n\n def __dask_graph__(self):\n return self._to_temp_dataset().__dask_graph__()\n\n def __dask_keys__(self):\n return self._to_temp_dataset().__dask_keys__()\n\n def __dask_layers__(self):\n return self._to_temp_dataset().__dask_layers__()\n\n @property\n def __dask_optimize__(self):\n return self._to_temp_dataset().__dask_optimize__\n\n @property\n def __dask_scheduler__(self):\n return self._to_temp_dataset().__dask_scheduler__\n\n def __dask_postcompute__(self):\n func, args = self._to_temp_dataset().__dask_postcompute__()\n return self._dask_finalize, (func, args, self.name)\n\n def __dask_postpersist__(self):\n func, args = self._to_temp_dataset().__dask_postpersist__()\n return self._dask_finalize, (func, args, self.name)\n\n @staticmethod\n def _dask_finalize(results, func, args, name):\n ds = func(results, *args)\n variable = ds._variables.pop(_THIS_ARRAY)\n coords = ds._variables\n return DataArray(variable, coords, name=name, fastpath=True)\n\n def load(self, **kwargs) -> \"DataArray\":\n \"\"\"Manually trigger loading of this array's data from disk or a\n remote source into memory and return this array.\n\n Normally, it should not be necessary to call this method in user code,\n because all xarray functions should either work on deferred data or\n load data automatically. However, this method can be necessary when\n working with many file objects on disk.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.array.compute``.\n\n See Also\n --------\n dask.array.compute\n \"\"\"\n ds = self._to_temp_dataset().load(**kwargs)\n new = self._from_temp_dataset(ds)\n self._variable = new._variable\n self._coords = new._coords\n return self\n\n def compute(self, **kwargs) -> \"DataArray\":\n \"\"\"Manually trigger loading of this array's data from disk or a\n remote source into memory and return a new array. The original is\n left unaltered.\n\n Normally, it should not be necessary to call this method in user code,\n because all xarray functions should either work on deferred data or\n load data automatically. 
However, this method can be necessary when\n working with many file objects on disk.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.array.compute``.\n\n See Also\n --------\n dask.array.compute\n \"\"\"\n new = self.copy(deep=False)\n return new.load(**kwargs)\n\n def persist(self, **kwargs) -> \"DataArray\":\n \"\"\" Trigger computation in constituent dask arrays\n\n This keeps them as dask arrays but encourages them to keep data in\n memory. This is particularly useful when on a distributed machine.\n When on a single machine consider using ``.compute()`` instead.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.persist``.\n\n See Also\n --------\n dask.persist\n \"\"\"\n ds = self._to_temp_dataset().persist(**kwargs)\n return self._from_temp_dataset(ds)\n\n def copy(self, deep: bool = True, data: Any = None) -> \"DataArray\":\n \"\"\"Returns a copy of this array.\n\n If `deep=True`, a deep copy is made of the data array.\n Otherwise, a shallow copy is made, so each variable in the new\n array's dataset is also a variable in this array's dataset.\n\n Use `data` to create a new object with the same structure as\n original but entirely new data.\n\n Parameters\n ----------\n deep : bool, optional\n Whether the data array and its coordinates are loaded into memory\n and copied onto the new object. Default is True.\n data : array_like, optional\n Data to use in the new object. Must have same shape as original.\n When `data` is used, `deep` is ignored for all data variables,\n and only used for coords.\n\n Returns\n -------\n object : DataArray\n New object with dimensions, attributes, coordinates, name,\n encoding, and optionally data copied from original.\n\n Examples\n --------\n\n Shallow versus deep copy\n\n >>> array = xr.DataArray([1, 2, 3], dims='x',\n ... coords={'x': ['a', 'b', 'c']})\n >>> array.copy()\n \n array([1, 2, 3])\n Coordinates:\n * x (x) >> array_0 = array.copy(deep=False)\n >>> array_0[0] = 7\n >>> array_0\n \n array([7, 2, 3])\n Coordinates:\n * x (x) >> array\n \n array([7, 2, 3])\n Coordinates:\n * x (x) >> array.copy(data=[0.1, 0.2, 0.3])\n \n array([ 0.1, 0.2, 0.3])\n Coordinates:\n * x (x) >> array\n \n array([1, 2, 3])\n Coordinates:\n * x (x) \"DataArray\":\n return self.copy(deep=False)\n\n def __deepcopy__(self, memo=None) -> \"DataArray\":\n # memo does nothing but is required for compatibility with\n # copy.deepcopy\n return self.copy(deep=True)\n\n # mutable objects should not be hashable\n # https://github.com/python/mypy/issues/4266\n __hash__ = None # type: ignore\n\n @property\n def chunks(self) -> Optional[Tuple[Tuple[int, ...], ...]]:\n \"\"\"Block dimensions for this array's data or None if it's not a dask\n array.\n \"\"\"\n return self.variable.chunks\n\n def chunk(\n self,\n chunks: Union[\n None,\n Number,\n Tuple[Number, ...],\n Tuple[Tuple[Number, ...], ...],\n Mapping[Hashable, Union[None, Number, Tuple[Number, ...]]],\n ] = None,\n name_prefix: str = \"xarray-\",\n token: str = None,\n lock: bool = False,\n ) -> \"DataArray\":\n \"\"\"Coerce this array's data into a dask arrays with the given chunks.\n\n If this variable is a non-dask array, it will be converted to dask\n array. 
If it's a dask array, it will be rechunked to the given chunk\n sizes.\n\n If neither chunks is not provided for one or more dimensions, chunk\n sizes along that dimension will not be updated; non-dask arrays will be\n converted into dask arrays with a single block.\n\n Parameters\n ----------\n chunks : int, tuple or mapping, optional\n Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or\n ``{'x': 5, 'y': 5}``.\n name_prefix : str, optional\n Prefix for the name of the new dask array.\n token : str, optional\n Token uniquely identifying this array.\n lock : optional\n Passed on to :py:func:`dask.array.from_array`, if the array is not\n already as dask array.\n\n Returns\n -------\n chunked : xarray.DataArray\n \"\"\"\n if isinstance(chunks, (tuple, list)):\n chunks = dict(zip(self.dims, chunks))\n\n ds = self._to_temp_dataset().chunk(\n chunks, name_prefix=name_prefix, token=token, lock=lock\n )\n return self._from_temp_dataset(ds)\n\n def isel(\n self,\n indexers: Mapping[Hashable, Any] = None,\n drop: bool = False,\n **indexers_kwargs: Any\n ) -> \"DataArray\":\n \"\"\"Return a new DataArray whose data is given by integer indexing\n along the specified dimension(s).\n\n See Also\n --------\n Dataset.isel\n DataArray.sel\n \"\"\"\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"isel\")\n ds = self._to_temp_dataset().isel(drop=drop, indexers=indexers)\n return self._from_temp_dataset(ds)\n\n def sel(\n self,\n indexers: Mapping[Hashable, Any] = None,\n method: str = None,\n tolerance=None,\n drop: bool = False,\n **indexers_kwargs: Any\n ) -> \"DataArray\":\n \"\"\"Return a new DataArray whose data is given by selecting index\n labels along the specified dimension(s).\n\n .. warning::\n\n Do not try to assign values when using any of the indexing methods\n ``isel`` or ``sel``::\n\n da = xr.DataArray([0, 1, 2, 3], dims=['x'])\n # DO NOT do this\n da.isel(x=[0, 1, 2])[1] = -1\n\n Assigning values with the chained indexing using ``.sel`` or\n ``.isel`` fails silently.\n\n See Also\n --------\n Dataset.sel\n DataArray.isel\n\n \"\"\"\n ds = self._to_temp_dataset().sel(\n indexers=indexers,\n drop=drop,\n method=method,\n tolerance=tolerance,\n **indexers_kwargs\n )\n return self._from_temp_dataset(ds)\n\n def head(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any\n ) -> \"DataArray\":\n \"\"\"Return a new DataArray whose data is given by the the first `n`\n values along the specified dimension(s). Default `n` = 5\n\n See Also\n --------\n Dataset.head\n DataArray.tail\n DataArray.thin\n \"\"\"\n ds = self._to_temp_dataset().head(indexers, **indexers_kwargs)\n return self._from_temp_dataset(ds)\n\n def tail(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any\n ) -> \"DataArray\":\n \"\"\"Return a new DataArray whose data is given by the the last `n`\n values along the specified dimension(s). Default `n` = 5\n\n See Also\n --------\n Dataset.tail\n DataArray.head\n DataArray.thin\n \"\"\"\n ds = self._to_temp_dataset().tail(indexers, **indexers_kwargs)\n return self._from_temp_dataset(ds)\n\n def thin(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any\n ) -> \"DataArray\":\n \"\"\"Return a new DataArray whose data is given by each `n` value\n along the specified dimension(s). 
Default `n` = 5\n\n See Also\n --------\n Dataset.thin\n DataArray.head\n DataArray.tail\n \"\"\"\n ds = self._to_temp_dataset().thin(indexers, **indexers_kwargs)\n return self._from_temp_dataset(ds)\n\n def broadcast_like(\n self, other: Union[\"DataArray\", Dataset], exclude: Iterable[Hashable] = None\n ) -> \"DataArray\":\n \"\"\"Broadcast this DataArray against another Dataset or DataArray.\n\n This is equivalent to xr.broadcast(other, self)[1]\n\n xarray objects are broadcast against each other in arithmetic\n operations, so this method is not be necessary for most uses.\n\n If no change is needed, the input data is returned to the output\n without being copied.\n\n If new coords are added by the broadcast, their values are\n NaN filled.\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object against which to broadcast this array.\n exclude : iterable of hashable, optional\n Dimensions that must not be broadcasted\n\n Returns\n -------\n new_da: xr.DataArray\n\n Examples\n --------\n\n >>> arr1\n \n array([[0.840235, 0.215216, 0.77917 ],\n [0.726351, 0.543824, 0.875115]])\n Coordinates:\n * x (x) >> arr2\n \n array([[0.612611, 0.125753],\n [0.853181, 0.948818],\n [0.180885, 0.33363 ]])\n Coordinates:\n * x (x) >> arr1.broadcast_like(arr2)\n \n array([[0.840235, 0.215216, 0.77917 ],\n [0.726351, 0.543824, 0.875115],\n [ nan, nan, nan]])\n Coordinates:\n * x (x) object 'a' 'b' 'c'\n * y (y) object 'a' 'b' 'c'\n \"\"\"\n if exclude is None:\n exclude = set()\n else:\n exclude = set(exclude)\n args = align(other, self, join=\"outer\", copy=False, exclude=exclude)\n\n dims_map, common_coords = _get_broadcast_dims_map_common_coords(args, exclude)\n\n return _broadcast_helper(args[1], exclude, dims_map, common_coords)\n\n def reindex_like(\n self,\n other: Union[\"DataArray\", Dataset],\n method: str = None,\n tolerance=None,\n copy: bool = True,\n fill_value=dtypes.NA,\n ) -> \"DataArray\":\n \"\"\"Conform this object onto the indexes of another object, filling in\n missing values with ``fill_value``. The default fill value is NaN.\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object with an 'indexes' attribute giving a mapping from dimension\n names to pandas.Index objects, which provides coordinates upon\n which to index the variables in this dataset. The indexes on this\n other object need not be the same as the indexes on this\n dataset. Any mis-matched index values will be filled in with\n NaN, and any mis-matched dimension names will simply be ignored.\n method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional\n Method to use for filling index values from other not found on this\n data array:\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n copy : bool, optional\n If ``copy=True``, data in the return value is always copied. If\n ``copy=False`` and reindexing is unnecessary, or can be performed\n with only slice operations, then the output may share memory with\n the input. 
In either case, a new xarray object is always returned.\n fill_value : scalar, optional\n Value to use for newly missing values\n\n Returns\n -------\n reindexed : DataArray\n Another dataset array, with this array's data but coordinates from\n the other object.\n\n See Also\n --------\n DataArray.reindex\n align\n \"\"\"\n indexers = reindex_like_indexers(self, other)\n return self.reindex(\n indexers=indexers,\n method=method,\n tolerance=tolerance,\n copy=copy,\n fill_value=fill_value,\n )\n\n def reindex(\n self,\n indexers: Mapping[Hashable, Any] = None,\n method: str = None,\n tolerance=None,\n copy: bool = True,\n fill_value=dtypes.NA,\n **indexers_kwargs: Any\n ) -> \"DataArray\":\n \"\"\"Conform this object onto the indexes of another object, filling in\n missing values with ``fill_value``. The default fill value is NaN.\n\n Parameters\n ----------\n indexers : dict, optional\n Dictionary with keys given by dimension names and values given by\n arrays of coordinates tick labels. Any mis-matched coordinate\n values will be filled in with NaN, and any mis-matched dimension\n names will simply be ignored.\n One of indexers or indexers_kwargs must be provided.\n copy : bool, optional\n If ``copy=True``, data in the return value is always copied. If\n ``copy=False`` and reindexing is unnecessary, or can be performed\n with only slice operations, then the output may share memory with\n the input. In either case, a new xarray object is always returned.\n method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional\n Method to use for filling index values in ``indexers`` not found on\n this data array:\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n fill_value : scalar, optional\n Value to use for newly missing values\n **indexers_kwarg : {dim: indexer, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n Returns\n -------\n reindexed : DataArray\n Another dataset array, with this array's data but replaced\n coordinates.\n\n See Also\n --------\n DataArray.reindex_like\n align\n \"\"\"\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"reindex\")\n ds = self._to_temp_dataset().reindex(\n indexers=indexers,\n method=method,\n tolerance=tolerance,\n copy=copy,\n fill_value=fill_value,\n )\n return self._from_temp_dataset(ds)\n\n def interp(\n self,\n coords: Mapping[Hashable, Any] = None,\n method: str = \"linear\",\n assume_sorted: bool = False,\n kwargs: Mapping[str, Any] = None,\n **coords_kwargs: Any\n ) -> \"DataArray\":\n \"\"\" Multidimensional interpolation of variables.\n\n coords : dict, optional\n Mapping from dimension names to the new coordinates.\n new coordinate can be an scalar, array-like or DataArray.\n If DataArrays are passed as new coordates, their dimensions are\n used for the broadcasting.\n method: {'linear', 'nearest'} for multidimensional array,\n {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}\n for 1-dimensional array.\n assume_sorted: boolean, optional\n If False, values of x can be in any order and they are sorted\n first. 
If True, x has to be an array of monotonically increasing\n values.\n kwargs: dictionary\n Additional keyword passed to scipy's interpolator.\n **coords_kwarg : {dim: coordinate, ...}, optional\n The keyword arguments form of ``coords``.\n One of coords or coords_kwargs must be provided.\n\n Returns\n -------\n interpolated: xr.DataArray\n New dataarray on the new coordinates.\n\n Notes\n -----\n scipy is required.\n\n See Also\n --------\n scipy.interpolate.interp1d\n scipy.interpolate.interpn\n\n Examples\n --------\n >>> da = xr.DataArray([1, 3], [('x', np.arange(2))])\n >>> da.interp(x=0.5)\n \n array(2.0)\n Coordinates:\n x float64 0.5\n \"\"\"\n if self.dtype.kind not in \"uifc\":\n raise TypeError(\n \"interp only works for a numeric type array. \"\n \"Given {}.\".format(self.dtype)\n )\n ds = self._to_temp_dataset().interp(\n coords,\n method=method,\n kwargs=kwargs,\n assume_sorted=assume_sorted,\n **coords_kwargs\n )\n return self._from_temp_dataset(ds)\n\n def interp_like(\n self,\n other: Union[\"DataArray\", Dataset],\n method: str = \"linear\",\n assume_sorted: bool = False,\n kwargs: Mapping[str, Any] = None,\n ) -> \"DataArray\":\n \"\"\"Interpolate this object onto the coordinates of another object,\n filling out of range values with NaN.\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object with an 'indexes' attribute giving a mapping from dimension\n names to an 1d array-like, which provides coordinates upon\n which to index the variables in this dataset.\n method: string, optional.\n {'linear', 'nearest'} for multidimensional array,\n {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}\n for 1-dimensional array. 'linear' is used by default.\n assume_sorted: boolean, optional\n If False, values of coordinates that are interpolated over can be\n in any order and they are sorted first. If True, interpolated\n coordinates are assumed to be an array of monotonically increasing\n values.\n kwargs: dictionary, optional\n Additional keyword passed to scipy's interpolator.\n\n Returns\n -------\n interpolated: xr.DataArray\n Another dataarray by interpolating this dataarray's data along the\n coordinates of the other object.\n\n Notes\n -----\n scipy is required.\n If the dataarray has object-type coordinates, reindex is used for these\n coordinates instead of the interpolation.\n\n See Also\n --------\n DataArray.interp\n DataArray.reindex_like\n \"\"\"\n if self.dtype.kind not in \"uifc\":\n raise TypeError(\n \"interp only works for a numeric type array. \"\n \"Given {}.\".format(self.dtype)\n )\n ds = self._to_temp_dataset().interp_like(\n other, method=method, kwargs=kwargs, assume_sorted=assume_sorted\n )\n return self._from_temp_dataset(ds)\n\n def rename(\n self,\n new_name_or_name_dict: Union[Hashable, Mapping[Hashable, Hashable]] = None,\n **names: Hashable\n ) -> \"DataArray\":\n \"\"\"Returns a new DataArray with renamed coordinates or a new name.\n\n Parameters\n ----------\n new_name_or_name_dict : str or dict-like, optional\n If the argument is dict-like, it used as a mapping from old\n names to new names for coordinates. 
Otherwise, use the argument\n as the new name for this array.\n **names: hashable, optional\n The keyword arguments form of a mapping from old names to\n new names for coordinates.\n One of new_name_or_name_dict or names must be provided.\n\n Returns\n -------\n renamed : DataArray\n Renamed array or array with renamed coordinates.\n\n See Also\n --------\n Dataset.rename\n DataArray.swap_dims\n \"\"\"\n if names or utils.is_dict_like(new_name_or_name_dict):\n new_name_or_name_dict = cast(\n Mapping[Hashable, Hashable], new_name_or_name_dict\n )\n name_dict = either_dict_or_kwargs(new_name_or_name_dict, names, \"rename\")\n dataset = self._to_temp_dataset().rename(name_dict)\n return self._from_temp_dataset(dataset)\n else:\n new_name_or_name_dict = cast(Hashable, new_name_or_name_dict)\n return self._replace(name=new_name_or_name_dict)\n\n def swap_dims(self, dims_dict: Mapping[Hashable, Hashable]) -> \"DataArray\":\n \"\"\"Returns a new DataArray with swapped dimensions.\n\n Parameters\n ----------\n dims_dict : dict-like\n Dictionary whose keys are current dimension names and whose values\n are new names. Each value must already be a coordinate on this\n array.\n\n Returns\n -------\n swapped : DataArray\n DataArray with swapped dimensions.\n\n Examples\n --------\n >>> arr = xr.DataArray(data=[0, 1], dims=\"x\",\n coords={\"x\": [\"a\", \"b\"], \"y\": (\"x\", [0, 1])})\n >>> arr\n \n array([0, 1])\n Coordinates:\n * x (x) >> arr.swap_dims({\"x\": \"y\"})\n \n array([0, 1])\n Coordinates:\n x (y) \"DataArray\":\n \"\"\"Return a new object with an additional axis (or axes) inserted at\n the corresponding position in the array shape. The new object is a\n view into the underlying array, not a copy.\n\n\n If dim is already a scalar coordinate, it will be promoted to a 1D\n coordinate consisting of a single value.\n\n Parameters\n ----------\n dim : hashable, sequence of hashable, dict, or None\n Dimensions to include on the new variable.\n If provided as str or sequence of str, then dimensions are inserted\n with length 1. If provided as a dict, then the keys are the new\n dimensions and the values are either integers (giving the length of\n the new dimensions) or sequence/ndarray (giving the coordinates of\n the new dimensions).\n axis : integer, list (or tuple) of integers, or None\n Axis position(s) where new axis is to be inserted (position(s) on\n the result array). If a list (or tuple) of integers is passed,\n multiple axes are inserted. In this case, dim arguments should be\n same length list. If axis=None is passed, all the axes will be\n inserted to the start of the result array.\n **dim_kwargs : int or sequence/ndarray\n The keywords are arbitrary dimensions being inserted and the values\n are either the lengths of the new dims (if int is given), or their\n coordinates. 
Note, this is an alternative to passing a dict to the\n dim kwarg and will only be used if dim is None.\n\n Returns\n -------\n expanded : same type as caller\n This object, but with an additional dimension(s).\n \"\"\"\n if isinstance(dim, int):\n raise TypeError(\"dim should be hashable or sequence/mapping of hashables\")\n elif isinstance(dim, Sequence) and not isinstance(dim, str):\n if len(dim) != len(set(dim)):\n raise ValueError(\"dims should not contain duplicate values.\")\n dim = dict.fromkeys(dim, 1)\n elif dim is not None and not isinstance(dim, Mapping):\n dim = {cast(Hashable, dim): 1}\n\n dim = either_dict_or_kwargs(dim, dim_kwargs, \"expand_dims\")\n ds = self._to_temp_dataset().expand_dims(dim, axis)\n return self._from_temp_dataset(ds)\n\n def set_index(\n self,\n indexes: Mapping[Hashable, Union[Hashable, Sequence[Hashable]]] = None,\n append: bool = False,\n inplace: bool = None,\n **indexes_kwargs: Union[Hashable, Sequence[Hashable]]\n ) -> Optional[\"DataArray\"]:\n \"\"\"Set DataArray (multi-)indexes using one or more existing\n coordinates.\n\n Parameters\n ----------\n indexes : {dim: index, ...}\n Mapping from names matching dimensions and values given\n by (lists of) the names of existing coordinates or variables to set\n as new (multi-)index.\n append : bool, optional\n If True, append the supplied index(es) to the existing index(es).\n Otherwise replace the existing index(es) (default).\n **indexes_kwargs: optional\n The keyword arguments form of ``indexes``.\n One of indexes or indexes_kwargs must be provided.\n\n Returns\n -------\n obj : DataArray\n Another DataArray, with this data but replaced coordinates.\n\n Examples\n --------\n >>> arr = xr.DataArray(data=np.ones((2, 3)),\n ... dims=['x', 'y'],\n ... coords={'x':\n ... range(2), 'y':\n ... range(3), 'a': ('x', [3, 4])\n ... 
})\n >>> arr\n \n array([[1., 1., 1.],\n [1., 1., 1.]])\n Coordinates:\n * x (x) int64 0 1\n * y (y) int64 0 1 2\n a (x) int64 3 4\n >>> arr.set_index(x='a')\n \n array([[1., 1., 1.],\n [1., 1., 1.]])\n Coordinates:\n * x (x) int64 3 4\n * y (y) int64 0 1 2\n\n See Also\n --------\n DataArray.reset_index\n \"\"\"\n _check_inplace(inplace)\n indexes = either_dict_or_kwargs(indexes, indexes_kwargs, \"set_index\")\n coords, _ = merge_indexes(indexes, self._coords, set(), append=append)\n return self._replace(coords=coords)\n\n def reset_index(\n self,\n dims_or_levels: Union[Hashable, Sequence[Hashable]],\n drop: bool = False,\n inplace: bool = None,\n ) -> Optional[\"DataArray\"]:\n \"\"\"Reset the specified index(es) or multi-index level(s).\n\n Parameters\n ----------\n dims_or_levels : hashable or sequence of hashables\n Name(s) of the dimension(s) and/or multi-index level(s) that will\n be reset.\n drop : bool, optional\n If True, remove the specified indexes and/or multi-index levels\n instead of extracting them as new coordinates (default: False).\n\n Returns\n -------\n obj : DataArray\n Another dataarray, with this dataarray's data but replaced\n coordinates.\n\n See Also\n --------\n DataArray.set_index\n \"\"\"\n _check_inplace(inplace)\n coords, _ = split_indexes(\n dims_or_levels, self._coords, set(), self._level_coords, drop=drop\n )\n return self._replace(coords=coords)\n\n def reorder_levels(\n self,\n dim_order: Mapping[Hashable, Sequence[int]] = None,\n inplace: bool = None,\n **dim_order_kwargs: Sequence[int]\n ) -> \"DataArray\":\n \"\"\"Rearrange index levels using input order.\n\n Parameters\n ----------\n dim_order : optional\n Mapping from names matching dimensions and values given\n by lists representing new level orders. Every given dimension\n must have a multi-index.\n **dim_order_kwargs: optional\n The keyword arguments form of ``dim_order``.\n One of dim_order or dim_order_kwargs must be provided.\n\n Returns\n -------\n obj : DataArray\n Another dataarray, with this dataarray's data but replaced\n coordinates.\n \"\"\"\n _check_inplace(inplace)\n dim_order = either_dict_or_kwargs(dim_order, dim_order_kwargs, \"reorder_levels\")\n replace_coords = {}\n for dim, order in dim_order.items():\n coord = self._coords[dim]\n index = coord.to_index()\n if not isinstance(index, pd.MultiIndex):\n raise ValueError(\"coordinate %r has no MultiIndex\" % dim)\n replace_coords[dim] = IndexVariable(coord.dims, index.reorder_levels(order))\n coords = self._coords.copy()\n coords.update(replace_coords)\n return self._replace(coords=coords)\n\n def stack(\n self,\n dimensions: Mapping[Hashable, Sequence[Hashable]] = None,\n **dimensions_kwargs: Sequence[Hashable]\n ) -> \"DataArray\":\n \"\"\"\n Stack any number of existing dimensions into a single new dimension.\n\n New dimensions will be added at the end, and the corresponding\n coordinate variables will be combined into a MultiIndex.\n\n Parameters\n ----------\n dimensions : Mapping of the form new_name=(dim1, dim2, ...)\n Names of new dimensions, and the existing dimensions that they\n replace.\n **dimensions_kwargs:\n The keyword arguments form of ``dimensions``.\n One of dimensions or dimensions_kwargs must be provided.\n\n Returns\n -------\n stacked : DataArray\n DataArray with stacked data.\n\n Examples\n --------\n\n >>> arr = DataArray(np.arange(6).reshape(2, 3),\n ... 
coords=[('x', ['a', 'b']), ('y', [0, 1, 2])])\n >>> arr\n \n array([[0, 1, 2],\n [3, 4, 5]])\n Coordinates:\n * x (x) |S1 'a' 'b'\n * y (y) int64 0 1 2\n >>> stacked = arr.stack(z=('x', 'y'))\n >>> stacked.indexes['z']\n MultiIndex(levels=[['a', 'b'], [0, 1, 2]],\n codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],\n names=['x', 'y'])\n\n See also\n --------\n DataArray.unstack\n \"\"\"\n ds = self._to_temp_dataset().stack(dimensions, **dimensions_kwargs)\n return self._from_temp_dataset(ds)\n\n def unstack(\n self, dim: Union[Hashable, Sequence[Hashable], None] = None\n ) -> \"DataArray\":\n \"\"\"\n Unstack existing dimensions corresponding to MultiIndexes into\n multiple new dimensions.\n\n New dimensions will be added at the end.\n\n Parameters\n ----------\n dim : hashable or sequence of hashable, optional\n Dimension(s) over which to unstack. By default unstacks all\n MultiIndexes.\n\n Returns\n -------\n unstacked : DataArray\n Array with unstacked data.\n\n Examples\n --------\n\n >>> arr = DataArray(np.arange(6).reshape(2, 3),\n ... coords=[('x', ['a', 'b']), ('y', [0, 1, 2])])\n >>> arr\n \n array([[0, 1, 2],\n [3, 4, 5]])\n Coordinates:\n * x (x) |S1 'a' 'b'\n * y (y) int64 0 1 2\n >>> stacked = arr.stack(z=('x', 'y'))\n >>> stacked.indexes['z']\n MultiIndex(levels=[['a', 'b'], [0, 1, 2]],\n codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],\n names=['x', 'y'])\n >>> roundtripped = stacked.unstack()\n >>> arr.identical(roundtripped)\n True\n\n See also\n --------\n DataArray.stack\n \"\"\"\n ds = self._to_temp_dataset().unstack(dim)\n return self._from_temp_dataset(ds)\n\n def to_unstacked_dataset(self, dim, level=0):\n \"\"\"Unstack DataArray expanding to Dataset along a given level of a\n stacked coordinate.\n\n This is the inverse operation of Dataset.to_stacked_array.\n\n Parameters\n ----------\n dim : str\n Name of existing dimension to unstack\n level : int or str\n The MultiIndex level to expand to a dataset along. Can either be\n the integer index of the level or its name.\n label : int, default 0\n Label of the level to expand dataset along. Overrides the label\n argument if given.\n\n Returns\n -------\n unstacked: Dataset\n\n Examples\n --------\n >>> import xarray as xr\n >>> arr = DataArray(np.arange(6).reshape(2, 3),\n ... coords=[('x', ['a', 'b']), ('y', [0, 1, 2])])\n >>> data = xr.Dataset({'a': arr, 'b': arr.isel(y=0)})\n >>> data\n \n Dimensions: (x: 2, y: 3)\n Coordinates:\n * x (x) >> stacked = data.to_stacked_array(\"z\", ['y'])\n >>> stacked.indexes['z']\n MultiIndex(levels=[['a', 'b'], [0, 1, 2]],\n labels=[[0, 0, 0, 1], [0, 1, 2, -1]],\n names=['variable', 'y'])\n >>> roundtripped = stacked.to_unstacked_dataset(dim='z')\n >>> data.identical(roundtripped)\n True\n\n See Also\n --------\n Dataset.to_stacked_array\n \"\"\"\n\n idx = self.indexes[dim]\n if not isinstance(idx, pd.MultiIndex):\n raise ValueError(\"'{}' is not a stacked coordinate\".format(dim))\n\n level_number = idx._get_level_number(level)\n variables = idx.levels[level_number]\n variable_dim = idx.names[level_number]\n\n # pull variables out of datarray\n data_dict = {}\n for k in variables:\n data_dict[k] = self.sel({variable_dim: k}).squeeze(drop=True)\n\n # unstacked dataset\n return Dataset(data_dict)\n\n def transpose(self, *dims: Hashable, transpose_coords: bool = None) -> \"DataArray\":\n \"\"\"Return a new DataArray object with transposed dimensions.\n\n Parameters\n ----------\n *dims : hashable, optional\n By default, reverse the dimensions. 
Otherwise, reorder the\n dimensions to this order.\n transpose_coords : boolean, optional\n If True, also transpose the coordinates of this DataArray.\n\n Returns\n -------\n transposed : DataArray\n The returned DataArray's array is transposed.\n\n Notes\n -----\n This operation returns a view of this array's data. It is\n lazy for dask-backed DataArrays but not for numpy-backed DataArrays\n -- the data will be fully loaded.\n\n See Also\n --------\n numpy.transpose\n Dataset.transpose\n \"\"\"\n if dims:\n if set(dims) ^ set(self.dims):\n raise ValueError(\n \"arguments to transpose (%s) must be \"\n \"permuted array dimensions (%s)\" % (dims, tuple(self.dims))\n )\n\n variable = self.variable.transpose(*dims)\n if transpose_coords:\n coords: Dict[Hashable, Variable] = {}\n for name, coord in self.coords.items():\n coord_dims = tuple(dim for dim in dims if dim in coord.dims)\n coords[name] = coord.variable.transpose(*coord_dims)\n return self._replace(variable, coords)\n else:\n if transpose_coords is None and any(self[c].ndim > 1 for c in self.coords):\n warnings.warn(\n \"This DataArray contains multi-dimensional \"\n \"coordinates. In the future, these coordinates \"\n \"will be transposed as well unless you specify \"\n \"transpose_coords=False.\",\n FutureWarning,\n stacklevel=2,\n )\n return self._replace(variable)\n\n @property\n def T(self) -> \"DataArray\":\n return self.transpose()\n\n # Drop coords\n @overload\n def drop(\n self, labels: Union[Hashable, Iterable[Hashable]], *, errors: str = \"raise\"\n ) -> \"DataArray\":\n ...\n\n # Drop index labels along dimension\n @overload # noqa: F811\n def drop(\n self, labels: Any, dim: Hashable, *, errors: str = \"raise\" # array-like\n ) -> \"DataArray\":\n ...\n\n def drop(self, labels, dim=None, *, errors=\"raise\"): # noqa: F811\n \"\"\"Drop coordinates or index labels from this DataArray.\n\n Parameters\n ----------\n labels : hashable or sequence of hashables\n Name(s) of coordinates or index labels to drop.\n If dim is not None, labels can be any array-like.\n dim : hashable, optional\n Dimension along which to drop index labels. By default (if\n ``dim is None``), drops coordinates rather than index labels.\n errors: {'raise', 'ignore'}, optional\n If 'raise' (default), raises a ValueError error if\n any of the coordinates or index labels passed are not\n in the array. If 'ignore', any given labels that are in the\n array are dropped and no error is raised.\n Returns\n -------\n dropped : DataArray\n \"\"\"\n ds = self._to_temp_dataset().drop(labels, dim, errors=errors)\n return self._from_temp_dataset(ds)\n\n def dropna(\n self, dim: Hashable, how: str = \"any\", thresh: int = None\n ) -> \"DataArray\":\n \"\"\"Returns a new array with dropped labels for missing values along\n the provided dimension.\n\n Parameters\n ----------\n dim : hashable\n Dimension along which to drop missing values. 
Dropping along\n multiple dimensions simultaneously is not yet supported.\n how : {'any', 'all'}, optional\n * any : if any NA values are present, drop that label\n * all : if all values are NA, drop that label\n thresh : int, default None\n If supplied, require this many non-NA values.\n\n Returns\n -------\n DataArray\n \"\"\"\n ds = self._to_temp_dataset().dropna(dim, how=how, thresh=thresh)\n return self._from_temp_dataset(ds)\n\n def fillna(self, value: Any) -> \"DataArray\":\n \"\"\"Fill missing values in this object.\n\n This operation follows the normal broadcasting and alignment rules that\n xarray uses for binary arithmetic, except the result is aligned to this\n object (``join='left'``) instead of aligned to the intersection of\n index coordinates (``join='inner'``).\n\n Parameters\n ----------\n value : scalar, ndarray or DataArray\n Used to fill all matching missing values in this array. If the\n argument is a DataArray, it is first aligned with (reindexed to)\n this array.\n\n Returns\n -------\n DataArray\n \"\"\"\n if utils.is_dict_like(value):\n raise TypeError(\n \"cannot provide fill value as a dictionary with \"\n \"fillna on a DataArray\"\n )\n out = ops.fillna(self, value)\n return out\n\n def interpolate_na(\n self,\n dim=None,\n method: str = \"linear\",\n limit: int = None,\n use_coordinate: Union[bool, str] = True,\n **kwargs: Any\n ) -> \"DataArray\":\n \"\"\"Interpolate values according to different methods.\n\n Parameters\n ----------\n dim : str\n Specifies the dimension along which to interpolate.\n method : {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',\n 'polynomial', 'barycentric', 'krog', 'pchip',\n 'spline', 'akima'}, optional\n String indicating which method to use for interpolation:\n\n - 'linear': linear interpolation (Default). Additional keyword\n arguments are passed to ``numpy.interp``\n - 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',\n 'polynomial': are passed to ``scipy.interpolate.interp1d``. If\n method=='polynomial', the ``order`` keyword argument must also be\n provided.\n - 'barycentric', 'krog', 'pchip', 'spline', and `akima`: use their\n respective``scipy.interpolate`` classes.\n use_coordinate : boolean or str, default True\n Specifies which index to use as the x values in the interpolation\n formulated as `y = f(x)`. If False, values are treated as if\n eqaully-spaced along `dim`. If True, the IndexVariable `dim` is\n used. If use_coordinate is a string, it specifies the name of a\n coordinate variariable to use as the index.\n limit : int, default None\n Maximum number of consecutive NaNs to fill. Must be greater than 0\n or None for no limit.\n\n Returns\n -------\n DataArray\n\n See also\n --------\n numpy.interp\n scipy.interpolate\n \"\"\"\n from .missing import interp_na\n\n return interp_na(\n self,\n dim=dim,\n method=method,\n limit=limit,\n use_coordinate=use_coordinate,\n **kwargs\n )\n\n def ffill(self, dim: Hashable, limit: int = None) -> \"DataArray\":\n \"\"\"Fill NaN values by propogating values forward\n\n *Requires bottleneck.*\n\n Parameters\n ----------\n dim : hashable\n Specifies the dimension along which to propagate values when\n filling.\n limit : int, default None\n The maximum number of consecutive NaN values to forward fill. In\n other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. 
Must be greater\n than 0 or None for no limit.\n\n Returns\n -------\n DataArray\n \"\"\"\n from .missing import ffill\n\n return ffill(self, dim, limit=limit)\n\n def bfill(self, dim: Hashable, limit: int = None) -> \"DataArray\":\n \"\"\"Fill NaN values by propogating values backward\n\n *Requires bottleneck.*\n\n Parameters\n ----------\n dim : str\n Specifies the dimension along which to propagate values when\n filling.\n limit : int, default None\n The maximum number of consecutive NaN values to backward fill. In\n other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. Must be greater\n than 0 or None for no limit.\n\n Returns\n -------\n DataArray\n \"\"\"\n from .missing import bfill\n\n return bfill(self, dim, limit=limit)\n\n def combine_first(self, other: \"DataArray\") -> \"DataArray\":\n \"\"\"Combine two DataArray objects, with union of coordinates.\n\n This operation follows the normal broadcasting and alignment rules of\n ``join='outer'``. Default to non-null values of array calling the\n method. Use np.nan to fill in vacant cells after alignment.\n\n Parameters\n ----------\n other : DataArray\n Used to fill all matching missing values in this array.\n\n Returns\n -------\n DataArray\n \"\"\"\n return ops.fillna(self, other, join=\"outer\")\n\n def reduce(\n self,\n func: Callable[..., Any],\n dim: Union[None, Hashable, Sequence[Hashable]] = None,\n axis: Union[None, int, Sequence[int]] = None,\n keep_attrs: bool = None,\n keepdims: bool = False,\n **kwargs: Any\n ) -> \"DataArray\":\n \"\"\"Reduce this array by applying `func` along some dimension(s).\n\n Parameters\n ----------\n func : function\n Function which can be called in the form\n `f(x, axis=axis, **kwargs)` to return the result of reducing an\n np.ndarray over an integer valued axis.\n dim : hashable or sequence of hashables, optional\n Dimension(s) over which to apply `func`.\n axis : int or sequence of int, optional\n Axis(es) over which to repeatedly apply `func`. Only one of the\n 'dim' and 'axis' arguments can be supplied. If neither are\n supplied, then the reduction is calculated over the flattened array\n (by calling `f(x)` without an axis argument).\n keep_attrs : bool, optional\n If True, the variable's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n keepdims : bool, default False\n If True, the dimensions which are reduced are left in the result\n as dimensions of size one. 
Coordinates that use these dimensions\n are removed.\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : DataArray\n DataArray with this object's array replaced with an array with\n summarized data and the indicated dimension(s) removed.\n \"\"\"\n\n var = self.variable.reduce(func, dim, axis, keep_attrs, keepdims, **kwargs)\n return self._replace_maybe_drop_dims(var)\n\n def to_pandas(self) -> Union[\"DataArray\", pd.Series, pd.DataFrame]:\n \"\"\"Convert this array into a pandas object with the same shape.\n\n The type of the returned object depends on the number of DataArray\n dimensions:\n\n * 0D -> `xarray.DataArray`\n * 1D -> `pandas.Series`\n * 2D -> `pandas.DataFrame`\n * 3D -> `pandas.Panel` *(deprecated)*\n\n Only works for arrays with 3 or fewer dimensions.\n\n The DataArray constructor performs the inverse transformation.\n \"\"\"\n # TODO: consolidate the info about pandas constructors and the\n # attributes that correspond to their indexes into a separate module?\n constructors = {\n 0: lambda x: x,\n 1: pd.Series,\n 2: pd.DataFrame,\n 3: pdcompat.Panel,\n }\n try:\n constructor = constructors[self.ndim]\n except KeyError:\n raise ValueError(\n \"cannot convert arrays with %s dimensions into \"\n \"pandas objects\" % self.ndim\n )\n indexes = [self.get_index(dim) for dim in self.dims]\n return constructor(self.values, *indexes)\n\n def to_dataframe(self, name: Hashable = None) -> pd.DataFrame:\n \"\"\"Convert this array and its coordinates into a tidy pandas.DataFrame.\n\n The DataFrame is indexed by the Cartesian product of index coordinates\n (in the form of a :py:class:`pandas.MultiIndex`).\n\n Other coordinates are included as columns in the DataFrame.\n \"\"\"\n if name is None:\n name = self.name\n if name is None:\n raise ValueError(\n \"cannot convert an unnamed DataArray to a \"\n \"DataFrame: use the ``name`` parameter\"\n )\n\n dims = dict(zip(self.dims, self.shape))\n # By using a unique name, we can convert a DataArray into a DataFrame\n # even if it shares a name with one of its coordinates.\n # I would normally use unique_name = object() but that results in a\n # dataframe with columns in the wrong order, for reasons I have not\n # been able to debug (possibly a pandas bug?).\n unique_name = \"__unique_name_identifier_z98xfz98xugfg73ho__\"\n ds = self._to_dataset_whole(name=unique_name)\n df = ds._to_dataframe(dims)\n df.columns = [name if c == unique_name else c for c in df.columns]\n return df\n\n def to_series(self) -> pd.Series:\n \"\"\"Convert this array into a pandas.Series.\n\n The Series is indexed by the Cartesian product of index coordinates\n (in the form of a :py:class:`pandas.MultiIndex`).\n \"\"\"\n index = self.coords.to_index()\n return pd.Series(self.values.reshape(-1), index=index, name=self.name)\n\n def to_masked_array(self, copy: bool = True) -> np.ma.MaskedArray:\n \"\"\"Convert this array into a numpy.ma.MaskedArray\n\n Parameters\n ----------\n copy : bool\n If True (default) make a copy of the array in the result. 
If False,\n a MaskedArray view of DataArray.values is returned.\n\n Returns\n -------\n result : MaskedArray\n Masked where invalid values (nan or inf) occur.\n \"\"\"\n values = self.values # only compute lazy arrays once\n isnull = pd.isnull(values)\n return np.ma.MaskedArray(data=values, mask=isnull, copy=copy)\n\n def to_netcdf(self, *args, **kwargs) -> Union[bytes, \"Delayed\", None]:\n \"\"\"Write DataArray contents to a netCDF file.\n\n All parameters are passed directly to `xarray.Dataset.to_netcdf`.\n\n Notes\n -----\n Only xarray.Dataset objects can be written to netCDF files, so\n the xarray.DataArray is converted to a xarray.Dataset object\n containing a single variable. If the DataArray has no name, or if the\n name is the same as a co-ordinate name, then it is given the name\n '__xarray_dataarray_variable__'.\n \"\"\"\n from ..backends.api import DATAARRAY_NAME, DATAARRAY_VARIABLE\n\n if self.name is None:\n # If no name is set then use a generic xarray name\n dataset = self.to_dataset(name=DATAARRAY_VARIABLE)\n elif self.name in self.coords or self.name in self.dims:\n # The name is the same as one of the coords names, which netCDF\n # doesn't support, so rename it but keep track of the old name\n dataset = self.to_dataset(name=DATAARRAY_VARIABLE)\n dataset.attrs[DATAARRAY_NAME] = self.name\n else:\n # No problems with the name - so we're fine!\n dataset = self.to_dataset()\n\n return dataset.to_netcdf(*args, **kwargs)\n\n def to_dict(self, data: bool = True) -> dict:\n \"\"\"\n Convert this xarray.DataArray into a dictionary following xarray\n naming conventions.\n\n Converts all variables and attributes to native Python objects.\n Useful for coverting to json. To avoid datetime incompatibility\n use decode_times=False kwarg in xarrray.open_dataset.\n\n Parameters\n ----------\n data : bool, optional\n Whether to include the actual data in the dictionary. 
When set to\n False, returns just the schema.\n\n See also\n --------\n DataArray.from_dict\n \"\"\"\n d = self.variable.to_dict(data=data)\n d.update({\"coords\": {}, \"name\": self.name})\n for k in self.coords:\n d[\"coords\"][k] = self.coords[k].variable.to_dict(data=data)\n return d\n\n @classmethod\n def from_dict(cls, d: dict) -> \"DataArray\":\n \"\"\"\n Convert a dictionary into an xarray.DataArray\n\n Input dict can take several forms::\n\n d = {'dims': ('t'), 'data': x}\n\n d = {'coords': {'t': {'dims': 't', 'data': t,\n 'attrs': {'units':'s'}}},\n 'attrs': {'title': 'air temperature'},\n 'dims': 't',\n 'data': x,\n 'name': 'a'}\n\n where 't' is the name of the dimesion, 'a' is the name of the array,\n and x and t are lists, numpy.arrays, or pandas objects.\n\n Parameters\n ----------\n d : dict, with a minimum structure of {'dims': [..], 'data': [..]}\n\n Returns\n -------\n obj : xarray.DataArray\n\n See also\n --------\n DataArray.to_dict\n Dataset.from_dict\n \"\"\"\n coords = None\n if \"coords\" in d:\n try:\n coords = {\n k: (v[\"dims\"], v[\"data\"], v.get(\"attrs\"))\n for k, v in d[\"coords\"].items()\n }\n except KeyError as e:\n raise ValueError(\n \"cannot convert dict when coords are missing the key \"\n \"'{dims_data}'\".format(dims_data=str(e.args[0]))\n )\n try:\n data = d[\"data\"]\n except KeyError:\n raise ValueError(\"cannot convert dict without the key 'data''\")\n else:\n obj = cls(data, coords, d.get(\"dims\"), d.get(\"name\"), d.get(\"attrs\"))\n return obj\n\n @classmethod\n def from_series(cls, series: pd.Series, sparse: bool = False) -> \"DataArray\":\n \"\"\"Convert a pandas.Series into an xarray.DataArray.\n\n If the series's index is a MultiIndex, it will be expanded into a\n tensor product of one-dimensional coordinates (filling in missing\n values with NaN). 
Thus this operation should be the inverse of the\n `to_series` method.\n\n If sparse=True, creates a sparse array instead of a dense NumPy array.\n Requires the pydata/sparse package.\n\n See also\n --------\n xarray.Dataset.from_dataframe\n \"\"\"\n temp_name = \"__temporary_name\"\n df = pd.DataFrame({temp_name: series})\n ds = Dataset.from_dataframe(df, sparse=sparse)\n result = cast(DataArray, ds[temp_name])\n result.name = series.name\n return result\n\n def to_cdms2(self) -> \"cdms2_Variable\":\n \"\"\"Convert this array into a cdms2.Variable\n \"\"\"\n from ..convert import to_cdms2\n\n return to_cdms2(self)\n\n @classmethod\n def from_cdms2(cls, variable: \"cdms2_Variable\") -> \"DataArray\":\n \"\"\"Convert a cdms2.Variable into an xarray.DataArray\n \"\"\"\n from ..convert import from_cdms2\n\n return from_cdms2(variable)\n\n def to_iris(self) -> \"iris_Cube\":\n \"\"\"Convert this array into a iris.cube.Cube\n \"\"\"\n from ..convert import to_iris\n\n return to_iris(self)\n\n @classmethod\n def from_iris(cls, cube: \"iris_Cube\") -> \"DataArray\":\n \"\"\"Convert a iris.cube.Cube into an xarray.DataArray\n \"\"\"\n from ..convert import from_iris\n\n return from_iris(cube)\n\n def _all_compat(self, other: \"DataArray\", compat_str: str) -> bool:\n \"\"\"Helper function for equals, broadcast_equals, and identical\n \"\"\"\n\n def compat(x, y):\n return getattr(x.variable, compat_str)(y.variable)\n\n return utils.dict_equiv(self.coords, other.coords, compat=compat) and compat(\n self, other\n )\n\n def broadcast_equals(self, other: \"DataArray\") -> bool:\n \"\"\"Two DataArrays are broadcast equal if they are equal after\n broadcasting them against each other such that they have the same\n dimensions.\n\n See Also\n --------\n DataArray.equals\n DataArray.identical\n \"\"\"\n try:\n return self._all_compat(other, \"broadcast_equals\")\n except (TypeError, AttributeError):\n return False\n\n def equals(self, other: \"DataArray\") -> bool:\n \"\"\"True if two DataArrays have the same dimensions, coordinates and\n values; otherwise False.\n\n DataArrays can still be equal (like pandas objects) if they have NaN\n values in the same locations.\n\n This method is necessary because `v1 == v2` for ``DataArray``\n does element-wise comparisons (like numpy.ndarrays).\n\n See Also\n --------\n DataArray.broadcast_equals\n DataArray.identical\n \"\"\"\n try:\n return self._all_compat(other, \"equals\")\n except (TypeError, AttributeError):\n return False\n\n def identical(self, other: \"DataArray\") -> bool:\n \"\"\"Like equals, but also checks the array name and attributes, and\n attributes on all coordinates.\n\n See Also\n --------\n DataArray.broadcast_equals\n DataArray.equal\n \"\"\"\n try:\n return self.name == other.name and self._all_compat(other, \"identical\")\n except (TypeError, AttributeError):\n return False\n\n __default_name = object()\n\n def _result_name(self, other: Any = None) -> Optional[Hashable]:\n # use the same naming heuristics as pandas:\n # https://github.com/ContinuumIO/blaze/issues/458#issuecomment-51936356\n other_name = getattr(other, \"name\", self.__default_name)\n if other_name is self.__default_name or other_name == self.name:\n return self.name\n else:\n return None\n\n def __array_wrap__(self, obj, context=None) -> \"DataArray\":\n new_var = self.variable.__array_wrap__(obj, context)\n return self._replace(new_var)\n\n def __matmul__(self, obj):\n return self.dot(obj)\n\n def __rmatmul__(self, other):\n # currently somewhat duplicative, as only other 
DataArrays are\n # compatible with matmul\n return computation.dot(other, self)\n\n @staticmethod\n def _unary_op(f: Callable[..., Any]) -> Callable[..., \"DataArray\"]:\n @functools.wraps(f)\n def func(self, *args, **kwargs):\n with np.errstate(all=\"ignore\"):\n return self.__array_wrap__(f(self.variable.data, *args, **kwargs))\n\n return func\n\n @staticmethod\n def _binary_op(\n f: Callable[..., Any],\n reflexive: bool = False,\n join: str = None, # see xarray.align\n **ignored_kwargs\n ) -> Callable[..., \"DataArray\"]:\n @functools.wraps(f)\n def func(self, other):\n if isinstance(other, (Dataset, groupby.GroupBy)):\n return NotImplemented\n if isinstance(other, DataArray):\n align_type = OPTIONS[\"arithmetic_join\"] if join is None else join\n self, other = align(self, other, join=align_type, copy=False)\n other_variable = getattr(other, \"variable\", other)\n other_coords = getattr(other, \"coords\", None)\n\n variable = (\n f(self.variable, other_variable)\n if not reflexive\n else f(other_variable, self.variable)\n )\n coords, indexes = self.coords._merge_raw(other_coords)\n name = self._result_name(other)\n\n return self._replace(variable, coords, name)\n\n return func\n\n @staticmethod\n def _inplace_binary_op(f: Callable) -> Callable[..., \"DataArray\"]:\n @functools.wraps(f)\n def func(self, other):\n if isinstance(other, groupby.GroupBy):\n raise TypeError(\n \"in-place operations between a DataArray and \"\n \"a grouped object are not permitted\"\n )\n # n.b. we can't align other to self (with other.reindex_like(self))\n # because `other` may be converted into floats, which would cause\n # in-place arithmetic to fail unpredictably. Instead, we simply\n # don't support automatic alignment with in-place arithmetic.\n other_coords = getattr(other, \"coords\", None)\n other_variable = getattr(other, \"variable\", other)\n with self.coords._merge_inplace(other_coords):\n f(self.variable, other_variable)\n return self\n\n return func\n\n def _copy_attrs_from(self, other: Union[\"DataArray\", Dataset, Variable]) -> None:\n self.attrs = other.attrs\n\n @property\n def plot(self) -> _PlotMethods:\n \"\"\"\n Access plotting functions\n\n >>> d = DataArray([[1, 2], [3, 4]])\n\n For convenience just call this directly\n\n >>> d.plot()\n\n Or use it as a namespace to use xarray.plot functions as\n DataArray methods\n\n >>> d.plot.imshow() # equivalent to xarray.plot.imshow(d)\n\n \"\"\"\n return _PlotMethods(self)\n\n def _title_for_slice(self, truncate: int = 50) -> str:\n \"\"\"\n If the dataarray has 1 dimensional coordinates or comes from a slice\n we can show that info in the title\n\n Parameters\n ----------\n truncate : integer\n maximum number of characters for title\n\n Returns\n -------\n title : string\n Can be used for plot titles\n\n \"\"\"\n one_dims = []\n for dim, coord in self.coords.items():\n if coord.size == 1:\n one_dims.append(\n \"{dim} = {v}\".format(dim=dim, v=format_item(coord.values))\n )\n\n title = \", \".join(one_dims)\n if len(title) > truncate:\n title = title[: (truncate - 3)] + \"...\"\n\n return title\n\n def diff(self, dim: Hashable, n: int = 1, label: Hashable = \"upper\") -> \"DataArray\":\n \"\"\"Calculate the n-th order discrete difference along given axis.\n\n Parameters\n ----------\n dim : hashable, optional\n Dimension over which to calculate the finite difference.\n n : int, optional\n The number of times values are differenced.\n label : hashable, optional\n The new coordinate in dimension ``dim`` will have the\n values of either the 
minuend's or subtrahend's coordinate\n for values 'upper' and 'lower', respectively. Other\n values are not supported.\n\n Returns\n -------\n difference : same type as caller\n The n-th order finite difference of this object.\n\n Examples\n --------\n >>> arr = xr.DataArray([5, 5, 6, 6], [[1, 2, 3, 4]], ['x'])\n >>> arr.diff('x')\n \n array([0, 1, 0])\n Coordinates:\n * x (x) int64 2 3 4\n >>> arr.diff('x', 2)\n \n array([ 1, -1])\n Coordinates:\n * x (x) int64 3 4\n\n See Also\n --------\n DataArray.differentiate\n \"\"\"\n ds = self._to_temp_dataset().diff(n=n, dim=dim, label=label)\n return self._from_temp_dataset(ds)\n\n def shift(\n self,\n shifts: Mapping[Hashable, int] = None,\n fill_value: Any = dtypes.NA,\n **shifts_kwargs: int\n ) -> \"DataArray\":\n \"\"\"Shift this array by an offset along one or more dimensions.\n\n Only the data is moved; coordinates stay in place. Values shifted from\n beyond array bounds are replaced by NaN. This is consistent with the\n behavior of ``shift`` in pandas.\n\n Parameters\n ----------\n shifts : Mapping with the form of {dim: offset}\n Integer offset to shift along each of the given dimensions.\n Positive offsets shift to the right; negative offsets shift to the\n left.\n fill_value: scalar, optional\n Value to use for newly missing values\n **shifts_kwargs:\n The keyword arguments form of ``shifts``.\n One of shifts or shifts_kwarg must be provided.\n\n Returns\n -------\n shifted : DataArray\n DataArray with the same coordinates and attributes but shifted\n data.\n\n See also\n --------\n roll\n\n Examples\n --------\n\n >>> arr = xr.DataArray([5, 6, 7], dims='x')\n >>> arr.shift(x=1)\n \n array([ nan, 5., 6.])\n Coordinates:\n * x (x) int64 0 1 2\n \"\"\"\n variable = self.variable.shift(\n shifts=shifts, fill_value=fill_value, **shifts_kwargs\n )\n return self._replace(variable=variable)\n\n def roll(\n self,\n shifts: Mapping[Hashable, int] = None,\n roll_coords: bool = None,\n **shifts_kwargs: int\n ) -> \"DataArray\":\n \"\"\"Roll this array by an offset along one or more dimensions.\n\n Unlike shift, roll may rotate all variables, including coordinates\n if specified. 
The direction of rotation is consistent with\n :py:func:`numpy.roll`.\n\n Parameters\n ----------\n shifts : Mapping with the form of {dim: offset}\n Integer offset to rotate each of the given dimensions.\n Positive offsets roll to the right; negative offsets roll to the\n left.\n roll_coords : bool\n Indicates whether to roll the coordinates by the offset\n The current default of roll_coords (None, equivalent to True) is\n deprecated and will change to False in a future version.\n Explicitly pass roll_coords to silence the warning.\n **shifts_kwargs : The keyword arguments form of ``shifts``.\n One of shifts or shifts_kwarg must be provided.\n\n Returns\n -------\n rolled : DataArray\n DataArray with the same attributes but rolled data and coordinates.\n\n See also\n --------\n shift\n\n Examples\n --------\n\n >>> arr = xr.DataArray([5, 6, 7], dims='x')\n >>> arr.roll(x=1)\n \n array([7, 5, 6])\n Coordinates:\n * x (x) int64 2 0 1\n \"\"\"\n ds = self._to_temp_dataset().roll(\n shifts=shifts, roll_coords=roll_coords, **shifts_kwargs\n )\n return self._from_temp_dataset(ds)\n\n @property\n def real(self) -> \"DataArray\":\n return self._replace(self.variable.real)\n\n @property\n def imag(self) -> \"DataArray\":\n return self._replace(self.variable.imag)\n\n def dot(\n self, other: \"DataArray\", dims: Union[Hashable, Sequence[Hashable], None] = None\n ) -> \"DataArray\":\n \"\"\"Perform dot product of two DataArrays along their shared dims.\n\n Equivalent to taking taking tensordot over all shared dims.\n\n Parameters\n ----------\n other : DataArray\n The other array with which the dot product is performed.\n dims: hashable or sequence of hashables, optional\n Along which dimensions to be summed over. Default all the common\n dimensions are summed over.\n\n Returns\n -------\n result : DataArray\n Array resulting from the dot product over all shared dimensions.\n\n See also\n --------\n dot\n numpy.tensordot\n\n Examples\n --------\n\n >>> da_vals = np.arange(6 * 5 * 4).reshape((6, 5, 4))\n >>> da = DataArray(da_vals, dims=['x', 'y', 'z'])\n >>> dm_vals = np.arange(4)\n >>> dm = DataArray(dm_vals, dims=['z'])\n\n >>> dm.dims\n ('z')\n >>> da.dims\n ('x', 'y', 'z')\n\n >>> dot_result = da.dot(dm)\n >>> dot_result.dims\n ('x', 'y')\n \"\"\"\n if isinstance(other, Dataset):\n raise NotImplementedError(\n \"dot products are not yet supported with Dataset objects.\"\n )\n if not isinstance(other, DataArray):\n raise TypeError(\"dot only operates on DataArrays.\")\n\n return computation.dot(self, other, dims=dims)\n\n def sortby(\n self,\n variables: Union[Hashable, \"DataArray\", Sequence[Union[Hashable, \"DataArray\"]]],\n ascending: bool = True,\n ) -> \"DataArray\":\n \"\"\"Sort object by labels or values (along an axis).\n\n Sorts the dataarray, either along specified dimensions,\n or according to values of 1-D dataarrays that share dimension\n with calling object.\n\n If the input variables are dataarrays, then the dataarrays are aligned\n (via left-join) to the calling object prior to sorting by cell values.\n NaNs are sorted to the end, following Numpy convention.\n\n If multiple sorts along the same dimension is\n given, numpy's lexsort is performed along that dimension:\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.lexsort.html\n and the FIRST key in the sequence is used as the primary sort key,\n followed by the 2nd key, etc.\n\n Parameters\n ----------\n variables: hashable, DataArray, or sequence of either\n 1D DataArray objects or name(s) of 1D variable(s) in\n 
coords whose values are used to sort this array.\n ascending: boolean, optional\n Whether to sort by ascending or descending order.\n\n Returns\n -------\n sorted: DataArray\n A new dataarray where all the specified dims are sorted by dim\n labels.\n\n Examples\n --------\n\n >>> da = xr.DataArray(np.random.rand(5),\n ... coords=[pd.date_range('1/1/2000', periods=5)],\n ... dims='time')\n >>> da\n \n array([ 0.965471, 0.615637, 0.26532 , 0.270962, 0.552878])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03 ...\n\n >>> da.sortby(da)\n \n array([ 0.26532 , 0.270962, 0.552878, 0.615637, 0.965471])\n Coordinates:\n * time (time) datetime64[ns] 2000-01-03 2000-01-04 2000-01-05 ...\n \"\"\"\n ds = self._to_temp_dataset().sortby(variables, ascending=ascending)\n return self._from_temp_dataset(ds)\n\n def quantile(\n self,\n q: Any,\n dim: Union[Hashable, Sequence[Hashable], None] = None,\n interpolation: str = \"linear\",\n keep_attrs: bool = None,\n ) -> \"DataArray\":\n \"\"\"Compute the qth quantile of the data along the specified dimension.\n\n Returns the qth quantiles(s) of the array elements.\n\n Parameters\n ----------\n q : float in range of [0,1] or array-like of floats\n Quantile to compute, which must be between 0 and 1 inclusive.\n dim : hashable or sequence of hashable, optional\n Dimension(s) over which to apply quantile.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to\n use when the desired quantile lies between two data points\n ``i < j``:\n\n - linear: ``i + (j - i) * fraction``, where ``fraction`` is\n the fractional part of the index surrounded by ``i`` and\n ``j``.\n - lower: ``i``.\n - higher: ``j``.\n - nearest: ``i`` or ``j``, whichever is nearest.\n - midpoint: ``(i + j) / 2``.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n\n Returns\n -------\n quantiles : DataArray\n If `q` is a single quantile, then the result\n is a scalar. If multiple percentiles are given, first axis of\n the result corresponds to the quantile and a quantile dimension\n is added to the return array. The other dimensions are the\n dimensions that remain after the reduction of the array.\n\n See Also\n --------\n numpy.nanpercentile, pandas.Series.quantile, Dataset.quantile\n \"\"\"\n\n ds = self._to_temp_dataset().quantile(\n q, dim=dim, keep_attrs=keep_attrs, interpolation=interpolation\n )\n return self._from_temp_dataset(ds)\n\n def rank(\n self, dim: Hashable, pct: bool = False, keep_attrs: bool = None\n ) -> \"DataArray\":\n \"\"\"Ranks the data.\n\n Equal values are assigned a rank that is the average of the ranks that\n would have been otherwise assigned to all of the values within that\n set. Ranks begin at 1, not 0. If pct, computes percentage ranks.\n\n NaNs in the input array are returned as NaNs.\n\n The `bottleneck` library is required.\n\n Parameters\n ----------\n dim : hashable\n Dimension over which to compute rank.\n pct : bool, optional\n If True, compute percentage ranks, otherwise compute integer ranks.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. 
If False (default), the new\n object will be returned without attributes.\n\n Returns\n -------\n ranked : DataArray\n DataArray with the same coordinates and dtype 'float64'.\n\n Examples\n --------\n\n >>> arr = xr.DataArray([5, 6, 7], dims='x')\n >>> arr.rank('x')\n \n array([ 1., 2., 3.])\n Dimensions without coordinates: x\n \"\"\"\n\n ds = self._to_temp_dataset().rank(dim, pct=pct, keep_attrs=keep_attrs)\n return self._from_temp_dataset(ds)\n\n def differentiate(\n self, coord: Hashable, edge_order: int = 1, datetime_unit: str = None\n ) -> \"DataArray\":\n \"\"\" Differentiate the array with the second order accurate central\n differences.\n\n .. note::\n This feature is limited to simple cartesian geometry, i.e. coord\n must be one dimensional.\n\n Parameters\n ----------\n coord: hashable\n The coordinate to be used to compute the gradient.\n edge_order: 1 or 2. Default 1\n N-th order accurate differences at the boundaries.\n datetime_unit: None or any of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',\n 'us', 'ns', 'ps', 'fs', 'as'}\n Unit to compute gradient. Only valid for datetime coordinate.\n\n Returns\n -------\n differentiated: DataArray\n\n See also\n --------\n numpy.gradient: corresponding numpy function\n\n Examples\n --------\n\n >>> da = xr.DataArray(np.arange(12).reshape(4, 3), dims=['x', 'y'],\n ... coords={'x': [0, 0.1, 1.1, 1.2]})\n >>> da\n \n array([[ 0, 1, 2],\n [ 3, 4, 5],\n [ 6, 7, 8],\n [ 9, 10, 11]])\n Coordinates:\n * x (x) float64 0.0 0.1 1.1 1.2\n Dimensions without coordinates: y\n >>>\n >>> da.differentiate('x')\n \n array([[30. , 30. , 30. ],\n [27.545455, 27.545455, 27.545455],\n [27.545455, 27.545455, 27.545455],\n [30. , 30. , 30. ]])\n Coordinates:\n * x (x) float64 0.0 0.1 1.1 1.2\n Dimensions without coordinates: y\n \"\"\"\n ds = self._to_temp_dataset().differentiate(coord, edge_order, datetime_unit)\n return self._from_temp_dataset(ds)\n\n def integrate(\n self, dim: Union[Hashable, Sequence[Hashable]], datetime_unit: str = None\n ) -> \"DataArray\":\n \"\"\" integrate the array with the trapezoidal rule.\n\n .. note::\n This feature is limited to simple cartesian geometry, i.e. coord\n must be one dimensional.\n\n Parameters\n ----------\n dim: hashable, or a sequence of hashable\n Coordinate(s) used for the integration.\n datetime_unit: str, optional\n Can be used to specify the unit if datetime coordinate is used.\n One of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns',\n 'ps', 'fs', 'as'}\n\n Returns\n -------\n integrated: DataArray\n\n See also\n --------\n numpy.trapz: corresponding numpy function\n\n Examples\n --------\n\n >>> da = xr.DataArray(np.arange(12).reshape(4, 3), dims=['x', 'y'],\n ... 
coords={'x': [0, 0.1, 1.1, 1.2]})\n >>> da\n \n array([[ 0, 1, 2],\n [ 3, 4, 5],\n [ 6, 7, 8],\n [ 9, 10, 11]])\n Coordinates:\n * x (x) float64 0.0 0.1 1.1 1.2\n Dimensions without coordinates: y\n >>>\n >>> da.integrate('x')\n \n array([5.4, 6.6, 7.8])\n Dimensions without coordinates: y\n \"\"\"\n ds = self._to_temp_dataset().integrate(dim, datetime_unit)\n return self._from_temp_dataset(ds)\n\n def unify_chunks(self) -> \"DataArray\":\n \"\"\" Unify chunk size along all chunked dimensions of this DataArray.\n\n Returns\n -------\n\n DataArray with consistent chunk sizes for all dask-array variables\n\n See Also\n --------\n\n dask.array.core.unify_chunks\n \"\"\"\n ds = self._to_temp_dataset().unify_chunks()\n return self._from_temp_dataset(ds)\n\n def map_blocks(\n self,\n func: \"Callable[..., T_DSorDA]\",\n args: Sequence[Any] = (),\n kwargs: Mapping[str, Any] = None,\n ) -> \"T_DSorDA\":\n \"\"\"\n Apply a function to each chunk of this DataArray. This method is experimental\n and its signature may change.\n\n Parameters\n ----------\n func: callable\n User-provided function that accepts a DataArray as its first parameter. The\n function will receive a subset of this DataArray, corresponding to one chunk\n along each chunked dimension. ``func`` will be executed as\n ``func(obj_subset, *args, **kwargs)``.\n\n The function will be first run on mocked-up data, that looks like this array\n but has sizes 0, to determine properties of the returned object such as\n dtype, variable names, new dimensions and new indexes (if any).\n\n This function must return either a single DataArray or a single Dataset.\n\n This function cannot change size of existing dimensions, or add new chunked\n dimensions.\n args: Sequence\n Passed verbatim to func after unpacking, after the sliced DataArray. xarray\n objects, if any, will not be split by chunks. Passing dask collections is\n not allowed.\n kwargs: Mapping\n Passed verbatim to func after unpacking. xarray objects, if any, will not be\n split by chunks. Passing dask collections is not allowed.\n\n Returns\n -------\n A single DataArray or Dataset with dask backend, reassembled from the outputs of\n the function.\n\n Notes\n -----\n This method is designed for when one needs to manipulate a whole xarray object\n within each chunk. 
In the more common case where one can work on numpy arrays,\n it is recommended to use apply_ufunc.\n\n If none of the variables in this DataArray is backed by dask, calling this\n method is equivalent to calling ``func(self, *args, **kwargs)``.\n\n See Also\n --------\n dask.array.map_blocks, xarray.apply_ufunc, xarray.map_blocks,\n xarray.Dataset.map_blocks\n \"\"\"\n from .parallel import map_blocks\n\n return map_blocks(func, self, args, kwargs)\n\n # this needs to be at the end, or mypy will confuse with `str`\n # https://mypy.readthedocs.io/en/latest/common_issues.html#dealing-with-conflicting-names\n str = property(StringAccessor)\n\n\n# priority most be higher than Variable to properly work with binary ufuncs\nops.inject_all_ops_and_reduce_methods(DataArray, priority=60)\n"},{"col":0,"comment":"null","endLoc":267,"header":"def assert_and_return_exact_match(all_keys)","id":3051,"name":"assert_and_return_exact_match","nodeType":"Function","startLoc":259,"text":"def assert_and_return_exact_match(all_keys):\n first_keys = all_keys[0]\n for keys in all_keys[1:]:\n if keys != first_keys:\n raise ValueError(\n \"exact match required for all data variable names, \"\n \"but %r != %r\" % (keys, first_keys)\n )\n return first_keys"},{"col":0,"comment":"null","endLoc":483,"header":"def unified_dim_sizes(\n variables: Iterable[Variable], exclude_dims: AbstractSet = frozenset()\n) -> Dict[Hashable, int]","id":3052,"name":"unified_dim_sizes","nodeType":"Function","startLoc":461,"text":"def unified_dim_sizes(\n variables: Iterable[Variable], exclude_dims: AbstractSet = frozenset()\n) -> Dict[Hashable, int]:\n\n dim_sizes: Dict[Hashable, int] = {}\n\n for var in variables:\n if len(set(var.dims)) < len(var.dims):\n raise ValueError(\n \"broadcasting cannot handle duplicate \"\n \"dimensions on a variable: %r\" % list(var.dims)\n )\n for dim, size in zip(var.dims, var.shape):\n if dim not in exclude_dims:\n if dim not in dim_sizes:\n dim_sizes[dim] = size\n elif dim_sizes[dim] != size:\n raise ValueError(\n \"operands cannot be broadcast together \"\n \"with mismatched lengths for dimension \"\n \"%r: %s vs %s\" % (dim, dim_sizes[dim], size)\n )\n return dim_sizes"},{"className":"UnexpectedDataAccess","col":0,"comment":"null","endLoc":117,"id":3053,"nodeType":"Class","startLoc":116,"text":"class UnexpectedDataAccess(Exception):\n pass"},{"col":0,"comment":"null","endLoc":540,"header":"def broadcast_compat_data(\n variable: Variable,\n broadcast_dims: Tuple[Hashable, ...],\n core_dims: Tuple[Hashable, ...],\n) -> Any","id":3054,"name":"broadcast_compat_data","nodeType":"Function","startLoc":489,"text":"def broadcast_compat_data(\n variable: Variable,\n broadcast_dims: Tuple[Hashable, ...],\n core_dims: Tuple[Hashable, ...],\n) -> Any:\n data = variable.data\n\n old_dims = variable.dims\n new_dims = broadcast_dims + core_dims\n\n if new_dims == old_dims:\n # optimize for the typical case\n return data\n\n set_old_dims = set(old_dims)\n missing_core_dims = [d for d in core_dims if d not in set_old_dims]\n if missing_core_dims:\n raise ValueError(\n \"operand to apply_ufunc has required core dimensions {}, but \"\n \"some of these dimensions are absent on an input variable: {}\".format(\n list(core_dims), missing_core_dims\n )\n )\n\n set_new_dims = set(new_dims)\n unexpected_dims = [d for d in old_dims if d not in set_new_dims]\n if unexpected_dims:\n raise ValueError(\n \"operand to apply_ufunc encountered unexpected \"\n \"dimensions %r on an input variable: these are core \"\n \"dimensions on other 
input or output variables\" % unexpected_dims\n )\n\n # for consistency with numpy, keep broadcast dimensions to the left\n old_broadcast_dims = tuple(d for d in broadcast_dims if d in set_old_dims)\n reordered_dims = old_broadcast_dims + core_dims\n if reordered_dims != old_dims:\n order = tuple(old_dims.index(d) for d in reordered_dims)\n data = duck_array_ops.transpose(data, order)\n\n if new_dims != reordered_dims:\n key_parts = []\n for dim in new_dims:\n if dim in set_old_dims:\n key_parts.append(SLICE_NONE)\n elif key_parts:\n # no need to insert new axes at the beginning that are already\n # handled by broadcasting\n key_parts.append(np.newaxis)\n data = data[tuple(key_parts)]\n\n return data"},{"className":"InaccessibleArray","col":0,"comment":"null","endLoc":125,"id":3055,"nodeType":"Class","startLoc":120,"text":"class InaccessibleArray(utils.NDArrayMixin, ExplicitlyIndexed):\n def __init__(self, array):\n self.array = array\n\n def __getitem__(self, key):\n raise UnexpectedDataAccess(\"Tried accessing data\")"},{"col":4,"comment":"null","endLoc":122,"header":"def __init__(self, array)","id":3056,"name":"__init__","nodeType":"Function","startLoc":121,"text":"def __init__(self, array):\n self.array = array"},{"col":4,"comment":"null","endLoc":125,"header":"def __getitem__(self, key)","id":3057,"name":"__getitem__","nodeType":"Function","startLoc":124,"text":"def __getitem__(self, key):\n raise UnexpectedDataAccess(\"Tried accessing data\")"},{"col":4,"comment":"Check if the given date is in the set of possible dates created\n using a length-one version of this offset class.","endLoc":318,"header":"def onOffset(self, date)","id":3058,"name":"onOffset","nodeType":"Function","startLoc":315,"text":"def onOffset(self, date):\n \"\"\"Check if the given date is in the set of possible dates created\n using a length-one version of this offset class.\"\"\"\n return date.day == 1"},{"attributeType":"null","col":4,"comment":"null","endLoc":309,"id":3059,"name":"_freq","nodeType":"Attribute","startLoc":309,"text":"_freq"},{"attributeType":"null","col":8,"comment":"null","endLoc":122,"id":3060,"name":"array","nodeType":"Attribute","startLoc":122,"text":"self.array"},{"className":"QuarterBegin","col":0,"comment":"null","endLoc":422,"id":3061,"nodeType":"Class","startLoc":400,"text":"class QuarterBegin(QuarterOffset):\n # When converting a string to an offset, pandas converts\n # 'QS' to a QuarterBegin offset starting in the month of\n # January. 
When creating a QuarterBegin offset directly\n # from the constructor, however, the default month is March.\n # We follow that behavior here.\n _default_month = 3\n _freq = \"QS\"\n _day_option = \"start\"\n\n def rollforward(self, date):\n \"\"\"Roll date forward to nearest start of quarter\"\"\"\n if self.onOffset(date):\n return date\n else:\n return date + QuarterBegin(month=self.month)\n\n def rollback(self, date):\n \"\"\"Roll date backward to nearest start of quarter\"\"\"\n if self.onOffset(date):\n return date\n else:\n return date - QuarterBegin(month=self.month)"},{"col":4,"comment":"Roll date forward to nearest start of quarter","endLoc":415,"header":"def rollforward(self, date)","id":3062,"name":"rollforward","nodeType":"Function","startLoc":410,"text":"def rollforward(self, date):\n \"\"\"Roll date forward to nearest start of quarter\"\"\"\n if self.onOffset(date):\n return date\n else:\n return date + QuarterBegin(month=self.month)"},{"className":"_LocIndexer","col":0,"comment":"null","endLoc":205,"id":3063,"nodeType":"Class","startLoc":185,"text":"class _LocIndexer:\n __slots__ = (\"data_array\",)\n\n def __init__(self, data_array: \"DataArray\"):\n self.data_array = data_array\n\n def __getitem__(self, key) -> \"DataArray\":\n if not utils.is_dict_like(key):\n # expand the indexer so we can handle Ellipsis\n labels = indexing.expanded_indexer(key, self.data_array.ndim)\n key = dict(zip(self.data_array.dims, labels))\n return self.data_array.sel(**key)\n\n def __setitem__(self, key, value) -> None:\n if not utils.is_dict_like(key):\n # expand the indexer so we can handle Ellipsis\n labels = indexing.expanded_indexer(key, self.data_array.ndim)\n key = dict(zip(self.data_array.dims, labels))\n\n pos_indexers, _ = remap_label_indexers(self.data_array, key)\n self.data_array[pos_indexers] = value"},{"col":4,"comment":"null","endLoc":196,"header":"def __getitem__(self, key) -> \"DataArray\"","id":3064,"name":"__getitem__","nodeType":"Function","startLoc":191,"text":"def __getitem__(self, key) -> \"DataArray\":\n if not utils.is_dict_like(key):\n # expand the indexer so we can handle Ellipsis\n labels = indexing.expanded_indexer(key, self.data_array.ndim)\n key = dict(zip(self.data_array.dims, labels))\n return self.data_array.sel(**key)"},{"attributeType":"null","col":0,"comment":"null","endLoc":42,"id":3065,"name":"BASIC_INDEXING_TYPES","nodeType":"Attribute","startLoc":42,"text":"BASIC_INDEXING_TYPES"},{"attributeType":"null","col":0,"comment":"Type annotation to be used when methods of Variable return self or a copy of self.\nWhen called from an instance of a subclass, e.g. IndexVariable, mypy identifies the\noutput as an instance of the subclass.\n\nUsage::\n\n class Variable:\n def f(self: VariableType, ...) -> VariableType:\n ...\n","endLoc":44,"id":3066,"name":"VariableType","nodeType":"Attribute","startLoc":44,"text":"VariableType"},{"col":0,"comment":"","endLoc":1,"header":"variable.py#","id":3067,"name":"","nodeType":"Function","startLoc":1,"text":"try:\n import dask.array as da\nexcept ImportError:\n pass\n\nNON_NUMPY_SUPPORTED_ARRAY_TYPES = (\n indexing.ExplicitlyIndexed,\n pd.Index,\n) + dask_array_type\n\nBASIC_INDEXING_TYPES = integer_types + (slice,) # type: ignore\n\nVariableType = TypeVar(\"VariableType\", bound=\"Variable\")\n\n\"\"\"Type annotation to be used when methods of Variable return self or a copy of self.\nWhen called from an instance of a subclass, e.g. 
IndexVariable, mypy identifies the\noutput as an instance of the subclass.\n\nUsage::\n\n class Variable:\n def f(self: VariableType, ...) -> VariableType:\n ...\n\"\"\"\n\nops.inject_all_ops_and_reduce_methods(Variable)\n\nCoordinate = utils.alias(IndexVariable, \"Coordinate\")"},{"fileName":"pdcompat.py","filePath":"xarray/core","id":3068,"nodeType":"File","text":"# The remove_unused_levels defined here was copied based on the source code\n# defined in pandas.core.indexes.muli.py\n\n# For reference, here is a copy of the pandas copyright notice:\n\n# (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team\n# All rights reserved.\n\n# Copyright (c) 2008-2011 AQR Capital Management, LLC\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n\n# * Neither the name of the copyright holder nor the names of any\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom distutils.version import LooseVersion\n\nimport pandas as pd\n\n# allow ourselves to type checks for Panel even after it's removed\nif LooseVersion(pd.__version__) < \"0.25.0\":\n Panel = pd.Panel\nelse:\n\n class Panel: # type: ignore\n pass\n\n\ndef count_not_none(*args) -> int:\n \"\"\"Compute the number of non-None arguments.\n\n Copied from pandas.core.common.count_not_none (not part of the public API)\n \"\"\"\n return sum([arg is not None for arg in args])\n"},{"className":"Panel","col":4,"comment":"null","endLoc":50,"id":3069,"nodeType":"Class","startLoc":49,"text":"class Panel: # type: ignore\n pass"},{"attributeType":"null","col":4,"comment":"null","endLoc":46,"id":3070,"name":"Panel","nodeType":"Attribute","startLoc":46,"text":"Panel"},{"col":0,"comment":"","endLoc":40,"header":"pdcompat.py#","id":3071,"name":"","nodeType":"Function","startLoc":40,"text":"if LooseVersion(pd.__version__) < \"0.25.0\":\n Panel = pd.Panel\nelse:\n\n class Panel: # type: ignore\n pass"},{"fileName":"dataset.py","filePath":"xarray/core","id":3072,"nodeType":"File","text":"import copy\nimport functools\nimport sys\nimport warnings\nfrom collections import defaultdict\nfrom numbers import Number\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n DefaultDict,\n Dict,\n 
Hashable,\n Iterable,\n Iterator,\n List,\n Mapping,\n MutableMapping,\n Optional,\n Sequence,\n Set,\n Tuple,\n TypeVar,\n Union,\n cast,\n overload,\n)\n\nimport numpy as np\nimport pandas as pd\n\nimport xarray as xr\n\nfrom ..coding.cftimeindex import _parse_array_of_cftime_strings\nfrom ..plot.dataset_plot import _Dataset_PlotMethods\nfrom . import (\n alignment,\n dtypes,\n duck_array_ops,\n formatting,\n groupby,\n ops,\n resample,\n rolling,\n utils,\n)\nfrom .alignment import _broadcast_helper, _get_broadcast_dims_map_common_coords, align\nfrom .common import (\n ALL_DIMS,\n DataWithCoords,\n ImplementsDatasetReduce,\n _contains_datetime_like_objects,\n)\nfrom .coordinates import (\n DatasetCoordinates,\n LevelCoordinatesSource,\n assert_coordinate_consistent,\n remap_label_indexers,\n)\nfrom .duck_array_ops import datetime_to_numeric\nfrom .indexes import Indexes, default_indexes, isel_variable_and_index, roll_index\nfrom .merge import (\n dataset_merge_method,\n dataset_update_method,\n merge_coordinates_without_align,\n merge_data_and_coords,\n)\nfrom .options import OPTIONS, _get_keep_attrs\nfrom .pycompat import dask_array_type\nfrom .utils import (\n Frozen,\n SortedKeysDict,\n _check_inplace,\n decode_numpy_dict_values,\n either_dict_or_kwargs,\n hashable,\n is_dict_like,\n is_list_like,\n maybe_wrap_array,\n)\nfrom .variable import IndexVariable, Variable, as_variable, broadcast_variables\n\nif TYPE_CHECKING:\n from ..backends import AbstractDataStore, ZarrStore\n from .dataarray import DataArray\n from .merge import CoercibleMapping\n\n T_DSorDA = TypeVar(\"T_DSorDA\", DataArray, \"Dataset\")\n\n try:\n from dask.delayed import Delayed\n except ImportError:\n Delayed = None\n\n\n# list of attributes of pd.DatetimeIndex that are ndarrays of time info\n_DATETIMEINDEX_COMPONENTS = [\n \"year\",\n \"month\",\n \"day\",\n \"hour\",\n \"minute\",\n \"second\",\n \"microsecond\",\n \"nanosecond\",\n \"date\",\n \"time\",\n \"dayofyear\",\n \"weekofyear\",\n \"dayofweek\",\n \"quarter\",\n]\n\n\ndef _get_virtual_variable(\n variables, key: Hashable, level_vars: Mapping = None, dim_sizes: Mapping = None\n) -> Tuple[Hashable, Hashable, Variable]:\n \"\"\"Get a virtual variable (e.g., 'time.year' or a MultiIndex level)\n from a dict of xarray.Variable objects (if possible)\n \"\"\"\n if level_vars is None:\n level_vars = {}\n if dim_sizes is None:\n dim_sizes = {}\n\n if key in dim_sizes:\n data = pd.Index(range(dim_sizes[key]), name=key)\n variable = IndexVariable((key,), data)\n return key, key, variable\n\n if not isinstance(key, str):\n raise KeyError(key)\n\n split_key = key.split(\".\", 1)\n var_name: Optional[str]\n if len(split_key) == 2:\n ref_name, var_name = split_key\n elif len(split_key) == 1:\n ref_name, var_name = key, None\n else:\n raise KeyError(key)\n\n if ref_name in level_vars:\n dim_var = variables[level_vars[ref_name]]\n ref_var = dim_var.to_index_variable().get_level_variable(ref_name)\n else:\n ref_var = variables[ref_name]\n\n if var_name is None:\n virtual_var = ref_var\n var_name = key\n else:\n if _contains_datetime_like_objects(ref_var):\n ref_var = xr.DataArray(ref_var)\n data = getattr(ref_var.dt, var_name).data\n else:\n data = getattr(ref_var, var_name).data\n virtual_var = Variable(ref_var.dims, data)\n\n return ref_name, var_name, virtual_var\n\n\ndef calculate_dimensions(variables: Mapping[Hashable, Variable]) -> Dict[Hashable, int]:\n \"\"\"Calculate the dimensions corresponding to a set of variables.\n\n Returns dictionary mapping from 
dimension names to sizes. Raises ValueError\n if any of the dimension sizes conflict.\n \"\"\"\n dims: Dict[Hashable, int] = {}\n last_used = {}\n scalar_vars = {k for k, v in variables.items() if not v.dims}\n for k, var in variables.items():\n for dim, size in zip(var.dims, var.shape):\n if dim in scalar_vars:\n raise ValueError(\n \"dimension %r already exists as a scalar \" \"variable\" % dim\n )\n if dim not in dims:\n dims[dim] = size\n last_used[dim] = k\n elif dims[dim] != size:\n raise ValueError(\n \"conflicting sizes for dimension %r: \"\n \"length %s on %r and length %s on %r\"\n % (dim, size, k, dims[dim], last_used[dim])\n )\n return dims\n\n\ndef merge_indexes(\n indexes: Mapping[Hashable, Union[Hashable, Sequence[Hashable]]],\n variables: Mapping[Hashable, Variable],\n coord_names: Set[Hashable],\n append: bool = False,\n) -> Tuple[Dict[Hashable, Variable], Set[Hashable]]:\n \"\"\"Merge variables into multi-indexes.\n\n Not public API. Used in Dataset and DataArray set_index\n methods.\n \"\"\"\n vars_to_replace: Dict[Hashable, Variable] = {}\n vars_to_remove: List[Hashable] = []\n error_msg = \"{} is not the name of an existing variable.\"\n\n for dim, var_names in indexes.items():\n if isinstance(var_names, str) or not isinstance(var_names, Sequence):\n var_names = [var_names]\n\n names: List[Hashable] = []\n codes: List[List[int]] = []\n levels: List[List[int]] = []\n current_index_variable = variables.get(dim)\n\n for n in var_names:\n try:\n var = variables[n]\n except KeyError:\n raise ValueError(error_msg.format(n))\n if (\n current_index_variable is not None\n and var.dims != current_index_variable.dims\n ):\n raise ValueError(\n \"dimension mismatch between %r %s and %r %s\"\n % (dim, current_index_variable.dims, n, var.dims)\n )\n\n if current_index_variable is not None and append:\n current_index = current_index_variable.to_index()\n if isinstance(current_index, pd.MultiIndex):\n names.extend(current_index.names)\n codes.extend(current_index.codes)\n levels.extend(current_index.levels)\n else:\n names.append(\"%s_level_0\" % dim)\n cat = pd.Categorical(current_index.values, ordered=True)\n codes.append(cat.codes)\n levels.append(cat.categories)\n\n if not len(names) and len(var_names) == 1:\n idx = pd.Index(variables[var_names[0]].values)\n\n else:\n for n in var_names:\n try:\n var = variables[n]\n except KeyError:\n raise ValueError(error_msg.format(n))\n names.append(n)\n cat = pd.Categorical(var.values, ordered=True)\n codes.append(cat.codes)\n levels.append(cat.categories)\n\n idx = pd.MultiIndex(levels, codes, names=names)\n\n vars_to_replace[dim] = IndexVariable(dim, idx)\n vars_to_remove.extend(var_names)\n\n new_variables = {k: v for k, v in variables.items() if k not in vars_to_remove}\n new_variables.update(vars_to_replace)\n new_coord_names = coord_names | set(vars_to_replace)\n new_coord_names -= set(vars_to_remove)\n\n return new_variables, new_coord_names\n\n\ndef split_indexes(\n dims_or_levels: Union[Hashable, Sequence[Hashable]],\n variables: Mapping[Hashable, Variable],\n coord_names: Set[Hashable],\n level_coords: Mapping[Hashable, Hashable],\n drop: bool = False,\n) -> Tuple[Dict[Hashable, Variable], Set[Hashable]]:\n \"\"\"Extract (multi-)indexes (levels) as variables.\n\n Not public API. 
Used in Dataset and DataArray reset_index\n methods.\n \"\"\"\n if isinstance(dims_or_levels, str) or not isinstance(dims_or_levels, Sequence):\n dims_or_levels = [dims_or_levels]\n\n dim_levels: DefaultDict[Any, List[Hashable]] = defaultdict(list)\n dims = []\n for k in dims_or_levels:\n if k in level_coords:\n dim_levels[level_coords[k]].append(k)\n else:\n dims.append(k)\n\n vars_to_replace = {}\n vars_to_create: Dict[Hashable, Variable] = {}\n vars_to_remove = []\n\n for d in dims:\n index = variables[d].to_index()\n if isinstance(index, pd.MultiIndex):\n dim_levels[d] = index.names\n else:\n vars_to_remove.append(d)\n if not drop:\n vars_to_create[str(d) + \"_\"] = Variable(d, index)\n\n for d, levs in dim_levels.items():\n index = variables[d].to_index()\n if len(levs) == index.nlevels:\n vars_to_remove.append(d)\n else:\n vars_to_replace[d] = IndexVariable(d, index.droplevel(levs))\n\n if not drop:\n for lev in levs:\n idx = index.get_level_values(lev)\n vars_to_create[idx.name] = Variable(d, idx)\n\n new_variables = dict(variables)\n for v in set(vars_to_remove):\n del new_variables[v]\n new_variables.update(vars_to_replace)\n new_variables.update(vars_to_create)\n new_coord_names = (coord_names | set(vars_to_create)) - set(vars_to_remove)\n\n return new_variables, new_coord_names\n\n\ndef _assert_empty(args: tuple, msg: str = \"%s\") -> None:\n if args:\n raise ValueError(msg % args)\n\n\ndef as_dataset(obj: Any) -> \"Dataset\":\n \"\"\"Cast the given object to a Dataset.\n\n Handles Datasets, DataArrays and dictionaries of variables. A new Dataset\n object is only created if the provided object is not already one.\n \"\"\"\n if hasattr(obj, \"to_dataset\"):\n obj = obj.to_dataset()\n if not isinstance(obj, Dataset):\n obj = Dataset(obj)\n return obj\n\n\nclass DataVariables(Mapping[Hashable, \"DataArray\"]):\n __slots__ = (\"_dataset\",)\n\n def __init__(self, dataset: \"Dataset\"):\n self._dataset = dataset\n\n def __iter__(self) -> Iterator[Hashable]:\n return (\n key\n for key in self._dataset._variables\n if key not in self._dataset._coord_names\n )\n\n def __len__(self) -> int:\n return len(self._dataset._variables) - len(self._dataset._coord_names)\n\n def __contains__(self, key: Hashable) -> bool:\n return key in self._dataset._variables and key not in self._dataset._coord_names\n\n def __getitem__(self, key: Hashable) -> \"DataArray\":\n if key not in self._dataset._coord_names:\n return cast(\"DataArray\", self._dataset[key])\n raise KeyError(key)\n\n def __repr__(self) -> str:\n return formatting.data_vars_repr(self)\n\n @property\n def variables(self) -> Mapping[Hashable, Variable]:\n all_variables = self._dataset.variables\n return Frozen({k: all_variables[k] for k in self})\n\n def _ipython_key_completions_(self):\n \"\"\"Provide method for the key-autocompletions in IPython. 
\"\"\"\n return [\n key\n for key in self._dataset._ipython_key_completions_()\n if key not in self._dataset._coord_names\n ]\n\n\nclass _LocIndexer:\n __slots__ = (\"dataset\",)\n\n def __init__(self, dataset: \"Dataset\"):\n self.dataset = dataset\n\n def __getitem__(self, key: Mapping[Hashable, Any]) -> \"Dataset\":\n if not utils.is_dict_like(key):\n raise TypeError(\"can only lookup dictionaries from Dataset.loc\")\n return self.dataset.sel(key)\n\n\nclass Dataset(Mapping, ImplementsDatasetReduce, DataWithCoords):\n \"\"\"A multi-dimensional, in memory, array database.\n\n A dataset resembles an in-memory representation of a NetCDF file, and\n consists of variables, coordinates and attributes which together form a\n self describing dataset.\n\n Dataset implements the mapping interface with keys given by variable names\n and values given by DataArray objects for each variable name.\n\n One dimensional variables with name equal to their dimension are index\n coordinates used for label based indexing.\n \"\"\"\n\n _accessors: Optional[Dict[str, Any]]\n _attrs: Optional[Dict[Hashable, Any]]\n _coord_names: Set[Hashable]\n _dims: Dict[Hashable, int]\n _encoding: Optional[Dict[Hashable, Any]]\n _indexes: Optional[Dict[Hashable, pd.Index]]\n _variables: Dict[Hashable, Variable]\n\n __slots__ = (\n \"_accessors\",\n \"_attrs\",\n \"_coord_names\",\n \"_dims\",\n \"_encoding\",\n \"_file_obj\",\n \"_indexes\",\n \"_variables\",\n \"__weakref__\",\n )\n\n _groupby_cls = groupby.DatasetGroupBy\n _rolling_cls = rolling.DatasetRolling\n _coarsen_cls = rolling.DatasetCoarsen\n _resample_cls = resample.DatasetResample\n\n def __init__(\n self,\n # could make a VariableArgs to use more generally, and refine these\n # categories\n data_vars: Mapping[Hashable, Any] = None,\n coords: Mapping[Hashable, Any] = None,\n attrs: Mapping[Hashable, Any] = None,\n compat=None,\n ):\n \"\"\"To load data from a file or file-like object, use the `open_dataset`\n function.\n\n Parameters\n ----------\n data_vars : dict-like, optional\n A mapping from variable names to :py:class:`~xarray.DataArray`\n objects, :py:class:`~xarray.Variable` objects or to tuples of the\n form ``(dims, data[, attrs])`` which can be used as arguments to\n create a new ``Variable``. 
Each dimension must have the same length\n in all variables in which it appears.\n\n The following notations are accepted:\n\n - mapping {var name: DataArray}\n - mapping {var name: Variable}\n - mapping {var name: (dimension name, array-like)}\n - mapping {var name: (tuple of dimension names, array-like)}\n - mapping {dimension name: array-like}\n (it will be automatically moved to coords, see below)\n\n Each dimension must have the same length in all variables in which\n it appears.\n coords : dict-like, optional\n Another mapping in similar form as the `data_vars` argument,\n except the each item is saved on the dataset as a \"coordinate\".\n These variables have an associated meaning: they describe\n constant/fixed/independent quantities, unlike the\n varying/measured/dependent quantities that belong in `variables`.\n Coordinates values may be given by 1-dimensional arrays or scalars,\n in which case `dims` do not need to be supplied: 1D arrays will be\n assumed to give index values along the dimension with the same\n name.\n\n The following notations are accepted:\n\n - mapping {coord name: DataArray}\n - mapping {coord name: Variable}\n - mapping {coord name: (dimension name, array-like)}\n - mapping {coord name: (tuple of dimension names, array-like)}\n - mapping {dimension name: array-like}\n (the dimension name is implicitly set to be the same as the coord name)\n\n The last notation implies that the coord name is the same as the\n dimension name.\n\n attrs : dict-like, optional\n Global attributes to save on this dataset.\n compat : deprecated\n \"\"\"\n if compat is not None:\n warnings.warn(\n \"The `compat` argument to Dataset is deprecated and will be \"\n \"removed in 0.15.\"\n \"Instead, use `merge` to control how variables are combined\",\n FutureWarning,\n stacklevel=2,\n )\n else:\n compat = \"broadcast_equals\"\n\n # TODO(shoyer): expose indexes as a public argument in __init__\n\n if data_vars is None:\n data_vars = {}\n if coords is None:\n coords = {}\n\n both_data_and_coords = set(data_vars) & set(coords)\n if both_data_and_coords:\n raise ValueError(\n \"variables %r are found in both data_vars and coords\"\n % both_data_and_coords\n )\n\n if isinstance(coords, Dataset):\n coords = coords.variables\n\n variables, coord_names, dims, indexes = merge_data_and_coords(\n data_vars, coords, compat=compat\n )\n\n self._accessors = None\n self._attrs = dict(attrs) if attrs is not None else None\n self._file_obj = None\n self._encoding = None\n self._variables = variables\n self._coord_names = coord_names\n self._dims = dims\n self._indexes = indexes\n\n @classmethod\n def load_store(cls, store, decoder=None) -> \"Dataset\":\n \"\"\"Create a new dataset from the contents of a backends.*DataStore\n object\n \"\"\"\n variables, attributes = store.load()\n if decoder:\n variables, attributes = decoder(variables, attributes)\n obj = cls(variables, attrs=attributes)\n obj._file_obj = store\n return obj\n\n @property\n def variables(self) -> Mapping[Hashable, Variable]:\n \"\"\"Low level interface to Dataset contents as dict of Variable objects.\n\n This ordered dictionary is frozen to prevent mutation that could\n violate Dataset invariants. 
It contains all variable objects\n constituting the Dataset, including both data variables and\n coordinates.\n \"\"\"\n return Frozen(self._variables)\n\n @property\n def attrs(self) -> Dict[Hashable, Any]:\n \"\"\"Dictionary of global attributes on this dataset\n \"\"\"\n if self._attrs is None:\n self._attrs = {}\n return self._attrs\n\n @attrs.setter\n def attrs(self, value: Mapping[Hashable, Any]) -> None:\n self._attrs = dict(value)\n\n @property\n def encoding(self) -> Dict:\n \"\"\"Dictionary of global encoding attributes on this dataset\n \"\"\"\n if self._encoding is None:\n self._encoding = {}\n return self._encoding\n\n @encoding.setter\n def encoding(self, value: Mapping) -> None:\n self._encoding = dict(value)\n\n @property\n def dims(self) -> Mapping[Hashable, int]:\n \"\"\"Mapping from dimension names to lengths.\n\n Cannot be modified directly, but is updated when adding new variables.\n\n Note that type of this object differs from `DataArray.dims`.\n See `Dataset.sizes` and `DataArray.sizes` for consistently named\n properties.\n \"\"\"\n return Frozen(SortedKeysDict(self._dims))\n\n @property\n def sizes(self) -> Mapping[Hashable, int]:\n \"\"\"Mapping from dimension names to lengths.\n\n Cannot be modified directly, but is updated when adding new variables.\n\n This is an alias for `Dataset.dims` provided for the benefit of\n consistency with `DataArray.sizes`.\n\n See also\n --------\n DataArray.sizes\n \"\"\"\n return self.dims\n\n def load(self, **kwargs) -> \"Dataset\":\n \"\"\"Manually trigger loading and/or computation of this dataset's data\n from disk or a remote source into memory and return this dataset.\n Unlike compute, the original dataset is modified and returned.\n\n Normally, it should not be necessary to call this method in user code,\n because all xarray functions should either work on deferred data or\n load data automatically. 
However, this method can be necessary when\n working with many file objects on disk.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.array.compute``.\n\n See Also\n --------\n dask.array.compute\n \"\"\"\n # access .data to coerce everything to numpy or dask arrays\n lazy_data = {\n k: v._data\n for k, v in self.variables.items()\n if isinstance(v._data, dask_array_type)\n }\n if lazy_data:\n import dask.array as da\n\n # evaluate all the dask arrays simultaneously\n evaluated_data = da.compute(*lazy_data.values(), **kwargs)\n\n for k, data in zip(lazy_data, evaluated_data):\n self.variables[k].data = data\n\n # load everything else sequentially\n for k, v in self.variables.items():\n if k not in lazy_data:\n v.load()\n\n return self\n\n def __dask_graph__(self):\n graphs = {k: v.__dask_graph__() for k, v in self.variables.items()}\n graphs = {k: v for k, v in graphs.items() if v is not None}\n if not graphs:\n return None\n else:\n try:\n from dask.highlevelgraph import HighLevelGraph\n\n return HighLevelGraph.merge(*graphs.values())\n except ImportError:\n from dask import sharedict\n\n return sharedict.merge(*graphs.values())\n\n def __dask_keys__(self):\n import dask\n\n return [\n v.__dask_keys__()\n for v in self.variables.values()\n if dask.is_dask_collection(v)\n ]\n\n def __dask_layers__(self):\n import dask\n\n return sum(\n [\n v.__dask_layers__()\n for v in self.variables.values()\n if dask.is_dask_collection(v)\n ],\n (),\n )\n\n @property\n def __dask_optimize__(self):\n import dask.array as da\n\n return da.Array.__dask_optimize__\n\n @property\n def __dask_scheduler__(self):\n import dask.array as da\n\n return da.Array.__dask_scheduler__\n\n def __dask_postcompute__(self):\n import dask\n\n info = [\n (True, k, v.__dask_postcompute__())\n if dask.is_dask_collection(v)\n else (False, k, v)\n for k, v in self._variables.items()\n ]\n args = (\n info,\n self._coord_names,\n self._dims,\n self._attrs,\n self._indexes,\n self._encoding,\n self._file_obj,\n )\n return self._dask_postcompute, args\n\n def __dask_postpersist__(self):\n import dask\n\n info = [\n (True, k, v.__dask_postpersist__())\n if dask.is_dask_collection(v)\n else (False, k, v)\n for k, v in self._variables.items()\n ]\n args = (\n info,\n self._coord_names,\n self._dims,\n self._attrs,\n self._indexes,\n self._encoding,\n self._file_obj,\n )\n return self._dask_postpersist, args\n\n @staticmethod\n def _dask_postcompute(results, info, *args):\n variables = {}\n results2 = list(results[::-1])\n for is_dask, k, v in info:\n if is_dask:\n func, args2 = v\n r = results2.pop()\n result = func(r, *args2)\n else:\n result = v\n variables[k] = result\n\n final = Dataset._construct_direct(variables, *args)\n return final\n\n @staticmethod\n def _dask_postpersist(dsk, info, *args):\n variables = {}\n for is_dask, k, v in info:\n if is_dask:\n func, args2 = v\n result = func(dsk, *args2)\n else:\n result = v\n variables[k] = result\n\n return Dataset._construct_direct(variables, *args)\n\n def compute(self, **kwargs) -> \"Dataset\":\n \"\"\"Manually trigger loading and/or computation of this dataset's data\n from disk or a remote source into memory and return a new dataset.\n Unlike load, the original dataset is left unaltered.\n\n Normally, it should not be necessary to call this method in user code,\n because all xarray functions should either work on deferred data or\n load data automatically. 
However, this method can be necessary when\n working with many file objects on disk.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.array.compute``.\n\n See Also\n --------\n dask.array.compute\n \"\"\"\n new = self.copy(deep=False)\n return new.load(**kwargs)\n\n def _persist_inplace(self, **kwargs) -> \"Dataset\":\n \"\"\"Persist all Dask arrays in memory\n \"\"\"\n # access .data to coerce everything to numpy or dask arrays\n lazy_data = {\n k: v._data\n for k, v in self.variables.items()\n if isinstance(v._data, dask_array_type)\n }\n if lazy_data:\n import dask\n\n # evaluate all the dask arrays simultaneously\n evaluated_data = dask.persist(*lazy_data.values(), **kwargs)\n\n for k, data in zip(lazy_data, evaluated_data):\n self.variables[k].data = data\n\n return self\n\n def persist(self, **kwargs) -> \"Dataset\":\n \"\"\" Trigger computation, keeping data as dask arrays\n\n This operation can be used to trigger computation on underlying dask\n arrays, similar to ``.compute()`` or ``.load()``. However this\n operation keeps the data as dask arrays. This is particularly useful\n when using the dask.distributed scheduler and you want to load a large\n amount of data into distributed memory.\n\n Parameters\n ----------\n **kwargs : dict\n Additional keyword arguments passed on to ``dask.persist``.\n\n See Also\n --------\n dask.persist\n \"\"\"\n new = self.copy(deep=False)\n return new._persist_inplace(**kwargs)\n\n @classmethod\n def _construct_direct(\n cls,\n variables,\n coord_names,\n dims=None,\n attrs=None,\n indexes=None,\n encoding=None,\n file_obj=None,\n ):\n \"\"\"Shortcut around __init__ for internal use when we want to skip\n costly validation\n \"\"\"\n if dims is None:\n dims = calculate_dimensions(variables)\n obj = object.__new__(cls)\n obj._variables = variables\n obj._coord_names = coord_names\n obj._dims = dims\n obj._indexes = indexes\n obj._attrs = attrs\n obj._file_obj = file_obj\n obj._encoding = encoding\n obj._accessors = None\n return obj\n\n __default = object()\n\n @classmethod\n def _from_vars_and_coord_names(cls, variables, coord_names, attrs=None):\n return cls._construct_direct(variables, coord_names, attrs=attrs)\n\n # TODO(shoyer): renable type checking on this signature when pytype has a\n # good way to handle defaulting arguments to a sentinel value:\n # https://github.com/python/mypy/issues/1803\n def _replace( # type: ignore\n self,\n variables: Dict[Hashable, Variable] = None,\n coord_names: Set[Hashable] = None,\n dims: Dict[Any, int] = None,\n attrs: Optional[Dict[Hashable, Any]] = __default,\n indexes: Optional[Dict[Any, pd.Index]] = __default,\n encoding: Optional[dict] = __default,\n inplace: bool = False,\n ) -> \"Dataset\":\n \"\"\"Fastpath constructor for internal use.\n\n Returns an object with optionally with replaced attributes.\n\n Explicitly passed arguments are *not* copied when placed on the new\n dataset. 
It is up to the caller to ensure that they have the right type\n and are not used elsewhere.\n \"\"\"\n if inplace:\n if variables is not None:\n self._variables = variables\n if coord_names is not None:\n self._coord_names = coord_names\n if dims is not None:\n self._dims = dims\n if attrs is not self.__default:\n self._attrs = attrs\n if indexes is not self.__default:\n self._indexes = indexes\n if encoding is not self.__default:\n self._encoding = encoding\n obj = self\n else:\n if variables is None:\n variables = self._variables.copy()\n if coord_names is None:\n coord_names = self._coord_names.copy()\n if dims is None:\n dims = self._dims.copy()\n if attrs is self.__default:\n attrs = copy.copy(self._attrs)\n if indexes is self.__default:\n indexes = copy.copy(self._indexes)\n if encoding is self.__default:\n encoding = copy.copy(self._encoding)\n obj = self._construct_direct(\n variables, coord_names, dims, attrs, indexes, encoding\n )\n return obj\n\n def _replace_with_new_dims( # type: ignore\n self,\n variables: Dict[Hashable, Variable],\n coord_names: set = None,\n attrs: Optional[Dict[Hashable, Any]] = __default,\n indexes: Dict[Hashable, pd.Index] = __default,\n inplace: bool = False,\n ) -> \"Dataset\":\n \"\"\"Replace variables with recalculated dimensions.\"\"\"\n dims = calculate_dimensions(variables)\n return self._replace(\n variables, coord_names, dims, attrs, indexes, inplace=inplace\n )\n\n def _replace_vars_and_dims( # type: ignore\n self,\n variables: Dict[Hashable, Variable],\n coord_names: set = None,\n dims: Dict[Hashable, int] = None,\n attrs: Dict[Hashable, Any] = __default,\n inplace: bool = False,\n ) -> \"Dataset\":\n \"\"\"Deprecated version of _replace_with_new_dims().\n\n Unlike _replace_with_new_dims(), this method always recalculates\n indexes from variables.\n \"\"\"\n if dims is None:\n dims = calculate_dimensions(variables)\n return self._replace(\n variables, coord_names, dims, attrs, indexes=None, inplace=inplace\n )\n\n def _overwrite_indexes(self, indexes: Mapping[Any, pd.Index]) -> \"Dataset\":\n if not indexes:\n return self\n\n variables = self._variables.copy()\n new_indexes = dict(self.indexes)\n for name, idx in indexes.items():\n variables[name] = IndexVariable(name, idx)\n new_indexes[name] = idx\n obj = self._replace(variables, indexes=new_indexes)\n\n # switch from dimension to level names, if necessary\n dim_names: Dict[Hashable, str] = {}\n for dim, idx in indexes.items():\n if not isinstance(idx, pd.MultiIndex) and idx.name != dim:\n dim_names[dim] = idx.name\n if dim_names:\n obj = obj.rename(dim_names)\n return obj\n\n def copy(self, deep: bool = False, data: Mapping = None) -> \"Dataset\":\n \"\"\"Returns a copy of this dataset.\n\n If `deep=True`, a deep copy is made of each of the component variables.\n Otherwise, a shallow copy of each of the component variable is made, so\n that the underlying memory region of the new dataset is the same as in\n the original dataset.\n\n Use `data` to create a new object with the same structure as\n original but entirely new data.\n\n Parameters\n ----------\n deep : bool, optional\n Whether each component variable is loaded into memory and copied onto\n the new object. Default is False.\n data : dict-like, optional\n Data to use in the new object. Each item in `data` must have same\n shape as corresponding data variable in original. 
When `data` is\n used, `deep` is ignored for the data variables and only used for\n coords.\n\n Returns\n -------\n object : Dataset\n New object with dimensions, attributes, coordinates, name, encoding,\n and optionally data copied from original.\n\n Examples\n --------\n\n Shallow copy versus deep copy\n\n >>> da = xr.DataArray(np.random.randn(2, 3))\n >>> ds = xr.Dataset({'foo': da, 'bar': ('x', [-1, 2])},\n coords={'x': ['one', 'two']})\n >>> ds.copy()\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) >> ds_0 = ds.copy(deep=False)\n >>> ds_0['foo'][0, 0] = 7\n >>> ds_0\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) >> ds\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) >> ds.copy(data={'foo': np.arange(6).reshape(2, 3), 'bar': ['a', 'b']})\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) >> ds\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Coordinates:\n * x (x) Dict[str, Hashable]:\n \"\"\"Return a mapping of all MultiIndex levels and their corresponding\n coordinate name.\n \"\"\"\n level_coords: Dict[str, Hashable] = {}\n for name, index in self.indexes.items():\n if isinstance(index, pd.MultiIndex):\n level_names = index.names\n (dim,) = self.variables[name].dims\n level_coords.update({lname: dim for lname in level_names})\n return level_coords\n\n def _copy_listed(self, names: Iterable[Hashable]) -> \"Dataset\":\n \"\"\"Create a new Dataset with the listed variables from this dataset and\n the all relevant coordinates. Skips all validation.\n \"\"\"\n variables: Dict[Hashable, Variable] = {}\n coord_names = set()\n indexes: Dict[Hashable, pd.Index] = {}\n\n for name in names:\n try:\n variables[name] = self._variables[name]\n except KeyError:\n ref_name, var_name, var = _get_virtual_variable(\n self._variables, name, self._level_coords, self.dims\n )\n variables[var_name] = var\n if ref_name in self._coord_names or ref_name in self.dims:\n coord_names.add(var_name)\n if (var_name,) == var.dims:\n indexes[var_name] = var.to_index()\n\n needed_dims: Set[Hashable] = set()\n for v in variables.values():\n needed_dims.update(v.dims)\n\n dims = {k: self.dims[k] for k in needed_dims}\n\n for k in self._coord_names:\n if set(self.variables[k].dims) <= needed_dims:\n variables[k] = self._variables[k]\n coord_names.add(k)\n if k in self.indexes:\n indexes[k] = self.indexes[k]\n\n return self._replace(variables, coord_names, dims, indexes=indexes)\n\n def _construct_dataarray(self, name: Hashable) -> \"DataArray\":\n \"\"\"Construct a DataArray by indexing this dataset\n \"\"\"\n from .dataarray import DataArray\n\n try:\n variable = self._variables[name]\n except KeyError:\n _, name, variable = _get_virtual_variable(\n self._variables, name, self._level_coords, self.dims\n )\n\n needed_dims = set(variable.dims)\n\n coords: Dict[Hashable, Variable] = {}\n for k in self.coords:\n if set(self.variables[k].dims) <= needed_dims:\n coords[k] = self.variables[k]\n\n if self._indexes is None:\n indexes = None\n else:\n indexes = {k: v for k, v in self._indexes.items() if k in coords}\n\n return DataArray(variable, coords, name=name, indexes=indexes, fastpath=True)\n\n def __copy__(self) -> \"Dataset\":\n return self.copy(deep=False)\n\n def __deepcopy__(self, memo=None) -> \"Dataset\":\n # memo does nothing but is required for compatibility with\n # copy.deepcopy\n return self.copy(deep=True)\n\n @property\n def _attr_sources(self) -> List[Mapping[Hashable, Any]]:\n \"\"\"List of places to look-up items for 
attribute-style access\n \"\"\"\n return self._item_sources + [self.attrs]\n\n @property\n def _item_sources(self) -> List[Mapping[Hashable, Any]]:\n \"\"\"List of places to look-up items for key-completion\n \"\"\"\n return [\n self.data_vars,\n self.coords,\n {d: self[d] for d in self.dims},\n LevelCoordinatesSource(self),\n ]\n\n def __contains__(self, key: object) -> bool:\n \"\"\"The 'in' operator will return true or false depending on whether\n 'key' is an array in the dataset or not.\n \"\"\"\n return key in self._variables\n\n def __len__(self) -> int:\n return len(self.data_vars)\n\n def __bool__(self) -> bool:\n return bool(self.data_vars)\n\n def __iter__(self) -> Iterator[Hashable]:\n return iter(self.data_vars)\n\n def __array__(self, dtype=None):\n raise TypeError(\n \"cannot directly convert an xarray.Dataset into a \"\n \"numpy array. Instead, create an xarray.DataArray \"\n \"first, either with indexing on the Dataset or by \"\n \"invoking the `to_array()` method.\"\n )\n\n @property\n def nbytes(self) -> int:\n return sum(v.nbytes for v in self.variables.values())\n\n @property\n def loc(self) -> _LocIndexer:\n \"\"\"Attribute for location based indexing. Only supports __getitem__,\n and only when the key is a dict of the form {dim: labels}.\n \"\"\"\n return _LocIndexer(self)\n\n def __getitem__(self, key: Any) -> \"Union[DataArray, Dataset]\":\n \"\"\"Access variables or coordinates this dataset as a\n :py:class:`~xarray.DataArray`.\n\n Indexing with a list of names will return a new ``Dataset`` object.\n \"\"\"\n # TODO(shoyer): type this properly: https://github.com/python/mypy/issues/7328\n if utils.is_dict_like(key):\n return self.isel(**cast(Mapping, key))\n\n if hashable(key):\n return self._construct_dataarray(key)\n else:\n return self._copy_listed(np.asarray(key))\n\n def __setitem__(self, key: Hashable, value) -> None:\n \"\"\"Add an array to this dataset.\n\n If value is a `DataArray`, call its `select_vars()` method, rename it\n to `key` and merge the contents of the resulting dataset into this\n dataset.\n\n If value is an `Variable` object (or tuple of form\n ``(dims, data[, attrs])``), add it to this dataset as a new\n variable.\n \"\"\"\n if utils.is_dict_like(key):\n raise NotImplementedError(\n \"cannot yet use a dictionary as a key \" \"to set Dataset values\"\n )\n\n self.update({key: value})\n\n def __delitem__(self, key: Hashable) -> None:\n \"\"\"Remove a variable from this dataset.\n \"\"\"\n del self._variables[key]\n self._coord_names.discard(key)\n if key in self.indexes:\n assert self._indexes is not None\n del self._indexes[key]\n self._dims = calculate_dimensions(self._variables)\n\n # mutable objects should not be hashable\n # https://github.com/python/mypy/issues/4266\n __hash__ = None # type: ignore\n\n def _all_compat(self, other: \"Dataset\", compat_str: str) -> bool:\n \"\"\"Helper function for equals and identical\n \"\"\"\n\n # some stores (e.g., scipy) do not seem to preserve order, so don't\n # require matching order for equality\n def compat(x: Variable, y: Variable) -> bool:\n return getattr(x, compat_str)(y)\n\n return self._coord_names == other._coord_names and utils.dict_equiv(\n self._variables, other._variables, compat=compat\n )\n\n def broadcast_equals(self, other: \"Dataset\") -> bool:\n \"\"\"Two Datasets are broadcast equal if they are equal after\n broadcasting all variables against each other.\n\n For example, variables that are scalar in one dataset but non-scalar in\n the other dataset can still be broadcast equal 
if the the non-scalar\n variable is a constant.\n\n See Also\n --------\n Dataset.equals\n Dataset.identical\n \"\"\"\n try:\n return self._all_compat(other, \"broadcast_equals\")\n except (TypeError, AttributeError):\n return False\n\n def equals(self, other: \"Dataset\") -> bool:\n \"\"\"Two Datasets are equal if they have matching variables and\n coordinates, all of which are equal.\n\n Datasets can still be equal (like pandas objects) if they have NaN\n values in the same locations.\n\n This method is necessary because `v1 == v2` for ``Dataset``\n does element-wise comparisons (like numpy.ndarrays).\n\n See Also\n --------\n Dataset.broadcast_equals\n Dataset.identical\n \"\"\"\n try:\n return self._all_compat(other, \"equals\")\n except (TypeError, AttributeError):\n return False\n\n def identical(self, other: \"Dataset\") -> bool:\n \"\"\"Like equals, but also checks all dataset attributes and the\n attributes on all variables and coordinates.\n\n See Also\n --------\n Dataset.broadcast_equals\n Dataset.equals\n \"\"\"\n try:\n return utils.dict_equiv(self.attrs, other.attrs) and self._all_compat(\n other, \"identical\"\n )\n except (TypeError, AttributeError):\n return False\n\n @property\n def indexes(self) -> Indexes:\n \"\"\"Mapping of pandas.Index objects used for label based indexing\n \"\"\"\n if self._indexes is None:\n self._indexes = default_indexes(self._variables, self._dims)\n return Indexes(self._indexes)\n\n @property\n def coords(self) -> DatasetCoordinates:\n \"\"\"Dictionary of xarray.DataArray objects corresponding to coordinate\n variables\n \"\"\"\n return DatasetCoordinates(self)\n\n @property\n def data_vars(self) -> DataVariables:\n \"\"\"Dictionary of DataArray objects corresponding to data variables\n \"\"\"\n return DataVariables(self)\n\n def set_coords(\n self, names: \"Union[Hashable, Iterable[Hashable]]\", inplace: bool = None\n ) -> \"Dataset\":\n \"\"\"Given names of one or more variables, set them as coordinates\n\n Parameters\n ----------\n names : hashable or iterable of hashables\n Name(s) of variables in this dataset to convert into coordinates.\n\n Returns\n -------\n Dataset\n\n See also\n --------\n Dataset.swap_dims\n \"\"\"\n # TODO: allow inserting new coordinates with this method, like\n # DataFrame.set_index?\n # nb. check in self._variables, not self.data_vars to insure that the\n # operation is idempotent\n _check_inplace(inplace)\n if isinstance(names, str) or not isinstance(names, Iterable):\n names = [names]\n else:\n names = list(names)\n self._assert_all_in_dataset(names)\n obj = self.copy()\n obj._coord_names.update(names)\n return obj\n\n def reset_coords(\n self,\n names: \"Union[Hashable, Iterable[Hashable], None]\" = None,\n drop: bool = False,\n inplace: bool = None,\n ) -> \"Dataset\":\n \"\"\"Given names of coordinates, reset them to become variables\n\n Parameters\n ----------\n names : hashable or iterable of hashables, optional\n Name(s) of non-index coordinates in this dataset to reset into\n variables. 
By default, all non-index coordinates are reset.\n drop : bool, optional\n If True, remove coordinates instead of converting them into\n variables.\n\n Returns\n -------\n Dataset\n \"\"\"\n _check_inplace(inplace)\n if names is None:\n names = self._coord_names - set(self.dims)\n else:\n if isinstance(names, str) or not isinstance(names, Iterable):\n names = [names]\n else:\n names = list(names)\n self._assert_all_in_dataset(names)\n bad_coords = set(names) & set(self.dims)\n if bad_coords:\n raise ValueError(\n \"cannot remove index coordinates with reset_coords: %s\" % bad_coords\n )\n obj = self.copy()\n obj._coord_names.difference_update(names)\n if drop:\n for name in names:\n del obj._variables[name]\n return obj\n\n def dump_to_store(self, store: \"AbstractDataStore\", **kwargs) -> None:\n \"\"\"Store dataset contents to a backends.*DataStore object.\n \"\"\"\n from ..backends.api import dump_to_store\n\n # TODO: rename and/or cleanup this method to make it more consistent\n # with to_netcdf()\n dump_to_store(self, store, **kwargs)\n\n def to_netcdf(\n self,\n path=None,\n mode: str = \"w\",\n format: str = None,\n group: str = None,\n engine: str = None,\n encoding: Mapping = None,\n unlimited_dims: Iterable[Hashable] = None,\n compute: bool = True,\n invalid_netcdf: bool = False,\n ) -> Union[bytes, \"Delayed\", None]:\n \"\"\"Write dataset contents to a netCDF file.\n\n Parameters\n ----------\n path : str, Path or file-like object, optional\n Path to which to save this dataset. File-like objects are only\n supported by the scipy engine. If no path is provided, this\n function returns the resulting netCDF file as bytes; in this case,\n we need to use scipy, which does not support netCDF version 4 (the\n default format becomes NETCDF3_64BIT).\n mode : {'w', 'a'}, optional\n Write ('w') or append ('a') mode. If mode='w', any existing file at\n this location will be overwritten. If mode='a', existing variables\n will be overwritten.\n format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT',\n 'NETCDF3_CLASSIC'}, optional\n File format for the resulting netCDF file:\n\n * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API\n features.\n * NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only\n netCDF 3 compatible API features.\n * NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,\n which fully supports 2+ GB files, but is only compatible with\n clients linked against netCDF version 3.6.0 or later.\n * NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not\n handle 2+ GB files very well.\n\n All formats are supported by the netCDF4-python library.\n scipy.io.netcdf only supports the last two formats.\n\n The default format is NETCDF4 if you are saving a file to disk and\n have the netCDF4-python library available. Otherwise, xarray falls\n back to using scipy to write netCDF files and defaults to the\n NETCDF3_64BIT format (scipy does not support netCDF4).\n group : str, optional\n Path to the netCDF4 group in the given file to open (only works for\n format='NETCDF4'). The group(s) will be created if necessary.\n engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional\n Engine to use when writing netCDF files. 
If not provided, the\n default engine is chosen based on available dependencies, with a\n preference for 'netcdf4' if writing to a file on disk.\n encoding : dict, optional\n Nested dictionary with variable names as keys and dictionaries of\n variable specific encodings as values, e.g.,\n ``{'my_variable': {'dtype': 'int16', 'scale_factor': 0.1,\n 'zlib': True}, ...}``\n\n The `h5netcdf` engine supports both the NetCDF4-style compression\n encoding parameters ``{'zlib': True, 'complevel': 9}`` and the h5py\n ones ``{'compression': 'gzip', 'compression_opts': 9}``.\n This allows using any compression plugin installed in the HDF5\n library, e.g. LZF.\n\n unlimited_dims : iterable of hashable, optional\n Dimension(s) that should be serialized as unlimited dimensions.\n By default, no dimensions are treated as unlimited dimensions.\n Note that unlimited_dims may also be set via\n ``dataset.encoding['unlimited_dims']``.\n compute: boolean\n If true compute immediately, otherwise return a\n ``dask.delayed.Delayed`` object that can be computed later.\n invalid_netcdf: boolean\n Only valid along with engine='h5netcdf'. If True, allow writing\n hdf5 files which are valid netcdf as described in\n https://github.com/shoyer/h5netcdf. Default: False.\n \"\"\"\n if encoding is None:\n encoding = {}\n from ..backends.api import to_netcdf\n\n return to_netcdf(\n self,\n path,\n mode,\n format=format,\n group=group,\n engine=engine,\n encoding=encoding,\n unlimited_dims=unlimited_dims,\n compute=compute,\n invalid_netcdf=invalid_netcdf,\n )\n\n def to_zarr(\n self,\n store: Union[MutableMapping, str, Path] = None,\n mode: str = None,\n synchronizer=None,\n group: str = None,\n encoding: Mapping = None,\n compute: bool = True,\n consolidated: bool = False,\n append_dim: Hashable = None,\n ) -> \"ZarrStore\":\n \"\"\"Write dataset contents to a zarr group.\n\n .. note:: Experimental\n The Zarr backend is new and experimental. Please report any\n unexpected behavior via github issues.\n\n Parameters\n ----------\n store : MutableMapping, str or Path, optional\n Store or path to directory in file system.\n mode : {'w', 'w-', 'a', None}\n Persistence mode: 'w' means create (overwrite if exists);\n 'w-' means create (fail if exists);\n 'a' means append (create if does not exist).\n If ``append_dim`` is set, ``mode`` can be omitted as it is\n internally set to ``'a'``. Otherwise, ``mode`` will default to\n `w-` if not set.\n synchronizer : object, optional\n Array synchronizer\n group : str, optional\n Group path. (a.k.a. 
`path` in zarr terminology.)\n encoding : dict, optional\n Nested dictionary with variable names as keys and dictionaries of\n variable specific encodings as values, e.g.,\n ``{'my_variable': {'dtype': 'int16', 'scale_factor': 0.1,}, ...}``\n compute: bool, optional\n If True compute immediately, otherwise return a\n ``dask.delayed.Delayed`` object that can be computed later.\n consolidated: bool, optional\n If True, apply zarr's `consolidate_metadata` function to the store\n after writing.\n append_dim: hashable, optional\n If set, the dimension on which the data will be appended.\n\n References\n ----------\n https://zarr.readthedocs.io/\n \"\"\"\n if encoding is None:\n encoding = {}\n if (mode == \"a\") or (append_dim is not None):\n if mode is None:\n mode = \"a\"\n elif mode != \"a\":\n raise ValueError(\n \"append_dim was set along with mode='{}', either set \"\n \"mode='a' or don't set it.\".format(mode)\n )\n elif mode is None:\n mode = \"w-\"\n if mode not in [\"w\", \"w-\", \"a\"]:\n # TODO: figure out how to handle 'r+'\n raise ValueError(\n \"The only supported options for mode are 'w',\" \"'w-' and 'a'.\"\n )\n from ..backends.api import to_zarr\n\n return to_zarr(\n self,\n store=store,\n mode=mode,\n synchronizer=synchronizer,\n group=group,\n encoding=encoding,\n compute=compute,\n consolidated=consolidated,\n append_dim=append_dim,\n )\n\n def __repr__(self) -> str:\n return formatting.dataset_repr(self)\n\n def info(self, buf=None) -> None:\n \"\"\"\n Concise summary of a Dataset variables and attributes.\n\n Parameters\n ----------\n buf : writable buffer, defaults to sys.stdout\n\n See Also\n --------\n pandas.DataFrame.assign\n ncdump: netCDF's ncdump\n \"\"\"\n if buf is None: # pragma: no cover\n buf = sys.stdout\n\n lines = []\n lines.append(\"xarray.Dataset {\")\n lines.append(\"dimensions:\")\n for name, size in self.dims.items():\n lines.append(\"\\t{name} = {size} ;\".format(name=name, size=size))\n lines.append(\"\\nvariables:\")\n for name, da in self.variables.items():\n dims = \", \".join(da.dims)\n lines.append(\n \"\\t{type} {name}({dims}) ;\".format(type=da.dtype, name=name, dims=dims)\n )\n for k, v in da.attrs.items():\n lines.append(\"\\t\\t{name}:{k} = {v} ;\".format(name=name, k=k, v=v))\n lines.append(\"\\n// global attributes:\")\n for k, v in self.attrs.items():\n lines.append(\"\\t:{k} = {v} ;\".format(k=k, v=v))\n lines.append(\"}\")\n\n buf.write(\"\\n\".join(lines))\n\n @property\n def chunks(self) -> Mapping[Hashable, Tuple[int, ...]]:\n \"\"\"Block dimensions for this dataset's data or None if it's not a dask\n array.\n \"\"\"\n chunks: Dict[Hashable, Tuple[int, ...]] = {}\n for v in self.variables.values():\n if v.chunks is not None:\n for dim, c in zip(v.dims, v.chunks):\n if dim in chunks and c != chunks[dim]:\n raise ValueError(\n f\"Object has inconsistent chunks along dimension {dim}. \"\n \"This can be fixed by calling unify_chunks().\"\n )\n chunks[dim] = c\n return Frozen(SortedKeysDict(chunks))\n\n def chunk(\n self,\n chunks: Union[\n None, Number, Mapping[Hashable, Union[None, Number, Tuple[Number, ...]]]\n ] = None,\n name_prefix: str = \"xarray-\",\n token: str = None,\n lock: bool = False,\n ) -> \"Dataset\":\n \"\"\"Coerce all arrays in this dataset into dask arrays with the given\n chunks.\n\n Non-dask arrays in this dataset will be converted to dask arrays. 
Dask\n arrays will be rechunked to the given chunk sizes.\n\n If neither chunks is not provided for one or more dimensions, chunk\n sizes along that dimension will not be updated; non-dask arrays will be\n converted into dask arrays with a single block.\n\n Parameters\n ----------\n chunks : int or mapping, optional\n Chunk sizes along each dimension, e.g., ``5`` or\n ``{'x': 5, 'y': 5}``.\n name_prefix : str, optional\n Prefix for the name of any new dask arrays.\n token : str, optional\n Token uniquely identifying this dataset.\n lock : optional\n Passed on to :py:func:`dask.array.from_array`, if the array is not\n already as dask array.\n\n Returns\n -------\n chunked : xarray.Dataset\n \"\"\"\n from dask.base import tokenize\n\n if isinstance(chunks, Number):\n chunks = dict.fromkeys(self.dims, chunks)\n\n if chunks is not None:\n bad_dims = chunks.keys() - self.dims.keys()\n if bad_dims:\n raise ValueError(\n \"some chunks keys are not dimensions on this \"\n \"object: %s\" % bad_dims\n )\n\n def selkeys(dict_, keys):\n if dict_ is None:\n return None\n return {d: dict_[d] for d in keys if d in dict_}\n\n def maybe_chunk(name, var, chunks):\n chunks = selkeys(chunks, var.dims)\n if not chunks:\n chunks = None\n if var.ndim > 0:\n token2 = tokenize(name, token if token else var._data)\n name2 = \"%s%s-%s\" % (name_prefix, name, token2)\n return var.chunk(chunks, name=name2, lock=lock)\n else:\n return var\n\n variables = {k: maybe_chunk(k, v, chunks) for k, v in self.variables.items()}\n return self._replace(variables)\n\n def _validate_indexers(\n self, indexers: Mapping[Hashable, Any]\n ) -> Iterator[Tuple[Hashable, Union[int, slice, np.ndarray, Variable]]]:\n \"\"\" Here we make sure\n + indexer has a valid keys\n + indexer is in a valid data type\n + string indexers are cast to the appropriate date type if the\n associated index is a DatetimeIndex or CFTimeIndex\n \"\"\"\n from .dataarray import DataArray\n\n invalid = indexers.keys() - self.dims.keys()\n if invalid:\n raise ValueError(\"dimensions %r do not exist\" % invalid)\n\n # all indexers should be int, slice, np.ndarrays, or Variable\n for k, v in indexers.items():\n if isinstance(v, (int, slice, Variable)):\n yield k, v\n elif isinstance(v, DataArray):\n yield k, v.variable\n elif isinstance(v, tuple):\n yield k, as_variable(v)\n elif isinstance(v, Dataset):\n raise TypeError(\"cannot use a Dataset as an indexer\")\n elif isinstance(v, Sequence) and len(v) == 0:\n yield k, np.empty((0,), dtype=\"int64\")\n else:\n v = np.asarray(v)\n\n if v.dtype.kind in \"US\":\n index = self.indexes[k]\n if isinstance(index, pd.DatetimeIndex):\n v = v.astype(\"datetime64[ns]\")\n elif isinstance(index, xr.CFTimeIndex):\n v = _parse_array_of_cftime_strings(v, index.date_type)\n\n if v.ndim > 1:\n raise IndexError(\n \"Unlabeled multi-dimensional array cannot be \"\n \"used for indexing: {}\".format(k)\n )\n yield k, v\n\n def _validate_interp_indexers(\n self, indexers: Mapping[Hashable, Any]\n ) -> Iterator[Tuple[Hashable, Variable]]:\n \"\"\"Variant of _validate_indexers to be used for interpolation\n \"\"\"\n for k, v in self._validate_indexers(indexers):\n if isinstance(v, Variable):\n if v.ndim == 1:\n yield k, v.to_index_variable()\n else:\n yield k, v\n elif isinstance(v, int):\n yield k, Variable((), v)\n elif isinstance(v, np.ndarray):\n if v.ndim == 0:\n yield k, Variable((), v)\n elif v.ndim == 1:\n yield k, IndexVariable((k,), v)\n else:\n raise AssertionError() # Already tested by _validate_indexers\n else:\n raise 
TypeError(type(v))\n\n def _get_indexers_coords_and_indexes(self, indexers):\n \"\"\"Extract coordinates and indexes from indexers.\n\n Only coordinate with a name different from any of self.variables will\n be attached.\n \"\"\"\n from .dataarray import DataArray\n\n coords_list = []\n for k, v in indexers.items():\n if isinstance(v, DataArray):\n if v.dtype.kind == \"b\":\n if v.ndim != 1: # we only support 1-d boolean array\n raise ValueError(\n \"{:d}d-boolean array is used for indexing along \"\n \"dimension {!r}, but only 1d boolean arrays are \"\n \"supported.\".format(v.ndim, k)\n )\n # Make sure in case of boolean DataArray, its\n # coordinate also should be indexed.\n v_coords = v[v.values.nonzero()[0]].coords\n else:\n v_coords = v.coords\n coords_list.append(v_coords)\n\n # we don't need to call align() explicitly or check indexes for\n # alignment, because merge_variables already checks for exact alignment\n # between dimension coordinates\n coords, indexes = merge_coordinates_without_align(coords_list)\n assert_coordinate_consistent(self, coords)\n\n # silently drop the conflicted variables.\n attached_coords = {k: v for k, v in coords.items() if k not in self._variables}\n attached_indexes = {\n k: v for k, v in indexes.items() if k not in self._variables\n }\n return attached_coords, attached_indexes\n\n def isel(\n self,\n indexers: Mapping[Hashable, Any] = None,\n drop: bool = False,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Returns a new dataset with each array indexed along the specified\n dimension(s).\n\n This method selects values from each array using its `__getitem__`\n method, except this method does not require knowing the order of\n each array's dimensions.\n\n Parameters\n ----------\n indexers : dict, optional\n A dict with keys matching dimensions and values given\n by integers, slice objects or arrays.\n indexer can be a integer, slice, array-like or DataArray.\n If DataArrays are passed as indexers, xarray-style indexing will be\n carried out. 
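For example (illustrative only, assuming the dataset has dimensions named ``'time'`` and ``'x'``):\n\n        >>> subset = ds.isel(time=slice(0, 10), x=[0, 2, 4])  # purely positional indexing\n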
See :ref:`indexing` for the details.\n One of indexers or indexers_kwargs must be provided.\n drop : bool, optional\n If ``drop=True``, drop coordinates variables indexed by integers\n instead of making them scalar.\n **indexers_kwarg : {dim: indexer, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n Returns\n -------\n obj : Dataset\n A new Dataset with the same contents as this dataset, except each\n array and dimension is indexed by the appropriate indexers.\n If indexer DataArrays have coordinates that do not conflict with\n this object, then these coordinates will be attached.\n In general, each array's data will be a view of the array's data\n in this dataset, unless vectorized indexing was triggered by using\n an array indexer, in which case the data will be a copy.\n\n See Also\n --------\n Dataset.sel\n DataArray.isel\n \"\"\"\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"isel\")\n # Note: we need to preserve the original indexers variable in order to merge the\n # coords below\n indexers_list = list(self._validate_indexers(indexers))\n\n variables: Dict[Hashable, Variable] = {}\n indexes: Dict[Hashable, pd.Index] = {}\n\n for name, var in self.variables.items():\n var_indexers = {k: v for k, v in indexers_list if k in var.dims}\n if drop and name in var_indexers:\n continue # drop this variable\n\n if name in self.indexes:\n new_var, new_index = isel_variable_and_index(\n name, var, self.indexes[name], var_indexers\n )\n if new_index is not None:\n indexes[name] = new_index\n elif var_indexers:\n new_var = var.isel(indexers=var_indexers)\n else:\n new_var = var.copy(deep=False)\n\n variables[name] = new_var\n\n coord_names = self._coord_names & variables.keys()\n selected = self._replace_with_new_dims(variables, coord_names, indexes)\n\n # Extract coordinates from indexers\n coord_vars, new_indexes = selected._get_indexers_coords_and_indexes(indexers)\n variables.update(coord_vars)\n indexes.update(new_indexes)\n coord_names = self._coord_names & variables.keys() | coord_vars.keys()\n return self._replace_with_new_dims(variables, coord_names, indexes=indexes)\n\n def sel(\n self,\n indexers: Mapping[Hashable, Any] = None,\n method: str = None,\n tolerance: Number = None,\n drop: bool = False,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Returns a new dataset with each array indexed by tick labels\n along the specified dimension(s).\n\n In contrast to `Dataset.isel`, indexers for this method should use\n labels instead of integers.\n\n Under the hood, this method is powered by using pandas's powerful Index\n objects. This makes label based indexing essentially just as fast as\n using integer indexing.\n\n It also means this method uses pandas's (well documented) logic for\n indexing. This means you can use string shortcuts for datetime indexes\n (e.g., '2000-01' to select all values in January 2000). It also means\n that slices are treated as inclusive of both the start and stop values,\n unlike normal Python indexing.\n\n Parameters\n ----------\n indexers : dict, optional\n A dict with keys matching dimensions and values given\n by scalars, slices or arrays of tick labels. For dimensions with\n multi-index, the indexer may also be a dict-like object with keys\n matching index level names.\n If DataArrays are passed as indexers, xarray-style indexing will be\n carried out. 
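For example (illustrative only, assuming a datetime ``'time'`` index on the dataset):\n\n        >>> subset = ds.sel(time='2000-01')  # every label in January 2000\n        >>> subset = ds.sel(time=slice('2000-01-01', '2000-06-30'))  # inclusive slice\n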
See :ref:`indexing` for the details.\n One of indexers or indexers_kwargs must be provided.\n method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional\n Method to use for inexact matches:\n\n * None (default): only exact matches\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n drop : bool, optional\n If ``drop=True``, drop coordinates variables in `indexers` instead\n of making them scalar.\n **indexers_kwarg : {dim: indexer, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n Returns\n -------\n obj : Dataset\n A new Dataset with the same contents as this dataset, except each\n variable and dimension is indexed by the appropriate indexers.\n If indexer DataArrays have coordinates that do not conflict with\n this object, then these coordinates will be attached.\n In general, each array's data will be a view of the array's data\n in this dataset, unless vectorized indexing was triggered by using\n an array indexer, in which case the data will be a copy.\n\n\n See Also\n --------\n Dataset.isel\n DataArray.sel\n \"\"\"\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"sel\")\n pos_indexers, new_indexes = remap_label_indexers(\n self, indexers=indexers, method=method, tolerance=tolerance\n )\n result = self.isel(indexers=pos_indexers, drop=drop)\n return result._overwrite_indexes(new_indexes)\n\n def head(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Returns a new dataset with the first `n` values of each array\n for the specified dimension(s).\n\n Parameters\n ----------\n indexers : dict or int, default: 5\n A dict with keys matching dimensions and integer values `n`\n or a single integer `n` applied over all dimensions.\n One of indexers or indexers_kwargs must be provided.\n **indexers_kwargs : {dim: n, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n\n See Also\n --------\n Dataset.tail\n Dataset.thin\n DataArray.head\n \"\"\"\n if not indexers_kwargs:\n if indexers is None:\n indexers = 5\n if not isinstance(indexers, int) and not is_dict_like(indexers):\n raise TypeError(\"indexers must be either dict-like or a single integer\")\n if isinstance(indexers, int):\n indexers = {dim: indexers for dim in self.dims}\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"head\")\n for k, v in indexers.items():\n if not isinstance(v, int):\n raise TypeError(\n \"expected integer type indexer for \"\n \"dimension %r, found %r\" % (k, type(v))\n )\n elif v < 0:\n raise ValueError(\n \"expected positive integer as indexer \"\n \"for dimension %r, found %s\" % (k, v)\n )\n indexers_slices = {k: slice(val) for k, val in indexers.items()}\n return self.isel(indexers_slices)\n\n def tail(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Returns a new dataset with the last `n` values of each array\n for the specified dimension(s).\n\n Parameters\n ----------\n indexers : dict or int, default: 5\n A dict with keys matching dimensions and integer values `n`\n 
or a single integer `n` applied over all dimensions.\n One of indexers or indexers_kwargs must be provided.\n **indexers_kwargs : {dim: n, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n\n See Also\n --------\n Dataset.head\n Dataset.thin\n DataArray.tail\n \"\"\"\n if not indexers_kwargs:\n if indexers is None:\n indexers = 5\n if not isinstance(indexers, int) and not is_dict_like(indexers):\n raise TypeError(\"indexers must be either dict-like or a single integer\")\n if isinstance(indexers, int):\n indexers = {dim: indexers for dim in self.dims}\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"tail\")\n for k, v in indexers.items():\n if not isinstance(v, int):\n raise TypeError(\n \"expected integer type indexer for \"\n \"dimension %r, found %r\" % (k, type(v))\n )\n elif v < 0:\n raise ValueError(\n \"expected positive integer as indexer \"\n \"for dimension %r, found %s\" % (k, v)\n )\n indexers_slices = {\n k: slice(-val, None) if val != 0 else slice(val)\n for k, val in indexers.items()\n }\n return self.isel(indexers_slices)\n\n def thin(\n self,\n indexers: Union[Mapping[Hashable, int], int] = None,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Returns a new dataset with each array indexed along every `n`th\n value for the specified dimension(s)\n\n Parameters\n ----------\n indexers : dict or int, default: 5\n A dict with keys matching dimensions and integer values `n`\n or a single integer `n` applied over all dimensions.\n One of indexers or indexers_kwargs must be provided.\n **indexers_kwargs : {dim: n, ...}, optional\n The keyword arguments form of ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n\n See Also\n --------\n Dataset.head\n Dataset.tail\n DataArray.thin\n \"\"\"\n if (\n not indexers_kwargs\n and not isinstance(indexers, int)\n and not is_dict_like(indexers)\n ):\n raise TypeError(\"indexers must be either dict-like or a single integer\")\n if isinstance(indexers, int):\n indexers = {dim: indexers for dim in self.dims}\n indexers = either_dict_or_kwargs(indexers, indexers_kwargs, \"thin\")\n for k, v in indexers.items():\n if not isinstance(v, int):\n raise TypeError(\n \"expected integer type indexer for \"\n \"dimension %r, found %r\" % (k, type(v))\n )\n elif v < 0:\n raise ValueError(\n \"expected positive integer as indexer \"\n \"for dimension %r, found %s\" % (k, v)\n )\n elif v == 0:\n raise ValueError(\"step cannot be zero\")\n indexers_slices = {k: slice(None, None, val) for k, val in indexers.items()}\n return self.isel(indexers_slices)\n\n def broadcast_like(\n self, other: Union[\"Dataset\", \"DataArray\"], exclude: Iterable[Hashable] = None\n ) -> \"Dataset\":\n \"\"\"Broadcast this DataArray against another Dataset or DataArray.\n This is equivalent to xr.broadcast(other, self)[1]\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object against which to broadcast this array.\n exclude : iterable of hashable, optional\n Dimensions that must not be broadcasted\n\n \"\"\"\n if exclude is None:\n exclude = set()\n else:\n exclude = set(exclude)\n args = align(other, self, join=\"outer\", copy=False, exclude=exclude)\n\n dims_map, common_coords = _get_broadcast_dims_map_common_coords(args, exclude)\n\n return _broadcast_helper(args[1], exclude, dims_map, common_coords)\n\n def reindex_like(\n self,\n other: Union[\"Dataset\", \"DataArray\"],\n method: str = None,\n tolerance: Number = None,\n copy: bool = True,\n fill_value: 
Any = dtypes.NA,\n ) -> \"Dataset\":\n \"\"\"Conform this object onto the indexes of another object, filling in\n missing values with ``fill_value``. The default fill value is NaN.\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object with an 'indexes' attribute giving a mapping from dimension\n names to pandas.Index objects, which provides coordinates upon\n which to index the variables in this dataset. The indexes on this\n other object need not be the same as the indexes on this\n dataset. Any mis-matched index values will be filled in with\n NaN, and any mis-matched dimension names will simply be ignored.\n method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional\n Method to use for filling index values from other not found in this\n dataset:\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n copy : bool, optional\n If ``copy=True``, data in the return value is always copied. If\n ``copy=False`` and reindexing is unnecessary, or can be performed\n with only slice operations, then the output may share memory with\n the input. In either case, a new xarray object is always returned.\n fill_value : scalar, optional\n Value to use for newly missing values\n\n Returns\n -------\n reindexed : Dataset\n Another dataset, with this dataset's data but coordinates from the\n other object.\n\n See Also\n --------\n Dataset.reindex\n align\n \"\"\"\n indexers = alignment.reindex_like_indexers(self, other)\n return self.reindex(\n indexers=indexers,\n method=method,\n copy=copy,\n fill_value=fill_value,\n tolerance=tolerance,\n )\n\n def reindex(\n self,\n indexers: Mapping[Hashable, Any] = None,\n method: str = None,\n tolerance: Number = None,\n copy: bool = True,\n fill_value: Any = dtypes.NA,\n **indexers_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Conform this object onto a new set of indexes, filling in\n missing values with ``fill_value``. The default fill value is NaN.\n\n Parameters\n ----------\n indexers : dict. optional\n Dictionary with keys given by dimension names and values given by\n arrays of coordinates tick labels. Any mis-matched coordinate\n values will be filled in with NaN, and any mis-matched dimension\n names will simply be ignored.\n One of indexers or indexers_kwargs must be provided.\n method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional\n Method to use for filling index values in ``indexers`` not found in\n this dataset:\n\n * None (default): don't fill gaps\n * pad / ffill: propagate last valid index value forward\n * backfill / bfill: propagate next valid index value backward\n * nearest: use nearest valid index value\n tolerance : optional\n Maximum distance between original and new labels for inexact\n matches. The values of the index at the matching locations must\n satisfy the equation ``abs(index[indexer] - target) <= tolerance``.\n copy : bool, optional\n If ``copy=True``, data in the return value is always copied. If\n ``copy=False`` and reindexing is unnecessary, or can be performed\n with only slice operations, then the output may share memory with\n the input. 
In either case, a new xarray object is always returned.\n fill_value : scalar, optional\n Value to use for newly missing values\n **indexers_kwarg : {dim: indexer, ...}, optional\n Keyword arguments in the same form as ``indexers``.\n One of indexers or indexers_kwargs must be provided.\n\n Returns\n -------\n reindexed : Dataset\n Another dataset, with this dataset's data but replaced coordinates.\n\n See Also\n --------\n Dataset.reindex_like\n align\n pandas.Index.get_indexer\n\n Examples\n --------\n\n Create a dataset with some fictional data.\n\n >>> import xarray as xr\n >>> import pandas as pd\n >>> x = xr.Dataset(\n ... {\n ... \"temperature\": (\"station\", 20 * np.random.rand(4)),\n ... \"pressure\": (\"station\", 500 * np.random.rand(4))\n ... },\n ... coords={\"station\": [\"boston\", \"nyc\", \"seattle\", \"denver\"]})\n >>> x\n \n Dimensions: (station: 4)\n Coordinates:\n * station (station) >> x.indexes\n station: Index(['boston', 'nyc', 'seattle', 'denver'], dtype='object', name='station')\n\n Create a new index and reindex the dataset. By default values in the new index that\n do not have corresponding records in the dataset are assigned `NaN`.\n\n >>> new_index = ['boston', 'austin', 'seattle', 'lincoln']\n >>> x.reindex({'station': new_index})\n \n Dimensions: (station: 4)\n Coordinates:\n * station (station) object 'boston' 'austin' 'seattle' 'lincoln'\n Data variables:\n temperature (station) float64 18.84 nan 19.22 nan\n pressure (station) float64 324.1 nan 122.8 nan\n\n We can fill in the missing values by passing a value to the keyword `fill_value`.\n\n >>> x.reindex({'station': new_index}, fill_value=0)\n \n Dimensions: (station: 4)\n Coordinates:\n * station (station) object 'boston' 'austin' 'seattle' 'lincoln'\n Data variables:\n temperature (station) float64 18.84 0.0 19.22 0.0\n pressure (station) float64 324.1 0.0 122.8 0.0\n\n Because the index is not monotonically increasing or decreasing, we cannot use arguments\n to the keyword method to fill the `NaN` values.\n\n >>> x.reindex({'station': new_index}, method='nearest')\n Traceback (most recent call last):\n ...\n raise ValueError('index must be monotonic increasing or decreasing')\n ValueError: index must be monotonic increasing or decreasing\n\n To further illustrate the filling functionality in reindex, we will create a\n dataset with a monotonically increasing index (for example, a sequence of dates).\n\n >>> x2 = xr.Dataset(\n ... {\n ... \"temperature\": (\"time\", [15.57, 12.77, np.nan, 0.3081, 16.59, 15.12]),\n ... \"pressure\": (\"time\", 500 * np.random.rand(6))\n ... },\n ... coords={\"time\": pd.date_range('01/01/2019', periods=6, freq='D')})\n >>> x2\n \n Dimensions: (time: 6)\n Coordinates:\n * time (time) datetime64[ns] 2019-01-01 2019-01-02 ... 2019-01-06\n Data variables:\n temperature (time) float64 15.57 12.77 nan 0.3081 16.59 15.12\n pressure (time) float64 103.4 122.7 452.0 444.0 399.2 486.0\n\n Suppose we decide to expand the dataset to cover a wider date range.\n\n >>> time_index2 = pd.date_range('12/29/2018', periods=10, freq='D')\n >>> x2.reindex({'time': time_index2})\n \n Dimensions: (time: 10)\n Coordinates:\n * time (time) datetime64[ns] 2018-12-29 2018-12-30 ... 2019-01-07\n Data variables:\n temperature (time) float64 nan nan nan 15.57 ... 0.3081 16.59 15.12 nan\n pressure (time) float64 nan nan nan 103.4 ... 444.0 399.2 486.0 nan\n\n The index entries that did not have a value in the original data frame (for example, `2018-12-29`)\n are by default filled with NaN. 
If desired, we can fill in the missing values using one of several options.\n\n For example, to back-propagate the last valid value to fill the `NaN` values,\n pass `bfill` as an argument to the `method` keyword.\n\n >>> x3 = x2.reindex({'time': time_index2}, method='bfill')\n >>> x3\n \n Dimensions: (time: 10)\n Coordinates:\n * time (time) datetime64[ns] 2018-12-29 2018-12-30 ... 2019-01-07\n Data variables:\n temperature (time) float64 15.57 15.57 15.57 15.57 ... 16.59 15.12 nan\n pressure (time) float64 103.4 103.4 103.4 103.4 ... 399.2 486.0 nan\n\n Please note that the `NaN` value present in the original dataset (at index value `2019-01-03`)\n will not be filled by any of the value propagation schemes.\n\n >>> x2.where(x2.temperature.isnull(), drop=True)\n \n Dimensions: (time: 1)\n Coordinates:\n * time (time) datetime64[ns] 2019-01-03\n Data variables:\n temperature (time) float64 nan\n pressure (time) float64 452.0\n >>> x3.where(x3.temperature.isnull(), drop=True)\n \n Dimensions: (time: 2)\n Coordinates:\n * time (time) datetime64[ns] 2019-01-03 2019-01-07\n Data variables:\n temperature (time) float64 nan nan\n pressure (time) float64 452.0 nan\n\n This is because filling while reindexing does not look at dataset values, but only compares\n the original and desired indexes. If you do want to fill in the `NaN` values present in the\n original dataset, use the :py:meth:`~Dataset.fillna()` method.\n\n \"\"\"\n indexers = utils.either_dict_or_kwargs(indexers, indexers_kwargs, \"reindex\")\n\n bad_dims = [d for d in indexers if d not in self.dims]\n if bad_dims:\n raise ValueError(\"invalid reindex dimensions: %s\" % bad_dims)\n\n variables, indexes = alignment.reindex_variables(\n self.variables,\n self.sizes,\n self.indexes,\n indexers,\n method,\n tolerance,\n copy=copy,\n fill_value=fill_value,\n )\n coord_names = set(self._coord_names)\n coord_names.update(indexers)\n return self._replace_with_new_dims(variables, coord_names, indexes=indexes)\n\n def interp(\n self,\n coords: Mapping[Hashable, Any] = None,\n method: str = \"linear\",\n assume_sorted: bool = False,\n kwargs: Mapping[str, Any] = None,\n **coords_kwargs: Any,\n ) -> \"Dataset\":\n \"\"\" Multidimensional interpolation of Dataset.\n\n Parameters\n ----------\n coords : dict, optional\n Mapping from dimension names to the new coordinates.\n New coordinate can be a scalar, array-like or DataArray.\n If DataArrays are passed as new coordates, their dimensions are\n used for the broadcasting.\n method: string, optional.\n {'linear', 'nearest'} for multidimensional array,\n {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}\n for 1-dimensional array. 'linear' is used by default.\n assume_sorted: boolean, optional\n If False, values of coordinates that are interpolated over can be\n in any order and they are sorted first. If True, interpolated\n coordinates are assumed to be an array of monotonically increasing\n values.\n kwargs: dictionary, optional\n Additional keyword passed to scipy's interpolator.\n **coords_kwarg : {dim: coordinate, ...}, optional\n The keyword arguments form of ``coords``.\n One of coords or coords_kwargs must be provided.\n\n Returns\n -------\n interpolated: xr.Dataset\n New dataset on the new coordinates.\n\n Notes\n -----\n scipy is required.\n\n See Also\n --------\n scipy.interpolate.interp1d\n scipy.interpolate.interpn\n \"\"\"\n from . 
import missing\n\n if kwargs is None:\n kwargs = {}\n\n coords = either_dict_or_kwargs(coords, coords_kwargs, \"interp\")\n indexers = dict(self._validate_interp_indexers(coords))\n\n obj = self if assume_sorted else self.sortby([k for k in coords])\n\n def maybe_variable(obj, k):\n # workaround to get variable for dimension without coordinate.\n try:\n return obj._variables[k]\n except KeyError:\n return as_variable((k, range(obj.dims[k])))\n\n def _validate_interp_indexer(x, new_x):\n # In the case of datetimes, the restrictions placed on indexers\n # used with interp are stronger than those which are placed on\n # isel, so we need an additional check after _validate_indexers.\n if _contains_datetime_like_objects(\n x\n ) and not _contains_datetime_like_objects(new_x):\n raise TypeError(\n \"When interpolating over a datetime-like \"\n \"coordinate, the coordinates to \"\n \"interpolate to must be either datetime \"\n \"strings or datetimes. \"\n \"Instead got\\n{}\".format(new_x)\n )\n return x, new_x\n\n variables: Dict[Hashable, Variable] = {}\n for name, var in obj._variables.items():\n if name in indexers:\n continue\n\n if var.dtype.kind in \"uifc\":\n var_indexers = {\n k: _validate_interp_indexer(maybe_variable(obj, k), v)\n for k, v in indexers.items()\n if k in var.dims\n }\n variables[name] = missing.interp(var, var_indexers, method, **kwargs)\n elif all(d not in indexers for d in var.dims):\n # keep unrelated object array\n variables[name] = var\n\n coord_names = obj._coord_names & variables.keys()\n indexes = {k: v for k, v in obj.indexes.items() if k not in indexers}\n selected = self._replace_with_new_dims(\n variables.copy(), coord_names, indexes=indexes\n )\n\n # attach indexer as coordinate\n variables.update(indexers)\n for k, v in indexers.items():\n assert isinstance(v, Variable)\n if v.dims == (k,):\n indexes[k] = v.to_index()\n\n # Extract coordinates from indexers\n coord_vars, new_indexes = selected._get_indexers_coords_and_indexes(coords)\n variables.update(coord_vars)\n indexes.update(new_indexes)\n\n coord_names = obj._coord_names & variables.keys() | coord_vars.keys()\n return self._replace_with_new_dims(variables, coord_names, indexes=indexes)\n\n def interp_like(\n self,\n other: Union[\"Dataset\", \"DataArray\"],\n method: str = \"linear\",\n assume_sorted: bool = False,\n kwargs: Mapping[str, Any] = None,\n ) -> \"Dataset\":\n \"\"\"Interpolate this object onto the coordinates of another object,\n filling the out of range values with NaN.\n\n Parameters\n ----------\n other : Dataset or DataArray\n Object with an 'indexes' attribute giving a mapping from dimension\n names to an 1d array-like, which provides coordinates upon\n which to index the variables in this dataset.\n method: string, optional.\n {'linear', 'nearest'} for multidimensional array,\n {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}\n for 1-dimensional array. 'linear' is used by default.\n assume_sorted: boolean, optional\n If False, values of coordinates that are interpolated over can be\n in any order and they are sorted first. 
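For example (an illustrative sketch, assuming ``other`` is a Dataset or DataArray whose coordinates overlap this dataset's numeric coordinates and that scipy is installed):\n\n        >>> regridded = ds.interp_like(other, method='linear')\n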
If True, interpolated\n coordinates are assumed to be an array of monotonically increasing\n values.\n kwargs: dictionary, optional\n Additional keyword passed to scipy's interpolator.\n\n Returns\n -------\n interpolated: xr.Dataset\n Another dataset by interpolating this dataset's data along the\n coordinates of the other object.\n\n Notes\n -----\n scipy is required.\n If the dataset has object-type coordinates, reindex is used for these\n coordinates instead of the interpolation.\n\n See Also\n --------\n Dataset.interp\n Dataset.reindex_like\n \"\"\"\n if kwargs is None:\n kwargs = {}\n coords = alignment.reindex_like_indexers(self, other)\n\n numeric_coords: Dict[Hashable, pd.Index] = {}\n object_coords: Dict[Hashable, pd.Index] = {}\n for k, v in coords.items():\n if v.dtype.kind in \"uifcMm\":\n numeric_coords[k] = v\n else:\n object_coords[k] = v\n\n ds = self\n if object_coords:\n # We do not support interpolation along object coordinate.\n # reindex instead.\n ds = self.reindex(object_coords)\n return ds.interp(numeric_coords, method, assume_sorted, kwargs)\n\n # Helper methods for rename()\n def _rename_vars(self, name_dict, dims_dict):\n variables = {}\n coord_names = set()\n for k, v in self.variables.items():\n var = v.copy(deep=False)\n var.dims = tuple(dims_dict.get(dim, dim) for dim in v.dims)\n name = name_dict.get(k, k)\n if name in variables:\n raise ValueError(\"the new name %r conflicts\" % (name,))\n variables[name] = var\n if k in self._coord_names:\n coord_names.add(name)\n return variables, coord_names\n\n def _rename_dims(self, name_dict):\n return {name_dict.get(k, k): v for k, v in self.dims.items()}\n\n def _rename_indexes(self, name_dict, dims_set):\n if self._indexes is None:\n return None\n indexes = {}\n for k, v in self.indexes.items():\n new_name = name_dict.get(k, k)\n if new_name not in dims_set:\n continue\n if isinstance(v, pd.MultiIndex):\n new_names = [name_dict.get(k, k) for k in v.names]\n index = pd.MultiIndex(\n v.levels,\n v.labels,\n v.sortorder,\n names=new_names,\n verify_integrity=False,\n )\n else:\n index = pd.Index(v, name=new_name)\n indexes[new_name] = index\n return indexes\n\n def _rename_all(self, name_dict, dims_dict):\n variables, coord_names = self._rename_vars(name_dict, dims_dict)\n dims = self._rename_dims(dims_dict)\n indexes = self._rename_indexes(name_dict, dims.keys())\n return variables, coord_names, dims, indexes\n\n def rename(\n self,\n name_dict: Mapping[Hashable, Hashable] = None,\n inplace: bool = None,\n **names: Hashable,\n ) -> \"Dataset\":\n \"\"\"Returns a new object with renamed variables and dimensions.\n\n Parameters\n ----------\n name_dict : dict-like, optional\n Dictionary whose keys are current variable or dimension names and\n whose values are the desired names.\n **names, optional\n Keyword form of ``name_dict``.\n One of name_dict or names must be provided.\n\n Returns\n -------\n renamed : Dataset\n Dataset with renamed variables and dimensions.\n\n See Also\n --------\n Dataset.swap_dims\n Dataset.rename_vars\n Dataset.rename_dims\n DataArray.rename\n \"\"\"\n _check_inplace(inplace)\n name_dict = either_dict_or_kwargs(name_dict, names, \"rename\")\n for k in name_dict.keys():\n if k not in self and k not in self.dims:\n raise ValueError(\n \"cannot rename %r because it is not a \"\n \"variable or dimension in this dataset\" % k\n )\n\n variables, coord_names, dims, indexes = self._rename_all(\n name_dict=name_dict, dims_dict=name_dict\n )\n return self._replace(variables, coord_names, dims=dims, 
indexes=indexes)\n\n def rename_dims(\n self, dims_dict: Mapping[Hashable, Hashable] = None, **dims: Hashable\n ) -> \"Dataset\":\n \"\"\"Returns a new object with renamed dimensions only.\n\n Parameters\n ----------\n dims_dict : dict-like, optional\n Dictionary whose keys are current dimension names and\n whose values are the desired names.\n **dims, optional\n Keyword form of ``dims_dict``.\n One of dims_dict or dims must be provided.\n\n Returns\n -------\n renamed : Dataset\n Dataset with renamed dimensions.\n\n See Also\n --------\n Dataset.swap_dims\n Dataset.rename\n Dataset.rename_vars\n DataArray.rename\n \"\"\"\n dims_dict = either_dict_or_kwargs(dims_dict, dims, \"rename_dims\")\n for k in dims_dict:\n if k not in self.dims:\n raise ValueError(\n \"cannot rename %r because it is not a \"\n \"dimension in this dataset\" % k\n )\n\n variables, coord_names, sizes, indexes = self._rename_all(\n name_dict={}, dims_dict=dims_dict\n )\n return self._replace(variables, coord_names, dims=sizes, indexes=indexes)\n\n def rename_vars(\n self, name_dict: Mapping[Hashable, Hashable] = None, **names: Hashable\n ) -> \"Dataset\":\n \"\"\"Returns a new object with renamed variables including coordinates\n\n Parameters\n ----------\n name_dict : dict-like, optional\n Dictionary whose keys are current variable or coordinate names and\n whose values are the desired names.\n **names, optional\n Keyword form of ``name_dict``.\n One of name_dict or names must be provided.\n\n Returns\n -------\n renamed : Dataset\n Dataset with renamed variables including coordinates\n\n See Also\n --------\n Dataset.swap_dims\n Dataset.rename\n Dataset.rename_dims\n DataArray.rename\n \"\"\"\n name_dict = either_dict_or_kwargs(name_dict, names, \"rename_vars\")\n for k in name_dict:\n if k not in self:\n raise ValueError(\n \"cannot rename %r because it is not a \"\n \"variable or coordinate in this dataset\" % k\n )\n variables, coord_names, dims, indexes = self._rename_all(\n name_dict=name_dict, dims_dict={}\n )\n return self._replace(variables, coord_names, dims=dims, indexes=indexes)\n\n def swap_dims(\n self, dims_dict: Mapping[Hashable, Hashable], inplace: bool = None\n ) -> \"Dataset\":\n \"\"\"Returns a new object with swapped dimensions.\n\n Parameters\n ----------\n dims_dict : dict-like\n Dictionary whose keys are current dimension names and whose values\n are new names. Each value must already be a variable in the\n dataset.\n\n Returns\n -------\n swapped : Dataset\n Dataset with swapped dimensions.\n\n Examples\n --------\n >>> ds = xr.Dataset(data_vars={\"a\": (\"x\", [5, 7]), \"b\": (\"x\", [0.1, 2.4])},\n coords={\"x\": [\"a\", \"b\"], \"y\": (\"x\", [0, 1])})\n >>> ds\n \n Dimensions: (x: 2)\n Coordinates:\n * x (x) >> ds.swap_dims({\"x\": \"y\"})\n \n Dimensions: (y: 2)\n Coordinates:\n x (y) \"Dataset\":\n \"\"\"Return a new object with an additional axis (or axes) inserted at\n the corresponding position in the array shape. The new object is a\n view into the underlying array, not a copy.\n\n If dim is already a scalar coordinate, it will be promoted to a 1D\n coordinate consisting of a single value.\n\n Parameters\n ----------\n dim : hashable, sequence of hashable, mapping, or None\n Dimensions to include on the new variable. If provided as hashable\n or sequence of hashable, then dimensions are inserted with length\n 1. 
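For example (illustrative only; the ``'batch'`` dimension name and its values are arbitrary):\n\n        >>> expanded = ds.expand_dims('batch')  # insert a new dimension of length 1\n        >>> expanded = ds.expand_dims({'batch': [1, 2, 3]})  # with explicit coordinates\n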
If provided as a mapping, then the keys are the new dimensions\n and the values are either integers (giving the length of the new\n dimensions) or array-like (giving the coordinates of the new\n dimensions).\n axis : integer, sequence of integers, or None\n Axis position(s) where new axis is to be inserted (position(s) on\n the result array). If a list (or tuple) of integers is passed,\n multiple axes are inserted. In this case, dim arguments should be\n same length list. If axis=None is passed, all the axes will be\n inserted to the start of the result array.\n **dim_kwargs : int or sequence/ndarray\n The keywords are arbitrary dimensions being inserted and the values\n are either the lengths of the new dims (if int is given), or their\n coordinates. Note, this is an alternative to passing a dict to the\n dim kwarg and will only be used if dim is None.\n\n Returns\n -------\n expanded : same type as caller\n This object, but with an additional dimension(s).\n \"\"\"\n if dim is None:\n pass\n elif isinstance(dim, Mapping):\n # We're later going to modify dim in place; don't tamper with\n # the input\n dim = dict(dim)\n elif isinstance(dim, int):\n raise TypeError(\n \"dim should be hashable or sequence of hashables or mapping\"\n )\n elif isinstance(dim, str) or not isinstance(dim, Sequence):\n dim = {dim: 1}\n elif isinstance(dim, Sequence):\n if len(dim) != len(set(dim)):\n raise ValueError(\"dims should not contain duplicate values.\")\n dim = {d: 1 for d in dim}\n\n dim = either_dict_or_kwargs(dim, dim_kwargs, \"expand_dims\")\n assert isinstance(dim, MutableMapping)\n\n if axis is None:\n axis = list(range(len(dim)))\n elif not isinstance(axis, Sequence):\n axis = [axis]\n\n if len(dim) != len(axis):\n raise ValueError(\"lengths of dim and axis should be identical.\")\n for d in dim:\n if d in self.dims:\n raise ValueError(\"Dimension {dim} already exists.\".format(dim=d))\n if d in self._variables and not utils.is_scalar(self._variables[d]):\n raise ValueError(\n \"{dim} already exists as coordinate or\"\n \" variable name.\".format(dim=d)\n )\n\n variables: Dict[Hashable, Variable] = {}\n coord_names = self._coord_names.copy()\n # If dim is a dict, then ensure that the values are either integers\n # or iterables.\n for k, v in dim.items():\n if hasattr(v, \"__iter__\"):\n # If the value for the new dimension is an iterable, then\n # save the coordinates to the variables dict, and set the\n # value within the dim dict to the length of the iterable\n # for later use.\n variables[k] = xr.IndexVariable((k,), v)\n coord_names.add(k)\n dim[k] = variables[k].size\n elif isinstance(v, int):\n pass # Do nothing if the dimensions value is just an int\n else:\n raise TypeError(\n \"The value of new dimension {k} must be \"\n \"an iterable or an int\".format(k=k)\n )\n\n for k, v in self._variables.items():\n if k not in dim:\n if k in coord_names: # Do not change coordinates\n variables[k] = v\n else:\n result_ndim = len(v.dims) + len(axis)\n for a in axis:\n if a < -result_ndim or result_ndim - 1 < a:\n raise IndexError(\n \"Axis {a} is out of bounds of the expanded\"\n \" dimension size {dim}.\".format(\n a=a, v=k, dim=result_ndim\n )\n )\n\n axis_pos = [a if a >= 0 else result_ndim + a for a in axis]\n if len(axis_pos) != len(set(axis_pos)):\n raise ValueError(\"axis should not contain duplicate\" \" values.\")\n # We need to sort them to make sure `axis` equals to the\n # axis positions of the result array.\n zip_axis_dim = sorted(zip(axis_pos, dim.items()))\n\n all_dims = 
list(zip(v.dims, v.shape))\n for d, c in zip_axis_dim:\n all_dims.insert(d, c)\n variables[k] = v.set_dims(dict(all_dims))\n else:\n # If dims includes a label of a non-dimension coordinate,\n # it will be promoted to a 1D coordinate with a single value.\n variables[k] = v.set_dims(k).to_index_variable()\n\n new_dims = self._dims.copy()\n new_dims.update(dim)\n\n return self._replace_vars_and_dims(\n variables, dims=new_dims, coord_names=coord_names\n )\n\n def set_index(\n self,\n indexes: Mapping[Hashable, Union[Hashable, Sequence[Hashable]]] = None,\n append: bool = False,\n inplace: bool = None,\n **indexes_kwargs: Union[Hashable, Sequence[Hashable]],\n ) -> \"Dataset\":\n \"\"\"Set Dataset (multi-)indexes using one or more existing coordinates\n or variables.\n\n Parameters\n ----------\n indexes : {dim: index, ...}\n Mapping from names matching dimensions and values given\n by (lists of) the names of existing coordinates or variables to set\n as new (multi-)index.\n append : bool, optional\n If True, append the supplied index(es) to the existing index(es).\n Otherwise replace the existing index(es) (default).\n **indexes_kwargs: optional\n The keyword arguments form of ``indexes``.\n One of indexes or indexes_kwargs must be provided.\n\n Returns\n -------\n obj : Dataset\n Another dataset, with this dataset's data but replaced coordinates.\n\n Examples\n --------\n >>> arr = xr.DataArray(data=np.ones((2, 3)),\n ... dims=['x', 'y'],\n ... coords={'x':\n ... range(2), 'y':\n ... range(3), 'a': ('x', [3, 4])\n ... })\n >>> ds = xr.Dataset({'v': arr})\n >>> ds\n \n Dimensions: (x: 2, y: 3)\n Coordinates:\n * x (x) int64 0 1\n * y (y) int64 0 1 2\n a (x) int64 3 4\n Data variables:\n v (x, y) float64 1.0 1.0 1.0 1.0 1.0 1.0\n >>> ds.set_index(x='a')\n \n Dimensions: (x: 2, y: 3)\n Coordinates:\n * x (x) int64 3 4\n * y (y) int64 0 1 2\n Data variables:\n v (x, y) float64 1.0 1.0 1.0 1.0 1.0 1.0\n\n See Also\n --------\n Dataset.reset_index\n Dataset.swap_dims\n \"\"\"\n _check_inplace(inplace)\n indexes = either_dict_or_kwargs(indexes, indexes_kwargs, \"set_index\")\n variables, coord_names = merge_indexes(\n indexes, self._variables, self._coord_names, append=append\n )\n return self._replace_vars_and_dims(variables, coord_names=coord_names)\n\n def reset_index(\n self,\n dims_or_levels: Union[Hashable, Sequence[Hashable]],\n drop: bool = False,\n inplace: bool = None,\n ) -> \"Dataset\":\n \"\"\"Reset the specified index(es) or multi-index level(s).\n\n Parameters\n ----------\n dims_or_levels : str or list\n Name(s) of the dimension(s) and/or multi-index level(s) that will\n be reset.\n drop : bool, optional\n If True, remove the specified indexes and/or multi-index levels\n instead of extracting them as new coordinates (default: False).\n\n Returns\n -------\n obj : Dataset\n Another dataset, with this dataset's data but replaced coordinates.\n\n See Also\n --------\n Dataset.set_index\n \"\"\"\n _check_inplace(inplace)\n variables, coord_names = split_indexes(\n dims_or_levels,\n self._variables,\n self._coord_names,\n cast(Mapping[Hashable, Hashable], self._level_coords),\n drop=drop,\n )\n return self._replace_vars_and_dims(variables, coord_names=coord_names)\n\n def reorder_levels(\n self,\n dim_order: Mapping[Hashable, Sequence[int]] = None,\n inplace: bool = None,\n **dim_order_kwargs: Sequence[int],\n ) -> \"Dataset\":\n \"\"\"Rearrange index levels using input order.\n\n Parameters\n ----------\n dim_order : optional\n Mapping from names matching dimensions and values given\n 
by lists representing new level orders. Every given dimension\n must have a multi-index.\n **dim_order_kwargs: optional\n The keyword arguments form of ``dim_order``.\n One of dim_order or dim_order_kwargs must be provided.\n\n Returns\n -------\n obj : Dataset\n Another dataset, with this dataset's data but replaced\n coordinates.\n \"\"\"\n _check_inplace(inplace)\n dim_order = either_dict_or_kwargs(dim_order, dim_order_kwargs, \"reorder_levels\")\n variables = self._variables.copy()\n indexes = dict(self.indexes)\n for dim, order in dim_order.items():\n coord = self._variables[dim]\n index = self.indexes[dim]\n if not isinstance(index, pd.MultiIndex):\n raise ValueError(\"coordinate %r has no MultiIndex\" % dim)\n new_index = index.reorder_levels(order)\n variables[dim] = IndexVariable(coord.dims, new_index)\n indexes[dim] = new_index\n\n return self._replace(variables, indexes=indexes)\n\n def _stack_once(self, dims, new_dim):\n variables = {}\n for name, var in self.variables.items():\n if name not in dims:\n if any(d in var.dims for d in dims):\n add_dims = [d for d in dims if d not in var.dims]\n vdims = list(var.dims) + add_dims\n shape = [self.dims[d] for d in vdims]\n exp_var = var.set_dims(vdims, shape)\n stacked_var = exp_var.stack(**{new_dim: dims})\n variables[name] = stacked_var\n else:\n variables[name] = var.copy(deep=False)\n\n # consider dropping levels that are unused?\n levels = [self.get_index(dim) for dim in dims]\n idx = utils.multiindex_from_product_levels(levels, names=dims)\n variables[new_dim] = IndexVariable(new_dim, idx)\n\n coord_names = set(self._coord_names) - set(dims) | {new_dim}\n\n indexes = {k: v for k, v in self.indexes.items() if k not in dims}\n indexes[new_dim] = idx\n\n return self._replace_with_new_dims(\n variables, coord_names=coord_names, indexes=indexes\n )\n\n def stack(\n self,\n dimensions: Mapping[Hashable, Sequence[Hashable]] = None,\n **dimensions_kwargs: Sequence[Hashable],\n ) -> \"Dataset\":\n \"\"\"\n Stack any number of existing dimensions into a single new dimension.\n\n New dimensions will be added at the end, and the corresponding\n coordinate variables will be combined into a MultiIndex.\n\n Parameters\n ----------\n dimensions : Mapping of the form new_name=(dim1, dim2, ...)\n Names of new dimensions, and the existing dimensions that they\n replace.\n **dimensions_kwargs:\n The keyword arguments form of ``dimensions``.\n One of dimensions or dimensions_kwargs must be provided.\n\n Returns\n -------\n stacked : Dataset\n Dataset with stacked data.\n\n See also\n --------\n Dataset.unstack\n \"\"\"\n dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, \"stack\")\n result = self\n for new_dim, dims in dimensions.items():\n result = result._stack_once(dims, new_dim)\n return result\n\n def to_stacked_array(\n self,\n new_dim: Hashable,\n sample_dims: Sequence[Hashable],\n variable_dim: str = \"variable\",\n name: Hashable = None,\n ) -> \"DataArray\":\n \"\"\"Combine variables of differing dimensionality into a DataArray\n without broadcasting.\n\n This method is similar to Dataset.to_array but does not broadcast the\n variables.\n\n Parameters\n ----------\n new_dim : Hashable\n Name of the new stacked coordinate\n sample_dims : Sequence[Hashable]\n Dimensions that **will not** be stacked. Each array in the dataset\n must share these dimensions. 
For machine learning applications,\n these define the dimensions over which samples are drawn.\n variable_dim : str, optional\n Name of the level in the stacked coordinate which corresponds to\n the variables.\n name : str, optional\n Name of the new data array.\n\n Returns\n -------\n stacked : DataArray\n DataArray with the specified dimensions and data variables\n stacked together. The stacked coordinate is named ``new_dim``\n and represented by a MultiIndex object with a level containing the\n data variable names. The name of this level is controlled using\n the ``variable_dim`` argument.\n\n See Also\n --------\n Dataset.to_array\n Dataset.stack\n DataArray.to_unstacked_dataset\n\n Examples\n --------\n >>> data = Dataset(\n ... data_vars={'a': (('x', 'y'), [[0, 1, 2], [3, 4, 5]]),\n ... 'b': ('x', [6, 7])},\n ... coords={'y': ['u', 'v', 'w']}\n ... )\n\n >>> data\n \n Dimensions: (x: 2, y: 3)\n Coordinates:\n * y (y) >> data.to_stacked_array(\"z\", sample_dims=['x'])\n \n array([[0, 1, 2, 6],\n [3, 4, 5, 7]])\n Coordinates:\n * z (z) MultiIndex\n - variable (z) object 'a' 'a' 'a' 'b'\n - y (z) object 'u' 'v' 'w' nan\n Dimensions without coordinates: x\n\n \"\"\"\n stacking_dims = tuple(dim for dim in self.dims if dim not in sample_dims)\n\n for variable in self:\n dims = self[variable].dims\n dims_include_sample_dims = set(sample_dims) <= set(dims)\n if not dims_include_sample_dims:\n raise ValueError(\n \"All variables in the dataset must contain the \"\n \"dimensions {}.\".format(dims)\n )\n\n def ensure_stackable(val):\n assign_coords = {variable_dim: val.name}\n for dim in stacking_dims:\n if dim not in val.dims:\n assign_coords[dim] = None\n\n expand_dims = set(stacking_dims).difference(set(val.dims))\n expand_dims.add(variable_dim)\n # must be list for .expand_dims\n expand_dims = list(expand_dims)\n\n return (\n val.assign_coords(**assign_coords)\n .expand_dims(expand_dims)\n .stack({new_dim: (variable_dim,) + stacking_dims})\n )\n\n # concatenate the arrays\n stackable_vars = [ensure_stackable(self[key]) for key in self.data_vars]\n data_array = xr.concat(stackable_vars, dim=new_dim)\n\n # coerce the levels of the MultiIndex to have the same type as the\n # input dimensions. 
This code is messy, so it might be better to just\n # input a dummy value for the singleton dimension.\n idx = data_array.indexes[new_dim]\n levels = [idx.levels[0]] + [\n level.astype(self[level.name].dtype) for level in idx.levels[1:]\n ]\n new_idx = idx.set_levels(levels)\n data_array[new_dim] = IndexVariable(new_dim, new_idx)\n\n if name is not None:\n data_array.name = name\n\n return data_array\n\n def _unstack_once(self, dim: Hashable) -> \"Dataset\":\n index = self.get_index(dim)\n index = index.remove_unused_levels()\n full_idx = pd.MultiIndex.from_product(index.levels, names=index.names)\n\n # take a shortcut in case the MultiIndex was not modified.\n if index.equals(full_idx):\n obj = self\n else:\n obj = self.reindex({dim: full_idx}, copy=False)\n\n new_dim_names = index.names\n new_dim_sizes = [lev.size for lev in index.levels]\n\n variables: Dict[Hashable, Variable] = {}\n indexes = {k: v for k, v in self.indexes.items() if k != dim}\n\n for name, var in obj.variables.items():\n if name != dim:\n if dim in var.dims:\n new_dims = dict(zip(new_dim_names, new_dim_sizes))\n variables[name] = var.unstack({dim: new_dims})\n else:\n variables[name] = var\n\n for name, lev in zip(new_dim_names, index.levels):\n variables[name] = IndexVariable(name, lev)\n indexes[name] = lev\n\n coord_names = set(self._coord_names) - {dim} | set(new_dim_names)\n\n return self._replace_with_new_dims(\n variables, coord_names=coord_names, indexes=indexes\n )\n\n def unstack(self, dim: Union[Hashable, Iterable[Hashable]] = None) -> \"Dataset\":\n \"\"\"\n Unstack existing dimensions corresponding to MultiIndexes into\n multiple new dimensions.\n\n New dimensions will be added at the end.\n\n Parameters\n ----------\n dim : Hashable or iterable of Hashable, optional\n Dimension(s) over which to unstack. By default unstacks all\n MultiIndexes.\n\n Returns\n -------\n unstacked : Dataset\n Dataset with unstacked data.\n\n See also\n --------\n Dataset.stack\n \"\"\"\n if dim is None:\n dims = [\n d for d in self.dims if isinstance(self.get_index(d), pd.MultiIndex)\n ]\n else:\n if isinstance(dim, str) or not isinstance(dim, Iterable):\n dims = [dim]\n else:\n dims = list(dim)\n\n missing_dims = [d for d in dims if d not in self.dims]\n if missing_dims:\n raise ValueError(\n \"Dataset does not contain the dimensions: %s\" % missing_dims\n )\n\n non_multi_dims = [\n d for d in dims if not isinstance(self.get_index(d), pd.MultiIndex)\n ]\n if non_multi_dims:\n raise ValueError(\n \"cannot unstack dimensions that do not \"\n \"have a MultiIndex: %s\" % non_multi_dims\n )\n\n result = self.copy(deep=False)\n for dim in dims:\n result = result._unstack_once(dim)\n return result\n\n def update(self, other: \"CoercibleMapping\", inplace: bool = None) -> \"Dataset\":\n \"\"\"Update this dataset's variables with those from another dataset.\n\n Parameters\n ----------\n other : Dataset or castable to Dataset\n Variables with which to update this dataset. 
One of:\n\n - Dataset\n - mapping {var name: DataArray}\n - mapping {var name: Variable}\n - mapping {var name: (dimension name, array-like)}\n - mapping {var name: (tuple of dimension names, array-like)}\n\n\n Returns\n -------\n updated : Dataset\n Updated dataset.\n\n Raises\n ------\n ValueError\n If any dimensions would have inconsistent sizes in the updated\n dataset.\n \"\"\"\n _check_inplace(inplace)\n merge_result = dataset_update_method(self, other)\n return self._replace(inplace=True, **merge_result._asdict())\n\n def merge(\n self,\n other: \"CoercibleMapping\",\n inplace: bool = None,\n overwrite_vars: Union[Hashable, Iterable[Hashable]] = frozenset(),\n compat: str = \"no_conflicts\",\n join: str = \"outer\",\n fill_value: Any = dtypes.NA,\n ) -> \"Dataset\":\n \"\"\"Merge the arrays of two datasets into a single dataset.\n\n This method generally does not allow for overriding data, with the\n exception of attributes, which are ignored on the second dataset.\n Variables with the same name are checked for conflicts via the equals\n or identical methods.\n\n Parameters\n ----------\n other : Dataset or castable to Dataset\n Dataset or variables to merge with this dataset.\n overwrite_vars : Hashable or iterable of Hashable, optional\n If provided, update variables of these name(s) without checking for\n conflicts in this dataset.\n compat : {'broadcast_equals', 'equals', 'identical',\n 'no_conflicts'}, optional\n String indicating how to compare variables of the same name for\n potential conflicts:\n - 'broadcast_equals': all values must be equal when variables are\n broadcast against each other to ensure common dimensions.\n - 'equals': all values and dimensions must be the same.\n - 'identical': all values, dimensions and attributes must be the\n same.\n - 'no_conflicts': only values which are not null in both datasets\n must be equal. 
The returned dataset then contains the combination\n of all non-null values.\n join : {'outer', 'inner', 'left', 'right', 'exact'}, optional\n Method for joining ``self`` and ``other`` along shared dimensions:\n\n - 'outer': use the union of the indexes\n - 'inner': use the intersection of the indexes\n - 'left': use indexes from ``self``\n - 'right': use indexes from ``other``\n - 'exact': error instead of aligning non-equal indexes\n fill_value: scalar, optional\n Value to use for newly missing values\n\n Returns\n -------\n merged : Dataset\n Merged dataset.\n\n Raises\n ------\n MergeError\n If any variables conflict (see ``compat``).\n \"\"\"\n _check_inplace(inplace)\n merge_result = dataset_merge_method(\n self,\n other,\n overwrite_vars=overwrite_vars,\n compat=compat,\n join=join,\n fill_value=fill_value,\n )\n return self._replace(**merge_result._asdict())\n\n def _assert_all_in_dataset(\n self, names: Iterable[Hashable], virtual_okay: bool = False\n ) -> None:\n bad_names = set(names) - set(self._variables)\n if virtual_okay:\n bad_names -= self.virtual_variables\n if bad_names:\n raise ValueError(\n \"One or more of the specified variables \"\n \"cannot be found in this dataset\"\n )\n\n # Drop variables\n @overload # noqa: F811\n def drop(\n self, labels: Union[Hashable, Iterable[Hashable]], *, errors: str = \"raise\"\n ) -> \"Dataset\":\n ...\n\n # Drop index labels along dimension\n @overload # noqa: F811\n def drop(\n self, labels: Any, dim: Hashable, *, errors: str = \"raise\" # array-like\n ) -> \"Dataset\":\n ...\n\n def drop( # noqa: F811\n self, labels=None, dim=None, *, errors=\"raise\", **labels_kwargs\n ):\n \"\"\"Drop variables or index labels from this dataset.\n\n Parameters\n ----------\n labels : hashable or iterable of hashables\n Name(s) of variables or index labels to drop.\n If dim is not None, labels can be any array-like.\n dim : None or hashable, optional\n Dimension along which to drop index labels. By default (if\n ``dim is None``), drops variables rather than index labels.\n errors: {'raise', 'ignore'}, optional\n If 'raise' (default), raises a ValueError error if\n any of the variable or index labels passed are not\n in the dataset. 
If 'ignore', any given labels that are in the\n dataset are dropped and no error is raised.\n **labels_kwargs : {dim: label, ...}, optional\n The keyword arguments form of ``dim`` and ``labels``.\n\n Returns\n -------\n dropped : Dataset\n\n Examples\n --------\n >>> data = np.random.randn(2, 3)\n >>> labels = ['a', 'b', 'c']\n >>> ds = xr.Dataset({'A': (['x', 'y'], data), 'y': labels})\n >>> ds.drop(y=['a', 'c'])\n \n Dimensions: (x: 2, y: 1)\n Coordinates:\n * y (y) >> ds.drop(y='b')\n \n Dimensions: (x: 2, y: 2)\n Coordinates:\n * y (y) \"Dataset\":\n if errors == \"raise\":\n self._assert_all_in_dataset(names)\n\n variables = {k: v for k, v in self._variables.items() if k not in names}\n coord_names = {k for k in self._coord_names if k in variables}\n indexes = {k: v for k, v in self.indexes.items() if k not in names}\n return self._replace_with_new_dims(\n variables, coord_names=coord_names, indexes=indexes\n )\n\n def drop_dims(\n self, drop_dims: Union[Hashable, Iterable[Hashable]], *, errors: str = \"raise\"\n ) -> \"Dataset\":\n \"\"\"Drop dimensions and associated variables from this dataset.\n\n Parameters\n ----------\n drop_dims : hashable or iterable of hashable\n Dimension or dimensions to drop.\n errors: {'raise', 'ignore'}, optional\n If 'raise' (default), raises a ValueError error if any of the\n dimensions passed are not in the dataset. If 'ignore', any given\n labels that are in the dataset are dropped and no error is raised.\n\n Returns\n -------\n obj : Dataset\n The dataset without the given dimensions (or any variables\n containing those dimensions)\n errors: {'raise', 'ignore'}, optional\n If 'raise' (default), raises a ValueError error if\n any of the dimensions passed are not\n in the dataset. If 'ignore', any given dimensions that are in the\n dataset are dropped and no error is raised.\n \"\"\"\n if errors not in [\"raise\", \"ignore\"]:\n raise ValueError('errors must be either \"raise\" or \"ignore\"')\n\n if isinstance(drop_dims, str) or not isinstance(drop_dims, Iterable):\n drop_dims = {drop_dims}\n else:\n drop_dims = set(drop_dims)\n\n if errors == \"raise\":\n missing_dims = drop_dims - set(self.dims)\n if missing_dims:\n raise ValueError(\n \"Dataset does not contain the dimensions: %s\" % missing_dims\n )\n\n drop_vars = {k for k, v in self._variables.items() if set(v.dims) & drop_dims}\n return self._drop_vars(drop_vars)\n\n def transpose(self, *dims: Hashable) -> \"Dataset\":\n \"\"\"Return a new Dataset object with all array dimensions transposed.\n\n Although the order of dimensions on each array will change, the dataset\n dimensions themselves will remain in fixed (sorted) order.\n\n Parameters\n ----------\n *dims : Hashable, optional\n By default, reverse the dimensions on each array. Otherwise,\n reorder the dimensions to this order.\n\n Returns\n -------\n transposed : Dataset\n Each array in the dataset (including) coordinates will be\n transposed to the given order.\n\n Notes\n -----\n This operation returns a view of each array's data. 
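For example (illustrative only, assuming the dataset has dimensions ``'x'`` and ``'y'``):\n\n        >>> transposed = ds.transpose('y', 'x')\n        >>> reversed_dims = ds.transpose()  # reverse the dimension order on every array\n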
It is\n lazy for dask-backed DataArrays but not for numpy-backed DataArrays\n -- the data will be fully loaded into memory.\n\n See Also\n --------\n numpy.transpose\n DataArray.transpose\n \"\"\"\n if dims:\n if set(dims) ^ set(self.dims):\n raise ValueError(\n \"arguments to transpose (%s) must be \"\n \"permuted dataset dimensions (%s)\" % (dims, tuple(self.dims))\n )\n ds = self.copy()\n for name, var in self._variables.items():\n var_dims = tuple(dim for dim in dims if dim in var.dims)\n ds._variables[name] = var.transpose(*var_dims)\n return ds\n\n def dropna(\n self,\n dim: Hashable,\n how: str = \"any\",\n thresh: int = None,\n subset: Iterable[Hashable] = None,\n ):\n \"\"\"Returns a new dataset with dropped labels for missing values along\n the provided dimension.\n\n Parameters\n ----------\n dim : Hashable\n Dimension along which to drop missing values. Dropping along\n multiple dimensions simultaneously is not yet supported.\n how : {'any', 'all'}, optional\n * any : if any NA values are present, drop that label\n * all : if all values are NA, drop that label\n thresh : int, default None\n If supplied, require this many non-NA values.\n subset : iterable of hashable, optional\n Which variables to check for missing values. By default, all\n variables in the dataset are checked.\n\n Returns\n -------\n Dataset\n \"\"\"\n # TODO: consider supporting multiple dimensions? Or not, given that\n # there are some ugly edge cases, e.g., pandas's dropna differs\n # depending on the order of the supplied axes.\n\n if dim not in self.dims:\n raise ValueError(\"%s must be a single dataset dimension\" % dim)\n\n if subset is None:\n subset = iter(self.data_vars)\n\n count = np.zeros(self.dims[dim], dtype=np.int64)\n size = 0\n\n for k in subset:\n array = self._variables[k]\n if dim in array.dims:\n dims = [d for d in array.dims if d != dim]\n count += np.asarray(array.count(dims)) # type: ignore\n size += np.prod([self.dims[d] for d in dims])\n\n if thresh is not None:\n mask = count >= thresh\n elif how == \"any\":\n mask = count == size\n elif how == \"all\":\n mask = count > 0\n elif how is not None:\n raise ValueError(\"invalid how option: %s\" % how)\n else:\n raise TypeError(\"must specify how or thresh\")\n\n return self.isel({dim: mask})\n\n def fillna(self, value: Any) -> \"Dataset\":\n \"\"\"Fill missing values in this object.\n\n This operation follows the normal broadcasting and alignment rules that\n xarray uses for binary arithmetic, except the result is aligned to this\n object (``join='left'``) instead of aligned to the intersection of\n index coordinates (``join='inner'``).\n\n Parameters\n ----------\n value : scalar, ndarray, DataArray, dict or Dataset\n Used to fill all matching missing values in this dataset's data\n variables. Scalars, ndarrays or DataArrays arguments are used to\n fill all data with aligned coordinates (for DataArrays).\n Dictionaries or datasets match data variables and then align\n coordinates if necessary.\n\n Returns\n -------\n Dataset\n\n Examples\n --------\n\n >>> import numpy as np\n >>> import xarray as xr\n >>> ds = xr.Dataset(\n ... {\n ... \"A\": (\"x\", [np.nan, 2, np.nan, 0]),\n ... \"B\": (\"x\", [3, 4, np.nan, 1]),\n ... \"C\": (\"x\", [np.nan, np.nan, np.nan, 5]),\n ... \"D\": (\"x\", [np.nan, 3, np.nan, 4])\n ... },\n ... 
coords={\"x\": [0, 1, 2, 3]})\n >>> ds\n \n Dimensions: (x: 4)\n Coordinates:\n * x (x) int64 0 1 2 3\n Data variables:\n A (x) float64 nan 2.0 nan 0.0\n B (x) float64 3.0 4.0 nan 1.0\n C (x) float64 nan nan nan 5.0\n D (x) float64 nan 3.0 nan 4.0\n\n Replace all `NaN` values with 0s.\n\n >>> ds.fillna(0)\n \n Dimensions: (x: 4)\n Coordinates:\n * x (x) int64 0 1 2 3\n Data variables:\n A (x) float64 0.0 2.0 0.0 0.0\n B (x) float64 3.0 4.0 0.0 1.0\n C (x) float64 0.0 0.0 0.0 5.0\n D (x) float64 0.0 3.0 0.0 4.0\n\n Replace all `NaN` elements in column ‘A’, ‘B’, ‘C’, and ‘D’, with 0, 1, 2, and 3 respectively.\n\n >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}\n >>> ds.fillna(value=values)\n \n Dimensions: (x: 4)\n Coordinates:\n * x (x) int64 0 1 2 3\n Data variables:\n A (x) float64 0.0 2.0 0.0 0.0\n B (x) float64 3.0 4.0 1.0 1.0\n C (x) float64 2.0 2.0 2.0 5.0\n D (x) float64 3.0 3.0 3.0 4.0\n \"\"\"\n if utils.is_dict_like(value):\n value_keys = getattr(value, \"data_vars\", value).keys()\n if not set(value_keys) <= set(self.data_vars.keys()):\n raise ValueError(\n \"all variables in the argument to `fillna` \"\n \"must be contained in the original dataset\"\n )\n out = ops.fillna(self, value)\n return out\n\n def interpolate_na(\n self,\n dim: Hashable = None,\n method: str = \"linear\",\n limit: int = None,\n use_coordinate: Union[bool, Hashable] = True,\n **kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Interpolate values according to different methods.\n\n Parameters\n ----------\n dim : Hashable\n Specifies the dimension along which to interpolate.\n method : {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',\n 'polynomial', 'barycentric', 'krog', 'pchip',\n 'spline'}, optional\n String indicating which method to use for interpolation:\n\n - 'linear': linear interpolation (Default). Additional keyword\n arguments are passed to ``numpy.interp``\n - 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',\n 'polynomial': are passed to ``scipy.interpolate.interp1d``. If\n method=='polynomial', the ``order`` keyword argument must also be\n provided.\n - 'barycentric', 'krog', 'pchip', 'spline': use their respective\n ``scipy.interpolate`` classes.\n use_coordinate : boolean or str, default True\n Specifies which index to use as the x values in the interpolation\n formulated as `y = f(x)`. If False, values are treated as if\n eqaully-spaced along `dim`. If True, the IndexVariable `dim` is\n used. If use_coordinate is a string, it specifies the name of a\n coordinate variariable to use as the index.\n limit : int, default None\n Maximum number of consecutive NaNs to fill. Must be greater than 0\n or None for no limit.\n kwargs : any\n parameters passed verbatim to the underlying interplation function\n\n Returns\n -------\n Dataset\n\n See also\n --------\n numpy.interp\n scipy.interpolate\n \"\"\"\n from .missing import interp_na, _apply_over_vars_with_dim\n\n new = _apply_over_vars_with_dim(\n interp_na,\n self,\n dim=dim,\n method=method,\n limit=limit,\n use_coordinate=use_coordinate,\n **kwargs,\n )\n return new\n\n def ffill(self, dim: Hashable, limit: int = None) -> \"Dataset\":\n \"\"\"Fill NaN values by propogating values forward\n\n *Requires bottleneck.*\n\n Parameters\n ----------\n dim : Hashable\n Specifies the dimension along which to propagate values when\n filling.\n limit : int, default None\n The maximum number of consecutive NaN values to forward fill. In\n other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. 
Must be greater\n than 0 or None for no limit.\n\n Returns\n -------\n Dataset\n \"\"\"\n from .missing import ffill, _apply_over_vars_with_dim\n\n new = _apply_over_vars_with_dim(ffill, self, dim=dim, limit=limit)\n return new\n\n def bfill(self, dim: Hashable, limit: int = None) -> \"Dataset\":\n \"\"\"Fill NaN values by propogating values backward\n\n *Requires bottleneck.*\n\n Parameters\n ----------\n dim : str\n Specifies the dimension along which to propagate values when\n filling.\n limit : int, default None\n The maximum number of consecutive NaN values to backward fill. In\n other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. Must be greater\n than 0 or None for no limit.\n\n Returns\n -------\n Dataset\n \"\"\"\n from .missing import bfill, _apply_over_vars_with_dim\n\n new = _apply_over_vars_with_dim(bfill, self, dim=dim, limit=limit)\n return new\n\n def combine_first(self, other: \"Dataset\") -> \"Dataset\":\n \"\"\"Combine two Datasets, default to data_vars of self.\n\n The new coordinates follow the normal broadcasting and alignment rules\n of ``join='outer'``. Vacant cells in the expanded coordinates are\n filled with np.nan.\n\n Parameters\n ----------\n other : Dataset\n Used to fill all matching missing values in this array.\n\n Returns\n -------\n DataArray\n \"\"\"\n out = ops.fillna(self, other, join=\"outer\", dataset_join=\"outer\")\n return out\n\n def reduce(\n self,\n func: Callable,\n dim: Union[Hashable, Iterable[Hashable]] = None,\n keep_attrs: bool = None,\n keepdims: bool = False,\n numeric_only: bool = False,\n allow_lazy: bool = False,\n **kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Reduce this dataset by applying `func` along some dimension(s).\n\n Parameters\n ----------\n func : callable\n Function which can be called in the form\n `f(x, axis=axis, **kwargs)` to return the result of reducing an\n np.ndarray over an integer valued axis.\n dim : str or sequence of str, optional\n Dimension(s) over which to apply `func`. By default `func` is\n applied over all dimensions.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n keepdims : bool, default False\n If True, the dimensions which are reduced are left in the result\n as dimensions of size one. 
Coordinates that use these dimensions\n are removed.\n numeric_only : bool, optional\n If True, only apply ``func`` to variables with a numeric dtype.\n **kwargs : Any\n Additional keyword arguments passed on to ``func``.\n\n Returns\n -------\n reduced : Dataset\n Dataset with this object's DataArrays replaced with new DataArrays\n of summarized data and the indicated dimension(s) removed.\n \"\"\"\n if dim is None or dim is ALL_DIMS:\n dims = set(self.dims)\n elif isinstance(dim, str) or not isinstance(dim, Iterable):\n dims = {dim}\n else:\n dims = set(dim)\n\n missing_dimensions = [d for d in dims if d not in self.dims]\n if missing_dimensions:\n raise ValueError(\n \"Dataset does not contain the dimensions: %s\" % missing_dimensions\n )\n\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n\n variables: Dict[Hashable, Variable] = {}\n for name, var in self._variables.items():\n reduce_dims = [d for d in var.dims if d in dims]\n if name in self.coords:\n if not reduce_dims:\n variables[name] = var\n else:\n if (\n not numeric_only\n or np.issubdtype(var.dtype, np.number)\n or (var.dtype == np.bool_)\n ):\n if len(reduce_dims) == 1:\n # unpack dimensions for the benefit of functions\n # like np.argmin which can't handle tuple arguments\n reduce_dims, = reduce_dims\n elif len(reduce_dims) == var.ndim:\n # prefer to aggregate over axis=None rather than\n # axis=(0, 1) if they will be equivalent, because\n # the former is often more efficient\n reduce_dims = None # type: ignore\n variables[name] = var.reduce(\n func,\n dim=reduce_dims,\n keep_attrs=keep_attrs,\n keepdims=keepdims,\n allow_lazy=allow_lazy,\n **kwargs,\n )\n\n coord_names = {k for k in self.coords if k in variables}\n indexes = {k: v for k, v in self.indexes.items() if k in variables}\n attrs = self.attrs if keep_attrs else None\n return self._replace_with_new_dims(\n variables, coord_names=coord_names, attrs=attrs, indexes=indexes\n )\n\n def apply(\n self,\n func: Callable,\n keep_attrs: bool = None,\n args: Iterable[Any] = (),\n **kwargs: Any,\n ) -> \"Dataset\":\n \"\"\"Apply a function over the data variables in this dataset.\n\n Parameters\n ----------\n func : callable\n Function which can be called in the form `func(x, *args, **kwargs)`\n to transform each DataArray `x` in this dataset into another\n DataArray.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. 
If False, the new object will\n be returned without attributes.\n args : tuple, optional\n Positional arguments passed on to `func`.\n **kwargs : Any\n Keyword arguments passed on to `func`.\n\n Returns\n -------\n applied : Dataset\n Resulting dataset from applying ``func`` over each data variable.\n\n Examples\n --------\n >>> da = xr.DataArray(np.random.randn(2, 3))\n >>> ds = xr.Dataset({'foo': da, 'bar': ('x', [-1, 2])})\n >>> ds\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Dimensions without coordinates: dim_0, dim_1, x\n Data variables:\n foo (dim_0, dim_1) float64 -0.3751 -1.951 -1.945 0.2948 0.711 -0.3948\n bar (x) int64 -1 2\n >>> ds.apply(np.fabs)\n \n Dimensions: (dim_0: 2, dim_1: 3, x: 2)\n Dimensions without coordinates: dim_0, dim_1, x\n Data variables:\n foo (dim_0, dim_1) float64 0.3751 1.951 1.945 0.2948 0.711 0.3948\n bar (x) float64 1.0 2.0\n \"\"\"\n variables = {\n k: maybe_wrap_array(v, func(v, *args, **kwargs))\n for k, v in self.data_vars.items()\n }\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n attrs = self.attrs if keep_attrs else None\n return type(self)(variables, attrs=attrs)\n\n def assign(\n self, variables: Mapping[Hashable, Any] = None, **variables_kwargs: Hashable\n ) -> \"Dataset\":\n \"\"\"Assign new data variables to a Dataset, returning a new object\n with all the original variables in addition to the new ones.\n\n Parameters\n ----------\n variables : mapping, value pairs\n Mapping from variables names to the new values. If the new values\n are callable, they are computed on the Dataset and assigned to new\n data variables. If the values are not callable, (e.g. a DataArray,\n scalar, or array), they are simply assigned.\n **variables_kwargs:\n The keyword arguments form of ``variables``.\n One of variables or variables_kwargs must be provided.\n\n Returns\n -------\n ds : Dataset\n A new Dataset with the new variables in addition to all the\n existing variables.\n\n Notes\n -----\n Since ``kwargs`` is a dictionary, the order of your arguments may not\n be preserved, and so the order of the new variables is not well\n defined. Assigning multiple variables within the same ``assign`` is\n possible, but you cannot reference other variables created within the\n same ``assign`` call.\n\n See Also\n --------\n pandas.DataFrame.assign\n\n Examples\n --------\n >>> import numpy as np\n >>> import xarray as xr\n >>> x = xr.Dataset(\n ... {\n ... \"temperature_c\": ((\"lat\", \"lon\"), 20 * np.random.rand(4).reshape(2, 2)),\n ... \"precipitation\": ((\"lat\", \"lon\"), np.random.rand(4).reshape(2, 2)),\n ... },\n ... coords={\"lat\": [10, 20], \"lon\": [150, 160]},\n ... 
)\n >>> x\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 18.04 12.51 17.64 9.313\n precipitation (lat, lon) float64 0.4751 0.6827 0.3697 0.03524\n\n Where the value is a callable, evaluated on dataset:\n\n >>> x.assign(temperature_f = lambda x: x.temperature_c * 9 / 5 + 32)\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 18.04 12.51 17.64 9.313\n precipitation (lat, lon) float64 0.4751 0.6827 0.3697 0.03524\n temperature_f (lat, lon) float64 64.47 54.51 63.75 48.76\n\n Alternatively, the same behavior can be achieved by directly referencing an existing dataarray:\n\n >>> x.assign(temperature_f=x[\"temperature_c\"] * 9 / 5 + 32)\n \n Dimensions: (lat: 2, lon: 2)\n Coordinates:\n * lat (lat) int64 10 20\n * lon (lon) int64 150 160\n Data variables:\n temperature_c (lat, lon) float64 18.04 12.51 17.64 9.313\n precipitation (lat, lon) float64 0.4751 0.6827 0.3697 0.03524\n temperature_f (lat, lon) float64 64.47 54.51 63.75 48.76\n\n \"\"\"\n variables = either_dict_or_kwargs(variables, variables_kwargs, \"assign\")\n data = self.copy()\n # do all calculations first...\n results = data._calc_assign_results(variables)\n # ... and then assign\n data.update(results)\n return data\n\n def to_array(self, dim=\"variable\", name=None):\n \"\"\"Convert this dataset into an xarray.DataArray\n\n The data variables of this dataset will be broadcast against each other\n and stacked along the first axis of the new array. All coordinates of\n this dataset will remain coordinates.\n\n Parameters\n ----------\n dim : str, optional\n Name of the new dimension.\n name : str, optional\n Name of the new data array.\n\n Returns\n -------\n array : xarray.DataArray\n \"\"\"\n from .dataarray import DataArray\n\n data_vars = [self.variables[k] for k in self.data_vars]\n broadcast_vars = broadcast_variables(*data_vars)\n data = duck_array_ops.stack([b.data for b in broadcast_vars], axis=0)\n\n coords = dict(self.coords)\n coords[dim] = list(self.data_vars)\n\n dims = (dim,) + broadcast_vars[0].dims\n\n return DataArray(data, coords, dims, attrs=self.attrs, name=name)\n\n def _to_dataframe(self, ordered_dims):\n columns = [k for k in self.variables if k not in self.dims]\n data = [\n self._variables[k].set_dims(ordered_dims).values.reshape(-1)\n for k in columns\n ]\n index = self.coords.to_index(ordered_dims)\n return pd.DataFrame(dict(zip(columns, data)), index=index)\n\n def to_dataframe(self):\n \"\"\"Convert this dataset into a pandas.DataFrame.\n\n Non-index variables in this dataset form the columns of the\n DataFrame. 
The DataFrame is be indexed by the Cartesian product of\n this dataset's indices.\n \"\"\"\n return self._to_dataframe(self.dims)\n\n def _set_sparse_data_from_dataframe(\n self, dataframe: pd.DataFrame, dims: tuple, shape: Tuple[int, ...]\n ) -> None:\n from sparse import COO\n\n idx = dataframe.index\n if isinstance(idx, pd.MultiIndex):\n try:\n codes = idx.codes\n except AttributeError:\n # deprecated since pandas 0.24\n codes = idx.labels\n coords = np.stack([np.asarray(code) for code in codes], axis=0)\n is_sorted = idx.is_lexsorted\n else:\n coords = np.arange(idx.size).reshape(1, -1)\n is_sorted = True\n\n for name, series in dataframe.items():\n # Cast to a NumPy array first, in case the Series is a pandas\n # Extension array (which doesn't have a valid NumPy dtype)\n values = np.asarray(series)\n\n # In virtually all real use cases, the sparse array will now have\n # missing values and needs a fill_value. For consistency, don't\n # special case the rare exceptions (e.g., dtype=int without a\n # MultiIndex).\n dtype, fill_value = dtypes.maybe_promote(values.dtype)\n values = np.asarray(values, dtype=dtype)\n\n data = COO(\n coords,\n values,\n shape,\n has_duplicates=False,\n sorted=is_sorted,\n fill_value=fill_value,\n )\n self[name] = (dims, data)\n\n def _set_numpy_data_from_dataframe(\n self, dataframe: pd.DataFrame, dims: tuple, shape: Tuple[int, ...]\n ) -> None:\n idx = dataframe.index\n if isinstance(idx, pd.MultiIndex):\n # expand the DataFrame to include the product of all levels\n full_idx = pd.MultiIndex.from_product(idx.levels, names=idx.names)\n dataframe = dataframe.reindex(full_idx)\n\n for name, series in dataframe.items():\n data = np.asarray(series).reshape(shape)\n self[name] = (dims, data)\n\n @classmethod\n def from_dataframe(cls, dataframe: pd.DataFrame, sparse: bool = False) -> \"Dataset\":\n \"\"\"Convert a pandas.DataFrame into an xarray.Dataset\n\n Each column will be converted into an independent variable in the\n Dataset. If the dataframe's index is a MultiIndex, it will be expanded\n into a tensor product of one-dimensional indices (filling in missing\n values with NaN). This method will produce a Dataset very similar to\n that on which the 'to_dataframe' method was called, except with\n possibly redundant dimensions (since all dataset variables will have\n the same dimensionality)\n\n Parameters\n ----------\n dataframe : pandas.DataFrame\n DataFrame from which to copy data and indices.\n sparse : bool\n If true, create a sparse arrays instead of dense numpy arrays. This\n can potentially save a large amount of memory if the DataFrame has\n a MultiIndex. 
Requires the sparse package (sparse.pydata.org).\n\n Returns\n -------\n New Dataset.\n\n See also\n --------\n xarray.DataArray.from_series\n \"\"\"\n # TODO: Add an option to remove dimensions along which the variables\n # are constant, to enable consistent serialization to/from a dataframe,\n # even if some variables have different dimensionality.\n\n if not dataframe.columns.is_unique:\n raise ValueError(\"cannot convert DataFrame with non-unique columns\")\n\n idx = dataframe.index\n obj = cls()\n\n if isinstance(idx, pd.MultiIndex):\n dims = tuple(\n name if name is not None else \"level_%i\" % n\n for n, name in enumerate(idx.names)\n )\n for dim, lev in zip(dims, idx.levels):\n obj[dim] = (dim, lev)\n shape = tuple(lev.size for lev in idx.levels)\n else:\n index_name = idx.name if idx.name is not None else \"index\"\n dims = (index_name,)\n obj[index_name] = (dims, idx)\n shape = (idx.size,)\n\n if sparse:\n obj._set_sparse_data_from_dataframe(dataframe, dims, shape)\n else:\n obj._set_numpy_data_from_dataframe(dataframe, dims, shape)\n return obj\n\n def to_dask_dataframe(self, dim_order=None, set_index=False):\n \"\"\"\n Convert this dataset into a dask.dataframe.DataFrame.\n\n The dimensions, coordinates and data variables in this dataset form\n the columns of the DataFrame.\n\n Parameters\n ----------\n dim_order : list, optional\n Hierarchical dimension order for the resulting dataframe. All\n arrays are transposed to this order and then written out as flat\n vectors in contiguous order, so the last dimension in this list\n will be contiguous in the resulting DataFrame. This has a major\n influence on which operations are efficient on the resulting dask\n dataframe.\n\n If provided, must include all dimensions on this dataset. By\n default, dimensions are sorted alphabetically.\n set_index : bool, optional\n If set_index=True, the dask DataFrame is indexed by this dataset's\n coordinate. 
Since dask DataFrames to not support multi-indexes,\n set_index only works if the dataset only contains one dimension.\n\n Returns\n -------\n dask.dataframe.DataFrame\n \"\"\"\n\n import dask.array as da\n import dask.dataframe as dd\n\n if dim_order is None:\n dim_order = list(self.dims)\n elif set(dim_order) != set(self.dims):\n raise ValueError(\n \"dim_order {} does not match the set of dimensions on this \"\n \"Dataset: {}\".format(dim_order, list(self.dims))\n )\n\n ordered_dims = {k: self.dims[k] for k in dim_order}\n\n columns = list(ordered_dims)\n columns.extend(k for k in self.coords if k not in self.dims)\n columns.extend(self.data_vars)\n\n series_list = []\n for name in columns:\n try:\n var = self.variables[name]\n except KeyError:\n # dimension without a matching coordinate\n size = self.dims[name]\n data = da.arange(size, chunks=size, dtype=np.int64)\n var = Variable((name,), data)\n\n # IndexVariable objects have a dummy .chunk() method\n if isinstance(var, IndexVariable):\n var = var.to_base_variable()\n\n dask_array = var.set_dims(ordered_dims).chunk(self.chunks).data\n series = dd.from_array(dask_array.reshape(-1), columns=[name])\n series_list.append(series)\n\n df = dd.concat(series_list, axis=1)\n\n if set_index:\n if len(dim_order) == 1:\n (dim,) = dim_order\n df = df.set_index(dim)\n else:\n # triggers an error about multi-indexes, even if only one\n # dimension is passed\n df = df.set_index(dim_order)\n\n return df\n\n def to_dict(self, data=True):\n \"\"\"\n Convert this dataset to a dictionary following xarray naming\n conventions.\n\n Converts all variables and attributes to native Python objects\n Useful for coverting to json. To avoid datetime incompatibility\n use decode_times=False kwarg in xarrray.open_dataset.\n\n Parameters\n ----------\n data : bool, optional\n Whether to include the actual data in the dictionary. 
When set to\n False, returns just the schema.\n\n See also\n --------\n Dataset.from_dict\n \"\"\"\n d = {\n \"coords\": {},\n \"attrs\": decode_numpy_dict_values(self.attrs),\n \"dims\": dict(self.dims),\n \"data_vars\": {},\n }\n for k in self.coords:\n d[\"coords\"].update({k: self[k].variable.to_dict(data=data)})\n for k in self.data_vars:\n d[\"data_vars\"].update({k: self[k].variable.to_dict(data=data)})\n return d\n\n @classmethod\n def from_dict(cls, d):\n \"\"\"\n Convert a dictionary into an xarray.Dataset.\n\n Input dict can take several forms::\n\n d = {'t': {'dims': ('t'), 'data': t},\n 'a': {'dims': ('t'), 'data': x},\n 'b': {'dims': ('t'), 'data': y}}\n\n d = {'coords': {'t': {'dims': 't', 'data': t,\n 'attrs': {'units':'s'}}},\n 'attrs': {'title': 'air temperature'},\n 'dims': 't',\n 'data_vars': {'a': {'dims': 't', 'data': x, },\n 'b': {'dims': 't', 'data': y}}}\n\n where 't' is the name of the dimesion, 'a' and 'b' are names of data\n variables and t, x, and y are lists, numpy.arrays or pandas objects.\n\n Parameters\n ----------\n d : dict, with a minimum structure of {'var_0': {'dims': [..], \\\n 'data': [..]}, \\\n ...}\n\n Returns\n -------\n obj : xarray.Dataset\n\n See also\n --------\n Dataset.to_dict\n DataArray.from_dict\n \"\"\"\n\n if not {\"coords\", \"data_vars\"}.issubset(set(d)):\n variables = d.items()\n else:\n import itertools\n\n variables = itertools.chain(\n d.get(\"coords\", {}).items(), d.get(\"data_vars\", {}).items()\n )\n try:\n variable_dict = {\n k: (v[\"dims\"], v[\"data\"], v.get(\"attrs\")) for k, v in variables\n }\n except KeyError as e:\n raise ValueError(\n \"cannot convert dict without the key \"\n \"'{dims_data}'\".format(dims_data=str(e.args[0]))\n )\n obj = cls(variable_dict)\n\n # what if coords aren't dims?\n coords = set(d.get(\"coords\", {})) - set(d.get(\"dims\", {}))\n obj = obj.set_coords(coords)\n\n obj.attrs.update(d.get(\"attrs\", {}))\n\n return obj\n\n @staticmethod\n def _unary_op(f, keep_attrs=False):\n @functools.wraps(f)\n def func(self, *args, **kwargs):\n variables = {}\n for k, v in self._variables.items():\n if k in self._coord_names:\n variables[k] = v\n else:\n variables[k] = f(v, *args, **kwargs)\n attrs = self._attrs if keep_attrs else None\n return self._replace_with_new_dims(variables, attrs=attrs)\n\n return func\n\n @staticmethod\n def _binary_op(f, reflexive=False, join=None):\n @functools.wraps(f)\n def func(self, other):\n from .dataarray import DataArray\n\n if isinstance(other, groupby.GroupBy):\n return NotImplemented\n align_type = OPTIONS[\"arithmetic_join\"] if join is None else join\n if isinstance(other, (DataArray, Dataset)):\n self, other = align(self, other, join=align_type, copy=False)\n g = f if not reflexive else lambda x, y: f(y, x)\n ds = self._calculate_binary_op(g, other, join=align_type)\n return ds\n\n return func\n\n @staticmethod\n def _inplace_binary_op(f):\n @functools.wraps(f)\n def func(self, other):\n from .dataarray import DataArray\n\n if isinstance(other, groupby.GroupBy):\n raise TypeError(\n \"in-place operations between a Dataset and \"\n \"a grouped object are not permitted\"\n )\n # we don't actually modify arrays in-place with in-place Dataset\n # arithmetic -- this lets us automatically align things\n if isinstance(other, (DataArray, Dataset)):\n other = other.reindex_like(self, copy=False)\n g = ops.inplace_to_noninplace_op(f)\n ds = self._calculate_binary_op(g, other, inplace=True)\n self._replace_with_new_dims(\n ds._variables,\n ds._coord_names,\n 
attrs=ds._attrs,\n indexes=ds._indexes,\n inplace=True,\n )\n return self\n\n return func\n\n def _calculate_binary_op(self, f, other, join=\"inner\", inplace=False):\n def apply_over_both(lhs_data_vars, rhs_data_vars, lhs_vars, rhs_vars):\n if inplace and set(lhs_data_vars) != set(rhs_data_vars):\n raise ValueError(\n \"datasets must have the same data variables \"\n \"for in-place arithmetic operations: %s, %s\"\n % (list(lhs_data_vars), list(rhs_data_vars))\n )\n\n dest_vars = {}\n\n for k in lhs_data_vars:\n if k in rhs_data_vars:\n dest_vars[k] = f(lhs_vars[k], rhs_vars[k])\n elif join in [\"left\", \"outer\"]:\n dest_vars[k] = f(lhs_vars[k], np.nan)\n for k in rhs_data_vars:\n if k not in dest_vars and join in [\"right\", \"outer\"]:\n dest_vars[k] = f(rhs_vars[k], np.nan)\n return dest_vars\n\n if utils.is_dict_like(other) and not isinstance(other, Dataset):\n # can't use our shortcut of doing the binary operation with\n # Variable objects, so apply over our data vars instead.\n new_data_vars = apply_over_both(\n self.data_vars, other, self.data_vars, other\n )\n return Dataset(new_data_vars)\n\n other_coords = getattr(other, \"coords\", None)\n ds = self.coords.merge(other_coords)\n\n if isinstance(other, Dataset):\n new_vars = apply_over_both(\n self.data_vars, other.data_vars, self.variables, other.variables\n )\n else:\n other_variable = getattr(other, \"variable\", other)\n new_vars = {k: f(self.variables[k], other_variable) for k in self.data_vars}\n ds._variables.update(new_vars)\n ds._dims = calculate_dimensions(ds._variables)\n return ds\n\n def _copy_attrs_from(self, other):\n self.attrs = other.attrs\n for v in other.variables:\n if v in self.variables:\n self.variables[v].attrs = other.variables[v].attrs\n\n def diff(self, dim, n=1, label=\"upper\"):\n \"\"\"Calculate the n-th order discrete difference along given axis.\n\n Parameters\n ----------\n dim : str, optional\n Dimension over which to calculate the finite difference.\n n : int, optional\n The number of times values are differenced.\n label : str, optional\n The new coordinate in dimension ``dim`` will have the\n values of either the minuend's or subtrahend's coordinate\n for values 'upper' and 'lower', respectively. 
Other\n values are not supported.\n\n Returns\n -------\n difference : same type as caller\n The n-th order finite difference of this object.\n\n Examples\n --------\n >>> ds = xr.Dataset({'foo': ('x', [5, 5, 6, 6])})\n >>> ds.diff('x')\n \n Dimensions: (x: 3)\n Coordinates:\n * x (x) int64 1 2 3\n Data variables:\n foo (x) int64 0 1 0\n >>> ds.diff('x', 2)\n \n Dimensions: (x: 2)\n Coordinates:\n * x (x) int64 2 3\n Data variables:\n foo (x) int64 1 -1\n\n See Also\n --------\n Dataset.differentiate\n \"\"\"\n if n == 0:\n return self\n if n < 0:\n raise ValueError(\"order `n` must be non-negative but got {}\".format(n))\n\n # prepare slices\n kwargs_start = {dim: slice(None, -1)}\n kwargs_end = {dim: slice(1, None)}\n\n # prepare new coordinate\n if label == \"upper\":\n kwargs_new = kwargs_end\n elif label == \"lower\":\n kwargs_new = kwargs_start\n else:\n raise ValueError(\n \"The 'label' argument has to be either \" \"'upper' or 'lower'\"\n )\n\n variables = {}\n\n for name, var in self.variables.items():\n if dim in var.dims:\n if name in self.data_vars:\n variables[name] = var.isel(**kwargs_end) - var.isel(**kwargs_start)\n else:\n variables[name] = var.isel(**kwargs_new)\n else:\n variables[name] = var\n\n indexes = dict(self.indexes)\n if dim in indexes:\n indexes[dim] = indexes[dim][kwargs_new[dim]]\n\n difference = self._replace_with_new_dims(variables, indexes=indexes)\n\n if n > 1:\n return difference.diff(dim, n - 1)\n else:\n return difference\n\n def shift(self, shifts=None, fill_value=dtypes.NA, **shifts_kwargs):\n \"\"\"Shift this dataset by an offset along one or more dimensions.\n\n Only data variables are moved; coordinates stay in place. This is\n consistent with the behavior of ``shift`` in pandas.\n\n Parameters\n ----------\n shifts : Mapping with the form of {dim: offset}\n Integer offset to shift along each of the given dimensions.\n Positive offsets shift to the right; negative offsets shift to the\n left.\n fill_value: scalar, optional\n Value to use for newly missing values\n **shifts_kwargs:\n The keyword arguments form of ``shifts``.\n One of shifts or shifts_kwarg must be provided.\n\n Returns\n -------\n shifted : Dataset\n Dataset with the same coordinates and attributes but shifted data\n variables.\n\n See also\n --------\n roll\n\n Examples\n --------\n\n >>> ds = xr.Dataset({'foo': ('x', list('abcde'))})\n >>> ds.shift(x=2)\n \n Dimensions: (x: 5)\n Coordinates:\n * x (x) int64 0 1 2 3 4\n Data variables:\n foo (x) object nan nan 'a' 'b' 'c'\n \"\"\"\n shifts = either_dict_or_kwargs(shifts, shifts_kwargs, \"shift\")\n invalid = [k for k in shifts if k not in self.dims]\n if invalid:\n raise ValueError(\"dimensions %r do not exist\" % invalid)\n\n variables = {}\n for name, var in self.variables.items():\n if name in self.data_vars:\n var_shifts = {k: v for k, v in shifts.items() if k in var.dims}\n variables[name] = var.shift(fill_value=fill_value, shifts=var_shifts)\n else:\n variables[name] = var\n\n return self._replace(variables)\n\n def roll(self, shifts=None, roll_coords=None, **shifts_kwargs):\n \"\"\"Roll this dataset by an offset along one or more dimensions.\n\n Unlike shift, roll may rotate all variables, including coordinates\n if specified. The direction of rotation is consistent with\n :py:func:`numpy.roll`.\n\n Parameters\n ----------\n\n shifts : dict, optional\n A dict with keys matching dimensions and values given\n by integers to rotate each of the given dimensions. 
Positive\n offsets roll to the right; negative offsets roll to the left.\n roll_coords : bool\n Indicates whether to roll the coordinates by the offset\n The current default of roll_coords (None, equivalent to True) is\n deprecated and will change to False in a future version.\n Explicitly pass roll_coords to silence the warning.\n **shifts_kwargs : {dim: offset, ...}, optional\n The keyword arguments form of ``shifts``.\n One of shifts or shifts_kwargs must be provided.\n Returns\n -------\n rolled : Dataset\n Dataset with the same coordinates and attributes but rolled\n variables.\n\n See also\n --------\n shift\n\n Examples\n --------\n\n >>> ds = xr.Dataset({'foo': ('x', list('abcde'))})\n >>> ds.roll(x=2)\n \n Dimensions: (x: 5)\n Coordinates:\n * x (x) int64 3 4 0 1 2\n Data variables:\n foo (x) object 'd' 'e' 'a' 'b' 'c'\n \"\"\"\n shifts = either_dict_or_kwargs(shifts, shifts_kwargs, \"roll\")\n invalid = [k for k in shifts if k not in self.dims]\n if invalid:\n raise ValueError(\"dimensions %r do not exist\" % invalid)\n\n if roll_coords is None:\n warnings.warn(\n \"roll_coords will be set to False in the future.\"\n \" Explicitly set roll_coords to silence warning.\",\n FutureWarning,\n stacklevel=2,\n )\n roll_coords = True\n\n unrolled_vars = () if roll_coords else self.coords\n\n variables = {}\n for k, v in self.variables.items():\n if k not in unrolled_vars:\n variables[k] = v.roll(\n **{k: s for k, s in shifts.items() if k in v.dims}\n )\n else:\n variables[k] = v\n\n if roll_coords:\n indexes = {}\n for k, v in self.indexes.items():\n (dim,) = self.variables[k].dims\n if dim in shifts:\n indexes[k] = roll_index(v, shifts[dim])\n else:\n indexes = dict(self.indexes)\n\n return self._replace(variables, indexes=indexes)\n\n def sortby(self, variables, ascending=True):\n \"\"\"\n Sort object by labels or values (along an axis).\n\n Sorts the dataset, either along specified dimensions,\n or according to values of 1-D dataarrays that share dimension\n with calling object.\n\n If the input variables are dataarrays, then the dataarrays are aligned\n (via left-join) to the calling object prior to sorting by cell values.\n NaNs are sorted to the end, following Numpy convention.\n\n If multiple sorts along the same dimension is\n given, numpy's lexsort is performed along that dimension:\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.lexsort.html\n and the FIRST key in the sequence is used as the primary sort key,\n followed by the 2nd key, etc.\n\n Parameters\n ----------\n variables: str, DataArray, or list of either\n 1D DataArray objects or name(s) of 1D variable(s) in\n coords/data_vars whose values are used to sort the dataset.\n ascending: boolean, optional\n Whether to sort by ascending or descending order.\n\n Returns\n -------\n sorted: Dataset\n A new dataset where all the specified dims are sorted by dim\n labels.\n \"\"\"\n from .dataarray import DataArray\n\n if not isinstance(variables, list):\n variables = [variables]\n else:\n variables = variables\n variables = [v if isinstance(v, DataArray) else self[v] for v in variables]\n aligned_vars = align(self, *variables, join=\"left\")\n aligned_self = aligned_vars[0]\n aligned_other_vars = aligned_vars[1:]\n vars_by_dim = defaultdict(list)\n for data_array in aligned_other_vars:\n if data_array.ndim != 1:\n raise ValueError(\"Input DataArray is not 1-D.\")\n (key,) = data_array.dims\n vars_by_dim[key].append(data_array)\n\n indices = {}\n for key, arrays in vars_by_dim.items():\n order = 
np.lexsort(tuple(reversed(arrays)))\n indices[key] = order if ascending else order[::-1]\n return aligned_self.isel(**indices)\n\n def quantile(\n self, q, dim=None, interpolation=\"linear\", numeric_only=False, keep_attrs=None\n ):\n \"\"\"Compute the qth quantile of the data along the specified dimension.\n\n Returns the qth quantiles(s) of the array elements for each variable\n in the Dataset.\n\n Parameters\n ----------\n q : float in range of [0,1] or array-like of floats\n Quantile to compute, which must be between 0 and 1 inclusive.\n dim : str or sequence of str, optional\n Dimension(s) over which to apply quantile.\n interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}\n This optional parameter specifies the interpolation method to\n use when the desired quantile lies between two data points\n ``i < j``:\n\n * linear: ``i + (j - i) * fraction``, where ``fraction`` is\n the fractional part of the index surrounded by ``i`` and\n ``j``.\n * lower: ``i``.\n * higher: ``j``.\n * nearest: ``i`` or ``j``, whichever is nearest.\n * midpoint: ``(i + j) / 2``.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n numeric_only : bool, optional\n If True, only apply ``func`` to variables with a numeric dtype.\n\n Returns\n -------\n quantiles : Dataset\n If `q` is a single quantile, then the result is a scalar for each\n variable in data_vars. If multiple percentiles are given, first\n axis of the result corresponds to the quantile and a quantile\n dimension is added to the return Dataset. The other dimensions are\n the dimensions that remain after the reduction of the array.\n\n See Also\n --------\n numpy.nanpercentile, pandas.Series.quantile, DataArray.quantile\n \"\"\"\n\n if isinstance(dim, str):\n dims = {dim}\n elif dim is None or dim is ALL_DIMS:\n dims = set(self.dims)\n else:\n dims = set(dim)\n\n _assert_empty(\n [d for d in dims if d not in self.dims],\n \"Dataset does not contain the dimensions: %s\",\n )\n\n q = np.asarray(q, dtype=np.float64)\n\n variables = {}\n for name, var in self.variables.items():\n reduce_dims = [d for d in var.dims if d in dims]\n if reduce_dims or not var.dims:\n if name not in self.coords:\n if (\n not numeric_only\n or np.issubdtype(var.dtype, np.number)\n or var.dtype == np.bool_\n ):\n if len(reduce_dims) == var.ndim:\n # prefer to aggregate over axis=None rather than\n # axis=(0, 1) if they will be equivalent, because\n # the former is often more efficient\n reduce_dims = None\n variables[name] = var.quantile(\n q,\n dim=reduce_dims,\n interpolation=interpolation,\n keep_attrs=keep_attrs,\n )\n\n else:\n variables[name] = var\n\n # construct the new dataset\n coord_names = {k for k in self.coords if k in variables}\n indexes = {k: v for k, v in self.indexes.items() if k in variables}\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n attrs = self.attrs if keep_attrs else None\n new = self._replace_with_new_dims(\n variables, coord_names=coord_names, attrs=attrs, indexes=indexes\n )\n if \"quantile\" in new.dims:\n new.coords[\"quantile\"] = Variable(\"quantile\", q)\n else:\n new.coords[\"quantile\"] = q\n return new\n\n def rank(self, dim, pct=False, keep_attrs=None):\n \"\"\"Ranks the data.\n\n Equal values are assigned a rank that is the average of the ranks that\n would have been otherwise assigned to all of the values within\n that set.\n Ranks begin 
at 1, not 0. If pct is True, computes percentage ranks.\n\n NaNs in the input array are returned as NaNs.\n\n The `bottleneck` library is required.\n\n Parameters\n ----------\n dim : str\n Dimension over which to compute rank.\n pct : bool, optional\n If True, compute percentage ranks, otherwise compute integer ranks.\n keep_attrs : bool, optional\n If True, the dataset's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n\n Returns\n -------\n ranked : Dataset\n Variables that do not depend on `dim` are dropped.\n \"\"\"\n if dim not in self.dims:\n raise ValueError(\"Dataset does not contain the dimension: %s\" % dim)\n\n variables = {}\n for name, var in self.variables.items():\n if name in self.data_vars:\n if dim in var.dims:\n variables[name] = var.rank(dim, pct=pct)\n else:\n variables[name] = var\n\n coord_names = set(self.coords)\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n attrs = self.attrs if keep_attrs else None\n return self._replace(variables, coord_names, attrs=attrs)\n\n def differentiate(self, coord, edge_order=1, datetime_unit=None):\n \"\"\" Differentiate with the second order accurate central\n differences.\n\n .. note::\n This feature is limited to simple cartesian geometry, i.e. coord\n must be one dimensional.\n\n Parameters\n ----------\n coord: str\n The coordinate to be used to compute the gradient.\n edge_order: 1 or 2. Default 1\n N-th order accurate differences at the boundaries.\n datetime_unit: None or any of {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',\n 'us', 'ns', 'ps', 'fs', 'as'}\n Unit to compute gradient. Only valid for datetime coordinate.\n\n Returns\n -------\n differentiated: Dataset\n\n See also\n --------\n numpy.gradient: corresponding numpy function\n \"\"\"\n from .variable import Variable\n\n if coord not in self.variables and coord not in self.dims:\n raise ValueError(\"Coordinate {} does not exist.\".format(coord))\n\n coord_var = self[coord].variable\n if coord_var.ndim != 1:\n raise ValueError(\n \"Coordinate {} must be 1 dimensional but is {}\"\n \" dimensional\".format(coord, coord_var.ndim)\n )\n\n dim = coord_var.dims[0]\n if _contains_datetime_like_objects(coord_var):\n if coord_var.dtype.kind in \"mM\" and datetime_unit is None:\n datetime_unit, _ = np.datetime_data(coord_var.dtype)\n elif datetime_unit is None:\n datetime_unit = \"s\" # Default to seconds for cftime objects\n coord_var = coord_var._to_numeric(datetime_unit=datetime_unit)\n\n variables = {}\n for k, v in self.variables.items():\n if k in self.data_vars and dim in v.dims and k not in self.coords:\n if _contains_datetime_like_objects(v):\n v = v._to_numeric(datetime_unit=datetime_unit)\n grad = duck_array_ops.gradient(\n v.data, coord_var, edge_order=edge_order, axis=v.get_axis_num(dim)\n )\n variables[k] = Variable(v.dims, grad)\n else:\n variables[k] = v\n return self._replace(variables)\n\n def integrate(self, coord, datetime_unit=None):\n \"\"\" integrate the array with the trapezoidal rule.\n\n .. note::\n This feature is limited to simple cartesian geometry, i.e. coord\n must be one dimensional.\n\n Parameters\n ----------\n dim: str, or a sequence of str\n Coordinate(s) used for the integration.\n datetime_unit\n Can be specify the unit if datetime coordinate is used. 
One of\n {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', 'ps', 'fs',\n 'as'}\n\n Returns\n -------\n integrated: Dataset\n\n See also\n --------\n DataArray.integrate\n numpy.trapz: corresponding numpy function\n \"\"\"\n if not isinstance(coord, (list, tuple)):\n coord = (coord,)\n result = self\n for c in coord:\n result = result._integrate_one(c, datetime_unit=datetime_unit)\n return result\n\n def _integrate_one(self, coord, datetime_unit=None):\n from .variable import Variable\n\n if coord not in self.variables and coord not in self.dims:\n raise ValueError(\"Coordinate {} does not exist.\".format(coord))\n\n coord_var = self[coord].variable\n if coord_var.ndim != 1:\n raise ValueError(\n \"Coordinate {} must be 1 dimensional but is {}\"\n \" dimensional\".format(coord, coord_var.ndim)\n )\n\n dim = coord_var.dims[0]\n if _contains_datetime_like_objects(coord_var):\n if coord_var.dtype.kind in \"mM\" and datetime_unit is None:\n datetime_unit, _ = np.datetime_data(coord_var.dtype)\n elif datetime_unit is None:\n datetime_unit = \"s\" # Default to seconds for cftime objects\n coord_var = datetime_to_numeric(coord_var, datetime_unit=datetime_unit)\n\n variables = {}\n coord_names = set()\n for k, v in self.variables.items():\n if k in self.coords:\n if dim not in v.dims:\n variables[k] = v\n coord_names.add(k)\n else:\n if k in self.data_vars and dim in v.dims:\n if _contains_datetime_like_objects(v):\n v = datetime_to_numeric(v, datetime_unit=datetime_unit)\n integ = duck_array_ops.trapz(\n v.data, coord_var.data, axis=v.get_axis_num(dim)\n )\n v_dims = list(v.dims)\n v_dims.remove(dim)\n variables[k] = Variable(v_dims, integ)\n else:\n variables[k] = v\n indexes = {k: v for k, v in self.indexes.items() if k in variables}\n return self._replace_with_new_dims(\n variables, coord_names=coord_names, indexes=indexes\n )\n\n @property\n def real(self):\n return self._unary_op(lambda x: x.real, keep_attrs=True)(self)\n\n @property\n def imag(self):\n return self._unary_op(lambda x: x.imag, keep_attrs=True)(self)\n\n @property\n def plot(self):\n \"\"\"\n Access plotting functions. Use it as a namespace to use\n xarray.plot functions as Dataset methods\n\n >>> ds.plot.scatter(...) # equivalent to xarray.plot.scatter(ds,...)\n\n \"\"\"\n return _Dataset_PlotMethods(self)\n\n def filter_by_attrs(self, **kwargs):\n \"\"\"Returns a ``Dataset`` with variables that match specific conditions.\n\n Can pass in ``key=value`` or ``key=callable``. A Dataset is returned\n containing only the variables for which all the filter tests pass.\n These tests are either ``key=value`` for which the attribute ``key``\n has the exact value ``value`` or the callable passed into\n ``key=callable`` returns True. 
The callable will be passed a single\n value, either the value of the attribute ``key`` or ``None`` if the\n DataArray does not have an attribute with the name ``key``.\n\n Parameters\n ----------\n **kwargs : key=value\n key : str\n Attribute name.\n value : callable or obj\n If value is a callable, it should return a boolean in the form\n of bool = func(attr) where attr is da.attrs[key].\n Otherwise, value will be compared to the each\n DataArray's attrs[key].\n\n Returns\n -------\n new : Dataset\n New dataset with variables filtered by attribute.\n\n Examples\n --------\n >>> # Create an example dataset:\n >>> import numpy as np\n >>> import pandas as pd\n >>> import xarray as xr\n >>> temp = 15 + 8 * np.random.randn(2, 2, 3)\n >>> precip = 10 * np.random.rand(2, 2, 3)\n >>> lon = [[-99.83, -99.32], [-99.79, -99.23]]\n >>> lat = [[42.25, 42.21], [42.63, 42.59]]\n >>> dims = ['x', 'y', 'time']\n >>> temp_attr = dict(standard_name='air_potential_temperature')\n >>> precip_attr = dict(standard_name='convective_precipitation_flux')\n >>> ds = xr.Dataset({\n ... 'temperature': (dims, temp, temp_attr),\n ... 'precipitation': (dims, precip, precip_attr)},\n ... coords={\n ... 'lon': (['x', 'y'], lon),\n ... 'lat': (['x', 'y'], lat),\n ... 'time': pd.date_range('2014-09-06', periods=3),\n ... 'reference_time': pd.Timestamp('2014-09-05')})\n >>> # Get variables matching a specific standard_name.\n >>> ds.filter_by_attrs(standard_name='convective_precipitation_flux')\n \n Dimensions: (time: 3, x: 2, y: 2)\n Coordinates:\n * x (x) int64 0 1\n * time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08\n lat (x, y) float64 42.25 42.21 42.63 42.59\n * y (y) int64 0 1\n reference_time datetime64[ns] 2014-09-05\n lon (x, y) float64 -99.83 -99.32 -99.79 -99.23\n Data variables:\n precipitation (x, y, time) float64 4.178 2.307 6.041 6.046 0.06648 ...\n >>> # Get all variables that have a standard_name attribute.\n >>> standard_name = lambda v: v is not None\n >>> ds.filter_by_attrs(standard_name=standard_name)\n \n Dimensions: (time: 3, x: 2, y: 2)\n Coordinates:\n lon (x, y) float64 -99.83 -99.32 -99.79 -99.23\n lat (x, y) float64 42.25 42.21 42.63 42.59\n * x (x) int64 0 1\n * y (y) int64 0 1\n * time (time) datetime64[ns] 2014-09-06 2014-09-07 2014-09-08\n reference_time datetime64[ns] 2014-09-05\n Data variables:\n temperature (x, y, time) float64 25.86 20.82 6.954 23.13 10.25 11.68 ...\n precipitation (x, y, time) float64 5.702 0.9422 2.075 1.178 3.284 ...\n\n \"\"\"\n selection = []\n for var_name, variable in self.variables.items():\n has_value_flag = False\n for attr_name, pattern in kwargs.items():\n attr_value = variable.attrs.get(attr_name)\n if (callable(pattern) and pattern(attr_value)) or attr_value == pattern:\n has_value_flag = True\n else:\n has_value_flag = False\n break\n if has_value_flag is True:\n selection.append(var_name)\n return self[selection]\n\n def unify_chunks(self) -> \"Dataset\":\n \"\"\" Unify chunk size along all chunked dimensions of this Dataset.\n\n Returns\n -------\n\n Dataset with consistent chunk sizes for all dask-array variables\n\n See Also\n --------\n\n dask.array.core.unify_chunks\n \"\"\"\n\n try:\n self.chunks\n except ValueError: # \"inconsistent chunks\"\n pass\n else:\n # No variables with dask backend, or all chunks are already aligned\n return self.copy()\n\n # import dask is placed after the quick exit test above to allow\n # running this method if dask isn't installed and there are no chunks\n import dask.array\n\n ds = self.copy()\n\n 
dims_pos_map = {dim: index for index, dim in enumerate(ds.dims)}\n\n dask_array_names = []\n dask_unify_args = []\n for name, variable in ds.variables.items():\n if isinstance(variable.data, dask.array.Array):\n dims_tuple = [dims_pos_map[dim] for dim in variable.dims]\n dask_array_names.append(name)\n dask_unify_args.append(variable.data)\n dask_unify_args.append(dims_tuple)\n\n _, rechunked_arrays = dask.array.core.unify_chunks(*dask_unify_args)\n\n for name, new_array in zip(dask_array_names, rechunked_arrays):\n ds.variables[name]._data = new_array\n\n return ds\n\n def map_blocks(\n self,\n func: \"Callable[..., T_DSorDA]\",\n args: Sequence[Any] = (),\n kwargs: Mapping[str, Any] = None,\n ) -> \"T_DSorDA\":\n \"\"\"\n Apply a function to each chunk of this Dataset. This method is experimental and\n its signature may change.\n\n Parameters\n ----------\n func: callable\n User-provided function that accepts a Dataset as its first parameter. The\n function will receive a subset of this Dataset, corresponding to one chunk\n along each chunked dimension. ``func`` will be executed as\n ``func(obj_subset, *args, **kwargs)``.\n\n The function will be first run on mocked-up data, that looks like this\n Dataset but has sizes 0, to determine properties of the returned object such\n as dtype, variable names, new dimensions and new indexes (if any).\n\n This function must return either a single DataArray or a single Dataset.\n\n This function cannot change size of existing dimensions, or add new chunked\n dimensions.\n args: Sequence\n Passed verbatim to func after unpacking, after the sliced DataArray. xarray\n objects, if any, will not be split by chunks. Passing dask collections is\n not allowed.\n kwargs: Mapping\n Passed verbatim to func after unpacking. xarray objects, if any, will not be\n split by chunks. Passing dask collections is not allowed.\n\n Returns\n -------\n A single DataArray or Dataset with dask backend, reassembled from the outputs of\n the function.\n\n Notes\n -----\n This method is designed for when one needs to manipulate a whole xarray object\n within each chunk. 
In the more common case where one can work on numpy arrays,\n it is recommended to use apply_ufunc.\n\n If none of the variables in this Dataset is backed by dask, calling this method\n is equivalent to calling ``func(self, *args, **kwargs)``.\n\n See Also\n --------\n dask.array.map_blocks, xarray.apply_ufunc, xarray.map_blocks,\n xarray.DataArray.map_blocks\n \"\"\"\n from .parallel import map_blocks\n\n return map_blocks(func, self, args, kwargs)\n\n\nops.inject_all_ops_and_reduce_methods(Dataset, array_only=False)\n"},{"className":"DataVariables","col":0,"comment":"null","endLoc":382,"id":3073,"nodeType":"Class","startLoc":344,"text":"class DataVariables(Mapping[Hashable, \"DataArray\"]):\n __slots__ = (\"_dataset\",)\n\n def __init__(self, dataset: \"Dataset\"):\n self._dataset = dataset\n\n def __iter__(self) -> Iterator[Hashable]:\n return (\n key\n for key in self._dataset._variables\n if key not in self._dataset._coord_names\n )\n\n def __len__(self) -> int:\n return len(self._dataset._variables) - len(self._dataset._coord_names)\n\n def __contains__(self, key: Hashable) -> bool:\n return key in self._dataset._variables and key not in self._dataset._coord_names\n\n def __getitem__(self, key: Hashable) -> \"DataArray\":\n if key not in self._dataset._coord_names:\n return cast(\"DataArray\", self._dataset[key])\n raise KeyError(key)\n\n def __repr__(self) -> str:\n return formatting.data_vars_repr(self)\n\n @property\n def variables(self) -> Mapping[Hashable, Variable]:\n all_variables = self._dataset.variables\n return Frozen({k: all_variables[k] for k in self})\n\n def _ipython_key_completions_(self):\n \"\"\"Provide method for the key-autocompletions in IPython. \"\"\"\n return [\n key\n for key in self._dataset._ipython_key_completions_()\n if key not in self._dataset._coord_names\n ]"},{"col":4,"comment":"null","endLoc":355,"header":"def __iter__(self) -> Iterator[Hashable]","id":3074,"name":"__iter__","nodeType":"Function","startLoc":350,"text":"def __iter__(self) -> Iterator[Hashable]:\n return (\n key\n for key in self._dataset._variables\n if key not in self._dataset._coord_names\n )"},{"col":4,"comment":"null","endLoc":358,"header":"def __len__(self) -> int","id":3075,"name":"__len__","nodeType":"Function","startLoc":357,"text":"def __len__(self) -> int:\n return len(self._dataset._variables) - len(self._dataset._coord_names)"},{"col":4,"comment":"null","endLoc":361,"header":"def __contains__(self, key: Hashable) -> bool","id":3076,"name":"__contains__","nodeType":"Function","startLoc":360,"text":"def __contains__(self, key: Hashable) -> bool:\n return key in self._dataset._variables and key not in self._dataset._coord_names"},{"col":4,"comment":"null","endLoc":366,"header":"def __getitem__(self, key: Hashable) -> \"DataArray\"","id":3077,"name":"__getitem__","nodeType":"Function","startLoc":363,"text":"def __getitem__(self, key: Hashable) -> \"DataArray\":\n if key not in self._dataset._coord_names:\n return cast(\"DataArray\", self._dataset[key])\n raise KeyError(key)"},{"col":4,"comment":"null","endLoc":369,"header":"def __repr__(self) -> str","id":3078,"name":"__repr__","nodeType":"Function","startLoc":368,"text":"def __repr__(self) -> str:\n return formatting.data_vars_repr(self)"},{"col":0,"comment":"Apply a ndarray level function over Variable and/or ndarray objects.\n ","endLoc":648,"header":"def apply_variable_ufunc(\n func,\n *args,\n signature,\n exclude_dims=frozenset(),\n dask=\"forbidden\",\n output_dtypes=None,\n output_sizes=None,\n 
keep_attrs=False\n)","id":3079,"name":"apply_variable_ufunc","nodeType":"Function","startLoc":543,"text":"def apply_variable_ufunc(\n func,\n *args,\n signature,\n exclude_dims=frozenset(),\n dask=\"forbidden\",\n output_dtypes=None,\n output_sizes=None,\n keep_attrs=False\n):\n \"\"\"Apply a ndarray level function over Variable and/or ndarray objects.\n \"\"\"\n from .variable import Variable, as_compatible_data\n\n dim_sizes = unified_dim_sizes(\n (a for a in args if hasattr(a, \"dims\")), exclude_dims=exclude_dims\n )\n broadcast_dims = tuple(\n dim for dim in dim_sizes if dim not in signature.all_core_dims\n )\n output_dims = [broadcast_dims + out for out in signature.output_core_dims]\n\n input_data = [\n broadcast_compat_data(arg, broadcast_dims, core_dims)\n if isinstance(arg, Variable)\n else arg\n for arg, core_dims in zip(args, signature.input_core_dims)\n ]\n\n if any(isinstance(array, dask_array_type) for array in input_data):\n if dask == \"forbidden\":\n raise ValueError(\n \"apply_ufunc encountered a dask array on an \"\n \"argument, but handling for dask arrays has not \"\n \"been enabled. Either set the ``dask`` argument \"\n \"or load your data into memory first with \"\n \"``.load()`` or ``.compute()``\"\n )\n elif dask == \"parallelized\":\n input_dims = [broadcast_dims + dims for dims in signature.input_core_dims]\n numpy_func = func\n\n def func(*arrays):\n return _apply_blockwise(\n numpy_func,\n arrays,\n input_dims,\n output_dims,\n signature,\n output_dtypes,\n output_sizes,\n )\n\n elif dask == \"allowed\":\n pass\n else:\n raise ValueError(\n \"unknown setting for dask array handling in \"\n \"apply_ufunc: {}\".format(dask)\n )\n result_data = func(*input_data)\n\n if signature.num_outputs == 1:\n result_data = (result_data,)\n elif (\n not isinstance(result_data, tuple) or len(result_data) != signature.num_outputs\n ):\n raise ValueError(\n \"applied function does not have the number of \"\n \"outputs specified in the ufunc signature. \"\n \"Result is not a tuple of {} elements: {!r}\".format(\n signature.num_outputs, result_data\n )\n )\n\n output = []\n for dims, data in zip(output_dims, result_data):\n data = as_compatible_data(data)\n if data.ndim != len(dims):\n raise ValueError(\n \"applied function returned data with unexpected \"\n \"number of dimensions: {} vs {}, for dimensions {}\".format(\n data.ndim, len(dims), dims\n )\n )\n\n var = Variable(dims, data, fastpath=True)\n for dim, new_size in var.sizes.items():\n if dim in dim_sizes and new_size != dim_sizes[dim]:\n raise ValueError(\n \"size of dimension {!r} on inputs was unexpectedly \"\n \"changed by applied function from {} to {}. 
Only \"\n \"dimensions specified in ``exclude_dims`` with \"\n \"xarray.apply_ufunc are allowed to change size.\".format(\n dim, dim_sizes[dim], new_size\n )\n )\n\n if keep_attrs and isinstance(args[0], Variable):\n var.attrs.update(args[0].attrs)\n output.append(var)\n\n if signature.num_outputs == 1:\n return output[0]\n else:\n return tuple(output)"},{"col":4,"comment":"null","endLoc":374,"header":"@property\n def variables(self) -> Mapping[Hashable, Variable]","id":3080,"name":"variables","nodeType":"Function","startLoc":371,"text":"@property\n def variables(self) -> Mapping[Hashable, Variable]:\n all_variables = self._dataset.variables\n return Frozen({k: all_variables[k] for k in self})"},{"col":4,"comment":"null","endLoc":205,"header":"def __setitem__(self, key, value) -> None","id":3081,"name":"__setitem__","nodeType":"Function","startLoc":198,"text":"def __setitem__(self, key, value) -> None:\n if not utils.is_dict_like(key):\n # expand the indexer so we can handle Ellipsis\n labels = indexing.expanded_indexer(key, self.data_array.ndim)\n key = dict(zip(self.data_array.dims, labels))\n\n pos_indexers, _ = remap_label_indexers(self.data_array, key)\n self.data_array[pos_indexers] = value"},{"col":4,"comment":"Provide method for the key-autocompletions in IPython. ","endLoc":382,"header":"def _ipython_key_completions_(self)","id":3082,"name":"_ipython_key_completions_","nodeType":"Function","startLoc":376,"text":"def _ipython_key_completions_(self):\n \"\"\"Provide method for the key-autocompletions in IPython. \"\"\"\n return [\n key\n for key in self._dataset._ipython_key_completions_()\n if key not in self._dataset._coord_names\n ]"},{"col":4,"comment":"Roll date backward to nearest start of quarter","endLoc":422,"header":"def rollback(self, date)","id":3083,"name":"rollback","nodeType":"Function","startLoc":417,"text":"def rollback(self, date):\n \"\"\"Roll date backward to nearest start of quarter\"\"\"\n if self.onOffset(date):\n return date\n else:\n return date - QuarterBegin(month=self.month)"},{"col":0,"comment":"null","endLoc":724,"header":"def _apply_blockwise(\n func, args, input_dims, output_dims, signature, output_dtypes, output_sizes=None\n)","id":3084,"name":"_apply_blockwise","nodeType":"Function","startLoc":651,"text":"def _apply_blockwise(\n func, args, input_dims, output_dims, signature, output_dtypes, output_sizes=None\n):\n import dask.array\n\n if signature.num_outputs > 1:\n raise NotImplementedError(\n \"multiple outputs from apply_ufunc not yet \"\n \"supported with dask='parallelized'\"\n )\n\n if output_dtypes is None:\n raise ValueError(\n \"output dtypes (output_dtypes) must be supplied to \"\n \"apply_func when using dask='parallelized'\"\n )\n if not isinstance(output_dtypes, list):\n raise TypeError(\n \"output_dtypes must be a list of objects coercible to \"\n \"numpy dtypes, got {}\".format(output_dtypes)\n )\n if len(output_dtypes) != signature.num_outputs:\n raise ValueError(\n \"apply_ufunc arguments output_dtypes and \"\n \"output_core_dims must have the same length: {} vs {}\".format(\n len(output_dtypes), signature.num_outputs\n )\n )\n (dtype,) = output_dtypes\n\n if output_sizes is None:\n output_sizes = {}\n\n new_dims = signature.all_output_core_dims - signature.all_input_core_dims\n if any(dim not in output_sizes for dim in new_dims):\n raise ValueError(\n \"when using dask='parallelized' with apply_ufunc, \"\n \"output core dimensions not found on inputs must \"\n \"have explicitly set sizes with ``output_sizes``: 
{}\".format(new_dims)\n )\n\n for n, (data, core_dims) in enumerate(zip(args, signature.input_core_dims)):\n if isinstance(data, dask_array_type):\n # core dimensions cannot span multiple chunks\n for axis, dim in enumerate(core_dims, start=-len(core_dims)):\n if len(data.chunks[axis]) != 1:\n raise ValueError(\n \"dimension {!r} on {}th function argument to \"\n \"apply_ufunc with dask='parallelized' consists of \"\n \"multiple chunks, but is also a core dimension. To \"\n \"fix, rechunk into a single dask array chunk along \"\n \"this dimension, i.e., ``.chunk({})``, but beware \"\n \"that this may significantly increase memory usage.\".format(\n dim, n, {dim: -1}\n )\n )\n\n (out_ind,) = output_dims\n\n blockwise_args = []\n for arg, dims in zip(args, input_dims):\n # skip leading dimensions that are implicitly added by broadcasting\n ndim = getattr(arg, \"ndim\", 0)\n trimmed_dims = dims[-ndim:] if ndim else ()\n blockwise_args.extend([arg, trimmed_dims])\n\n return dask.array.blockwise(\n func,\n out_ind,\n *blockwise_args,\n dtype=dtype,\n concatenate=True,\n new_axes=output_sizes\n )"},{"attributeType":"null","col":4,"comment":"null","endLoc":406,"id":3085,"name":"_default_month","nodeType":"Attribute","startLoc":406,"text":"_default_month"},{"attributeType":"null","col":4,"comment":"null","endLoc":407,"id":3086,"name":"_freq","nodeType":"Attribute","startLoc":407,"text":"_freq"},{"attributeType":"null","col":4,"comment":"null","endLoc":408,"id":3087,"name":"_day_option","nodeType":"Attribute","startLoc":408,"text":"_day_option"},{"className":"YearBegin","col":0,"comment":"null","endLoc":507,"id":3088,"nodeType":"Class","startLoc":485,"text":"class YearBegin(YearOffset):\n _freq = \"AS\"\n _day_option = \"start\"\n _default_month = 1\n\n def onOffset(self, date):\n \"\"\"Check if the given date is in the set of possible dates created\n using a length-one version of this offset class.\"\"\"\n return date.day == 1 and date.month == self.month\n\n def rollforward(self, date):\n \"\"\"Roll date forward to nearest start of year\"\"\"\n if self.onOffset(date):\n return date\n else:\n return date + YearBegin(month=self.month)\n\n def rollback(self, date):\n \"\"\"Roll date backward to nearest start of year\"\"\"\n if self.onOffset(date):\n return date\n else:\n return date - YearBegin(month=self.month)"},{"col":4,"comment":"Check if the given date is in the set of possible dates created\n using a length-one version of this offset class.","endLoc":493,"header":"def onOffset(self, date)","id":3089,"name":"onOffset","nodeType":"Function","startLoc":490,"text":"def onOffset(self, date):\n \"\"\"Check if the given date is in the set of possible dates created\n using a length-one version of this offset class.\"\"\"\n return date.day == 1 and date.month == self.month"},{"col":4,"comment":"Roll date forward to nearest start of year","endLoc":500,"header":"def rollforward(self, date)","id":3090,"name":"rollforward","nodeType":"Function","startLoc":495,"text":"def rollforward(self, date):\n \"\"\"Roll date forward to nearest start of year\"\"\"\n if self.onOffset(date):\n return date\n else:\n return date + 
YearBegin(month=self.month)"},{"attributeType":"null","col":4,"comment":"null","endLoc":186,"id":3091,"name":"__slots__","nodeType":"Attribute","startLoc":186,"text":"__slots__"},{"attributeType":"null","col":4,"comment":"null","endLoc":345,"id":3092,"name":"__slots__","nodeType":"Attribute","startLoc":345,"text":"__slots__"},{"attributeType":"Dataset","col":8,"comment":"null","endLoc":348,"id":3093,"name":"_dataset","nodeType":"Attribute","startLoc":348,"text":"self._dataset"},{"className":"_LocIndexer","col":0,"comment":"null","endLoc":394,"id":3094,"nodeType":"Class","startLoc":385,"text":"class _LocIndexer:\n __slots__ = (\"dataset\",)\n\n def __init__(self, dataset: \"Dataset\"):\n self.dataset = dataset\n\n def __getitem__(self, key: Mapping[Hashable, Any]) -> \"Dataset\":\n if not utils.is_dict_like(key):\n raise TypeError(\"can only lookup dictionaries from Dataset.loc\")\n return self.dataset.sel(key)"},{"col":4,"comment":"null","endLoc":394,"header":"def __getitem__(self, key: Mapping[Hashable, Any]) -> \"Dataset\"","id":3095,"name":"__getitem__","nodeType":"Function","startLoc":391,"text":"def __getitem__(self, key: Mapping[Hashable, Any]) -> \"Dataset\":\n if not utils.is_dict_like(key):\n raise TypeError(\"can only lookup dictionaries from Dataset.loc\")\n return self.dataset.sel(key)"},{"attributeType":"DataArray","col":8,"comment":"null","endLoc":189,"id":3096,"name":"data_array","nodeType":"Attribute","startLoc":189,"text":"self.data_array"},{"attributeType":"null","col":4,"comment":"null","endLoc":66,"id":3097,"name":"T_DSorDA","nodeType":"Attribute","startLoc":66,"text":"T_DSorDA"},{"attributeType":"null","col":4,"comment":"null","endLoc":386,"id":3098,"name":"__slots__","nodeType":"Attribute","startLoc":386,"text":"__slots__"},{"attributeType":"Dataset","col":8,"comment":"null","endLoc":389,"id":3099,"name":"dataset","nodeType":"Attribute","startLoc":389,"text":"self.dataset"},{"col":0,"comment":"Cast the given object to a Dataset.\n\n Handles Datasets, DataArrays and dictionaries of variables. A new Dataset\n object is only created if the provided object is not already one.\n ","endLoc":341,"header":"def as_dataset(obj: Any) -> \"Dataset\"","id":3100,"name":"as_dataset","nodeType":"Function","startLoc":331,"text":"def as_dataset(obj: Any) -> \"Dataset\":\n \"\"\"Cast the given object to a Dataset.\n\n Handles Datasets, DataArrays and dictionaries of variables. 
A new Dataset\n object is only created if the provided object is not already one.\n \"\"\"\n if hasattr(obj, \"to_dataset\"):\n obj = obj.to_dataset()\n if not isinstance(obj, Dataset):\n obj = Dataset(obj)\n return obj"},{"attributeType":"ReprObject","col":0,"comment":"null","endLoc":210,"id":3101,"name":"_THIS_ARRAY","nodeType":"Attribute","startLoc":210,"text":"_THIS_ARRAY"},{"col":4,"comment":"Roll date backward to nearest start of year","endLoc":507,"header":"def rollback(self, date)","id":3102,"name":"rollback","nodeType":"Function","startLoc":502,"text":"def rollback(self, date):\n \"\"\"Roll date backward to nearest start of year\"\"\"\n if self.onOffset(date):\n return date\n else:\n return date - YearBegin(month=self.month)"},{"col":0,"comment":"","endLoc":1,"header":"dataarray.py#","id":3104,"name":"","nodeType":"Function","startLoc":1,"text":"if TYPE_CHECKING:\n T_DSorDA = TypeVar(\"T_DSorDA\", \"DataArray\", Dataset)\n\n try:\n from dask.delayed import Delayed\n except ImportError:\n Delayed = None\n try:\n from cdms2 import Variable as cdms2_Variable\n except ImportError:\n cdms2_Variable = None\n try:\n from iris.cube import Cube as iris_Cube\n except ImportError:\n iris_Cube = None\n\n_THIS_ARRAY = ReprObject(\"\")\n\nops.inject_all_ops_and_reduce_methods(DataArray, priority=60)"},{"attributeType":"null","col":4,"comment":"null","endLoc":486,"id":3105,"name":"_freq","nodeType":"Attribute","startLoc":486,"text":"_freq"},{"attributeType":"null","col":4,"comment":"null","endLoc":487,"id":3106,"name":"_day_option","nodeType":"Attribute","startLoc":487,"text":"_day_option"},{"attributeType":"null","col":4,"comment":"null","endLoc":488,"id":3107,"name":"_default_month","nodeType":"Attribute","startLoc":488,"text":"_default_month"},{"className":"Hour","col":0,"comment":"null","endLoc":552,"id":3108,"nodeType":"Class","startLoc":545,"text":"class Hour(BaseCFTimeOffset):\n _freq = \"H\"\n\n def as_timedelta(self):\n return timedelta(hours=self.n)\n\n def __apply__(self, other):\n return other + self.as_timedelta()"},{"col":4,"comment":"null","endLoc":549,"header":"def as_timedelta(self)","id":3109,"name":"as_timedelta","nodeType":"Function","startLoc":548,"text":"def as_timedelta(self):\n return timedelta(hours=self.n)"},{"col":4,"comment":"null","endLoc":552,"header":"def __apply__(self, other)","id":3110,"name":"__apply__","nodeType":"Function","startLoc":551,"text":"def __apply__(self, other):\n return other + self.as_timedelta()"},{"fileName":"testing.py","filePath":"xarray","id":3111,"nodeType":"File","text":"\"\"\"Testing functions exposed to the user API\"\"\"\nfrom typing import Hashable, Set, Union\n\nimport numpy as np\nimport pandas as pd\n\nfrom xarray.core import duck_array_ops, formatting\nfrom xarray.core.dataarray import DataArray\nfrom xarray.core.dataset import Dataset\nfrom xarray.core.indexes import default_indexes\nfrom xarray.core.variable import IndexVariable, Variable\n\n\ndef _decode_string_data(data):\n if data.dtype.kind == \"S\":\n return np.core.defchararray.decode(data, \"utf-8\", \"replace\")\n return data\n\n\ndef _data_allclose_or_equiv(arr1, arr2, rtol=1e-05, atol=1e-08, decode_bytes=True):\n if any(arr.dtype.kind == \"S\" for arr in [arr1, arr2]) and decode_bytes:\n arr1 = _decode_string_data(arr1)\n arr2 = _decode_string_data(arr2)\n exact_dtypes = [\"M\", \"m\", \"O\", \"S\", \"U\"]\n if any(arr.dtype.kind in exact_dtypes for arr in [arr1, arr2]):\n return duck_array_ops.array_equiv(arr1, arr2)\n else:\n return 
duck_array_ops.allclose_or_equiv(arr1, arr2, rtol=rtol, atol=atol)\n\n\ndef assert_equal(a, b):\n \"\"\"Like :py:func:`numpy.testing.assert_array_equal`, but for xarray\n objects.\n\n Raises an AssertionError if two objects are not equal. This will match\n data values, dimensions and coordinates, but not names or attributes\n (except for Dataset objects for which the variable names must match).\n Arrays with NaN in the same location are considered equal.\n\n Parameters\n ----------\n a : xarray.Dataset, xarray.DataArray or xarray.Variable\n The first object to compare.\n b : xarray.Dataset, xarray.DataArray or xarray.Variable\n The second object to compare.\n\n See also\n --------\n assert_identical, assert_allclose, Dataset.equals, DataArray.equals,\n numpy.testing.assert_array_equal\n \"\"\"\n __tracebackhide__ = True\n assert type(a) == type(b)\n if isinstance(a, (Variable, DataArray)):\n assert a.equals(b), formatting.diff_array_repr(a, b, \"equals\")\n elif isinstance(a, Dataset):\n assert a.equals(b), formatting.diff_dataset_repr(a, b, \"equals\")\n else:\n raise TypeError(\"{} not supported by assertion comparison\".format(type(a)))\n\n\ndef assert_identical(a, b):\n \"\"\"Like :py:func:`xarray.testing.assert_equal`, but also matches the\n objects' names and attributes.\n\n Raises an AssertionError if two objects are not identical.\n\n Parameters\n ----------\n a : xarray.Dataset, xarray.DataArray or xarray.Variable\n The first object to compare.\n b : xarray.Dataset, xarray.DataArray or xarray.Variable\n The second object to compare.\n\n See also\n --------\n assert_equal, assert_allclose, Dataset.equals, DataArray.equals\n \"\"\"\n __tracebackhide__ = True\n assert type(a) == type(b)\n if isinstance(a, Variable):\n assert a.identical(b), formatting.diff_array_repr(a, b, \"identical\")\n elif isinstance(a, DataArray):\n assert a.name == b.name\n assert a.identical(b), formatting.diff_array_repr(a, b, \"identical\")\n elif isinstance(a, (Dataset, Variable)):\n assert a.identical(b), formatting.diff_dataset_repr(a, b, \"identical\")\n else:\n raise TypeError(\"{} not supported by assertion comparison\".format(type(a)))\n\n\ndef assert_allclose(a, b, rtol=1e-05, atol=1e-08, decode_bytes=True):\n \"\"\"Like :py:func:`numpy.testing.assert_allclose`, but for xarray objects.\n\n Raises an AssertionError if two objects are not equal up to desired\n tolerance.\n\n Parameters\n ----------\n a : xarray.Dataset, xarray.DataArray or xarray.Variable\n The first object to compare.\n b : xarray.Dataset, xarray.DataArray or xarray.Variable\n The second object to compare.\n rtol : float, optional\n Relative tolerance.\n atol : float, optional\n Absolute tolerance.\n decode_bytes : bool, optional\n Whether byte dtypes should be decoded to strings as UTF-8 or not.\n This is useful for testing serialization methods on Python 3 that\n return saved strings as bytes.\n\n See also\n --------\n assert_identical, assert_equal, numpy.testing.assert_allclose\n \"\"\"\n __tracebackhide__ = True\n assert type(a) == type(b)\n kwargs = dict(rtol=rtol, atol=atol, decode_bytes=decode_bytes)\n if isinstance(a, Variable):\n assert a.dims == b.dims\n allclose = _data_allclose_or_equiv(a.values, b.values, **kwargs)\n assert allclose, \"{}\\n{}\".format(a.values, b.values)\n elif isinstance(a, DataArray):\n assert_allclose(a.variable, b.variable, **kwargs)\n assert set(a.coords) == set(b.coords)\n for v in a.coords.variables:\n # can't recurse with this function as coord is sometimes a\n # DataArray, so call into 
_data_allclose_or_equiv directly\n allclose = _data_allclose_or_equiv(\n a.coords[v].values, b.coords[v].values, **kwargs\n )\n assert allclose, \"{}\\n{}\".format(a.coords[v].values, b.coords[v].values)\n elif isinstance(a, Dataset):\n assert set(a.data_vars) == set(b.data_vars)\n assert set(a.coords) == set(b.coords)\n for k in list(a.variables) + list(a.coords):\n assert_allclose(a[k], b[k], **kwargs)\n\n else:\n raise TypeError(\"{} not supported by assertion comparison\".format(type(a)))\n\n\ndef assert_chunks_equal(a, b):\n \"\"\"\n Assert that chunksizes along chunked dimensions are equal.\n\n Parameters\n ----------\n a : xarray.Dataset or xarray.DataArray\n The first object to compare.\n b : xarray.Dataset or xarray.DataArray\n The second object to compare.\n \"\"\"\n\n if isinstance(a, DataArray) != isinstance(b, DataArray):\n raise TypeError(\"a and b have mismatched types\")\n\n left = a.unify_chunks()\n right = b.unify_chunks()\n assert left.chunks == right.chunks\n\n\ndef _assert_indexes_invariants_checks(indexes, possible_coord_variables, dims):\n assert isinstance(indexes, dict), indexes\n assert all(isinstance(v, pd.Index) for v in indexes.values()), {\n k: type(v) for k, v in indexes.items()\n }\n\n index_vars = {\n k for k, v in possible_coord_variables.items() if isinstance(v, IndexVariable)\n }\n assert indexes.keys() <= index_vars, (set(indexes), index_vars)\n\n # Note: when we support non-default indexes, these checks should be opt-in\n # only!\n defaults = default_indexes(possible_coord_variables, dims)\n assert indexes.keys() == defaults.keys(), (set(indexes), set(defaults))\n assert all(v.equals(defaults[k]) for k, v in indexes.items()), (indexes, defaults)\n\n\ndef _assert_variable_invariants(var: Variable, name: Hashable = None):\n if name is None:\n name_or_empty: tuple = ()\n else:\n name_or_empty = (name,)\n assert isinstance(var._dims, tuple), name_or_empty + (var._dims,)\n assert len(var._dims) == len(var._data.shape), name_or_empty + (\n var._dims,\n var._data.shape,\n )\n assert isinstance(var._encoding, (type(None), dict)), name_or_empty + (\n var._encoding,\n )\n assert isinstance(var._attrs, (type(None), dict)), name_or_empty + (var._attrs,)\n\n\ndef _assert_dataarray_invariants(da: DataArray):\n assert isinstance(da._variable, Variable), da._variable\n _assert_variable_invariants(da._variable)\n\n assert isinstance(da._coords, dict), da._coords\n assert all(isinstance(v, Variable) for v in da._coords.values()), da._coords\n assert all(set(v.dims) <= set(da.dims) for v in da._coords.values()), (\n da.dims,\n {k: v.dims for k, v in da._coords.items()},\n )\n assert all(\n isinstance(v, IndexVariable) for (k, v) in da._coords.items() if v.dims == (k,)\n ), {k: type(v) for k, v in da._coords.items()}\n for k, v in da._coords.items():\n _assert_variable_invariants(v, k)\n\n if da._indexes is not None:\n _assert_indexes_invariants_checks(da._indexes, da._coords, da.dims)\n\n\ndef _assert_dataset_invariants(ds: Dataset):\n assert isinstance(ds._variables, dict), type(ds._variables)\n assert all(isinstance(v, Variable) for v in ds._variables.values()), ds._variables\n for k, v in ds._variables.items():\n _assert_variable_invariants(v, k)\n\n assert isinstance(ds._coord_names, set), ds._coord_names\n assert ds._coord_names <= ds._variables.keys(), (\n ds._coord_names,\n set(ds._variables),\n )\n\n assert type(ds._dims) is dict, ds._dims\n assert all(isinstance(v, int) for v in ds._dims.values()), ds._dims\n var_dims: Set[Hashable] = set()\n for v in 
ds._variables.values():\n var_dims.update(v.dims)\n assert ds._dims.keys() == var_dims, (set(ds._dims), var_dims)\n assert all(\n ds._dims[k] == v.sizes[k] for v in ds._variables.values() for k in v.sizes\n ), (ds._dims, {k: v.sizes for k, v in ds._variables.items()})\n assert all(\n isinstance(v, IndexVariable)\n for (k, v) in ds._variables.items()\n if v.dims == (k,)\n ), {k: type(v) for k, v in ds._variables.items() if v.dims == (k,)}\n assert all(v.dims == (k,) for (k, v) in ds._variables.items() if k in ds._dims), {\n k: v.dims for k, v in ds._variables.items() if k in ds._dims\n }\n\n if ds._indexes is not None:\n _assert_indexes_invariants_checks(ds._indexes, ds._variables, ds._dims)\n\n assert isinstance(ds._encoding, (type(None), dict))\n assert isinstance(ds._attrs, (type(None), dict))\n\n\ndef _assert_internal_invariants(xarray_obj: Union[DataArray, Dataset, Variable],):\n \"\"\"Validate that an xarray object satisfies its own internal invariants.\n\n This exists for the benefit of xarray's own test suite, but may be useful\n in external projects if they (ill-advisedly) create objects using xarray's\n private APIs.\n \"\"\"\n if isinstance(xarray_obj, Variable):\n _assert_variable_invariants(xarray_obj)\n elif isinstance(xarray_obj, DataArray):\n _assert_dataarray_invariants(xarray_obj)\n elif isinstance(xarray_obj, Dataset):\n _assert_dataset_invariants(xarray_obj)\n else:\n raise TypeError(\n \"{} is not a supported type for xarray invariant checks\".format(\n type(xarray_obj)\n )\n )\n"},{"attributeType":"null","col":4,"comment":"null","endLoc":546,"id":3112,"name":"_freq","nodeType":"Attribute","startLoc":546,"text":"_freq"},{"className":"Minute","col":0,"comment":"null","endLoc":562,"id":3113,"nodeType":"Class","startLoc":555,"text":"class Minute(BaseCFTimeOffset):\n _freq = \"T\"\n\n def as_timedelta(self):\n return timedelta(minutes=self.n)\n\n def __apply__(self, other):\n return other + self.as_timedelta()"},{"col":4,"comment":"null","endLoc":559,"header":"def as_timedelta(self)","id":3114,"name":"as_timedelta","nodeType":"Function","startLoc":558,"text":"def as_timedelta(self):\n return timedelta(minutes=self.n)"},{"col":4,"comment":"null","endLoc":562,"header":"def __apply__(self, other)","id":3115,"name":"__apply__","nodeType":"Function","startLoc":561,"text":"def __apply__(self, other):\n return other + self.as_timedelta()"},{"attributeType":"null","col":4,"comment":"null","endLoc":89,"id":3116,"name":"T_DSorDA","nodeType":"Attribute","startLoc":89,"text":"T_DSorDA"},{"attributeType":"null","col":4,"comment":"null","endLoc":556,"id":3117,"name":"_freq","nodeType":"Attribute","startLoc":556,"text":"_freq"},{"attributeType":"null","col":0,"comment":"null","endLoc":98,"id":3118,"name":"_DATETIMEINDEX_COMPONENTS","nodeType":"Attribute","startLoc":98,"text":"_DATETIMEINDEX_COMPONENTS"},{"className":"Second","col":0,"comment":"null","endLoc":572,"id":3119,"nodeType":"Class","startLoc":565,"text":"class Second(BaseCFTimeOffset):\n _freq = \"S\"\n\n def as_timedelta(self):\n return timedelta(seconds=self.n)\n\n def __apply__(self, other):\n return other + self.as_timedelta()"},{"col":0,"comment":"","endLoc":1,"header":"dataset.py#","id":3120,"name":"","nodeType":"Function","startLoc":1,"text":"if TYPE_CHECKING:\n from ..backends import AbstractDataStore, ZarrStore\n from .dataarray import DataArray\n from .merge import CoercibleMapping\n\n T_DSorDA = TypeVar(\"T_DSorDA\", DataArray, \"Dataset\")\n\n try:\n from dask.delayed import Delayed\n except ImportError:\n Delayed = 
None\n\n_DATETIMEINDEX_COMPONENTS = [\n \"year\",\n \"month\",\n \"day\",\n \"hour\",\n \"minute\",\n \"second\",\n \"microsecond\",\n \"nanosecond\",\n \"date\",\n \"time\",\n \"dayofyear\",\n \"weekofyear\",\n \"dayofweek\",\n \"quarter\",\n]\n\nops.inject_all_ops_and_reduce_methods(Dataset, array_only=False)"},{"col":4,"comment":"null","endLoc":569,"header":"def as_timedelta(self)","id":3121,"name":"as_timedelta","nodeType":"Function","startLoc":568,"text":"def as_timedelta(self):\n return timedelta(seconds=self.n)"},{"col":4,"comment":"null","endLoc":572,"header":"def __apply__(self, other)","id":3122,"name":"__apply__","nodeType":"Function","startLoc":571,"text":"def __apply__(self, other):\n return other + self.as_timedelta()"},{"col":0,"comment":"null","endLoc":17,"header":"def _decode_string_data(data)","id":3123,"name":"_decode_string_data","nodeType":"Function","startLoc":14,"text":"def _decode_string_data(data):\n if data.dtype.kind == \"S\":\n return np.core.defchararray.decode(data, \"utf-8\", \"replace\")\n return data"},{"col":0,"comment":"null","endLoc":28,"header":"def _data_allclose_or_equiv(arr1, arr2, rtol=1e-05, atol=1e-08, decode_bytes=True)","id":3124,"name":"_data_allclose_or_equiv","nodeType":"Function","startLoc":20,"text":"def _data_allclose_or_equiv(arr1, arr2, rtol=1e-05, atol=1e-08, decode_bytes=True):\n if any(arr.dtype.kind == \"S\" for arr in [arr1, arr2]) and decode_bytes:\n arr1 = _decode_string_data(arr1)\n arr2 = _decode_string_data(arr2)\n exact_dtypes = [\"M\", \"m\", \"O\", \"S\", \"U\"]\n if any(arr.dtype.kind in exact_dtypes for arr in [arr1, arr2]):\n return duck_array_ops.array_equiv(arr1, arr2)\n else:\n return duck_array_ops.allclose_or_equiv(arr1, arr2, rtol=rtol, atol=atol)"},{"id":3125,"name":"xarray/tests/data","nodeType":"Package"},{"id":3126,"name":"example.ict","nodeType":"TextFile","path":"xarray/tests/data","text":"27, 1001\nHenderson, Barron\nU.S. 
EPA\nExample file with artificial data\nJUST_A_TEST\n1, 1\n2018, 04, 27, 2018, 04, 27\n0\nStart_UTC\n7\n1, 1, 1, 1, 1\n-9999, -9999, -9999, -9999, -9999\nlat, degrees_north\nlon, degrees_east\nelev, meters\nTEST_ppbv, ppbv\nTESTM_ppbv, ppbv\n0\n8\nULOD_FLAG: -7777\nULOD_VALUE: N/A\nLLOD_FLAG: -8888\nLLOD_VALUE: N/A, N/A, N/A, N/A, 0.025\nOTHER_COMMENTS: www-air.larc.nasa.gov/missions/etc/IcarttDataFormat.htm\nREVISION: R0\nR0: No comments for this revision.\nStart_UTC, lat, lon, elev, TEST_ppbv, TESTM_ppbv\n43200, 41.00000, -71.00000, 5, 1.2345, 2.220\n46800, 42.00000, -72.00000, 15, 2.3456, -9999\n50400, 42.00000, -73.00000, 20, 3.4567, -7777\n50400, 42.00000, -74.00000, 25, 4.5678, -8888"},{"attributeType":"null","col":4,"comment":"null","endLoc":566,"id":3127,"name":"_freq","nodeType":"Attribute","startLoc":566,"text":"_freq"},{"attributeType":"null","col":0,"comment":"null","endLoc":334,"id":3128,"name":"_MONTH_ABBREVIATIONS","nodeType":"Attribute","startLoc":334,"text":"_MONTH_ABBREVIATIONS"},{"attributeType":"null","col":0,"comment":"null","endLoc":575,"id":3129,"name":"_FREQUENCIES","nodeType":"Attribute","startLoc":575,"text":"_FREQUENCIES"},{"attributeType":"null","col":0,"comment":"null","endLoc":640,"id":3130,"name":"_FREQUENCY_CONDITION","nodeType":"Attribute","startLoc":640,"text":"_FREQUENCY_CONDITION"},{"attributeType":"null","col":0,"comment":"null","endLoc":641,"id":3131,"name":"_PATTERN","nodeType":"Attribute","startLoc":641,"text":"_PATTERN"},{"id":3132,"name":"unit-tests.yml","nodeType":"TextFile","path":"ci/azure","text":"steps:\n\n- template: install.yml\n\n- bash: |\n source activate xarray-tests\n python -OO -c \"import xarray\"\n displayName: Import xarray\n\n# Work around for allowed test failures:\n# https://github.com/microsoft/azure-pipelines-tasks/issues/9302\n- bash: |\n source activate xarray-tests\n pytest \\\n --junitxml=junit/test-results.xml \\\n --cov=xarray \\\n --cov-report=xml \\\n $(pytest_extra_flags) || [ \"$ALLOW_FAILURE\" = \"true\" ]\n displayName: Run tests\n\n- bash: |\n curl https://codecov.io/bash > codecov.sh\n bash codecov.sh -t 688f4d53-31bb-49b5-8370-4ce6f792cf3d\n displayName: Upload coverage to codecov.io\n\n# TODO: publish coverage results to Azure, once we can merge them across\n# multiple jobs: https://stackoverflow.com/questions/56776185\n\n- task: PublishTestResults@2\n condition: succeededOrFailed()\n inputs:\n testResultsFiles: '**/test-*.xml'\n failTaskOnFailedTests: false\n testRunTitle: '$(Agent.JobName)'\n"},{"col":0,"comment":"","endLoc":8,"header":"plot.py#","id":3134,"name":"","nodeType":"Function","startLoc":1,"text":"\"\"\"\nUse this module directly:\n import xarray.plot as xplt\n\nOr use the methods on a DataArray or Dataset:\n DataArray.plot._____\n Dataset.plot._____\n\"\"\""},{"col":0,"comment":"","endLoc":1,"header":"cftime_offsets.py#","id":3135,"name":"","nodeType":"Function","startLoc":1,"text":"\"\"\"Time offset classes for use with cftime.datetime objects\"\"\"\n\n_MONTH_ABBREVIATIONS = {\n 1: \"JAN\",\n 2: \"FEB\",\n 3: \"MAR\",\n 4: \"APR\",\n 5: \"MAY\",\n 6: \"JUN\",\n 7: \"JUL\",\n 8: \"AUG\",\n 9: \"SEP\",\n 10: \"OCT\",\n 11: \"NOV\",\n 12: \"DEC\",\n}\n\n_FREQUENCIES = {\n \"A\": YearEnd,\n \"AS\": YearBegin,\n \"Y\": YearEnd,\n \"YS\": YearBegin,\n \"Q\": partial(QuarterEnd, month=12),\n \"QS\": partial(QuarterBegin, month=1),\n \"M\": MonthEnd,\n \"MS\": MonthBegin,\n \"D\": Day,\n \"H\": Hour,\n \"T\": Minute,\n \"min\": Minute,\n \"S\": Second,\n \"AS-JAN\": partial(YearBegin, month=1),\n 
\"AS-FEB\": partial(YearBegin, month=2),\n \"AS-MAR\": partial(YearBegin, month=3),\n \"AS-APR\": partial(YearBegin, month=4),\n \"AS-MAY\": partial(YearBegin, month=5),\n \"AS-JUN\": partial(YearBegin, month=6),\n \"AS-JUL\": partial(YearBegin, month=7),\n \"AS-AUG\": partial(YearBegin, month=8),\n \"AS-SEP\": partial(YearBegin, month=9),\n \"AS-OCT\": partial(YearBegin, month=10),\n \"AS-NOV\": partial(YearBegin, month=11),\n \"AS-DEC\": partial(YearBegin, month=12),\n \"A-JAN\": partial(YearEnd, month=1),\n \"A-FEB\": partial(YearEnd, month=2),\n \"A-MAR\": partial(YearEnd, month=3),\n \"A-APR\": partial(YearEnd, month=4),\n \"A-MAY\": partial(YearEnd, month=5),\n \"A-JUN\": partial(YearEnd, month=6),\n \"A-JUL\": partial(YearEnd, month=7),\n \"A-AUG\": partial(YearEnd, month=8),\n \"A-SEP\": partial(YearEnd, month=9),\n \"A-OCT\": partial(YearEnd, month=10),\n \"A-NOV\": partial(YearEnd, month=11),\n \"A-DEC\": partial(YearEnd, month=12),\n \"QS-JAN\": partial(QuarterBegin, month=1),\n \"QS-FEB\": partial(QuarterBegin, month=2),\n \"QS-MAR\": partial(QuarterBegin, month=3),\n \"QS-APR\": partial(QuarterBegin, month=4),\n \"QS-MAY\": partial(QuarterBegin, month=5),\n \"QS-JUN\": partial(QuarterBegin, month=6),\n \"QS-JUL\": partial(QuarterBegin, month=7),\n \"QS-AUG\": partial(QuarterBegin, month=8),\n \"QS-SEP\": partial(QuarterBegin, month=9),\n \"QS-OCT\": partial(QuarterBegin, month=10),\n \"QS-NOV\": partial(QuarterBegin, month=11),\n \"QS-DEC\": partial(QuarterBegin, month=12),\n \"Q-JAN\": partial(QuarterEnd, month=1),\n \"Q-FEB\": partial(QuarterEnd, month=2),\n \"Q-MAR\": partial(QuarterEnd, month=3),\n \"Q-APR\": partial(QuarterEnd, month=4),\n \"Q-MAY\": partial(QuarterEnd, month=5),\n \"Q-JUN\": partial(QuarterEnd, month=6),\n \"Q-JUL\": partial(QuarterEnd, month=7),\n \"Q-AUG\": partial(QuarterEnd, month=8),\n \"Q-SEP\": partial(QuarterEnd, month=9),\n \"Q-OCT\": partial(QuarterEnd, month=10),\n \"Q-NOV\": partial(QuarterEnd, month=11),\n \"Q-DEC\": partial(QuarterEnd, month=12),\n}\n\n_FREQUENCY_CONDITION = \"|\".join(_FREQUENCIES.keys())\n\n_PATTERN = r\"^((?P\\d+)|())(?P({}))$\".format(_FREQUENCY_CONDITION)\n\nCFTIME_TICKS = (Day, Hour, Minute, Second)"},{"col":0,"comment":"Like :py:func:`numpy.testing.assert_array_equal`, but for xarray\n objects.\n\n Raises an AssertionError if two objects are not equal. This will match\n data values, dimensions and coordinates, but not names or attributes\n (except for Dataset objects for which the variable names must match).\n Arrays with NaN in the same location are considered equal.\n\n Parameters\n ----------\n a : xarray.Dataset, xarray.DataArray or xarray.Variable\n The first object to compare.\n b : xarray.Dataset, xarray.DataArray or xarray.Variable\n The second object to compare.\n\n See also\n --------\n assert_identical, assert_allclose, Dataset.equals, DataArray.equals,\n numpy.testing.assert_array_equal\n ","endLoc":59,"header":"def assert_equal(a, b)","id":3136,"name":"assert_equal","nodeType":"Function","startLoc":31,"text":"def assert_equal(a, b):\n \"\"\"Like :py:func:`numpy.testing.assert_array_equal`, but for xarray\n objects.\n\n Raises an AssertionError if two objects are not equal. 
This will match\n data values, dimensions and coordinates, but not names or attributes\n (except for Dataset objects for which the variable names must match).\n Arrays with NaN in the same location are considered equal.\n\n Parameters\n ----------\n a : xarray.Dataset, xarray.DataArray or xarray.Variable\n The first object to compare.\n b : xarray.Dataset, xarray.DataArray or xarray.Variable\n The second object to compare.\n\n See also\n --------\n assert_identical, assert_allclose, Dataset.equals, DataArray.equals,\n numpy.testing.assert_array_equal\n \"\"\"\n __tracebackhide__ = True\n assert type(a) == type(b)\n if isinstance(a, (Variable, DataArray)):\n assert a.equals(b), formatting.diff_array_repr(a, b, \"equals\")\n elif isinstance(a, Dataset):\n assert a.equals(b), formatting.diff_dataset_repr(a, b, \"equals\")\n else:\n raise TypeError(\"{} not supported by assertion comparison\".format(type(a)))"},{"attributeType":"ReprObject","col":0,"comment":"null","endLoc":37,"id":3137,"name":"_NO_FILL_VALUE","nodeType":"Attribute","startLoc":37,"text":"_NO_FILL_VALUE"},{"id":3138,"name":"py36-bare-minimum.yml","nodeType":"TextFile","path":"ci/requirements","text":"name: xarray-tests\nchannels:\n - conda-forge\ndependencies:\n - python=3.6\n - coveralls\n - pytest\n - pytest-cov\n - pytest-env\n - numpy=1.14\n - pandas=0.24\n"},{"attributeType":"ReprObject","col":0,"comment":"null","endLoc":38,"id":3139,"name":"_DEFAULT_NAME","nodeType":"Attribute","startLoc":38,"text":"_DEFAULT_NAME"},{"id":3140,"name":"py37-windows.yml","nodeType":"TextFile","path":"ci/requirements","text":"name: xarray-tests\nchannels:\n - conda-forge\ndependencies:\n - python=3.7\n - black\n - boto3\n - bottleneck\n - cartopy\n # - cdms2 # Not available on Windows\n # - cfgrib>=0.9.2 # Causes Python interpreter crash on Windows\n - cftime\n - coveralls\n - dask\n - distributed\n - flake8\n - h5netcdf\n - h5py\n - hdf5\n - hypothesis\n - iris\n - lxml # Optional dep of pydap\n - matplotlib\n - mypy=0.730 # Must match .pre-commit-config.yaml\n - nc-time-axis\n - netcdf4\n - numba\n - numpy\n - pandas\n - pip\n - pseudonetcdf\n - pydap\n # - pynio # Not available on Windows\n - pytest\n - pytest-cov\n - pytest-env\n - rasterio\n - scipy\n - seaborn\n - sparse\n - toolz\n - zarr\n - pip:\n - numbagg\n"},{"attributeType":"null","col":0,"comment":"null","endLoc":39,"id":3141,"name":"_JOINS_WITHOUT_FILL_VALUES","nodeType":"Attribute","startLoc":39,"text":"_JOINS_WITHOUT_FILL_VALUES"},{"attributeType":"null","col":0,"comment":"null","endLoc":270,"id":3142,"name":"_JOINERS","nodeType":"Attribute","startLoc":270,"text":"_JOINERS"},{"attributeType":"null","col":0,"comment":"null","endLoc":486,"id":3143,"name":"SLICE_NONE","nodeType":"Attribute","startLoc":486,"text":"SLICE_NONE"},{"col":0,"comment":"","endLoc":3,"header":"computation.py#","id":3144,"name":"","nodeType":"Function","startLoc":1,"text":"\"\"\"\nFunctions for applying functions that act on arrays to xarray's labeled data.\n\"\"\"\n\nif TYPE_CHECKING:\n from .coordinates import Coordinates # noqa\n from .dataset import Dataset\n\n_NO_FILL_VALUE = utils.ReprObject(\"\")\n\n_DEFAULT_NAME = utils.ReprObject(\"\")\n\n_JOINS_WITHOUT_FILL_VALUES = frozenset({\"inner\", \"exact\"})\n\n_JOINERS = {\n \"inner\": ordered_set_intersection,\n \"outer\": ordered_set_union,\n \"left\": operator.itemgetter(0),\n \"right\": operator.itemgetter(-1),\n \"exact\": assert_and_return_exact_match,\n}\n\nSLICE_NONE = slice(None)"},{"col":0,"comment":"Like 
:py:func:`xarray.testing.assert_equal`, but also matches the\n objects' names and attributes.\n\n Raises an AssertionError if two objects are not identical.\n\n Parameters\n ----------\n a : xarray.Dataset, xarray.DataArray or xarray.Variable\n The first object to compare.\n b : xarray.Dataset, xarray.DataArray or xarray.Variable\n The second object to compare.\n\n See also\n --------\n assert_equal, assert_allclose, Dataset.equals, DataArray.equals\n ","endLoc":89,"header":"def assert_identical(a, b)","id":3145,"name":"assert_identical","nodeType":"Function","startLoc":62,"text":"def assert_identical(a, b):\n \"\"\"Like :py:func:`xarray.testing.assert_equal`, but also matches the\n objects' names and attributes.\n\n Raises an AssertionError if two objects are not identical.\n\n Parameters\n ----------\n a : xarray.Dataset, xarray.DataArray or xarray.Variable\n The first object to compare.\n b : xarray.Dataset, xarray.DataArray or xarray.Variable\n The second object to compare.\n\n See also\n --------\n assert_equal, assert_allclose, Dataset.equals, DataArray.equals\n \"\"\"\n __tracebackhide__ = True\n assert type(a) == type(b)\n if isinstance(a, Variable):\n assert a.identical(b), formatting.diff_array_repr(a, b, \"identical\")\n elif isinstance(a, DataArray):\n assert a.name == b.name\n assert a.identical(b), formatting.diff_array_repr(a, b, \"identical\")\n elif isinstance(a, (Dataset, Variable)):\n assert a.identical(b), formatting.diff_dataset_repr(a, b, \"identical\")\n else:\n raise TypeError(\"{} not supported by assertion comparison\".format(type(a)))"},{"col":0,"comment":"Like :py:func:`numpy.testing.assert_allclose`, but for xarray objects.\n\n Raises an AssertionError if two objects are not equal up to desired\n tolerance.\n\n Parameters\n ----------\n a : xarray.Dataset, xarray.DataArray or xarray.Variable\n The first object to compare.\n b : xarray.Dataset, xarray.DataArray or xarray.Variable\n The second object to compare.\n rtol : float, optional\n Relative tolerance.\n atol : float, optional\n Absolute tolerance.\n decode_bytes : bool, optional\n Whether byte dtypes should be decoded to strings as UTF-8 or not.\n This is useful for testing serialization methods on Python 3 that\n return saved strings as bytes.\n\n See also\n --------\n assert_identical, assert_equal, numpy.testing.assert_allclose\n ","endLoc":141,"header":"def assert_allclose(a, b, rtol=1e-05, atol=1e-08, decode_bytes=True)","id":3147,"name":"assert_allclose","nodeType":"Function","startLoc":92,"text":"def assert_allclose(a, b, rtol=1e-05, atol=1e-08, decode_bytes=True):\n \"\"\"Like :py:func:`numpy.testing.assert_allclose`, but for xarray objects.\n\n Raises an AssertionError if two objects are not equal up to desired\n tolerance.\n\n Parameters\n ----------\n a : xarray.Dataset, xarray.DataArray or xarray.Variable\n The first object to compare.\n b : xarray.Dataset, xarray.DataArray or xarray.Variable\n The second object to compare.\n rtol : float, optional\n Relative tolerance.\n atol : float, optional\n Absolute tolerance.\n decode_bytes : bool, optional\n Whether byte dtypes should be decoded to strings as UTF-8 or not.\n This is useful for testing serialization methods on Python 3 that\n return saved strings as bytes.\n\n See also\n --------\n assert_identical, assert_equal, numpy.testing.assert_allclose\n \"\"\"\n __tracebackhide__ = True\n assert type(a) == type(b)\n kwargs = dict(rtol=rtol, atol=atol, decode_bytes=decode_bytes)\n if isinstance(a, Variable):\n assert a.dims == b.dims\n allclose = 
_data_allclose_or_equiv(a.values, b.values, **kwargs)\n assert allclose, \"{}\\n{}\".format(a.values, b.values)\n elif isinstance(a, DataArray):\n assert_allclose(a.variable, b.variable, **kwargs)\n assert set(a.coords) == set(b.coords)\n for v in a.coords.variables:\n # can't recurse with this function as coord is sometimes a\n # DataArray, so call into _data_allclose_or_equiv directly\n allclose = _data_allclose_or_equiv(\n a.coords[v].values, b.coords[v].values, **kwargs\n )\n assert allclose, \"{}\\n{}\".format(a.coords[v].values, b.coords[v].values)\n elif isinstance(a, Dataset):\n assert set(a.data_vars) == set(b.data_vars)\n assert set(a.coords) == set(b.coords)\n for k in list(a.variables) + list(a.coords):\n assert_allclose(a[k], b[k], **kwargs)\n\n else:\n raise TypeError(\"{} not supported by assertion comparison\".format(type(a)))"},{"col":0,"comment":"\n Assert that chunksizes along chunked dimensions are equal.\n\n Parameters\n ----------\n a : xarray.Dataset or xarray.DataArray\n The first object to compare.\n b : xarray.Dataset or xarray.DataArray\n The second object to compare.\n ","endLoc":161,"header":"def assert_chunks_equal(a, b)","id":3148,"name":"assert_chunks_equal","nodeType":"Function","startLoc":144,"text":"def assert_chunks_equal(a, b):\n \"\"\"\n Assert that chunksizes along chunked dimensions are equal.\n\n Parameters\n ----------\n a : xarray.Dataset or xarray.DataArray\n The first object to compare.\n b : xarray.Dataset or xarray.DataArray\n The second object to compare.\n \"\"\"\n\n if isinstance(a, DataArray) != isinstance(b, DataArray):\n raise TypeError(\"a and b have mismatched types\")\n\n left = a.unify_chunks()\n right = b.unify_chunks()\n assert left.chunks == right.chunks"},{"col":0,"comment":"null","endLoc":179,"header":"def _assert_indexes_invariants_checks(indexes, possible_coord_variables, dims)","id":3149,"name":"_assert_indexes_invariants_checks","nodeType":"Function","startLoc":164,"text":"def _assert_indexes_invariants_checks(indexes, possible_coord_variables, dims):\n assert isinstance(indexes, dict), indexes\n assert all(isinstance(v, pd.Index) for v in indexes.values()), {\n k: type(v) for k, v in indexes.items()\n }\n\n index_vars = {\n k for k, v in possible_coord_variables.items() if isinstance(v, IndexVariable)\n }\n assert indexes.keys() <= index_vars, (set(indexes), index_vars)\n\n # Note: when we support non-default indexes, these checks should be opt-in\n # only!\n defaults = default_indexes(possible_coord_variables, dims)\n assert indexes.keys() == defaults.keys(), (set(indexes), set(defaults))\n assert all(v.equals(defaults[k]) for k, v in indexes.items()), (indexes, defaults)"},{"col":0,"comment":"null","endLoc":195,"header":"def _assert_variable_invariants(var: Variable, name: Hashable = None)","id":3150,"name":"_assert_variable_invariants","nodeType":"Function","startLoc":182,"text":"def _assert_variable_invariants(var: Variable, name: Hashable = None):\n if name is None:\n name_or_empty: tuple = ()\n else:\n name_or_empty = (name,)\n assert isinstance(var._dims, tuple), name_or_empty + (var._dims,)\n assert len(var._dims) == len(var._data.shape), name_or_empty + (\n var._dims,\n var._data.shape,\n )\n assert isinstance(var._encoding, (type(None), dict)), name_or_empty + (\n var._encoding,\n )\n assert isinstance(var._attrs, (type(None), dict)), name_or_empty + (var._attrs,)"},{"col":0,"comment":"null","endLoc":215,"header":"def _assert_dataarray_invariants(da: 
DataArray)","id":3151,"name":"_assert_dataarray_invariants","nodeType":"Function","startLoc":198,"text":"def _assert_dataarray_invariants(da: DataArray):\n assert isinstance(da._variable, Variable), da._variable\n _assert_variable_invariants(da._variable)\n\n assert isinstance(da._coords, dict), da._coords\n assert all(isinstance(v, Variable) for v in da._coords.values()), da._coords\n assert all(set(v.dims) <= set(da.dims) for v in da._coords.values()), (\n da.dims,\n {k: v.dims for k, v in da._coords.items()},\n )\n assert all(\n isinstance(v, IndexVariable) for (k, v) in da._coords.items() if v.dims == (k,)\n ), {k: type(v) for k, v in da._coords.items()}\n for k, v in da._coords.items():\n _assert_variable_invariants(v, k)\n\n if da._indexes is not None:\n _assert_indexes_invariants_checks(da._indexes, da._coords, da.dims)"},{"col":0,"comment":"null","endLoc":252,"header":"def _assert_dataset_invariants(ds: Dataset)","id":3152,"name":"_assert_dataset_invariants","nodeType":"Function","startLoc":218,"text":"def _assert_dataset_invariants(ds: Dataset):\n assert isinstance(ds._variables, dict), type(ds._variables)\n assert all(isinstance(v, Variable) for v in ds._variables.values()), ds._variables\n for k, v in ds._variables.items():\n _assert_variable_invariants(v, k)\n\n assert isinstance(ds._coord_names, set), ds._coord_names\n assert ds._coord_names <= ds._variables.keys(), (\n ds._coord_names,\n set(ds._variables),\n )\n\n assert type(ds._dims) is dict, ds._dims\n assert all(isinstance(v, int) for v in ds._dims.values()), ds._dims\n var_dims: Set[Hashable] = set()\n for v in ds._variables.values():\n var_dims.update(v.dims)\n assert ds._dims.keys() == var_dims, (set(ds._dims), var_dims)\n assert all(\n ds._dims[k] == v.sizes[k] for v in ds._variables.values() for k in v.sizes\n ), (ds._dims, {k: v.sizes for k, v in ds._variables.items()})\n assert all(\n isinstance(v, IndexVariable)\n for (k, v) in ds._variables.items()\n if v.dims == (k,)\n ), {k: type(v) for k, v in ds._variables.items() if v.dims == (k,)}\n assert all(v.dims == (k,) for (k, v) in ds._variables.items() if k in ds._dims), {\n k: v.dims for k, v in ds._variables.items() if k in ds._dims\n }\n\n if ds._indexes is not None:\n _assert_indexes_invariants_checks(ds._indexes, ds._variables, ds._dims)\n\n assert isinstance(ds._encoding, (type(None), dict))\n assert isinstance(ds._attrs, (type(None), dict))"},{"className":"ReturnItem","col":0,"comment":"null","endLoc":130,"id":3153,"nodeType":"Class","startLoc":128,"text":"class ReturnItem:\n def __getitem__(self, key):\n return key"},{"col":4,"comment":"null","endLoc":130,"header":"def __getitem__(self, key)","id":3154,"name":"__getitem__","nodeType":"Function","startLoc":129,"text":"def __getitem__(self, key):\n return key"},{"className":"IndexerMaker","col":0,"comment":"null","endLoc":140,"id":3155,"nodeType":"Class","startLoc":133,"text":"class IndexerMaker:\n def __init__(self, indexer_cls):\n self._indexer_cls = indexer_cls\n\n def __getitem__(self, key):\n if not isinstance(key, tuple):\n key = (key,)\n return self._indexer_cls(key)"},{"col":4,"comment":"null","endLoc":135,"header":"def __init__(self, indexer_cls)","id":3156,"name":"__init__","nodeType":"Function","startLoc":134,"text":"def __init__(self, indexer_cls):\n self._indexer_cls = indexer_cls"},{"col":4,"comment":"null","endLoc":140,"header":"def __getitem__(self, key)","id":3157,"name":"__getitem__","nodeType":"Function","startLoc":137,"text":"def __getitem__(self, key):\n if not isinstance(key, tuple):\n 
key = (key,)\n return self._indexer_cls(key)"},{"attributeType":"null","col":8,"comment":"null","endLoc":135,"id":3158,"name":"_indexer_cls","nodeType":"Attribute","startLoc":135,"text":"self._indexer_cls"},{"col":0,"comment":"null","endLoc":48,"header":"def _importorskip(modname, minversion=None)","id":3159,"name":"_importorskip","nodeType":"Function","startLoc":38,"text":"def _importorskip(modname, minversion=None):\n try:\n mod = importlib.import_module(modname)\n has = True\n if minversion is not None:\n if LooseVersion(mod.__version__) < LooseVersion(minversion):\n raise ImportError(\"Minimum version not satisfied\")\n except ImportError:\n has = False\n func = pytest.mark.skipif(not has, reason=\"requires {}\".format(modname))\n return has, func"},{"col":0,"comment":"null","endLoc":55,"header":"def LooseVersion(vstring)","id":3160,"name":"LooseVersion","nodeType":"Function","startLoc":51,"text":"def LooseVersion(vstring):\n # Our development version is something like '0.10.9+aac7bfc'\n # This function just ignored the git commit id.\n vstring = vstring.split(\"+\")[0]\n return version.LooseVersion(vstring)"},{"col":0,"comment":"Validate that an xarray object satisfies its own internal invariants.\n\n This exists for the benefit of xarray's own test suite, but may be useful\n in external projects if they (ill-advisedly) create objects using xarray's\n private APIs.\n ","endLoc":273,"header":"def _assert_internal_invariants(xarray_obj: Union[DataArray, Dataset, Variable],)","id":3161,"name":"_assert_internal_invariants","nodeType":"Function","startLoc":255,"text":"def _assert_internal_invariants(xarray_obj: Union[DataArray, Dataset, Variable],):\n \"\"\"Validate that an xarray object satisfies its own internal invariants.\n\n This exists for the benefit of xarray's own test suite, but may be useful\n in external projects if they (ill-advisedly) create objects using xarray's\n private APIs.\n \"\"\"\n if isinstance(xarray_obj, Variable):\n _assert_variable_invariants(xarray_obj)\n elif isinstance(xarray_obj, DataArray):\n _assert_dataarray_invariants(xarray_obj)\n elif isinstance(xarray_obj, Dataset):\n _assert_dataset_invariants(xarray_obj)\n else:\n raise TypeError(\n \"{} is not a supported type for xarray invariant checks\".format(\n type(xarray_obj)\n )\n )"},{"col":0,"comment":"null","endLoc":113,"header":"@contextmanager\ndef raises_regex(error, pattern)","id":3162,"name":"raises_regex","nodeType":"Function","startLoc":104,"text":"@contextmanager\ndef raises_regex(error, pattern):\n __tracebackhide__ = True\n with pytest.raises(error) as excinfo:\n yield\n message = str(excinfo.value)\n if not re.search(pattern, message):\n raise AssertionError(\n \"exception %r did not match pattern %r\" % (excinfo.value, pattern)\n )"},{"attributeType":"null","col":16,"comment":"null","endLoc":4,"id":3163,"name":"np","nodeType":"Attribute","startLoc":4,"text":"np"},{"attributeType":"null","col":17,"comment":"null","endLoc":5,"id":3164,"name":"pd","nodeType":"Attribute","startLoc":5,"text":"pd"},{"col":0,"comment":"","endLoc":1,"header":"testing.py#","id":3165,"name":"","nodeType":"Function","startLoc":1,"text":"\"\"\"Testing functions exposed to the user API\"\"\""},{"col":0,"comment":"Given an ndarray, return the base object which holds its memory, or the\n object itself.\n ","endLoc":153,"header":"def source_ndarray(array)","id":3166,"name":"source_ndarray","nodeType":"Function","startLoc":143,"text":"def source_ndarray(array):\n \"\"\"Given an ndarray, return the base object which holds its 
memory, or the\n object itself.\n \"\"\"\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", \"DatetimeIndex.base\")\n warnings.filterwarnings(\"ignore\", \"TimedeltaIndex.base\")\n base = getattr(array, \"base\", np.asarray(array).base)\n if base is None:\n base = array\n return base"},{"col":0,"comment":"null","endLoc":163,"header":"def assert_equal(a, b)","id":3167,"name":"assert_equal","nodeType":"Function","startLoc":160,"text":"def assert_equal(a, b):\n xarray.testing.assert_equal(a, b)\n xarray.testing._assert_internal_invariants(a)\n xarray.testing._assert_internal_invariants(b)"},{"col":0,"comment":"null","endLoc":169,"header":"def assert_identical(a, b)","id":3168,"name":"assert_identical","nodeType":"Function","startLoc":166,"text":"def assert_identical(a, b):\n xarray.testing.assert_identical(a, b)\n xarray.testing._assert_internal_invariants(a)\n xarray.testing._assert_internal_invariants(b)"},{"col":0,"comment":"null","endLoc":175,"header":"def assert_allclose(a, b, **kwargs)","id":3169,"name":"assert_allclose","nodeType":"Function","startLoc":172,"text":"def assert_allclose(a, b, **kwargs):\n xarray.testing.assert_allclose(a, b, **kwargs)\n xarray.testing._assert_internal_invariants(a)\n xarray.testing._assert_internal_invariants(b)"},{"attributeType":"null","col":16,"comment":"null","endLoc":9,"id":3170,"name":"np","nodeType":"Attribute","startLoc":9,"text":"np"},{"attributeType":"null","col":25,"comment":"null","endLoc":23,"id":3171,"name":"mpl","nodeType":"Attribute","startLoc":23,"text":"mpl"},{"attributeType":"null","col":0,"comment":"null","endLoc":32,"id":3172,"name":"arm_xfail","nodeType":"Attribute","startLoc":32,"text":"arm_xfail"},{"attributeType":"null","col":0,"comment":"null","endLoc":58,"id":3173,"name":"has_matplotlib","nodeType":"Attribute","startLoc":58,"text":"has_matplotlib"},{"attributeType":"null","col":16,"comment":"null","endLoc":58,"id":3174,"name":"requires_matplotlib","nodeType":"Attribute","startLoc":58,"text":"requires_matplotlib"},{"attributeType":"null","col":0,"comment":"null","endLoc":59,"id":3175,"name":"has_scipy","nodeType":"Attribute","startLoc":59,"text":"has_scipy"},{"attributeType":"null","col":11,"comment":"null","endLoc":59,"id":3176,"name":"requires_scipy","nodeType":"Attribute","startLoc":59,"text":"requires_scipy"},{"attributeType":"null","col":0,"comment":"null","endLoc":60,"id":3177,"name":"has_pydap","nodeType":"Attribute","startLoc":60,"text":"has_pydap"},{"attributeType":"null","col":11,"comment":"null","endLoc":60,"id":3178,"name":"requires_pydap","nodeType":"Attribute","startLoc":60,"text":"requires_pydap"},{"attributeType":"null","col":0,"comment":"null","endLoc":61,"id":3179,"name":"has_netCDF4","nodeType":"Attribute","startLoc":61,"text":"has_netCDF4"},{"attributeType":"null","col":13,"comment":"null","endLoc":61,"id":3180,"name":"requires_netCDF4","nodeType":"Attribute","startLoc":61,"text":"requires_netCDF4"},{"attributeType":"null","col":0,"comment":"null","endLoc":62,"id":3181,"name":"has_h5netcdf","nodeType":"Attribute","startLoc":62,"text":"has_h5netcdf"},{"attributeType":"null","col":14,"comment":"null","endLoc":62,"id":3182,"name":"requires_h5netcdf","nodeType":"Attribute","startLoc":62,"text":"requires_h5netcdf"},{"attributeType":"null","col":0,"comment":"null","endLoc":63,"id":3183,"name":"has_pynio","nodeType":"Attribute","startLoc":63,"text":"has_pynio"},{"attributeType":"null","col":11,"comment":"null","endLoc":63,"id":3184,"name":"requires_pynio","nodeType":"Attribute","startLoc":63,"text
":"requires_pynio"},{"attributeType":"null","col":0,"comment":"null","endLoc":64,"id":3185,"name":"has_pseudonetcdf","nodeType":"Attribute","startLoc":64,"text":"has_pseudonetcdf"},{"attributeType":"null","col":18,"comment":"null","endLoc":64,"id":3186,"name":"requires_pseudonetcdf","nodeType":"Attribute","startLoc":64,"text":"requires_pseudonetcdf"},{"attributeType":"null","col":0,"comment":"null","endLoc":65,"id":3187,"name":"has_cftime","nodeType":"Attribute","startLoc":65,"text":"has_cftime"},{"attributeType":"null","col":12,"comment":"null","endLoc":65,"id":3188,"name":"requires_cftime","nodeType":"Attribute","startLoc":65,"text":"requires_cftime"},{"attributeType":"null","col":0,"comment":"null","endLoc":66,"id":3189,"name":"has_dask","nodeType":"Attribute","startLoc":66,"text":"has_dask"},{"attributeType":"null","col":10,"comment":"null","endLoc":66,"id":3190,"name":"requires_dask","nodeType":"Attribute","startLoc":66,"text":"requires_dask"},{"attributeType":"null","col":0,"comment":"null","endLoc":67,"id":3191,"name":"has_bottleneck","nodeType":"Attribute","startLoc":67,"text":"has_bottleneck"},{"attributeType":"null","col":16,"comment":"null","endLoc":67,"id":3192,"name":"requires_bottleneck","nodeType":"Attribute","startLoc":67,"text":"requires_bottleneck"},{"attributeType":"null","col":0,"comment":"null","endLoc":68,"id":3193,"name":"has_nc_time_axis","nodeType":"Attribute","startLoc":68,"text":"has_nc_time_axis"},{"attributeType":"null","col":18,"comment":"null","endLoc":68,"id":3194,"name":"requires_nc_time_axis","nodeType":"Attribute","startLoc":68,"text":"requires_nc_time_axis"},{"attributeType":"null","col":0,"comment":"null","endLoc":69,"id":3195,"name":"has_rasterio","nodeType":"Attribute","startLoc":69,"text":"has_rasterio"},{"attributeType":"null","col":14,"comment":"null","endLoc":69,"id":3196,"name":"requires_rasterio","nodeType":"Attribute","startLoc":69,"text":"requires_rasterio"},{"attributeType":"null","col":0,"comment":"null","endLoc":70,"id":3197,"name":"has_zarr","nodeType":"Attribute","startLoc":70,"text":"has_zarr"},{"attributeType":"null","col":10,"comment":"null","endLoc":70,"id":3198,"name":"requires_zarr","nodeType":"Attribute","startLoc":70,"text":"requires_zarr"},{"attributeType":"null","col":0,"comment":"null","endLoc":71,"id":3199,"name":"has_iris","nodeType":"Attribute","startLoc":71,"text":"has_iris"},{"attributeType":"null","col":10,"comment":"null","endLoc":71,"id":3200,"name":"requires_iris","nodeType":"Attribute","startLoc":71,"text":"requires_iris"},{"attributeType":"null","col":0,"comment":"null","endLoc":72,"id":3201,"name":"has_cfgrib","nodeType":"Attribute","startLoc":72,"text":"has_cfgrib"},{"attributeType":"null","col":12,"comment":"null","endLoc":72,"id":3202,"name":"requires_cfgrib","nodeType":"Attribute","startLoc":72,"text":"requires_cfgrib"},{"attributeType":"null","col":0,"comment":"null","endLoc":73,"id":3203,"name":"has_numbagg","nodeType":"Attribute","startLoc":73,"text":"has_numbagg"},{"attributeType":"null","col":13,"comment":"null","endLoc":73,"id":3204,"name":"requires_numbagg","nodeType":"Attribute","startLoc":73,"text":"requires_numbagg"},{"attributeType":"null","col":0,"comment":"null","endLoc":74,"id":3205,"name":"has_sparse","nodeType":"Attribute","startLoc":74,"text":"has_sparse"},{"attributeType":"null","col":12,"comment":"null","endLoc":74,"id":3206,"name":"requires_sparse","nodeType":"Attribute","startLoc":74,"text":"requires_sparse"},{"attributeType":"null","col":0,"comment":"null","endLoc":77,"id":3207,"name":"has_s
cipy_or_netCDF4","nodeType":"Attribute","startLoc":77,"text":"has_scipy_or_netCDF4"},{"attributeType":"null","col":0,"comment":"null","endLoc":78,"id":3208,"name":"requires_scipy_or_netCDF4","nodeType":"Attribute","startLoc":78,"text":"requires_scipy_or_netCDF4"},{"attributeType":"null","col":0,"comment":"null","endLoc":81,"id":3209,"name":"has_cftime_or_netCDF4","nodeType":"Attribute","startLoc":81,"text":"has_cftime_or_netCDF4"},{"attributeType":"null","col":0,"comment":"null","endLoc":82,"id":3210,"name":"requires_cftime_or_netCDF4","nodeType":"Attribute","startLoc":82,"text":"requires_cftime_or_netCDF4"},{"attributeType":"null","col":4,"comment":"null","endLoc":87,"id":3211,"name":"has_seaborn","nodeType":"Attribute","startLoc":87,"text":"has_seaborn"},{"attributeType":"null","col":4,"comment":"null","endLoc":89,"id":3212,"name":"has_seaborn","nodeType":"Attribute","startLoc":89,"text":"has_seaborn"},{"attributeType":"null","col":0,"comment":"null","endLoc":90,"id":3213,"name":"requires_seaborn","nodeType":"Attribute","startLoc":90,"text":"requires_seaborn"},{"attributeType":"null","col":0,"comment":"null","endLoc":100,"id":3214,"name":"flaky","nodeType":"Attribute","startLoc":100,"text":"flaky"},{"attributeType":"null","col":0,"comment":"null","endLoc":101,"id":3215,"name":"network","nodeType":"Attribute","startLoc":101,"text":"network"},{"col":0,"comment":"","endLoc":1,"header":"__init__.py#","id":3216,"name":"","nodeType":"Function","startLoc":1,"text":"try:\n import matplotlib as mpl\n\n # Order of imports is important here.\n # Using a different backend makes Travis CI work\n mpl.use(\"Agg\")\nexcept ImportError:\n pass\n\narm_xfail = pytest.mark.xfail(\n platform.machine() == \"aarch64\" or \"arm\" in platform.machine(),\n reason=\"expected failure on ARM\",\n)\n\nhas_matplotlib, requires_matplotlib = _importorskip(\"matplotlib\")\n\nhas_scipy, requires_scipy = _importorskip(\"scipy\")\n\nhas_pydap, requires_pydap = _importorskip(\"pydap.client\")\n\nhas_netCDF4, requires_netCDF4 = _importorskip(\"netCDF4\")\n\nhas_h5netcdf, requires_h5netcdf = _importorskip(\"h5netcdf\")\n\nhas_pynio, requires_pynio = _importorskip(\"Nio\")\n\nhas_pseudonetcdf, requires_pseudonetcdf = _importorskip(\"PseudoNetCDF\")\n\nhas_cftime, requires_cftime = _importorskip(\"cftime\")\n\nhas_dask, requires_dask = _importorskip(\"dask\")\n\nhas_bottleneck, requires_bottleneck = _importorskip(\"bottleneck\")\n\nhas_nc_time_axis, requires_nc_time_axis = _importorskip(\"nc_time_axis\")\n\nhas_rasterio, requires_rasterio = _importorskip(\"rasterio\")\n\nhas_zarr, requires_zarr = _importorskip(\"zarr\")\n\nhas_iris, requires_iris = _importorskip(\"iris\")\n\nhas_cfgrib, requires_cfgrib = _importorskip(\"cfgrib\")\n\nhas_numbagg, requires_numbagg = _importorskip(\"numbagg\")\n\nhas_sparse, requires_sparse = _importorskip(\"sparse\")\n\nhas_scipy_or_netCDF4 = has_scipy or has_netCDF4\n\nrequires_scipy_or_netCDF4 = pytest.mark.skipif(\n not has_scipy_or_netCDF4, reason=\"requires scipy or netCDF4\"\n)\n\nhas_cftime_or_netCDF4 = has_cftime or has_netCDF4\n\nrequires_cftime_or_netCDF4 = pytest.mark.skipif(\n not has_cftime_or_netCDF4, reason=\"requires cftime or netCDF4\"\n)\n\ntry:\n import_seaborn()\n has_seaborn = True\nexcept ImportError:\n has_seaborn = False\n\nrequires_seaborn = pytest.mark.skipif(not has_seaborn, reason=\"requires seaborn\")\n\nset_options(warn_for_unclosed_files=True)\n\nif has_dask:\n import dask\n\n dask.config.set(scheduler=\"single-threaded\")\n\nflaky = pytest.mark.flaky\n\nnetwork = 
pytest.mark.network"}]}
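
The following usage sketches illustrate some of the APIs captured in the graph above; all of them are illustrative only, with invented variable names such as `ds` and `da`.

The graph captures the tail of Dataset.map_blocks, which forwards to xarray.core.parallel.map_blocks and applies a user function to every dask block as an in-memory xarray object. A minimal sketch, assuming dask is installed; the dataset and function names are made up:

    import numpy as np
    import xarray as xr

    # a small chunked Dataset; each block is handed to the function as a
    # fully loaded Dataset
    ds = xr.Dataset({"t2m": ("time", np.arange(10.0))}).chunk({"time": 5})

    def demean(block):
        # must return an object with the same dimensions and sizes
        return block - block.mean()

    lazy = ds.map_blocks(demean)   # builds the dask graph lazily
    print(lazy.compute())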
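
apply_variable_ufunc and _apply_blockwise above implement the dask="parallelized" path of xarray.apply_ufunc, wrapping the user function in dask.array.blockwise. A sketch of how that path is reached from user code (array and function names are made up; output_dtypes is mandatory here, and core dimensions must not be split across chunks):

    import numpy as np
    import xarray as xr

    # chunk along "x" only, so the core dimension "time" stays in one chunk
    da = xr.DataArray(np.random.rand(4, 6), dims=("x", "time")).chunk({"x": 2})

    def range_along_core_dim(arr):
        # apply_ufunc moves core dimensions to the trailing axes
        return arr[..., -1] - arr[..., 0]

    out = xr.apply_ufunc(
        range_along_core_dim,
        da,
        input_core_dims=[["time"]],   # "time" is consumed by the function
        dask="parallelized",          # dispatches through _apply_blockwise
        output_dtypes=[da.dtype],     # required on this code path
    )
    print(out.compute())              # result has dims ("x",)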
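
_LocIndexer.__getitem__ in the graph shows that Dataset.loc accepts only dict-like keys and simply forwards to Dataset.sel. A tiny example with invented data:

    import xarray as xr

    ds = xr.Dataset({"t": ("x", [10, 20, 30])}, coords={"x": ["a", "b", "c"]})
    assert ds.loc[{"x": "b"}].identical(ds.sel(x="b"))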
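
The offset classes (QuarterBegin, YearBegin, Hour, Minute, Second) together with the _FREQUENCIES and _PATTERN tables back frequency strings for cftime calendars. A hedged sketch of how they are typically exercised; it assumes the optional cftime dependency is installed and that to_offset is importable from xarray.coding.cftime_offsets:

    import xarray as xr
    from xarray.coding.cftime_offsets import to_offset

    # "2QS-APR" is parsed via _PATTERN into partial(QuarterBegin, month=4) with n=2
    offset = to_offset("2QS-APR")
    print(offset)

    # the same offsets drive cftime_range for non-standard calendars;
    # the start date is rolled forward to the nearest quarter begin
    times = xr.cftime_range("2000-02-15", periods=3, freq="QS-APR", calendar="noleap")
    print(times)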
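
The xarray.testing helpers captured above differ mainly in strictness: assert_equal compares values, dimensions and coordinates; assert_identical additionally compares names and attributes; assert_allclose compares values up to rtol/atol. A short sketch with invented arrays:

    import xarray as xr
    import xarray.testing as xrt

    a = xr.DataArray([1.0, 2.0], dims="x", name="a", attrs={"units": "m"})
    b = xr.DataArray([1.0, 2.0], dims="x", name="a")   # same values, no attrs
    c = a + 1e-9                                       # tiny numerical difference

    xrt.assert_equal(a, b)                 # passes: values/dims/coords match
    xrt.assert_allclose(a, c, rtol=1e-6)   # passes: within tolerance
    try:
        xrt.assert_identical(a, b)         # fails: attributes differ
    except AssertionError:
        pass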
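
_importorskip and the requires_* attributes in xarray/tests/__init__.py generate pytest skip markers for optional dependencies. Roughly, a test module uses them as below (hypothetical test function; assumes the xarray test package is importable in the environment):

    import numpy as np
    import xarray as xr
    from xarray.tests import requires_dask   # produced by _importorskip("dask")

    @requires_dask
    def test_chunked_mean():
        # skipped automatically when dask is not installed
        da = xr.DataArray(np.arange(6.0), dims="x").chunk(3)
        assert float(da.mean()) == 2.5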