Dataset schema (column | dtype | observed range):

| column | dtype | observed range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 (nullable) | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string (nullable) | length 24 |
| max_stars_repo_stars_event_max_datetime | string (nullable) | length 24 |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 (nullable) | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string (nullable) | length 24 |
| max_issues_repo_issues_event_max_datetime | string (nullable) | length 24 |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 (nullable) | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string (nullable) | length 24 |
| max_forks_repo_forks_event_max_datetime | string (nullable) | length 24 |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |

hexsha: 3614d33d41608cc98e11c32c8f6332442dec7bc8 | size: 44,167 | ext: py | lang: Python
max_stars:  path statsmodels/tsa/tests/results/arima111_css_results.py | repo yarikoptic/statsmodels @ f990cb1a1ef0c9883c9394444e6f9d027efabec6 | licenses ["BSD-3-Clause"] | stars 34 | events 2018-07-13T11:30:46.000Z to 2022-01-05T13:48:10.000Z
max_issues: path venv/lib/python3.6/site-packages/statsmodels/tsa/tests/results/arima111_css_results.py | repo HeyWeiPan/vnpy_crypto @ 844381797a475a01c05a4e162592a5a6e3a48032 | licenses ["MIT"] | issues 6 | events 2015-08-28T16:59:03.000Z to 2019-04-12T22:29:01.000Z
max_forks:  path venv/lib/python3.6/site-packages/statsmodels/tsa/tests/results/arima111_css_results.py | repo HeyWeiPan/vnpy_crypto @ 844381797a475a01c05a4e162592a5a6e3a48032 | licenses ["MIT"] | forks 28 | events 2015-04-01T20:02:25.000Z to 2021-07-03T00:09:28.000Z
content:
import numpy as np
llf = np.array([-242.06033399744])
nobs = np.array([ 202])
k = np.array([ 4])
k_exog = np.array([ 1])
sigma = np.array([ .80201496146073])
chi2 = np.array([ 348.43324197088])
df_model = np.array([ 2])
k_ar = np.array([ 1])
k_ma = np.array([ 1])
params = np.array([ .82960638524364,
.93479332833705,
-.75728342544279,
.64322799840686])
cov_params = np.array([ .14317811930738,
-.01646077810033,
.01510986837498,
-.00280799533479,
-.01646077810033,
.00321032468661,
-.00353027620719,
.00097645385252,
.01510986837498,
-.00353027620719,
.00484312817753,
-.00112050648944,
-.00280799533479,
.00097645385252,
-.00112050648944,
.0007715609499]).reshape(4,4)
xb = np.array([ .82960641384125,
.82960641384125,
.697261095047,
.61113905906677,
.51607495546341,
.47362637519836,
.41342103481293,
.40238001942635,
.37454023957253,
.33222004771233,
.32514902949333,
.31093680858612,
.30019253492355,
.31159669160843,
.29182952642441,
.30349296331406,
.29457464814186,
.28427124023438,
.30664679408073,
.29696446657181,
.31270903348923,
.29268020391464,
.28816330432892,
.29006817936897,
.30216124653816,
.30066826939583,
.31728908419609,
.30679926276207,
.3272570669651,
.37292611598969,
.36668366193771,
.40278288722038,
.36799272894859,
.36827209591866,
.38623574376106,
.39983862638474,
.42789059877396,
.43138384819031,
.46953064203262,
.48066720366478,
.48910140991211,
.53098994493484,
.54496067762375,
.55554050207138,
.58130383491516,
.60081332921982,
.58008605241776,
.58214038610458,
.58369606733322,
.53162068128586,
.54543834924698,
.52040082216263,
.50143963098526,
.48708060383797,
.47620677947998,
.48572361469269,
.51068127155304,
.61833620071411,
.61110657453537,
.76539021730423,
.84672522544861,
.92606955766678,
.96840506792068,
1.0892199277878,
1.1097067594528,
1.0187155008316,
1.0030621290207,
.97345739603043,
.95103752613068,
.82755368947983,
.84054774045944,
.85038793087006,
.84008830785751,
.92104357481003,
.89359468221664,
.87280809879303,
.91032028198242,
.95647835731506,
1.0624366998672,
1.1426770687103,
1.1679404973984,
1.311328291893,
1.473167181015,
1.5602221488953,
1.7326545715332,
1.8809853792191,
1.7803012132645,
1.7750589847565,
1.8420933485031,
1.7863517999649,
1.8328944444656,
1.7793855667114,
1.5791050195694,
1.3564316034317,
1.5250737667084,
1.3155146837234,
1.014811873436,
.98235523700714,
.97552710771561,
.97035628557205,
1.0196926593781,
1.0393049716949,
.98315137624741,
.97613000869751,
.89980864524841,
.96626943349838,
.91009211540222,
.88530200719833,
.97303456068039,
.57794612646103,
.63377332687378,
.65829831361771,
.76562696695328,
.86465454101563,
.90414637327194,
.95180231332779,
.95238989591599,
.98833626508713,
1.0333099365234,
1.0851185321808,
1.1066001653671,
1.2293750047684,
1.233595252037,
1.1480363607407,
1.2962552309036,
1.2842413187027,
1.3106474876404,
1.5614050626755,
1.4672855138779,
1.2362524271011,
1.1855486631393,
1.1294020414352,
1.1046353578568,
1.0858771800995,
1.0716745853424,
1.0786685943604,
1.0662157535553,
1.0390332937241,
.96519494056702,
.9802839756012,
.92070508003235,
.91108840703964,
.95705932378769,
.95637094974518,
.97360169887543,
1.0221517086029,
.9701629281044,
.94854199886322,
.98542231321335,
1.048855304718,
1.0081344842911,
1.0305507183075,
1.0475262403488,
.93612504005432,
.85176283121109,
.89438372850418,
.820152759552,
.71068543195724,
.76979607343674,
.76130604743958,
.77262878417969,
.85220617055893,
.84146595001221,
.93983960151672,
.97883212566376,
1.0793634653091,
1.1909983158112,
1.1690304279327,
1.2411522865295,
1.1360056400299,
1.0918840169907,
.9164656996727,
.76586949825287,
.918093085289,
.87360894680023,
.92867678403854,
1.00588285923,
.92233866453171,
.84132260084152,
.90422683954239,
.9873673915863,
.99707210063934,
1.1109310388565,
1.1971517801285,
1.138188958168,
1.2710473537445,
1.1763968467712,
1.7437561750412,
1.4101150035858,
1.3527159690857,
1.4335050582886,
.99765706062317,
1.1067585945129,
1.3086627721786,
1.2968333959579,
1.3547962903976,
1.6768488883972,
1.5905654430389,
2.0774590969086,
1.3218278884888,
.21813294291496,
.30750840902328,
.60612773895264])
y = np.array([np.nan,
29.809606552124,
29.847261428833,
29.961139678955,
29.886075973511,
30.013628005981,
29.96342086792,
30.152379989624,
30.214540481567,
30.142219543457,
30.245149612427,
30.290935516357,
30.3401927948,
30.521595001221,
30.511829376221,
30.683492660522,
30.734575271606,
30.764270782471,
30.996646881104,
31.046964645386,
31.252710342407,
31.242681503296,
31.308164596558,
31.410068511963,
31.582162857056,
31.680667877197,
31.897289276123,
31.956798553467,
32.207256317139,
32.652923583984,
32.8166847229,
33.252780914307,
33.267993927002,
33.468269348145,
33.786235809326,
34.099838256836,
34.527889251709,
34.831386566162,
35.369533538818,
35.780666351318,
36.189102172852,
36.830989837646,
37.344959259033,
37.855541229248,
38.481304168701,
39.100814819336,
39.480087280273,
39.9821434021,
40.483695983887,
40.631618499756,
41.145435333252,
41.420402526855,
41.701438903809,
41.987079620361,
42.276206970215,
42.685726165771,
43.210681915283,
44.318336486816,
44.811107635498,
46.365386962891,
47.646724700928,
49.026069641113,
50.268405914307,
52.089218139648,
53.409706115723,
54.018714904785,
55.003063201904,
55.873458862305,
56.751037597656,
56.927551269531,
57.840549468994,
58.750389099121,
59.540088653564,
60.921043395996,
61.693592071533,
62.472805023193,
63.610321044922,
64.856483459473,
66.562438964844,
68.24267578125,
69.667938232422,
71.911323547363,
74.473167419434,
76.760215759277,
79.732650756836,
82.780990600586,
84.380302429199,
86.475059509277,
89.042091369629,
90.886352539063,
93.332893371582,
95.179389953613,
95.979103088379,
96.356430053711,
99.02507019043,
99.415512084961,
98.914810180664,
99.782356262207,
100.7755279541,
101.770362854,
103.11968994141,
104.33930969238,
105.083152771,
106.07612609863,
106.59980773926,
107.96627044678,
108.61009216309,
109.38529968262,
110.87303924561,
109.27794647217,
110.13377380371,
110.85829162598,
112.16562652588,
113.56465148926,
114.70414733887,
115.95180511475,
116.95239257813,
118.188331604,
119.53330993652,
120.98512268066,
122.30659484863,
124.3293762207,
125.73359680176,
126.54803466797,
128.79624938965,
130.18423461914,
131.81065368652,
134.96139526367,
136.16728210449,
136.33625793457,
137.38554382324,
138.32939147949,
139.40463256836,
140.48587036133,
141.57167053223,
142.77867126465,
143.86622619629,
144.83903503418,
145.46519470215,
146.58029174805,
147.220703125,
148.11108398438,
149.35705566406,
150.35636901855,
151.47360229492,
152.82215881348,
153.5701751709,
154.44854736328,
155.68542480469,
157.14886474609,
158.00813293457,
159.23054504395,
160.44752502441,
160.83612060547,
161.25175476074,
162.39437866211,
162.82015991211,
162.91067504883,
163.96978759766,
164.66130065918,
165.47262573242,
166.75219726563,
167.54145812988,
169.03984069824,
170.27883911133,
171.9793548584,
173.89099121094,
175.06903076172,
176.84115600586,
177.5359954834,
178.49188232422,
178.5164642334,
178.46586608887,
180.21809387207,
180.8736114502,
182.12867736816,
183.60589599609,
184.12232971191,
184.54132080078,
185.80421447754,
187.28736877441,
188.39706420898,
190.2109375,
191.99716186523,
192.93818664551,
195.07104492188,
195.8763885498,
200.94375610352,
200.81010437012,
202.05271911621,
204.13349914551,
202.89764404297,
204.68077087402,
207.22866821289,
208.63482666016,
210.48779296875,
214.17184448242,
215.58755493164,
220.68745422363,
218.21083068848,
212.39213562012,
212.978515625,
215.07511901855])
resid = np.array([np.nan,
-.6596063375473,
-.49726036190987,
-.5911386013031,
-.34607490897179,
-.46362805366516,
-.21342028677464,
-.31237986683846,
-.40454092621803,
-.22221945226192,
-.26514956355095,
-.2509354352951,
-.13019436597824,
-.30159646272659,
-.1318296790123,
-.24349159002304,
-.25457563996315,
-.07427024841309,
-.24664734303951,
-.10696394741535,
-.30270880460739,
-.22268049418926,
-.18816292285919,
-.13006833195686,
-.20216277241707,
-.10066751390696,
-.24728938937187,
-.07679972797632,
.07274255156517,
-.20292413234711,
.03331403434277,
-.35277983546257,
-.16799576580524,
-.06826904416084,
-.08623649924994,
.00015908146452,
-.12788754701614,
.06861615926027,
-.06953293830156,
-.08066567778587,
.11089706420898,
-.03098993562162,
-.04496069997549,
.04446176066995,
.01869462057948,
-.20081178843975,
-.08008606731892,
-.08214038610458,
-.38369914889336,
-.03162068501115,
-.24543529748917,
-.22040157020092,
-.20144037902355,
-.18708138167858,
-.07620526105165,
.01427639275789,
.48931872844696,
-.11833623051643,
.78889113664627,
.43461054563522,
.45327401161194,
.27393117547035,
.73159569501877,
.21077930927277,
-.40970605611801,
-.01871551014483,
-.10306061804295,
-.0734596773982,
-.65103828907013,
.0724478662014,
.05945380032063,
-.05038867890835,
.45991089940071,
-.12104434520006,
-.09359546005726,
.22719417512417,
.28968048095703,
.64352011680603,
.53756183385849,
.25732442736626,
.93205803632736,
1.0886732339859,
.72682982683182,
1.2397809028625,
1.1673469543457,
-.18098846077919,
.31969723105431,
.72494095563889,
.05790812522173,
.61364978551865,
.06710703670979,
-.77938556671143,
-.97910648584366,
1.1435683965683,
-.92507529258728,
-1.5155116319656,
-.11481033265591,
.01764474436641,
.02447287365794,
.32963913679123,
.18031190335751,
-.23930950462818,
.01684862375259,
-.37613153457642,
.40019443631172,
-.2662724852562,
-.11008904129267,
.51469951868057,
-2.1730391979218,
.22205695509911,
.06622361391783,
.54170626401901,
.53436845541,
.2353515625,
.29585054516792,
.04819770529866,
.24760706722736,
.31166675686836,
.36669155955315,
.21487690508366,
.79340130090714,
.17062658071518,
-.33359375596046,
.95196217298508,
.10373862832785,
.31576481461525,
1.589346408844,
-.26140204071999,
-1.0672763586044,
-.13626158237457,
-.18554861843586,
-.02939598634839,
-.00464448658749,
.01412893645465,
.1283223181963,
.02133745700121,
-.06621573865414,
-.33903631567955,
.13481116294861,
-.28028702735901,
-.02071117423475,
.28890857100487,
.04294065013528,
.14363515377045,
.32640132308006,
-.22214868664742,
-.0701690018177,
.25145494937897,
.41458681225777,
-.14886146783829,
.19186246395111,
.16944620013237,
-.54752624034882,
-.43612506985664,
.2482432872057,
-.39438369870186,
-.62015581130981,
.28931456804276,
-.06979911774397,
.03869699314237,
.4273681640625,
-.05220314115286,
.55854320526123,
.26015737652779,
.62115871906281,
.72063958644867,
.00899865385145,
.53098171949387,
-.44116449356079,
-.13600566983223,
-.89187180995941,
-.81647485494614,
.83413660526276,
-.21809615194798,
.32638800144196,
.47133237123489,
-.4058920443058,
-.42233863472939,
.35867437720299,
.49578228592873,
.11262346804142,
.70294010639191,
.58906590938568,
-.19715182483196,
.86181098222733,
-.37105345726013,
3.3236031532288,
-1.543759226799,
-.11011194437742,
.64728397130966,
-2.2335081100464,
.67635416984558,
1.2392344474792,
.10933646559715,
.49816474318504,
2.0072033405304,
-.17484994232655,
3.0224411487579,
-3.7984521389008,
-6.0368394851685,
.27887633442879,
1.4904805421829,
1.3098726272583])
yr = np.array([np.nan,
-.6596063375473,
-.49726036190987,
-.5911386013031,
-.34607490897179,
-.46362805366516,
-.21342028677464,
-.31237986683846,
-.40454092621803,
-.22221945226192,
-.26514956355095,
-.2509354352951,
-.13019436597824,
-.30159646272659,
-.1318296790123,
-.24349159002304,
-.25457563996315,
-.07427024841309,
-.24664734303951,
-.10696394741535,
-.30270880460739,
-.22268049418926,
-.18816292285919,
-.13006833195686,
-.20216277241707,
-.10066751390696,
-.24728938937187,
-.07679972797632,
.07274255156517,
-.20292413234711,
.03331403434277,
-.35277983546257,
-.16799576580524,
-.06826904416084,
-.08623649924994,
.00015908146452,
-.12788754701614,
.06861615926027,
-.06953293830156,
-.08066567778587,
.11089706420898,
-.03098993562162,
-.04496069997549,
.04446176066995,
.01869462057948,
-.20081178843975,
-.08008606731892,
-.08214038610458,
-.38369914889336,
-.03162068501115,
-.24543529748917,
-.22040157020092,
-.20144037902355,
-.18708138167858,
-.07620526105165,
.01427639275789,
.48931872844696,
-.11833623051643,
.78889113664627,
.43461054563522,
.45327401161194,
.27393117547035,
.73159569501877,
.21077930927277,
-.40970605611801,
-.01871551014483,
-.10306061804295,
-.0734596773982,
-.65103828907013,
.0724478662014,
.05945380032063,
-.05038867890835,
.45991089940071,
-.12104434520006,
-.09359546005726,
.22719417512417,
.28968048095703,
.64352011680603,
.53756183385849,
.25732442736626,
.93205803632736,
1.0886732339859,
.72682982683182,
1.2397809028625,
1.1673469543457,
-.18098846077919,
.31969723105431,
.72494095563889,
.05790812522173,
.61364978551865,
.06710703670979,
-.77938556671143,
-.97910648584366,
1.1435683965683,
-.92507529258728,
-1.5155116319656,
-.11481033265591,
.01764474436641,
.02447287365794,
.32963913679123,
.18031190335751,
-.23930950462818,
.01684862375259,
-.37613153457642,
.40019443631172,
-.2662724852562,
-.11008904129267,
.51469951868057,
-2.1730391979218,
.22205695509911,
.06622361391783,
.54170626401901,
.53436845541,
.2353515625,
.29585054516792,
.04819770529866,
.24760706722736,
.31166675686836,
.36669155955315,
.21487690508366,
.79340130090714,
.17062658071518,
-.33359375596046,
.95196217298508,
.10373862832785,
.31576481461525,
1.589346408844,
-.26140204071999,
-1.0672763586044,
-.13626158237457,
-.18554861843586,
-.02939598634839,
-.00464448658749,
.01412893645465,
.1283223181963,
.02133745700121,
-.06621573865414,
-.33903631567955,
.13481116294861,
-.28028702735901,
-.02071117423475,
.28890857100487,
.04294065013528,
.14363515377045,
.32640132308006,
-.22214868664742,
-.0701690018177,
.25145494937897,
.41458681225777,
-.14886146783829,
.19186246395111,
.16944620013237,
-.54752624034882,
-.43612506985664,
.2482432872057,
-.39438369870186,
-.62015581130981,
.28931456804276,
-.06979911774397,
.03869699314237,
.4273681640625,
-.05220314115286,
.55854320526123,
.26015737652779,
.62115871906281,
.72063958644867,
.00899865385145,
.53098171949387,
-.44116449356079,
-.13600566983223,
-.89187180995941,
-.81647485494614,
.83413660526276,
-.21809615194798,
.32638800144196,
.47133237123489,
-.4058920443058,
-.42233863472939,
.35867437720299,
.49578228592873,
.11262346804142,
.70294010639191,
.58906590938568,
-.19715182483196,
.86181098222733,
-.37105345726013,
3.3236031532288,
-1.543759226799,
-.11011194437742,
.64728397130966,
-2.2335081100464,
.67635416984558,
1.2392344474792,
.10933646559715,
.49816474318504,
2.0072033405304,
-.17484994232655,
3.0224411487579,
-3.7984521389008,
-6.0368394851685,
.27887633442879,
1.4904805421829,
1.3098726272583])
mse = np.array([ 1.0121052265167,
.66349595785141,
.65449619293213,
.64957880973816,
.64683443307877,
.64528465270996,
.64440369606018,
.64390099048615,
.64361357688904,
.64344894886017,
.64335465431213,
.64330065250397,
.64326965808868,
.64325189590454,
.64324170351028,
.6432358622551,
.64323252439499,
.64323055744171,
.64322948455811,
.64322882890701,
.64322847127914,
.64322829246521,
.64322817325592,
.64322811365128,
.64322805404663,
.64322805404663,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199,
.64322799444199])
stdp = np.array([ .82960641384125,
.82960641384125,
.697261095047,
.61113905906677,
.51607495546341,
.47362637519836,
.41342103481293,
.40238001942635,
.37454023957253,
.33222004771233,
.32514902949333,
.31093680858612,
.30019253492355,
.31159669160843,
.29182952642441,
.30349296331406,
.29457464814186,
.28427124023438,
.30664679408073,
.29696446657181,
.31270903348923,
.29268020391464,
.28816330432892,
.29006817936897,
.30216124653816,
.30066826939583,
.31728908419609,
.30679926276207,
.3272570669651,
.37292611598969,
.36668366193771,
.40278288722038,
.36799272894859,
.36827209591866,
.38623574376106,
.39983862638474,
.42789059877396,
.43138384819031,
.46953064203262,
.48066720366478,
.48910140991211,
.53098994493484,
.54496067762375,
.55554050207138,
.58130383491516,
.60081332921982,
.58008605241776,
.58214038610458,
.58369606733322,
.53162068128586,
.54543834924698,
.52040082216263,
.50143963098526,
.48708060383797,
.47620677947998,
.48572361469269,
.51068127155304,
.61833620071411,
.61110657453537,
.76539021730423,
.84672522544861,
.92606955766678,
.96840506792068,
1.0892199277878,
1.1097067594528,
1.0187155008316,
1.0030621290207,
.97345739603043,
.95103752613068,
.82755368947983,
.84054774045944,
.85038793087006,
.84008830785751,
.92104357481003,
.89359468221664,
.87280809879303,
.91032028198242,
.95647835731506,
1.0624366998672,
1.1426770687103,
1.1679404973984,
1.311328291893,
1.473167181015,
1.5602221488953,
1.7326545715332,
1.8809853792191,
1.7803012132645,
1.7750589847565,
1.8420933485031,
1.7863517999649,
1.8328944444656,
1.7793855667114,
1.5791050195694,
1.3564316034317,
1.5250737667084,
1.3155146837234,
1.014811873436,
.98235523700714,
.97552710771561,
.97035628557205,
1.0196926593781,
1.0393049716949,
.98315137624741,
.97613000869751,
.89980864524841,
.96626943349838,
.91009211540222,
.88530200719833,
.97303456068039,
.57794612646103,
.63377332687378,
.65829831361771,
.76562696695328,
.86465454101563,
.90414637327194,
.95180231332779,
.95238989591599,
.98833626508713,
1.0333099365234,
1.0851185321808,
1.1066001653671,
1.2293750047684,
1.233595252037,
1.1480363607407,
1.2962552309036,
1.2842413187027,
1.3106474876404,
1.5614050626755,
1.4672855138779,
1.2362524271011,
1.1855486631393,
1.1294020414352,
1.1046353578568,
1.0858771800995,
1.0716745853424,
1.0786685943604,
1.0662157535553,
1.0390332937241,
.96519494056702,
.9802839756012,
.92070508003235,
.91108840703964,
.95705932378769,
.95637094974518,
.97360169887543,
1.0221517086029,
.9701629281044,
.94854199886322,
.98542231321335,
1.048855304718,
1.0081344842911,
1.0305507183075,
1.0475262403488,
.93612504005432,
.85176283121109,
.89438372850418,
.820152759552,
.71068543195724,
.76979607343674,
.76130604743958,
.77262878417969,
.85220617055893,
.84146595001221,
.93983960151672,
.97883212566376,
1.0793634653091,
1.1909983158112,
1.1690304279327,
1.2411522865295,
1.1360056400299,
1.0918840169907,
.9164656996727,
.76586949825287,
.918093085289,
.87360894680023,
.92867678403854,
1.00588285923,
.92233866453171,
.84132260084152,
.90422683954239,
.9873673915863,
.99707210063934,
1.1109310388565,
1.1971517801285,
1.138188958168,
1.2710473537445,
1.1763968467712,
1.7437561750412,
1.4101150035858,
1.3527159690857,
1.4335050582886,
.99765706062317,
1.1067585945129,
1.3086627721786,
1.2968333959579,
1.3547962903976,
1.6768488883972,
1.5905654430389,
2.0774590969086,
1.3218278884888,
.21813294291496,
.30750840902328,
.60612773895264])
icstats = np.array([ 202,
np.nan,
-242.06033399744,
4,
492.12066799488,
505.35373878448])
class Bunch(dict):
def __init__(self, **kw):
dict.__init__(self, kw)
self.__dict__ = self
results = Bunch(llf=llf, nobs=nobs, k=k, k_exog=k_exog, sigma=sigma, chi2=chi2, df_model=df_model, k_ar=k_ar, k_ma=k_ma, params=params, cov_params=cov_params, xb=xb, y=y, resid=resid, yr=yr, mse=mse, stdp=stdp, icstats=icstats, )
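
# --- Editor's note (illustration only, not part of the original results file) ---
# The Bunch subclass above points the instance __dict__ at the dict itself, so
# every stored array is reachable both as a key and as an attribute. A minimal
# hypothetical usage sketch:
#
#     demo = Bunch(llf=llf, params=params)
#     assert demo.llf is demo["llf"]      # same object either way
#     assert demo.params.shape == (4,)    # the 4-element parameter vector above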
avg_line_length: 34.451638 | max_line_length: 229 | alphanum_fraction: 0.402042

hexsha: d65b4b8b488f7aceea6d7dc05230aa8a1bf929c9 | size: 3,014 | ext: py | lang: Python
max_stars:  path core/setup.py | repo tconbeer/dbt @ bf867f6aff79fd9dad98ed36ceecd4aa181fe106 | licenses ["Apache-2.0"] | stars null | events null to null
max_issues: path core/setup.py | repo tconbeer/dbt @ bf867f6aff79fd9dad98ed36ceecd4aa181fe106 | licenses ["Apache-2.0"] | issues null | events null to null
max_forks:  path core/setup.py | repo tconbeer/dbt @ bf867f6aff79fd9dad98ed36ceecd4aa181fe106 | licenses ["Apache-2.0"] | forks null | events null to null
content:
#!/usr/bin/env python
import os
import sys
if sys.version_info < (3, 6):
print('Error: dbt does not support this version of Python.')
print('Please upgrade to Python 3.6 or higher.')
sys.exit(1)
from setuptools import setup
try:
from setuptools import find_namespace_packages
except ImportError:
# the user has a downlevel version of setuptools.
print('Error: dbt requires setuptools v40.1.0 or higher.')
print('Please upgrade setuptools with "pip install --upgrade setuptools" '
'and try again')
sys.exit(1)
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
package_name = "dbt-core"
package_version = "0.20.0rc1"
description = """dbt (data build tool) is a command line tool that helps \
analysts and engineers transform data in their warehouse more effectively"""
setup(
name=package_name,
version=package_version,
description=description,
long_description=description,
author="Fishtown Analytics",
author_email="info@fishtownanalytics.com",
url="https://github.com/fishtown-analytics/dbt",
packages=find_namespace_packages(include=['dbt', 'dbt.*']),
package_data={
'dbt': [
'include/index.html',
'include/global_project/dbt_project.yml',
'include/global_project/docs/*.md',
'include/global_project/macros/*.sql',
'include/global_project/macros/**/*.sql',
'include/global_project/macros/**/**/*.sql',
'py.typed',
]
},
test_suite='test',
entry_points={
'console_scripts': [
'dbt = dbt.main:main',
],
},
scripts=[
'scripts/dbt',
],
install_requires=[
'Jinja2==2.11.3',
'PyYAML>=3.11',
'agate>=1.6,<1.6.2',
'colorama>=0.3.9,<0.4.5',
'dataclasses>=0.6,<0.9;python_version<"3.7"',
'hologram==0.0.14',
'isodate>=0.6,<0.7',
'json-rpc>=1.12,<2',
'logbook>=1.5,<1.6',
'mashumaro==2.5',
'minimal-snowplow-tracker==0.0.2',
'networkx>=2.3,<3',
'packaging~=20.9',
'sqlparse>=0.2.3,<0.4',
'tree-sitter==0.19.0',
'tree-sitter-jinja2==0.1.0a1',
'typing-extensions>=3.7.4,<3.8',
'werkzeug>=0.15,<2.0',
# the following are all to match snowflake-connector-python
'requests<3.0.0',
'idna>=2.5,<3',
'cffi>=1.9,<2.0.0',
],
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
python_requires=">=3.6.3",
)
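
# --- Editor's note (illustration only, not part of the original setup.py) ---
# The guard at the top relies on tuple comparison: sys.version_info compares
# element-wise against (3, 6), so any 3.6+ interpreter passes. A hypothetical
# standalone check of the same idea:
#
#     import sys
#     ok = sys.version_info >= (3, 6)   # True on Python 3.6 or newer
#     print("supported" if ok else "unsupported")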
avg_line_length: 29.54902 | max_line_length: 78 | alphanum_fraction: 0.584605

hexsha: 4c6724b907fa4faab66d93b06519ede0cb2ab4c2 | size: 4,889 | ext: py | lang: Python
max_stars:  path scripts/google-java-format-diff.py | repo taesu82/google-java-format @ 5202af98006d744aafc2ba7cba2fa19a14e9b7b1 | licenses ["Apache-2.0"] | stars 4 | events 2019-11-08T16:42:02.000Z to 2021-11-11T17:56:38.000Z
max_issues: path scripts/google-java-format-diff.py | repo taesu82/google-java-format @ 5202af98006d744aafc2ba7cba2fa19a14e9b7b1 | licenses ["Apache-2.0"] | issues null | events null to null
max_forks:  path scripts/google-java-format-diff.py | repo taesu82/google-java-format @ 5202af98006d744aafc2ba7cba2fa19a14e9b7b1 | licenses ["Apache-2.0"] | forks null | events null to null
content:
#!/usr/bin/env python2.7
#
#===- google-java-format-diff.py - google-java-format Diff Reformatter -----===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
"""
google-java-format Diff Reformatter
====================================
This script reads input from a unified diff and reformats all the changed
lines. This is useful to reformat all the lines touched by a specific patch.
Example usage for git/svn users:
git diff -U0 HEAD^ | google-java-format-diff.py -p1 -i
svn diff --diff-cmd=diff -x-U0 | google-java-format-diff.py -i
For perforce users:
P4DIFF="git --no-pager diff --no-index" p4 diff | ./google-java-format-diff.py -i -p7
"""
import argparse
import difflib
import re
import string
import subprocess
import StringIO
import sys
from distutils.spawn import find_executable
def main():
parser = argparse.ArgumentParser(description=
'Reformat changed lines in diff. Without -i '
'option just output the diff that would be '
'introduced.')
parser.add_argument('-i', action='store_true', default=False,
help='apply edits to files instead of displaying a diff')
parser.add_argument('-p', metavar='NUM', default=0,
help='strip the smallest prefix containing P slashes')
parser.add_argument('-regex', metavar='PATTERN', default=None,
help='custom pattern selecting file paths to reformat '
'(case sensitive, overrides -iregex)')
parser.add_argument('-iregex', metavar='PATTERN', default=r'.*\.java',
help='custom pattern selecting file paths to reformat '
'(case insensitive, overridden by -regex)')
parser.add_argument('-v', '--verbose', action='store_true',
help='be more verbose, ineffective without -i')
parser.add_argument('-a', '--aosp', action='store_true',
help='use AOSP style instead of Google Style (4-space indentation)')
parser.add_argument('--skip-sorting-imports', action='store_true',
help='do not fix the import order')
parser.add_argument('-b', '--binary', help='path to google-java-format binary')
parser.add_argument('--google-java-format-jar', metavar='ABSOLUTE_PATH', default=None,
help='use a custom google-java-format jar')
args = parser.parse_args()
# Extract changed lines for each file.
filename = None
lines_by_file = {}
for line in sys.stdin:
match = re.search('^\+\+\+\ (.*?/){%s}(\S*)' % args.p, line)
if match:
filename = match.group(2)
if filename == None:
continue
if args.regex is not None:
if not re.match('^%s$' % args.regex, filename):
continue
else:
if not re.match('^%s$' % args.iregex, filename, re.IGNORECASE):
continue
match = re.search('^@@.*\+(\d+)(,(\d+))?', line)
if match:
start_line = int(match.group(1))
line_count = 1
if match.group(3):
line_count = int(match.group(3))
if line_count == 0:
continue
end_line = start_line + line_count - 1;
lines_by_file.setdefault(filename, []).extend(
['-lines', str(start_line) + ':' + str(end_line)])
if args.binary:
base_command = [args.binary]
elif args.google_java_format_jar:
base_command = ['java', '-jar', args.google_java_format_jar]
else:
binary = find_executable('google-java-format') or '/usr/bin/google-java-format'
base_command = [binary]
# Reformat files containing changes in place.
for filename, lines in lines_by_file.iteritems():
if args.i and args.verbose:
print 'Formatting', filename
command = base_command[:]
if args.i:
command.append('-i')
if args.aosp:
command.append('--aosp')
if args.skip_sorting_imports:
command.append('--skip-sorting-imports')
command.extend(lines)
command.append(filename)
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=None, stdin=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
sys.exit(p.returncode);
if not args.i:
with open(filename) as f:
code = f.readlines()
formatted_code = StringIO.StringIO(stdout).readlines()
diff = difflib.unified_diff(code, formatted_code,
filename, filename,
'(before formatting)', '(after formatting)')
diff_string = string.join(diff, '')
if len(diff_string) > 0:
sys.stdout.write(diff_string)
if __name__ == '__main__':
main()
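
# --- Editor's note (illustration only, not part of the original script) ---
# The core of main() is parsing the unified-diff hunk header '@@ -a,b +start,count @@'
# to recover which new-file lines a patch touches. A small standalone Python 3
# sketch of that one step (the hunk string is hypothetical):
#
#     import re
#     hunk = "@@ -10,4 +12,3 @@ some context"
#     m = re.search(r'^@@.*\+(\d+)(,(\d+))?', hunk)
#     start_line = int(m.group(1))                       # 12
#     line_count = int(m.group(3)) if m.group(3) else 1  # 3
#     end_line = start_line + line_count - 1             # 14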
avg_line_length: 35.948529 | max_line_length: 90 | alphanum_fraction: 0.605236

hexsha: 2b4fd39b448316935008160974f748dcfb693fff | size: 15,052 | ext: py | lang: Python
max_stars:  path python/oneflow/compatible/single_client/__init__.py | repo mosout/oneflow @ afbb221d900f1a340568ae2462b2022f8fcc4b3d | licenses ["Apache-2.0"] | stars 1 | events 2022-01-19T07:50:28.000Z to 2022-01-19T07:50:28.000Z
max_issues: path python/oneflow/compatible/single_client/__init__.py | repo mosout/oneflow @ afbb221d900f1a340568ae2462b2022f8fcc4b3d | licenses ["Apache-2.0"] | issues null | events null to null
max_forks:  path python/oneflow/compatible/single_client/__init__.py | repo mosout/oneflow @ afbb221d900f1a340568ae2462b2022f8fcc4b3d | licenses ["Apache-2.0"] | forks null | events null to null
content:
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow._oneflow_internal
Size = oneflow._oneflow_internal.Size
device = oneflow._oneflow_internal.device
placement = oneflow._oneflow_internal.placement
locals()["dtype"] = oneflow._oneflow_internal.dtype
locals()["bool"] = oneflow._oneflow_internal.bool
locals()["char"] = oneflow._oneflow_internal.char
locals()["float16"] = oneflow._oneflow_internal.float16
locals()["half"] = oneflow._oneflow_internal.float16
locals()["float32"] = oneflow._oneflow_internal.float32
locals()["float"] = oneflow._oneflow_internal.float
locals()["double"] = oneflow._oneflow_internal.double
locals()["float64"] = oneflow._oneflow_internal.float64
locals()["int8"] = oneflow._oneflow_internal.int8
locals()["int"] = oneflow._oneflow_internal.int32
locals()["int32"] = oneflow._oneflow_internal.int32
locals()["int64"] = oneflow._oneflow_internal.int64
locals()["long"] = oneflow._oneflow_internal.int64
locals()["uint8"] = oneflow._oneflow_internal.uint8
locals()["record"] = oneflow._oneflow_internal.record
locals()["tensor_buffer"] = oneflow._oneflow_internal.tensor_buffer
locals()["bfloat16"] = oneflow._oneflow_internal.bfloat16
locals()["uint16"] = oneflow._oneflow_internal.uint16
locals()["uint32"] = oneflow._oneflow_internal.uint32
locals()["uint64"] = oneflow._oneflow_internal.uint64
locals()["uint128"] = oneflow._oneflow_internal.uint128
locals()["int16"] = oneflow._oneflow_internal.int16
locals()["int128"] = oneflow._oneflow_internal.int128
locals()["complex32"] = oneflow._oneflow_internal.complex32
locals()["complex64"] = oneflow._oneflow_internal.complex64
locals()["complex128"] = oneflow._oneflow_internal.complex128
from oneflow.compatible.single_client.framework import (
env_util,
session_context,
session_util,
)
from oneflow.core.job.job_conf_pb2 import JobConfigProto
from oneflow.core.job.job_set_pb2 import ConfigProto
oneflow._oneflow_internal.DestroyGlobalForeignCallback()
oneflow._oneflow_internal.DestroyEnv()
import time
time.sleep(1)
del time
oneflow._oneflow_internal.SetIsMultiClient(False)
session_context.OpenDefaultSession(
session_util.Session(oneflow._oneflow_internal.NewSessionId())
)
oneflow._oneflow_internal.EnableEagerEnvironment(False)
del env_util
del session_util
del session_context
import oneflow.compatible.single_client.framework.c_api_util
from oneflow.compatible.single_client.framework import (
python_callback,
register_python_callback,
)
oneflow._oneflow_internal.RegisterGlobalForeignCallback(
python_callback.global_python_callback
)
del python_callback
del register_python_callback
from oneflow.compatible.single_client.framework import watcher
oneflow._oneflow_internal.RegisterGlobalWatcher(watcher._global_watcher)
del watcher
from oneflow.compatible.single_client.eager import boxing_util
oneflow._oneflow_internal.deprecated.RegisterBoxingUtilOnlyOnce(
boxing_util._global_boxing_util
)
del boxing_util
from oneflow.compatible.single_client.ops.util import custom_op_module
oneflow._oneflow_internal.RegisterPyKernels(
custom_op_module._python_kernel_reg.kernels_
)
del custom_op_module
from oneflow.compatible.single_client.framework import register_class_method_util
register_class_method_util.RegisterMethod4Class()
del register_class_method_util
INVALID_SPLIT_AXIS = oneflow._oneflow_internal.INVALID_SPLIT_AXIS
import atexit
from oneflow.compatible.single_client.framework.session_context import (
TryCloseAllSession,
)
atexit.register(TryCloseAllSession)
del TryCloseAllSession
del atexit
import sys
__original_exit__ = sys.exit
def custom_exit(returncode):
if returncode != 0:
import oneflow
oneflow._oneflow_internal.MasterSendAbort()
__original_exit__(returncode)
sys.exit = custom_exit
del custom_exit
del sys
from oneflow.compatible.single_client.autograd import no_grad
from oneflow.compatible.single_client.advanced.distribute_ops import (
cast_to_current_logical_view,
)
from oneflow.compatible.single_client.deprecated.initializer_util import (
truncated_normal_initializer as truncated_normal,
)
from oneflow.compatible.single_client.experimental.namescope import (
deprecated_name_scope as name_scope,
)
from oneflow.compatible.single_client.framework.check_point_v2 import (
GetAllVariables as get_all_variables,
)
from oneflow.compatible.single_client.framework.check_point_v2 import Load as load
from oneflow.compatible.single_client.framework.check_point_v2 import (
LoadVariables as load_variables,
)
from oneflow.compatible.single_client.framework.check_point_v2 import save
from oneflow.compatible.single_client.framework.dtype import (
convert_oneflow_dtype_to_numpy_dtype,
dtypes,
)
from oneflow.compatible.single_client.framework.env_util import (
api_enable_eager_execution as enable_eager_execution,
)
from oneflow.compatible.single_client.framework.env_util import (
api_get_current_machine_id as current_machine_id,
)
from oneflow.compatible.single_client.framework.env_util import (
api_get_current_resource as current_resource,
)
from oneflow.compatible.single_client.framework.function_desc import (
api_current_global_function_desc as current_global_function_desc,
)
from oneflow.compatible.single_client.framework.function_util import FunctionConfig
from oneflow.compatible.single_client.framework.function_util import (
FunctionConfig as ExecutionConfig,
)
from oneflow.compatible.single_client.framework.function_util import (
FunctionConfig as function_config,
)
from oneflow.compatible.single_client.framework.function_util import (
api_oneflow_function as global_function,
)
from oneflow.compatible.single_client.framework.generator import (
create_generator as Generator,
)
from oneflow.compatible.single_client.framework.generator import manual_seed
from oneflow.compatible.single_client.framework.input_blob_def import (
DeprecatedFixedTensorDef as FixedTensorDef,
)
from oneflow.compatible.single_client.framework.input_blob_def import (
DeprecatedMirroredTensorDef as MirroredTensorDef,
)
from oneflow.compatible.single_client.framework.job_set_util import (
inter_job_reuse_mem_strategy,
)
from oneflow.compatible.single_client.framework.model import Model
from oneflow.compatible.single_client.framework.ops import api_acc as acc
from oneflow.compatible.single_client.framework.ops import (
api_hierarchical_parallel_cast as hierarchical_parallel_cast,
)
from oneflow.compatible.single_client.framework.ops import api_pack as pack
from oneflow.compatible.single_client.framework.ops import (
api_parallel_cast as parallel_cast,
)
from oneflow.compatible.single_client.framework.ops import api_repeat as repeat
from oneflow.compatible.single_client.framework.ops import api_unpack as unpack
from oneflow.compatible.single_client.framework.placement_util import (
deprecated_placement as device_prior_placement,
)
from oneflow.compatible.single_client.framework.placement_util import (
deprecated_placement as fixed_placement,
)
from oneflow.compatible.single_client.framework.scope_util import (
api_current_scope as current_scope,
)
from oneflow.compatible.single_client.framework.session_util import (
TmpInitEagerGlobalSession as InitEagerGlobalSession,
)
from oneflow.compatible.single_client.framework.session_util import (
api_clear_default_session as clear_default_session,
)
from oneflow.compatible.single_client.framework.session_util import (
api_eager_execution_enabled as eager_execution_enabled,
)
from oneflow.compatible.single_client.framework.session_util import (
api_find_or_create_module as find_or_create_module,
)
from oneflow.compatible.single_client.framework.session_util import (
api_sync_default_session as sync_default_session,
)
from oneflow.compatible.single_client.framework.tensor import Tensor
from oneflow.compatible.single_client.ops.array_ops import amp_white_identity
from oneflow.compatible.single_client.ops.array_ops import (
api_slice_update as slice_update,
)
from oneflow.compatible.single_client.ops.array_ops import (
argwhere,
broadcast_like,
cast_to_static_shape,
concat,
dim_gather,
dynamic_reshape,
elem_cnt,
expand,
expand_dims,
flatten,
gather,
gather_nd,
identity,
identity_n,
masked_fill,
nonzero,
ones,
reshape,
reshape_like,
reverse,
scatter_nd,
slice,
slice_v2,
squeeze,
stack,
sync_dynamic_resize,
tensor_scatter_nd_add,
tensor_scatter_nd_update,
transpose,
where,
zeros,
)
from oneflow.compatible.single_client.ops.assign_op import assign
from oneflow.compatible.single_client.ops.stateful_ops import StatefulOp as stateful_op
from oneflow.compatible.single_client.ops.categorical_ordinal_encode_op import (
categorical_ordinal_encode,
)
from oneflow.compatible.single_client.ops.combined_margin_loss import (
combined_margin_loss,
)
from oneflow.compatible.single_client.ops.constant_op import (
constant,
constant_like,
constant_scalar,
ones_like,
zeros_like,
)
from oneflow.compatible.single_client.ops.count_not_finite import (
count_not_finite,
multi_count_not_finite,
)
from oneflow.compatible.single_client.ops.diag_ops import diag
from oneflow.compatible.single_client.ops.eager_nccl_ops import eager_nccl_all_reduce
from oneflow.compatible.single_client.ops.get_variable import (
api_get_variable as get_variable,
)
from oneflow.compatible.single_client.ops.initializer_util import (
constant_initializer,
empty_initializer,
)
from oneflow.compatible.single_client.ops.initializer_util import (
glorot_normal_initializer,
)
from oneflow.compatible.single_client.ops.initializer_util import (
glorot_normal_initializer as xavier_normal_initializer,
)
from oneflow.compatible.single_client.ops.initializer_util import (
glorot_uniform_initializer,
)
from oneflow.compatible.single_client.ops.initializer_util import (
glorot_uniform_initializer as xavier_uniform_initializer,
)
from oneflow.compatible.single_client.ops.initializer_util import (
kaiming_initializer,
ones_initializer,
random_normal_initializer,
random_uniform_initializer,
truncated_normal_initializer,
variance_scaling_initializer,
zeros_initializer,
)
from oneflow.compatible.single_client.ops.linalg import matmul
from oneflow.compatible.single_client.ops.loss_ops import ctc_loss, smooth_l1_loss
from oneflow.compatible.single_client.ops.math_ops import (
broadcast_to_compatible_with as broadcast_to_compatible_with,
)
from oneflow.compatible.single_client.ops.math_ops import cast
from oneflow.compatible.single_client.ops.math_ops import clip_by_value as clamp
from oneflow.compatible.single_client.ops.math_ops import clip_by_value as clip
from oneflow.compatible.single_client.ops.math_ops import (
clip_by_value as clip_by_scalar,
)
from oneflow.compatible.single_client.ops.math_ops import clip_by_value as clip_by_value
from oneflow.compatible.single_client.ops.math_ops import in_top_k as in_top_k
from oneflow.compatible.single_client.ops.math_ops import range
from oneflow.compatible.single_client.ops.math_ops import (
unsorted_batch_segment_sum as unsorted_batch_segment_sum,
)
from oneflow.compatible.single_client.ops.math_ops import (
unsorted_segment_sum as unsorted_segment_sum,
)
from oneflow.compatible.single_client.ops.math_ops import (
unsorted_segment_sum_like as unsorted_segment_sum_like,
)
from oneflow.compatible.single_client.ops.one_hot import one_hot
from oneflow.compatible.single_client.ops.pad import (
constant_pad2d,
pad,
pad_grad,
reflection_pad2d,
replication_pad2d,
same_padding,
zero_pad2d,
)
from oneflow.compatible.single_client.ops.partial_fc_sample import (
distributed_partial_fc_sample,
)
from oneflow.compatible.single_client.ops.sort_ops import argsort, sort
from oneflow.compatible.single_client.ops.tensor_buffer_ops import (
gen_tensor_buffer,
tensor_buffer_to_list_of_tensors,
tensor_buffer_to_tensor,
tensor_to_tensor_buffer,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
api_image_random_crop as image_random_crop,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
api_image_resize as image_resize,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
api_image_target_resize as image_target_resize,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
image_batch_align as image_batch_align,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
image_decode as image_decode,
)
from oneflow.compatible.single_client.ops.user_data_ops import image_flip as image_flip
from oneflow.compatible.single_client.ops.user_data_ops import (
image_normalize as image_normalize,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
object_bbox_flip as object_bbox_flip,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
object_bbox_scale as object_bbox_scale,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
object_segm_poly_flip as object_segmentation_polygon_flip,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
object_segm_poly_scale as object_segmentation_polygon_scale,
)
from oneflow.compatible.single_client.ops.user_data_ops import (
object_segm_poly_to_mask as object_segmentation_polygon_to_mask,
)
from oneflow.compatible.single_client.ops.user_op_builder import (
api_consistent_user_op_builder as consistent_user_op_builder,
)
from oneflow.compatible.single_client.ops.user_op_builder import (
api_consistent_user_op_module_builder as consistent_user_op_module_builder,
)
from oneflow.compatible.single_client.ops.user_op_builder import (
api_user_op_builder as user_op_builder,
)
from oneflow.compatible.single_client.ops.user_op_builder import (
api_user_op_module_builder as user_op_module_builder,
)
from oneflow.compatible.single_client.ops.watch import Watch as watch
from oneflow.compatible.single_client.ops.watch import WatchDiff as watch_diff
from . import (
checkpoint,
config,
data,
distribute,
distributed,
env,
image,
layers,
losses,
math,
model,
optimizer,
profiler,
random,
regularizers,
saved_model,
scope,
summary,
sysconfig,
tensorrt,
train,
typing,
util,
)
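
# --- Editor's note (illustration only, not part of the original module) ---
# The dtype aliases near the top use locals()["name"] = value. At module scope
# locals() is the same dict as globals(), so those assignments create importable
# module attributes while sidestepping built-in-looking names such as "int",
# "float", and "bool". A minimal hypothetical sketch of the same pattern:
#
#     locals()["answer"] = 42   # at module level this is globals()["answer"]
#     # after `import mymodule`, mymodule.answer == 42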
avg_line_length: 35.5 | max_line_length: 88 | alphanum_fraction: 0.821884

hexsha: aa303c4affd686620d30062a8a518957c2f54ef1 | size: 1,830 | ext: py | lang: Python
max_stars:  path tests/test_synthesizer.py | repo SebastianWolf-SAP/data-synthesis-for-machine-learning @ b622739776cedf57a906d7304a96aa31f767340c | licenses ["Apache-2.0"] | stars 12 | events 2019-10-24T08:52:41.000Z to 2021-12-20T21:54:09.000Z
max_issues: path tests/test_synthesizer.py | repo SebastianWolf-SAP/data-synthesis-for-machine-learning @ b622739776cedf57a906d7304a96aa31f767340c | licenses ["Apache-2.0"] | issues 7 | events 2020-01-07T23:02:42.000Z to 2022-02-17T21:36:19.000Z
max_forks:  path tests/test_synthesizer.py | repo SebastianWolf-SAP/data-synthesis-for-machine-learning @ b622739776cedf57a906d7304a96aa31f767340c | licenses ["Apache-2.0"] | forks 9 | events 2019-12-16T19:51:48.000Z to 2022-02-27T18:40:40.000Z
content:
from .testdata import adults01
def test_calculate_degree():
from ds4ml.synthesizer import calculate_degree
degree = calculate_degree(8140, 6, 0.05)
assert 0 < degree <= 6 / 2
def test_greedy_bayes():
from pandas import DataFrame
from ds4ml.synthesizer import greedy_bayes
network = greedy_bayes(DataFrame(adults01), epsilon=0.1)
assert network[0][0] in adults01.columns
assert type(network[0][1]) == list
def test_noisy_distributions():
from ds4ml.synthesizer import noisy_distributions
from pandas import DataFrame
dataset = DataFrame([
[1, 0, 40],
[1, 1, 42],
[0, 0, 30],
[0, 1, 30],
[1, 1, 36],
[1, 1, 50],
[0, 0, 32],
[0, 0, 28]
], columns=['salary', 'sex', 'age'])
columns = ['salary', 'sex']
epsilon = 0.05
noisy = noisy_distributions(dataset, columns, epsilon)
assert noisy.shape == (4, 3)
assert len(noisy[noisy['freq'] >= 0]) == 4
def test_noisy_conditionals():
from ds4ml.synthesizer import noisy_conditionals
from pandas import DataFrame
dataset = DataFrame([
[1, 0, 40],
[1, 1, 42],
[0, 0, 30],
[0, 1, 30],
[1, 1, 36],
[1, 1, 50],
[0, 0, 32],
[0, 0, 28]
], columns=['salary', 'sex', 'age'])
epsilon = 0.05
network = [('salary', ['sex']), ('age', ['sex', 'salary'])]
noisy = noisy_conditionals(network, dataset, epsilon)
assert len(noisy['sex']) == 2
assert (1.0 - sum(noisy['sex'])) < 1e-6
assert len(noisy['salary']) == 2
assert '[0]' in noisy['salary']
assert '[1]' in noisy['salary']
assert len(noisy['age']) == 4
assert '[0, 0]' in noisy['age']
assert '[0, 1]' in noisy['age']
assert '[1, 0]' in noisy['age']
assert '[1, 1]' in noisy['age']
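
# --- Editor's note (illustration only, not part of the original tests) ---
# The last test expects 4 conditional entries for 'age' because its parents in
# the network are ['sex', 'salary'], each binary; the keys look like stringified
# parent-value lists. A small sketch of how those 4 keys arise (the key format
# is inferred from the asserts above, not from ds4ml internals):
#
#     from itertools import product
#     keys = [str(list(combo)) for combo in product([0, 1], repeat=2)]
#     # keys == ['[0, 0]', '[0, 1]', '[1, 0]', '[1, 1]']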
avg_line_length: 28.153846 | max_line_length: 63 | alphanum_fraction: 0.564481

hexsha: aa3b89c0810862bbf3b19b25408247533e949c3b | size: 3,217 | ext: py | lang: Python
max_stars:  path tests/oauth2/rfc6749/endpoints/test_revocation_endpoint.py | repo garciasolero/oauthlib @ 8dadaf0fde2b502df359632853fda00963dce3f5 | licenses ["BSD-3-Clause"] | stars 1 | events 2015-10-22T09:49:50.000Z to 2015-10-22T09:49:50.000Z
max_issues: path tests/oauth2/rfc6749/endpoints/test_revocation_endpoint.py | repo garciasolero/oauthlib @ 8dadaf0fde2b502df359632853fda00963dce3f5 | licenses ["BSD-3-Clause"] | issues null | events null to null
max_forks:  path tests/oauth2/rfc6749/endpoints/test_revocation_endpoint.py | repo garciasolero/oauthlib @ 8dadaf0fde2b502df359632853fda00963dce3f5 | licenses ["BSD-3-Clause"] | forks null | events null to null
content:
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from json import loads
from mock import MagicMock
from oauthlib.common import urlencode
from oauthlib.oauth2 import RequestValidator, RevocationEndpoint
from ....unittest import TestCase
class RevocationEndpointTest(TestCase):
def setUp(self):
self.validator = MagicMock(wraps=RequestValidator())
self.validator.authenticate_client.return_value = True
self.validator.revoke_token.return_value = True
self.endpoint = RevocationEndpoint(self.validator)
self.uri = 'https://example.com/revoke_token'
self.headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
def test_revoke_token(self):
for token_type in ('access_token', 'refresh_token', 'invalid'):
body = urlencode([('token', 'foo'),
('token_type_hint', token_type)])
h, b, s = self.endpoint.create_revocation_response(self.uri,
headers=self.headers, body=body)
self.assertEqual(h, {})
self.assertEqual(b, '')
self.assertEqual(s, 200)
def test_revoke_token_without_client_authentication(self):
self.validator.client_authentication_required.return_value = False
self.validator.authenticate_client.return_value = False
for token_type in ('access_token', 'refresh_token', 'invalid'):
body = urlencode([('token', 'foo'),
('token_type_hint', token_type)])
h, b, s = self.endpoint.create_revocation_response(self.uri,
headers=self.headers, body=body)
self.assertEqual(h, {})
self.assertEqual(b, '')
self.assertEqual(s, 200)
def test_revoke_with_callback(self):
endpoint = RevocationEndpoint(self.validator, enable_jsonp=True)
callback = 'package.hello_world'
for token_type in ('access_token', 'refresh_token', 'invalid'):
body = urlencode([('token', 'foo'),
('token_type_hint', token_type),
('callback', callback)])
h, b, s = endpoint.create_revocation_response(self.uri,
headers=self.headers, body=body)
self.assertEqual(h, {})
self.assertEqual(b, callback + '();')
self.assertEqual(s, 200)
def test_revoke_unsupported_token(self):
endpoint = RevocationEndpoint(self.validator,
supported_token_types=['access_token'])
body = urlencode([('token', 'foo'),
('token_type_hint', 'refresh_token')])
h, b, s = endpoint.create_revocation_response(self.uri,
headers=self.headers, body=body)
self.assertEqual(h, {})
self.assertEqual(loads(b)['error'], 'unsupported_token_type')
self.assertEqual(s, 400)
h, b, s = endpoint.create_revocation_response(self.uri,
headers=self.headers, body='')
self.assertEqual(h, {})
self.assertEqual(loads(b)['error'], 'invalid_request')
self.assertEqual(s, 400)
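
# --- Editor's note (illustration only, not part of the original tests) ---
# Each test builds a form-encoded revocation body from (key, value) pairs. For
# these simple ASCII inputs the standard library produces the same string that
# oauthlib.common.urlencode is used for above (an assumption, not a guarantee
# for every input):
#
#     from urllib.parse import urlencode
#     body = urlencode([('token', 'foo'), ('token_type_hint', 'access_token')])
#     # body == 'token=foo&token_type_hint=access_token'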
avg_line_length: 41.779221 | max_line_length: 77 | alphanum_fraction: 0.607087

hexsha: 2c1b6f77bf8583fbc12ecc39334cc773eb7ea663 | size: 1,536 | ext: py | lang: Python
max_stars:  path app/infrastructures/repositories/user_repository.py | repo harokki/fastapi-authenticated @ baca823d3489739843b4df68f57fa785da7bf50d | licenses ["MIT"] | stars null | events null to null
max_issues: path app/infrastructures/repositories/user_repository.py | repo harokki/fastapi-authenticated @ baca823d3489739843b4df68f57fa785da7bf50d | licenses ["MIT"] | issues null | events null to null
max_forks:  path app/infrastructures/repositories/user_repository.py | repo harokki/fastapi-authenticated @ baca823d3489739843b4df68f57fa785da7bf50d | licenses ["MIT"] | forks null | events null to null
content:
from contextlib import AbstractContextManager
from typing import Callable, List
from sqlalchemy.orm import Session
from app.domains.entities.user import User
from app.domains.repositories.user_repository import UserRepository
from app.schemas.user import UserCreateSchema
class SAUserRepository(UserRepository):
def __init__(self, session_factory: Callable[..., AbstractContextManager[Session]]):
self.session_factory = session_factory
def find_by_username(self, username: str) -> User:
with self.session_factory() as session:
user = session.query(User).filter(User.username == username).first()
return user
def find_by_email(self, email: str) -> User:
with self.session_factory() as session:
user = session.query(User).filter(User.email == email).first()
return user
def create_user(self, user: UserCreateSchema) -> User:
hashed_password = User.get_hashed_password(user.password)
db_user = User(
user.username,
user.email,
user.account_name,
hashed_password,
user.created_by,
)
with self.session_factory() as session:
session.add(db_user)
session.commit()
session.refresh(db_user)
return db_user
def get_users(self, skip: int = 0, limit: int = 100) -> List[User]:
with self.session_factory() as session:
users = session.query(User).offset(skip).limit(limit).all()
return users
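
# --- Editor's note (illustration only, not part of the original module) ---
# SAUserRepository only needs a callable that returns a context manager yielding
# a Session. One hypothetical way to build such a factory with SQLAlchemy (the
# engine URL is a placeholder, not taken from this project):
#
#     from contextlib import contextmanager
#     from sqlalchemy import create_engine
#     from sqlalchemy.orm import sessionmaker
#
#     engine = create_engine("sqlite:///:memory:")
#     SessionLocal = sessionmaker(bind=engine)
#
#     @contextmanager
#     def session_factory():
#         session = SessionLocal()
#         try:
#             yield session
#         finally:
#             session.close()
#
#     repo = SAUserRepository(session_factory)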
avg_line_length: 33.391304 | max_line_length: 88 | alphanum_fraction: 0.66276

hexsha: 7e511b00d49dfedea45fc30dc21c606534dc8abc | size: 83 | ext: py | lang: Python
max_stars:  path pys/namta.py | repo nahian-147/my_codes @ 9729c56b227d75354ea49982720de94ed1c21909 | licenses ["MIT"] | stars null | events null to null
max_issues: path pys/namta.py | repo nahian-147/my_codes @ 9729c56b227d75354ea49982720de94ed1c21909 | licenses ["MIT"] | issues null | events null to null
max_forks:  path pys/namta.py | repo nahian-147/my_codes @ 9729c56b227d75354ea49982720de94ed1c21909 | licenses ["MIT"] | forks null | events null to null
content:
n = int(input())
for k in range(1,11):
print(str(n)+'x'+str(k)+'='+str(n*k))
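
# --- Editor's note (illustration only) ---
# An equivalent, slightly more idiomatic formulation of the loop above:
#
#     for k in range(1, 11):
#         print(f"{n}x{k}={n * k}")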
avg_line_length: 16.6 | max_line_length: 39 | alphanum_fraction: 0.506024

hexsha: 2785c0c59e943376320d0aafd538da33ef19f8a8 | size: 11,215 | ext: py | lang: Python
max_stars:  path pymc/backends/ndarray.py | repo michaeloriordan/pymc @ a099292f1de592447abc54f223ecd6e5d93a95dc | licenses ["Apache-2.0"] | stars null | events null to null
max_issues: path pymc/backends/ndarray.py | repo michaeloriordan/pymc @ a099292f1de592447abc54f223ecd6e5d93a95dc | licenses ["Apache-2.0"] | issues null | events null to null
max_forks:  path pymc/backends/ndarray.py | repo michaeloriordan/pymc @ a099292f1de592447abc54f223ecd6e5d93a95dc | licenses ["Apache-2.0"] | forks null | events null to null
content:
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NumPy array trace backend
Store sampling values in memory as a NumPy array.
"""
import json
import os
import shutil
import warnings
from typing import Any, Dict, List, Optional
import numpy as np
from pymc.backends import base
from pymc.backends.base import MultiTrace
from pymc.exceptions import TraceDirectoryError
from pymc.model import Model, modelcontext
class SerializeNDArray:
metadata_file = "metadata.json"
samples_file = "samples.npz"
metadata_path = None # type: str
samples_path = None # type: str
def __init__(self, directory: str):
"""Helper to save and load NDArray objects"""
warnings.warn(
"The `SerializeNDArray` class will soon be removed. "
"Instead, use ArviZ to save/load traces.",
FutureWarning,
)
self.directory = directory
self.metadata_path = os.path.join(self.directory, self.metadata_file)
self.samples_path = os.path.join(self.directory, self.samples_file)
@staticmethod
def to_metadata(ndarray):
"""Extract ndarray metadata into json-serializable content"""
if ndarray._stats is None:
stats = ndarray._stats
sampler_vars = None
else:
stats = []
sampler_vars = []
for stat in ndarray._stats:
stats.append({key: value.tolist() for key, value in stat.items()})
sampler_vars.append({key: str(value.dtype) for key, value in stat.items()})
metadata = {
"draw_idx": ndarray.draw_idx,
"draws": ndarray.draws,
"_stats": stats,
"chain": ndarray.chain,
"sampler_vars": sampler_vars,
}
return metadata
def save(self, ndarray):
"""Serialize a ndarray to file
The goal here is to be modestly safer and more portable than a
pickle file. The expense is that the model code must be available
to reload the multitrace.
"""
if not isinstance(ndarray, NDArray):
raise TypeError("Can only save NDArray")
if os.path.isdir(self.directory):
shutil.rmtree(self.directory)
os.mkdir(self.directory)
with open(self.metadata_path, "w") as buff:
json.dump(SerializeNDArray.to_metadata(ndarray), buff)
np.savez_compressed(self.samples_path, **ndarray.samples)
def load(self, model: Model) -> "NDArray":
"""Load the saved ndarray from file"""
if not os.path.exists(self.samples_path) or not os.path.exists(self.metadata_path):
raise TraceDirectoryError("%s is not a trace directory" % self.directory)
new_trace = NDArray(model=model)
with open(self.metadata_path) as buff:
metadata = json.load(buff)
metadata["_stats"] = [
{k: np.array(v) for k, v in stat.items()} for stat in metadata["_stats"]
]
# it seems like at least some old traces don't have 'sampler_vars'
try:
sampler_vars = metadata.pop("sampler_vars")
new_trace._set_sampler_vars(sampler_vars)
except KeyError:
pass
for key, value in metadata.items():
setattr(new_trace, key, value)
new_trace.samples = dict(np.load(self.samples_path))
return new_trace
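# Illustrative round-trip sketch (an assumption about calling code, not part of the upstream
# module): persist an in-memory NDArray trace to a directory and load it back for the same
# model. `model`, `trace` and the directory name are placeholders supplied by the caller.
def _serialize_roundtrip_example(model, trace, directory="saved_trace"):
    """Save an NDArray `trace` to `directory` and reload it (illustrative only)."""
    serializer = SerializeNDArray(directory)  # note: emits a FutureWarning, class is deprecated
    serializer.save(trace)                    # writes metadata.json and samples.npz
    return serializer.load(model=model)       # rebuilds an NDArray with the same samples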
class NDArray(base.BaseTrace):
"""NDArray trace object
Parameters
----------
name: str
Name of backend. This has no meaning for the NDArray backend.
model: Model
If None, the model is taken from the `with` context.
vars: list of variables
Sampling values will be stored for these variables. If None,
`model.unobserved_RVs` is used.
"""
supports_sampler_stats = True
def __init__(self, name=None, model=None, vars=None, test_point=None):
super().__init__(name, model, vars, test_point)
self.draw_idx = 0
self.draws = None
self.samples = {}
self._stats = None
# Sampling methods
def setup(self, draws, chain, sampler_vars=None) -> None:
"""Perform chain-specific setup.
Parameters
----------
draws: int
Expected number of draws
chain: int
Chain number
sampler_vars: list of dicts
Names and dtypes of the variables that are
exported by the samplers.
"""
super().setup(draws, chain, sampler_vars)
self.chain = chain
if self.samples: # Concatenate new array if chain is already present.
old_draws = len(self)
self.draws = old_draws + draws
self.draw_idx = old_draws
for varname, shape in self.var_shapes.items():
old_var_samples = self.samples[varname]
new_var_samples = np.zeros((draws,) + shape, self.var_dtypes[varname])
self.samples[varname] = np.concatenate((old_var_samples, new_var_samples), axis=0)
else: # Otherwise, make array of zeros for each variable.
self.draws = draws
for varname, shape in self.var_shapes.items():
self.samples[varname] = np.zeros((draws,) + shape, dtype=self.var_dtypes[varname])
if sampler_vars is None:
return
if self._stats is None:
self._stats = []
for sampler in sampler_vars:
data = dict() # type: Dict[str, np.ndarray]
self._stats.append(data)
for varname, dtype in sampler.items():
data[varname] = np.zeros(draws, dtype=dtype)
else:
for data, vars in zip(self._stats, sampler_vars):
if vars.keys() != data.keys():
raise ValueError("Sampler vars can't change")
old_draws = len(self)
for varname, dtype in vars.items():
old = data[varname]
new = np.zeros(draws, dtype=dtype)
data[varname] = np.concatenate([old, new])
def record(self, point, sampler_stats=None) -> None:
"""Record results of a sampling iteration.
Parameters
----------
point: dict
Values mapped to variable names
"""
for varname, value in zip(self.varnames, self.fn(point)):
self.samples[varname][self.draw_idx] = value
if self._stats is not None and sampler_stats is None:
raise ValueError("Expected sampler_stats")
if self._stats is None and sampler_stats is not None:
raise ValueError("Unknown sampler_stats")
if sampler_stats is not None:
for data, vars in zip(self._stats, sampler_stats):
for key, val in vars.items():
data[key][self.draw_idx] = val
self.draw_idx += 1
def _get_sampler_stats(self, varname, sampler_idx, burn, thin):
return self._stats[sampler_idx][varname][burn::thin]
def close(self):
if self.draw_idx == self.draws:
return
# Remove trailing zeros if interrupted before completed all
# draws.
self.samples = {var: vtrace[: self.draw_idx] for var, vtrace in self.samples.items()}
if self._stats is not None:
self._stats = [
{var: trace[: self.draw_idx] for var, trace in stats.items()}
for stats in self._stats
]
# Selection methods
def __len__(self):
if not self.samples: # `setup` has not been called.
return 0
return self.draw_idx
def get_values(self, varname: str, burn=0, thin=1) -> np.ndarray:
"""Get values from trace.
Parameters
----------
varname: str
burn: int
thin: int
Returns
-------
A NumPy array
"""
return self.samples[varname][burn::thin]
def _slice(self, idx):
# Slicing directly instead of using _slice_as_ndarray to
# support stop value in slice (which is needed by
# iter_sample).
        # Only the first `draw_idx` values are valid because of preallocation
idx = slice(*idx.indices(len(self)))
sliced = NDArray(model=self.model, vars=self.vars)
sliced.chain = self.chain
sliced.samples = {varname: values[idx] for varname, values in self.samples.items()}
sliced.sampler_vars = self.sampler_vars
sliced.draw_idx = (idx.stop - idx.start) // idx.step
if self._stats is None:
return sliced
sliced._stats = []
for vars in self._stats:
var_sliced = {}
sliced._stats.append(var_sliced)
for key, vals in vars.items():
var_sliced[key] = vals[idx]
return sliced
def point(self, idx) -> Dict[str, Any]:
"""Return dictionary of point values at `idx` for current chain
with variable names as keys.
"""
idx = int(idx)
return {varname: values[idx] for varname, values in self.samples.items()}
def _slice_as_ndarray(strace, idx):
sliced = NDArray(model=strace.model, vars=strace.vars)
sliced.chain = strace.chain
# Happy path where we do not need to load everything from the trace
if (idx.step is None or idx.step >= 1) and (idx.stop is None or idx.stop == len(strace)):
start, stop, step = idx.indices(len(strace))
sliced.samples = {
v: strace.get_values(v, burn=idx.start, thin=idx.step) for v in strace.varnames
}
sliced.draw_idx = (stop - start) // step
else:
start, stop, step = idx.indices(len(strace))
sliced.samples = {v: strace.get_values(v)[start:stop:step] for v in strace.varnames}
sliced.draw_idx = (stop - start) // step
return sliced
def point_list_to_multitrace(
point_list: List[Dict[str, np.ndarray]], model: Optional[Model] = None
) -> MultiTrace:
"""transform point list into MultiTrace"""
_model = modelcontext(model)
varnames = list(point_list[0].keys())
with _model:
chain = NDArray(model=_model, vars=[_model[vn] for vn in varnames])
chain.setup(draws=len(point_list), chain=0)
# since we are simply loading a trace by hand, we need only a vacuous function for
        # chain.record() to use. This overrides the default `fn`.
def point_fun(point):
return [point[vn] for vn in varnames]
chain.fn = point_fun
for point in point_list:
chain.record(point)
return MultiTrace([chain])
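# Illustrative usage sketch (an assumption about typical calling code, not part of the
# upstream module): hand-made draws for a single free variable "x" are wrapped into a
# MultiTrace. Assumes a PyMC version where this helper and `pm.Model` behave as documented.
def _point_list_example():
    """Build a two-draw MultiTrace from a list of point dictionaries (illustrative only)."""
    import pymc as pm  # deferred import; this module ships inside the pymc package
    with pm.Model() as m:
        pm.Normal("x")
    points = [{"x": np.array(0.1)}, {"x": np.array(-0.3)}]
    return point_list_to_multitrace(points, model=m)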
| 34.937695
| 98
| 0.605707
|
3337008f4b17aa8eb9f627980ac7000d0d391fb2
| 6,393
|
py
|
Python
|
gector/datareader.py
|
convobox/gector
|
348be658027f2d16ce2e883755a1fb4a9222831e
|
[
"Apache-2.0"
] | null | null | null |
gector/datareader.py
|
convobox/gector
|
348be658027f2d16ce2e883755a1fb4a9222831e
|
[
"Apache-2.0"
] | null | null | null |
gector/datareader.py
|
convobox/gector
|
348be658027f2d16ce2e883755a1fb4a9222831e
|
[
"Apache-2.0"
] | null | null | null |
"""Tweaked AllenNLP dataset reader."""
import logging
import re
from random import random
from typing import Dict, List
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import TextField, SequenceLabelField, MetadataField, Field
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
from overrides import overrides
# for gector
from gector.utils.helpers import SEQ_DELIMETERS, START_TOKEN
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@DatasetReader.register("seq2labels_datareader")
class Seq2LabelsDatasetReader(DatasetReader):
"""
Reads instances from a pretokenised file where each line is in the following format:
WORD###TAG [TAB] WORD###TAG [TAB] ..... \n
and converts it into a ``Dataset`` suitable for sequence tagging. You can also specify
alternative delimiters in the constructor.
Parameters
----------
delimiters: ``dict``
        The dictionary with all delimiters.
token_indexers : ``Dict[str, TokenIndexer]``, optional (default=``{"tokens": SingleIdTokenIndexer()}``)
We use this to define the input representation for the text. See :class:`TokenIndexer`.
Note that the `output` tags will always correspond to single token IDs based on how they
are pre-tokenised in the data file.
    max_len: if set, long sentences will be truncated to this length
"""
# fix broken sentences mostly in Lang8
BROKEN_SENTENCES_REGEXP = re.compile(r'\.[a-zA-RT-Z]')
def __init__(self,
token_indexers: Dict[str, TokenIndexer] = None,
delimeters: dict = SEQ_DELIMETERS,
skip_correct: bool = False,
skip_complex: int = 0,
lazy: bool = False,
max_len: int = None,
test_mode: bool = False,
tag_strategy: str = "keep_one",
tn_prob: float = 0,
tp_prob: float = 0,
broken_dot_strategy: str = "keep") -> None:
super().__init__(lazy)
self._token_indexers = token_indexers or {'tokens': SingleIdTokenIndexer()}
self._delimeters = delimeters
self._max_len = max_len
self._skip_correct = skip_correct
self._skip_complex = skip_complex
self._tag_strategy = tag_strategy
self._broken_dot_strategy = broken_dot_strategy
self._test_mode = test_mode
self._tn_prob = tn_prob
self._tp_prob = tp_prob
@overrides
def _read(self, file_path):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
with open(file_path, "r") as data_file:
logger.info("Reading instances from lines in file at: %s", file_path)
for line in data_file:
line = line.strip("\n")
# skip blank and broken lines
if not line or (not self._test_mode and self._broken_dot_strategy == 'skip'
and self.BROKEN_SENTENCES_REGEXP.search(line) is not None):
continue
tokens_and_tags = [pair.rsplit(self._delimeters['labels'], 1)
for pair in line.split(self._delimeters['tokens'])]
try:
tokens = [Token(token) for token, tag in tokens_and_tags]
tags = [tag for token, tag in tokens_and_tags]
except ValueError:
tokens = [Token(token[0]) for token in tokens_and_tags]
tags = None
if tokens and tokens[0] != Token(START_TOKEN):
tokens = [Token(START_TOKEN)] + tokens
words = [x.text for x in tokens]
if self._max_len is not None:
tokens = tokens[:self._max_len]
tags = None if tags is None else tags[:self._max_len]
instance = self.text_to_instance(tokens, tags, words)
if instance:
yield instance
def extract_tags(self, tags: List[str]):
op_del = self._delimeters['operations']
labels = [x.split(op_del) for x in tags]
        complex_flag_dict = {}
# get flags
for i in range(5):
idx = i + 1
            complex_flag_dict[idx] = sum([len(x) > idx for x in labels])
if self._tag_strategy == "keep_one":
# get only first candidates for r_tags in right and the last for left
labels = [x[0] for x in labels]
elif self._tag_strategy == "merge_all":
            # consider phrases as words
pass
else:
raise Exception("Incorrect tag strategy")
detect_tags = ["CORRECT" if label == "$KEEP" else "INCORRECT" for label in labels]
        return labels, detect_tags, complex_flag_dict
def text_to_instance(self, tokens: List[Token], tags: List[str] = None,
words: List[str] = None) -> Instance: # type: ignore
"""
We take `pre-tokenized` input here, because we don't have a tokenizer in this class.
"""
# pylint: disable=arguments-differ
fields: Dict[str, Field] = {}
sequence = TextField(tokens, self._token_indexers)
fields["tokens"] = sequence
fields["metadata"] = MetadataField({"words": words})
if tags is not None:
labels, detect_tags, complex_flag_dict = self.extract_tags(tags)
if self._skip_complex and complex_flag_dict[self._skip_complex] > 0:
return None
rnd = random()
# skip TN
if self._skip_correct and all(x == "CORRECT" for x in detect_tags):
if rnd > self._tn_prob:
return None
# skip TP
else:
if rnd > self._tp_prob:
return None
fields["labels"] = SequenceLabelField(labels, sequence,
label_namespace="labels")
fields["d_tags"] = SequenceLabelField(detect_tags, sequence,
label_namespace="d_tags")
return Instance(fields)
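# Illustrative parsing sketch (not part of the original reader; the delimiter strings below
# are hypothetical stand-ins for SEQ_DELIMETERS): shows how one pre-tokenised line is split
# into tokens and labels, mirroring the logic of `_read` above.
def _parse_line_example():
    delims = {"tokens": " ", "labels": "SEPL|||SEPR"}  # assumed values, for illustration only
    line = delims["labels"].join(["the", "$KEEP"]) + delims["tokens"] + delims["labels"].join(["cats", "$DELETE"])
    pairs = [pair.rsplit(delims["labels"], 1) for pair in line.split(delims["tokens"])]
    tokens = [tok for tok, tag in pairs]  # ['the', 'cats']
    tags = [tag for tok, tag in pairs]    # ['$KEEP', '$DELETE']
    return tokens, tags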
| 41.784314
| 107
| 0.595026
|
51de8faf5a3d7e6151cc4ab124c94f0d802206e4
| 9,869
|
py
|
Python
|
testing/test_env.py
|
zipated/src
|
2b8388091c71e442910a21ada3d97ae8bc1845d3
|
[
"BSD-3-Clause"
] | 2,151
|
2020-04-18T07:31:17.000Z
|
2022-03-31T08:39:18.000Z
|
testing/test_env.py
|
cangulcan/src
|
2b8388091c71e442910a21ada3d97ae8bc1845d3
|
[
"BSD-3-Clause"
] | 395
|
2020-04-18T08:22:18.000Z
|
2021-12-08T13:04:49.000Z
|
testing/test_env.py
|
cangulcan/src
|
2b8388091c71e442910a21ada3d97ae8bc1845d3
|
[
"BSD-3-Clause"
] | 338
|
2020-04-18T08:03:10.000Z
|
2022-03-29T12:33:22.000Z
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Sets environment variables needed to run a chromium unit test."""
import io
import os
import stat
import subprocess
import sys
import time
# This is hardcoded to be src/ relative to this script.
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CHROME_SANDBOX_ENV = 'CHROME_DEVEL_SANDBOX'
CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox'
def get_sandbox_env(env):
"""Returns the environment flags needed for the SUID sandbox to work."""
extra_env = {}
chrome_sandbox_path = env.get(CHROME_SANDBOX_ENV, CHROME_SANDBOX_PATH)
# The above would silently disable the SUID sandbox if the env value were
# an empty string. We don't want to allow that. http://crbug.com/245376
# TODO(jln): Remove this check once it's no longer possible to disable the
# sandbox that way.
if not chrome_sandbox_path:
chrome_sandbox_path = CHROME_SANDBOX_PATH
extra_env[CHROME_SANDBOX_ENV] = chrome_sandbox_path
return extra_env
def trim_cmd(cmd):
"""Removes internal flags from cmd since they're just used to communicate from
the host machine to this script running on the swarm slaves."""
sanitizers = ['asan', 'lsan', 'msan', 'tsan']
internal_flags = frozenset('--%s=%d' % (name, value)
for name in sanitizers
for value in [0, 1])
return [i for i in cmd if i not in internal_flags]
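# Illustrative check (not part of the original script): host-side sanitizer toggles are
# stripped while every other argument is preserved.
assert trim_cmd(['out/base_unittests', '--asan=1', '--lsan=0', '--some-flag']) == \
    ['out/base_unittests', '--some-flag']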
def fix_python_path(cmd):
"""Returns the fixed command line to call the right python executable."""
out = cmd[:]
if out[0] == 'python':
out[0] = sys.executable
elif out[0].endswith('.py'):
out.insert(0, sys.executable)
return out
def get_sanitizer_env(cmd, asan, lsan, msan, tsan, cfi_diag):
"""Returns the envirnoment flags needed for sanitizer tools."""
extra_env = {}
# Instruct GTK to use malloc while running sanitizer-instrumented tests.
extra_env['G_SLICE'] = 'always-malloc'
extra_env['NSS_DISABLE_ARENA_FREE_LIST'] = '1'
extra_env['NSS_DISABLE_UNLOAD'] = '1'
# TODO(glider): remove the symbolizer path once
# https://code.google.com/p/address-sanitizer/issues/detail?id=134 is fixed.
symbolizer_path = os.path.join(ROOT_DIR,
'third_party', 'llvm-build', 'Release+Asserts', 'bin', 'llvm-symbolizer')
if lsan or tsan:
# LSan is not sandbox-compatible, so we can use online symbolization. In
# fact, it needs symbolization to be able to apply suppressions.
symbolization_options = ['symbolize=1',
'external_symbolizer_path=%s' % symbolizer_path]
elif (asan or msan or cfi_diag) and sys.platform not in ['win32', 'cygwin']:
# ASan uses a script for offline symbolization, except on Windows.
# Important note: when running ASan with leak detection enabled, we must use
# the LSan symbolization options above.
symbolization_options = ['symbolize=0']
# Set the path to llvm-symbolizer to be used by asan_symbolize.py
extra_env['LLVM_SYMBOLIZER_PATH'] = symbolizer_path
else:
symbolization_options = []
if asan:
asan_options = symbolization_options[:]
if lsan:
asan_options.append('detect_leaks=1')
if asan_options:
extra_env['ASAN_OPTIONS'] = ' '.join(asan_options)
if sys.platform == 'darwin':
isolate_output_dir = os.path.abspath(os.path.dirname(cmd[0]))
# This is needed because the test binary has @executable_path embedded in
# it that the OS tries to resolve to the cache directory and not the
# mapped directory.
extra_env['DYLD_LIBRARY_PATH'] = str(isolate_output_dir)
if lsan:
if asan or msan:
lsan_options = []
else:
lsan_options = symbolization_options[:]
if sys.platform == 'linux2':
# Use the debug version of libstdc++ under LSan. If we don't, there will
# be a lot of incomplete stack traces in the reports.
extra_env['LD_LIBRARY_PATH'] = '/usr/lib/x86_64-linux-gnu/debug:'
extra_env['LSAN_OPTIONS'] = ' '.join(lsan_options)
if msan:
msan_options = symbolization_options[:]
if lsan:
msan_options.append('detect_leaks=1')
extra_env['MSAN_OPTIONS'] = ' '.join(msan_options)
if tsan:
tsan_options = symbolization_options[:]
extra_env['TSAN_OPTIONS'] = ' '.join(tsan_options)
# CFI uses the UBSan runtime to provide diagnostics.
if cfi_diag:
ubsan_options = symbolization_options[:] + ['print_stacktrace=1']
extra_env['UBSAN_OPTIONS'] = ' '.join(ubsan_options)
return extra_env
def get_sanitizer_symbolize_command(json_path=None, executable_path=None):
"""Construct the command to invoke offline symbolization script."""
script_path = os.path.join(
ROOT_DIR, 'tools', 'valgrind', 'asan', 'asan_symbolize.py')
cmd = [sys.executable, script_path]
if json_path is not None:
cmd.append('--test-summary-json-file=%s' % json_path)
if executable_path is not None:
cmd.append('--executable-path=%s' % executable_path)
return cmd
def get_json_path(cmd):
"""Extract the JSON test summary path from a command line."""
json_path_flag = '--test-launcher-summary-output='
for arg in cmd:
if arg.startswith(json_path_flag):
return arg.split(json_path_flag).pop()
return None
def symbolize_snippets_in_json(cmd, env):
"""Symbolize output snippets inside the JSON test summary."""
json_path = get_json_path(cmd)
if json_path is None:
return
try:
symbolize_command = get_sanitizer_symbolize_command(
json_path=json_path, executable_path=cmd[0])
p = subprocess.Popen(symbolize_command, stderr=subprocess.PIPE, env=env)
(_, stderr) = p.communicate()
except OSError as e:
print >> sys.stderr, 'Exception while symbolizing snippets: %s' % e
raise
if p.returncode != 0:
print >> sys.stderr, "Error: failed to symbolize snippets in JSON:\n"
print >> sys.stderr, stderr
raise subprocess.CalledProcessError(p.returncode, symbolize_command)
def run_command_with_output(argv, stdoutfile, env=None, cwd=None):
""" Run command and stream its stdout/stderr to the console & |stdoutfile|.
"""
print('Running %r in %r (env: %r)' % (argv, cwd, env))
assert stdoutfile
with io.open(stdoutfile, 'w') as writer, io.open(stdoutfile, 'r', 1) as \
reader:
process = subprocess.Popen(argv, env=env, cwd=cwd, stdout=writer,
stderr=subprocess.STDOUT)
while process.poll() is None:
sys.stdout.write(reader.read())
time.sleep(0.1)
# Read the remaining.
sys.stdout.write(reader.read())
print('Command %r returned exit code %d' % (argv, process.returncode))
return process.returncode
def run_executable(cmd, env, stdoutfile=None):
"""Runs an executable with:
- CHROME_HEADLESS set to indicate that the test is running on a
bot and shouldn't do anything interactive like show modal dialogs.
- environment variable CR_SOURCE_ROOT set to the root directory.
- environment variable LANGUAGE to en_US.UTF-8.
- environment variable CHROME_DEVEL_SANDBOX set
- Reuses sys.executable automatically.
"""
extra_env = {
# Set to indicate that the executable is running non-interactively on
# a bot.
'CHROME_HEADLESS': '1',
    # Many tests assume an English interface...
'LANG': 'en_US.UTF-8',
}
# Used by base/base_paths_linux.cc as an override. Just make sure the default
# logic is used.
env.pop('CR_SOURCE_ROOT', None)
extra_env.update(get_sandbox_env(env))
# Copy logic from tools/build/scripts/slave/runtest.py.
asan = '--asan=1' in cmd
lsan = '--lsan=1' in cmd
msan = '--msan=1' in cmd
tsan = '--tsan=1' in cmd
cfi_diag = '--cfi-diag=1' in cmd
if stdoutfile or sys.platform in ['win32', 'cygwin']:
# Symbolization works in-process on Windows even when sandboxed.
use_symbolization_script = False
else:
# LSan doesn't support sandboxing yet, so we use the in-process symbolizer.
# Note that ASan and MSan can work together with LSan.
use_symbolization_script = (asan or msan or cfi_diag) and not lsan
if asan or lsan or msan or tsan or cfi_diag:
extra_env.update(get_sanitizer_env(cmd, asan, lsan, msan, tsan, cfi_diag))
if lsan or tsan:
# LSan and TSan are not sandbox-friendly.
cmd.append('--no-sandbox')
cmd = trim_cmd(cmd)
# Ensure paths are correctly separated on windows.
cmd[0] = cmd[0].replace('/', os.path.sep)
cmd = fix_python_path(cmd)
print('Additional test environment:\n%s\n'
'Command: %s\n' % (
'\n'.join(' %s=%s' %
(k, v) for k, v in sorted(extra_env.iteritems())),
' '.join(cmd)))
sys.stdout.flush()
env.update(extra_env or {})
try:
if stdoutfile:
# Write to stdoutfile and poll to produce terminal output.
return run_command_with_output(cmd, env=env, stdoutfile=stdoutfile)
elif use_symbolization_script:
# See above comment regarding offline symbolization.
# Need to pipe to the symbolizer script.
p1 = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE,
stderr=sys.stdout)
p2 = subprocess.Popen(
get_sanitizer_symbolize_command(executable_path=cmd[0]),
env=env, stdin=p1.stdout)
p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits.
p1.wait()
p2.wait()
# Also feed the out-of-band JSON output to the symbolizer script.
symbolize_snippets_in_json(cmd, env)
return p1.returncode
else:
return subprocess.call(cmd, env=env)
except OSError:
print >> sys.stderr, 'Failed to start %s' % cmd
raise
def main():
return run_executable(sys.argv[1:], os.environ.copy())
if __name__ == '__main__':
sys.exit(main())
| 35.120996
| 80
| 0.69004
|
b88b21df9859466fa2bd0ca00e69abfbe371bd8c
| 2,091
|
py
|
Python
|
rapidsms/messages/incoming.py
|
Code4Salone/rapidsms
|
6f8354c0af04c1eccfa051782fcd71a6095532e9
|
[
"BSD-3-Clause"
] | null | null | null |
rapidsms/messages/incoming.py
|
Code4Salone/rapidsms
|
6f8354c0af04c1eccfa051782fcd71a6095532e9
|
[
"BSD-3-Clause"
] | 2
|
2018-08-03T18:48:09.000Z
|
2019-01-02T19:33:23.000Z
|
rapidsms/messages/incoming.py
|
Code4Salone/rapidsms
|
6f8354c0af04c1eccfa051782fcd71a6095532e9
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from rapidsms.messages.base import MessageBase
from rapidsms.messages.error import ErrorMessage
class IncomingMessage(MessageBase):
"""Inbound message that provides an API to handle responses.
"""
def __init__(self, *args, **kwargs):
if 'received_at' in kwargs:
raise Exception("IncomingMessage.received_at is meaningless")
super(IncomingMessage, self).__init__(*args, **kwargs)
#: list of messages created by IncomingMessage.respond()
self.responses = []
@property
def date(self):
raise Exception("IncomingMessage.date is meaningless")
def respond(self, text, **kwargs):
"""
Respond to this message, sending the given text to the connections
that this message came from.
Responses are saved, and sent after incoming processing phases are
complete.
Arbitrary arguments are passed along to the
:py:meth:`~rapidsms.router.send` method.
:param string text: The text of the message
:param connections: (optional) send to a different set of connections
than were in the incoming message.
:type connections: list of :py:class:`~rapidsms.models.Connection`
:param in_response_to: (optional) the message being responded to.
:type in_response_to: :py:class:`~rapidsms.messages.base.MessageBase`
:returns: dictionary with the arguments that will be passed to
:py:meth:`rapidsms.router.send` to send this response.
"""
if 'template' in kwargs:
raise TypeError("`template` is no longer valid usage for "
"respond(). Pass the message text as `text`.")
context = {'text': text,
'connections': self.connections,
'in_response_to': self}
context.update(kwargs)
self.responses.append(context)
return context
def error(self, text, **kwargs):
return self.respond(class_=ErrorMessage, text=text, **kwargs)
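# Illustrative handler sketch (an assumption about typical usage, not part of this module):
# an application replies to an inbound "ping" by queueing a response via respond().
from rapidsms.apps.base import AppBase
class PingApp(AppBase):
    """Toy app: answers "ping" with "pong"."""
    def handle(self, msg):
        if msg.text.strip().lower() == "ping":
            msg.respond("pong")  # stored on msg.responses, sent after the incoming phases
            return True
        return False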
| 36.684211
| 77
| 0.643711
|
e3df486cc520c7fa5e11d066dbea40fe85f266d5
| 3,293
|
py
|
Python
|
conans/test/unittests/client/conf/config_installer_test.py
|
laundry-96/conan
|
fd938f7220ca042d94c42ec5eb607ee69c6785a3
|
[
"MIT"
] | 1
|
2021-06-14T01:39:27.000Z
|
2021-06-14T01:39:27.000Z
|
conans/test/unittests/client/conf/config_installer_test.py
|
laundry-96/conan
|
fd938f7220ca042d94c42ec5eb607ee69c6785a3
|
[
"MIT"
] | 2
|
2018-02-22T21:28:04.000Z
|
2018-09-28T13:51:47.000Z
|
conans/test/unittests/client/conf/config_installer_test.py
|
laundry-96/conan
|
fd938f7220ca042d94c42ec5eb607ee69c6785a3
|
[
"MIT"
] | 1
|
2021-06-03T23:08:43.000Z
|
2021-06-03T23:08:43.000Z
|
import os
import unittest
from parameterized import parameterized
from conans.client.conf.config_installer import _process_config_install_item
from conans.errors import ConanException
from conans.test.utils.test_files import temp_folder
from conans.util.files import save
class ConfigInstallerTests(unittest.TestCase):
def process_config_install_item_test(self):
config_type, url_or_path, verify_ssl, args = _process_config_install_item(
"git, whaterver.url.com/repo.git, False, --recusrive --other -b 0.3.4")
self.assertEqual("git", config_type)
self.assertEqual("whaterver.url.com/repo.git", url_or_path)
self.assertFalse(verify_ssl)
self.assertEqual("--recusrive --other -b 0.3.4", args)
config_type, url_or_path, verify_ssl, args = _process_config_install_item("whaterver.url.com/repo.git")
self.assertEqual("git", config_type)
self.assertEqual("whaterver.url.com/repo.git", url_or_path)
self.assertIsNone(verify_ssl)
self.assertIsNone(args)
dir_path = temp_folder()
for dir_item in ["dir, %s, True, None" % dir_path, dir_path]:
config_type, url_or_path, verify_ssl, args = _process_config_install_item(dir_item)
self.assertEqual("dir", config_type)
self.assertEqual(dir_path, url_or_path)
self.assertTrue(verify_ssl) if dir_item.startswith("dir,")\
else self.assertIsNone(verify_ssl)
self.assertIsNone(args)
file_path = os.path.join(dir_path, "file.zip")
save(file_path, "")
for file_item in ["file, %s, True, None" % file_path, file_path]:
config_type, url_or_path, verify_ssl, args = _process_config_install_item(file_item)
self.assertEqual("file", config_type)
self.assertEqual(file_path, url_or_path)
self.assertTrue(verify_ssl) if file_item.startswith("file,") \
else self.assertIsNone(verify_ssl)
self.assertIsNone(args)
for url_item in ["url, http://is/an/absloute/path with spaces/here/file.zip, True, None",
"http://is/an/absloute/path with spaces/here/file.zip"]:
config_type, url_or_path, verify_ssl, args = _process_config_install_item(url_item)
self.assertEqual("url", config_type)
self.assertEqual("http://is/an/absloute/path with spaces/here/file.zip", url_or_path)
self.assertTrue(verify_ssl) if url_item.startswith("url,") \
else self.assertIsNone(verify_ssl)
self.assertIsNone(args)
config_type, url, verify_ssl, args = _process_config_install_item(
"url, http://is/an/absloute/path with spaces/here/file.zip,False, --option ")
self.assertEqual("url", config_type)
self.assertEqual("http://is/an/absloute/path with spaces/here/file.zip", url)
self.assertFalse(verify_ssl)
self.assertEqual("--option", args)
# Test wrong input
for item in ["git@github.com:conan-io/conan.git, None"
"file/not/exists.zip"]:
with self.assertRaisesRegexp(ConanException, "Unable to process config install"):
_, _, _, _ = _process_config_install_item(item)
| 49.149254
| 111
| 0.666262
|
443e67ef077448665b781b7cbd4b746ddf066e0b
| 3,291
|
py
|
Python
|
setup.py
|
sap-archive/cloud-sapjwt
|
b5244e68bc1f1b7bc2e69f8a10115cf1666d5e1b
|
[
"BSD-Source-Code"
] | 2
|
2019-07-09T21:23:18.000Z
|
2019-11-04T18:52:47.000Z
|
setup.py
|
sap-archive/cloud-sapjwt
|
b5244e68bc1f1b7bc2e69f8a10115cf1666d5e1b
|
[
"BSD-Source-Code"
] | null | null | null |
setup.py
|
sap-archive/cloud-sapjwt
|
b5244e68bc1f1b7bc2e69f8a10115cf1666d5e1b
|
[
"BSD-Source-Code"
] | null | null | null |
"""setup for sap-py-jwt
See:
https://github.com/SAP-samples/cloud-sapjwt
"""
# To use a consistent encoding
from codecs import open # pylint: disable=W0622
from os import path
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__)) # pylint: disable=invalid-name
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read() # pylint: disable=invalid-name
def get_version():
""" get version """
with open('version.txt') as ver_file:
version_str = ver_file.readline().rstrip()
return version_str
def get_install_requires():
""" install requires """
reqs = []
with open('requirements.txt') as reqs_file:
for line in iter(lambda: reqs_file.readline().rstrip(), ''):
reqs.append(line)
return reqs
def get_extras_require():
""" extras """
with open('test-requirements.txt') as reqs_file:
reqs = [line.rstrip() for line in reqs_file.readlines()]
return {'test': reqs}
setup(name="sap_py_jwt",
version=get_version(),
entry_points={"distutils.commands":
["whitesource_update = plugin.WssPythonPlugin:SetupToolsCommand"]},
packages=find_packages(exclude=['contrib', 'docs', 'tests*', 'coverage', 'scripts']),
description="SAP CP Security Client Library for JWT offline validation",
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=get_install_requires(),
extras_require=get_extras_require(),
keywords="sap jwt sapjwt python",
author="SAP SE",
author_email="secure@sap.com",
license="SAP Developer",
url="https://github.com/SAP-samples/cloud-sapjwt",
package=['sapjwt'],
package_dir={'sapjwt': 'sapjwt'},
package_data={'sapjwt': ['deps/linux/x64/libsapssoext.so',
'deps/linux/ppc64/libsapssoext.so',
'deps/linux/ppc64le/libsapssoext.so',
'deps/darwin/x64/libsapssoext.dylib',
'deps/win32/x64/sapssoext.dll'
]},
classifiers=[
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 5 - Production/Stable",
"Topic :: Security",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Operating System :: POSIX :: BSD",
"Operating System :: POSIX :: Linux",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
],
)
| 37.397727
| 91
| 0.618353
|
0f2a0c417595319bddc65b4145e1c2da365aedf7
| 78
|
py
|
Python
|
sppas/sppas/src/ui/phoenix/page_analyze/__init__.py
|
mirfan899/MTTS
|
3167b65f576abcc27a8767d24c274a04712bd948
|
[
"MIT"
] | null | null | null |
sppas/sppas/src/ui/phoenix/page_analyze/__init__.py
|
mirfan899/MTTS
|
3167b65f576abcc27a8767d24c274a04712bd948
|
[
"MIT"
] | null | null | null |
sppas/sppas/src/ui/phoenix/page_analyze/__init__.py
|
mirfan899/MTTS
|
3167b65f576abcc27a8767d24c274a04712bd948
|
[
"MIT"
] | null | null | null |
from .analyze import sppasAnalyzePanel
__all__ = (
"sppasAnalyzePanel"
)
| 13
| 38
| 0.74359
|
31e1e3fd02b33fcfd3d5a1999511688c10cdff65
| 1,754
|
py
|
Python
|
tensorflow_federated/python/learning/reconstruction/__init__.py
|
zhihansh/federated-oss
|
38cfcb05702ff7297db76d3ccb5f5afef53ca09b
|
[
"Apache-2.0"
] | 1
|
2022-02-08T01:11:14.000Z
|
2022-02-08T01:11:14.000Z
|
tensorflow_federated/python/learning/reconstruction/__init__.py
|
zhihansh/federated-oss
|
38cfcb05702ff7297db76d3ccb5f5afef53ca09b
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_federated/python/learning/reconstruction/__init__.py
|
zhihansh/federated-oss
|
38cfcb05702ff7297db76d3ccb5f5afef53ca09b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Libraries for using federated reconstruction algorithms."""
from tensorflow_federated.python.learning.reconstruction.evaluation_computation import build_federated_evaluation
from tensorflow_federated.python.learning.reconstruction.keras_utils import from_keras_model
from tensorflow_federated.python.learning.reconstruction.model import BatchOutput
from tensorflow_federated.python.learning.reconstruction.model import Model
from tensorflow_federated.python.learning.reconstruction.reconstruction_utils import build_dataset_split_fn
from tensorflow_federated.python.learning.reconstruction.reconstruction_utils import DatasetSplitFn
from tensorflow_federated.python.learning.reconstruction.reconstruction_utils import get_global_variables
from tensorflow_federated.python.learning.reconstruction.reconstruction_utils import get_local_variables
from tensorflow_federated.python.learning.reconstruction.reconstruction_utils import simple_dataset_split_fn
from tensorflow_federated.python.learning.reconstruction.training_process import build_training_process
from tensorflow_federated.python.learning.reconstruction.training_process import ClientOutput
| 64.962963
| 113
| 0.86146
|
e48cdf7a4ce34e8080bd1d05ffd87fcc3c2f1560
| 557
|
py
|
Python
|
advertorch/setup.py
|
sleepstagingrest/rest
|
cf0de7ae82b6b74fe23e9d057214970cd3c9672d
|
[
"MIT"
] | 18
|
2020-02-03T07:14:40.000Z
|
2021-12-20T18:45:43.000Z
|
advertorch/setup.py
|
sleepstagingrest/rest
|
cf0de7ae82b6b74fe23e9d057214970cd3c9672d
|
[
"MIT"
] | 11
|
2020-01-28T23:16:25.000Z
|
2022-02-10T01:04:56.000Z
|
advertorch/setup.py
|
sleepstagingrest/REST
|
cf0de7ae82b6b74fe23e9d057214970cd3c9672d
|
[
"MIT"
] | 2
|
2020-08-20T08:15:09.000Z
|
2021-02-23T07:30:40.000Z
|
# Copyright (c) 2018-present, Royal Bank of Canada.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
from setuptools import setup
from setuptools import find_packages
with open(os.path.join(os.path.dirname(__file__), 'advertorch/VERSION')) as f:
version = f.read().strip()
setup(name='advertorch',
version=version,
url='https://github.com/BorealisAI/advertorch',
include_package_data=True,
packages=find_packages())
| 25.318182
| 78
| 0.728905
|
decd98e50120348080b7b12e71cd5bd4863de355
| 2,981
|
py
|
Python
|
q1.py
|
rklogan/Finite-Element-Music-Synthesis
|
7d250678f7bdf6caebbd9a375faef7f3ed69cf16
|
[
"Unlicense"
] | null | null | null |
q1.py
|
rklogan/Finite-Element-Music-Synthesis
|
7d250678f7bdf6caebbd9a375faef7f3ed69cf16
|
[
"Unlicense"
] | null | null | null |
q1.py
|
rklogan/Finite-Element-Music-Synthesis
|
7d250678f7bdf6caebbd9a375faef7f3ed69cf16
|
[
"Unlicense"
] | null | null | null |
import sys
import numpy as np
import time
OUTPUT_TIMING_DATA = False
#define constants
grid_size = 4
eta = 0.0002
rho = 0.5
G = 0.75
#debug function. Not used in submission
def print_grid(grid, current_only=False):
for row in grid:
string = ''
for col in row:
if current_only:
string += str(col[0]) + '\t'
else:
string += str(col) + ', '
print(string)
#processes the internal part of the grid
def iterate(grid):
row = 1
while row < grid_size - 1:
col = 1
while col < grid_size - 1:
grid[row][col][0] = grid[row-1][col][1]
grid[row][col][0] += grid[row+1][col][1]
grid[row][col][0] += grid[row][col-1][1]
grid[row][col][0] += grid[row][col+1][1]
grid[row][col][0] -= 4 * grid[row][col][1]
grid[row][col][0] *= rho
grid[row][col][0] += 2 * grid[row][col][1]
grid[row][col][0] -= (1 - eta) * grid[row][col][2]
grid[row][col][0] /= (1 + eta)
col += 1
row += 1
return grid
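# The loop above implements the damped 2-D wave-equation update used by this simulation;
# writing u0 = u(t+1), u1 = u(t), u2 = u(t-1) for a cell and N for the sum of its four
# neighbours at time t, each interior cell is updated as
#   u0 = ( rho * (N - 4*u1) + 2*u1 - (1 - eta)*u2 ) / (1 + eta)
# which is exactly the expression accumulated step by step in grid[row][col][0].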
#processes the edges and corners
def apply_boundary_conditions(grid):
#apply the first 4 boundary conditions
i = 1
while i < grid_size - 1:
grid[0][i][0] = G * grid[1][i][0]
grid[grid_size-1][i][0] = G * grid[grid_size-2][i][0]
grid[i][0][0] = G * grid[i][1][0]
grid[i][grid_size -1][0] = G * grid[i][grid_size-2][0]
i += 1
#corner cases
grid[0][0][0] = G * grid[1][0][0]
grid[grid_size-1][0][0] = G * grid[grid_size-2][0][0]
grid[0][grid_size-1][0] = G * grid[0][grid_size-2][0]
grid[grid_size-1][grid_size-1][0] = G * grid[grid_size-1][grid_size-2][0]
return grid
#copies u1 to u2 and u0 to u1 for the grid
def propagate(grid):
row = 0
while row < grid_size:
col = 0
while col < grid_size:
grid[row][col][2] = grid[row][col][1]
grid[row][col][1] = grid[row][col][0]
col += 1
row += 1
return grid
if __name__ == "__main__":
# initialize the grid
    # np.float was removed in NumPy 1.24+; the builtin float keeps the original float64 dtype
    grid = np.zeros((grid_size, grid_size, 3), dtype=float)
grid[grid_size//2,grid_size//2,1] = 1
#get CLAs
num_iterations = 20
if len(sys.argv) >= 2:
num_iterations = int(sys.argv[1])
start = time.time()
#process the grid the required number of times
for i in range(num_iterations):
grid = iterate(grid)
grid = apply_boundary_conditions(grid)
grid = propagate(grid)
#format to match sample
u = grid[grid_size//2][grid_size//2][0]
u_string = '{:.6f}'.format(round(u, 6))
if u >= 0:
u_string = ' ' + u_string
if i < 9:
u_string = ' ' + u_string
print(str(i+1) + ': ' + u_string)
end = time.time()
if OUTPUT_TIMING_DATA:
with open('single-threaded.csv', 'a+') as f:
f.write(str(end-start) + ',')
| 27.859813
| 77
| 0.526669
|
beae614adbbb166cde6e1a7de77bec9bfe772ade
| 1,549
|
py
|
Python
|
duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/11_features/numtrees_30/rule_12.py
|
apcarrik/kaggle
|
6e2d4db58017323e7ba5510bcc2598e01a4ee7bf
|
[
"MIT"
] | null | null | null |
duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/11_features/numtrees_30/rule_12.py
|
apcarrik/kaggle
|
6e2d4db58017323e7ba5510bcc2598e01a4ee7bf
|
[
"MIT"
] | null | null | null |
duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/11_features/numtrees_30/rule_12.py
|
apcarrik/kaggle
|
6e2d4db58017323e7ba5510bcc2598e01a4ee7bf
|
[
"MIT"
] | null | null | null |
def findDecision(obj): #obj[0]: Passanger, obj[1]: Time, obj[2]: Coupon, obj[3]: Age, obj[4]: Education, obj[5]: Occupation, obj[6]: Bar, obj[7]: Coffeehouse, obj[8]: Restaurant20to50, obj[9]: Direction_same, obj[10]: Distance
# {"feature": "Age", "instances": 34, "metric_value": 0.7871, "depth": 1}
if obj[3]>1:
# {"feature": "Education", "instances": 25, "metric_value": 0.9044, "depth": 2}
if obj[4]>0:
# {"feature": "Bar", "instances": 17, "metric_value": 0.9975, "depth": 3}
if obj[6]<=1.0:
# {"feature": "Distance", "instances": 15, "metric_value": 0.971, "depth": 4}
if obj[10]>1:
# {"feature": "Time", "instances": 9, "metric_value": 0.7642, "depth": 5}
if obj[1]<=2:
return 'True'
elif obj[1]>2:
# {"feature": "Occupation", "instances": 4, "metric_value": 1.0, "depth": 6}
if obj[5]<=9:
return 'True'
elif obj[5]>9:
return 'False'
else: return 'False'
else: return 'True'
elif obj[10]<=1:
# {"feature": "Occupation", "instances": 6, "metric_value": 0.9183, "depth": 5}
if obj[5]>11:
return 'False'
elif obj[5]<=11:
# {"feature": "Time", "instances": 3, "metric_value": 0.9183, "depth": 6}
if obj[1]>0:
return 'True'
elif obj[1]<=0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'False'
elif obj[6]>1.0:
return 'False'
else: return 'False'
elif obj[4]<=0:
return 'True'
else: return 'True'
elif obj[3]<=1:
return 'True'
else: return 'True'
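# Worked example (the numeric feature encodings come from the upstream pipeline and are
# illustrative here): Age>1, Education>0, Bar<=1.0, Distance<=1, Occupation<=11 and Time>0
# walk the branches above down to 'True'.
assert findDecision([0, 1, 2, 2, 1, 5, 0.0, 1.0, 0.0, 1, 1]) == 'True'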
| 36.023256
| 226
| 0.562298
|
fce225e87c40716a539b67a0324275c25ed9202f
| 897
|
py
|
Python
|
test/test_customer_video_stats_list_response.py
|
kinow-io/kaemo-python-sdk
|
610fce09e3a9e631babf09195b0492959d9e4d56
|
[
"Apache-2.0"
] | 1
|
2017-05-03T12:48:22.000Z
|
2017-05-03T12:48:22.000Z
|
test/test_customer_video_stats_list_response.py
|
kinow-io/kaemo-python-sdk
|
610fce09e3a9e631babf09195b0492959d9e4d56
|
[
"Apache-2.0"
] | null | null | null |
test/test_customer_video_stats_list_response.py
|
kinow-io/kaemo-python-sdk
|
610fce09e3a9e631babf09195b0492959d9e4d56
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.9
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kinow_client
from kinow_client.rest import ApiException
from kinow_client.models.customer_video_stats_list_response import CustomerVideoStatsListResponse
class TestCustomerVideoStatsListResponse(unittest.TestCase):
""" CustomerVideoStatsListResponse unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testCustomerVideoStatsListResponse(self):
"""
Test CustomerVideoStatsListResponse
"""
model = kinow_client.models.customer_video_stats_list_response.CustomerVideoStatsListResponse()
if __name__ == '__main__':
unittest.main()
| 20.860465
| 103
| 0.735786
|
fd780e955efca20dd3a1104f87d39fd1fd8b9148
| 339
|
py
|
Python
|
setup.py
|
jrminter/ezwrappers
|
89da5bb0f555901813a4da0e1c60a193c3c77d65
|
[
"MIT"
] | null | null | null |
setup.py
|
jrminter/ezwrappers
|
89da5bb0f555901813a4da0e1c60a193c3c77d65
|
[
"MIT"
] | null | null | null |
setup.py
|
jrminter/ezwrappers
|
89da5bb0f555901813a4da0e1c60a193c3c77d65
|
[
"MIT"
] | null | null | null |
from setuptools import setup
setup(name='ezwrappers',
version='0.0.910.dev0',
description='Utility functions for python 3.x w/o hyperspy',
url='http://github.com/jrminter/ezwrappers',
author='Many',
author_email='jrminter@gmail.com',
license='MIT',
packages=['ezwrappers'],
zip_safe=False)
| 28.25
| 66
| 0.651917
|
56d2e8119b5d2f8b78119df16d4f7749b7e076d8
| 4,371
|
py
|
Python
|
light_field_neural_rendering/src/models/transformer.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-02-25T05:34:44.000Z
|
2022-02-25T05:34:44.000Z
|
light_field_neural_rendering/src/models/transformer.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | null | null | null |
light_field_neural_rendering/src/models/transformer.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1
|
2022-03-30T07:20:29.000Z
|
2022-03-30T07:20:29.000Z
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer model."""
import dataclasses
import functools
from typing import Any, Callable, Optional, Tuple
from flax import linen as nn
from jax import lax
import jax.numpy as jnp
from light_field_neural_rendering.src.utils import config_utils
PRNGKey = Any
Shape = Tuple[int]
Dtype = Any
Array = Any
def _resolve(a, b):
"""Returns a if a is not None, else returns b."""
if a is not None:
return a
else:
return b
# The function is tweaked from https://github.com/google/flax/blob/main/examples/wmt/models.py
class LearnedPositionEmbs(nn.Module):
"""Learned positional embeddings."""
max_length: int # Max length of the 2nd dimension
@nn.compact
def __call__(
self,
inputs,
input_positions=None,
):
"""Add a leaned positional embeding to the input Args:
inputs: input data. (bs, near_view, num_proj, in_dim)
Returns:
output: `(bs, near_view, num_proj, in_dim)`
"""
input_shape = inputs.shape
pos_emb_shape = (1, self.max_length, inputs.shape[-1])
pos_embedding = self.param('pos_embedding',
nn.initializers.normal(stddev=1e-6),
pos_emb_shape)
if input_positions is not None:
pos_embedding = jnp.take(pos_embedding, input_positions, axis=1)
return pos_embedding
class Mlp(nn.Module):
"""Transformer MLP block with single hidden layer."""
hidden_params: Optional[int] = None
out_params: Optional[int] = None
dropout_rate: float = 0.
kernel_init: Callable[[PRNGKey, Shape, Dtype], Array] = (
nn.initializers.xavier_uniform())
bias_init: Callable[[PRNGKey, Shape, Dtype], Array] = (
nn.initializers.normal(stddev=1e-6))
@nn.compact
def __call__(self, inputs, deterministic):
h = nn.Dense(
features=_resolve(self.hidden_params, inputs.shape[-1]),
kernel_init=self.kernel_init,
bias_init=self.bias_init)( # pytype: disable=wrong-arg-types
inputs)
h = nn.gelu(h)
h = nn.Dense(
features=_resolve(self.out_params, inputs.shape[-1]),
kernel_init=self.kernel_init,
bias_init=self.bias_init)( # pytype: disable=wrong-arg-types
h)
return h
class SelfAttentionTransformerLayer(nn.Module):
"""Transformer layer."""
attention_heads: int
qkv_params: Optional[int] = None
mlp_params: Optional[int] = None
dropout_rate: float = 0.
@nn.compact
def __call__(self, query, deterministic):
out_params = query.shape[-1]
aux = {}
# Attention from query to value
attention_output = nn.SelfAttention(
num_heads=self.attention_heads,
qkv_features=self.qkv_params,
out_features=out_params,
dropout_rate=self.dropout_rate)(
query, deterministic=deterministic)
normalized_attention_output = nn.LayerNorm()(query + attention_output)
mlp_output = Mlp(
hidden_params=self.mlp_params,
out_params=out_params,
dropout_rate=self.dropout_rate)(
normalized_attention_output, deterministic=deterministic)
return nn.LayerNorm()(normalized_attention_output + mlp_output)
class SelfAttentionTransformer(nn.Module):
"""Self Attention Transformer."""
params: config_utils.TransformerParams # Network parameters.
@nn.compact
def __call__(self, points, deterministic):
"""Call the transformer on a set of inputs."""
for i in range(self.params.num_layers):
points = SelfAttentionTransformerLayer(
attention_heads=self.params.attention_heads,
qkv_params=self.params.qkv_params,
mlp_params=self.params.mlp_params,
dropout_rate=self.params.dropout_rate)(
query=points, deterministic=deterministic)
return points
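# Minimal smoke-test sketch (illustrative; relies only on the flax/jax imports above plus
# `jax` itself). A single self-attention layer maps (batch, tokens, dim) back to the same shape.
if __name__ == "__main__":
  import jax
  layer = SelfAttentionTransformerLayer(attention_heads=4, qkv_params=64, mlp_params=128)
  x = jnp.ones((2, 16, 32))
  variables = layer.init(jax.random.PRNGKey(0), x, deterministic=True)
  y = layer.apply(variables, x, deterministic=True)
  assert y.shape == (2, 16, 32)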
| 30.566434
| 94
| 0.693663
|
b13d9ed8055b9a5c7e3cb6f4e27e7a6594da50a6
| 259
|
py
|
Python
|
exercises/ex10.py
|
gravyboat/python-exercises
|
50162a9e6f3d51fbb2c15ed08fcecba810d61338
|
[
"MIT"
] | null | null | null |
exercises/ex10.py
|
gravyboat/python-exercises
|
50162a9e6f3d51fbb2c15ed08fcecba810d61338
|
[
"MIT"
] | null | null | null |
exercises/ex10.py
|
gravyboat/python-exercises
|
50162a9e6f3d51fbb2c15ed08fcecba810d61338
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
def overlapping(first_list, second_list):
'''
Takes two lists and checks if they have at least one number in common
'''
for item in first_list:
if item in second_list:
            return True
    return False
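# Examples (illustrative): the first pair of lists shares the value 3, the second shares nothing.
assert overlapping([1, 2, 3], [3, 4, 5]) is True
assert overlapping([1, 2], [3, 4]) is False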
| 21.583333
| 73
| 0.625483
|
d6990d9cd0c07fe8dfe0c1623a5f20fdd38db5ed
| 3,577
|
py
|
Python
|
aav/util/model_utils.py
|
muell-monster/google-research
|
04d2024f4723bc4be3d639a668c19fb1f6a31478
|
[
"Apache-2.0"
] | 2
|
2021-01-06T04:28:23.000Z
|
2021-02-24T13:46:04.000Z
|
aav/util/model_utils.py
|
Alfaxad/google-research
|
2c0043ecd507e75e2df9973a3015daf9253e1467
|
[
"Apache-2.0"
] | 7
|
2021-11-10T19:44:38.000Z
|
2022-02-10T06:48:39.000Z
|
aav/util/model_utils.py
|
Alfaxad/google-research
|
2c0043ecd507e75e2df9973a3015daf9253e1467
|
[
"Apache-2.0"
] | 4
|
2021-02-08T10:25:45.000Z
|
2021-04-17T14:46:26.000Z
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model utilities for extracting information from training checkpoints."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pandas
import tensorflow as tf
def get_best_checkpoint_path(
model_dir, metric='loss', eval_subdir='eval_one_pass'):
"""Gets the path of the best checkpoint by given metric.
Args:
model_dir: (str) Path to tf.Estimator model.
metric: (str) Model evaluation metric over which to optimize.
eval_subdir: (str) Subdir path within model_dir to search for evaluation
events.
Returns:
(str) The path to the model best checkpoint.
Raises:
ValueError: If the given metric is not supported.
"""
events = tf.event_accumulator.EventAccumulator(
os.path.join(model_dir, eval_subdir))
events.Reload() # Actually read the event files into memory.
step = None
if metric == 'precision':
step = _get_best_checkpoint_step(events, metric, higher_is_better=True)
elif metric == 'loss':
step = _get_best_checkpoint_step(events, metric, higher_is_better=False)
elif metric == 'accuracy':
step = _get_best_checkpoint_step(events, metric, higher_is_better=True)
elif metric == 'recall':
step = _get_best_checkpoint_step(events, metric, higher_is_better=True)
else:
raise ValueError('Unknown metric "%s" is not supported' % metric)
return os.path.join(model_dir, 'model.ckpt-%d' % step)
def _get_best_checkpoint_step(
events, metric_key='precision', higher_is_better=True):
"""Gets the global step number of the best checkpoint by given metric.
Args:
events: (tf.Events) The summary events for a model evaluation.
metric_key: (str) The model evaluation metric key to optimize over.
higher_is_better: (bool) Is a higher value of the metric better?
Returns:
(int) The global step number of the best checkpoint.
"""
summary_df = pandas.DataFrame([
{'step': entry.step, metric_key: entry.value}
for entry in events.Scalars(metric_key)
])
metric = summary_df[metric_key]
best_index = None
if higher_is_better:
best_index = metric.idxmax()
else:
best_index = metric.idxmin()
best_checkpoint = summary_df.iloc[best_index]
return best_checkpoint.step
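# Illustrative selection-rule sketch (hypothetical numbers, not part of the original module):
# with a "lower is better" metric such as loss, idxmin picks the winning row.
def _selection_rule_example():
  """Return the step with the smallest loss in a toy summary frame (illustrative only)."""
  df = pandas.DataFrame([{'step': 100, 'loss': 0.9},
                         {'step': 200, 'loss': 0.4},
                         {'step': 300, 'loss': 0.6}])
  best = df.iloc[df['loss'].idxmin()]
  return int(best.step)  # -> 200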
| 35.77
| 80
| 0.727705
|
f9a7c684dd923abc734b92869ee33441ddc5a8dd
| 1,510
|
py
|
Python
|
game/passwordValidator.py
|
MayankShrivastava17/algorithms-python-hacktoberfest-2021
|
bfb06448229c6a00f81f126e62f212205ce7d7e7
|
[
"MIT"
] | 4
|
2021-10-01T13:22:20.000Z
|
2021-10-04T11:39:25.000Z
|
game/passwordValidator.py
|
MayankShrivastava17/algorithms-python-hacktoberfest-2021
|
bfb06448229c6a00f81f126e62f212205ce7d7e7
|
[
"MIT"
] | 2
|
2021-10-11T16:56:03.000Z
|
2021-10-30T14:25:25.000Z
|
game/passwordValidator.py
|
MayankShrivastava17/algorithms-python-hacktoberfest-2021
|
bfb06448229c6a00f81f126e62f212205ce7d7e7
|
[
"MIT"
] | 10
|
2021-10-11T12:28:48.000Z
|
2021-10-31T16:37:02.000Z
|
upper=["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z"]
upper_new=[]
lower=["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"]
lower_new=[]
number=["0","1","2","3","4","5","6","7","8","9"]
number_new=[]
character=["$","#","@"]
character_new=[]
while True:
    # reset the per-attempt buckets so characters from a previous attempt are not re-counted
    upper_new = []
    lower_new = []
    number_new = []
    character_new = []
    password = list(input("Enter Password: "))
if len(password)<6:
print("Password is too short")
continue
elif len(password)>16:
print("Password is too long")
continue
else:
for i in password:
if i in upper:
upper_new.append(i)
elif i in lower:
lower_new.append(i)
elif i in number:
number_new.append(i)
elif i in character:
character_new.append(i)
if len(upper_new)==0:
print("Password Must Contain At Least 1 Uppercase Alphabet")
continue
elif len(lower_new)==0:
print("Password Must Contain At Least 1 Lowercase Alphabet")
continue
elif len(character_new)==0:
print("Password Must Contain At Least 1 Special Character")
continue
elif len(number_new)==0:
print("Password Must Contain At Least 1 Number Between 0-9")
continue
else:
print("Congratulations ! You Entered A Valid Password")
break
| 27.454545
| 111
| 0.49404
|
c947c21401e6c3311c0b41b322793fcd77e2e4d7
| 10,328
|
py
|
Python
|
docs/conf.py
|
invenio-toaster/invenio-marc21
|
98f7259d5dc14763fb44cd90908a0c9b802ec605
|
[
"MIT"
] | 1
|
2016-06-10T04:50:20.000Z
|
2016-06-10T04:50:20.000Z
|
docs/conf.py
|
invenio-toaster/invenio-marc21
|
98f7259d5dc14763fb44cd90908a0c9b802ec605
|
[
"MIT"
] | 41
|
2016-02-11T15:21:32.000Z
|
2018-03-15T16:06:30.000Z
|
docs/conf.py
|
invenio-toaster/invenio-marc21
|
98f7259d5dc14763fb44cd90908a0c9b802ec605
|
[
"MIT"
] | 25
|
2016-02-12T09:51:21.000Z
|
2021-12-07T08:59:05.000Z
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
from __future__ import print_function
import os
import sphinx.environment
from docutils.utils import get_source_line
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Do not warn on external images.
suppress_warnings = ['image.nonlocal_uri']
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Invenio-MARC21'
copyright = u'2016, CERN'
author = u'CERN'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Get the version string. Cannot be done with import!
g = {}
with open(os.path.join(os.path.dirname(__file__), '..', 'invenio_marc21',
'version.py'), 'rt') as fp:
exec(fp.read(), g)
version = g['__version__']
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
html_theme = 'alabaster'
html_theme_options = {
'description': 'Invenio module with nice defaults for MARC21 overlay.',
'github_user': 'inveniosoftware',
'github_repo': 'invenio-marc21',
'github_button': False,
'github_banner': True,
'show_powered_by': False,
'extra_nav_links': {
'invenio-marc21@GitHub': 'https://github.com/inveniosoftware/invenio-marc21',
'invenio-marc21@PyPI': 'https://pypi.python.org/pypi/invenio-marc21/',
}
}
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'invenio-marc21_namedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'invenio-marc21.tex', u'invenio-marc21 Documentation',
u'CERN', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'invenio-marc21', u'invenio-marc21 Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'invenio-marc21', u'Invenio-MARC21 Documentation',
author, 'invenio-marc21', 'Invenio module with nice defaults for MARC21 overlay.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'https://docs.python.org/': None,
'flask': (
'http://flask.pocoo.org/docs/', None),
'invenio-pidstore': (
'https://invenio-pidstore.readthedocs.io/en/latest/', None),
'invenio-records': (
'https://invenio-records.readthedocs.io/en/latest/', None),
}
# Autodoc configuration.
autoclass_content = 'both'
| 31.392097
| 85
| 0.70488
|
252564c77cefbb21c122f70fa88a032329e73b7e
| 112
|
py
|
Python
|
pages/urls.py
|
chulth/CV
|
79be708588455bf2894b005734fb137eb5e338e1
|
[
"CC0-1.0"
] | 3
|
2021-12-08T21:36:52.000Z
|
2021-12-15T15:49:33.000Z
|
pages/urls.py
|
chulth/CV
|
79be708588455bf2894b005734fb137eb5e338e1
|
[
"CC0-1.0"
] | 1
|
2022-02-28T22:50:06.000Z
|
2022-02-28T22:50:06.000Z
|
pages/urls.py
|
chulth/CV
|
79be708588455bf2894b005734fb137eb5e338e1
|
[
"CC0-1.0"
] | 1
|
2022-02-15T17:18:15.000Z
|
2022-02-15T17:18:15.000Z
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
]
| 18.666667
| 40
| 0.651786
|
8ede39824e9633bb4c1b875280ccb4d72874754c
| 342
|
py
|
Python
|
DjangoApp/stwms_app/migrations/0021_remove_rawmaterialrequest_status.py
|
EricBrianAnil/smarTangle-auth0
|
777a260260dfc36b338c141aac9a69d475f0adeb
|
[
"Apache-2.0"
] | null | null | null |
DjangoApp/stwms_app/migrations/0021_remove_rawmaterialrequest_status.py
|
EricBrianAnil/smarTangle-auth0
|
777a260260dfc36b338c141aac9a69d475f0adeb
|
[
"Apache-2.0"
] | null | null | null |
DjangoApp/stwms_app/migrations/0021_remove_rawmaterialrequest_status.py
|
EricBrianAnil/smarTangle-auth0
|
777a260260dfc36b338c141aac9a69d475f0adeb
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.0.3 on 2020-06-24 03:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('stwms_app', '0020_auto_20200624_0927'),
]
operations = [
migrations.RemoveField(
model_name='rawmaterialrequest',
name='status',
),
]
| 19
| 49
| 0.608187
|
dc9a0e0e0c27aa4732a607d0281f94234dbcf5ef
| 385
|
py
|
Python
|
src/fidesops/schemas/redis_cache.py
|
eastandwestwind/fidesops
|
93e2881c0fdc30075b7cc22024965d18cec0bdea
|
[
"Apache-2.0"
] | 41
|
2021-11-01T23:53:43.000Z
|
2022-03-22T23:07:56.000Z
|
src/fidesops/schemas/redis_cache.py
|
eastandwestwind/fidesops
|
93e2881c0fdc30075b7cc22024965d18cec0bdea
|
[
"Apache-2.0"
] | 235
|
2021-11-01T20:31:55.000Z
|
2022-03-31T15:40:58.000Z
|
src/fidesops/schemas/redis_cache.py
|
eastandwestwind/fidesops
|
93e2881c0fdc30075b7cc22024965d18cec0bdea
|
[
"Apache-2.0"
] | 12
|
2021-11-02T00:44:51.000Z
|
2022-03-14T16:23:10.000Z
|
from typing import Optional
from pydantic import Extra
from fidesops.schemas.base_class import BaseSchema
class PrivacyRequestIdentity(BaseSchema):
"""Some PII grouping pertaining to a human"""
phone_number: Optional[str] = None
email: Optional[str] = None
class Config:
"""Only allow phone_number and email to be supplied"""
extra = Extra.forbid
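# Usage sketch (the values below are hypothetical, included only to illustrate
# the Config above): because of ``extra = Extra.forbid``,
#   PrivacyRequestIdentity(email="user@example.com") validates, while
#   PrivacyRequestIdentity(email="user@example.com", name="x") raises a
#   pydantic ValidationError for the unexpected "name" field.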
| 22.647059
| 62
| 0.719481
|
4ac8e1730e0561df713a47a6a427a67c30b8b871
| 765
|
py
|
Python
|
buuctf/31-0ctf_2018_heapstorm2/exp.py
|
RoderickChan/ctf_tasks
|
a021c6d86cade26448d099933f3caa856ed28360
|
[
"MIT"
] | null | null | null |
buuctf/31-0ctf_2018_heapstorm2/exp.py
|
RoderickChan/ctf_tasks
|
a021c6d86cade26448d099933f3caa856ed28360
|
[
"MIT"
] | null | null | null |
buuctf/31-0ctf_2018_heapstorm2/exp.py
|
RoderickChan/ctf_tasks
|
a021c6d86cade26448d099933f3caa856ed28360
|
[
"MIT"
] | null | null | null |
#! /usr/bin/python3
from pwncli import *
cli_script()
def allocate(p:tube, size):
p.sendlineafter("Command: ", "1")
p.sendlineafter("Size: ", str(size))
p.recvline()
def update(p:tube, idx, size, content):
p.sendlineafter("Command: ", "2")
p.sendlineafter("Index: ", str(idx))
p.sendlineafter("Size: ", str(size))
p.sendafter("Content: ", content)
p.recvline()
def delete(p:tube, idx):
p.sendlineafter("Command: ", "3")
p.sendlineafter("Index: ", str(idx))
p.recvline()
def view(p:tube, idx):
p.sendlineafter("Command: ", "4")
p.sendlineafter("Index: ", str(idx))
msg = p.recvuntil("1. Allocate\n")
info("msg recv: {}".format(msg))
return msg
def attack(p:tube):
pass
attack(gift['io'])
| 20.675676
| 40
| 0.607843
|
7f60a3dcf4ce91ddc38916e9a81b059da308a945
| 232
|
py
|
Python
|
reverse_complement.py
|
rjcc/bioinformatics_algorithms
|
99a5e564f8c02e6da5ee9e186bf8d5d3f1effb92
|
[
"MIT"
] | 1
|
2019-05-13T13:44:57.000Z
|
2019-05-13T13:44:57.000Z
|
reverse_complement.py
|
rjcc/bioinformatics_algorithms
|
99a5e564f8c02e6da5ee9e186bf8d5d3f1effb92
|
[
"MIT"
] | null | null | null |
reverse_complement.py
|
rjcc/bioinformatics_algorithms
|
99a5e564f8c02e6da5ee9e186bf8d5d3f1effb92
|
[
"MIT"
] | 1
|
2019-11-07T12:47:12.000Z
|
2019-11-07T12:47:12.000Z
|
def complement(p):
if (p == "A"): return "T"
if (p == "C"): return "G"
if (p == "G"): return "C"
if (p == "T"): return "A"
def reverse_complement(pattern):
p = ""
for i in pattern:
p = complement(i) + p
return p
| 17.846154
| 32
| 0.517241
|
043429e8f687518ec0c3a02d638f82940fac2853
| 1,083
|
py
|
Python
|
pyscreenshot/check/speedtest.py
|
gatoatigrado/pyscreenshot
|
40233cf17492f2f6b2f7e0950ac42317e9487b40
|
[
"BSD-2-Clause"
] | null | null | null |
pyscreenshot/check/speedtest.py
|
gatoatigrado/pyscreenshot
|
40233cf17492f2f6b2f7e0950ac42317e9487b40
|
[
"BSD-2-Clause"
] | null | null | null |
pyscreenshot/check/speedtest.py
|
gatoatigrado/pyscreenshot
|
40233cf17492f2f6b2f7e0950ac42317e9487b40
|
[
"BSD-2-Clause"
] | null | null | null |
import pyscreenshot
import tempfile
import time
import shutil
def run(force_backend, n):
tmpdir = tempfile.mkdtemp(prefix='pyscreenshot_speedtest_')
start = time.time()
for _ in range(n):
pyscreenshot.grab(
backend=force_backend, childprocess=True)
end = time.time()
dt = end - start
s = ''
s += '%-20s' % force_backend
s += '\t'
s += '%-4.2g sec' % dt
s += '\t'
s += '(%5d ms per call)' % (1000.0 * dt / n)
print(s)
shutil.rmtree(tmpdir)
def run_all(n):
print('')
s = ''
s += 'n=%s' % n
print(s)
print('------------------------------------------------------')
for x in pyscreenshot.backends():
try:
run(x, n)
except pyscreenshot.FailedBackendError as e:
print(e)
def speedtest():
n = 10
run_all(n)
def main(virtual_display=False):
if virtual_display:
from pyvirtualdisplay import Display
with Display(visible=0):
speedtest()
else:
speedtest()
if __name__ == '__main__':
main()
| 19
| 67
| 0.527239
|
69827888d485788c8d6a18a21b8e39b5bb87ebde
| 1,911
|
py
|
Python
|
tests/ogm/test_validators.py
|
TwoBitAlchemist/NeoAlchemy
|
e28634734da3aa6b80eeb439cfc217d9b7de82a8
|
[
"MIT"
] | 25
|
2016-07-19T00:21:22.000Z
|
2021-12-28T14:37:54.000Z
|
tests/ogm/test_validators.py
|
TwoBitAlchemist/NeoAlchemy
|
e28634734da3aa6b80eeb439cfc217d9b7de82a8
|
[
"MIT"
] | 4
|
2016-12-29T22:08:27.000Z
|
2021-09-04T07:03:09.000Z
|
tests/ogm/test_validators.py
|
TwoBitAlchemist/NeoAlchemy
|
e28634734da3aa6b80eeb439cfc217d9b7de82a8
|
[
"MIT"
] | 9
|
2017-03-21T07:40:40.000Z
|
2021-10-25T00:16:57.000Z
|
"""Tests for type validators"""
import uuid
import pytest
from neoalchemy.validators import *
def test_date_validators():
assert isodate('Mar 24, 1985') == '1985-03-24'
assert isodate('03/24/1985') == '1985-03-24'
assert isodate('1985-03-24') == '1985-03-24'
assert isodate('3-24-85') == '1985-03-24'
with pytest.raises(ValueError):
isodate('chicken nugget')
assert isodatetime('Mar 24, 1985 10:50 PM') == '1985-03-24T22:50:00'
assert isodatetime('Mar 24, 1985 10:50 p.m.') == '1985-03-24T22:50:00'
assert isodatetime('24 Mar, 1985 22:50') == '1985-03-24T22:50:00'
with pytest.raises(ValueError):
isodatetime('cat loaf')
def test_uuid_validator():
assert UUID(uuid.uuid1())
assert UUID(uuid.uuid4())
with pytest.raises(ValueError):
assert UUID('12345')
def test_varchar_validator():
max5 = varchar(5)
assert max5('hello') == 'hello'
assert max5('hi') == 'hi'
with pytest.raises(ValueError):
assert max5('hello!')
max3 = varchar(3)
assert max3('hi') == 'hi'
with pytest.raises(ValueError):
assert max3('hello')
def test_IP_validators():
assert IPv4('192.168.1.1') == '192.168.1.1'
assert IPv4('0.0.0.0') == '0.0.0.0'
with pytest.raises(ValueError):
assert IPv4('123.456.789.000')
assert IPv6('::1') == '::1'
assert IPv6('::FFFF:123:456:789:000') == '::FFFF:123:456:789:000'
assert IPv6('0:0:0:0:0:0:0:1') == '0:0:0:0:0:0:0:1'
with pytest.raises(ValueError):
assert IPv6('2345235:5923058209385:wtfisthis')
assert IP('192.168.1.1') == '192.168.1.1'
assert IP('0.0.0.0') == '0.0.0.0'
assert IP('::1') == '::1'
assert IP('::FFFF:123:456:789:000') == '::FFFF:123:456:789:000'
assert IP('0:0:0:0:0:0:0:1') == '0:0:0:0:0:0:0:1'
with pytest.raises(ValueError):
assert IP('Good morning starshine the earth says hello')
| 32.389831
| 74
| 0.608582
|
1feb9d61b5b252eb7443fd00ac639d2c48ec0f59
| 492
|
py
|
Python
|
quizzie/urls.py
|
JogiDheeraj/hello
|
1c918d9709a5c07b1df38da1ff05fbece60585db
|
[
"MIT"
] | null | null | null |
quizzie/urls.py
|
JogiDheeraj/hello
|
1c918d9709a5c07b1df38da1ff05fbece60585db
|
[
"MIT"
] | null | null | null |
quizzie/urls.py
|
JogiDheeraj/hello
|
1c918d9709a5c07b1df38da1ff05fbece60585db
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth.views import login, logout
from game.views import index
urlpatterns = [
url(r'^$', index, name='homepage'),
url(r'^accounts/login/$', login, name='login'),
url(r'^accounts/logout/$', logout, name='logout'),
url(r'^auth/', include('social_django.urls', namespace='social')),
url(r'^admin/', admin.site.urls),
url(r'^quiz/', include('game.urls', namespace='quiz')),
]
| 32.8
| 70
| 0.672764
|
0ff29e53e76b7f2e22558926c3cfa8c3baed514b
| 1,312
|
py
|
Python
|
DEVWKS-1512/code/jinja_create_vlan.py
|
akennerly/byrne-workshops
|
5a002c511a42a598736ca6c96967a22ec09432c8
|
[
"MIT"
] | 8
|
2018-10-08T16:51:42.000Z
|
2021-04-05T22:01:30.000Z
|
DEVWKS-1512/code/jinja_create_vlan.py
|
akennerly/byrne-workshops
|
5a002c511a42a598736ca6c96967a22ec09432c8
|
[
"MIT"
] | 3
|
2019-01-10T17:39:38.000Z
|
2021-03-31T18:50:08.000Z
|
DEVWKS-1512/code/jinja_create_vlan.py
|
akennerly/byrne-workshops
|
5a002c511a42a598736ca6c96967a22ec09432c8
|
[
"MIT"
] | 10
|
2019-05-23T07:31:33.000Z
|
2021-02-23T20:04:41.000Z
|
#!/usr/bin/env python
"""
Copyright (c) 2019 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.0 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied.
"""
__author__ = "Bryan Byrne <brybyrne@cisco.com>"
__contributors__ = [
]
__copyright__ = "Copyright (c) 2019 Cisco and/or its affiliates."
__license__ = "Cisco Sample Code License, Version 1.0"
from jinja2 import Template
template_in = Template("""
vlan {{ id }}
name {{ vlan_name }}
!
interface vlan {{ id }}
ip address {{ ip_addr }} {{ mask }}
""")
template_out = template_in.render(id=200,
vlan_name='DATA',
ip_addr='192.168.1.1',
mask="255.255.255.0")
print(template_out)
| 29.818182
| 72
| 0.634909
|
8df2be99c4663be0348ead924e2a13f808f4876d
| 192
|
py
|
Python
|
src/plugin/__init__.py
|
changleibox/flutter_build_script
|
a93a7d9ce276b68c3a2d34b5830a4fc9683e574b
|
[
"Apache-2.0"
] | null | null | null |
src/plugin/__init__.py
|
changleibox/flutter_build_script
|
a93a7d9ce276b68c3a2d34b5830a4fc9683e574b
|
[
"Apache-2.0"
] | null | null | null |
src/plugin/__init__.py
|
changleibox/flutter_build_script
|
a93a7d9ce276b68c3a2d34b5830a4fc9683e574b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 CHANGLEI. All rights reserved.
# Created by changlei on 2020/6/29.
from src.plugin.dingtalk_chatbot import DingtalkChatbot
| 27.428571
| 55
| 0.729167
|
95a9c60c677322304b6cd09bde1d137d7360ea76
| 21,926
|
py
|
Python
|
tensorflow/contrib/layers/python/layers/optimizers_test.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/layers/python/layers/optimizers_test.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/layers/python/layers/optimizers_test.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for optimizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.layers.python.layers import optimizers as optimizers_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
def _setup_model():
x = array_ops.placeholder(dtypes.float32, [])
var = variable_scope.get_variable(
"test", [], initializer=init_ops.constant_initializer(10))
loss = math_ops.abs(var * x)
global_step = variable_scope.get_variable(
"global_step", [],
trainable=False,
dtype=dtypes.int64,
initializer=init_ops.constant_initializer(
0, dtype=dtypes.int64))
return x, var, loss, global_step
def _no_op_learning_rate_decay_fn(lr, global_step):
assert lr is not None
assert global_step is not None
return lr
class OptimizersTest(test.TestCase):
def testSGDOptimizer(self):
optimizers = [
"SGD", gradient_descent.GradientDescentOptimizer,
gradient_descent.GradientDescentOptimizer(learning_rate=0.1),
lambda lr: gradient_descent.GradientDescentOptimizer(learning_rate=lr),
"Momentum"
]
for optimizer in optimizers:
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
x, var, loss, global_step = _setup_model()
train = optimizers_lib.optimize_loss(
loss, global_step, learning_rate=0.1, optimizer=optimizer)
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
var_value, global_step_value = session.run([var, global_step])
self.assertEqual(var_value, 9.5)
self.assertEqual(global_step_value, 1)
def testNoLrCallable(self):
def optimizer_fn():
return gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
x, var, loss, global_step = _setup_model()
train = optimizers_lib.optimize_loss(
loss, global_step, learning_rate=None, optimizer=optimizer_fn)
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
var_value, global_step_value = session.run([var, global_step])
self.assertEqual(var_value, 9.5)
self.assertEqual(global_step_value, 1)
def testWrongOptimizer(self):
optimizers = ["blah", variables.Variable, object(), lambda x: None]
for optimizer in optimizers:
with ops.Graph().as_default() as g:
with self.session(graph=g):
_, _, loss, global_step = _setup_model()
with self.assertRaises(ValueError):
optimizers_lib.optimize_loss(
loss, global_step, learning_rate=0.1, optimizer=optimizer)
def testBadSummaries(self):
with ops.Graph().as_default() as g, self.session(graph=g):
_, _, loss, global_step = _setup_model()
with self.assertRaises(ValueError):
optimizers_lib.optimize_loss(
loss, global_step, learning_rate=0.1, optimizer="SGD",
summaries=["loss", "bad_summary"])
def testInvalidLoss(self):
with ops.Graph().as_default() as g, self.session(graph=g):
_, _, _, global_step = _setup_model()
with self.assertRaises(ValueError):
optimizers_lib.optimize_loss(
None, global_step, learning_rate=0.1, optimizer="SGD")
with self.assertRaises(ValueError):
optimizers_lib.optimize_loss(
[[1.0]], global_step, learning_rate=0.1, optimizer="SGD")
def testInvalidGlobalStep(self):
with ops.Graph().as_default() as g, self.session(graph=g):
x = array_ops.placeholder(dtypes.float32, [])
var = variable_scope.get_variable(
"test", [], initializer=init_ops.constant_initializer(10))
loss = math_ops.abs(var * x)
with self.assertRaises(AttributeError):
optimizers_lib.optimize_loss(
loss,
global_step=constant_op.constant(
43, dtype=dtypes.int64),
learning_rate=0.1,
optimizer="SGD")
with self.assertRaises(TypeError):
optimizers_lib.optimize_loss(
loss,
global_step=variable_scope.get_variable(
"global_step", [],
trainable=False,
dtype=dtypes.float64,
initializer=init_ops.constant_initializer(
0.0, dtype=dtypes.float64)),
learning_rate=0.1,
optimizer="SGD")
with self.assertRaises(ValueError):
optimizers_lib.optimize_loss(
loss,
global_step=variable_scope.get_variable(
"global_step", [1],
trainable=False,
dtype=dtypes.int64,
initializer=init_ops.constant_initializer(
[0], dtype=dtypes.int64)),
learning_rate=0.1,
optimizer="SGD")
def testInvalidLearningRate(self):
with ops.Graph().as_default() as g, self.session(graph=g):
_, _, loss, global_step = _setup_model()
with self.assertRaises(ValueError):
optimizers_lib.optimize_loss(
loss, global_step, learning_rate=-0.1, optimizer="SGD")
def testGradientNoise(self):
random_seed.set_random_seed(42)
with self.cached_session() as session:
x, var, loss, global_step = _setup_model()
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
gradient_noise_scale=10.0)
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
var_value, global_step_value = session.run([var, global_step])
# Due to randomness the following number may change if graph is different.
self.assertAlmostEqual(var_value, 9.86912, 4)
self.assertEqual(global_step_value, 1)
def testGradientNoiseWithClipping(self):
random_seed.set_random_seed(42)
with self.cached_session() as session:
x, var, loss, global_step = _setup_model()
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
gradient_noise_scale=10.0,
clip_gradients=10.0)
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
var_value, global_step_value = session.run([var, global_step])
self.assertAlmostEqual(var_value, 9.86912, 4)
self.assertEqual(global_step_value, 1)
def testGradientClip(self):
with self.cached_session() as session:
x, var, loss, global_step = _setup_model()
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
clip_gradients=0.1)
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
var_value, global_step_value = session.run([var, global_step])
self.assertAlmostEqual(var_value, 9.98999, 4)
self.assertEqual(global_step_value, 1)
def testAdaptiveGradientClip(self):
with self.cached_session() as session:
x, var, loss, global_step = _setup_model()
clip_gradients = optimizers_lib.adaptive_clipping_fn()
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
clip_gradients=clip_gradients)
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
var_value, global_step_value = session.run([var, global_step])
self.assertAlmostEqual(var_value, 9.8916, 4)
self.assertEqual(global_step_value, 1)
var_count = 0
for var in variables.global_variables():
if var.name.startswith("OptimizeLoss/AdaptiveMaxNorm"):
var_count += 1
self.assertEqual(2, var_count)
def testGradientMultiply(self):
with self.cached_session() as session:
x, var, loss, global_step = _setup_model()
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
gradient_multipliers={var: 7.})
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
var_value, global_step_value = session.run([var, global_step])
# var(0) = 10, x = 5, var(0)/dx = 5,
# var(1) = var(0) - learning_rate * gradient_multiplier * var(0)/dx
self.assertAlmostEqual(var_value, 6.5, 4)
self.assertEqual(global_step_value, 1)
def testGradientMultiplyInt32Tensor(self):
with self.cached_session() as session:
x, var, loss, global_step = _setup_model()
v = array_ops.placeholder(dtypes.float32, [])
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
gradient_multipliers={var: v})
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5, v: 7.})
var_value, global_step_value = session.run([var, global_step])
# var(0) = 10, x = 5, var(0)/dx = 5,
# var(1) = var(0) - learning_rate * gradient_multiplier * var(0)/dx
self.assertAlmostEqual(var_value, 6.5, 4)
self.assertEqual(global_step_value, 1)
def testGradientMultiplyInt64Tensor(self):
with self.cached_session() as session:
x, var, loss, global_step = _setup_model()
v = array_ops.placeholder(dtypes.float64, [])
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
gradient_multipliers={var: v})
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5, v: 7.})
var_value, global_step_value = session.run([var, global_step])
# var(0) = 10, x = 5, var(0)/dx = 5,
# var(1) = var(0) - learning_rate * gradient_multiplier * var(0)/dx
self.assertAlmostEqual(var_value, 6.5, 4)
self.assertEqual(global_step_value, 1)
def testIgnoreVariablesWithNoGradients(self):
_, _, loss, global_step = _setup_model()
unused_variable = variable_scope.get_variable("ignore_me", [])
optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer="SGD",
gradient_noise_scale=10.0,
gradient_multipliers={unused_variable: 1.},
clip_gradients=10.0)
def testNoGlobalStep(self):
optimizers = [
"SGD", gradient_descent.GradientDescentOptimizer,
gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
]
for optimizer in optimizers:
with ops.Graph().as_default() as g, self.session(graph=g) as session:
x = array_ops.placeholder(dtypes.float32, [])
var = variable_scope.get_variable(
"test", [], initializer=init_ops.constant_initializer(10))
loss = math_ops.abs(var * x)
update_var = variable_scope.get_variable(
"update", [], initializer=init_ops.constant_initializer(10))
update_op = state_ops.assign(update_var, 20)
train = optimizers_lib.optimize_loss(
loss,
global_step=None,
learning_rate=0.1,
optimizer=optimizer,
update_ops=[update_op])
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
self.assertEqual(9.5, var.eval())
self.assertEqual(20, update_var.eval())
def testNoGlobalStepWithDecay(self):
optimizers = [
"SGD", gradient_descent.GradientDescentOptimizer,
gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
]
for optimizer in optimizers:
with ops.Graph().as_default() as g, self.session(graph=g):
x = array_ops.placeholder(dtypes.float32, [])
var = variable_scope.get_variable(
"test", [], initializer=init_ops.constant_initializer(10))
loss = math_ops.abs(var * x)
update_var = variable_scope.get_variable(
"update", [], initializer=init_ops.constant_initializer(10))
update_op = state_ops.assign(update_var, 20)
with self.assertRaisesRegexp(
ValueError, "global_step is required for learning_rate_decay_fn"):
optimizers_lib.optimize_loss(
loss,
global_step=None,
learning_rate=0.1,
learning_rate_decay_fn=_no_op_learning_rate_decay_fn,
optimizer=optimizer,
update_ops=[update_op])
def testNoGlobalStepArg(self):
optimizers = [
"SGD", gradient_descent.GradientDescentOptimizer,
gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
]
for optimizer in optimizers:
with ops.Graph().as_default() as g, self.session(graph=g) as session:
x, var, loss, global_step = _setup_model()
update_var = variable_scope.get_variable(
"update", [], initializer=init_ops.constant_initializer(10))
update_op = state_ops.assign(update_var, 20)
train = optimizers_lib.optimize_loss(
loss,
global_step=None,
learning_rate=0.1,
optimizer=optimizer,
update_ops=[update_op])
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
self.assertEqual(9.5, var.eval())
self.assertEqual(20, update_var.eval())
self.assertEqual(1, global_step.eval())
def testUpdateOp(self):
optimizers = [
"SGD", gradient_descent.GradientDescentOptimizer,
gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
]
for optimizer in optimizers:
with ops.Graph().as_default() as g, self.session(graph=g) as session:
x, var, loss, global_step = _setup_model()
update_var = variable_scope.get_variable(
"update", [], initializer=init_ops.constant_initializer(10))
update_op = state_ops.assign(update_var, 20)
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer=optimizer,
update_ops=[update_op])
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
self.assertEqual(9.5, var.eval())
self.assertEqual(20, update_var.eval())
self.assertEqual(1, global_step.eval())
def testUpdateOpNoIncrementGlobalStep(self):
optimizers = [
"SGD", gradient_descent.GradientDescentOptimizer,
gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
]
for optimizer in optimizers:
with ops.Graph().as_default() as g, self.session(graph=g) as session:
x, var, loss, global_step = _setup_model()
update_var = variable_scope.get_variable(
"update", [], initializer=init_ops.constant_initializer(10))
update_op = state_ops.assign(update_var, 20)
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
optimizer=optimizer,
update_ops=[update_op],
increment_global_step=False)
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
self.assertEqual(9.5, var.eval())
self.assertEqual(20, update_var.eval())
self.assertEqual(0, global_step.eval())
def testUpdateOpWithNoOpDecay(self):
optimizers = [
"SGD", gradient_descent.GradientDescentOptimizer,
gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
]
for optimizer in optimizers:
with ops.Graph().as_default() as g, self.session(graph=g) as session:
x, var, loss, global_step = _setup_model()
update_var = variable_scope.get_variable(
"update", [], initializer=init_ops.constant_initializer(10))
update_op = state_ops.assign(update_var, 20)
train = optimizers_lib.optimize_loss(
loss,
global_step,
learning_rate=0.1,
learning_rate_decay_fn=_no_op_learning_rate_decay_fn,
optimizer=optimizer,
update_ops=[update_op])
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
self.assertEqual(9.5, var.eval())
self.assertEqual(20, update_var.eval())
self.assertEqual(1, global_step.eval())
def testUpdateOpFromCollection(self):
optimizers = [
"SGD", gradient_descent.GradientDescentOptimizer,
gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
]
for optimizer in optimizers:
with ops.Graph().as_default() as g, self.session(graph=g) as session:
x, var, loss, global_step = _setup_model()
update_var = variable_scope.get_variable(
"update", [], initializer=init_ops.constant_initializer(10))
update_op = state_ops.assign(update_var, 20)
ops.add_to_collection(ops.GraphKeys.UPDATE_OPS, update_op)
train = optimizers_lib.optimize_loss(
loss, global_step, learning_rate=0.1, optimizer=optimizer)
variables.global_variables_initializer().run()
session.run(train, feed_dict={x: 5})
var_value, update_var_value, global_step_value = session.run(
[var, update_var, global_step])
self.assertEqual(var_value, 9.5)
self.assertEqual(update_var_value, 20)
self.assertEqual(global_step_value, 1)
class AdaptiveClipping(test.TestCase):
def testAverages(self):
with self.cached_session() as session:
scale = 2.
grad = array_ops.ones([3, 4]) * scale
log_norm = np.log(np.sqrt(scale**2 * grad.get_shape().num_elements()))
grads_and_vars = [(grad, grad)]
grads_and_vars = optimizers_lib.adaptive_clipping_fn(
decay=0.5)(grads_and_vars)
var_dict = {}
for var in variables.global_variables():
if var.name.startswith("AdaptiveMaxNorm"):
var_dict[var.name.split(":")[0]] = var
self.assertEqual(2, len(var_dict))
moving_mean = var_dict["AdaptiveMaxNorm/mean"]
moving_sq_mean = var_dict["AdaptiveMaxNorm/sq_mean"]
variables.global_variables_initializer().run()
mean, sq_mean = session.run([moving_mean, moving_sq_mean])
self.assertEqual([0], mean)
self.assertEqual([0], sq_mean)
for i in range(20):
mean, sq_mean, _ = session.run(
[moving_mean, moving_sq_mean, grads_and_vars[0][0]])
if i == 0:
self.assertLess(mean, 0.9 * log_norm)
self.assertLess(sq_mean, 0.9 * log_norm**2)
self.assertAlmostEqual(float(mean), log_norm, places=4)
self.assertAlmostEqual(float(sq_mean), log_norm**2, places=4)
def testClip(self):
with self.cached_session() as session:
spike = 1000.
multiplier = array_ops.placeholder(dtypes.float32, [], "multiplier")
step = array_ops.placeholder(dtypes.int32, [], "step")
grad = array_ops.ones([3, 4]) * multiplier
grads_and_vars = [(grad, grad)]
grads_and_vars = optimizers_lib.adaptive_clipping_fn(
decay=0.9, global_step=step)(grads_and_vars)
variables.global_variables_initializer().run()
def run(scale, i):
return session.run(grads_and_vars[0][0],
feed_dict={multiplier: scale,
step: i})
for i in range(20):
scale = [1., -2.][i % 2]
clipped_grad = run(scale, i)
if i > 3:
self.assertAllClose(np.ones(clipped_grad.shape) * scale, clipped_grad)
# assert that the spike will have low influence.
clipped_grad = run(spike, 20)
self.assertTrue((clipped_grad < 25.).all())
# assert that a repeated spike will converge to this new value.
for i in range(10):
clipped_grad = run(spike, i + 21)
self.assertAllClose(np.ones(clipped_grad.shape) * spike, clipped_grad)
if __name__ == "__main__":
test.main()
| 40.754647
| 81
| 0.639332
|
5427837b055703bbbc6bd9a0642e2276e82b6c63
| 2,140
|
py
|
Python
|
generated-libraries/python/netapp/cluster_peer/cluster_peer_active_address.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | 2
|
2017-03-28T15:31:26.000Z
|
2018-08-16T22:15:18.000Z
|
generated-libraries/python/netapp/cluster_peer/cluster_peer_active_address.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
generated-libraries/python/netapp/cluster_peer/cluster_peer_active_address.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
from netapp.netapp_object import NetAppObject
class ClusterPeerActiveAddress(NetAppObject):
"""
A list of active IP addresses configured in a peer cluster
When returned as part of the output, all elements of this typedef
are reported, unless limited by a set of desired attributes
specified by the caller.
<p>
When used as input to specify desired attributes to return,
omitting a given element indicates that it shall not be returned
in the output. In contrast, by providing an element (even with
no value) the caller ensures that a value for that element will
be returned, given that the value can be retrieved.
<p>
When used as input to specify queries, any element can be omitted
in which case the resulting set of objects is not constrained by
any specific value of that attribute.
"""
_peer_addresses = None
@property
def peer_addresses(self):
"""
The active IP Addresses of the peer cluster.
Attributes: non-creatable, non-modifiable
"""
return self._peer_addresses
@peer_addresses.setter
def peer_addresses(self, val):
if val != None:
self.validate('peer_addresses', val)
self._peer_addresses = val
_cluster_name = None
@property
def cluster_name(self):
"""
The name of the peer cluster.
Attributes: key, non-creatable, non-modifiable
"""
return self._cluster_name
@cluster_name.setter
def cluster_name(self, val):
if val != None:
self.validate('cluster_name', val)
self._cluster_name = val
@staticmethod
def get_api_name():
return "cluster-peer-active-address"
@staticmethod
def get_desired_attrs():
return [
'peer-addresses',
'cluster-name',
]
def describe_properties(self):
return {
'peer_addresses': { 'class': basestring, 'is_list': True, 'required': 'optional' },
'cluster_name': { 'class': basestring, 'is_list': False, 'required': 'optional' },
}
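# Usage sketch (hedged; the surrounding query call is an assumption, not part
# of this generated module): to ask the server to return only specific fields,
# a caller typically builds a "desired attributes" instance with just those
# fields set, e.g.
#   desired = ClusterPeerActiveAddress()
#   desired.cluster_name = ''
# and passes it alongside the cluster-peer-active-address API request, as
# described in the class docstring above.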
| 32.923077
| 95
| 0.63972
|
763910886922259b2b079a7f69baeb548ee01782
| 25,418
|
py
|
Python
|
scheduler/scheduler.py
|
wmodes/blackrockstation
|
8134322517803d225b936958a2d4ad16e286397a
|
[
"MIT"
] | 1
|
2021-04-18T06:46:07.000Z
|
2021-04-18T06:46:07.000Z
|
scheduler/scheduler.py
|
wmodes/blackrockstation
|
8134322517803d225b936958a2d4ad16e286397a
|
[
"MIT"
] | null | null | null |
scheduler/scheduler.py
|
wmodes/blackrockstation
|
8134322517803d225b936958a2d4ad16e286397a
|
[
"MIT"
] | null | null | null |
"""Controller class for scheduler subsystem."""
from shared import config
from shared.controller import Controller
import logging
from datetime import datetime, timedelta
import csv
from columnar import columnar
import time
import re
import random
import shutil
from curses import wrapper
logger = logging.getLogger()
logger.setLevel(config.LOG_LEVEL)
class Scheduler(Controller):
"""Scheduler controller class."""
def __init__(self):
"""Initialize Scheduler class."""
super().__init__()
self.whoami = "scheduler"
self.schedule = []
self.__read_schedule()
self.__sort_schedule()
self.delayed_events = []
self.current_year = config.YEARS[0]
self.last_event = ""
self.last_timeslip = datetime.now()
self.cycle_count = 0
"""
SETUP
"""
def __read_schedule(self):
logging.info('Reading schedule')
with open(config.SCHED_DATA, newline='') as csvfile:
            reader = csv.DictReader(csvfile, config.SCHED_FIELDS, restkey='Extras')
# skips the header line
next(reader)
for row in reader:
if row['event'] != '':
self.schedule.append(dict(row))
def __sort_schedule(self):
self.schedule = sorted(
self.schedule, key=lambda k: time.strptime(k['time'], "%H:%M"))
for index in range(len(self.schedule)):
self.schedule[index]["index"] = index
"""
REPORTS
"""
def get_status(self):
"""Full status for controller."""
return {
"controller" : self.whoami,
"running" : True,
"currentYear" : self.current_year,
"nextTrain" : self.get_next_train(),
"nextTimeslip" : self.next_timeslip()
}
def get_next_train(self):
"""Return an obj representing next train."""
return self.get_future_trains(1)[0]
def get_future_trains(self, n=10):
"""Return array of objects representing future trains.
We do this in a few steps:
        1) Iterate through train schedule until we find the item just after the current now() time
2) Add each item to a list
3) If we have n items already, we can stop, but
4) If we hit the end of the records before n items,
5) We iterate from the beginning of the schedule appending items to our list until we either get n items or hit where we started
"""
future_list = []
# search through train schedule for time that matches H:M
now_dt = datetime.now()
for event in self.schedule:
# if this event is not a train event, skip
if event["controller"] != "train":
continue
# if this event is already in the past, skip
if self.str2dt(event["time"], False) < now_dt:
continue
# all the following events are in the future
# if we have n events, stop
if len(future_list) >= n:
break
future_list.append(event)
        # if we don't have n events yet, wrap around to the start of the schedule
if len(future_list) < n:
for event in self.schedule:
# if this event is not a train event, skip
if event["controller"] != "train":
continue
# if this event is not in the past, stop (i.e., we've wrapped around)
if self.str2dt(event["time"], False) > now_dt:
break
# if we have n events, stop
if len(future_list) >= n:
break
future_list.append(event)
return future_list
def display_future_trains(self, n=10):
"""Return human-readable schedule of future trains."""
event_list = self.get_future_trains(n)
headers = ['Train', 'Arrival', 'Direction', 'Type', 'Notes']
# convert dict to array of arrays
events = []
for event in event_list:
if event['controller'] == "train":
events.append([event['event'], event['time'],
event['direction'], event['traintype'], event['notes']])
if not len(events):
return
width = shutil.get_terminal_size().columns - 2
table = columnar(events, headers, no_borders=True, wrap_max=8, terminal_width=width)
return str(table)
def display_train_schedule(self, event_list=None):
"""Return human-readable schedule of all trains."""
if not event_list:
event_list = self.schedule
headers = ['Train', 'Arrival', 'Direction', 'Type', 'Notes']
# convert dict to array of arrays
events = []
for event in event_list:
if event['controller'] == "train":
events.append([event['event'], event['time'], event['direction'], event['traintype'], event['notes']])
if not len(events):
return
#table = columnar(events, headers, terminal_width=100, column_sep='│', row_sep='─')
table = columnar(events, headers, no_borders=True, terminal_width=110, wrap_max=8)
#table = columnar(events, headers, terminal_width=110, column_sep='|', row_sep='─')
return str(table)
"""
ORDERS
"""
def act_on_order(self, order):
"""
Take action based on orders.
        Possible commands:
- order *controller* *order*
- reqTrains [num_events]
- reqStatus
- reqLog [num_events]
"""
if not order:
error = "No command received"
return_val = {'status': 'FAIL',
'error': error}
return return_val
if "cmd" not in order:
error = f"No 'cmd' in order received: '{order}'"
logging.info(error)
return_val = {'status': 'FAIL',
'error': error}
return return_val
#
# request future schedule
# Format: {
# "cmd" : "reqTrains",
# "qty" : **integer**
# }
#
if order['cmd'].lower() == "reqtrains":
if "qty" in order:
results = self.get_future_trains(int(order["qty"]))
else:
results = self.get_future_trains()
return_val = {'status': 'OK',
'cmd': 'reqTrains',
'results': results}
return return_val
#
# request full train schedule
# Format: {
# "cmd" : "reqAllTrains"
# }
#
if order['cmd'].lower() == "reqalltrains":
results = self.schedule
return_val = {'status': 'OK',
'cmd': 'reqAllTrains',
'results': results}
return return_val
#
# request status
# Format: {
# "cmd" : "reqStatus"
# }
#
elif order['cmd'].lower() == "reqstatus":
return_val = {'status': 'OK',
'cmd': 'reqStatus',
'results': self.get_status()}
return return_val
#
# request log
# Format: {
# "cmd" : "reqLog",
# "qty" : **integer**
# }
#
elif order['cmd'].lower() == "reqlog":
if "qty" in order:
results = self.get_logs(order["qty"])
else:
results = self.get_logs()
return_val = {'status': 'OK',
'cmd': 'reqLog',
'results': results}
return return_val
#
# send order to other controller
# Format: {
# "cmd" : "order",
# "controller" : **str**,
# "relay" : **order**
# }
elif order['cmd'].lower() == "order":
if "controller" not in order or "relay" not in order:
error = "controller or relay values missing"
logging.warning(error)
return_val = {'status': 'FAIL',
'cmd': 'order',
'error': error}
return return_val
if order['controller'] not in list(config.CONTROLLERS):
error = "invalid controller"
logging.warning(error)
return_val = {'status': 'FAIL',
'cmd': 'order',
'error': error,
'hint': list(config.CONTROLLERS)}
return return_val
results = self.send_order_to_controller(
order["controller"], order["relay"])
return_val = {'status': results['status'],
'cmd': 'order',
'results': results}
return return_val
#
# set year
# Format: {
# "cmd" : "setYear",
# "year" : *year*
# }
#
elif order['cmd'].lower() == "setyear":
if "year" not in order:
error = "No year in order received"
logging.warning(error)
return_val = {'status': 'FAIL',
'cmd': 'setYear',
'error': error}
return return_val
return_val = self.set_year(order['year'])
return return_val
#
# set train
# Format: {
# "cmd" : "setTrain",
# "index" : *int*
# }
#
elif order['cmd'].lower() == "settrain":
if "index" not in order:
error = "No index in order received"
logging.warning(error)
return_val = {'status': 'FAIL',
'cmd': 'setTrain',
'error': error}
return return_val
if order["index"] < 0 or order["index"] > len(self.schedule) - 1:
error = f"Index in order invalid: {index}"
logging.warning(error)
return_val = {'status': 'FAIL',
'cmd': 'setTrain',
'error': error}
return return_val
self.set_train(order['index'])
return_val = {'status': 'OK',
'cmd': 'setTrain'}
return return_val
#
# help
#
elif order['cmd'].lower() == "help":
cmds = [
{'cmd': 'order',
'controller': ["announce", "crossing", "lights", "radio", "scheduler", "bridge",
"train", "television"],
'order': {'cmd': 'reqStatus'}},
{'cmd': 'setTrain',
'index': 7},
{'cmd': 'setYear',
'year': ['1858', '1888', '1938', '1959', '1982', '2014', '2066', '2110']},
{'cmd': 'reqTrains',
'qty': '5'},
{'cmd': 'reqAllTrains'},
{'cmd': 'reqStatus'},
{'cmd': 'reqLog',
'qty': '10'}
]
return_val = {'status': 'OK',
'cmd': 'help',
'commands': cmds}
return return_val
#
# invalid order
#
else:
error = "invalid order received"
logging.warning(error + ': ' + order['cmd'])
return_val = {'status': 'FAIL',
'cmd': order['cmd'],
'error': error}
return return_val
def send_order_to_controller(self, controller, cmd_obj):
"""Send an arbitrary order to another controller."""
now_dt = datetime.now()
results = self.comms.send_order(controller, cmd_obj)
if results["status"] == 'FAIL':
logging.warning(f"order to {controller} failed: {results['error']}")
return results
"""
TIME HELPERS
"""
def str2dt(self, time_str, is_next=True):
"""
Given a time string, returns the next matching datetime.
Params:
time_str (str) representing a time in %H:%M format
is_next (bool) whether today's datetime (False) or the next datetime (True) should be found
"""
now_dt = datetime.now()
tomorrow_dt = now_dt + timedelta(days=1)
try:
nexttime_t = datetime.strptime(time_str, '%H:%M').time()
nexttime_dt = datetime.combine(now_dt, nexttime_t)
if is_next:
time_delta = nexttime_dt - now_dt
if time_delta.total_seconds() < 0:
nexttime_dt = datetime.combine(tomorrow_dt, nexttime_t)
except:
logging.warning(f"Bad date: {time_str}")
nexttime_dt = tomorrow_dt
return nexttime_dt
def next_train(self):
time_str = self.get_next_train()["time"]
next_dt = self.str2dt(time_str)
now_dt = datetime.now()
time_delta = next_dt - now_dt
secs = time_delta.total_seconds()
hrs = int(secs // 3600)
mins = int((secs % 3600) // 60)
secs = int(secs % 60)
return f"{hrs}:{mins:02d}:{secs:02d}"
def next_timeslip(self):
next = self.last_timeslip + timedelta(minutes=config.SCHED_TIMESLIP_INTERVAL)
now_dt = datetime.now()
time_delta = next - now_dt
min = int(time_delta.total_seconds() // 60)
sec = int(time_delta.total_seconds() % 60)
return f"{min}:{sec:02d}"
"""
TIME CHECKS
"""
def check_for_scheduled_event(self):
"""
Check for scheduled event and send command to appropriate controller.
Matches now() in H:M format with train schedule arrival time and records last event to prevent bounce. Note: Two events shouldn't share the same time.
"""
# TODO: Implement variance
# search through train schedule for time that matches H:M
now_t = datetime.now().time()
for event in self.schedule:
# if this event matches current time
if now_t.strftime("%H:%M") == event["time"]:
# if scheduled event already happened, return
if self.last_event == event:
return
# record this event as last_event
self.last_event = event
# make event happen
if event['controller'] == "train":
self.trigger_train(event)
else:
self.trigger_event(event)
break
def check_for_timeslip(self):
"""Check to see if it is time for a timeslip."""
now_dt = datetime.now()
# prevent bounce: if the time in H:M is the same as the last timeslip, return
if now_dt.strftime("%H:%M") == self.last_timeslip.strftime("%H:%M"):
return
next_timeslip = self.last_timeslip + timedelta(minutes=config.SCHED_TIMESLIP_INTERVAL)
if now_dt > next_timeslip:
self.trigger_timeslip()
def check_for_delayed_events(self):
"""Check if it is time for delayed events."""
now_dt = datetime.now()
for event in self.delayed_events:
# logging.debug(f"Delayed event: Now: {now_dt.strftime('%H:%M:%S')}, Time: {event['time_dt'].strftime('%H:%M:%S')}, Delta: {now_delta.strftime('%H:%M:%S')}")
if now_dt > event['time_dt']:
self.trigger_event(event)
self.delayed_events.remove(event)
def check_for_random_events(self):
"""
Check if it is time for random events.
Randomly calculate the chances of any of a list of events happening /right now/
"""
denominator = 24 * 60 * 60 * (1/config.SCHED_LOOP_DELAY)
for event in config.SCHED_PERIODIC:
# an N in 345600 chance
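            # Worked example (the loop-delay value here is illustrative, not a
            # known config value): with SCHED_LOOP_DELAY = 0.25 s this loop runs
            # 24 * 60 * 60 * 4 = 345600 times per day, so an event with
            # times_per_day == 4 fires with probability 4 / 345600 on any pass.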
if random.random() < event["times_per_day"]/denominator:
# lucky you! you get chosen!
self.trigger_event(event)
# only one winner at a time, thank you
break
"""
PLAY STUFF
"""
def set_year(self, year):
"""Set year attribute."""
logging.info(f"Setting year: {year}")
if str(year) not in config.VALID_YEARS:
error = f"Invalid year: {year}"
logging.warning(error)
return_val = {'status': 'FAIL',
'error': error}
return return_val
self.trigger_timeslip(year)
return_val = {'status': 'OK',
'cmd': 'setYear'}
return return_val
def set_train(self, index):
event = self.schedule[index]
if event['controller'] == "train":
self.trigger_train(event)
else:
self.trigger_event(event)
"""
EVENTS
"""
def delay_event(self, event):
"""
Add an event to the delayed queue.
The time is a key within the object.
"""
self.delayed_events.append(event)
def trigger_event(self, event):
"""Constuct order from basic event info and send to appropriate controller."""
if event['controller'] == "train":
# send command to train controller
# form: set train *direction* *traintype* *year*
order = {
"cmd" : "setTrain",
"direction" : event['direction'],
"traintype" : event['traintype'],
"year" : self.current_year
}
self.send_order_to_controller("train", order)
elif event['controller'] == "announce":
# send command to announce controller
# form: set announce *id* *year*
order = {
"cmd" : "setAnnounce",
"announceid" : event['announceid'],
"year" : self.current_year
}
self.send_order_to_controller("announce", order)
elif event['controller'] == "crossing":
# send command to crossing controller
# form:
# set on
# set off
if event['event'] == "on":
order = {
"cmd" : "setOn"
}
elif event['event'] == "off":
order = {
"cmd" : "setOff"
}
#TODO: Convert above to True/False?
self.send_order_to_controller("crossing", order)
elif event['controller'] == "bridge":
# send command to bridge controller
# form:
# set go *direction*
# set stop
if event['event'] == "stop":
order = {
"cmd" : "setStop"
}
elif event['event'] == "go":
order = {
"cmd" : "setGo",
"direction" : event['direction']
}
self.send_order_to_controller("bridge", order)
elif re.search('radio|television|lights', event['controller']):
# send command to radio, tv, or lights controller
# form:
# set glitch
# set year *year*
if event['event'] == "glitch":
order = {
"cmd" : "setGlitch"
}
elif event['event'] == "year":
order = {
"cmd" : "setYear",
"year" : self.current_year
}
else:
return
self.send_order_to_controller(event['controller'], order)
def trigger_train(self, train_event):
"""
Trigger the events that happen with a train.
train_event comes from the schedule
"""
# let's calculate the timing of some things to schedule the next few events
now_dt = datetime.now()
# time_announce_arrival = now_dt
# time_signal_is_go = now_dt
time_we_hear_train = now_dt + timedelta(minutes=config.SCHED_BRIDGE_BEFORE)
time_crossing_is_on = time_we_hear_train + timedelta(minutes=config.SCHED_CROSSING_DELAY)
time_departure_announce = time_we_hear_train + timedelta(minutes=float(train_event['duration'])/2)
time_signal_is_stop = time_we_hear_train + timedelta(minutes=float(train_event['duration'])) - timedelta(minutes=config.SCHED_DEPART_TIME)
time_crossing_is_off = time_we_hear_train + timedelta(minutes=float(train_event['duration'])) - timedelta(minutes=config.SCHED_CROSSING_DELAY)
#
# 1) BRIDGE signal turns green as soon as train enters the
# block, i.e., several minutes before we can hear it
self.delay_event({
"controller": "bridge",
"event": "go",
"direction": train_event['direction'],
"time_dt": datetime.now() + timedelta(seconds=1)
})
#
# 2) ANNOUNCE arrival when train approached station
# i.e., we when begin to hear it
if train_event['announceid'] != "":
self.delay_event({
"controller": "announce",
"announceid": f"{train_event['announceid']}-announce-arrival",
"time_dt": datetime.now() + timedelta(seconds=15)
})
#
# 3) TRAINAUDIO starts
train_event['time_dt'] = time_we_hear_train
self.delay_event(train_event)
#
# 4) CROSSING comes on as soon as train nears crossing
# i.e., some minutes after we can hear it
self.delay_event({
"controller": "crossing",
"event": "on",
"time_dt": time_crossing_is_on
})
#
# 5) ANNOUNCE departure before train leaves station
# i.e., halfway through duration
if train_event['announceid'] != "":
self.delay_event({
"controller": "announce",
"announceid": f"{train_event['announceid']}-announce-departure",
"time_dt": time_departure_announce
})
#
# 6) BRIDGE signal turns red as soon as the train passes the
# station, i.e., some minutes before end of duration
self.delay_event({
"controller": "bridge",
"event": "stop",
"direction": train_event['direction'],
"time_dt": time_signal_is_stop
})
#
# 7) CROSSING turns off as soon as the train passes
# i.e., at the end of the train's duration
self.delay_event({
"controller": "crossing",
"event": "off",
"time_dt": time_crossing_is_off
})
def trigger_timeslip(self, year=None):
"""Trigger a timeslip event and the things that go with it."""
if year:
            self.current_year = year
else:
# # find out what the index of the current year is
# index = config.YEARS.index(int(self.current_year))
# # increment one
# index += 1
# # make sure we don't have index overrun
# if index >= len(config.YEARS):
# index = 0
# self.current_year = config.YEARS[index]
#
# now we pick years at random, but prevent a repeat
new_year = self.current_year
while new_year == self.current_year:
new_year = random.choice(config.YEARS)
self.current_year = new_year
# record the time of the timeslip to prevent bounce
self.last_timeslip = datetime.now()
logging.info(f"Timeslip to {self.current_year}")
# trigger glitch events and schedule year event
for controller in ["radio", "television", "lights"]:
self.delay_event({
"controller": controller,
"event": "glitch",
"time_dt": datetime.now() + timedelta(seconds=1)
})
self.delay_event({
"controller": controller,
"event": "year",
"time_dt": datetime.now() + timedelta(seconds=config.SCHED_TIMESLIP_GLITCH)
})
"""
MAIN LOOP
"""
def main_loop(self):
"""Get orders and acts on them."""
while True:
self.act_on_order(self.receive_order())
self.check_for_delayed_events()
self.check_for_timeslip()
self.check_for_random_events()
self.check_for_scheduled_event()
time.sleep(config.SCHED_LOOP_DELAY)
def start(self):
"""Get the party started."""
logging.info('Starting.')
time.sleep(1)
self.trigger_timeslip()
self.main_loop()
def main():
"""Test the class."""
import sys
    logging.basicConfig(stream=sys.stderr,
                        format='%(asctime)s %(levelname)s:%(message)s',
                        level=logging.DEBUG)
scheduler = Scheduler()
    scheduler.start()
if __name__ == '__main__':
main()
| 36.259629
| 169
| 0.511409
|
cd6fbfb85755bf30dc0e353fc07b5ef28fa8ae7a
| 332
|
py
|
Python
|
xautodl/models/cell_searchs/_test_module.py
|
Joey61Liuyi/AutoDL-Projects
|
2092e144920e82d74753a7ac31e1890a150d41cf
|
[
"MIT"
] | 817
|
2020-01-15T00:23:41.000Z
|
2022-03-31T14:52:03.000Z
|
xautodl/models/cell_searchs/_test_module.py
|
Joey61Liuyi/AutoDL-Projects
|
2092e144920e82d74753a7ac31e1890a150d41cf
|
[
"MIT"
] | 77
|
2020-01-14T14:02:45.000Z
|
2022-03-25T07:06:02.000Z
|
xautodl/models/cell_searchs/_test_module.py
|
Joey61Liuyi/AutoDL-Projects
|
2092e144920e82d74753a7ac31e1890a150d41cf
|
[
"MIT"
] | 176
|
2020-01-15T10:39:41.000Z
|
2022-03-31T04:24:53.000Z
|
##################################################
# Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2019 #
##################################################
import torch
from search_model_enas_utils import Controller
def main():
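    # the two Controller arguments are presumably the number of cell edges and candidate operations;
    # this is an assumption about search_model_enas_utils, not something verified here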
controller = Controller(6, 4)
predictions = controller()
if __name__ == "__main__":
main()
| 22.133333
| 50
| 0.478916
|
3def23c95e424d306ecfd158c75361ce90c44da1
| 1,716
|
py
|
Python
|
minato_namikaze/botmain/bot/cogs/info/feedback.py
|
Onii-Chan-Discord/yondaime-hokage
|
d6e75405a9d30b37bfb4fd588f02c0de813c92b1
|
[
"Apache-2.0"
] | null | null | null |
minato_namikaze/botmain/bot/cogs/info/feedback.py
|
Onii-Chan-Discord/yondaime-hokage
|
d6e75405a9d30b37bfb4fd588f02c0de813c92b1
|
[
"Apache-2.0"
] | null | null | null |
minato_namikaze/botmain/bot/cogs/info/feedback.py
|
Onii-Chan-Discord/yondaime-hokage
|
d6e75405a9d30b37bfb4fd588f02c0de813c92b1
|
[
"Apache-2.0"
] | null | null | null |
import discord
from discord.ext import commands
from discord.ext.commands import command
from ...lib import (
Embed,
ErrorEmbed,
check_if_feedback_system_setup,
return_feedback_channel,
)
class Feedback(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.description = 'Sends your feedback about the server to the server owner. (This can only be done if it is enabled by the server owner)'
@command()
@commands.cooldown(1, 60, commands.BucketType.user)
@commands.guild_only()
@commands.check(check_if_feedback_system_setup)
async def feedback(self, ctx, *, feed):
'''Sends your feedback about the server to the server owner. (This can only be done if it is enabled by the server owner)'''
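        # remove the invoking message so the feedback text is not left behind in the channel it was typed in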
await ctx.message.delete()
channel = return_feedback_channel(ctx)
e = Embed(
title="Feedback sent!",
description=f"Your feedback '{feed}' has been sent!",
)
await ctx.send(embed=e, delete_after=10)
e2 = discord.Embed(
title=f"{ctx.author} has sent feedback", description=f"{feed}",
colour=ctx.author.color or ctx.author.top_role.colour.value or discord.Color.random()
)
await channel.send(embed=e2)
@feedback.error
async def feedback_handler(self, ctx, error):
if isinstance(error, commands.CheckFailure):
e = ErrorEmbed(
title='No Feedback system setup for this server!',
                description='An admin can always set up the **feedback system** using the `)setup` command'
)
await ctx.send(embed=e, delete_after=10)
def setup(bot):
bot.add_cog(Feedback(bot))
| 33.647059
| 147
| 0.648601
|
5da8e18e51f5f3a5bac1b38762ca3bfb485d1e4b
| 3,022
|
py
|
Python
|
lib/galaxy/model/migrate/versions/0108_add_extended_metadata.py
|
igorhollaender/sirv_dashboard
|
85aec60b80ef6f561d89398e3da5963d3d0f2aa4
|
[
"CC-BY-3.0"
] | 2
|
2018-10-14T16:42:39.000Z
|
2018-10-14T16:42:41.000Z
|
lib/galaxy/model/migrate/versions/0108_add_extended_metadata.py
|
igorhollaender/OBSOLETE_sirv_dashboard
|
85aec60b80ef6f561d89398e3da5963d3d0f2aa4
|
[
"CC-BY-3.0"
] | null | null | null |
lib/galaxy/model/migrate/versions/0108_add_extended_metadata.py
|
igorhollaender/OBSOLETE_sirv_dashboard
|
85aec60b80ef6f561d89398e3da5963d3d0f2aa4
|
[
"CC-BY-3.0"
] | null | null | null |
"""
Add the ExtendedMetadata and ExtendedMetadataIndex tables
"""
import logging
from sqlalchemy import Column, ForeignKey, Integer, MetaData, String, Table, TEXT
from galaxy.model.custom_types import JSONType
log = logging.getLogger( __name__ )
metadata = MetaData()
ExtendedMetadata_table = Table("extended_metadata", metadata,
Column( "id", Integer, primary_key=True ),
Column( "data", JSONType ) )
ExtendedMetadataIndex_table = Table("extended_metadata_index", metadata,
Column( "id", Integer, primary_key=True ),
Column( "extended_metadata_id", Integer, ForeignKey("extended_metadata.id",
onupdate="CASCADE",
ondelete="CASCADE" ),
index=True ),
Column( "path", String( 255 )),
Column( "value", TEXT))
extended_metadata_ldda_col = Column( "extended_metadata_id", Integer, ForeignKey("extended_metadata.id"), nullable=True )
def display_migration_details():
print "This migration script adds a ExtendedMetadata tables"
def upgrade(migrate_engine):
print __doc__
metadata.bind = migrate_engine
metadata.reflect()
try:
ExtendedMetadata_table.create()
except:
log.debug("Could not create ExtendedMetadata Table.")
try:
ExtendedMetadataIndex_table.create()
except:
log.debug("Could not create ExtendedMetadataIndex Table.")
# Add the extended_metadata_id to the ldda table
try:
ldda_table = Table( "library_dataset_dataset_association", metadata, autoload=True )
extended_metadata_ldda_col.create( ldda_table )
assert extended_metadata_ldda_col is ldda_table.c.extended_metadata_id
except Exception, e:
print str(e)
log.error( "Adding column 'extended_metadata_id' to library_dataset_dataset_association table failed: %s" % str( e ) )
def downgrade(migrate_engine):
metadata.bind = migrate_engine
metadata.reflect()
try:
ExtendedMetadataIndex_table.drop()
except Exception, e:
log.debug( "Dropping 'extended_metadata_index' table failed: %s" % ( str( e ) ) )
try:
ExtendedMetadata_table.drop()
except Exception, e:
log.debug( "Dropping 'extended_metadata' table failed: %s" % ( str( e ) ) )
# Drop the LDDA table's extended metadata ID column.
try:
ldda_table = Table( "library_dataset_dataset_association", metadata, autoload=True )
extended_metadata_id = ldda_table.c.extended_metadata_id
extended_metadata_id.drop()
except Exception, e:
log.debug( "Dropping 'extended_metadata_id' column from library_dataset_dataset_association table failed: %s" % ( str( e ) ) )
| 40.293333
| 134
| 0.62045
|
28160127efd3dd3a1d9e0a1b9da8676dab93d4f2
| 7,951
|
py
|
Python
|
bridge_adaptivity/module/tests/test_utils.py
|
eng-alecrim/bridge-adaptivity
|
8644e512272006eaeedd0928ac743fd6295bd731
|
[
"BSD-3-Clause"
] | 32
|
2017-06-01T16:31:50.000Z
|
2021-11-19T03:37:00.000Z
|
bridge_adaptivity/module/tests/test_utils.py
|
eng-alecrim/bridge-adaptivity
|
8644e512272006eaeedd0928ac743fd6295bd731
|
[
"BSD-3-Clause"
] | 44
|
2017-08-04T13:58:12.000Z
|
2021-06-09T17:25:59.000Z
|
bridge_adaptivity/module/tests/test_utils.py
|
eng-alecrim/bridge-adaptivity
|
8644e512272006eaeedd0928ac743fd6295bd731
|
[
"BSD-3-Clause"
] | 17
|
2017-12-08T07:24:28.000Z
|
2022-03-14T16:56:10.000Z
|
import logging
import ddt
from django.core.exceptions import MultipleObjectsReturned
from django.test import TestCase
from mock import patch
from bridge_lti.models import BridgeUser, LtiLmsPlatform, LtiUser, OutcomeService
from module.models import (
Activity, Collection, CollectionOrder, Engine, GradingPolicy, ModuleGroup, Sequence, SequenceItem
)
from module.utils import choose_activity, select_next_sequence_item
log = logging.getLogger(__name__)
@ddt.ddt
class TestUtilities(TestCase):
fixtures = ['gradingpolicy', 'engine']
@patch('module.tasks.sync_collection_engines.apply_async')
def setUp(self, mock_apply_async):
self.user = BridgeUser.objects.create_user(
username='test_user',
password='test_pass',
email='test@test.com'
)
self.collection = Collection.objects.create(name='testcol1', owner=self.user)
self.collection2 = Collection.objects.create(name='testcol2', owner=self.user)
self.source_launch_url = 'http://test_source_launch_url.com'
self.activity = Activity.objects.create(
name='testactivity1', collection=self.collection, source_launch_url=self.source_launch_url
)
self.activity2 = Activity.objects.create(
name='testactivity2', collection=self.collection2, source_launch_url=self.source_launch_url
)
self.activity3 = Activity.objects.create(
name='testactivity3', collection=self.collection, source_launch_url=f"{self.source_launch_url}3",
)
self.activity4 = Activity.objects.create(
name='testactivity4',
collection=self.collection,
source_launch_url=f"{self.source_launch_url}4",
stype='problem',
)
self.activity5 = Activity.objects.create(
name='testactivity5',
collection=self.collection,
source_launch_url=f"{self.source_launch_url}5",
stype='problem',
)
self.lti_lms_platform = LtiLmsPlatform.objects.create(
consumer_name='test_consumer', consumer_key='test_consumer_key', consumer_secret='test_consumer_secret'
)
self.lti_user = LtiUser.objects.create(
user_id='test_user_id', lti_lms_platform=self.lti_lms_platform, bridge_user=self.user
)
self.engine = Engine.objects.get(engine='engine_mock')
        self.grading_policy = GradingPolicy.objects.get(name='trials_count')
self.outcome_service = OutcomeService.objects.create(
lis_outcome_service_url='test_url', lms_lti_connection=self.lti_lms_platform
)
self.test_cg = ModuleGroup.objects.create(name='TestColGroup', owner=self.user)
self.collection_order1 = CollectionOrder.objects.create(
group=self.test_cg,
collection=self.collection,
engine=self.engine,
            grading_policy=self.grading_policy
)
self.sequence = Sequence.objects.create(
lti_user=self.lti_user,
collection_order=self.collection_order1,
)
self.vpal_engine = Engine.objects.get(engine='engine_vpal')
self.vpal_group = ModuleGroup.objects.create(name='TestVpalGroup', owner=self.user)
self.collection_order2 = CollectionOrder.objects.create(
group=self.vpal_group,
collection=self.collection,
engine=self.vpal_engine,
)
self.vpal_sequence = Sequence.objects.create(
lti_user=self.lti_user,
collection_order=self.collection_order2,
outcome_service=self.outcome_service
)
self.sequence_item_1 = SequenceItem.objects.create(sequence=self.sequence, activity=self.activity, score=0.4)
self.sequence_item_2 = SequenceItem.objects.create(
sequence=self.sequence, activity=self.activity2, score=0.6, position=2
)
self.sequence_item_3 = SequenceItem.objects.create(sequence=self.sequence, activity=self.activity3, position=3)
self.sequence_item_4 = SequenceItem.objects.create(sequence=self.sequence, activity=self.activity4, position=4)
def test_choose_activity(self):
try:
            # test that 2 activities share the same launch url but belong to different collections
# this method should return only one activity, filtered by collection_order.order and sequence.collection
chosen_activity = choose_activity(sequence=self.sequence)
except MultipleObjectsReturned as e:
            log.error(Activity.objects.all().values('collection', 'source_launch_url'))
self.fail(e)
expected_activity = Activity.objects.get(
collection=self.sequence.collection_order.collection, source_launch_url=f"{self.source_launch_url}5"
)
self.assertEqual(chosen_activity, expected_activity)
@patch('module.engines.engine_vpal.EngineVPAL.select_activity', return_value=None)
def test_choose_activity_with_unconfigured_engine(self, mock_choose_activity_by_engine):
"""
        Test that the sequence is deleted if, after creation, no first activity is chosen for any reason.
"""
choose_activity(sequence=self.vpal_sequence)
sequence_is_exists = Sequence.objects.filter(collection_order=self.collection_order2)
self.assertFalse(sequence_is_exists)
@patch('module.engines.engine_vpal.EngineVPAL.select_activity', return_value={'complete': True})
def test_choose_activity_from_completed_collection(self, mock_choose_activity_by_engine):
"""
        Test that the sequence becomes completed if at least one sequence_item exists and no new activity is chosen.
"""
sequence_item = SequenceItem.objects.create(sequence=self.vpal_sequence, activity=self.activity)
choose_activity(sequence_item=sequence_item)
completed_sequence = Sequence.objects.filter(collection_order=self.collection_order2).first()
self.assertEqual(completed_sequence, self.vpal_sequence)
self.assertTrue(completed_sequence.completed)
@ddt.unpack
@ddt.data(
{
'item_index': 1,
'update_activity': False,
'last_item': 4,
'position': 2,
'pre_expected_result': (2, None, None)
},
{
'item_index': 3,
'update_activity': False,
'last_item': 4,
'position': 4,
'pre_expected_result': (4, None, None)
},
{
'item_index': 3,
'update_activity': True,
'last_item': 4,
'position': 4,
'pre_expected_result': (5, None, None)
},
{
'item_index': 4,
'update_activity': False,
'last_item': 4,
'position': 5,
'pre_expected_result': (4, None, None)
},
)
def test_select_next_sequence_item(self, item_index, update_activity, last_item, position, pre_expected_result):
next_item = getattr(self, f"sequence_item_{pre_expected_result[0]}", None)
result = select_next_sequence_item(
getattr(self, f"sequence_item_{item_index}"),
update_activity,
last_item,
position,
)
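        # when the requested position runs past the last item, or the activity was updated,
        # the helper is expected to return the newest item appended to the sequence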
if position > last_item or update_activity:
next_item = self.sequence.items.last()
expected_result = (next_item, *pre_expected_result[1:])
self.assertEqual(expected_result, result)
def test_sequence_completed_in_select_next_sequence_item(self):
self.sequence_item_5 = SequenceItem.objects.create(sequence=self.sequence, activity=self.activity5, position=5)
sequence_item = self.sequence_item_5
expected_result = (sequence_item, True, None)
result = select_next_sequence_item(sequence_item, update_activity=False, last_item=5, position=6)
self.assertEqual(expected_result, result)
| 43.448087
| 119
| 0.672871
|
8598dfacdcb9949cd7d4adf4c90b56c407aa8544
| 2,587
|
py
|
Python
|
heron/cli/src/python/jars.py
|
sbilly/heron
|
d8cc6977791dd3d8ed4333562e2ce249646a5226
|
[
"Apache-2.0"
] | 1
|
2021-04-25T11:25:44.000Z
|
2021-04-25T11:25:44.000Z
|
heron/cli/src/python/jars.py
|
sbilly/heron
|
d8cc6977791dd3d8ed4333562e2ce249646a5226
|
[
"Apache-2.0"
] | null | null | null |
heron/cli/src/python/jars.py
|
sbilly/heron
|
d8cc6977791dd3d8ed4333562e2ce249646a5226
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, fnmatch
import heron.cli.src.python.utils as utils
################################################################################
# Pick the first file in a directory that matches the given pattern
################################################################################
def pick(dirname, pattern):
file_list = fnmatch.filter(os.listdir(dirname), pattern)
return file_list[0] if file_list else None
################################################################################
# Get the topology jars - TODO: make the jars version-independent
################################################################################
def topology_jars():
jars = [
os.path.join(utils.get_heron_lib_dir(), "third_party", "*")
]
return jars
################################################################################
# Get the scheduler jars
################################################################################
def scheduler_jars():
jars = [
os.path.join(utils.get_heron_lib_dir(), "scheduler", "*")
]
return jars
################################################################################
# Get the uploader jars
################################################################################
def uploader_jars():
jars = [
os.path.join(utils.get_heron_lib_dir(), "uploader", "*")
]
return jars
################################################################################
# Get the statemgr jars
################################################################################
def statemgr_jars():
jars = [
os.path.join(utils.get_heron_lib_dir(), "statemgr", "*")
]
return jars
################################################################################
# Get the packing algorithm jars
################################################################################
def packing_jars():
jars = [
os.path.join(utils.get_heron_lib_dir(), "packing", "*")
]
return jars
| 36.957143
| 80
| 0.443757
|
2344428346134dab4e8b6ea8fc0110dbe3f0f59a
| 4,019
|
py
|
Python
|
cv/classification/classicNets/resNet.py
|
XingJinming-real/DL
|
9c793338a60b663ab3cdc702dc73617156b4ae93
|
[
"MIT"
] | 1
|
2021-07-22T13:16:51.000Z
|
2021-07-22T13:16:51.000Z
|
cv/classification/classicNets/resNet.py
|
XingJinming-real/DL
|
9c793338a60b663ab3cdc702dc73617156b4ae93
|
[
"MIT"
] | null | null | null |
cv/classification/classicNets/resNet.py
|
XingJinming-real/DL
|
9c793338a60b663ab3cdc702dc73617156b4ae93
|
[
"MIT"
] | null | null | null |
import time
import d2l.torch
import numpy as np
import pandas as pd
import torch.nn as nn
import torch
from torchvision import datasets
from torchvision import transforms
from torch.utils import data
import matplotlib.pyplot as plt
import torch.nn.functional as F
from sklearn.preprocessing import LabelEncoder
from PIL import Image
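# basic two-convolution residual block; a 1x1 convolution reshapes the shortcut whenever stride != 1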
class residual(nn.Module):
def __init__(self, inputChannels, outputChannels, stride=1):
super(residual, self).__init__()
self.conv1 = nn.Conv2d(inputChannels, outputChannels, kernel_size=(3, 3), stride=stride, padding=(1, 1))
self.conv2 = nn.Conv2d(outputChannels, outputChannels, kernel_size=(3, 3), padding=(1, 1))
if stride != 1:
self.conv3 = nn.Conv2d(inputChannels, outputChannels, stride=stride, kernel_size=(1, 1))
else:
self.conv3 = None
self.bn1 = nn.BatchNorm2d(outputChannels)
self.bn2 = nn.BatchNorm2d(outputChannels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
y = self.relu(self.bn1(self.conv1(x)))
y = self.bn2(self.conv2(y))
if self.conv3:
x = self.conv3(x)
y += x
return self.relu(y)
b1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(2, 2)),
nn.BatchNorm2d(64), nn.ReLU(), nn.MaxPool2d(kernel_size=(3, 3), stride=2, padding=1))
def resnetBlock(inputChannel, outputChannel, numResidual, first=False):
blk = []
for _ in range(numResidual):
if not first and _ == 0:
blk.append(residual(inputChannel, outputChannel, 2))
else:
blk.append(residual(outputChannel, outputChannel))
return blk
b2 = nn.Sequential(*resnetBlock(64, 64, 2, first=True))
b3 = nn.Sequential(*resnetBlock(64, 128, 2))
b4 = nn.Sequential(*resnetBlock(128, 256, 2))
b5 = nn.Sequential(*resnetBlock(256, 512, 2))
net = nn.Sequential(b1, b2, b3, b4, b5, nn.AdaptiveAvgPool2d((1, 1)), nn.Flatten(), nn.Linear(512, 10),
                    nn.Softmax(dim=1))  # softmax over the class dimension, not the batch dimension
x = torch.rand((1, 1, 224, 224))
def getData():
trainIdx = pd.read_csv('../data/leavesClassification/train.csv')
le = LabelEncoder()
label = le.fit_transform(trainIdx['label'])
return data.DataLoader(datasets.FashionMNIST('../../data', transform=transforms.ToTensor()), batch_size=batchSize,
shuffle=True), \
data.DataLoader(datasets.FashionMNIST('../../data', transform=transforms.ToTensor(), train=False))
def accuracy(predict, y):
mask = torch.ones(len(y))
mask = mask[predict.max(axis=1).indices == y]
return float(sum(mask) / len(y))
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
lr, numEpoch, batchSize = 0.4, 100, 256
# trainIter, testIter = getData()
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr, momentum=0.9)
pltX = []
pltY = []
plt.ion()
net.to(device)
bestScore = 0
print(net(torch.rand((1, 1, 224, 224))))
# for epoch in range(numEpoch):
# L = 0
# accumulator = [0, 0, 0]
# # num = 0
# begin = time.time()
# for X, y in trainIter:
# if torch.cuda.is_available():
# X, y = X.to(device), y.to(device)
# optimizer.zero_grad()
# predict = net(X)
# l = loss(predict, y)
# l.backward()
# optimizer.step()
# L += l * len(y)
# # num += len(y)
# accumulator[0] += float(l) * len(y)
# accumulator[1] += accuracy(predict, y)
# accumulator[2] += len(y)
# print(f'loss on train {accumulator[0] / accumulator[2]}, accu on train {accumulator[1] / accumulator[2]}')
# # if accumulator[0] / accumulator[2] > bestScore:
# # bestScore = accumulator[0] / accumulator[2]
# # torch.save(net.state_dict(), './resNet18' + str(epoch) + '.pt')
# pltX.append(epoch)
# pltY.append(accumulator[0] / accumulator[2])
# plt.plot(pltX, pltY)
# plt.show()
# plt.pause(0.5)
# plt.ioff()
# # plt.show()
| 33.491667
| 118
| 0.619059
|
bb9ead70808a80058ecf34502635a152aa495c1c
| 3,640
|
py
|
Python
|
utils.py
|
raoofnaushad/Fourier-Transformation-for-Image-Processing
|
f3f1d11dc97e593392a019ad25d583dd19d07552
|
[
"MIT"
] | 2
|
2020-10-15T11:38:58.000Z
|
2021-09-27T08:30:09.000Z
|
utils.py
|
raoofnaushad/Fourier-Transformation-for-Image-Processing
|
f3f1d11dc97e593392a019ad25d583dd19d07552
|
[
"MIT"
] | null | null | null |
utils.py
|
raoofnaushad/Fourier-Transformation-for-Image-Processing
|
f3f1d11dc97e593392a019ad25d583dd19d07552
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
import os
from fourier_transform import *
def img_FFT(img):
if len(img.shape) == 2 or len(img.shape) == 3:
return FFT_2D(img)
else:
raise ValueError("Please input a gray or RGB image!")
def img_FFT_inverse(img):
if len(img.shape) == 2 or len(img.shape) == 3:
return inverseFFT_2D(img)
else:
raise ValueError("Please input a gray or RGB image!")
def findpower2(num):
"""find the nearest number that is the power of 2"""
if num & (num-1) == 0:
return num
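    # otherwise build a binary literal one digit longer than num, e.g. 5 -> '101' -> '0b1000' -> 8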
bin_num = bin(num)
origin_bin_num = str(bin_num)[2:]
near_power2 = pow(10, len(origin_bin_num))
near_power2 = "0b" + str(near_power2)
near_power2 = int(near_power2, base=2)
return near_power2
def image_padding(img):
""" padding the image size to power of 2, for fft computation requirement"""
if len(img.shape) == 2:
h, w = img.shape[0], img.shape[1]
h_pad = findpower2(h)-h
w_pad = findpower2(w)-w
img = np.pad(img, pad_width=((0, h_pad), (0, w_pad)), mode='constant')
return img
elif len(img.shape) == 3:
h, w = img.shape[0], img.shape[1]
h_pad = findpower2(h)-h
w_pad = findpower2(w)-w
img = np.pad(img, pad_width=((0, h_pad), (0, w_pad), (0, 0)), mode='constant')
return img
def image_fft(img_path, result_folder_path="result/"):
""" read, padding, fft, cut to origin size and save """
data_root, img_name = os.path.split(img_path)
if img_name[-3:] != "png" and img_name[-3:] != "tif" \
and img_name[-4:] != "jpeg" and img_name[-3:] != "jpg":
return 0
if not os.path.exists(result_folder_path):
os.mkdir(result_folder_path)
img_origin = cv2.imread(img_path, 0)
img = image_padding(img_origin)
img_fft = img_FFT(img)
if len(img_origin.shape) == 2:
img_fft = img_fft[:img_origin.shape[0], :img_origin.shape[1]]
else:
img_fft = img_fft[:img_origin.shape[0], :img_origin.shape[1], :]
img_fft_complex = img_fft.copy()
img_fft = np.fft.fftshift(img_fft_complex)
    # save the real part so the spectrum can be viewed as an image
img_fft = np.real(img_fft)
name, _ = img_name.split(".")
save_img_name = result_folder_path + name + "_fft.png"
cv2.imwrite(save_img_name, img_fft)
return img_fft_complex
def image_fft_inverse(img_fft_complex, img_path, result_folder_path="result/"):
""" inverse the read fft_img, cut to origin size and save """
if not os.path.exists(result_folder_path):
os.mkdir(result_folder_path)
_, img_name = os.path.split(img_path)
img_fft = image_padding(img_fft_complex)
img_origin = img_FFT_inverse(img_fft)
img_ifft = np.real(img_origin)
name, _ = img_name.split(".")
save_img_name = result_folder_path + name + "_inverse.png"
if len(img_origin.shape) == 2:
img_ifft = img_ifft[:img_fft_complex.shape[0], :img_fft_complex.shape[1]]
else:
img_ifft = img_ifft[:img_fft_complex.shape[0], :img_fft_complex.shape[1], :]
cv2.imwrite(save_img_name, img_ifft)
return img_origin
if __name__ == '__main__':
x = np.mgrid[:8, :8][0]
# x = np.mgrid[:4, :4][0]
print(x)
print("-------------------")
# print(np.allclose(FFT(x), np.fft.fft(x)))
# print(np.allclose(FFT_2D(x), np.fft.fft2(x)))
# print(FFT_2D(x))
print(inverseFFT_2D(x))
# print(inverseDFT_2D(x))
print("-------------------")
print(np.fft.ifft2(x))
print("-------------------")
# print(np.fft.fft(x))
# print(np.allclose(np.fft.fft(x), np.fft.fft2(x)))
| 28
| 86
| 0.615659
|
0c81eceb34faf06deca8059ed34a529d843304e5
| 30
|
py
|
Python
|
btd6_memory_info/generated/Assets/Scripts/Models/Towers/Behaviors/Abilities/Behaviors/IncreaseRangeModel/increase_range_model.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
btd6_memory_info/generated/Assets/Scripts/Models/Towers/Behaviors/Abilities/Behaviors/IncreaseRangeModel/increase_range_model.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
btd6_memory_info/generated/Assets/Scripts/Models/Towers/Behaviors/Abilities/Behaviors/IncreaseRangeModel/increase_range_model.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
class IncreaseRangeModel: pass
| 30
| 30
| 0.9
|
9cdf65fe1c1914d17292901be27526fe0de91919
| 14,448
|
py
|
Python
|
dsbox/ml/feature_engineering/timeseries.py
|
Pandinosaurus/dsbox
|
aea56049025ed7e6e66427f8636286f8be1b6e03
|
[
"Apache-2.0"
] | 16
|
2020-05-11T09:10:15.000Z
|
2021-04-13T08:43:28.000Z
|
dsbox/ml/feature_engineering/timeseries.py
|
Pandinosaurus/dsbox
|
aea56049025ed7e6e66427f8636286f8be1b6e03
|
[
"Apache-2.0"
] | 1
|
2020-12-03T20:02:32.000Z
|
2020-12-03T20:02:32.000Z
|
dsbox/ml/feature_engineering/timeseries.py
|
Pandinosaurus/dsbox
|
aea56049025ed7e6e66427f8636286f8be1b6e03
|
[
"Apache-2.0"
] | 1
|
2020-05-11T17:22:20.000Z
|
2020-05-11T17:22:20.000Z
|
import numpy as np
import pandas as pd
from numpy.lib.stride_tricks import as_strided
from sklearn.base import BaseEstimator, TransformerMixin
import types
__author__ = "Vincent Levorato"
__credits__ = "https://github.com/octo-technology/bdacore"
__license__ = "Apache 2.0"
class Shifter(BaseEstimator, TransformerMixin):
"""
Compute shifted values for a given dataframe, and creates columns associated.
Parameters
----------
shifts: list, optional, default [1]
        Allows shifting data by periods (meaning by row) using the pandas.shift() method.
prefix: string, optional
Put a prefix in front of the column names.
suffix: string, optional
Put a suffix at the end of the column names.
group_by_cols: list, optional, default None
        Allows shifting values grouped by columns.
ignore_cols: list, optional, default None
        Allows some columns to be excluded from shifting.
Examples
--------
>>> from dsbox.ml.feature_engineering.timeseries import Shifter
>>> df_ts = pd.DataFrame({'data': [0.0, 1.0, 2.0, 3.0, 4.0]})
>>> shifter = Shifter(shifts=[1],prefix='t-')
>>> df_shift_ts = shifter.transform(df_ts)
>>> df_shift_ts
t-1_data
0 NaN
1 0.0
2 1.0
3 2.0
4 3.0
"""
def __init__(self, shifts=[1], prefix='', suffix='', **kwargs):
self.shifts = shifts
self.prefix = prefix
self.suffix = suffix
self.kwargs = kwargs
def fit(self, X=None, y=None):
"""
No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Parameters
----------
X: array-like
y: array-like
Returns
-------
self: Shifter
"""
return self
def transform(self, X):
"""
Transform a dataframe into shift values corresponding to shift periods
Parameters
----------
X: dataframe
Input pandas dataframe.
Returns
-------
X: dataframe
Dataframe with columns having shifted values (one by shift)
"""
X_shifted = pd.DataFrame()
for shift_value in self.shifts:
X_shifted_tmp = X
X_shifted_tmp = X_shifted_tmp.shift(periods=shift_value, **self.kwargs)
prefix = ''
suffix = ''
if self.prefix != '':
prefix = self.prefix + str(shift_value) + '_'
if self.suffix != '':
suffix = self.suffix + str(shift_value)
if self.suffix == '' and self.prefix == '':
suffix = '_' + str(shift_value)
X_shifted_tmp.columns = X_shifted_tmp.columns.map(lambda x: prefix + x + suffix)
if len(X_shifted) == 0:
X_shifted = X_shifted_tmp
else:
X_shifted = X_shifted.join(X_shifted_tmp)
return X_shifted
class RollingWindower(BaseEstimator, TransformerMixin):
"""
Compute rolling aggregated values for a given dataframe.
Classical operators (mean, std, median, etc.) or custom operators can be used to compute the windows.
Windows are based on index, which could be a simple integer or a pandas timestamp. Use **kwargs to pass extra
arguments to pandas rolling function (like min_periods for instance).
Parameters
----------
operation: str or function, optional, default 'mean'
Set the aggregation function used to aggregate a window.
windows: list, optional, default [3]
Set the windows used to aggregate data. Time windows can be set also (see pandas time unit
syntax) if the dataframe index is a timestamp
Examples:
[3]: one window of size 3
[2,5]: one window of size 2 and one window of size 5
[2s, 10s]: one window of 2 secs and one window of 10 secs
diff_mode: boolean, optional, default False
Process the difference between values and its window aggregate value.
Examples
--------
>>> import pandas as pd
>>> from dsbox.ml.feature_engineering.timeseries import RollingWindower
>>> df = pd.DataFrame({'data': [0, 1, 2, 3, 4]})
>>> roller = RollingWindower(windows=[2,3])
>>> df_roll = roller.transform(df)
>>> df_roll
mean_2_data mean_3_data
0 NaN NaN
1 0.5 NaN
2 1.5 1.0
3 2.5 2.0
4 3.5 3.0
>>> df_ts = pd.DataFrame({'data': [0, 1, 2, 3, 4]}, \
index=[pd.Timestamp('20130101 09:00:00'), \
pd.Timestamp('20130101 09:00:02'), \
pd.Timestamp('20130101 09:00:03'), \
pd.Timestamp('20130101 09:00:05'), \
pd.Timestamp('20130101 09:00:06')])
>>> roller = RollingWindower(windows=['5s', '2s'])
>>> df_roll = roller.transform(df_ts)
>>> df_roll
mean_5s_data mean_2s_data
2013-01-01 09:00:00 0.0 0.0
2013-01-01 09:00:02 0.5 1.0
2013-01-01 09:00:03 1.0 1.5
2013-01-01 09:00:05 2.0 3.0
2013-01-01 09:00:06 2.5 3.5
"""
def __init__(self, operation='mean', windows=[3], **kwargs):
self.operation = operation
self.windows = windows
self.kwargs = kwargs
def fit(self, X=None, y=None):
"""
No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Parameters
----------
X: array-like
y: array-like
Returns
-------
self: RollingWindower
"""
return self
def transform(self, raw_X):
"""
Transform a dataframe into aggregated values corresponding to window sizes
Parameters
----------
raw_X: dataframe
Input pandas dataframe.
Returns
-------
X: dataframe
Dataframe with columns having rolling measures (one per window)
"""
X = pd.DataFrame()
for window in self.windows:
X_m = raw_X.rolling(window, **self.kwargs).agg(self.operation)
columns_name = []
if isinstance(self.operation, types.FunctionType):
col_name = self.operation.__name__
else:
col_name = self.operation
for col in X_m.columns:
columns_name.append(col_name + '_' + str(window) + '_' + col)
X_m.columns = columns_name
if len(X) == 0:
X = X_m
else:
X = X.join(X_m)
return X
class DistributionTransformer(BaseEstimator, TransformerMixin):
"""
Build a discrete distribution (histogram) for feature engineering for each column, per line,
following a rolling window. It captures the evolving distribution of a feature.
For instance, if a serie is composed of the following values: [3, 10, 12, 23], and with a window parameter of 3,
it takes these values, and apply histogram (bins=4) function on it:
[3] => [0, 0, 1, 0]
[3, 10] => [1, 0, 0, 1]
[3, 10, 12] => [1, 0, 0, 2]
[10, 12, 23] => [2, 0, 0, 1]
Parameters
----------
window: int
Size of the rolling window.
bins: int, optional, (default=4)
Amount of bins used to estimate distribution
quantiles: int, optional, (default=None)
If set, the transformer will return quantiles information.
Examples
--------
>>> import pandas as pd
>>> from dsbox.ml.feature_engineering.timeseries import DistributionTransformer
>>> df = pd.DataFrame({'sales': [3, 10, 12, 23, 48, 19, 21]})
>>> distrib_transformer = DistributionTransformer(3)
>>> distrib_transformer.fit_transform(df)
sales_bin_1 sales_bin_2 sales_bin_3 sales_bin_4
0 0 0 1 0
1 1 0 0 1
2 1 0 0 2
3 2 0 0 1
4 1 1 0 1
5 2 0 0 1
6 2 0 0 1
"""
def __init__(self, window, bins=4, quantiles=None):
self.window = window
self.bins = bins
self.quantiles = quantiles
def fit(self, X=None, y=None):
"""
No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Parameters
----------
X: array-like
y: array-like
Returns
-------
self
"""
return self
def transform(self, X):
"""
Transform a dataframe to build discrete distribution per column.
Parameters
----------
X: dataframe
Input pandas dataframe.
Returns
-------
X: dataframe
Dataframe with bin values per column.
"""
X_distrib = pd.DataFrame()
for col in X.columns:
col_serie = X[col]
bins_list = []
for i in range(0, len(col_serie)):
min_bound = i - self.window + 1
if min_bound < 0:
min_bound = 0
max_bound = i + 1
if max_bound >= len(col_serie):
max_bound = len(col_serie)
if self.quantiles is None:
bins_list.append(np.histogram(col_serie[min_bound:max_bound], bins=self.bins)[0])
else:
bins_list.append(np.quantile(col_serie[min_bound:max_bound], self.quantiles))
X_col_distrib = pd.DataFrame(bins_list)
X_col_distrib = X_col_distrib.set_index(X.index)
if self.quantiles is None:
X_col_distrib.columns = [col + '_bin_' + str(i) for i in range(1, self.bins + 1)]
else:
X_col_distrib.columns = [col + '_quantile_' + str(i) for i in range(1, len(self.quantiles) + 1)]
X_col_distrib = X_col_distrib.fillna(0)
if len(X_distrib) == 0:
X_distrib = X_col_distrib
else:
X_distrib = X_distrib.join(X_col_distrib)
return X_distrib
def transform_datetxt2int(X, col_date, format='%Y-%m-%d'):
"""
Inplace transformation of a string date column into an integer format date column.
Parameters
----------
X: dataframe
col_date: str
Column name to transform
format: str
Pandas date str format
"""
X[col_date] = pd.to_datetime(X[col_date], format=format)
X[col_date] = X[col_date].map(lambda x: (x.year * 10 ** 4) + (x.month * 10 ** 2) + x.day)
X[col_date] = X[col_date].astype('int')
def create_diff_shift_features(df_shift, cols=[], prefix='diff_'):
"""
Create diff values between time series columns with have been shifted.
Parameters
----------
df_shift: dataframe
Dataset with columns to make apply diff.
cols: list
Columns names, in order, to apply diff.
prefix: str
Prefix used to name diff columns.
"""
for i in range(0, len(cols) - 1):
df_shift[prefix + cols[i + 1] + '_' + cols[i]] = df_shift[cols[i + 1]] - df_shift[cols[i]]
def _get_numpy_array_strides(array, window):
shape = array.shape[:-1] + (array.shape[-1], window)
strides = array.strides + (array.strides[-1],)
sequence_strides = as_strided(array, shape=shape, strides=strides)
return sequence_strides
def np_rolling_agg_window(array, window=3, agg_func=np.nanmean):
"""
Calculating rolling aggregation using function, based on zero memory copying with numpy.
By default, the aggregation function is mean, so it returns rolling mean.
    Efficient version of the Pandas rolling function (with fewer parameters).
Parameters
----------
array: numpy.array or pandas.Series
array values
window: int, default=3
window size for the aggregate function rolling calculation
agg_func: Callable, default=numpy.nanmean
function used for aggregation
Returns
-------
agg: numpy.array
Rolling result.
"""
if type(array) == pd.Series:
array = array.values
sequence_strides = _get_numpy_array_strides(array, window)
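    # row i of sequence_strides views array[i:i+window]; np.roll moves the invalid trailing windows
    # to the first window-1 slots, which are then overwritten with NaN below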
agg = np.roll(agg_func(sequence_strides, axis=1), window - 1)
agg[0:window - 1] = np.nan
return agg
def np_rolling_agg_abs_deviation_window(array, window=3, agg_func=np.nanmean):
"""
Calculating rolling absolute deviation related to an aggregation function, using zero memory copying based on numpy.
    By default, the aggregation function is mean, so it returns the rolling Mean Absolute Deviation. Other calculations can be done
like the rolling Median Absolute Deviation.
Parameters
----------
array: numpy.array or pandas.Series
array values
window: int, default=3
window size for the aggregate function rolling calculation
agg_func: Callable, default=numpy.nanmean
function used for aggregation
Returns
-------
agg_abs_deviation: numpy.array
Rolling "Agg" Absolute Deviation result.
"""
if type(array) == pd.Series:
array = array.values
sequence_strides = _get_numpy_array_strides(array, window)
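    # per-window aggregate reshaped to a column so it broadcasts against each window when taking absolute deviations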
m = agg_func(sequence_strides, axis=1).reshape(-1, 1)
agg_abs_deviation = np.roll(agg_func(np.abs(sequence_strides - m), axis=1), window - 1)
agg_abs_deviation[0:window - 1] = np.nan
return agg_abs_deviation
| 30.871795
| 127
| 0.55018
|
07e3f17638e89cc1e785f0ea1ab11efd910cc619
| 4,638
|
py
|
Python
|
models/hr_attendance_day.py
|
gregoril/hr_cgt
|
10a92c0d54f63aa7216318448f7053c874d1be7d
|
[
"Apache-2.0"
] | null | null | null |
models/hr_attendance_day.py
|
gregoril/hr_cgt
|
10a92c0d54f63aa7216318448f7053c874d1be7d
|
[
"Apache-2.0"
] | null | null | null |
models/hr_attendance_day.py
|
gregoril/hr_cgt
|
10a92c0d54f63aa7216318448f7053c874d1be7d
|
[
"Apache-2.0"
] | 2
|
2019-08-07T18:25:43.000Z
|
2019-08-19T16:19:57.000Z
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api, _
from odoo.exceptions import ValidationError
from odoo.tools import DEFAULT_SERVER_DATE_FORMAT, \
DEFAULT_SERVER_DATETIME_FORMAT
import pytz
from datetime import datetime
class FullAttendanceDay(models.Model):
_name = 'hr.attendance.day'
_description = "Attendance by Day"
_order = "day desc"
_sql_constraints = [
(
'employee_day_unique',
'unique(employee_id, day)',
            'Day must be unique per employee'
),
]
@api.multi
def name_get(self):
result = []
for attendance in self:
result.append((attendance.id, _("%(empl_name)s for %(day)s") % {
'empl_name': attendance.employee_id.name_related,
'day': fields.Date.to_string(
datetime.strptime(
attendance.day,
DEFAULT_SERVER_DATE_FORMAT
)
),
}))
return result
@api.depends('attendance_ids.check_in',
'attendance_ids.check_out',
'attendance_id.check_in')
def _compute_day(self):
# detect timezone
if self.env.user.tz:
local = pytz.timezone(self.env.user.tz)
else:
local = pytz.utc
for r in self:
if isinstance(r.id, models.NewId):
continue
attendances = self.env['hr.attendance'].search(
[('attendance_day_id', '=', r.id)],
order="check_in asc",
)
tot_worked = 0
tot_break = 0
check_in = False
check_out = False
if not attendances:
attendances = [r.attendance_id]
first_a = attendances[0]
check_in = first_a.check_in
day = datetime.strftime(
pytz.utc.localize(
datetime.strptime(
check_in,
DEFAULT_SERVER_DATETIME_FORMAT
)
).astimezone(local), DEFAULT_SERVER_DATE_FORMAT)
prev = False
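            # walk attendances in check-in order: the gap between one check-out and the next check-in counts as break time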
for a in attendances:
if prev:
delta = datetime.strptime(
a.check_in, DEFAULT_SERVER_DATETIME_FORMAT
) - datetime.strptime(
prev.check_out, DEFAULT_SERVER_DATETIME_FORMAT
)
tot_break += delta.total_seconds() / 3600.0
tot_worked += a.worked_hours
check_out = a.check_out
prev = a
r.day = day
r.check_in = check_in
r.check_out = check_out
r.worked_hours = tot_worked
r.break_hours = tot_break
# Fields
day = fields.Date(
string=u'Day',
compute=_compute_day,
store=True,
readonly=True,
required=True,
default=fields.Date.context_today,
)
worked_hours = fields.Float(
string='Worked Hours',
compute=_compute_day,
store=True,
readonly=True
)
break_hours = fields.Float(
string='Break Hours',
compute=_compute_day,
store=True,
readonly=True
)
check_in = fields.Datetime(
string="Check In",
compute=_compute_day,
store=True,
readonly=True
)
check_out = fields.Datetime(
string="Check Out",
compute=_compute_day,
store=True,
readonly=True
)
employee_id = fields.Many2one(
'hr.employee',
string="Employee",
required=True,
ondelete='cascade',
index=True,
readonly=True
)
attendance_ids = fields.One2many(
string=u'Attendance',
comodel_name='hr.attendance',
inverse_name='attendance_day_id',
)
attendance_id = fields.Many2one(
'hr.attendance',
string="Creating attendance",
required=True,
readonly=True
)
@api.multi
def unlink(self):
"""
        Delete all record(s) from the recordset, along with their related attendances.
        @return: True on success, False otherwise
        #TODO: process the resource before deleting it
"""
related_attendances = self.env['hr.attendance'].search([
('attendance_day_id', 'in', self.ids)
])
result = super(FullAttendanceDay, self).unlink()
related_attendances.unlink()
return result
| 26.20339
| 76
| 0.525873
|
96f433068e3392fa16f667bd2b6aacd25a03c4ff
| 1,309
|
py
|
Python
|
settings/EnsembleSAXS_PP_Panel_settings.py
|
bopopescu/Lauecollect
|
60ae2b05ea8596ba0decf426e37aeaca0bc8b6be
|
[
"MIT"
] | null | null | null |
settings/EnsembleSAXS_PP_Panel_settings.py
|
bopopescu/Lauecollect
|
60ae2b05ea8596ba0decf426e37aeaca0bc8b6be
|
[
"MIT"
] | 1
|
2019-10-22T21:28:31.000Z
|
2019-10-22T21:39:12.000Z
|
settings/EnsembleSAXS_PP_Panel_settings.py
|
bopopescu/Lauecollect
|
60ae2b05ea8596ba0decf426e37aeaca0bc8b6be
|
[
"MIT"
] | 2
|
2019-06-06T15:06:46.000Z
|
2020-07-20T02:03:22.000Z
|
Calibration.CustomView = ['X-ray Scope Trigger', 'Ps Laser Osc. Delay', 'Ps Laser Trigger', 'Ns Laser Q-Switch Trigger', 'Ns Laser Flash Lamp Trigger', 'Laser Scope Trigger', 'Heatload Chopper Phase', 'Heatload Chop. Act. Phase', 'ChemMat Chopper Phase', 'ChemMat Chop. Act. Phase', 'X-ray Shutter Delay']
Calibration.view = 'Custom'
CustomView = ['Delay', 'Mode', 'Period [1-kHz cycles]', 'Laser', 'X-ray ms shutter', 'Pump', 'Trigger code', 'X-ray detector trigger', 'Image number', 'X-ray detector trigger count', 'X-ray detector acquistion count', 'X-ray scope trigger count', 'X-ray scope acquistion count', 'Laser scope trigger count', 'Laser scope acquistion count', 'Image number increment', 'Acquiring', 'Queue active', 'Queue length [sequences]', 'Current queue length [seq]', 'Current queue sequence cnt', 'Queue repeat count', 'Queue max repeat count', 'Next queue sequence cnt', 'Packets generated', 'Packets loaded', 'Sequencer Running', 'Sequence generator', 'Sequence generator version', 'Heatload chopper phase', 'Heatload chop. act. phase', 'High-speed chopper phase', 'P0 shift']
refresh_period = 1.0
view = 'Custom'
TimingConfiguration.CustomView = ['#', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24']
| 218.166667
| 764
| 0.679908
|
5b4414a98835034efd4fbc353ec021208fbe2d83
| 238
|
py
|
Python
|
smartbackup/__main__.py
|
alexsilva/smartbackup
|
fa203ff7c4c8e7eba5ae0f7e8e72578c6c844685
|
[
"MIT"
] | null | null | null |
smartbackup/__main__.py
|
alexsilva/smartbackup
|
fa203ff7c4c8e7eba5ae0f7e8e72578c6c844685
|
[
"MIT"
] | null | null | null |
smartbackup/__main__.py
|
alexsilva/smartbackup
|
fa203ff7c4c8e7eba5ae0f7e8e72578c6c844685
|
[
"MIT"
] | null | null | null |
#!/usr/local/bin/python
# What is '__main__' module ?
# Reference - https://docs.python.org/2/using/cmdline.html#cmdoption-m
__author__ = 'alex'
def main():
import bakthat
bakthat.main()
if __name__ == '__main__':
main()
| 15.866667
| 70
| 0.663866
|
02b89d5f168b6cd6fd05fb18a196571e26be7c7f
| 328
|
py
|
Python
|
Chapter07/c7_24_read_ffcMonthly.py
|
John-ye666/Python-for-Finance-Second-Edition
|
dabef09bcdd7b0ec2934774741bd0a7e1950de73
|
[
"MIT"
] | 236
|
2017-07-02T03:06:54.000Z
|
2022-03-31T03:15:33.000Z
|
Chapter07/c7_24_read_ffcMonthly.py
|
John-ye666/Python-for-Finance-Second-Edition
|
dabef09bcdd7b0ec2934774741bd0a7e1950de73
|
[
"MIT"
] | null | null | null |
Chapter07/c7_24_read_ffcMonthly.py
|
John-ye666/Python-for-Finance-Second-Edition
|
dabef09bcdd7b0ec2934774741bd0a7e1950de73
|
[
"MIT"
] | 139
|
2017-06-30T10:28:16.000Z
|
2022-01-19T19:43:34.000Z
|
"""
Name : c7_24_read_ffMonthly.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan
Date : 6/6/2017
email : yany@canisius.edu
paulyxy@hotmail.com
"""
import pandas as pd
x=pd.read_pickle("c:/temp/ffMonthly.pkl")
print(x.head())
print(x.tail())
| 21.866667
| 41
| 0.631098
|
14c3e0e1b201402ecba988680caead86765bb5b1
| 2,877
|
py
|
Python
|
sdk/network/azure-mgmt-dns/setup.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 8
|
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/network/azure-mgmt-dns/setup.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 226
|
2019-07-24T07:57:21.000Z
|
2019-10-15T01:07:24.000Z
|
sdk/network/azure-mgmt-dns/setup.py
|
iscai-msft/azure-sdk-for-python
|
83715b95c41e519d5be7f1180195e2fba136fc0f
|
[
"MIT"
] | 2
|
2020-05-21T22:51:22.000Z
|
2020-05-26T20:53:01.000Z
|
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-mgmt-dns"
PACKAGE_PPRINT_NAME = "DNS Management"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# azure v0.x is not compatible with this package
# azure v0.x used to have a __version__ attribute (newer versions don't)
try:
import azure
try:
ver = azure.__version__
raise Exception(
'This package is incompatible with azure=={}. '.format(ver) +
'Uninstall it with "pip uninstall azure".'
)
except AttributeError:
pass
except ImportError:
pass
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, 'version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('CHANGELOG.md', encoding='utf-8') as f:
changelog = f.read()
setup(
name=PACKAGE_NAME,
version=version,
description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + changelog,
long_description_content_type='text/markdown',
license='MIT License',
author='Microsoft Corporation',
author_email='azpysdkhelp@microsoft.com',
url='https://github.com/Azure/azure-sdk-for-python',
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=[
'tests',
# Exclude packages that will be covered by PEP420 or nspkg
'azure',
'azure.mgmt',
]),
install_requires=[
'msrest>=0.5.0',
'msrestazure>=0.4.32,<2.0.0',
'azure-common~=1.1',
],
extras_require={
":python_version<'3.0'": ['azure-mgmt-nspkg'],
}
)
| 32.325843
| 91
| 0.601668
|
005c023bb35eb6941588b482adcd80a017afa84f
| 2,758
|
py
|
Python
|
tests/test_item_rates.py
|
Canisback/solari
|
bd7c46e5cb727033644b32531f4559702cb41f68
|
[
"MIT"
] | 16
|
2021-01-04T21:44:24.000Z
|
2022-01-07T12:02:26.000Z
|
tests/test_item_rates.py
|
Canisback/solari
|
bd7c46e5cb727033644b32531f4559702cb41f68
|
[
"MIT"
] | 1
|
2021-01-26T07:45:24.000Z
|
2021-02-03T19:11:39.000Z
|
tests/test_item_rates.py
|
Canisback/solari
|
bd7c46e5cb727033644b32531f4559702cb41f68
|
[
"MIT"
] | 2
|
2021-01-05T14:00:28.000Z
|
2021-07-25T04:03:44.000Z
|
from solari import Leona
from solari.stats import ItemPickrate, ItemWinrate
def test_item_pickrate(match_set_1):
l = Leona([
ItemPickrate()
])
for m in match_set_1:
l.push_match(m)
stats = l.get_stats()
# Nightbringer picked 7 times in 3 games
assert stats["Pickrate"].loc[4636] == 7/30
def test_item_pickrate_with_duplicate(match_set_1):
l = Leona([
ItemPickrate()
])
for m in match_set_1:
l.push_match(m)
stats = l.get_stats()
# Needlessly large rod picked 8 times, but twice in duplicate
assert stats["Pickrate"].loc[1058] == 6/30
def test_item_winrate(match_set_1):
l = Leona([
ItemWinrate()
])
for m in match_set_1:
l.push_match(m)
stats = l.get_stats()
# Nightbringer won 3 times in 7 picks
assert stats["Winrate"].loc[4636] == 3/7
def test_item_winrate_with_duplicate(match_set_1):
l = Leona([
ItemWinrate()
])
for m in match_set_1:
l.push_match(m)
stats = l.get_stats()
# Needlessly large rod won 4 times out of the 8 picks, but twice in duplicate
assert stats["Winrate"].loc[1058] == 4/6
def test_item_pickrate_by_champion(match_set_1):
l = Leona([
ItemPickrate(by_champion=True)
])
for m in match_set_1:
l.push_match(m)
stats = l.get_stats()
# Nightbringer picked 2 times in 3 Zoe
assert stats["Pickrate"].loc[(142,4636)] == 2/3
def test_item_winrate_by_champion(match_set_1):
l = Leona([
ItemWinrate(by_champion=True)
])
for m in match_set_1:
l.push_match(m)
stats = l.get_stats()
# Nightbringer won 1 time in 2 picks by Zoe
assert stats["Winrate"].loc[(142,4636)] == 1/2
def test_item_multiple_keys(match_set_1):
l = Leona([
ItemPickrate(),
ItemPickrate(by_champion=True)
])
for m in match_set_1:
l.push_match(m)
stats = l.get_stats()
    # There should be two DataFrames
assert len(stats) == 2
# One index by itemId only
assert stats[("itemId",)]["Pickrate"].loc[4636] == 7/30
# One index by itemId and championId
assert stats[("championId","itemId")]["Pickrate"].loc[(142,4636)] == 2/3
def test_item_by_key(match_set_1):
l = Leona([
ItemPickrate(),
ItemPickrate(by_champion=True)
])
for m in match_set_1:
l.push_match(m)
stats = l.get_stats()
    # There should be two DataFrames
assert len(stats) == 2
# Directly check for the itemId index
l.get_stats(("itemId",))["Pickrate"].loc[4636] == 7/30
| 23.176471
| 81
| 0.592821
|
d5521e804ac7b05c00179c152fe3c2a0e316a994
| 10,153
|
py
|
Python
|
yt/geometry/coordinates/coordinate_handler.py
|
themousepotato/yt
|
6befef2bc0427250fd62395962599be41b193e65
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
yt/geometry/coordinates/coordinate_handler.py
|
themousepotato/yt
|
6befef2bc0427250fd62395962599be41b193e65
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
yt/geometry/coordinates/coordinate_handler.py
|
themousepotato/yt
|
6befef2bc0427250fd62395962599be41b193e65
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
import numpy as np
import weakref
from numbers import Number
from yt.funcs import \
validate_width_tuple, \
fix_unitary, \
iterable
from yt.units.yt_array import \
YTArray, YTQuantity
from yt.utilities.exceptions import \
YTCoordinateNotImplemented, \
YTInvalidWidthError
def _unknown_coord(field, data):
raise YTCoordinateNotImplemented
def _get_coord_fields(axi, units = "code_length"):
def _dds(field, data):
rv = data.ds.arr(data.fwidth[...,axi].copy(), units)
return data._reshape_vals(rv)
def _coords(field, data):
rv = data.ds.arr(data.fcoords[...,axi].copy(), units)
return data._reshape_vals(rv)
return _dds, _coords
def _get_vert_fields(axi, units = "code_length"):
def _vert(field, data):
rv = data.ds.arr(data.fcoords_vertex[...,axi].copy(), units)
return rv
return _vert
def validate_iterable_width(width, ds, unit=None):
if isinstance(width[0], tuple) and isinstance(width[1], tuple):
validate_width_tuple(width[0])
validate_width_tuple(width[1])
return (ds.quan(width[0][0], fix_unitary(width[0][1])),
ds.quan(width[1][0], fix_unitary(width[1][1])))
elif isinstance(width[0], Number) and isinstance(width[1], Number):
return (ds.quan(width[0], 'code_length'),
ds.quan(width[1], 'code_length'))
elif isinstance(width[0], YTQuantity) and isinstance(width[1], YTQuantity):
return (ds.quan(width[0]), ds.quan(width[1]))
else:
validate_width_tuple(width)
# If width and unit are both valid width tuples, we
# assume width controls x and unit controls y
try:
validate_width_tuple(unit)
return (ds.quan(width[0], fix_unitary(width[1])),
ds.quan(unit[0], fix_unitary(unit[1])))
except YTInvalidWidthError:
return (ds.quan(width[0], fix_unitary(width[1])),
ds.quan(width[0], fix_unitary(width[1])))
class CoordinateHandler:
name = None
def __init__(self, ds, ordering):
self.ds = weakref.proxy(ds)
self.axis_order = ordering
def setup_fields(self):
# This should return field definitions for x, y, z, r, theta, phi
raise NotImplementedError
def pixelize(self, dimension, data_source, field, bounds, size, antialias = True):
# This should *actually* be a pixelize call, not just returning the
# pixelizer
raise NotImplementedError
def pixelize_line(self, field, start_point, end_point, npoints):
raise NotImplementedError
def distance(self, start, end):
p1 = self.convert_to_cartesian(start)
p2 = self.convert_to_cartesian(end)
return np.sqrt(((p1-p2)**2.0).sum())
def convert_from_cartesian(self, coord):
raise NotImplementedError
def convert_to_cartesian(self, coord):
raise NotImplementedError
def convert_to_cylindrical(self, coord):
raise NotImplementedError
def convert_from_cylindrical(self, coord):
raise NotImplementedError
def convert_to_spherical(self, coord):
raise NotImplementedError
def convert_from_spherical(self, coord):
raise NotImplementedError
_data_projection = None
@property
def data_projection(self):
if self._data_projection is not None:
return self._data_projection
dpj = {}
for ax in self.axis_order:
dpj[ax] = None
self._data_projection = dpj
return dpj
_data_transform = None
@property
def data_transform(self):
if self._data_transform is not None:
return self._data_transform
dtx = {}
for ax in self.axis_order:
dtx[ax] = None
self._data_transform = dtx
return dtx
_axis_name = None
@property
def axis_name(self):
if self._axis_name is not None:
return self._axis_name
an = {}
for axi, ax in enumerate(self.axis_order):
an[axi] = ax
an[ax] = ax
an[ax.capitalize()] = ax
self._axis_name = an
return an
_axis_id = None
@property
def axis_id(self):
if self._axis_id is not None:
return self._axis_id
ai = {}
for axi, ax in enumerate(self.axis_order):
ai[ax] = ai[axi] = axi
self._axis_id = ai
return ai
_image_axis_name = None
@property
def image_axis_name(self):
# Default
if self._image_axis_name is not None:
return self._image_axis_name
self._image_axis_name = rv = {}
for i in range(3):
rv[i] = (self.axis_name[self.x_axis[i]],
self.axis_name[self.y_axis[i]])
rv[self.axis_name[i]] = rv[i]
rv[self.axis_name[i].capitalize()] = rv[i]
return rv
_x_axis = None
@property
def x_axis(self):
if self._x_axis is not None:
return self._x_axis
ai = self.axis_id
xa = {}
for a1, a2 in self._x_pairs:
xa[a1] = xa[ai[a1]] = ai[a2]
self._x_axis = xa
return xa
_y_axis = None
@property
def y_axis(self):
if self._y_axis is not None:
return self._y_axis
ai = self.axis_id
ya = {}
for a1, a2 in self._y_pairs:
ya[a1] = ya[ai[a1]] = ai[a2]
self._y_axis = ya
return ya
@property
def period(self):
raise NotImplementedError
def sanitize_depth(self, depth):
if iterable(depth):
validate_width_tuple(depth)
depth = (self.ds.quan(depth[0], fix_unitary(depth[1])), )
elif isinstance(depth, Number):
depth = (self.ds.quan(depth, 'code_length',
registry=self.ds.unit_registry), )
elif isinstance(depth, YTQuantity):
depth = (depth, )
else:
raise YTInvalidWidthError(depth)
return depth
def sanitize_width(self, axis, width, depth):
if width is None:
# initialize the index if it is not already initialized
self.ds.index
# Default to code units
if not iterable(axis):
xax = self.x_axis[axis]
yax = self.y_axis[axis]
w = self.ds.domain_width[np.array([xax, yax])]
else:
# axis is actually the normal vector
# for an off-axis data object.
mi = np.argmin(self.ds.domain_width)
w = self.ds.domain_width[np.array((mi, mi))]
width = (w[0], w[1])
elif iterable(width):
width = validate_iterable_width(width, self.ds)
elif isinstance(width, YTQuantity):
width = (width, width)
elif isinstance(width, Number):
width = (self.ds.quan(width, 'code_length'),
self.ds.quan(width, 'code_length'))
else:
raise YTInvalidWidthError(width)
if depth is not None:
depth = self.sanitize_depth(depth)
return width + depth
return width
def sanitize_center(self, center, axis):
if isinstance(center, str):
if center.lower() == "m" or center.lower() == "max":
v, center = self.ds.find_max(("gas", "density"))
center = self.ds.arr(center, 'code_length')
elif center.lower() == "c" or center.lower() == "center":
# domain_left_edge and domain_right_edge might not be
# initialized until we create the index, so create it
self.ds.index
center = (self.ds.domain_left_edge + self.ds.domain_right_edge) / 2
else:
raise RuntimeError('center keyword \"%s\" not recognized' % center)
elif isinstance(center, YTArray):
return self.ds.arr(center), self.convert_to_cartesian(center)
elif iterable(center):
if isinstance(center[0], str) and isinstance(center[1], str):
if center[0].lower() == "min":
v, center = self.ds.find_min(center[1])
elif center[0].lower() == "max":
v, center = self.ds.find_max(center[1])
else:
raise RuntimeError("center keyword \"%s\" not recognized" % center)
center = self.ds.arr(center, 'code_length')
elif iterable(center[0]) and isinstance(center[1], str):
center = self.ds.arr(center[0], center[1])
else:
center = self.ds.arr(center, 'code_length')
else:
raise RuntimeError("center keyword \"%s\" not recognized" % center)
# This has to return both a center and a display_center
display_center = self.convert_to_cartesian(center)
return center, display_center
def sanitize_buffer_fill_values(self, buff):
"""Replace nans with +inf in buff, if all valid values are positive"""
        # In a buffer with only positive values, matplotlib will raise a warning
# if nan is used as a filler, while it tolerates np.inf just fine
minval = buff[~np.isnan(buff)].min()
if minval >= 0:
buff[np.isnan(buff)] = np.inf
def cartesian_to_cylindrical(coord, center = (0,0,0)):
c2 = np.zeros_like(coord)
if not isinstance(center, YTArray):
center = center * coord.uq
c2[...,0] = ((coord[...,0] - center[0])**2.0
+ (coord[...,1] - center[1])**2.0)**0.5
    c2[...,1] = coord[...,2] # output ordering is (r, z, theta) -- "rzt"
c2[...,2] = np.arctan2(coord[...,1] - center[1],
coord[...,0] - center[0])
return c2
def cylindrical_to_cartesian(coord, center = (0,0,0)):
c2 = np.zeros_like(coord)
if not isinstance(center, YTArray):
center = center * coord.uq
c2[...,0] = np.cos(coord[...,0]) * coord[...,1] + center[0]
c2[...,1] = np.sin(coord[...,0]) * coord[...,1] + center[1]
c2[...,2] = coord[...,2]
return c2
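# Hedged numpy-only sketch (not in the original yt source) of the mapping the two
# helpers above implement. Note their asymmetric conventions: cartesian_to_cylindrical
# returns columns ordered (r, z, theta), while cylindrical_to_cartesian reads its
# input as (theta, r, z), so outputs must be reordered before round-tripping.
def _cylindrical_round_trip_sketch():
    xyz = np.array([[1.0, 1.0, 2.0]])
    r = np.hypot(xyz[..., 0], xyz[..., 1])
    theta = np.arctan2(xyz[..., 1], xyz[..., 0])
    z = xyz[..., 2]
    back = np.stack([r * np.cos(theta), r * np.sin(theta), z], axis=-1)
    return np.allclose(xyz, back)  # True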
| 35.253472
| 87
| 0.578548
|
9fc8326051148f3440f52ec4c2b97ddbfc6657e9
| 714
|
py
|
Python
|
postprocessing/partner_annotations/luigi_pipeline_spec_dir/run_debug.py
|
d-v-b/CNNectome
|
2b1f4786282306edf94b231c9fcf64419d8d1e2a
|
[
"BSD-2-Clause"
] | 4
|
2019-06-21T18:06:22.000Z
|
2021-11-29T08:28:46.000Z
|
CNNectome/postprocessing/partner_annotations/luigi_pipeline_spec_dir/run_debug.py
|
GenevieveBuckley/CNNectome
|
bde8528ed5adc0a4aefca3b19ecc4c2144f2cbcc
|
[
"BSD-2-Clause"
] | 4
|
2018-12-18T19:31:04.000Z
|
2022-01-10T16:06:45.000Z
|
CNNectome/postprocessing/partner_annotations/luigi_pipeline_spec_dir/run_debug.py
|
GenevieveBuckley/CNNectome
|
bde8528ed5adc0a4aefca3b19ecc4c2144f2cbcc
|
[
"BSD-2-Clause"
] | 9
|
2018-03-22T18:17:57.000Z
|
2022-03-24T01:17:21.000Z
|
import luigi
import os
from cleftreport_luigi import CleftReport
from partnerreport_luigi import PartnerReport
import logging
class AllEvaluations(luigi.WrapperTask):
it = 90000
data_train = "data2016-unaligned"
augmentation = "deluxe"
data_eval = "data2017-unaligned"
mode = "validation"
samples = luigi.TupleParameter(default=("A", "B", "C"))
def requires(self):
yield PartnerReport(
self.it,
self.data_train,
self.augmentation,
self.data_eval,
self.mode,
self.samples,
(self.data_eval,),
)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
luigi.run()
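# Illustrative invocation (not part of the original file): the wrapper task above
# can be launched with the local scheduler, e.g.
#   python run_debug.py AllEvaluations --local-scheduler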
| 23.032258
| 59
| 0.630252
|
4b2579fa231d30e6360dc0d4b65381bc4a1ba122
| 8,662
|
py
|
Python
|
pedash/utils.py
|
theoturner/Pedash
|
cb3dbdefbc970ca9b1a5147498010e9a23558732
|
[
"Apache-2.0"
] | null | null | null |
pedash/utils.py
|
theoturner/Pedash
|
cb3dbdefbc970ca9b1a5147498010e9a23558732
|
[
"Apache-2.0"
] | null | null | null |
pedash/utils.py
|
theoturner/Pedash
|
cb3dbdefbc970ca9b1a5147498010e9a23558732
|
[
"Apache-2.0"
] | null | null | null |
"""
Utilities for the issuer and prover plus a postgres wallet plugin
"""
import sys
import asyncio
import json
import random
from ctypes import cdll, CDLL
from time import sleep
import platform
import logging
from indy import wallet
from indy.error import ErrorCode, IndyError
from vcx.api.connection import Connection
from vcx.api.credential_def import CredentialDef
from vcx.api.issuer_credential import IssuerCredential
from vcx.api.credential import Credential
from vcx.api.proof import Proof
from vcx.api.disclosed_proof import DisclosedProof
from vcx.api.schema import Schema
from vcx.api.utils import vcx_agent_provision, vcx_messages_download
from vcx.api.vcx_init import vcx_init_with_config
from vcx.state import State, ProofState
async def create_schema_and_cred_def(schema_uuid, schema_name, schema_attrs, creddef_uuid, creddef_name):
    version = "%d.%d.%d" % (random.randint(1, 101), random.randint(1, 101), random.randint(1, 101))
schema = await Schema.create(schema_uuid, schema_name, version, schema_attrs, 0)
schema_id = await schema.get_schema_id()
cred_def = await CredentialDef.create(creddef_uuid, creddef_name, schema_id, 0)
cred_def_handle = cred_def.handle
await cred_def.get_cred_def_id()
cred_def_json = await cred_def.serialize()
print(" >>> cred_def_handle", cred_def_handle)
return cred_def_json
async def send_credential_request(my_connection, cred_def_json, schema_attrs, cred_tag, cred_name):
cred_def = await CredentialDef.deserialize(cred_def_json)
cred_def_handle = cred_def.handle
print(" >>> cred_def_handle", cred_def_handle)
credential = await IssuerCredential.create(cred_tag, schema_attrs, cred_def_handle, cred_name, '0')
await credential.send_offer(my_connection)
# serialize/deserialize credential - waiting for prover to respond with credential request
credential_data = await credential.serialize()
while True:
my_credential = await IssuerCredential.deserialize(credential_data)
await my_credential.update_state()
credential_state = await my_credential.get_state()
if credential_state == State.RequestReceived:
break
else:
credential_data = await my_credential.serialize()
sleep(2)
await my_credential.send_credential(my_connection)
# serialize/deserialize - waiting for prover to accept credential
credential_data = await my_credential.serialize()
while True:
my_credential2 = await IssuerCredential.deserialize(credential_data)
await my_credential2.update_state()
credential_state = await my_credential2.get_state()
if credential_state == State.Accepted:
break
else:
credential_data = await my_credential2.serialize()
sleep(2)
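# Hedged sketch (not in the original file) of the polling pattern used throughout
# these helpers: re-hydrate the VCX object from its serialized form, refresh its
# state, and loop until the target state is reached. `cls` stands for any VCX class
# exposing async deserialize/update_state/get_state/serialize methods.
async def _wait_for_state(cls, serialized, target_state):
    while True:
        obj = await cls.deserialize(serialized)
        await obj.update_state()
        if await obj.get_state() == target_state:
            return obj
        serialized = await obj.serialize()
        sleep(2)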
async def send_proof_request(my_connection, institution_did, proof_attrs, proof_uuid, proof_name, proof_predicates):
proof = await Proof.create(proof_uuid, proof_name, proof_attrs, {}, requested_predicates=proof_predicates)
await proof.request_proof(my_connection)
# serialize/deserialize proof
proof_data = await proof.serialize()
while True:
my_proof = await Proof.deserialize(proof_data)
await my_proof.update_state()
proof_state = await my_proof.get_state()
if proof_state == State.Accepted:
break
else:
proof_data = await my_proof.serialize()
sleep(2)
await my_proof.get_proof(my_connection)
# Check proof is valid
if my_proof.proof_state == ProofState.Verified:
print("proof is verified!!")
else:
print("could not verify proof :(")
async def handle_messages(my_connection, handled_offers, handled_requests):
offers = await Credential.get_offers(my_connection)
for offer in offers:
handled = False
for handled_offer in handled_offers:
if offer[0]['msg_ref_id'] == handled_offer['msg_ref_id']:
print(">>> got back offer that was already handled", offer[0]['msg_ref_id'])
handled = True
break
if not handled:
save_offer = offer[0].copy()
print(" >>> handling offer", save_offer['msg_ref_id'])
await handle_credential_offer(my_connection, offer)
handled_offers.append(save_offer)
requests = await DisclosedProof.get_requests(my_connection)
for request in requests:
print("request", type(request), request)
handled = False
for handled_request in handled_requests:
if request['msg_ref_id'] == handled_request['msg_ref_id']:
print(">>> got back request that was already handled", request['msg_ref_id'])
handled = True
break
if not handled:
save_request = request.copy()
print(" >>> handling proof", save_request['msg_ref_id'])
await handle_proof_request(my_connection, request)
handled_requests.append(save_request)
async def handle_credential_offer(my_connection, offer):
credential = await Credential.create('credential', offer)
await credential.send_request(my_connection, 0)
# serialize/deserialize credential - wait for Issuer to send credential
credential_data = await credential.serialize()
while True:
my_credential = await Credential.deserialize(credential_data)
await my_credential.update_state()
credential_state = await my_credential.get_state()
if credential_state == State.Accepted:
break
else:
credential_data = await my_credential.serialize()
sleep(2)
async def handle_proof_request(my_connection, request):
proof = await DisclosedProof.create('proof', request)
credentials = await proof.get_creds()
# Include self-attested attributes (not included in credentials)
self_attested = {}
# Use the first available credentials to satisfy the proof request
for attr in credentials['attrs']:
if 0 < len(credentials['attrs'][attr]):
credentials['attrs'][attr] = {
'credential': credentials['attrs'][attr][0]
}
else:
self_attested[attr] = 'my self-attested value'
for attr in self_attested:
del credentials['attrs'][attr]
print('credentials', credentials)
print('self_attested', self_attested)
await proof.generate_proof(credentials, self_attested)
# FIXME possible segfault
await proof.send_proof(my_connection)
# serialize/deserialize proof
proof_data = await proof.serialize()
while True:
my_proof = await DisclosedProof.deserialize(proof_data)
await my_proof.update_state()
proof_state = await my_proof.get_state()
if proof_state == State.Accepted:
break
else:
proof_data = await my_proof.serialize()
sleep(2)
print("proof_state", proof_state)
EXTENSION = {"darwin": ".dylib", "linux": ".so", "win32": ".dll", 'windows': '.dll'}
def file_ext():
your_platform = platform.system().lower()
return EXTENSION[your_platform] if (your_platform in EXTENSION) else '.so'
# load postgres dll and configure postgres wallet
def load_postgres_plugin(provisionConfig):
print("Initializing postgres wallet")
stg_lib = cdll.LoadLibrary("libindystrgpostgres" + file_ext())
result = stg_lib.postgresstorage_init()
if result != 0:
print("Error unable to load postgres wallet storage", result)
sys.exit(0)
provisionConfig['wallet_type'] = 'postgres_storage'
provisionConfig['storage_config'] = '{"url":"localhost:5432"}'
provisionConfig['storage_credentials'] = '{"account":"postgres","password":"mysecretpassword","admin_account":"postgres","admin_password":"mysecretpassword"}'
print("Success, loaded postgres wallet storage")
async def create_postgres_wallet(provisionConfig):
print("Provision postgres wallet in advance")
wallet_config = {
'id': provisionConfig['wallet_name'],
'storage_type': provisionConfig['wallet_type'],
'storage_config': json.loads(provisionConfig['storage_config']),
}
wallet_creds = {
'key': provisionConfig['wallet_key'],
'storage_credentials': json.loads(provisionConfig['storage_credentials']),
}
try:
await wallet.create_wallet(json.dumps(wallet_config), json.dumps(wallet_creds))
except IndyError as ex:
if ex.error_code == ErrorCode.PoolLedgerConfigAlreadyExistsError:
pass
print("Postgres wallet provisioned")
| 40.476636
| 162
| 0.69903
|
cd361d9e8a3441547ed67fc0dc239dd81c6d7d6a
| 8,064
|
py
|
Python
|
Tools/LyTestTools/tests/unit/test_editor_test_utils.py
|
GameInstitute/o3de
|
daf4dbb54d94b1c4240a273a9fdd831cce543339
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-12-25T02:43:48.000Z
|
2021-12-25T02:43:48.000Z
|
Tools/LyTestTools/tests/unit/test_editor_test_utils.py
|
RoddieKieley/o3de
|
e804fd2a4241b039a42d9fa54eaae17dc94a7a92
|
[
"Apache-2.0",
"MIT"
] | 2
|
2021-09-08T03:30:28.000Z
|
2022-03-12T00:59:27.000Z
|
Tools/LyTestTools/tests/unit/test_editor_test_utils.py
|
RoddieKieley/o3de
|
e804fd2a4241b039a42d9fa54eaae17dc94a7a92
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-07-09T06:02:14.000Z
|
2021-07-09T06:02:14.000Z
|
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import pytest
import os
import unittest.mock as mock
import unittest
import ly_test_tools.o3de.editor_test_utils as editor_test_utils
pytestmark = pytest.mark.SUITE_smoke
class TestEditorTestUtils(unittest.TestCase):
@mock.patch('ly_test_tools.environment.process_utils.kill_processes_named')
def test_KillAllLyProcesses_IncludeAP_CallsCorrectly(self, mock_kill_processes_named):
process_list = ['Editor', 'Profiler', 'RemoteConsole', 'AssetProcessor', 'AssetProcessorBatch', 'AssetBuilder']
editor_test_utils.kill_all_ly_processes(include_asset_processor=True)
mock_kill_processes_named.assert_called_once_with(process_list, ignore_extensions=True)
@mock.patch('ly_test_tools.environment.process_utils.kill_processes_named')
def test_KillAllLyProcesses_NotIncludeAP_CallsCorrectly(self, mock_kill_processes_named):
process_list = ['Editor', 'Profiler', 'RemoteConsole']
ap_process_list = ['AssetProcessor', 'AssetProcessorBatch', 'AssetBuilder']
editor_test_utils.kill_all_ly_processes(include_asset_processor=False)
mock_kill_processes_named.assert_called_once()
assert ap_process_list not in mock_kill_processes_named.call_args[0]
def test_GetTestcaseModuleFilepath_NoExtension_ReturnsPYExtension(self):
mock_module = mock.MagicMock()
file_path = os.path.join('path', 'under_test')
mock_module.__file__ = file_path
assert file_path + '.py' == editor_test_utils.get_testcase_module_filepath(mock_module)
def test_GetTestcaseModuleFilepath_PYExtension_ReturnsPYExtension(self):
mock_module = mock.MagicMock()
file_path = os.path.join('path', 'under_test.py')
mock_module.__file__ = file_path
assert file_path == editor_test_utils.get_testcase_module_filepath(mock_module)
def test_GetModuleFilename_PythonModule_ReturnsFilename(self):
mock_module = mock.MagicMock()
file_path = os.path.join('path', 'under_test.py')
mock_module.__file__ = file_path
assert 'under_test' == editor_test_utils.get_module_filename(mock_module)
def test_RetrieveLogPath_NormalProject_ReturnsLogPath(self):
mock_workspace = mock.MagicMock()
mock_workspace.paths.project.return_value = 'mock_project_path'
expected = os.path.join('mock_project_path', 'user', 'log_test_0')
assert expected == editor_test_utils.retrieve_log_path(0, mock_workspace)
@mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path')
@mock.patch('ly_test_tools.environment.waiter.wait_for', mock.MagicMock())
def test_RetrieveCrashOutput_CrashLogExists_ReturnsLogInfo(self, mock_retrieve_log_path):
mock_retrieve_log_path.return_value = 'mock_log_path'
mock_workspace = mock.MagicMock()
mock_log = 'mock crash info'
with mock.patch('builtins.open', mock.mock_open(read_data=mock_log)) as mock_file:
assert mock_log == editor_test_utils.retrieve_crash_output(0, mock_workspace, 0)
@mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path')
@mock.patch('ly_test_tools.environment.waiter.wait_for', mock.MagicMock())
def test_RetrieveCrashOutput_CrashLogNotExists_ReturnsError(self, mock_retrieve_log_path):
mock_retrieve_log_path.return_value = 'mock_log_path'
mock_workspace = mock.MagicMock()
error_message = "No crash log available"
assert error_message in editor_test_utils.retrieve_crash_output(0, mock_workspace, 0)
@mock.patch('os.path.getmtime', mock.MagicMock())
@mock.patch('os.rename')
@mock.patch('time.strftime')
@mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path')
@mock.patch('os.path.exists')
def test_CycleCrashReport_DmpExists_NamedCorrectly(self, mock_exists, mock_retrieve_log_path, mock_strftime,
mock_rename):
mock_exists.side_effect = [False, True]
mock_retrieve_log_path.return_value = 'mock_log_path'
mock_workspace = mock.MagicMock()
mock_strftime.return_value = 'mock_strftime'
editor_test_utils.cycle_crash_report(0, mock_workspace)
mock_rename.assert_called_once_with(os.path.join('mock_log_path', 'error.dmp'),
os.path.join('mock_log_path', 'error_mock_strftime.dmp'))
@mock.patch('os.path.getmtime', mock.MagicMock())
@mock.patch('os.rename')
@mock.patch('time.strftime')
@mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path')
@mock.patch('os.path.exists')
def test_CycleCrashReport_LogExists_NamedCorrectly(self, mock_exists, mock_retrieve_log_path, mock_strftime,
mock_rename):
mock_exists.side_effect = [True, False]
mock_retrieve_log_path.return_value = 'mock_log_path'
mock_workspace = mock.MagicMock()
mock_strftime.return_value = 'mock_strftime'
editor_test_utils.cycle_crash_report(0, mock_workspace)
mock_rename.assert_called_once_with(os.path.join('mock_log_path', 'error.log'),
os.path.join('mock_log_path', 'error_mock_strftime.log'))
@mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path')
@mock.patch('ly_test_tools.environment.waiter.wait_for', mock.MagicMock())
def test_RetrieveEditorLogContent_CrashLogExists_ReturnsLogInfo(self, mock_retrieve_log_path):
mock_retrieve_log_path.return_value = 'mock_log_path'
mock_logname = 'mock_log.log'
mock_workspace = mock.MagicMock()
mock_log = 'mock log info'
with mock.patch('builtins.open', mock.mock_open(read_data=mock_log)) as mock_file:
assert f'[{mock_logname}] {mock_log}' == editor_test_utils.retrieve_editor_log_content(0, mock_logname, mock_workspace)
@mock.patch('ly_test_tools.o3de.editor_test_utils.retrieve_log_path')
@mock.patch('ly_test_tools.environment.waiter.wait_for', mock.MagicMock())
def test_RetrieveEditorLogContent_CrashLogNotExists_ReturnsError(self, mock_retrieve_log_path):
mock_retrieve_log_path.return_value = 'mock_log_path'
mock_logname = 'mock_log.log'
mock_workspace = mock.MagicMock()
expected = f"-- Error reading {mock_logname}"
assert expected in editor_test_utils.retrieve_editor_log_content(0, mock_logname, mock_workspace)
def test_RetrieveLastRunTestIndexFromOutput_SecondTestFailed_Returns0(self):
mock_test = mock.MagicMock()
mock_test.__name__ = 'mock_test_name'
mock_test_list = [mock_test]
mock_editor_output = 'mock_test_name\n' \
'mock_test_name_1'
assert 0 == editor_test_utils.retrieve_last_run_test_index_from_output(mock_test_list, mock_editor_output)
def test_RetrieveLastRunTestIndexFromOutput_TenthTestFailed_Returns9(self):
mock_test_list = []
mock_editor_output = ''
for x in range(10):
mock_test = mock.MagicMock()
mock_test.__name__ = f'mock_test_name_{x}'
mock_test_list.append(mock_test)
mock_editor_output += f'{mock_test.__name__}\n'
mock_editor_output += 'mock_test_name_x'
assert 9 == editor_test_utils.retrieve_last_run_test_index_from_output(mock_test_list, mock_editor_output)
def test_RetrieveLastRunTestIndexFromOutput_FirstItemFailed_Returns0(self):
mock_test_list = []
mock_editor_output = ''
for x in range(10):
mock_test = mock.MagicMock()
mock_test.__name__ = f'mock_test_name_{x}'
mock_test_list.append(mock_test)
assert 0 == editor_test_utils.retrieve_last_run_test_index_from_output(mock_test_list, mock_editor_output)
| 49.472393
| 132
| 0.728919
|
c625843e6d75f46dbf1c64d752abd82c11f8668a
| 8,672
|
py
|
Python
|
platform/gsutil/gslib/addlhelp/metadata.py
|
IsaacHuang/google-cloud-sdk
|
52afa5d1a75dff08f4f5380c5cccc015bf796ca5
|
[
"Apache-2.0"
] | null | null | null |
platform/gsutil/gslib/addlhelp/metadata.py
|
IsaacHuang/google-cloud-sdk
|
52afa5d1a75dff08f4f5380c5cccc015bf796ca5
|
[
"Apache-2.0"
] | null | null | null |
platform/gsutil/gslib/addlhelp/metadata.py
|
IsaacHuang/google-cloud-sdk
|
52afa5d1a75dff08f4f5380c5cccc015bf796ca5
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Additional help about object metadata."""
from __future__ import absolute_import
from gslib.help_provider import HelpProvider
_DETAILED_HELP_TEXT = ("""
<B>OVERVIEW OF METADATA</B>
Objects can have associated metadata, which control aspects of how
GET requests are handled, including Content-Type, Cache-Control,
Content-Disposition, and Content-Encoding (discussed in more detail in
the subsections below). In addition, you can set custom metadata that
can be used by applications (e.g., tagging that particular objects possess
some property).
There are two ways to set metadata on objects:
- at upload time you can specify one or more headers to associate with
objects, using the gsutil -h option. For example, the following command
would cause gsutil to set the Content-Type and Cache-Control for each
of the files being uploaded:
gsutil -h "Content-Type:text/html" \\
-h "Cache-Control:public, max-age=3600" cp -r images \\
gs://bucket/images
Note that -h is an option on the gsutil command, not the cp sub-command.
- You can set or remove metadata fields from already uploaded objects using
the gsutil setmeta command. See "gsutil help setmeta".
More details about specific pieces of metadata are discussed below.
<B>CONTENT TYPE</B>
The most commonly set metadata is Content-Type (also known as MIME type),
which allows browsers to render the object properly.
gsutil sets the Content-Type automatically at upload time, based on each
filename extension. For example, uploading files with names ending in .txt
will set Content-Type to text/plain. If you're running gsutil on Linux or
MacOS and would prefer to have content type set based on naming plus content
examination, see the use_magicfile configuration variable in the gsutil/boto
configuration file (See also "gsutil help config"). In general, using
use_magicfile is more robust and configurable, but is not available on
Windows.
If you specify a Content-Type header with -h when uploading content (like the
example gsutil command given in the previous section), it overrides the
Content-Type that would have been set based on filename extension or content.
This can be useful if the Content-Type detection algorithm doesn't work as
desired for some of your files.
You can also completely suppress content type detection in gsutil, by
specifying an empty string on the Content-Type header:
gsutil -h 'Content-Type:' cp -r images gs://bucket/images
In this case, the Google Cloud Storage service will not attempt to detect
the content type. In general this approach will work better than using
filename extension-based content detection in gsutil, because the list of
filename extensions is kept more current in the server-side content detection
system than in the Python library upon which gsutil content type detection
depends. (For example, at the time of writing this, the filename extension
".webp" was recognized by the server-side content detection system, but
not by gsutil.)
<B>CACHE-CONTROL</B>
Another commonly set piece of metadata is Cache-Control, which allows
you to control whether and for how long browser and Internet caches are
allowed to cache your objects. Cache-Control only applies to objects with
a public-read ACL. Non-public data are not cacheable.
Here's an example of uploading an object set to allow caching:
gsutil -h "Cache-Control:public,max-age=3600" cp -a public-read \\
-r html gs://bucket/html
This command would upload all files in the html directory (and subdirectories)
and make them publicly readable and cacheable, with cache expiration of
one hour.
Note that if you allow caching, at download time you may see older versions
of objects after uploading a newer replacement object. Note also that because
objects can be cached at various places on the Internet there is no way to
force a cached object to expire globally (unlike the way you can force your
browser to refresh its cache).
Another use of the Cache-Control header is through the "no-transform" value,
which instructs Google Cloud Storage to not apply any content transformations
based on specifics of a download request, such as removing gzip
content-encoding for incompatible clients. Note that this parameter is only
respected by the XML API. The Google Cloud Storage JSON API respects only the
no-cache and max-age Cache-Control parameters.
<B>CONTENT-ENCODING</B>
You can specify a Content-Encoding to indicate that an object is compressed
(for example, with gzip compression) while maintaining its Content-Type.
You will need to ensure that the files have been compressed using the
specified Content-Encoding before using gsutil to upload them. Consider the
following example for Linux:
echo "Highly compressible text" | gzip > foo.txt
gsutil -h "Content-Encoding:gzip" -h "Content-Type:text/plain" \\
cp foo.txt gs://bucket/compressed
Note that this is different from uploading a gzipped object foo.txt.gz with
Content-Type: application/x-gzip because most browsers are able to
dynamically decompress and process objects served with Content-Encoding: gzip
based on the underlying Content-Type.
For compressible content, using Content-Encoding: gzip saves network and
storage costs, and improves content serving performance. However, for content
that is already inherently compressed (archives and many media formats, for
instance) applying another level of compression via Content-Encoding is
typically detrimental to both object size and performance and should be
avoided.
Note also that gsutil provides an easy way to cause content to be compressed
and stored with Content-Encoding: gzip: see the -z option in "gsutil help cp".
<B>CONTENT-DISPOSITION</B>
You can set Content-Disposition on your objects, to specify presentation
information about the data being transmitted. Here's an example:
gsutil -h 'Content-Disposition:attachment; filename=filename.ext' \\
cp -r attachments gs://bucket/attachments
Setting the Content-Disposition allows you to control presentation style
of the content, for example determining whether an attachment should be
automatically displayed vs should require some form of action from the user to
open it. See http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1
for more details about the meaning of Content-Disposition.
<B>CUSTOM METADATA</B>
  You can add your own custom metadata (e.g., for use by your application)
to an object by setting a header that starts with "x-goog-meta", for example:
gsutil -h x-goog-meta-reviewer:jane cp mycode.java gs://bucket/reviews
You can add multiple differently named custom metadata fields to each object.
<B>SETTABLE FIELDS; FIELD VALUES</B>
You can't set some metadata fields, such as ETag and Content-Length. The
fields you can set are:
- Cache-Control
- Content-Disposition
- Content-Encoding
- Content-Language
- Content-MD5
- Content-Type
- Any field starting with a matching Cloud Storage Provider
prefix, such as x-goog-meta- (i.e., custom metadata).
Header names are case-insensitive.
x-goog-meta- fields can have data set to arbitrary Unicode values. All
other fields must have ASCII values.
<B>VIEWING CURRENTLY SET METADATA</B>
You can see what metadata is currently set on an object by using:
gsutil ls -L gs://the_bucket/the_object
""")
class CommandOptions(HelpProvider):
"""Additional help about object metadata."""
# Help specification. See help_provider.py for documentation.
help_spec = HelpProvider.HelpSpec(
help_name='metadata',
help_name_aliases=[
'cache-control', 'caching', 'content type', 'mime type', 'mime',
'type'],
help_type='additional_help',
help_one_line_summary='Working With Object Metadata',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
| 43.144279
| 80
| 0.758649
|
888cfea0d976ad02e38ff51870ebd54a09163f6d
| 860
|
py
|
Python
|
python/test.py
|
ajdinm/crypto
|
d013e607de5e099749e20a748bff5bf7ef4f6689
|
[
"MIT"
] | null | null | null |
python/test.py
|
ajdinm/crypto
|
d013e607de5e099749e20a748bff5bf7ef4f6689
|
[
"MIT"
] | 1
|
2019-11-29T19:07:26.000Z
|
2019-11-29T19:07:26.000Z
|
python/test.py
|
ajdinm/crypto
|
d013e607de5e099749e20a748bff5bf7ef4f6689
|
[
"MIT"
] | null | null | null |
from termcolor import colored
def test(test_cases, f, f_name, ok_msg = 'OK', nok_msg = 'NOK', print_ok_case = False, print_nok_case = True):
print '--- init testing ' + f_name + ' function ---'
for test_case in test_cases:
result = f(*test_case[:-1])
is_ok = result == test_case[-1]
to_print = ''
color = 'green'
if is_ok:
to_print = to_print + 'OK'
else:
color = 'red'
to_print = to_print + 'NOK'
if (is_ok and print_ok_case) or (not is_ok and print_nok_case):
to_print = to_print + ': ' + f_name + str(test_case[:-1])
to_print = to_print + ': expected: ' + str(test_case[-1])
to_print = to_print + '; got: ' + str(result)
print colored(to_print, color)
print '--- done testing ' + f_name + ' function ---\n'
| 40.952381
| 110
| 0.553488
|
7c0a837cce384af4c07f196f0608df57074a5b08
| 7,271
|
py
|
Python
|
FGAN/fgan_model.py
|
sumersumerdjl/kozistr-Awesome-GANs
|
6e20e9cd07d0ec413a187d496159b97d793dab0c
|
[
"MIT"
] | 1
|
2021-08-16T01:40:46.000Z
|
2021-08-16T01:40:46.000Z
|
FGAN/fgan_model.py
|
siavelis/Awesome-GANs
|
9add33fdfcb9fead75c37dd7afdbede625a303c9
|
[
"MIT"
] | null | null | null |
FGAN/fgan_model.py
|
siavelis/Awesome-GANs
|
9add33fdfcb9fead75c37dd7afdbede625a303c9
|
[
"MIT"
] | 1
|
2021-08-16T01:35:21.000Z
|
2021-08-16T01:35:21.000Z
|
import tensorflow as tf
import numpy as np
import sys
sys.path.append('../')
import tfutil as t
tf.set_random_seed(777)
class FGAN:
def __init__(self, s, batch_size=64, height=28, width=28, channel=1,
sample_num=8 * 8, sample_size=8,
z_dim=128, dfc_unit=256, gfc_unit=1024, lr=2e-4,
divergence_method='KL', use_tricky_g_loss=False):
"""
# General Settings
:param s: TF Session
:param batch_size: training batch size, default 64
:param height: input image height, default 28
:param width: input image width, default 28
:param channel: input image channel, default 1
# Output Settings
:param sample_num: the number of sample images, default 64
:param sample_size: sample image size, default 8
# Model Settings
:param z_dim: z noise dimension, default 128
:param dfc_unit: the number of fully connected units used at disc, default 256
:param gfc_unit: the number of fully connected units used at gen, default 1024
# Training Settings
:param lr: learning rate, default 2e-4
:param divergence_method: the method of f-divergences, default 'KL'
:param use_tricky_g_loss: use g_loss referred in f-GAN Section 3.2, default False
"""
self.s = s
self.batch_size = batch_size
self.height = height
self.width = width
self.channel = channel
self.sample_size = sample_size
self.sample_num = sample_num
self.image_shape = [self.height, self.width, self.channel]
self.n_input = self.height * self.width * self.channel
self.z_dim = z_dim
self.dfc_unit = dfc_unit
self.gfc_unit = gfc_unit
# pre-defined
self.d_loss = 0.
self.g_loss = 0.
self.g = None
self.d_op = None
self.g_op = None
self.merged = None
self.writer = None
self.saver = None
# Placeholders
self.x = tf.placeholder(tf.float32, shape=[None, self.n_input], name='x-images')
self.z = tf.placeholder(tf.float32, shape=[None, self.z_dim], name='z-noise')
# Training Options
self.beta1 = 0.5
self.lr = lr
self.divergence = divergence_method
self.use_tricky_g_loss = use_tricky_g_loss
        self.build_fgan()  # build f-GAN model
def discriminator(self, x, reuse=None):
with tf.variable_scope('discriminator', reuse=reuse):
x = t.dense(x, self.dfc_unit, name='disc-fc-1')
x = tf.nn.elu(x)
x = t.dense(x, self.dfc_unit, name='disc-fc-2')
x = tf.nn.elu(x)
x = tf.layers.flatten(x)
x = t.dense(x, 1, name='disc-fc-3')
return x
def generator(self, z, reuse=None, is_train=True):
with tf.variable_scope('generator', reuse=reuse):
x = t.dense(z, self.gfc_unit, name='gen-fc-1')
x = t.batch_norm(x, is_train=is_train, name='gen-bn-1')
x = tf.nn.relu(x)
x = t.dense(x, self.gfc_unit, name='gen-fc-2')
x = t.batch_norm(x, is_train=is_train, name='gen-bn-2')
x = tf.nn.relu(x)
x = t.dense(x, self.n_input, name='gen-fc-3')
x = tf.nn.sigmoid(x)
return x
    def build_fgan(self):
# Generator
self.g = self.generator(self.z)
# Discriminator
d_real = self.discriminator(self.x)
d_fake = self.discriminator(self.g, reuse=True)
# Losses
if self.divergence == 'GAN':
def activation(x): return -tf.reduce_mean(-t.safe_log(1. + tf.exp(-x)))
def conjugate(x): return -tf.reduce_mean(-t.safe_log(1. - tf.exp(x)))
elif self.divergence == 'KL': # tf.distribution.kl_divergence
def activation(x): return -tf.reduce_mean(x)
def conjugate(x): return -tf.reduce_mean(tf.exp(x - 1.))
elif self.divergence == 'Reverse-KL':
def activation(x): return -tf.reduce_mean(-tf.exp(x))
def conjugate(x): return -tf.reduce_mean(-1. - x) # remove log
elif self.divergence == 'JS':
def activation(x): return -tf.reduce_mean(tf.log(2.) - t.safe_log(1. + tf.exp(-x)))
def conjugate(x): return -tf.reduce_mean(-t.safe_log(2. - tf.exp(x)))
elif self.divergence == 'JS-Weighted':
def activation(x): return -tf.reduce_mean(-np.pi * np.log(np.pi) - t.safe_log(1. + tf.exp(-x)))
def conjugate(x): return -tf.reduce_mean((1. - np.pi) *
t.safe_log((1. - np.pi) / (1. - np.pi * tf.exp(x / np.pi))))
elif self.divergence == 'Squared-Hellinger':
def activation(x): return -tf.reduce_mean(1. - tf.exp(x))
def conjugate(x): return -tf.reduce_mean(x / (1. - x))
elif self.divergence == 'Pearson':
def activation(x): return -tf.reduce_mean(x)
def conjugate(x): return -tf.reduce_mean(tf.square(x) / 4. + x)
elif self.divergence == 'Neyman':
def activation(x): return -tf.reduce_mean(1. - tf.exp(x))
def conjugate(x): return -tf.reduce_mean(2. - 2. * tf.sqrt(1. - x))
elif self.divergence == 'Jeffrey':
from scipy.special import lambertw
def activation(x): return -tf.reduce_mean(x)
def conjugate(x):
lambert_w = lambertw(self.s.run(tf.exp(1. - x))) # need to be replaced with another tensor func
return -tf.reduce_mean(lambert_w + 1. / lambert_w + x - 2.)
elif self.divergence == 'Total-Variation':
def activation(x): return -tf.reduce_mean(tf.nn.tanh(x) / 2.)
def conjugate(x): return -tf.reduce_mean(x)
else:
raise NotImplementedError("[-] Not Implemented f-divergence %s" % self.divergence)
d_real_loss = activation(d_real)
d_fake_loss = conjugate(d_fake)
self.d_loss = d_real_loss - d_fake_loss
if self.use_tricky_g_loss:
self.g_loss = activation(d_fake)
else:
self.g_loss = d_fake_loss
# Summary
tf.summary.scalar("loss/d_real_loss", d_real_loss)
tf.summary.scalar("loss/d_fake_loss", d_fake_loss)
tf.summary.scalar("loss/d_loss", self.d_loss)
tf.summary.scalar("loss/g_loss", self.g_loss)
# Collect trainer values
t_vars = tf.trainable_variables()
d_params = [v for v in t_vars if v.name.startswith('d')]
g_params = [v for v in t_vars if v.name.startswith('g')]
# Optimizer
self.d_op = tf.train.AdamOptimizer(learning_rate=self.lr,
beta1=self.beta1).minimize(self.d_loss, var_list=d_params)
self.g_op = tf.train.AdamOptimizer(learning_rate=self.lr,
beta1=self.beta1).minimize(self.g_loss, var_list=g_params)
# Merge summary
self.merged = tf.summary.merge_all()
# Model Saver
self.saver = tf.train.Saver(max_to_keep=1)
self.writer = tf.summary.FileWriter('./model/%s/' % self.divergence, self.s.graph)
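def _kl_variational_bound_sketch():
    # Hedged numpy sketch (not from the original repo) of the idea behind the
    # per-divergence (activation, conjugate) pairs above: for any critic T,
    # E_p[T(x)] - E_q[f*(T(x))] lower-bounds D_f(P || Q). Shown for the 'KL'
    # pair with the optimal critic T*(x) = 1 + log p(x)/q(x) for two
    # unit-variance Gaussians, where the bound is tight (KL = 0.125 nats).
    rng = np.random.RandomState(0)
    p, q = rng.normal(0.0, 1.0, 200000), rng.normal(0.5, 1.0, 200000)
    def log_ratio(x): return ((x - 0.5) ** 2 - x ** 2) / 2.0  # log p(x)/q(x)
    def critic(x): return 1.0 + log_ratio(x)
    bound = critic(p).mean() - np.exp(critic(q) - 1.0).mean()
    return bound  # ~ 0.125 up to Monte Carlo error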
| 36.174129
| 113
| 0.581213
|
5b637ff6d8b11e209b1b17f4529389b9e5bce031
| 691
|
py
|
Python
|
range-exclude.py
|
bpcox/range-exclude
|
c151f843964408481a5f6569718537a786ff5722
|
[
"MIT"
] | 1
|
2016-06-27T15:11:37.000Z
|
2016-06-27T15:11:37.000Z
|
range-exclude.py
|
bpcox/range-exclude
|
c151f843964408481a5f6569718537a786ff5722
|
[
"MIT"
] | null | null | null |
range-exclude.py
|
bpcox/range-exclude
|
c151f843964408481a5f6569718537a786ff5722
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3.4
import ipaddress
import math
supernet = False
subnet = False
while not supernet:
inputRange = input('Input the IP range you would like remove a subrange from: ')
try:
        supernet = ipaddress.ip_network(inputRange)
except ValueError:
print('Invalid input, try again')
while not subnet:
inputRange = input('Input the IP range you would like to remove: ')
try:
subnet = ipaddress.ip_network(inputRange)
except ValueError:
print('Invalid input, try again')
if (supernet.version == subnet.version):
    result = supernet.address_exclude(subnet)
for IPrange in result:
print(IPrange)
else:
print('Both IP ranges must be of the same type (IPv4 or IPv6)')
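# Illustrative run (not part of the script): excluding 192.168.1.0/26 from
# 192.168.1.0/24 leaves 192.168.1.128/25 and 192.168.1.64/26, which is what the
# loop above prints.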
| 23.033333
| 81
| 0.740955
|
909a25babe1f0965af793a6d69d15dcd1f295b81
| 3,245
|
py
|
Python
|
game.py
|
iamgreaser/fireball
|
2c5afb3dc5756a3b26da9045278f7e4a2bc036d2
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2019-05-06T15:11:17.000Z
|
2019-05-06T15:11:17.000Z
|
game.py
|
iamgreaser/fireball
|
2c5afb3dc5756a3b26da9045278f7e4a2bc036d2
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
game.py
|
iamgreaser/fireball
|
2c5afb3dc5756a3b26da9045278f7e4a2bc036d2
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
"""
Copyright 2011 Ben Russell & contributors. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are
permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of
conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list
of conditions and the following disclaimer in the documentation and/or other materials
provided with the distribution.
THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those of the
authors and should not be interpreted as representing official policies, either expressed
or implied, of the contributors.
"""
import world, event, entity
class AbstractGame:
def __init__(self, *args, **kwargs):
self.entities_main = [None for i in xrange(256)]
self.entities_main_free = set([i for i in xrange(len(self.entities_main))])
self.entities_anon = set()
def set_world(self, world):
self.world = world
def add_entity_main(self, ent, idx=None):
# TODO: catch some errors
if idx == None:
idx = self.entities_main_free.pop()
else:
self.entities_main_free.remove(idx)
self.entities_main[idx] = ent
ent.set_game(idx, self)
def rm_entity_main(self, idx):
ent = self.entities_main[idx]
if ent == None:
return
ent.set_game(-1, None)
self.entities_main[idx] = None
self.entities_main_free.add(idx)
def update(self, dt):
for ent in self.entities_main:
if ent == None:
continue
ent.update(dt)
for ent in self.entities_anon:
ent.update(dt)
class LocalGame(AbstractGame):
def __init__(self, *args, **kwargs):
AbstractGame.__init__(self, *args, **kwargs)
pass
def send_event(self, idx, event):
self.recv_event(idx, event)
def recv_event(self, idx, event):
        if idx != None:
            # a specific entity slot was given; deliver the event to it alone
            ent = self.entities_main[idx]
            if ent != None:
                ent.handle_event(event)
        else:
            # no index given; broadcast the event to every entity
            for ent in self.entities_main:
                if ent == None:
                    continue
                ent.handle_event(event)
            for ent in self.entities_anon:
                ent.handle_event(event)
class NetworkGame(AbstractGame):
def __init__(self, *args, **kwargs):
AbstractGame.__init__(self, *args, **kwargs)
pass
class ServerGame(NetworkGame):
def __init__(self, *args, **kwargs):
NetworkGame.__init__(self, *args, **kwargs)
pass
class ClientGame(NetworkGame):
def __init__(self, *args, **kwargs):
NetworkGame.__init__(self, *args, **kwargs)
pass
| 30.046296
| 92
| 0.735901
|
00749ac20ab37e147373dec3cca8fd4757f24e69
| 694
|
py
|
Python
|
funds/funds/models/funds.py
|
zxc111/funds
|
0d52d1fc305d329c3d31a909aed41f092ed12160
|
[
"MIT"
] | null | null | null |
funds/funds/models/funds.py
|
zxc111/funds
|
0d52d1fc305d329c3d31a909aed41f092ed12160
|
[
"MIT"
] | null | null | null |
funds/funds/models/funds.py
|
zxc111/funds
|
0d52d1fc305d329c3d31a909aed41f092ed12160
|
[
"MIT"
] | null | null | null |
# coding: utf8
from sqlalchemy import Column, Integer, String
from .config import Base
class Fund(Base):
__tablename__ = "funds"
id = Column(Integer, primary_key=True)
name = Column(String(256))
fundcode = Column(String(32), unique=True)
manager = Column(String(32))
@classmethod
def get_exist_fundcode_list(cls, fundcode_list, session):
if not fundcode_list:
return fundcode_list
fc_set = set(fundcode_list)
res = session.query(cls.fundcode).filter(
cls.fundcode.in_(fundcode_list)
).all()
result = []
for fundcode in res:
result.append(fundcode[0])
return result
| 22.387097
| 61
| 0.631124
|
df93fea315585c30af4c09a7c4775e83fbff50b7
| 2,247
|
py
|
Python
|
gcc/models/gcn.py
|
TangYucopper/GCC
|
26c9e0bbe9db330748055924bc0f2bc3a0868cdf
|
[
"MIT"
] | 243
|
2020-06-16T08:06:57.000Z
|
2022-03-31T10:09:37.000Z
|
gcc/models/gcn.py
|
TangYucopper/GCC
|
26c9e0bbe9db330748055924bc0f2bc3a0868cdf
|
[
"MIT"
] | 21
|
2020-06-22T01:31:35.000Z
|
2022-01-06T08:02:07.000Z
|
gcc/models/gcn.py
|
TangYucopper/GCC
|
26c9e0bbe9db330748055924bc0f2bc3a0868cdf
|
[
"MIT"
] | 49
|
2020-06-28T02:37:17.000Z
|
2022-03-29T08:07:35.000Z
|
#!/usr/bin/env python
# encoding: utf-8
# File Name: gcn.py
# Author: Jiezhong Qiu
# Create Time: 2019/12/13 15:38
# TODO:
import dgl
import dgl.function as fn
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl.model_zoo.chem.gnn import GCNLayer
from dgl.nn.pytorch import AvgPooling, Set2Set
class UnsupervisedGCN(nn.Module):
def __init__(
self,
hidden_size=64,
num_layer=2,
readout="avg",
layernorm: bool = False,
set2set_lstm_layer: int = 3,
set2set_iter: int = 6,
):
super(UnsupervisedGCN, self).__init__()
self.layers = nn.ModuleList(
[
GCNLayer(
in_feats=hidden_size,
out_feats=hidden_size,
activation=F.relu if i + 1 < num_layer else None,
residual=False,
batchnorm=False,
dropout=0.0,
)
for i in range(num_layer)
]
)
if readout == "avg":
self.readout = AvgPooling()
elif readout == "set2set":
self.readout = Set2Set(
hidden_size, n_iters=set2set_iter, n_layers=set2set_lstm_layer
)
self.linear = nn.Linear(2 * hidden_size, hidden_size)
elif readout == "root":
# HACK: process outside the model part
self.readout = lambda _, x: x
else:
raise NotImplementedError
self.layernorm = layernorm
if layernorm:
self.ln = nn.LayerNorm(hidden_size, elementwise_affine=False)
# self.ln = nn.BatchNorm1d(hidden_size, affine=False)
def forward(self, g, feats, efeats=None):
for layer in self.layers:
feats = layer(g, feats)
feats = self.readout(g, feats)
if isinstance(self.readout, Set2Set):
feats = self.linear(feats)
if self.layernorm:
feats = self.ln(feats)
return feats
if __name__ == "__main__":
model = UnsupervisedGCN()
print(model)
g = dgl.DGLGraph()
g.add_nodes(3)
g.add_edges([0, 0, 1], [1, 2, 2])
feat = torch.rand(3, 64)
print(model(g, feat).shape)
| 29.181818
| 78
| 0.558522
|
a2651aee8e6701544a6b83b225c20e91bfbb2482
| 2,591
|
py
|
Python
|
pysnmp/CISCO-POP-MGMT-CAPABILITY.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 11
|
2021-02-02T16:27:16.000Z
|
2021-08-31T06:22:49.000Z
|
pysnmp/CISCO-POP-MGMT-CAPABILITY.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 75
|
2021-02-24T17:30:31.000Z
|
2021-12-08T00:01:18.000Z
|
pysnmp/CISCO-POP-MGMT-CAPABILITY.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module CISCO-POP-MGMT-CAPABILITY (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-POP-MGMT-CAPABILITY
# Produced by pysmi-0.3.4 at Mon Apr 29 17:52:48 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
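# Note: `mibBuilder` is not imported in this module; pysmi-generated MIB files like
# this one are executed by the pysnmp MIB loader, which provides `mibBuilder` in the
# execution namespace before the code below runs.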
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion")
ciscoAgentCapability, = mibBuilder.importSymbols("CISCO-SMI", "ciscoAgentCapability")
ModuleCompliance, NotificationGroup, AgentCapabilities = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "AgentCapabilities")
Gauge32, MibIdentifier, ObjectIdentity, IpAddress, TimeTicks, Counter32, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, ModuleIdentity, Counter64, Unsigned32, Bits, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "MibIdentifier", "ObjectIdentity", "IpAddress", "TimeTicks", "Counter32", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "ModuleIdentity", "Counter64", "Unsigned32", "Bits", "iso")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
ciscoPopMgmtCapability = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 7, 447))
ciscoPopMgmtCapability.setRevisions(('2005-10-12 00:00', '2005-08-25 00:00',))
if mibBuilder.loadTexts: ciscoPopMgmtCapability.setLastUpdated('200510120000Z')
if mibBuilder.loadTexts: ciscoPopMgmtCapability.setOrganization('Cisco Systems, Inc.')
ciscoPopMgmtCapabilityV12R04 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 447, 1))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoPopMgmtCapabilityV12R04 = ciscoPopMgmtCapabilityV12R04.setProductRelease('Cisco IOS 12.4 for C3600 family platforms')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoPopMgmtCapabilityV12R04 = ciscoPopMgmtCapabilityV12R04.setStatus('current')
mibBuilder.exportSymbols("CISCO-POP-MGMT-CAPABILITY", ciscoPopMgmtCapabilityV12R04=ciscoPopMgmtCapabilityV12R04, ciscoPopMgmtCapability=ciscoPopMgmtCapability, PYSNMP_MODULE_ID=ciscoPopMgmtCapability)
| 103.64
| 477
| 0.791586
|
f4865eaa08850bb7be0a373c47e33c0b92147a15
| 1,870
|
py
|
Python
|
mac_os_scripts/configure_ntp.py
|
initialed85/mac_os_scripts
|
aa8a2c1dc9193dbce796985f5f125c82f6f90bed
|
[
"MIT",
"BSD-3-Clause"
] | 32
|
2017-11-01T17:20:41.000Z
|
2020-06-22T02:18:48.000Z
|
mac_os_scripts/configure_ntp.py
|
initialed85/mac_os_scripts
|
aa8a2c1dc9193dbce796985f5f125c82f6f90bed
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
mac_os_scripts/configure_ntp.py
|
initialed85/mac_os_scripts
|
aa8a2c1dc9193dbce796985f5f125c82f6f90bed
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
"""
This script is responsible for enabling NTP and setting the NTP server
Commands used:
- systemsetup -setusingnetworktime on
- systemsetup -setnetworktimeserver (NTP server)
"""
from common import CLITieIn
class NTPConfigurator(CLITieIn):
def enable_ntp(self):
command = '/usr/sbin/systemsetup -setusingnetworktime on'
command_output = self.command(command)
if command_output.error_level != 0:
self._logger.error(
'{0} failed stating {1}'.format(
command, command_output
)
)
return False
return True
def set_ntp_server(self, server):
command = '/usr/sbin/systemsetup -setnetworktimeserver {0}'.format(server)
command_output = self.command(command)
if command_output.error_level != 0:
self._logger.error(
'{0} failed stating {1}'.format(
command, command_output
)
)
return False
return True
def run(self, server):
if not self.enable_ntp():
self._logger.error('failed enable_ntp; cannot continue')
return False
if not self.set_ntp_server(server):
self._logger.error('failed set_ntp_server; cannot continue')
return False
self._logger.debug('passed')
return True
if __name__ == '__main__':
from utils import get_argparser, get_args
parser = get_argparser()
parser.add_argument(
'-s',
'--server',
type=str,
required=True,
help='NTP server to use'
)
args = get_args(parser)
actor = NTPConfigurator(
sudo_password=args.sudo_password,
)
result = actor.run(
server=args.server,
)
if not result:
exit(1)
exit(0)
| 22.261905
| 82
| 0.582353
|
5b17faf1258bbe066c95df11238fb4f0cc4f86a3
| 6,132
|
py
|
Python
|
wfexs_backend/podman_container.py
|
stain/WfExS-backend
|
e0e1cf5bc1466791043f0dd052ef9ea04927e108
|
[
"Apache-2.0"
] | 10
|
2021-04-21T20:56:51.000Z
|
2022-03-28T15:03:11.000Z
|
wfexs_backend/podman_container.py
|
stain/WfExS-backend
|
e0e1cf5bc1466791043f0dd052ef9ea04927e108
|
[
"Apache-2.0"
] | 22
|
2021-05-14T10:05:22.000Z
|
2022-03-31T22:35:19.000Z
|
wfexs_backend/podman_container.py
|
Acivico/WfExS-backend
|
05173ceca4dfdcd65f2a957e3638a65d76c6eb25
|
[
"Apache-2.0"
] | 2
|
2021-09-22T09:34:33.000Z
|
2021-11-03T09:27:42.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020-2021 Barcelona Supercomputing Center (BSC), Spain
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import json
import subprocess
import tempfile
from typing import Dict, List, Tuple
from .common import *
from .container import ContainerFactory, ContainerFactoryException
DOCKER_PROTO = 'docker://'
class PodmanContainerFactory(ContainerFactory):
def __init__(self, cacheDir=None, local_config=None, engine_name='unset', tempDir=None):
super().__init__(cacheDir=cacheDir, local_config=local_config, engine_name=engine_name, tempDir=tempDir)
self.runtime_cmd = local_config.get('tools', {}).get('podmanCommand', DEFAULT_PODMAN_CMD)
self._environment.update({
'XDG_DATA_HOME': self.containersCacheDir,
})
# Now, detect whether userns could work
userns_supported = False
if self.supportsFeature('host_userns'):
userns_supported = True
self._features.add('userns')
self.logger.debug(f'Podman supports userns: {userns_supported}')
@classmethod
def ContainerType(cls) -> ContainerType:
return ContainerType.Podman
def _inspect(self, dockerTag : ContainerTaggedName, matEnv) -> Tuple[int, bytes, str]:
with tempfile.NamedTemporaryFile() as d_out, tempfile.NamedTemporaryFile() as d_err:
self.logger.debug(f"querying podman container {dockerTag}")
d_retval = subprocess.Popen(
[self.runtime_cmd, 'inspect', dockerTag],
env=matEnv,
stdout=d_out,
stderr=d_err
).wait()
self.logger.debug(f"podman inspect {dockerTag} retval: {d_retval}")
with open(d_out.name, mode="rb") as c_stF:
                d_out_v = c_stF.read().decode('utf-8', errors='ignore')  # 'continue' is not a valid codec error handler
with open(d_err.name, mode="r") as c_stF:
d_err_v = c_stF.read()
self.logger.debug(f"podman inspect stdout: {d_out_v}")
self.logger.debug(f"podman inspect stderr: {d_err_v}")
return d_retval , d_out_v , d_err_v
def _pull(self, dockerTag : ContainerTaggedName, matEnv) -> Tuple[int, str, str]:
with tempfile.NamedTemporaryFile() as d_out, tempfile.NamedTemporaryFile() as d_err:
self.logger.debug(f"pulling podman container {dockerTag}")
d_retval = subprocess.Popen(
[self.runtime_cmd, 'pull', dockerTag],
env=matEnv,
stdout=d_out,
stderr=d_err
).wait()
self.logger.debug(f"podman pull {dockerTag} retval: {d_retval}")
with open(d_out.name, mode="r") as c_stF:
d_out_v = c_stF.read()
with open(d_err.name,"r") as c_stF:
d_err_v = c_stF.read()
self.logger.debug(f"podman pull stdout: {d_out_v}")
self.logger.debug(f"podman pull stderr: {d_err_v}")
return d_retval , d_out_v , d_err_v
def materializeContainers(self, tagList: List[ContainerTaggedName], simpleFileNameMethod: ContainerFileNamingMethod, offline: bool = False) -> List[Container]:
"""
It is assured the containers are materialized
"""
containersList = []
matEnv = dict(os.environ)
matEnv.update(self.environment)
for tag in tagList:
# It is an absolute URL, we are removing the docker://
if tag.startswith(DOCKER_PROTO):
dockerTag = tag[len(DOCKER_PROTO):]
podmanPullTag = tag
else:
dockerTag = tag
podmanPullTag = DOCKER_PROTO + tag
self.logger.info(f"downloading podman container: {tag}")
d_retval , d_out_v , d_err_v = self._inspect(dockerTag, matEnv)
# Time to pull the image
if d_retval != 0:
d_retval , d_out_v , d_err_v = self._pull(podmanPullTag, matEnv)
if d_retval == 0:
# Second try
d_retval , d_out_v , d_err_v = self._inspect(dockerTag, matEnv)
if d_retval != 0:
errstr = """Could not materialize podman image {}. Retval {}
======
STDOUT
======
{}
======
STDERR
======
{}""".format(podmanPullTag, d_retval, d_out_v, d_err_v)
raise ContainerFactoryException(errstr)
# Parsing the output from docker inspect
try:
manifests = json.loads(d_out_v)
manifest = manifests[0]
except Exception as e:
raise ContainerFactoryException(f"FATAL ERROR: Podman finished properly but it did not properly materialize {tag}: {e}")
# Then, compute the signature
tagId = manifest['Id']
fingerprint = None
if len(manifest['RepoDigests']) > 0:
fingerprint = manifest['RepoDigests'][0]
containersList.append(
Container(
origTaggedName=tag,
taggedName=dockerTag,
signature=tagId,
fingerprint=fingerprint,
type=self.containerType
)
)
return containersList
| 37.619632
| 163
| 0.580072
|
c31946b04feb4d7894e2b4173ad8779bc994afa1
| 499
|
py
|
Python
|
users/migrations/0002_auto_20190224_0337.py
|
JySa65/platzi-gram
|
e991a59a8ebe28574671f39cf8bd552e31799cd8
|
[
"MIT"
] | null | null | null |
users/migrations/0002_auto_20190224_0337.py
|
JySa65/platzi-gram
|
e991a59a8ebe28574671f39cf8bd552e31799cd8
|
[
"MIT"
] | null | null | null |
users/migrations/0002_auto_20190224_0337.py
|
JySa65/platzi-gram
|
e991a59a8ebe28574671f39cf8bd552e31799cd8
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1 on 2019-02-24 07:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='profile',
old_name='created',
new_name='created_at',
),
migrations.RenameField(
model_name='profile',
old_name='modified',
new_name='updated_at',
),
]
| 20.791667
| 45
| 0.553106
|
909e3f8ccad76fd04557fdc6d0e8eb449816c506
| 1,199
|
py
|
Python
|
djangocms_snippet/migrations/0006_auto_20160831_0729.py
|
Bernardvdv/djangocms-snippet
|
883f9f895c6036f243ae90ce24a6e30ec82ad57f
|
[
"BSD-3-Clause"
] | 27
|
2015-03-29T01:06:26.000Z
|
2020-02-04T02:02:55.000Z
|
djangocms_snippet/migrations/0006_auto_20160831_0729.py
|
Bernardvdv/djangocms-snippet
|
883f9f895c6036f243ae90ce24a6e30ec82ad57f
|
[
"BSD-3-Clause"
] | 65
|
2015-01-02T05:15:17.000Z
|
2020-09-20T17:56:04.000Z
|
djangocms_snippet/migrations/0006_auto_20160831_0729.py
|
adam-murray/djangocms-snippet
|
a58974888ea4e2569a22feafe03b051f28bc22db
|
[
"BSD-3-Clause"
] | 28
|
2015-01-10T06:34:43.000Z
|
2020-09-20T17:35:03.000Z
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djangocms_snippet', '0005_set_related_name_for_cmsplugin_ptr'),
]
operations = [
migrations.AlterModelOptions(
name='snippetptr',
options={'verbose_name': 'Snippet', 'verbose_name_plural': 'Snippets'},
),
migrations.AlterField(
model_name='snippet',
name='name',
field=models.CharField(unique=True, max_length=255, verbose_name='Name'),
),
migrations.AlterField(
model_name='snippet',
name='slug',
field=models.SlugField(default='', unique=True, max_length=255, verbose_name='Slug'),
),
migrations.AlterField(
model_name='snippet',
name='template',
field=models.CharField(help_text='Enter a template (i.e. "snippets/plugin_xy.html") which will be rendered. If "template" is given, the contents of field "HTML" will be passed as template variable {{ html }} to the template. Else, the content of "HTML" is rendered.', max_length=255, verbose_name='Template', blank=True),
),
]
| 38.677419
| 333
| 0.622185
|
09716c10e7765af5eb12ad9cdc2cab32a9540016
| 2,078
|
py
|
Python
|
setup.py
|
manonthemat/froide
|
698c49935eaf2e922f3c9f6a46af0fd545ccbbbb
|
[
"MIT"
] | null | null | null |
setup.py
|
manonthemat/froide
|
698c49935eaf2e922f3c9f6a46af0fd545ccbbbb
|
[
"MIT"
] | null | null | null |
setup.py
|
manonthemat/froide
|
698c49935eaf2e922f3c9f6a46af0fd545ccbbbb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import codecs
import re
import os
from setuptools import setup, find_packages
def read(*parts):
filename = os.path.join(os.path.dirname(__file__), *parts)
with codecs.open(filename, encoding='utf-8') as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setup(
name="froide",
version=find_version("froide", "__init__.py"),
url='https://github.com/okfde/froide',
license='MIT',
description="German Freedom of Information Portal",
long_description=read('README.md'),
author='Stefan Wehrmeyer',
author_email='mail@stefanwehrmeyer.com',
packages=find_packages(),
scripts=['manage.py'],
install_requires=[
'Django',
'Markdown',
'celery',
'geoip2',
'django-elasticsearch-dsl',
'django-taggit',
'pytz',
'requests',
'python-magic',
'djangorestframework',
'djangorestframework-csv',
'djangorestframework-jsonp',
'python-mimeparse',
'django-configurations',
'django-crossdomainmedia',
'django-storages',
'django-wikidata',
'dj-database-url',
'django-cache-url',
'django-filter',
'phonenumbers',
'django-filingcabinet',
'icalendar',
],
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP'
]
)
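# A quick, self-contained check of the version-extraction pattern used by find_version
# above: the same regular expression applied to an in-memory string rather than to
# froide/__init__.py. The sample text is made up for illustration.
import re

sample = '__version__ = "4.0.2"\n'
match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", sample, re.M)
print(match.group(1) if match else "no version string found")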
| 27.706667
| 68
| 0.593359
|
90aeb33200d94264d3875f7066881ff3b799d13f
| 29,957
|
py
|
Python
|
sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_client.py
|
vchske/azure-sdk-for-python
|
6383ed3676b7355af7be394562b126209961ec13
|
[
"MIT"
] | null | null | null |
sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_client.py
|
vchske/azure-sdk-for-python
|
6383ed3676b7355af7be394562b126209961ec13
|
[
"MIT"
] | null | null | null |
sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_client.py
|
vchske/azure-sdk-for-python
|
6383ed3676b7355af7be394562b126209961ec13
|
[
"MIT"
] | 1
|
2019-06-17T22:18:23.000Z
|
2019-06-17T22:18:23.000Z
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from datetime import datetime
try:
from typing import TYPE_CHECKING
except ImportError:
TYPE_CHECKING = False
if TYPE_CHECKING:
from typing import Any, Dict, Generator, Mapping, Optional
from azure.core.exceptions import ResourceExistsError, ResourceNotFoundError
from ._shared import KeyVaultClientBase
from ._models import Key, KeyBase, DeletedKey, KeyOperationResult
class KeyClient(KeyVaultClientBase):
"""KeyClient is a high-level interface for managing a vault's keys.
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START create_key_client]
:end-before: [END create_key_client]
:language: python
:caption: Creates a new instance of the Key client
:dedent: 4
"""
# pylint:disable=protected-access
def create_key(
self,
name,
key_type,
size=None,
key_operations=None,
enabled=None,
expires=None,
not_before=None,
tags=None,
curve=None,
**kwargs
):
# type: (str, str, Optional[int], Optional[List[str]], Optional[bool], Optional[datetime], Optional[datetime], Optional[Dict[str, str]], Optional[str], Mapping[str, Any]) -> Key
"""Creates a new key, stores it, then returns the key to the client.
The create key operation can be used to create any key type in Azure
Key Vault. If the named key already exists, Azure Key Vault creates a
new version of the key. It requires the keys/create permission.
:param name: The name for the new key. The system will generate
the version name for the new key.
:type name: str
:param key_type: The type of key to create. For valid values, see
JsonWebKeyType. Possible values include: 'EC', 'EC-HSM', 'RSA',
'RSA-HSM', 'oct'
:type key_type: str or ~azure.keyvault.keys._generated.v7_0.models.JsonWebKeyType
:param size: The key size in bits. For example: 2048, 3072, or
4096 for RSA.
:type size: int
:param key_operations: Supported key operations.
:type key_operations: list[str or
~azure.keyvault.keys._generated.v7_0.models.JsonWebKeyOperation]
:param enabled: Determines whether the object is enabled.
:type enabled: bool
:param expires: Expiry date of the key in UTC.
:type expires: datetime.datetime
:param not_before: Not before date of the key in UTC
:type not_before: datetime.datetime
:param tags: Application specific metadata in the form of key-value
pairs.
:type tags: Dict[str, str]
:param curve: Elliptic curve name. If none then defaults to 'P-256'. For valid values, see
JsonWebKeyCurveName. Possible values include: 'P-256', 'P-384',
'P-521', 'SECP256K1'
:type curve: str or ~azure.keyvault.keys._generated.v7_0.models.JsonWebKeyCurveName
:returns: The created key
:rtype: ~azure.keyvault.keys._models.Key
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START create_key]
:end-before: [END create_key]
:language: python
:caption: Creates a key in the key vault
:dedent: 8
"""
if enabled is not None or not_before is not None or expires is not None:
attributes = self._client.models.KeyAttributes(enabled=enabled, not_before=not_before, expires=expires)
else:
attributes = None
bundle = self._client.create_key(
self.vault_url,
name,
key_type,
size,
key_attributes=attributes,
key_ops=key_operations,
tags=tags,
curve=curve,
**kwargs
)
return Key._from_key_bundle(bundle)
def create_rsa_key(
self,
name,
hsm,
size=None,
key_operations=None,
enabled=None,
expires=None,
not_before=None,
tags=None,
**kwargs
):
# type: (str, bool, Optional[int], Optional[List[str]], Optional[bool], Optional[datetime], Optional[datetime], Optional[Dict[str, str]], Mapping[str, Any]) -> Key
"""Creates a new RSA type key, stores it, then returns key to the client.
The create key operation can be used to create any key type in Azure
Key Vault. If the named key already exists, Azure Key Vault creates a
new version of the key. It requires the keys/create permission.
:param name: The name for the new key. The system will generate
the version name for the new key.
:type name: str
:param hsm: Whether to create as a hardware key (HSM) or software key.
:type hsm: bool
:param size: The key size in bits. For example: 2048, 3072, or
4096 for RSA.
:type size: int
:param key_operations: Supported key operations.
:type key_operations: list[str or
~azure.keyvault.keys._generated.v7_0.models.JsonWebKeyOperation]
:param enabled: Determines whether the object is enabled.
:type enabled: bool
:param expires: Expiry date of the key in UTC.
:type expires: datetime.datetime
:param not_before: Not before date of the key in UTC
:type not_before: datetime.datetime
:param tags: Application specific metadata in the form of key-value
pairs.
:type tags: Dict[str, str]
:returns: The created RSA key
:rtype: ~azure.keyvault.keys._models.Key
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START create_rsa_key]
:end-before: [END create_rsa_key]
:language: python
:caption: Creates a RSA key in the key vault
:dedent: 8
"""
key_type = "RSA-HSM" if hsm else "RSA"
return self.create_key(
name,
key_type=key_type,
size=size,
key_operations=key_operations,
enabled=enabled,
expires=expires,
not_before=not_before,
tags=tags,
**kwargs
)
def create_ec_key(
self,
name,
hsm,
curve=None,
key_operations=None,
enabled=None,
expires=None,
not_before=None,
tags=None,
**kwargs
):
# type: (str, bool, Optional[str], Optional[List[str]], Optional[bool], Optional[datetime], Optional[datetime], Optional[Dict[str, str]], Mapping[str, Any]) -> Key
"""Creates a new Elliptic curve type key, stores it, then returns key to the client.
The create key operation can be used to create any key type in Azure
Key Vault. If the named key already exists, Azure Key Vault creates a
new version of the key. It requires the keys/create permission.
:param name: The name for the new key. The system will generate
the version name for the new key.
:type name: str
:param hsm: Whether to create as a hardware key (HSM) or software key.
:type hsm: bool
:param curve: Elliptic curve name. If none then defaults to 'P-256'. For valid values, see
JsonWebKeyCurveName. Possible values include: 'P-256', 'P-384',
'P-521', 'SECP256K1'
:type curve: str or
~azure.keyvault.keys._generated.v7_0.models.JsonWebKeyCurveName
:param key_operations: Supported key operations.
:type key_operations: list[str or
~azure.keyvault.keys._generated.v7_0.models.JsonWebKeyOperation]
:param enabled: Determines whether the object is enabled.
:type enabled: bool
:param expires: Expiry date of the key in UTC.
:type expires: datetime.datetime
:param not_before: Not before date of the key in UTC
:type not_before: datetime.datetime
:param tags: Application specific metadata in the form of key-value
pairs.
:type tags: Dict[str, str]
:returns: The created EC key
:rtype: ~azure.keyvault.keys._models.Key
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START create_ec_key]
:end-before: [END create_ec_key]
:language: python
:caption: Creates an EC key in the key vault
:dedent: 8
"""
key_type = "EC-HSM" if hsm else "EC"
return self.create_key(
name,
key_type=key_type,
curve=curve,
key_operations=key_operations,
enabled=enabled,
expires=expires,
not_before=not_before,
tags=tags,
**kwargs
)
def delete_key(self, name, **kwargs):
# type: (str, Mapping[str, Any]) -> DeletedKey
"""Deletes a key from the Key Vault.
The delete key operation cannot be used to remove individual versions
of a key. This operation removes the cryptographic material associated
with the key, which means the key is not usable for Sign/Verify,
Wrap/Unwrap or Encrypt/Decrypt operations. This operation requires the
keys/delete permission.
:param name: The name of the key to delete.
:type name: str
:returns: The deleted key
:rtype: ~azure.keyvault.keys._models.DeletedKey
:raises: ~azure.core.exceptions.ResourceNotFoundError if the client failed to retrieve the key
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START delete_key]
:end-before: [END delete_key]
:language: python
:caption: Deletes a key in the key vault
:dedent: 8
"""
bundle = self._client.delete_key(self.vault_url, name, error_map={404: ResourceNotFoundError}, **kwargs)
return DeletedKey._from_deleted_key_bundle(bundle)
def get_key(self, name, version=None, **kwargs):
# type: (str, Optional[str], Mapping[str, Any]) -> Key
"""Gets the public part of a stored key.
The get key operation is applicable to all key types. If the requested
key is symmetric, then no key material is released in the response.
This operation requires the keys/get permission.
:param name: The name of the key to get.
:type name: str
:param version: Retrieves a specific version of a key. If the version is None or an empty string,
the latest version of the key is returned
:type version: str
:returns: Key
:rtype: ~azure.keyvault.keys._models.Key
:raises: ~azure.core.exceptions.ResourceNotFoundError if the client failed to retrieve the key
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START get_key]
:end-before: [END get_key]
:language: python
:caption: Retrieves a key from the key vault
:dedent: 8
"""
bundle = self._client.get_key(
self.vault_url, name, key_version=version or "", error_map={404: ResourceNotFoundError}, **kwargs
)
return Key._from_key_bundle(bundle)
def get_deleted_key(self, name, **kwargs):
# type: (str, Mapping[str, Any]) -> DeletedKey
"""Gets a deleted key from the Key Vault
The Get Deleted Key operation is applicable for soft-delete enabled
vaults. While the operation can be invoked on any vault, it will return
an error if invoked on a non soft-delete enabled vault. This operation
requires the keys/get permission.
:param name: The name of the key.
:type name: str
:returns: The deleted key
:rtype: ~azure.keyvault.keys._models.DeletedKey
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START get_deleted_key]
:end-before: [END get_deleted_key]
:language: python
:caption: Retrieves a deleted key from the key vault
:dedent: 8
"""
bundle = self._client.get_deleted_key(self.vault_url, name, error_map={404: ResourceNotFoundError}, **kwargs)
return DeletedKey._from_deleted_key_bundle(bundle)
def list_deleted_keys(self, **kwargs):
# type: (Mapping[str, Any]) -> Generator[DeletedKey]
"""Lists the deleted keys in the Key Vault
Retrieves a list of the keys in the Key Vault as JSON Web Key
structures that contain the public part of a deleted key. This
operation includes deletion-specific information. The Get Deleted Keys
operation is applicable for vaults enabled for soft-delete. While the
operation can be invoked on any vault, it will return an error if
invoked on a non soft-delete enabled vault. This operation requires the
keys/list permission.
:returns: An iterator like instance of DeletedKey
:rtype:
Generator[~azure.keyvault.keys._models.DeletedKey]
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START list_deleted_keys]
:end-before: [END list_deleted_keys]
:language: python
:caption: List all the deleted keys in the vault
:dedent: 8
"""
max_page_size = kwargs.get("max_page_size", None)
pages = self._client.get_deleted_keys(self._vault_url, maxresults=max_page_size, **kwargs)
return (DeletedKey._from_deleted_key_item(item) for item in pages)
def list_keys(self, **kwargs):
# type: (Mapping[str, Any]) -> Generator[KeyBase]
"""List the keys in the Key Vault
Retrieves a list of the keys in the Key Vault as JSON Web Key
structures that contain the public part of a stored key. The LIST
operation is applicable to all key types, however only the base key
identifier, attributes, and tags are provided in the response.
Individual versions of a key are not listed in the response. This
operation requires the keys/list permission.
:returns: An iterator like instance of KeyBase
:rtype:
Generator[~azure.keyvault.keys._models.KeyBase]
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START list_keys]
:end-before: [END list_keys]
:language: python
:caption: List all keys in the vault
:dedent: 8
"""
max_page_size = kwargs.get("max_page_size", None)
pages = self._client.get_keys(self._vault_url, maxresults=max_page_size, **kwargs)
return (KeyBase._from_key_item(item) for item in pages)
def list_key_versions(self, name, **kwargs):
# type: (str, Mapping[str, Any]) -> Generator[KeyBase]
"""Retrieves a list of individual key versions with the same key name.
The full key identifier, attributes, and tags are provided in the
response. This operation requires the keys/list permission.
:param name: The name of the key.
:type name: str
:returns: An iterator like instance of KeyBase
:rtype:
Generator[~azure.keyvault.keys._models.KeyBase]
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START list_key_versions]
:end-before: [END list_key_versions]
:language: python
:caption: List all versions of the specified key
:dedent: 8
"""
max_page_size = kwargs.get("max_page_size", None)
pages = self._client.get_key_versions(self._vault_url, name, maxresults=max_page_size, **kwargs)
return (KeyBase._from_key_item(item) for item in pages)
def purge_deleted_key(self, name, **kwargs):
# type: (str, Mapping[str, Any]) -> None
"""Permanently deletes the specified key.
The Purge Deleted Key operation is applicable for soft-delete enabled
vaults. While the operation can be invoked on any vault, it will return
an error if invoked on a non soft-delete enabled vault. This operation
requires the keys/purge permission.
:param name: The name of the key
:type name: str
:returns: None
:rtype: None
Example:
.. code-block:: python
# if the vault has soft-delete enabled, purge permanently deletes a deleted key
# (with soft-delete disabled, delete itself is permanent)
key_client.purge_deleted_key("key-name")
"""
        self._client.purge_deleted_key(self.vault_url, name, **kwargs)
def recover_deleted_key(self, name, **kwargs):
# type: (str, Mapping[str, Any]) -> Key
"""Recovers the deleted key to its latest version.
The Recover Deleted Key operation is applicable for deleted keys in
soft-delete enabled vaults. It recovers the deleted key back to its
        latest version under /keys. An attempt to recover a non-deleted key
will return an error. Consider this the inverse of the delete operation
on soft-delete enabled vaults. This operation requires the keys/recover
permission.
:param name: The name of the deleted key.
:type name: str
:returns: The recovered deleted key
:rtype: ~azure.keyvault.keys._models.Key
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START recover_deleted_key]
:end-before: [END recover_deleted_key]
:language: python
:caption: Recovers the specified soft-deleted key
:dedent: 8
"""
        bundle = self._client.recover_deleted_key(self.vault_url, name, **kwargs)
return Key._from_key_bundle(bundle)
def update_key(
self, name, version=None, key_operations=None, enabled=None, expires=None, not_before=None, tags=None, **kwargs
):
# type: (str, Optional[str], Optional[List[str]], Optional[bool], Optional[datetime], Optional[datetime], Optional[Dict[str, str]], Mapping[str, Any]) -> Key
"""The update key operation changes specified attributes of a stored key
and can be applied to any key type and key version stored in Azure Key
Vault.
In order to perform this operation, the key must already exist in the
Key Vault. Note: The cryptographic material of a key itself cannot be
changed. This operation requires the keys/update permission.
:param name: The name of key to update.
:type name: str
:param version: The version of the key to update.
:type version: str
:param key_operations: Json web key operations. For more information on
possible key operations, see JsonWebKeyOperation.
:type key_operations: list[str or
~azure.keyvault.keys._generated.v7_0.models.JsonWebKeyOperation]
:param enabled: Determines whether the object is enabled.
:type enabled: bool
:param expires: Expiry date of the key in UTC.
:type expires: datetime.datetime
:param not_before: Not before date of the key in UTC
:type not_before: datetime.datetime
:param tags: Application specific metadata in the form of key-value
pairs.
:type tags: Dict[str, str]
:returns: The updated key
:rtype: ~azure.keyvault.keys._models.Key
:raises: ~azure.core.exceptions.ResourceNotFoundError if the client failed to retrieve the key
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START update_key]
:end-before: [END update_key]
:language: python
:caption: Updates a key in the key vault
:dedent: 8
"""
if enabled is not None or not_before is not None or expires is not None:
attributes = self._client.models.KeyAttributes(enabled=enabled, not_before=not_before, expires=expires)
else:
attributes = None
bundle = self._client.update_key(
self.vault_url,
name,
key_version=version or "",
key_ops=key_operations,
tags=tags,
key_attributes=attributes,
error_map={404: ResourceNotFoundError},
**kwargs
)
return Key._from_key_bundle(bundle)
def backup_key(self, name, **kwargs):
# type: (str, Mapping[str, Any]) -> bytes
"""Backs up the specified key.
        Requests that a backup of the specified key be downloaded to the client.
The Key Backup operation exports a key from Azure Key Vault in a
protected form. Note that this operation does NOT return key material
in a form that can be used outside the Azure Key Vault system, the
        returned key material is either protected to an Azure Key Vault HSM or
to Azure Key Vault itself. The intent of this operation is to allow a
client to GENERATE a key in one Azure Key Vault instance, BACKUP the
key, and then RESTORE it into another Azure Key Vault instance. The
BACKUP operation may be used to export, in protected form, any key type
from Azure Key Vault. Individual versions of a key cannot be backed up.
BACKUP / RESTORE can be performed within geographical boundaries only;
meaning that a BACKUP from one geographical area cannot be restored to
another geographical area. For example, a backup from the US
geographical area cannot be restored in an EU geographical area. This
operation requires the key/backup permission.
:param name: The name of the key.
:type name: str
:returns: The raw bytes of the key backup.
:rtype: bytes
:raises: ~azure.core.exceptions.ResourceNotFoundError if the client failed to retrieve the key
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START backup_key]
:end-before: [END backup_key]
:language: python
:caption: Backs up the specified key to the key vault
:dedent: 8
"""
backup_result = self._client.backup_key(self.vault_url, name, error_map={404: ResourceNotFoundError}, **kwargs)
return backup_result.value
def restore_key(self, backup, **kwargs):
# type: (bytes, Mapping[str, Any]) -> Key
"""Restores a backed up key to the Key Vault
Imports a previously backed up key into Azure Key Vault, restoring the
key, its key identifier, attributes and access control policies. The
RESTORE operation may be used to import a previously backed up key.
Individual versions of a key cannot be restored. The key is restored in
its entirety with the same key name as it had when it was backed up. If
the key name is not available in the target Key Vault, the RESTORE
operation will be rejected. While the key name is retained during
restore, the final key identifier will change if the key is restored to
a different vault. Restore will restore all versions and preserve
version identifiers. The RESTORE operation is subject to security
constraints: The target Key Vault must be owned by the same Microsoft
Azure Subscription as the source Key Vault The user must have RESTORE
permission in the target Key Vault. This operation requires the
keys/restore permission.
:param backup: The raw bytes of the key backup
:type backup: bytes
:returns: The restored key
:rtype: ~azure.keyvault.keys._models.Key
:raises: ~azure.core.exceptions.ResourceExistsError if the client failed to retrieve the key
Example:
.. literalinclude:: ../tests/test_samples_keys.py
:start-after: [START restore_key]
:end-before: [END restore_key]
:language: python
:caption: Restores a backed up key to the vault
:dedent: 8
"""
bundle = self._client.restore_key(self.vault_url, backup, error_map={409: ResourceExistsError}, **kwargs)
return Key._from_key_bundle(bundle)
def import_key(self, name, key, hsm=None, enabled=None, not_before=None, expires=None, tags=None, **kwargs):
        # type: (str, JsonWebKey, Optional[bool], Optional[bool], Optional[datetime], Optional[datetime], Optional[Dict[str, str]], Mapping[str, Any]) -> Key
"""Imports an externally created key, stores it, and returns the key to the client.
The import key operation may be used to import any key type into an
Azure Key Vault. If the named key already exists, Azure Key Vault
creates a new version of the key. This operation requires the
keys/import permission.
:param name: Name for the imported key.
:type name: str
:param key: The Json web key
:type key: ~azure.security.keyvault.v7_0.models.JsonWebKey
:param hsm: Whether to import as a hardware key (HSM) or software key.
:type hsm: bool
:param enabled: Determines whether the object is enabled.
:type enabled: bool
:param expires: Expiry date of the key in UTC.
:type expires: datetime.datetime
:param not_before: Not before date of the key in UTC
:type not_before: datetime.datetime
:param tags: Application specific metadata in the form of key-value
pairs.
:type tags: Dict[str, str]
:returns: The imported key
:rtype: ~azure.keyvault.keys._models.Key
"""
if enabled is not None or not_before is not None or expires is not None:
attributes = self._client.models.KeyAttributes(enabled=enabled, not_before=not_before, expires=expires)
else:
attributes = None
bundle = self._client.import_key(
self.vault_url, name, key=key, hsm=hsm, key_attributes=attributes, tags=tags, **kwargs
)
return Key._from_key_bundle(bundle)
def wrap_key(self, name, algorithm, value, version=None, **kwargs):
        # type: (str, str, bytes, Optional[str], Mapping[str, Any]) -> KeyOperationResult
"""Wraps a symmetric key using a specified key.
The WRAP operation supports encryption of a symmetric key using a key
encryption key that has previously been stored in an Azure Key Vault.
The WRAP operation is only strictly necessary for symmetric keys stored
in Azure Key Vault since protection with an asymmetric key can be
performed using the public portion of the key. This operation is
supported for asymmetric keys as a convenience for callers that have a
key-reference but do not have access to the public key material. This
operation requires the keys/wrapKey permission.
:param name: The name of the key.
:type name: str
:param version: The version of the key.
:type version: str
:param algorithm: algorithm identifier. Possible values include:
'RSA-OAEP', 'RSA-OAEP-256', 'RSA1_5'
:type algorithm: str or
~azure.security.keyvault.v7_0.models.JsonWebKeyEncryptionAlgorithm
:param value:
:type value: bytes
:returns: The wrapped symmetric key.
:rtype: ~azure.keyvault.keys._models.KeyOperationResult
"""
if version is None:
version = ""
bundle = self._client.wrap_key(
self.vault_url, name, key_version=version, algorithm=algorithm, value=value, **kwargs
)
return KeyOperationResult(id=bundle.kid, value=bundle.result)
def unwrap_key(self, name, algorithm, value, version=None, **kwargs):
        # type: (str, str, bytes, Optional[str], Mapping[str, Any]) -> KeyOperationResult
"""Unwraps a symmetric key using the specified key that was initially used
for wrapping that key.
The UNWRAP operation supports decryption of a symmetric key using the
target key encryption key. This operation is the reverse of the WRAP
operation. The UNWRAP operation applies to asymmetric and symmetric
keys stored in Azure Key Vault since it uses the private portion of the
key. This operation requires the keys/unwrapKey permission.
:param name: The name of the key.
:type name: str
:param version: The version of the key.
:type version: str
:param algorithm: algorithm identifier. Possible values include:
'RSA-OAEP', 'RSA-OAEP-256', 'RSA1_5'
:type algorithm: str or
~azure.security.keyvault.v7_0.models.JsonWebKeyEncryptionAlgorithm
:param value:
:type value: bytes
:returns: The unwrapped symmetric key.
:rtype: ~azure.keyvault.keys._models.KeyOperationResult
"""
if version is None:
version = ""
bundle = self._client.unwrap_key(
self.vault_url, name, key_version=version, algorithm=algorithm, value=value, **kwargs
)
return KeyOperationResult(id=bundle.kid, value=bundle.result)
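# A hedged usage sketch for the KeyClient defined above. It assumes the client is
# constructed with a vault URL and an azure-identity credential, as in the SDK samples;
# the vault URL and key name are placeholders, and only methods shown in the class
# above are called.
from azure.identity import DefaultAzureCredential
from azure.keyvault.keys import KeyClient

credential = DefaultAzureCredential()
key_client = KeyClient(vault_url="https://my-vault.vault.azure.net/", credential=credential)

rsa_key = key_client.create_rsa_key("example-key", hsm=False, size=2048)
fetched = key_client.get_key("example-key")
backup_bytes = key_client.backup_key("example-key")
deleted = key_client.delete_key("example-key")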
| 43.542151
| 185
| 0.632807
|
fa1d88bb6d07924afde5015c96f82b97699def28
| 1,304
|
py
|
Python
|
mi/dataset/driver/fdchp_a/fdchp_a_recovered_driver.py
|
petercable/mi-dataset
|
d3c1607ea31af85fbba5719a31d4a60bf39f8dd3
|
[
"BSD-2-Clause"
] | 1
|
2018-09-14T23:28:29.000Z
|
2018-09-14T23:28:29.000Z
|
mi/dataset/driver/fdchp_a/fdchp_a_recovered_driver.py
|
petercable/mi-dataset
|
d3c1607ea31af85fbba5719a31d4a60bf39f8dd3
|
[
"BSD-2-Clause"
] | 33
|
2017-04-25T19:53:45.000Z
|
2022-03-18T17:42:18.000Z
|
mi/dataset/driver/fdchp_a/fdchp_a_recovered_driver.py
|
petercable/mi-dataset
|
d3c1607ea31af85fbba5719a31d4a60bf39f8dd3
|
[
"BSD-2-Clause"
] | 31
|
2015-03-04T01:01:09.000Z
|
2020-10-28T14:42:12.000Z
|
#!/usr/bin/env python
"""
@package mi.dataset.driver.fdchp_a
@file mi/dataset/driver/fdchp_a/fdchp_a_recovered_driver.py
@author Emily Hahn
@brief Driver for the fdchp series a recovered instrument
"""
from mi.dataset.dataset_driver import SimpleDatasetDriver
from mi.dataset.parser.fdchp_a import FdchpAParser
from mi.core.versioning import version
@version("15.7.1")
def parse(unused, source_file_path, particle_data_handler):
"""
This is the method called by Uframe
:param unused
:param source_file_path This is the full path and filename of the file to be parsed
:param particle_data_handler Java Object to consume the output of the parser
:return particle_data_handler
"""
with open(source_file_path, 'rb') as stream_handle:
# create and instance of the concrete driver class defined below
driver = FdchpARecoveredDriver(unused, stream_handle, particle_data_handler)
driver.processFileStream()
return particle_data_handler
class FdchpARecoveredDriver(SimpleDatasetDriver):
"""
Derived fdchp a driver class
All this needs to do is create a concrete _build_parser method
"""
def _build_parser(self, stream_handle):
# build the parser
return FdchpAParser(stream_handle, self._exception_callback)
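# A hedged sketch of invoking the parse() entry point above outside of uFrame. It assumes
# a ParticleDataHandler class is importable from mi.dataset.dataset_driver, as the
# package's other drivers and tests do, and 'fdchp_a_sample.dat' is a placeholder path.
from mi.dataset.dataset_driver import ParticleDataHandler

handler = parse(None, 'fdchp_a_sample.dat', ParticleDataHandler())
# Parsed particles accumulate on the handler object returned to the caller.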
| 30.325581
| 87
| 0.753834
|
706c9917f17617e5469d6dc54d2d396cebfe5b74
| 4,481
|
py
|
Python
|
train_stage2.py
|
pravn/StackGAN
|
cbb76e0af44b05a74a0c852cdb2fb9b5736a95b8
|
[
"MIT"
] | 2
|
2020-02-11T11:28:27.000Z
|
2021-09-16T06:33:28.000Z
|
train_stage2.py
|
pravn/StackGAN
|
cbb76e0af44b05a74a0c852cdb2fb9b5736a95b8
|
[
"MIT"
] | null | null | null |
train_stage2.py
|
pravn/StackGAN
|
cbb76e0af44b05a74a0c852cdb2fb9b5736a95b8
|
[
"MIT"
] | 1
|
2019-08-24T20:41:54.000Z
|
2019-08-24T20:41:54.000Z
|
import argparse
import torch
import os
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.autograd import Variable
from torch import autograd
#from torchvision.datasets import MNIST
#from torchvision.transforms import transforms
from torchvision.utils import save_image
from torch.optim.lr_scheduler import StepLR
from torch.nn import functional as F
from utils import weights_init_G
from utils import weights_init_D
def get_loss(gen, tgt):
loss = (gen-tgt).pow(2)
return torch.sum(loss)
def run_stage2_trainer(train_loader, G2, D2, optimizer_G2, optimizer_D2,
args):
batch_size = args.batchSize
real_label = 1
fake_label = 0
device = torch.device("cuda:0" if args.cuda else "cpu")
if args.restart == '':
G2.apply(weights_init_G)
D2.apply(weights_init_D)
else:
G2 = torch.load('./G2_model.pt')
D2 = torch.load('./D2_model.pt')
criterion_BCE = nn.BCELoss()
criterion_MSE = nn.MSELoss()
criterion_L1 = nn.L1Loss()
width = args.Stage2imageSize
height = args.Stage2imageSize
channels = args.nc
if args.cuda:
criterion_BCE = criterion_BCE.cuda()
criterion_MSE = criterion_MSE.cuda()
criterion_L1 = criterion_L1.cuda()
for epoch in range(100):
for i, (src, tgt) in enumerate(train_loader):
            if i == 16000:
break
src = Variable(src)
src = src.cuda()
tgt = Variable(tgt)
tgt = tgt.cuda()
label = torch.full((batch_size,5,5), real_label, device=device)
for p in G2.parameters():
p.requires_grad = True
for p in D2.parameters():
p.requires_grad = False
G2.zero_grad()
#z = torch.FloatTensor(args.batchSize, args.nz).normal_(0,1)
#if args.cuda:
# z = z.cuda()
#z = Variable(z)
#fake1 = G1(z)
fake = G2(src)
D2_fake, feats_fake = D2(fake)
D2_tgt, feats_tgt = D2(tgt)
#exp_feats_fake = torch.mean(feats_fake, dim=0)
#exp_feats_tgt = torch.mean(feats_tgt, dim=0)
#G2_loss = criterion_MSE(exp_feats_fake, exp_feats_tgt)
#G2_loss = 0.001*get_loss(exp_feats_fake, exp_feats_tgt)
G2_loss = 0.01 * criterion_MSE(feats_fake, feats_tgt)
#Supervised (L1) loss
L1_loss = criterion_L1(fake, tgt)
#L1_loss *= 1.0
L1_loss.backward(retain_graph=True)
#fill with label '1'
#label.fill_(real_label)
#Global Adversarial Loss
#G2_loss = criterion_BCE(D2_fake, label)
#G2_loss = criterion_MSE(D2_fake, label)
#G2_loss *= 0.1
G2_loss.backward(retain_graph=True)
optimizer_G2.step()
#train D2
for p in D2.parameters():
p.requires_grad = True
for p in G2.parameters():
p.requires_grad = False
D2.zero_grad()
#real
D2_real, _ = D2(tgt)
label.fill_(real_label)
#D2_loss_real = criterion_BCE(D2_real, label)
D2_loss_real = criterion_MSE(D2_real, label)
D2_loss_real.backward(retain_graph=True)
fake = G2(src)
D_fake, _ = D2(fake.detach())
label.fill_(fake_label)
#D2_loss_fake = criterion_BCE(D_fake, label)
D2_loss_fake = criterion_MSE(D_fake, label)
D2_loss_fake.backward(retain_graph=True)
optimizer_D2.step()
            if i % 100 == 0:
print('saving images for batch', i)
save_image(src.squeeze().data.cpu().detach(), 'source.png')
save_image(tgt.squeeze().data.cpu().detach(), 'target.png')
save_image(fake.squeeze().data.cpu().detach(), 'fake.png')
if i % 100 == 0:
torch.save(G2, './G2_model.pt')
torch.save(D2, './D2_model.pt')
print('%d [%d/%d] G Loss [L1/GAdv] [%.4f/%.4f] Loss D (real/fake) [%.4f/%.4f]'%
(epoch, i, len(train_loader), L1_loss,
G2_loss, D2_loss_real, D2_loss_fake))
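# A quick standalone check of the get_loss helper defined above: it is just the sum of
# squared element-wise differences, so for these two 2x2 tensors the result is 4.0.
import torch

gen = torch.ones(2, 2)
tgt = torch.zeros(2, 2)
print(get_loss(gen, tgt))  # tensor(4.)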
| 28.909677
| 95
| 0.552778
|
4ab508ec6d76d7d86d55af5c3712895e2a5d2bee
| 439
|
py
|
Python
|
firecares/weather/migrations/0006_auto_20181015_1358.py
|
FireCARES/firecares
|
aa708d441790263206dd3a0a480eb6ca9031439d
|
[
"MIT"
] | 12
|
2016-01-30T02:28:35.000Z
|
2019-05-29T15:49:56.000Z
|
firecares/weather/migrations/0006_auto_20181015_1358.py
|
FireCARES/firecares
|
aa708d441790263206dd3a0a480eb6ca9031439d
|
[
"MIT"
] | 455
|
2015-07-27T20:21:56.000Z
|
2022-03-11T23:26:20.000Z
|
firecares/weather/migrations/0006_auto_20181015_1358.py
|
FireCARES/firecares
|
aa708d441790263206dd3a0a480eb6ca9031439d
|
[
"MIT"
] | 14
|
2015-07-29T09:45:53.000Z
|
2020-10-21T20:03:17.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('weather', '0005_auto_20181015_1355'),
]
operations = [
migrations.AlterField(
model_name='weatherwarnings',
name='warnid',
field=models.CharField(db_index=True, max_length=200, blank=True),
),
]
| 21.95
| 78
| 0.624146
|
61fac75e765587b715755954029c8304ec12f8ec
| 599
|
py
|
Python
|
TWLight/applications/migrations/0009_auto_20160527_1505.py
|
jajodiaraghav/TWLight
|
22359ab0b95ee3653e8ffa0eb698acd7bb8ebf70
|
[
"MIT"
] | 1
|
2019-10-24T04:49:52.000Z
|
2019-10-24T04:49:52.000Z
|
TWLight/applications/migrations/0009_auto_20160527_1505.py
|
jajodiaraghav/TWLight
|
22359ab0b95ee3653e8ffa0eb698acd7bb8ebf70
|
[
"MIT"
] | 1
|
2019-03-29T15:29:45.000Z
|
2019-03-29T15:57:20.000Z
|
TWLight/applications/migrations/0009_auto_20160527_1505.py
|
jajodiaraghav/TWLight
|
22359ab0b95ee3653e8ffa0eb698acd7bb8ebf70
|
[
"MIT"
] | 1
|
2019-09-26T14:40:27.000Z
|
2019-09-26T14:40:27.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('applications', '0008_auto_20160527_1502'),
]
operations = [
migrations.RemoveField(
model_name='application',
name='user',
),
migrations.AlterField(
model_name='application',
name='editor',
field=models.ForeignKey(related_name='applications', default=1, to='users.Editor'),
preserve_default=False,
),
]
| 23.96
| 95
| 0.597663
|
d34d4a134f4dc3edfb20a6f83f81aee69524029e
| 657
|
py
|
Python
|
Web/urlib.py
|
YangChenye/Python-Code
|
66a7edff84143ed6cc9518717c555399683e334c
|
[
"MIT"
] | 41
|
2019-01-30T05:19:06.000Z
|
2022-01-30T06:37:16.000Z
|
Web/urlib.py
|
YangChenye/Python-Code
|
66a7edff84143ed6cc9518717c555399683e334c
|
[
"MIT"
] | null | null | null |
Web/urlib.py
|
YangChenye/Python-Code
|
66a7edff84143ed6cc9518717c555399683e334c
|
[
"MIT"
] | 44
|
2019-01-30T05:14:26.000Z
|
2022-01-02T08:15:19.000Z
|
from urllib import request
if __name__ == "__main__":
    # URL to visit
    url = 'http://www.whatismyip.com.tw/'
    # Proxy IP address
    proxy = {'http':'106.46.136.112:808'}
    # Create the ProxyHandler
    proxy_support = request.ProxyHandler(proxy)
    # Create the opener
    opener = request.build_opener(proxy_support)
    # Add a User-Agent header
    opener.addheaders = [('User-Agent','Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36')]
    # Install the opener
    request.install_opener(opener)
    # Use the installed opener
    response = request.urlopen(url)
    # Read the response and decode it
    html = response.read().decode("utf-8")
    # Print the result
print(html)
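# A hedged variant of the same request with basic error handling, since an unreachable
# proxy is the most common failure mode here; the proxy and URL are the same
# placeholders used above.
from urllib import error, request

try:
    response = request.urlopen('http://www.whatismyip.com.tw/', timeout=10)
    print(response.read().decode('utf-8'))
except error.URLError as e:
    print('request failed:', e.reason)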
| 29.863636
| 156
| 0.668189
|
de75c874b64242d7348d4329dfa083046b036a7b
| 6,877
|
py
|
Python
|
tensorflow/compiler/plugin/poplar/tests/sendrecv_ops_test.py
|
DebeshJha/tensorflow-1
|
2b5a225c49d25273532d11c424d37ce394d7579a
|
[
"Apache-2.0"
] | 2
|
2021-03-08T23:32:06.000Z
|
2022-01-13T03:43:49.000Z
|
tensorflow/compiler/plugin/poplar/tests/sendrecv_ops_test.py
|
DebeshJha/tensorflow-1
|
2b5a225c49d25273532d11c424d37ce394d7579a
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/compiler/plugin/poplar/tests/sendrecv_ops_test.py
|
DebeshJha/tensorflow-1
|
2b5a225c49d25273532d11c424d37ce394d7579a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from absl.testing import parameterized
from tensorflow.compiler.plugin.poplar.ops import gen_sendrecv_ops
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ipu import ipu_compiler
from tensorflow.python.ipu import utils
from tensorflow.python.ipu.scopes import ipu_scope
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class IpuSendRecvOpsTest(xla_test.XLATestCase, parameterized.TestCase): # pylint: disable=abstract-method
@combinations.generate(
combinations.combine(
dtype=[dtypes.float16, dtypes.float32, dtypes.int32]))
def testSendScalar(self, dtype):
with self.session() as sess:
def device_fn(x):
return gen_sendrecv_ops.ipu_send_to_host(x,
tensor_name="test_tensor",
send_device="/device:IPU:0",
send_device_incarnation=0,
recv_device="/device:CPU:0")
inputs = array_ops.placeholder(dtype=dtype, shape=())
with ipu_scope("/device:IPU:0"):
send_op = ipu_compiler.compile(device_fn, inputs=[inputs])
with ops.device("/device:CPU:0"):
recv_op = gen_sendrecv_ops.ipu_recv_at_host(
T=dtype,
tensor_name="test_tensor",
send_device="/device:IPU:0",
send_device_incarnation=0,
recv_device="/device:CPU:0")
opts = utils.create_ipu_config()
utils.configure_ipu_system(opts)
sent, received = sess.run([send_op, recv_op], feed_dict={inputs: 1})
self.assertIsNone(sent) # Send op has no output
self.assertEqual(dtype, received.dtype)
self.assertEqual(0, len(received.shape))
self.assertEqual(1, received)
def testSendFromTwoEngines(self):
with self.session() as sess:
def make_device_fn(i):
def device_fn(x):
return gen_sendrecv_ops.ipu_send_to_host(
x,
tensor_name="tensor_{}".format(i),
send_device="/device:IPU:0",
send_device_incarnation=0,
recv_device="/device:CPU:0")
return device_fn
input_1 = array_ops.placeholder(dtype=dtypes.float32, shape=())
input_2 = array_ops.placeholder(dtype=dtypes.float32, shape=())
with ipu_scope("/device:IPU:0"):
send_1 = ipu_compiler.compile(make_device_fn(1), inputs=[input_1])
send_2 = ipu_compiler.compile(make_device_fn(2), inputs=[input_2])
with ops.device("/device:CPU:0"):
recv_1 = gen_sendrecv_ops.ipu_recv_at_host(T=dtypes.float32,
tensor_name="tensor_1",
send_device="/device:IPU:0",
send_device_incarnation=0,
recv_device="/device:CPU:0")
recv_2 = gen_sendrecv_ops.ipu_recv_at_host(T=dtypes.float32,
tensor_name="tensor_2",
send_device="/device:IPU:0",
send_device_incarnation=0,
recv_device="/device:CPU:0")
opts = utils.create_ipu_config()
utils.configure_ipu_system(opts)
# Test it a couple of times to verify the communication channel is reusable.
for i in range(2):
_, _, result_1, result_2 = sess.run([send_1, send_2, recv_1, recv_2],
feed_dict={
input_1: i,
input_2: i + 1
})
self.assertEqual(i, result_1)
self.assertEqual(i + 1, result_2)
@combinations.generate(
combinations.combine(dtype=[dtypes.float16, dtypes.float32]))
def testSendMatrices(self, dtype):
with self.session() as sess:
L = 3
def device_fn(x):
for i in range(L):
x = math_ops.matmul(x, x)
if i < L - 1:
gen_sendrecv_ops.ipu_send_to_host(x,
tensor_name="x_{}".format(i),
send_device="/device:IPU:0",
send_device_incarnation=0,
recv_device="/device:CPU:0")
return x
N = 2
inputs = array_ops.placeholder(dtype=dtype, shape=(N, N))
with ipu_scope("/device:IPU:0"):
[device_out] = ipu_compiler.compile(device_fn, inputs=[inputs])
received = []
with ops.device("/device:CPU:0"):
for i in range(L - 1):
received.append(
gen_sendrecv_ops.ipu_recv_at_host(T=dtype,
tensor_name="x_{}".format(i),
send_device="/device:IPU:0",
send_device_incarnation=0,
recv_device="/device:CPU:0"))
opts = utils.create_ipu_config()
utils.configure_ipu_system(opts)
received_values, device_value = sess.run(
[received, device_out], feed_dict={inputs: np.ones((N, N))})
self.assertAllClose(2 * np.ones((N, N)), received_values[0])
self.assertAllClose(8 * np.ones((N, N)), received_values[1])
self.assertAllClose(128 * np.ones((N, N)), device_value)
if __name__ == "__main__":
os.environ['TF_XLA_FLAGS'] = ('--tf_xla_min_cluster_size=1 ' +
os.environ.get('TF_XLA_FLAGS', ''))
googletest.main()
| 40.934524
| 106
| 0.566381
|
0e2e807c886e18878f8f76a1ab8a971bb43d0fc9
| 6,961
|
py
|
Python
|
spinoffs/oryx/oryx/experimental/nn/convolution.py
|
brianwa84/probability
|
6f8e78d859ac41170be5147c8c7bde54cc5aa83e
|
[
"Apache-2.0"
] | 2
|
2020-12-17T20:43:24.000Z
|
2021-06-11T22:09:16.000Z
|
spinoffs/oryx/oryx/experimental/nn/convolution.py
|
brianwa84/probability
|
6f8e78d859ac41170be5147c8c7bde54cc5aa83e
|
[
"Apache-2.0"
] | 2
|
2021-08-25T16:14:51.000Z
|
2022-02-10T04:47:11.000Z
|
spinoffs/oryx/oryx/experimental/nn/convolution.py
|
brianwa84/probability
|
6f8e78d859ac41170be5147c8c7bde54cc5aa83e
|
[
"Apache-2.0"
] | 1
|
2021-01-03T20:23:52.000Z
|
2021-01-03T20:23:52.000Z
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Contains building blocks for convolutional neural networks."""
import collections
import itertools
from jax import lax
from jax import random
from jax.experimental import stax
import jax.numpy as np
from oryx.core import state
from oryx.experimental.nn import base
__all__ = [
'conv_info',
'Conv',
'Deconv',
]
DIMENSION_NUMBERS = ('NHWC', 'HWIO', 'NHWC')
ConvParams = collections.namedtuple('ConvParams', ['kernel', 'bias'])
ConvInfo = collections.namedtuple('ConvInfo',
['strides', 'padding', 'one', 'use_bias'])
def conv_info(in_shape, out_chan, filter_shape,
strides=None, padding='VALID',
kernel_init=None, bias_init=stax.randn(1e-6),
transpose=False):
"""Returns parameters and output shape information given input shapes."""
# Essentially the `stax` implementation
if len(in_shape) != 3:
raise ValueError('Need to `jax.vmap` in order to batch')
in_shape = (1,) + in_shape
lhs_spec, rhs_spec, out_spec = DIMENSION_NUMBERS
one = (1,) * len(filter_shape)
strides = strides or one
kernel_init = kernel_init or stax.glorot(
rhs_spec.index('O'), rhs_spec.index('I'))
filter_shape_iter = iter(filter_shape)
kernel_shape = tuple([out_chan if c == 'O' else
in_shape[lhs_spec.index('C')] if c == 'I' else
next(filter_shape_iter) for c in rhs_spec])
if transpose:
out_shape = lax.conv_transpose_shape_tuple(
in_shape, kernel_shape, strides, padding, DIMENSION_NUMBERS)
else:
out_shape = lax.conv_general_shape_tuple(
in_shape, kernel_shape, strides, padding, DIMENSION_NUMBERS)
bias_shape = [out_chan if c == 'C' else 1 for c in out_spec]
bias_shape = tuple(itertools.dropwhile(lambda x: x == 1, bias_shape))
out_shape = out_shape[1:]
shapes = (out_shape, kernel_shape, bias_shape)
inits = (kernel_init, bias_init)
return shapes, inits, (strides, padding, one)
class Conv(base.Layer):
"""Neural network layer for 2D convolution."""
@classmethod
def initialize(cls, key, in_spec, out_chan, filter_shape,
strides=None, padding='VALID',
kernel_init=None, bias_init=stax.randn(1e-6),
use_bias=True):
in_shape = in_spec.shape
shapes, inits, (strides, padding, one) = conv_info(
in_shape, out_chan, filter_shape,
strides=strides, padding=padding,
kernel_init=kernel_init, bias_init=bias_init
)
info = ConvInfo(strides, padding, one, use_bias)
_, kernel_shape, bias_shape = shapes
kernel_init, bias_init = inits
k1, k2 = random.split(key)
if use_bias:
params = ConvParams(
base.create_parameter(k1, kernel_shape, init=kernel_init),
base.create_parameter(k2, bias_shape, init=bias_init),
)
else:
params = ConvParams(
base.create_parameter(k1, kernel_shape, init=kernel_init),
None
)
return base.LayerParams(params, info=info)
@classmethod
def spec(cls, in_spec, out_chan, filter_shape,
strides=None, padding='VALID',
kernel_init=None, bias_init=stax.randn(1e-6),
use_bias=True):
del use_bias
in_shape = in_spec.shape
shapes, _, _ = conv_info(
in_shape, out_chan, filter_shape,
strides=strides, padding=padding,
kernel_init=kernel_init, bias_init=bias_init
)
return state.Shape(shapes[0], dtype=in_spec.dtype)
def _call_batched(self, x):
params, info = self.params, self.info
result = lax.conv_general_dilated(x, params.kernel,
info.strides, info.padding,
info.one, info.one,
DIMENSION_NUMBERS)
if info.use_bias:
result += params.bias
return result
def _call(self, x):
"""Applies 2D convolution of the params with the input x."""
if len(x.shape) != 3:
raise ValueError('Need to `jax.vmap` in order to batch: {}'.format(
x.shape))
result = self._call_batched(x[np.newaxis])
return result[0]
class Deconv(base.Layer):
"""Neural network layer for 2D transposed convolution."""
@classmethod
def initialize(cls, key, in_spec, out_chan, filter_shape,
strides=None, padding='VALID',
kernel_init=None, bias_init=stax.randn(1e-6),
use_bias=True):
in_shape = in_spec.shape
shapes, inits, (strides, padding, one) = conv_info(
in_shape, out_chan, filter_shape,
strides=strides, padding=padding,
kernel_init=kernel_init, bias_init=bias_init,
transpose=True
)
info = ConvInfo(strides, padding, one, use_bias)
_, kernel_shape, bias_shape = shapes
kernel_init, bias_init = inits
k1, k2 = random.split(key)
if use_bias:
params = ConvParams(
base.create_parameter(k1, kernel_shape, init=kernel_init),
base.create_parameter(k2, bias_shape, init=bias_init),
)
else:
params = ConvParams(
base.create_parameter(k1, kernel_shape, init=kernel_init),
None
)
return base.LayerParams(params, info=info)
@classmethod
def spec(cls, in_spec, out_chan, filter_shape,
strides=None, padding='VALID',
kernel_init=None, bias_init=stax.randn(1e-6),
use_bias=True):
del use_bias
in_shape = in_spec.shape
shapes, _, _ = conv_info(
in_shape, out_chan, filter_shape,
strides=strides, padding=padding,
kernel_init=kernel_init, bias_init=bias_init,
transpose=True
)
return state.Shape(shapes[0], dtype=in_spec.dtype)
def _call_batched(self, x):
params, info = self.params, self.info
result = lax.conv_transpose(x, params.kernel,
info.strides, info.padding,
dimension_numbers=DIMENSION_NUMBERS)
if info.use_bias:
result += params.bias
return result
def _call(self, x):
"""Applies 2D transposed convolution of the params with the input x."""
if len(x.shape) != 3:
raise ValueError('Need to `jax.vmap` in order to batch')
result = self._call_batched(x[np.newaxis])
return result[0]
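# A quick sanity check of the conv_info helper defined above (it relies on the
# definitions and imports in this module): for a 32x32x3 input, 16 filters of shape
# (3, 3), and 'VALID' padding, the NHWC/HWIO dimension numbers give a (30, 30, 16)
# output shape and a (3, 3, 3, 16) kernel shape.
shapes, inits, (strides, padding, one) = conv_info(
    in_shape=(32, 32, 3), out_chan=16, filter_shape=(3, 3), padding='VALID')
out_shape, kernel_shape, bias_shape = shapes
print(out_shape)     # (30, 30, 16)
print(kernel_shape)  # (3, 3, 3, 16)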
| 35.515306
| 78
| 0.644304
|
f4546ec5f67b9126df66038612427cd79680eeee
| 8,269
|
py
|
Python
|
deepsim/deepsim/core/math.py
|
aws-deepracer/deepsim
|
cad2639f525c2f94ec5c03d8b855cc65b0b8ee55
|
[
"Apache-2.0"
] | 1
|
2022-03-25T07:20:49.000Z
|
2022-03-25T07:20:49.000Z
|
deepsim/deepsim/core/math.py
|
aws-deepracer/deepsim
|
cad2639f525c2f94ec5c03d8b855cc65b0b8ee55
|
[
"Apache-2.0"
] | null | null | null |
deepsim/deepsim/core/math.py
|
aws-deepracer/deepsim
|
cad2639f525c2f94ec5c03d8b855cc65b0b8ee55
|
[
"Apache-2.0"
] | null | null | null |
#################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#################################################################################
"""A class for math."""
import math
from typing import Tuple, Union, TypeVar
import numpy as np
Point = TypeVar('Point')
Vector3 = TypeVar('Vector3')
Quaternion = TypeVar('Quaternion')
Euler = TypeVar('Euler')
# The order of rotation applied: yaw (z) -> pitch (y) -> roll (x)
def euler_to_quaternion(roll: float = 0, pitch: float = 0, yaw: float = 0) -> Tuple[float, float, float, float]:
"""
Convert Euler to Quaternion
Args:
roll (float): roll angle in radian (x-axis)
pitch (float): pitch angle in radian (y-axis)
yaw (float): yaw angle in radian (z-axis)
Returns:
Tuple[float, float, float, float]: x, y, z, w
"""
# Abbreviations for the various angular functions
cy = math.cos(yaw * 0.5)
sy = math.sin(yaw * 0.5)
cp = math.cos(pitch * 0.5)
sp = math.sin(pitch * 0.5)
cr = math.cos(roll * 0.5)
sr = math.sin(roll * 0.5)
# Quaternion
w = cr * cp * cy + sr * sp * sy
x = sr * cp * cy - cr * sp * sy
y = cr * sp * cy + sr * cp * sy
z = cr * cp * sy - sr * sp * cy
return x, y, z, w
def quaternion_to_euler(x: float, y: float, z: float, w: float) -> Tuple[float, float, float]:
"""
Convert quaternion x, y, z, w to euler angle roll, pitch, yaw
Args:
x (float): quaternion x
y (float): quaternion y
z (float): quaternion z
w (float): quaternion w
Returns:
Tuple: (roll, pitch, yaw) in radian
"""
# roll (x-axis rotation)
sinr_cosp = 2.0 * (w * x + y * z)
cosr_cosp = 1.0 - 2.0 * (x * x + y * y)
roll = math.atan2(sinr_cosp, cosr_cosp)
# pitch (y-axis rotation)
sinp = 2.0 * (w * y - z * x)
if abs(sinp) >= 1.0:
pitch = math.copysign(math.pi / 2.0, sinp) # use 90 degrees if out of range
else:
pitch = math.asin(sinp)
# yaw (z-axis rotation)
siny_cosp = 2.0 * (w * z + x * y)
cosy_cosp = 1.0 - 2.0 * (y * y + z * z)
yaw = math.atan2(siny_cosp, cosy_cosp)
return roll, pitch, yaw
def project_to_2d(point_on_plane: Union[Point, Vector3],
plane_center: Union[Point, Vector3],
plane_width: float,
plane_height: float,
plane_quaternion: Quaternion) -> Tuple[float, float]:
"""
Project the point to 2d.
Args:
point_on_plane (Union[Point, Vector3]): the point on plane to project.
plane_center (Union[Point, Vector3]): plane center
plane_width (float): width of plane
plane_height (float): height of plane
plane_quaternion (Quaternion): plane orientation
Returns:
Tuple[float, float]: x and y in 2d space scaled between 0.0 and 1.0.
"""
from deepsim.core.vector3 import Vector3
from deepsim.core.quaternion import Quaternion
from deepsim.core.euler import Euler
point_on_plane = point_on_plane if isinstance(point_on_plane, Vector3) else point_on_plane.to_vector()
plane_center = plane_center if isinstance(plane_center, Vector3) else plane_center.to_vector()
# Transpose the center back to origin
point_on_plane_from_origin = point_on_plane - plane_center
# Reverse the rotation so plane can align back to y-axis
inverse_cam_quaternion = plane_quaternion.inverse()
point_on_y_axis = point_on_plane_from_origin.rotate(inverse_cam_quaternion)
# Rotate pitch 90 degree and yaw 90 degree, so plane will align to x and y axis
# Remember rotation order is roll, pitch, yaw in euler_to_quaternion method
project_2d_quaternion = Quaternion.from_euler(Euler(pitch=np.pi / 2.0, yaw=np.pi / 2.0))
point_on_2d_plane = point_on_y_axis.rotate(project_2d_quaternion)
# Align plane to origin at x, y = (0, 0)
point_on_2d_plane = point_on_2d_plane + Vector3.from_list([plane_width / 2.0, plane_height / 2.0, 0.0])
# Re-scale x and y space between 0 and 1
return (point_on_2d_plane[0] / plane_width), (point_on_2d_plane[1] / plane_height)
def lerp(a: float, b: float, t: float) -> float:
"""
Linear Interpolation
Args:
a (float): start value
b (float): end value
t (float): fraction
Returns:
float: interpolated value
"""
t = np.clip(t, 0.0, 1.0).item()
return a + t * (b - a)
def lerp_angle_rad(a: float, b: float, t: float) -> float:
"""
Angular Linear Interpolation in radian.
Args:
a (float): current angle value in radian
b (float): target angle value in radian
t (float): fraction
Returns:
float: interpolated angle value in radian
"""
t = np.clip(t, 0.0, 1.0).item()
max_ang = 2.0 * math.pi
diff = b - a
da = np.clip(diff - diff // max_ang * max_ang, 0.0, max_ang)
return lerp(a, a + ((da - max_ang) if da > math.pi else da), t)
def cross(v1: Vector3, v2: Vector3) -> Vector3:
"""
Cross product of two vectors
Args:
v1 (Vector3): first vector
v2 (Vector3): second vector
Returns:
Vector3: Cross product of two vectors
"""
return v1.cross(v2)
def dot(v1: Union[Vector3, Quaternion], v2: Union[Vector3, Quaternion]) -> float:
"""
Dot product of two vectors or two quaternions
Args:
v1 (Union[Vector3, Quaternion]): first vector or quaternion
v2 (Union[Vector3, Quaternion]): second vector or quaternion
Returns:
float: Dot product of two vectors or two quaternions
"""
return v1.dot(v2)
def magnitude(item: Union[Vector3, Quaternion]) -> float:
"""
Returns the magnitude of given vector or quaternion.
Args:
item (Union[Vector3, Quaternion]): vector or quaternion to find the magnitude.
Returns:
float: the magnitude of given vector or quaternion.
"""
return math.sqrt(item.dot(item))
def sqr_magnitude(item: Union[Vector3, Quaternion]) -> float:
"""
Returns the squared magnitude of given vector or quaternion.
Args:
item (Union[Vector3, Quaternion]): vector or quaternion to find the squared magnitude.
Returns:
float: the squared magnitude of given vector or quaternion.
"""
return item.dot(item)
def unit(item: Union[Vector3, Quaternion]) -> Union[Vector3, Quaternion]:
"""
Returns a unit vector or quaternion in the direction of v
Args:
item (Union[Vector3, Quaternion]): vector or quaternion to find the unit.
Returns:
Union[Vector3, Quaternion]: A unit vector or quaternion in the direction of item
"""
return item / magnitude(item)
def distance(a: Union[Point, Vector3], b: Union[Point, Vector3]) -> float:
"""
    Returns the distance between a and b.
- (a - b).magnitude
Args:
a (Vector3): vector a
b (Vector3): vector b
Returns:
        float: (a - b).magnitude
"""
from deepsim.core.vector3 import Vector3
a = a if isinstance(a, Vector3) else a.to_vector()
b = b if isinstance(b, Vector3) else b.to_vector()
diff = a - b
return diff.magnitude
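# A small round-trip check of the two conversion helpers defined above: converting an
# Euler triple to a quaternion and back should reproduce the original angles to within
# floating-point tolerance (the pitch stays inside (-pi/2, pi/2) to avoid gimbal lock).
import math

roll, pitch, yaw = 0.1, -0.4, 1.25
x, y, z, w = euler_to_quaternion(roll=roll, pitch=pitch, yaw=yaw)
recovered = quaternion_to_euler(x, y, z, w)
assert all(math.isclose(a, b, abs_tol=1e-9) for a, b in zip((roll, pitch, yaw), recovered))
print(recovered)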
| 32.813492
| 112
| 0.58943
|
e324012a32d7e4b62dde14d0c920e33fa27eb7d5
| 30,026
|
py
|
Python
|
airflow/providers/google/cloud/hooks/tasks.py
|
troywinter/airflow
|
ba66ba0d97941c55d9f00f66329a9d3c7ad673e7
|
[
"Apache-2.0"
] | 1
|
2020-11-26T16:00:01.000Z
|
2020-11-26T16:00:01.000Z
|
airflow/providers/google/cloud/hooks/tasks.py
|
troywinter/airflow
|
ba66ba0d97941c55d9f00f66329a9d3c7ad673e7
|
[
"Apache-2.0"
] | 9
|
2020-07-28T15:07:03.000Z
|
2022-03-29T22:27:52.000Z
|
airflow/providers/google/cloud/hooks/tasks.py
|
troywinter/airflow
|
ba66ba0d97941c55d9f00f66329a9d3c7ad673e7
|
[
"Apache-2.0"
] | 1
|
2019-06-15T08:38:53.000Z
|
2019-06-15T08:38:53.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains a CloudTasksHook,
which allows you to connect to the GCP Cloud Tasks service
and perform actions on queues or tasks.
"""
from typing import Dict, List, Optional, Sequence, Tuple, Union
from google.api_core.retry import Retry
from google.cloud.tasks_v2 import CloudTasksClient, enums
from google.cloud.tasks_v2.types import FieldMask, Queue, Task
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class CloudTasksHook(GoogleBaseHook):
"""
Hook for Google Cloud Tasks APIs. Cloud Tasks allows developers to manage
the execution of background work in their applications.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
:type impersonation_chain: Union[str, Sequence[str]]
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
delegate_to=delegate_to,
impersonation_chain=impersonation_chain,
)
self._client = None
def get_conn(self):
"""
Provides a client for interacting with the Cloud Tasks API.
:return: GCP Cloud Tasks API Client
:rtype: google.cloud.tasks_v2.CloudTasksClient
"""
if not self._client:
self._client = CloudTasksClient(
credentials=self._get_credentials(),
client_info=self.client_info
)
return self._client
@GoogleBaseHook.fallback_to_default_project_id
def create_queue(
self,
location: str,
task_queue: Union[Dict, Queue],
project_id: str,
queue_name: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
) -> Queue:
"""
Creates a queue in Cloud Tasks.
:param location: The location name in which the queue will be created.
:type location: str
:param task_queue: The task queue to create.
Queue's name cannot be the same as an existing queue.
If a dict is provided, it must be of the same form as the protobuf message Queue.
:type task_queue: dict or google.cloud.tasks_v2.types.Queue
:param project_id: (Optional) The ID of the GCP project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param queue_name: (Optional) The queue's name.
If provided, it will be used to construct the full queue path.
:type queue_name: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.tasks_v2.types.Queue
"""
client = self.get_conn()
if queue_name:
full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
if isinstance(task_queue, Queue):
task_queue.name = full_queue_name
elif isinstance(task_queue, dict):
task_queue['name'] = full_queue_name
else:
raise AirflowException('Unable to set queue_name.')
full_location_path = CloudTasksClient.location_path(project_id, location)
return client.create_queue(
parent=full_location_path,
queue=task_queue,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def update_queue(
self,
task_queue: Queue,
project_id: str,
location: Optional[str] = None,
queue_name: Optional[str] = None,
update_mask: Optional[FieldMask] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
) -> Queue:
"""
Updates a queue in Cloud Tasks.
:param task_queue: The task queue to update.
This method creates the queue if it does not exist and updates the queue if
it does exist. The queue's name must be specified.
:type task_queue: dict or google.cloud.tasks_v2.types.Queue
:param project_id: (Optional) The ID of the GCP project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param location: (Optional) The location name in which the queue will be updated.
If provided, it will be used to construct the full queue path.
:type location: str
:param queue_name: (Optional) The queue's name.
If provided, it will be used to construct the full queue path.
:type queue_name: str
:param update_mask: A mask used to specify which fields of the queue are being updated.
If empty, then all fields will be updated.
If a dict is provided, it must be of the same form as the protobuf message.
:type update_mask: dict or google.cloud.tasks_v2.types.FieldMask
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.tasks_v2.types.Queue
"""
client = self.get_conn()
if queue_name and location:
full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
if isinstance(task_queue, Queue):
task_queue.name = full_queue_name
elif isinstance(task_queue, dict):
task_queue['name'] = full_queue_name
else:
raise AirflowException('Unable to set queue_name.')
return client.update_queue(
queue=task_queue,
update_mask=update_mask,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def get_queue(
self,
location: str,
queue_name: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
) -> Queue:
"""
Gets a queue from Cloud Tasks.
:param location: The location name in which the queue was created.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param project_id: (Optional) The ID of the GCP project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.tasks_v2.types.Queue
"""
client = self.get_conn()
full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
return client.get_queue(
name=full_queue_name, retry=retry, timeout=timeout, metadata=metadata
)
@GoogleBaseHook.fallback_to_default_project_id
def list_queues(
self,
location: str,
project_id: str,
results_filter: Optional[str] = None,
page_size: Optional[int] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
) -> List[Queue]:
"""
Lists queues from Cloud Tasks.
:param location: The location name in which the queues were created.
:type location: str
:param project_id: (Optional) The ID of the GCP project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param results_filter: (Optional) Filter used to specify a subset of queues.
:type results_filter: str
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:type page_size: int
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: list[google.cloud.tasks_v2.types.Queue]
"""
client = self.get_conn()
full_location_path = CloudTasksClient.location_path(project_id, location)
queues = client.list_queues(
parent=full_location_path,
filter_=results_filter,
page_size=page_size,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return list(queues)
@GoogleBaseHook.fallback_to_default_project_id
def delete_queue(
self,
location: str,
queue_name: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
) -> None:
"""
Deletes a queue from Cloud Tasks, even if it has tasks in it.
:param location: The location name in which the queue will be deleted.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param project_id: (Optional) The ID of the GCP project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
"""
client = self.get_conn()
full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
client.delete_queue(
name=full_queue_name, retry=retry, timeout=timeout, metadata=metadata
)
@GoogleBaseHook.fallback_to_default_project_id
def purge_queue(
self,
location: str,
queue_name: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
) -> Queue:
"""
Purges a queue by deleting all of its tasks from Cloud Tasks.
:param location: The location name in which the queue will be purged.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param project_id: (Optional) The ID of the GCP project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.tasks_v2.types.Queue
"""
client = self.get_conn()
full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
return client.purge_queue(
name=full_queue_name, retry=retry, timeout=timeout, metadata=metadata
)
@GoogleBaseHook.fallback_to_default_project_id
def pause_queue(
self,
location: str,
queue_name: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
) -> Queue:
"""
Pauses a queue in Cloud Tasks.
:param location: The location name in which the queue will be paused.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param project_id: (Optional) The ID of the GCP project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.tasks_v2.types.Queue
"""
client = self.get_conn()
full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
return client.pause_queue(
name=full_queue_name, retry=retry, timeout=timeout, metadata=metadata
)
@GoogleBaseHook.fallback_to_default_project_id
def resume_queue(
self,
location: str,
queue_name: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
) -> Queue:
"""
Resumes a queue in Cloud Tasks.
:param location: The location name in which the queue will be resumed.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param project_id: (Optional) The ID of the GCP project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.tasks_v2.types.Queue
"""
client = self.get_conn()
full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
return client.resume_queue(
name=full_queue_name, retry=retry, timeout=timeout, metadata=metadata
)
@GoogleBaseHook.fallback_to_default_project_id
def create_task(
self,
location: str,
queue_name: str,
task: Union[Dict, Task],
project_id: str,
task_name: Optional[str] = None,
response_view: Optional[enums.Task.View] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
) -> Task:
"""
Creates a task in Cloud Tasks.
:param location: The location name in which the task will be created.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param task: The task to add.
If a dict is provided, it must be of the same form as the protobuf message Task.
:type task: dict or google.cloud.tasks_v2.types.Task
:param project_id: (Optional) The ID of the GCP project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param task_name: (Optional) The task's name.
If provided, it will be used to construct the full task path.
:type task_name: str
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:type response_view: google.cloud.tasks_v2.enums.Task.View
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.tasks_v2.types.Task
"""
client = self.get_conn()
if task_name:
full_task_name = CloudTasksClient.task_path(
project_id, location, queue_name, task_name
)
if isinstance(task, Task):
task.name = full_task_name
elif isinstance(task, dict):
task['name'] = full_task_name
else:
raise AirflowException('Unable to set task_name.')
full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
return client.create_task(
parent=full_queue_name,
task=task,
response_view=response_view,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def get_task(
self,
location: str,
queue_name: str,
task_name: str,
project_id: str,
response_view: Optional[enums.Task.View] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
) -> Task:
"""
Gets a task from Cloud Tasks.
:param location: The location name in which the task was created.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param task_name: The task's name.
:type task_name: str
:param project_id: (Optional) The ID of the GCP project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:type response_view: google.cloud.tasks_v2.enums.Task.View
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.tasks_v2.types.Task
"""
client = self.get_conn()
full_task_name = CloudTasksClient.task_path(project_id, location, queue_name, task_name)
return client.get_task(
name=full_task_name,
response_view=response_view,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def list_tasks(
self,
location: str,
queue_name: str,
project_id: str,
response_view: Optional[enums.Task.View] = None,
page_size: Optional[int] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
) -> List[Task]:
"""
Lists the tasks in Cloud Tasks.
:param location: The location name in which the tasks were created.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param project_id: (Optional) The ID of the GCP project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:type response_view: google.cloud.tasks_v2.enums.Task.View
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:type page_size: int
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: list[google.cloud.tasks_v2.types.Task]
"""
client = self.get_conn()
full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
tasks = client.list_tasks(
parent=full_queue_name,
response_view=response_view,
page_size=page_size,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return list(tasks)
@GoogleBaseHook.fallback_to_default_project_id
def delete_task(
self,
location: str,
queue_name: str,
task_name: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
) -> None:
"""
Deletes a task from Cloud Tasks.
:param location: The location name in which the task will be deleted.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param task_name: The task's name.
:type task_name: str
:param project_id: (Optional) The ID of the GCP project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
"""
client = self.get_conn()
full_task_name = CloudTasksClient.task_path(project_id, location, queue_name, task_name)
client.delete_task(
name=full_task_name, retry=retry, timeout=timeout, metadata=metadata
)
@GoogleBaseHook.fallback_to_default_project_id
def run_task(
self,
location: str,
queue_name: str,
task_name: str,
project_id: str,
response_view: Optional[enums.Task.View] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
) -> Task:
"""
Forces a task to run in Cloud Tasks.
:param location: The location name in which the task was created.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param task_name: The task's name.
:type task_name: str
:param project_id: (Optional) The ID of the GCP project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:type response_view: google.cloud.tasks_v2.enums.Task.View
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.tasks_v2.types.Task
"""
client = self.get_conn()
full_task_name = CloudTasksClient.task_path(project_id, location, queue_name, task_name)
return client.run_task(
name=full_task_name,
response_view=response_view,
retry=retry,
timeout=timeout,
metadata=metadata,
)
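# --- Editor's hedged usage sketch (not part of the upstream provider module) ---
# Shows one way the hook above could be driven; the connection id, project,
# location, queue and task names below are placeholders, not Airflow defaults.
def _example_enqueue_and_run():
    hook = CloudTasksHook(gcp_conn_id="google_cloud_default")
    hook.create_queue(
        location="us-central1",
        task_queue={},  # the hook fills in the full resource name from queue_name
        queue_name="example-queue",
        project_id="example-project",
    )
    hook.create_task(
        location="us-central1",
        queue_name="example-queue",
        task={"app_engine_http_request": {"relative_uri": "/example"}},
        task_name="example-task",
        project_id="example-project",
    )
    return hook.run_task(
        location="us-central1",
        queue_name="example-queue",
        task_name="example-task",
        project_id="example-project",
    )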
| 42.529745
| 96
| 0.643009
|
d250dc6cde24da9826634f0d45793bab58267b81
| 1,405
|
py
|
Python
|
test/test_node_partitions_node_partition_statfs.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
test/test_node_partitions_node_partition_statfs.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
test/test_node_partitions_node_partition_statfs.py
|
Atomicology/isilon_sdk_python
|
91039da803ae37ed4abf8d2a3f59c333f3ef1866
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.node_partitions_node_partition_statfs import NodePartitionsNodePartitionStatfs
class TestNodePartitionsNodePartitionStatfs(unittest.TestCase):
""" NodePartitionsNodePartitionStatfs unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testNodePartitionsNodePartitionStatfs(self):
"""
Test NodePartitionsNodePartitionStatfs
"""
model = swagger_client.models.node_partitions_node_partition_statfs.NodePartitionsNodePartitionStatfs()
if __name__ == '__main__':
unittest.main()
| 28.673469
| 111
| 0.760142
|
dd2cd4738c86a30adb627297acaa3db38fccf148
| 153
|
py
|
Python
|
F_django/Scripts/django-admin.py
|
lincoco/learning_log
|
980c4ae41cd4e34d3208057a77e7d232c389dec3
|
[
"MIT"
] | 1
|
2019-06-03T03:41:26.000Z
|
2019-06-03T03:41:26.000Z
|
F_django/Scripts/django-admin.py
|
lincoco/learning_log
|
980c4ae41cd4e34d3208057a77e7d232c389dec3
|
[
"MIT"
] | null | null | null |
F_django/Scripts/django-admin.py
|
lincoco/learning_log
|
980c4ae41cd4e34d3208057a77e7d232c389dec3
|
[
"MIT"
] | null | null | null |
#!c:\py_\f_django\f_django\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| 25.5
| 46
| 0.777778
|
e4a30816c6b68cb8e83ec352c7a27365d18f31e2
| 5,094
|
py
|
Python
|
src/gamesbyexample/bouncingdvd.py
|
spp2/PythonStdioGames
|
7edc6a07ef816a44579800e773f30217541971fa
|
[
"MIT"
] | null | null | null |
src/gamesbyexample/bouncingdvd.py
|
spp2/PythonStdioGames
|
7edc6a07ef816a44579800e773f30217541971fa
|
[
"MIT"
] | null | null | null |
src/gamesbyexample/bouncingdvd.py
|
spp2/PythonStdioGames
|
7edc6a07ef816a44579800e773f30217541971fa
|
[
"MIT"
] | 1
|
2020-10-12T06:43:06.000Z
|
2020-10-12T06:43:06.000Z
|
"""Bouncing DVD Logo, by Al Sweigart al@inventwithpython.com
A bouncing DVD logo animation. You have to be "of a certain age" to
appreciate this. Press Ctrl-C to stop.
NOTE: Do not resize the terminal window while this program is running.
This and other games are available at https://nostarch.com/XX
Tags: short, artistic, bext, terminal"""
__version__ = 0
import sys, random, time
try:
import bext
except ImportError:
print('This program requires the bext module, which you')
print('can install by following the instructions at')
print('https://pypi.org/project/Bext/')
sys.exit()
# Set up the constants:
WIDTH, HEIGHT = bext.size()
# We can't print to the last column on Windows without it adding a
# newline automatically, so reduce the width by one:
WIDTH -= 1
NUMBER_OF_LOGOS = 5 # (!) Try changing this to 1 or 100.
PAUSE_AMOUNT = 0.2 # (!) Try changing this to 1.0 or 0.0.
# (!) Try changing this list to fewer colors:
COLORS = ['red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white']
UP_RIGHT = 'ur'
UP_LEFT = 'ul'
DOWN_RIGHT = 'dr'
DOWN_LEFT = 'dl'
DIRECTIONS = (UP_RIGHT, UP_LEFT, DOWN_RIGHT, DOWN_LEFT)
# Key names for logo dictionaries:
COLOR = 'color'
X = 'x'
Y = 'y'
DIR = 'direction'
def main():
bext.clear()
# Generate some logos.
logos = []
for i in range(NUMBER_OF_LOGOS):
logos.append({COLOR: random.choice(COLORS),
X: random.randint(1, WIDTH - 4),
Y: random.randint(1, HEIGHT - 4),
DIR: random.choice(DIRECTIONS)})
if logos[-1][X] % 2 == 1:
# Make sure X is even so it can hit the corner.
logos[-1][X] -= 1
cornerBounces = 0 # Count how many times a logo hits a corner.
while True: # Main program loop.
for logo in logos: # Handle each logo in the logos list.
# Erase the logo's current location:
bext.goto(logo[X], logo[Y])
print(' ', end='')
originalDirection = logo[DIR]
# See if the logo bounces off the corners:
if logo[X] == 0 and logo[Y] == 0:
logo[DIR] = DOWN_RIGHT
cornerBounces += 1
elif logo[X] == 0 and logo[Y] == HEIGHT - 1:
logo[DIR] = UP_RIGHT
cornerBounces += 1
elif logo[X] == WIDTH - 3 and logo[Y] == 0:
logo[DIR] = DOWN_LEFT
cornerBounces += 1
elif logo[X] == WIDTH - 3 and logo[Y] == HEIGHT - 1:
logo[DIR] = UP_LEFT
cornerBounces += 1
# See if the logo bounces off the left edge:
elif logo[X] == 0 and logo[DIR] == UP_LEFT:
logo[DIR] = UP_RIGHT
elif logo[X] == 0 and logo[DIR] == DOWN_LEFT:
logo[DIR] = DOWN_RIGHT
# See if the logo bounces off the right edge:
# (WIDTH - 3 because 'DVD' has 3 letters.)
elif logo[X] == WIDTH - 3 and logo[DIR] == UP_RIGHT:
logo[DIR] = UP_LEFT
elif logo[X] == WIDTH - 3 and logo[DIR] == DOWN_RIGHT:
logo[DIR] = DOWN_LEFT
# See if the logo bounces off the top edge:
elif logo[Y] == 0 and logo[DIR] == UP_LEFT:
logo[DIR] = DOWN_LEFT
elif logo[Y] == 0 and logo[DIR] == UP_RIGHT:
logo[DIR] = DOWN_RIGHT
# See if the logo bounces off the bottom edge:
elif logo[Y] == HEIGHT - 1 and logo[DIR] == DOWN_LEFT:
logo[DIR] = UP_LEFT
elif logo[Y] == HEIGHT - 1 and logo[DIR] == DOWN_RIGHT:
logo[DIR] = UP_RIGHT
if logo[DIR] != originalDirection:
# Change color when the logo bounces:
logo[COLOR] = random.choice(COLORS)
# Move the logo. (X moves by 2 because the terminal
# characters are twice as tall as they are wide.)
if logo[DIR] == UP_RIGHT:
logo[X] += 2
logo[Y] -= 1
elif logo[DIR] == UP_LEFT:
logo[X] -= 2
logo[Y] -= 1
elif logo[DIR] == DOWN_RIGHT:
logo[X] += 2
logo[Y] += 1
elif logo[DIR] == DOWN_LEFT:
logo[X] -= 2
logo[Y] += 1
# Display number of corner bounces:
bext.goto(5, 0)
bext.fg('white')
print('Corner bounces:', cornerBounces, end='')
for logo in logos:
# Draw the logos at their new location:
bext.goto(logo[X], logo[Y])
bext.fg(logo[COLOR])
print('DVD', end='')
bext.goto(0, 0)
sys.stdout.flush() # (Required for bext-using programs.)
time.sleep(PAUSE_AMOUNT)
# If this program was run (instead of imported), run the game:
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print()
print('Bouncing DVD Logo, by Al Sweigart')
sys.exit() # When Ctrl-C is pressed, end the program.
| 34.187919
| 71
| 0.540636
|
f56291a9ec5414b75b831e6f75a491faa1950a91
| 3,357
|
py
|
Python
|
apps/connections/statistics.py
|
Houston-ARTCC/zhu-core
|
99a2f95fa7c61e27560557b3a8aee41cc995d101
|
[
"MIT"
] | 1
|
2020-12-25T04:21:07.000Z
|
2020-12-25T04:21:07.000Z
|
apps/connections/statistics.py
|
Houston-ARTCC/zhu-core
|
99a2f95fa7c61e27560557b3a8aee41cc995d101
|
[
"MIT"
] | 3
|
2021-09-21T18:22:51.000Z
|
2021-12-10T20:31:19.000Z
|
apps/connections/statistics.py
|
Houston-ARTCC/zhu-core
|
99a2f95fa7c61e27560557b3a8aee41cc995d101
|
[
"MIT"
] | 1
|
2021-04-13T23:33:39.000Z
|
2021-04-13T23:33:39.000Z
|
from datetime import timedelta
from django.db.models import Sum, Q, DurationField
from django.db.models.functions import Coalesce, Cast
from django.utils import timezone
from .models import ControllerSession
from ..users.models import User, Status
def annotate_hours(query):
"""
Annotates given QuerySet with controlling hours for the
current (curr_hours), previous (prev_hours), and
penultimate (prev_prev_hours) months.
"""
MONTH_NOW = timezone.now().month
YEAR_NOW = timezone.now().year
CURR_MONTH = (Q(sessions__start__month=MONTH_NOW)
& Q(sessions__start__year=YEAR_NOW))
PREV_MONTH = (Q(sessions__start__month=MONTH_NOW - 1 if MONTH_NOW > 1 else 12)
& Q(sessions__start__year=YEAR_NOW if MONTH_NOW > 1 else YEAR_NOW - 1))
PREV_PREV_MONTH = (Q(sessions__start__month=MONTH_NOW - 2 if MONTH_NOW > 2 else 12 if MONTH_NOW > 1 else 11)
& Q(sessions__start__year=YEAR_NOW if MONTH_NOW > 2 else YEAR_NOW - 1))
return query.annotate(
curr_hours=Coalesce(Sum('sessions__duration', filter=CURR_MONTH), Cast(timedelta(), DurationField())),
prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_MONTH), Cast(timedelta(), DurationField())),
prev_prev_hours=Coalesce(Sum('sessions__duration', filter=PREV_PREV_MONTH), Cast(timedelta(), DurationField())),
)
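# --- Editor's hedged illustration (not part of the original module) ---
# Pure-Python restatement of the month roll-over handled by the Q filters above:
# in January the "previous" month is December of the prior year, and the
# "penultimate" month is November of the prior year.
def _example_month_rollover(month_now: int, year_now: int):
    prev = (month_now - 1 if month_now > 1 else 12,
            year_now if month_now > 1 else year_now - 1)
    prev_prev = (month_now - 2 if month_now > 2 else 12 if month_now > 1 else 11,
                 year_now if month_now > 2 else year_now - 1)
    # _example_month_rollover(1, 2022) == ((12, 2021), (11, 2021))
    return prev, prev_prev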
def get_user_hours():
"""
Returns query set of active users annotated with controlling
hours for the current (curr_hours), previous (prev_hours),
and penultimate (prev_prev_hours) months.
"""
return annotate_hours(User.objects.exclude(status=Status.NON_MEMBER))
def get_top_controllers():
"""
Returns query set of active users annotated with controlling
hour sums for the current month (hours) sorted by most
controlling hours (controllers with no hours are not included).
"""
SAME_MONTH = Q(sessions__start__month=timezone.now().month)
SAME_YEAR = Q(sessions__start__year=timezone.now().year)
users = User.objects.exclude(status=Status.NON_MEMBER)
users = users.annotate(hours=Sum('sessions__duration', filter=SAME_MONTH & SAME_YEAR))
return users.exclude(hours__isnull=True).order_by('-hours')
def get_top_positions():
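"""
Returns a list of dicts of the form {'position': str, 'hours': timedelta},
one entry per position controlled in the current month, sorted by most
controlling hours first.
"""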
SAME_MONTH = Q(start__month=timezone.now().month)
SAME_YEAR = Q(start__year=timezone.now().year)
sessions = ControllerSession.objects.filter(SAME_MONTH & SAME_YEAR)
position_durations = {}
for session in sessions:
position = session.facility + '_' + session.level
if position in position_durations:
position_durations[position] += session.duration
else:
position_durations[position] = session.duration
sorted_positions = sorted(position_durations, key=position_durations.get, reverse=True)
return [{'position': position, 'hours': position_durations[position]} for position in sorted_positions]
def get_daily_statistics(year, user=None):
"""
Returns a values QuerySet with one entry per day that has controlling
activity in the given year, annotated with the total controlling hours
for that day.
"""
sessions = ControllerSession.objects.filter(start__year=year)
if user:
sessions = sessions.filter(user=user)
return sessions.extra({'day': 'date(start)'}).values('day').annotate(value=Sum('duration'))
| 39.964286
| 120
| 0.716413
|
ad5d530c875330f18c850d869e31e10471526da8
| 3,063
|
py
|
Python
|
speech.py
|
derHeinz/speech_recognition
|
fe715980826ebdcfff8c416efdf1957a13098c8e
|
[
"Apache-2.0"
] | null | null | null |
speech.py
|
derHeinz/speech_recognition
|
fe715980826ebdcfff8c416efdf1957a13098c8e
|
[
"Apache-2.0"
] | null | null | null |
speech.py
|
derHeinz/speech_recognition
|
fe715980826ebdcfff8c416efdf1957a13098c8e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from speech_recognition import Microphone, RequestError, UnknownValueError
import json
import os
import sys
import logging
from porcupinerecognizer import CallbackCapablePorcupineRecognizer
from tonegenerator_multithreading import ToneGeneratorThread
import postopenhab
from healthcheck import HealthCheck
logger = logging.getLogger(__file__)
def setup_logging():
logging.basicConfig(
stream=sys.stdout,
level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger.info("logger configured")
def read_config_file(filename):
logger.info("reading file {}".format(filename))
relative_path = os.path.join(os.path.dirname(__file__), filename)
data = None
with open(relative_path) as data_file:
return json.load(data_file)
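# --- Editor's hedged example (not part of the original script) ---
# Expected shape of the config.json read above; the keys match those accessed
# in the main block below, while the values are placeholders, not project defaults.
EXAMPLE_CONFIG = {
    "voice_itemname": "VoiceCommandItem",
    "openhab_baseurl": "http://openhab.local:8080",
    "hotword": "computer",
    "health-port": 8080,
}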
if __name__ == '__main__':
# logging
setup_logging()
# read configs
config = read_config_file('config.json')
voice_itemname = config['voice_itemname']
openhab_baseurl = config['openhab_baseurl']
hotword = config['hotword']
# monitoring-health endpoint
h = HealthCheck(config['health-port'])
h.start()
# Initialize the recognizer
a = ToneGeneratorThread()
r = CallbackCapablePorcupineRecognizer(lambda: a.ready(), lambda: a.confirm())
while(True):
# Handle errors that can occur at runtime
try:
# use the microphone as source for input.
with Microphone(chunk_size=512, sample_rate=16000) as src:  # use the same chunk size as porcupine.frame_length
# wait for a second to let the recognizer
# adjust the energy threshold based on
# the surrounding noise level
r.adjust_for_ambient_noise(src, duration=0.2)
# listen for the user's input
logger.info("listening for hotword...")
audio_part = r.listen_hotword(src, phrase_time_limit=20, keywords=[hotword])
logger.info("...returning from listen_hotword")
# Use Google Speech Recognition to transcribe the audio
text = r.recognize_google(audio_part, language="de-DE")
# remove the keyword
if (text is not None):
text = text.replace(hotword, '')
text = text.replace(hotword.title(), '')
text = text.replace(hotword.lower(), '')
text = text.replace(hotword.upper(), '')
text = text.strip()
logger.info("Did you say '{}'".format(text))
postopenhab.post_value_to_openhab(voice_itemname, text, openhab_baseurl)
except RequestError as e:
logger.error("Could not request results; {0}".format(e))
except UnknownValueError:
logger.error("unknown error occurred")
| 35.206897
| 120
| 0.603983
|
0cd926f0660759b0d22e439c89144dc8cc27ba65
| 1,128
|
py
|
Python
|
eth2/beacon/types/proposal_signed_data.py
|
kushagrasharma/trinity
|
0dd33ee304630b93192861626ac5e9eca6fc4d92
|
[
"MIT"
] | null | null | null |
eth2/beacon/types/proposal_signed_data.py
|
kushagrasharma/trinity
|
0dd33ee304630b93192861626ac5e9eca6fc4d92
|
[
"MIT"
] | null | null | null |
eth2/beacon/types/proposal_signed_data.py
|
kushagrasharma/trinity
|
0dd33ee304630b93192861626ac5e9eca6fc4d92
|
[
"MIT"
] | null | null | null |
from eth_typing import (
Hash32,
)
import rlp
from eth2.beacon._utils.hash import hash_eth2
from eth2.beacon.sedes import (
uint64,
hash32,
)
from eth2.beacon.typing import (
SlotNumber,
ShardNumber,
)
class ProposalSignedData(rlp.Serializable):
"""
Note: using RLP until we have standardized serialization format.
"""
fields = [
# Slot number
('slot', uint64),
# Shard number (or `2**64 - 1` for beacon chain)
('shard', uint64),
# block root
('block_root', hash32),
]
def __init__(self,
slot: SlotNumber,
shard: ShardNumber,
block_root: Hash32) -> None:
super().__init__(
slot,
shard,
block_root,
)
_hash = None
@property
def hash(self) -> Hash32:
if self._hash is None:
self._hash = hash_eth2(rlp.encode(self))
return self._hash
@property
def root(self) -> Hash32:
# Alias of `hash`.
# Using flat hash, might change to SSZ tree hash.
return self.hash
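# --- Editor's hedged usage sketch (not part of the original module) ---
# Constructing a proposal and reading its root; the slot, shard, and block root
# values are placeholders only.
def _example_proposal() -> 'ProposalSignedData':
    proposal = ProposalSignedData(
        slot=SlotNumber(10),
        shard=ShardNumber(2),
        block_root=Hash32(b'\x00' * 32),
    )
    assert proposal.root == proposal.hash  # `root` is an alias of `hash`
    return proposal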
| 20.888889
| 68
| 0.553191
|
51d6977ca2cea8afc292595613721344730eb8d2
| 1,914
|
py
|
Python
|
Mdd-master/willusetfidf.py
|
ayushsahu1999/startupGuidance
|
f6ca8c2c81cdd1f135aedd780b18771bd2681acd
|
[
"MIT"
] | null | null | null |
Mdd-master/willusetfidf.py
|
ayushsahu1999/startupGuidance
|
f6ca8c2c81cdd1f135aedd780b18771bd2681acd
|
[
"MIT"
] | null | null | null |
Mdd-master/willusetfidf.py
|
ayushsahu1999/startupGuidance
|
f6ca8c2c81cdd1f135aedd780b18771bd2681acd
|
[
"MIT"
] | 2
|
2019-10-13T16:14:36.000Z
|
2020-10-10T08:36:40.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 17 22:13:41 2019
@author: Dell
"""
# Making the Uniqueness Function of MDD Model
# Let's get started BITCH!
# Importing the Libraries
def Uniqueness():
import numpy as np
import pandas as pd
import math
# Importing the dataset
dataset = pd.read_csv('startup_funding_1.csv')
# Cleaning the texts and tf
import re
import nltk
# nltk.download('stopwords')
from nltk.corpus import stopwords
corpus = []
ct = 0
oc = []
for i in range(0, 347):
review = re.sub('[^a-zA-Z]', ' ', dataset['SubVertical'][i])
review = review.lower()
review = review.split()
corpus.append(review)
# count occurrences of each word in this document (term frequency counts)
d = 0
ct = [0]*len(review)
for word in review:
ct[d] = ct[d] + review.count(word)
d = d + 1
oc.append(ct)
for i in range(len(oc)):
for j in range(len(oc[i])):
oc[i][j] = oc[i][j]/(len(oc[i]))
# Getting the Idf Value
i = 0
j = 0
k = 0
l = 0
money = 0
idf1 = []
no = 0
for i in range(0, 347):
idf = [0] * len(corpus[i])
for j in range(0, len(corpus[i])):
no = 0
for k in range(0, 347):
money = 0
for l in range(0, len(corpus[k])):
if (corpus[i][j] == corpus[k][l]):
money = money + 1
if (money > 0):
no = no + 1
idf[j] = no
idf1.append(idf)
i = 0
j = 0
for i in range(0, 347):
for j in range(0, len(corpus[i])):
idf1[i][j] = math.log((347/idf1[i][j]))
# Making the TFIDF Model
i = 0
j = 0
for i in range(0, 347):
for j in range(0, len(oc[i])):
idf1[i][j] = oc[i][j] * idf1[i][j]
return (idf1)
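# --- Editor's hedged illustration (not part of the original script) ---
# Restates the tf-idf weighting computed above on a toy two-document corpus;
# the numbers are illustrative only.
def _example_tfidf():
    import math
    docs = [['food', 'delivery'], ['food', 'tech']]
    def tfidf(word, doc):
        tf = doc.count(word) / len(doc)
        df = sum(1 for d in docs if word in d)
        return tf * math.log(len(docs) / df)
    # 'food' appears in every document, so its idf (and tf-idf) is 0, while
    # 'delivery' appears in one of two documents: 0.5 * log(2) ~= 0.347.
    return tfidf('food', docs[0]), tfidf('delivery', docs[0])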
| 22
| 68
| 0.471264
|
a06fb51a97ccf6c6e5d1bf55e453bba47b6aa4db
| 12,509
|
py
|
Python
|
tests/test_keep_largest_connected_componentd.py
|
function2-llx/MONAI
|
4cddaa830b61b88ec78e089bb5f21e05bb1a78f4
|
[
"Apache-2.0"
] | 1
|
2022-03-16T01:18:43.000Z
|
2022-03-16T01:18:43.000Z
|
tests/test_keep_largest_connected_componentd.py
|
function2-llx/MONAI
|
4cddaa830b61b88ec78e089bb5f21e05bb1a78f4
|
[
"Apache-2.0"
] | null | null | null |
tests/test_keep_largest_connected_componentd.py
|
function2-llx/MONAI
|
4cddaa830b61b88ec78e089bb5f21e05bb1a78f4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from parameterized import parameterized
from monai.transforms import KeepLargestConnectedComponentd
from tests.utils import TEST_NDARRAYS, assert_allclose
grid_1 = [[[0, 0, 1, 0, 0], [0, 2, 1, 1, 1], [1, 2, 1, 0, 0], [1, 2, 0, 1, 0], [2, 2, 0, 0, 2]]]
grid_2 = [[[0, 0, 0, 0, 1], [0, 0, 1, 1, 1], [1, 0, 1, 1, 2], [1, 0, 1, 2, 2], [0, 0, 0, 0, 1]]]
grid_3 = [
[
[1.0, 1.0, 0.0, 1.0, 1.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 0.0],
],
[
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[1.0, 0.0, 1.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 1.0],
],
]
grid_4 = [
[
[1.0, 1.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[1.0, 0.0, 1.0, 1.0, 0.0],
[1.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
],
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
]
grid_5 = [[[0, 0, 1, 0, 0], [0, 1, 1, 1, 1], [1, 1, 1, 0, 0], [1, 1, 0, 1, 0], [1, 1, 0, 0, 1]]]
VALID_CASES = []
for p in TEST_NDARRAYS:
VALID_CASES.append(
[
"value_1",
{"keys": ["img"], "independent": False, "applied_labels": 1, "is_onehot": False},
{"img": p(grid_1)},
torch.tensor([[[0, 0, 1, 0, 0], [0, 2, 1, 1, 1], [0, 2, 1, 0, 0], [0, 2, 0, 1, 0], [2, 2, 0, 0, 2]]]),
]
)
VALID_CASES.append(
[
"value_2",
{"keys": ["img"], "independent": False, "applied_labels": [2], "is_onehot": False},
{"img": p(grid_1)},
torch.tensor([[[0, 0, 1, 0, 0], [0, 2, 1, 1, 1], [1, 2, 1, 0, 0], [1, 2, 0, 1, 0], [2, 2, 0, 0, 0]]]),
]
)
VALID_CASES.append(
[
"independent_value_1_2",
{"keys": ["img"], "independent": True, "applied_labels": [1, 2], "is_onehot": False},
{"img": p(grid_1)},
torch.tensor([[[0, 0, 1, 0, 0], [0, 2, 1, 1, 1], [0, 2, 1, 0, 0], [0, 2, 0, 1, 0], [2, 2, 0, 0, 0]]]),
]
)
VALID_CASES.append(
[
"dependent_value_1_2",
{"keys": ["img"], "independent": False, "applied_labels": [1, 2], "is_onehot": False},
{"img": p(grid_1)},
torch.tensor([[[0, 0, 1, 0, 0], [0, 2, 1, 1, 1], [1, 2, 1, 0, 0], [1, 2, 0, 1, 0], [2, 2, 0, 0, 2]]]),
]
)
VALID_CASES.append(
[
"value_1",
{"keys": ["img"], "independent": True, "applied_labels": [1], "is_onehot": False},
{"img": p(grid_2)},
torch.tensor([[[0, 0, 0, 0, 1], [0, 0, 1, 1, 1], [0, 0, 1, 1, 2], [0, 0, 1, 2, 2], [0, 0, 0, 0, 0]]]),
]
)
VALID_CASES.append(
[
"independent_value_1_2",
{"keys": ["img"], "independent": True, "applied_labels": [1, 2], "is_onehot": False},
{"img": p(grid_2)},
torch.tensor([[[0, 0, 0, 0, 1], [0, 0, 1, 1, 1], [0, 0, 1, 1, 2], [0, 0, 1, 2, 2], [0, 0, 0, 0, 0]]]),
]
)
VALID_CASES.append(
[
"dependent_value_1_2",
{"keys": ["img"], "independent": False, "applied_labels": [1, 2], "is_onehot": False},
{"img": p(grid_2)},
torch.tensor([[[0, 0, 0, 0, 1], [0, 0, 1, 1, 1], [0, 0, 1, 1, 2], [0, 0, 1, 2, 2], [0, 0, 0, 0, 1]]]),
]
)
VALID_CASES.append(
[
"value_1_connect_1",
{"keys": ["img"], "independent": False, "applied_labels": [1], "connectivity": 1, "is_onehot": False},
{"img": p(grid_1)},
torch.tensor([[[0, 0, 1, 0, 0], [0, 2, 1, 1, 1], [0, 2, 1, 0, 0], [0, 2, 0, 0, 0], [2, 2, 0, 0, 2]]]),
]
)
VALID_CASES.append(
[
"independent_value_1_2_connect_1",
{"keys": ["img"], "independent": True, "applied_labels": [1, 2], "connectivity": 1, "is_onehot": False},
{"img": p(grid_1)},
torch.tensor([[[0, 0, 1, 0, 0], [0, 2, 1, 1, 1], [0, 2, 1, 0, 0], [0, 2, 0, 0, 0], [2, 2, 0, 0, 0]]]),
]
)
VALID_CASES.append(
[
"onehot_none_dependent_value_1_2_connect_1",
{"keys": ["img"], "independent": False, "applied_labels": [1, 2], "connectivity": 1},
{"img": p(grid_1)},
torch.tensor([[[0, 0, 1, 0, 0], [0, 2, 1, 1, 1], [1, 2, 1, 0, 0], [1, 2, 0, 0, 0], [2, 2, 0, 0, 0]]]),
]
)
VALID_CASES.append(
[
"onehot_independent_batch_2_apply_label_1_connect_1",
{"keys": ["img"], "independent": True, "applied_labels": [1], "connectivity": 1, "is_onehot": True},
{"img": p(grid_3)},
torch.tensor(
[
[
[1.0, 1.0, 0.0, 1.0, 1.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 0.0],
],
[
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 1.0],
],
]
),
]
)
VALID_CASES.append(
[
"onehot_independent_batch_2_apply_label_1_connect_2",
{"keys": ["img"], "independent": True, "applied_labels": [1], "connectivity": 2, "is_onehot": True},
{"img": p(grid_3)},
torch.tensor(
[
[
[1.0, 1.0, 0.0, 1.0, 1.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 0.0],
],
[
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 1.0],
],
]
),
]
)
VALID_CASES.append(
[
"onehot_independent_batch_2_apply_label_1_2_connect_2",
{"keys": ["img"], "independent": True, "applied_labels": [1, 2], "connectivity": 2, "is_onehot": True},
{"img": p(grid_3)},
torch.tensor(
[
[
[1.0, 1.0, 0.0, 1.0, 1.0],
[1.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 0.0],
],
[
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 0.0],
],
]
),
]
)
VALID_CASES.append(
[
"onehot_dependent_batch_2_apply_label_1_2_connect_2",
{"keys": ["img"], "independent": False, "applied_labels": [1, 2], "connectivity": 2, "is_onehot": True},
{"img": p(grid_4)},
torch.tensor(
[
[
[1.0, 1.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
],
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
]
),
]
)
VALID_CASES.append(
[
"onehot_none_dependent_batch_2_apply_label_1_2_connect_1",
{"keys": ["img"], "independent": False, "applied_labels": [1, 2], "connectivity": 1},
{"img": p(grid_4)},
torch.tensor(
[
[
[1.0, 1.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 0.0],
],
[
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
],
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
]
),
]
)
VALID_CASES.append(
[
"single_channel_onehot",
{"keys": ["img"], "independent": False, "applied_labels": 0, "connectivity": 1, "is_onehot": True},
{"img": p(grid_5)},
torch.tensor([[[0, 0, 1, 0, 0], [0, 1, 1, 1, 1], [1, 1, 1, 0, 0], [1, 1, 0, 0, 0], [1, 1, 0, 0, 0]]]),
]
)
class TestKeepLargestConnectedComponentd(unittest.TestCase):
@parameterized.expand(VALID_CASES)
def test_correct_results(self, _, args, input_dict, expected):
converter = KeepLargestConnectedComponentd(**args)
result = converter(input_dict)
assert_allclose(result["img"], expected, type_test=False)
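# --- Editor's hedged usage sketch (not part of the original test module) ---
# Applies the dictionary transform directly, mirroring the "value_1" case above.
def _example_direct_usage():
    transform = KeepLargestConnectedComponentd(
        keys=["img"], independent=False, applied_labels=1, is_onehot=False
    )
    return transform({"img": torch.tensor(grid_1)})["img"]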
if __name__ == "__main__":
unittest.main()
| 36.048991
| 116
| 0.34679
|
09e6bb214cc83de36d8633ec0b2628a12398198d
| 2,029
|
py
|
Python
|
setup.py
|
ScriptBox99/deepmind-sonnet
|
5cbfdc356962d9b6198d5b63f0826a80acfdf35b
|
[
"Apache-2.0"
] | 10,287
|
2017-04-07T12:33:37.000Z
|
2022-03-30T03:32:16.000Z
|
setup.py
|
ScriptBox99/deepmind-sonnet
|
5cbfdc356962d9b6198d5b63f0826a80acfdf35b
|
[
"Apache-2.0"
] | 209
|
2017-04-07T15:57:11.000Z
|
2022-03-27T10:43:03.000Z
|
setup.py
|
ScriptBox99/deepmind-sonnet
|
5cbfdc356962d9b6198d5b63f0826a80acfdf35b
|
[
"Apache-2.0"
] | 1,563
|
2017-04-07T13:15:06.000Z
|
2022-03-29T15:26:04.000Z
|
"""Setup for pip package."""
from setuptools import find_namespace_packages
from setuptools import setup
def _get_sonnet_version():
with open('sonnet/__init__.py') as fp:
for line in fp:
if line.startswith('__version__'):
g = {}
exec(line, g) # pylint: disable=exec-used
return g['__version__']
raise ValueError('`__version__` not defined in `sonnet/__init__.py`')
def _parse_requirements(requirements_txt_path):
with open(requirements_txt_path) as fp:
return fp.read().splitlines()
_VERSION = _get_sonnet_version()
EXTRA_PACKAGES = {
'tensorflow': ['tensorflow>=2'],
'tensorflow with gpu': ['tensorflow-gpu>=2'],
}
setup(
name='dm-sonnet',
version=_VERSION,
url='https://github.com/deepmind/sonnet',
license='Apache 2.0',
author='DeepMind',
description=(
'Sonnet is a library for building neural networks in TensorFlow.'),
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
author_email='sonnet-dev-os@google.com',
# Contained modules and scripts.
packages=find_namespace_packages(exclude=['*_test.py']),
install_requires=_parse_requirements('requirements.txt'),
extras_require=EXTRA_PACKAGES,
tests_require=_parse_requirements('requirements-test.txt'),
python_requires='>=3.6',
include_package_data=True,
zip_safe=False,
# PyPI package information.
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries',
],
)
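# --- Editor's hedged usage note (not part of the original setup script) ---
# The extras declared in EXTRA_PACKAGES above are selected at install time,
# e.g. `pip install dm-sonnet[tensorflow]`, which adds tensorflow>=2 on top of
# the base requirements parsed from requirements.txt.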
| 32.206349
| 75
| 0.66831
|
a6fa75d51247bb002b5fbfa7ce99d2439332ff22
| 2,415
|
py
|
Python
|
spiceypy/utils/callbacks.py
|
seignovert/SpiceyPy
|
6bee6198d4842d38ea9c6e9f4a54976407ce8d26
|
[
"MIT"
] | null | null | null |
spiceypy/utils/callbacks.py
|
seignovert/SpiceyPy
|
6bee6198d4842d38ea9c6e9f4a54976407ce8d26
|
[
"MIT"
] | null | null | null |
spiceypy/utils/callbacks.py
|
seignovert/SpiceyPy
|
6bee6198d4842d38ea9c6e9f4a54976407ce8d26
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) [2015-2017] [Andrew Annex]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import functools
from ctypes import c_bool, c_double, POINTER, CFUNCTYPE, byref
UDFUNS = CFUNCTYPE(None, c_double, POINTER(c_double))
UDFUNB = CFUNCTYPE(None, UDFUNS, c_double, POINTER(c_bool))
def SpiceUDFUNS(f):
"""
Decorator for wrapping python functions in spice udfuns callback type
:param f: function that has one argument of type float, and returns a float
:type f: builtins.function
:return: wrapped udfunc function
:rtype: builtins.function
"""
@functools.wraps(f)
def wrapping_udfuns(x, value):
result = f(x)
value[0] = c_double(result)
return UDFUNS(wrapping_udfuns)
def SpiceUDFUNB(f):
"""
Decorator for wrapping python functions in spice udfunb callback type
:param f: function that takes a UDFUNS callback and a float epoch, and returns a bool
:type f: builtins.function
:return: wrapped udfunb function
:rtype: builtins.function
"""
@functools.wraps(f)
def wrapping_udfunb(udf, et, xbool):
result = f(udf, et)  # the wrapped function takes a UDFUNS callback as an argument
xbool[0] = c_bool(result)
return UDFUNB(wrapping_udfunb)
def CallUDFUNS(f, x):
"""
We are given a UDF CFUNCTYPE and want to call it in python
:param f: SpiceUDFUNS
:type f: CFUNCTYPE
:param x: some scalar
:type x: float
:return: value
:rtype: float
"""
value = c_double()
f(x, byref(value))
return value.value
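# --- Editor's hedged usage sketch (not part of the original module) ---
# Wraps a plain Python function as a SPICE scalar callback and invokes it
# through the ctypes layer defined above.
def _example_scalar_callback():
    @SpiceUDFUNS
    def double_et(et):
        return et * 2.0
    return CallUDFUNS(double_et, 3.0)  # expected 6.0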
| 32.2
| 79
| 0.722567
|
ae929dd48f7f4d6040d8671348b2ac7dd7334e06
| 2,464
|
py
|
Python
|
pokemongo_bot/event_manager.py
|
walaoaaa1234/PokemonGo-Bot_
|
24affedf5ff4b70aa816da00a6186a9eb37dc40f
|
[
"MIT"
] | 1
|
2018-12-29T07:47:48.000Z
|
2018-12-29T07:47:48.000Z
|
pokemongo_bot/event_manager.py
|
walaoaaa1234/PokemonGo-Bot_
|
24affedf5ff4b70aa816da00a6186a9eb37dc40f
|
[
"MIT"
] | 2
|
2021-03-25T21:33:45.000Z
|
2021-06-01T21:42:48.000Z
|
pokemongo_bot/event_manager.py
|
walaoaaa1234/PokemonGo-Bot_
|
24affedf5ff4b70aa816da00a6186a9eb37dc40f
|
[
"MIT"
] | 1
|
2018-02-10T23:56:46.000Z
|
2018-02-10T23:56:46.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
from sys import stdout
class EventNotRegisteredException(Exception):
pass
class EventMalformedException(Exception):
pass
class EventHandler(object):
def __init__(self):
pass
def handle_event(self, event, kwargs):
raise NotImplementedError("Please implement")
class EventManager(object):
def __init__(self, limit_output=False, *handlers):
self._registered_events = dict()
self._handlers = list(handlers) or []
self._last_event = None
self._limit_output = limit_output
def event_report(self):
for event, parameters in self._registered_events.iteritems():
print('-'*80)
print('Event: {}'.format(event))
if parameters:
print('Parameters:')
for parameter in parameters:
print('* {}'.format(parameter))
def add_handler(self, event_handler):
self._handlers.append(event_handler)
def register_event(self, name, parameters=[]):
self._registered_events[name] = parameters
def emit(self, event, sender=None, level='info', formatted='', data={}):
if not sender:
raise ValueError('Event needs a sender!')
levels = ['info', 'warning', 'error', 'critical', 'debug']
if not level in levels:
raise ArgumentError('Event level needs to be in: {}'.format(levels))
if event not in self._registered_events:
raise EventNotRegisteredException("Event %s not registered..." % event)
if self._limit_output:
if (event == self._last_event) and (event in ["moving_to_fort", "moving_to_lured_fort", "position_update"]):
stdout.write("\033[1A\033[0K\r")
stdout.flush()
if level == "info" and formatted:
self._last_event = event
# verify params match event
parameters = self._registered_events[event]
if parameters:
for k, v in data.iteritems():
if k not in parameters:
raise EventMalformedException("Event %s does not require parameter %s" % (event, k))
formatted_msg = formatted.format(**data)
# send off to the handlers
for handler in self._handlers:
handler.handle_event(event, sender, level, formatted_msg, data)
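# Minimal usage sketch: the 'walked' event, its 'distance' parameter and the
# PrintHandler class below are hypothetical; they only illustrate the
# register_event / add_handler / emit flow implemented above.
class PrintHandler(EventHandler):
    def handle_event(self, event, sender, level, formatted_msg, data):
        print(formatted_msg)
def _event_manager_example():
    manager = EventManager()
    manager.add_handler(PrintHandler())
    manager.register_event('walked', parameters=['distance'])
    manager.emit('walked', sender=manager, level='info',
                 formatted='walked {distance} m', data={'distance': 12})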
| 31.589744
| 120
| 0.620536
|
21dda736c3d4aaee37185a85702ad8c80c0265a8
| 7,296
|
py
|
Python
|
activitysim/activitysim/core/util.py
|
ual/DOE-repo-deliverable
|
4bafdd9a702a9a6466dd32ae62f440644d735d3c
|
[
"BSD-3-Clause"
] | null | null | null |
activitysim/activitysim/core/util.py
|
ual/DOE-repo-deliverable
|
4bafdd9a702a9a6466dd32ae62f440644d735d3c
|
[
"BSD-3-Clause"
] | null | null | null |
activitysim/activitysim/core/util.py
|
ual/DOE-repo-deliverable
|
4bafdd9a702a9a6466dd32ae62f440644d735d3c
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import psutil
import gc
from operator import itemgetter
import numpy as np
import pandas as pd
from zbox import toolz as tz
def memory_info():
gc.collect()
process = psutil.Process(os.getpid())
bytes = process.memory_info().rss
mb = (bytes / (1024 * 1024.0))
gb = (bytes / (1024 * 1024 * 1024.0))
return "memory_info: %s MB (%s GB)" % (int(mb), round(gb, 2))
def left_merge_on_index_and_col(left_df, right_df, join_col, target_col):
"""
like pandas left merge, but join on both index and a specified join_col
    FIXME - for now return a series of values from the specified right_df target_col
Parameters
----------
left_df : pandas DataFrame
index name assumed to be same as that of right_df
right_df : pandas DataFrame
index name assumed to be same as that of left_df
join_col : str
name of column to join on (in addition to index values)
should have same name in both dataframes
target_col : str
name of column from right_df whose joined values should be returned as series
Returns
-------
target_series : pandas Series
series of target_col values with same index as left_df
i.e. values joined to left_df from right_df with index of left_df
"""
assert left_df.index.name == right_df.index.name
# want to know name previous index column will have after reset_index
idx_col = right_df.index.name
# SELECT target_col FROM full_sample LEFT JOIN unique_sample on idx_col, join_col
merged = \
pd.merge(
left_df[[join_col]].reset_index(),
right_df[[join_col, target_col]].reset_index(),
on=[idx_col, join_col],
how="left")
merged.set_index(idx_col, inplace=True)
return merged[target_col]
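# Illustrative sketch with hypothetical data: both frames share the index name
# 'hh_id' and a 'zone' join column; the returned Series of 'size' values is
# aligned to left_df's index.
def _left_merge_example():
    left_df = pd.DataFrame({'zone': [10, 20]},
                           index=pd.Index([1, 2], name='hh_id'))
    right_df = pd.DataFrame({'zone': [10, 20], 'size': [5.0, 7.0]},
                            index=pd.Index([1, 2], name='hh_id'))
    return left_merge_on_index_and_col(left_df, right_df, 'zone', 'size')
    # -> Series([5.0, 7.0]) indexed like left_df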
def reindex(series1, series2):
"""
This reindexes the first series by the second series. This is an extremely
common operation that does not appear to be in Pandas at this time.
If anyone knows of an easier way to do this in Pandas, please inform the
UrbanSim developers.
The canonical example would be a parcel series which has an index which is
parcel_ids and a value which you want to fetch, let's say it's land_area.
Another dataset, let's say of buildings has a series which indicate the
parcel_ids that the buildings are located on, but which does not have
land_area. If you pass parcels.land_area as the first series and
buildings.parcel_id as the second series, this function returns a series
which is indexed by buildings and has land_area as values and can be
added to the buildings dataset.
In short, this is a join on to a different table using a foreign key
stored in the current table, but with only one attribute rather than
for a full dataset.
This is very similar to the pandas "loc" function or "reindex" function,
but neither of those functions return the series indexed on the current
table. In both of those cases, the series would be indexed on the foreign
table and would require a second step to change the index.
Parameters
----------
series1, series2 : pandas.Series
Returns
-------
reindexed : pandas.Series
"""
# turns out the merge is much faster than the .loc below
df = pd.merge(series2.to_frame(name='left'),
series1.to_frame(name='right'),
left_on="left",
right_index=True,
how="left")
return df.right
# return pd.Series(series1.loc[series2.values].values, index=series2.index)
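# Illustrative sketch of the parcels/buildings example described in the
# docstring above (hypothetical data): the result is land_area re-indexed onto
# the buildings.
def _reindex_example():
    land_area = pd.Series([100.0, 250.0],
                          index=pd.Index([1, 2], name='parcel_id'))
    parcel_id = pd.Series([2, 1, 2],
                          index=pd.Index([10, 11, 12], name='building_id'))
    return reindex(land_area, parcel_id)
    # -> Series([250.0, 100.0, 250.0]) indexed by building_id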
def other_than(groups, bools):
"""
Construct a Series that has booleans indicating the presence of
something- or someone-else with a certain property within a group.
Parameters
----------
groups : pandas.Series
A column with the same index as `bools` that defines the grouping
of `bools`. The `bools` Series will be used to index `groups` and
then the grouped values will be counted.
bools : pandas.Series
A boolean Series indicating where the property of interest is present.
Should have the same index as `groups`.
Returns
-------
others : pandas.Series
A boolean Series with the same index as `groups` and `bools`
indicating whether there is something- or something-else within
a group with some property (as indicated by `bools`).
"""
counts = groups[bools].value_counts()
merge_col = groups.to_frame(name='right')
pipeline = tz.compose(
tz.curry(pd.Series.fillna, value=False),
itemgetter('left'),
tz.curry(
pd.DataFrame.merge, right=merge_col, how='right', left_index=True,
right_on='right'),
tz.curry(pd.Series.to_frame, name='left'))
gt0 = pipeline(counts > 0)
gt1 = pipeline(counts > 1)
return gt1.where(bools, other=gt0)
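# Illustrative sketch with hypothetical data: for each person, is there someone
# ELSE in the same household who drives?
def _other_than_example():
    groups = pd.Series([1, 1, 2, 2], index=[10, 11, 12, 13])           # household id per person
    bools = pd.Series([True, False, False, True], index=groups.index)  # person is a driver
    return other_than(groups, bools)
    # person 10 -> False (only driver in hh 1), 11 -> True, 12 -> True, 13 -> False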
def quick_loc_df(loc_list, target_df, attribute):
"""
faster replacement for target_df.loc[loc_list][attribute]
pandas DataFrame.loc[] indexing doesn't scale for large arrays (e.g. > 1,000,000 elements)
Parameters
----------
loc_list : list-like (numpy.ndarray, pandas.Int64Index, or pandas.Series)
target_df : pandas.DataFrame containing column named attribute
    attribute : name of column from target_df to return
Returns
-------
pandas.Series
"""
left_on = "left"
if isinstance(loc_list, pd.Int64Index):
left_df = pd.DataFrame({left_on: loc_list.values})
elif isinstance(loc_list, pd.Series):
left_df = loc_list.to_frame(name=left_on)
elif isinstance(loc_list, np.ndarray):
left_df = pd.DataFrame({left_on: loc_list})
else:
raise RuntimeError("quick_loc_df loc_list of unexpected type %s" % type(loc_list))
df = pd.merge(left_df,
target_df[[attribute]],
left_on=left_on,
right_index=True,
how="left")
# regression test
# assert list(df[attribute]) == list(target_df.loc[loc_list][attribute])
return df[attribute]
def quick_loc_series(loc_list, target_series):
"""
faster replacement for target_series.loc[loc_list]
pandas Series.loc[] indexing doesn't scale for large arrays (e.g. > 1,000,000 elements)
Parameters
----------
loc_list : list-like (numpy.ndarray, pandas.Int64Index, or pandas.Series)
target_series : pandas.Series
Returns
-------
pandas.Series
"""
left_on = "left"
if isinstance(loc_list, pd.Int64Index):
left_df = pd.DataFrame({left_on: loc_list.values})
elif isinstance(loc_list, pd.Series):
left_df = loc_list.to_frame(name=left_on)
elif isinstance(loc_list, np.ndarray):
left_df = pd.DataFrame({left_on: loc_list})
else:
raise RuntimeError("quick_loc_series loc_list of unexpected type %s" % type(loc_list))
df = pd.merge(left_df,
target_series.to_frame(name='right'),
left_on=left_on,
right_index=True,
how="left")
# regression test
# assert list(df.right) == list(target_series.loc[loc_list])
return df.right
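# Illustrative sketch with hypothetical data: quick_loc_series behaves like
# target.loc[loc_list] but uses a merge instead of label indexing, which scales
# better when loc_list is very large.
def _quick_loc_example():
    target = pd.Series([1.5, 2.5, 3.5], index=[100, 200, 300])
    return quick_loc_series(np.array([300, 100, 300]), target)
    # -> values [3.5, 1.5, 3.5], equivalent to target.loc[[300, 100, 300]]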
| 32.426667
| 94
| 0.658306
|
f2e4158eef445ba8f1bc21de095f108bae4aa8ea
| 8,038
|
py
|
Python
|
flash/tabular/forecasting/data.py
|
ar90n/lightning-flash
|
61e1a2d3b72f8fbbffe6ace14fb5b5bb35c5f131
|
[
"Apache-2.0"
] | null | null | null |
flash/tabular/forecasting/data.py
|
ar90n/lightning-flash
|
61e1a2d3b72f8fbbffe6ace14fb5b5bb35c5f131
|
[
"Apache-2.0"
] | null | null | null |
flash/tabular/forecasting/data.py
|
ar90n/lightning-flash
|
61e1a2d3b72f8fbbffe6ace14fb5b5bb35c5f131
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional, Type, Union
from torch.utils.data.sampler import Sampler
from flash.core.data.callback import BaseDataFetcher
from flash.core.data.data_module import DataModule
from flash.core.data.io.input import Input
from flash.core.data.io.input_transform import INPUT_TRANSFORM_TYPE, InputTransform
from flash.core.utilities.imports import _PANDAS_AVAILABLE, _TABULAR_TESTING
from flash.core.utilities.stages import RunningStage
from flash.tabular.forecasting.input import TabularForecastingDataFrameInput
if _PANDAS_AVAILABLE:
from pandas.core.frame import DataFrame
else:
DataFrame = object
# Skip doctests if requirements aren't available
if not _TABULAR_TESTING:
__doctest_skip__ = ["TabularForecastingData", "TabularForecastingData.*"]
class TabularForecastingData(DataModule):
"""The ``TabularForecastingData`` class is a :class:`~flash.core.data.data_module.DataModule` with a set of
classmethods for loading data for tabular forecasting."""
input_transform_cls = InputTransform
@property
def parameters(self) -> Optional[Dict[str, Any]]:
"""The ``parameters`` dictionary from the ``TimeSeriesDataSet`` object created from the train data when
constructing the ``TabularForecastingData`` object."""
return getattr(self.train_dataset, "parameters", None)
@classmethod
def from_data_frame(
cls,
time_idx: Optional[str] = None,
target: Optional[Union[str, List[str]]] = None,
group_ids: Optional[List[str]] = None,
parameters: Optional[Dict[str, Any]] = None,
train_data_frame: Optional[DataFrame] = None,
val_data_frame: Optional[DataFrame] = None,
test_data_frame: Optional[DataFrame] = None,
predict_data_frame: Optional[DataFrame] = None,
input_cls: Type[Input] = TabularForecastingDataFrameInput,
transform: INPUT_TRANSFORM_TYPE = InputTransform,
transform_kwargs: Optional[Dict] = None,
data_fetcher: Optional[BaseDataFetcher] = None,
val_split: Optional[float] = None,
batch_size: Optional[int] = None,
num_workers: int = 0,
sampler: Optional[Type[Sampler]] = None,
pin_memory: bool = True,
persistent_workers: bool = True,
**input_kwargs: Any,
) -> "TabularForecastingData":
"""Creates a :class:`~flash.tabular.forecasting.data.TabularForecastingData` object from the given data
frames.
.. note::
The ``time_idx``, ``target``, and ``group_ids`` do not need to be provided if ``parameters`` are passed
instead. These can be obtained from the
:attr:`~flash.tabular.forecasting.data.TabularForecastingData.parameters` attribute of the
:class:`~flash.tabular.forecasting.data.TabularForecastingData` object that contains your training data.
To learn how to customize the transforms applied for each stage, read our
:ref:`customizing transforms guide <customizing_transforms>`.
Args:
time_idx: Column denoting the time index of each observation.
target: Column denoting the target or list of columns denoting the target.
group_ids: List of column names identifying a time series. This means that the group_ids identify a sample
together with the time_idx. If you have only one timeseries, set this to the name of a column that is
constant.
parameters: Parameters to use for the timeseries if ``time_idx``, ``target``, and ``group_ids`` are not
provided (e.g. when loading data for inference or validation).
train_data_frame: The pandas DataFrame to use when training.
val_data_frame: The pandas DataFrame to use when validating.
test_data_frame: The pandas DataFrame to use when testing.
predict_data_frame: The pandas DataFrame to use when predicting.
input_cls: The :class:`~flash.core.data.io.input.Input` type to use for loading the data.
transform: The :class:`~flash.core.data.io.input_transform.InputTransform` type to use.
transform_kwargs: Dict of keyword arguments to be provided when instantiating the transforms.
input_kwargs: Additional keyword arguments to be used when creating the TimeSeriesDataset.
Returns:
The constructed :class:`~flash.tabular.forecasting.data.TabularForecastingData`.
Examples
________
.. testsetup::
>>> from pytorch_forecasting.data.examples import generate_ar_data
>>> data = generate_ar_data(seasonality=10.0, timesteps=100, n_series=5, seed=42)
We have a DataFrame `data` with the following contents:
.. doctest::
>>> data.head(3)
series time_idx value
0 0 0 -0.000000
1 0 1 0.141552
2 0 2 0.232782
.. doctest::
>>> from pandas import DataFrame
>>> from flash import Trainer
>>> from flash.tabular import TabularForecaster, TabularForecastingData
>>> datamodule = TabularForecastingData.from_data_frame(
... "time_idx",
... "value",
... ["series"],
... train_data_frame=data,
... predict_data_frame=DataFrame.from_dict(
... {
... "time_idx": list(range(50)),
... "value": [0.0] * 50,
... "series": [0] * 50,
... }
... ),
... time_varying_unknown_reals=["value"],
... max_encoder_length=30,
... max_prediction_length=20,
... batch_size=32,
... )
>>> model = TabularForecaster(
... datamodule.parameters,
... backbone="n_beats",
... backbone_kwargs={"widths": [16, 256]},
... )
>>> trainer = Trainer(fast_dev_run=True)
>>> trainer.fit(model, datamodule=datamodule) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
Training...
>>> trainer.predict(model, datamodule=datamodule) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
Predicting...
.. testcleanup::
>>> del data
"""
ds_kw = dict(
time_idx=time_idx,
group_ids=group_ids,
target=target,
parameters=parameters,
**input_kwargs,
)
train_input = input_cls(RunningStage.TRAINING, train_data_frame, **ds_kw)
ds_kw["parameters"] = train_input.parameters if train_input else parameters
return cls(
train_input,
input_cls(RunningStage.VALIDATING, val_data_frame, **ds_kw),
input_cls(RunningStage.TESTING, test_data_frame, **ds_kw),
input_cls(RunningStage.PREDICTING, predict_data_frame, **ds_kw),
transform=transform,
transform_kwargs=transform_kwargs,
data_fetcher=data_fetcher,
val_split=val_split,
batch_size=batch_size,
num_workers=num_workers,
sampler=sampler,
pin_memory=pin_memory,
persistent_workers=persistent_workers,
)
| 42.983957
| 118
| 0.635606
|
fc3dd87173d8e3e1a808e0fda902045e844dddbf
| 12
|
py
|
Python
|
mock/ten.py
|
kevinghst/UDA_sup
|
c970622370d5de6b8c48b458cb8b4fe59e37effb
|
[
"Apache-2.0"
] | null | null | null |
mock/ten.py
|
kevinghst/UDA_sup
|
c970622370d5de6b8c48b458cb8b4fe59e37effb
|
[
"Apache-2.0"
] | null | null | null |
mock/ten.py
|
kevinghst/UDA_sup
|
c970622370d5de6b8c48b458cb8b4fe59e37effb
|
[
"Apache-2.0"
] | null | null | null |
print("ten")
| 12
| 12
| 0.666667
|
f0d3931242c1d4cebe05d5322a31738405d02220
| 1,006
|
py
|
Python
|
main.py
|
aallali/SFJ-MSR
|
f87f45df46341ae8d9f2e84484cccbdde26b9baf
|
[
"MIT"
] | 1
|
2022-03-04T14:32:27.000Z
|
2022-03-04T14:32:27.000Z
|
main.py
|
aallali/SFJ-MSR
|
f87f45df46341ae8d9f2e84484cccbdde26b9baf
|
[
"MIT"
] | null | null | null |
main.py
|
aallali/SFJ-MSR
|
f87f45df46341ae8d9f2e84484cccbdde26b9baf
|
[
"MIT"
] | null | null | null |
# **************************************************************************** #
# #
# ::: :::::::: #
# main.py :+: :+: :+: #
# +:+ +:+ +:+ #
# By: aallali <hi@allali.me> +#+ +:+ +#+ #
# +#+#+#+#+#+ +#+ #
# Created: 2021/10/18 17:59:13 by aallali #+# #+# #
# Updated: 2021/10/18 18:00:55 by aallali ### ########.fr #
# #
# **************************************************************************** #
import uvicorn
if __name__ == "__main__":
uvicorn.run("app.api:app", host="0.0.0.0", port=7080, reload=True)
| 59.176471
| 80
| 0.162028
|
602a4b937ce16f95b41d5122f2b7d6175ee1cdd9
| 4,266
|
py
|
Python
|
handy_tools/_scratch_rot_morph.py
|
VelionaVollerei/PMX-VMD-Scripting-Tools
|
f935669e3bbc14f4a9b47803a1ccea1464b538b2
|
[
"MIT"
] | null | null | null |
handy_tools/_scratch_rot_morph.py
|
VelionaVollerei/PMX-VMD-Scripting-Tools
|
f935669e3bbc14f4a9b47803a1ccea1464b538b2
|
[
"MIT"
] | null | null | null |
handy_tools/_scratch_rot_morph.py
|
VelionaVollerei/PMX-VMD-Scripting-Tools
|
f935669e3bbc14f4a9b47803a1ccea1464b538b2
|
[
"MIT"
] | null | null | null |
_SCRIPT_VERSION = "Script version: Nuthouse01 - 6/10/2021 - v6.00"
"""
DO FEET THE BEST WAY
!!!
in the final model, first vertex morph is applied, then bone morph is applied
requires: vertex morph from pointed feet pose to flat feet pose
sequence:
1. invert v morph that goes point > flat, save as feetflat
2. use "avg normals face" and "avg normals near" to create set of normals for when feet are flat
3. create "rigid" version of flat mesh, save as feetflatrigid
4. import feetflatrigid into point for comparing
5. determine what bone rotation to apply to point that gets me the closest to flat, save as morph (also apply this rotation to the toe IK bones and get their yz delta into the bone morph too)
6. with bone rotation applied (looks like shit) do "update model" on pointed version, go to list view, copy all bones, then undo
7. paste into list view of feetflat, will move bones to be aligned with flat-foot geometry, save!
8. apply inverse bone rotation to feetflat in transform view, do "update model", save as feetflatrot
9. get v morph from feetpoint to feetflatrot
10. apply b + v to feetpoint to verify it's close, only bdef2 zones are divergent
11. invert v-point-to-rot to apply, then "update model" with b-rot-to-almost to apply, then save feetalmost
12. get v morph from feetalmost to feetflat, copy to feetpoint
13. correct the morph by rotation-per-weight
14. merge the point-to-rot vmorph and almost-to-flat(rotated) vmorph using "join morph (boolean)"
15. transfer-by-order normals from feetflatrot to feetpoint
"""
import sys
try:
sys.path.append("../")
from python import nuthouse01_core as core
from python import nuthouse01_pmx_parser as pmxlib
from python import nuthouse01_pmx_struct as pmxstruct
from python import morph_scale
except ImportError as eee:
print(eee)
print("ERROR: failed to import some of the necessary files, all my scripts must be together in the same folder!")
print("...press ENTER to exit...")
input()
exit()
core = pmxlib = pmxstruct = morph_scale = None
import math
matchbones = (17, 21, 30, 31,)
# rotamt = (28, 28, -28, -28,)
rotamt = (-28, -28, 28, 28,)
# first try opposite values from the point-to-flat morph
###################
# rotate morph
###################
def main():
m = core.prompt_user_filename(".pmx")
pmx = pmxlib.read_pmx(m)
core.MY_PRINT_FUNC("")
    # valid input is any string that can be matched against a morph idx
s = core.MY_GENERAL_INPUT_FUNC(lambda x: morph_scale.get_idx_in_pmxsublist(x, pmx.morphs) is not None,
["Please specify the target morph: morph #, JP name, or EN name (names are not case sensitive).",
"Empty input will quit the script."])
# do it again, cuz the lambda only returns true/false
morph = morph_scale.get_idx_in_pmxsublist(s, pmx.morphs)
print(pmx.morphs[morph].name_jp)
newmorphitems = []
print("target morph controls %d verts" % len(pmx.morphs[morph].items))
count = 0
for item in pmx.morphs[morph].items:
item:pmxstruct.PmxMorphItemVertex
v = pmx.verts[item.vert_idx]
w = v.weight
        # already know it's all mode1
rot = 0
# only care about BDEF2, right? or sdef
# if not a bdef2 vertex, then rot=0 meaning no change
if v.weighttype in (pmxstruct.WeightMode.BDEF2, pmxstruct.WeightMode.SDEF):
for b,r in zip(matchbones, rotamt):
# get the weight %, multiply it by how much the bone is rotated by
if w[0][0] == b:
rot += r * w[0][1]
elif w[1][0] == b:
rot += r * w[1][1]
# count how many actually get rotated
if rot != 0: count += 1
# convert from degrees to radians for rotate2d()
rot = math.radians(rot)
# now the YZ component of the morph vector is rotated around the origin
ny, nz = core.rotate2d((0,0), rot, item.move[1:3])
newitem = pmxstruct.PmxMorphItemVertex(item.vert_idx, [item.move[0], ny, nz])
newmorphitems.append(newitem)
print("partial-rotated %d verts" % count)
newmorph = pmxstruct.PmxMorph("v-rot", "v-rot",
morphtype=pmxstruct.MorphType.VERTEX,
panel=pmxstruct.MorphPanel.OTHER,
items=newmorphitems)
pmx.morphs.append(newmorph)
# done iter, now write
OUT = core.get_unused_file_name("NEW.pmx")
pmxlib.write_pmx(OUT, pmx)
print("done")
if __name__ == "__main__":
print(_SCRIPT_VERSION)
main()
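# Standalone numeric sketch (hypothetical vertex) of the partial-rotation logic
# in main(): a BDEF2 vertex weighted 0.75 to bone 17 and 0.25 to bone 21 gets
# rot = (-28 * 0.75) + (-28 * 0.25) = -28 degrees, and only the YZ component of
# its morph vector is rotated by that angle around the origin.
def _partial_rotation_example():
    rot = math.radians((-28 * 0.75) + (-28 * 0.25))
    move = [0.0, 1.0, 0.0]  # hypothetical vertex-morph displacement (x, y, z)
    ny, nz = core.rotate2d((0, 0), rot, move[1:3])
    return [move[0], ny, nz]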
| 36.152542
| 191
| 0.716128
|
093de2884f525651728367c2b55e829a7b94cbd6
| 1,073
|
py
|
Python
|
tensorflow/python/keras/saving/saved_model/constants.py
|
khanhlvg/tensorflow
|
a59b74ccaafae59d616ecf08204d63023ff6f49c
|
[
"Apache-2.0"
] | 2
|
2021-01-06T23:39:38.000Z
|
2021-01-06T23:39:38.000Z
|
tensorflow/python/keras/saving/saved_model/constants.py
|
khanhlvg/tensorflow
|
a59b74ccaafae59d616ecf08204d63023ff6f49c
|
[
"Apache-2.0"
] | 8
|
2019-07-08T10:09:18.000Z
|
2019-09-26T20:55:43.000Z
|
tensorflow/python/keras/saving/saved_model/constants.py
|
khanhlvg/tensorflow
|
a59b74ccaafae59d616ecf08204d63023ff6f49c
|
[
"Apache-2.0"
] | 1
|
2021-02-27T07:40:01.000Z
|
2021-02-27T07:40:01.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Constants for Keras SavedModel serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Namespace used to store all attributes added during serialization.
# e.g. the list of layers can be accessed using `loaded.keras_api.layers`, in an
# object loaded from `tf.saved_model.load()`.
KERAS_ATTR = 'keras_api'
| 42.92
| 80
| 0.719478
|
debadbd655f80c9770d752693710fb57412fa4d4
| 8,956
|
py
|
Python
|
geeet/meteo.py
|
Kripankc/geeet
|
64f183e62376dc119989ba0e7db0ed6acbd041c6
|
[
"MIT"
] | 12
|
2021-03-11T12:09:13.000Z
|
2022-03-15T07:53:02.000Z
|
geeet/meteo.py
|
Kripankc/geeet
|
64f183e62376dc119989ba0e7db0ed6acbd041c6
|
[
"MIT"
] | 2
|
2021-08-29T11:39:12.000Z
|
2022-01-12T13:10:45.000Z
|
geeet/meteo.py
|
Kripankc/geeet
|
64f183e62376dc119989ba0e7db0ed6acbd041c6
|
[
"MIT"
] | 5
|
2021-03-27T19:48:13.000Z
|
2022-02-28T05:57:26.000Z
|
"""Meteorological functions"""
from geeet.common import is_img
import numpy as np
# Constants defined as in
# https://www.ecmwf.int/sites/default/files/elibrary/2016/17117-part-iv-physical-processes.pdf#subsection.L.5
Rdry = 287.0597 # gas constant for dry air, J/(kg*degK)
Rvap = 461.5250 # gas constant for water vapor, J/(kg*degK)
epsilon = Rdry/Rvap # (~0.622) ratio of the molecular weight of water vapor to dry air
c_pd = (7/2)*Rdry # Heat capacity of dry air at constant pressure, J kg-1 K-1
c_pv = 4*Rvap # Heat capacity of water vapor at constant pressure, J kg-1 K-1
# Constants for Teten's formula using parameters from Buck (1981)
# for saturation over water.
a1 = 611.21 # in Pa
a3 = 17.502
a4 = 32.19 # in K
T0 = 273.16 # in K
def teten(T):
'''
Compute Teten's formula for saturation water vapour pressure (esat (T)) in Pa
with parameters set according to Buck (1981) for saturation over water.
Reference:
https://www.ecmwf.int/sites/default/files/elibrary/2016/17117-part-iv-physical-processes.pdf#subsection.7.2.1
Input: T (numpy array or ee.Image) Temperature in Kelvin
'''
if is_img(T):
T1 = T.subtract(T0) # in K
T2 = T.subtract(a4) # in K
esat = T1.divide(T2).multiply(a3).exp().multiply(a1)
else:
esat = a1*np.exp(a3*(T-T0)/(T-a4))
return esat
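# Quick numeric sketch (hypothetical input): at T = 293.15 K (20 degC),
# esat = 611.21 * exp(17.502 * (293.15 - 273.16) / (293.15 - 32.19)),
# which is roughly 2.34e3 Pa (about 2.34 kPa), in line with tabulated values.
def _teten_example():
    return teten(np.array([293.15]))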
def specific_humidity(T,P):
'''
Input: (ee.Images or np.arrays):
- P: surface pressure in Pascals
- T: temperature in Kelvin
Output: (ee.Image or np.array):
- Q: specific humidity
'''
if is_img(T):
esat = teten(T)
denom = P.subtract(esat.multiply(1-epsilon))
Q = esat.multiply(epsilon).divide(denom)
else:
esat = teten(T)
Q = epsilon*esat/(P-(1-epsilon)*esat)
return Q
def relative_humidity(temperature, dewpoint_temperature, pressure, band_name='relative_humidity'):
'''
Input: (ee.Images or np.arrays):
- temperature, in Kelvin
- dewpoint_temperature, in Kelvin
- pressure: surface pressure in Pascals
Output: (ee.Image or np.array):
- RH: relative humidity(%)
Equation 7.91 in:
https://www.ecmwf.int/sites/default/files/elibrary/2016/17117-part-iv-physical-processes.pdf
'''
if is_img(temperature):
Q = specific_humidity(dewpoint_temperature,pressure)
esat = teten(temperature)
denom = Q.multiply(1/epsilon -1).add(1).multiply(esat)
RH = pressure.multiply(Q).multiply(100/epsilon).divide(denom).rename(band_name)
else:
esat = teten(temperature)
Q = specific_humidity(dewpoint_temperature,pressure)
RH = (pressure*Q*100/epsilon)/(esat*(1+Q*((1/epsilon) - 1)))
return RH
def vpd(RH, Temp_K, band_name=None):
'''
Function to compute the vapor pressure deficit in kPa.
Inputs:
- RH (numpy array or ee.Image): the relative humidity [0-100].
- Temp_K (numpy array or ee.Image): array with the temperature
values in Kelvin.
Outputs:
- VPD (numpy array or ee.Image): the vapor pressure deficit [kPa].
References
----------
Allen et al., 1998
'''
is_RH_img = is_img(RH)
is_TempK_img = is_img(Temp_K)
if is_RH_img != is_TempK_img:
        print('Both inputs must be numpy arrays or both must be ee.Images; mixing them is not supported.')
return
esat = teten(Temp_K) # in Pa
if is_RH_img:
ea = (RH.divide(100.0)).multiply(esat)
VPD = esat.subtract(ea)
        VPD = VPD.divide(1000) # convert to kPa
if band_name:
VPD = VPD.rename(band_name)
else:
ea = (RH/100.0)*esat
VPD = esat - ea
        VPD = VPD/1000.0 # convert to kPa
return VPD
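# Quick numeric sketch (hypothetical conditions): at 60% relative humidity and
# 25 degC (298.15 K), esat is about 3.17 kPa and ea about 1.90 kPa, so the
# returned deficit is roughly 1.27 kPa.
def _vpd_example():
    return vpd(np.array([60.0]), np.array([298.15]))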
def LatHeatVap(Temp_K):
""" Calculates the Latent heat of vaporization
Inputs (ee.Image or np.array):
- temperature: air temperature (Kelvin).
Outputs (ee. Image or np.array):
- L: latent heat of vaporization (MJ kg-1)
based on Eq. 3-1 Allen FAO98
"""
if is_img(Temp_K):
from ee import Image
L = Image(2.501).subtract((Temp_K.subtract(273.15)).multiply(2.361e-3))
else:
L = 2.501 - (2.361e-3*(Temp_K-273.15)) # at 20C this is ~2.45 MJ kg-1
return L
def compute_met_params(temperature, pressure):
"""
Calculates several temperature and/or pressure-dependent
parameters related to heat flux in air,
which are commonly used in ET models
Inputs (ee.Image or np.arrays):
- temperature: air temperature at reference height (Kelvin).
- pressure: total air pressure (dry air + water vapor) (Pa)
Outputs: (ee.Image with following bands, OR list of np.arrays:)
- q (specific humidity)
- ea (water vapor pressure), in Pa
- rho (air density), in kg m-3
- cp (air heat capacity), in (J kg-1 K-1)
- s (delta vapor pressure, i.e. slope of the saturation water vapor pressure), in Pa K-1
- lambda (latent heat of vaporization), in MJ kg-1
    - psicr (psychrometric constant), in Pa K-1
- taylor (=s/(s+psicr)), in Pa K-1
"""
q = specific_humidity(temperature, pressure)
ea = teten(temperature) # in Pa
Lambda = LatHeatVap(temperature) # in MJ kg-1
if is_img(pressure):
from ee import Image
mfactor = Image(1.0).subtract(ea.multiply(1.0-epsilon).divide(pressure))
rho = pressure.divide(temperature.multiply(Rdry)).multiply(mfactor)
cp = ((Image(1.0).subtract(q)).multiply(c_pd)).add(q.multiply(c_pv))
s = ea.multiply(a3*(T0-a4)).divide((temperature.subtract(a4)).pow(2))
psicr = cp.multiply(pressure).divide(Lambda.multiply(epsilon*1e6))
taylor = s.divide(s.add(psicr))
met_params = q.addBands(ea).addBands(rho).addBands(cp).addBands(s).addBands(Lambda).addBands(psicr).addBands(taylor)
met_params = met_params.rename(['q', 'ea', 'rho', 'cp', 'delta', 'Lambda', 'gamma', 'taylor'])
else:
# Hydrology - An Introduction (Brutsaert 2005) eq 2.6 (pp 25)
rho = (pressure/(Rdry * temperature )) * (1.0 - (1.0 - epsilon) * ea / pressure)
# Rearranged from https://www.ecmwf.int/sites/default/files/elibrary/2016/17117-part-iv-physical-processes.pdf#section.2.7
cp = (1.0-q)*c_pd + q*c_pv
# Slope of saturation water vapor pressure (i.e. slope of teten's formula):
#esat = a1*np.exp(a3*(T-T0)/(T-a4))
# desat/dT =
# a3*(T0-a4) * a1*np.exp(a3*(T-T0)/(T-a4)) /(T-a4)**2
# = a3*(t0-a4) * esat / (T-a4)**2
s = a3*(T0-a4)*ea/((temperature-a4)**2) # in Pa K-1
        # Psychrometric constant
psicr = cp*pressure/(epsilon*Lambda*1e6) # Pa/K
# Priestley-Taylor term DELTA/ (DELTA+GAMMA)
taylor = s/(s+psicr)
met_params = [np.array(q), np.array(ea), np.array(rho), np.array(cp), np.array(s), np.array(Lambda), np.array(psicr), np.array(taylor)]
return met_params
def compute_roughness(CH, fm=0.125, fd=0.65, kb=2.0, min_values = [0.003, 0.003, 0.004], band_names = ['ZM', 'ZH', 'D0']):
"""
Roughness length (m) for momentum and heat transport (ZM, ZH)
and zero-plane displacement height (m) (D0)
Inputs:
- CH: canopy height in m (ee.Image or numpy array)
Scalar (optional) inputs:
- fm: ratio of vegetation height used for ZM (default is 0.125)
- fd: ratio of vegetation height used for D0 (default is 0.65)
- kb: parameter kb=ln(ZM/ZH) (default is 2.0)
- min_values: minimum values for ZM, ZH, and D0 given as a list.
- band_names: if provided, rename the output ee.Image with these names
Defaults to 'ZM', 'ZH', and 'D0'
Outputs: rough (ee.Image or list) containing ZM, ZH, and D0
either as bands in an ee.Image or numpy arrays
ZM and D0 are based on simple fractions (fm, fd) of canopy height
while ZH is based on a log relation between ZM and ZH: ln(ZM/ZH)~kb
these are all mentioned in-text in Norman et al., 1995 (N95):
ZM = canopy height * fm Brutsaert (1982)
ZH = ZM/exp(kb) Garrat and Hicks (1973) mentioned in N95
D0 = canopy height * fd Brutsaert (1982)
The default ratio values for ZM and D0 are fm=1/8 and fd=0.65, respectively
The default kb parameter is 2.0
Minimum values for all parameters can be set by default (3 mm, 3 mm, and 4 mm).
"""
import numpy as np
zM_min, zH_min, D0_min = min_values
if is_img(CH):
from ee import Image
ZM = CH.multiply(fm)
ZM = ZM.max(Image(zM_min))
ZH = ZM.divide(Image(kb).exp())
ZH = ZH.max(Image(zH_min))
D0 = CH.multiply(fd)
D0 = D0.max(Image(D0_min))
rough = ZM.addBands(ZH).addBands(D0).rename(band_names)
else:
ZM = np.maximum(zM_min, fm*CH)
ZH = np.maximum(zH_min, ZM/np.exp(kb))
D0 = np.maximum(D0_min, fd*CH)
rough = [ZM, ZH, D0]
return rough
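# Quick numeric sketch (hypothetical 2 m canopy): with the default fractions,
# ZM = 0.125 * 2 = 0.25 m, ZH = 0.25 / exp(2.0) ~ 0.034 m, and
# D0 = 0.65 * 2 = 1.3 m, all above the configured minimum values.
def _roughness_example():
    return compute_roughness(np.array([2.0]))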
| 39.280702
| 143
| 0.625167
|
e6c484d7c63a1dc15b997611d27f24e47edea40f
| 4,564
|
py
|
Python
|
keras_gym/wrappers/video.py
|
KristianHolsheimer/keras-gym
|
0296ddcc8685e1ce732c3173caaa0fd25af9ef58
|
[
"MIT"
] | 16
|
2019-07-01T10:56:26.000Z
|
2021-01-31T18:56:56.000Z
|
keras_gym/wrappers/video.py
|
KristianHolsheimer/keras-gym
|
0296ddcc8685e1ce732c3173caaa0fd25af9ef58
|
[
"MIT"
] | 10
|
2019-03-10T21:56:10.000Z
|
2020-09-06T21:49:55.000Z
|
keras_gym/wrappers/video.py
|
KristianHolsheimer/keras-gym
|
0296ddcc8685e1ce732c3173caaa0fd25af9ef58
|
[
"MIT"
] | 5
|
2019-08-02T22:11:19.000Z
|
2020-04-19T20:18:38.000Z
|
import gym
import numpy as np
from PIL import Image
from ..utils import check_numpy_array
from ..base.mixins import AddOrigToInfoDictMixin
from ..base.errors import NumpyArrayCheckError
__all__ = (
'ImagePreprocessor',
'FrameStacker',
)
class ImagePreprocessor(gym.Wrapper, AddOrigToInfoDictMixin):
"""
Preprocessor for images.
This preprocessing is adapted from this blog post:
https://becominghuman.ai/lets-build-an-atari-ai-part-1-dqn-df57e8ff3b26
Parameters
----------
env : gym environment
A gym environment.
height : positive int
Output height (number of pixels).
width : positive int
Output width (number of pixels).
grayscale : bool, optional
Whether to convert RGB image to grayscale.
assert_input_shape : shape tuple, optional
If provided, the preprocessor will assert the given input shape.
"""
def __init__(
self, env, height, width,
grayscale=True,
assert_input_shape=None):
super().__init__(env)
self.height = int(height)
self.width = int(width)
self.grayscale = bool(grayscale)
# check input shape?
self.assert_input_shape = assert_input_shape
if self.assert_input_shape is not None:
self.assert_input_shape = tuple(self.assert_input_shape)
# check original shape / dtype
shape = self.env.observation_space.shape
dtype = self.env.observation_space.dtype
assert len(shape) == 3, "bad shape: {}".format(shape)
assert shape[2] == 3, "bad shape: {}".format(shape)
assert dtype == 'uint8', "bad dtype: {}".format(dtype)
# update observation space
if self.grayscale:
shape = (self.height, self.width)
else:
shape = (self.height, self.width, shape[2])
self.observation_space = gym.spaces.Box(
shape=shape, low=0, high=255, dtype='uint8')
def _preprocess_frame(self, s):
check_numpy_array(s, shape=self.assert_input_shape)
img = Image.fromarray(s)
if self.grayscale:
img = img.convert('L')
img = img.resize((self.width, self.height))
return np.array(img)
def reset(self):
self._s_orig = self.env.reset()
s = self._preprocess_frame(self._s_orig) # shape: [h, w]
return s
def step(self, a):
self._s_next_orig, r, done, info = self.env.step(a)
self._add_s_orig_to_info_dict(info)
s_next = self._preprocess_frame(self._s_next_orig)
return s_next, r, done, info
class FrameStacker(gym.Wrapper, AddOrigToInfoDictMixin):
"""
Stack multiple frames into one state observation.
Parameters
----------
env : gym environment
A gym environment.
num_frames : positive int, optional
Number of frames to stack in order to build a state feature vector.
"""
def __init__(
self, env,
num_frames=4):
super().__init__(env)
self.num_frames = int(num_frames)
s = self.env.observation_space.sample()
check_numpy_array(s, dtype=('uint8', 'int'))
if s.ndim == 2:
self._perm = (1, 2, 0)
elif s.ndim == 3:
self._perm = (1, 2, 3, 0)
else:
            raise NumpyArrayCheckError(
"expected ndim equal to 2 or 3, got shape: {}".format(s.shape))
# update observation space
shape = s.shape + (self.num_frames,)
self.observation_space = gym.spaces.Box(
shape=shape, low=0, high=255, dtype='uint8')
def reset(self):
frame_shape = tuple(self.env.observation_space.shape) # [h, w, c?]
shape = (self.num_frames,) + frame_shape # [f, h, w, c?]
self._frames = np.zeros(shape, self.observation_space.dtype)
self._s_orig = self.env.reset() # shape: [h, w, c?]
s = np.expand_dims(self._s_orig, axis=0) # shape: [1, h, w, c?]
self._frames[...] = s # broadcast along axis=0
s = np.transpose(self._frames, self._perm) # to shape: [h, w, c?, f]
return s
def step(self, a):
self._s_next_orig, r, done, info = self.env.step(a)
self._add_s_orig_to_info_dict(info)
self._frames = np.roll(self._frames, -1, axis=0)
self._frames[-1] = self._s_next_orig
s_next = np.transpose(self._frames, self._perm) # shape: [h, w, c?, f]
return s_next, r, done, info
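# Minimal usage sketch; assumes an RGB Atari environment such as
# 'PongNoFrameskip-v4' is installed (any uint8 RGB image environment works the
# same way): each frame is resized to 105x80 grayscale, then the last 4 frames
# are stacked into a single observation of shape [105, 80, 4].
def _wrapper_example():
    env = gym.make('PongNoFrameskip-v4')
    env = ImagePreprocessor(env, height=105, width=80, grayscale=True)
    env = FrameStacker(env, num_frames=4)
    s = env.reset()
    return s.shape  # (105, 80, 4)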
| 29.636364
| 79
| 0.599036
|