| column | dtype | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | length 1 |
| author_id | string | length 0–212 |
04f9c99fb5abe774e11429fb6eef80360c25f788
|
4039a472290f6254c8832bdcc3f7a006da883c75
|
/descript.py
|
62a1f54b0068450ad9d90d6a540b1a9010484562
|
[] |
no_license
|
canut/sstic2013
|
7e33050262ca09bc49563e46ef24eed5399d3a20
|
ec93bcaf3d7ca728834d539f22f7109af3ab900d
|
refs/heads/master
| 2020-06-07T06:36:30.185367
| 2013-06-25T15:02:32
| 2013-06-25T15:02:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,910
|
py
|
#bac9f7a8721f
import hashlib, sys
I1 = list("cf760bc77db1f282e881ede9a10122b220887466b973b854218b85c230d6733ab459fda9a879973664130312d5ff3e1a8e2f25dc6d3ada1daf6e481438a3a7fe8e36a77e6be0ab3192b6a183e02f84721563b9586d2977335dcdba80530994c097cd054baf0d690ab58e6576c10a6e4d6fd20005621a6ec492bd0d0b3f2fe3709c9e51c9d69464819f111812a14ff0a5ce8c7cf0953d4aca103da4cbe544962226c914dd0d505658963213a757ce8a6063755de62eecd85ce1358c3031d77bee049e6087b671d71eb6239f40be8ceafdbd63a61ba4a87c3770887f72db4711eba54a4f257009729db6cb7369b312cf0781cad85ee7d022c8bc3436cf2f9b2d1b422fd2c07e137af5017b0ae74893e0df3a5cb665cd86715d53215308a34387f28aa4613474d9f8cdc668d3fc1922f3388aee42eb0769a3d4a61dc6ee9314f6bd3c936c38d2bb91bda6032504f3cf46bb7805ebb1a713936012226b2c67705f2d54f281f80c5c60dd0e29bc4e044d5e3261920a29d37ae14b8de9103b28612a93f5ff780be4ed4d5bf2c26e84ad0840870ddb3682b45cfa0126ef3285e343e36d1af40a41917be20dbd2b38fd5ab35913bd06a2e6ec72e61c420d587ebbbd42df622ab863ea45455818bb35ddc3a152b8a8050f6d5885eddd4447d87f60448d86d451369209138a9705ccd1f095cd1893fb23282dd48f9dc3fc4867d3a1832e45e084e54f76ddc3226052af767382df13c0e111805eeb14c6f1a598aff52ac3b1fad9f8bfebd06577e917f5c9096e19222e940129e657b5853e29c1642180a6f937b71a5e5573a387c31899f439d68bd4d26225f2e068ef23660acefae52b9137fee23991daf5758005e45fafae30ec9130adce7be170336cfc1b14bae0a3c28e527b4ef37ad06168ce7efc68d404bc8e27463596e167af4ce4ebcc6a3d6bab1f9b32069fdb5eee73c8f06ebc10ce5120477f3cb04acc7598b0041d59f81792d040d08a0a196c630d1db8f769c144890839394f6456fbb08940c18f7199b23a275bda7592687572448edb0b9c8db2013374849cf5b7381fed20a29b3b00fde7e9847bec41e4f9907482c3198872f077f226ea376fd4e2dd1b1d95a84395d1ddd3cc5d9d2a264c0924803b0299368ed59b2ea92aa515c5b50dd73f950aa1b7d1f637629d721a8d136a5124fc2ba261e252f19de262b292db44705eb66ce43cdbb6bbb0de1b6264e01b58b0faf4c64db7de80d94e57c0c80c61f8df6f1942e560f0cb7100cd07fb42f10855cb3b042f9bb7d0a3ff997ddf70d019d1613e31d697dd65862a144f34a6e413489cdaffc69510dfea38b947af17e15db638c16c2b6ce27716027b8cc3e989404c9005861a45dc85b4597ad263c899830180cab2ec6a43974a56f39e2a705697703851e95fd5c610a588940a901914e5499f7c3fa2969ce7ed98eb7c13741162f322ffa9ae91e21109508f32987e442ffc929a4399b179e923662d365ca26122b1efbe2a5c9b6b5694a5a0af2a859be91aec65abea4f60a6d058f19ef64069152ce31bde73d55bdd4f8d649238c323845e0e3f662b2780ab14304db9e64880830514b726bb788849781d6f151c53c89804f893b2892382f40f1947483d6be6c161de401dc2549de7a4f2b5a6a87f4b787ec1e31c635dc6890cb24b9b8aa3e997375174e686b6e518b23c6850427691242cfad117a60d5eed0801db9de81e6c9b9b4fa02d4ba9a827443c4f77ca715ccd91fc838eeab2324559cc07b12527e4a61cfadc482c6e235c169619b2e98e6e199c7f1ab81d7efc9b91bef810c80892b03e0c42e48ac8e1a60d8cbca21754a8f92c3a3f599210115284f00e222a8fb4e4c10f5cf40f7c26b2337e8428161673225d659adf671ea0e1db3027cf95ecd29559c2850782a92891fde271b06c38c1839ce4ffd70923d426b491765e150500b3a8fabfad44b3998c3527d608438cc0c13ac43327c35059cb4e5d042e88ec8a488c2bdae05d84c9e2eb80dea865d05101a931f72acf49e7a7b9dc7cf3efe6c262b947940fbde21416db0402bb86a3f542e2149322f6175f9b54d329ab051b933c6bf753dea1ccad06dc8fa1f01bf8e7253aca0baf5741ff107f495bfe9e0e46affef5f97f349c447e7ab2c8ba4d22133c99898095f0c75fba756d92aa619b91925463a8b308b6281879c71bde1c24455adaa576a891a2a87b28a6dd99699afeaadeb5e9e8d801bfda817042fbf85ec1a058296c9222967c24d41778f0e2f4d0fbd359e8f799ef75c95765eaf2765d5664b2ea2e8196f1755bc8485237cdd88b81b5f6bff547f352753617618d92a5858cec02f61c34b7de2e7cec018fe86dcd83e6c6e8be1071bc493d98d6edcc9c30feeabbf88ae0e418a0efef015c57a21a0c78df0f89b64103f19fe1118c7dfcefc4775f2c0acaf8a67ccb29a9c8058234e5fdfcb532840a51e464497cdb94c38d24331b0cea8d1dd0d4e8ca141db21cd74114d120
0e27909da6c17ae81bcd127be2d78b64f15af6c7d6fdff2a97218368fae0b988c288419ba853dec5b020de9efdd0d60bd1ce8061ab2e4531e1df7259a183bde817804600989f875bece9bdf321624adb63dc787e6c8137e99bcda29699810936bd893c8f2ecdcfbe31bf76c6deaf4dd8e8d24c3d9dcb0ed8965f238c715924a0843db4cfbbaedcb7527368b4e90749a5afc4c9d63dba9fd55f49a3cb4c6a4883cdc9d7fcb3395740ddc80f03a4dc54313d5538df08cfbfa525323fb484cd77d6d441fc857bc23078d137f11a93b4e806da2af6aaf3c2ecde387fee39a03d028185f56ca69fca9b1e23bc66e2c55ee888c82f84b291c79c6cc9b05f2dabc2fd0de32ba8d445264c62933e774bc6970faab310cdbe6d1571f0e2775854a1675763c567a9f17d17149af64b7dd705da1ce7e04a20a7a9bab3b3998804d1bef16c029fd2f580336870f28526dc643fbdeaaa2a28bbea7ca329c691732943b2307d697a2717ca2ad4373549480dda3d377b21382c9d7553a96608ef1d5efd8d05c777c54f28abdecad7a8eefe67754fe1600c746e3617f235eefe6f26e849cd718e6f04f4fe5bc9e609e7168d9dbfae16482ad6ae63faaf4e5040a81f1e45717c3684e9b677738e149eb72ff58c445264389e466d997a808a9f19cd506fa1f60a7ffa66070f8041bfe689fdc69b313d55b5e2898175628e73c79beeebc484f77dd18230caa89d6c43691127bc98b6c832d462d8d4e8725074b771bc98e8584a8296e836cb097a226d1e5554f0d6759625f7ec6a707e8d3712f9e1dc013cbacafece08108deaeae4b772a47b7ff6b672957efaf8f5f1c72a610c7c3deb4c13b2e6962bcd579f7a8f1c964ecf16573eeec1125472621a46e1f540628874ea5548d44c38165fd32f98cdce18e43f1413d6c63e109bba1061e5d39ad820d1b6d539467d0e228742de9aa339ba54a303e482747da9c92bde45567d43873fd79ed624ebd4ef1fa80bc1f88937d58f1bc2f70498bd18abdcc533780076d35699295344da829342a90f6816a3ae8a5b74dc2b598afc3803b387220277731401236c40c16e0327a42bb08c09ee7cb60c9f8ce11f939ea99f51f01adf124798a885ab5015aa1c026abe47c16c25e6e352f176961e768470a0116aa36cf1d3eddfdbc87015759b9852fc935ddc9f36ed7f2a998a6c558e8cd4c43b0f517dac08ae3a72811450b57126acd3dd6609d646928eee71aac89ab020db293e217d6427399f5a0b380d4425e11a6731805df5a4c81a5f39d64bc0e21f1681a58d2e67708fda9cb7639958317d2169d12428105d48b18950a8b291ec4bf974a7a2a06b9b397357d01b959740a6a5dd3805f2d87a6c243d0bda52dcdb1089b80a8c9c303c75a59aa752ff17b27577c5a275664c12c4ca90f898a0a9460e8dab4b73f10b240318e20a0e2a051ff076d5c83f2274875bfa1b936340b7a61fe76386ef75a97f498513579e47330d31b615b6b57c9fd595a52ad130b86da8a961536dbdb15320d56193ac514389ffb3f16b035a3a410cfd4cf84a654045ad3b4826812760a2df1f41fcfe0755faa0f1c503eef176e48a409393c2d86b3f75f42b067a28ff6f3214dc062af78a9c7438c5ef3699fad521d9522e7f08285991a520b5ab7bf38070ad08c8a2ed0dfd8d5b540940d835d0c47258608babea264829cc916f5d2dbf0cc81e2c22400f1fe67ffa8404923de450724c02f33a5366b64a0e09ad5e4652b80661a8c1ee59c6b3259a70ff8ca6c22ec0bb18ec090cc75414340b5878919fc935a1581f6fa1d31aa720cb4185fa098e35bf096609122eb2f91f2ca139775243a16ba937fdc482e06437fadbdb000a87ba6a630f6eadd5f3b2cb4616be497ad9b50673d271539d995c21b41b556b1f238ebf41a40df403632bd289e459d64c84438121c2b80bb87ae521aa0adf5ea24832825e814aadde6b4d34cbff9e58a4b331e85466db0338bb521c3a4f65d891edfc4cc9c86f68537d77b22dd71e77a7f02938328585c5a2e039b37d5d5e3c2f06357a67754854d7316efca09564476ed33ea8196ca493fce197af1abb0f4719d2e503aebed308bdd7034d7c4c6028034cabd7d519aae15bbd12e59509137f99a299e67745e606c7431d0d84de97a1adeb90340fcedfaa07058aed618beab2693925a3bcd17910c0db2eb6c2f4c479cdf058a5d1385a14315294d1d99b9569f9b061110412358a513fa9ca8baa8ee0685db2fa04a4ba2b388d667a1ebc28a77158b6939ef2784796318eef806a027fbda7521de99e42c8a91049a71241068ad349aa45e8372104ffbe165009576faab37f5c244747bb3a9e1766d32612c8b65c393de7ec1cb14cc3927aa41fe5f11ab39405991aafc2fa99c6c3701ee96ab19f79b98d8c18d6ece042e7e5e65ef4b09858d68bf2f42ab32e91413b02ee7a7fc08d2e927fa55b63eb28caf4ce2ed52f0f0a39a989c60d7de5e98fda42865f4335e8ddfe7cbbbb48cdd287e
670953230249d5fb460041e251208540c3175844b0d36ecf4112a9bd88ed6a9313f184e2b2629af9d478434f2b1fac47e93906c9c0ac832352e38e4dff1ec5e5cfbaab538a3ff4a43fb5d812ccfffc09f9fa841cca5b9c91e6cb8b9bc9886194bc4c93ec679cc8abefd6e9de72483e0ed2f8f3105d270b6923ec3bd910859afa56d87c2d5b4556fd9621682aaf2aeea206ae4a81440617b5a023ea424b6549c08337edf64d3ab6f7ee7169d83df7feeeaee00c69ab0804ca79c928014c2946c10a0dafb6b3e3d4494f064015fb306f5532730133e596e3189d5bbdef11d29eb2f8268558658497957a47560e247c0a81717653918094382750ba79c6e2bae666daa648ba8cf191a9d431c7f129fbb6fa231bcdb671d08b137107339cc6ebcad15fe6f30f09348eb9ccc3970302e8e1f2701a3cb4cf9e5246c48dc509d7020ce462a61580ac2ff82b3a62ff6e5d64ddc4f85cb2c7c79d41939043c66e2864c3e733b718bd83b187aa8cfe490f6c8234f9977a1a6edf3056bb9646591f1ce75337069f827b1dbe69d842306210e8d9a9edcb971d88c31672485a4045e3f357947129d83d59cbbefe9049dd9ce45c1f9f42bb6ba49afb207f6a66078a267c498b3da273640c3ad0cf112cea4729edfb0e2554c91387a44fd5f8a7bec6a0faeb9bf795c0ba7bbc749572edcb17192889f481b2b8f0c7ee2df42eca20b487de6413e35c618d3290a95bd147654300d14d7902df801e1156963c3ad71429f122256ecb788ec3b345fed6498bdc3d73e46fb89f5db7ac0c774a0c1c293c81ebff545f15ed50a9df5fe63a012f80c741008c44911977fd73ff5a2d61c0a99a75206fb1ac52f6a7f5e946d6dda4b642dfbb3e8894209e6877fbaef84d500bffad9a294615aba3b86e9d3b0efd09940e85b9712339bf043cd3bdf609e68a9eb157b1088eacc8adee3f179c1fd66309cf5b98d6710c812793fd526c6ae8ab9a87b31bf8e8c9815c280feb0f4fc864f41ee44345ee8b8f081d88d4bd14123e11c0335962be1265e4c5f8a5e215e73172534f78219ed3b67a205b78f21444866081e9b1c29f1fd4bd496967f239d622ba4e24e1259e52325f95d1f45df36b5193cd519d9d2aa4beae0cc2655ac56d0be0106351513862eeb043de82eedc8ceec5e400b0dc729bc916123446c9d94fe8a4e9fa43af7104e5e1919f08dbc349fed7c1a041324fe2e0f2fc4ba0009532dd9efe5894a7f643181d3afeedff5073cc21d7726c2eda750348debfae879855b39eea610a26fe7f34b0140fd69ace2ee6a9898c28d3a36d8ffd076a29be92f180ab1f81d75c40470f80ff2db31d26d16fb50aefbebd7a74dbc0afe60fe9c2258804324007c6deee863cb843f49e871a7823c581a56246497fe6df06f467d6b2bac465439d5fabb8c18c3e900e12d97f103c34d60e7ca7b679e3dcd70287bb16d884bb351f92f081f9199e6620bb7a63ad985372c0940af87e70301387de96c8c7e2bc6292c248fbec5f736d92f8c31fd92aac224d0bba00a12bfad0c9813d1b76482641aa2f13d7668b36d13f60979c3ffe0c9303b45fbd1b850f2e3da454c796b5dc1b387ccd9b43e868d3800da44f00d0557106f8d8c5ddf0af8b1df122f96ee26b9e860c3c993a84ae477fa3e131c272eb42d19acef3c5cbc28b5e26109a06be420422836c17d69b0e64f5722bcfe9d89d5fb100c8318b46f861834e8bb51ce51261a32439dde684b8ff305e5b5f7e1d152d4311e26e9a59b44c7f009b1941851c9ba3ab8424020e520959ed5ec1eea67d4bb510c916a592231cf99f16111b95870fd616522d609fa67053762591840fded0480a7cffc819224aa7eff4a5b3d8eadb28732e63a4c76b9d53296294707d49c3f9edcc275a18caf6fe0eac7d92733901f6800cd41bdda04a87cb03caf5d72bd762ff66c331d158ce25b8b25ddbdd1e9aceef8d18576ff215cc32f0a951b479de8223140582708991fba7afc7d3e21637ad133a028048537b2977a47c38a60c6e96176262eafc044b5a97543de0122ac679e2e2e03794bebe2006c660190e93466b62f5d53f57dd5d5528193d5e57fff8de11b265da9a45d432b03af9d6aeca3585d0d073b9ad68f791a8a51f2258decb4e125bacfdece6e4195ca5be9cc434838191009ba6342b06b19120215d7e6609720e5fd9c355ffd98ea552f6f8336fb8f494d7d79a935f2cc871b4dacc8a3173e865cc33a5ae3c1235775f25ffe2a03f17a81eca9c824e6261e99a7cb856911217c620a35ee042678f5f512177e7673ebd3630ddd380d891912d892f77d59e132b3cd42f200f8ad664ef5d2386f58b6389f8a90df7de6149195993e7b26eee2f42b8ec8aa6f0dc8cc7daad72c0d5a323ffb2195e81f549187b9f13775485250aa26589cf9dbca10a4b5525fafd3446c0478b751c65bf187f263c2f65f0c84f3d73966568ba248ecdaec2c8f94e4a214cafb46adad6f6f0c3501463217dd833fab5589493e80
526d521bd71e6ac5449032c56330457e8cde470e49d08348e5484b05ef45378f2c71100bd8a45baea94e5fb2110e975544e57bd086662903d13fb8361634fc885beda70396b0f2ae99cd72e44db87eff89938c41f5d31786cb0910b5eb108f87e029b1fd3f2eddff44069cb65af631dc8efd882f726e85a3a465256d68216b1fcd8880f963cc51c5a7ee010c2bdaa2240abaf6adf35f6e351dd9620657c31f17340e92864707efeaccfdf5be205e016157ec32fab122934bc36d56dec77a22b9b5be4b8a80acb74708c9aa2467e7a1173696942d877566a88972026f9fb9766a195b4c9b703fa1d53e196cf8aec9402a1f35d1c9a4a8d8a7d18a16227dbd19bb2acde33578f432939c14f3b3ea9e10f9be1ef35ebdcc961bb73aba30b05e7bdf3e2100e063b7ec16cbe34e2b57dceeac7f2299da8398a860c3b6ddd38ebe43ead208ecc30e301763553334194632b905130decbfbe2481e16e3691483d8999dd295a3b96d23fd4ce97238570bbf1899355a2926f09ce159302019f9300d47fc6b545eb64db673ae61a6f5917da55e4edff628d5595f4827ab183baf00c38abfe508b7f0679c5e1227b179cc94f1c71c9bec11aad0533d1465c563996c7db2022e66baafffa4007b89cfc62523d2f6be3398aceffe4570830eabf91e4677718387fecc3f290b387bd13ee8fc3bc4f000a5a0cf7e96dee14fa08af051dc2c4506a8a7d14bde5cb78e20518461d5abc91a848b5942f5f057cb9447514f4dbb0b562f2d1f283a62d0ac968c9e08f4e01912bb52c10ed1cc38c93cc835a63a5152f08ee8efa4b72592433662faf5386c699d33a43329a7975deadf48efc1d840dc9709811bedd46666709374350dc21d0391d7ddbb1dfbf3cd17528b8b613974d8c969f127a0ef687f8665290c4ba8785fddfd8ce510b797430e70dcd385d8eab03c893cbfedf4e6992f4d3493fb8f0c9f247e7b114e979af1f866a47bed199e2231e05f3cfe2c5d3410f3d313a0c85c7b3ddca38e29d0f9a9ce3fc9819175e76baf10bdac131d4a36ecadfdd8b63add542a21149721b141cfff3c649bd3eb342ff795e5df075808bf66181d9109be15eaf784538e4fba98c0b455eac07a2e8dad57eefed86ba1255f5ac4f7562ebfe2e98b3c15fcb94fa24a337718af0b478d3445aa2e24842fdfe3d2d9ffced0df6ce5da358f1744df29fe202a9b3f4acd7266043ee3460324a176efa2ac3d0d7f76879b105e5a0b77c068d431792cb238fc437d16e2a84452fcab4a5cf606ad0c4fcd76b65e38d01a409afe59e1117a1110ee01e7d3dae7de6e0d49d0c920accfe40d061e61a912da1eaeb457ffcd6cf3152addd29163c82a2a72baea0956430e07e055ad6c97e8d82af183358f58fde77e3e33ef0b119482106e4563924b7d9283c9d9911eb9b5d9dce86d3afe0869077ea96d6c60fd11d37a3f7da1483f80afcbabef636e0820a09371c7556a16eb525922bafcfa95720d6866620ff20b2975b6ee19d782394cca7335b65e714ce30833cea5eb9f212ad2b4c94905335ebda60dfe6e611d2decce9b4f7f034e760ea8d987b708f000f39bf089f44218fc8b2a7ef6575beba14e62d2c8663a9cb932c62ad3df02cab55d3b2c8b4bf3507771a2e4d229a37e4fed4d78f4e23eeef2497da664c7665469f1b682c933e0d322a830bcbf239ca58a2fa41715e92c3c7083e8b56d23c2cdf3d96d126dff16538b2720cee4b2e5177c6e8c49808edfae722dfc2ea0a7285e7e7dee4fa342e919dd05204ceeed920bfee6b062f4990d614d84929416480c9d7749760bfd3939796ff68beab815c1d4e7b5676b31b195ace2ed5aacb8050a52b7403e3b31a2e6b94144cde7d9f32faff47f043922b46cd49505e010dd1489161b2195ded57f9b2d75e30a6003ff65e15d5286b9dc1c3f60ca0c8417b8892dd73af19cb1efa2313272a4257a1e25420310963661c01374788466916ee2bdfb417633d3fa84665513e19351d15a116f2027c5044e1a5129031d9aa6030803ec482c3c1702b378572c92fde1cce2cf0b3f6b18f14c8b7d91f628b30191047f9f75f1e2dd90bd16ab0590fd0ab82267b30f2d81ae8449ed727cf288ed27e51304abc75a569422f6bfb019318e319a05d7839e5f3ec313d7e356330a4c0b90f287a57500a34175081d0361adb6801e2bacc98f74e30bf465b5f05f24e442b10cd175b785437810f8f7e6d437f2d7a1bdfbda91d4562a5679452d412bcddb27e7851e7e681e0a718a60e31aa5933ce4cdb48124b6a98cd71c562cd727e2accafb254740c2c4e91e80f06bc107798cb09b1f9403c22489e82bb4b46bd73ac2d6c0456a0f2a8db57f500ead3ef0301aba8141764ff4f5cfe3d0e4d3ba7bb9d640c3cc343752794c79cbd388bc51d6c53d01a68455f202831b887711f36ed2936119f8a873bcce6576c59e9e901523432b5aee3761e85b59725b07cbe90a4213a6c634f08d06bf3e5a2f5e0d0ce75f8266af9836
549c4bcaf08dad516564b1614b5a01afef394819f3a2fdb16956a1ee2b20813cdbeb1f91a008d3251efcdfa78f2ac15305a16cbfd68bed6bcd3a79b76f96870ef613da6968b8d04bf93a8fb243b0da28819e9a2580064d0438dab2c73e35532990223d6c5aed7a3c1657f878981069e1a8a8636028d4c59565deac564713a6902bf7044f26c861da74fa94ed19853af391440cc7622685aa98f32892e2f307ce841f7bc4b9dc6e7e5c794898957397c5aafcb5baf97b58ece75bac9a9e407e68efc908c467a852200316ceb6431e4a0cdcd3f02d74fc6edc8930a99fdd0fb5b2a1312f47abfafe8a5a46afc485417502d1da903f301e6497aa92fc7cf03e2fea078583c5d9d5bd01db78d5c60855f1f9dbe8eec2a3bf05a19e1133e82930fb16388c07155777ab5b22eb240891b24e4e5691aacf6b195ac53a24f6fda565ca91b86b6261d1d8a5951de311d619954f5a81aad2646f4ce421f395c15a14424c1aa014496377a57be14cb23796b89772ad9cbb246555c11282341f18bc3ebf1de5eb87985242ee77afc40254f1eca0d940d4ea68c1b7c9186057abdf8c530e4373c2deded9ca8a9700115773cb75df63f92562e456c15003cc6135e798d022544afa2e3457b4d5d7cb22496b31d6b57da7abda0c0d493be7a0a41e54f56bb4e35082b9f628878c1c6efa1dd9383e2ddf3c98f1663a515f8ced717f72101382ebf68d89feb28964998dc622d74d7c83fe882ad99143538f864d694ec250948d9e64eeddf10703d7440c8db989f0c70a70a03f1d79821d834cc2aaf138f21b52fe6b75ac643f27205eea9d5707e2f282431f8920d49c6d9ec4c9c89fe016b93c5635d662e59a3a7b339be93df0432f7dd0ef4aaed64b4da9b68305f6d30e91bc1764d6d18243908dec1cf5393fe670425f52d9673b1765acaba2e80c026d6491f409d555e054b0326f420a5a382250f6ae8c07b34054afe922502469c7a992cea70a1bf451f5ccb00056a79ba02aafea02e41c4cba78fbd113902d23f02e015fdef8cd7a61fed8582ac4847be2c933162508e78dfdddf702dcd52122dd836e1a503e22091a9b95f14af4804c300ff8c408cd1b043e3651b9f33489503dbab0c18e3b6cba35431308a19fbcc45723cfd6d8d908870413814fca496d665b42ea121ae68d31a5efdee5a8945053ddba81ba40e03114628823a21d73b5ca3d6049a451ddd74257b66a482f1ba0080913cabb9744b9824c775b6e5b2a7ac1690e461863edaae8ff417cf307f13018a80a1675d6ad18ff3f209facdc53ceb9e3792556be99ecae0bbae3cc3961858c8a24ba87508ce4ad8f92a5bf45f8278fe4af1ba4100c11e55c64d95220c1f6bf857d9e1bf6ee24c368725aad49ea0c08e30bae076fea2b005c9c156beb04f1cda304ff137a5619cab8a0e6db389ffc4d6b7f7909069956492e6631ad5746f385a60f6de4c432e64e11b90f5eafdbf603db7a0f3aaa1d7f85e64e6679d0d90cc0335b9b17d5e8aefc6ceb33d3d4b65688c8c19efb793528b7079e3cd7cedaa947bdf683d778962acfba0a78c71034ca60309dfc3e13d8575c836a1c99e3ded7a83ee405b7bca8f0ea650f7b3bd1eea1ede8b0f3ae1fefccf94ec811685b807fcf1488f592d089aa06f8e4e76c1fc944c662c88bd5199e77dc4660c2957e6e738049c21e2c7af95ba122c6deb90c6a7006116f8c9cacc1f0fefdeb782b40436fcd00daf589509076dde465b72e3b2311382781b598cbd51ac9f59c33ac731e6842dac85d50b496b7dd456d1ccbbc10c54c6b21c52f34181875417aa69a00cf1154d88698524ab742be1bcd85f2bd8a75fd2c11c35e4ec427e3e283518cb3471407deff97aef8f63cdf7290b8a984875a4015124edb69a29559f45074dd77682e49dbd41d46334dd1c742a2110af744f06b84247dc6e5eb2b0d392157a10fc8652f8a0986f7be7efc43ee49b906baa6bbbb37b9ed4547c52e40c4a54a68baa706c0d3f1c8bb98b9056abb2c215ca4129c912a21976fedb11d05807e743140556b7d08b4c39fb9c64b9200f71391f0089e0643e1dedb7f086956d684d3a34202b185f8db789408d35340079605b89a2a0a86b23477d6df3e3e0382bae92a8c65cb75fb29d27416b3ccb9b0553527daf8dfde2cf91c079c9a98561e9ab7df319a15362032b654a60b86a4262cd1f331ff6e99b459ebf3babd86f3e67669a6510c2d7a4579a59e33b8a70fad65f1f0ea52fd1a92a88dc5719d3ccd385bc1dfa50d881c42073d6be35ec288e0a184a7ed2bbf7985ed577fa17db00fcfdb5c515c259b4ddf755cb1a8b4c93a2ed3411692420aa5ac985336f180a1ecf343f3381b2893ee10134a0f9dac32f003c78baf71c5ecbea32621dc151b6deb709e12a484363c9d3bb586d769c9e3dad47cc8e4d1bc0946df227db84845a518663239509396d678dbb58981704c119ff286c32a8136037f4e561f2b09fe0ee47cba1e267fc28a9f2d1774e47cb0e92
4cd8f002aaa094687a0f9cb6f45a998f7038a63b518c01483ee17ac95f319c4d49ccc2bf3a01d5d4b6e706c3cfca37b2ced83985bbd82901cbfe54dd407c7f95ecf0e4e5fd563c2d8a68d159c7c71e39422576339577c657c9f2e91af65d227509226fee6cb08aa0878da60c29e510ad63ad9d0d48b92821ba70b5be8108a648cf18bc3dc4b18725aed7dbed392be88eb3d8d889de71b450ac5e8ccaf893424c929dce5515486a27d836aac2e8965a3687d41ca7bb709702fb711bfd1a136bf9f4615ff97b74469520aebd1195b3277d8ebee180178987a2d7be5d577fc854f657d6df8432f32b4273fe2f370d198320f7a8574585304c8d9d67fa8140b46312649e99aad1ea730e2b116cc1950eba5612921fcd48d2e0d237eb6752082c3f1a4b11cd6c414c3523226cf16393c2089c64b6170c241b08d45b0ed83c262858788561ae97d86b35db5d4199e615b4b81b57436ca041629ea77fd6c7ffe58599b2aebcd3198c5410c3789426f4aacccde46b1ed689205292f58eb8c9912e52e78d8202ee290878f0bcc3679a88551f50f36f1d5f212bbee3564cadd338dbc4dcbcdb489114061b9129863c9c121dbea51d560d80c1bef2b8b1ce31b7b7a28c1a87502327435969ae95031077669b0b9ee7d6902ab76a776b0c95d60e78d6b467d908062a9e0e1b1dd1535331c78e4feb8a01666814494c2ab6e80d4d3adb21b79aff0618c17ba36b48a2a3565537f8639f922099ca8dfd3752006492397570b5e56298502ce7b4442881a76a050b27d2d642a33812e1140ce756d34c856f175439f4a1a39dba22da36326c0aae9176582b2968f565680577216b9b6c9de9a4f72843120760c68cc9b7aa42ce705ec3b3e6da2d0eebf1ac983af9669880b9c9f9acee34760b738081572fc9ca8905da4c8e904fe8cc79fd9472a61506e638873ff5eafb91e60a4f8309291d5d49f45d5c7eee02749f88abc5188415eb1cfc68242a08ed8a325c6f461a12eb13753cdca0810e5500b0b9b49cb2d0a7f754f69b62da6ed3f56bcdcf8a68b7a616cf512cabbf1935cf858818858266fecbf422863d3565f9c5bca95390c55807a6881a4454808dc30491e614d9254078830d00cfd006638e6ae143164b388ff8073a5537fcc58360e383447ca9df67d58d14f8f74949ce31dbb0ddf14120d84f8c4f567750fb9fabce73ca07d5d6b1e7d1d9e7ddd516de6052d2ea3ff07a7a785e2f60e446cea06c34bc5619ff26a3bd58b1a3830b038926920c24b9ee83fc3be7faead0a46b88a6d76e5e342fb87eeac7b11421a77524f131f9acb92db510606129deca41026862c520bbc1f82fece681d6955477ac15428c16d5097a9121359b85c58f".decode('hex'))
I3 = "338f25667eb4ec47763dab51c3fa41cba329e18536b83159b3a690a0265ec519aae94f0e715376c4f087bcccdd0be3b4a114f8be746142c44978faa76dae62cf197d7bce4eb38dd68c8ce5f69f326e1effceae3f72f8eaa38e019a59b1dc0997"
def rotate_key(k):
return ((((k >> 3) ^ (k >> 7) ^ (k >> 2) ^ k) & 0x01) << 31) | (k >> 1)
def compare(k):
if k < 0x55555555:
return 1
elif k < 0xaaaaaaaa:
return -1
else:
return 0
k = 0xf7a80000
def swap_xor(I, x, y, a, b, k):
i = x*128 + y*4
j = a*128 + b*4
temp = I[j:j+4]
I[j:j+4] = I[i:i+4]
I[i:i+4] = [chr(ord(temp[o]) ^ (k >> ((3-o)*8)) & 0xff) for o in range(4)]
def scramble(D, k, x, y):
for i in range(10240):
k = rotate_key(k)
a = (x + compare(k)) % 77
k = rotate_key(k)
b = (y + compare(k)) % 32
k = rotate_key(k)
swap_xor(D, x, y, a, b, k)
x = a
y = b
return [x, y]
#final_key = 0xbac9f7a8721f00000000000000000000
final_key = 0xbac9f7a8721fad3c9fcf271eed9abbc8
I = I1[:]
x, y = 0, 0
for offset in range(6):
init_key = (final_key >> ((5-offset)*2*8)) & 0xffffffff
print "\nStep %d\nInit key: %x\nx, y: %d, %d" % (offset, init_key, x, y)
if (init_key & 0xffff) != 0:
print "Step %d done !" % offset
x, y = scramble(I, init_key, x, y)
else:
h2 = I3[offset*32:(offset+1)*32]
        m = hashlib.md5(''.join(I)).hexdigest()
        key_limit = init_key + 0xffff  # only the low 16 bits of this chunk are unknown
        while m != h2 and init_key <= key_limit:
J, u, v = I[:], x, y
k = init_key
#print "Trying key %x" % k
u, v = scramble(J, k, u, v)
m = hashlib.md5(''.join(J)).hexdigest()
if m == h2:
print "Key part found : %x !" % init_key
final_key = final_key | ((init_key & 0xffff) << (5-offset)*2*8)
I, x, y = J[:], u, v
print "Final key : %x" % final_key
else:
init_key = init_key + 1
f = open('output.bin', 'wb')
f.write(''.join(I))
f.close()
|
[
"michael@maudits.com"
] |
michael@maudits.com
|
b6ba2d3c2a3ee9c10a7138dd40c7db7624930b12
|
32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd
|
/benchmark/runnerup/testcase/interestcases/testcase0_1_2_010.py
|
e7acf126ea1ec9df41be1aacdb3d05848744db5b
|
[] |
no_license
|
Prefest2018/Prefest
|
c374d0441d714fb90fca40226fe2875b41cf37fc
|
ac236987512889e822ea6686c5d2e5b66b295648
|
refs/heads/master
| 2021-12-09T19:36:24.554864
| 2021-12-06T12:46:14
| 2021-12-06T12:46:14
| 173,225,161
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,436
|
py
|
#coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'org.runnerup',
'appActivity' : 'org.runnerup.view.MainLayout',
'resetKeyboard' : True,
'androidCoverage' : 'org.runnerup/org.runnerup.JacocoInstrumentation',
'noReset' : True
}
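# run a shell command, give it `timeout` seconds to do its work, then terminate it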
def command(cmd, timeout=5):
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=1000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=1000)
return
def scrollToFindElement(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
elements = driver.find_elements_by_android_uiautomator(str)
if (len(elements) > 1) :
for temp in elements :
if temp.get_attribute("enabled") == "true" :
element = temp
break
except NoSuchElementException:
swipe(driver, 0.5, 0.55, 0.5, 0.2)
else :
return element
for i in range(0, 4, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
elements = driver.find_elements_by_android_uiautomator(str)
if (len(elements) > 1):
for temp in elements:
if temp.get_attribute("enabled") == "true":
element = temp
break
except NoSuchElementException:
swipe(driver, 0.5, 0.2, 0.5, 0.55)
else :
return element
return
def scrollToClickElement(driver, str) :
element = scrollToFindElement(driver, str)
if element is None :
return
else :
element.click()
def clickInList(driver, str) :
element = None
if (str is None) :
candidates = driver.find_elements_by_class_name("android.widget.CheckedTextView")
if len(candidates) >= 1 and checkWindow(driver):
element = candidates[len(candidates)-1]
else :
element = scrollToFindElement(driver, str)
if element is not None :
element.click()
else :
if checkWindow(driver) :
driver.press_keycode(4)
def clickOnCheckable(driver, str, value = "true") :
parents = driver.find_elements_by_class_name("android.widget.LinearLayout")
for parent in parents:
try :
parent.find_element_by_android_uiautomator(str)
lists = parent.find_elements_by_class_name("android.widget.LinearLayout")
if len(lists) == 1 :
innere = parent.find_element_by_android_uiautomator("new UiSelector().checkable(true)")
nowvalue = innere.get_attribute("checked")
if (nowvalue != value) :
innere.click()
break
except NoSuchElementException:
continue
def typeText(driver, value) :
element = getElememt(driver, "new UiSelector().className(\"android.widget.EditText\")")
element.clear()
element.send_keys(value)
enterelement = getElememt(driver, "new UiSelector().text(\"OK\")")
if (enterelement is None) :
if checkWindow(driver):
driver.press_keycode(4)
else :
enterelement.click()
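# a popup window is assumed to be open when the root FrameLayout is shorter than the full screen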
def checkWindow(driver) :
dsize = driver.get_window_size()
nsize = driver.find_element_by_class_name("android.widget.FrameLayout").size
if dsize['height'] > nsize['height']:
return True
else :
return False
def testingSeekBar(driver, str, value):
try :
if(not checkWindow(driver)) :
element = seekForNearestSeekBar(driver, str)
else :
element = driver.find_element_by_class_name("android.widget.SeekBar")
if (None != element):
settingSeekBar(driver, element, value)
driver.find_element_by_android_uiautomator("new UiSelector().text(\"OK\")").click()
except NoSuchElementException:
time.sleep(1)
def seekForNearestSeekBar(driver, str):
parents = driver.find_elements_by_class_name("android.widget.LinearLayout")
for parent in parents:
try :
parent.find_element_by_android_uiautomator(str)
lists = parent.find_elements_by_class_name("android.widget.LinearLayout")
if len(lists) == 1 :
innere = parent.find_element_by_class_name("android.widget.SeekBar")
return innere
break
except NoSuchElementException:
continue
def settingSeekBar(driver, element, value) :
x = element.rect.get("x")
y = element.rect.get("y")
width = element.rect.get("width")
height = element.rect.get("height")
TouchAction(driver).press(None, x + 10, y + height/2).move_to(None, x + width * value,y + height/2).release().perform()
y = value
def clickInMultiList(driver, str) :
element = None
if (str is None) :
candidates = driver.find_elements_by_class_name("android.widget.CheckedTextView")
if len(candidates) >= 1 and checkWindow(driver):
element = candidates[len(candidates)-1]
else :
element = scrollToFindElement(driver, str)
if element is not None :
nowvalue = element.get_attribute("checked")
if (nowvalue != "true") :
element.click()
if checkWindow(driver) :
driver.find_element_by_android_uiautomator("new UiSelector().text(\"OK\")").click()
# preference setting and exit
try :
os.popen("adb shell settings put secure location_providers_allowed 'false'")
time.sleep(5)
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
os.popen("adb shell am start -n org.runnerup/org.runnerup.view.SettingsActivity -a test")
scrollToClickElement(driver, "new UiSelector().text(\"Unit preference\")")
clickInList(driver, "new UiSelector().text(\"Kilometers\")")
scrollToClickElement(driver, "new UiSelector().text(\"Sensors\")")
scrollToClickElement(driver, "new UiSelector().text(\"Autostart GPS\")")
clickOnCheckable(driver, "new UiSelector().text(\"Autostart GPS\")", "true")
scrollToClickElement(driver, "new UiSelector().text(\"Headset key start/stop\")")
clickOnCheckable(driver, "new UiSelector().text(\"Headset key start/stop\")", "true")
scrollToClickElement(driver, "new UiSelector().text(\"Step sensor\")")
clickOnCheckable(driver, "new UiSelector().text(\"Step sensor\")", "false")
scrollToClickElement(driver, "new UiSelector().text(\"Temperature sensor\")")
clickOnCheckable(driver, "new UiSelector().text(\"Temperature sensor\")", "true")
scrollToClickElement(driver, "new UiSelector().text(\"Pressure sensor\")")
clickOnCheckable(driver, "new UiSelector().text(\"Pressure sensor\")", "true")
time.sleep(1)
driver.press_keycode(4)
scrollToClickElement(driver, "new UiSelector().text(\"Recording\")")
scrollToClickElement(driver, "new UiSelector().text(\"GPS poll interval (ms)\")")
typeText(driver,"2147483647")
scrollToClickElement(driver, "new UiSelector().text(\"GPS poll distance (m)\")")
typeText(driver,"1")
driver.press_keycode(4)
time.sleep(2)
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"2_010_pre\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
# testcase010
try :
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememtBack(driver, "new UiSelector().text(\"Start GPS\")", "new UiSelector().className(\"android.widget.Button\")")
TouchAction(driver).tap(element).perform()
swipe(driver, 0.5, 0.2, 0.5, 0.8)
element = getElememtBack(driver, "new UiSelector().text(\"Running\")", "new UiSelector().className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"00:05:00\")", "new UiSelector().className(\"android.widget.TextView\").instance(9)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Waiting for GPS…\")", "new UiSelector().className(\"android.widget.TextView\").instance(10)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.runnerup:id/icon\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.runnerup:id/gps_detail_indicator\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
swipe(driver, 0.5, 0.2, 0.5, 0.8)
element = getElememtBack(driver, "new UiSelector().text(\"Running\")", "new UiSelector().className(\"android.widget.TextView\")")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Audio cue settings\")", "new UiSelector().className(\"android.widget.TextView\").instance(4)")
TouchAction(driver).tap(element).perform()
driver.press_keycode(4)
element = getElememtBack(driver, "new UiSelector().text(\"Waiting for GPS…\")", "new UiSelector().className(\"android.widget.TextView\").instance(10)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Feed\")", "new UiSelector().className(\"android.widget.TextView\").instance(14)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Settings\")", "new UiSelector().className(\"android.widget.TextView\").instance(6)")
TouchAction(driver).tap(element).perform()
element = getElememtBack(driver, "new UiSelector().text(\"Configure audio cues\")", "new UiSelector().className(\"android.widget.TextView\").instance(8)")
TouchAction(driver).tap(element).perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"2_010\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'org.runnerup'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage)
os.popen("adb shell settings put secure location_providers_allowed gps,network")
|
[
"prefest2018@gmail.com"
] |
prefest2018@gmail.com
|
6d33bbf3b8b1a123c55dd85559b4cb086e5eef50
|
bbf80f1020fd040acb16bb6044747a9b04b183dc
|
/URIonlineJudge/Codes/Python/uri2165.py
|
f8522a3dfbc63fb0654d9246b002be2372c0576f
|
[] |
no_license
|
mariatheresahqs/CompetitiveProgramming
|
b1ceb47652e1805680c7bdb3aae8468c26a402de
|
acb5d8b6839ccad942798291c3c07a5e5f0dd114
|
refs/heads/master
| 2022-12-30T20:54:04.414028
| 2020-10-20T10:38:45
| 2020-10-20T10:38:45
| 153,899,284
| 0
| 2
| null | 2020-10-20T10:38:46
| 2018-10-20T11:36:48
|
Python
|
UTF-8
|
Python
| false
| false
| 98
|
py
|
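# URI 2165: a message of at most 140 characters is a valid tweet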
texto = [ str(i) for i in input()]
if(len(texto)<=140):
print("TWEET")
else:
print("MUTE")
|
[
"mariatheresahenriques@gmail.com"
] |
mariatheresahenriques@gmail.com
|
f89e97eba616a9df0d2b2e8c2b95eb9030807730
|
6bc3c6c1c6ac433e467e2cbdb9073d08934d3cbc
|
/1748.py
|
b03013f78e561a9f21916aa552a3bed90b9495e0
|
[] |
no_license
|
akalswl14/baekjoon
|
2fbe0d2b8071c0294e7b6797cf7bf206e981020b
|
ba21b63564b934b9cb8491668086f36a5c32e35b
|
refs/heads/master
| 2022-11-23T06:13:13.597863
| 2022-11-15T14:28:23
| 2022-11-15T14:28:23
| 163,743,355
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,198
|
py
|
n = int(input())
digit = 0
if 1 <= n < 10:
    digit += n
elif n >= 10:
    digit += 9
if 10 <= n < 100:
    digit += (n - 9) * 2
elif n >= 100:
    digit += 90 * 2
if 100 <= n < 1000:
    digit += (n - 99) * 3
elif n >= 1000:
    digit += 900 * 3
if 1000 <= n < 10000:
    digit += (n - 999) * 4
elif n >= 10000:
    digit += 9000 * 4
if 10000 <= n < 100000:
    digit += (n - 9999) * 5
elif n >= 100000:
    digit += 90000 * 5
if 100000 <= n < 1000000:
    digit += (n - 99999) * 6
elif n >= 1000000:
    digit += 900000 * 6
if 1000000 <= n < 10000000:
    digit += (n - 999999) * 7
elif n >= 10000000:
    digit += 9000000 * 7
if 10000000 <= n < 100000000:
    digit += (n - 9999999) * 8
elif n >= 100000000:
    digit += 90000000 * 8
if n >= 100000000:
    digit += 9  # n == 100000000 itself contributes one 9-digit number
print(digit)
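# A compact equivalent of the cascade above (sketch):
#   digit, start, length = 0, 1, 1
#   while start * 10 <= n:
#       digit += 9 * start * length    # all `length`-digit numbers
#       start *= 10
#       length += 1
#   digit += (n - start + 1) * length  # the final, partial block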
|
[
"noreply@github.com"
] |
akalswl14.noreply@github.com
|
6250c71081e43997c39ff575c2af2d39f47eefdf
|
ab2a731f1db94fe305f07088b2be256c8f089ce1
|
/lattice_runner.py
|
7d886bffb4620c7c6960ca6cbbdcd9a3d9a9da71
|
[
"MIT"
] |
permissive
|
venkatperi/lattice.gsd
|
bb20540458c50d27515b81fc1696124d4055a0f4
|
4e8d204d216dfe56b8a776ffe6193d8cb051efbb
|
refs/heads/master
| 2020-03-26T06:04:32.818118
| 2018-08-22T13:14:03
| 2018-08-22T13:14:03
| 144,588,354
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,153
|
py
|
import time
from threading import Thread
from lattice import Lattice
class LatticeRunner(Thread):
def __init__(self, args):
Thread.__init__(self)
self.args = args
self.lattice = Lattice(size=args.size,
slider=args.slider,
onlyRedBlue=not args.any,
defKillers=args.defKillers,
density=args.density,
numRatio=args.numRatio,
redAdvantage=args.redAdvantage,
blueAdvantage=args.blueAdvantage,
redGrowth=args.redGrowth,
blueGrowth=args.blueGrowth,
deathRate=100000)
self.args = args
self.quit = False
def stop(self):
self.quit = True
def run(self):
for iteration in range(0, self.args.evolutions):
self.lattice.evolve(1)
if self.quit:
print("Aborting")
break
print("Generations: %d" % self.lattice.generation)
|
[
"venkatperi@gmail.com"
] |
venkatperi@gmail.com
|
f95b7fae834b6a362df94fb41a5d49be1ec2e6c8
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-network/azure/mgmt/network/v2018_11_01/models/effective_network_security_rule.py
|
10dd7523c4843720136fec77f60c87d946d0d797
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 5,624
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EffectiveNetworkSecurityRule(Model):
"""Effective network security rules.
:param name: The name of the security rule specified by the user (if
created by the user).
:type name: str
:param protocol: The network protocol this rule applies to. Possible
values are: 'Tcp', 'Udp', and 'All'. Possible values include: 'Tcp',
'Udp', 'All'
:type protocol: str or
~azure.mgmt.network.v2018_11_01.models.EffectiveSecurityRuleProtocol
:param source_port_range: The source port or range.
:type source_port_range: str
:param destination_port_range: The destination port or range.
:type destination_port_range: str
:param source_port_ranges: The source port ranges. Expected values include
a single integer between 0 and 65535, a range using '-' as separator (e.g.
100-400), or an asterisk (*)
:type source_port_ranges: list[str]
:param destination_port_ranges: The destination port ranges. Expected
values include a single integer between 0 and 65535, a range using '-' as
separator (e.g. 100-400), or an asterisk (*)
:type destination_port_ranges: list[str]
:param source_address_prefix: The source address prefix.
:type source_address_prefix: str
:param destination_address_prefix: The destination address prefix.
:type destination_address_prefix: str
:param source_address_prefixes: The source address prefixes. Expected
values include CIDR IP ranges, Default Tags (VirtualNetwork,
AzureLoadBalancer, Internet), System Tags, and the asterisk (*).
:type source_address_prefixes: list[str]
:param destination_address_prefixes: The destination address prefixes.
Expected values include CIDR IP ranges, Default Tags (VirtualNetwork,
AzureLoadBalancer, Internet), System Tags, and the asterisk (*).
:type destination_address_prefixes: list[str]
:param expanded_source_address_prefix: The expanded source address prefix.
:type expanded_source_address_prefix: list[str]
:param expanded_destination_address_prefix: Expanded destination address
prefix.
:type expanded_destination_address_prefix: list[str]
:param access: Whether network traffic is allowed or denied. Possible
values are: 'Allow' and 'Deny'. Possible values include: 'Allow', 'Deny'
:type access: str or
~azure.mgmt.network.v2018_11_01.models.SecurityRuleAccess
:param priority: The priority of the rule.
:type priority: int
    :param direction: The direction of the rule. Possible values are: 'Inbound'
        and 'Outbound'. Possible values include: 'Inbound', 'Outbound'
:type direction: str or
~azure.mgmt.network.v2018_11_01.models.SecurityRuleDirection
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'protocol': {'key': 'protocol', 'type': 'str'},
'source_port_range': {'key': 'sourcePortRange', 'type': 'str'},
'destination_port_range': {'key': 'destinationPortRange', 'type': 'str'},
'source_port_ranges': {'key': 'sourcePortRanges', 'type': '[str]'},
'destination_port_ranges': {'key': 'destinationPortRanges', 'type': '[str]'},
'source_address_prefix': {'key': 'sourceAddressPrefix', 'type': 'str'},
'destination_address_prefix': {'key': 'destinationAddressPrefix', 'type': 'str'},
'source_address_prefixes': {'key': 'sourceAddressPrefixes', 'type': '[str]'},
'destination_address_prefixes': {'key': 'destinationAddressPrefixes', 'type': '[str]'},
'expanded_source_address_prefix': {'key': 'expandedSourceAddressPrefix', 'type': '[str]'},
'expanded_destination_address_prefix': {'key': 'expandedDestinationAddressPrefix', 'type': '[str]'},
'access': {'key': 'access', 'type': 'str'},
'priority': {'key': 'priority', 'type': 'int'},
'direction': {'key': 'direction', 'type': 'str'},
}
def __init__(self, **kwargs):
super(EffectiveNetworkSecurityRule, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.protocol = kwargs.get('protocol', None)
self.source_port_range = kwargs.get('source_port_range', None)
self.destination_port_range = kwargs.get('destination_port_range', None)
self.source_port_ranges = kwargs.get('source_port_ranges', None)
self.destination_port_ranges = kwargs.get('destination_port_ranges', None)
self.source_address_prefix = kwargs.get('source_address_prefix', None)
self.destination_address_prefix = kwargs.get('destination_address_prefix', None)
self.source_address_prefixes = kwargs.get('source_address_prefixes', None)
self.destination_address_prefixes = kwargs.get('destination_address_prefixes', None)
self.expanded_source_address_prefix = kwargs.get('expanded_source_address_prefix', None)
self.expanded_destination_address_prefix = kwargs.get('expanded_destination_address_prefix', None)
self.access = kwargs.get('access', None)
self.priority = kwargs.get('priority', None)
self.direction = kwargs.get('direction', None)
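# Illustrative construction (example values only; any of the kwargs above may be omitted):
#   rule = EffectiveNetworkSecurityRule(
#       name='allow-http', protocol='Tcp', source_port_range='*',
#       destination_port_range='80', access='Allow', priority=100,
#       direction='Inbound')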
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
82d38c02f2c39cd9fc88157abfd888186c789e03
|
93f87fd34ac129e961f2a54127bae351838a2a0c
|
/train.py
|
04aba41bb0dfc861281eaaa57f7d0d45474ca704
|
[] |
no_license
|
rachmadionl/diabetic-retinopathy-detection
|
34e6fe8bab655fc664f56a03503ad15b01df713a
|
a769c02f865141e731027bf88bf6ce604ce4c019
|
refs/heads/master
| 2020-04-02T17:58:22.898785
| 2018-10-22T17:03:29
| 2018-10-22T17:03:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,457
|
py
|
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from utility.model import initialize_model, train_model, get_trainable_params
from utility.dataset import seperate_dataset_to_labels_folder, create_dataloader
parser = argparse.ArgumentParser(description='Dr research preprocessing script')
parser.add_argument('--model', '-m', help='name of the model you want to train')
parser.add_argument('--classes', '-n', type=int, help='number of output class')
parser.add_argument('--feature', '-f', type=bool, default=True, help='if set to true then train only the new initialized layers')
parser.add_argument('--pretrained', '-p', type=bool, default=True, help='if set to true then use torch pretrained model')
parser.add_argument('--dataset', '-d', help='path to dataset folder that contain the train and val folder')
parser.add_argument('--batch', '-b', type=int, default=32, help='batch size')
parser.add_argument('--gpu', '-g', type=int, default=1, help='number of gpu to train')
parser.add_argument('--epoch', '-e', type=int, default=10, help='number of epoch')
parser.add_argument('--output', '-o', default='./', help='path to save the trained weights')
args = parser.parse_args()
if __name__ == '__main__':
    if args.model is None:
        print('Please specify the model name with -m flag')
    elif args.classes is None:
        print('Please specify the number of output classes with -n flag')
    elif args.dataset is None:
        print('Please specify the path to dataset folder with -d flag')
else:
# initialize the model
        net = initialize_model(args.model, args.classes, args.feature, args.pretrained)
# send net to gpu
if args.gpu != 0:
            device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
else:
device = torch.device('cpu')
net = nn.DataParallel(net)
net = net.to(device)
# set the loss function
loss_func = nn.CrossEntropyLoss()
# set the learning algorithm
params_to_update = get_trainable_params(net) # get the trainable parameter
lr_algo = optim.Adam(params_to_update, lr=0.001)
# create the data loader
dataloaders = create_dataloader(args.dataset, args.batch, args.gpu, True)
# train the model
net, val_acc_hist = train_model(net, dataloaders, loss_func, lr_algo, device, args.epoch)
torch.save(net.state_dict(), args.output)
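# Example invocation (illustrative; valid model names depend on initialize_model):
#   python train.py -m resnet -n 5 -d ./dataset -b 32 -g 1 -e 10 -o ./weights.pth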
|
[
"abiwinanda@outlook.com"
] |
abiwinanda@outlook.com
|
b4f258a4d409f93aa8b1e563d8ced42eb58af7ce
|
6dc685fdb6f4a556225f13a1d26170ee203e9eb6
|
/blueprints/era_oracle/profiles.py
|
a49bc0003b0120744183cf73fdbf0e8b5b3492e9
|
[
"MIT"
] |
permissive
|
amaniai/calm
|
dffe6227af4c9aa3d95a08b059eac619b2180889
|
fefc8b9f75e098daa4c88c7c4570495ce6be9ee4
|
refs/heads/master
| 2023-08-15T17:52:50.555026
| 2021-10-10T08:33:01
| 2021-10-10T08:33:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,821
|
py
|
from calm.dsl.builtins import Profile, CalmVariable
from deployments import OracleOnEraDeployment
from vars import DB_PASSWORD_VALUE, ERA_PASSWORD_VALUE
class Production(Profile):
deployments = [OracleOnEraDeployment]
SLA_NAME = CalmVariable.WithOptions.Predefined(['NONE', 'DEFAULT_OOB_GOLD_SLA', 'DEFAULT_OOB_SILVER_SLA',
'DEFAULT_OOB_BRONZE_SLA', 'DEFAULT_OOB_BRASS_SLA'],
default='NONE', runtime=True)
NETWORK_PROFILE = CalmVariable.WithOptions.Predefined(['Oracle'], default='Oracle', runtime=True)
COMPUTE_PROFILE = CalmVariable.WithOptions.Predefined(['DEFAULT_OOB_COMPUTE', 'LOW_OOB_COMPUTE'],
default='LOW_OOB_COMPUTE', runtime=True)
DBSERVER_NAME = CalmVariable.Simple('DB1', label='DB Server Name', is_mandatory=True, runtime=True)
SID_NAME = CalmVariable.Simple('DB1', label='SID Name', is_mandatory=True, runtime=True)
DB_NAME = CalmVariable.Simple('app', label='DB Name', is_mandatory=True, runtime=True)
DB_PASSWORD = CalmVariable.Simple.Secret(DB_PASSWORD_VALUE, label='SYS/SYSTEM Password', is_mandatory=True, runtime=True)
# hidden parameters
DATABASE_PARAMETER = CalmVariable.Simple('LowProfile', is_hidden=True, runtime=False)
SOFTWARE_PROFILE = CalmVariable.Simple('Oracle', is_hidden=True, runtime=False)
ERA_IP = CalmVariable.Simple('10.42.32.40', label='ERA IP', is_mandatory=True, runtime=True, is_hidden=True)
DB_ID = CalmVariable.Simple('', is_hidden=True, runtime=False)
DBSERVER_ID = CalmVariable.Simple('', label='DB Server UUID', runtime=False, is_hidden=True)
DBSERVER_IP = CalmVariable.Simple('', label='DB Server IP Address', runtime=False, is_hidden=True)
|
[
"husain@alsayed.ws"
] |
husain@alsayed.ws
|
c94b9232f3c8ca6a0e3d7ed655454bc665e88dd0
|
b8a4a40e74f6a3eb10d4085738bb45337a9ffc14
|
/Django/Django-Intro/helloworld/helloworld/urls.py
|
02c86819dd6a80b8150eb583bfb1062aa697b02b
|
[] |
no_license
|
yassarq/Python
|
9d74ca8e0bfba22deaad5dd929bd42aedd2daa7f
|
873d37a78a64e5d114b6a6636e918dbfe4649982
|
refs/heads/master
| 2022-12-21T05:14:16.403127
| 2018-08-15T12:54:44
| 2018-08-15T12:54:44
| 144,732,748
| 0
| 1
| null | 2022-12-15T20:47:05
| 2018-08-14T14:38:19
|
Python
|
UTF-8
|
Python
| false
| false
| 903
|
py
|
"""helloworld URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
print('I am in urls.py')
urlpatterns = [
url(r'^$', include('apps.myapp.urls')),
url(r'^first_app/', include('apps.first_app.urls')),
url(r'^admin/', admin.site.urls),
]
|
[
"yassarq8@gmail.com"
] |
yassarq8@gmail.com
|
37a422676d4a244b72506e1607492a5259ca2403
|
d49719f8f3f652bdcd1c73ec2a100e55819408ec
|
/preprocess.py
|
a4903492b017eccc33f2c76de0ca794c7626dfbc
|
[] |
no_license
|
OB-0ne/basicRNN-midi-governingBodies
|
f2fed18ab7c2d0bce084c5dbb5f3fed6811680da
|
e3a9477fac374159581b1e04762a414de9532078
|
refs/heads/main
| 2023-02-16T01:39:00.991000
| 2021-01-10T03:38:11
| 2021-01-10T03:38:11
| 327,746,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,101
|
py
|
from DataManager import DataManager as midiDM
import numpy as np
# example to convert numpy to MIDI
# DM.npFile2MIDI('midi_e7800.npy','MIDI out/track1',AutoTimed=True,AutoTime=180)
# DM.npFile2MIDI('midi_e5150.npy','MIDI out/track2',AutoTimed=True,AutoTime=140)
# example to convert MIDI to numpy
song1 = midiDM('data/midi_heavyRain/01.mid')
song2 = midiDM('data/midi_heavyRain/02.mid')
song3 = midiDM('data/midi_heavyRain/03.mid')
improv = np.load('data/midi_heavyRain/nilou_improv.npy')
# song1.save_np('data/midi_heavyRain_processed/01')
# song2.save_np('data/midi_heavyRain_processed/02')
# song3.save_np('data/midi_heavyRain_processed/03')
songs = [song1, song2, song3]
x = []
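# stitch the songs together, separating them with 10 rows of [0,0,0,0]
# (presumably rests), then append the improvised take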
for song in songs:
x.extend(list(song.mid_npy))
x.extend([[0,0,0,0]]*10)
x.extend(list(improv))
x = np.array(x)
np.save('data/heavyRain_improv',np.array(x))
# DM.MIDIFile2np('data/midi_heavyRain/02.mid','data/midi_heavyRain_processed/02.mid')
# DM.MIDIFile2np('data/midi_heavyRain/03.mid','data/midi_heavyRain_processed/03.mid')
# [OB][NOTE]:
# Autotime can also modify the rhythm, try time signatures
|
[
"omkarbhatt8@gmail.com"
] |
omkarbhatt8@gmail.com
|
b941e3778cce7c032e8bbb9c7e53f2d3359b1853
|
39cdc11a378e2e8a0e9ab68157cd780efa820e21
|
/news_scrape.py
|
857c82c81d15997550979486b42e659999b0d9e3
|
[] |
no_license
|
wcstrickland/news_api
|
29f1072ba5fde5d0040b09b1855ff60921488f6c
|
83cf8998909e2e13381c790096c0b380e5c8d5a0
|
refs/heads/main
| 2023-02-12T14:33:19.998087
| 2021-01-13T16:33:00
| 2021-01-13T16:33:00
| 309,218,891
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,983
|
py
|
from bs4 import BeautifulSoup
import sqlite3
import requests
from datetime import datetime, timedelta
# noinspection PyUnboundLocalVariable
def insert_values(print_time_, topic_, headline_, summary_, url_):
"""
a semi hard coded function to insert values into a db
:param print_time_:
:param topic_:
:param headline_:
:param summary_:
:param url_:
:return:
"""
    sqlite_connection = None  # ensure the name is bound if connect() raises
    try:
sqlite_connection = sqlite3.connect('fox_pol.db')
cursor = sqlite_connection.cursor()
sqlite_insert_with_param = """INSERT INTO articles (print_date, topic,
headline, summary, url) VALUES (date(?), ?, ?, ?, ?)"""
data_tuple = (print_time_, topic_, headline_, summary_, url_)
cursor.execute(sqlite_insert_with_param, data_tuple)
sqlite_connection.commit()
print("Python Variables inserted successfully into sqlite table")
cursor.close()
except sqlite3.Error as sl_error:
print("Failed to insert Python variable into sqlite table", sl_error)
finally:
if sqlite_connection:
sqlite_connection.close()
# opens a db connection creates a table and closes connection
db = sqlite3.connect('fox_pol.db')
db.execute("CREATE TABLE IF NOT EXISTS articles (print_date DATE, "
"topic VARCHAR(100), "
"headline VARCHAR(100) UNIQUE, summary VARCHAR(100), url VARCHAR(100))")
db.close()
# requests html and creates soup object
source = requests.get('https://www.foxnews.com/politics', timeout=20).text
soup = BeautifulSoup(source, 'lxml')
# finds all article lists
article_lists = soup.find_all("div", {"class": "content article-list"})
# searches for articles in article lists
for div_tag in article_lists:
try:
article_tags = div_tag.find_all("article")
for tag in article_tags:
# ######## selectors bound to variables ########
time_posted_raw = tag.find('span', class_='time').text
if "mins" in time_posted_raw:
min_time = int(time_posted_raw[0:2].zfill(2))
U_time = (datetime.utcnow() - timedelta(minutes=min_time)).date()
elif "just" in time_posted_raw:
U_time = datetime.utcnow().date()
else:
hr_time = int(time_posted_raw[0:2].zfill(2))
U_time = (datetime.utcnow() - timedelta(hours=hr_time)).date()
topic = tag.find('span', class_='eyebrow').a.text
headline = tag.find('h4', class_='title').a.text
headline2 = tag.find('h4', class_='title').a
url = "https://www.foxnews.com" + headline2['href']
summary = tag.find('p', class_='dek').a.text
# ########## variables inserted into db via function ##########
insert_values(U_time, topic, headline, summary, url)
except AttributeError as error:
print("End of articles")
|
[
"noreply@github.com"
] |
wcstrickland.noreply@github.com
|
c75f1ef7743e5bb95c90b5855fb06779d536b8e7
|
097aba75cf2454977ab0481b9e39edc183effc22
|
/.svn/pristine/2a/2a1731c6088f103c91ca1143ae7e62358c3062f4.svn-base
|
ff9a34eacf11e49bb0956de2b315cf208ed09503
|
[] |
no_license
|
rvoorheis/PrintLabel
|
0b8d4228ed10e154b0fea3c6dbbfb0cfe9282456
|
0bf587de54bbc31273b432836c1b01cbb4cf08b0
|
refs/heads/master
| 2020-04-10T18:15:45.926145
| 2019-05-01T17:19:17
| 2019-05-01T17:19:17
| 161,198,671
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 718
|
__author__ = 'rvoorheis'
import os
class TempFile:
tempfilename = ""
def __init__(self):
self.tempfilename = "temp.job"
def writetempfile(self, rc, portname, labelfilename):
try:
f = open(self.tempfilename, mode="w")
f.write('LABEL "' + labelfilename + '", "' + rc.Printer + '"\n')
f.write('PRINTER "' + rc.Printer + '"\n')
f.write('PORT "' + portname + '"\n')
f.write('PRINT 1\n')
f.write('QUIT\n')
f.close()
return self.tempfilename
except Exception as e:
print ("Error Creating TempFile " + self.tempfilename)
print str(e)
quit(-4)
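# Usage sketch (rc is assumed to expose a .Printer attribute):
#   jobfile = TempFile().writetempfile(rc, "USB001", "mylabel.lbl")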
|
[
"rvoorheis@zebra.com"
] |
rvoorheis@zebra.com
|
|
7219eb06c8474ca221029a35808310a692c832b3
|
a9679a1fa26993fb1b83ab96b2fbd9477f2a087d
|
/musics/serializers.py
|
ccf670927a1b6f18a058ce3cb7ef50c230ba772b
|
[] |
no_license
|
toastding/restframework
|
def60fb711d9146a613ca5b913a72f86e8799d48
|
fd27e95807bf9166c263975e99e64b67bc734fc4
|
refs/heads/master
| 2022-11-16T10:46:49.756920
| 2020-07-15T12:15:54
| 2020-07-15T12:15:54
| 279,859,995
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 576
|
py
|
from rest_framework import serializers
from musics.models import Music
from django.utils.timezone import now
class ToUpperCaseCharField(serializers.CharField):
    def to_representation(self, value):
        return value.upper()
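# Usage sketch: declare e.g. `song = ToUpperCaseCharField()` on a serializer
# to return that field upper-cased.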
class MusicSerializer(serializers.ModelSerializer):
days_since_created = serializers.SerializerMethodField()
class Meta:
model = Music
# fields = '__all__'
fields = ('id', 'song', 'singer', 'last_modify_date', 'created', 'days_since_created')
def get_days_since_created(self, obj):
return (now() - obj.created).days
|
[
"ding02211995@gmail.com"
] |
ding02211995@gmail.com
|
80c7996609985e6f8a0ac4b2e4e4919e6ea9b0af
|
ec7a35ee0c1328b7c6dc42f8a49003a2545933a6
|
/school_management/urls.py
|
202b7754ed887e2bd88178e62ed0cc2374b8b5e7
|
[] |
no_license
|
kabir2350/school-routine-mngmnt-system
|
e15d8cfe0cc1f0c4e9f8114f6a826008b836f20e
|
c2f64056b1ce64ecaa411d3d3848c53be350e334
|
refs/heads/master
| 2022-08-01T08:53:26.640712
| 2020-05-16T14:17:24
| 2020-05-16T14:17:24
| 262,381,694
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('accounts/', include('accounts.urls')),
path('courses/', include('courses.urls')),
path('', include('pages.urls')),
path('search/', include('search.urls')),
path('classes/', include('classes.urls')),
path('batches/', include('batches.urls')),
]
|
[
"intisar2350@gmail.com"
] |
intisar2350@gmail.com
|
9202bbc7dd9390c64d6fd1a375263d510233e64f
|
ddf6419a7ab4132218022410ff4dff1fe444e850
|
/infrastructure.py
|
4dc3bc16cf8c418a4299c0955c5bff4197c1a151
|
[] |
no_license
|
maxdml/kanopya-ci
|
483f0d839a6b1e3a687ee92a9333448b3f51687e
|
b04c7a7727bda544cb7d7ef92df3da6600b6944e
|
refs/heads/master
| 2021-01-20T01:35:33.099318
| 2013-12-16T13:40:05
| 2013-12-16T13:40:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,168
|
py
|
from vagrant import VagrantEnvironment
from vboxmanage import VBoxManage
from shell import LocalShell
from hpprocurve import ProcurveSSHClient
from deploymentsolver import DeploymentSolver
import utils
from string import Template
import subprocess
import os
import logging
import shutil
import random
import json
import time
logger = logging.getLogger(__name__)
def new_infrastructure(infra_type):
""" factory to instanciate proper JobInfrastructure """
infrastructure = None
if infra_type == 'physical':
infrastructure = PhysicalInfrastructure()
elif infra_type == 'virtual':
infrastructure = VirtualInfrastructure()
else:
infrastructure = NoInfrastructure()
return infrastructure
class JobInfrastructure(object):
""" base class """
def __init__(self):
self.job = os.environ.get('JOB_NAME')
box = os.environ.get('BOX', 'debian-wheezy-amd64')
user = "hedera"
self.vagrant_data = {
'box': box,
'vmname': self.job,
'memory': 4096,
'cpus': 1,
'network': ':hostonly, "10.0.0.2"',
'ssh_port': 2222,
'vrde_port': 3333,
'user': user,
'mac1': self._get_random_mac().replace(':', ''),
'mac2': self._get_random_mac().replace(':', '')
}
self.workspace = '/var/lib/jenkins/jobs/{0}/workspace'.format(self.job)
self.vagrantenv = VagrantEnvironment(self.workspace)
def initialize(self, db):
# create job data entry in db if necessary
if not self.job in db.jobs.keys():
db.jobs[self.job] = {'net': None,
'infra': None,
'ssh_port': None,
'vrde_port': None,
'vlan': None}
# create result directory
result_dir = os.path.join(self.workspace, 'result')
shutil.rmtree(result_dir, ignore_errors=True)
os.makedirs(result_dir)
# Pass the environment variables inside the Vagrant VM
env_file = os.path.join(self.vagrantenv.vagrant_dir, 'environment.sh')
env_vars = ['MASTERIMAGE', 'GITBRANCH', 'KERNEL', 'WEBUI', 'TEST',
'KEEPALIVE', 'JOB_NAME', 'STOP_SERVICES', 'API_TEST_DIR']
with open(env_file, 'w') as export:
for var in env_vars:
value = os.environ.get(var, '')
line = 'export {0}="{1}"\n'.format(var, value)
export.write(line)
# copy some scripts...
shutil.copy("setup_and_run", self.vagrantenv.vagrant_dir)
shutil.copy("touch_run_and_unlock", self.vagrantenv.vagrant_dir)
def update(self, db):
""" retrieve information for kanopya vagrant box """
self.vagrantenv.update()
def get_network_conf(self, db):
""" retrieve the net config for the current job """
network, ip = None, None
if db.jobs[self.job]['net'] is None:
network, ip = db.new_network()
logger.debug('new network/ip : %s,%s', network, ip)
db.jobs[self.job]['net'] = (network, ip)
else:
network, ip = db.jobs[self.job]['net']
logger.debug('reusing network/ip %s/%s', network, ip)
return network, ip
def get_ssh_port(self, db):
""" retrieve the ssh port forwarding for the current job """
port = None
if db.jobs[self.job]['ssh_port'] is None:
port = db.new_ssh_port()
logger.debug('new ssh port : %s', port)
db.jobs[self.job]['ssh_port'] = port
else:
port = db.jobs[self.job]['ssh_port']
logger.debug('reusing ssh port %s', port)
return port
def get_vrde_port(self, db):
""" retrieve the vrde port forwarding for the current job """
port = None
if db.jobs[self.job]['vrde_port'] is None:
port = db.new_vrde_port()
logger.debug('new vrde port : %s', port)
db.jobs[self.job]['vrde_port'] = port
else:
port = db.jobs[self.job]['vrde_port']
logger.debug('reusing vrde port %s', port)
return port
def kanopya_setup_inputs(self, net, ip):
"""
Generate the file that contains the inputs for the Kanopya setup
"""
tmpl = Template(open('setup.inputs.tmpl').read())
inputs = os.path.join(self.vagrantenv.vagrant_dir, 'setup.inputs')
with open(inputs, 'w') as f:
f.write(tmpl.substitute({'network': net,
'ip': ip,
'interface': "eth1"}))
def kanopya_register_hosts(self, hosts):
"""
Generate the file that contains the hosts list for the
register_hosts.pl script
"""
shutil.copy("register_hosts.pl", self.vagrantenv.vagrant_dir)
hostsfile = os.path.join(self.vagrantenv.vagrant_dir, 'hosts.json')
with open(hostsfile, 'w') as f:
json.dump(hosts, f)
def clean(self):
logger.debug("clean infra")
self.vagrantenv.clean()
def __repr__(self):
value = str(self.vagrantenv)
return value
def _get_random_mac(self):
""" generate a random virtualbox mac address """
choice = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'A', 'B', 'C', 'D', 'E', 'F')
mac = "08:00:27"
for i in xrange(3):
mac += ":{0}{1}".format(random.choice(choice),
random.choice(choice))
return mac
class NoInfrastructure(JobInfrastructure):
def __init__(self):
JobInfrastructure.__init__(self)
def initialize(self, db):
"""
        NoInfrastructure initialization only needs to init the vagrant
environment with a new hostonly virtualnet
"""
JobInfrastructure.initialize(self, db)
network, ip = self.get_network_conf(db)
self.vagrant_data['network'] = ':hostonly, "{0}"'.format(ip)
self.vagrant_data['ssh_port'] = self.get_ssh_port(db)
self.vagrant_data['vrde_port'] = self.get_vrde_port(db)
self.vagrantenv.initialize(self.vagrant_data)
self.kanopya_setup_inputs(network, ip)
def clean(self, db):
JobInfrastructure.clean(self)
class VirtualInfrastructure(JobInfrastructure):
def __init__(self):
JobInfrastructure.__init__(self)
self.nbvms = int(os.environ.get('NBVMS'))
self.vms = []
self.vbox = VBoxManage(LocalShell())
def initialize(self, db):
JobInfrastructure.initialize(self, db)
shutil.copy("etherwake.py", self.vagrantenv.vagrant_dir)
network, ip = self.get_network_conf(db)
sshport = self.get_ssh_port(db)
vrdeport = self.get_vrde_port(db)
self.vagrant_data['network'] = ':hostonly, "{0}"'.format(ip)
self.vagrant_data['ssh_port'] = sshport
self.vagrant_data['vrde_port'] = vrdeport
self.vagrantenv.initialize(self.vagrant_data)
self._create_vms()
self.kanopya_setup_inputs(network, ip)
self.kanopya_register_hosts(self.vms)
def update(self, db):
JobInfrastructure.update(self, db)
# we set the correct hostonly iface for the precreated vms
vboxiface = self.vbox.get_hostonly_iface_name(self.vagrantenv.vm_id)
logger.debug("hostonly interface is %s", vboxiface)
for vm in self.vms:
name = vm['serial_number']
for i, iface in enumerate(vm['ifaces']):
logger.debug("update hostonlyadapter for iface %s on vm %s",
iface['name'], name)
self.vbox.set_hostonlyadapter(name, i+1, vboxiface)
def clean(self, db):
JobInfrastructure.clean(self)
self._destroy_vms()
def _create_vms(self):
""" create virtualbox vms for the infrastructure """
for i in xrange(self.nbvms):
new_vm_name = "{0}_{1}".format(self.job, i)
ifaces = []
for i in xrange(4):
ifaces.append({'name': "eth{0}".format(i),
'mac': self._get_random_mac(),
'pxe': 0,
'adapter_type': 'hostonlyadapter',
'adapter_iface': 'eth0'})
ifaces[0]['pxe'] = 1
logger.info("create virtualbox vm {0}".format(new_vm_name))
self.vbox.clone_vm('kanopyahost', new_vm_name, ifaces, cpus=4,
memory=4096)
self.vms.append({'serial_number': new_vm_name,
'core': 4,
'ram': 4294967296,
'ifaces': ifaces,
'harddisks': [{'device': '/dev/sda',
'size': '21474836480'}]})
def _destroy_vms(self):
""" delete virtualbox vms created for the infrastructure """
for vm in self.vms:
name = vm['serial_number']
logger.info("destroy virtualbox vm %s", name)
if name in self.vbox.list_runningvms(filter=name):
self.vbox.poweroff_vm(name)
time.sleep(3)
self.vbox.delete_vm(name)
def __repr__(self):
value = JobInfrastructure.__repr__(self)
value += "vms count: {0}".format(self.nbvms)
return value
class PhysicalInfrastructure(JobInfrastructure):
BRIDGE = 'eth1'
SWITCH_PORT = 2
def __init__(self):
JobInfrastructure.__init__(self)
self.booked_hosts = None
def initialize(self, db):
JobInfrastructure.initialize(self, db)
network, ip = self.get_network_conf(db)
sshport = self.get_ssh_port(db)
vrdeport = self.get_vrde_port(db)
vlan = self.get_vlan_conf(db)
bridge = "{0}.{1}".format(self.BRIDGE, vlan)
# determine physical hosts needed and book them
self.booked_hosts = self._book_hosts(db)
# apply vlan configuration
# on the switch...
switch = ProcurveSSHClient('procurve-switch.intranet.hederatech.com',
22, 'manager', 'manager')
for host in self.booked_hosts:
for iface in host['ifaces']:
if 'switch_port' not in iface.keys():
continue
port = iface['switch_port']
logger.debug("set switch port %s on vlan %s untagged",
port, vlan)
switch.set_untagged_port(port, vlan)
# on jenkins interface
utils.create_vlan_device(self.BRIDGE, str(vlan))
logger.debug("create vlan device on %s with vlan %s",
self.BRIDGE, vlan)
# set vagrant data
self.vagrant_data['network'] = ':bridged, ' + \
':bridge => "{0}", '.format(bridge) + \
':auto_config => false'
self.vagrant_data['ssh_port'] = sshport
self.vagrant_data['vrde_port'] = vrdeport
self.vagrantenv.initialize(self.vagrant_data)
self.kanopya_register_hosts(self.booked_hosts)
self.kanopya_setup_inputs(network, ip)
def update(self, db):
JobInfrastructure.update(self, db)
        # as vagrant does not configure the bridged interface, we do it here by hand
network, ip = self.get_network_conf(db)
logger.debug("apply ip configuration on the vm bridged interface eth1")
command = "sudo ip addr add {0}/24 dev eth1 ".format(ip) + \
"&& sudo ip link set eth1 up"
self.vagrantenv.command(command)
def clean(self, db):
JobInfrastructure.clean(self)
# remove vlan on the switch (move ports to vlan 1) and unbook hosts
switch = ProcurveSSHClient('procurve-switch.intranet.hederatech.com',
22, 'manager', 'manager')
vlan = 1
for host in self.booked_hosts:
for iface in host['ifaces']:
if 'switch_port' not in iface.keys():
continue
port = iface['switch_port']
logger.debug("set switch port %s on vlan %s untagged",
port, vlan)
switch.set_untagged_port(port, vlan)
host['job'] = None
self.booked_hosts = None
# remove vlan device
vlan_device = "{0}.{1}".format(self.BRIDGE, self.get_vlan_conf(db))
utils.remove_vlan_device(vlan_device)
logger.debug("remove vlan device %s", vlan_device)
def get_vlan_conf(self, db):
""" retrieve the vlan used by the current job """
vlan = None
if db.jobs[self.job]['vlan'] is None:
vlan = db.new_vlan()
logger.debug('new vlan : %s', vlan)
db.jobs[self.job]['vlan'] = vlan
else:
vlan = db.jobs[self.job]['vlan']
logger.debug('reusing vlan %s', vlan)
return vlan
def _book_hosts(self, db):
""" use deployment solver to determine required hosts """
lines = [line for line in os.environ.get('HOSTS').split('\n')
if len(line)]
free_hosts = db.get_available_hosts()
dsolver = DeploymentSolver(self.workspace)
booked_hosts = []
for constraints in lines:
dsolver.generate_hosts_file(free_hosts)
dsolver.generate_host_constraint(constraints)
index = dsolver.select_host()
if index != -1:
booked_hosts.append(free_hosts.pop(index))
else:
msg = "deployment_solver was enable to find a host " + \
"matching the constraints {0}".format(constraints)
raise RuntimeError()
for host in booked_hosts:
logger.info("book %s", host['serial_number'])
host['job'] = self.job
return booked_hosts
|
[
"sylvain.baubeau@hederatech.com"
] |
sylvain.baubeau@hederatech.com
|
ec7aca71bb9e1ef513133c86ac9318fa448dbedf
|
452a069d328ee36b27a5587c04f36a8b2b16682f
|
/basic-algorithms/problems-vs-algorithms/project/problem_1.py
|
07238f7f1fc7c8b3d9cb5d5b53c7d75e1bcd9e27
|
[] |
no_license
|
annahra/dsa-nanodegree
|
f0b437817d5b43cb51f60eb95a8152fd7a83c6dc
|
462950b1148e6dc984a086a3f1fb762ea9ed5271
|
refs/heads/main
| 2023-04-17T03:31:44.336392
| 2021-05-01T03:07:30
| 2021-05-01T03:07:30
| 333,619,594
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,014
|
py
|
"""
Problem 1: Finding the Square Root of an Integer
Version Date: April 7, 2021
Will only evaluate non-negative integers. If a negative integer is passed in,
the algorithm will return None.
"""
def sqrt(number):
"""
Calculate the floored square root of a number
Args:
number(int): Number to find the floored squared root
Returns:
int: Floored Square Root
"""
    if number < 0:
        return None
    if number < 2:
        return number
    lower_bound = 0
    upper_bound = number
    while lower_bound <= upper_bound:
        mid = (lower_bound + upper_bound) // 2
        if mid * mid <= number < (mid + 1) * (mid + 1):
            return mid
        elif number < mid * mid:
            upper_bound = mid - 1
        else:
            lower_bound = mid + 1
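# Worked example (added): sqrt(27) probes mid = 13, 6, 2, 4, 5 and returns 5,
# since 5*5 = 25 <= 27 < 36 = 6*6.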
def main():
# Test Case 1 - Perfect Squares
print("Test Case 1 - Perfect Squares")
print("The square root of 9 is", sqrt(9), "(expect 3)")
print("The square root of 0 is", sqrt(0), "(expect 0)")
print("The square root of 1 is", sqrt(1), "(expect 1)")
print("The square root of 16 is", sqrt(16), "(expect 4)")
print('End of Test Case 1\n')
# Test Case 2 - Non-squareable numbers
print("Test Case 2 - Non-squareable numbers")
print("The floored square root of 27 is", sqrt(27), "(expect 5)")
print("The floored square root of 15 is", sqrt(15), "(expect 3)")
print("The floored square root of 8 is", sqrt(8), "(expect 2)")
print('End of Test Case 2\n')
# Test Case 3 - Negative Numbers
print("Test Case 3 - Negative Numbers")
print("The floored square root of -1 is", sqrt(-1), "(expect None)")
print("The floored square root of -16 is", sqrt(-16), "(expect None)")
print('End of Test Case 3\n')
# Test Case 4 - Large Numbers
print("Test Case 4 - Large Numbers")
print("The floored square root of -1 is", sqrt(99960004), "(expect 9998)")
print('End of Test Case 4\n')
if __name__ == '__main__':
main()
|
[
"annah.ramones@gmail.com"
] |
annah.ramones@gmail.com
|
b3ed9c46c6aacf92d1ce00282e746fe372cad4e6
|
75997041750c215d6e78bc4a5e33a645c7b0d47a
|
/accounts/migrations/0001_initial.py
|
777697cc115c2f493d5324f9ba816e1acef59f54
|
[] |
no_license
|
jjnanthakumar/django_tenant_react
|
79112b08c27c77356ba24327eb129281a095b0bd
|
f3ddd2370abe2d92c5b8b25ef73a127bbdb5453b
|
refs/heads/main
| 2023-04-20T09:34:10.872043
| 2021-05-09T15:21:14
| 2021-05-09T15:21:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,843
|
py
|
# Generated by Django 2.2.16 on 2021-01-19 08:38
import django.core.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='Account',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('name', models.CharField(max_length=50)),
('username', models.CharField(max_length=25, unique=True, validators=[django.core.validators.RegexValidator(message='invalid username formate', regex='^[a-z0-9+]{2,25}$')])),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('is_superuser', models.BooleanField(default=False)),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
|
[
"majidsideahmed@gmail.com"
] |
majidsideahmed@gmail.com
|
ea8ad0e9f46cef45bace88cd3d2a2558be6733cd
|
4f20386eaa153326d70dbd90634f114a6fa8bbda
|
/tweetsWInstructions.py
|
27e1e9404b8d704ccb12772fc3d44051f8529bbc
|
[] |
no_license
|
PopGenHamburg/DaphniaStressordb
|
a1db69bf3ae323ea50ef0509601553087af02469
|
f5ce4212ec583924ba8c3aae07fb2800c3732ee1
|
refs/heads/master
| 2020-04-01T07:41:41.423077
| 2018-10-30T14:23:01
| 2018-10-30T14:23:01
| 152,999,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,666
|
py
|
#!/usr/bin/env python
# encoding: utf-8
import sys
sys.path.append ('/usr/lib/python2.7/dist-packages')
import tweepy #https://github.com/tweepy/tweepy
import csv
#Twitter API credentials
#to get the authorization credentials to access Twitter API, follow these steps
#Go to https://apps.twitter.com/ (Twitter Application Management) and log in, with your Twitter account
#Click “create New app” button
#Supply the necessary required fields, read and agree to the Twitter Developer Agreement
#Submit the form
#Your keys and access tokens are under the "keys and access tokens" tab.
consumer_key = "Your consumer key goes here"
consumer_secret = "Your consumer secret goes here"
access_key = "Your access key goes here"
access_secret = "Your access secret goes here"
def get_all_tweets(screen_name):
    #Twitter only allows access to a user's most recent 3240 tweets with this method
#authorize twitter, initialize tweepy
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)
#initialize a list to hold all the tweepy Tweets
alltweets = []
#make initial request for most recent tweets (200 is the maximum allowed count)
new_tweets = api.user_timeline(screen_name = screen_name,count=200)
#save most recent tweets
alltweets.extend(new_tweets)
#save the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
#keep grabbing tweets until there are no tweets left to grab
while len(new_tweets) > 0:
print "getting tweets before %s" % (oldest)
        #all subsequent requests use the max_id param to prevent duplicates
new_tweets = api.user_timeline(screen_name = screen_name,count=200,max_id=oldest)
#save most recent tweets
alltweets.extend(new_tweets)
#update the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
print "...%s tweets downloaded so far" % (len(alltweets))
#transform the tweepy tweets into a 2D array that will populate the csv
outtweets = [[tweet.id_str, tweet.created_at, tweet.text.encode("utf-8"),tweet.retweet_count,tweet.favorite_count] for tweet in alltweets]
#write the csv
with open('%s_tweets.csv' % screen_name, 'wb') as f:
writer = csv.writer(f)
writer.writerow(["id","created_at","text","retweet_count","favorite_count"])
writer.writerows(outtweets)
pass
if __name__ == '__main__':
#pass in the username of the account you want to download
get_all_tweets("wtrflea_papers")
|
[
"mathilde.cordellier@uni-hamburg.de"
] |
mathilde.cordellier@uni-hamburg.de
|
5ea798940494c0350c830a8ff9c3708b4261fdf3
|
4d2952580873bb2c92e0e75837f5589a2b41f77d
|
/part-02/ch-06-sorting/06-05.py
|
a1531f27fc905e40d7b538b046f29aaee7215384
|
[] |
no_license
|
junxdev/this-is-coding-test-with-python-by-ndb
|
0566cadf70c0b9d63669cc5f87585e3b3253aef1
|
ad9c32642f82fb04cc28ff1a9fa8843872328ce7
|
refs/heads/master
| 2023-01-11T21:52:58.160613
| 2020-11-16T12:32:17
| 2020-11-16T12:32:17
| 307,381,717
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 610
|
py
|
import time
start_time = time.time()
array = [5, 7, 9, 0, 3, 1, 6, 2, 4, 8]
def quick_sort(array):
    # stop when the list has one element or fewer
    if len(array) <= 1:
        return array
    pivot = array[0]  # the pivot element
    tail = array[1:]  # the rest of the list, excluding the pivot
    left_side = [x for x in tail if x <= pivot]
    right_side = [x for x in tail if x > pivot]
    # after partitioning, sort each side recursively and return the whole list
    return quick_sort(left_side) + [pivot] + quick_sort(right_side)
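# Worked example (added): on the list above, the first call picks pivot 5 and
# partitions the tail into left_side [0, 3, 1, 2, 4] and right_side [7, 9, 6, 8].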
print(quick_sort(array))
end_time = time.time()
print(end_time - start_time)
|
[
"junxdev@gmail.com"
] |
junxdev@gmail.com
|
1b86081eae191b4f3ff11c3913e2cf444725178d
|
9ec0b4634f354db7058fc1a3124ddb493f12f402
|
/Services/migrations/0003_delete_user.py
|
dc16269d3bddef86c8e6a86e6f77b59cb4879b95
|
[] |
no_license
|
ieSpring98/Joboonja-Services
|
7475a250275d9f9713f1c92e0e7cc15f7e82af27
|
d427a0b775f0ab8b30b5ccd1c7492c6f62ad9c74
|
refs/heads/master
| 2020-04-21T16:40:45.564247
| 2019-04-15T19:59:35
| 2019-04-15T19:59:35
| 169,710,186
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 290
|
py
|
# Generated by Django 2.1.5 on 2019-02-08 08:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Services', '0002_project_skills'),
]
operations = [
migrations.DeleteModel(
name='User',
),
]
|
[
"amir.karimi6610@gmail.com"
] |
amir.karimi6610@gmail.com
|
51d721d0d20e4647d42a0ff7aa530f4a4615848c
|
f949422eebc2fc477c886c5dc9c1ea11ed0c680e
|
/move/link_micrographs.py
|
8de7d6f7a6667532029e3ee9ee2f4c5897244cb4
|
[] |
no_license
|
ganjf/em_scripts
|
1d78693dad22317f416910a2717888efde1b8856
|
bf48a868f6a7c218110e2a925bd5afc3c6bb7f15
|
refs/heads/master
| 2023-08-28T19:44:26.541234
| 2021-10-03T04:17:49
| 2021-10-03T04:17:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,738
|
py
|
#!/home/jhdavis/anaconda3/bin/python
__author__ = "Joey Davis, www.jhdavislab.org"
__version__ = "1.1"
import os
import sys
import random
import argparse
def add_args(parser):
parser.add_argument('root_dir', help='The root path for your collection data (should contain GridSquare folders and typically is Images-Disc1')
parser.add_argument('output_path', type=str, help='path to the directory to write all of the symlinks. This directory must already exist.')
parser.add_argument('extension', type=str, help='extension of the filename to link - typically _fractions.tiff or _fractions.mrc')
parser.add_argument('fraction', type=float, help='fraction of the movies to link - typically 1.0 for all or 0.1 for 10%%.')
    parser.add_argument('--execute', default=False, action='store_true', help='perform the job instead of simply testing')
parser.add_argument('--unstructured', default=False, action='store_true', help='will not look for the "Data" folder and will instead link all files it finds in the root or lower that have the proper extension.')
return parser
def main(args):
rootdir = args.root_dir
extension = args.extension
fraction = args.fraction
outdir = args.output_path
    if outdir[-1] != '/':
        outdir += '/'
    if rootdir[-1] != '/':
        rootdir += '/'
num_total = 0
num_selected = 0
for root, subdirs, files in os.walk(rootdir):
if 'GridSquare' in root.split('/')[-1]:
print('Inspecting gridsquare: ' + root.split('/')[-2])
if 'Data' in root.split('/')[-2] or args.unstructured:
data_images = [selected_file for selected_file in files if selected_file[-len(extension):]==extension]
print('Found ' + str(len(data_images)) + ' data images.')
num = int(len(data_images)*fraction)
print('Selecting ' + str(num) + ' data images.')
selected_images = random.sample(data_images, num)
print('Creating ' + str(len(selected_images)) + ' symbolic links...')
for f in selected_images:
if args.execute:
os.symlink(root+f, outdir+f)
else:
                        print('**test** - with the --execute flag, would create symlink: ' + root+f + '-->' + outdir+f)
num_total+=len(data_images)
num_selected+=num
print('\nFound '+ str(num_total) + ' data images. Linked ' + str(num_selected) + '.')
if __name__ =='__main__':
argparser = argparse.ArgumentParser(
description='Create symlinks to a subset of files within a nested collection directory. Typically used to pull a subset of movies for initial test processings.')
add_args(argparser)
main(argparser.parse_args())
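# Example invocation (added; paths are hypothetical):
#   ./link_micrographs.py Images-Disc1 ./subset _fractions.tiff 0.1 --execute
# would symlink roughly 10% of the *_fractions.tiff movies found under
# Images-Disc1 into ./subset; without --execute it only prints what it would do.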
|
[
"jhdavis@mit.edu"
] |
jhdavis@mit.edu
|
81e0350b3c415c32cbd3221da8ceb5e7026cc5cb
|
971787d3f6dab8944fb1d757f6f49cffbbfeccec
|
/Day-2/HTTP_Application.py
|
5f5818418f26b177597420bed7df0816efeb003b
|
[] |
no_license
|
Mckale/Bootcamp-14
|
cc2079c0eb14a985d9ec3ccf003ffed615d83b74
|
a140410c696d1dd333d89f59f72c61f7bda3bf0f
|
refs/heads/master
| 2021-01-11T19:30:54.992018
| 2017-01-19T20:53:01
| 2017-01-19T20:53:01
| 79,161,118
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 333
|
py
|
import http.client
Retrieve = http.client.HTTPConnection("www.github.com")
Retrieve.request("GET", "/index.html")
r1 = Retrieve.getresponse()
print (r1.status, r1.reason)
data1 = r1.read()
Retrieve.request("GET", "/parrot.spam")
r2 = Retrieve.getresponse()
print(r2.status, r2.reason)
data2 = r2.read()
Retrieve.close()
|
[
"mckale@github.com"
] |
mckale@github.com
|
2da1c188042cfeff0d58754bdc51c1bf9bd23115
|
887e3ffe52ab30ad1af49ca6e4389304e74788f3
|
/examples/relation.py
|
3d16859b274f5f335e53ee8490aeca18a870e2fe
|
[] |
no_license
|
wacabanga/pdt
|
d8235597b11decd13a0eab3d73bca01b5791f5d1
|
6727b7cb42808e87bb27a512b2d94d67c4d38774
|
refs/heads/master
| 2021-01-13T08:25:09.282855
| 2017-05-23T20:06:30
| 2017-05-23T20:06:30
| 71,860,174
| 0
| 0
| null | 2016-10-25T04:42:31
| 2016-10-25T04:42:30
| null |
UTF-8
|
Python
| false
| false
| 5,140
|
py
|
import theano
from adt import *
from mnist import *
from ig.util import *
from train import *
from common import *
# theano.config.optimizer = 'None'
theano.config.optimizer = 'fast_compile'
def relation_adt(train_data, options, relation_shape=(1, 28, 28), push_args={},
pop_args={}, item_shape=(1, 28, 28), batch_size=512, nitems=3):
"""A relation represents a set of statements R(B,L)"""
# Types
Relation = Type(relation_shape)
Item = Type(item_shape)
# Interface
    push = Interface([Relation, Item], [Relation], 'push', **push_args)
    pop = Interface([Relation], [Relation, Item], 'pop', **pop_args)
    subrelation = Interface([Relation, Relation], [Boolean], 'subrelation', **pop_args)  # defined but unused below
    interfaces = [push, pop]
# train_outs
train_outs = []
gen_to_inputs = identity
# Consts
empty_relation = Const(Relation)
consts = [empty_relation]
# Vars
# relation1 = ForAllVar(Relation)
items = [ForAllVar(Item) for i in range(nitems)]
forallvars = items
# Axioms
axioms = []
batch_empty_relation = repeat_to_batch(empty_relation.input_var, batch_size)
relation = batch_empty_relation
for i in range(nitems):
(relation,) = push(relation, items[i].input_var)
pop_relation = relation
for j in range(i, -1, -1):
(pop_relation, pop_item) = pop(pop_relation)
axiom = Axiom((pop_item,), (items[j].input_var,))
axioms.append(axiom)
# Generators
generators = [infinite_batches(train_data, batch_size, shuffle=True)
for i in range(nitems)]
train_fn, call_fns = compile_fns(interfaces, consts, forallvars, axioms,
train_outs, options)
relation_adt = AbstractDataType(interfaces, consts, forallvars, axioms,
name='relation')
relation_pdt = ProbDataType(relation_adt, train_fn, call_fns, generators,
gen_to_inputs, train_outs)
return relation_adt, relation_pdt
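# Note (added): the axiom loop above encodes LIFO consistency - after pushing
# items 0..i onto the relation, popping i+1 times must yield them in reverse order.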
# Validation
def validate_what(data, batch_size, nitems, es, push, pop):
datalen = data.shape[0]
es = np.repeat(es, batch_size, axis=0)
data_indcs = np.random.randint(0, datalen-batch_size, nitems)
items = [data[data_indcs[i]:data_indcs[i]+batch_size]
for i in range(nitems)]
losses = []
relation = es
for i in range(nitems):
(relation,) = push(relation, items[i])
pop_relation = relation
for j in range(i, -1, -1):
(pop_relation, pop_item) = pop(pop_relation)
loss = mse(pop_item, items[j], tnp=np)
losses.append(loss)
print(losses)
def whitenoise_trick():
new_img = floatX(np.array(np.random.rand(1,1,28,28)*2**8, dtype='int'))/256
for i in range(1000):
loss, relation, img, new_relation, new_img = validate_relation(new_img, X_train, push, pop, 0, 512)
def relation_unrelation(n, relation, offrelation=0):
lb = 0 + offrelation
ub = 1 + offrelation
imgs = []
relations = []
relations.append(relation)
for i in range(n):
new_img = floatX(X_train[lb+i:ub+i])
imgs.append(new_img)
(relation,) = push(relation,new_img)
relations.append(relation)
for i in range(n):
(relation, old_img) = pop(relation)
relations.append(relation)
imgs.append(old_img)
return relations + imgs
def whitenoise(batch_size):
return floatX(np.array(np.random.rand(batch_size,1,28,28)*2**8, dtype='int'))/256
def main(argv):
# Args
global options
global test_files, train_files
global views, outputs, net
global push, pop
global X_train
global adt, pdt
global savedir
global sfx
cust_options = {}
cust_options['nitems'] = (int, 3)
cust_options['width'] = (int, 28)
cust_options['height'] = (int, 28)
cust_options['num_epochs'] = (int, 100)
cust_options['save_every'] = (int, 100)
cust_options['compile_fns'] = (True,)
cust_options['save_params'] = (True,)
cust_options['train'] = (True,)
cust_options['nblocks'] = (int, 1)
cust_options['block_size'] = (int, 2)
cust_options['batch_size'] = (int, 512)
cust_options['nfilters'] = (int, 24)
cust_options['layer_width'] = (int, 50)
cust_options['adt'] = (str, 'relation')
cust_options['template'] = (str, 'res_net')
options = handle_args(argv, cust_options)
X_train, y_train, X_val, y_val, X_test, y_test = load_datarelation()
sfx = gen_sfx_key(('adt', 'nblocks', 'block_size', 'nfilters'), options)
options['template'] = parse_template(options['template'])
adt, pdt = relation_adt(X_train, options, push_args=options,
nitems=options['nitems'], pop_args=options,
batch_size=options['batch_size'])
savedir = mk_dir(sfx)
load_train_save(options, adt, pdt, sfx, savedir)
push, pop = pdt.call_fns
    new_img = whitenoise(1)  # seed image (added); validate_relation_img_rec is expected from the starred imports
    loss, relation, img, new_relation, new_img = validate_relation_img_rec(new_img, X_train, push, pop, 0, 1)
if __name__ == "__main__":
main(sys.argv[1:])
|
[
"zennatavares@gmail.com"
] |
zennatavares@gmail.com
|
450555e846548af0bf06c6a936c89c198a03beea
|
761e9c1b9a32ea37dd677e6b5877418b90f49c88
|
/code_clone_detection/CodePathsStore.py
|
eef19b6d84eeb0b3f97bbd20040c9e57dd4e5bae
|
[] |
no_license
|
panchdevs/code-clone-detection
|
a91b669184a92a5a351db577cbbc8f64b7a942ed
|
4cf3b636c4d9745ce0296bc8e36bf2bab6f443a9
|
refs/heads/master
| 2021-01-21T13:34:00.596326
| 2016-03-13T13:49:42
| 2016-03-13T13:49:42
| 53,023,685
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,189
|
py
|
#!/usr/bin/python
from os import path, walk
from .ASTPath import ASTPath
from .suffix_tree import SuffixTree
import pickle
class CodePathsStore:
def __init__(self, codebase_path, file_extension):
self.codebase_path = codebase_path
self.file_extension = file_extension
self.code_paths_filepath = path.join(codebase_path, ".cdp-" + self.file_extension + ".pkl")
if not path.isfile(self.code_paths_filepath):
self.make_code_paths_file()
self.paths = self.get_code_paths_from_file()
def make_code_paths_file(self):
paths = {}
for root, dirs, files in walk(self.codebase_path):
for filename in files:
if filename.endswith(self.file_extension):
filename_path = path.join(root, filename)
filepaths = ASTPath(filename_path, self.file_extension).paths
string_paths = "".join(filepaths)
paths[filename_path] = SuffixTree(string_paths)
with open(self.code_paths_filepath, "wb") as code_paths_file:
pickle.dump(paths, code_paths_file)
def get_code_paths_from_file(self):
paths = {}
with open(self.code_paths_filepath, 'rb') as f:
paths = pickle.load(f)
return paths
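# Usage sketch (added; the codebase path is hypothetical):
#   store = CodePathsStore("/path/to/codebase", "py")
#   store.paths maps each matching source file to the SuffixTree of its joined
#   AST paths, cached on disk as .cdp-py.pkl inside the codebase directory.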
|
[
"prashantbaisla@gmail.com"
] |
prashantbaisla@gmail.com
|
1b5afbb818b734f6aec76bd316f0af965770928a
|
d6fcacedade0252ab1be1131f4f112a3cadddd91
|
/adafruit-circuitpython-bundle-6.x-mpy-20210806/examples/rgbled_pca9685.py
|
9f40b913544e586136603f52e344ace062c19266
|
[] |
no_license
|
ahope/iot_clock
|
cb4e549e14cac4b8a3fdf3c7fe878226c1d4eca0
|
9345797233c9b7b5b46c8c1c67527cd904caa534
|
refs/heads/master
| 2023-07-30T05:34:33.174576
| 2021-09-09T18:02:04
| 2021-09-09T18:02:04
| 393,815,709
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,724
|
py
|
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
import time
import board
import busio
import adafruit_pca9685
import adafruit_rgbled
# PCA9685 Initialization
i2c = busio.I2C(board.SCL, board.SDA)
pca = adafruit_pca9685.PCA9685(i2c)
pca.frequency = 60
# PCA9685 LED Channels
RED_LED = pca.channels[0]
GREEN_LED = pca.channels[1]
BLUE_LED = pca.channels[2]
# Create the RGB LED object
led = adafruit_rgbled.RGBLED(RED_LED, GREEN_LED, BLUE_LED, invert_pwm=True)
# Note: invert_pwm=True above is intended for common-anode RGB LEDs; pass
# invert_pwm=False (or omit the argument) for common-cathode wiring.
def wheel(pos):
# Input a value 0 to 255 to get a color value.
# The colours are a transition r - g - b - back to r.
if pos < 0 or pos > 255:
return 0, 0, 0
if pos < 85:
return int(255 - pos * 3), int(pos * 3), 0
if pos < 170:
pos -= 85
return 0, int(255 - pos * 3), int(pos * 3)
pos -= 170
return int(pos * 3), 0, int(255 - (pos * 3))
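# Worked examples (added): wheel(0) -> (255, 0, 0) red, wheel(85) -> (0, 255, 0)
# green, wheel(170) -> (0, 0, 255) blue; values in between blend the neighbours.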
def rainbow_cycle(wait):
for i in range(255):
i = (i + 1) % 256
led.color = wheel(i)
time.sleep(wait)
while True:
# setting RGB LED color to RGB Tuples (R, G, B)
print("setting color 1")
led.color = (255, 0, 0)
time.sleep(1)
print("setting color 2")
led.color = (0, 255, 0)
time.sleep(1)
print("setting color 3")
led.color = (0, 0, 255)
time.sleep(1)
# setting RGB LED color to 24-bit integer values
led.color = 0xFF0000
time.sleep(1)
led.color = 0x00FF00
time.sleep(1)
led.color = 0x0000FF
time.sleep(1)
# rainbow cycle the RGB LED
rainbow_cycle(0.01)
|
[
"ahslaughter@northeastern.edu"
] |
ahslaughter@northeastern.edu
|
ec73c0d48278c25b98343f03bd144de810da00f6
|
c3e625da16e9faf495434fb1bb3c3c598c200475
|
/Regressão Linear - biblioteca/regressão.py
|
ba157114fb71c037d1a6b0acd8f172f1565c8713
|
[] |
no_license
|
bfrancd236/Python
|
3195c184c868569f572aaac1396705245305ce0f
|
80d58ec64a93e0d3cd33e0a5c949374c840b4631
|
refs/heads/main
| 2023-07-10T07:30:38.993846
| 2021-08-10T20:48:21
| 2021-08-10T20:48:21
| 394,685,575
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 849
|
py
|
import numpy as np
from sklearn.linear_model import LinearRegression
quantidade = int(input("Enter the number of variables in the model: "))
x_ = list(range(0, quantidade))
y_ = list(range(0, quantidade))
print("Enter the", quantidade, "dependent variables:")
for n in range(0, quantidade):
    print("Enter value", n + 1)
    y_[n] = int(input())
print("Enter the", quantidade, "independent variables:")
for n in range(0, quantidade):
    print("Enter value", n + 1)
    x_[n] = int(input())
print("Enter the value you want to predict:")
prev = list(range(0, 1))
prev[0] = int(input())
x_ = np.asarray(x_)
x_ = x_.reshape(-1,1)
y_ = np.asarray(y_)
modelo = LinearRegression()
modelo.fit(x_,y_)
prev = np.asarray(prev)
prev = prev.reshape(-1,1)
resp = modelo.predict(prev.reshape(-1,1))
print("Resultado da Previsão: ", resp)
|
[
"franciscogedesnet@hotmail.com"
] |
franciscogedesnet@hotmail.com
|
2ab054d3bef8e63e5d980e090a83116fd5fedea3
|
eac5ebfa142b70b7c95ac56f0a0b3051447a4d11
|
/ProBenchBurner.py
|
4a5bca9ee326a629b13bd8d48f692723b4934d75
|
[] |
no_license
|
TheSeeven/pro-bench-burner
|
284cbd518d44a93458d315089f851aefe18be23b
|
8088a53b7faa021c955f583bd1c1bced379a33f9
|
refs/heads/master
| 2023-04-29T18:01:00.328130
| 2021-05-24T13:51:00
| 2021-05-24T13:51:00
| 354,531,403
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,309
|
py
|
from numpy.random import randint, default_rng, set_state
from time import perf_counter, sleep
from multiprocessing import Lock, Process, freeze_support, Value
from GUI import Interface
import threading
import os
import psutil
TEST_REPETITION = 3 # GUI
TEST_DIFICULTY = 50000 # GUI
TEST_SIZE = 5000 # GUI
NUMBER_OF_CORES = 1 # GUI
WORKING = Value("b", False)
BENCHMARK_THREAD = None
EXIT_FLAG = False
TESTS = None
GUI = Interface()
class ProcessHandler:
def __init__(self):
self.job = None
self.done = Value("b", False)
self.elapsed = Value("d", 0)
def start(self):
self.job.start()
def setProcess(self, process):
self.job = process
def solveBenchmark(self, TESTS, working, lock):
GUI.invisible()
while True:
with lock:
if working.value:
break
sleep(0.2)
t1 = perf_counter()
for i in range(TEST_REPETITION):
TESTS[0].solve()
TESTS[1].solve()
TESTS[2].solve()
self.elapsed.value = perf_counter() - t1
self.done.value = True
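    # Note (added): the polling loop above acts as a start barrier - each worker
    # spins until the shared "working" flag flips, so all cores begin timing together.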
def kill(self):
try:
self.job.terminate()
except:
pass
def setState():
global GUI, WORKING
if not WORKING.value:
GUI.canvas.configure(image=GUI.pictureBusy)
GUI.canvas.photo_ref = GUI.pictureBusy
GUI.interface.iconbitmap(GUI.icon_busy)
GUI.button_start.configure(background="#a31515")
GUI.button_start.configure(text="Stop Benchmark")
GUI.interface.title("ProBenchBurner - Busy")
else:
WORKING.value = False
GUI.canvas.configure(image=GUI.pictureReady)
GUI.canvas.photo_ref = GUI.pictureReady
GUI.interface.iconbitmap(GUI.icon_ready)
GUI.button_start.configure(background="#1f7839")
GUI.button_start.configure(text="Start Benchmark")
GUI.interface.title("ProBenchBurner - Idle")
GUI.interface.update_idletasks()
class Test:
def __init__(self):
self.size = TEST_SIZE
class FloatingPointsBenchmark(Test):
def __init__(self):
super().__init__()
self.arr = []
def prepare(self):
self.arr = default_rng().random((self.size,))
def solve(self):
for i in range(TEST_DIFICULTY):
temp = (self.arr + self.arr - self.arr + self.arr) * self.arr
class IntegersPointsBenchmark(Test):
def __init__(self):
super().__init__()
self.arr = []
def prepare(self):
self.arr = randint(2147483647, size=self.size)
def solve(self):
for i in range(TEST_DIFICULTY):
temp = (self.arr + self.arr - self.arr + self.arr) * self.arr
class MatrixAdditionBenchmark(Test):
def __init__(self):
super().__init__()
self.matrix = []
def prepare(self):
self.matrix = [default_rng().random((self.size,)) for b in range(self.size)]
def solve(self):
for i in range(TEST_DIFICULTY):
temp = (self.matrix + self.matrix) * 3
def memoryUsage():
return psutil.Process(os.getpid()).memory_info().rss / 1024 ** 2
def generateBenchmark():
global TESTS
try:
def generateFloatingTest():
            _TEST_FLOATING_POINTS = FloatingPointsBenchmark()
_TEST_FLOATING_POINTS.prepare()
TESTS.append(_TEST_FLOATING_POINTS)
def generateIntegerTest():
_TEST_INTEGERS_POINTS = IntegersPointsBenchmark()
_TEST_INTEGERS_POINTS.prepare()
TESTS.append(_TEST_INTEGERS_POINTS)
def generateMatrixTest():
            _TEST_MATRIX_ADD = MatrixAdditionBenchmark()
_TEST_MATRIX_ADD.prepare()
TESTS.append(_TEST_MATRIX_ADD)
generateIntegerTest()
generateFloatingTest()
generateMatrixTest()
    except Exception:
        # benchmark generation is best-effort; any failure simply aborts early
        return
def ActiveProceses(processList, lock):
with lock:
for i in processList:
if not i.done.value:
return True
return False
def getMaxTime(processList):
result = 0.0
for i in processList:
if i.elapsed.value > result:
result = i.elapsed.value
return result
# TODO: run this in a global thread with an option to kill it
def StartBenchmark(stop):
global EXIT_FLAG, TESTS, NUMBER_OF_CORES, TEST_REPETITION, TEST_DIFICULTY, TEST_SIZE, WORKING
GUI.label_result.configure(text="Test results:")
NUMBER_OF_CORES = int(GUI.spinbox_cores.get())
TEST_REPETITION = int(GUI.spinbox_repetition.get())
TEST_DIFICULTY = int(GUI.spinbox_dificulty.get())
TEST_SIZE = int(GUI.spinbox_size.get())
TESTS = []
mem_initial = memoryUsage() * float(NUMBER_OF_CORES)
TestsGenerator = threading.Thread(
target=generateBenchmark, name="BenchmarkGenerator", daemon=True
)
TestsGenerator.start()
while TestsGenerator.is_alive():
if EXIT_FLAG:
TESTS = None
EXIT_FLAG = False
return
sleep(0.8)
mem = (memoryUsage() * float(NUMBER_OF_CORES)) - mem_initial
lock = Lock()
processes = []
for i in range(NUMBER_OF_CORES):
if not EXIT_FLAG:
processes.append(ProcessHandler())
processes[i].setProcess(
Process(
target=processes[i].solveBenchmark,
args=(TESTS, WORKING, lock),
name="Core{nr}".format(nr=str(i)),
)
)
else:
for j in processes:
j.kill()
EXIT_FLAG = False
return
for i in processes:
if EXIT_FLAG:
for j in processes:
j.kill()
EXIT_FLAG = False
return
i.start()
sleep(0.2)
with lock:
WORKING.value = True
while ActiveProceses(processes, lock):
if EXIT_FLAG:
EXIT_FLAG = False
for j in processes:
j.kill()
return
sleep(0.2)
maxTime = getMaxTime(processes)
textResult = GUI.label_result.cget(
"text"
) + " Benchmark elapsed in {f} seconds and used {ram} MB RAM".format(
f="{0:.4f}".format(maxTime), ram=str(mem)
)
setState()
GUI.label_result.configure(text=textResult)
EXIT_FLAG = False
WORKING.value = False
def BenchmarkButton():
global BENCHMARK_THREAD, EXIT_FLAG, WORKING
if BENCHMARK_THREAD is not None:
if not BENCHMARK_THREAD.is_alive():
setState()
BENCHMARK_THREAD = threading.Thread(
target=StartBenchmark, args=(lambda: EXIT_FLAG,)
)
BENCHMARK_THREAD.start()
else:
WORKING.value = True
EXIT_FLAG = True
setState()
while BENCHMARK_THREAD.is_alive():
pass
BENCHMARK_THREAD = None
else:
setState()
BENCHMARK_THREAD = threading.Thread(
target=StartBenchmark, args=(lambda: EXIT_FLAG,)
)
BENCHMARK_THREAD.start()
GUI.set_button(lambda: BenchmarkButton())
if __name__ == "__main__":
freeze_support()
GUI.interface.iconbitmap(GUI.icon_ready)
GUI.interface.mainloop()
|
[
"perianu.leon@outlook.com"
] |
perianu.leon@outlook.com
|
282502a362852b945b41579e152fc9e50dd941db
|
1f02a75b496122a5be74c3f636c972f54bcae6f6
|
/flask-blog/sql.py
|
3fd3820f15042c3af8f4fd9bfd6f7074ff1bbeaa
|
[] |
no_license
|
TheDancerCodes/Birika-Tutorials
|
f5941f88615e4887d665fb3371678d66db424441
|
47fa38191ed6f0758794f845b28cc84d5f71188e
|
refs/heads/master
| 2021-01-21T11:19:11.721789
| 2017-03-24T22:19:53
| 2017-03-24T22:19:53
| 83,554,712
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 819
|
py
|
# sql.py - Create a SQLite3 table and populate it with data
import sqlite3
# create a new database if the database doesn't already exist
with sqlite3.connect("blog.db") as connection:
# get a cursor object used to execute SQL commands
c = connection.cursor()
# create the table
c.execute("""CREATE TABLE posts
(title TEXT, post TEXT)
""")
# insert dummy data into the table
# Notice how we escaped the apostrophes in the last two INSERT statements.
c.execute('INSERT INTO posts VALUES("Initial Post", "Welcome to Birika Tuts.")')
c.execute('INSERT INTO posts VALUES("Lorem Ipsum", "Lorem Ipsum is bae.")')
c.execute('INSERT INTO posts VALUES("Excellent", "I\'m excellent.")')
c.execute('INSERT INTO posts VALUES("Okay", "I\'m okay.")')
|
[
"rojtaracha@gmail.com"
] |
rojtaracha@gmail.com
|
cc23a1e5c8b8d613e361b175a0fb49c35abf5cf6
|
58aeab0094204015648135dc8e99009172e87150
|
/Xiao_bai/wsgi.py
|
cfaa56b1024fbb86d1646f64877922308679f2e9
|
[] |
no_license
|
devilhan/ebook
|
7df2c28f42dfeccd15a470e4e72121b6e85f63d7
|
c7c151fafa23759e599845823d4b81a2f0929ca9
|
refs/heads/master
| 2023-02-04T07:35:42.154709
| 2020-12-20T16:18:12
| 2020-12-20T16:18:12
| 323,112,445
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
"""
WSGI config for Xiao_bai project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Xiao_bai.settings")
application = get_wsgi_application()
|
[
"btmeiju@126.com"
] |
btmeiju@126.com
|
a313bebefeefd285f55734e670f9e8fe5bb455f2
|
6c609416e491687b0f578e70baf69e15a91af213
|
/Moj_projekt/Moj_projekt/wsgi.py
|
7bac2a4b26037e1497b26993e0d39b5f7086c462
|
[] |
no_license
|
konradcurylo/nieruchomosci--Projekt-Django
|
86d9aeac2a45894c5a8332344f40b4a3a39f5036
|
ee754aa12b8fabe8d29516d799ab78b8286917e6
|
refs/heads/master
| 2020-05-18T13:52:30.106941
| 2019-05-01T17:40:22
| 2019-05-01T17:40:22
| 184,454,566
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
"""
WSGI config for Moj_projekt project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Moj_projekt.settings')
application = get_wsgi_application()
|
[
"uppercut1991@gmail.com"
] |
uppercut1991@gmail.com
|
3d08fc428da24743957eb5c760d0fcc418aedbc4
|
2ff742bab67990c5df3a9cadb6ea37e0f7056a2e
|
/adidascs/settings.py
|
06e5d2988fdea19fcc870b648c6ed4e184872a4c
|
[] |
no_license
|
ceyhunkerti/adidascs
|
a62dc52c6149c3b56f094cecf63bfcedb331dcc6
|
227342f0300f57274ab2556ad9eda16ce44c8114
|
refs/heads/main
| 2023-03-30T15:18:21.507518
| 2021-03-29T22:17:26
| 2021-03-29T22:17:26
| 352,798,498
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 746
|
py
|
from os import environ as env
from dotenv import load_dotenv
load_dotenv()
APP_NAME = env.get("ADIDASCS_APP_NAME", "Adidas Case Study")
PAGE_VIEWS_SCHEMA = env.get(
"ADIDASCS_PAGE_VIEWS_SCHEMA", "user_id INT, event_date DATE, web_pageid INT"
)
PAGE_VIEWS_FILE = env.get(
"ADIDASCS_PAGE_VIEWS_FILE",
"/home/ceyhun/projects/lab/adidas/cases-study/data/page-views.csv",
)
PAGE_SCHEMA = env.get("ADIDASCS_PAGE_SCHEMA", "web_pageid INT, webpage_type STRING")
PAGES_FILE = env.get(
"ADIDASCS_PAGES_FILE",
"/home/ceyhun/projects/lab/adidas/cases-study/data/pages.csv",
)
DATE_FORMAT = env.get("ADIDASCS_DATE_FORMAT", "dd/MM/yyyy HH:mm")
OUTPUT_PATH = env.get("ADIDASCS_OUTPUT_PATH", "/home/ceyhun/projects/lab/adidas/output")
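# Note (added): every setting above has a default but can be overridden via the
# environment (or a .env file picked up by load_dotenv), e.g.
#   export ADIDASCS_DATE_FORMAT="yyyy-MM-dd HH:mm"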
|
[
"ceyhun.kerti@bluecolor.io"
] |
ceyhun.kerti@bluecolor.io
|
f918545ce35f839d36c5e887095386cc6014665a
|
30cdaf2a544c1cfb39bfaec56c9356573ea2463d
|
/learning_site/courses/models.py
|
8b322a1abfd9f431f1da97f560e8c8fb1c993854
|
[] |
no_license
|
michaelnwani/python
|
d6e8ef39840e33a9927de171ab38a96351dbc73b
|
8e344ed0891372c547f44cc4d3beb6c3de86bcbc
|
refs/heads/master
| 2021-01-20T22:44:30.413168
| 2015-10-08T22:02:22
| 2015-10-08T22:02:22
| 42,347,714
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 632
|
py
|
from django.db import models
# Create your models here.
class Course(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
title = models.CharField(max_length=255)
description = models.TextField()
def __str__(self):
return self.title
class Step(models.Model):
title = models.CharField(max_length=255)
description = models.TextField()
content = models.TextField(blank=True, default='')
order = models.IntegerField(default=0)
course = models.ForeignKey(Course)
class Meta:
ordering = ['order',]
def __str__(self):
return self.title
|
[
"kmichael24@gmail.com"
] |
kmichael24@gmail.com
|
fbc37d0881d0e6722692b00194bbbe3e164e9ad6
|
8521639df898f4186a0c9c74f96d1e4e38fe358a
|
/mysite2/oto/models.py
|
865270b393206cf958b90a8bd75eebb4d707d169
|
[] |
no_license
|
miyadream250/django
|
5514d7454463ef47b04d6c46ea95c120ef84537b
|
e8509234377202e9dacbcc1d4ac9281701c93af9
|
refs/heads/master
| 2023-05-27T19:38:06.277812
| 2021-06-01T03:12:58
| 2021-06-01T03:12:58
| 371,248,212
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 302
|
py
|
from django.db import models
# Create your models here.
class Author(models.Model):
name = models.CharField("作者姓名", max_length=15)
class Wife(models.Model):
name = models.CharField("妻子姓名", max_length=15)
author = models.OneToOneField(Author, on_delete=models.CASCADE)
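# Note (added): the OneToOneField gives each Author at most one Wife; wife.author
# walks to the Author, and the reverse accessor author.wife raises
# Wife.DoesNotExist until a matching Wife row has been saved.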
|
[
"miyadream250@gmail.com"
] |
miyadream250@gmail.com
|
2af6f6fc860cb085fa6aff4f4516914865179a48
|
5dd47abf7061201d9378e73e51f08fbb314ba2fd
|
/envdsys/envdatasystem/migrations/0045_alter_platform_platform_type.py
|
c55f9fd9afb575fc4819aa67554abbccffa73e55
|
[
"Unlicense"
] |
permissive
|
NOAA-PMEL/envDataSystem
|
4d264ae5209015e4faee648f37608d68a4461d0a
|
4db4a3569d2329658799a3eef06ce36dd5c0597d
|
refs/heads/master
| 2023-02-23T22:33:14.334737
| 2021-07-22T01:09:16
| 2021-07-22T01:09:16
| 191,809,007
| 1
| 0
|
Unlicense
| 2023-02-08T00:45:54
| 2019-06-13T17:50:03
|
Python
|
UTF-8
|
Python
| false
| false
| 572
|
py
|
# Generated by Django 3.2.3 on 2021-07-15 15:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('envdatasystem', '0044_auto_20210715_1511'),
]
operations = [
migrations.AlterField(
model_name='platform',
name='platform_type',
field=models.CharField(choices=[('STATION', 'Station/Lab'), ('UAS', 'UAS'), ('MOORING', 'Mooring'), ('SHIP', 'Ship'), ('AIRCRAFT', 'Aircraft')], default='STATION', max_length=10, verbose_name='Platform Type'),
),
]
|
[
"derek.coffman@noaa.gov"
] |
derek.coffman@noaa.gov
|
ba5a34cd37d73a6d30461971014edb713cfd7d99
|
a9eadc9f1967fd02fff9edb7ba7e4f0abd82ef92
|
/B4APItest/login_register.py
|
d4cbdfe962b4c926a9ebe54bdd8f74afdfff4d6e
|
[] |
no_license
|
Zhenjunwen/mohu_testcase
|
2c1eea8067381ba79dfd7ae3b3ff7b50ae37f2ab
|
5276405d22ece61f1785c74cda288f868fdb73e1
|
refs/heads/master
| 2022-12-17T00:31:02.057819
| 2020-09-04T02:03:29
| 2020-09-04T02:03:29
| 249,363,262
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,009
|
py
|
#coding=utf-8
import json
from API_test import RunMain
import hashlib
from B1APItest.authentication_KYC import authentication_get_kyc_info
from DB_config import DB
from B4APItest.signature import get_signture
from log import out_log
import configparser
cf = configparser.ConfigParser()
# path to the config file
cf.read("F:\mohu-test\configfile\B4config.cfg")
B4_url = cf.get("url", "url")
token_wen = cf.get('token', 'token_wen')
token_junxin = cf.get('token', 'token_junxin')
token_guoliang = cf.get('token', "token_guoliang")
H5_apikey = cf.get("Apikey", "H5_apikey")
H5_apisecret = cf.get("Apikey", "H5_apisecret")
PC_apikey = cf.get("Apikey", "PC_apikey")
PC_apisecret = cf.get("Apikey", "PC_apisecret")
Android_apikey = cf.get("Apikey", "Android_apikey")
Android_apisecret = cf.get("Apikey", "Android_apisecret")
IOS_apikey = cf.get("Apikey", "IOS_apikey")
IOS_apisecret = cf.get("Apikey", "IOS_apisecret")
host = cf.get("Mysql_DataBase","host")
port = int(cf.get("Mysql_DataBase","port"))
user = cf.get("Mysql_DataBase","user")
password = cf.get("Mysql_DataBase","password")
database = cf.get("Mysql_DataBase","db")
def send_sms(sms_type,account,dialing_code="86",token="",language="zh"):
    # Send an SMS verification code; on success the server returns a verification ID
    url = "%s/api/v1/send/sms" % B4_url
    body = {
        "type": sms_type,  # code type: 1=register 2=login 3=reset login pwd 4=change login pwd 5=reset trade pwd 6=add payout method 7=wallet withdrawal 8=apply ApiKey 9=edit ApiKey 10=bind Google Authenticator
        "dialing_code": dialing_code,  # international dialing code
        "account": account,
        "token": token,  # user token, required when type > 3
        "language": language  # language: "zh"=Simplified Chinese, "en"=English, default "zh"
}
run = RunMain(url=url, params=None, data=body,
headers=get_signture(Android_apikey,Android_apisecret,body), method='POST')
out_log(url,body,json.loads(run.response))
code = json.loads(run.response)["code"]
# print(json.loads(run.response))
if code == 1000:
verification_id = json.loads(run.response)["data"]["verification_id"]
print(verification_id)
return verification_id
elif code == 2994:
wait_time = json.loads(run.response)["data"]["wait_time"]
print("重新获取验证需等待%d秒"%wait_time)
else:
print(json.loads(run.response))
def send_email_sms(sms_type,account,token="",language="zh"):
    # Send an email verification code; on success the server returns a verification ID
    url = "%s/api/v1/send/mail" % B4_url
    body = {
        "type": sms_type,  # code type: 1=register 2=login 3=reset login pwd 4=change login pwd 5=reset trade pwd 6=add payout method 7=wallet withdrawal 8=apply ApiKey 9=edit ApiKey 10=bind Google Authenticator
        "account": account,
        "token": token,  # user token, required when type > 3
        "language": language  # language: "zh"=Simplified Chinese, "en"=English, default "zh"
}
run = RunMain(url=url, params=None, data=body,
headers=get_signture(Android_apikey,Android_apisecret,body), method='POST')
out_log(url,body,json.loads(run.response))
# print(json.loads(run.response))
code = json.loads(run.response)["code"]
# print(code)
# print(json.loads(run.response))
if code == 1000:
verification_id = json.loads(run.response)["data"]["verification_id"]
# print(verification_id)
return verification_id
elif code == 2994:
wait_time = json.loads(run.response)["data"]["wait_time"]
print("重新获取验证需等待%d秒"%wait_time)
else:
print(json.loads(run.response))
def register(account,password,verification_id,verification_code,type,dialing_code="",invitation_code="",platform="2"):
    # Register a new account
    url = "%s/api/v1/user/register" % B4_url
    password = str(hashlib.sha256(password.encode('utf-8')).hexdigest())
    body = {
        "account": account,
        "password": password,
        "verification_id": verification_id,  # verification ID returned by the server after the code was sent
        "type": type,  # account type: 1=phone number 2=email address
        "dialing_code": dialing_code,  # international dialing code, only used when type=1
        "verification_code": verification_code,  # the verification code
        "invitation_code": invitation_code,  # invitation code (optional)
        "platform": platform  # client type: 1=mobile 2=PC
}
run = RunMain(url=url, params=None, data=body,
headers=get_signture(Android_apikey,Android_apisecret, body), method='POST')
out_log(url,body,json.loads(run.response))
# print(password)
code = json.loads(run.response)["code"]
if code == 1000:
token = json.loads(run.response)["data"]["token"]
return token
else:
print(json.loads(run.response))
def login_step1(account,password,type,dialing_code=""):
url = "%s/api/v1/user/login/step1" % B4_url
password = str(hashlib.sha256(password.encode('utf-8')).hexdigest())
body = {
"type":type, #账号类型,1=手机号码 2=邮箱地址
"dialing_code":dialing_code, #国际电话区号,仅当type=1 时有效
"account":account,
"password":password #SHA256加密后的登录密码
}
run = RunMain(url=url, params=None, data=body,
headers=get_signture(H5_apikey, H5_apisecret, body), method='POST')
out_log(url,body,json.loads(run.response))
# print(password)
code = json.loads(run.response)["code"]
if code == 1000:
verification_token = json.loads(run.response)["data"]["verification_token"]
# print(verification_token)
return verification_token
else:
print(json.loads(run.response))
def login_step2(verification_token,verification_id,verification_code,account,platform="2",dialing_code=""):
url = "%s/api/v1/user/login/step2" % B4_url
body = {
"verification_token":verification_token, # 登录步骤1验证通过后返回的登录验证令牌
"verification_code":verification_code, # 验证码
"verification_id" : verification_id, # 验证码发送成功后服务器返回的验证码ID
"account":dialing_code+account, #账号(国际电话区号+手机号码/邮箱地址)
"platform":platform #终端类型,1=移动端 2=PC端
}
run = RunMain(url=url, params=None, data=body,
headers=get_signture(H5_apikey, H5_apisecret, body), method='POST')
out_log(url,body,json.loads(run.response))
code = json.loads(run.response)["code"]
if code == 1000:
token = json.loads(run.response)["data"]["token"]
return token
else:
print(json.loads(run.response))
def validate_login_pwd(token,password):
    # Verify that the login password is correct
url = "%s/api/v1/user/validate_login_pwd" % B4_url
password = str(hashlib.sha256(password.encode('utf-8')).hexdigest())
body={
"token":token,
"password":password
}
run = RunMain(url=url, params=None, data=body,
headers=get_signture(H5_apikey, H5_apisecret, body), method='POST')
out_log(url,send_msg=body,response_msg=json.loads(run.response))
print(password)
print(json.loads(run.response))
def modify_login_pwd(token, password, account, dialing_code=""):
    # Change the login password
url = "%s/api/v1/user/modify_login_pwd" % B4_url
db = DB('mysql.b4dev.xyz', 3306, 'b4_api', 'fGFcqRkHC5D2z^b^', 'b4') # B4devDB
verification_id = send_email_sms(sms_type="4", account=account, token=token, language="zh")
# verification_id = send_sms(sms_type="4",account=account,dialing_code=dialing_code,token=token,language="zh")
verification_code = db.query(
"SELECT verification_code FROM `user_verification_code` WHERE user_account = '%s' ORDER BY code_over_time DESC LIMIT 1" % (dialing_code + account))[0][0]
print(verification_code)
password = str(hashlib.sha256(password.encode('utf-8')).hexdigest())
body = {
"token": token,
"password": password,
"verification_code": verification_code,
"verification_id": verification_id
}
run = RunMain(url=url, params=None, data=body,
headers=get_signture(H5_apikey, H5_apisecret, body), method='POST')
out_log(url, send_msg=body, response_msg=json.loads(run.response))
# print(password)
print(json.loads(run.response))
def reset_login_pwd(password, account,token="",dialing_code=""):
    # Reset the login password
url = "%s/api/v1/user/reset_login_pwd" % B4_url
db = DB('mysql.b4dev.xyz', 3306, 'b4_api', 'eYKRj3Vp@zM0SGWj', 'b4') # B4devDB
# verification_id = send_email_sms(sms_type="4", account=account, token=token, language="zh")
verification_id = send_sms(sms_type="3",account=account,dialing_code=dialing_code,token=token,language="zh")
verification_code = db.query(
"SELECT verification_code FROM `user_verification_code` WHERE user_account = '%s' ORDER BY code_over_time DESC LIMIT 1" % (dialing_code + account))[0][0]
print(verification_code)
password = str(hashlib.sha256(password.encode('utf-8')).hexdigest())
print(password)
body = {
"token": token,
"account":account+dialing_code,
"password": password,
"verification_code": verification_code,
"verification_id": verification_id
}
run = RunMain(url=url, params=None, data=body,
headers=get_signture(H5_apikey, H5_apisecret, body), method='POST')
out_log(url, send_msg=body, response_msg=json.loads(run.response))
# print(password)
print(json.loads(run.response))
def online_modify_login_pwd(token,password,account,dialing_code=""):
    # Change the login password (production environment)
url = "%s/api/v1/user/modify_login_pwd" % B4_url
# verification_id = send_email_sms(sms_type="4", account=account, token=token, language="zh")
verification_id = send_sms(sms_type="4", account=account, dialing_code=dialing_code, token=token, language="zh")
    verification_code = input("verification code: ")
password = str(hashlib.sha256(password.encode('utf-8')).hexdigest())
body={
"token":token,
"password":password,
"verification_code":verification_code,
"verification_id":verification_id
}
run = RunMain(url=url, params=None, data=body,
headers=get_signture(H5_apikey, H5_apisecret, body), method='POST')
out_log(url,send_msg=body,response_msg=json.loads(run.response))
# print(password)
print(json.loads(run.response))
def user_email_login(sms_type,account,password,type="2"):
    # dev: email login
verification_token = login_step1(account=account,password=password,type=type)
verification_id = send_email_sms(sms_type=sms_type,account=account)
    db = DB('mysql.b4dev.xyz', 3306, 'b4_api', 'fGFcqRkHC5D2z^b^', 'b4') # B4devDB
verification_code = db.query(
"SELECT verification_code FROM user_verification_code WHERE user_account = '%s' ORDER BY code_over_time DESC LIMIT 1" % account)[0][0]
token = login_step2(verification_code=verification_code,verification_token=verification_token,verification_id=verification_id,account=account)
print(token)
return token
def user_phone_login(sms_type,account,password,type="1",dialing_code="86"):
    # dev: phone login
verification_token = login_step1(account=account,password=password,type=type,dialing_code=dialing_code)
verification_id = send_sms(sms_type=sms_type,account=account,dialing_code=dialing_code)
db = DB('mysql.b4dev.xyz', 3306, 'b4_api', 'eYKRj3Vp@zM0SGWj', 'b4') # B4devDB
verification_code = db.query("SELECT verification_code FROM user_verification_code WHERE user_account = '%s' ORDER BY code_over_time DESC LIMIT 1" % (dialing_code+account))[0][0]
token = login_step2(verification_code=verification_code,verification_token=verification_token,verification_id=verification_id,account=account,dialing_code=dialing_code)
print(token)
return token
def user_email_register(sms_type,account,password,invitation_code=""):
    # dev: email registration
verification_id = send_email_sms(sms_type,account)
db = DB('mysql.b4dev.xyz', 3306, 'b4_api', 'eYKRj3Vp@zM0SGWj', 'b4') # B4devDB
verification_code = db.query("SELECT verification_code FROM user_verification_code WHERE user_account = '%s' ORDER BY code_over_time DESC LIMIT 1" % account)[0][0]
token = register(account=account, password=password, verification_id=verification_id,verification_code=verification_code, type="2",invitation_code=invitation_code, platform="2")
print(token)
return token
def user_phone_register(sms_type,account,password,dialing_code,invitation_code=""):
    # dev: phone registration
verification_id = send_sms(sms_type, account,dialing_code=dialing_code)
db = DB('mysql.b4dev.xyz', 3306, 'b4_api', 'eYKRj3Vp@zM0SGWj', 'b4') # B4devDB
verification_code = db.query(
"SELECT verification_code FROM user_verification_code WHERE user_account = '%s' ORDER BY code_over_time DESC LIMIT 1" % (dialing_code+account))[0][0]
token = register(account=account, password=password, verification_id=verification_id,verification_code=verification_code,dialing_code=dialing_code, type="1",invitation_code=invitation_code, platform="2")
print(token)
return token
def online_user_phone_login(sms_type,account,password,type="1",dialing_code="86"):
    # production: phone login
verification_token = login_step1(account=account,password=password,type=type,dialing_code=dialing_code)
verification_id = send_sms(sms_type=sms_type,account=account)
    verification_code = input("verification code: ")
token = login_step2(verification_code=verification_code,verification_token=verification_token,verification_id=verification_id,account=account,dialing_code=dialing_code)
print(token)
return token
def online_user_email_login(sms_type,account,password,type="2"):
    # production: email login
verification_token = login_step1(account=account,password=password,type=type)
verification_id = send_email_sms(sms_type=sms_type,account=account)
    verification_code = input("verification code: ")
token = login_step2(verification_code=verification_code,verification_token=verification_token,verification_id=verification_id,account=account)
print(token)
return token
def online_user_phone_register(sms_type,account,password,dialing_code,invitation_code=""):
    # production: phone registration
verification_id = send_sms(sms_type, account)
    verification_code = input("verification code: ")
token = register(account=account, password=password, verification_id=verification_id,verification_code=verification_code,dialing_code=dialing_code, type="1",invitation_code=invitation_code, platform="2")
print(token)
return token
def online_user_email_register(sms_type,account,password,invitation_code=""):
    # production: email registration
verification_id = send_email_sms(sms_type,account)
    verification_code = input("verification code: ")
token = register(account=account, password=password, verification_id=verification_id,verification_code=verification_code, type="2",invitation_code=invitation_code, platform="2")
print(token)
return token
def modify_nickname(token,nickname):
    # change the nickname
url = "%s/api/v1/user/modify_nickname" % B4_url
body = {
"token":token,
"nickname":nickname
}
run = RunMain(url=url, params=None, data=body,
headers=get_signture(Android_apikey, Android_apisecret, body), method='POST')
out_log(url,body,json.loads(run.response))
print(json.loads(run.response))
if __name__ == "__main__":
send_sms(sms_type="2", account="18620074720", dialing_code="86", token="", language="zh")
# send_email_sms(sms_type="2", account="zhenjunwen123@163.com", token="f80dfb7a06668d567282da239609c73d", language="zh")
# verification_token = login_step1(account="zhenjunwen123@163.com",password="10000123456",type="2")
# verification_id = send_email_sms(sms_type="2",account="zhenjunwen123@163.com",token="e30726c6e126048a65053a4d27150c8f",language="zh")
# token = login_step2(verification_token="a14178b55e2ffa1478c60d19d0f17f04",verification_id="6",verification_code="971071",account="zhenjunwen123@163.com")
# print(token)
# user_phone_login(sms_type="2",account="15521057551",password="zjw971006")
# print(user_phone_login(sms_type="2",account="13826284310",password="111111",dialing_code="86"))
# print(user_login("2", "15916750662", "123456")) #永健账号
# print(user_email_login(sms_type="2",type="2",account="1085751421@qq.com",password="10000123456"))
# validate_login_pwd(token=token_wen, password="zjw971006")
# modify_login_pwd(token="f80dfb7a06668d567282da239609c73d", password="10000123456", account="1085751421@qq.com")
# user_phone_login(sms_type="2", account="15916750662", password="dyj123456", type="1", dialing_code="86")
# user_phone_register(sms_type="1", account="16538854915", password="qq123456",dialing_code="86")
# user_email_login(sms_type="2", account="00000010@mohukeji.com", password="Qq000000", type="2")
# user_email_register(sms_type="1", account="00000010@mohukeji.com", password="Qq000000",invitation_code="")
# online_user_phone_login(sms_type="2", account="15521057551", password="zjw971006", type="1", dialing_code="86")
# online_user_email_login(sms_type="2", account="zhenjunwen123@163.com", password="10000123456", type="2")
# online_user_phone_register(sms_type="1", account="15521057551", password="zjw971006", dialing_code="86")
# online_user_email_register(sms_type="1", account="zhenjunwen123@163.com", password="zjw971006")
# modify_nickname(token=token_wen,nickname="大帅哥")
# reset_login_pwd(password="zjw971006", account="15521057551", token="", dialing_code="86")
# register_kyc(account="15521057551", password="zjw971006", dialing_code="86", invitation_code="", nationality="")
pass
|
[
"448095483@qq.com"
] |
448095483@qq.com
|
9c40a44f5eb4db82441f18146952922fa94dd388
|
c19ba1a6abf31ec22626e67e405410268bc085e7
|
/src/ida_scripts/analyze_stack_smash_gadget.py
|
97a550ca67fab37e56e3b2f1e332e9ef4fde3d98
|
[
"MIT"
] |
permissive
|
yifengchen-cc/kepler-cfhp
|
be2a5beb22ffa0c289127d38e7b14cc52f3aceed
|
e530583fa96a99c53a043f6fd5cd67c63509731d
|
refs/heads/master
| 2020-12-05T08:43:28.826487
| 2019-09-04T02:49:23
| 2019-09-04T02:49:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,626
|
py
|
from idautils import *
from idaapi import *
from capstone import *
import pickle
isdebug = True
# TODO analyze how many stack smash has stack canary
# TODO analyze how many stack smash has quick exit path
# ============================================================
interested_mnem = ['mov', 'lea']
interested_opnd = ['rdi','edi','rsi','esi','rdx','edx','dh','dl']
all_regs = ['rax','rbx','rcx','rdx','rsi','rdi','r8','r9','r10','r11','r12','r13','r14','r15','rbp','rsp'
,'eax','ebx','ecx','edx','esi','edi','r8d','r9d','r10d','r11d','r12d','r13d','r14d','r15d','ebp','esp']
# ============================================================
# ==========types of lea instruction, enumeration=============
LEA_MEM_TO_REG = 21
# ============================================================
# ==========types of mov instruction, enumeration=============
MOV_MEM_TO_REG = 11
MOV_REG_TO_MEM = 12
MOV_IMM_TO_REG = 13
MOV_IMM_TO_MEM = 14
MOV_REG_TO_REG = 15
# ============================================================
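# Illustrative mapping from x86-64 instructions to the categories above
# (examples only):
#   mov rax, [rbp-8]    -> MOV_MEM_TO_REG
#   mov [rsp+16], rdi   -> MOV_REG_TO_MEM
#   mov edx, 0x20       -> MOV_IMM_TO_REG
#   mov qword [rbx], 0  -> MOV_IMM_TO_MEM
#   mov rsi, rdi        -> MOV_REG_TO_REG
#   lea rdi, [rsp+0x40] -> LEA_MEM_TO_REG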
def dbg(content):
if isdebug:
print '[+]', content
def analyze_add(head):
return None
def analyze_imul(head):
return None
def analyze_and(head):
return None
def analyze_sub(head):
return None
def analyze_shl(head):
return None
def analyze_pop(head):
return None
def analyze_shr(head):
return None
def analyze_sbb(head):
return None
def analyze_sar(head):
return None
def analyze_not(head):
return None
def analyze_cmp(head):
return None
def analyze_xor(head):
return None
def analyze_or(head):
return None
def analyze_test(head):
return None
def analyze_lea(head):
result = {}
print('===== analyzing lea instruction =====')
inst_bytes = idc.GetManyBytes(head, ItemSize(head))
capstone_disasm = md.disasm(inst_bytes,head)
inst = capstone_disasm.next() #only one instruction here
print(inst_bytes.encode('hex'))
num_of_opnds = len(inst.operands)
assert(num_of_opnds==2)
src = inst.operands[1]
dst = inst.operands[0]
print(inst.mnemonic+' '+inst.op_str)
print('src type: '+str(src.type))
print('dst type: '+str(dst.type))
assert(dst.type==1 and src.type==3) #dst must be register
dstreg=dst.reg
src_base = inst.reg_name(src.mem.base)
src_disp = src.mem.disp
src_index = src.mem.index
src_index_reg = ''
if src_index != 0:
src_index_reg = inst.reg_name(src_index)
src_scale = src.mem.scale
src_segment = src.mem.segment
print(src_base+str(src_disp)+src_index_reg+str(src_scale)+ str(src_segment))
result['type']=LEA_MEM_TO_REG
result['addr']=head
result['dst']=inst.reg_name(dstreg)
result['src']={'base':src_base,'disp':src_disp,'index_reg':src_index_reg\
,'scale':src_scale, 'segment':src_segment}
print('===== end of analyzing a lea instruction =====')
return result
def analyze_mov(head):
result = {}
dbg('===== analyzing mov instruction =====')
inst_bytes = idc.GetManyBytes(head, ItemSize(head))
capstone_disasm = md.disasm(inst_bytes,head)
inst = capstone_disasm.next() #only one instruction here
opndstr = inst.op_str
dbg(inst.mnemonic + ' ' + opndstr)
dbg(inst.bytes)
dbg(inst_bytes.encode('hex'))
num_of_opnds = len(inst.operands)
assert(num_of_opnds==2)
src = inst.operands[1]
dst = inst.operands[0]
#type 1: reg 2.immediate 3.mem
dbg('src type: '+str(src.type))
dbg('dst type: '+str(dst.type))
if dst.type == 1:#dst is reg
dstreg = dst.reg
result['dst'] = inst.reg_name(dstreg)
if src.type == 1:
dbg('src is Register')
dbg(inst.reg_name(src.reg)+'->'+inst.reg_name(dstreg))
result['type'] = MOV_REG_TO_REG
result['addr'] = head
result['src'] = inst.reg_name(src.reg)
elif src.type == 2: #src is immediate
            dbg('src is Immediate')
dbg(str(src.imm)+'->'+inst.reg_name(dstreg))
result['type']=MOV_IMM_TO_REG
result['addr'] = head
result['src']=src.imm
elif src.type==3:
            dbg('src is Memory')
src_base = inst.reg_name(src.mem.base)
src_disp = src.mem.disp
src_index = src.mem.index
src_index_reg = ''
if src_index != 0:
src_index_reg = inst.reg_name(src_index)
src_scale = src.mem.scale
src_segment = src.mem.segment
dbg(src_base+str(src_disp)+src_index_reg+str(src_scale)+ str(src_segment))
#print src.mem,'->',dstreg
result['type']=MOV_MEM_TO_REG
result['addr'] = head
result['src']={'base':src_base,'disp':src_disp,'index_reg':src_index_reg\
,'scale':src_scale, 'segment':src_segment}
#resutl['src'] = tmp_dict
elif dst.type == 2:#dst is immediate
assert(0)
elif dst.type == 3:#dst is memory, do not care for now
assert(src.type!=3) #src type could not be memory
if dst.mem.base:
base_reg = inst.reg_name(dst.mem.base)
dbg('writing to memory '+'base reg: '+base_reg+' offset: '+str(dst.mem.disp))
if src.type==1: #src is reg
result['type']=MOV_REG_TO_MEM
result['addr'] = head
if src.type==2:
result['type']=MOV_IMM_TO_MEM
result['addr'] = head
print(src)
dbg('===== end of analyzing a mov instruction =====')
return result
def analyze_inst(mnem,head):
return {\
'mov':analyze_mov, \
'movsxd':analyze_mov, \
'cmovns':analyze_mov, \
'movzx':analyze_mov, \
'cmova':analyze_mov, \
'cmovle':analyze_mov, \
'cmovbe':analyze_mov, \
'cmovnb':analyze_mov, \
'cmovb':analyze_mov, \
'cmovz':analyze_mov, \
'lea':analyze_lea, \
'add':analyze_add, \
'imul':analyze_imul, \
'sub':analyze_sub, \
'and':analyze_and, \
'xor':analyze_xor, \
'or':analyze_or, \
'shl':analyze_shl, \
'shr':analyze_shr, \
'sbb':analyze_sbb, \
'sar':analyze_sar, \
'cmp':analyze_cmp, \
'test':analyze_test, \
'pop':analyze_pop,\
'not':analyze_not,\
}[mnem](head)
def get_data_flow_sig(callsite, func):
global md
fc = idaapi.FlowChart(func)
signature=[]
reversed_instruction=[]
seen_end=False
for block in fc:
if block.startEA <= callsite < block.endEA:
for head in Heads(block.startEA, block.endEA):
disasm = GetDisasm(head)
                # collect instructions up to (not including) the call/jmp to
                # _copy_from_user itself, whose call sites we are analyzing
                if '_copy_from_user' not in disasm or \
                        ('call' not in disasm and 'jmp' not in disasm):
inst_bytes = idc.GetManyBytes(head, ItemSize(head))
reversed_instruction = [[head, inst_bytes]] + reversed_instruction
else:
seen_end = True
break
if seen_end:
break
for inst in reversed_instruction:
dbg(GetDisasm(inst[0]))
for entry in reversed_instruction:
head=entry[0]
disasm = GetDisasm(head)
mnem = GetMnem(head)
opnd0 = GetOpnd(head, 0)
#opnd1 = GetOpnd(head, 1)
dbg(disasm)
dbg(mnem)
dbg(hex(head))
#print GetOpnd(head, 0), GetOpnd(head, 1), GetOpnd(head, 2)
if mnem in interested_mnem or opnd0 in interested_opnd:
tmp = analyze_inst(mnem, head)
if tmp is not None:
signature.append(tmp)
#assert 0 #should not reach here
return signature, reversed_instruction
def get_func_code_refs_to(func_ea):
code_refs = set()
for ref in CodeRefsTo(func_ea, 0): #callers
# print ref
func_ida = get_func(ref)
name = get_func_name(ref)
#func_start = func_ida.startEA
#pfn=get_frame(func_start)
frame_size = get_frame_size(func_ida)
#print func_ida
if not func_ida:
#print "BUG?: coderef came from no function! %X->%X"%(ref, addr)
continue
'''
if func_ida.startEA not in functions:
print "BUG?: function %X not in our set (r=%X)!"%(func_ida.startEA, ref)
continue
'''
#code_refs.add((ref, func_ida.startEA, name))
code_refs.add((ref, func_ida, name, frame_size))
return code_refs
def isPush(inst):
if inst[:4] == 'push':
return True
else:
return False
def getCanaryLocation(head):
return GetOperandValue(head, 0)
def getCanaryLocation_rbp(head):
return (-GetOperandValue(head, 0)) & 0xffffffff
def isLoadStackCanary(disasm,head):
if 'mov' in disasm and 'gs' in disasm:
if GetOperandValue(head,1) == 40:
print disasm
return True
return False
def isSaveStackCanary(disasm):
if 'mov' in disasm and 'rsp' in disasm:
print disasm
return 1
if 'mov' in disasm and 'rbp' in disasm:
print disasm
return 2
return False
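# isLoadStackCanary/isSaveStackCanary above match the usual x86-64 Linux kernel
# stack-protector prologue (illustrative disassembly; exact offsets vary per function):
#   mov rax, gs:28h              ; load the per-CPU canary (0x28 == 40)
#   mov [rsp+0A8h+var_30], rax   ; rsp-relative save -> canary_type 'rsp'
#   mov [rbp-10h], rax           ; or an rbp-relative save -> canary_type 'rbp'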
def get_num_saved_registers(func):
"""
check whether stack canary exists and get parameter related to the stack frame
:param func:
:return: num_saved_registers, canary_location, canary_type
"""
seen_stack_canary = 0
num_saved_registers = 0
canary_location = -1
canary_type = ''
print hex(func.startEA)
for (startea,endea) in Chunks(func.startEA):
for head in Heads(startea, endea):
disasm = GetDisasm(head)
if isPush(disasm):
num_saved_registers += 1
print disasm
if seen_stack_canary == 0:
if isLoadStackCanary(disasm,head):
seen_stack_canary = 1
continue
if seen_stack_canary == 1:
res = isSaveStackCanary(disasm)
assert res is not False
if res == 1: # rsp canary
canary_type = 'rsp'
canary_location = getCanaryLocation(head)
if res == 2: # rbp canary
canary_type = 'rbp'
canary_location = getCanaryLocation_rbp(head)
seen_stack_canary = 2
return num_saved_registers, canary_location, canary_type
return num_saved_registers, canary_location, canary_type
def analyze_one_xref_for_smash_gadget(ea):
print '-'*79
    call_site = ea[0]
    func = ea[1]
    frame_size = ea[3]  # ea[2] is the function name (see get_func_code_refs_to)
num_saved_registers, canary_location, canary_type = get_num_saved_registers(func)
data_flow_sig, reversed_instruction = get_data_flow_sig(call_site, func)
return num_saved_registers, canary_location, canary_type\
, get_func_name(call_site), data_flow_sig, reversed_instruction
def check_smash_quick_exit_path(xref_copy_from_user):
for ea in xref_copy_from_user:
call_site_addr = ea[0]
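        # a near call (opcode E8 + rel32) is 5 bytes, so this is the fall-through
        # address right after the call site (assumes a direct, non-indirect call)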
next_inst_addr = call_site_addr + 5
seen_error_handling = False
for (startea, endea) in Chunks(next_inst_addr):
for head in Heads(startea, endea):
disasm = GetDisasm(head)
if 'test' in disasm and 'ax' in disasm:
seen_error_handling = True
continue
if not seen_error_handling:
print '!', hex(call_site_addr)
def get_smash_gadgets(xref_copy_from_user):
output = []
for ea in xref_copy_from_user:
num_saved_registers, canary_location, canary_type, func_name, \
data_flow_sig, reversed_instruction = analyze_one_xref_for_smash_gadget(ea)
output.append(
[num_saved_registers, canary_location, canary_type, func_name, data_flow_sig, reversed_instruction])
# dump to result
num_unprotected_copy_from_user = 0
for ent in output:
if ent[2] == '':
print ent[3]
num_unprotected_copy_from_user += 1
print 'there are %d references to _copy_from_user' % (len(xref_copy_from_user))
print 'among them %d is not protected by stack canary' % num_unprotected_copy_from_user
the_filename = "res_smash.txt"
with open(the_filename, 'wb') as f:
pickle.dump(output, f)
def main():
global md
info = idaapi.get_inf_structure()
proc = info.procName
if info.is_64bit():
if proc == "metapc":
md = Cs(CS_ARCH_X86, CS_MODE_64)
md.detail = True
else:
assert(0)
else:
assert(0)
stack_disclosure_gadgets = set()
copy_from_user_addr=idc.LocByName('_copy_from_user')
xref_copy_from_user=get_func_code_refs_to(copy_from_user_addr) # _copy_from_user
# get all smash gadgets
get_smash_gadgets(xref_copy_from_user)
# check_quick_path
check_smash_quick_exit_path(xref_copy_from_user)
if __name__ == '__main__':
main()
|
[
"ww9210@gmail.com"
] |
ww9210@gmail.com
|
6f21499e60f1307597647a0c5fde9fb2570e53f9
|
a7a178f09cb54ad5868035dd9c6c103d1f3c272d
|
/Events_Managements/asgi.py
|
96e750f3802af4d7552a54cb10429920f6572d66
|
[] |
no_license
|
ajay-pal1/Event_managements
|
a1c5ed8d26d72856cee75a46c45de6bd2718fc81
|
5b00dfe47fe570b6ca8b5d961f3bc8bfd436ffd9
|
refs/heads/main
| 2023-09-04T23:07:28.141047
| 2021-10-30T11:00:16
| 2021-10-30T11:00:16
| 422,849,049
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
"""
ASGI config for Events_Managements project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Events_Managements.settings')
application = get_asgi_application()
|
[
"ajay.pal@artdexandcognoscis.com"
] |
ajay.pal@artdexandcognoscis.com
|
e91a85ba4e6574777780f850dbcb3d70762b0513
|
688b1e65d07f03f41edb9fe267d3963f417295db
|
/script/src/proto/delta/ilist_pb2.py
|
50ece90dcf0dd76bdac8af9160db65226c011a63
|
[
"Apache-2.0"
] |
permissive
|
poeliu/Pinso
|
3dee4ae38d6dab6c59a87ef21f558a964eb8e899
|
b084597389fcbad4256a916d13e8601cff216d1d
|
refs/heads/master
| 2016-09-05T20:56:29.489368
| 2014-05-23T14:18:56
| 2014-05-23T14:18:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 2,528
|
py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
DESCRIPTOR = descriptor.FileDescriptor(
name='delta/ilist.proto',
package='delta',
serialized_pb='\n\x11\x64\x65lta/ilist.proto\x12\x05\x64\x65lta\"#\n\x0fiListEntryProto\x12\x10\n\x08iroot_id\x18\x01 \x02(\r\"3\n\niListProto\x12%\n\x05\x65ntry\x18\x01 \x03(\x0b\x32\x16.delta.iListEntryProto')
_ILISTENTRYPROTO = descriptor.Descriptor(
name='iListEntryProto',
full_name='delta.iListEntryProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='iroot_id', full_name='delta.iListEntryProto.iroot_id', index=0,
number=1, type=13, cpp_type=3, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=28,
serialized_end=63,
)
_ILISTPROTO = descriptor.Descriptor(
name='iListProto',
full_name='delta.iListProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='entry', full_name='delta.iListProto.entry', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=65,
serialized_end=116,
)
_ILISTPROTO.fields_by_name['entry'].message_type = _ILISTENTRYPROTO
DESCRIPTOR.message_types_by_name['iListEntryProto'] = _ILISTENTRYPROTO
DESCRIPTOR.message_types_by_name['iListProto'] = _ILISTPROTO
class iListEntryProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ILISTENTRYPROTO
# @@protoc_insertion_point(class_scope:delta.iListEntryProto)
class iListProto(message.Message):
__metaclass__ = reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ILISTPROTO
# @@protoc_insertion_point(class_scope:delta.iListProto)
# @@protoc_insertion_point(module_scope)
|
[
"poe.liu@gmail.com"
] |
poe.liu@gmail.com
|
60be81c8dd434704a5ccd2d8e54c0843f89fec5e
|
bdd138968bd78977a5aaa3e078fe71839300c114
|
/myproject/oneplace/migrations/0009_auto_20170206_2233.py
|
5ba27233eea8d80ec388795b309e78574234ad9d
|
[] |
no_license
|
thecodingregimen/django
|
2e2f1a3bf9cd38ead640d419f6b2555e6126307f
|
5121d186868e6d3cf436faba51760d99020a0b87
|
refs/heads/master
| 2022-11-26T18:37:13.783244
| 2017-03-19T00:18:06
| 2017-03-19T00:18:06
| 80,758,793
| 0
| 1
| null | 2020-07-23T12:41:52
| 2017-02-02T19:07:26
|
Python
|
UTF-8
|
Python
| false
| false
| 491
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-02-06 22:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('oneplace', '0008_auto_20170206_2036'),
]
operations = [
migrations.AlterField(
model_name='lesson',
name='class_alt_id',
field=models.CharField(default='no lesson id', max_length=50, null=True),
),
]
|
[
"qtech411@gmail.com"
] |
qtech411@gmail.com
|
0c0a7a80c392b2749355cb385c271db31830b5f9
|
d4837c5842d8b34485cb86327d399e65694c37f6
|
/generate_pdf.py
|
49c5094259b74d7cb13d017d6d2a141c65d5d07a
|
[] |
no_license
|
inoxevious-inonit/vad-app
|
337615d88f03e28d11acecbcf2fab64606270a3b
|
31268ca15440852557340673b5c8384fcc9b8078
|
refs/heads/main
| 2023-06-12T20:13:48.824081
| 2021-07-01T13:49:20
| 2021-07-01T13:49:20
| 376,505,471
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,470
|
py
|
"""Invoice generator
This shows how to use our preppy templating system and RML2PDF markup.
All of the formatting is inside invoice.prep
"""
import sys, os, datetime, json
import bbcode  # needed by bb2rml below
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfbase.pdfmetrics import registerFont
from rlextra.rml2pdf import rml2pdf
import jsondict
from rlextra.radxml.html_cleaner import cleanBlocks
from rlextra.radxml.xhtml2rml import xhtml2rml
import preppy
def bb2rml(text):
return preppy.SafeString(xhtml2rml(cleanBlocks(bbcode.render_html(text)),ulStyle="normal_ul", olStyle="normal_ol"))
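# Illustrative example (hypothetical input): bb2rml("[b]Total due[/b]") renders
# the BBCode to HTML ("<b>Total due</b>"), converts that to the equivalent RML
# inline markup, and wraps it in SafeString so preppy will not escape it again.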
def generate_pdf(json_file_name, options):
data = json.load(open(json_file_name))
print('invoice_json_file data', json_file_name)
    here = os.path.abspath(os.path.dirname(__file__))
output = os.path.abspath(options.output)
if not os.path.isdir(output):
os.makedirs(output,0o755)
#wrap it up in something friendlier
data = jsondict.condJSONSafe(data)
#make a dictionary to pass into preppy as its namespace.
#you could pass in any Python objects or variables,
#as long as the template expressions evaluate
ns = dict(data=data, bb2rml=bb2rml, format="long" if options.longformat else "short")
#we usually put some standard things in the preppy namespace
ns['DATE_GENERATED'] = datetime.date.today()
ns['showBoundary'] = "1" if options.showBoundary else "0"
#let it know where it is running; trivial in a script, confusing inside
#a big web framework, may be used to compute other paths. In Django
#this might be relative to your project path,
ns['RML_DIR'] = os.getcwd() #os.path.join(settings.PROJECT_DIR, appname, 'rml')
#we tend to keep fonts in a subdirectory. If there won't be too many,
#you could skip this and put them alongside the RML
FONT_DIR = ns['FONT_DIR'] = os.path.join(ns['RML_DIR'], 'fonts')
#directory for images, PDF backgrounds, logos etc relating to the PDF
ns['RSRC_DIR'] = os.path.join(ns['RML_DIR'], 'resources')
#We tell our template to use Preppy's standard quoting mechanism.
#This means any XML characters (&, <, >) will be automatically
#escaped within the prep file.
template = preppy.getModule('rml/invoice.prep')
#this hack will allow rmltuils functions to 'know' the default quoting mechanism
#try:
# import builtins as __builtin__
#except:
# import __builtin__
#__builtin__._preppy_stdQuote = preppy.stdQuote
rmlText = template.getOutput(ns, quoteFunc=preppy.stdQuote)
file_name_root = os.path.join(output,os.path.splitext(os.path.basename(json_file_name))[0])
if options.saverml:
#It's useful in development to save the generated RML.
#If you generate some illegal RML, pyRXP will complain
#with the exact line number and you can look to see what
#went wrong. Once running, no need to save. Within Django
#projects we usually have a settings variable to toggle this
#on and off.
rml_file_name = file_name_root + '.rml'
open(rml_file_name, 'w').write(rmlText)
pdf_file_name = file_name_root + '.pdf'
#convert to PDF on disk. If you wanted a PDF in memory,
#you could pass a StringIO to 'outputFileName' and
#retrieve the PDF data from it afterwards.
rml2pdf.go(rmlText, outputFileName=pdf_file_name)
print('saved %s' % pdf_file_name)
return pdf_file_name
if __name__=='__main__':
from optparse import OptionParser
usage = "usage: runme.py [--long] myfile.json"
parser = OptionParser(usage=usage)
parser.add_option("-l", "--long",
action="store_true", dest="longformat", default=False,
help="Do long profile (rather than short)")
parser.add_option("-r","--rml",
action="store_true", dest="saverml", default=False,
help="save a copy of the generated rml")
parser.add_option("-s","--showb",
action="store_true", dest="showBoundary", default=False,
help="tuen on global showBoundary flag")
parser.add_option("-o", "--output",
action="store", dest="output", default='output',
help="where to store result")
options, args = parser.parse_args()
if len(args) != 1:
print(parser.usage)
else:
filename = args[0]
generate_pdf(filename, options)
|
[
"mpasiinnocent@gmail.com"
] |
mpasiinnocent@gmail.com
|
30095eb4666e57b9fffc42c3dd7f9a109795b657
|
66f383fec502102bfec58ed8cb9c43a71e599c55
|
/constants/http.py
|
8bc1c8cf25067190ff39dfa86b9748ba140130a6
|
[
"MIT"
] |
permissive
|
hacktoolkit/django-htk
|
0a984a28f7fbc7eed8e2b1975d210792ddbee829
|
935c4913e33d959f8c29583825f72b238f85b380
|
refs/heads/master
| 2023-08-08T11:52:54.298160
| 2023-07-21T19:08:37
| 2023-07-21T19:08:37
| 15,924,904
| 210
| 65
|
MIT
| 2023-09-08T23:59:28
| 2014-01-15T04:23:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,995
|
py
|
class HTTPStatus:
"""Convenience class to provide named HTTP Status codes
References:
- https://developer.mozilla.org/en-US/docs/Web/HTTP/Status
- https://httpwg.org/specs/rfc9110.html#overview.of.status.codes
"""
# Informational responses
CONTINUE_ = 100
SWITCHING_PROTOCOLS = 101
PROCESSING = 102
EARLY_HINTS = 103
# Successful responses
OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207
ALREADY_REPORTED = 208
IM_USED = 226
# Redirection messages
MULTIPLE_CHOICES = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
UNUSED = 306
TEMPORARY_REDIRECT = 307
PERMANENT_REDIRECT = 308
# Client error responses
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
METHOD_NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTHENTICATION_REQUIRED = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
PAYLOAD_TOO_LARGE = 413
URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
RANGE_NOT_SATISFIABLE = 416
EXPECTATION_FAILED = 417
IM_A_TEAPOT = 418
MISDIRECTED_REQUEST = 421
UNPROCESSABLE_CONTENT = 422
LOCKED = 423
FAILED_DEPENDENCY = 424
TOO_EARLY = 425
UPGRADE_REQUIRED = 426
PRECONDITION_REQUIRED = 428
TOO_MANY_REQUESTS = 429
REQUEST_HEADER_FIELDS_TOO_LARGE = 431
UNAVAILABLE_FOR_LEGAL_REASONS = 451
# Server error responses
    INTERNAL_SERVER_ERROR = 500
    NOT_IMPLEMENTED = 501
    BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
VARIANT_ALSO_NEGOTIATES = 506
INSUFFICIENT_STORAGE = 507
LOOP_DETECTED = 508
NOT_EXTENDED = 510
NETWORK_AUTHENTICATION_REQUIRED = 511
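# Example usage (hypothetical Django view, shown for illustration only):
#   from django.http import HttpResponse
#   def healthcheck(request):
#       return HttpResponse(status=HTTPStatus.NO_CONTENT)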
|
[
"jontsai@users.noreply.github.com"
] |
jontsai@users.noreply.github.com
|
236372a05758c4850864607b237ee9c0aa6d9b63
|
a5704a553e910a25e9f6b824b2356b3b51ad96b9
|
/Python/python-DataAnalysis-master/简单绘图/shop_visual/shop.py
|
09a033a755eff1e33b1bfac10c4f6f6c53c15c5a
|
[] |
no_license
|
ZhuoZhuoCrayon/my-Nodes
|
19ab1ce19ab0c89582071c3ca318f608f8ac94de
|
54119f3c141017b5b2f6b1c15c9677ba9fbb564b
|
refs/heads/master
| 2022-12-20T04:40:50.317306
| 2021-01-22T12:18:24
| 2021-01-22T12:18:24
| 200,957,098
| 57
| 18
| null | 2022-12-16T07:48:12
| 2019-08-07T02:28:51
|
Java
|
UTF-8
|
Python
| false
| false
| 5,046
|
py
|
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
"""
(1)绘制所有便利店的10月的客流量折线图。
(2)绘制每类商家10月份的日平均客流量折线图。
(3)选择一个商家,统计每月的总客流量,绘制柱状图。
(4)选择一个商家,统计某个月中,周一到周日的每天平均客流量,并绘制柱状图。
(5)选择一个商家,绘制客流量直方图。
(6)选择一个商家,绘制客流量密度图。
(7)统计某个月各个类别商店总客流量占该月总客流量的比例,绘制饼图。
df = pd.read_csv('000917.csv',encoding='gbk')
df = df[df['涨跌幅']!='None']
df['涨跌幅'] = df['涨跌幅'].astype(np.float64)
"""
shop_data=pd.read_csv("../dataset/shop_payNum_new.csv",parse_dates=True,index_col=0)
#----------------------(1) line chart of all stores' October footfall----------------------#
data_onMouth_10=shop_data.loc[shop_data.index.month==10] # filter to October rows
footfall=data_onMouth_10.groupby(['shop_id']).sum() # total October footfall per store
footfall=footfall.sort_index() # sort by shop_id (the group index)
_x=footfall.index # shop_id on the x axis
_y=footfall.values # footfall on the y axis
plt.figure(figsize=(16,10),dpi=100) # figure size
plt.xticks(range(len(_x))[::3],_x[::3].astype(int)) # too many ids, so label every 3rd tick
# plt.xticks(x,xtk,size=12,rotation=50) # font size and label rotation
# annotate each point with its shop id and value
for x,y in zip(range(len(_x)),_y):
    plt.text(x,y,str(_x[x])+","+str(y),ha='center',size=6)
plt.xlabel('shop id',size=15)
plt.ylabel('footfall of every shop',size=15)
plt.title('work[1]:footfall on Oct',size=20)
plt.plot(range(len(_x)),_y)
plt.grid()
plt.show() # show the chart
#--------------------------------------------------------------------------------------------------#
#----------------(2) line chart of each category's average daily footfall in October----------------#
"""
# group footfall by category and take the per-category mean
footfall_mean=data_onMouth_10['pay_num'].groupby(data_onMouth_10['cate_2_name']).mean()
# draw the line chart
footfall_mean.plot(kind='line')
# labels and title
plt.xlabel('cate_2_name',size=15)
plt.ylabel('the mean of footfall',size=15)
plt.title('work[2]:the mean of footfall of every type on Oct',size=20)
plt.show()
"""
#-------------------------------------------------------------------------------------------------#
#---------------(3) one merchant: total footfall per month, bar chart---------------#
"""
shop14_data=shop_data[shop_data.shop_id==14] # select the merchant with shop_id 14
# group footfall by month and sum it
shop14_data=shop14_data['pay_num'].groupby(shop14_data.index.month).sum()
# plot
_x=shop14_data.index
_y=shop14_data.values
plt.figure(figsize=(16,10),dpi=80) # figure size
plt.bar(range(len(_x)),_y) # bar chart
# annotate the bars with their values
for x,y in zip(range(len(_x)),_y):
    plt.text(x,y+5,str(y),ha='center',size=12)
plt.xticks(range(len(_x)),_x)
plt.yticks(range(max(_y)+50)[::150]) # y-axis ticks every 150
plt.xlabel("MONTH",size=15)
plt.ylabel("total footfall",size=15)
plt.title("NO.14 Shop Footfall Every Month",size=20)
plt.show()
"""
#-------------------------------------------------------------------------------------------------#
#---------------(5) one merchant: footfall histogram---------------#
"""
shop14_data=shop_data[shop_data.shop_id==14]['pay_num'] # footfall series for shop_id 14
_x=shop14_data.index
_y=shop14_data.values
# plot
plt.figure(figsize=(16,10),dpi=80)
plt.bar(range(len(_x)),_y)
plt.xticks(range(len(_x))[::30],_x[::30].astype(str)) # too many dates, so label every 30th
plt.xlabel("TIME",size=15)
plt.ylabel("FOOTFALL",size=15)
plt.title("NO.14 Shop Footfall",size=20)
plt.show()
"""
#-------------------------------------------------------------------------------------------------#
#---------------(6) one merchant: footfall density plot---------------#
"""
shop14_data=shop_data[shop_data.shop_id==14]['pay_num'] # footfall series for shop_id 14
shop14_data.plot(x='SHOP 14',kind="kde") # kernel density estimate plot
plt.show()
"""
#-------------------------------------------------------------------------------------------------#
#----------(7) each category's share of the month's total footfall, pie chart----------#
footfall_class=data_onMouth_10['pay_num'].groupby(data_onMouth_10['cate_2_name']).sum()
footfall_rate=footfall_class/footfall_class.sum()
footfall_rate.plot(kind='pie')
plt.title("Different Type Shop Footfall Rate",size=20)
plt.show()
#-------------------------------------------------------------------------------------------------#
|
[
"873217631@qq.com"
] |
873217631@qq.com
|
8de25b31dd7c1c42830a58cf0adb9de57891e6b8
|
bd7109357c6db56ed1bc3a0d29e85d7d96b12a49
|
/feature_extraction.py
|
dcf91ae25f005ffe0a69f08201fa91a7be225869
|
[] |
no_license
|
Siraj2602/image_to_paragraph
|
f396e8c62b8c4186eb29e867281f5e3e41ea08d6
|
57be509659a0fd0ef5e01b7a653eb93757cbbde6
|
refs/heads/master
| 2020-06-17T01:55:48.748688
| 2019-07-08T07:44:06
| 2019-07-08T07:44:06
| 195,761,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,777
|
py
|
from os import listdir
from pickle import dump
from keras.applications.inception_v3 import InceptionV3
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.applications.inception_v3 import preprocess_input
from keras.models import Model
import string
from timeit import default_timer as timer
start = timer()
# extract features from each photo in the directory
def extract_features(directory):
# load the model
model = InceptionV3()
# re-structure the model
model.layers.pop()
model = Model(inputs=model.inputs, outputs=model.layers[-1].output)
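    # popping the final softmax layer and rebuilding the model on what is now the
    # last layer makes predict() return the pooled feature vector (2048-d for
    # InceptionV3) instead of the 1000-way ImageNet class probabilities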
# summarize
    model.summary()  # prints the architecture (summary() itself returns None)
# extract features from each photo
features = dict()
i = 0
for name in listdir(directory):
# load an image from file
i+=1
print("image number : ",i)
filename = directory + '/' + name
image = load_img(filename, target_size=(299, 299))
# convert the image pixels to a numpy array
image = img_to_array(image)
# reshape data for the model
image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
        # preprocess for InceptionV3 (scales pixel values to [-1, 1])
image = preprocess_input(image)
# get features
feature = model.predict(image, verbose=0)
# get image id
image_id = name.split('.')[0]
# store feature
features[image_id] = feature
print('>%s' % name)
return features
# extract features from all images
directory = 'im2p_train'
features = extract_features(directory)
print('Extracted Features: %d' % len(features))
# save to file
dump(features, open('features.pkl', 'wb'))
print(timer() - start)
|
[
"noreply@github.com"
] |
Siraj2602.noreply@github.com
|
8734025eab0daa7c9219611efee7b822e43d17a0
|
eef6a248944ec6be4a6bde967201d58cb79d55c0
|
/FDImage.py
|
542c2d1f4dd2052302acb979961b54cfa13f42fc
|
[] |
no_license
|
PengLei-Adam/FD_Adaboost
|
2f270a88ea5124b5baa0db53128901d5d7f11102
|
078cac954105714b1becf70c2bccc5b46abcf4e5
|
refs/heads/master
| 2020-07-26T19:00:57.394147
| 2016-11-20T08:09:07
| 2016-11-20T08:09:07
| 73,717,646
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,980
|
py
|
# -*- coding: utf-8 -*-
"""
Image Object for image processing based on numpy as the data matrix.
Functions :
Convert source image to integral image.
Created on Mon Nov 14 21:59:22 2016
@author: Peng Lei
"""
import numpy as np
import cv2
class FDImage:
def __init__(self, img):
if isinstance(img, basestring):
self.readFile(img)
elif isinstance(img, np.ndarray):
self.readData(img)
else:
            print 'Error: unsupported type for initialization'
def readFile(self, img_path):
        img = cv2.imread(img_path)
        if img is None:
            raise IOError('cannot read image: %s' % img_path)
        self.data = img
if img.ndim == 2:
self.height, self.width = img.shape
self.channels = 1
elif img.ndim == 3:
self.height, self.width, self.channels = img.shape
def readData(self, img):
self.data = img;
if img.ndim == 2:
self.height, self.width = img.shape
self.channels = 1
elif img.ndim == 3:
self.height, self.width, self.channels = img.shape
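    # Integral image: integral[y, x] is the sum of all gray pixels at or
    # above-and-left of (x, y), inclusive. Worked 2x2 example:
    #   gray = [[1, 2],         integral = [[1,  3],
    #           [3, 4]]    ->                [4, 10]]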
def cvtIntegral(self):
integral = np.zeros((self.height, self.width))
imgGray = self.cvtGray()
s = 0
for i in range(self.width):
s += imgGray[0, i]
integral[0, i] = s
for j in range(1, self.height):
s = 0
for i in range(self.width):
s += imgGray[j, i]
integral[j, i] = integral[j - 1, i] + s
return integral
def cvtGray(self):
if self.channels == 1:
return self.data
elif self.channels == 3:
temp = np.zeros((self.height, self.width))
for i in range(self.height):
for j in range(self.width):
bgr = self.data[i,j]
temp[i, j] = bgr[0]*0.114 + bgr[1]*0.587 + bgr[2]*0.299
return temp
|
[
"ftgh2003@126.com"
] |
ftgh2003@126.com
|
2d33a0e6a6d25870fa3f43b045afab66c08ac987
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02582/s221094084.py
|
d5bd0a88d18d43d3a874b7f28a7fcdedb3227cbf
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
s = input()
ans = 0
strek = 0
for i in range(3):
    if s[i] == 'R':
        strek += 1
        ans = max(strek, ans)
    else:
        strek = 0
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
9a494befbb53a4cc9524ec697d3bc181766a9355
|
8316c4457984c46aa1bea608b3514731a2beb7b5
|
/news/migrations/0002_auto_20151029_1651.py
|
076a0578a2448c7cee723dab88ae67d71af44e93
|
[] |
no_license
|
ssalam1/viewmusic
|
11ba245af7d4648d14692cf7444fc9983a083646
|
da2c2465e2384990a721fb6248ce8a30b008c8ab
|
refs/heads/master
| 2021-01-10T03:43:00.566887
| 2015-11-01T18:41:59
| 2015-11-01T18:41:59
| 45,352,699
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 643
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('news', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='album',
name='name',
field=models.CharField(max_length=50, verbose_name=b'album', db_tablespace=b'something', db_index=True),
),
migrations.AlterField(
model_name='artist',
name='name',
field=models.CharField(max_length=50, db_tablespace=b'indexes', db_index=True),
),
]
|
[
"ssalam@localhost.localdomain"
] |
ssalam@localhost.localdomain
|
419b6708511d0c32fe52f52506b90123b3adbb71
|
10a18a1d5ef8d62200acdcee0fe766cb8f71f96e
|
/Oedipus/utils/graphics.py
|
680150007330448992cf9d2436eaf6d83270b465
|
[
"Apache-2.0"
] |
permissive
|
ronhab/Oedipus
|
0858b0b3c8fab8984d74f642f922e598c43f9994
|
abe2848643bbf4cb686836feb5b332cae31e67e0
|
refs/heads/master
| 2020-03-25T07:24:13.980063
| 2018-08-15T21:44:15
| 2018-08-15T21:44:15
| 143,558,746
| 0
| 0
| null | 2018-08-04T19:50:26
| 2018-08-04T19:50:26
| null |
UTF-8
|
Python
| false
| false
| 1,047
|
py
|
#!/usr/bin/python
###################
# Library Imports #
###################
from Oedipus.utils.misc import *
import time
######################
# Defining variables #
######################
# Gray, Red, Green, Yellow, Blue, Magenta, Cyan, White, Crimson
colorIndex = [ "30", "31", "32", "33", "34", "35", "36", "37", "38" ]
####################
# Defining Methods #
####################
def prettyPrint(msg, mode="info"):
""" Pretty prints a colored message. "info": Green, "error": Red, "warning": Yellow, "info2": Blue, "output": Magenta, "debug": White """
if mode == "info":
color = "32" # Green
elif mode == "error":
color = "31" # Red
elif mode == "warning":
color = "33" # Yellow
elif mode == "info2":
color = "34" # Blue
elif mode == "output":
color = "35" # Magenta
elif mode == "debug":
color = "37" # White
else:
color = "32"
msg = "[*] %s. %s" % (msg, getTimestamp())
print("\033[1;%sm%s\n%s\033[1;m" % (color, msg, '-'*len(msg)))
|
[
"salem@in.tum.de"
] |
salem@in.tum.de
|
45fe16babb559ede512954ceed7cc060f672490a
|
3f23e73376673c8a7e62d592a2317734ebd8852a
|
/gcloud/taskflow3/migrations/0014_auto_20210319_1757.py
|
4dd1264ac1d16028c87d99223a66e3f0eb46d744
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
AmosLeee/bk-sops
|
cfd07820cff08d48c358822c3b274353ec750351
|
4e6291771c77d8e51632c8485dbeca812a85b3e0
|
refs/heads/V3.6.X
| 2023-05-29T18:22:05.749290
| 2021-06-18T07:50:56
| 2021-06-18T07:50:56
| 361,690,448
| 0
| 0
|
NOASSERTION
| 2021-05-08T02:35:03
| 2021-04-26T09:23:35
| null |
UTF-8
|
Python
| false
| false
| 972
|
py
|
# Generated by Django 2.2.6 on 2021-03-19 09:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("taskflow3", "0013_auto_20210125_1943"),
]
operations = [
migrations.AlterField(
model_name="taskflowinstance",
name="category",
field=models.CharField(
choices=[
("OpsTools", "运维工具"),
("MonitorAlarm", "监控告警"),
("ConfManage", "配置管理"),
("DevTools", "开发工具"),
("EnterpriseIT", "企业IT"),
("OfficeApp", "办公应用"),
("Other", "其它"),
("Default", "默认分类"),
],
default="Default",
max_length=255,
verbose_name="任务类型,继承自模板",
),
),
]
|
[
"1158341873@qq.com"
] |
1158341873@qq.com
|
6fcf1097b8c7d5af9dfb84ccfbfbfc704e1a9809
|
deb38059226b0a4b4d9ef258418d58a84a6720c2
|
/Python3/Lecture_03_Functions/program.py
|
70d52849a503a2a0c7a3134192dbc728d755baea
|
[] |
no_license
|
atanas-bg/SoftUni
|
38745ac8d76f347ce5d7938d7a24f4fc94f8ae9c
|
d22a0f56d49d0a8092c2cf04fc98db3dc83b7eb0
|
refs/heads/master
| 2021-05-04T11:07:38.963942
| 2017-02-17T11:56:23
| 2017-02-17T11:56:23
| 50,620,361
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 229
|
py
|
# import functions from other python files
from demo_package import calling_functions
from demo_package.demo_functions import div_mod
import pytz
print(calling_functions.convert_fahrenheit_to_celsius(10))
print(div_mod(5, 2))
|
[
"atanas.v.atanasov@gmail.com"
] |
atanas.v.atanasov@gmail.com
|
eb84367a388c566b2ffcd837573927cf79008d5e
|
cd1026dc0c93e7c6ae244794a542ccf3e6424516
|
/file_upload/backadmin/apps.py
|
e1451b1e2ced2979da77345815eeca29b5038a59
|
[
"CC-BY-4.0",
"Apache-2.0",
"LicenseRef-scancode-other-permissive"
] |
permissive
|
Skeletrox/usb-backend-pinut
|
6a6990acb924c940c2dca9f9c842264391bc89d8
|
9d1be58058d283ed8daab8c92bcd1323adcb83a0
|
refs/heads/master
| 2021-06-05T07:16:15.976640
| 2020-03-16T22:06:08
| 2020-03-16T22:06:08
| 95,878,061
| 0
| 1
| null | 2017-08-15T11:29:22
| 2017-06-30T10:35:58
|
CSS
|
UTF-8
|
Python
| false
| false
| 158
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class BackadminConfig(AppConfig):
name = 'backadmin'
|
[
"android.svr@gmail.com"
] |
android.svr@gmail.com
|
e3cdba3a9fb8ebdc8badc36483e3f79339519048
|
7bef33c18b71e428ef78fdfaa5621a05bc023f2b
|
/paco/__init__.py
|
53217c2272226639a3245e5172cfd38406174bbc
|
[
"MIT"
] |
permissive
|
strongbugman/paco
|
a3ab69ba249a465c198fa6f66197a68a3e7ae534
|
2358e3130b882e20ab89660122fc31cd6ba5a912
|
refs/heads/master
| 2021-01-24T11:45:23.431346
| 2018-02-06T15:25:23
| 2018-02-06T15:25:23
| 123,099,089
| 1
| 0
| null | 2018-02-27T08:40:57
| 2018-02-27T08:40:56
| null |
UTF-8
|
Python
| false
| false
| 1,518
|
py
|
from .map import map
from .run import run
from .each import each
from .some import some
from .race import race
from .once import once
from .wait import wait
from .curry import curry
from .wraps import wraps
from .apply import apply
from .defer import defer
from .every import every
from .until import until
from .times import times
from .thunk import thunk
from .gather import gather
from .repeat import repeat
from .filter import filter
from .filterfalse import filterfalse
from .reduce import reduce
from .whilst import whilst
from .series import series
from .partial import partial
from .timeout import timeout, TimeoutLimit
from .compose import compose
from .interval import interval
from .flat_map import flat_map
from .constant import constant, identity
from .throttle import throttle
from .dropwhile import dropwhile
from .concurrent import ConcurrentExecutor, concurrent
__author__ = 'Tomas Aparicio'
__license__ = 'MIT'
# Current package version
__version__ = '0.2.0'
# Explicit symbols to export
__all__ = (
'ConcurrentExecutor',
'apply',
'compose',
'concurrent',
'constant',
'curry',
'defer',
'dropwhile',
'each',
'every',
'filter',
'filterfalse',
'flat_map',
'gather',
'identity',
'interval',
'map',
'once',
'partial',
'race',
'reduce',
'repeat',
'run',
'series',
'some',
'throttle',
'thunk',
'timeout',
'TimeoutLimit',
'times',
'until',
'wait',
'whilst',
'wraps',
)
|
[
"tomas@aparicio.me"
] |
tomas@aparicio.me
|
b1db45abc934d873b5f7b170f521e93e5cc96acc
|
bbe49335f58e91632d1f062b559f94034d5faa04
|
/test.py
|
d5e2a848a7eacc6fa5f8409185950cd7e6589935
|
[] |
no_license
|
lemquan/telemetry
|
5ced3c140d86f39da160fd72a4d332cf52e8ee62
|
032510d0ff0d5e1b7ec76d247e71b900923ccf16
|
refs/heads/master
| 2021-01-01T03:35:17.460985
| 2016-04-20T16:34:45
| 2016-04-20T16:34:45
| 56,703,674
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,173
|
py
|
import pandas as pd
import numpy as np
import sys
import time
import pickle
from sklearn import cross_validation, linear_model, metrics, ensemble, grid_search, svm, decomposition
from scipy import interp
from pprint import pprint
if sys.platform == 'darwin':
import matplotlib as mil
mil.use('TkAgg')
import matplotlib.pyplot as plt
plot_on = True
print "Running OS X"
elif sys.platform == 'linux' or sys.platform == 'linux2':
print "Running Linux. Plots are saved."
import matplotlib as mil
mil.use('Agg')
import matplotlib.pyplot as plt
plot_on = False
def timeit(func):
def timed(*args, **kwargs):
ts = time.time()
res = func(*args, **kwargs)
te = time.time()
print '%r (%r, %r) %f sec' % \
(func.__name__, args, kwargs, te-ts)
return res
return timed
def load(d):
df = pd.read_csv('data/final.csv', index_col=0)
df = df.sample(frac=1)
y = df[['health']].as_matrix().reshape(-1)
df = df.drop('health', 1)
features = list(df)
if d == 1:
df = df.drop(['ThrottledPacketsReceived', 'CRCErrors', 'SecondsSinceLastClearCounters', \
'OutputQueueDrops', 'OutputUnderruns', 'InputErrors', 'AvailabilityFlag', \
'InputDrops', 'OutputDrops', 'OutputBuffersSwappedOut', 'Resets', 'InputAborts', \
'GiantPacketsReceived', 'FramingErrorsReceived', 'OutputBufferFailures', \
'CarrierTransitions', 'RuntPacketsReceived', 'InputQueueDrops', 'InputOverruns', \
'OutputErrors', 'Applique', 'InputIgnoredPackets', 'MulticastPacketsSent', \
'MulticastPacketsReceived', 'ParityPacketsReceived', 'UnknownProtocolPacketsReceived', \
'PacketsReceived', 'PacketsSent', 'BytesReceived', 'BytesSent', \
'delta.PacketsReceived', 'delta.PacketsSent', 'delta.BytesSent', \
'delta.BytesReceived', 'epoch_time', 'LastDataTime', 'LastDiscontinuityTime',\
'ip', 'hostname'], 1)
features = list(df)
# pprint (list(df))
X = df.as_matrix()
svd = decomposition.TruncatedSVD(n_components=17, random_state=55)
trans_X = svd.fit_transform(X)
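    # TruncatedSVD projects the feature matrix onto 17 latent components,
    # a linear dimensionality reduction (like PCA, but without mean-centering)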
# plotting the decomposed and original data
# pos_idx = np.where(y == 1)
# neg_idx = np.where(y == 0)
#
# X_pos = trans_X[pos_idx]
# X_neg = trans_X[neg_idx]
#
# f, (ax1, ax2) = plt.subplots(1, 2, figsize=(25,25))
# ax1.plot(X[pos_idx][:,18], X[pos_idx][:,14], 'r+')
# ax1.plot(X[neg_idx][:,18], X[neg_idx][:,14], 'go')
# ax1.set_title('Original Data')
# ax2.plot( X_pos[:,0], X_pos[:,11], 'r+', label='positive')
# ax2.plot(X_neg[:,0], X_neg[:,11], 'go', label='negative')
# ax2.set_title('Dimensionality Reduced')
# plt.show()
#f.savefig('norm_reduced.png')
return trans_X, y, features
def split(X, y):
    # split the data set roughly 67/33 (test_size=0.33) into train and test
X_train, X_test, y_train, y_test = cross_validation.train_test_split \
(X, y, test_size=0.33, random_state=42)
p = np.where(y_test == 1)
n = np.where(y_test == 0)
p2 = np.where(y_train == 1)
n2 = np.where(y_train == 0)
print 'train', 'pos:', len(p2[0]), 'neg:', len(n2[0]), 'size', X_train.shape
print 'test', 'pos:', len(p[0]), 'neg:', len(n[0]), 'size', X_test.shape
return X_train, X_test, y_train, y_test
def find_feat_importance(X_train, y_train, X_test, y_test, features):
print 'finding important features using random forest....'
clf = ensemble.RandomForestClassifier(n_estimators=700, max_features='log2', criterion='entropy', random_state=45)
clf = clf.fit(X_train, y_train)
print metrics.classification_report(y_test, clf.predict(X_test))
# plot the important features
f = 100. * (clf.feature_importances_ / clf.feature_importances_.max())
sorted_idx = np.argsort(f)
pos = np.arange(sorted_idx.shape[0]) + 0.5
plt.figure(figsize=(16, 12))
plt.barh(pos, f[sorted_idx], align='center')
plt.yticks(pos, np.asanyarray(features)[sorted_idx])
plt.title('Important Features')
plt.savefig('feature_importances_3.png')
if plot_on == True:
plt.show()
def create_model(X_train, y_train, model='log_reg'):
if model == 'random_forest':
print 'creating the random forest model....'
clf = ensemble.RandomForestClassifier(random_state=45)
params = {'n_estimators': [10, 100, 500, 800], 'criterion':['gini', 'entropy']}
elif model == 'svm':
print 'creating svm...'
clf = svm.SVC(verbose=1)
params = {'kernel':['rbf'], 'C': [0.01, 1, 1.5]}
else:
# default to logistic regression
print 'creating the log reg model....'
clf = linear_model.LogisticRegression(random_state=45, n_jobs=-1)
params = {'C': np.logspace(0.001, 1.5, 40)}
# parameter search
print 'running grid search....'
scoring = None
    if model != 'svm':
scoring = 'f1'
grid = grid_search.GridSearchCV(estimator=clf, param_grid=params, cv=5, scoring=scoring) # score='f1'
grid.fit(X_train, y_train)
print 'best estimator parameters:'
print grid.best_estimator_
return grid.best_estimator_
@timeit
def train_model(X, y, X_test, y_test, clf, folds=5):
strat_k_fold = cross_validation.StratifiedKFold(y, n_folds=folds, shuffle=True, random_state=45)
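    # stratified folds keep the positive/negative class ratio roughly constant
    # across folds, which matters if the health labels are imbalanced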
y_hats = []
for train_idx, valid_idx in strat_k_fold:
X_train, X_valid = X[train_idx], X[valid_idx]
y_train = y[train_idx]
clf = clf.fit(X_train, y_train)
y_hats.append((y[valid_idx], clf.predict(X_valid)))
# assess the accuracy of validation
mean_tpr = 0.0
mean_fpr = np.linspace(0,1, 100)
all_tpr = []
fig = plt.figure(figsize=(10, 8))
for i, (y_valid, y_hat) in enumerate(y_hats):
print 'Accuracy for Fold', i
print metrics.classification_report(y_valid, y_hat)
# plot the ROC curve
fpr, tpr, thresholds = metrics.roc_curve(y_valid, y_hat)
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = metrics.auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='ROC Fold %d (area = %0.02f)' % (i, roc_auc))
plt.plot([0, 1], [0, 1], '--', color='0.75', label='Random Guess')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC')
plt.legend(loc="lower right")
plt.savefig('roc_svm_f1.png')
if plot_on == True:
plt.show()
# predict on test
y_test_preds = clf.predict(X_test)
print 'accuracy for test:'
print metrics.classification_report(y_test, y_test_preds)
if __name__ == '__main__':
# find the important features
X, y, features = load(d=1)
#X_train, X_test, y_train, y_test = split(X,y)
#find_feat_importance(X_train, y_train, X_test, y_test, features)
#create_model(X_train, y_train, model='log_reg')
#clf = create_model(X_train, y_train, model='svm')
#train_model(X_train, y_train, X_test, y_test, clf)
|
[
"le.m.quan@gmail.com"
] |
le.m.quan@gmail.com
|
65e2c31fd6e5b5f7cb0a8312d0b149d2474900c4
|
c1aa240e35a7a8355a7ca9dcd546bdb6446b8509
|
/venv/Scripts/easy_install-script.py
|
5bdae166e18d1f797e8c10ec64131f06edaaccc7
|
[] |
no_license
|
LGSW/Algorithms
|
a713685c737b7f7578704cd9c859e98a411e0a59
|
1cdf9caf74299c36735069fef41e8e95f4ed68fc
|
refs/heads/master
| 2020-03-24T22:21:58.025620
| 2018-08-12T03:23:28
| 2018-08-12T03:23:28
| 143,058,806
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 452
|
py
|
#!C:\Users\cxu29\PycharmProjects\Algorithm\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.0.1','console_scripts','easy_install'
__requires__ = 'setuptools==39.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.0.1', 'console_scripts', 'easy_install')()
)
|
[
"41303602+LGSW@users.noreply.github.com"
] |
41303602+LGSW@users.noreply.github.com
|
1cd6e2ba2ad679213fc83e9dc4ab4cdf8ce01f01
|
a80963fbac8c0edcef5b1b9bad67a4b5913cd569
|
/itertools/itertools_permutations.py
|
3fb99df8b7a0b85c63513bce70fb251b2364cd9d
|
[] |
no_license
|
manurp/Python_programs
|
946877caf93a2ff0239c68dc4e8e02c72fe6f156
|
1a0c896b05b72afee7a48dd1bc2bef2aa7ffe0af
|
refs/heads/master
| 2020-06-28T01:22:59.140092
| 2018-09-01T17:03:18
| 2018-09-01T17:03:18
| 97,080,367
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 343
|
py
|
from itertools import permutations
s,n = input().split()
print(*[''.join(i) for i in permutations(sorted(s),int(n))],sep='\n')
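# Sample run (HackerRank-style input "S k" on one line):
#   input : HACK 2
#   output: AC AH AK CA CH CK HA HC HK KA KC KH  (one permutation per line)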
#Alternate codes
# from itertools import permutations
# li = input().split()
# strli = list(li[0])
# strli.sort()
# for i in permutations(strli,int(li[1])):
# for j in i:
# print(j,end='')
# print()
|
[
"manojrpoojary@gmail.com"
] |
manojrpoojary@gmail.com
|
a156b7b94128a599bffab93242e0db94981fcbee
|
03402505d3db3ab7f17457989b6d3e7f606ad3e6
|
/check_divisiblity.py
|
9271c903afe369d70956217221d79b556e96b51f
|
[] |
no_license
|
arunatuoh/assignment-code
|
f1741c1c4e7651727eb3e0af841dccbffb89e8ef
|
33aa3af0ba0fa58664498dfa684dac90ee8e506a
|
refs/heads/master
| 2020-07-27T11:45:53.397205
| 2019-09-17T14:54:59
| 2019-09-17T14:54:59
| 209,079,224
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
def check_divisiblity(a, b):
if a % b == 0:
print("a is divisible by b")
else:
print("a is not divisibla by b")
check_divisiblity(2, 4)
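# prints "a is not divisible by b" here, since 2 % 4 == 2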
|
[
"barunsingh31@gmail.com"
] |
barunsingh31@gmail.com
|
2ecbcabfa0207fe708c054e99ac7400b259f0576
|
d335cedebcce56deed2105514fb5ef225301a6be
|
/tensorflow_v1/01_-_Introduction_to_Tensorflow/02_-_Tensor_operations.py
|
53ca05e60c03b5ba6b38db8b8b8faa33e5652038
|
[
"MIT"
] |
permissive
|
vkozyk/deeplearningtutorial
|
372511aa17d94464a2a5b213beffc48f312a49fd
|
f020567c91f199c03f811e1e4f5173e5c97c8fa9
|
refs/heads/master
| 2023-01-23T11:17:00.427202
| 2019-05-31T04:45:54
| 2019-05-31T04:45:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,077
|
py
|
import tensorflow as tf
g = tf.Graph()
with g.as_default():
x = tf.placeholder(tf.float32, [2, 3], 'x') #float matrix of 2 rows and 3 columns
#Get the shape of a tensor
shape = tf.shape(x)
#Get a sub-tensor
subtensor = x[1, 1:]
#Concatenate two tensors together
concat = tf.concat([ x, x ], axis=1)
#Reshape a tensor
reshape = tf.reshape(x, [6, 1])
#Tile a tensor
tile = tf.tile(x, [2, 2])
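    # Expected results for the [[1, 2, 3], [4, 5, 6]] feed used below:
    #   shape      -> [2 3]
    #   sub-tensor -> [5. 6.]  (row 1, columns 1 onwards)
    #   concat     -> 2x6, each row repeated side by side
    #   reshape    -> 6x1 column vector [1..6]
    #   tile       -> 4x6, the 2x3 input repeated twice along each axis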
g.finalize()
with tf.Session() as s:
[ result_shape, result_subtensor, result_concat, result_reshape, result_tile ] = s.run([ shape, subtensor, concat, reshape, tile ], { x: [ [ 1.0, 2.0, 3.0 ], [ 4.0, 5.0, 6.0 ] ] })
print('shape')
print(result_shape)
print()
print('sub-tensor')
print(result_subtensor)
print()
print('concatenate')
print(result_concat)
print()
print('reshape')
print(result_reshape)
print()
print('tile')
print(result_tile)
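# Hedged sanity check for the 2x3 input fed above: shape -> [2 3],
# sub-tensor -> [5. 6.], concat -> a 2x6 matrix, reshape -> a 6x1 column,
# and tile -> a 4x6 matrix (a 2x2 grid of copies of x).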
|
[
"marctanti@gmail.com"
] |
marctanti@gmail.com
|
ca9abec9b8b8df24ca336063d5984fce8ce2253b
|
bad62c2b0dfad33197db55b44efeec0bab405634
|
/sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2019_01_01/aio/_configuration.py
|
3206a8df47a52fd344cf2c17f5f8a42f0082e9b4
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
test-repo-billy/azure-sdk-for-python
|
20c5a2486456e02456de17515704cb064ff19833
|
cece86a8548cb5f575e5419864d631673be0a244
|
refs/heads/master
| 2022-10-25T02:28:39.022559
| 2022-10-18T06:05:46
| 2022-10-18T06:05:46
| 182,325,031
| 0
| 0
|
MIT
| 2019-07-25T22:28:52
| 2019-04-19T20:59:15
|
Python
|
UTF-8
|
Python
| false
| false
| 3,513
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class PolicyClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for PolicyClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:keyword api_version: Api Version. Default value is "2019-01-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(self, credential: "AsyncTokenCredential", subscription_id: str, **kwargs: Any) -> None:
super(PolicyClientConfiguration, self).__init__(**kwargs)
api_version = kwargs.pop("api_version", "2019-01-01") # type: str
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = api_version
self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
kwargs.setdefault("sdk_moniker", "mgmt-resource/{}".format(VERSION))
self._configure(**kwargs)
def _configure(self, **kwargs: Any) -> None:
self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get("authentication_policy")
if self.credential and not self.authentication_policy:
self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(
self.credential, *self.credential_scopes, **kwargs
)
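# Minimal usage sketch (assumes the azure-identity package; the wiring below is
# illustrative and not part of this module):
#   from azure.identity.aio import DefaultAzureCredential
#   config = PolicyClientConfiguration(DefaultAzureCredential(), "<subscription-id>")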
|
[
"noreply@github.com"
] |
test-repo-billy.noreply@github.com
|
813d7a9fca333f388a964b23cdee31a7552c8682
|
5f9226f4523aec8e769e753fb3bff61cb2e4209d
|
/examples/cm360_report_replicate_example.py
|
9f0b3ef9c33b91ef1ff37eae6e5a030bf250034e
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
Ressmann/starthinker
|
813c699c9d5fa6bce0380009be07b36dc8629cc7
|
301c5cf17e382afee346871974ca2f4ae905a94a
|
refs/heads/master
| 2023-08-30T21:10:34.748144
| 2021-10-06T14:01:26
| 2021-10-06T14:02:12
| 299,899,333
| 0
| 0
|
Apache-2.0
| 2020-09-30T11:38:27
| 2020-09-30T11:38:27
| null |
UTF-8
|
Python
| false
| false
| 4,971
|
py
|
###########################################################################
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
#
# This code generated (see scripts folder for possible source):
# - Command: "python starthinker_ui/manage.py example"
#
###########################################################################
import argparse
import textwrap
from starthinker.util.configuration import Configuration
from starthinker.task.drive.run import drive
from starthinker.task.dataset.run import dataset
from starthinker.task.cm_report_replicate.run import cm_report_replicate
def recipe_cm360_report_replicate(config, auth_read, recipe_name, auth_write, account, recipe_slug, report_id, report_name, delete, Aggregate):
"""Replicate a report across multiple networks and advertisers.
Args:
auth_read (authentication) - Credentials used for reading data.
recipe_name (string) - Sheet to read ids from.
auth_write (authentication) - Credentials used for writing data.
account (integer) - CM network id.
recipe_slug (string) - NA
report_id (integer) - CM template report id, for template
report_name (string) - CM template report name, empty if using id instead.
delete (boolean) - Use only to reset the reports if setup changes.
Aggregate (boolean) - Append report data to existing table, requires Date column.
"""
drive(config, {
'auth':'user',
'copy':{
'source':'https://docs.google.com/spreadsheets/d/1Su3t2YUWV_GG9RD63Wa3GNANmQZswTHstFY6aDPm6qE/',
'destination':recipe_name
}
})
dataset(config, {
'auth':auth_write,
'dataset':recipe_slug
})
cm_report_replicate(config, {
'auth':auth_read,
'report':{
'account':account,
'id':report_id,
'name':report_name,
'delete':delete
},
'replicate':{
'sheets':{
'sheet':recipe_name,
'tab':'Accounts',
'range':''
}
},
'write':{
'bigquery':{
'dataset':recipe_slug,
'is_incremental_load':Aggregate
}
}
})
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""
Replicate a report across multiple networks and advertisers.
1. Provide the name or ID of an existing report.
2. Run the recipe once to generate the input sheet called .
3. Enter network and advertiser ids to replicate the report.
4. Data will be written to BigQuery > > > _All
"""))
parser.add_argument("-project", help="Cloud ID of Google Cloud Project.", default=None)
parser.add_argument("-key", help="API Key of Google Cloud Project.", default=None)
parser.add_argument("-client", help="Path to CLIENT credentials json file.", default=None)
parser.add_argument("-user", help="Path to USER credentials json file.", default=None)
parser.add_argument("-service", help="Path to SERVICE credentials json file.", default=None)
parser.add_argument("-verbose", help="Print all the steps as they happen.", action="store_true")
parser.add_argument("-auth_read", help="Credentials used for reading data.", default='user')
parser.add_argument("-recipe_name", help="Sheet to read ids from.", default='')
parser.add_argument("-auth_write", help="Credentials used for writing data.", default='service')
parser.add_argument("-account", help="CM network id.", default='')
parser.add_argument("-recipe_slug", help="", default='')
parser.add_argument("-report_id", help="CM template report id, for template", default='')
parser.add_argument("-report_name", help="CM template report name, empty if using id instead.", default='')
parser.add_argument("-delete", help="Use only to reset the reports if setup changes.", default=False)
parser.add_argument("-Aggregate", help="Append report data to existing table, requires Date column.", default=False)
args = parser.parse_args()
config = Configuration(
project=args.project,
user=args.user,
service=args.service,
client=args.client,
key=args.key,
verbose=args.verbose
)
recipe_cm360_report_replicate(config, args.auth_read, args.recipe_name, args.auth_write, args.account, args.recipe_slug, args.report_id, args.report_name, args.delete, args.Aggregate)
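# Example invocation (all values hypothetical):
#   python cm360_report_replicate_example.py -user user.json -account 1234567 \
#     -recipe_name "CM360 Replicate" -recipe_slug cm360_replica -report_id 98765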
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
8ba161f28f3d05e9d6a6981f3eee775b45379723
|
1a2d2d539ce0471483ae3f903636eb28697ce7ec
|
/KEDD/cowbull.py
|
09396032a658ae14351dde7336a7a84b84529123
|
[] |
no_license
|
Lovi96/ccstuff
|
84bdc7ca8426d183a99b1917a0076b212c230772
|
eddf002c5bff9173314787a2421b9e6edee42a54
|
refs/heads/master
| 2021-01-11T04:27:54.420495
| 2017-01-11T09:27:51
| 2017-01-11T09:27:51
| 71,187,463
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 489
|
py
|
import random
random_number = random.randint(1000, 9999)
print(random_number)
random_number = str(random_number)
while True:
cow = 0
bull = 0
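    # a "cow" is a correct digit in the wrong position, a "bull" a correct digit
    # in the right position; exact matches are moved from cow to bull below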
user_number = (input("agyál számt"))
for num in range(0, 4):
if user_number[num] in random_number:
cow += 1
if random_number[num] == user_number[num]:
cow -= 1
bull += 1
print(cow, "cow")
print(bull, "bull")
if bull >= 4:
print("Nyertél!")
break
|
[
"m.lovacsi@gmail.com"
] |
m.lovacsi@gmail.com
|
efe2f47e0cbd7bf9a4ddb51d94bf165baf8b4479
|
e731fe303e0dd8378656820d5b9abfbc3f8a4250
|
/python/object_generate_presigned_url.py
|
3f6e1adabefc862aaf612da6ffeb7b2f7bc115cb
|
[] |
no_license
|
rgtech3/101-AWS-S3-Hacks
|
800e9c424cd8447a6917a97669b94e73488ddfde
|
ad278809e753c600c43a01c0522990b4e0d9824e
|
refs/heads/master
| 2021-09-26T17:29:41.791624
| 2018-10-31T20:06:01
| 2018-10-31T20:06:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
#!/usr/bin/python
"""
- Hack : Generate a presigned url
- AWS CLI: There is no CLI
"""
import boto3
if __name__ == "__main__":
client = boto3.client('s3', region_name="us-west-2")
bucketname = "us-west-2.nag"
post_url = client.generate_presigned_url('get_object', {'Bucket': bucketname , 'Key':'hello1.txt' }, ExpiresIn=3600)
print "URL to test : ", post_url
|
[
"nmedida@netflix.com"
] |
nmedida@netflix.com
|
58c622d9bbbbdb37475e7b750bde4aac32636342
|
82306f5d80d6918a9063b4a39045e59b49465cce
|
/troep/x.py
|
4215dc32328932e587005d0fca39c534eb5ab827
|
[] |
no_license
|
khualu/proxem.bot
|
9a196cb0d8680d413998b387c10f2ab20dca9f6e
|
052ec6d2614bd9772dafe74e42c03c953645d777
|
refs/heads/master
| 2022-12-02T22:49:24.024992
| 2020-07-30T08:13:05
| 2020-07-30T08:13:05
| 283,710,296
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
from flask import Flask
app = Flask(__name__)
@app.route('/')
def root():
return 'lol'
@app.route('/hallo/<path:name>')
def hoi(name):
return 'hoi %s' % name
app.run(host='127.0.0.1', port=1337, debug=True)
|
[
"noreply@github.com"
] |
khualu.noreply@github.com
|
c57e987a9e4e517fc97762f5b73de6014b9f5d29
|
0e4dc82a94563dacb0c25d0d43fbcbe3def21f72
|
/233-Number-of-Digit-One/Python/Solution01.py
|
ab4675c5e08f8745a311412b307cb0aa36f04a8d
|
[
"CC-BY-3.0",
"MIT"
] |
permissive
|
Eroica-cpp/LeetCode
|
3ce3b05b3098e8097c1090e2116b813efaadd2a3
|
07276bd11558f3d0e32bec768b09e886de145f9e
|
refs/heads/master
| 2021-06-20T05:41:30.506250
| 2017-03-16T05:17:39
| 2017-03-16T05:17:39
| 35,126,816
| 7
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,018
|
py
|
#!/usr/bin/python
"""
==============================================================================
Author: Tao Li (taoli@ucsd.edu)
Date: Jul 8, 2015
Question: 233-Number-of-Digit-One
Link: https://leetcode.com/problems/number-of-digit-one/
==============================================================================
Given an integer n, count the total number of digit 1 appearing in all
non-negative integers less than or equal to n.
For example:
Given n = 13,
Return 6, because digit 1 occurred in the following numbers: 1, 10, 11, 12, 13.
==============================================================================
Method: brute force
Time Complexity: Exp
Space Complexity: Exp
Note: OK but apparently "Memory Limit Exceeded"
==============================================================================
"""
class Solution:
# @param {integer} n
# @return {integer}
def countDigitOne(self, n):
stack = [str(i) for i in xrange(1,n+1)]
return "".join(stack).count("1")
|
[
"eroicacmcs@gmail.com"
] |
eroicacmcs@gmail.com
|
a9eaca2c01fc21aabd9883fb710f25dac24d52ff
|
cdbd49f88466f04dbaab01f80aacda541bf7532f
|
/setup.py
|
9398c3b10d19cfc196f522d076707c4e4ac323c0
|
[
"MIT"
] |
permissive
|
amit2014/qb
|
60a41a8d17c895e2e128d7559208f19a3c502ad8
|
0b2a86ecec7e7a7aedd19eaf0feed5f539d38046
|
refs/heads/master
| 2021-07-19T13:05:12.649941
| 2017-10-27T00:40:23
| 2017-10-27T00:40:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,377
|
py
|
import os
from setuptools import setup, find_packages, Command
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as output:
return output.read()
requirements = [
'scipy',
'numpy',
'wikipedia',
'nltk',
'scikit-learn',
'regex',
'fuzzywuzzy',
'py4j',
'python-Levenshtein',
'requests',
'click',
'pyfunctional',
'luigi',
'jinja2',
'progressbar2',
'boto3',
'pyhcl',
'pycrayon',
'matplotlib',
'tagme',
'spacy'
]
class DownloadCommand(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import nltk
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
path = 'data/external/nltk_download_SUCCESS'
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, 'w') as f:
            f.write('Downloaded nltk: stopwords, punkt, wordnet, averaged_perceptron_tagger')
setup(
name='qb',
version='2.0.0',
description='Quiz Bowl AI system named QANTA',
license='MIT',
long_description=read('README.md'),
packages=find_packages(),
install_requires=requirements,
include_package_data=True,
cmdclass={'download': DownloadCommand}
)
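# Typical usage: "pip install -e ." to install, then "python setup.py download" to
# fetch the NLTK data via the custom command registered in cmdclass above.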
|
[
"ski.rodriguez@gmail.com"
] |
ski.rodriguez@gmail.com
|
a8d045bdee3518b366525860fa9cfff70c74894b
|
9ef6c1caeadda2ace8919480c69a1e92eaf4ed78
|
/data_analysis/reshaping.py
|
2e6e1962497fc6101f2964c3d3db2559b4decdd1
|
[] |
no_license
|
sambonuruddeen/TakenMind-Data-science-Internship
|
feb621d8664db46f339a903b3bf0a5107371f14b
|
b8a3e3fcf95a0f614c207821359706456b502c72
|
refs/heads/master
| 2020-09-23T05:02:19.747591
| 2019-12-03T15:32:48
| 2019-12-03T15:32:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 674
|
py
|
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
df1 = DataFrame(np.arange(8).reshape(2,4), index=pd.Index(['Uber','Grab'], name='cabs'), columns=pd.Index(['c1','c2','c3','c4'],name="attributes"))
print df1
stackdf1 = df1.stack()
print stackdf1
df1unstack = stackdf1.unstack()
print 'unstack'
print df1unstack
df3 = stackdf1.unstack('cabs')
print df3
df4 = stackdf1.unstack('attributes')
print 'attributes'
print df4
#series
s1 = Series([5,10,15], index=['A','B','C'])
s2 = Series([15,20,25], index=['B','C','D'])
s3 = pd.concat([s1,s2],keys=['k1','k2'])
print s3
df = s3.unstack()
print df
print df.stack()
print df.stack(dropna=False)
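# stack() pivots the columns into an inner index level (wide -> long) and unstack()
# inverts it; e.g. stackdf1[('Uber', 'c1')] is 0 for the frame built above.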
|
[
"sambonuruddeen@gmail.com"
] |
sambonuruddeen@gmail.com
|
45dfa9017f465bde1711e5b6d100a3b5f9c631b1
|
497d9263a78146d3cf5fe5e53af8e83bd70c24ca
|
/API/OCRLY.py
|
e91e1ad4594d2d8d84b172330fe1e3ccaaa46f6d
|
[] |
no_license
|
Codingmace/Tobias
|
7dc8b63a9033417632ef9be478f00e1e84f965f4
|
9c17f34df2b69eeaeb42fd74e1d2d26063a589bd
|
refs/heads/main
| 2023-03-08T14:57:46.050468
| 2021-02-22T16:14:01
| 2021-02-22T16:14:01
| 337,618,415
| 1
| 1
| null | 2021-02-21T23:25:39
| 2021-02-10T04:40:41
|
Python
|
UTF-8
|
Python
| false
| false
| 443
|
py
|
import requests
from API.variables import rapidApiKey
def OImage2Text(imageUrl, filename):
url = "https://ocrly-image-to-text.p.rapidapi.com/"
querystring = {"imageurl": imageUrl,"filename": filename}
headers = {
'x-rapidapi-key': rapidApiKey,
'x-rapidapi-host': "ocrly-image-to-text.p.rapidapi.com"
}
response = requests.request("GET", url, headers=headers, params=querystring)
return response
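# Example call (URL and filename hypothetical):
#   resp = OImage2Text('https://example.com/scan.png', 'scan.png')
#   print(resp.text)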
|
[
"codingmace@gmail.com"
] |
codingmace@gmail.com
|
a6369c509d70147d94a60a37ee79f96c174fbfe8
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2224/60703/255283.py
|
e93e9f3b29b914560d846d75a99db210b13b0f95
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
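# Maximize an integer by swapping one pair of its digits: brute force over all index
# pairs, keeping the best result (the hardcoded 8263 check below looks judge-specific).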
strr = input()
max = int(strr)
length = len(strr)
list = []
for i in range(length):
list.append(strr[i])
for i in range(0,length):
for j in range(i+1,length):
temp = list.copy()
tempnum = temp[i]
temp[i] = temp[j]
temp[j] = tempnum
res = int("".join(temp))
if(res>max):
max = res
if(max==8263):
print(strr)
print(max)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
982f5de395f9bcfeb8dd2d31fcaf9e43d4fae2c6
|
4be2f92639419b492211c9b471949b37f456b0d7
|
/datos/datos_user.py
|
72a46b0d9d5d2c8eb1f0e650e47699d5312ef3ba
|
[] |
no_license
|
AlanHedz/crud-tkinter
|
717d908b74ceb45b9bf6ea08d552834b051e3fd3
|
c01a3704f5a1242045b1ead362f8ee2ae1aa088c
|
refs/heads/master
| 2020-06-14T12:24:10.135735
| 2016-11-29T01:50:44
| 2016-11-29T01:50:44
| 75,025,339
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 426
|
py
|
import sys
sys.path.append('./models')
from table import *
@db_session
def all_persons():
persons = db.select("SELECT * FROM persona")[:]
return persons
@db_session
def create_person(name, age):
Persona(name = name, age = age)
@db_session
def update_person(id_persona, name, age):
person = Persona[id_persona]
person.set(name = name, age = age)
@db_session
def delete_person(id_persona):
Persona[id_persona].delete()
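# Usage sketch (assumes the Pony ORM entity 'Persona' defined in ./models/table.py):
#   create_person('Ana', 30); update_person(1, 'Ana', 31); delete_person(1)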
|
[
"conejito.de.oro@hotmail.com"
] |
conejito.de.oro@hotmail.com
|
6954fcaa023b10672767da3151ef564cf6db90e3
|
dc4e3f408ed08a00f5a016eef1790a9d79a8e3c4
|
/class_count.py
|
523e7063f7e2baa3fd8b16a32a601f143e238864
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
dark-nova/twitch_rss
|
32881ea2e49c9dbcfa99a64dcd1f04cef8cffc31
|
bd338a30a450ff93c104b7eae0b67b8e3a4e9c93
|
refs/heads/master
| 2020-07-18T16:51:02.354647
| 2019-12-25T06:02:51
| 2019-12-25T06:02:51
| 206,279,012
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
import json
import sys
from collections import defaultdict
from html.parser import HTMLParser
classes = defaultdict(int)
class ClassCounter(HTMLParser):
def handle_starttag(self, tag, attrs):
for (attr, val) in attrs:
if attr == 'class':
classes[val] += 1
parser = ClassCounter()
try:
with open(sys.argv[1], 'r') as f:
parser.feed(f.read())
print(json.dumps(classes, indent = 4))
except Exception as e:
print(e)
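# Usage (file name hypothetical): python class_count.py page.html
# prints a JSON object mapping each CSS class to how many start tags carry it.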
|
[
"31264514+dark-nova@users.noreply.github.com"
] |
31264514+dark-nova@users.noreply.github.com
|
ab82c2b4dd7d571e278475129f71a924ef7ddd6f
|
b5d98dee2d1476ac6ff3f511f25a763adeeaf1c0
|
/manage.py
|
d10ae475c54d9a7e814ca09e5325e8db44ce1b6c
|
[] |
no_license
|
mungaihosea/innomed
|
166e9b2518092e42ef970be30c2fa9a174baf18f
|
744ab24c904550d3892b071e07fce051e8c696fc
|
refs/heads/master
| 2022-12-15T12:20:53.790563
| 2020-09-20T03:24:56
| 2020-09-20T03:24:56
| 296,910,089
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'innomed.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
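# Standard Django entry point; typical commands (not specific to this project):
#   python manage.py migrate / runserver / createsuperuser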
|
[
"mungaihosea@gmail.com"
] |
mungaihosea@gmail.com
|
e1fe9f2ea0ae8713a17150c8568ff987412ce618
|
5d7893168db1267aff32269dabfc80c449d0f778
|
/euler31.py
|
4eaa2d6c8dad9a656512af7ef593ed19febf3e27
|
[] |
no_license
|
bewakes/project-euler
|
3c89195e7d7e4411c160531dab727cbe5a8e9ce3
|
75fa890459a2d80dd6064e3867f4a0e1b6996c7a
|
refs/heads/master
| 2021-07-06T02:34:45.863000
| 2020-11-26T08:25:24
| 2020-11-26T08:25:24
| 73,823,676
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
py
|
# copied from thread
def nway(total, coins):
if not coins: return 0
c, coins = coins[0], coins[1:]
count = 0
if total%c==0: count+=1
for amount in xrange(0,total,c):
count+=nway(total-amount, coins)
return count
l = [1,2,5,10,20,50,100,200]
print nway(200, l)
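# Counts the ways to make 200p from the eight UK coin denominations; the expected
# Project Euler 31 result is 73682 (stated from memory, verify before relying on it).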
|
[
"spirit_bibek@yahoo.com"
] |
spirit_bibek@yahoo.com
|
c36ed7efd1cfac3828d1b4eb8795b5b733c4e6e9
|
17b771514ea773b5d34d31576313a6294562c4c2
|
/nplm/v0/nplm.py
|
03b0aaf9e98a6b8f3e7fde5f8a167f9b889df75b
|
[] |
no_license
|
xuanhan863/neural_prob_lang_model
|
ce26353073078d1f2f13d645c21b3ffa83206402
|
dc594773448cb444a1631797855cc5c5e751de05
|
refs/heads/master
| 2020-12-24T19:13:04.387633
| 2015-07-28T05:54:11
| 2015-07-28T05:54:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,282
|
py
|
#!/usr/bin/env python
import cPickle as pickle
import gzip
import os
import sys
import time
import numpy
import theano
import theano.tensor as T
import theano.sparse as ssp
# basically a copy of http://deeplearning.net/tutorial/code/logistic_sgd.py
class LogisticRegression(object):
def __init__(self, input, n_in, n_out,
W=None, b=None,
print_internal_vars=False):
if W == None:
W = numpy.zeros((n_in, n_out), dtype=theano.config.floatX)
self.W = theano.shared(value=W, name='W', borrow=True)
if b == None:
b = numpy.zeros((n_out,), dtype=theano.config.floatX)
self.b = theano.shared(value=b, name='b', borrow=True)
linear_output = T.dot(input, self.W) + self.b
if print_internal_vars:
linear_output = theano.printing.Print('output pre softmax p_y_given_x')(linear_output)
p_y_given_x = T.nnet.softmax(linear_output)
if print_internal_vars:
p_y_given_x = theano.printing.Print('output softmaxed p_y_given_x')(p_y_given_x)
self.p_y_given_x = p_y_given_x
y_pred = T.argmax(self.p_y_given_x, axis=1)
if print_internal_vars:
y_pred = theano.printing.Print('output argmax y_pred')(y_pred)
self.y_pred = y_pred
        self.print_internal_vars = print_internal_vars
        self.params = [self.W, self.b]
def negative_log_likelihood(self, y):
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
def errors(self, y):
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
y_pred = self.y_pred
            if self.print_internal_vars:
                y_pred = theano.printing.Print('errors y_pred')(y_pred)
return T.mean(T.neq(y_pred, y))
else:
raise NotImplementedError()
# basically a copy of Hidden layer from http://deeplearning.net/tutorial/code/mlp.py
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.tanh, print_internal_vars=False):
self.input = input
        if W is None:
# See Glorot & Bengio, 2010, "Understanding the difficulty of training deep feedforward neural networks"
W_weight_range = numpy.sqrt(6. / (n_in + n_out))
W = numpy.asarray(rng.uniform(
low=-W_weight_range,
high=W_weight_range,
size=(n_in, n_out)), dtype=theano.config.floatX)
if activation == theano.tensor.nnet.sigmoid:
W *= 4
self.W = theano.shared(value=W, name='W', borrow=True)
        if b is None:
b = numpy.zeros((n_out,), dtype=theano.config.floatX)
self.b = theano.shared(value=b, name='b', borrow=True)
linear_output = T.dot(input, self.W) + self.b
output = linear_output if activation is None else activation(linear_output)
if print_internal_vars:
output = theano.printing.Print('output of hidden layer')(output)
self.output = output
self.params = [self.W, self.b]
# a token embedding layer
# see Bengio et al, 2003, "A Neural Probabilistic Language Model"
class ProjectionLayer(object):
def __init__(self, rng, input, vocab_size, projection_dim, W=None,
print_internal_vars=False):
"""
:type vocab_size: int
:param vocab_size: |V|. W.shape = (|V|, d)
:type projection_dim: int
:param projection_dim: projection dimension, d. W.shape = (|V|, d)
"""
        if W is None:
W = numpy.asarray(rng.uniform(low=-1, high=1, size=(vocab_size, projection_dim)),
dtype=theano.config.floatX)
self.W = theano.shared(value=W, name='W', borrow=True)
# input is a pair of indexes; w1 and w2
if print_internal_vars:
input = theano.printing.Print('input to projection layer')(input)
self.input = input
# # @aboSamoor's sparse matrix dot product
# # https://groups.google.com/forum/#!searchin/theano-users/one$20hot$20vector/theano-users/lobCNFMlMeA/hUoNUb270N4J
# data_flat = input.flatten()
# data_ones = T.cast(T.ones_like(data_flat), config.floatX)
# shape1 = T.as_tensor_variable([data_ones.shape[0], self.W.shape[0]])
# indptr = T.arange(data_ones.shape[0]+1)
# m1 = ssp.CSR(data_ones, data_flat, indptr, shape1)
# m2 = ssp.dot(m1, self.W)
# self.output = m2.flatten().reshape((data.shape[0], self.W.shape[1] * data.shape[1]))
# output is concatenation of W rows for w1, w2 indexes
indexed_rows = self.W[T.cast(input, 'int32')]
concatenated_rows = indexed_rows.flatten()
num_examples = input.shape[0]
width = concatenated_rows.size // num_examples
output = concatenated_rows.reshape((num_examples, width))
if print_internal_vars:
output = theano.printing.Print('output of projection layer')(output)
self.output = output
self.params = [self.W]
class NPLM(object):
def __init__(self, rng, input, n_in, vocab_size, projection_dim, n_hidden, n_out,
print_internal_vars, input_feature_names, feature_input_indexes,
weight_params = None):
        # guard: weight_params defaults to None but is read with .get() below
        if weight_params is None:
            weight_params = {}
        # keep track of feature name -> index mappings
self.input_feature_names = input_feature_names
self.feature_input_indexes = feature_input_indexes
# token embedding layer
self.projectionLayer = ProjectionLayer(rng=rng, input=input,
vocab_size=vocab_size, projection_dim=projection_dim,
print_internal_vars=print_internal_vars,
W=weight_params.get('pl_W'))
# single hidden layer
self.hiddenLayer = HiddenLayer(rng=rng, input=self.projectionLayer.output,
n_in=projection_dim * n_in, # projection layer concats word embeddings
n_out=n_hidden,
activation=T.tanh,
print_internal_vars=print_internal_vars,
W=weight_params.get('hl_W'), b=weight_params.get('hl_b'))
# final softmax logistic regression
self.logRegressionLayer = LogisticRegression(
input=self.hiddenLayer.output,
n_in=n_hidden,
n_out=n_out,
print_internal_vars=print_internal_vars,
W=weight_params.get('lr_W'), b=weight_params.get('lr_b'))
# L1 / L2 norm regularizations
# note: no regularization of embedding space.
self.L1 = abs(self.hiddenLayer.W).sum() + abs(self.logRegressionLayer.W).sum()
self.L2_sqr = (self.hiddenLayer.W ** 2).sum() + (self.logRegressionLayer.W ** 2).sum()
# negative log likelihood and errors of entire model are those of the logistic layer
self.negative_log_likelihood = self.logRegressionLayer.negative_log_likelihood
self.errors = self.logRegressionLayer.errors
self.params = self.projectionLayer.params + \
self.hiddenLayer.params + \
self.logRegressionLayer.params
# a function for getting the prediction distribution for a bigram
self.predict_f = theano.function(inputs=[input],
outputs=self.logRegressionLayer.p_y_given_x)
def predict(self, w1, w2):
# map token -> idxs
i1, i2 = [self.feature_input_indexes[x] for x in [w1, w2]]
test_input = numpy.asarray([[i1, i2]], dtype=theano.config.floatX)
# map call to network
per_index_predictions = self.predict_f(test_input)[0]
# map _back_ to features by name
per_feature_predictions = [(self.input_feature_names[idx], p) for idx, p in enumerate(per_index_predictions)]
# return results, sorted by prob
return sorted(per_feature_predictions, key=lambda (feat, prob): -prob)
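# Hypothetical usage sketch (argument values assumed, not from this file):
#   nplm = NPLM(rng, x, n_in=2, vocab_size=V, projection_dim=30, n_hidden=50,
#               n_out=V, print_internal_vars=False, ...)
#   nplm.predict('the', 'quick')  # -> [(token, prob), ...] sorted by descending prob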
|
[
"matthew.kelcey@gmail.com"
] |
matthew.kelcey@gmail.com
|
f90ac858e77ca2ab8f32483631a5e04933753a70
|
47d8d44cb735968da44671fa323fc1a034d7f567
|
/spider_basic/pyshiyan.py
|
525eb2cdf0ee4167b53dbf506ddc219254245428
|
[] |
no_license
|
djytwy/twy
|
3a68740bc6b89576e98ed1c30877b4054c300589
|
e9db1be5175be093c50e114f0b06282ad75aa90c
|
refs/heads/master
| 2021-01-01T19:53:14.905712
| 2017-11-28T15:56:55
| 2017-11-28T15:56:55
| 98,709,584
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,145
|
py
|
# -*- coding: utf-8 -*-
import requests
from HTMLParser import HTMLParser
class MovieParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.movies = []
self.in_movies = False
def handle_starttag(self, tag, attrs):
def _attr(attrlist, attrname):
for attr in attrlist:
if attr[0] == attrname:
return attr[1]
return None
if tag == 'li' and _attr(attrs, 'data-title') and _attr(attrs, 'data-category') == 'nowplaying':
movie = {}
movie['title'] = _attr(attrs, 'data-title')
movie['score'] = _attr(attrs, 'data-score')
movie['director'] = _attr(attrs, 'data-director')
movie['actors'] = _attr(attrs, 'data-actors')
self.movies.append(movie)
print('%(title)s|%(score)s|%(director)s|%(actors)s' % movie)
self.in_movies = True
if tag == 'img' and self.in_movies:
self.in_movies = False
movie = self.movies[len(self.movies) - 1]
movie['cover-url'] = _attr(attrs, 'src')
_download_poster_cover(movie)
def _download_poster_cover(movie):
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.73 Safari/537.36'}
url = movie['cover-url']
print('downloading post cover from %s' % url)
s = requests.get(url, headers=headers)
fname = url.split('/')[-1]
print fname
with open(fname, 'wb') as f:
f.write(s.content)
movie['cover-file'] = fname
def nowplaying_movies(url):
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.73 Safari/537.36'}
s = requests.get(url, headers=headers)
parser = MovieParser()
parser.feed(s.content)
return parser.movies
if __name__ == '__main__':
url = 'http://movie.douban.com/cinema/nowplaying/xiamen/'
movies = nowplaying_movies(url)
import json
print('%s' % json.dumps(movies, sort_keys=True, indent=4, separators=(',', ': ')))
|
[
"676534074@qq.com"
] |
676534074@qq.com
|
18352233f2f7b9f8bedc9c98eed62e716e503db5
|
774be59f7377163524604c17c3002d70366c7b19
|
/class256/project/test.py
|
919796e242dcc5885988dc1d0beec07bcf563d7e
|
[] |
no_license
|
sidazhong/leetcode
|
24b8be0e857f3fd4059342e919a7c497f6c383c8
|
619760c879ba0b42783a6dc00d8c2375756a142e
|
refs/heads/master
| 2023-05-10T07:31:34.877317
| 2022-01-29T22:04:53
| 2022-01-29T22:04:53
| 200,956,337
| 1
| 1
| null | 2023-05-06T11:41:32
| 2019-08-07T02:23:27
|
VHDL
|
UTF-8
|
Python
| false
| false
| 5,980
|
py
|
import os
import gensim
from gensim.utils import simple_preprocess
from gensim.models.phrases import Phrases, Phraser
from gensim import corpora
from gensim.similarities import Similarity
from gensim.test.utils import common_corpus, common_dictionary
from gensim.sklearn_api import TfIdfTransformer
from gensim.parsing.porter import PorterStemmer
import nltk
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.metrics.aline import np
from nltk.tokenize import word_tokenize
from nltk.tokenize import sent_tokenize
from nltk.corpus import stopwords
import numpy as np
from shutil import copyfile, move
import glob
class deduplication:
demo = 6
start_num = 0
end_num = 1000*1000
#fuzzy parameter
fuzzy_similarity = 0.8
#root directory
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
#data set
dataset = ROOT_DIR + '/dataset'
#duplicate folder
duplicate_document_path = ROOT_DIR + '/duplicate/test.txt'
#inbox / sent document path
all_document_path = {}
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
source_document_path = ROOT_DIR+"/dataset/test1.txt"
target_document_path = ROOT_DIR+"/dataset/test2.txt"
log = ROOT_DIR+"/dataset/test3.txt"
documents = [""] * 2
def start(self):
print("=========Start=========")
count=0
similarity=0
if(self.demo==0):
#get all inbox document path
self.get_all_document()
f = open(self.log, "a")
#start compare the document
for k in self.all_document_path:
#compare target_document
for kk in self.all_document_path:
count+=1
if(kk <= k):
continue
if(count<self.start_num):
continue
self.source_document_path = self.all_document_path[k]
self.target_document_path = self.all_document_path[kk]
similarity = self.compare()
if(similarity>=self.fuzzy_similarity):
print(self.source_document_path +'----'+self.target_document_path)
print("ID: " + str(kk) + " _________ similarity: " + str(similarity))
f.write(self.source_document_path +'----'+self.target_document_path +"\n")
self.init()
if(count==self.end_num):
exit()
break
f.close()
if(self.demo==1 or self.demo==2 or self.demo==3 or self.demo==4 or self.demo==5 or self.demo==6):
self.source_document_path = self.ROOT_DIR+"/dataset/test1.txt"
self.target_document_path = self.ROOT_DIR+"/dataset/test2.txt"
similarity = self.compare()
print(similarity)
exit()
return similarity
def compare(self):
with open (self.source_document_path , encoding = "ISO-8859-1") as f:
tokens = sent_tokenize(f.read())
for line in tokens:
self.documents[0] += line
with open (self.target_document_path , encoding = "ISO-8859-1") as f:
tokens = sent_tokenize(f.read())
for line in tokens:
self.documents[1] += line
#bag of word
texts = [[text for text in simple_preprocess(doc, deacc=True)] for doc in self.documents]
#stemming
p = PorterStemmer()
for k in range(len(texts)):
texts[k] = p.stem_documents(texts[k])
#Reconvert documents to collection of words/bigrams
bigram_phraser = Phrases(texts, min_count=1)
texts_bigrams = [[text for text in bigram_phraser[ simple_preprocess(doc, deacc=True)]] for doc in self.documents]
        # debug leftover disabled: with the print/exit active, the n-gram build
        # and similarity computation below this point were unreachable
        # print(texts_bigrams)
        # exit()
# build N-gram
texts_bigrams = [[]] * 2
for k in range(len(texts)):
texts_bigrams[k] = [""] * (len(texts[k])-1)
for kk in range(len(texts[k])):
if(kk<len(texts[k])-1):
texts_bigrams[k][kk]=texts[k][kk]+"_"+texts[k][kk+1]
# remove most frequency word, stop word
for k in range(len(texts)):
word_counter = {}
for word in texts_bigrams[k]:
if word in word_counter:
word_counter[word] += 1
else:
word_counter[word] = 1
popular_words = sorted(word_counter, key = word_counter.get, reverse = True)
top = popular_words[:3]
for kk in range(len(top))[:]:
texts_bigrams[k][:] = (value for value in texts_bigrams[k] if value != top[kk])
#Create dictionary
dictionary = corpora.Dictionary(texts_bigrams)
#Create corpus
corpus = [dictionary.doc2bow(docString) for docString in texts_bigrams]
model = gensim.models.TfidfModel(corpus) # fit model
vector = model[corpus[0]]
#cosine similarity
index = Similarity(corpus=corpus,num_features=len(dictionary),output_prefix='on_disk_output')
for similarities in index:
similar_docs = list(enumerate(similarities))
break
return similar_docs[1][1]
def get_all_document(self):
# loop all dataset folder
count = 0
for dataset_folder in glob.iglob(self.dataset + '**/misc.forsale', recursive=True):
# loop all dataset folder
for k in glob.iglob(dataset_folder + '**/*', recursive=True):
#loop each inbox files
self.all_document_path[count]=k
count+=1
def init(self):
self.documents = [""] * 2
self.source_document_path = ""
self.target_document_path = ""
obj = deduplication()
similarity=obj.start()
print (similarity)
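# Note: with demo == 6 this compares dataset/test1.txt against dataset/test2.txt and
# prints the cosine similarity of their bigram bag-of-words vectors in [0, 1]
# (the TfidfModel built in compare() is instantiated but never actually applied).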
|
[
"sida9567@gmail.com"
] |
sida9567@gmail.com
|
150832bce639edf4dbadab3a0970058409ed1297
|
1baf76e19a719ebb2207f2af2924fc53349d6a60
|
/internship3_env/bin/rst2man.py
|
c94107c0c3e14b64d1c6351abee0999cdc3db8af
|
[
"MIT"
] |
permissive
|
Zamy97/internship_3
|
4deb0df914e68930b23faa6bf7e0ca7fd342fbd8
|
9c9db252b6818316e9864839075bb1d23714f7e4
|
refs/heads/master
| 2023-01-01T15:33:45.980776
| 2020-10-28T02:47:34
| 2020-10-28T02:47:34
| 307,861,296
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 714
|
py
|
#!/Users/zamy/Desktop/Python_Projects/excl_intrnship_projects/excl_internship_0/internship_3/internship_3/internship3_env/bin/python3
# Author:
# Contact: grubert@users.sf.net
# Copyright: This module has been placed in the public domain.
"""
man.py
======
This module provides a simple command line interface that uses the
man page writer to output from ReStructuredText source.
"""
import locale
try:
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
from docutils.writers import manpage
description = ("Generates plain unix manual documents. " + default_description)
publish_cmdline(writer=manpage.Writer(), description=description)
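# Usage (argument names illustrative): python rst2man.py input.rst output.1
# reads reStructuredText and writes a troff man page via the docutils manpage writer.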
|
[
"aktarzaman@berkeley.edu"
] |
aktarzaman@berkeley.edu
|
1573e552f087e76aa97f8774e93fe5e6e8409a02
|
2e0a1b57388fce12d4474a0cad5acbcbba61da93
|
/language_prediction/tests.py
|
34d68e988ebf230aa35a7f150ef99231b75bbd91
|
[] |
no_license
|
reutapel/decisions_predicion
|
3d76c98856bafa718aa9f05ac9e01ffa43a6e06a
|
0ca88b042c6dca401d360abeeb67323a933ec74e
|
refs/heads/master
| 2021-08-08T22:12:25.232928
| 2020-12-09T09:37:45
| 2020-12-09T09:37:45
| 229,907,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,920
|
py
|
import os
from language_prediction.dataset_readers import TextExpDataSetReader
from language_prediction.models import BasicTextModel
from allennlp.common.testing import AllenNlpTestCase, ModelTestCase
from allennlp.common.util import ensure_list
from allennlp.data.tokenizers.word_splitter import SpacyWordSplitter
from allennlp.data.token_indexers.elmo_indexer import ELMoTokenCharactersIndexer
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from allennlp.modules.token_embedders import ElmoTokenEmbedder
from allennlp.data.vocabulary import Vocabulary
from allennlp.data.iterators import BucketIterator
from allennlp.modules.attention.dot_product_attention import DotProductAttention
from allennlp.modules.feedforward import FeedForward
from allennlp.training.trainer import Trainer
import torch.optim as optim
base_directory = os.path.abspath(os.curdir)
data_directory = os.path.join(base_directory, 'data')
class TestTextExpDataSetReader(AllenNlpTestCase):
def test_read_from_file(self):
# the token indexer is responsible for mapping tokens to integers
token_indexer = ELMoTokenCharactersIndexer()
def tokenizer(x: str):
return [w.text for w in SpacyWordSplitter(language='en_core_web_sm', pos_tags=False).split_words(x)]
reader = TextExpDataSetReader(token_indexers=token_indexer, tokenizer=tokenizer)
instances = ensure_list(reader.read(os.path.join(data_directory, 'test_code_data.csv')))
# TODO: add the numbers to the test
instance0 = {
'sequence_review': [
['Positive', ':', 'Extremely', 'helpful', 'and', 'friendly', 'staff', 'hotel', 'in', 'great', 'shape', 'and', 'location', '.', 'Would', 'def', 'reccomend', 'and', 'go', 'back', 'again', '.', 'They', 'deserve', 'all', 'the', 'credit', 'they', 'get', '.', 'Negative', ':', 'Not', 'a', 'single', 'thing', '.'],
['Positive', ':', 'Location', '.', 'Location', '.', 'Location', '.', 'Room', 'small', 'but', 'perfectly', 'formed', '.', 'Staff', 'very', 'helpful', 'and', 'accommodated', 'a', 'change', 'to', 'the', 'offered', 'menu', '.', 'Decor', 'modern', 'and', 'tasteful', '.', 'Negative', ':', '.'],
['Positive', ':', 'Pool', 'was', 'great', 'with', 'amazing', 'views', 'cocktails', 'on', 'the', 'roof', 'at', 'night', 'were', 'perfect', '.', 'Good', 'wifi', ',', 'location', 'to', 'the', 'metro', 'was', 'excellent', 'not', 'so', 'many', 'bars', 'restaurants', 'nearby', 'but', 'easy', 'to', 'travel', 'into', 'central', 'Barcelona', '.', 'Room', 'was', 'spacious', '.', 'Staff', 'helpful', 'and', 'barman', 'was', 'fab', 'made', 'us', 'cocktails', 'not', 'on', 'the', 'menu', '.', 'Very', 'clean', 'everywhere', '.', 'Will', 'definitely', 'be', 'back', 'to', 'Barcelona', 'and', 'would', 'stay', 'here', 'again', '.', 'Negative', ':', 'No', 'tea', 'coffee', 'making', 'facilities', 'in', 'the', 'room', 'but', 'we', 'knew', 'that', 'when', 'we', 'booked', '.', 'I', "'m", 'a', 'typical', 'Brit', 'who', 'likes', 'her', 'Tea', '.', 'Breakfast', 'was', 'slightly', 'overpriced', 'but', 'you', 'did', 'have', 'a', 'fantastic', 'selection', '.'],
['Negative', ':', 'You', 'need', 'a', 'car', 'if', 'you', 'want', 'to', 'stay', 'at', 'this', 'hotel', 'with', 'the', 'family', '.', 'Parking', 'is', 'around', '10', 'euros', 'per', 'day', 'but', 'you', 'can', 'park', 'somewhere', 'on', 'the', 'street', 'near', 'the', 'hotel', 'for', 'free', '.', 'There', 'are', 'no', 'other', 'facilities', 'in', 'the', 'hotel', 'beside', 'the', 'gym', 'which', 'is', 'free', 'and', 'the', 'spa', 'that', 'costs', '50', 'euros', 'per', 'hour', 'max', '6', 'persons', 'which', 'is', 'still', 'expensive', 'for', 'a', 'family', '.', 'Positive', ':', 'The', 'bed', 'was', 'very', 'comfortable', ',', 'the', 'room', 'and', 'the', 'bathroom', 'were', 'clean', 'and', 'nicely', 'designed', '.'],
['Negative', ':', 'The', 'entrance', 'is', 'inconspicuous', 'one', 'could', 'say', 'hidden', 'not', 'so', 'easy', 'to', 'spot', 'but', 'security', 'is', 'good', '.', 'Just', 'do', 'not', 'expect', 'a', 'big', 'reception', 'it', "'s", 'on', 'the', 'first', 'floor', '.', 'Positive', ':', 'Largest', 'room', 'we', 'ever', 'had', 'in', 'Paris', '.', 'Excellent', 'breakfast', '.', 'Very', 'convenient', 'location', 'for', 'us', 'in', 'front', 'of', 'Gare', 'de', 'l', 'Est', 'and', 'walking', 'distance', 'to', 'Gare', 'du', 'Nord', '.'],
['Negative', ':', 'everything', 'how', 'such', 'a', 'facility', 'can', 'take', '4', 'stars', '.', 'The', 'room', 'was', 'dirty', '.', 'Even', 'in', 'bathroom', 'there', 'were', 'hair', 'and', 'dirty', 'everywhere', '.', 'Very', 'small', 'uncomfortable', '.', 'Positive', ':', 'nothing', 'except', 'location', '.'],
['Negative', ':', 'The', 'hotel', 'buffet', 'was', 'okay', 'but', 'not', 'great', 'and', 'discovered', 'that', 'I', 'had', 'been', 'overcharged', 'after', 'I', 'had', 'checked', 'out', ',', 'charged', 'for', '4', 'adults', 'instead', 'of', 'one', 'adult', 'and', 'three', 'children', 'as', 'on', 'original', 'bill', '.', 'Positive', ':', 'Room', 'was', 'very', 'comfortable', 'and', 'clean', 'with', 'a', '/', 'c', 'and', 'a', 'small', 'fridge', 'and', 'kettle', 'provided', '.', 'Excellent', 'location', 'and', 'great', 'view', 'of', 'the', 'Seine', 'from', 'our', 'room', ',', 'would', 'definitely', 'love', 'to', 'stay', 'in', 'this', 'hotel', 'again', '.'],
['Negative', ':', 'I', 'felt', 'some', 'elements', 'of', 'breakfast', 'could', 'have', 'been', 'better', '.', 'For', 'example', 'the', 'tea', 'we', 'were', 'served', 'was', 'only', 'luke', 'warm', 'and', 'the', 'buffet', 'was', 'not', 'always', 'fully', 'topped', 'up', '.', 'Positive', ':', 'Staff', 'very', 'welcoming', 'and', 'friendly', 'and', 'the', 'hotel', 'and', 'room', 'lovely', '.'],
['Negative', ':', 'The', 'Location', 'of', 'the', 'room', 'next', 'to', 'the', 'Elevator', 'was', 'not', 'the', 'key', 'but', 'the', 'slamming', 'emergency', 'door', 'which', 'was', 'used', 'many', 'times', 'for', 'what', 'reason', 'ever', '.', 'The', 'gap', 'between', 'door', 'and', 'floor', 'let', 'my', 'room', 'lightening', 'up', 'like', 'having', 'forgotten', 'to', 'Switch', 'off', 'the', 'lamp', '.', 'Positive', ':', 'Friendly', 'staff', 'especially', 'in', 'the', 'welcome', 'area', '.', 'Location', 'of', 'the', 'Hotel', 'in', 'the', 'middle', 'of', 'many', 'famous', 'streets', '.'],
['Negative', ':', 'The', 'showers', 'looked', 'modern', 'however', 'the', 'pressure', 'of', 'water', 'coming', 'out', 'of', 'the', 'shower', 'head', 'was', 'average', 'at', 'best', '.', 'Positive', ':', 'The', 'interior', 'was', 'sleek', 'and', 'relatively', 'modern', 'which', 'was', 'surprising', 'giving', 'that', 'the', 'exterior', 'of', 'the', 'hotel', 'was', "n't", 'on', 'par', '.']],
'label': 4,
'metadata': {'k_size': 10, 'pair_id': '91ol4nv6_4', 'sample_id': '91ol4nv6_4_10'}
}
instance4 = {
'sequence_review': [
['Positive', ':', 'Extremely', 'helpful', 'and', 'friendly', 'staff', 'hotel', 'in', 'great', 'shape', 'and', 'location', '.', 'Would', 'def', 'reccomend', 'and', 'go', 'back', 'again', '.', 'They', 'deserve', 'all', 'the', 'credit', 'they', 'get', '.', 'Negative', ':', 'Not', 'a', 'single', 'thing', '.'],
['Positive', ':', 'Location', '.', 'Location', '.', 'Location', '.', 'Room', 'small', 'but', 'perfectly', 'formed', '.', 'Staff', 'very', 'helpful', 'and', 'accommodated', 'a', 'change', 'to', 'the', 'offered', 'menu', '.', 'Decor', 'modern', 'and', 'tasteful', '.', 'Negative', ':', '.'],
['Positive', ':', 'Pool', 'was', 'great', 'with', 'amazing', 'views', 'cocktails', 'on', 'the', 'roof', 'at', 'night', 'were', 'perfect', '.', 'Good', 'wifi', ',', 'location', 'to', 'the', 'metro', 'was', 'excellent', 'not', 'so', 'many', 'bars', 'restaurants', 'nearby', 'but', 'easy', 'to', 'travel', 'into', 'central', 'Barcelona', '.', 'Room', 'was', 'spacious', '.', 'Staff', 'helpful', 'and', 'barman', 'was', 'fab', 'made', 'us', 'cocktails', 'not', 'on', 'the', 'menu', '.', 'Very', 'clean', 'everywhere', '.', 'Will', 'definitely', 'be', 'back', 'to', 'Barcelona', 'and', 'would', 'stay', 'here', 'again', '.', 'Negative', ':', 'No', 'tea', 'coffee', 'making', 'facilities', 'in', 'the', 'room', 'but', 'we', 'knew', 'that', 'when', 'we', 'booked', '.', 'I', "'m", 'a', 'typical', 'Brit', 'who', 'likes', 'her', 'Tea', '.', 'Breakfast', 'was', 'slightly', 'overpriced', 'but', 'you', 'did', 'have', 'a', 'fantastic', 'selection', '.'],
['Negative', ':', 'You', 'need', 'a', 'car', 'if', 'you', 'want', 'to', 'stay', 'at', 'this', 'hotel', 'with', 'the', 'family', '.', 'Parking', 'is', 'around', '10', 'euros', 'per', 'day', 'but', 'you', 'can', 'park', 'somewhere', 'on', 'the', 'street', 'near', 'the', 'hotel', 'for', 'free', '.', 'There', 'are', 'no', 'other', 'facilities', 'in', 'the', 'hotel', 'beside', 'the', 'gym', 'which', 'is', 'free', 'and', 'the', 'spa', 'that', 'costs', '50', 'euros', 'per', 'hour', 'max', '6', 'persons', 'which', 'is', 'still', 'expensive', 'for', 'a', 'family', '.', 'Positive', ':', 'The', 'bed', 'was', 'very', 'comfortable', ',', 'the', 'room', 'and', 'the', 'bathroom', 'were', 'clean', 'and', 'nicely', 'designed', '.'],
['Negative', ':', 'The', 'entrance', 'is', 'inconspicuous', 'one', 'could', 'say', 'hidden', 'not', 'so', 'easy', 'to', 'spot', 'but', 'security', 'is', 'good', '.', 'Just', 'do', 'not', 'expect', 'a', 'big', 'reception', 'it', "'s", 'on', 'the', 'first', 'floor', '.', 'Positive', ':', 'Largest', 'room', 'we', 'ever', 'had', 'in', 'Paris', '.', 'Excellent', 'breakfast', '.', 'Very', 'convenient', 'location', 'for', 'us', 'in', 'front', 'of', 'Gare', 'de', 'l', 'Est', 'and', 'walking', 'distance', 'to', 'Gare', 'du', 'Nord', '.'],
['Negative', ':', 'everything', 'how', 'such', 'a', 'facility', 'can', 'take', '4', 'stars', '.', 'The', 'room', 'was', 'dirty', '.', 'Even', 'in', 'bathroom', 'there', 'were', 'hair', 'and', 'dirty', 'everywhere', '.', 'Very', 'small', 'uncomfortable', '.', 'Positive', ':', 'nothing', 'except', 'location', '.']],
'label': 4,
'metadata': {'k_size': 6, 'pair_id': '91ol4nv6_4', 'sample_id': '91ol4nv6_4_6'}
}
instance12 = {
'sequence_review': [
['Positive', ':', 'Largest', 'room', 'we', 'ever', 'had', 'in', 'Paris', '.', 'Excellent', 'breakfast', '.', 'Very', 'convenient', 'location', 'for', 'us', 'in', 'front', 'of', 'Gare', 'de', 'l', 'Est', 'and', 'walking', 'distance', 'to', 'Gare', 'du', 'Nord', '.', 'Negative', ':', 'The', 'entrance', 'is', 'inconspicuous', 'one', 'could', 'say', 'hidden', 'not', 'so', 'easy', 'to', 'spot', 'but', 'security', 'is', 'good', '.', 'Just', 'do', 'not', 'expect', 'a', 'big', 'reception', 'it', "'s", 'on', 'the', 'first', 'floor', '.'],
['Positive', ':', 'Excellent', 'breakfast', 'and', 'friendly', 'helpful', 'staff', '.', 'Good', 'location', 'close', 'to', 'the', 'Metro', 'station', 'and', 'walking', 'distance', 'to', 'Sagrada', 'Familia', '.', 'Nice', 'snack', 'bar', 'area', 'to', 'grab', 'a', 'light', 'meal', '.', 'We', 'would', 'stay', 'there', 'again', '.', 'Negative', ':', 'Tried', 'to', 'visit', 'the', 'Fitness', 'centre', 'Spa', 'at', '5:00', 'in', 'the', 'evening', 'but', 'it', 'was', 'closed', '.', 'Did', "n't", 'get', 'to', 'see', 'it', 'so', 'I', 'ca', "n't", 'comment', '.'],
['Negative', ':', 'Rooms', 'were', 'tired', '.', 'Carpet', 'needed', 'a', 'good', 'clean', 'or', 'replacement', '.', 'Plumbing', 'system', 'outdated', '.', 'Various', 'fittings', 'were', 'missing', 'or', 'knobs', 'had', 'come', 'off', '.', 'There', 'were', 'about', '5', 'lamps', 'that', 'needed', 'replacing', '.', 'The', 'tv', 'remote', 'did', 'not', 'work', 'and', 'request', 'for', 'new', 'batteries', 'did', 'not', 'happen', '.', 'Unfortunately', '2', 'of', 'our', 'party', 'were', 'ill', 'and', 'stayed', 'in', 'their', 'rooms', 'and', 'were', 'disturbed', 'even', 'though', 'we', 'had', 'requested', 'that', 'the', 'rooms', 'were', 'not', 'serviced', 'that', 'day', '.', 'Nothing', 'to', 'do', 'with', 'the', 'hotel', 'but', 'there', 'is', 'a', '10', 'euro', 'per', 'night', 'city', 'tax', '.', 'Positive', ':', 'Trams', 'passed', 'the', 'front', 'door', '.', 'Attractive', 'foyer', '.', 'Staff', 'spoke', 'English', 'and', 'gave', 'good', 'guidance', 'to', 'how', 'to', 'get', 'around', '.', 'Breakfast', 'waitress', 'was', 'very', 'helpful', 'but', 'the', 'selection', 'of', 'food', 'was', 'limited', 'and', 'breakfast', 'finished', 'at', '09:00', '.'],
['Negative', ':', 'The', 'showers', 'looked', 'modern', 'however', 'the', 'pressure', 'of', 'water', 'coming', 'out', 'of', 'the', 'shower', 'head', 'was', 'average', 'at', 'best', '.', 'Positive', ':', 'The', 'interior', 'was', 'sleek', 'and', 'relatively', 'modern', 'which', 'was', 'surprising', 'giving', 'that', 'the', 'exterior', 'of', 'the', 'hotel', 'was', "n't", 'on', 'par', '.'],
['Positive', ':', 'Great', 'hotel', '.', 'Friendly', 'and', 'very', 'helpful', 'staff', '.', 'Spotless', '.', 'Negative', ':', 'Booked', 'a', 'double', 'room', '.', 'Surprised', 'and', 'disappointed', 'that', 'this', 'was', 'infact', 'two', 'single', 'beds', 'joined', 'together', '.'],
['Positive', ':', 'Everything', 'was', 'perfect', '.', 'They', 'also', 'upgraded', 'us', 'as', 'a', 'surprise', 'for', 'my', 'husbands', 'birthday', 'we', 'had', 'the', 'most', 'awesome', 'views', 'and', 'the', 'room', 'was', 'perfect', ',', 'we', 'woke', 'up', 'to', 'see', 'the', 'sunrise', '.', 'our', 'stay', 'was', 'simply', 'perfect', 'and', 'very', 'recommended', '.', 'I', 'recently', 'stayed', 'at', 'the', 'Mondrian', 'hotel', 'priced', 'almost', 'the', 'same', 'for', 'the', 'room', 'categories', 'but', 'in', 'terms', 'of', 'service', 'experience', 'attention', 'to', 'detail', 'and', 'customer', 'satisfaction', 'this', 'hotel', 'by', 'FAR', 'exceeded', 'that', 'experience', 'so', 'much', 'so', 'we', 'joined', 'up', 'to', 'Shangri', 'la', "'s", 'loyalty', 'program', 'as', 'I', 'was', 'really', 'surprised', 'we', 'could', 'still', 'get', 'such', 'amazing', 'customer', 'service', '.', 'Fully', 'recommend', 'staying', 'here', 'did', 'I', 'mention', 'the', 'phenomenal', 'views', '.', 'Negative', ':', 'Nothing', 'not', 'to', 'like', '.'],
['Negative', ':', 'The', 'hotel', 'buffet', 'was', 'okay', 'but', 'not', 'great', 'and', 'discovered', 'that', 'I', 'had', 'been', 'overcharged', 'after', 'I', 'had', 'checked', 'out', ',', 'charged', 'for', '4', 'adults', 'instead', 'of', 'one', 'adult', 'and', 'three', 'children', 'as', 'on', 'original', 'bill', '.', 'Positive', ':', 'Room', 'was', 'very', 'comfortable', 'and', 'clean', 'with', 'a', '/', 'c', 'and', 'a', 'small', 'fridge', 'and', 'kettle', 'provided', '.', 'Excellent', 'location', 'and', 'great', 'view', 'of', 'the', 'Seine', 'from', 'our', 'room', ',', 'would', 'definitely', 'love', 'to', 'stay', 'in', 'this', 'hotel', 'again', '.'],
['Positive', ':', 'Excellent', 'hotel', 'at', 'the', 'city', 'center', '.', 'Hotel', 'is', 'very', 'new', 'and', 'modern', '.', 'Staff', 'is', 'professional', 'and', 'helpful', '.', 'Location', 'is', 'perfect', 'at', 'the', 'city', 'center', '.', 'Negative', ':', '.']],
'label': 2,
'metadata': {'k_size': 8, 'pair_id': 'd9oijkzb_12', 'sample_id': 'd9oijkzb_12_8'}
}
instance15 = {
'sequence_review': [
['Positive', ':', 'Largest', 'room', 'we', 'ever', 'had', 'in', 'Paris', '.', 'Excellent', 'breakfast',
'.', 'Very', 'convenient', 'location', 'for', 'us', 'in', 'front', 'of', 'Gare', 'de', 'l', 'Est',
'and', 'walking', 'distance', 'to', 'Gare', 'du', 'Nord', '.', 'Negative', ':', 'The', 'entrance',
'is', 'inconspicuous', 'one', 'could', 'say', 'hidden', 'not', 'so', 'easy', 'to', 'spot', 'but',
'security', 'is', 'good', '.', 'Just', 'do', 'not', 'expect', 'a', 'big', 'reception', 'it', "'s",
'on', 'the', 'first', 'floor', '.'],
['Positive', ':', 'Excellent', 'breakfast', 'and', 'friendly', 'helpful', 'staff', '.', 'Good',
'location', 'close', 'to', 'the', 'Metro', 'station', 'and', 'walking', 'distance', 'to', 'Sagrada',
'Familia', '.', 'Nice', 'snack', 'bar', 'area', 'to', 'grab', 'a', 'light', 'meal', '.', 'We', 'would',
'stay', 'there', 'again', '.', 'Negative', ':', 'Tried', 'to', 'visit', 'the', 'Fitness', 'centre',
'Spa', 'at', '5:00', 'in', 'the', 'evening', 'but', 'it', 'was', 'closed', '.', 'Did', "n't", 'get',
'to', 'see', 'it', 'so', 'I', 'ca', "n't", 'comment', '.'],
['Negative', ':', 'Rooms', 'were', 'tired', '.', 'Carpet', 'needed', 'a', 'good', 'clean', 'or',
'replacement', '.', 'Plumbing', 'system', 'outdated', '.', 'Various', 'fittings', 'were', 'missing',
'or', 'knobs', 'had', 'come', 'off', '.', 'There', 'were', 'about', '5', 'lamps', 'that', 'needed',
'replacing', '.', 'The', 'tv', 'remote', 'did', 'not', 'work', 'and', 'request', 'for', 'new',
'batteries', 'did', 'not', 'happen', '.', 'Unfortunately', '2', 'of', 'our', 'party', 'were', 'ill',
'and', 'stayed', 'in', 'their', 'rooms', 'and', 'were', 'disturbed', 'even', 'though', 'we', 'had',
'requested', 'that', 'the', 'rooms', 'were', 'not', 'serviced', 'that', 'day', '.', 'Nothing', 'to',
'do', 'with', 'the', 'hotel', 'but', 'there', 'is', 'a', '10', 'euro', 'per', 'night', 'city', 'tax',
'.', 'Positive', ':', 'Trams', 'passed', 'the', 'front', 'door', '.', 'Attractive', 'foyer', '.',
'Staff', 'spoke', 'English', 'and', 'gave', 'good', 'guidance', 'to', 'how', 'to', 'get', 'around',
'.', 'Breakfast', 'waitress', 'was', 'very', 'helpful', 'but', 'the', 'selection', 'of', 'food', 'was',
'limited', 'and', 'breakfast', 'finished', 'at', '09:00', '.'],
['Negative', ':', 'The', 'showers', 'looked', 'modern', 'however', 'the', 'pressure', 'of', 'water',
'coming', 'out', 'of', 'the', 'shower', 'head', 'was', 'average', 'at', 'best', '.', 'Positive', ':',
'The', 'interior', 'was', 'sleek', 'and', 'relatively', 'modern', 'which', 'was', 'surprising',
'giving', 'that', 'the', 'exterior', 'of', 'the', 'hotel', 'was', "n't", 'on', 'par', '.'],
['Positive', ':', 'Great', 'hotel', '.', 'Friendly', 'and', 'very', 'helpful', 'staff', '.', 'Spotless',
'.', 'Negative', ':', 'Booked', 'a', 'double', 'room', '.', 'Surprised', 'and', 'disappointed', 'that',
'this', 'was', 'infact', 'two', 'single', 'beds', 'joined', 'together', '.']],
'label': 2,
'metadata': {'k_size': 5, 'pair_id': 'd9oijkzb_12', 'sample_id': 'd9oijkzb_12_5'}
}
instance19 = {
'sequence_review': [
['Positive', ':', 'Largest', 'room', 'we', 'ever', 'had', 'in', 'Paris', '.', 'Excellent', 'breakfast',
'.', 'Very', 'convenient', 'location', 'for', 'us', 'in', 'front', 'of', 'Gare', 'de', 'l', 'Est',
'and', 'walking', 'distance', 'to', 'Gare', 'du', 'Nord', '.', 'Negative', ':', 'The', 'entrance',
'is', 'inconspicuous', 'one', 'could', 'say', 'hidden', 'not', 'so', 'easy', 'to', 'spot', 'but',
'security', 'is', 'good', '.', 'Just', 'do', 'not', 'expect', 'a', 'big', 'reception', 'it', "'s",
'on', 'the', 'first', 'floor', '.']],
'label': 2,
'metadata': {'k_size': 1, 'pair_id': 'd9oijkzb_12', 'sample_id': 'd9oijkzb_12_1'}
}
# tests:
# test sizes:
# number of instances
assert len(instances) == 20
# length of sequence_review:
seq_lengths = {
0: 10, 1: 9, 2: 8, 3: 7, 4: 6, 5: 5, 6: 4, 7: 3, 8: 2, 9: 1,
10: 10, 11: 9, 12: 8, 13: 7, 14: 6, 15: 5, 16: 4, 17: 3, 18: 2, 19: 1
}
for row, seq_length in seq_lengths.items():
assert len(instances[row].fields['sequence_review'].field_list) == seq_length
# same pair_id with the same label, and k_size consistent with the sequence_review length
for instance_index in range(len(instances)):
assert (instances[instance_index].fields['metadata'].metadata['pair_id'] == '91ol4nv6_4' and
instances[instance_index].fields['label'].label == 4) or\
(instances[instance_index].fields['metadata'].metadata['pair_id'] == 'd9oijkzb_12' and
instances[instance_index].fields['label'].label == 2)
assert len(instances[instance_index].fields['sequence_review']) ==\
instances[instance_index].fields['metadata'].metadata['k_size']
# compare specific instances
for instance_num, instance in [[0, instance0], [4, instance4], [12, instance12], [15, instance15],
[19, instance19]]:
fields = instances[instance_num].fields
assert [[t.text for t in fields['sequence_review'][i].tokens] for i in
range(len(fields['sequence_review'].field_list))] == instance['sequence_review']
assert fields['label'].label == instance['label']
assert fields['metadata'].metadata == instance['metadata']
class ExperimentClassifierTest(ModelTestCase):
def setUp(self):
super().setUp()
self.set_up_model('tests/fixtures/academic_paper_classifier.json',
'tests/fixtures/s2_papers.jsonl')
def test_model_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
def manually_test_reader():
token_indexer = ELMoTokenCharactersIndexer()
def tokenizer(x: str):
return [w.text for w in SpacyWordSplitter(language='en_core_web_sm', pos_tags=False).split_words(x)]
reader = TextExpDataSetReader(token_indexers=token_indexer, tokenizer=tokenizer)
instances = reader.read(os.path.join(data_directory, 'test_code_data.csv'))
def main():
test_text_exp_data_set_reader_obj = TestTextExpDataSetReader()
test_text_exp_data_set_reader_obj.test_read_from_file()
experiment_classifier_test_obj = ExperimentClassifierTest()
experiment_classifier_test_obj.test_model_can_train_save_and_load()
if __name__ == '__main__':
main()
|
[
"reutapel88@gmail.com"
] |
reutapel88@gmail.com
|
f99109833c8840ec32c973169f4d2221ccb5ea5c
|
e97876c4f94ab00b2862fe1c9bc89aaa3970d1df
|
/tools/linux.py
|
9b1c0d03dc314e1b7bbf81e29121831ea98cacbc
|
[] |
no_license
|
timjen3/nordvpn_linux
|
f8640f5f8c0da0876164af684b555d34303dcb31
|
5f5737909ed4af8e150f6d4e4f18b602b8df7c2b
|
refs/heads/master
| 2020-12-02T07:46:54.976088
| 2017-09-04T01:38:39
| 2017-09-04T01:38:39
| 96,725,061
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,596
|
py
|
import subprocess
import os
"""https://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid-in-python"""
if os.name == 'posix':
def pid_exists(pid):
"""Check whether pid exists in the current process table."""
import errno
if pid < 0:
return False
try:
os.kill(pid, 0)
except OSError as e:
return e.errno == errno.EPERM
else:
return True
elif os.name == 'nt':
def pid_exists(pid):
"""Dev was done on windows, so needed this to work on there also."""
task_list = subprocess.Popen(["tasklist", "/FO", "CSV"], stdout=subprocess.PIPE)
headers = task_list.stdout.readline().decode("utf-8")
headers = [c for c in headers.split(",")]
pid_col = [i for i, c in enumerate(headers) if c == '"PID"'][0]
for line in task_list.stdout.readlines():
_this_pid = int(line.decode("utf-8").replace('"', "").split(",")[pid_col])
if _this_pid == pid:
return True
return False
else:
def pid_exists(pid):
raise NotImplementedError("Not implemented for '{}'".format(os.name))
def send_desktop_msg(msg_string, delay=0):
"""
:param msg_string: message to send to display
:param delay: time to display message. 0 requires click to close.
"""
msg = 'notify-send "{}" -t {}'.format(msg_string, delay)
os.popen(msg)
def execute_no_wait(command):
try:
sp = subprocess.Popen(command, shell=True)
    except Exception:
return -1
return sp.pid
def execute_and_wait(command, timeout=5):
try:
sp = subprocess.Popen(command, shell=True)
pid = sp.pid
# sp.communicate()
sp.wait(timeout=timeout)
    except Exception:
return -1
return pid
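# Usage sketch (hypothetical command): fire-and-forget a shell command,
# then check whether it is still alive and notify the desktop.
# pid = execute_no_wait("sleep 5")
# if pid != -1 and pid_exists(pid):
#     send_desktop_msg("process {} is running".format(pid), delay=2000)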
|
[
"timjen3@gmail.com"
] |
timjen3@gmail.com
|
9c9e5a215e1d0ff70844289f4b8c1aed74b82be7
|
1dd901ff7e805e2ee208dd478c4f81b9f9576d78
|
/DDB/defaultViews.py
|
6c5ed4f9c100318aabb4aa3e3e943fa143644a6e
|
[] |
no_license
|
SushilSanjayBhile/ReleasePortal
|
f898babd0922784aef9cb7f8335b6cc5f9a751ef
|
9e56fe90adffdc677441e78a348b69ee033a56cd
|
refs/heads/master
| 2023-05-26T10:19:02.693554
| 2022-10-12T10:40:20
| 2022-10-12T10:40:20
| 250,026,668
| 2
| 0
| null | 2023-05-22T22:54:15
| 2020-03-25T16:08:30
|
Python
|
UTF-8
|
Python
| false
| false
| 3,858
|
py
|
# Django packages
from django.db.models import Q
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
# imports from django app (names follow the usage in the views below)
from .models import DEFAULT_DOMAIN, DEFAULT_SUBDOMAIN
from .forms import DomainForm, SubDomainForm, LogForm
from DDB.serializers import DOMAIN_SERIALIZER, SUBDOMAIN_SERIALIZER
# Third party softwares / libraries
import gzip
import psycopg2
from sh import pg_dump
from psycopg2 import sql
import json, datetime, os, time
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
def GenerateLogData(UserName, RequestType, url, logData, tcid, card, Release):
Logs = json.dumps(logData)
Timestamp = datetime.datetime.now()
data = {'UserName': UserName, 'RequestType': RequestType, 'LogData': logData, 'Timestamp': Timestamp, 'URL': url, 'TcID': tcid, 'CardType': card}
fd = LogForm(data)
if fd.is_valid():
print(data)
data = fd.save(commit = False)
data.save(using = Release)
else:
print("INVALID", fd.errors)
@csrf_exempt
def DEFAULT_SUBDOMAIN_GET_POST_VIEW(request, Release):
if request.method == "POST":
req = json.loads(request.body.decode("utf-8"))
        try:
            dom = DEFAULT_DOMAIN.objects.using(Release).get(Domain = req['Domain'])
            print(dom.id)
            data = DEFAULT_SUBDOMAIN.objects.using(Release).filter(SubDomain = req['SubDomain']).filter(Domain = int(dom.id))
            print(len(data))
            if len(data) > 0:
                return HttpResponse("Conflict: Values already exists", status = 409)
        except DEFAULT_DOMAIN.DoesNotExist:
            # the parent Domain must exist before a SubDomain can be attached to it
            return HttpResponse("Error: Domain does not exist", status = 404)
        req['Domain'] = dom.id
fd = SubDomainForm(req)
if fd.is_valid():
data = fd.save(commit = False)
data.save(using = Release)
if "Activity" in req:
AD = req['Activity']
GenerateLogData(AD['UserName'], AD['RequestType'], AD['URL'], AD['LogData'], AD['TcID'], AD['CardType'], AD['Release'])
return HttpResponse("Sucess", status = 200)
else:
return HttpResponse(json.dumps(fd.errors), status = 500)
if request.method == "GET":
data = DEFAULT_SUBDOMAIN.objects.using(Release).all()
serializer = SUBDOMAIN_SERIALIZER(data, many = True)
return HttpResponse(json.dumps(serializer.data), status = 200)
@csrf_exempt
def DEFAULT_DOMAIN_GET_POST_VIEW(request, Release):
if request.method == "POST":
req = json.loads(request.body.decode("utf-8"))
errorList = []
for domain in req['domains']:
dictionary = {}
dictionary['Domain'] = domain
try:
data = DEFAULT_DOMAIN.objects.using(Release).get(Domain = domain)
errorList.append("Domain- " + domain + " already exists")
#return HttpResponse("Conflict: Values already exists", status = 409)
except:
fd = DomainForm(dictionary)
if fd.is_valid():
data = fd.save(commit = False)
print(domain, "NOT AVAILABLE")
data.save(using = Release)
if "Activity" in req:
AD = req['Activity']
GenerateLogData(AD['UserName'], AD['RequestType'], AD['URL'], AD['LogData'], AD['TcID'], AD['CardType'], AD['Release'])
else:
errorList.append(str(fd.errors))
if len(errorList) > 0:
return HttpResponse("Error: " + json.dumps(errorList), status = 500)
return HttpResponse("Sucess", status = 200)
if request.method == "GET":
data = DEFAULT_DOMAIN.objects.using(Release).all()
serializer = DOMAIN_SERIALIZER(data, many = True)
return HttpResponse(json.dumps(serializer.data), status = 200)
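# Illustrative POST body for DEFAULT_SUBDOMAIN_GET_POST_VIEW (field names taken
# from the views above; the values are made up):
# {
#     "Domain": "Networking",
#     "SubDomain": "Routing",
#     "Activity": {"UserName": "admin", "RequestType": "POST", "URL": "/subdomain/",
#                  "LogData": {}, "TcID": "", "CardType": "", "Release": "default"}
# }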
|
[
"sushilmaxbhile@gmail.com"
] |
sushilmaxbhile@gmail.com
|
338b46af5898069e8258e7af3299a0493e7d8d14
|
eb48075d8143faaf580b00a164c55315b3062b6d
|
/1_CrashCourse/3_NumPySKL_Exercise.py
|
43189f0ecc237d57b07973836537691bc8ff2c47
|
[] |
no_license
|
vihandesilva/TensorFlowGuide
|
27692c13587501ca2266509751d352f83e4878f6
|
a09870668605f31a7c6e79a810292fbabb6fb3d0
|
refs/heads/master
| 2022-04-18T22:15:36.593731
| 2020-04-10T18:46:24
| 2020-04-10T18:46:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 781
|
py
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
np.random.seed(101)
mat = np.random.randint(0, 1000, (100, 100))
# plt.imshow(mat)
# plt.show()
df = pd.DataFrame(mat)
df.plot(x=0,y=1,kind="scatter")
plt.show()
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(mat)
scaledDf = scaler.transform(mat)
# plt.imshow(scaledDf)
# plt.show()
colList = ["f"+str(i) for i in range(1,100)]
colList.append("Label")
dfFinal = pd.DataFrame(data=scaledDf,columns=colList)
features = dfFinal[colList[0:len(colList)-1]]
labels = dfFinal[[colList.pop()]]
from sklearn.model_selection import train_test_split
features_train, features_test, labels_train, labels_test = train_test_split(features, labels, test_size=0.3, random_state=42)
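# Possible next step (illustrative, not part of the original exercise):
# fit and score a simple model on the split.
# from sklearn.linear_model import LinearRegression
# model = LinearRegression().fit(features_train, labels_train)
# print(model.score(features_test, labels_test))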
|
[
"mohamedayoob01@gmail.com"
] |
mohamedayoob01@gmail.com
|
0015b67be216b5d4bcbd75dbbbd385a605e6c6d6
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02756/s489252371.py
|
31635b6d45bf5d37da1e1c76a8813328893a6ade
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 759
|
py
|
if __name__ == '__main__':
s = input()
n = int(input())
rvcnt = 0
tmp = "S"
tmp_before = ""
tmp_after = ""
for _ in range(n):
q = input()
if len(q) == 1:
rvcnt += 1
else:
            i, f, c = map(str, q.split())
            if f == "1":  # add to the front
                if rvcnt % 2 == 0:
                    # add to the front
                    tmp_before = c + tmp_before
                else:
                    # add to the end
                    tmp_after = tmp_after + c
            else:  # add to the end
                if rvcnt % 2 == 0:
                    # add to the end
                    tmp_after = tmp_after + c
                else:
                    # add to the front
                    tmp_before = c + tmp_before
    # swap the "S" placeholder back to the original string
    tmp1 = tmp.replace("S", s)
    # concatenate front + original + back
    tmp2 = tmp_before + tmp1 + tmp_after
    # finally decide whether to print reversed
if rvcnt % 2 == 1:
print(tmp2[::-1])
else:
print(tmp2)
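# Illustrative trace: s = "a" with queries "2 1 p", "1", "2 2 c":
# prepend p -> "pa", reverse -> "ap", append c -> "apc"; the code prints "apc"
# without ever physically reversing the string (rvcnt only tracks orientation).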
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
ffac9bce2a4f7d4160893ed67fb60522e13f408e
|
8e7cc73c904479bb69035d05f749f59619f369c6
|
/10-Advanced-Data-Storage-and-Retrieval/app.py
|
b0933dfd3e6906eb6cbd7c5ae393dc8064605c9e
|
[] |
no_license
|
amberlbillings/dataviz-homework
|
da11830f93cbeaf8bf8f5bd15b0b2028f33a5e78
|
267072306307ec6cb3d56a313032b10c44d8b31d
|
refs/heads/master
| 2020-04-25T07:15:18.986929
| 2019-07-28T00:00:18
| 2019-07-28T00:00:18
| 172,608,536
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,647
|
py
|
# Amber Billings
from flask import Flask, jsonify
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, inspect, func
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
Base = automap_base()
Base.prepare(engine, reflect=True)
Measurement = Base.classes.measurement
Station = Base.classes.station
session = Session(engine)
app = Flask(__name__)
@app.route("/")
def welcome():
"""List all available api routes."""
return (
f"Available Routes:<br/><br/>"
f"/api/v1.0/precipitation<br/>"
f"Returns a dictionary of dates and precipitation measures from the dataset.<br/><br/>"
f"/api/v1.0/stations<br/>"
f"Returns a list of stations from the dataset.<br/><br/>"
f"/api/v1.0/tobs <br />"
f"Returns a list of temperature observations from the previous year.<br/><br/>"
f"/api/v1.0/<start> <br />"
f"Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start.<br/><br/>"
f"/api/v1.0/<start>/<end> <br />"
f"Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start-end range.<br/><br/>"
f"Date format: YYYY-MM-DD"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
results = session.query(Measurement.date, Measurement.prcp).\
filter(Measurement.date > '2016-08-22').\
order_by(Measurement.date.desc()).all()
precipitation = dict(results)
return jsonify(precipitation)
@app.route("/api/v1.0/stations")
def stations():
results = session.query(Station.station).all()
    stations = [s[0] for s in results]
    return jsonify(stations)
@app.route("/api/v1.0/tobs")
def tobs():
results = session.query(Measurement.tobs).\
filter(Measurement.date > '2016-08-22').all()
    tobs = [t[0] for t in results]
return jsonify(tobs)
@app.route("/api/v1.0/<start_date>")
def start_temps(start_date):
results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
        filter(Measurement.date >= start_date).all()
return jsonify(results)
@app.route("/api/v1.0/<start_date>/<end_date>")
def start_end_temps(start_date, end_date):
results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start_date).\
filter(Measurement.date <= end_date).all()
return jsonify(results)
if __name__ == "__main__":
app.run(debug=True)
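# Example requests (illustrative), assuming the dev server on localhost:5000:
#   GET /api/v1.0/precipitation          -> {"<date>": <prcp>, ...}
#   GET /api/v1.0/2017-01-01/2017-01-07  -> [[<tmin>, <tavg>, <tmax>]]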
|
[
"noreply@github.com"
] |
amberlbillings.noreply@github.com
|
2dab0838af442553113bd8b7010788fd46712c87
|
af6249e22c0ddc4f8ba4054efd95f9377dc1ce32
|
/main_app/migrations/0003_auto_20200329_0018.py
|
3f3d3998b12b8489ecba5b191cace5dbd9517f86
|
[] |
no_license
|
luxvalerian/snakeCollector
|
4d8d46bb34c94032886c4fd2a400c655a93d8f57
|
23d099b60f55a4af275d7041af1671db4930eca6
|
refs/heads/master
| 2022-04-12T12:21:41.327132
| 2020-04-09T20:49:24
| 2020-04-09T20:49:24
| 250,180,027
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
# Generated by Django 2.2.6 on 2020-03-29 00:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main_app', '0002_feeding'),
]
operations = [
migrations.AlterField(
model_name='feeding',
name='date',
field=models.DateField(verbose_name='feeding datesss'),
),
]
|
[
"avelynnmitra@gmail.com"
] |
avelynnmitra@gmail.com
|
4e718cd98a604d4ec436efd32795e77f0323b98c
|
4a17ea750e8502fc356c22a0dcfdc66d706aa8bf
|
/Arrays/problem-36.py
|
af191f8f02707b8d889d78e3f38728e15f28abea
|
[
"MIT"
] |
permissive
|
manju-dev/leetcode-workspace
|
6e5fb86a1809d23c8eb3c10f467cf5f2c0e4f929
|
91086d412c4982b9e0d37732023ac225b6cf3d7c
|
refs/heads/main
| 2023-02-15T07:58:50.432531
| 2020-12-30T17:52:46
| 2020-12-30T18:17:25
| 325,613,009
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,811
|
py
|
# 36. Valid Sudoku
class Solution:
def isValidSudoku(self, board):
# row check
for i in range(len(board)):
rowVals = []
for j in range(len(board[0])):
# print(i, j, board[i][j], rowVals)
if board[i][j]==".":
continue
elif(0 < int(board[i][j]) < 10) and \
(board[i][j] not in rowVals):
rowVals.append(board[i][j])
else:
return False
# column check
for i in range(len(board)):
colVals = []
for j in range(len(board[0])):
if board[j][i]==".":
continue
elif(0 < int(board[j][i]) < 10):
colVals.append(board[j][i])
if len(set(colVals)) < len(colVals):
return False
# sub-boxes check
for i in range(0, 9, 3):
for j in range(0, 9, 3):
subBoxVals = board[i][j:j+3] + \
board[i+1][j:j+3] + board[i+2][j:j+3]
tempVals = []
for val in subBoxVals:
if val==".":
continue
elif(0 < int(val) < 10) and \
(val not in tempVals):
tempVals.append(val)
else:
return False
return True
# Test
solution = Solution()
# Expected: True
board = [
["5","3",".",".","7",".",".",".","."],
["6",".",".","1","9","5",".",".","."],
[".","9","8",".",".",".",".","6","."],
["8",".",".",".","6",".",".",".","3"],
["4",".",".","8",".","3",".",".","1"],
["7",".",".",".","2",".",".",".","6"],
[".","6",".",".",".",".","2","8","."],
[".",".",".","4","1","9",".",".","5"],
[".",".",".",".","8",".",".","7","9"]
]
print(solution.isValidSudoku(board))
# Expected: False
board2 = [
[".",".",".",".","5",".",".","1","."],
[".","4",".","3",".",".",".",".","."],
[".",".",".",".",".","3",".",".","1"],
["8",".",".",".",".",".",".","2","."],
[".",".","2",".","7",".",".",".","."],
[".","1","5",".",".",".",".",".","."],
[".",".",".",".",".","2",".",".","."],
[".","2",".","9",".",".",".",".","."],
[".",".","4",".",".",".",".",".","."]
]
print(solution.isValidSudoku(board2))
# Scratch experiments toward a more compact validity check:
foo = [[1, 2, 3], [1, 6, 7]]
# zip(*rows) transposes a matrix; in Python 3 it is a lazy iterator, hence the second print
print(zip(*[[1, 2, 3], [5, 6, 7]]))
print([i for i in zip(*[[1, 2, 3], [5, 6, 7]])])
import collections
# count (value, unit) keys; i // 3, j // 3 (integer division, not /) indexes the 3x3 sub-box
collections.Counter(x for i, row in enumerate(foo) for j, c in enumerate(row) if c != '.' for x in ((c, i), (j, c), (i // 3, j // 3, c))).values()
len([x for i, row in enumerate(foo)
     for j, c in enumerate(row)
     if c != '.' for x in ((c, i), (j, c), (i // 3, j // 3, c))])
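# A compact alternative (sketch, building on the scratch code above): a board is
# valid iff no (value, unit) pair repeats, where the units are the row, the
# column, and the 3x3 sub-box.
def is_valid_sudoku_compact(brd):
    seen = [x for i, row in enumerate(brd)
            for j, c in enumerate(row) if c != '.'
            for x in ((c, i), (j, c), (i // 3, j // 3, c))]
    return len(seen) == len(set(seen))
print(is_valid_sudoku_compact(board), is_valid_sudoku_compact(board2))  # True False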
|
[
"manju@linuxMachine.localdomain"
] |
manju@linuxMachine.localdomain
|
edf508abbb55d3d85e1efdbc9780249c0221c23b
|
5859bac83220fa37948bc0204a3d4a38656f6f13
|
/OpenPoseImage.py
|
5764e9aeed1a674c009d0430d17c9282c0e16449
|
[] |
no_license
|
hao-pt/Human-Pose
|
972f6828964faba41dafe8f08a449fda1a13c170
|
fbb97c3f5cc821bfde1703dd21d8bdbf3135ff4f
|
refs/heads/master
| 2022-12-03T13:34:13.505111
| 2019-09-04T15:10:01
| 2019-09-04T15:10:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,764
|
py
|
import cv2
import time
import numpy as np
# Change mode MPI or COCO
MODE = "COCO"
if MODE is "COCO":
protoFile = "pose/coco/pose_deploy_linevec.prototxt"
weightsFile = "pose/coco/pose_iter_440000.caffemodel"
nPoints = 18
POSE_PAIRS = [ [1,0],[1,2],[1,5],[2,3],[3,4],[5,6],[6,7],[1,8],[8,9],[9,10],[1,11],[11,12],[12,13],[0,14],[0,15],[14,16],[15,17]]
elif MODE is "MPI" :
protoFile = "pose/mpi/pose_deploy_linevec_faster_4_stages.prototxt"
weightsFile = "pose/mpi/pose_iter_160000.caffemodel"
nPoints = 15
POSE_PAIRS = [[0,1], [1,2], [2,3], [3,4], [1,5], [5,6], [6,7], [1,14], [14,8], [8,9], [9,10], [14,11], [11,12], [12,13] ]
# Load image
frame = cv2.imread("single.jpeg")
frameCopy = np.copy(frame)
frameWidth = frame.shape[1]
frameHeight = frame.shape[0]
threshold = 0.1 # threshold level
# Load pretrained model
net = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)
# Start tick
t = time.time()
# input image dimensions for the network
inWidth = 368
inHeight = 368
# Prepare Blob as input of the network
inpBlob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (inWidth, inHeight),
(0, 0, 0), swapRB=False, crop=False)
# Set inpBlob as input obj
net.setInput(inpBlob)
# Start Feed forward
output = net.forward()
print("time taken by network : {:.3f}".format(time.time() - t))
# Get size of output map
H = output.shape[2]
W = output.shape[3]
# Empty list to store the detected keypoints
points = []
for i in range(nPoints):
# confidence map of corresponding body's part.
probMap = output[0, i, :, :]
# Find global maxima of the probMap.
minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
# Scale the point to fit on the original image
x = (frameWidth * point[0]) / W
y = (frameHeight * point[1]) / H
if prob > threshold :
cv2.circle(frameCopy, (int(x), int(y)), 8, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)
cv2.putText(frameCopy, "{}".format(i), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, lineType=cv2.LINE_AA)
# Add the point to the list if the probability is greater than the threshold
points.append((int(x), int(y)))
else :
points.append(None)
# Draw Skeleton
for pair in POSE_PAIRS:
partA = pair[0]
partB = pair[1]
if points[partA] and points[partB]:
cv2.line(frame, points[partA], points[partB], (0, 255, 255), 2)
cv2.circle(frame, points[partA], 8, (0, 0, 255), thickness=-1, lineType=cv2.FILLED)
cv2.imshow('Output-Keypoints', frameCopy)
cv2.imshow('Output-Skeleton', frame)
cv2.imwrite('Output-Keypoints.jpg', frameCopy)
cv2.imwrite('Output-Skeleton.jpg', frame)
print("Total time taken : {:.3f}".format(time.time() - t))
cv2.waitKey(0)
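# Note (assumptions): "single.jpeg" and the pose/coco or pose/mpi model files must
# already exist next to this script; the .prototxt/.caffemodel pairs come from the
# OpenPose project. For headless runs, drop the cv2.imshow/cv2.waitKey calls and
# keep only the cv2.imwrite outputs.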
|
[
"tienhaophung@gmail.com"
] |
tienhaophung@gmail.com
|
0a0f6452016764b74134a9104f888fd9b17b46f2
|
94d5467b1315791fa75165eb862fdd8fef300958
|
/srv/guizhou_address/address_formula_release/src/jieba/finalseg/test.py
|
756e762e396950a20e6339bddf02d89cc879222e
|
[] |
no_license
|
scmsqhn/code
|
e31926174c247d49c1db8f121e3ec1b82f8a2d9d
|
b389d7dc5fafad8a4185a03cd6d5519ccf8f99df
|
refs/heads/master
| 2022-12-09T05:37:07.065840
| 2019-05-14T01:55:07
| 2019-05-14T01:55:07
| 185,903,771
| 1
| 0
| null | 2022-12-08T05:05:51
| 2019-05-10T02:22:28
|
Python
|
UTF-8
|
Python
| false
| false
| 588
|
py
|
import jieba
import re
import pdb
jieba.load_userdict("dict_nz.txt")
#jieba.load_userdict("guiyang_baidu_addr_split.txt")
cnt = 0
g = open("output.txt","a+")
with open("name.txt","r") as f:
lines = f.readlines()
for line in lines:
cnt+=1
g.write("%s\n"%str(list(jieba.cut(line))))
if cnt%100 == 0:
print(list(jieba.cut(line)))
with open("address.txt","r") as f:
lines = f.readlines()
for line in lines:
cnt+=1
g.write("%s\n"%str(list(jieba.cut(line))))
if cnt%100 == 0:
print(list(jieba.cut(line)))
|
[
"2364839934@qq.com"
] |
2364839934@qq.com
|
929ddda84c3d2b2fda71d70c924546c8454a27cb
|
0b521a3f665b892c30b68b2e9258d106d9dd414b
|
/env/Helicopter.py
|
5a5cdd10ac08e99b1a0564cc1eb5812d1e1f47f8
|
[] |
no_license
|
Kamyab-Majid/garage_quadcopter
|
c37a7cea211bbc04d66596f74e724375a36b81c1
|
30776a9fdf2ff72d068296a95dc64c240161cef4
|
refs/heads/main
| 2023-09-03T23:01:05.546916
| 2021-11-03T15:21:41
| 2021-11-03T15:21:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,190
|
py
|
import sympy as sp
import numpy as np
from env import controller
class Helicopter:
def __init__(self):
self.constants = (
self.K_mu,
self.mass,
self.rho,
self.Rmr,
self.CL0,
self.Rtr,
self.CLa,
self.CLatr,
self.CD0,
self.xfus,
self.yfus,
self.zfus,
self.zcg,
self.Sxfus,
self.Syfus,
self.Szfus,
self.fQ0,
self.Kbeta,
self.Alon,
self.Blat,
self.taufb,
self.N,
self.ntr,
self.Kt,
self.c_constant,
self.OMEGA,
self.ctr,
) = (
1.0,
11.0,
1.1073,
1.03,
0.0077,
0.15,
5.496,
5,
0.03,
-1.22,
0.0,
-0.09,
-0.32,
0.1019,
0.8256,
0.505749,
1.5,
254.5,
0.999,
0.9875,
0.04,
2,
6,
0.0,
0.082,
115.19,
0.03,
)
        # self.uwind, self.vwind, self.wwind = 0.0, 0.0, 0.0
self.vh = sp.sqrt((self.mass) * 9.8 / (2 * self.rho * 3.1415 * self.Rmr ** 2))
self.vhtr = -sp.sqrt(self.fQ0 / (2 * self.rho * 3.1415 * self.Rtr ** 2))
self.OMEGAtr = self.ntr * self.OMEGA
self.omega_r = controller.Controller()
self.sigmamr = self.N * self.c_constant / (3.1415 * self.Rmr)
self.sigmatr = self.N * self.ctr / (3.1415 * self.Rtr)
self.A = np.array([0, 2 / 9, 1 / 3, 3 / 4, 1, 5 / 6])
self.B = np.array(
[
[0, 0, 0, 0, 0],
[2 / 9, 0, 0, 0, 0],
[1 / 12, 1 / 4, 0, 0, 0],
[69 / 128, -243 / 128, 135 / 64, 0, 0],
[-17 / 12, 27 / 4, -27 / 5, 16 / 15, 0],
[65 / 432, -5 / 16, 13 / 16, 4 / 27, 5 / 144],
]
)
self.C = np.array([1 / 9, 0, 2 / 20, 16 / 45, 1 / 12])
self.CH = np.array([47 / 450, 0, 12 / 25, 32 / 225, 1 / 30, 6 / 25])
self.CT = np.array([-1 / 150, 0, 3 / 100, -16 / 75, -1 / 20, 6 / 25])
def RK45(self, x0, y0, ydot, h, u_input, trunc_error=False) -> np.array:
# u_input = + np.array(self.omega_r.Controller_model(y0, 0)) * 2 / 3 + u_input / 3
k1 = h * np.array(ydot(y0, x0 + self.A[0] * h, *u_input), dtype=float)
k2 = h * np.array(ydot(y0 + self.B[1, 0] * k1, x0 + self.A[1] * h, *u_input), dtype=float)
k3 = h * np.array(ydot(y0 + self.B[2, 0] * k1 + self.B[2, 1] * k2, x0 + self.A[2] * h, *u_input), dtype=float)
k4 = h * np.array(
ydot(y0 + self.B[3, 0] * k1 + self.B[3, 1] * k2 + self.B[3, 2] * k3, x0 + self.A[3] * h, *u_input),
dtype=float,
)
k5 = h * np.array(
ydot(
y0 + self.B[4, 0] * k1 + self.B[4, 1] * k2 + self.B[4, 2] * k3 + self.B[4, 3] * k4,
x0 + self.A[4] * h,
*u_input
),
dtype=float,
)
k6 = h * np.array(
ydot(
y0 + self.B[5, 0] * k1 + self.B[5, 1] * k2 + self.B[5, 2] * k3 + self.B[5, 3] * k4 + self.B[5, 4] * k5,
x0 + self.A[5] * h,
*u_input
),
dtype=float,
)
y_new = (
y0
+ k1 * self.CH[0]
+ k2 * self.CH[1]
+ k3 * self.CH[2]
+ k4 * self.CH[3]
+ k5 * self.CH[4]
+ k6 * self.CH[5]
)
if trunc_error:
trunc_error = (
k1 * self.CT[0]
+ k2 * self.CT[1]
+ k3 * self.CT[2]
+ k4 * self.CT[3]
+ k5 * self.CT[4]
+ k6 * self.CT[5]
)
return y_new
def RbI(self, THETA):
A = sp.Matrix(
[
[
sp.cos(THETA[2]) * sp.cos(THETA[1]),
sp.cos(THETA[2]) * sp.sin(THETA[1]) * sp.sin(THETA[0]) - sp.sin(THETA[2]) * sp.cos(THETA[0]),
sp.cos(THETA[2]) * sp.sin(THETA[1]) * sp.cos(THETA[0]) + sp.sin(THETA[2]) * sp.sin(THETA[0]),
],
[
sp.sin(THETA[2]) * sp.cos(THETA[1]),
sp.sin(THETA[2]) * sp.sin(THETA[1]) * sp.sin(THETA[0]) + sp.cos(THETA[2]) * sp.cos(THETA[0]),
sp.sin(THETA[2]) * sp.sin(THETA[1]) * sp.cos(THETA[0]) - sp.cos(THETA[2]) * sp.sin(THETA[0]),
],
[-sp.sin(THETA[1]), sp.cos(THETA[1]) * sp.sin(THETA[0]), sp.cos(THETA[1]) * sp.cos(THETA[0])],
]
)
return A
def thetabi(self, THETA):
A = sp.Matrix(
[
[1, sp.sin(THETA[0]) * sp.tan(THETA[1]), sp.cos(THETA[0]) * sp.tan(THETA[1])],
[0, sp.cos(THETA[0]), -sp.sin(THETA[0])],
[0, sp.sin(THETA[0]) / sp.cos(THETA[1]), sp.cos(THETA[0]) / sp.cos(THETA[1])],
]
)
return A
def lambd_eq_maker(self, t, x_state, U_input): # for_ode_int
My_helicopter = Helicopter()
symp_eq = My_helicopter.Helicopter_model(t, x_state, U_input)
jacobian = ((sp.Matrix(symp_eq)).jacobian(x_state)).replace(
sp.DiracDelta(sp.sqrt(x_state[0] ** 2 + x_state[1] ** 2)), 0
)
J_symb_math = sp.lambdify((x_state, t) + U_input, jacobian, modules=["numpy"])
symb_math = sp.lambdify((x_state, t) + U_input, symp_eq, modules=["numpy"])
return symb_math, J_symb_math
def Helicopter_model(self, t, x_state, U_input):
(
u_velocity,
v_velocity,
w_velocity,
p_angle,
q_angle,
r_angle,
fi_angle,
theta_angle,
si_angle,
_,
_,
_,
a_flapping,
b_flapping,
c_flapping,
d_flapping,
uwind,
vwind,
wwind
) = (
x_state[0],
x_state[1],
x_state[2],
x_state[3],
x_state[4],
x_state[5],
x_state[6],
x_state[7],
x_state[8],
x_state[9],
x_state[10],
x_state[11],
x_state[12],
x_state[13],
x_state[14],
x_state[15],
x_state[16],
x_state[17],
x_state[18],
)
A_b, B_a, taus, Dlat, Kc, Kd, Clon = 0.1, 0.1, 0.20008, 0, 0.3058, 0.3058, 0
I_moment = sp.Matrix([[0.297831, 0, 0], [0, 1.5658, 0], [0, 0, 2]])
inverse_I_moment = I_moment ** (-1)
THETA = sp.Matrix([fi_angle, theta_angle, si_angle])
omega = sp.Matrix([p_angle, q_angle, r_angle]).reshape(3, 1)
wind_velocity = (self.RbI(THETA) * (sp.Matrix([uwind, vwind, wwind]))).reshape(3, 1)
Velocity = sp.Matrix([u_velocity, v_velocity, w_velocity]).reshape(3, 1)
Uf = wind_velocity - Velocity
Uftr = Velocity - wind_velocity
Va_induced = Uf[2] / self.vh
Va_induced_t = Uftr[1] / self.vhtr
mu = ((Uf.norm()) / self.vh) ** 2 - Va_induced ** 2
mu_tr = ((Uftr.norm()) / self.vhtr) ** 2 - Va_induced_t ** 2
romega = r_angle - self.OMEGA
qomega = q_angle + self.OMEGAtr
        mumr = sp.sqrt((u_velocity - uwind) ** 2 + (v_velocity - vwind) ** 2) / (self.OMEGA * self.Rmr)
main_induced_v = 4.055 / (((Va_induced * 1.2 + 1.3)) ** 2 + 1.6) + 0.06
tail_induced_v = 4.055 / (((Va_induced_t * 1.2 + 1.3)) ** 2 + 1.6) + 0.06
Vi = main_induced_v * self.vh / sp.sqrt(1 + mu)
Vi_t = tail_induced_v * self.vhtr / sp.sqrt(1 + mu_tr)
        Vyi, Vzi = v_velocity - Vi_t - vwind, w_velocity - Vi - wwind
        Vxq = u_velocity + q_angle * self.zcg - uwind
        Vyp = v_velocity - p_angle * self.zcg - vwind
Ku = 2 * self.K_mu * (4 * U_input[0] / 3 - Vi / (self.OMEGA * self.Rmr))
Kv = -Ku
Kw = (
16
* self.K_mu
* mumr ** 2
* sp.sign(mumr)
/ ((1 - mumr ** 2 / 2) * (8 * sp.sign(mumr) + self.CLa * self.sigmamr))
)
        Vfus = sp.sqrt(
            (u_velocity - uwind) ** 2 + (v_velocity - vwind) ** 2 + (w_velocity - wwind - Vi) ** 2
        )
        Xfus, Yfus, Zfus = (
            -0.5 * self.rho * self.Sxfus * Vfus * (u_velocity - uwind),
            -0.5 * self.rho * self.Syfus * Vfus * (v_velocity - vwind),
            -0.5 * self.rho * self.Szfus * Vfus * (w_velocity - wwind - Vi),
        )
Fdrag = sp.Matrix([Xfus, Yfus, Zfus]).T
mux, muy, muz = (
-(Uf[0]) / (self.OMEGA * self.Rmr),
-(Uf[1]) / (self.OMEGA * self.Rmr),
-(Uf[2]) / (self.OMEGA * self.Rmr),
)
lambda0 = Vi / (self.OMEGA * self.Rmr)
fTmr = (
1
/ 4
* self.rho
* 3.1415
* self.Rmr ** 4
* self.OMEGA ** 2
* self.sigmamr
* (self.CL0 * (2 / 3 + mux ** 2 + muy ** 2) + self.CLa * (muz - lambda0))
)
bTmr = (
1
/ 4
* self.rho
* 3.1415
* self.Rmr ** 4
* self.OMEGA ** 2
* self.sigmamr
* self.CLa
* sp.Matrix([mux ** 2 + muy ** 2 + 2 / 3, -muy, mux, 0])
)
fQmr = (
1
/ 8
* self.rho
* 3.1415
* self.Rmr ** 5
* self.OMEGA ** 2
* self.sigmamr
* self.CLa
* (self.CD0 / self.CLa * (1 + mux ** 2 + muy ** 2) - 2 * (muz - lambda0) ** 2)
)
bQmr = (
1
/ 12
* self.rho
* self.Rmr ** 2
* self.sigmamr
* 3.1415
* self.CLa
* sp.Matrix(
[
-self.Rmr ** 2
                    * (p_angle * (u_velocity - uwind) + q_angle * (v_velocity - vwind) - 2 * romega * Vzi),
0.25 * self.Rmr * (6 * Vyp * Vzi - 3 * self.Rmr ** 2 * q_angle * romega),
-0.25 * self.Rmr * (6 * Vxq * Vzi - 3 * self.Rmr ** 2 * p_angle * romega),
0,
]
)
)
fTtr = (
1.5
* 1
/ 12
* self.rho
* self.Rtr ** 2
* self.sigmatr
* 3.1415
* self.CLatr
* self.Rtr
* (
(3 * q_angle + 2 * self.OMEGAtr) * (p_angle * self.zfus - r_angle * self.xfus)
- 2 * qomega * Vyi
+ (u_velocity - uwind) * p_angle
            + (w_velocity - wwind - self.Kt * Vi) * r_angle
)
)
bTtr = (
1
/ 12
* self.rho
* self.Rtr ** 2
* self.sigmatr
* 3.1415
* self.CLatr
* sp.Matrix(
[
0,
0,
0,
3 * ((u_velocity - uwind) + q_angle * self.zfus - r_angle * self.yfus) ** 2
                + 3 * ((w_velocity - wwind - self.Kt * Vi) + p_angle * self.yfus - q_angle * self.xfus) ** 2
+ 2 * self.Rtr ** 2 * (q_angle + self.OMEGAtr) ** 2,
]
)
)
Tmr = fTmr + bTmr.dot(U_input)
Ttr = fTtr + bTtr.dot(U_input)
forces = sp.Matrix([-a_flapping * Tmr, b_flapping * Tmr + Ttr, -Tmr]).T
F = (forces + Fdrag).reshape(3, 1)
F_gravity = (sp.Matrix([0, 0, self.mass * 9.8])).reshape(3, 1)
F_total = F + (self.RbI(THETA)) ** (-1) * F_gravity
Q = fQmr + bQmr.dot(U_input)
Mroll = (self.Kbeta - Tmr * self.zcg) * b_flapping
Mpitch = (self.Kbeta - Tmr * self.zcg) * a_flapping
Myaw = Q + Ttr * self.xfus
M = sp.Matrix([Mroll, Mpitch, Myaw]).reshape(3, 1)
x_dot1_3 = F_total / self.mass - omega.cross(Velocity)
x_dot4_6 = (inverse_I_moment * M) - inverse_I_moment * omega.cross(I_moment * omega)
x_dot7_9 = self.thetabi(THETA) * omega
x_dot10_12 = self.RbI(THETA) * Velocity
x_dot13 = (
-q_angle
- a_flapping / self.taufb
+ 1
/ (self.taufb * self.OMEGA * self.Rmr)
            * (Ku * (u_velocity - uwind) + Kw * (w_velocity - wwind))
+ self.Alon / self.taufb * (U_input[2] + Kc * c_flapping)
- A_b * b_flapping / self.taufb
)
x_dot14 = (
-p_angle
- b_flapping / self.taufb
            + 1 / (self.taufb * self.OMEGA * self.Rmr) * Kv * (v_velocity - vwind)
+ self.Blat / self.taufb * (U_input[1] + Kd * d_flapping)
+ B_a * a_flapping / self.taufb
)
x_dot15 = -q_angle - c_flapping / taus + Clon / taus * U_input[2]
x_dot16 = -p_angle - d_flapping / taus + Dlat / taus * U_input[1]
return [
x_dot1_3[0],
x_dot1_3[1],
x_dot1_3[2],
x_dot4_6[0],
x_dot4_6[1],
x_dot4_6[2],
x_dot7_9[0],
x_dot7_9[1],
x_dot7_9[2],
x_dot10_12[0],
x_dot10_12[1],
x_dot10_12[2],
x_dot13,
x_dot14,
x_dot15,
x_dot16,
uwind,
vwind,
wwind
]
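# Usage sketch (illustrative, commented out because env.controller is an external
# dependency of this repo): one RKF45 step of the scalar test ODE y' = -y.
# heli = Helicopter()
# y1 = heli.RK45(x0=0.0, y0=np.array([1.0]), ydot=lambda y, t: [-y[0]], h=0.1, u_input=())
# print(y1)  # close to exp(-0.1) ~= 0.9048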
|
[
"kamyab@ualberta.ca"
] |
kamyab@ualberta.ca
|
0028fade250bebe0aa2599def008942e49acdad7
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2167/60832/318097.py
|
045e6b4891a50fe1221381f2de706371317ff582
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 57
|
py
|
a=input()
if a=='4 5 2':
print(17)
else:
print(a)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
9011489024d223c4e4b4cfb53b9b1011434283c1
|
ecd2f1d99d807178589b5468bc220b0509e5acad
|
/Python/AI_Environment/subProcessTest/comFunctionsMaster.py
|
d23ab7d312d58eb77c8ddf51f61a2f84edf6537a
|
[
"BSD-3-Clause"
] |
permissive
|
TheMadDuck/neural_neophyte
|
b9ec2f3056a98bec6abff2db142f55456cb62056
|
da804b9364144d26e2be47c01f7fa7bc360a3d3b
|
refs/heads/master
| 2021-01-16T23:51:40.442535
| 2017-03-21T16:00:21
| 2017-03-21T16:00:21
| 56,276,507
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 608
|
py
|
# use this communication form
import comFunctionsSlave as sl
# sl.write(3)
# print(sl.show())
safetyIndex = 0
while(True):
    safetyIndex += 1
    if sl.getSignal() == "processing":  # maybe set this in the middle or at the end of the loop (nah, not good either) to avoid an unnecessary number of iterations..
        continue
    if sl.getSignal() == "ended":
        break
    if sl.getSignal() == "enter number":
        output = sl.write(32)
        print(output)
    if sl.getSignal() == "i have a solution":
        sl.show()
    print("safetyIndex: " + str(safetyIndex))
    if safetyIndex > 25:
        break
|
[
"noll@debianLaptopSSD"
] |
noll@debianLaptopSSD
|
32ffe60d261ed4eb2427e82eea06481a845e1225
|
4704671a33bf1606983466e0f5b61a8c251dd464
|
/booth/models.py
|
fc67181b92711b8f6367240d92694c6e39ed950e
|
[] |
no_license
|
sachajoy/Sikar-Booth-Demand
|
8977948b99e1783226a4efa924cb3230b15c7f83
|
4e084e35d5c747652b9ea02f6ea1618f1fce1822
|
refs/heads/master
| 2023-01-01T07:09:49.804006
| 2020-10-22T03:04:33
| 2020-10-22T03:04:33
| 298,960,978
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,436
|
py
|
from django.db import models
from django.shortcuts import reverse
class Contractor(models.Model):
xname = models.CharField(max_length=64)
class Meta:
db_table = 'contractor'
def __str__(self):
return self.xname
class Route(models.Model):
route_no = models.SmallIntegerField(default=0)
xname = models.CharField(max_length=40)
contractor_id = models.ForeignKey(Contractor,
on_delete=models.CASCADE,
db_column='contractor_id')
active = models.BooleanField(default=True)
class Meta:
db_table = 'route'
def __str__(self):
return "{} {}".format(self.route_no, self.xname)
def get_absolute_url(self):
return reverse('booth:create-route')
class Booth(models.Model):
booth_no = models.SmallIntegerField(default=0)
route_no = models.ForeignKey(Route,
on_delete=models.CASCADE,
db_column='route_no')
xname = models.CharField(max_length=64, default='')
contractor_id = models.ForeignKey(Contractor,
on_delete=models.CASCADE,
db_column='contractor_id')
add1 = models.CharField(max_length=32, default='')
add2 = models.CharField(max_length=32, default='')
add3 = models.CharField(max_length=32, default='')
add4 = models.CharField(max_length=32, default='')
mobile = models.CharField(max_length=11, default='')
pan = models.CharField(max_length=32, default='')
wef = models.DateField(default='0100-01-01')
active = models.BooleanField(default=True)
remarks = models.CharField(max_length=64, default='')
uid = models.CharField(max_length=5, default='')
upwd = models.CharField(max_length=64, default='')
tran_next_id = models.BigIntegerField(default=0)
class Meta:
db_table = 'booth'
def __str__(self):
return "{} {} {}".format(self.booth_no,
self.xname,
self.route_no)
def get_absolute_url(self):
return reverse('booth:create-list-booth')
class ItemGroup(models.Model):
xname = models.CharField(max_length=32, default='')
itype = models.CharField(max_length=1, default='')
class Meta:
db_table = 'itemgroup'
def __str__(self):
return self.xname
class ItemMST(models.Model):
xname = models.CharField(max_length=32, default='')
shortname = models.CharField(max_length=8, default='')
itemgroup_id = models.ForeignKey(ItemGroup,
on_delete=models.CASCADE,
db_column='itemgroup_id')
itype = models.CharField(max_length=1, default='')
unit = models.CharField(max_length=8, default='')
packingtype = models.CharField(max_length=8, default='')
sale_unit = models.CharField(max_length=8, default='')
qty_fill = models.DecimalField(max_digits=15, decimal_places=3)
active = models.BooleanField(default=True)
class Meta:
db_table = 'itemmst'
def __str__(self):
return self.shortname
class Shift(models.Model):
t_from = models.SmallIntegerField(default=0)
t_upto = models.SmallIntegerField(default=0)
shift = models.CharField(max_length=1, default='')
class Meta:
db_table = 'shift'
def __str__(self):
return self.shift
class Tran(models.Model):
id = models.BigIntegerField(primary_key=True)
xdatetime = models.DateTimeField(default='0100-01-01 00:00:00')
xdate = models.DateField(default='0100-01-01')
shift = models.CharField(max_length=1)
booth_no = models.ForeignKey(Booth,
on_delete=models.CASCADE,
db_column='booth_no')
booth_name = models.CharField(max_length=64, default='')
route_no = models.ForeignKey(Route,
on_delete=models.CASCADE,
db_column='route_no')
contractor_id = models.ForeignKey(Contractor,
on_delete=models.CASCADE,
db_column='contractor_id')
class Meta:
db_table = 'tran'
def __str__(self):
return "{} {} {}".format(self.shift,
self.xdate,
self.booth_no)
class TranDet(models.Model):
tran_id = models.ForeignKey(Tran,
on_delete=models.CASCADE,
db_column='tran_id')
xdate = models.DateField(default='0100-01-01')
shift = models.CharField(max_length=1)
booth_no = models.ForeignKey(Booth,
on_delete=models.CASCADE,
db_column='booth_no')
sno = models.SmallIntegerField(default=0)
item_id = models.ForeignKey(ItemMST,
on_delete=models.CASCADE,
db_column='item_id')
shortname = models.CharField(max_length=8, default='')
unit = models.CharField(max_length=8, default='')
packingtype = models.CharField(max_length=8, default='')
sale_unit = models.CharField(max_length=8, default='')
quantity = models.SmallIntegerField(default=0)
class Meta:
db_table = 'trandet'
|
[
"me.arihant.banthia@gmail.com"
] |
me.arihant.banthia@gmail.com
|
a75f72f36d5ff9cd8443e42a7cc03b0b0f342b0f
|
983b7a26ec75cad91c3f76b51511b8dba1e248f2
|
/UploadCOS/cos_server.py
|
1ba961167377926012f31922a2736357682212e0
|
[] |
no_license
|
AboutShanhai/Python3
|
db900e0b3d13bfaf08dd5d71ce284aeb231f05fe
|
d3c2e8f2302cd30d6445087498dd3a1791a87d32
|
refs/heads/master
| 2023-05-16T02:37:52.832029
| 2021-06-07T15:40:41
| 2021-06-07T15:40:41
| 324,077,564
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,502
|
py
|
#!/usr/bin/env python
# coding:utf8
# ftp upload service
import os
import sys
import threading
import tool
import time
from AesEverywhere import aes256 # pip install aes-everywhere
from qcloud_cos import CosConfig # pip install -U cos-python-sdk-v5
from qcloud_cos import CosS3Client
sys.setrecursionlimit(10000000)
# import path
self_path = os.path.dirname(os.path.abspath(__file__))
self_path = tool.path_replace(self_path)
print("self_path:" + self_path)
cos_client = None
g_root_path = ""
g_upload_count = 0  # total number of uploads
g_succee_count = 0  # number of successful uploads
g_upload_list = []  # upload queue
g_thread_max = 50  # maximum number of upload threads
g_thread_lock = None  # thread lock
class CFileInfo:
def __init__(self, LocalFilePath, Key):
self.LocalFilePath = LocalFilePath
self.Key = Key
        self.count = 0  # 0: idle  1: uploading
self.th_idx = 0
# file upload
def uploadThread(file_info):
global g_succee_count
global g_thread_lock
global g_upload_count
try:
LocalFile_Path = file_info.LocalFilePath
Web_Key = file_info.Key
th_idx = file_info.th_idx
response = cos_client.upload_file(
Bucket=cos_bucket,
            LocalFilePath=LocalFile_Path,  # local file path
            Key=Web_Key,  # object key in the bucket after upload
MAXThread=50,
EnableMD5=False
)
# print(response['ETag'])
g_thread_lock.acquire(True)
print("成功: " + str(g_succee_count) + "/" + str(g_upload_count) + ",t:" + str(int(th_idx/10)) + str(int(th_idx % 10)) + ",Path=" + LocalFile_Path)
g_succee_count += 1
g_thread_lock.release()
start_next_thread(th_idx)
return True
    # catch: failure
    except Exception as error:
        print("exception, retrying Path=" + file_info.LocalFilePath)
start_retry_thread(file_info)
return False
    # default: failure
start_retry_thread(file_info)
return False
# start the next queued upload on the current thread
def start_next_thread(th_idx):
global g_thread_lock
global g_upload_list
g_thread_lock.acquire(True)
count = len(g_upload_list)
file_info = None
if count > 0:
file_info = g_upload_list.pop()
file_info.th_idx = th_idx
# print("开始上传:",file_info.local_file)
g_thread_lock.release()
if file_info:
uploadThread(file_info)
# create a worker thread
def start_new_thread(th_idx):
global g_thread_lock
global g_upload_list
g_thread_lock.acquire(True)
count = len(g_upload_list)
file_info = None
if count > 0:
file_info = g_upload_list.pop()
file_info.th_idx = th_idx
# print("开始上传:",file_info.local_file)
g_thread_lock.release()
if file_info:
        # create the thread
        t = threading.Thread(target=uploadThread, args=[file_info])
        # daemon: do not block interpreter exit
        t.setDaemon(True)
        # start it
        t.start()
t.start()
return t
return None
# a failed upload is retried until it succeeds
def start_retry_thread(file_info):
    # retry after 1 second
    time.sleep(1)
    g_thread_lock.acquire(True)
    file_info.count += 1
    print('retry ' + str(file_info.count) + ' time(s): ' + file_info.LocalFilePath)
g_thread_lock.release()
uploadThread(file_info)
# start the upload run
def startUploadFinder(local_root, ver):
global g_thread_lock
global g_thread_max
global g_root_path
global g_upload_list
global g_upload_count
global g_succee_count
global cos_bucket
global cos_client
g_root_path = local_root
    print('upload path: ' + g_root_path)
versions = tool.read_file_json(os.path.join(g_root_path, 'version.manifest'))
server_info = tool.get_server_info()
ver_info = server_info['ver_info']
if not (ver in ver_info):
return 1
ver_path = ver_info[ver]
ver_bucket_name = server_info['ver_bucket_name']
if not (ver in ver_bucket_name):
return 2
cos_bucket = ver_bucket_name[ver]
cosinfo = server_info['cos_info']
secret_id = cosinfo['SecretId']
secret_key = cosinfo['SecretKey']
region = cosinfo['Region']
key = 'Ua^FkU=+l_TYgODQ'
secret_id = aes256.decrypt(secret_id, key)
secret_key = aes256.decrypt(secret_key, key)
    # secret_id = 'secretId'  # replace with your secretId
    # secret_key = 'secretKey'  # replace with your secretKey
    # region = 'ap-guangzhou'  # replace with your Region
    # 2. build the client object
    config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key)
    cos_client = CosS3Client(config)
    # list of local files to upload
g_upload_list = []
for maindir, subdir, file_list in os.walk(g_root_path):
        # skip directories that do not belong to the version being uploaded
hotver = str(maindir)
if hotver.find(versions['version']) == -1:
continue
for filename in file_list:
apath = os.path.join(maindir, filename)
apath = apath.replace('\\', '/')
file_path = apath.replace(g_root_path, '')
cloud_name = ver_path + file_path
file_info = CFileInfo(apath, cloud_name)
g_upload_list.append(file_info)
g_upload_count = len(g_upload_list)
    # multi-threaded upload ===========================================
    print('start upload: file count=' + str(g_upload_count))
g_succee_count = 0
start_time = time.time()
    # cap on concurrent upload threads
g_thread_lock = threading.Lock()
thread_list = []
for i in range(g_thread_max):
t = start_new_thread(i)
if t:
thread_list.append(t)
for t in thread_list:
t.join()
    # upload finished
    end_time = time.time()
    print('asset file upload finished')
    print('upload path:', g_root_path)
    print('upload count:', g_upload_count)
    print('success count:', g_succee_count)
    print('upload elapsed:', end_time - start_time)
    print('about to upload the version manifest files......')
g_upload_list = []
LocalFile = g_root_path + '/project.manifest'
WebKey = ver_path + '/project.manifest'
file_info = CFileInfo(LocalFile, WebKey)
g_upload_list.append(file_info)
LocalFile = g_root_path + '/version.manifest'
WebKey = ver_path + '/version.manifest'
file_info = CFileInfo(LocalFile, WebKey)
g_upload_list.append(file_info)
g_upload_count = len(g_upload_list)
    # multi-threaded upload ===========================================
    print('start upload: file count=' + str(g_upload_count))
    g_succee_count = 0
    # cap on concurrent upload threads
g_thread_lock = threading.Lock()
thread_list = []
for i in range(g_thread_max):
t = start_new_thread(i)
if t:
thread_list.append(t)
for t in thread_list:
t.join()
    # upload finished
    all_end_time = time.time()
    print('upload finished:')
    print('upload path:', g_root_path)
    print('upload count:', g_upload_count)
    print('success count:', g_succee_count)
    print('upload elapsed:', all_end_time - end_time)
    print('total elapsed for all assets:', all_end_time - start_time)
return 0
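# Usage sketch (illustrative values; version keys and remote paths come from
# tool.get_server_info(), which is not shown here):
# if __name__ == '__main__':
#     ret = startUploadFinder('/path/to/hot-update/output', 'ver_test')
#     print('exit code:', ret)  # 0 = done, 1/2 = version key not configured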
|
[
"shanhai@gmail.com"
] |
shanhai@gmail.com
|
a7abe996cbcf196b72f6e4c63a02544f4635270f
|
25ff7597cb4bb13c065def61d16ca17a7188f0a9
|
/tributary/reactive/input/http.py
|
7a458702a696c4155005f01c3ed05fcd6ab1b622
|
[
"Apache-2.0"
] |
permissive
|
fagan2888/tributary
|
5d41a47cdfc081a7047933400f920a67cafa477f
|
f5e9c769faf8ec03203426aca0d282593e85fd7f
|
refs/heads/master
| 2020-09-20T12:24:43.632214
| 2019-11-26T21:34:48
| 2019-11-26T21:34:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,255
|
py
|
import asyncio
import aiohttp
import json as JSON
from ..base import _wrap
def HTTP(url, *args, **kwargs):
return AsyncHTTP(url, *args, **kwargs)
def AsyncHTTP(url, interval=1, repeat=1, json=False, wrap=False, field=None, proxies=None, cookies=None):
async def _req(url, interval=1, repeat=1, json=False, wrap=False, field=None, proxies=None, cookies=None):
count = 0
while count < repeat:
async with aiohttp.ClientSession() as session:
async with session.get(url, cookies=cookies, proxy=proxies) as response:
msg = await response.text()
if msg is None or response.status != 200:
break
if json:
msg = JSON.loads(msg)
if field:
msg = msg[field]
if wrap:
msg = [msg]
yield msg
                    if interval:
                        await asyncio.sleep(interval)  # non-blocking; time.sleep would stall the event loop
if repeat >= 0:
count += 1
return _wrap(_req, dict(url=url, interval=interval, repeat=repeat, json=json, wrap=wrap, field=field, proxies=proxies, cookies=cookies), name='HTTP')
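# Usage sketch (illustrative URL): poll a JSON endpoint five times, one second apart.
# node = HTTP('https://example.com/data.json', interval=1, repeat=5, json=True)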
|
[
"t.paine154@gmail.com"
] |
t.paine154@gmail.com
|
03c482bc6725395620509c22b2331fb7f1af54df
|
9a810f2bd65c269564462b5cab55879ea9475fa9
|
/pureDjango/mysite/mysite/urls.py
|
465d0286a961f85ef2cf6d98c0f33b51eac868d5
|
[] |
no_license
|
sebasped/djangoTest
|
021140a1c5ba6e3f2b721ece436ff4806d8ca80c
|
227a134e7c9ff8a297b1b40cee7e07edeb50ae7a
|
refs/heads/master
| 2022-04-21T00:35:04.713087
| 2020-04-24T13:39:10
| 2020-04-24T13:39:10
| 180,194,516
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 800
|
py
|
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
path('polls/', include('polls.urls')),
]
|
[
"lbali@zeus2.oficina"
] |
lbali@zeus2.oficina
|
a824ba19031758c6659df47f02fba9cc4107b35f
|
a246c94ec383e1c22459c2079fe6bf9c478fa195
|
/multiplier-x.py
|
c39ea5b5389f8fcee9ad84f591d503366b645c03
|
[] |
no_license
|
LarsKalishoek/functions-tryout
|
9e2599bc33ff0a88d4fceefcd0b87e7d0dd10281
|
0557f9574e3fc4c57073fd2242c2b079a42e3d1f
|
refs/heads/main
| 2023-08-11T13:30:39.934632
| 2021-09-23T10:42:38
| 2021-09-23T10:42:38
| 409,102,130
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
getal = int(input("Which number's multiplication table would you like to see (1 through 10): "))
def tafelVan(noemer: int):
for teller in range(1, 11):
print(teller, " x ", noemer, " = ", teller * noemer )
tafelVan(getal)
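# Example (illustrative): tafelVan(3) prints "1  x  3  =  3" up to "10  x  3  =  30".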
|
[
"99069312@mydavinci.nl"
] |
99069312@mydavinci.nl
|
dca1f095d3a2e0d51952de58aef418a3048b8ced
|
0c41c035ddd483b360a31a8aa67fb4be45bcbcd0
|
/motion_field_estimation_utils.py
|
fa9f55d4105470f1eec82c7efda96d7455504bb9
|
[
"MIT"
] |
permissive
|
almostdutch/motion-field-estimation
|
86d13da878ac4acfcfe2a41e90c299749e1e15cb
|
17592e6822cc39885f19f104fff955222cf55acc
|
refs/heads/main
| 2023-04-30T03:45:29.959235
| 2021-05-22T20:15:29
| 2021-05-22T20:15:29
| 356,052,771
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,374
|
py
|
'''
motion_field_estimation_utils.py
Utilities for estimating the motion field between two 2D (single channel) same size images \
by using the gradient constraint equation
'''
import numpy as np
from scipy.ndimage import shift, rotate
from skimage.transform import warp
from scipy.ndimage import gaussian_filter
from scipy.signal import convolve2d
def ImageTranslate(image_in, shift_row, shift_col):
image_out = shift(image_in, (shift_row, shift_col));
return image_out;
def ImageRotate(image_in, rot_angle):
image_out = rotate(image_in, rot_angle, reshape=False);
return image_out;
def MotionCorrection(image_in, delta_y, delta_x):
Nrows, Ncols = image_in.shape;
row_coords, col_coords = np.meshgrid(np.arange(Nrows), np.arange(Ncols), indexing='ij');
image_out = warp(image_in, np.array([row_coords + delta_y, col_coords + delta_x]), mode='edge');
return image_out;
def _MotionFieldEstimationSingleStep(image0, image1, neighborhood_size = 7, sigma = 3, reg_coef = 0):
delta_y = np.zeros(image0.shape);
delta_x = np.zeros(image0.shape);
# derivative kernels
kernel_x = np.array([[-1, 0, 1]]) / 2;
kernel_y = kernel_x.T;
    # smoothing to reduce the higher-order terms in the Taylor expansion
image0_f = gaussian_filter(image0, sigma, mode = 'constant', cval = 0);
image1_f = gaussian_filter(image1, sigma, mode = 'constant', cval = 0);
# spatial and temporal image gradients
Ix = convolve2d(image0_f, kernel_x, boundary = 'fill', fillvalue = 0, mode = 'same');
Iy = convolve2d(image0_f, kernel_y, boundary = 'fill', fillvalue = 0, mode = 'same');
It = image1_f - image0_f;
IxIx = Ix * Ix;
IxIy = Ix * Iy;
IyIy = Iy * Iy;
IxIt = Ix * It;
IyIt = Iy * It;
    # smoothing at this stage is equivalent to giving a higher weighting to the center
    # of the neighborhood
IxIx_f = gaussian_filter(IxIx, sigma, mode = 'constant', cval = 0);
IxIy_f = gaussian_filter(IxIy, sigma, mode = 'constant', cval = 0);
IyIy_f = gaussian_filter(IyIy, sigma, mode = 'constant', cval = 0);
IxIt_f = gaussian_filter(IxIt, sigma, mode = 'constant', cval = 0);
IyIt_f = gaussian_filter(IyIt, sigma, mode = 'constant', cval = 0);
nb = int((neighborhood_size - 1) / 2);
for ii in range(nb, image0.shape[0] - nb):
for jj in range(nb, image0.shape[1] - nb):
# elements of matrix A (2 x 2)
a = IxIx_f[ii - nb:ii + nb + 1, jj - nb:jj + nb + 1];
a = np.sum(a, axis = (0, 1));
b = IxIy_f[ii - nb:ii + nb + 1, jj - nb:jj + nb + 1];
b = np.sum(b, axis = (0, 1));
c = b;
d = IyIy_f[ii - nb:ii + nb + 1, jj - nb:jj + nb + 1];
d = np.sum(d, axis = (0, 1));
# elements of vector B (2, 1)
f = IxIt_f[ii - nb:ii + nb + 1, jj - nb:jj + nb + 1];
f = np.sum(f, axis = (0, 1));
g = IyIt_f[ii - nb:ii + nb + 1, jj - nb:jj + nb + 1];
g = np.sum(g, axis = (0, 1));
# system of linear eqs
A = np.array([[a, b], [c, d]]);
B = -np.array([[f], [g]]);
# normal equation with Tikhonov regularization
X = np.linalg.solve(A.T @ A + reg_coef * np.eye(2,2), A.T @ B);
delta_x[ii, jj], delta_y[ii, jj] = -X;
return delta_y, delta_x;
def MotionFieldEstimation(image0, image1, neighborhood_size = 7, sigma = 3, reg_coef = 0, Niter = 1):
delta_y_iter = np.zeros(image0.shape);
delta_x_iter = np.zeros(image0.shape);
for i in range(Niter):
# the residual motion field (after the 1st iteration) is very small
# (sub-pixel translation) so no need for a large local neighborhood
if i >= 1:
neighborhood_size = 5;
sigma = 3;
delta_y, delta_x = _MotionFieldEstimationSingleStep(image0, image1, neighborhood_size, sigma, reg_coef);
image1 = MotionCorrection(image1, delta_y, delta_x);
delta_y_iter += delta_y;
delta_x_iter += delta_x;
return delta_y_iter, delta_x_iter;
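# Usage sketch (illustrative): recover a small synthetic shift. The sign convention
# of the recovered field follows MotionCorrection above.
# rng = np.random.RandomState(0);
# image0 = gaussian_filter(rng.rand(128, 128), 2);
# image1 = ImageTranslate(image0, 1.0, -0.5);
# delta_y, delta_x = MotionFieldEstimation(image0, image1, Niter = 3);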
|
[
"noreply@github.com"
] |
almostdutch.noreply@github.com
|
9d93eddd62b7d733d11211546604e1cdfa1bcb98
|
64e9d7580d1add7a1a03a66c93a88478268acbea
|
/smallest.py
|
8f1752665ff6a4977382c352df75c9243ff1b5c7
|
[] |
no_license
|
Abhila400/Programmer
|
ee6ee4eeeec496bc147192d8df0802d39e2595c5
|
9e212cb6c05c2d503fdbad7f4cdf9381b7f3fae2
|
refs/heads/master
| 2020-06-10T11:58:48.075955
| 2019-08-17T13:25:46
| 2019-08-17T13:25:46
| 193,642,942
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 65
|
py
|
n=int(input())
a=[int(i) for i in input().split()]
print(min(a))
|
[
"noreply@github.com"
] |
Abhila400.noreply@github.com
|
58f0fa75e6115236e53c3e0c9fb0f86fc60cc515
|
8b84d29ec3d38a07b471890d6688da2aad5fb1b4
|
/rpsp/policy_opt/psr_policy_updaters.py
|
3fd151910db1e57c702d362a52e6bd94e4b90494
|
[
"Apache-2.0"
] |
permissive
|
ahefnycmu/rpsp
|
b0da320417f09dce2534f2aaa82a99d5a71855d7
|
ff3aa3e89a91bb4afb7bad932d2c04691a727a63
|
refs/heads/master
| 2020-03-18T06:28:20.425943
| 2018-09-08T16:56:07
| 2018-09-08T16:56:07
| 134,397,829
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,332
|
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat May 6 10:10:01 2017
@author: ahefny, zmarinho
"""
from collections import OrderedDict
from time import time
import numpy as np
import theano
import theano.compile
import theano.tensor as T
from theano.ifelse import ifelse
from rpsp import globalconfig
from rpsp.policy_opt.SGD_opt import optimizers
from rpsp.policy_opt.nn_policy_updaters import NNPolicyUpdater, GradientPolicyUpdater, VRPGPolicyUpdater, \
create_true_mean_function_nonseq, tf_cat_traj_info, t_vrpg_traj_cost, TRPOPolicyUpdater
from rpsp.rpspnets.psr_lite.utils.nn import tf_get_normalized_grad_per_param
def _add_feats_to_traj_info(psrnet, traj_info):
traj_info['obs'] = traj_info['post_states']
X = traj_info['obs']
traj_info['obs_feats'] = psrnet._process_obs(X.reshape((-1, X.shape[2]))).reshape((X.shape[0], X.shape[1], -1))
U = traj_info['act']
traj_info['act_feats'] = psrnet._process_act(U.reshape((-1, U.shape[2]))).reshape((U.shape[0], U.shape[1], -1))
def _get_psr_single_trajinfo(psrnet, t_single_traj_info):
'''
Given a psr replace the 'pre_state' item in t_traj_info with psr prestates.
The traj_info must contain obs_feats and act_feats for PSR filtering.
'''
valid_len = t_single_traj_info['length']
UF = t_single_traj_info['act_feats'][:valid_len]
XF = t_single_traj_info['obs_feats'][:valid_len]
H = psrnet.tf_compute_pre_states(XF, UF)
modified_traj = OrderedDict(t_single_traj_info.items())
modified_traj['pre_states'] = H
return modified_traj
def _get_psr_cat_trajinfo(psrnet, t_single_traj_info):
'''
Same as _get_psr_single_trajinfo but assumes the trajectory to be a concatentation
of multiple trajectories.
'''
valid_len = t_single_traj_info['length']
UF = t_single_traj_info['act_feats'][:valid_len]
XF = t_single_traj_info['obs_feats'][:valid_len]
SM = t_single_traj_info['start_mark'][:valid_len]
h0 = psrnet.t_initial_state
def update_psr_state(o, a, sm, h):
h = ifelse(T.eq(sm, 0.0), h, h0)
hp1 = psrnet.tf_update_state(h, o, a)
return [hp1, h]
H, _ = theano.scan(fn=update_psr_state,
outputs_info=[h0, None],
sequences=[XF, UF, SM])
modified_traj = OrderedDict(t_single_traj_info.items())
modified_traj['pre_states'] = H[1]
modified_traj['post_states'] = H[0]
return modified_traj
def _tf_get_psr_prestates_cat(psrnet, t_traj_info):
N, TT = t_traj_info['mask'].shape
t_cat_traj_info = tf_cat_traj_info(t_traj_info)
valid_len = t_cat_traj_info['length']
UF = t_cat_traj_info['act_feats'][:valid_len]
XF = t_cat_traj_info['obs_feats'][:valid_len]
SM = t_cat_traj_info['start_mark'][:valid_len]
h0 = psrnet.t_initial_state
def update_psr_state(o, a, sm, h):
h = ifelse(T.eq(sm, 0.0), h, h0)
hp1 = psrnet.tf_update_state(h, o, a)
return [hp1, h]
H, _ = theano.scan(fn=update_psr_state,
outputs_info=[h0, None],
sequences=[XF, UF, SM])
states = H[1].reshape((N, TT, -1))
states.name = 'psr_prestates'
return states
def _tf_get_psr_prestates_fixed_numtrajs(psrnet, t_traj_info, num_trajs):
states = [None] * num_trajs
for i in xrange(num_trajs):
UF = t_traj_info['act_feats'][i]
XF = t_traj_info['obs_feats'][i]
states[i] = psrnet.tf_compute_pre_states(XF, UF)
states = T.stack(states)
states.name = 'psr_prestates'
return states
def _tf_get_psr_prestates_batch(psrnet, t_traj_info):
N, TT = t_traj_info['mask'].shape
h0 = psrnet.t_initial_state
H0 = T.tile(h0, (N, 1))
UF = t_traj_info['act_feats'][:, :-1, :].transpose(1, 0, 2)
XF = t_traj_info['obs_feats'][:, :-1, :].transpose(1, 0, 2)
fn_update = lambda o, a, h: psrnet.tf_update_state_batch(h, o, a)
H, _ = theano.scan(fn=fn_update, outputs_info=[H0], sequences=[XF, UF])
H = T.concatenate([T.reshape(H0, (1, N, -1)), H], axis=0)
return H.transpose(1, 0, 2)
def _tf_get_psr_prestates(psrnet, t_traj_info, num_trajs=None):
if not globalconfig.vars.args.dbg_nobatchpsr:
try:
print 'Attempting to use batchified PSR filtering'
return _tf_get_psr_prestates_batch(psrnet, t_traj_info)
        except Exception:
            print 'WARNING: Could not use batchified PSR filtering'
print 'num trajs is', num_trajs
if num_trajs > 0:
# Use fixed num trajs for faster execution
return _tf_get_psr_prestates_fixed_numtrajs(psrnet, t_traj_info, num_trajs)
else:
# Use concatenated trajectories
return _tf_get_psr_prestates_cat(psrnet, t_traj_info)
def get_grad_update(loss1, loss2, params, c1=1., c2=1., beta=0.1, normalize=True, clip_bounds=[], decay1=0.0,
decay2=0.0):
combined_grads = []
updates = []
it = theano.shared(1.0, name='decay_iter::get_grad_update')
g1, w1, u1 = tf_get_normalized_grad_per_param(loss1, params, beta=beta, normalize=normalize,
clip_bounds=clip_bounds)
g2, w2, u2 = tf_get_normalized_grad_per_param(loss2, params, beta=beta, normalize=normalize,
clip_bounds=clip_bounds)
updates.extend(u1)
updates.extend(u2)
for (gg1, gg2) in zip(g1, g2):
combined_grad = gg1 * c1 * (1. - decay1) ** it + gg2 * c2 * (1. - decay2) ** it
combined_grads.append(combined_grad)
combined_loss = loss1 * w1 * c1 * (1. - decay1) ** it + loss2 * w2 * c2 * (1. - decay2) ** it
updates.extend([(it, it + 1)])
results = {'total_cost': combined_loss, 'cost2_avg': loss2 * w2 * c2 * (1. - decay2) ** it,
'cost1_avg': loss1 * w1 * c1 * (1. - decay1) ** it, 'a1': w1, 'a2': w2,
'updates': updates, 'grads': combined_grads, 'params': params, 'total_grads': zip(g1, g2)}
return results
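# Illustrative numpy sketch (an assumption, not part of the original module) of
# the combination rule used by get_grad_update above: each normalized gradient
# is scaled by its coefficient and an exponential decay in the iteration count.
import numpy as np
def _combine_grads_np_sketch(g1, g2, c1=1., c2=1., decay1=0.0, decay2=0.0, it=1):
    # g = g1*c1*(1-decay1)**it + g2*c2*(1-decay2)**it
    return g1 * c1 * (1. - decay1) ** it + g2 * c2 * (1. - decay2) ** it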
def _tf_get_learning_rate(grads, beta=0.1):
var = theano.shared(1.0, name='lr_g2')
grad_sq = T.sum([T.sum(g ** 2) for g in grads])
var_new = beta * var + (1.0 - beta) * grad_sq
weight = 1.0 / T.sqrt(var_new)
updates = [(var, var_new)]
return weight, updates
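# Illustrative numpy sketch (an assumption) of _tf_get_learning_rate above:
# keep an exponential moving average of the squared gradient norm and scale
# the step by its inverse square root, an RMSProp-style global learning rate.
def _lr_weight_np_sketch(grads, var, beta=0.1):
    grad_sq = sum((g ** 2).sum() for g in grads)
    var_new = beta * var + (1.0 - beta) * grad_sq
    return 1.0 / np.sqrt(var_new), var_new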
def t_psr_pred_loss(psrnet, t_single_traj_info):
valid_len = t_single_traj_info['length']
X = t_single_traj_info['obs'][:valid_len]
U = t_single_traj_info['act_feats'][:valid_len]
H = t_single_traj_info['pre_states'][:valid_len]
predictions = psrnet.tf_predict_obs(H, U)
# if globalconfig.vars.args.dbg_collapse:
# print 'checking collapse'
# predictions = dbg_nn_raise_PredictionError(predictions, 'trajectory is zero collapse!')
process_obs = X
pred_cost = T.mean((predictions - process_obs) ** 2, axis=1)
return pred_cost
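# The prediction loss above is, for each time step t, mean_j (pred[t, j] -
# obs[t, j])**2, i.e. a per-step mean squared error over observation
# dimensions; the caller is expected to average it over steps and trajectories.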
def t_single_combined_cost(policy, t_single_traj_info):
pred_cost = t_psr_pred_loss(policy._psrnet, t_single_traj_info)
reinf_cost = t_vrpg_traj_cost(policy, t_single_traj_info)
comb_cost = T.stack([reinf_cost, pred_cost], axis=1).transpose()
return comb_cost
class PSR_VRPGPolicyUpdater(VRPGPolicyUpdater):
def __init__(self, *args, **kwargs):
self._beta_reinf = theano.shared(kwargs.pop('beta_reinf', 1.0))
self._beta_pred = theano.shared(kwargs.pop('beta_pred', 1.0))
self._grad_step = theano.shared(kwargs.pop('grad_step', 1.0))
self._beta_pred_decay = kwargs.pop('beta_pred_decay', 1.0)
        self._beta_only_reinf = 0.0  # TODO: remove once debugging is done
GradientPolicyUpdater.__init__(self, *args, **kwargs)
if globalconfig.vars.args.fix_psr:
self._params = self._policy._policy.params
else:
self._params = self._policy._psrnet.params + self._policy._policy.params
self._vrpg_cost = lambda t: VRPGPolicyUpdater._t_single_traj_cost(self, t)
# TODO: Now that we have normalization, should we include _proj_params
#self._proj_params = self._policy._psrnet._params_proj
# self._proj_step = self._policy._psrnet._opt_U
def _construct_traj_info(self, trajs):
out = VRPGPolicyUpdater._construct_traj_info(self, trajs)
_add_feats_to_traj_info(self._policy._psrnet, out)
return out
def _t_single_traj_cost(self, t_single_traj_info):
return t_vrpg_traj_cost(self._policy, t_single_traj_info)
def _t_single_psr_cost(self, t_traj_info):
return t_psr_pred_loss(self._policy._psrnet, t_traj_info)
def _t_psr_cost(self, t_traj_info):
return create_true_mean_function_nonseq(t_traj_info, self._t_single_psr_cost)
def _construct_updates(self, t_traj_info):
self._t_lr = theano.shared(self._lr, 'lr')
t_traj_info = t_traj_info.copy()
print 'Building PSR cost function ... ',
tic = time()
t_psr_traj_info = t_traj_info.copy()
t_psr_states = _tf_get_psr_prestates(self._policy._psrnet, t_psr_traj_info, self.num_trajs)
t_psr_traj_info['pre_states'] = t_psr_states
t_cost_reinf = self._t_cost(t_psr_traj_info)
t_cost_pred = self._t_psr_cost(t_psr_traj_info)
# if globalconfig.vars.args.dbg_prederror > 0.0:
# print 'checking pred error'
# t_cost_pred = dbg_raise_BadPrediction(t_cost_pred, 'bad prediction ')
# print 'finished in %f seconds' % (time() - tic)
print 'Computing gradients ... normalize:', self._normalize_grad,
tic = time()
gclip = globalconfig.vars.args.gclip
beta = globalconfig.vars.args.beta
decay1 = globalconfig.vars.args.decay1
decay2 = globalconfig.vars.args.decay2
results = get_grad_update(t_cost_pred, t_cost_reinf, self._params,
self._beta_pred, self._beta_reinf, beta=beta,
normalize=self._normalize_grad, clip_bounds=[-gclip, gclip],
decay1=decay1, decay2=decay2)
updates = results['updates']
t_grads = results['grads']
keys = ['cost1_avg', 'cost2_avg', 'total_cost', 'a1', 'a2']
out = dict([(key, results[key]) for key in keys])
out['reinf_loss'] = t_cost_reinf
out['pred_loss'] = t_cost_pred
out.update(self.policy._psrnet.tf_get_weight_projections(self.reactive_policy.params[0], t_psr_states))
out.update(
{'var_g': T.sum([T.sum(gg ** 2) for gg in t_grads]), 'sum_g': T.sum([T.sum(T.abs_(gg)) for gg in t_grads])})
        print 'finished in %f seconds' % (time() - tic)
beta_lr = globalconfig.vars.args.beta_lr
lr = 1.0
if beta_lr != 0.0:
lr, lr_updates = _tf_get_learning_rate(t_grads,
beta=beta_lr) # TODO: try with combined grads and original grads
updates.extend(lr_updates)
print 'Computing optimizer updates ... ',
tic = time()
updates.extend(optimizers[self._optimizer](0.0, self._params, learning_rate=self._t_lr * lr, all_grads=t_grads))
updates.extend([(self._t_lr, self._beta_pred_decay * self._t_lr)])
print 'finished in %f seconds' % (time() - tic)
return updates, out
def _update(self, traj_info):
# try:
out = self._update_fn(*traj_info.values())
return {k: v for (k, v) in zip(self._out_names, out)}
# except PredictionError: # if no valid update no op
# print 'Catch Prediction error do not update, '
# return {} # Update PSR parameters
class PSR_AltOpt_TRPOPolicyUpdater(NNPolicyUpdater):
def __init__(self, *args, **kwargs):
self._grad_step = theano.shared(kwargs.pop('grad_step', 1e-3), 'grad_step')
self._lr = kwargs.pop('lr', 1e-3)
self._beta_pred = kwargs.pop('beta_pred', 1.0)
self._beta_reinf = kwargs.pop('beta_reinf', 0.0)
self._beta_pred_decay = kwargs.pop('beta_pred_decay', 1.0)
self._optimizer = kwargs.pop('cg_opt', 'adam')
TRPO_method = kwargs.pop('TRPO_method', TRPOPolicyUpdater)
super(PSR_AltOpt_TRPOPolicyUpdater, self).__init__(*args, **kwargs)
self._beta_only_reinf = 0.0
kwargs['lr'] = self._lr
kwargs.pop('policy', None)
trpo_args = (self._policy._policy,) + args[1:]
self._trpo = TRPO_method(*trpo_args, **kwargs)
self._normalize_grad = globalconfig.vars.args.norm_g
XF = T.matrix()
UF = T.matrix()
H = self._policy._psrnet.tf_compute_pre_states(XF, UF)
mu, S = self._policy._t_compute_gaussian(H)
self._act_dist_fn = theano.function(inputs=[XF, UF], outputs=[mu, S])
# self._proj_step = self._policy._psrnet._opt_U
self._policy_params = self._policy._policy.params
if globalconfig.vars.args.fix_psr:
self._params = []
else:
self._params = self._policy._psrnet.params
# self._proj_params = self._policy._psrnet._params_proj
def _construct_traj_info(self, trajs):
out = NNPolicyUpdater._construct_traj_info(self, trajs)
_add_feats_to_traj_info(self._policy._psrnet, out)
out['act_mean'] = np.empty_like(out['act'])
out['act_logstd'] = np.empty_like(out['act'])
for i in xrange(len(out['length'])):
out['act_mean'][i, :, :], out['act_logstd'][i, :, :] = \
self._act_dist_fn(out['obs_feats'][i], out['act_feats'][i])
return out
def _t_single_traj_cost(self, t_single_traj_info):
return t_vrpg_traj_cost(self._policy, t_single_traj_info)
def _t_single_psr_cost(self, t_traj_info):
return t_psr_pred_loss(self._policy._psrnet, t_traj_info)
def _t_psr_cost(self, t_traj_info):
return create_true_mean_function_nonseq(t_traj_info, self._t_single_psr_cost)
def _t_cost(self, t_traj_info):
return create_true_mean_function_nonseq(t_traj_info, self._t_single_traj_cost)
def _construct_updates(self, t_psr_traj_info):
        print 'Building PSR cost function ... ',
        tic = time()
        t_cost_reinf = self._t_cost(t_psr_traj_info)
        t_cost_pred = self._t_psr_cost(t_psr_traj_info)
        print 'finished in %f seconds' % (time() - tic)
updates = []
if len(self._params) > 0:
# if globalconfig.vars.args.dbg_prederror > 0.0:
# print 'checking pred error'
# t_cost_pred = dbg_raise_BadPrediction(t_cost_pred, 'bad prediction ')
# print 'finished in %f seconds' % (time() - tic)
print 'Computing gradients ... normalize:', self._normalize_grad,
tic = time()
gclip = globalconfig.vars.args.gclip
beta = globalconfig.vars.args.beta
results = get_grad_update(t_cost_pred, t_cost_reinf, self._params,
self._beta_pred, self._beta_reinf, beta=beta,
normalize=self._normalize_grad, clip_bounds=[-gclip, gclip])
updates = results['updates']
t_grads = results['grads']
print 'finished in %f seconds' % (time() - tic)
beta_lr = globalconfig.vars.args.beta_lr
lr = 1.0
            if beta_lr != 0.0:
lr, lr_updates = _tf_get_learning_rate(t_grads,
beta=beta_lr) # TODO: try with combined grads and original grads
updates.extend(lr_updates)
print 'Computing optimizer updates ... ',
tic = time()
updates.extend(
optimizers[self._optimizer](0.0, self._params, learning_rate=self._grad_step * lr, all_grads=t_grads))
updates.extend([(self._grad_step, self._beta_pred_decay * self._grad_step)])
print 'finished in %f seconds' % (time() - tic)
return updates, {'reinf_loss': t_cost_reinf, 'pred_loss': t_cost_pred}
def _build_updater(self, t_traj_info):
print 'Building TRPO Component'
t_psr_traj_info = t_traj_info.copy()
self._trpo._build_updater(t_traj_info)
print 'Compiling state function ... ',
tic = time()
t_psr_states = _tf_get_psr_prestates(self._policy._psrnet, t_psr_traj_info, self.num_trajs)
self._state_fn = theano.function(inputs=t_psr_traj_info.values(),
outputs=t_psr_states,
on_unused_input='ignore')
print 'finished in %f seconds' % (time() - tic)
# Compute PSR parameter updates
t_psr_traj_info['pre_states'] = t_psr_states
updates, out = self._construct_updates(t_psr_traj_info)
self._psr_update_fn = theano.function(inputs=t_traj_info.values(), updates=updates,
on_unused_input='ignore', outputs=out.values())
self._out_names = out.keys()
print 'finished in %f seconds' % (time() - tic)
def _update(self, traj_info):
# try:
obs = np.copy(traj_info['pre_states'])
# Replace observation model states with PSR states
states = self._state_fn(*traj_info.values())
traj_info['pre_states'] = states
# Update reactive policy
out_trpo = self._trpo._update(traj_info)
# Update PSR Model
traj_info['pre_states'] = obs
out = self._psr_update_fn(*traj_info.values())
out = {k: v for (k, v) in zip(self._out_names, out)}
out.update(out_trpo)
return out
# except PredictionError: # as e: #if no valid update no op
# print 'Catch Prediction error do not update, '
# return {} # Update PSR parameters
#
# class jointOp_PolicyUpdater(object):
# def psr_pred_cost(self, t_single_traj_info):
# pred_cost = t_psr_pred_loss(self._policy._psrnet, t_single_traj_info)
# return pred_cost
#
# def _reinf_cost(self, t_single_traj_info):
# reinf_cost = t_vrpg_traj_cost(self._policy, t_single_traj_info)
# return reinf_cost
#
# def _construct_updates(self, t_traj_info):
# print 'Building PSR cost function ... ',
# tic = time()
# t_psr_states = _tf_get_psr_prestates(self._policy._psrnet, t_traj_info, self.num_trajs)
# t_psr_traj_info = t_traj_info.copy()
# t_psr_traj_info['pre_states'] = t_psr_states
# t_pred_cost = create_true_mean_function_nonseq(t_psr_traj_info, self.psr_pred_cost)
# t_reinf_cost = create_true_mean_function_nonseq(t_psr_traj_info, self._reinf_cost)
#
# print 'finished in %f seconds' % (time() - tic)
# print 'Get gradient function'
# tic = time()
# gclip = globalconfig.vars.args.gclip
# t_grads = get_grad_update_old(t_pred_cost, t_reinf_cost, self._params, self._beta_pred, self._beta_reinf,
# clip_bounds=[-gclip, gclip])
# print 'finished in %f seconds' % (time() - tic)
# keys = ['cost1_avg', 'cost2_avg', 'total_cost', 'a1', 'a2']
# out = dict([(key, t_grads[key]) for key in keys])
#
# out.update(self.policy._psrnet.tf_get_weight_projections(self.reactive_policy.params[0], t_psr_states))
#
# if globalconfig.vars.args.dbg_prederror > 0.0:
# print 'checking pred error'
# t_pred_cost = dbg_raise_BadPrediction(t_pred_cost, 'bad prediction ')
#
# print 'Compiling PSR update function ... ',
# tic = time()
# psr_updates = optimizers[self._optimizer](0.0, self._params, learning_rate=self._grad_step,
# all_grads=t_grads['grads'])
# proj_updates = [] if self._proj_step == 0.0 else optimizers[self._optimizer](t_pred_cost, self._proj_params,
# self._proj_step)
# reinf_updates = []
# if self._beta_only_reinf > 0:
# print '\nlr ', self._lr, self._policy_params
# t_rgrads = get_grad_update_old(0.0, t_reinf_cost, self._policy_params, 0.0, 1.0)
# reinf_updates = optimizers[self._optimizer](0.0, self._policy_params, learning_rate=self._lr,
# all_grads=t_rgrads['grads'])
#
# print 'finished in %f seconds' % (time() - tic)
# return t_grads['updates'] + psr_updates + proj_updates + reinf_updates, out
#
# class PSR_JointVRPG_PolicyUpdater(PSR_VRPGPolicyUpdater, jointOp_PolicyUpdater):
# def __init__(self, *args, **kwargs):
# self._beta_only_reinf = kwargs.pop('beta_only_reinf')
# PSR_VRPGPolicyUpdater.__init__(self, *args, **kwargs)
#
# # override
# def _construct_updates(self, t_psr_traj_info):
# return jointOp_PolicyUpdater._construct_updates(self, t_psr_traj_info)
#
#
# class PSR_JointAltOp_PolicyUpdater(PSR_AltOpt_TRPOPolicyUpdater, jointOp_PolicyUpdater):
# def __init__(self, *args, **kwargs):
# return PSR_AltOpt_TRPOPolicyUpdater.__init__(self, *args, **kwargs)
#
# # override
# def _construct_updates(self, t_psr_traj_info):
# return jointOp_PolicyUpdater._construct_updates(self, t_psr_traj_info)
#
#
# class NormVRPG_PolicyUpdater(VRPGPolicyUpdater, jointOp_PolicyUpdater):
# # override
# def _construct_updates(self, t_traj_info):
# tic = time()
# self._t_lr = theano.shared(self._lr, 'lr')
# t_reinf_cost = create_true_mean_function_nonseq(t_traj_info, self._reinf_cost)
# print 'finished in %f seconds' % (time() - tic)
#
# print 'Get gradient function'
# tic = time()
# t_grads = get_grad_update(0.0, t_reinf_cost, self._params, 0.0, 1.0)
# print 'finished in %f seconds' % (time() - tic)
# keys = ['cost2_avg', 'total_cost', 'a2']
# out = dict([(key, t_grads[key]) for key in keys])
# print 'Compiling PSR update function ... ',
# tic = time()
# updates = optimizers[self._optimizer](0.0, self._params, learning_rate=self._t_lr, all_grads=t_grads['grads'])
#
# print 'finished in %f seconds' % (time() - tic)
# return t_grads['updates'] + updates, out
|
[
"zmarinho@cmu.edu"
] |
zmarinho@cmu.edu
|
6922ab6a1df517a98324ed90d6e9147cf458066a
|
c53c623b6648d608d409ff7a45d1d735fecd35d9
|
/62p.py
|
472293b7924ca97433e3a8ff6fe9703199656a44
|
[] |
no_license
|
kasthuri2698/python
|
e15ac37773fd8250a520682eba54cbfe00676ccf
|
cdb8d85810eba18ed26afbfd8daa9eb1319f3a3f
|
refs/heads/master
| 2020-06-14T14:42:22.508096
| 2019-07-19T10:03:55
| 2019-07-19T10:03:55
| 195,030,084
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 99
|
py
|
n=input()
p=set(n)
s={'0','1'}
if s==p or s=={'0'} or s=={'1'}:
print("yes")
else:
print("no")
|
[
"noreply@github.com"
] |
kasthuri2698.noreply@github.com
|
61837a24ba3f7e8bc7d91df2e2d49c1a535ef240
|
e9dfdeccaf45ea99d80a74398f91065290eb66e8
|
/project2/model/lstm_trainer.py
|
c3c231d273d2fc54394589bc56273682d1159d47
|
[] |
no_license
|
puccife/ML_2017
|
ccd07954141b7de0df80752cb4f8e378f5178d35
|
27055ffd5ed3685f089b31b900198f132ddb7124
|
refs/heads/master
| 2021-03-24T11:59:50.621346
| 2017-12-21T22:11:41
| 2017-12-21T22:11:41
| 107,022,370
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,095
|
py
|
import re
import numpy as np
import pandas as pd
from nltk.corpus import stopwords
import tensorflow as tf
from nltk.stem import SnowballStemmer
from keras.preprocessing.text import Tokenizer
import tflearn
from tflearn.data_utils import to_categorical, pad_sequences
class LSTMTrainer:
FLAGS = None
embeddings_index = {}
embedding_matrix = None
x_train = None
y_train = None
x_val = None
y_val = None
model = None
test_data = None
test_ids = None
def __init__(self, FLAGS):
self.FLAGS = FLAGS
self.__init_model()
def __init_model(self):
        # All train tweets are read from the data files and collected in a list,
        # after first being cleaned by the text_to_wordlist function
tweets_pos = [self.text_to_wordlist(line.rstrip('\n')) for line in open(self.FLAGS.train_data_file_pos, 'r', encoding='utf-8')]
tweets_neg = [self.text_to_wordlist(line.rstrip('\n')) for line in open(self.FLAGS.train_data_file_neg, 'r', encoding='utf-8')]
tweets_train = tweets_pos + tweets_neg
labels = np.ones(len(tweets_train), dtype=np.int8)
        for i in range(int(len(labels) / 2), len(labels)):
            # the second half of the list holds the negative tweets, so set their
            # labels to 0 (this assumes equally many positive and negative tweets)
            labels[i] = 0
print('Number of train tweets: %d' % len(tweets_train))
print('Number of labels: %d' % len(labels))
# Reading and "cleaning" of the test dataset
tweets_test = []
tweets_test_ids = []
for line in open(self.FLAGS.test_data_file, 'r', encoding='utf-8'):
            temp = line.split(',')  # the first element is the tweet id; the rest is the whole tweet
tweets_test_ids.append(temp[0])
temp.pop(0)
temp = self.text_to_wordlist(" ".join(temp))
tweets_test.append(temp)
print('Number of test tweets: %d' % len(tweets_test))
######################################
# prepare tokenizer
######################################
print('Initializing Tokenizer')
# The tokenizer is fitted on both of the datasets and the maximum num words is being set manually
tokenizer = Tokenizer(num_words=self.FLAGS.max_nb_words)
tokenizer.fit_on_texts(tweets_train + tweets_test)
# tokenizer.fit_on_texts(tweets_test)
sequences = tokenizer.texts_to_sequences(tweets_train)
test_sequences = tokenizer.texts_to_sequences(tweets_test)
word_index = tokenizer.word_index
print('Found %s unique tokens' % len(word_index))
        train_data = pad_sequences(sequences, maxlen=self.FLAGS.max_sequence_length)  # pad the sequences to the maximum length of 30
        labels = to_categorical(np.array(labels), nb_classes=2)  # labels become a binary matrix, since we use categorical_crossentropy
print('Shape of train_data tensor:', train_data.shape)
print('Shape of label tensor:', labels.shape)
self.test_data = pad_sequences(test_sequences, maxlen=self.FLAGS.max_sequence_length)
self.test_ids = np.array(tweets_test_ids)
print('Shape of test_data tensor:', self.test_data.shape)
######################################
# prepare embeddings
######################################
print('Preparing embedding matrix')
# We use the generated dictionary from the GloVe text file in order to get all of the word vectors
# from our word dictionary - word_index. This dictionary is generated by the Tokenizer from all of the possible train tweets.
num_words = min(self.FLAGS.max_nb_words, len(word_index))
self.embedding_matrix = np.zeros((num_words, self.FLAGS.embedding_dim))
for word, i in word_index.items():
            if i >= num_words:
                continue  # word_index is not ordered by index, so skip (not break on) out-of-range entries
embedding_vector = self.embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
self.embedding_matrix[i] = embedding_vector
print('Null word embeddings: %d' % np.sum(np.sum(self.embedding_matrix, axis=1) == 0))
######################################
# validation data
######################################
print('Preparing validation data')
# In this part of the code we generate the validation data from the train set
indices = np.arange(train_data.shape[0]) # we get the number of the max possible indices
np.random.shuffle(indices) # and we shuffle them
data = train_data[indices]
labels = labels[indices]
nb_validation_samples = int(self.FLAGS.validation_split * data.shape[0]) # our validation set is 20% from the train tweets
self.x_train = data[:-nb_validation_samples]
self.y_train = labels[:-nb_validation_samples]
self.x_val = data[-nb_validation_samples:]
self.y_val = labels[-nb_validation_samples:]
self.create_model()
def indexing_wordvectors(self):
print('Indexing word vectors')
f = open(self.FLAGS.embedding_dir, 'r', encoding='utf-8')
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
self.embeddings_index[word] = coefs #
f.close()
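    # For reference, each line of a GloVe embeddings file is a token followed
    # by its vector components, e.g. "the 0.418 0.24968 -0.41242 ...", which is
    # why values[0] is the word and values[1:] are the coefficients.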
# The function "text_to_wordlist" is from
# https://www.kaggle.com/currie32/quora-question-pairs/the-importance-of-cleaning-text
def text_to_wordlist(self, text, remove_stopwords=False, stem_words=False):
# Clean the text, with the option to remove stopwords and to stem words.
# Convert words to lower case and split them
text = text.lower().split()
# Optionally, remove stop words
if remove_stopwords:
stops = set(stopwords.words("english"))
text = [w for w in text if w not in stops]
text = " ".join(text)
# Clean the text
text = re.sub(r"<user>", "", text)
text = re.sub(r"<url>", "", text)
text = re.sub(r"plz", "please", text)
text = re.sub(r"dat", "that", text)
text = re.sub(r"bc", "because", text)
text = re.sub(r"jk", "joke", text)
text = re.sub(r"ya", "your", text)
text = re.sub(r"thang", "thing", text)
text = re.sub(r"dunno", "do not know", text)
text = re.sub(r"doin", "doing", text)
text = re.sub(r"lil", "little", text)
text = re.sub(r"tmr", "tomorrow", text)
text = re.sub(r"#", "", text)
text = re.sub(r">", "", text)
text = re.sub(r"> >", " ", text)
text = re.sub(r"[^A-Za-z0-9^,!./'+-=]", " ", text)
text = re.sub(r"what's", "what is ", text)
text = re.sub(r"\'s", " ", text)
text = re.sub(r"\'ve", " have ", text)
text = re.sub(r"can't", "cannot ", text)
text = re.sub(r"n't", " not ", text)
text = re.sub(r"i'm", "i am ", text)
text = re.sub(r"\'re", " are ", text)
text = re.sub(r"\'d", " would ", text)
text = re.sub(r"\'ll", " will ", text)
text = re.sub(r",", " ", text)
text = re.sub(r"\.", " ", text)
text = re.sub(r"!", " ! ", text)
text = re.sub(r"/", " ", text)
text = re.sub(r"\^", " ^ ", text)
text = re.sub(r"\+", " + ", text)
text = re.sub(r"-", " - ", text)
text = re.sub(r"=", " = ", text)
text = re.sub(r"'", " ", text)
text = re.sub(r"(\d+)(k)", r"\g<1>000", text)
text = re.sub(r":", " : ", text)
text = re.sub(r" u s ", " american ", text)
text = re.sub(r"\0s", "0", text)
text = re.sub(r" 9 11 ", "911", text)
text = re.sub(r"e - mail", "email", text)
text = re.sub(r"\s{2,}", " ", text)
# Optionally, shorten words to their stems
if stem_words:
text = text.split()
stemmer = SnowballStemmer('english')
stemmed_words = [stemmer.stem(word) for word in text]
text = " ".join(stemmed_words)
# Return a list of words
return text
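    # Illustrative example (approximate): text_to_wordlist("I'm testing, plz!")
    # yields roughly "i am testing please !" after contraction expansion,
    # slang substitution and punctuation spacing.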
def create_model(self):
tf.reset_default_graph()
net = tflearn.input_data([None, self.FLAGS.max_sequence_length])
net = tflearn.embedding(net, input_dim=self.FLAGS.max_nb_words, output_dim=self.FLAGS.embedding_dim,
trainable=False,
name='embeddingLayer')
net = tflearn.lstm(net, 256, return_seq=True)
net = tflearn.dropout(net, 0.5)
net = tflearn.lstm(net, 256)
net = tflearn.dropout(net, 0.5)
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')
        # We use tensorboard_verbose=3 for the most detailed visualisation and
        # save a checkpoint whenever the validation accuracy exceeds 0.864.
        self.model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=3, best_val_accuracy=0.864,
                                 best_checkpoint_path='checkpoints\\model6\\' + self.FLAGS.model_name)
    def train_model(self):
        print('Starting the training process!')
        embeddingsLayer = tflearn.get_layer_variables_by_name('embeddingLayer')[0]
        # The custom weight matrix generated from GloVe is set as the weights of the embedding layer
        self.model.set_weights(embeddingsLayer, self.embedding_matrix)
self.model.fit(self.x_train, self.y_train, validation_set=(self.x_val, self.y_val), n_epoch=5,
show_metric=True, batch_size=256, shuffle=True)
self.model.save(self.FLAGS.model_path)
print('Training done!')
def test_model(self):
print('Testing the model!')
self.model.load(model_file=self.FLAGS.model_path)
preds = self.model.predict(self.test_data)
preds_array = []
        # The prediction matrix has dimension 10000x2: column 0 is the probability
        # of negative sentiment and column 1 the probability of positive sentiment.
        for i in range(0, len(preds)):
            index = np.argmax(preds[i, :])
            if index == 0:
                # the negative column is larger, so the prediction for this tweet is negative (-1)
                preds_array.append(-1)
            else:
                # otherwise the tweet is predicted to have positive sentiment
                preds_array.append(1)
preds_array = np.array(preds_array)
# Generating submission file
submission = pd.DataFrame({'Id': self.test_ids, 'Prediction': preds_array})
submission.to_csv('./predictions_csv/LSTM_prediction.csv', sep=',', index=False)
|
[
"federico.pucci2@studio.unibo.it"
] |
federico.pucci2@studio.unibo.it
|
6951524d2ccc8dea70f436ce5f82d6cd1b26dda6
|
825da00a4adb85cc71b64d46a127abc3e2277431
|
/CJH-Blog/Blog/Permission.py
|
07352b8de7e8a963ca45fb842a341ecf3519f622
|
[] |
no_license
|
xcaojianhong/CJH-Blog
|
4289342125d506732210dcf95caf1b2bcb05b0c9
|
f7049b5e0f8b48b7cf8279d8527658fdb3dcd484
|
refs/heads/master
| 2021-06-20T02:27:29.533958
| 2017-08-15T06:33:44
| 2017-08-15T06:33:44
| 100,347,478
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,310
|
py
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# =================================================
# @Time : 2017/7/26 14:47
# @Author : Cao jianhong
# @File : Permission.py
# @Software: PyCharm Edu
# =================================================
from flask_principal import Permission, RoleNeed, UserNeed, identity_loaded
from flask_login import current_user
'''
# Build the permission groups
# A tutorial I saw wrote these the other way around; I was not sure whether I
# had misunderstood or the author made a mistake, so I tried it out first.
# It does indeed need to be written in reverse, which is harder to follow.
reader_permission = Permission(RoleNeed('reader'))
writer_permission = Permission(RoleNeed('writer')).union(reader_permission)
editor_permission = Permission(RoleNeed('editor')).union(writer_permission)
admin_permission = Permission(RoleNeed('admin')).union(editor_permission)
su_permission = Permission(RoleNeed('su')).union(admin_permission)
'''
# Build the permission groups
su_permission = Permission(RoleNeed('su'))
admin_permission = Permission(RoleNeed('admin')).union(su_permission)
editor_permission = Permission(RoleNeed('editor')).union(admin_permission)
writer_permission = Permission(RoleNeed('writer')).union(editor_permission)
reader_permission = Permission(RoleNeed('reader')).union(writer_permission)
@identity_loaded.connect  # equivalent to the form below; it binds to the current app automatically
# @identity_loaded.connect_via(current_app)
def on_identity_loaded(sender, identity):
    # Set the current user identity to the flask_login user object
identity.user = current_user
    # Add a UserNeed to the identity for this user
if hasattr(current_user, 'id'):
identity.provides.add(UserNeed(current_user.id))
    # Add the user's Role to the identity
if hasattr(current_user, 'role'):
identity.provides.add(RoleNeed(current_user.role))
if hasattr(current_user, 'is_su_user') and current_user.is_su_user:
identity.provides.add(RoleNeed('su'))
    # Attach the identity to the permissions
identity.allow_su = su_permission.allows(identity)
identity.allow_admin = admin_permission.allows(identity)
identity.allow_edit = editor_permission.allows(identity)
identity.allow_write = writer_permission.allows(identity)
identity.allow_read = reader_permission.allows(identity)
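# A hedged usage sketch (not part of this module): once an identity carries
# these role needs, a view can be guarded with the standard flask_principal
# decorator, e.g.:
#
#     @app.route('/admin')
#     @admin_permission.require(http_exception=403)
#     def admin_dashboard():
#         return 'admins and su only'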
|
[
"1254798548@qq.com"
] |
1254798548@qq.com
|
83f15b271373d6431cf0cde4f1ee5a6159365e73
|
056a1050e0e0bf78eeedaf978841a61e5203936a
|
/Atom-Python/Test-Project/test-module.py
|
d462d8071b754abdf009309456b1bfbb603eadf8
|
[
"MIT"
] |
permissive
|
DSNR/snippets
|
cc3990f0ac467e19754f0a76598809eddede95fd
|
12006dd083be60c6444d8b5ca48fd917005e081b
|
refs/heads/master
| 2023-08-23T18:55:34.038472
| 2021-10-11T02:35:28
| 2021-10-11T02:35:28
| 415,750,242
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
class Employee:
"""A sample Employee class"""
def __init__(self, first, last):
self.first = first
self.last = last
print('Created Employee: {} - {}'.format(self.fullname, self.email))
@property
def email(self):
return '{}.{}@email.com'.format(self.first, self.last)
@property
def fullname(self):
return '{} {}'.format(self.first, self.last)
emp_1 = Employee('John', 'Smith')
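# Note: email and fullname are properties, so they are read without calling:
# print(emp_1.email)     # -> John.Smith@email.com
# print(emp_1.fullname)  # -> John Smith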
|
[
"stuart@ariia.co.uk"
] |
stuart@ariia.co.uk
|
cdbe8e9bb63642c75190d90e755129166a9de7a0
|
92047ed9c5b5e681e80678dc0a7331e90de0efd0
|
/patterns/object_pool.py
|
c7d58335670a708c27f72df938fd9f10b4532b42
|
[] |
no_license
|
lmbonnefont/design_patterns
|
bf572c8535009c31cd31a5c747292c73fba8dd4b
|
066e0931106134fba7b645384137c525a09df35c
|
refs/heads/main
| 2023-06-02T03:40:54.895014
| 2021-06-18T12:35:15
| 2021-06-18T12:35:15
| 375,687,964
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,432
|
py
|
# Object pool design pattern
from exceptions import TooManyCatsAskedException, NoMoreCatException
class Cat:
def __init__(self, name: str):
self.name = name
class CatPoolMeta(type):
_instances = {}
def __call__(cls, *args, **kwargs):
"""
Possible changes to the value of the `__init__` argument do not affect
the returned instance.
"""
if cls not in cls._instances:
instance = super().__call__(*args, **kwargs)
cls._instances[cls] = instance
return cls._instances[cls]
class CatPool(metaclass=CatPoolMeta):
def __init__(self, size: int):
names = ["Ricou", "Pilou", "Croqmou", "Voyou", "Picsou"]
if size > 5:
print("Cannot create more than 5 cats sorry")
raise TooManyCatsAskedException("You asked too many cats, the maximum is 5")
self.cats = [Cat(names[i]) for i in range(size)]
def get_cat(self):
if self.cats:
print(f"Here is your cat, his name is {self.cats[0].name}. Treat him nicely.")
return self.cats.pop(0)
else:
raise NoMoreCatException("No more cat available")
def release_cat(self, cat):
if isinstance(cat, Cat):
self.cats.append(cat)
print("Thank you, your cat has been reintegrated to the pool")
else:
raise ValueError("Cannot add anything else but cat")
if __name__ == '__main__':
# This design pattern is about limiting the creation of objects which are expensive to create (DB connexion for instance)
# We create a pool of instances which will be reused in the future. Each time we need an object we get it from the pool.
# When we do not need it anymore we release it
# It is commonly used with a singleton pattern to ensure the pool uniqueness
# The objects must be immutables so you don't get corrupted cats from the pool
# When a pool is empty, we can either raise an error to say there is no more cat or create new instances of cat and let the pool grow.
cp1 = CatPool(size=2)
cp2 = CatPool(size=1)
if id(cp1) == id(cp2):
print("Singleton works, both variables contain the same instance.")
else:
print("Singleton failed, variables contain different instances.")
first_cat = cp1.get_cat()
second_cat = cp1.get_cat()
cp1.release_cat(first_cat)
third_cat = cp1.get_cat()
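    # Illustrative: the pool is now empty (two cats taken, one released, one
    # taken again), so a further request would raise NoMoreCatException:
    # fourth_cat = cp1.get_cat()  # raises NoMoreCatException("No more cat available")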
|
[
"louis_marie.bonnefont@edu.escpeurope.eu"
] |
louis_marie.bonnefont@edu.escpeurope.eu
|